id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
156,690 | import argparse
import fnmatch
from typing import Dict
from mmengine.config import Config, ConfigDict
from opencompass.openicl.icl_inferencer import (CLPInferencer, GenInferencer,
PPLInferencer)
from opencompass.registry import ICL_PROMPT_TEMPLATES, ICL_RETRIEVERS
from opencompass.utils import (Menu, build_dataset_from_cfg,
build_model_from_cfg, dataset_abbr_from_cfg,
model_abbr_from_cfg)
def print_prompts(model_cfg, dataset_cfg):
    """Reconstruct and print sample prompts for a (model, dataset) pair.

    Mirrors the prompt-building logic of the inferencers (including
    in-context-example truncation against the model's max_seq_len) so the
    exact prompt can be inspected without running inference.

    Args:
        model_cfg: Model config, or a falsy value to skip token-length
            based truncation entirely.
        dataset_cfg: Dataset config whose ``infer_cfg`` describes the
            retriever, inferencer and prompt templates.
    """
    # TODO: A really dirty method that copies code from PPLInferencer and
    # GenInferencer. In the future, the prompt extraction code should be
    # extracted and generalized as a static method in these Inferencers
    # and reused here.
    if model_cfg:
        max_seq_len = model_cfg.max_seq_len
        if not model_cfg['type'].is_api:
            # Only the tokenizer is needed for measuring prompt length;
            # skip loading model weights.
            model_cfg['tokenizer_only'] = True
        model = build_model_from_cfg(model_cfg)
    else:
        max_seq_len = None
        model = None
    infer_cfg = dataset_cfg.get('infer_cfg')
    # Optional fixed set of in-context example ids.
    fix_id_list = infer_cfg.inferencer.get('fix_id_list', [])
    dataset = build_dataset_from_cfg(dataset_cfg)
    ice_template = None
    if hasattr(infer_cfg, 'ice_template'):
        ice_template = ICL_PROMPT_TEMPLATES.build(infer_cfg['ice_template'])
    prompt_template = None
    if hasattr(infer_cfg, 'prompt_template'):
        prompt_template = ICL_PROMPT_TEMPLATES.build(
            infer_cfg['prompt_template'])
    infer_cfg['retriever']['dataset'] = dataset
    retriever = ICL_RETRIEVERS.build(infer_cfg['retriever'])
    if fix_id_list:
        ice_idx_list = retriever.retrieve(fix_id_list)
    else:
        ice_idx_list = retriever.retrieve()
    # NOTE(review): the elif below also matches CLPInferencer, which this
    # assert rejects -- that part of the branch is unreachable as written.
    assert infer_cfg.inferencer.type in [PPLInferencer, GenInferencer], \
        'Only PPLInferencer and GenInferencer are supported'
    if infer_cfg.inferencer.type == PPLInferencer:
        # PPL-style inference: one prompt per candidate label.
        labels = retriever.get_labels(ice_template=ice_template,
                                      prompt_template=prompt_template)
        ice = [
            retriever.generate_ice(ice_idx_list[idx],
                                   ice_template=ice_template)
            for idx in range(len(ice_idx_list))
        ]
        print('-' * 100)
        print('ICE Template:')
        print('-' * 100)
        print(ice[0])
        print('-' * 100)
        for label in labels:
            # Only the first sample (idx 0) is printed per label.
            idx = 0
            prompt = retriever.generate_label_prompt(
                idx,
                ice[idx],
                label,
                ice_template=ice_template,
                prompt_template=prompt_template,
                remain_sep=None)
            if max_seq_len is not None:
                prompt_token_num = model.get_token_len_from_template(prompt)
                # Drop in-context examples from the end until the prompt
                # fits within max_seq_len.
                while len(ice_idx_list[idx]
                          ) > 0 and prompt_token_num > max_seq_len:
                    num_ice = len(ice_idx_list[idx])
                    print(f'Truncating ice {num_ice} -> {num_ice - 1}',
                          f'Number of tokens: {prompt_token_num} -> ...')
                    ice_idx_list[idx] = ice_idx_list[idx][:-1]
                    ice[idx] = retriever.generate_ice(
                        ice_idx_list[idx], ice_template=ice_template)
                    prompt = retriever.generate_label_prompt(
                        idx,
                        ice[idx],
                        label,
                        ice_template=ice_template,
                        prompt_template=prompt_template)
                    prompt_token_num = model.get_token_len_from_template(
                        prompt)
                print(f'Number of tokens: {prompt_token_num}')
            if model is not None:
                prompt = model.parse_template(prompt, mode='ppl')
            print('-' * 100)
            print(f'Label: {label}')
            print('Sample prompt:')
            print('-' * 100)
            print(prompt)
            print('-' * 100)
    elif infer_cfg.inferencer.type in [GenInferencer, CLPInferencer]:
        # Generation-style inference: a single prompt for the first sample.
        idx, ice_idx = 0, ice_idx_list[0]
        ice = retriever.generate_ice(ice_idx, ice_template=ice_template)
        prompt = retriever.generate_prompt_for_generate_task(
            idx,
            ice,
            gen_field_replace_token=infer_cfg.inferencer.get(
                'gen_field_replace_token', ''),
            ice_template=ice_template,
            prompt_template=prompt_template)
        if max_seq_len is not None:
            prompt_token_num = model.get_token_len_from_template(prompt)
            # Same truncation loop as above, for the generation prompt.
            while len(ice_idx) > 0 and prompt_token_num > max_seq_len:
                num_ice = len(ice_idx)
                print(f'Truncating ice {num_ice} -> {num_ice - 1}',
                      f'Number of tokens: {prompt_token_num} -> ...')
                ice_idx = ice_idx[:-1]
                ice = retriever.generate_ice(ice_idx,
                                             ice_template=ice_template)
                prompt = retriever.generate_prompt_for_generate_task(
                    idx,
                    ice,
                    gen_field_replace_token=infer_cfg.inferencer.get(
                        'gen_field_replace_token', ''),
                    ice_template=ice_template,
                    prompt_template=prompt_template)
                prompt_token_num = model.get_token_len_from_template(prompt)
            print(f'Number of tokens: {prompt_token_num}')
        if model is not None:
            prompt = model.parse_template(prompt, mode='gen')
        print('-' * 100)
        print('Sample prompt:')
        print('-' * 100)
        print(prompt)
        print('-' * 100)
156,691 | import pandas as pd
import os
import argparse
def parse_args():
    """Parse command-line options: the score file path and score column name."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', help='file path', type=str)
    parser.add_argument('--key', help='score column name', type=str)
    return parser.parse_args()
156,692 | import argparse
import copy
import json
import os.path as osp
import mmengine
from mmengine.config import Config, ConfigDict
from opencompass.utils import build_dataset_from_cfg, get_infer_output_path
def parse_args():
    """Parse CLI arguments for running an evaluation task."""
    parser = argparse.ArgumentParser(description='Run an evaluation task')
    parser.add_argument('config', help='Train config file path')
    parser.add_argument(
        '-w',
        '--work-dir',
        default=None,
        type=str,
        help=('Work path, all the outputs will be '
              'saved in this path, including the slurm logs, '
              'the evaluation results, the summary results, etc.'
              'If not specified, the work_dir will be set to '
              './outputs/default.'))
    return parser.parse_args()
156,695 | import argparse
import os
from dataclasses import dataclass, field
from os.path import join
from typing import *
import bitsandbytes as bnb
import datasets
import evaluate
import torch
import transformers
from peft import get_peft_model, PeftModel, prepare_model_for_kbit_training, LoraConfig
from peft.tuners.lora import LoraLayer
from transformers import BitsAndBytesConfig
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
def preprocess_logits_for_metrics(logits, labels):
    """Reduce raw logits to predicted token ids before metric computation."""
    # Some models return a tuple (logits, past_key_values, ...); the
    # prediction scores are always the first element.
    scores = logits[0] if isinstance(logits, tuple) else logits
    return scores.argmax(dim=-1)
156,696 | import argparse
import os
from dataclasses import dataclass, field
from os.path import join
from typing import *
import bitsandbytes as bnb
import datasets
import evaluate
import torch
import transformers
from peft import get_peft_model, PeftModel, prepare_model_for_kbit_training, LoraConfig
from peft.tuners.lora import LoraLayer
from transformers import BitsAndBytesConfig
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
def compute_metrics(eval_preds):
    """Compute token-level accuracy from (predictions, labels).

    Predictions at position i score the token at position i+1, so labels
    are shifted left and the last prediction is dropped before comparing.
    """
    preds, labels = eval_preds
    shifted_labels = labels[:, 1:].reshape(-1)
    shifted_preds = preds[:, :-1].reshape(-1)
    accuracy = evaluate.load("accuracy")
    return accuracy.compute(predictions=shifted_preds, references=shifted_labels)
156,697 | import argparse
import os
from dataclasses import dataclass, field
from os.path import join
from typing import *
import bitsandbytes as bnb
import datasets
import evaluate
import torch
import transformers
from peft import get_peft_model, PeftModel, prepare_model_for_kbit_training, LoraConfig
from peft.tuners.lora import LoraLayer
from transformers import BitsAndBytesConfig
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
@dataclass
class PeftConfig:
    """Arguments for (Q)LoRA fine-tuning, consumed by transformers.HfArgumentParser.

    BUGFIX: the original class was missing the ``@dataclass`` decorator.
    ``HfArgumentParser`` (see the ``train()`` entry point in this file) only
    accepts dataclass types, and without the decorator the ``field(...)``
    assignments are inert class attributes rather than parsed arguments.
    """
    model_name_or_path: Optional[str] = field(metadata={"help": "Path to pretrained model checkpoint"})
    data_files: Optional[str] = field(default=None, metadata={"help": "Local data files"})
    max_length: int = field(default=1024, metadata={
        "help": "Maximum source + target sequence length. Sequences will be right padded (and possibly truncated)."}, )
    preprocess_num_workers: int = field(default=4,
                                        metadata={"help": "The number of processes to use for the preprocessing."})
    pad_to_max_length: bool = field(default=True, metadata={
        "help": "Pad all examples to max_length. This is for fair comparison between different batch size"})
    # Lora related param here
    adam8bit: bool = field(default=False, metadata={"help": "Use 8-bit adam."})
    double_quant: bool = field(default=True,
                               metadata={"help": "Compress the quantization statistics through double quantization."})
    quant_type: str = field(default="nf4",
                            metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."})
    bits: int = field(default=4, metadata={"help": "How many bits to use."})
    lora_r: int = field(default=64, metadata={"help": "Lora R dimension."})
    lora_alpha: float = field(default=16, metadata={"help": " Lora alpha."})
    lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout."})
    full_finetune: bool = field(default=False, metadata={"help": "Finetune the entire model without adapters."})
    # BUGFIX: help text was copy-pasted from full_finetune.
    do_infer: bool = field(default=False, metadata={"help": "Run inference instead of training."})
class SavePeftModelCallback(transformers.TrainerCallback):
    """Trainer callback that persists only the PEFT adapter weights.

    After each save it removes the full ``pytorch_model.bin`` dumped by the
    default checkpointing so checkpoints stay adapter-sized, and on train
    end it drops an empty ``completed`` marker file in the output dir.
    """

    def save_model(self, args, state, kwargs):
        print('Saving PEFT checkpoint...')
        # Prefer the tracked best-model folder; otherwise fall back to the
        # step-numbered checkpoint directory.
        if state.best_model_checkpoint is not None:
            folder = os.path.join(state.best_model_checkpoint, "adapter_model")
        else:
            folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
        adapter_path = os.path.join(folder, "adapter_model")
        kwargs["model"].save_pretrained(adapter_path)
        stale_full_model = os.path.join(folder, "pytorch_model.bin")
        if os.path.exists(stale_full_model):
            os.remove(stale_full_model)

    def on_save(self, args, state, control, **kwargs):
        self.save_model(args, state, kwargs)
        return control

    def on_train_end(self, args, state, control, **kwargs):
        # Touch a marker file signalling that the run finished.
        marker = join(args.output_dir, 'completed')
        with open(marker, 'a'):
            os.utime(marker, None)
        self.save_model(args, state, kwargs)
def print_trainable_parameters(model, bits):
    """Print trainable vs. total parameter counts for *model*.

    When ``bits == 4`` the trainable count is halved -- presumably to
    report the effective count for packed 4-bit storage.
    """
    trainable = 0
    total = 0
    for _, param in model.named_parameters():
        total += param.numel()
        if param.requires_grad:
            trainable += param.numel()
    if bits == 4:
        trainable /= 2
    if total != 0:
        print(
            f"trainable params: {trainable} || "
            f"all params: {total} || "
            f"trainable: {100 * trainable / (total):.2f}%"
        )
def get_accelerate_model(args, checkpoint_dir):
    """Load the base causal LM (optionally 4/8-bit quantized) and attach LoRA.

    Args:
        args: merged PeftConfig + TrainingArguments namespace.
        checkpoint_dir: directory containing a previously saved
            ``adapter_model`` to resume from, or None to add fresh adapters.

    Returns:
        The prepared model; PEFT-wrapped unless ``args.full_finetune``.
    """
    device_map = "auto"
    # Full fine-tuning is incompatible with 4/8-bit quantization.
    if args.full_finetune:
        assert args.bits in [16, 32]
    print(f'loading base model {args.model_name_or_path}...')
    compute_dtype = (torch.float16 if args.fp16 else (torch.bfloat16 if args.bf16 else torch.float32))
    print(f"compute_type: {compute_dtype}")
    # NOTE(review): the two branches differ only in that the non-full-
    # finetune path also sets device_map; both still pass the
    # BitsAndBytesConfig.
    if args.full_finetune:
        model = transformers.AutoModelForCausalLM.from_pretrained(
            args.model_name_or_path,
            load_in_4bit=args.bits == 4,
            load_in_8bit=args.bits == 8,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=args.bits == 4,
                load_in_8bit=args.bits == 8,
                llm_int8_threshold=6.0,
                llm_int8_has_fp16_weight=False,
                bnb_4bit_compute_dtype=compute_dtype,
                bnb_4bit_use_double_quant=args.double_quant,
                bnb_4bit_quant_type=args.quant_type,
            ),
            torch_dtype=(torch.float16 if args.fp16 else (torch.bfloat16 if args.bf16 else torch.float32)),
        )
    else:
        model = transformers.AutoModelForCausalLM.from_pretrained(
            args.model_name_or_path,
            load_in_4bit=args.bits == 4,
            load_in_8bit=args.bits == 8,
            device_map=device_map,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=args.bits == 4,
                load_in_8bit=args.bits == 8,
                llm_int8_threshold=6.0,
                llm_int8_has_fp16_weight=False,
                bnb_4bit_compute_dtype=compute_dtype,
                bnb_4bit_use_double_quant=args.double_quant,
                bnb_4bit_quant_type=args.quant_type,
            ),
            torch_dtype=(torch.float16 if args.fp16 else (torch.bfloat16 if args.bf16 else torch.float32)),
        )
    # On compute capability >= 8 (Ampere+) suggest bf16 instead of fp16.
    if compute_dtype == torch.float16 and args.bits == 4:
        major, minor = torch.cuda.get_device_capability()
        if major >= 8:
            print('=' * 80)
            print('Your GPU supports bfloat16, you can accelerate training with the argument --bf16')
            print('=' * 80)
    setattr(model, 'model_parallel', True)
    setattr(model, 'is_parallelizable', True)
    model.config.torch_dtype = (torch.float16 if args.fp16 else (torch.bfloat16 if args.bf16 else torch.float32))
    if not args.full_finetune:
        # Prepares the quantized model for k-bit training (peft helper).
        model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=args.gradient_checkpointing)
    if args.gradient_checkpointing:
        model.gradient_checkpointing_enable()
    if not args.full_finetune:
        if checkpoint_dir is not None:
            print("Loading adapters from checkpoint.")
            model = PeftModel.from_pretrained(model, join(checkpoint_dir, 'adapter_model'),
                                              is_trainable=not args.do_infer)
            if args.do_infer:
                print("Merge adapter weights to base model.")
                model = model.merge_and_unload()
        else:
            print(f'Adding LoRA modules...')
            # NOTE(review): find_all_linear_names is defined elsewhere in
            # the original module -- not visible in this excerpt.
            modules = find_all_linear_names(args, model)
            config = LoraConfig(
                r=args.lora_r,
                lora_alpha=args.lora_alpha,
                target_modules=modules,
                lora_dropout=args.lora_dropout,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
    # Align submodule dtypes: LoRA layers and lm_head/embed_tokens are cast
    # to bf16 when requested; modules whose name contains 'norm' are cast
    # to fp32.
    for name, module in model.named_modules():
        if isinstance(module, LoraLayer):
            if args.bf16:
                module = module.to(torch.bfloat16)
        if 'norm' in name:
            module = module.to(torch.float32)
        if 'lm_head' in name or 'embed_tokens' in name:
            if hasattr(module, 'weight'):
                if args.bf16 and module.weight.dtype == torch.float32:
                    module = module.to(torch.bfloat16)
    return model
def train():
    """Fine-tune a causal LM with (Q)LoRA adapters on a local JSON dataset.

    Parses PeftConfig + TrainingArguments from the command line, loads the
    (optionally quantized) model, tokenizes instruction/output pairs into a
    supervised dataset, and runs the HF Trainer (train and/or eval).
    """
    parser = transformers.HfArgumentParser((PeftConfig, transformers.TrainingArguments))
    peft_args, train_args = parser.parse_args_into_dataclasses()
    # Merge both dataclasses into a single flat namespace for convenience.
    args = argparse.Namespace(**vars(peft_args), **vars(train_args))
    # Load model
    model = get_accelerate_model(args, None)
    print_trainable_parameters(model, args.bits)
    # BUGFIX: the original passed the misspelled kwarg `trunction_side`,
    # which from_pretrained silently ignored; `truncation_side` is intended.
    tokenizer = transformers.AutoTokenizer.from_pretrained(args.model_name_or_path, padding_side='right',
                                                           truncation_side="right", max_length=args.max_length)
    # Load dataset: 80/20 train/validation split of the local JSON file.
    train_ds, validation_ds = datasets.load_dataset('json', data_files=args.data_files,
                                                    split=['train[:80%]', 'train[80%:]'])
    raw_datasets = datasets.DatasetDict({"train": train_ds, "validation": validation_ds})

    def process_supervised(record):
        # Tokenize source (instruction + optional input) and target output,
        # then concatenate them into one sequence.
        input_s = record['instruction'] + '\n' + (record.get('input', '') or '')
        output_s = record['output']
        tokenized = tokenizer([input_s, output_s])
        token_ids = [tok_id for tok_ids in tokenized['input_ids'] for tok_id in tok_ids]
        attention_mask = [mask for masks in tokenized['attention_mask'] for mask in masks]
        # Ensure the sequence terminates with EOS.
        if token_ids[-1] != tokenizer.eos_token_id:
            token_ids += [tokenizer.eos_token_id]
            attention_mask += [1]
        processed_record = {
            "input_ids": token_ids[:args.max_length],
            "attention_mask": attention_mask[:args.max_length],
            "labels": token_ids.copy()[:args.max_length]
        }
        if args.pad_to_max_length:
            processed_record = {
                "input_ids": processed_record["input_ids"] + [tokenizer.pad_token_id] * (
                    args.max_length - len(processed_record["input_ids"])),
                "attention_mask": processed_record["attention_mask"] + [0] * (
                    args.max_length - len(processed_record["attention_mask"])),
                "labels": processed_record["labels"] + [-100] * (args.max_length - len(processed_record["labels"]))
            }
        # ignore input label, label is ignored if value is -100
        processed_record["labels"][:min(len(tokenized["input_ids"][0]), args.max_length)] = [-100] * min(
            len(tokenized["input_ids"][0]), args.max_length)
        return {k: torch.tensor(v, dtype=torch.int) for k, v in processed_record.items()}

    with train_args.main_process_first(desc="Process supervised dataset"):
        sft_dataset = raw_datasets.map(
            process_supervised,
            batched=False,
            num_proc=args.preprocess_num_workers,
            remove_columns=raw_datasets["train"].column_names,
            desc="Process supervised dataset"
        )
    trainer = transformers.Trainer(
        model=model,
        tokenizer=tokenizer,
        args=train_args,
        train_dataset=sft_dataset["train"],
        eval_dataset=sft_dataset["validation"],
        data_collator=transformers.DataCollatorForTokenClassification(tokenizer=tokenizer, padding="longest",
                                                                      max_length=args.max_length,
                                                                      label_pad_token_id=-100)
    )
    if not args.full_finetune:
        # Keep checkpoints adapter-sized (see SavePeftModelCallback).
        trainer.add_callback(SavePeftModelCallback)
    all_metrics = {"run_name": args.run_name}
    if args.do_train:
        print("*** Train ***")
        # Note: `resume_from_checkpoint` not supported for adapter checkpoints by HF.
        # Currently adapter checkpoint is reloaded as expected but optimizer/scheduler states are not.
        train_result = trainer.train()
        metrics = train_result.metrics
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
        all_metrics.update(metrics)
    if args.do_eval:
        print("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="eval")
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
        all_metrics.update(metrics)
156,698 | import os
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import transformers
# Disable Weights & Biases logging for this process.
os.environ["WANDB_DISABLED"] = "true"
def check_file_exist(path: str):
    """Raise ValueError if *path* does not exist on the filesystem.

    Raises:
        ValueError: when the path is missing.
    """
    if not os.path.exists(path):
        # Fixed message grammar ("not exists" -> "does not exist").
        raise ValueError(f"Path: {path} does not exist!")
156,699 | import os
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import transformers
def preprocess_logits_for_metrics(logits, labels):
    """Turn raw model logits into predicted token ids for metric code."""
    if isinstance(logits, tuple):
        # Models may append extras (e.g. past_key_values); the prediction
        # scores always come first.
        logits = logits[0]
    predictions = logits.argmax(dim=-1)
    return predictions
156,700 | import os
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import transformers
def compute_metrics(eval_preds):
    """Token-level accuracy with next-token alignment.

    The prediction at position i targets the token at position i+1, so the
    first label and the last prediction are dropped before flattening.
    """
    preds, labels = eval_preds
    references = labels[:, 1:].reshape(-1)
    predictions = preds[:, :-1].reshape(-1)
    return evaluate.load("accuracy").compute(
        predictions=predictions, references=references)
156,701 | import os
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import torch
import transformers
# Disable Weights & Biases logging for this process.
os.environ["WANDB_DISABLED"] = "true"
def check_file_exist(path: str):
    """Raise ValueError if *path* does not exist on the filesystem.

    Raises:
        ValueError: when the path is missing.
    """
    if not os.path.exists(path):
        # Fixed message grammar ("not exists" -> "does not exist").
        raise ValueError(f"Path: {path} does not exist!")
156,702 | import os
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import torch
import transformers
def preprocess_logits_for_metrics(logits, labels):
    """Map logits to argmax token ids (what the accuracy metric consumes)."""
    # A tuple means the model returned extras alongside the scores; keep
    # only the leading logits tensor.
    scores = logits[0] if isinstance(logits, tuple) else logits
    return scores.argmax(dim=-1)
156,703 | import os
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import torch
import transformers
def compute_metrics(eval_preds):
    """Next-token accuracy: shift labels/predictions into alignment, flatten,
    and score with the `accuracy` metric."""
    preds, labels = eval_preds
    refs = labels[:, 1:].reshape(-1)
    hyps = preds[:, :-1].reshape(-1)
    metric = evaluate.load("accuracy")
    return metric.compute(predictions=hyps, references=refs)
156,704 | import streamlit as st
import torch.cuda
from other_infer.exllamav2_hf_infer import get_model, generate_stream
import argparse
# NOTE(review): `args` is not defined at this point in this excerpt --
# presumably an argparse namespace built elsewhere in the original script;
# verify before running.
print(args)
def get_model(model_path):
    """Load an ExLlamaV2 model, tokenizer and generation config from *model_path*.

    NOTE(review): this redefinition shadows the ``get_model`` imported from
    ``other_infer.exllamav2_hf_infer`` above. ``Exllamav2HF``,
    ``LlamaTokenizer`` and ``GenerationConfig`` are not imported in this
    excerpt and are expected to be in scope elsewhere in the original file.

    Returns:
        Tuple of (model, tokenizer, generation_config).
    """
    # HACK: stub out torch weight-init routines so from_pretrained skips
    # costly random initialization of tensors that are immediately
    # overwritten by the checkpoint weights. This mutates torch globally.
    def skip(*args, **kwargs):
        pass
    torch.nn.init.kaiming_uniform_ = skip
    torch.nn.init.uniform_ = skip
    torch.nn.init.normal_ = skip
    print(f"Loading model from {model_path}...")
    model = Exllamav2HF.from_pretrained(model_path)
    print("Done")
    print(f"Loading tokenizer from {model_path}...")
    tokenizer = LlamaTokenizer.from_pretrained(model_path)
    print("Done")
    print(f"Loading generation config from {model_path}...")
    generation_config = GenerationConfig.from_pretrained(model_path)
    print("Done")
    return model, tokenizer, generation_config
def cached_get_model(model_path):
    """Log the model path and delegate to get_model.

    NOTE(review): presumably wrapped by a caching decorator (e.g.
    ``st.cache_resource``) in the original file -- not visible here.
    """
    print(f'Init model: {model_path}')
    loaded = get_model(model_path=model_path)
    return loaded
156,705 | import asyncio
import datetime
import json
import os
import sys
import time
from contextlib import asynccontextmanager
from functools import partial
from threading import Thread
from typing import List, Literal, Optional, Union
import torch
import uvicorn
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.utils import get_balanced_memory
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
TextIteratorStreamer,
)
async def lifespan(app: FastAPI):
    """App lifespan hook: on shutdown, release cached CUDA memory."""
    # Application runs between startup and this yield's resumption.
    yield
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()
156,706 | import asyncio
import datetime
import json
import os
import sys
import time
from contextlib import asynccontextmanager
from functools import partial
from threading import Thread
from typing import List, Literal, Optional, Union
import torch
import uvicorn
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.utils import get_balanced_memory
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
TextIteratorStreamer,
)
async def root():
    """Landing endpoint returning a fixed greeting."""
    greeting = "Hello! This is TigerBot API."
    return greeting
156,707 | import asyncio
import datetime
import json
import os
import sys
import time
from contextlib import asynccontextmanager
from functools import partial
from threading import Thread
from typing import List, Literal, Optional, Union
import torch
import uvicorn
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.utils import get_balanced_memory
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
TextIteratorStreamer,
)
def torch_gc():
    """Free cached CUDA memory on CUDA_DEVICE, if CUDA is present."""
    if not torch.cuda.is_available():
        return
    # NOTE(review): CUDA_DEVICE is expected to be defined at module level
    # in the original file -- not visible in this excerpt.
    with torch.cuda.device(CUDA_DEVICE):
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
def get_prompt(query, history=None):
    """Build the instruction-format prompt, replaying any (query, response)
    pairs from *history* before the new query."""
    if not history:
        return f"\n\n### Instruction:\n{query}\n\n### Response:\n"
    turns = [
        f"\n\n### Instruction:\n{q}\n\n### Response:\n{r}"
        for q, r in history
    ]
    turns.append(f"\n\n### Instruction:\n{query}\n\n### Response:\n")
    return "".join(turns)
async def create_item(request: Request):
    """Non-streaming chat endpoint: generate a full response for a prompt.

    Expects a JSON body with ``prompt`` plus optional ``history``,
    ``max_input_length``, ``max_generate_length``, ``top_p`` and
    ``temperature``. Returns the response text and the updated history.
    """
    global model, tokenizer, generation_kwargs
    # request.json() already yields parsed JSON; the original's extra
    # dumps/loads round-trip was a no-op and is removed.
    payload = await request.json()
    prompt = payload.get("prompt")
    # BUGFIX: default to an empty list so the append below cannot fail
    # with TypeError when the client omits "history" (None += list).
    history = payload.get("history") or []
    max_input_length = payload.get("max_input_length", 512)
    generation_kwargs["max_length"] = payload.get(
        "max_generate_length", generation_kwargs.get("max_length", 1024)
    )
    generation_kwargs["top_p"] = payload.get(
        "top_p", generation_kwargs.get("top_p", 0.95)
    )
    generation_kwargs["temperature"] = payload.get(
        "temperature", generation_kwargs.get("temperature", 0.8)
    )
    # Keep the tokenizer's cap in sync with the requested generation length.
    if (
        tokenizer.model_max_length is None
        or tokenizer.model_max_length > generation_kwargs["max_length"]
    ):
        tokenizer.model_max_length = generation_kwargs["max_length"]
    device = torch.cuda.current_device()
    prompt = prompt.lstrip("\n")
    query = get_prompt(prompt, history)
    query = query.strip()
    inputs = tokenizer(
        query,
        return_tensors="pt",
        truncation=True,
        max_length=max_input_length,
    )
    inputs = {k: v.to(device) for k, v in inputs.items()}
    output = model.generate(**inputs, **generation_kwargs)
    # Decode only the newly generated tokens (everything after the prompt).
    response = ""
    for tok_id in output[0][inputs["input_ids"].shape[1] :]:
        if tok_id != tokenizer.eos_token_id:
            response += tokenizer.decode(tok_id)
    # BUGFIX: the original used str.rstrip("\n### Response:"), which strips
    # any trailing characters from that *set* (it can eat legitimate text
    # such as a trailing "no" or ":"); remove the literal suffix instead.
    response = response.lstrip("\n")
    suffix = "\n### Response:"
    if response.endswith(suffix):
        response = response[: -len(suffix)]
    history += [(prompt, response)]
    now = datetime.datetime.now()
    # Renamed from `time` to avoid shadowing the imported time module.
    timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
    answer = {
        "response": response,
        "history": history,
        "status": 200,
        "time": timestamp,
    }
    torch_gc()
    return answer
156,708 | import asyncio
import datetime
import json
import os
import sys
import time
from contextlib import asynccontextmanager
from functools import partial
from threading import Thread
from typing import List, Literal, Optional, Union
import torch
import uvicorn
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.utils import get_balanced_memory
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
TextIteratorStreamer,
)
# Device onto which generation inputs are moved.
DEVICE = "cuda"
def torch_gc():
    """Release cached CUDA memory for CUDA_DEVICE when CUDA is available."""
    if not torch.cuda.is_available():
        return
    # NOTE(review): CUDA_DEVICE must be defined at module level in the
    # original file -- not visible in this excerpt.
    with torch.cuda.device(CUDA_DEVICE):
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
def get_prompt(query, history=None):
    """Render the instruction/response prompt, prefixed with past turns."""
    pieces = []
    for old_query, old_response in (history or []):
        pieces.append(
            f"\n\n### Instruction:\n{old_query}\n\n### Response:\n{old_response}"
        )
    pieces.append(f"\n\n### Instruction:\n{query}\n\n### Response:\n")
    return "".join(pieces)
async def stream_chat(request: Request):
    """SSE endpoint: stream generated text for a prompt as it is produced.

    The JSON body carries ``prompt`` plus optional ``history``,
    ``max_input_length``, ``max_generate_length``, ``top_p`` and
    ``temperature``. Each "new_message" event holds the newest fragment and
    the running history.

    NOTE(review): ``generation_kwargs`` is a module-level dict mutated in
    place here -- its definition is not visible in this excerpt.
    """
    global model, tokenizer
    json_post_raw = await request.json()
    # Round-trips the parsed body through json; effectively a no-op copy.
    json_post = json.dumps(json_post_raw)
    json_post_list = json.loads(json_post)
    prompt = json_post_list.get("prompt")
    history = json_post_list.get("history")
    max_input_length = json_post_list.get("max_input_length", 512)
    generation_kwargs["max_length"] = json_post_list.get(
        "max_generate_length", generation_kwargs.get("max_length", 1024)
    )
    generation_kwargs["top_p"] = json_post_list.get(
        "top_p", generation_kwargs.get("top_p", 0.95)
    )
    generation_kwargs["temperature"] = json_post_list.get(
        "temperature", generation_kwargs.get("temperature", 0.8)
    )
    # Keep the tokenizer's cap in sync with the requested generation length.
    if (
        tokenizer.model_max_length is None
        or tokenizer.model_max_length > generation_kwargs["max_length"]
    ):
        tokenizer.model_max_length = generation_kwargs["max_length"]
    STREAM_DELAY = 1  # second
    RETRY_TIMEOUT = 15000  # milisecond
    async def event_generator(prompt, history, generation_kwargs):
        # The streamer yields decoded text pieces as model.generate runs.
        streamer = TextIteratorStreamer(
            tokenizer,
            skip_prompt=True,
            skip_special_tokens=True,
            spaces_between_special_tokens=False,
        )
        query = get_prompt(prompt, history)
        inputs = tokenizer(
            query,
            return_tensors="pt",
            truncation=True,
            max_length=max_input_length,
        )
        for k, v in inputs.items():
            generation_kwargs[k] = v.to(DEVICE)
        generation_kwargs["streamer"] = streamer
        # Generation runs in a worker thread; this coroutine consumes the
        # streamer and forwards fragments as SSE events.
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        last_msg = ""
        for new_text in streamer:
            # If client closes connection, stop sending events
            if await request.is_disconnected():
                break
            # Checks for new messages and return them to client if any
            try:
                temp_dict = {
                    "response": new_text,
                    "history": history + [(prompt, last_msg)],
                    "finish": False,
                }
                yield {
                    "event": "new_message",
                    "id": "message_id",
                    "retry": RETRY_TIMEOUT,
                    "data": json.dumps(temp_dict, ensure_ascii=False),
                }
                last_msg += new_text
            except StopIteration:
                # NOTE(review): this branch appears unreachable -- the
                # streamer's StopIteration terminates the for loop rather
                # than raising inside the body, so the "finish" event below
                # is seemingly never emitted. Verify against the SSE
                # client's expectations.
                await asyncio.sleep(STREAM_DELAY)
                temp_dict = {
                    "response": new_text,
                    "history": history + [(prompt, last_msg)],
                    "finish": True,
                }
                yield {
                    "event": "finish",
                    "id": "finish_id",
                    "retry": RETRY_TIMEOUT,
                    "data": json.dumps(temp_dict, ensure_ascii=False),
                }
        torch_gc()
    return EventSourceResponse(
        event_generator(prompt, history, generation_kwargs)
    )
156,709 | import asyncio
import datetime
import json
import os
import sys
import time
from contextlib import asynccontextmanager
from functools import partial
from threading import Thread
from typing import List, Literal, Optional, Union
import torch
import uvicorn
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.utils import get_balanced_memory
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
TextIteratorStreamer,
)
class ModelCard(BaseModel):
    """OpenAI-style model description entry (as served by list_models)."""
    id: str
    object: str = "model"
    # Unix timestamp assigned when the card is instantiated.
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "owner"
    root: Optional[str] = None
    parent: Optional[str] = None
    permission: Optional[list] = None
class ModelList(BaseModel):
    """Container for the list of available model cards."""
    object: str = "list"
    data: List[ModelCard] = []
async def list_models():
    """Return a single-entry, OpenAI-style model listing."""
    global model_args
    # NOTE(review): the advertised id mirrors the OpenAI model name rather
    # than the locally served model.
    card = ModelCard(id="gpt-3.5-turbo")
    return ModelList(data=[card])
156,710 | import asyncio
import datetime
import json
import os
import sys
import time
from contextlib import asynccontextmanager
from functools import partial
from threading import Thread
from typing import List, Literal, Optional, Union
import torch
import uvicorn
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.utils import get_balanced_memory
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
TextIteratorStreamer,
)
# Device onto which generation inputs are moved.
DEVICE = "cuda"
async def to_async(func, **kwargs):
    """Run the blocking ``func(**kwargs)`` in the default executor and await
    its result, keeping the event loop responsive."""
    bound = partial(func, **kwargs)
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(None, bound)
def get_prompt(query, history=None):
    """Compose the instruction-format prompt from history plus new query."""
    if not history:
        return f"\n\n### Instruction:\n{query}\n\n### Response:\n"
    rendered = "".join(
        f"\n\n### Instruction:\n{q}\n\n### Response:\n{r}"
        for q, r in history
    )
    return rendered + f"\n\n### Instruction:\n{query}\n\n### Response:\n"
class ChatMessage(BaseModel):
    """One chat turn with an OpenAI-style role."""
    role: Literal["user", "assistant", "system"]
    content: str
class ChatCompletionRequest(BaseModel):
    """Incoming OpenAI-style chat completion payload."""
    model: str
    messages: List[ChatMessage]
    # Sampling controls; None means "use the server defaults".
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_length: Optional[int] = None
    # When True the response is streamed as server-sent events.
    stream: Optional[bool] = False
class ChatCompletionResponseChoice(BaseModel):
    """A completed (non-streaming) choice in a chat completion response."""
    index: int
    message: ChatMessage
    finish_reason: Literal["stop", "length"]
class ChatCompletionResponse(BaseModel):
    """Chat completion wrapper for both one-shot and streamed chunks."""
    model: str
    object: Literal["chat.completion", "chat.completion.chunk"]
    # NOTE(review): ChatCompletionResponseStreamChoice is defined elsewhere
    # in the original module -- not visible in this excerpt.
    choices: List[
        Union[
            ChatCompletionResponseChoice,
            ChatCompletionResponseStreamChoice,
        ]
    ]
    # Unix timestamp assigned at instantiation time.
    created: Optional[int] = Field(
        default_factory=lambda: int(time.time())
    )
async def predict(query: str, history: List[List[str]], model_id: str):
    """Async generator streaming OpenAI-style chat completion chunks.

    Emits, in order: a role-announcing chunk, one chunk per generated text
    fragment, a closing chunk with finish_reason="stop", then the literal
    "[DONE]" sentinel.

    NOTE(review): ``DeltaMessage``, ``ChatCompletionResponseStreamChoice``
    and the module-level ``generation_kwargs`` dict are defined elsewhere
    in the original module -- not visible in this excerpt.
    """
    global model, tokenizer
    # Opening chunk announcing the assistant role.
    choice_data = ChatCompletionResponseStreamChoice(
        index=0,
        delta=DeltaMessage(role="assistant"),
        finish_reason=None,
    )
    chunk = ChatCompletionResponse(
        model=model_id,
        choices=[choice_data],
        object="chat.completion.chunk",
    )
    # chunk.json(...) is the pydantic v1 serialization API.
    yield "{}".format(
        chunk.json(exclude_unset=True, ensure_ascii=False)
    )
    streamer = TextIteratorStreamer(
        tokenizer,
        skip_prompt=True,
        skip_special_tokens=True,
        spaces_between_special_tokens=False,
    )
    query = get_prompt(query, history)
    inputs = tokenizer(query, return_tensors="pt")
    for k, v in inputs.items():
        generation_kwargs[k] = v.to(DEVICE)
    generation_kwargs["streamer"] = streamer
    # Generation runs in a worker thread; this coroutine consumes the
    # streamer and forwards each fragment as a chunk.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    for new_text in streamer:
        if len(new_text) == 0:
            continue
        choice_data = ChatCompletionResponseStreamChoice(
            index=0,
            delta=DeltaMessage(content=new_text),
            finish_reason=None,
        )
        chunk = ChatCompletionResponse(
            model=model_id,
            choices=[choice_data],
            object="chat.completion.chunk",
        )
        yield "{}".format(
            chunk.json(exclude_unset=True, ensure_ascii=False)
        )
    # Closing chunk with an empty delta, then the stream terminator.
    choice_data = ChatCompletionResponseStreamChoice(
        index=0, delta=DeltaMessage(), finish_reason="stop"
    )
    chunk = ChatCompletionResponse(
        model=model_id,
        choices=[choice_data],
        object="chat.completion.chunk",
    )
    yield "{}".format(
        chunk.json(exclude_unset=True, ensure_ascii=False)
    )
    yield "[DONE]"
async def create_chat_completion(request: ChatCompletionRequest):
    """Handle an OpenAI-style chat-completion request.

    Builds the prompt from the message history, generates with the global
    model, and returns either an SSE stream (when request.stream is set) or a
    complete ChatCompletionResponse.

    Raises:
        HTTPException(400): when the last message is not from the user.
    """
    global model, tokenizer, generation_kwargs
    if request.messages[-1].role != "user":
        raise HTTPException(status_code=400, detail="Invalid request")
    query = request.messages[-1].content

    prev_messages = request.messages[:-1]
    # Fold a leading system message into the query text (no separator — the
    # prompt template apparently expects plain concatenation).
    if len(prev_messages) > 0 and prev_messages[0].role == "system":
        query = prev_messages.pop(0).content + query

    # Pair up prior (user, assistant) turns; odd-length histories are ignored.
    history = []
    if len(prev_messages) % 2 == 0:
        for i in range(0, len(prev_messages), 2):
            if (
                prev_messages[i].role == "user"
                and prev_messages[i + 1].role == "assistant"
            ):
                history.append(
                    [
                        prev_messages[i].content,
                        prev_messages[i + 1].content,
                    ]
                )

    if request.stream:
        generate = predict(query, history, request.model)
        return EventSourceResponse(
            generate, media_type="text/event-stream"
        )

    query_text = query.lstrip("\n").strip()
    input_text = get_prompt(query_text, history)
    inputs = tokenizer(input_text, return_tensors="pt")
    for k, v in inputs.items():
        generation_kwargs[k] = v.to(DEVICE)
    output = await to_async(model.generate, **generation_kwargs)

    # Decode only the newly generated tokens, skipping EOS.
    response = ""
    for tok_id in output[0][inputs["input_ids"].shape[1] :]:
        if tok_id != tokenizer.eos_token_id:
            response += tokenizer.decode(tok_id)
    # BUGFIX: the original used rstrip("\n### Response:"), which strips any
    # trailing characters from that *set* (eating legitimate trailing letters
    # such as 'e', 's', 'n', ':').  Remove the marker as an actual suffix.
    response = response.lstrip("\n").rstrip("\n")
    response = response.removesuffix("### Response:").rstrip("\n")

    choice_data = ChatCompletionResponseChoice(
        index=0,
        message=ChatMessage(role="assistant", content=response),
        finish_reason="stop",
    )
    return ChatCompletionResponse(
        model=request.model,
        choices=[choice_data],
        object="chat.completion",
    )
156,711 | import json
import requests
import sseclient
def predict(prompt, history=None):
    """Stream a chat reply from the local SSE server, printing it as it arrives.

    Args:
        prompt: user message to send.
        history: prior [user, assistant] turns; defaults to a fresh list.

    Returns:
        The updated history reported by the server's last non-final frame.
    """
    if history is None:
        history = []
    session = requests.Session()
    url = 'http://127.0.0.1:8000/stream_chat'
    data = {"prompt": prompt, "history": history}
    headers = {'Content-Type': 'application/json'}
    event_source = sseclient.SSEClient(url, json=data, headers=headers, session=session)
    history = []
    print("=" * 100)
    for event in event_source:
        # Decode each server frame; stop on the finish flag or a bad frame.
        try:
            data = json.loads(event.data)
            if not data["finish"]:
                response = data["response"]
                history = data["history"]
                print(response, end="", flush=True)
            else:
                break
        # BUGFIX: was a bare `except:` (also swallows KeyboardInterrupt etc.);
        # only malformed/terminal frames should end the loop.
        except (json.JSONDecodeError, KeyError):
            break
    print()
    print("=" * 100)
    return history
156,712 | import os
import gradio as gr
import mdtex2html
import sseclient
import requests
import json
def postprocess(self, y):
    """Render every (user, bot) message pair in *y* from Markdown to HTML.

    Mutates *y* in place and returns it; a ``None`` history yields ``[]``.
    ``None`` entries within a pair are passed through unchanged.
    """
    if y is None:
        return []
    for idx, (user_msg, bot_msg) in enumerate(y):
        rendered_user = mdtex2html.convert(user_msg) if user_msg is not None else None
        rendered_bot = mdtex2html.convert(bot_msg) if bot_msg is not None else None
        y[idx] = (rendered_user, rendered_bot)
    return y
156,713 | import os
import gradio as gr
import mdtex2html
import sseclient
import requests
import json
def parse_text(text):
    """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/

    Convert model output into display HTML: fenced ``` blocks become
    <pre><code class="language-..."> ... </code></pre> and text inside a
    fence is HTML-entity-escaped so it is shown verbatim instead of being
    rendered as Markdown/HTML.  The identity replacements in the original
    (e.g. replace("<", "<")) were garbled HTML entities; restored per the
    upstream source.
    """
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]  # drop blank lines
    count = 0  # fences seen so far; odd => currently inside a code block
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split('`')
            if count % 2 == 1:
                # Opening fence; items[-1] is the language tag (may be "").
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                lines[i] = '<br></code></pre>'
        else:
            if i > 0:
                if count % 2 == 1:
                    # Inside a fence: escape characters that would otherwise
                    # be interpreted as HTML/Markdown.
                    line = line.replace("`", r"\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    text = "".join(lines)
    return text
def predict(input, chatbot, max_input_length, max_generate_length, top_p, temperature, history):
    """Stream a reply from the local SSE chat server into the gradio chatbot.

    Yields (chatbot, history) after every partial response so the UI updates
    incrementally; stops when the server sends its final frame.
    """
    if history is None:
        history = []
    chatbot.append((parse_text(input), ""))
    payload = {
        "prompt": input,
        "history": history,
        "max_input_length": max_input_length,
        "max_generate_length": max_generate_length,
        "top_p": top_p,
        "temperature": temperature,
    }
    event_source = sseclient.SSEClient(
        'http://localhost:8000/stream_chat',
        json=payload,
        headers={'Content-Type': 'application/json'},
        session=requests.Session(),
    )
    for event in event_source:
        # Decode each frame; the finish flag terminates the stream.
        frame = json.loads(event.data)
        if frame["finish"]:
            break
        response = frame["response"]
        history = frame["history"]
        chatbot[-1] = (parse_text(input), parse_text(response))
        yield chatbot, history
156,714 | import os
import gradio as gr
import mdtex2html
import sseclient
import requests
import json
# Use the Markdown-aware postprocess defined above for all Chatbot components.
gr.Chatbot.postprocess = postprocess
with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">TigerBot</h1>""")
    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Column(scale=12):
                # NOTE(review): .style() is deprecated in newer gradio —
                # confirm the pinned gradio version still supports it.
                user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
                    container=False)
            with gr.Column(min_width=32, scale=1):
                submitBtn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            emptyBtn = gr.Button("Clear History")
            # Generation controls shown in the sidebar column.
            max_input_length = gr.Slider(0, 1024, value=512, step=1.0, label="Maximum input length", interactive=True)
            max_generate_length = gr.Slider(0, 2048, value=1024, step=1.0, label="Maximum generate length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
    # Conversation history is per-session state, not a module global.
    history = gr.State([])
    submitBtn.click(predict, [user_input, chatbot, max_input_length, max_generate_length, top_p, temperature, history], [chatbot, history],
                    show_progress=True)
    submitBtn.click(reset_user_input, [], [user_input])
    emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)
def reset_user_input():
    """Clear the input textbox after a message has been submitted."""
    return gr.update(value='')
156,715 | import os
import gradio as gr
import mdtex2html
import sseclient
import requests
import json
def reset_state():
    """Reset both the chatbot display and the conversation history."""
    return [], []
156,716 | import asyncio
import json
import aiohttp_sse_client.client
from aiohttp import ClientSession
from aiohttp_sse_client import client as sseclient
async def handle_event(event: "aiohttp_sse_client.client.MessageEvent", event_source):
    """Decode one SSE event; close the stream when the server says "finish".

    The annotation is a string so importing this module does not require
    aiohttp_sse_client to be resolvable at def-evaluation time (the eager
    attribute lookup in the original was a hard import-time dependency).

    Returns:
        (response, history, event_type) extracted from the event's JSON data.
    """
    data = json.loads(event.data)
    if event.type == "finish":
        # Best-effort close; the stream may already be gone.
        try:
            await event_source.close()
        except Exception as err:
            print("close with error", err)
    return data["response"], data["history"], event.type
async def listen_sse(prompt, history=None, max_length=2048, top_p=0.7, temperature=0.96):
    """Connect to the local SSE chat endpoint, print the streamed reply, and
    return the final (response, history) pair."""
    if history is None:
        history = []
    payload = {
        "prompt": prompt,
        "history": history,
        "max_length": max_length,
        "top_p": top_p,
        "temperature": temperature,
    }
    async with ClientSession() as session:
        url = 'http://127.0.0.1:8000/stream_chat'
        headers = {'Content-Type': 'application/json'}
        response, history = None, None
        print("=" * 100)
        async with sseclient.EventSource(url, json=payload, headers=headers, session=session) as event_source:
            try:
                async for event in event_source:
                    # Delegate JSON decoding / close handling to handle_event.
                    response, history, e_type = await handle_event(event, event_source)
                    print(response, end="", flush=True)
                    if e_type == "finish":
                        break
            except Exception as err:
                print("event close", err)
        print()
        print("=" * 100)
        return response, history
156,717 | import streamlit as st
import torch.cuda
from utils.modeling_hack import get_model
from utils.streaming import generate_stream
import argparse
print(args)
def get_model(model_path: str, rope_scaling: Optional[str] = None,
              rope_factor: float = 8.0) -> Tuple[transformers.AutoModelForCausalLM,
                                                 transformers.AutoTokenizer,
                                                 transformers.GenerationConfig]:
    """Load a causal-LM checkpoint together with its tokenizer and generation config.

    rope_scaling: None for stock rotary embeddings, otherwise a scaling type
    name applied with *rope_factor* (passed through as the HF rope_scaling dict).
    """
    rope_config = None if rope_scaling is None else {"type": rope_scaling, "factor": rope_factor}
    print(f"Loading model from {model_path}...")
    model = transformers.AutoModelForCausalLM.from_pretrained(
        model_path, torch_dtype=torch.bfloat16, device_map='auto', rope_scaling=rope_config)
    # Show which rotary-embedding implementation ended up installed.
    print(model.model.layers[0].self_attn.rotary_emb)
    print("Done")
    print(f"Loading tokenizer from {model_path}...")
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_path)
    print("Done")
    print(f"Loading generation config from {model_path}...")
    generation_config = transformers.GenerationConfig.from_pretrained(model_path)
    print("Done")
    return model, tokenizer, generation_config
def cached_get_model(model_path, rope_scaling, rope_factor):
    """Wrapper around get_model intended for (streamlit) caching.

    NOTE(review): no caching decorator is visible here — presumably
    @st.cache_resource was attached where this is defined; confirm.
    """
    print(f'Init model: {model_path} with rope_scaling: {rope_scaling}, rope_factor: {rope_factor}')
    return get_model(model_path=model_path, rope_scaling=rope_scaling, rope_factor=rope_factor)
156,718 | import threading
from typing import Iterator
import torch
import transformers
import string
The provided code snippet includes necessary dependencies for implementing the `put` function. Write a Python function `def put(self, value)` to solve the following problem:
Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
Here is the function:
def put(self, value):
    """
    Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
    """
    # value: tensor of newly generated token ids; only batch size 1 is supported.
    if len(value.shape) > 1 and value.shape[0] > 1:
        raise ValueError("TextStreamer only supports batch size 1")
    elif len(value.shape) > 1:
        value = value[0]
    # Optionally swallow the prompt tokens on the first call.
    if self.skip_prompt and self.next_tokens_are_prompt:
        self.next_tokens_are_prompt = False
        return
    # Add the new token to the cache and decodes the entire thing.
    self.token_cache.extend(value.tolist())
    text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
    # After the symbol for a new line, we flush the cache.
    if text.endswith("\n"):
        printable_text = text[self.print_len:]
        self.token_cache = []
        self.print_len = 0
    # If the last token is a CJK character, we print the characters.
    # (This variant also flushes on ASCII letters and common ASCII/CJK
    # punctuation so mixed Chinese/English streams appear promptly.)
    elif len(text) > 0 and (
            self._is_chinese_char(ord(text[-1])) or text[-1] in string.ascii_letters or text[-1] in set(
        '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~!“”#%&、‘’()*+,-。/:;《=》?@【】·「|」 \t\n\r\x0b\x0c')):
        printable_text = text[self.print_len:]
        self.print_len += len(printable_text)
    # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
    # which may change with the subsequent token -- there are probably smarter ways to do this!)
    else:
        printable_text = text[self.print_len: text.rfind(" ") + 1]
        self.print_len += len(printable_text)
    self.on_finalized_text(printable_text)
156,719 | import threading
from typing import Iterator
import torch
import transformers
import string
# Replace the default word-boundary flushing with the CJK-aware `put` above.
transformers.TextIteratorStreamer.put = put


def generate_stream(model: transformers.AutoModelForCausalLM, tokenizer: transformers.AutoTokenizer,
                    input_ids: torch.Tensor, attention_mask: torch.Tensor,
                    generation_config: transformers.GenerationConfig):
    """Start generation on a background thread and return the token streamer.

    The caller iterates the returned TextIteratorStreamer to receive text as
    it is produced; the 180 s timeout guards against a stalled generate().
    BUGFIX: `tokenizer` was annotated as AutoModelForCausalLM — corrected to
    AutoTokenizer (annotation only; runtime behavior unchanged).
    """
    streamer = transformers.TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=180)
    kwargs = generation_config.to_dict()
    kwargs['input_ids'] = input_ids
    kwargs['attention_mask'] = attention_mask
    kwargs['streamer'] = streamer
    # Run generate() on a worker thread so this call returns immediately.
    threading.Thread(target=model.generate, kwargs=kwargs).start()
    return streamer
156,720 | import math
from typing import Optional, Tuple
import torch
import transformers
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaLinearScalingRotaryEmbedding, \
LlamaRotaryEmbedding
def find_correction_dim(num_rotations, dim, base=10000, max_position_embeddings=2048):
    """Inverse rotary-frequency formula: the (fractional) embedding dimension
    that completes `num_rotations` rotations over the full context length."""
    ratio = max_position_embeddings / (num_rotations * 2 * math.pi)
    return dim * math.log(ratio) / (2 * math.log(base))
def find_correction_range(low_rot, high_rot, dim, base=10000, max_position_embeddings=2048):
    """Integer dimension bounds [low, high] (clamped to [0, dim-1]) covering
    rotation counts from low_rot down to high_rot."""
    low = math.floor(find_correction_dim(low_rot, dim, base, max_position_embeddings))
    high = math.ceil(find_correction_dim(high_rot, dim, base, max_position_embeddings))
    # Clamp values just in case they fall outside the valid dimension range.
    return max(low, 0), min(high, dim - 1)
156,721 | import math
from typing import Optional, Tuple
import torch
import transformers
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaLinearScalingRotaryEmbedding, \
LlamaRotaryEmbedding
def linear_ramp_mask(min, max, dim):
    """Length-`dim` ramp: 0 at indices <= min, rising linearly to 1 at >= max.

    (`min`/`max` shadow the builtins; names kept for interface compatibility.)
    """
    if min == max:
        max += 0.001  # Prevent singularity (division by zero below)
    ramp = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
    return torch.clamp(ramp, 0, 1)
156,722 | import math
from typing import Optional, Tuple
import torch
import transformers
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaLinearScalingRotaryEmbedding, \
LlamaRotaryEmbedding
class LlamaYaRNRotaryEmbedding(LlamaRotaryEmbedding):
    """LlamaYaRNRotaryEmbedding extended with YaRN NTK scaling from [paper](https://arxiv.org/pdf/2309.00071.pdf)"""
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=8.0,
                 extrapolation_factor=1., attn_factor=1., beta_fast=32, beta_slow=1):
        # scaling_factor: context-extension ratio; beta_fast/beta_slow bound
        # the rotation counts for the NTK-by-parts ramp; extrapolation_factor
        # and attn_factor tune the frequency blend and attention temperature
        # (see the YaRN paper linked above).
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        self.extrapolation_factor = extrapolation_factor
        self.scaling_factor = scaling_factor
        self.attn_factor = attn_factor
        self.beta_fast = beta_fast
        self.beta_slow = beta_slow
        super().__init__(self.dim, max_position_embeddings, base, device)
    def _set_cos_sin_cache(self, seq_len, device, dtype):
        # Reset the tables if the sequence length has changed,
        # or if we're on a new device (possibly due to tracing for instance)
        if seq_len <= self.max_position_embeddings:
            seq_len = self.max_position_embeddings
        self.max_seq_len_cached = seq_len
        if seq_len > self.max_position_embeddings:
            # Beyond the training context: blend interpolated and extrapolated
            # frequencies per dimension via the linear ramp mask.
            inv_freq_extrapolation = 1.0 / (
                self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)
            )
            freqs = 1.0 / inv_freq_extrapolation
            inv_freq_interpolation = 1.0 / (self.scaling_factor * freqs)
            low, high = find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base,
                                              self.max_position_embeddings)
            inv_freq_mask = (1 - linear_ramp_mask(low, high, self.dim // 2).float().to(
                device)) * self.extrapolation_factor  # Get n-d rotational scaling corrected for extrapolation
            inv_freq = inv_freq_interpolation * (1 - inv_freq_mask) + inv_freq_extrapolation * inv_freq_mask
            # Get n-d magnitude scaling corrected for interpolation
            mscale = ((0.1 * math.log(self.scaling_factor) + 1.0) if self.scaling_factor > 1 else 1) * self.attn_factor
        else:
            # Within the training context: stock RoPE frequencies, no rescaling.
            inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            mscale = 1
        t = torch.arange(seq_len, device=device, dtype=inv_freq.dtype)
        # Don't do einsum, it converts fp32 to fp16
        # freqs = torch.outer(t, self.inv_freq.to(device=t.device))
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1) * mscale
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
    def forward(self, x, seq_len=None):
        """
        Return cos and sin for the asked position ids
        """
        # Rebuild unless the cache is exactly the un-scaled table and long
        # enough, or when the cache lives on a different device than x.
        if not (seq_len <= self.max_seq_len_cached == self.max_position_embeddings) or \
                self.cos_cached.device != x.device:
            self._set_cos_sin_cache(seq_len, x.device, x.dtype)
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )
class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
    """LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        """Recompute the cos/sin tables, rescaling `base` when seq_len exceeds
        the training context (dynamic NTK).

        BUGFIX: removed a stray debug print of the device that spammed stdout
        on every cache rebuild.
        """
        if seq_len <= self.max_position_embeddings:
            seq_len = self.max_position_embeddings
        self.max_seq_len_cached = seq_len
        if seq_len > self.max_position_embeddings:
            # NTK-aware base rescaling for contexts longer than training.
            base = self.base * (
                    (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            ) ** (self.dim / (self.dim - 2))
        else:
            base = self.base
        inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=inv_freq.dtype)
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # Rebuild unless the cache is the exact un-scaled table and long enough.
        if not (seq_len <= self.max_seq_len_cached == self.max_position_embeddings):
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )
def _init_rope(self):
    """Pick the rotary-embedding implementation from config.rope_scaling.

    Supports None (stock RoPE), "linear", "dynamic" (NTK), and "yarn".
    """
    if self.config.rope_scaling is None:
        self.rotary_emb = LlamaRotaryEmbedding(
            self.head_dim,
            max_position_embeddings=self.max_position_embeddings,
            base=self.rope_theta,
        )
    else:
        scaling_type = self.config.rope_scaling["type"]
        scaling_factor = self.config.rope_scaling["factor"]
        if scaling_type == "linear":
            self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                scaling_factor=scaling_factor,
                base=self.rope_theta,
            )
        elif scaling_type == "dynamic":
            self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                scaling_factor=scaling_factor,
                base=self.rope_theta,
            )
        elif scaling_type == 'yarn':
            # YaRN keeps its other hyper-parameters at their defaults here.
            self.rotary_emb = LlamaYaRNRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                scaling_factor=scaling_factor,
                base=self.rope_theta
            )
        else:
            raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
156,723 | import os
import re
import sys
from setuptools import setup, Command
__PATH__ = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r") as fh:
long_description = fh.read()
def read_version():
    """Extract __version__ from fawkes/__init__.py without importing the package."""
    pkg_root = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(pkg_root, 'fawkes/__init__.py')) as fh:
        match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                          fh.read(), re.M)
    if not match:
        raise RuntimeError("Unable to find __version__ string")
    return match.group(1)
156,724 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
def preprocess(X, method):
    """Apply the named preprocessing scheme to image batch X.

    'raw' is a no-op; 'imagenet' applies ImageNet preprocessing.  'inception'
    and 'mnist' pass the assert but are not implemented here (they raise).
    """
    assert method in {'raw', 'imagenet', 'inception', 'mnist'}
    if method == 'imagenet':
        return imagenet_preprocessing(X)
    if method == 'raw':
        return X
    raise Exception('unknown method %s' % method)
def reverse_preprocess(X, method):
    """Undo the named preprocessing on image batch X ('raw' is a no-op)."""
    assert method in {'raw', 'imagenet', 'inception', 'mnist'}
    if method == 'imagenet':
        return imagenet_reverse_preprocessing(X)
    if method == 'raw':
        return X
    raise Exception('unknown method %s' % method)
def clip_img(X, preprocessing='raw'):
    """Clamp X to the valid [0, 255] pixel range in raw space, round-tripping
    through the named preprocessing scheme."""
    raw = reverse_preprocess(X, preprocessing)
    clipped = np.clip(raw, 0.0, 255.0)
    return preprocess(clipped, preprocessing)
156,725 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
# NOTE(review): load_image appears truncated here — its body is missing and
# filter_image_paths (which calls it) follows immediately; confirm against
# the original fawkes source before relying on this block.
def load_image(path):
    def filter_image_paths(image_paths):
        """Keep only paths that decode as images; returns (paths, images)."""
        print("Identify {} files in the directory".format(len(image_paths)))
        new_image_paths = []
        new_images = []
        for p in image_paths:
            # Skip anything that does not load as an image.
            img = load_image(p)
            if img is None:
                print("{} is not an image file, skipped".format(p.split("/")[-1]))
                continue
            new_image_paths.append(p)
            new_images.append(img)
        print("Identify {} images in the directory".format(len(new_image_paths)))
        return new_image_paths, new_images
156,726 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
def get_ends(longsize, window):
    """Start/end indices of a centered crop of length `window` within `longsize`."""
    margin = (longsize - window) // 2
    return margin, margin + window
156,727 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
def dump_dictionary_as_json(dict, outfile):
    """Serialize `dict` to JSON and write it to `outfile` as bytes.

    (`dict` shadows the builtin; the name is kept for interface compatibility.)
    """
    serialized = json.dumps(dict).encode()
    with open(outfile, "wb") as sink:
        sink.write(serialized)
156,728 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
def load_victim_model(number_classes, teacher_model=None, end2end=False):
    """Attach a fresh `number_classes`-way softmax head to `teacher_model`.

    Args:
        number_classes: size of the new classification head.
        teacher_model: backbone model (required despite the default — the
            original crashed with an opaque AttributeError when omitted).
        end2end: when True the backbone layers stay trainable; otherwise
            only the new head is trained.

    Returns:
        A compiled keras Model.
    """
    if teacher_model is None:
        raise ValueError("teacher_model is required to build a victim model")
    # Freeze (or unfreeze, for end-to-end training) every backbone layer.
    for layer in teacher_model.layers:
        layer.trainable = end2end
    x = teacher_model.layers[-1].output
    x = Dense(number_classes)(x)
    x = Activation('softmax', name="act")(x)
    model = Model(teacher_model.input, x)
    opt = keras.optimizers.Adadelta()
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
156,729 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
The provided code snippet includes necessary dependencies for implementing the `init_gpu` function. Write a Python function `def init_gpu(gpu)` to solve the following problem:
code to initialize gpu in tf2
Here is the function:
def init_gpu(gpu):
    ''' code to initialize gpu in tf2'''
    # Accept either a single GPU id or a list of ids.
    if isinstance(gpu, list):
        gpu_num = ','.join([str(i) for i in gpu])
    else:
        gpu_num = str(gpu)
    # If the environment already pins devices, respect it and do nothing.
    if "CUDA_VISIBLE_DEVICES" in os.environ:
        print('GPU already initiated')
        return
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_num
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # NOTE(review): only gpus[0] is made visible / growth-enabled even
            # when several ids were requested — confirm this is intentional.
            tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
            tf.config.experimental.set_memory_growth(gpus[0], True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
        except RuntimeError as e:
            # Visible devices must be set before any GPU has been initialized.
            print(e)
156,730 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
def fix_gpu_memory(mem_fraction=1):
    """Create, initialize and install a TF session with bounded GPU memory.

    mem_fraction: per-process GPU memory fraction (TF1-style GPUOptions).
    NOTE(review): uses the deprecated TF1 Session/ConfigProto API — assumes
    TF1.x or TF2 running with v1 compatibility behavior; confirm.
    Returns the created session (config is None on CPU-only hosts).
    """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf_config = None
    if tf.test.is_gpu_available():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_fraction)
        tf_config = tf.ConfigProto(gpu_options=gpu_options)
        tf_config.gpu_options.allow_growth = True
        tf_config.log_device_placement = False
    init_op = tf.global_variables_initializer()
    sess = tf.Session(config=tf_config)
    sess.run(init_op)
    # Make this session keras' default so subsequent model code uses it.
    K.set_session(sess)
    return sess
156,731 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
# NOTE(review): reverse_preprocess is shown here without a body (truncated
# extraction); the real implementation lives elsewhere in the original module.
def reverse_preprocess(X, method):
    def reverse_process_cloaked(x, preprocess='imagenet'):
        # Undo training-time preprocessing to recover raw-pixel images.
        # x = clip_img(x, preprocess)
        return reverse_preprocess(x, preprocess)
156,732 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
def build_bottleneck_model(model, cut_off):
    """Truncate `model` at the layer named `cut_off` and return the compiled sub-model."""
    truncated = Model(model.input, model.get_layer(cut_off).output)
    truncated.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
    return truncated
156,733 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
class Extractor(object):
    """Wrap a feature-extractor model: raw [0, 255] images in, L2-normalized
    embeddings out.  Instances are callable."""

    def __init__(self, model):
        self.model = model

    def predict(self, imgs):
        # Scale pixels to [0, 1] before running the extractor.
        scaled = imgs / 255.0
        return l2_norm(self.model(scaled))

    def __call__(self, x):
        return self.predict(x)
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
    """Download `origin` into a keras-style cache dir, verifying an optional
    hash and optionally un-archiving it; returns the local path.

    Relies on module-level helpers `_makedirs_exist_ok`, `validate_file`,
    `urlretrieve` and `_extract_archive` defined elsewhere in this module.
    """
    if cache_dir is None:
        cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
    if md5_hash is not None and file_hash is None:
        # Legacy md5 argument takes effect only when file_hash is unset.
        file_hash = md5_hash
        hash_algorithm = 'md5'
    datadir_base = os.path.expanduser(cache_dir)
    if not os.access(datadir_base, os.W_OK):
        # Fall back to /tmp when the cache dir is not writable.
        datadir_base = os.path.join('/tmp', '.keras')
    datadir = os.path.join(datadir_base, cache_subdir)
    _makedirs_exist_ok(datadir)
    # fname = path_to_string(fname)
    if untar:
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + '.tar.gz'
    else:
        fpath = os.path.join(datadir, fname)
    download = False
    if os.path.exists(fpath):
        # File found; verify integrity if a hash was provided.
        if file_hash is not None:
            if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
                print('A local file was found, but it seems to be '
                      'incomplete or outdated because the ' + hash_algorithm +
                      ' file hash does not match the original value of ' + file_hash +
                      ' so we will re-download the data.')
                download = True
    else:
        download = True
    if download:
        print('Downloading data from', origin)

        class ProgressTracker(object):
            # Maintain progbar for the lifetime of download.
            # This design was chosen for Python 2.7 compatibility.
            progbar = None

        def dl_progress(count, block_size, total_size):
            if ProgressTracker.progbar is None:
                if total_size == -1:
                    total_size = None
                ProgressTracker.progbar = Progbar(total_size)
            else:
                ProgressTracker.progbar.update(count * block_size)

        error_msg = 'URL fetch failure on {}: {} -- {}'
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt) as e:
            # Don't leave a partial download behind.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        ProgressTracker.progbar = None
    if untar:
        if not os.path.exists(untar_fpath):
            _extract_archive(fpath, datadir, archive_format='tar')
        return untar_fpath
    if extract:
        _extract_archive(fpath, datadir, archive_format)
    return fpath
def load_extractor(name):
    """Download (if needed) and load a fawkes feature extractor by name."""
    hash_map = {"extractor_2": "ce703d481db2b83513bbdafa27434703",
                "extractor_0": "94854151fd9077997d69ceda107f9c6b"}
    assert name in ["extractor_2", 'extractor_0']
    model_dir = pkg_resources.resource_filename("fawkes", "model/")
    model_file = pkg_resources.resource_filename("fawkes", "model/{}.h5".format(name))
    os.makedirs(model_dir, exist_ok=True)
    # Fetch the weights into the package dir, verified against the known md5.
    get_file("{}.h5".format(name), "http://mirror.cs.uchicago.edu/fawkes/files/{}.h5".format(name),
             cache_dir=model_dir, cache_subdir='', md5_hash=hash_map[name])
    return Extractor(keras.models.load_model(model_file))
156,734 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
def get_dataset_path(dataset):
    """Look up a configured dataset in ~/.fawkes/config.json.

    Returns:
        (train_dir, test_dir, num_classes, num_images) for `dataset`.

    Raises:
        Exception: when the config file is missing or the dataset is not
            configured (message text preserved for existing callers).
    """
    model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
    config_path = os.path.join(model_dir, "config.json")
    if not os.path.exists(config_path):
        raise Exception("Please config the datasets before running protection code. See more in README and config.py.")
    # BUGFIX: the original `json.load(open(...))` leaked an open file handle.
    with open(config_path, 'r') as fh:
        config = json.load(fh)
    if dataset not in config:
        raise Exception(
            "Dataset {} does not exist, please download to data/ and add the path to this function... Abort".format(
                dataset))
    entry = config[dataset]
    return entry['train_dir'], entry['test_dir'], entry['num_classes'], entry['num_images']
156,735 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
def dump_image(x, filename, format="png", scale=False):
    """Save image array `x` to `filename` in the given PIL format.

    `scale` is forwarded to keras' array_to_img (controls value rescaling
    for display - see the Keras docs).
    """
    img = image.array_to_img(x, scale=scale)
    img.save(filename, format)
    return
156,736 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
IMG_SIZE = 112  # square resolution target images are resized to (see select_target_label)
PREPROCESS = 'raw'  # preprocessing scheme name passed to preprocess()
def resize(img, sz):
    """Resize an image array to sz == (height, width) via PIL.

    Pixel values must already be in the 0-255 range.
    """
    assert np.min(img) >= 0 and np.max(img) <= 255.0
    # Local import kept from the original (image is also imported at module level).
    from keras.preprocessing import image
    # PIL's resize takes (width, height), hence the (sz[1], sz[0]) swap.
    im_data = image.array_to_img(img).resize((sz[1], sz[0]))
    im_data = image.img_to_array(im_data)
    return im_data
def preprocess(X, method):
    """Apply the named preprocessing scheme to a batch of images.

    'raw' returns X untouched; 'imagenet' delegates to
    imagenet_preprocessing(); the remaining accepted names currently fall
    through to the unknown-method error (unchanged from the original).
    """
    assert method in {'raw', 'imagenet', 'inception', 'mnist'}
    if method == 'raw':
        return X
    if method == 'imagenet':
        return imagenet_preprocessing(X)
    raise Exception('unknown method %s' % method)
def load_embeddings(feature_extractors_names):
    # Load the precomputed target-embedding dictionary from ~/.fawkes.
    # NOTE(review): path2emb is overwritten on every loop iteration, so only
    # the embeddings of the LAST extractor name are returned - confirm this
    # is intentional when more than one extractor is passed.
    model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
    for extractor_name in feature_extractors_names:
        # gzip-compressed pickle mapping target id -> embedding vector
        fp = gzip.open(os.path.join(model_dir, "{}_emb.p.gz".format(extractor_name)), 'rb')
        path2emb = pickle.load(fp)
        fp.close()
    return path2emb
def extractor_ls_predict(feature_extractors_ls, X):
    """Run every extractor on X and concatenate their outputs feature-wise.

    Each extractor's predict(X) result is stacked along axis 1, producing
    one wide feature array per input row.
    """
    per_model = [extractor.predict(X) for extractor in feature_extractors_ls]
    return np.concatenate(per_model, axis=1)
def pairwise_l2_distance(A, B):
    """Return the matrix of Euclidean distances between the rows of A and B.

    A: (n, d) array-like of n feature vectors.
    B: (m, d) array-like of m feature vectors.
    Returns an (n, m) ndarray whose (i, j) entry is ||A[i] - B[j]||_2.

    Rewritten with broadcasting: the original used the deprecated np.matrix
    type plus np.tile, and returned an np.matrix (the visible caller
    immediately wrapped the result in np.array anyway).
    """
    A = np.asarray(A, dtype=np.float64)
    B = np.asarray(B, dtype=np.float64)
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    sq_a = np.sum(A ** 2, axis=1)[:, None]   # (n, 1)
    sq_b = np.sum(B ** 2, axis=1)[None, :]   # (1, m)
    sq_dist = sq_a + sq_b - 2.0 * (A @ B.T)
    # Clamp tiny negative values produced by floating-point cancellation.
    np.clip(sq_dist, 0.0, None, out=sq_dist)
    return np.sqrt(sq_dist)
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
    """Download `origin` into the local cache unless a hash-valid copy exists.

    Vendored copy of Keras' get_file. Returns the path to the cached file
    (or, when untar=True, the path to the untarred directory).

    NOTE(review): urlretrieve, validate_file, _makedirs_exist_ok and
    _extract_archive are assumed to be defined elsewhere in this module -
    urlretrieve in particular is not among the visible imports; confirm.
    """
    if cache_dir is None:
        cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
    # md5_hash is the legacy parameter name; fold it into file_hash.
    if md5_hash is not None and file_hash is None:
        file_hash = md5_hash
        hash_algorithm = 'md5'
    datadir_base = os.path.expanduser(cache_dir)
    # Fall back to /tmp when the chosen cache root is not writable.
    if not os.access(datadir_base, os.W_OK):
        datadir_base = os.path.join('/tmp', '.keras')
    datadir = os.path.join(datadir_base, cache_subdir)
    _makedirs_exist_ok(datadir)
    # fname = path_to_string(fname)
    if untar:
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + '.tar.gz'
    else:
        fpath = os.path.join(datadir, fname)
    download = False
    if os.path.exists(fpath):
        # File found; verify integrity if a hash was provided.
        if file_hash is not None:
            if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
                print('A local file was found, but it seems to be '
                      'incomplete or outdated because the ' + hash_algorithm +
                      ' file hash does not match the original value of ' + file_hash +
                      ' so we will re-download the data.')
                download = True
    else:
        download = True
    if download:
        print('Downloading data from', origin)

        class ProgressTracker(object):
            # Maintain progbar for the lifetime of download.
            # This design was chosen for Python 2.7 compatibility.
            progbar = None

        def dl_progress(count, block_size, total_size):
            # urlretrieve reporthook: lazily create the bar, then update it.
            if ProgressTracker.progbar is None:
                if total_size == -1:
                    total_size = None
                ProgressTracker.progbar = Progbar(total_size)
            else:
                ProgressTracker.progbar.update(count * block_size)

        error_msg = 'URL fetch failure on {}: {} -- {}'
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt) as e:
            # Never leave a partially-downloaded file in the cache.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        ProgressTracker.progbar = None
    if untar:
        if not os.path.exists(untar_fpath):
            _extract_archive(fpath, datadir, archive_format='tar')
        return untar_fpath
    if extract:
        _extract_archive(fpath, datadir, archive_format)
    return fpath
def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, metric='l2'):
    """Pick a dissimilar target identity for cloaking and return its images.

    Extracts features for `imgs`, ranks the precomputed candidate embeddings
    by their distance to the closest input, randomly chooses one of the 20
    farthest candidates, downloads that identity's 10 reference images into
    ~/.fawkes/target_data (best effort), and returns them resized and
    preprocessed, one per input image.

    NOTE(review): `metric` is accepted but never used - only L2 distance is
    implemented here.
    """
    model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
    original_feature_x = extractor_ls_predict(feature_extractors_ls, imgs)
    path2emb = load_embeddings(feature_extractors_names)
    items = list([(k, v) for k, v in path2emb.items()])
    paths = [p[0] for p in items]
    embs = [p[1] for p in items]
    embs = np.array(embs)
    pair_dist = pairwise_l2_distance(original_feature_x, embs)
    pair_dist = np.array(pair_dist)
    # For each candidate identity: its distance to the CLOSEST input image.
    max_sum = np.min(pair_dist, axis=0)
    # Rank candidates farthest-first, then pick one of the top 20 at random.
    max_id_ls = np.argsort(max_sum)[::-1]
    max_id = random.choice(max_id_ls[:20])
    target_data_id = paths[int(max_id)]
    print("target ID: {}".format(target_data_id))
    image_dir = os.path.join(model_dir, "target_data/{}".format(target_data_id))
    os.makedirs(os.path.join(model_dir, "target_data"), exist_ok=True)
    os.makedirs(image_dir, exist_ok=True)
    for i in range(10):
        # Skip images already cached locally.
        if os.path.exists(os.path.join(model_dir, "target_data/{}/{}.jpg".format(target_data_id, i))):
            continue
        try:
            get_file("{}.jpg".format(i),
                     "http://mirror.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format(target_data_id, i),
                     cache_dir=model_dir, cache_subdir='target_data/{}/'.format(target_data_id))
        except Exception:
            # Best effort: a missing reference image is tolerated.
            pass
    image_paths = glob.glob(image_dir + "/*.jpg")
    target_images = [image.img_to_array(image.load_img(cur_path)) for cur_path in
                     image_paths]
    target_images = np.array([resize(x, (IMG_SIZE, IMG_SIZE)) for x in target_images])
    target_images = preprocess(target_images, PREPROCESS)
    target_images = list(target_images)
    # Repeat the pool until it covers every input, then sample one per input.
    while len(target_images) < len(imgs):
        target_images += target_images
    target_images = random.sample(target_images, len(imgs))
    return np.array(target_images)
156,737 | import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
The provided code snippet includes necessary dependencies for implementing the `l2_norm` function. Write a Python function `def l2_norm(x, axis=1)` to solve the following problem:
l2 norm
Here is the function:
def l2_norm(x, axis=1):
    """Scale x to unit L2 norm along `axis` (norm dimension kept)."""
    return x / tf.norm(x, axis=axis, keepdims=True)
156,738 | import numpy as np
from mtcnn import MTCNN
def aligner():
    # MTCNN face detector configured to ignore faces smaller than 30 px.
    return MTCNN(min_face_size=30)
156,739 | import numpy as np
from mtcnn import MTCNN
def to_rgb(img):
    """Replicate a single-channel (H, W) image into an (H, W, 3) uint8 array.

    Raises on non-2D input (the shape unpack fails), matching the original.
    """
    rows, cols = img.shape
    rgb = np.empty((rows, cols, 3), dtype=np.uint8)
    rgb[...] = np.asarray(img)[..., None]
    return rgb
The provided code snippet includes necessary dependencies for implementing the `align` function. Write a Python function `def align(orig_img, aligner)` to solve the following problem:
run MTCNN face detector
Here is the function:
def align(orig_img, aligner):
    """Run the MTCNN face detector over one image.

    Returns (cropped_faces, bounding_boxes) where each box is
    [top, left, bottom, right] in pixel coordinates, or None when the input
    has fewer than 2 dimensions.
    NOTE(review): callers receive either a 2-tuple or None - confirm both
    cases are handled.
    """
    if orig_img.ndim < 2:
        return None
    # Grayscale input: replicate the single channel into RGB.
    if orig_img.ndim == 2:
        orig_img = to_rgb(orig_img)
    # Drop any alpha channel before detection.
    orig_img = orig_img[:, :, 0:3]
    detect_results = aligner.detect_faces(orig_img)
    cropped_arr = []
    bounding_boxes_arr = []
    for dic in detect_results:
        # Skip low-confidence detections and faces smaller than 30 px.
        if dic['confidence'] < 0.9:
            continue
        x, y, width, height = dic['box']
        if width < 30 or height < 30:
            continue
        bb = [y, x, y + height, x + width]
        cropped = orig_img[bb[0]:bb[2], bb[1]:bb[3], :]
        # Copy so the crop does not alias the (possibly large) source image.
        cropped_arr.append(np.copy(cropped))
        bounding_boxes_arr.append(bb)
    return cropped_arr, bounding_boxes_arr
# if nrof_faces > 0:
# det = bounding_boxes[0]['box']
# det_arr = []
# img_size = np.asarray(orig_img.shape)[0:2]
# if nrof_faces > 1:
# margin = margin / 1.5
# if detect_multiple_faces:
# for i in range(nrof_faces):
# det_arr.append(np.squeeze(bounding_boxes[i]['box']))
# else:
# bounding_box_size = (det[1] + det[3])
# img_center = img_size / 2
# offsets = np.vstack([(det[0] + det[2]) / 2 - img_center[1],
# (det[1] + det[3]) / 2 - img_center[0]])
# offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
# index = np.argmax(bounding_box_size - offset_dist_squared * 2.0) # some extra weight on the centering
# det_arr.append(det[index, :])
# else:
# det_arr.append(np.squeeze(det))
#
# cropped_arr = []
# bounding_boxes_arr = []
# for i, det in enumerate(det_arr):
# det = np.squeeze(det)
# bb = np.zeros(4, dtype=np.int32)
# # add in margin
# marg1 = int((det[2] - det[0]) * margin)
# marg2 = int((det[3] - det[1]) * margin)
#
# bb[0] = max(det[0] - marg1 / 2, 0)
# bb[1] = max(det[1] - marg2 / 2, 0)
# bb[2] = min(det[0] + det[2] + marg1 / 2, img_size[0])
# bb[3] = min(det[1] + det[3] + marg2 / 2, img_size[1])
# cropped = orig_img[bb[0]:bb[2], bb[1]: bb[3], :]
# cropped_arr.append(cropped)
# bounding_boxes_arr.append([bb[0], bb[1], bb[2], bb[3]])
# return cropped_arr, bounding_boxes_arr
# else:
# return None | run MTCNN face detector |
156,740 | import argparse
import glob
import logging
import os
import sys
import tensorflow as tf
import numpy as np
from fawkes.differentiator import FawkesMaskGeneration
from fawkes.utils import init_gpu, dump_image, reverse_process_cloaked, \
Faces, filter_image_paths, load_extractor
from fawkes.align_face import aligner
def generate_cloak_images(protector, image_X, target_emb=None):
    """Delegate cloak computation to `protector` and return its result."""
    return protector.compute(image_X, target_emb)
156,741 | from tradingview_ta import TA_Handler, Interval, Exchange
import os
import sys
import glob
import time
import threading
OSC_INDICATORS = ['MACD', 'Stoch.RSI', 'Mom']
OSC_THRESHOLD = 2
MA_INDICATORS = ['EMA10', 'EMA20']
MA_THRESHOLD = 2
PAIR_WITH = 'USDT'
TICKERS = 'signalsample.txt'
TIME_TO_WAIT = 4
def analyze(pairs):
    """Scan `pairs` on TradingView and record those where enough oscillator
    and moving-average indicators say BUY.

    Signalled pairs are appended to signals/custsignalmod.exs (one per
    line); returns a dict mapping pair -> pair for the signalled coins.
    NOTE(review): EXCHANGE/SCREENER/INTERVAL/FULL_LOG are module globals
    defined outside this snippet.
    """
    signal_coins = {}
    handler = {}
    # Start each pass with a clean signal file.
    if os.path.exists('signals/custsignalmod.exs'):
        os.remove('signals/custsignalmod.exs')
    for pair in pairs:
        handler[pair] = TA_Handler(
            symbol=pair,
            exchange=EXCHANGE,
            screener=SCREENER,
            interval=INTERVAL,
            timeout=10)
    for pair in pairs:
        try:
            analysis = handler[pair].get_analysis()
        except Exception as e:
            print("Signalsample:")
            print("Exception:")
            print(e)
            print(f'Coin: {pair}')
            print(f'handler: {handler[pair]}')
            # BUGFIX: previously execution fell through and evaluated a
            # stale (or undefined) analysis for this pair; skip it instead.
            continue
        oscCheck = 0
        maCheck = 0
        for indicator in OSC_INDICATORS:
            if analysis.oscillators['COMPUTE'][indicator] == 'BUY':
                oscCheck += 1
        for indicator in MA_INDICATORS:
            if analysis.moving_averages['COMPUTE'][indicator] == 'BUY':
                maCheck += 1
        if FULL_LOG:
            print(f'Custsignalmod:{pair} Oscillators:{oscCheck}/{len(OSC_INDICATORS)} Moving averages:{maCheck}/{len(MA_INDICATORS)}')
        # Signal only when BOTH indicator families clear their thresholds.
        if oscCheck >= OSC_THRESHOLD and maCheck >= MA_THRESHOLD:
            signal_coins[pair] = pair
            print(f'Custsignalmod: Signal detected on {pair} at {oscCheck}/{len(OSC_INDICATORS)} oscillators and {maCheck}/{len(MA_INDICATORS)} moving averages.')
            with open('signals/custsignalmod.exs', 'a+') as f:
                f.write(pair + '\n')
    return signal_coins
def do_work():
    """Background loop: build the pair list from TICKERS, analyze it, then
    sleep TIME_TO_WAIT minutes; exits when the main thread dies."""
    # BUGFIX: the original opened and fully re-read TICKERS once per line
    # (leaking file handles and doing O(n^2) work); read it a single time.
    with open(TICKERS) as f:
        pairs = [line.strip() + PAIR_WITH for line in f]
    while True:
        if not threading.main_thread().is_alive():
            exit()
        print(f'Custsignalmod: Analyzing {len(pairs)} coins')
        signal_coins = analyze(pairs)
        print(f'Custsignalmod: {len(signal_coins)} coins above {OSC_THRESHOLD}/{len(OSC_INDICATORS)} oscillators and {MA_THRESHOLD}/{len(MA_INDICATORS)} moving averages Waiting {TIME_TO_WAIT} minutes for next analysis.')
        time.sleep(TIME_TO_WAIT * 60)
156,742 | import sys
import json
import os
from binance.client import Client
from datetime import datetime
from helpers.parameters import (
parse_args, load_config
)
from helpers.handle_creds import (
load_correct_creds
)
from colorama import init
# One-shot liquidation script: market-sell every position recorded in
# coins_bought.json and report per-coin and total P&L.
# NOTE(review): LOG_FILE, client, txcolors and LOG_TRADES are not defined in
# this snippet - they are assumed to come from the surrounding module.
LOG_FILE_PATH = '../' + LOG_FILE
with open('../coins_bought.json', 'r') as f:
    coins = json.load(f)
total_profit = 0
total_price_change = 0
for coin in list(coins):
    # Market-sell the full recorded volume of this coin.
    sell_coin = client.create_order(
        symbol = coin,
        side = 'SELL',
        type = 'MARKET',
        quantity = coins[coin]['volume']
    )
    BuyPrice = float(coins[coin]['bought_at'])
    # Price obtained for the first fill of the sell order.
    LastPrice = float(sell_coin['fills'][0]['price'])
    profit = (LastPrice - BuyPrice) * coins[coin]['volume']
    PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
    total_profit += profit
    total_price_change += PriceChange
    # SELL_PROFIT colour for gains, SELL_LOSS for losses.
    text_color = txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS
    console_log_text = f"{text_color}Sell: {coins[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange:.2f}%{txcolors.DEFAULT}"
    print(console_log_text)
    if LOG_TRADES:
        timestamp = datetime.now().strftime("%d/%m %H:%M:%S")
        write_log(f"Sell: {coins[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange:.2f}%")
# Summary line after all positions are closed.
text_color = txcolors.SELL_PROFIT if total_price_change >= 0. else txcolors.SELL_LOSS
print(f"Total Profit: {text_color}{total_profit:.2f}{txcolors.DEFAULT}. Total Price Change: {text_color}{total_price_change:.2f}%{txcolors.DEFAULT}")
def write_log(logline):
    """Append `logline`, prefixed with a day/month time stamp, to the log file."""
    stamp = datetime.now().strftime("%d/%m %H:%M:%S")
    with open(LOG_FILE_PATH, 'a+') as log_file:
        log_file.write(stamp + ' ' + logline + '\n')
156,743 | from tradingview_ta import TA_Handler, Interval, Exchange
import os
import sys
import glob
import time
TA_BUY_THRESHOLD = 18
PAIR_WITH = 'USDT'
TICKERS = 'signalsample.txt'
TIME_TO_WAIT = 4
def analyze(pairs):
    """Fetch TradingView summaries on two timeframes for every pair and
    record those whose BUY count clears TA_BUY_THRESHOLD on both.

    Signalled pairs are appended to signals/signalsample.exs and returned
    as a dict pair -> pair.
    """
    taMax = 0
    taMaxCoin = 'none'
    signal_coins = {}
    first_handler = {}
    second_handler = {}
    # Start each pass with a clean signal file.
    if os.path.exists('signals/signalsample.exs'):
        os.remove('signals/signalsample.exs')
    for pair in pairs:
        first_handler[pair] = TA_Handler(
            symbol=pair,
            exchange=MY_EXCHANGE,
            screener=MY_SCREENER,
            interval=MY_FIRST_INTERVAL,
            timeout=10
        )
        second_handler[pair] = TA_Handler(
            symbol=pair,
            exchange=MY_EXCHANGE,
            screener=MY_SCREENER,
            interval=MY_SECOND_INTERVAL,
            timeout=10
        )
    for pair in pairs:
        try:
            first_analysis = first_handler[pair].get_analysis()
            second_analysis = second_handler[pair].get_analysis()
        except Exception as e:
            print("Signalsample:")
            print("Exception:")
            print(e)
            print(f'Coin: {pair}')
            print(f'First handler: {first_handler[pair]}')
            print(f'Second handler: {second_handler[pair]}')
            # BUGFIX: previously fell through and used a stale/undefined
            # analysis for this pair; skip it instead. (Also removed the
            # unused tacheckS local.)
            continue
        first_tacheck = first_analysis.summary['BUY']
        second_tacheck = second_analysis.summary['BUY']
        if FULL_LOG:
            print(f'Signalsample:{pair} First {first_tacheck} Second {second_tacheck}')
        # Track the strongest short-timeframe signal for the summary line.
        if first_tacheck > taMax:
            taMax = first_tacheck
            taMaxCoin = pair
        if first_tacheck >= TA_BUY_THRESHOLD and second_tacheck >= TA_BUY_THRESHOLD:
            signal_coins[pair] = pair
            print(f'Signalsample: Signal detected on {pair}')
            with open('signals/signalsample.exs', 'a+') as f:
                f.write(pair + '\n')
    print(f'Signalsample: Max signal by {taMaxCoin} at {taMax} on shortest timeframe')
    return signal_coins
def do_work():
    """Loop forever: analyze all ticker pairs, report, sleep TIME_TO_WAIT minutes."""
    # BUGFIX: the original opened and fully re-read TICKERS once per line
    # (leaking file handles); read it a single time instead.
    with open(TICKERS) as f:
        pairs = [line.strip() + PAIR_WITH for line in f]
    while True:
        print(f'Signalsample: Analyzing {len(pairs)} coins')
        signal_coins = analyze(pairs)
        if len(signal_coins) == 0:
            print(f'Signalsample: No coins above {TA_BUY_THRESHOLD} threshold on both timeframes. Waiting {TIME_TO_WAIT} minutes for next analysis')
        else:
            # BUGFIX: log message typo "treshold" corrected.
            print(f'Signalsample: {len(signal_coins)} coins above {TA_BUY_THRESHOLD} threshold on both timeframes. Waiting {TIME_TO_WAIT} minutes for next analysis')
        time.sleep(TIME_TO_WAIT * 60)
156,744 | import yaml
import argparse
def load_config(file):
    """Parse the YAML config at `file`; exit the process on any failure."""
    try:
        with open(file) as fh:
            return yaml.load(fh, Loader=yaml.FullLoader)
    except FileNotFoundError:
        exit(f'Could not find {file}')
    except Exception as e:
        exit(f'Encountered exception...\n {e}')
156,745 | import yaml
import argparse
def parse_args():
    """Build and evaluate the bot's command-line interface."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', '-d', help="extra logging", action='store_true')
    parser.add_argument('--config', '-c', help="Path to config.yml")
    parser.add_argument('--creds', '-u', help="Path to creds file")
    parser.add_argument('--notimeout', help="Dont use timeout in prod", action="store_true")
    return parser.parse_args()
156,746 | from sys import exit
def load_correct_creds(creds):
    """Return (access_key, secret_key) from the 'prod' section of `creds`,
    terminating the process with a readable message on malformed input."""
    try:
        prod = creds['prod']
        return prod['access_key'], prod['secret_key']
    except TypeError as te:
        exit('Your credentials are formatted incorectly\n'
             f'TypeError:Exception:\n\t{str(te)}')
    except Exception as e:
        exit('oopsies, looks like you did something real bad. Fallback Exception caught...\n'
             f'Exception:\n\t{str(e)}')
156,747 | from sys import exit
The provided code snippet includes necessary dependencies for implementing the `test_api_key` function. Write a Python function `def test_api_key(client, BinanceAPIException)` to solve the following problem:
Checks whether the supplied API keys return errors. Args: client (class): Binance client class; BinanceAPIException (class): Binance exceptions class. Returns: bool | msg: true/false depending on success, plus a message.
Here is the function:
def test_api_key(client, BinanceAPIException):
    """Checks to see if API keys supplied returns errors

    Args:
        client (class): binance client class
        BinanceAPIException (class): binance exceptions class

    Returns:
        bool | msg: true/false depending on success, and message
    """
    try:
        # A successful authenticated call proves the key works.
        client.get_account()
        # NOTE(review): "succesfully" typo is user-visible; left unchanged here.
        return True, "API key validated succesfully"
    except BinanceAPIException as e:
        # Map the most common Binance error codes to actionable advice.
        if e.code in [-2015,-2014]:
            bad_key = "Your API key is not formatted correctly..."
            america = "If you are in america, you will have to update the config to set AMERICAN_USER: True"
            ip_b = "If you set an IP block on your keys make sure this IP address is allowed. check ipinfo.io/ip"
            msg = f"Your API key is either incorrect, IP blocked, or incorrect tld/permissons...\n  most likely: {bad_key}\n  {america}\n  {ip_b}"
        elif e.code == -2021:
            issue = "https://github.com/CyberPunkMetalHead/Binance-volatility-trading-bot/issues/28"
            desc = "Ensure your OS is time synced with a timeserver. See issue."
            msg = f"Timestamp for this request was 1000ms ahead of the server's time.\n  {issue}\n  {desc}"
        elif e.code == -1021:
            desc = "Your operating system time is not properly synced... Please sync ntp time with 'pool.ntp.org'"
            msg = f"{desc}\nmaybe try this:\n\tsudo ntpdate pool.ntp.org"
        else:
            msg = "Encountered an API Error code that was not caught nicely, please open issue...\n"
            msg += str(e)
        return False, msg
    except Exception as e:
        # Anything non-Binance (network, DNS, ...) lands here.
        return False, f"Fallback exception occured:\n{e}"
156,748 | from tradingview_ta import TA_Handler, Interval, Exchange
import os
import time
import threading
TIME_TO_WAIT = 1
def analyze():
    """Query TradingView for the market-state symbol and decide whether the
    bot should pause buying.

    Returns True (pause) when at least THRESHOLD moving-average indicators
    say SELL; False otherwise, or when the fetch fails.
    """
    handler = TA_Handler(
        symbol=SYMBOL,
        exchange=EXCHANGE,
        screener=SCREENER,
        interval=INTERVAL,
        timeout=10)
    try:
        analysis = handler.get_analysis()
    except Exception as e:
        print("pausebotmod:")
        print("Exception:")
        print(e)
        # BUGFIX: previously execution continued with the empty `analysis`
        # dict and crashed on .moving_averages. Fail open instead: skip
        # this checkup and keep the bot running.
        return False
    ma_sell = analysis.moving_averages['SELL']
    if ma_sell >= THRESHOLD:
        paused = True
        print(f'pausebotmod: Market not looking too good, bot paused from buying {ma_sell}/{THRESHOLD} Waiting {TIME_TO_WAIT} minutes for next market checkup')
    else:
        print(f'pausebotmod: Market looks ok, bot is running {ma_sell}/{THRESHOLD} Waiting {TIME_TO_WAIT} minutes for next market checkup ')
        paused = False
    return paused
def do_work():
    """Background loop: re-check market state every TIME_TO_WAIT minutes and
    maintain signals/paused.exc as the pause flag read by the main bot."""
    while True:
        # Stop with the main program instead of lingering after it exits.
        if not threading.main_thread().is_alive(): exit()
        # print(f'pausebotmod: Fetching market state')
        paused = analyze()
        if paused:
            # Presence of this file tells the trader to stop buying.
            with open('signals/paused.exc','a+') as f:
                f.write('yes')
        else:
            if os.path.isfile("signals/paused.exc"):
                os.remove('signals/paused.exc')
        # print(f'pausebotmod: Waiting {TIME_TO_WAIT} minutes for next market checkup')
        time.sleep((TIME_TO_WAIT*60))
156,749 | import os
import sys
import threading
import importlib
import glob
from colorama import init
from binance.client import Client
from binance.exceptions import BinanceAPIException
from requests.exceptions import ReadTimeout, ConnectionError
from datetime import date, datetime, timedelta
import time
from itertools import count
import json
from helpers.parameters import (
parse_args, load_config
)
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
class txcolors:
    # ANSI escape codes used to colour console output.
    BUY = '\033[92m'          # bright green
    WARNING = '\033[93m'      # yellow
    SELL_LOSS = '\033[91m'    # bright red
    SELL_PROFIT = '\033[32m'  # green
    DIM = '\033[2m\033[35m'   # dim + magenta
    DEFAULT = '\033[39m'      # reset foreground to terminal default
def convert_volume():
    '''Converts the volume given in QUANTITY from USDT to the each coin's volume

    Returns:
        volume: dict coin -> order quantity in base-asset units, rounded to
            the coin's step size (1 decimal place if lookup failed).
        last_price: dict coin -> latest ticker info from wait_for_price().
    '''
    volatile_coins, number_of_coins, last_price = wait_for_price()
    lot_size = {}
    volume = {}
    for coin in volatile_coins:
        # Find the correct step size for each coin
        # max accuracy for BTC for example is 6 decimal points
        # while XRP is only 1
        try:
            info = client.get_symbol_info(coin)
            # stepSize like '0.00100000' -> 3 decimal places.
            # NOTE(review): assumes the LOT_SIZE filter sits at index 2 of
            # info['filters'] - confirm against the Binance API response.
            step_size = info['filters'][2]['stepSize']
            lot_size[coin] = step_size.index('1') - 1
            if lot_size[coin] < 0:
                lot_size[coin] = 0
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Lookup failures still fall back
            # to the 1-decimal default below.
            pass
        # calculate the volume in coin from QUANTITY in USDT (default)
        volume[coin] = float(QUANTITY / float(last_price[coin]['price']))
        # define the volume with the correct step size
        if coin not in lot_size:
            volume[coin] = float('{:.1f}'.format(volume[coin]))
        else:
            # if lot size has 0 decimal points, make the volume an integer
            if lot_size[coin] == 0:
                volume[coin] = int(volume[coin])
            else:
                volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
    return volume, last_price
def write_log(logline):
    """Append `logline`, prefixed with a day/month time stamp, to LOG_FILE."""
    stamp = datetime.now().strftime("%d/%m %H:%M:%S")
    with open(LOG_FILE, 'a+') as log_file:
        log_file.write(stamp + ' ' + logline + '\n')
The provided code snippet includes necessary dependencies for implementing the `buy` function. Write a Python function `def buy()` to solve the following problem:
Place Buy market orders for each volatile coin found
Here is the function:
def buy():
    '''Place Buy market orders for each volatile coin found.

    Returns (orders, last_price, volume): the placed-order info keyed by
    coin, the ticker snapshot, and the per-coin quantities.
    NOTE(review): coins_bought, TEST_MODE, LOG_TRADES and client are module
    globals defined outside this snippet.
    '''
    volume, last_price = convert_volume()
    orders = {}
    for coin in volume:
        # only buy if the there are no active trades on the coin
        if coin not in coins_bought:
            print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
            if TEST_MODE:
                # Fabricate a minimal order record instead of hitting the API.
                orders[coin] = [{
                    'symbol': coin,
                    'orderId': 0,
                    'time': datetime.now().timestamp()
                }]
                # Log trade
                if LOG_TRADES:
                    write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
                continue
            # try to create a real order if the test orders did not raise an exception
            # NOTE(review): buy_limit is assigned but never used afterwards.
            try:
                buy_limit = client.create_order(
                    symbol = coin,
                    side = 'BUY',
                    type = 'MARKET',
                    quantity = volume[coin]
                )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)
            # run the else block if the position has been placed and return order info
            else:
                orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                # binance sometimes returns an empty list, the code will wait here until binance returns the order
                while orders[coin] == []:
                    print('Binance is being slow in returning the order, calling the API again...')
                    orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                    time.sleep(1)
                else:
                    # while/else: runs once the order list is non-empty.
                    print('Order returned, saving order to file')
                    # Log trade
                    if LOG_TRADES:
                        write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
        else:
            print(f'Signal detected, but there is already an active trade on {coin}')
    return orders, last_price, volume
156,750 | import os
import sys
import threading
import importlib
import glob
from colorama import init
from binance.client import Client
from binance.exceptions import BinanceAPIException
from requests.exceptions import ReadTimeout, ConnectionError
from datetime import date, datetime, timedelta
import time
from itertools import count
import json
from helpers.parameters import (
parse_args, load_config
)
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
The provided code snippet includes necessary dependencies for implementing the `update_portfolio` function. Write a Python function `def update_portfolio(orders, last_price, volume)` to solve the following problem:
add every coin bought to our portfolio for tracking/selling later
Here is the function:
def update_portfolio(orders, last_price, volume):
    '''add every coin bought to our portfolio for tracking/selling later'''
    # NOTE(review): DEBUG, coins_bought, coins_bought_file_path, STOP_LOSS
    # and TAKE_PROFIT are module globals defined outside this snippet.
    if DEBUG: print(orders)
    for coin in orders:
        # Record order details plus the bot's exit thresholds for this coin.
        coins_bought[coin] = {
            'symbol': orders[coin][0]['symbol'],
            'orderid': orders[coin][0]['orderId'],
            'timestamp': orders[coin][0]['time'],
            'bought_at': last_price[coin]['price'],
            'volume': volume[coin],
            'stop_loss': -STOP_LOSS,
            'take_profit': TAKE_PROFIT,
        }
        # save the coins in a json file in the same directory
        with open(coins_bought_file_path, 'w') as file:
            json.dump(coins_bought, file, indent=4)
        print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file')
156,751 | from tradingview_ta import TA_Handler, Interval, Exchange
import os
import sys
import glob
import time
MY_EXCHANGE = 'BINANCE'
MY_SCREENER = 'CRYPTO'
MY_FIRST_INTERVAL = Interval.INTERVAL_1_MINUTE
MY_SECOND_INTERVAL = Interval.INTERVAL_5_MINUTES
TA_BUY_THRESHOLD = 18
FULL_LOG = False
def analyze(pairs):
    """Fetch TradingView summaries on two timeframes for every pair and
    record those whose BUY count clears TA_BUY_THRESHOLD on both.

    Signalled pairs are appended to signals/signalsample.exs and returned
    as a dict pair -> pair.
    """
    taMax = 0
    taMaxCoin = 'none'
    signal_coins = {}
    first_handler = {}
    second_handler = {}
    # Start each pass with a clean signal file.
    if os.path.exists('signals/signalsample.exs'):
        os.remove('signals/signalsample.exs')
    for pair in pairs:
        first_handler[pair] = TA_Handler(
            symbol=pair,
            exchange=MY_EXCHANGE,
            screener=MY_SCREENER,
            interval=MY_FIRST_INTERVAL,
            timeout=10
        )
        second_handler[pair] = TA_Handler(
            symbol=pair,
            exchange=MY_EXCHANGE,
            screener=MY_SCREENER,
            interval=MY_SECOND_INTERVAL,
            timeout=10
        )
    for pair in pairs:
        try:
            first_analysis = first_handler[pair].get_analysis()
            second_analysis = second_handler[pair].get_analysis()
        except Exception as e:
            # BUGFIX: message typo "Exeption:" corrected, and the pair is
            # now skipped instead of evaluating a stale/undefined analysis.
            # (Also removed the unused tacheckS local.)
            print("Exception:")
            print(e)
            print(f'Coin: {pair}')
            print(f'First handler: {first_handler[pair]}')
            print(f'Second handler: {second_handler[pair]}')
            continue
        first_tacheck = first_analysis.summary['BUY']
        second_tacheck = second_analysis.summary['BUY']
        if FULL_LOG:
            print(f'{pair} First {first_tacheck} Second {second_tacheck}')
        else:
            # Progress dot per pair when not in verbose mode.
            print(".", end = '')
        # Track the strongest short-timeframe signal for the summary line.
        if first_tacheck > taMax:
            taMax = first_tacheck
            taMaxCoin = pair
        if first_tacheck >= TA_BUY_THRESHOLD and second_tacheck >= TA_BUY_THRESHOLD:
            signal_coins[pair] = pair
            print("")
            print(f'Signal detected on {pair}')
            with open('signals/signalsample.exs', 'a+') as f:
                f.write(pair + '\n')
    print("")
    print(f'Max signal by {taMaxCoin} at {taMax} on shortest timeframe')
    return signal_coins
156,752 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
from plyfile import PlyData, PlyElement
from torch.utils.data import Dataset, DataLoader
import torch
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
def colorjitter(img, factor):
    # Apply saturation and hue jitter via torchvision.transforms.functional.
    # factor layout: (brightness, contrast, saturation, hue); the hue entry
    # is offset by 1.0 before use.
    # NOTE(review): the brightness/contrast lines are commented out -
    # presumably disabled on purpose; confirm.
    # img = F.adjust_brightness(img, factor[0])
    # img = F.adjust_contrast(img, factor[1])
    img = F.adjust_saturation(img, factor[2])
    img = F.adjust_hue(img, factor[3]-1.0)
    return img
156,753 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
from plyfile import PlyData, PlyElement
from torch.utils.data import Dataset, DataLoader
import torch
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
def trans_t(t):
    """4x4 float32 translation by `t` along the camera z axis."""
    m = np.eye(4, dtype=np.float32)
    m[2, 3] = t
    return m

def rot_phi(phi):
    """4x4 float32 rotation by `phi` radians about the x axis."""
    c, s = np.cos(phi), np.sin(phi)
    return np.asarray([
        [1, 0, 0, 0],
        [0, c, -s, 0],
        [0, s, c, 0],
        [0, 0, 0, 1],
    ], dtype=np.float32)

def rot_theta(th):
    """4x4 float32 rotation by `th` radians about the y axis."""
    c, s = np.cos(th), np.sin(th)
    return np.asarray([
        [c, 0, -s, 0],
        [0, 1, 0, 0],
        [s, 0, c, 0],
        [0, 0, 0, 1],
    ], dtype=np.float32)

def pose_spherical(theta, phi, radius):
    """Camera-to-world pose on a sphere of the given radius.

    `theta` and `phi` are in degrees; the final fixed matrix converts the
    composed pose into the dataset's axis convention.
    """
    c2w = trans_t(radius)
    c2w = rot_phi(phi / 180. * np.pi) @ c2w
    c2w = rot_theta(theta / 180. * np.pi) @ c2w
    c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w
    return c2w
156,754 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
from plyfile import PlyData, PlyElement
from torch.utils.data import Dataset, DataLoader
import torch
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
The provided code snippet includes necessary dependencies for implementing the `get_rays` function. Write a Python function `def get_rays(directions, c2w)` to solve the following problem:
Get ray origin and normalized directions in world coordinate for all pixels in one image. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate
Here is the function:
def get_rays(directions, c2w):
    """
    Transform per-pixel camera-space ray directions into world space.

    Inputs:
        directions: (H, W, 3) ray directions in camera coordinates.
        c2w: (3, 4) camera-to-world matrix (rotation | translation).
    Outputs:
        rays_o: (H*W, 3) ray origins (all equal to the camera centre).
        rays_d: (H*W, 3) world-space ray directions (NOT re-normalized).
    """
    c2w = torch.FloatTensor(c2w)
    rotation = c2w[:3, :3]
    translation = c2w[:3, 3]
    # Rotate directions from the camera frame into the world frame.
    world_dirs = directions @ rotation.T                # (H, W, 3)
    # Every ray starts at the camera origin in world coordinates.
    origins = translation.expand(world_dirs.shape)      # (H, W, 3)
    return origins.reshape(-1, 3), world_dirs.reshape(-1, 3)
156,755 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
from plyfile import PlyData, PlyElement
from torch.utils.data import Dataset, DataLoader
import torch
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
The provided code snippet includes necessary dependencies for implementing the `get_ray_directions` function. Write a Python function `def get_ray_directions(H, W, focal, center=None)` to solve the following problem:
Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate
Here is the function:
def get_ray_directions(H, W, focal, center=None):
    """
    Camera-space viewing direction for every pixel of an H x W image.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        H, W: image height and width in pixels
        focal: per-axis focal lengths, indexed as (fx, fy)
        center: optional principal point (cx, cy); defaults to the image center
    Outputs:
        directions: (H, W, 3) per-pixel ray directions in camera coordinates
    """
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    u, v = grid.unbind(-1)
    # No +0.5 half-pixel centering on purpose: calibration is not accurate
    # enough for it to matter (see https://github.com/bmild/nerf/issues/24).
    cx, cy = center if center is not None else (W / 2, H / 2)
    return torch.stack(
        [(u - cx) / focal[0], (v - cy) / focal[1], torch.ones_like(u)], -1)  # (H, W, 3)
156,756 | import argparse
import os
import urllib.request
import urllib
import tempfile
def get_release_scans(release_file):
    """Fetch the release index from *release_file* (a URL) and return the scan ids it lists."""
    print("release_file", release_file)
    # The index holds one scan id per line; decode bytes, strip the newline.
    listing = urllib.request.urlopen(release_file)
    return [raw.decode('utf8').rstrip('\n') for raw in listing]
156,757 | import argparse
import os
import urllib.request
import urllib
import tempfile
RELEASE_NAME = RELEASES_NAMES[0]
def download_scan(scan_id, out_dir, file_types, use_v1_sens):
    """Download every requested file type for one scan into *out_dir*."""
    print('Downloading ScanNet ' + RELEASE_NAME + ' scan ' + scan_id + ' ...')
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    for ft in file_types:
        # .sens streams may be pulled from the v1 release when requested.
        from_v1 = use_v1_sens and ft == '.sens'
        release = RELEASES[V1_IDX] if from_v1 else RELEASE
        url = BASE_URL + release + '/' + scan_id + '/' + scan_id + ft
        download_file(url, out_dir + '/' + scan_id + ft)
    print('Downloaded scan ' + scan_id)
def download_release(release_scans, out_dir, file_types, use_v1_sens):
    """Download each scan in *release_scans* into a per-scan subfolder of *out_dir*."""
    if not release_scans:
        return
    print('Downloading ScanNet ' + RELEASE_NAME + ' release to ' + out_dir + '...')
    for scan_id in release_scans:
        download_scan(scan_id, os.path.join(out_dir, scan_id), file_types, use_v1_sens)
    print('Downloaded ScanNet ' + RELEASE_NAME + ' release.')
156,758 | import argparse
import os
import urllib.request
import urllib
import tempfile
BASE_URL = 'http://kaldir.vc.in.tum.de/scannet/'
LABEL_MAP_FILES = ['scannetv2-labels.combined.tsv', 'scannet-labels.combined.tsv']
RELEASES_TASKS = ['v2/tasks', 'v1/tasks']
V1_IDX = 1
def download_file(url, out_file):
    """Download *url* to *out_file*; skip (with a warning) if it already exists.

    The payload is fetched into a temp file in the destination directory so
    the final rename never leaves a half-written file behind.
    """
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if os.path.isfile(out_file):
        print('WARNING: skipping download of existing file ' + out_file)
        return
    print('\t' + url + ' > ' + out_file)
    fd, tmp_path = tempfile.mkstemp(dir=out_dir)
    # Release the descriptor immediately; urlretrieve reopens by path.
    os.fdopen(fd, 'w').close()
    urllib.request.urlretrieve(url, tmp_path)
    os.rename(tmp_path, out_file)
def download_task_data(out_dir):
    """Fetch the ScanNet v1 task-data bundles (label map, models, data zips) into *out_dir*."""
    print('Downloading ScanNet v1 task data...')
    task_files = [
        LABEL_MAP_FILES[V1_IDX],
        'obj_classification/data.zip',
        'obj_classification/trained_models.zip',
        'voxel_labeling/data.zip',
        'voxel_labeling/trained_models.zip',
    ]
    for rel_path in task_files:
        url = BASE_URL + RELEASES_TASKS[V1_IDX] + '/' + rel_path
        localpath = os.path.join(out_dir, rel_path)
        parent = os.path.dirname(localpath)
        if not os.path.isdir(parent):
            os.makedirs(parent)
        download_file(url, localpath)
    print('Downloaded task data.')
156,759 | import argparse
import os
import urllib.request
import urllib
import tempfile
def download_file(url, out_file):
    """Download *url* to *out_file*; skip (with a warning) if it already exists.

    The payload is fetched into a temp file in the destination directory so
    the final rename never leaves a half-written file behind.
    """
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if os.path.isfile(out_file):
        print('WARNING: skipping download of existing file ' + out_file)
        return
    print('\t' + url + ' > ' + out_file)
    fd, tmp_path = tempfile.mkstemp(dir=out_dir)
    # Release the descriptor immediately; urlretrieve reopens by path.
    os.fdopen(fd, 'w').close()
    urllib.request.urlretrieve(url, tmp_path)
    os.rename(tmp_path, out_file)
def download_tfrecords(in_dir, out_dir):
    """Mirror every sharded tfrecord (both resolutions, all splits) from *in_dir* to *out_dir*."""
    print('Downloading tf records (302 GB)...')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    shards_per_split = {'train': 100, 'val': 25, 'test': 10}
    for folder in ('hires_tfrecords', 'lores_tfrecords'):
        local_folder = '%s/%s' % (out_dir, folder)
        if not os.path.exists(local_folder):
            os.makedirs(local_folder)
        for split, shard_count in shards_per_split.items():
            for shard in range(shard_count):
                fname = '%s-%05d-of-%05d.tfrecords' % (split, shard, shard_count)
                download_file('%s/%s/%s' % (in_dir, folder, fname),
                              '%s/%s/%s' % (out_dir, folder, fname))
156,760 | import argparse
import os
import urllib.request
import urllib
import tempfile
BASE_URL = 'http://kaldir.vc.in.tum.de/scannet/'
RELEASE_TASKS = RELEASES_TASKS[0]
RELEASE_NAME = RELEASES_NAMES[0]
LABEL_MAP_FILE = LABEL_MAP_FILES[0]
def download_file(url, out_file):
    """Download *url* to *out_file*; skip (with a warning) if it already exists.

    The payload is fetched into a temp file in the destination directory so
    the final rename never leaves a half-written file behind.
    """
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if os.path.isfile(out_file):
        print('WARNING: skipping download of existing file ' + out_file)
        return
    print('\t' + url + ' > ' + out_file)
    fd, tmp_path = tempfile.mkstemp(dir=out_dir)
    # Release the descriptor immediately; urlretrieve reopens by path.
    os.fdopen(fd, 'w').close()
    urllib.request.urlretrieve(url, tmp_path)
    os.rename(tmp_path, out_file)
def download_label_map(out_dir):
    """Fetch the ScanNet label-mapping file for the current release into *out_dir*."""
    print('Downloading ScanNet ' + RELEASE_NAME + ' label mapping file...')
    for file in [LABEL_MAP_FILE]:
        localpath = os.path.join(out_dir, file)
        localdir = os.path.dirname(localpath)
        if not os.path.isdir(localdir):
            os.makedirs(localdir)
        download_file(BASE_URL + RELEASE_TASKS + '/' + file, localpath)
    print('Downloaded ScanNet ' + RELEASE_NAME + ' label mapping file.')
156,761 | import numpy as np
import open3d as o3d
def get_cv_raydir(pixelcoords, height, width, focal, rot):
    """Per-pixel unit ray directions for an OpenCV-style pinhole camera.

    pixelcoords: (H, W, 2) pixel coordinates.
    focal: scalar float or (fx, fy) pair.
    rot: (3, 3) rotation, applied per pixel as ``dirs @ rot``.
    Returns (H, W, 3) directions normalized to (near-)unit length.
    """
    fx, fy = (focal, focal) if isinstance(focal, float) else (focal[0], focal[1])
    cam_x = (pixelcoords[..., 0] - width / 2.0) / fx
    cam_y = (pixelcoords[..., 1] - height / 2.0) / fy
    cam_dirs = np.stack([cam_x, cam_y, np.ones_like(cam_x)], axis=-1)
    # Rotate into the target frame (same contraction as the original
    # broadcast-and-sum: out[j] = sum_i dirs[i] * rot[i, j]).
    world_dirs = cam_dirs @ rot
    # Small epsilon keeps the division safe for degenerate directions.
    return world_dirs / (np.linalg.norm(world_dirs, axis=-1, keepdims=True) + 1e-5)
156,762 | import numpy as np
import open3d as o3d
def get_camera_rotation(eye, center, up):
    """Build a look-at rotation from *eye*, *center*, *up* (numpy 3-vectors).

    Columns of the returned (3, 3) matrix are the camera basis vectors
    (right, y-axis, back = -forward) expressed in world space.
    """
    nz = center - eye
    nz /= np.linalg.norm(nz)
    x = np.cross(nz, up)
    x /= np.linalg.norm(x)
    y = np.cross(x, nz)
    return np.array([x, y, -nz]).T


def get_blender_raydir(pixelcoords, height, width, focal, rot, dir_norm):
    """Per-pixel ray directions with flipped y/z axes (camera looks down -z).

    pixelcoords: (H, W, 2) pixel coordinates; focal: scalar focal length;
    rot: (3, 3) rotation applied per pixel (out[i] = sum_j rot[i, j] * dir[j]).
    dir_norm: when True, normalize each direction to (near-)unit length.
    Returns (H, W, 3) directions.
    """
    x = (pixelcoords[..., 0] + 0.5 - width / 2.0) / focal
    y = (pixelcoords[..., 1] + 0.5 - height / 2.0) / focal
    z = np.ones_like(x)
    dirs = np.stack([x, -y, -z], axis=-1)
    dirs = np.sum(dirs[..., None, :] * rot[:, :], axis=-1)  # h*w*1*3 x 3*3
    if dir_norm:
        # Epsilon guards against division by zero for degenerate directions.
        dirs = dirs / (np.linalg.norm(dirs, axis=-1, keepdims=True) + 1e-5)
    return dirs


def get_optix_raydir(pixelcoords, height, width, focal, eye, center, up, dir_norm=True):
    """Ray directions for a look-at camera defined by *eye*/*center*/*up*.

    Bug fix: the original forwarded only five arguments to
    get_blender_raydir, which requires six (dir_norm), so every call raised
    TypeError. dir_norm is now an explicit parameter (default True:
    normalized directions) and forwarded through.
    """
    c2w = get_camera_rotation(eye, center, up)
    return get_blender_raydir(pixelcoords, height, width, focal, c2w, dir_norm)
156,763 | import numpy as np
import open3d as o3d
def flip_z(poses):
    """Right-multiply each (4, 4) pose in *poses* by diag(1, 1, -1, 1), flipping its z axis."""
    flip = np.eye(4, dtype=np.float32)
    flip[2, 2] = -1.0
    return np.matmul(poses, flip[None, ...])
156,764 | import numpy as np
import open3d as o3d
def triangluation_bpa(pnts, test_pnts=None, full_comb=False):
    """Triangulate a point cloud via ball-pivoting; returns (M, 3) int32 vertex indices.

    Normals are approximated by the normalized point positions (i.e. points
    are assumed to roughly face away from the origin). With full_comb=True,
    two additional vertex orderings of every triangle are appended.
    test_pnts is unused; kept for interface compatibility.
    """
    xyz = pnts[:, :3]
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(xyz)
    cloud.normals = o3d.utility.Vector3dVector(
        xyz / np.linalg.norm(xyz, axis=-1, keepdims=True))
    # Pivoting-ball radius: 3x the mean nearest-neighbour spacing.
    radius = 3 * np.mean(cloud.compute_nearest_neighbor_distance())
    mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
        cloud, o3d.utility.DoubleVector([radius, radius * 2]))
    triangles = np.asarray(mesh.triangles, dtype=np.int32)
    if full_comb:
        a, b, c = triangles[..., 0], triangles[..., 1], triangles[..., 2]
        triangles = np.concatenate(
            [triangles,
             np.stack([b, a, c], axis=-1),
             np.stack([c, a, b], axis=-1)], axis=0)
    return triangles
156,765 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
from plyfile import PlyData, PlyElement
import copy
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
def colorjitter(img, factor):
    """Jitter the saturation and hue of *img*.

    factor is indexed as (brightness, contrast, saturation, hue); the
    brightness/contrast entries are deliberately unused here, and the hue
    entry is shifted by -1.0 before being applied.
    """
    out = F.adjust_saturation(img, factor[2])
    out = F.adjust_hue(out, factor[3] - 1.0)
    return out
156,766 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
from plyfile import PlyData, PlyElement
import copy
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
# Elementary 4x4 pose factors: translation along +z, rotation about the
# x axis (phi), and rotation about the z axis (beta).
trans_t = lambda t: np.asarray([
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, t],
    [0, 0, 0, 1],
], dtype=np.float32)

rot_phi = lambda phi: np.asarray([
    [1, 0, 0, 0],
    [0, np.cos(phi), -np.sin(phi), 0],
    [0, np.sin(phi), np.cos(phi), 0],
    [0, 0, 0, 1],
], dtype=np.float32)

rot_beta = lambda th: np.asarray([
    [np.cos(th), -np.sin(th), 0, 0],
    [np.sin(th), np.cos(th), 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
], dtype=np.float32)


def pose_spherical(theta, phi, radius):
    """Camera-to-world pose on a sphere: translate *radius* along z, tilt by
    *phi* degrees (about x), spin by *theta* degrees (about z), then remap
    axes into the dataset's world convention."""
    deg = np.pi / 180.0
    c2w = rot_beta(theta * deg) @ (rot_phi(phi * deg) @ trans_t(radius))
    # Axis permutation/flip into the target world frame.
    axes_swap = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
    return axes_swap @ c2w
156,767 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
from plyfile import PlyData, PlyElement
import copy
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
The provided code snippet includes necessary dependencies for implementing the `get_rays` function. Write a Python function `def get_rays(directions, c2w)` to solve the following problem:
Get ray origin and normalized directions in world coordinate for all pixels in one image. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate
Here is the function:
def get_rays(directions, c2w):
    """
    Lift per-pixel camera-space ray directions into world space and pair
    them with the (shared) camera origin.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        directions: (H, W, 3) precomputed ray directions in camera coordinates
        c2w: (3, 4) camera-to-world transform
    Outputs:
        rays_o: (H*W, 3) ray origins in world coordinates
        rays_d: (H*W, 3) ray directions in world coordinates (NOT unit length:
            the normalization step is deliberately disabled here)
    """
    c2w = torch.FloatTensor(c2w)
    rotation, translation = c2w[:3, :3], c2w[:3, 3]
    # Rotate every camera-space direction into world space.
    rays_d = (directions @ rotation.T).view(-1, 3)
    # All rays share the camera center as origin; broadcast one row per pixel.
    rays_o = translation.expand(directions.shape).reshape(-1, 3)
    return rays_o, rays_d
156,768 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
from plyfile import PlyData, PlyElement
import copy
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
The provided code snippet includes necessary dependencies for implementing the `get_ray_directions` function. Write a Python function `def get_ray_directions(H, W, focal, center=None)` to solve the following problem:
Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate
Here is the function:
def get_ray_directions(H, W, focal, center=None):
    """
    Camera-space viewing direction for every pixel of an H x W image.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        H, W: image height and width in pixels
        focal: per-axis focal lengths, indexed as (fx, fy)
        center: optional principal point (cx, cy); defaults to the image center
    Outputs:
        directions: (H, W, 3) per-pixel ray directions in camera coordinates
    """
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    u, v = grid.unbind(-1)
    # No +0.5 half-pixel centering on purpose: calibration is not accurate
    # enough for it to matter (see https://github.com/bmild/nerf/issues/24).
    cx, cy = center if center is not None else (W / 2, H / 2)
    return torch.stack(
        [(u - cx) / focal[0], (v - cy) / focal[1], torch.ones_like(u)], -1)  # (H, W, 3)
156,769 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
def colorjitter(img, factor):
    """Jitter the saturation and hue of *img*.

    factor is indexed as (brightness, contrast, saturation, hue); the
    brightness/contrast entries are deliberately unused here, and the hue
    entry is shifted by -1.0 before being applied.
    """
    out = F.adjust_saturation(img, factor[2])
    out = F.adjust_hue(out, factor[3] - 1.0)
    return out
156,770 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
The provided code snippet includes necessary dependencies for implementing the `get_rays` function. Write a Python function `def get_rays(directions, c2w)` to solve the following problem:
Get ray origin and normalized directions in world coordinate for all pixels in one image. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate
Here is the function:
def get_rays(directions, c2w):
    """
    Lift per-pixel camera-space ray directions into world space and pair
    them with the (shared) camera origin.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        directions: (H, W, 3) precomputed ray directions in camera coordinates
        c2w: (3, 4) camera-to-world transform
    Outputs:
        rays_o: (H*W, 3) ray origins in world coordinates
        rays_d: (H*W, 3) ray directions in world coordinates (NOT unit length:
            the normalization step is deliberately disabled here)
    """
    c2w = torch.FloatTensor(c2w)
    rotation, translation = c2w[:3, :3], c2w[:3, 3]
    # Rotate every camera-space direction into world space.
    rays_d = (directions @ rotation.T).view(-1, 3)
    # All rays share the camera center as origin; broadcast one row per pixel.
    rays_o = translation.expand(directions.shape).reshape(-1, 3)
    return rays_o, rays_d
156,771 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
The provided code snippet includes necessary dependencies for implementing the `get_ray_directions` function. Write a Python function `def get_ray_directions(H, W, focal, center=None)` to solve the following problem:
Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate
Here is the function:
def get_ray_directions(H, W, focal, center=None):
    """
    Camera-space viewing direction for every pixel of an H x W image.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        H, W: image height and width in pixels
        focal: per-axis focal lengths, indexed as (fx, fy)
        center: optional principal point (cx, cy); defaults to the image center
    Outputs:
        directions: (H, W, 3) per-pixel ray directions in camera coordinates
    """
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    u, v = grid.unbind(-1)
    # No +0.5 half-pixel centering on purpose: calibration is not accurate
    # enough for it to matter (see https://github.com/bmild/nerf/issues/24).
    cx, cy = center if center is not None else (W / 2, H / 2)
    return torch.stack(
        [(u - cx) / focal[0], (v - cy) / focal[1], torch.ones_like(u)], -1)  # (H, W, 3)
156,772 | import os
import numpy as np
import imageio
import json
import torch
import pickle, random
def load_blender_cloud(point_path, point_num):
    """Load a pickled point cloud, optionally subsampling it to *point_num* points.

    The pickle must hold a dict with "point_xyz" (N, 3) and may hold
    "point_face_normal". Subsampling uses random.choices, i.e. WITH
    replacement (matching the original behavior).
    Returns (xyz, normals) where normals is None when absent.
    """
    with open(point_path, 'rb') as fh:
        print("point_file_path ################", point_path)
        infos = pickle.load(fh)
    xyz = infos["point_xyz"]
    norms = infos.get("point_face_normal")
    print("surface point cloud ", len(xyz), "mean pos:", np.mean(xyz, axis=0),
          "min pos:", np.min(xyz, axis=0), "mean max:", np.max(xyz, axis=0))
    if point_num >= len(xyz):
        return xyz, norms
    inds = np.asarray(random.choices(range(len(xyz)), k=point_num))
    return xyz[inds, :], (norms[inds, :] if norms is not None else None)
156,773 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
def colorjitter(img, factor):
    """Jitter the saturation and hue of *img*.

    factor is indexed as (brightness, contrast, saturation, hue); the
    brightness/contrast entries are deliberately unused here, and the hue
    entry is shifted by -1.0 before being applied.
    """
    out = F.adjust_saturation(img, factor[2])
    out = F.adjust_hue(out, factor[3] - 1.0)
    return out
156,774 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
import models.mvs.mvs_utils as mvs_utils
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
from plyfile import PlyData, PlyElement
def colorjitter(img, factor):
    """Jitter the saturation and hue of *img*.

    factor is indexed as (brightness, contrast, saturation, hue); the
    brightness/contrast entries are deliberately unused here, and the hue
    entry is shifted by -1.0 before being applied.
    """
    out = F.adjust_saturation(img, factor[2])
    out = F.adjust_hue(out, factor[3] - 1.0)
    return out
156,775 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
import models.mvs.mvs_utils as mvs_utils
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
from plyfile import PlyData, PlyElement
The provided code snippet includes necessary dependencies for implementing the `get_rays` function. Write a Python function `def get_rays(directions, c2w)` to solve the following problem:
Get ray origin and normalized directions in world coordinate for all pixels in one image. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate
Here is the function:
def get_rays(directions, c2w):
    """
    Lift per-pixel camera-space ray directions into world space and pair
    them with the (shared) camera origin.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        directions: (H, W, 3) precomputed ray directions in camera coordinates
        c2w: (3, 4) camera-to-world transform
    Outputs:
        rays_o: (H*W, 3) ray origins in world coordinates
        rays_d: (H*W, 3) ray directions in world coordinates (NOT unit length:
            the normalization step is deliberately disabled here)
    """
    c2w = torch.FloatTensor(c2w)
    rotation, translation = c2w[:3, :3], c2w[:3, 3]
    # Rotate every camera-space direction into world space.
    rays_d = (directions @ rotation.T).view(-1, 3)
    # All rays share the camera center as origin; broadcast one row per pixel.
    rays_o = translation.expand(directions.shape).reshape(-1, 3)
    return rays_o, rays_d
156,776 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
import models.mvs.mvs_utils as mvs_utils
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
from plyfile import PlyData, PlyElement
The provided code snippet includes necessary dependencies for implementing the `get_ray_directions` function. Write a Python function `def get_ray_directions(H, W, focal, center=None)` to solve the following problem:
Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate
Here is the function:
def get_ray_directions(H, W, focal, center=None):
    """
    Camera-space viewing direction for every pixel of an H x W image.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        H, W: image height and width in pixels
        focal: per-axis focal lengths, indexed as (fx, fy)
        center: optional principal point (cx, cy); defaults to the image center
    Outputs:
        directions: (H, W, 3) per-pixel ray directions in camera coordinates
    """
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    u, v = grid.unbind(-1)
    # No +0.5 half-pixel centering on purpose: calibration is not accurate
    # enough for it to matter (see https://github.com/bmild/nerf/issues/24).
    cx, cy = center if center is not None else (W / 2, H / 2)
    return torch.stack(
        [(u - cx) / focal[0], (v - cy) / focal[1], torch.ones_like(u)], -1)  # (H, W, 3)
156,777 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import itertools
import random
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from . import data_utils
from utils import util
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
def colorjitter(img, factor):
    """Jitter the saturation and hue of *img*.

    factor is indexed as (brightness, contrast, saturation, hue); the
    brightness/contrast entries are deliberately unused here, and the hue
    entry is shifted by -1.0 before being applied.
    """
    out = F.adjust_saturation(img, factor[2])
    out = F.adjust_hue(out, factor[3] - 1.0)
    return out
156,778 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import itertools
import random
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from . import data_utils
from utils import util
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
The provided code snippet includes necessary dependencies for implementing the `get_rays` function. Write a Python function `def get_rays(directions, c2w)` to solve the following problem:
Get ray origin and normalized directions in world coordinate for all pixels in one image. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate
Here is the function:
def get_rays(directions, c2w):
    """Transform camera-frame ray directions into world-frame rays.

    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
    ray-tracing-generating-camera-rays/standard-coordinate-systems

    Args:
        directions: (H, W, 3) precomputed ray directions in camera coordinates.
        c2w: (3, 4) camera-to-world transformation matrix.

    Returns:
        rays_o: (H*W, 3) ray origins (the camera center) in world coordinates.
        rays_d: (H*W, 3) ray directions in world coordinates. NOTE: they are
            deliberately left unnormalized, matching the original behavior.
    """
    c2w = torch.FloatTensor(c2w)
    # Rotate camera-frame directions into the world frame.
    world_dirs = directions @ c2w[:3, :3].T  # (H, W, 3)
    # Every ray starts at the camera origin in world coordinates.
    origins = c2w[:3, 3].expand(world_dirs.shape)  # (H, W, 3)
    return origins.reshape(-1, 3), world_dirs.reshape(-1, 3)
156,779 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import itertools
import random
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from . import data_utils
from utils import util
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
The provided code snippet includes necessary dependencies for implementing the `get_ray_directions` function. Write a Python function `def get_ray_directions(H, W, focal, center=None)` to solve the following problem:
Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate
Here is the function:
def get_ray_directions(H, W, focal, center=None):
    """Compute per-pixel ray directions in the camera coordinate frame.

    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
    ray-tracing-generating-camera-rays/standard-coordinate-systems

    Args:
        H, W: image height and width in pixels.
        focal: (fx, fy) focal lengths.
        center: optional (cx, cy) principal point; defaults to the image center.

    Returns:
        directions: (H, W, 3) ray direction for every pixel.
    """
    pixel_grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    u, v = pixel_grid.unbind(-1)
    # No +0.5 pixel-center offset here, since calibration is not that accurate.
    # See https://github.com/bmild/nerf/issues/24
    principal = center if center is not None else [W / 2, H / 2]
    x = (u - principal[0]) / focal[0]
    y = (v - principal[1]) / focal[1]
    z = torch.ones_like(u)
    return torch.stack([x, y, z], -1)  # (H, W, 3)
156,780 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
import glob
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
import itertools
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
import copy
def colorjitter(img, factor):
    """Apply saturation and hue jitter to an image.

    `factor` holds (brightness, contrast, saturation, hue + 1.0) factors;
    only saturation and hue are applied — brightness/contrast jitter is
    intentionally disabled.
    """
    jittered = F.adjust_saturation(img, factor[2])
    jittered = F.adjust_hue(jittered, factor[3] - 1.0)
    return jittered
156,781 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
import glob
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
import itertools
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
import copy
def unique_lst(list1):
    """Return the sorted unique elements of `list1` as a numpy array."""
    return np.unique(np.asarray(list1))
156,782 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
import glob
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
import itertools
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
import copy
def normalize(v):
    """Return `v` scaled to unit Euclidean length."""
    length = np.linalg.norm(v)
    return v / length
The provided code snippet includes necessary dependencies for implementing the `average_poses` function. Write a Python function `def average_poses(poses)` to solve the following problem:
Calculate the average pose, which is then used to center all poses using @center_poses. Its computation is as follows: 1. Compute the center: the average of pose centers. 2. Compute the z axis: the normalized average z axis. 3. Compute axis y': the average y axis. 4. Compute x' = y' cross product z, then normalize it as the x axis. 5. Compute the y axis: z cross product x. Note that at step 3, we cannot directly use y' as y axis since it's not necessarily orthogonal to z axis. We need to pass from x to y. Inputs: poses: (N_images, 3, 4) Outputs: pose_avg: (3, 4) the average pose
Here is the function:
def average_poses(poses):
    """
    Compute the average camera pose, later used by @center_poses to center
    all poses. The computation:
        1. Center: the mean of the pose centers.
        2. z axis: the normalized mean z axis.
        3. y': the mean y axis (not necessarily orthogonal to z, so it is
           not used directly).
        4. x axis: normalize(y' cross z).
        5. y axis: z cross x (already unit norm since z and x are orthonormal).

    Inputs:
        poses: (N_images, 3, 4)
    Outputs:
        pose_avg: (3, 4) the average pose
    """
    def _unit(v):
        # Small inlined helper: scale a vector to unit length.
        return v / np.linalg.norm(v)

    center = poses[..., 3].mean(0)      # (3,) mean camera center
    z = _unit(poses[..., 2].mean(0))    # (3,) normalized mean z axis
    y_ = poses[..., 1].mean(0)          # (3,) mean y axis (intermediate only)
    x = _unit(np.cross(y_, z))          # (3,) x = y' x z, normalized
    y = np.cross(z, x)                  # (3,) y = z x x, unit by construction
    return np.stack([x, y, z, center], 1)  # (3, 4)
156,783 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
import glob
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
import itertools
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
import copy
The provided code snippet includes necessary dependencies for implementing the `get_rays` function. Write a Python function `def get_rays(directions, c2w)` to solve the following problem:
Get ray origin and normalized directions in world coordinate for all pixels in one image. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate
Here is the function:
def get_rays(directions, c2w):
    """Transform camera-frame ray directions into world-frame rays.

    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
    ray-tracing-generating-camera-rays/standard-coordinate-systems

    Args:
        directions: (H, W, 3) precomputed ray directions in camera coordinates.
        c2w: (3, 4) camera-to-world transformation matrix.

    Returns:
        rays_o: (H*W, 3) ray origins (the camera center) in world coordinates.
        rays_d: (H*W, 3) ray directions in world coordinates. NOTE: they are
            deliberately left unnormalized, matching the original behavior.
    """
    c2w = torch.FloatTensor(c2w)
    # Rotate camera-frame directions into the world frame.
    world_dirs = directions @ c2w[:3, :3].T  # (H, W, 3)
    # Every ray starts at the camera origin in world coordinates.
    origins = c2w[:3, 3].expand(world_dirs.shape)  # (H, W, 3)
    return origins.reshape(-1, 3), world_dirs.reshape(-1, 3)
156,784 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
import glob
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
import itertools
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
import copy
The provided code snippet includes necessary dependencies for implementing the `get_ray_directions` function. Write a Python function `def get_ray_directions(H, W, focal, center=None)` to solve the following problem:
Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate
Here is the function:
def get_ray_directions(H, W, focal, center=None):
    """Compute per-pixel ray directions in the camera coordinate frame.

    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
    ray-tracing-generating-camera-rays/standard-coordinate-systems

    Args:
        H, W: image height and width in pixels.
        focal: (fx, fy) focal lengths.
        center: optional (cx, cy) principal point; defaults to the image center.

    Returns:
        directions: (H, W, 3) ray direction for every pixel.
    """
    pixel_grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    u, v = pixel_grid.unbind(-1)
    # No +0.5 pixel-center offset here, since calibration is not that accurate.
    # See https://github.com/bmild/nerf/issues/24
    principal = center if center is not None else [W / 2, H / 2]
    x = (u - principal[0]) / focal[0]
    y = (v - principal[1]) / focal[1]
    z = torch.ones_like(u)
    return torch.stack([x, y, z], -1)  # (H, W, 3)
156,785 | from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
import glob
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
import itertools
from os.path import join
import cv2
from .data_utils import get_dtu_raydir
import copy
def flip_z(poses):
    """Negate the z axis of a batch of (N, 4, 4) poses by right-multiplying
    a diag(1, 1, -1, 1) flip matrix."""
    flip = np.eye(4, dtype=np.float32)
    flip[2, 2] = -1.0
    return poses @ flip[None, ...]
156,786 | import sys
import os
import pathlib
import copy
import torch
import numpy as np
import time
from options import TestOptions
from data import create_data_loader, create_dataset
from models import create_model
from utils.visualizer import Visualizer
from utils import format as fmt
from tqdm import trange
def render_vid(model, dataset, visualizer, opt, total_steps):
    """Render every pose in `dataset.render_poses` with `model` in eval mode,
    chunk by chunk, and hand the accumulated per-frame visuals to
    `visualizer.display_video`.

    Args:
        model: project model exposing eval/train, set_input, test and
            get_current_visuals (as called below).
        dataset: provides render_poses, height, width and get_dummyrot_item(i).
        visualizer: receives the rendered frames via display_video.
        opt: options object; opt.random_sample_size is the square patch side.
        total_steps: step counter forwarded to the visualizer.

    The model is restored to train mode before returning.
    """
    print(
        '-----------------------------------Rendering Vid-----------------------------------'
    )
    model.eval()
    render_num = len(dataset.render_poses)
    patch_size = opt.random_sample_size
    # Rays are evaluated in chunks of patch_size**2 pixels to bound memory use.
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visual_lst = []
    for i in range(render_num):
        data = dataset.get_dummyrot_item(i)
        raydir = data['raydir'].clone()
        # Flatten pixel indices to (B, H*W, C) so they can be sliced per chunk.
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        visuals = None
        starttime=time.time()
        for k in range(0, height * width, chunk_size):
            start = k
            end = min([k + chunk_size, height * width])
            # Overwrite the ray/pixel entries in-place with the current slice.
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            # data['gt_image'] = gt_image[:, start:end, :]
            # data['gt_mask'] = gt_mask[:, start:end, :]
            model.set_input(data)
            model.test()
            curr_visuals = model.get_current_visuals()
            if visuals is None:
                # First chunk: allocate full-frame buffers for every RGB
                # output whose key ends in "color".
                visuals = {}
                for key, value in curr_visuals.items():
                    if value is None or value.shape[-1] != 3 or not key.endswith("color"):
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype)
                    visuals[key][start:end, :] = chunk
            else:
                # Subsequent chunks: write into the preallocated buffers.
                for key, value in curr_visuals.items():
                    if value is None or value.shape[-1] != 3 or not key.endswith("color"):
                        continue
                    visuals[key][start:end, :] = value.cpu().numpy()
        # Reshape each flat (H*W, 3) buffer into an (H, W, 3) image.
        for key, value in visuals.items():
            visuals[key] = visuals[key].reshape(height, width, 3)
        visual_lst.append(visuals)
        print("render time:", time.time() - starttime)
    visualizer.display_video(visual_lst, total_steps)
    # Restore training mode before returning control to the caller.
    model.train()
    print(
        '--------------------------------Finish Rendering--------------------------------'
    )
    return
156,787 | import sys
import os
import pathlib
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
from render_vid import render_vid
# Fix RNG seeds so training runs are reproducible.
torch.manual_seed(0)
np.random.seed(0)
def mse2psnr(x):
    """Convert a mean-squared-error tensor to PSNR in decibels: -10*log10(x)."""
    ln10 = np.log(10.)
    return -10. * torch.log(x) / ln10
156,788 | import sys
import os
import pathlib
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
from render_vid import render_vid
def get_latest_epoch(resume_dir):
    """Return the epoch prefix (as a string) of the newest "*_states.pth"
    checkpoint in `resume_dir`, creating the directory if needed.
    Returns None when no checkpoint file exists.
    """
    os.makedirs(resume_dir, exist_ok=True)
    epochs = [name.split("_")[0]
              for name in os.listdir(resume_dir)
              if name.endswith("_states.pth")]
    if not epochs:
        return None
    # max with key=int keeps the first-seen maximum, like the original
    # index-of-max lookup.
    return max(epochs, key=int)
156,789 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
# Fix RNG seeds so training runs are reproducible.
torch.manual_seed(0)
np.random.seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def mse2psnr(x):
    """Convert a mean-squared-error tensor to PSNR in decibels: -10*log10(x)."""
    ln10 = np.log(10.)
    return -10. * torch.log(x) / ln10
156,790 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
# Fix the numpy RNG seed so runs are reproducible.
np.random.seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def save_image(img_array, filepath):
    """Save a grayscale (H, W) or color (H, W, 3|4) array as an image file.

    Float arrays are clipped to [0, 1] and scaled to uint8. Parent
    directories of `filepath` are created as needed.
    """
    is_gray = len(img_array.shape) == 2
    is_color = len(img_array.shape) == 3 and img_array.shape[2] in [3, 4]
    assert is_gray or is_color
    if img_array.dtype != np.uint8:
        # Convert floats (or other dtypes) to 8-bit, clipping to [0, 1] first.
        img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    Image.fromarray(img_array).save(filepath)
156,791 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
# Fix the torch RNG seed so runs are reproducible.
torch.manual_seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def nearest_view(campos, raydir, xyz, id_list):
    """For every point in `xyz`, select the index of the best camera, scoring
    each camera by distance/200 plus (1.1 - cosine of the angle between the
    camera-to-point direction and `raydir`). Points are processed in chunks
    of 10k to bound peak memory.

    Args:
        campos: (M, 3) camera positions.
        raydir: (3,) reference ray direction (assumed unit length — confirm).
        xyz: (N, 3) query points.
        id_list: unused; kept for interface compatibility.

    Returns:
        (N, 1) long tensor of selected camera indices.
    """
    chunk = 10000
    picked = torch.zeros([0, 1], device=campos.device, dtype=torch.long)
    for start in range(0, len(xyz), chunk):
        pts = xyz[start:min(len(xyz), start + chunk)]
        offsets = pts[:, None, :] - campos[None, ...]        # N, M, 3
        lengths = torch.norm(offsets, dim=-1)                # N, M
        units = offsets / (lengths[..., None] + 1e-6)        # N, M, 3
        # Lower score = closer camera with better direction alignment.
        score = lengths / 200 + (1.1 - torch.sum(units * raydir[None, :], dim=-1))
        picked = torch.cat([picked, torch.argmin(score, dim=1).view(-1, 1)], dim=0)
    return picked
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.