| id (int64) | prompt (string) | docstring (string, nullable) |
|---|---|---|
154,844 | from colorama import Fore, Back, Style
import textwrap
def print_logo():
print(Fore.GREEN + """
▄████▄ ██░ ██ ██▓ ███▄ █ ▓█████ ██████ ▓█████
▒██▀ ▀█ ▓██░ ██▒▓██▒ ██ ▀█ █ ▓█ ▀ ▒██ ▒ ▓█ ▀
▒▓█ ▄ ▒██▀▀██░▒██▒▓██ ▀█ ██▒▒███ ░ ▓██▄ ▒███
▒▓▓▄ ▄██▒░▓█ ░██ ░██░▓██▒ ▐▌██▒▒▓█ ▄ ▒ ██▒▒▓█ ▄
▒ ▓███▀ ░░▓█▒░██▓░██░▒██░ ▓██░░▒████▒▒██████▒▒░▒████▒
░ ░▒ ▒ ░ ▒ ░░▒░▒░▓ ░ ▒░ ▒ ▒ ░░ ▒░ ░▒ ▒▓▒ ▒ ░░░ ▒░ ░
░ ▒ ▒ ░▒░ ░ ▒ ░░ ░░ ░ ▒░ ░ ░ ░░ ░▒ ░ ░ ░ ░ ░
░ ░ ░░ ░ ▒ ░ ▄▄▄ ░ ░ ██▓░ ░ ░ ░ ░
░ ░ ░ ░ ░ ░ ▒████▄ ░▓██▒░ ░ ░ ░ ░
░ ▒██ ▀█▄ ▒██▒
░██▄▄▄▄██ ░██░
▓█ ▓██▒░██░
▒▒ ▓▒█░░▓
▒ ▒▒ ░ ▒ ░
░ ▒ ▒ ░
▓█████▄ █ ██ ███▄ █ ▄████░▓█████ ▒█████ ███▄ █
▒██▀ ██▌ ██ ▓██▒ ██ ▀█ █ ██▒ ▀█▒▓█ ▀ ▒██▒ ██▒ ██ ▀█ █
░██ █▌▓██ ▒██░▓██ ▀█ ██▒▒██░▄▄▄░▒███ ▒██░ ██▒▓██ ▀█ ██▒
░▓█▄ ▌▓▓█ ░██░▓██▒ ▐▌██▒░▓█ ██▓▒▓█ ▄ ▒██ ██░▓██▒ ▐▌██▒
░▒████▓ ▒▒█████▓ ▒██░ ▓██░░▒▓███▀▒░▒████▒░ ████▓▒░▒██░ ▓██░
▒▒▓ ▒ ░▒▓▒ ▒ ▒ ░ ▒░ ▒ ▒ ░▒ ▒ ░░ ▒░ ░░ ▒░▒░▒░ ░ ▒░ ▒ ▒
░ ▒ ▒ ░░▒░ ░ ░ ░ ░░ ░ ▒░ ░ ░ ░ ░ ░ ░ ▒ ▒░ ░ ░░ ░ ▒░
░ ░ ░ ░░░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░
""")
print(Style.BRIGHT + " 欢迎使用中文AI地牢,一个基于ChatGPT的中文文字冒险游戏")  # "Welcome to the Chinese AI Dungeon, a Chinese text adventure game based on ChatGPT"
print(Style.RESET_ALL) | null |
154,845 | import copy
from tkinter import *
from tkinter.ttk import Progressbar
import tkinter.messagebox
import tkinter.simpledialog
from typing import Callable
from story_rewrite import StoryTeller
from config import config
import threading
from concurrent import futures
def thread_it(func, *args):
# create the thread
t = threading.Thread(target=func, args=args)
# daemonize it so the worker does not keep the interpreter alive
t.daemon = True
# start it
t.start()
# t.join() | null |
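A minimal usage sketch for `thread_it` (not part of the original snippet); the worker function `slow_task` is purely illustrative:

import time

def slow_task(name, delay):
    time.sleep(delay)
    print(f"{name} finished")

# Fire-and-forget: the daemon thread dies with the main program, so a real caller
# (for example a Tk mainloop) keeps the process alive long enough for it to finish.
thread_it(slow_task, "preload", 0.5)
time.sleep(1)  # only needed in this standalone sketch so the worker can complete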
154,846 | import copy
from tkinter import *
from tkinter.ttk import Progressbar
import tkinter.messagebox
import tkinter.simpledialog
from typing import Callable
from story_rewrite import StoryTeller
from config import config
import threading
from concurrent import futures
The provided code snippet includes necessary dependencies for implementing the `format_form` function. Write a Python function `def format_form(form, width, height)` to solve the following problem:
Center the window on the screen.
Here is the function:
def format_form(form, width, height):
"""Center the window on the screen."""
# get the screen width
win_width = form.winfo_screenwidth()
# get the screen height
win_height = form.winfo_screenheight()
# compute the offsets that center a width x height window
width_adjust = (win_width - width) / 2
height_adjust = (win_height - height) / 2
form.geometry("%dx%d+%d+%d" % (width, height, width_adjust, height_adjust)) | Center the window on the screen. |
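A short sketch (not from the original project) showing `format_form` centering a 400x300 window; the title string is illustrative:

root = Tk()                  # Tk comes from the star-import above
root.title("demo")           # illustrative title
format_form(root, 400, 300)  # center a 400x300 window on the screen
root.mainloop()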
154,847 | import argparse
import os
from getpass import getpass
import fire
from dotenv import load_dotenv
from .simpleaichat import AIChat
ARGS = parser.parse_args()
class AIChat(BaseModel):
client: Any
default_session: Optional[ChatSession]
sessions: Dict[Union[str, UUID], ChatSession] = {}
def __init__(
self,
character: str = None,
character_command: str = None,
system: str = None,
id: Union[str, UUID] = uuid4(),
prime: bool = True,
default_session: bool = True,
console: bool = True,
**kwargs,
):
client = Client(proxies=os.getenv("https_proxy"))
system_format = self.build_system(character, character_command, system)
sessions = {}
new_default_session = None
if default_session:
new_session = self.new_session(
return_session=True, system=system_format, id=id, **kwargs
)
new_default_session = new_session
sessions = {new_session.id: new_session}
super().__init__(
client=client, default_session=new_default_session, sessions=sessions
)
if not system and console:
character = "ChatGPT" if not character else character
new_default_session.title = character
self.interactive_console(character=character, prime=prime)
def new_session(
self,
return_session: bool = False,
**kwargs,
) -> Optional[ChatGPTSession]:
if "model" not in kwargs: # set default
kwargs["model"] = "gpt-3.5-turbo"
# TODO: Add support for more models (PaLM, Claude)
if "gpt-" in kwargs["model"]:
gpt_api_key = kwargs.get("api_key") or os.getenv("OPENAI_API_KEY")
assert gpt_api_key, f"An API key for {kwargs['model']} was not defined."
sess = ChatGPTSession(
auth={
"api_key": gpt_api_key,
},
**kwargs,
)
if return_session:
return sess
else:
self.sessions[sess.id] = sess
def get_session(self, id: Union[str, UUID] = None) -> ChatSession:
try:
sess = self.sessions[id] if id else self.default_session
except KeyError:
raise KeyError("No session by that key exists.")
if not sess:
raise ValueError("No default session exists.")
return sess
def reset_session(self, id: Union[str, UUID] = None) -> None:
sess = self.get_session(id)
sess.messages = []
def delete_session(self, id: Union[str, UUID] = None) -> None:
sess = self.get_session(id)
if self.default_session:
if sess.id == self.default_session.id:
self.default_session = None
del self.sessions[sess.id]
del sess
def session(self, **kwargs):
sess = self.new_session(return_session=True, **kwargs)
self.sessions[sess.id] = sess
try:
yield sess
finally:
self.delete_session(sess.id)
def __call__(
self,
prompt: Union[str, Any],
id: Union[str, UUID] = None,
system: str = None,
save_messages: bool = None,
params: Dict[str, Any] = None,
tools: List[Any] = None,
input_schema: Any = None,
output_schema: Any = None,
) -> str:
sess = self.get_session(id)
if tools:
assert (input_schema is None) and (
output_schema is None
), "When using tools, input/output schema are ignored"
for tool in tools:
assert tool.__doc__, f"Tool {tool} does not have a docstring."
assert len(tools) <= 9, "You can only have a maximum of 9 tools."
return sess.gen_with_tools(
prompt,
tools,
client=self.client,
system=system,
save_messages=save_messages,
params=params,
)
else:
return sess.gen(
prompt,
client=self.client,
system=system,
save_messages=save_messages,
params=params,
input_schema=input_schema,
output_schema=output_schema,
)
def stream(
self,
prompt: str,
id: Union[str, UUID] = None,
system: str = None,
save_messages: bool = None,
params: Dict[str, Any] = None,
input_schema: Any = None,
) -> str:
sess = self.get_session(id)
return sess.stream(
prompt,
client=self.client,
system=system,
save_messages=save_messages,
params=params,
input_schema=input_schema,
)
def build_system(
self, character: str = None, character_command: str = None, system: str = None
) -> str:
default = "You are a helpful assistant."
if character:
character_prompt = """
You must follow ALL these rules in all responses:
- You are the following character and should ALWAYS act as them: {0}
- NEVER speak in a formal tone.
- Concisely introduce yourself first in character.
"""
prompt = character_prompt.format(wikipedia_search_lookup(character)).strip()
if character_command:
character_system = """
- {0}
"""
prompt = (
prompt + "\n" + character_system.format(character_command).strip()
)
return prompt
elif system:
return system
else:
return default
def interactive_console(self, character: str = None, prime: bool = True) -> None:
console = Console(highlight=False, force_jupyter=False)
sess = self.default_session
ai_text_color = "bright_magenta"
# prime with a unique starting response to the user
if prime:
console.print(f"[b]{character}[/b]: ", end="", style=ai_text_color)
for chunk in sess.stream("Hello!", self.client):
console.print(chunk["delta"], end="", style=ai_text_color)
while True:
console.print()
try:
user_input = console.input("[b]You:[/b] ").strip()
if not user_input:
break
console.print(f"[b]{character}[/b]: ", end="", style=ai_text_color)
for chunk in sess.stream(user_input, self.client):
console.print(chunk["delta"], end="", style=ai_text_color)
except KeyboardInterrupt:
break
def __str__(self) -> str:
if self.default_session:
return self.default_session.model_dump_json(
exclude={"api_key", "api_url"},
exclude_none=True,
)
def __repr__(self) -> str:
return ""
# Save/Load Chats given a session id
def save_session(
self,
output_path: str = None,
id: Union[str, UUID] = None,
format: str = "csv",
minify: bool = False,
):
sess = self.get_session(id)
sess_dict = sess.model_dump(
exclude={"auth", "api_url", "input_fields"},
exclude_none=True,
)
output_path = output_path or f"chat_session.{format}"
if format == "csv":
with open(output_path, "w", encoding="utf-8") as f:
fields = [
"role",
"content",
"received_at",
"prompt_length",
"completion_length",
"total_length",
"finish_reason",
]
w = csv.DictWriter(f, fieldnames=fields)
w.writeheader()
for message in sess_dict["messages"]:
# datetime must be in common format to be loaded into spreadsheet
# for human-readability, the timezone is set to local machine
local_datetime = message["received_at"].astimezone()
message["received_at"] = local_datetime.strftime(
"%Y-%m-%d %H:%M:%S"
)
w.writerow(message)
elif format == "json":
with open(output_path, "wb") as f:
f.write(
orjson.dumps(
sess_dict, option=orjson.OPT_INDENT_2 if not minify else None
)
)
def load_session(self, input_path: str, id: Union[str, UUID] = uuid4(), **kwargs):
assert input_path.endswith(".csv") or input_path.endswith(
".json"
), "Only CSV and JSON imports are accepted."
if input_path.endswith(".csv"):
with open(input_path, "r", encoding="utf-8") as f:
r = csv.DictReader(f)
messages = []
for row in r:
# need to convert the datetime back to UTC
local_datetime = datetime.datetime.strptime(
row["received_at"], "%Y-%m-%d %H:%M:%S"
).replace(tzinfo=dateutil.tz.tzlocal())
row["received_at"] = local_datetime.astimezone(
datetime.timezone.utc
)
# https://stackoverflow.com/a/68305271
row = {k: (None if v == "" else v) for k, v in row.items()}
messages.append(ChatMessage(**row))
self.new_session(id=id, **kwargs)
self.sessions[id].messages = messages
if input_path.endswith(".json"):
with open(input_path, "rb") as f:
sess_dict = orjson.loads(f.read())
# update session with info not loaded, e.g. auth/api_url
for arg in kwargs:
sess_dict[arg] = kwargs[arg]
self.new_session(**sess_dict)
# Tabulators for returning total token counts
def message_totals(self, attr: str, id: Union[str, UUID] = None) -> int:
sess = self.get_session(id)
return getattr(sess, attr)
def total_prompt_length(self, id: Union[str, UUID] = None) -> int:
return self.message_totals("total_prompt_length", id)
def total_completion_length(self, id: Union[str, UUID] = None) -> int:
return self.message_totals("total_completion_length", id)
def total_length(self, id: Union[str, UUID] = None) -> int:
return self.message_totals("total_length", id)
# alias total_tokens to total_length for common use
def total_tokens(self, id: Union[str, UUID] = None) -> int:
return self.total_length(id)
def interactive_chat():
gpt_api_key = os.getenv("OPENAI_API_KEY")
if not gpt_api_key:
gpt_api_key = getpass("Input your OpenAI key here: ")
assert gpt_api_key, "An API key was not defined."
_ = AIChat(ARGS.character, ARGS.character_command, ARGS.prime) | null |
154,848 | import os
from typing import List, Union
import httpx
from pydantic import Field
def wikipedia_search(query: str, n: int = 1) -> Union[str, List[str]]:
SEARCH_PARAMS = {
"action": "query",
"list": "search",
"format": "json",
"srlimit": n,
"srsearch": query,
"srwhat": "text",
"srprop": "",
}
r_search = httpx.get(WIKIPEDIA_API_URL, params=SEARCH_PARAMS)
results = [x["title"] for x in r_search.json()["query"]["search"]]
return results[0] if n == 1 else results
def wikipedia_lookup(query: str, sentences: int = 1) -> str:
LOOKUP_PARAMS = {
"action": "query",
"prop": "extracts",
"exsentences": sentences,
"exlimit": "1",
"explaintext": "1",
"formatversion": "2",
"format": "json",
"titles": query,
}
r_lookup = httpx.get(WIKIPEDIA_API_URL, params=LOOKUP_PARAMS)
return r_lookup.json()["query"]["pages"][0]["extract"]
def wikipedia_search_lookup(query: str, sentences: int = 1) -> str:
return wikipedia_lookup(wikipedia_search(query, 1), sentences) | null |
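A hedged usage sketch: `WIKIPEDIA_API_URL` is referenced but not defined in the snippet above, so the sketch assumes it points at the English Wikipedia MediaWiki endpoint.

# Assumption: the elided module-level constant resolves to the MediaWiki API.
WIKIPEDIA_API_URL = "https://en.wikipedia.org/w/api.php"

title = wikipedia_search("Alan Turing")                        # best-matching article title
summary = wikipedia_search_lookup("Alan Turing", sentences=2)  # search, then 2-sentence extract
print(title)
print(summary)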
154,849 | import os
from typing import List, Union
import httpx
from pydantic import Field
async def wikipedia_search_async(query: str, n: int = 1) -> Union[str, List[str]]:
SEARCH_PARAMS = {
"action": "query",
"list": "search",
"format": "json",
"srlimit": n,
"srsearch": query,
"srwhat": "text",
"srprop": "",
}
async with httpx.AsyncClient(proxies=os.getenv("https_proxy")) as client:
r_search = await client.get(WIKIPEDIA_API_URL, params=SEARCH_PARAMS)
results = [x["title"] for x in r_search.json()["query"]["search"]]
return results[0] if n == 1 else results
async def wikipedia_lookup_async(query: str, sentences: int = 1) -> str:
LOOKUP_PARAMS = {
"action": "query",
"prop": "extracts",
"exsentences": sentences,
"exlimit": "1",
"explaintext": "1",
"formatversion": "2",
"format": "json",
"titles": query,
}
async with httpx.AsyncClient(proxies=os.getenv("https_proxy")) as client:
r_lookup = await client.get(WIKIPEDIA_API_URL, params=LOOKUP_PARAMS)
return r_lookup.json()["query"]["pages"][0]["extract"]
async def wikipedia_search_lookup_async(query: str, sentences: int = 1) -> str:
return await wikipedia_lookup_async(
await wikipedia_search_async(query, 1), sentences
) | null |
154,850 | import os
from typing import List, Union
import httpx
from pydantic import Field
def fd(description: str, **kwargs):
return Field(description=description, **kwargs) | null |
154,851 | import os
from typing import List, Union
import httpx
from pydantic import Field
def remove_a_key(d, remove_key):
if isinstance(d, dict):
for key in list(d.keys()):
if key == remove_key:
del d[key]
else:
remove_a_key(d[key], remove_key) | null |
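A small sketch showing the in-place, recursive behavior of `remove_a_key`; the toy schema is illustrative (the helper only recurses into dict values, so keys nested inside lists are left untouched):

schema = {"title": "x", "properties": {"name": {"title": "y", "type": "string"}}}
remove_a_key(schema, "title")  # mutates the dict in place
# schema is now {"properties": {"name": {"type": "string"}}}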
154,852 | import datetime
from typing import Any, Dict, List, Optional, Set, Union
from uuid import UUID, uuid4
import orjson
from pydantic import BaseModel, Field, HttpUrl, SecretStr
def orjson_dumps(v, *, default, **kwargs):
# orjson.dumps returns bytes, to match standard json.dumps we need to decode
return orjson.dumps(v, default=default, **kwargs).decode() | null |
154,853 | import datetime
from typing import Any, Dict, List, Optional, Set, Union
from uuid import UUID, uuid4
import orjson
from pydantic import BaseModel, Field, HttpUrl, SecretStr
def now_tz():
# Need datetime w/ timezone for cleanliness
# https://stackoverflow.com/a/24666683
return datetime.datetime.now(datetime.timezone.utc) | null |
154,854 | import argparse
import asyncio
import sys
from io import TextIOWrapper
from typing import Any, TextIO, Union
from edge_tts import Communicate, SubMaker, list_voices
async def _print_voices(*, proxy: str) -> None:
"""Print all available voices."""
voices = await list_voices(proxy=proxy)
voices = sorted(voices, key=lambda voice: voice["ShortName"])
for idx, voice in enumerate(voices):
if idx != 0:
print()
for key in voice.keys():
if key in (
"SuggestedCodec",
"FriendlyName",
"Status",
"VoiceTag",
"Name",
"Locale",
):
continue
pretty_key_name = key if key != "ShortName" else "Name"
print(f"{pretty_key_name}: {voice[key]}")
async def _run_tts(args: Any) -> None:
"""Run TTS after parsing arguments from command line."""
try:
if sys.stdin.isatty() and sys.stdout.isatty() and not args.write_media:
print(
"Warning: TTS output will be written to the terminal. "
"Use --write-media to write to a file.\n"
"Press Ctrl+C to cancel the operation. "
"Press Enter to continue.",
file=sys.stderr,
)
input()
except KeyboardInterrupt:
print("\nOperation canceled.", file=sys.stderr)
return
tts: Communicate = Communicate(
args.text,
args.voice,
proxy=args.proxy,
rate=args.rate,
volume=args.volume,
pitch=args.pitch,
)
subs: SubMaker = SubMaker()
with (
open(args.write_media, "wb") if args.write_media else sys.stdout.buffer
) as audio_file:
async for chunk in tts.stream():
if chunk["type"] == "audio":
audio_file.write(chunk["data"])
elif chunk["type"] == "WordBoundary":
subs.create_sub((chunk["offset"], chunk["duration"]), chunk["text"])
sub_file: Union[TextIOWrapper, TextIO] = (
open(args.write_subtitles, "w", encoding="utf-8")
if args.write_subtitles
else sys.stderr
)
with sub_file:
sub_file.write(subs.generate_subs(args.words_in_cue))
async def list_voices(*, proxy: Optional[str] = None) -> Any:
"""
List all available voices and their attributes.
This pulls data from the URL used by Microsoft Edge to return a list of
all available voices.
Returns:
dict: A dictionary of voice attributes.
"""
ssl_ctx = ssl.create_default_context(cafile=certifi.where())
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(
VOICE_LIST,
headers={
"Authority": "speech.platform.bing.com",
"Sec-CH-UA": '" Not;A Brand";v="99", "Microsoft Edge";v="91", "Chromium";v="91"',
"Sec-CH-UA-Mobile": "?0",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36 Edg/91.0.864.41",
"Accept": "*/*",
"Sec-Fetch-Site": "none",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Dest": "empty",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.9",
},
proxy=proxy,
ssl=ssl_ctx,
) as url:
data = json.loads(await url.text())
return data
The provided code snippet includes necessary dependencies for implementing the `amain` function. Write a Python function `async def amain() -> None` to solve the following problem:
Async main function
Here is the function:
async def amain() -> None:
"""Async main function"""
parser = argparse.ArgumentParser(description="Microsoft Edge TTS")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-t", "--text", help="what TTS will say")
group.add_argument("-f", "--file", help="same as --text but read from file")
parser.add_argument(
"-v",
"--voice",
help="voice for TTS. Default: en-US-AriaNeural",
default="en-US-AriaNeural",
)
group.add_argument(
"-l",
"--list-voices",
help="lists available voices and exits",
action="store_true",
)
parser.add_argument("--rate", help="set TTS rate. Default +0%%.", default="+0%")
parser.add_argument("--volume", help="set TTS volume. Default +0%%.", default="+0%")
parser.add_argument("--pitch", help="set TTS pitch. Default +0Hz.", default="+0Hz")
parser.add_argument(
"--words-in-cue",
help="number of words in a subtitle cue. Default: 10.",
default=10,
type=float,
)
parser.add_argument(
"--write-media", help="send media output to file instead of stdout"
)
parser.add_argument(
"--write-subtitles",
help="send subtitle output to provided file instead of stderr",
)
parser.add_argument("--proxy", help="use a proxy for TTS and voice list.")
args = parser.parse_args()
if args.list_voices:
await _print_voices(proxy=args.proxy)
sys.exit(0)
if args.file is not None:
# we need to use sys.stdin.read() because some devices
# like Windows and Termux don't have a /dev/stdin.
if args.file == "/dev/stdin":
args.text = sys.stdin.read()
else:
with open(args.file, "r", encoding="utf-8") as file:
args.text = file.read()
if args.text is not None:
await _run_tts(args) | Async main function |
154,855 | import json
import re
import ssl
import time
import uuid
from contextlib import nullcontext
from io import TextIOWrapper
from typing import (
Any,
AsyncGenerator,
ContextManager,
Dict,
Generator,
List,
Optional,
Tuple,
Union,
)
from xml.sax.saxutils import escape
import aiohttp
import certifi
from edge_tts.exceptions import (
NoAudioReceived,
UnexpectedResponse,
UnknownResponse,
WebSocketError,
)
from .constants import WSS_URL
The provided code snippet includes necessary dependencies for implementing the `get_headers_and_data` function. Write a Python function `def get_headers_and_data(data: Union[str, bytes]) -> Tuple[Dict[bytes, bytes], bytes]` to solve the following problem:
Returns the headers and data from the given data. Args: data (str or bytes): The data to be parsed. Returns: tuple: The headers and data to be used in the request.
Here is the function:
def get_headers_and_data(data: Union[str, bytes]) -> Tuple[Dict[bytes, bytes], bytes]:
"""
Returns the headers and data from the given data.
Args:
data (str or bytes): The data to be parsed.
Returns:
tuple: The headers and data to be used in the request.
"""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(data, bytes):
raise TypeError("data must be str or bytes")
headers = {}
for line in data[: data.find(b"\r\n\r\n")].split(b"\r\n"):
key, value = line.split(b":", 1)
headers[key] = value
return headers, data[data.find(b"\r\n\r\n") + 4 :] | Returns the headers and data from the given data. Args: data (str or bytes): The data to be parsed. Returns: tuple: The headers and data to be used in the request. |
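A quick sketch of `get_headers_and_data` on a hand-written message; the header names and payload are illustrative:

raw = b"Path:audio\r\nContent-Type:audio/mpeg\r\n\r\npayload bytes"
headers, payload = get_headers_and_data(raw)
# headers == {b"Path": b"audio", b"Content-Type": b"audio/mpeg"}
# payload == b"payload bytes"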
154,856 | import json
import re
import ssl
import time
import uuid
from contextlib import nullcontext
from io import TextIOWrapper
from typing import (
Any,
AsyncGenerator,
ContextManager,
Dict,
Generator,
List,
Optional,
Tuple,
Union,
)
from xml.sax.saxutils import escape
import aiohttp
import certifi
from edge_tts.exceptions import (
NoAudioReceived,
UnexpectedResponse,
UnknownResponse,
WebSocketError,
)
from .constants import WSS_URL
The provided code snippet includes necessary dependencies for implementing the `remove_incompatible_characters` function. Write a Python function `def remove_incompatible_characters(string: Union[str, bytes]) -> str` to solve the following problem:
The service does not support a couple character ranges. Most important being the vertical tab character which is commonly present in OCR-ed PDFs. Not doing this will result in an error from the service. Args: string (str or bytes): The string to be cleaned. Returns: str: The cleaned string.
Here is the function:
def remove_incompatible_characters(string: Union[str, bytes]) -> str:
"""
The service does not support a couple character ranges.
Most important being the vertical tab character which is
commonly present in OCR-ed PDFs. Not doing this will
result in an error from the service.
Args:
string (str or bytes): The string to be cleaned.
Returns:
str: The cleaned string.
"""
if isinstance(string, bytes):
string = string.decode("utf-8")
if not isinstance(string, str):
raise TypeError("string must be str or bytes")
chars: List[str] = list(string)
for idx, char in enumerate(chars):
code: int = ord(char)
if (0 <= code <= 8) or (11 <= code <= 12) or (14 <= code <= 31):
chars[idx] = " "
return "".join(chars) | The service does not support a couple character ranges. Most important being the vertical tab character which is commonly present in OCR-ed PDFs. Not doing this will result in an error from the service. Args: string (str or bytes): The string to be cleaned. Returns: str: The cleaned string. |
154,857 | import json
import re
import ssl
import time
import uuid
from contextlib import nullcontext
from io import TextIOWrapper
from typing import (
Any,
AsyncGenerator,
ContextManager,
Dict,
Generator,
List,
Optional,
Tuple,
Union,
)
from xml.sax.saxutils import escape
import aiohttp
import certifi
from edge_tts.exceptions import (
NoAudioReceived,
UnexpectedResponse,
UnknownResponse,
WebSocketError,
)
from .constants import WSS_URL
The provided code snippet includes necessary dependencies for implementing the `split_text_by_byte_length` function. Write a Python function `def split_text_by_byte_length( text: Union[str, bytes], byte_length: int ) -> Generator[bytes, None, None]` to solve the following problem:
Splits a string into a list of strings of a given byte length while attempting to keep words together. This function assumes text will be inside of an XML tag. Args: text (str or bytes): The string to be split. byte_length (int): The maximum byte length of each string in the list. Yield: bytes: The next string in the list.
Here is the function:
def split_text_by_byte_length(
text: Union[str, bytes], byte_length: int
) -> Generator[bytes, None, None]:
"""
Splits a string into a list of strings of a given byte length
while attempting to keep words together. This function assumes
text will be inside of an XML tag.
Args:
text (str or bytes): The string to be split.
byte_length (int): The maximum byte length of each string in the list.
Yield:
bytes: The next string in the list.
"""
if isinstance(text, str):
text = text.encode("utf-8")
if not isinstance(text, bytes):
raise TypeError("text must be str or bytes")
if byte_length <= 0:
raise ValueError("byte_length must be greater than 0")
while len(text) > byte_length:
# Find the last space in the string
split_at = text.rfind(b" ", 0, byte_length)
# If no space found, split_at is byte_length
split_at = split_at if split_at != -1 else byte_length
# Verify all & are terminated with a ;
while b"&" in text[:split_at]:
ampersand_index = text.rindex(b"&", 0, split_at)
if text.find(b";", ampersand_index, split_at) != -1:
break
split_at = ampersand_index - 1
if split_at < 0:
raise ValueError("Maximum byte length is too small or invalid text")
if split_at == 0:
break
# Append the string to the list
new_text = text[:split_at].strip()
if new_text:
yield new_text
if split_at == 0:
split_at = 1
text = text[split_at:]
new_text = text.strip()
if new_text:
yield new_text | Splits a string into a list of strings of a given byte length while attempting to keep words together. This function assumes text will be inside of an XML tag. Args: text (str or bytes): The string to be split. byte_length (int): The maximum byte length of each string in the list. Yield: bytes: The next string in the list. |
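A short sketch of `split_text_by_byte_length`; the 20-byte budget is illustrative:

text = "The quick brown fox jumps over the lazy dog"
for chunk in split_text_by_byte_length(text, 20):
    # each chunk is bytes, at most 20 bytes, split on word boundaries where possible
    assert len(chunk) <= 20
    print(chunk)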
154,858 | import json
import re
import ssl
import time
import uuid
from contextlib import nullcontext
from io import TextIOWrapper
from typing import (
Any,
AsyncGenerator,
ContextManager,
Dict,
Generator,
List,
Optional,
Tuple,
Union,
)
from xml.sax.saxutils import escape
import aiohttp
import certifi
from edge_tts.exceptions import (
NoAudioReceived,
UnexpectedResponse,
UnknownResponse,
WebSocketError,
)
from .constants import WSS_URL
def connect_id() -> str:
"""
Returns a UUID without dashes.
Returns:
str: A UUID without dashes.
"""
return str(uuid.uuid4()).replace("-", "")
def mkssml(
text: Union[str, bytes], voice: str, rate: str, volume: str, pitch: str
) -> str:
"""
Creates a SSML string from the given parameters.
Returns:
str: The SSML string.
"""
if isinstance(text, bytes):
text = text.decode("utf-8")
ssml = (
"<speak version='1.0' xmlns='http://www.w3.org/2001/10/synthesis' xml:lang='en-US'>"
f"<voice name='{voice}'><prosody pitch='{pitch}' rate='{rate}' volume='{volume}'>"
f"{text}</prosody></voice></speak>"
)
return ssml
def date_to_string() -> str:
"""
Return Javascript-style date string.
Returns:
str: Javascript-style date string.
"""
# %Z is not what we want, but it's the only way to get the timezone
# without having to use a library. We'll just use UTC and hope for the best.
# For example, right now %Z would return EEST when we need it to return
# Eastern European Summer Time.
return time.strftime(
"%a %b %d %Y %H:%M:%S GMT+0000 (Coordinated Universal Time)", time.gmtime()
)
def ssml_headers_plus_data(request_id: str, timestamp: str, ssml: str) -> str:
"""
Returns the headers and data to be used in the request.
Returns:
str: The headers and data to be used in the request.
"""
return (
f"X-RequestId:{request_id}\r\n"
"Content-Type:application/ssml+xml\r\n"
f"X-Timestamp:{timestamp}Z\r\n" # This is not a mistake, Microsoft Edge bug.
"Path:ssml\r\n\r\n"
f"{ssml}"
)
The provided code snippet includes necessary dependencies for implementing the `calc_max_mesg_size` function. Write a Python function `def calc_max_mesg_size(voice: str, rate: str, volume: str, pitch: str) -> int` to solve the following problem:
Calculates the maximum message size for the given voice, rate, and volume. Returns: int: The maximum message size.
Here is the function:
def calc_max_mesg_size(voice: str, rate: str, volume: str, pitch: str) -> int:
"""Calculates the maximum message size for the given voice, rate, and volume.
Returns:
int: The maximum message size.
"""
websocket_max_size: int = 2**16
overhead_per_message: int = (
len(
ssml_headers_plus_data(
connect_id(),
date_to_string(),
mkssml("", voice, rate, volume, pitch),
)
)
+ 50 # margin of error
)
return websocket_max_size - overhead_per_message | Calculates the maximum message size for the given voice, rate, and volume. Returns: int: The maximum message size. |
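A brief sketch (not from the original module) combining the helpers above: compute the per-message byte budget for a voice, then split the input text to fit it. It assumes `split_text_by_byte_length` from the earlier snippet is in scope; the voice and prosody values are illustrative defaults.

voice, rate, volume, pitch = "en-US-AriaNeural", "+0%", "+0%", "+0Hz"
budget = calc_max_mesg_size(voice, rate, volume, pitch)  # bytes left for text per websocket message
for piece in split_text_by_byte_length("some very long input text ...", budget):
    ssml = mkssml(piece, voice, rate, volume, pitch)
    # each SSML payload now fits inside a single websocket message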
154,859 | import math
from typing import List, Tuple
from xml.sax.saxutils import escape, unescape
def mktimestamp(time_unit: float) -> str:
"""
mktimestamp returns the timecode of the subtitle.
The timecode is in the format of 00:00:00.000.
Returns:
str: The timecode of the subtitle.
"""
hour = math.floor(time_unit / 10**7 / 3600)
minute = math.floor((time_unit / 10**7 / 60) % 60)
seconds = (time_unit / 10**7) % 60
return f"{hour:02d}:{minute:02d}:{seconds:06.3f}"
The provided code snippet includes necessary dependencies for implementing the `formatter` function. Write a Python function `def formatter(start_time: float, end_time: float, subdata: str) -> str` to solve the following problem:
formatter returns the timecode and the text of the subtitle.
Here is the function:
def formatter(start_time: float, end_time: float, subdata: str) -> str:
"""
formatter returns the timecode and the text of the subtitle.
"""
return (
f"{mktimestamp(start_time)} --> {mktimestamp(end_time)}\r\n"
f"{escape(subdata)}\r\n\r\n"
) | formatter returns the timecode and the text of the subtitle. |
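A short sketch of `formatter`; times are in the 100-nanosecond units that `mktimestamp` divides by 10**7, and the subtitle text is illustrative:

cue = formatter(0, 15_000_000, "Hello World!")  # 1.5 seconds expressed in 100 ns ticks
print(cue, end="")
# 00:00:00.000 --> 00:00:01.500
# Hello World!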
154,860 | import os
import subprocess
import sys
import tempfile
from shutil import which
def pr_err(msg: str) -> None:
# minimal body, restored as a sketch since the original was elided: write the message to stderr
print(msg, file=sys.stderr)
def _main() -> None:
depcheck_failed = False
for dep in ("edge-tts", "mpv"):
if not which(dep):
pr_err(f"{dep} is not installed.")
depcheck_failed = True
if depcheck_failed:
pr_err("Please install the missing dependencies.")
sys.exit(1)
keep = os.environ.get("EDGE_PLAYBACK_KEEP_TEMP") is not None
mp3_fname = os.environ.get("EDGE_PLAYBACK_MP3_FILE")
vtt_fname = os.environ.get("EDGE_PLAYBACK_VTT_FILE")
media, subtitle = None, None
try:
if not mp3_fname:
media = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
media.close()
mp3_fname = media.name
if not vtt_fname:
subtitle = tempfile.NamedTemporaryFile(suffix=".vtt", delete=False)
subtitle.close()
vtt_fname = subtitle.name
print(f"Media file: {mp3_fname}")
print(f"Subtitle file: {vtt_fname}\n")
with subprocess.Popen(
[
"edge-tts",
f"--write-media={mp3_fname}",
f"--write-subtitles={vtt_fname}",
]
+ sys.argv[1:]
) as process:
process.communicate()
with subprocess.Popen(
[
"mpv",
f"--sub-file={vtt_fname}",
mp3_fname,
]
) as process:
process.communicate()
finally:
if keep:
print(f"\nKeeping temporary files: {mp3_fname} and {vtt_fname}")
else:
if mp3_fname is not None and os.path.exists(mp3_fname):
os.unlink(mp3_fname)
if vtt_fname is not None and os.path.exists(vtt_fname):
os.unlink(vtt_fname) | null |
154,861 | import asyncio
import edge_tts
TEXT = "Hello World!"
VOICE = "en-GB-SoniaNeural"
OUTPUT_FILE = "test.mp3"
The provided code snippet includes necessary dependencies for implementing the `amain` function. Write a Python function `async def amain() -> None` to solve the following problem:
Main function
Here is the function:
async def amain() -> None:
"""Main function"""
communicate = edge_tts.Communicate(TEXT, VOICE)
await communicate.save(OUTPUT_FILE) | Main function |
154,862 | import asyncio
import edge_tts
TEXT = "Hello World!"
VOICE = "en-GB-SoniaNeural"
OUTPUT_FILE = "test.mp3"
The provided code snippet includes necessary dependencies for implementing the `amain` function. Write a Python function `async def amain() -> None` to solve the following problem:
Main function
Here is the function:
async def amain() -> None:
"""Main function"""
communicate = edge_tts.Communicate(TEXT, VOICE)
with open(OUTPUT_FILE, "wb") as file:
async for chunk in communicate.stream():
if chunk["type"] == "audio":
file.write(chunk["data"])
elif chunk["type"] == "WordBoundary":
print(f"WordBoundary: {chunk}") | Main function |
154,863 | import asyncio
import random
import edge_tts
from edge_tts import VoicesManager
TEXT = "Hoy es un buen día."
OUTPUT_FILE = "spanish.mp3"
The provided code snippet includes necessary dependencies for implementing the `amain` function. Write a Python function `async def amain() -> None` to solve the following problem:
Main function
Here is the function:
async def amain() -> None:
"""Main function"""
voices = await VoicesManager.create()
voice = voices.find(Gender="Male", Language="es")
# Also supports Locales
# voice = voices.find(Gender="Female", Locale="es-AR")
communicate = edge_tts.Communicate(TEXT, random.choice(voice)["Name"])
await communicate.save(OUTPUT_FILE) | Main function |
154,864 | import asyncio
import edge_tts
TEXT = "Hello World!"
VOICE = "en-GB-SoniaNeural"
OUTPUT_FILE = "test.mp3"
WEBVTT_FILE = "test.vtt"
The provided code snippet includes necessary dependencies for implementing the `amain` function. Write a Python function `async def amain() -> None` to solve the following problem:
Main function
Here is the function:
async def amain() -> None:
"""Main function"""
communicate = edge_tts.Communicate(TEXT, VOICE)
submaker = edge_tts.SubMaker()
with open(OUTPUT_FILE, "wb") as file:
async for chunk in communicate.stream():
if chunk["type"] == "audio":
file.write(chunk["data"])
elif chunk["type"] == "WordBoundary":
submaker.create_sub((chunk["offset"], chunk["duration"]), chunk["text"])
with open(WEBVTT_FILE, "w", encoding="utf-8") as file:
file.write(submaker.generate_subs()) | Main function |
154,865 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
STRAIGHT_LIMB_MARGIN = 20
EXTENDED_LIMB_MARGIN = .8
def get_angle(a, b, c):
ang = degrees(atan2(c['y']-b['y'], c['x']-b['x']) - atan2(a['y']-b['y'], a['x']-b['x']))
return ang + 360 if ang < 0 else ang
def is_missing(part):
return any(joint['visibility'] < VISIBILITY_THRESHOLD for joint in part)
def is_limb_pointing(upper, mid, lower):
if is_missing([upper, mid, lower]):
return False
limb_angle = get_angle(upper, mid, lower)
is_in_line = abs(180 - limb_angle) < STRAIGHT_LIMB_MARGIN
if is_in_line:
upper_length = dist.euclidean([upper['x'], upper['y']], [mid['x'], mid['y']])
lower_length = dist.euclidean([lower['x'], lower['y']], [mid['x'], mid['y']])
is_extended = lower_length > EXTENDED_LIMB_MARGIN * upper_length
return is_extended
return False | null |
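A minimal sketch of `get_angle` on hand-made landmarks (dicts with 'x'/'y' keys, as MediaPipe provides); the coordinates are illustrative:

a = {'x': 0.0, 'y': 0.0}
b = {'x': 1.0, 'y': 0.0}
c = {'x': 1.0, 'y': 1.0}
print(get_angle(a, b, c))  # 270.0: rotation from ray b->a to ray b->c, normalized to [0, 360)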
154,866 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
def get_limb_direction(arm, closest_degrees=45):
# should also use atan2 but I don't want to do more math
dy = arm[2]['y'] - arm[0]['y'] # wrist -> shoulder
dx = arm[2]['x'] - arm[0]['x']
angle = degrees(atan(dy/dx))
if (dx < 0):
angle += 180
# collapse to nearest closest_degrees; 45 for semaphore
mod_close = angle % closest_degrees
angle -= mod_close
if mod_close > closest_degrees/2:
angle += closest_degrees
angle = int(angle)
if angle == 270:
angle = -90
return angle | null |
154,867 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
ARM_CROSSED_RATIO = 2
def is_arm_crossed(elbow, wrist, max_dist):
return dist.euclidean([elbow['x'], elbow['y']], [wrist['x'], wrist['y']]) < max_dist
def is_arms_crossed(elbowL, wristL, elbowR, wristR, mouth_width):
max_dist = mouth_width * ARM_CROSSED_RATIO
return is_arm_crossed(elbowL, wristR, max_dist) and is_arm_crossed(elbowR, wristL, max_dist) | null |
154,868 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
LEG_LIFT_MIN = -30
def is_missing(part):
return any(joint['visibility'] < VISIBILITY_THRESHOLD for joint in part)
def is_leg_lifted(leg):
if is_missing(leg):
return False
dy = leg[1]['y'] - leg[0]['y'] # knee -> hip
dx = leg[1]['x'] - leg[0]['x']
angle = degrees(atan2(dy, dx))
return angle > LEG_LIFT_MIN | null |
154,869 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
JUMP_THRESHOLD = .0001
HALF_HISTORY = int(FRAME_HISTORY/2)
last_frames = FRAME_HISTORY*[empty_frame.copy()]
def is_missing(part):
return any(joint['visibility'] < VISIBILITY_THRESHOLD for joint in part)
def is_jumping(hipL, hipR):
global last_frames
if is_missing([hipL, hipR]):
return False
last_frames[-1]['hipL_y'] = hipL['y']
last_frames[-1]['hipR_y'] = hipR['y']
if (hipL['y'] > last_frames[-2]['hipL_y'] + JUMP_THRESHOLD) and (
hipR['y'] > last_frames[-2]['hipR_y'] + JUMP_THRESHOLD):
last_frames[-1]['hips_dy'] = 1 # rising
elif (hipL['y'] < last_frames[-2]['hipL_y'] - JUMP_THRESHOLD) and (
hipR['y'] < last_frames[-2]['hipR_y'] - JUMP_THRESHOLD):
last_frames[-1]['hips_dy'] = -1 # falling
else:
last_frames[-1]['hips_dy'] = 0 # not significant dy
# consistently rising first half, lowering second half
jump_up = all(frame['hips_dy'] == 1 for frame in last_frames[:HALF_HISTORY])
get_down = all(frame['hips_dy'] == -1 for frame in last_frames[HALF_HISTORY:])
return jump_up and get_down | null |
154,870 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
MOUTH_COVER_THRESHOLD = .03
def is_missing(part):
return any(joint['visibility'] < VISIBILITY_THRESHOLD for joint in part)
def is_mouth_covered(mouth, palms):
if is_missing(palms):
return False
dxL = (mouth[0]['x'] - palms[0]['x'])
dyL = (mouth[0]['y'] - palms[0]['y'])
dxR = (mouth[1]['x'] - palms[1]['x'])
dyR = (mouth[1]['y'] - palms[1]['y'])
return all(abs(d) < MOUTH_COVER_THRESHOLD for d in [dxL, dyL, dxR, dyR]) | null |
154,871 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
SQUAT_THRESHOLD = .1
def is_missing(part):
return any(joint['visibility'] < VISIBILITY_THRESHOLD for joint in part)
def is_squatting(hipL, kneeL, hipR, kneeR):
if is_missing([hipL, kneeL, hipR, kneeR]):
return False
dyL = abs(hipL['y'] - kneeL['y'])
dyR = abs(hipR['y'] - kneeR['y'])
return (dyL < SQUAT_THRESHOLD) and (dyR < SQUAT_THRESHOLD) | null |
154,872 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
def is_finger_out(finger, palmL, palmR, min_finger_reach):
dL_finger = dist.euclidean([finger['x'], finger['y']], [palmL['x'], palmL['y']])
dR_finger = dist.euclidean([finger['x'], finger['y']], [palmR['x'], palmR['y']])
d_finger = min(dL_finger, dR_finger)
return d_finger > min_finger_reach
def is_hand_open(thumb, forefinger, pinky, palmL, palmR, min_finger_reach):
thumb_out = is_finger_out(thumb, palmL, palmR, min_finger_reach)
forefinger_out = is_finger_out(forefinger, palmL, palmR, min_finger_reach)
pinky_out = is_finger_out(pinky, palmL, palmR, min_finger_reach)
return thumb_out and forefinger_out and pinky_out | null |
154,873 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
SEMAPHORES = {
(-90, -45): {'a': "a", 'n': "1"},
(-90, 0): {'a': "b", 'n': "2"},
(-90, 45): {'a': "c", 'n': "3"},
(-90, 90): {'a': "d", 'n': "4"},
(135, -90): {'a': "e", 'n': "5"},
(180, -90): {'a': "f", 'n': "6"},
(225, -90): {'a': "g", 'n': "7"},
(-45, 0): {'a': "h", 'n': "8"},
(-45, 45): {'a': "i", 'n': "9"},
(180, 90): {'a': "j", 'n': "capslock"},
(90, -45): {'a': "k", 'n': "0"},
(135, -45): {'a': "l", 'n': "\\"},
(180, -45): {'a': "m", 'n': "["},
(225, -45): {'a': "n", 'n': "]"},
(0, 45): {'a': "o", 'n': ","},
(90, 0): {'a': "p", 'n': ";"},
(135, 0): {'a': "q", 'n': "="},
(180, 0): {'a': "r", 'n': "-"},
(225, 0): {'a': "s", 'n': "."},
(90, 45): {'a': "t", 'n': "`"},
(135, 45): {'a': "u", 'n': "/"},
(225, 90): {'a': "v", 'n': '"'},
(135, 180): {'a': "w"},
(135, 225): {'a': "x", 'n': ""}, # clear last signal
(180, 45): {'a': "y"},
(180, 225): {'a': "z"},
(90, 90): {'a': "space", 'n': "enter"},
(135, 90): {'a': "tab"}, # custom "numerals" replacement
(225, 45): {'a': "escape"}, # custom "cancel" replacement
}
current_semaphore = ''
def type_and_remember(image=None, shift_on=False, command_on=False, control_on=False,
display_only=True, allow_repeat=False):
global current_semaphore, last_keys
if len(current_semaphore) == 0:
return
keys = []
if shift_on:
keys.append('shift')
if command_on:
keys.append('command')
if control_on:
keys.append('control')
keys.append(current_semaphore)
if allow_repeat or (keys != last_keys):
last_keys = keys.copy()
current_semaphore = ''
output(keys, image, display_only)
def type_semaphore(armL_angle, armR_angle, image, shift_on, numerals, command_on, control_on,
display_only, allow_repeat):
global current_semaphore
arm_match = SEMAPHORES.get((armL_angle, armR_angle), '')
if arm_match:
current_semaphore = arm_match.get('n', '') if numerals else arm_match.get('a', '')
type_and_remember(image, shift_on, command_on, control_on, display_only, allow_repeat)
return current_semaphore
return False | null |
154,874 | import argparse
from keyboard import keyboard
import mediapipe as mp
import cv2
from scipy.spatial import distance as dist
from math import atan, atan2, pi, degrees
from datetime import datetime
def render_and_maybe_exit(image, recording):
cv2.imshow('Semaphore', image)
if recording:
recording.write(image)
return cv2.waitKey(5) & 0xFF == 27 | null |
154,875 | import torch
class AlphaLossNV2(torch.nn.Module):
"""
Implement Neural Volumes alpha loss 2
"""
def __init__(self, lambda_alpha, clamp_alpha, init_epoch, force_opaque=False):
super().__init__()
self.lambda_alpha = lambda_alpha
self.clamp_alpha = clamp_alpha
self.init_epoch = init_epoch
self.force_opaque = force_opaque
if force_opaque:
self.bceloss = torch.nn.BCELoss()
self.register_buffer(
"epoch", torch.tensor(0, dtype=torch.long), persistent=True
)
def sched_step(self, num=1):
self.epoch += num
def forward(self, alpha_fine):
if self.lambda_alpha > 0.0 and self.epoch.item() >= self.init_epoch:
alpha_fine = torch.clamp(alpha_fine, 0.01, 0.99)
if self.force_opaque:
alpha_loss = self.lambda_alpha * self.bceloss(
alpha_fine, torch.ones_like(alpha_fine)
)
else:
alpha_loss = torch.log(alpha_fine) + torch.log(1.0 - alpha_fine)
alpha_loss = torch.clamp_min(alpha_loss, -self.clamp_alpha)
alpha_loss = self.lambda_alpha * alpha_loss.mean()
else:
alpha_loss = torch.zeros(1, device=alpha_fine.device)
return alpha_loss
def get_alpha_loss(conf):
lambda_alpha = conf.get_float("lambda_alpha")
clamp_alpha = conf.get_float("clamp_alpha")
init_epoch = conf.get_int("init_epoch")
force_opaque = conf.get_bool("force_opaque", False)
return AlphaLossNV2(
lambda_alpha, clamp_alpha, init_epoch, force_opaque=force_opaque
) | null |
154,876 | import torch
class RGBWithUncertainty(torch.nn.Module):
"""Implement the uncertainty loss from Kendall '17"""
def __init__(self, conf):
super().__init__()
self.element_loss = (
torch.nn.L1Loss(reduction="none")
if conf.get_bool("use_l1")
else torch.nn.MSELoss(reduction="none")
)
def forward(self, outputs, targets, betas):
"""computes the error per output, weights each element by the log variance
outputs is B x 3, targets is B x 3, betas is B"""
weighted_element_err = (
torch.mean(self.element_loss(outputs, targets), -1) / betas
)
return torch.mean(weighted_element_err) + torch.mean(torch.log(betas))
def get_rgb_loss(conf, coarse=True, using_bg=False, reduction="mean"):
if conf.get_bool("use_uncertainty", False) and not coarse:
print("using loss with uncertainty")
return RGBWithUncertainty(conf)
# if using_bg:
# print("using loss with background")
# return RGBWithBackground(conf)
print("using vanilla rgb loss")
return (
torch.nn.L1Loss(reduction=reduction)
if conf.get_bool("use_l1")
else torch.nn.MSELoss(reduction=reduction)
) | null |
154,877 | from .encoder import SpatialEncoder, ImageEncoder
from .resnetfc import ResnetFC
class ResnetFC(nn.Module):
def __init__(
self,
d_in,
d_out=4,
n_blocks=5,
d_latent=0,
d_hidden=128,
beta=0.0,
combine_layer=1000,
combine_type="average",
use_spade=False,
):
"""
:param d_in input size
:param d_out output size
:param n_blocks number of Resnet blocks
:param d_latent latent size, added in each resnet block (0 = disable)
:param d_hidden hidden dimension throughout network
:param beta softplus beta, 100 is reasonable; if <=0 uses ReLU activations instead
"""
super().__init__()
if d_in > 0:
self.lin_in = nn.Linear(d_in, d_hidden)
nn.init.constant_(self.lin_in.bias, 0.0)
nn.init.kaiming_normal_(self.lin_in.weight, a=0, mode="fan_in")
self.lin_out = nn.Linear(d_hidden, d_out)
nn.init.constant_(self.lin_out.bias, 0.0)
nn.init.kaiming_normal_(self.lin_out.weight, a=0, mode="fan_in")
self.n_blocks = n_blocks
self.d_latent = d_latent
self.d_in = d_in
self.d_out = d_out
self.d_hidden = d_hidden
self.combine_layer = combine_layer
self.combine_type = combine_type
self.use_spade = use_spade
self.blocks = nn.ModuleList(
[ResnetBlockFC(d_hidden, beta=beta) for i in range(n_blocks)]
)
if d_latent != 0:
n_lin_z = min(combine_layer, n_blocks)
self.lin_z = nn.ModuleList(
[nn.Linear(d_latent, d_hidden) for i in range(n_lin_z)]
)
for i in range(n_lin_z):
nn.init.constant_(self.lin_z[i].bias, 0.0)
nn.init.kaiming_normal_(self.lin_z[i].weight, a=0, mode="fan_in")
if self.use_spade:
self.scale_z = nn.ModuleList(
[nn.Linear(d_latent, d_hidden) for _ in range(n_lin_z)]
)
for i in range(n_lin_z):
nn.init.constant_(self.scale_z[i].bias, 0.0)
nn.init.kaiming_normal_(self.scale_z[i].weight, a=0, mode="fan_in")
if beta > 0:
self.activation = nn.Softplus(beta=beta)
else:
self.activation = nn.ReLU()
def forward(self, zx, combine_inner_dims=(1,), combine_index=None, dim_size=None):
"""
:param zx (..., d_latent + d_in)
:param combine_inner_dims Combining dimensions for use with multiview inputs.
Tensor will be reshaped to (-1, combine_inner_dims, ...) and reduced using combine_type
on dim 1, at combine_layer
"""
with profiler.record_function("resnetfc_infer"):
assert zx.size(-1) == self.d_latent + self.d_in
if self.d_latent > 0:
z = zx[..., : self.d_latent]
x = zx[..., self.d_latent :]
else:
x = zx
if self.d_in > 0:
x = self.lin_in(x)
else:
x = torch.zeros(self.d_hidden, device=zx.device)
for blkid in range(self.n_blocks):
if blkid == self.combine_layer:
# The following implements camera frustum culling, requires torch_scatter
# if combine_index is not None:
# combine_type = (
# "mean"
# if self.combine_type == "average"
# else self.combine_type
# )
# if dim_size is not None:
# assert isinstance(dim_size, int)
# x = torch_scatter.scatter(
# x,
# combine_index,
# dim=0,
# dim_size=dim_size,
# reduce=combine_type,
# )
# else:
x = util.combine_interleaved(
x, combine_inner_dims, self.combine_type
)
if self.d_latent > 0 and blkid < self.combine_layer:
tz = self.lin_z[blkid](z)
if self.use_spade:
sz = self.scale_z[blkid](z)
x = sz * x + tz
else:
x = x + tz
x = self.blocks[blkid](x)
out = self.lin_out(self.activation(x))
return out
def from_conf(cls, conf, d_in, **kwargs):
# PyHocon construction
return cls(
d_in,
n_blocks=conf.get_int("n_blocks", 5),
d_hidden=conf.get_int("d_hidden", 128),
beta=conf.get_float("beta", 0.0),
combine_layer=conf.get_int("combine_layer", 1000),
combine_type=conf.get_string("combine_type", "average"), # average | max
use_spade=conf.get_bool("use_spade", False),
**kwargs
)
def make_mlp(conf, d_in, d_latent=0, allow_empty=False, **kwargs):
mlp_type = conf.get_string("type", "mlp") # mlp | resnet
if mlp_type == "mlp":
net = ImplicitNet.from_conf(conf, d_in + d_latent, **kwargs)
elif mlp_type == "resnet":
net = ResnetFC.from_conf(conf, d_in, d_latent=d_latent, **kwargs)
elif mlp_type == "empty" and allow_empty:
net = None
else:
raise NotImplementedError("Unsupported MLP type")
return net | null |
154,878 | from .encoder import SpatialEncoder, ImageEncoder
from .resnetfc import ResnetFC
class SpatialEncoder(nn.Module):
def __init__(
self,
backbone="resnet34",
pretrained=True,
num_layers=4,
index_interp="bilinear",
index_padding="border",
upsample_interp="bilinear",
feature_scale=1.0,
use_first_pool=True,
norm_type="batch",
):
def index(self, uv, cam_z=None, image_size=(), z_bounds=None):
def forward(self, x):
def from_conf(cls, conf):
class ImageEncoder(nn.Module):
def __init__(self, backbone="resnet34", pretrained=True, latent_size=128):
def index(self, uv, cam_z=None, image_size=(), z_bounds=()):
def forward(self, x):
def from_conf(cls, conf):
def make_encoder(conf, **kwargs):
enc_type = conf.get_string("type", "spatial") # spatial | global
if enc_type == "spatial":
net = SpatialEncoder.from_conf(conf, **kwargs)
elif enc_type == "global":
net = ImageEncoder.from_conf(conf, **kwargs)
else:
raise NotImplementedError("Unsupported encoder type")
return net | null |
154,879 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def image_float_to_uint8(img):
"""
Convert a float image (0.0-1.0) to uint8 (0-255)
"""
vmin = np.min(img)
vmax = np.max(img)
if vmax - vmin < 1e-10:
vmax += 1e-10
img = (img - vmin) / (vmax - vmin)
img *= 255.0
return img.astype(np.uint8)
The provided code snippet includes necessary dependencies for implementing the `cmap` function. Write a Python function `def cmap(img, color_map=cv2.COLORMAP_HOT)` to solve the following problem:
Apply 'HOT' color to a float image
Here is the function:
def cmap(img, color_map=cv2.COLORMAP_HOT):
"""
Apply 'HOT' color to a float image
"""
return cv2.applyColorMap(image_float_to_uint8(img), color_map) | Apply 'HOT' color to a float image |
154,880 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `batched_index_select_nd` function. Write a Python function `def batched_index_select_nd(t, inds)` to solve the following problem:
Index select on dim 1 of a n-dimensional batched tensor. :param t (batch, n, ...) :param inds (batch, k) :return (batch, k, ...)
Here is the function:
def batched_index_select_nd(t, inds):
"""
Index select on dim 1 of a n-dimensional batched tensor.
:param t (batch, n, ...)
:param inds (batch, k)
:return (batch, k, ...)
"""
return t.gather(
1, inds[(...,) + (None,) * (len(t.shape) - 2)].expand(-1, -1, *t.shape[2:])
) | Index select on dim 1 of a n-dimensional batched tensor. :param t (batch, n, ...) :param inds (batch, k) :return (batch, k, ...) |
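A minimal sketch of `batched_index_select_nd` gathering per-batch rows along dim 1:

t = torch.arange(24, dtype=torch.float32).reshape(2, 4, 3)  # (batch=2, n=4, feature=3)
inds = torch.tensor([[0, 2], [3, 1]])                        # (batch=2, k=2)
out = batched_index_select_nd(t, inds)                       # (batch=2, k=2, feature=3)
# out[0] == t[0, [0, 2]] and out[1] == t[1, [3, 1]]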
154,881 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `batched_index_select_nd_last` function. Write a Python function `def batched_index_select_nd_last(t, inds)` to solve the following problem:
Index select on dim -1 of a >=2D multi-batched tensor. inds assumed to have all batch dimensions except one data dimension 'n' :param t (batch..., n, m) :param inds (batch..., k) :return (batch..., n, k)
Here is the function:
def batched_index_select_nd_last(t, inds):
"""
Index select on dim -1 of a >=2D multi-batched tensor. inds assumed
to have all batch dimensions except one data dimension 'n'
:param t (batch..., n, m)
:param inds (batch..., k)
:return (batch..., n, k)
"""
dummy = inds.unsqueeze(-2).expand(*inds.shape[:-1], t.size(-2), inds.size(-1))
out = t.gather(-1, dummy)
return out | Index select on dim -1 of a >=2D multi-batched tensor. inds assumed to have all batch dimensions except one data dimension 'n' :param t (batch..., n, m) :param inds (batch..., k) :return (batch..., n, k) |
154,882 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `repeat_interleave` function. Write a Python function `def repeat_interleave(input, repeats, dim=0)` to solve the following problem:
Repeat interleave along axis 0 torch.repeat_interleave is currently very slow https://github.com/pytorch/pytorch/issues/31980
Here is the function:
def repeat_interleave(input, repeats, dim=0):
"""
Repeat interleave along axis 0
torch.repeat_interleave is currently very slow
https://github.com/pytorch/pytorch/issues/31980
"""
output = input.unsqueeze(1).expand(-1, repeats, *input.shape[1:])
return output.reshape(-1, *input.shape[1:]) | Repeat interleave along axis 0 torch.repeat_interleave is currently very slow https://github.com/pytorch/pytorch/issues/31980 |
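A quick sketch of the custom `repeat_interleave`; for a scalar repeat count along dim 0 it matches `torch.repeat_interleave(x, repeats, dim=0)`:

x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])  # (2, 2)
y = repeat_interleave(x, 3)                 # (6, 2)
# rows: [1, 2], [1, 2], [1, 2], [3, 4], [3, 4], [3, 4]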
154,883 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def get_image_to_tensor_balanced(image_size=0):
ops = []
if image_size > 0:
ops.append(transforms.Resize(image_size))
ops.extend(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),]
)
return transforms.Compose(ops) | null |
154,884 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def get_mask_to_tensor():
return transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.0,), (1.0,))]
) | null |
154,885 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `homogeneous` function. Write a Python function `def homogeneous(points)` to solve the following problem:
Concat 1 to each point :param points (..., 3) :return (..., 4)
Here is the function:
def homogeneous(points):
"""
Concat 1 to each point
:param points (..., 3)
:return (..., 4)
"""
return F.pad(points, (0, 1), "constant", 1.0) | Concat 1 to each point :param points (..., 3) :return (..., 4) |
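A one-line sketch of `homogeneous`, padding points with a trailing 1:

pts = torch.tensor([[1.0, 2.0, 3.0]])  # (..., 3)
print(homogeneous(pts))                # tensor([[1., 2., 3., 1.]])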
154,886 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `coord_from_blender` function. Write a Python function `def coord_from_blender(dtype=torch.float32, device="cpu")` to solve the following problem:
Blender to standard coordinate system transform. Standard coordinate system is: x right y up z out (out=screen to face) Blender coordinate system is: x right y in z up :return (4, 4)
Here is the function:
def coord_from_blender(dtype=torch.float32, device="cpu"):
"""
Blender to standard coordinate system transform.
Standard coordinate system is: x right y up z out (out=screen to face)
Blender coordinate system is: x right y in z up
:return (4, 4)
"""
return torch.tensor(
[[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]],
dtype=dtype,
device=device,
) | Blender to standard coordinate system transform. Standard coordinate system is: x right y up z out (out=screen to face) Blender coordinate system is: x right y in z up :return (4, 4) |
154,887 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `coord_to_blender` function. Write a Python function `def coord_to_blender(dtype=torch.float32, device="cpu")` to solve the following problem:
Standard to Blender coordinate system transform. Standard coordinate system is: x right y up z out (out=screen to face) Blender coordinate system is: x right y in z up :return (4, 4)
Here is the function:
def coord_to_blender(dtype=torch.float32, device="cpu"):
"""
Standard to Blender coordinate system transform.
Standard coordinate system is: x right y up z out (out=screen to face)
Blender coordinate system is: x right y in z up
:return (4, 4)
"""
return torch.tensor(
[[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]],
dtype=dtype,
device=device,
) | Standard to Blender coordinate system transform. Standard coordinate system is: x right y up z out (out=screen to face) Blender coordinate system is: x right y in z up :return (4, 4) |
154,888 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `look_at` function. Write a Python function `def look_at(origin, target, world_up=np.array([0, 1, 0], dtype=np.float32))` to solve the following problem:
Get 4x4 camera to world space matrix, for camera looking at target
Here is the function:
def look_at(origin, target, world_up=np.array([0, 1, 0], dtype=np.float32)):
"""
Get 4x4 camera to world space matrix, for camera looking at target
"""
back = origin - target
back /= np.linalg.norm(back)
right = np.cross(world_up, back)
right /= np.linalg.norm(right)
up = np.cross(back, right)
cam_to_world = np.empty((4, 4), dtype=np.float32)
cam_to_world[:3, 0] = right
cam_to_world[:3, 1] = up
cam_to_world[:3, 2] = back
cam_to_world[:3, 3] = origin
cam_to_world[3, :] = [0, 0, 0, 1]
return cam_to_world | Get 4x4 camera to world space matrix, for camera looking at target |
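A minimal usage sketch (values are illustrative): place the camera two units along +z looking back at the origin; since the returned matrix is camera-to-world, its translation column equals the camera position.
import numpy as np
origin = np.array([0.0, 0.0, 2.0], dtype=np.float32)
target = np.zeros(3, dtype=np.float32)
cam_to_world = look_at(origin, target)  # (4, 4)
assert np.allclose(cam_to_world[:3, 3], origin)  # translation column is the camera origin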
154,889 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `get_cuda` function. Write a Python function `def get_cuda(gpu_id)` to solve the following problem:
Get a torch.device for GPU gpu_id. If GPU not available, returns CPU device.
Here is the function:
def get_cuda(gpu_id):
"""
Get a torch.device for GPU gpu_id. If GPU not available,
returns CPU device.
"""
return (
torch.device("cuda:%d" % gpu_id)
if torch.cuda.is_available()
else torch.device("cpu")
) | Get a torch.device for GPU gpu_id. If GPU not available, returns CPU device. |
154,890 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `masked_sample` function. Write a Python function `def masked_sample(masks, num_pix, prop_inside, thresh=0.5)` to solve the following problem:
:return (num_pix, 3)
Here is the function:
def masked_sample(masks, num_pix, prop_inside, thresh=0.5):
"""
:return (num_pix, 3)
"""
num_inside = int(num_pix * prop_inside + 0.5)
num_outside = num_pix - num_inside
inside = (masks >= thresh).nonzero(as_tuple=False)
outside = (masks < thresh).nonzero(as_tuple=False)
pix_inside = inside[torch.randint(0, inside.shape[0], (num_inside,))]
pix_outside = outside[torch.randint(0, outside.shape[0], (num_outside,))]
pix = torch.cat((pix_inside, pix_outside))
return pix | :return (num_pix, 3) |
154,891 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `bbox_sample` function. Write a Python function `def bbox_sample(bboxes, num_pix)` to solve the following problem:
:return (num_pix, 3)
Here is the function:
def bbox_sample(bboxes, num_pix):
"""
:return (num_pix, 3)
"""
image_ids = torch.randint(0, bboxes.shape[0], (num_pix,))
pix_bboxes = bboxes[image_ids]
x = (
torch.rand(num_pix) * (pix_bboxes[:, 2] + 1 - pix_bboxes[:, 0])
+ pix_bboxes[:, 0]
).long()
y = (
torch.rand(num_pix) * (pix_bboxes[:, 3] + 1 - pix_bboxes[:, 1])
+ pix_bboxes[:, 1]
).long()
pix = torch.stack((image_ids, y, x), dim=-1)
return pix | :return (num_pix, 3) |
154,892 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def unproj_map(width, height, f, c=None, device="cpu"):
"""
Get camera unprojection map for given image size.
[y,x] of output tensor will contain unit vector of camera ray of that pixel.
:param width image width
:param height image height
:param f focal length, either a number or tensor [fx, fy]
:param c principal point, optional, either None or tensor [cx, cy]
if not specified uses center of image
:return unproj map (height, width, 3)
"""
if c is None:
c = [width * 0.5, height * 0.5]
else:
c = c.squeeze()
if isinstance(f, float):
f = [f, f]
elif len(f.shape) == 0:
f = f[None].expand(2)
elif len(f.shape) == 1:
f = f.expand(2)
Y, X = torch.meshgrid(
torch.arange(height, dtype=torch.float32) - float(c[1]),
torch.arange(width, dtype=torch.float32) - float(c[0]),
)
X = X.to(device=device) / float(f[0])
Y = Y.to(device=device) / float(f[1])
Z = torch.ones_like(X)
unproj = torch.stack((X, -Y, -Z), dim=-1)
unproj /= torch.norm(unproj, dim=-1).unsqueeze(-1)
return unproj
The provided code snippet includes necessary dependencies for implementing the `gen_rays` function. Write a Python function `def gen_rays(poses, width, height, focal, z_near, z_far, c=None, ndc=False)` to solve the following problem:
Generate camera rays :return (B, H, W, 8)
Here is the function:
def gen_rays(poses, width, height, focal, z_near, z_far, c=None, ndc=False):
"""
Generate camera rays
:return (B, H, W, 8)
"""
num_images = poses.shape[0]
device = poses.device
cam_unproj_map = (
unproj_map(width, height, focal.squeeze(), c=c, device=device)
.unsqueeze(0)
.repeat(num_images, 1, 1, 1)
)
cam_centers = poses[:, None, None, :3, 3].expand(-1, height, width, -1)
cam_raydir = torch.matmul(
poses[:, None, None, :3, :3], cam_unproj_map.unsqueeze(-1)
)[:, :, :, :, 0]
if ndc:
if not (z_near == 0 and z_far == 1):
warnings.warn(
"dataset z near and z_far not compatible with NDC, setting them to 0, 1 NOW"
)
z_near, z_far = 0.0, 1.0
cam_centers, cam_raydir = ndc_rays(
width, height, focal, 1.0, cam_centers, cam_raydir
)
cam_nears = (
torch.tensor(z_near, device=device)
.view(1, 1, 1, 1)
.expand(num_images, height, width, -1)
)
cam_fars = (
torch.tensor(z_far, device=device)
.view(1, 1, 1, 1)
.expand(num_images, height, width, -1)
)
return torch.cat(
(cam_centers, cam_raydir, cam_nears, cam_fars), dim=-1
) # (B, H, W, 8) | Generate camera rays :return (B, H, W, 8) |
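A small shape-check sketch, assuming a single identity camera-to-world pose, a 4x4 image, and a focal length of 100 pixels (all values are hypothetical):
import torch
poses = torch.eye(4).unsqueeze(0)  # (1, 4, 4) camera-to-world
rays = gen_rays(poses, width=4, height=4, focal=torch.tensor(100.0), z_near=0.8, z_far=1.8)
print(rays.shape)  # torch.Size([1, 4, 4, 8]): origin (3) + direction (3) + near (1) + far (1)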
154,893 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def trans_t(t):
return torch.tensor(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, t], [0, 0, 0, 1],], dtype=torch.float32,
)
def rot_phi(phi):
return torch.tensor(
[
[1, 0, 0, 0],
[0, np.cos(phi), -np.sin(phi), 0],
[0, np.sin(phi), np.cos(phi), 0],
[0, 0, 0, 1],
],
dtype=torch.float32,
)
def rot_theta(th):
return torch.tensor(
[
[np.cos(th), 0, -np.sin(th), 0],
[0, 1, 0, 0],
[np.sin(th), 0, np.cos(th), 0],
[0, 0, 0, 1],
],
dtype=torch.float32,
)
The provided code snippet includes necessary dependencies for implementing the `pose_spherical` function. Write a Python function `def pose_spherical(theta, phi, radius)` to solve the following problem:
Spherical rendering poses, from NeRF
Here is the function:
def pose_spherical(theta, phi, radius):
"""
Spherical rendering poses, from NeRF
"""
c2w = trans_t(radius)
c2w = rot_phi(phi / 180.0 * np.pi) @ c2w
c2w = rot_theta(theta / 180.0 * np.pi) @ c2w
c2w = (
torch.tensor(
[[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]],
dtype=torch.float32,
)
@ c2w
)
return c2w | Spherical rendering poses, from NeRF |
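For instance, a ring of render poses at a fixed elevation, as is commonly done for turntable renders (elevation -30 degrees and radius 4 are made-up values):
import numpy as np
import torch
render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180, 180, 41)[:-1]])
print(render_poses.shape)  # torch.Size([40, 4, 4])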
154,894 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad) | null |
154,895 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `get_norm_layer` function. Write a Python function `def get_norm_layer(norm_type="instance", group_norm_groups=32)` to solve the following problem:
Return a normalization layer Parameters: norm_type (str) -- the name of the normalization layer: batch | instance | none For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev). For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
Here is the function:
def get_norm_layer(norm_type="instance", group_norm_groups=32):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == "batch":
norm_layer = functools.partial(
nn.BatchNorm2d, affine=True, track_running_stats=True
)
elif norm_type == "instance":
norm_layer = functools.partial(
nn.InstanceNorm2d, affine=False, track_running_stats=False
)
elif norm_type == "group":
norm_layer = functools.partial(nn.GroupNorm, group_norm_groups)
elif norm_type == "none":
norm_layer = None
else:
raise NotImplementedError("normalization layer [%s] is not found" % norm_type)
return norm_layer | Return a normalization layer Parameters: norm_type (str) -- the name of the normalization layer: batch | instance | none For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev). For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics. |
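A short sketch of how the returned factory is used (channel counts are made up): the factory is later called with the channel count of the layer it normalizes.
import torch
from torch import nn
norm_layer = get_norm_layer("group", group_norm_groups=8)
block = nn.Sequential(nn.Conv2d(16, 32, 3, padding=1), norm_layer(32), nn.ReLU())
print(block(torch.randn(2, 16, 8, 8)).shape)  # torch.Size([2, 32, 8, 8])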
154,896 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def make_conv_2d(
dim_in,
dim_out,
padding_type="reflect",
norm_layer=None,
activation=None,
kernel_size=3,
use_bias=False,
stride=1,
no_pad=False,
zero_init=False,
):
conv_block = []
amt = kernel_size // 2
if stride > 1 and not no_pad:
raise NotImplementedError(
"Padding with stride > 1 not supported, use same_pad_conv2d"
)
if amt > 0 and not no_pad:
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(amt)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(amt)]
elif padding_type == "zero":
conv_block += [nn.ZeroPad2d(amt)]
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block.append(
nn.Conv2d(
dim_in, dim_out, kernel_size=kernel_size, bias=use_bias, stride=stride
)
)
if zero_init:
nn.init.zeros_(conv_block[-1].weight)
# else:
# nn.init.kaiming_normal_(conv_block[-1].weight)
if norm_layer is not None:
conv_block.append(norm_layer(dim_out))
if activation is not None:
conv_block.append(activation)
return nn.Sequential(*conv_block) | null |
154,897 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def calc_same_pad_conv2d(t_shape, kernel_size=3, stride=1):
in_height, in_width = t_shape[-2:]
out_height = math.ceil(in_height / stride)
out_width = math.ceil(in_width / stride)
pad_along_height = max((out_height - 1) * stride + kernel_size - in_height, 0)
pad_along_width = max((out_width - 1) * stride + kernel_size - in_width, 0)
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
return pad_left, pad_right, pad_top, pad_bottom
The provided code snippet includes necessary dependencies for implementing the `same_pad_conv2d` function. Write a Python function `def same_pad_conv2d(t, padding_type="reflect", kernel_size=3, stride=1, layer=None)` to solve the following problem:
Perform SAME padding on tensor, given kernel size/stride of conv operator assumes kernel/stride are equal in all dimensions. Use before conv called. Dilation not supported. :param t image tensor input (B, C, H, W) :param padding_type padding type constant | reflect | replicate | circular constant is 0-pad. :param kernel_size kernel size of conv :param stride stride of conv :param layer optionally, pass conv layer to automatically get kernel_size and stride (overrides these)
Here is the function:
def same_pad_conv2d(t, padding_type="reflect", kernel_size=3, stride=1, layer=None):
"""
Perform SAME padding on tensor, given kernel size/stride of conv operator
assumes kernel/stride are equal in all dimensions.
Use before conv called.
Dilation not supported.
:param t image tensor input (B, C, H, W)
:param padding_type padding type constant | reflect | replicate | circular
constant is 0-pad.
:param kernel_size kernel size of conv
:param stride stride of conv
:param layer optionally, pass conv layer to automatically get kernel_size and stride
(overrides these)
"""
if layer is not None:
if isinstance(layer, nn.Sequential):
layer = next(layer.children())
kernel_size = layer.kernel_size[0]
stride = layer.stride[0]
return F.pad(
t, calc_same_pad_conv2d(t.shape, kernel_size, stride), mode=padding_type
) | Perform SAME padding on tensor, given kernel size/stride of conv operator assumes kernel/stride are equal in all dimensions. Use before conv called. Dilation not supported. :param t image tensor input (B, C, H, W) :param padding_type padding type constant | reflect | replicate | circular constant is 0-pad. :param kernel_size kernel size of conv :param stride stride of conv :param layer optionally, pass conv layer to automatically get kernel_size and stride (overrides these) |
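A sketch of the intended call pattern (shapes are illustrative): pad first, then apply the convolution, so the output spatial size is ceil(input / stride) even when the input size is not divisible by the stride.
import torch
from torch import nn
conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
x = torch.randn(1, 3, 7, 7)
y = conv(same_pad_conv2d(x, layer=conv))
print(y.shape)  # torch.Size([1, 8, 4, 4]) because ceil(7 / 2) == 4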
154,898 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def calc_same_pad_conv2d(t_shape, kernel_size=3, stride=1):
in_height, in_width = t_shape[-2:]
out_height = math.ceil(in_height / stride)
out_width = math.ceil(in_width / stride)
pad_along_height = max((out_height - 1) * stride + kernel_size - in_height, 0)
pad_along_width = max((out_width - 1) * stride + kernel_size - in_width, 0)
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
return pad_left, pad_right, pad_top, pad_bottom
The provided code snippet includes necessary dependencies for implementing the `same_unpad_deconv2d` function. Write a Python function `def same_unpad_deconv2d(t, kernel_size=3, stride=1, layer=None)` to solve the following problem:
Perform SAME unpad on tensor, given kernel/stride of deconv operator. Use after deconv called. Dilation not supported.
Here is the function:
def same_unpad_deconv2d(t, kernel_size=3, stride=1, layer=None):
"""
Perform SAME unpad on tensor, given kernel/stride of deconv operator.
Use after deconv called.
Dilation not supported.
"""
if layer is not None:
if isinstance(layer, nn.Sequential):
layer = next(layer.children())
kernel_size = layer.kernel_size[0]
stride = layer.stride[0]
h_scaled = (t.shape[-2] - 1) * stride
w_scaled = (t.shape[-1] - 1) * stride
pad_left, pad_right, pad_top, pad_bottom = calc_same_pad_conv2d(
(h_scaled, w_scaled), kernel_size, stride
)
if pad_right == 0:
pad_right = -10000
if pad_bottom == 0:
pad_bottom = -10000
return t[..., pad_top:-pad_bottom, pad_left:-pad_right] | Perform SAME unpad on tensor, given kernel/stride of deconv operator. Use after deconv called. Dilation not supported. |
154,899 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
def combine_interleaved(t, inner_dims=(1,), agg_type="average"):
if len(inner_dims) == 1 and inner_dims[0] == 1:
return t
t = t.reshape(-1, *inner_dims, *t.shape[1:])
if agg_type == "average":
t = torch.mean(t, dim=1)
elif agg_type == "max":
t = torch.max(t, dim=1)[0]
else:
raise NotImplementedError("Unsupported combine type " + agg_type)
return t | null |
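A sketch with made-up sizes: features for SB=3 objects with NV=2 source views each are stored interleaved along the batch dimension and averaged per object.
import torch
t = torch.randn(6, 128)  # (SB * NV, C) with SB=3, NV=2
print(combine_interleaved(t, inner_dims=(2,), agg_type="average").shape)  # torch.Size([3, 128])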
154,900 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `psnr` function. Write a Python function `def psnr(pred, target)` to solve the following problem:
Compute PSNR of two tensors in decibels. pred/target should be of same size or broadcastable
Here is the function:
def psnr(pred, target):
"""
Compute PSNR of two tensors in decibels.
pred/target should be of same size or broadcastable
"""
mse = ((pred - target) ** 2).mean()
psnr = -10 * math.log10(mse)
return psnr | Compute PSNR of two tensors in decibels. pred/target should be of same size or broadcastable |
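A worked example: a constant error of 0.1 on values in [0, 1] gives MSE = 0.01, so PSNR = -10 * log10(0.01) = 20 dB.
import torch
pred = torch.full((1, 3, 8, 8), 0.6)
target = torch.full((1, 3, 8, 8), 0.5)
print(psnr(pred, target))  # approximately 20.0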
154,901 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `quat_to_rot` function. Write a Python function `def quat_to_rot(q)` to solve the following problem:
Quaternion to rotation matrix
Here is the function:
def quat_to_rot(q):
"""
Quaternion to rotation matrix
"""
batch_size, _ = q.shape
q = F.normalize(q, dim=1)
R = torch.ones((batch_size, 3, 3), device=q.device)
qr = q[:, 0]
qi = q[:, 1]
qj = q[:, 2]
qk = q[:, 3]
R[:, 0, 0] = 1 - 2 * (qj ** 2 + qk ** 2)
R[:, 0, 1] = 2 * (qj * qi - qk * qr)
R[:, 0, 2] = 2 * (qi * qk + qr * qj)
R[:, 1, 0] = 2 * (qj * qi + qk * qr)
R[:, 1, 1] = 1 - 2 * (qi ** 2 + qk ** 2)
R[:, 1, 2] = 2 * (qj * qk - qi * qr)
R[:, 2, 0] = 2 * (qk * qi - qj * qr)
R[:, 2, 1] = 2 * (qj * qk + qi * qr)
R[:, 2, 2] = 1 - 2 * (qi ** 2 + qj ** 2)
return R | Quaternion to rotation matrix |
154,902 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `rot_to_quat` function. Write a Python function `def rot_to_quat(R)` to solve the following problem:
Rotation matrix to quaternion
Here is the function:
def rot_to_quat(R):
"""
Rotation matrix to quaternion
"""
batch_size, _, _ = R.shape
q = torch.ones((batch_size, 4), device=R.device)
R00 = R[:, 0, 0]
R01 = R[:, 0, 1]
R02 = R[:, 0, 2]
R10 = R[:, 1, 0]
R11 = R[:, 1, 1]
R12 = R[:, 1, 2]
R20 = R[:, 2, 0]
R21 = R[:, 2, 1]
R22 = R[:, 2, 2]
q[:, 0] = torch.sqrt(1.0 + R00 + R11 + R22) / 2
q[:, 1] = (R21 - R12) / (4 * q[:, 0])
q[:, 2] = (R02 - R20) / (4 * q[:, 0])
q[:, 3] = (R10 - R01) / (4 * q[:, 0])
return q | Rotation matrix to quaternion |
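A round-trip sketch with the quat_to_rot function above: the identity rotation maps to the quaternion (1, 0, 0, 0) and back.
import torch
R = torch.eye(3).unsqueeze(0)  # (1, 3, 3)
q = rot_to_quat(R)  # tensor([[1., 0., 0., 0.]])
print(torch.allclose(quat_to_rot(q), R, atol=1e-6))  # True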
154,903 | import cv2
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import functools
import math
import warnings
The provided code snippet includes necessary dependencies for implementing the `get_module` function. Write a Python function `def get_module(net)` to solve the following problem:
Shorthand for either net.module (if net is instance of DataParallel) or net
Here is the function:
def get_module(net):
"""
Shorthand for either net.module (if net is instance of DataParallel) or net
"""
if isinstance(net, torch.nn.DataParallel):
return net.module
else:
return net | Shorthand for either net.module (if net is instance of DataParallel) or net |
154,904 | import mcubes
import torch
import numpy as np
import util
import tqdm
import warnings
The provided code snippet includes necessary dependencies for implementing the `marching_cubes` function. Write a Python function `def marching_cubes( occu_net, c1=[-1, -1, -1], c2=[1, 1, 1], reso=[128, 128, 128], isosurface=50.0, sigma_idx=3, eval_batch_size=100000, coarse=True, device=None, )` to solve the following problem:
Run marching cubes on network. Uses PyMCubes. WARNING: does not make much sense with viewdirs in current form, since sigma depends on viewdirs. :param occu_net main NeRF type network :param c1 corner 1 of marching cube bounds x,y,z :param c2 corner 2 of marching cube bounds x,y,z (all > c1) :param reso resolutions of marching cubes x,y,z :param isosurface sigma-isosurface of marching cubes :param sigma_idx index of 'sigma' value in last dimension of occu_net's output :param eval_batch_size batch size for evaluation :param coarse whether to use coarse NeRF for evaluation :param device optionally, device to put points for evaluation. By default uses device of occu_net's first parameter.
Here is the function:
def marching_cubes(
occu_net,
c1=[-1, -1, -1],
c2=[1, 1, 1],
reso=[128, 128, 128],
isosurface=50.0,
sigma_idx=3,
eval_batch_size=100000,
coarse=True,
device=None,
):
"""
Run marching cubes on network. Uses PyMCubes.
WARNING: does not make much sense with viewdirs in current form, since
sigma depends on viewdirs.
:param occu_net main NeRF type network
:param c1 corner 1 of marching cube bounds x,y,z
:param c2 corner 2 of marching cube bounds x,y,z (all > c1)
:param reso resolutions of marching cubes x,y,z
:param isosurface sigma-isosurface of marching cubes
:param sigma_idx index of 'sigma' value in last dimension of occu_net's output
:param eval_batch_size batch size for evaluation
:param coarse whether to use coarse NeRF for evaluation
:param device optionally, device to put points for evaluation.
By default uses device of occu_net's first parameter.
"""
if occu_net.use_viewdirs:
warnings.warn(
"Running marching cubes with fake view dirs (pointing to origin), output may be invalid"
)
with torch.no_grad():
grid = util.gen_grid(*zip(c1, c2, reso), ij_indexing=True)
is_train = occu_net.training
print("Evaluating sigma @", grid.size(0), "points")
occu_net.eval()
all_sigmas = []
if device is None:
device = next(occu_net.parameters()).device
grid_spl = torch.split(grid, eval_batch_size, dim=0)
if occu_net.use_viewdirs:
fake_viewdirs = -grid / torch.norm(grid, dim=-1).unsqueeze(-1)
vd_spl = torch.split(fake_viewdirs, eval_batch_size, dim=0)
for pnts, vd in tqdm.tqdm(zip(grid_spl, vd_spl), total=len(grid_spl)):
outputs = occu_net(
pnts.to(device=device), coarse=coarse, viewdirs=vd.to(device=device)
)
sigmas = outputs[..., sigma_idx]
all_sigmas.append(sigmas.cpu())
else:
for pnts in tqdm.tqdm(grid_spl):
outputs = occu_net(pnts.to(device=device), coarse=coarse)
sigmas = outputs[..., sigma_idx]
all_sigmas.append(sigmas.cpu())
sigmas = torch.cat(all_sigmas, dim=0)
sigmas = sigmas.view(*reso).cpu().numpy()
print("Running marching cubes")
vertices, triangles = mcubes.marching_cubes(sigmas, isosurface)
# Scale
c1, c2 = np.array(c1), np.array(c2)
vertices *= (c2 - c1) / np.array(reso)
if is_train:
occu_net.train()
return vertices + c1, triangles | Run marching cubes on network. Uses PyMCubes. WARNING: does not make much sense with viewdirs in current form, since sigma depends on viewdirs. :param occu_net main NeRF type network :param c1 corner 1 of marching cube bounds x,y,z :param c2 corner 2 of marching cube bounds x,y,z (all > c1) :param reso resolutions of marching cubes x,y,z :param isosurface sigma-isosurface of marching cubes :param sigma_idx index of 'sigma' value in last dimension of occu_net's output :param eval_batch_size batch size for evaluation :param coarse whether to use coarse NeRF for evaluation :param device optionally, device to put points for evaluation. By default uses device of occu_net's first parameter. |
154,905 | import mcubes
import torch
import numpy as np
import util
import tqdm
import warnings
The provided code snippet includes necessary dependencies for implementing the `save_obj` function. Write a Python function `def save_obj(vertices, triangles, path, vert_rgb=None)` to solve the following problem:
Save OBJ file, optionally with vertex colors. This version is faster than PyMCubes and supports color. Taken from PIFu. :param vertices (N, 3) :param triangles (N, 3) :param vert_rgb (N, 3) rgb
Here is the function:
def save_obj(vertices, triangles, path, vert_rgb=None):
"""
Save OBJ file, optionally with vertex colors.
This version is faster than PyMCubes and supports color.
Taken from PIFu.
:param vertices (N, 3)
:param triangles (N, 3)
:param vert_rgb (N, 3) rgb
"""
file = open(path, "w")
if vert_rgb is None:
# No color
for v in vertices:
file.write("v %.4f %.4f %.4f\n" % (v[0], v[1], v[2]))
else:
# Color
for idx, v in enumerate(vertices):
c = vert_rgb[idx]
file.write(
"v %.4f %.4f %.4f %.4f %.4f %.4f\n"
% (v[0], v[1], v[2], c[0], c[1], c[2])
)
for f in triangles:
f_plus = f + 1
file.write("f %d %d %d\n" % (f_plus[0], f_plus[1], f_plus[2]))
file.close() | Save OBJ file, optionally with vertex colors. This version is faster than PyMCubes and supports color. Taken from PIFu. :param vertices (N, 3) :param triangles (N, 3) :param vert_rgb (N, 3) rgb |
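A minimal sketch writing a single colored triangle (all values are made up):
import numpy as np
verts = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float32)
tris = np.array([[0, 1, 2]], dtype=np.int64)
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
save_obj(verts, tris, "triangle.obj", vert_rgb=colors)  # writes 'v x y z r g b' and 'f i j k' lines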
154,906 | import sys
import os
import argparse
from pyhocon import ConfigFactory
def parse_args(
callback=None,
training=False,
default_conf="conf/default_mv.conf",
default_expname="example",
default_data_format="dvr",
default_num_epochs=10000000,
default_lr=1e-4,
default_gamma=1.00,
default_datadir="data",
default_ray_batch_size=50000,
):
parser = argparse.ArgumentParser()
parser.add_argument("--conf", "-c", type=str, default=None)
parser.add_argument("--resume", "-r", action="store_true", help="continue training")
parser.add_argument(
"--gpu_id", type=str, default="0", help="GPU(s) to use, space delimited"
)
parser.add_argument(
"--name", "-n", type=str, default=default_expname, help="experiment name"
)
parser.add_argument(
"--dataset_format",
"-F",
type=str,
default=None,
help="Dataset format, multi_obj | dvr | dvr_gen | dvr_dtu | srn",
)
parser.add_argument(
"--exp_group_name",
"-G",
type=str,
default=None,
help="if we want to group some experiments together",
)
parser.add_argument(
"--logs_path", type=str, default="logs", help="logs output directory",
)
parser.add_argument(
"--checkpoints_path",
type=str,
default="checkpoints",
help="checkpoints output directory",
)
parser.add_argument(
"--visual_path",
type=str,
default="visuals",
help="visualization output directory",
)
parser.add_argument(
"--epochs",
type=int,
default=default_num_epochs,
help="number of epochs to train for",
)
parser.add_argument("--lr", type=float, default=default_lr, help="learning rate")
parser.add_argument(
"--gamma", type=float, default=default_gamma, help="learning rate decay factor"
)
parser.add_argument(
"--datadir", "-D", type=str, default=None, help="Dataset directory"
)
parser.add_argument(
"--ray_batch_size", "-R", type=int, default=default_ray_batch_size, help="Ray batch size"
)
if callback is not None:
parser = callback(parser)
args = parser.parse_args()
if args.exp_group_name is not None:
args.logs_path = os.path.join(args.logs_path, args.exp_group_name)
args.checkpoints_path = os.path.join(args.checkpoints_path, args.exp_group_name)
args.visual_path = os.path.join(args.visual_path, args.exp_group_name)
os.makedirs(os.path.join(args.checkpoints_path, args.name), exist_ok=True)
os.makedirs(os.path.join(args.visual_path, args.name), exist_ok=True)
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
EXPCONF_PATH = os.path.join(PROJECT_ROOT, "expconf.conf")
expconf = ConfigFactory.parse_file(EXPCONF_PATH)
if args.conf is None:
args.conf = expconf.get_string("config." + args.name, default_conf)
if args.datadir is None:
args.datadir = expconf.get_string("datadir." + args.name, default_datadir)
conf = ConfigFactory.parse_file(args.conf)
if args.dataset_format is None:
args.dataset_format = conf.get_string("data.format", default_data_format)
args.gpu_id = list(map(int, args.gpu_id.split()))
print("EXPERIMENT NAME:", args.name)
if training:
print("CONTINUE?", "yes" if args.resume else "no")
print("* Config file:", args.conf)
print("* Dataset format:", args.dataset_format)
print("* Dataset location:", args.datadir)
return args, conf | null |
154,907 | import sys
import os
import warnings
import trainlib
from model import make_model, loss
from render import NeRFRenderer
from data import get_split_dataset
import util
import numpy as np
import torch.nn.functional as F
import torch
from dotmap import DotMap
def extra_args(parser):
parser.add_argument(
"--batch_size", "-B", type=int, default=4, help="Object batch size ('SB')"
)
parser.add_argument(
"--nviews",
"-V",
type=str,
default="1",
help="Number of source views (multiview); put multiple (space delim) to pick randomly per batch ('NV')",
)
parser.add_argument(
"--freeze_enc",
action="store_true",
default=None,
help="Freeze encoder weights and only train MLP",
)
parser.add_argument(
"--no_bbox_step",
type=int,
default=100000,
help="Step to stop using bbox sampling",
)
parser.add_argument(
"--fixed_test",
action="store_true",
default=None,
help="Freeze encoder weights and only train MLP",
)
return parser | null |
154,908 | import sys
import argparse
import os
import os.path as osp
import json
from math import floor, ceil
import point_rend
from detectron2.utils.logger import setup_logger
import numpy as np
import cv2
import torch
import tqdm
import glob
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
The provided code snippet includes necessary dependencies for implementing the `_crop_image` function. Write a Python function `def _crop_image(img, rect, const_border=False, value=0)` to solve the following problem:
Image cropping helper
Here is the function:
def _crop_image(img, rect, const_border=False, value=0):
"""
Image cropping helper
"""
x, y, w, h = rect
left = abs(x) if x < 0 else 0
top = abs(y) if y < 0 else 0
right = abs(img.shape[1] - (x + w)) if x + w >= img.shape[1] else 0
bottom = abs(img.shape[0] - (y + h)) if y + h >= img.shape[0] else 0
color = [value] * img.shape[2] if const_border else None
new_img = cv2.copyMakeBorder(
img,
top,
bottom,
left,
right,
cv2.BORDER_CONSTANT if const_border else cv2.BORDER_REPLICATE,
value=color,
)
if len(new_img.shape) == 2:
new_img = new_img[..., None]
x = x + left
y = y + top
return new_img[y : (y + h), x : (x + w), :] | Image cropping helper |
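A sketch of the boundary handling (sizes are illustrative): a crop rectangle that extends past the image is filled by border replication, or by a constant when const_border=True.
import numpy as np
img = np.zeros((48, 48, 3), dtype=np.uint8)
patch = _crop_image(img, (-8, -8, 64, 64))
print(patch.shape)  # (64, 64, 3)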
154,909 | import sys
import argparse
import os
import os.path as osp
import json
from math import floor, ceil
import point_rend
from detectron2.utils.logger import setup_logger
import numpy as np
import cv2
import torch
import tqdm
import glob
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
def _is_image_path(f):
return (
f.endswith(".jpg")
or f.endswith(".jpeg")
or f.endswith(".png")
or f.endswith(".bmp")
or f.endswith(".tiff")
or f.endswith(".gif")
) | null |
154,910 | import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, cat
from detectron2.structures import BitMasks
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from .point_features import point_sample
def point_sample(input, point_coords, **kwargs):
"""
A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors.
Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside
[0, 1] x [0, 1] square.
Args:
input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid.
point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains
[0, 1] x [0, 1] normalized point coordinates.
Returns:
output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains
features for points in `point_coords`. The features are obtained via bilinear
interplation from `input` the same way as :function:`torch.nn.functional.grid_sample`.
"""
add_dim = False
if point_coords.dim() == 3:
add_dim = True
point_coords = point_coords.unsqueeze(2)
output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
The provided code snippet includes necessary dependencies for implementing the `roi_mask_point_loss` function. Write a Python function `def roi_mask_point_loss(mask_logits, instances, points_coord)` to solve the following problem:
Compute the point-based loss for instance segmentation mask predictions. Args: mask_logits (Tensor): A tensor of shape (R, C, P) or (R, 1, P) for class-specific or class-agnostic, where R is the total number of predicted masks in all images, C is the number of foreground classes, and P is the number of points sampled for each mask. The values are logits. instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. These instances are in 1:1 correspondence with the `mask_logits`. So, i_th element of the list contains R_i objects and R_1 + ... + R_N is equal to R. The ground-truth labels (class, box, mask, ...) associated with each instance are stored in fields. points_coords (Tensor): A tensor of shape (R, P, 2), where R is the total number of predicted masks and P is the number of points for each mask. The coordinates are in the image pixel coordinate space, i.e. [0, H] x [0, W]. Returns: point_loss (Tensor): A scalar tensor containing the loss.
Here is the function:
def roi_mask_point_loss(mask_logits, instances, points_coord):
"""
Compute the point-based loss for instance segmentation mask predictions.
Args:
mask_logits (Tensor): A tensor of shape (R, C, P) or (R, 1, P) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images, C is the
number of foreground classes, and P is the number of points sampled for each mask.
The values are logits.
instances (list[Instances]): A list of N Instances, where N is the number of images
in the batch. These instances are in 1:1 correspondence with the `mask_logits`. So, i_th
element of the list contains R_i objects and R_1 + ... + R_N is equal to R.
The ground-truth labels (class, box, mask, ...) associated with each instance are stored
in fields.
points_coords (Tensor): A tensor of shape (R, P, 2), where R is the total number of
predicted masks and P is the number of points for each mask. The coordinates are in
the image pixel coordinate space, i.e. [0, H] x [0, W].
Returns:
point_loss (Tensor): A scalar tensor containing the loss.
"""
with torch.no_grad():
cls_agnostic_mask = mask_logits.size(1) == 1
total_num_masks = mask_logits.size(0)
gt_classes = []
gt_mask_logits = []
idx = 0
for instances_per_image in instances:
if len(instances_per_image) == 0:
continue
assert isinstance(
instances_per_image.gt_masks, BitMasks
), "Point head works with GT in 'bitmask' format. Set INPUT.MASK_FORMAT to 'bitmask'."
if not cls_agnostic_mask:
gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64)
gt_classes.append(gt_classes_per_image)
gt_bit_masks = instances_per_image.gt_masks.tensor
h, w = instances_per_image.gt_masks.image_size
scale = torch.tensor([w, h], dtype=torch.float, device=gt_bit_masks.device)
points_coord_grid_sample_format = (
points_coord[idx : idx + len(instances_per_image)] / scale
)
idx += len(instances_per_image)
gt_mask_logits.append(
point_sample(
gt_bit_masks.to(torch.float32).unsqueeze(1),
points_coord_grid_sample_format,
align_corners=False,
).squeeze(1)
)
if len(gt_mask_logits) == 0:
return mask_logits.sum() * 0
gt_mask_logits = cat(gt_mask_logits)
assert gt_mask_logits.numel() > 0, gt_mask_logits.shape
if cls_agnostic_mask:
mask_logits = mask_logits[:, 0]
else:
indices = torch.arange(total_num_masks)
gt_classes = cat(gt_classes, dim=0)
mask_logits = mask_logits[indices, gt_classes]
# Log the training accuracy (using gt classes and 0.0 threshold for the logits)
mask_accurate = (mask_logits > 0.0) == gt_mask_logits.to(dtype=torch.uint8)
mask_accuracy = mask_accurate.nonzero().size(0) / mask_accurate.numel()
get_event_storage().put_scalar("point_rend/accuracy", mask_accuracy)
point_loss = F.binary_cross_entropy_with_logits(
mask_logits, gt_mask_logits.to(dtype=torch.float32), reduction="mean"
)
return point_loss | Compute the point-based loss for instance segmentation mask predictions. Args: mask_logits (Tensor): A tensor of shape (R, C, P) or (R, 1, P) for class-specific or class-agnostic, where R is the total number of predicted masks in all images, C is the number of foreground classes, and P is the number of points sampled for each mask. The values are logits. instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. These instances are in 1:1 correspondence with the `mask_logits`. So, i_th element of the list contains R_i objects and R_1 + ... + R_N is equal to R. The ground-truth labels (class, box, mask, ...) associated with each instance are stored in fields. points_coords (Tensor): A tensor of shape (R, P, 2), where R is the total number of predicted masks and P is the number of points for each mask. The coordinates are in the image pixel coordinate space, i.e. [0, H] x [0, W]. Returns: point_loss (Tensor): A scalar tensor containing the loss.
154,911 | import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, cat
from detectron2.structures import BitMasks
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from .point_features import point_sample
POINT_HEAD_REGISTRY = Registry("POINT_HEAD")
POINT_HEAD_REGISTRY.__doc__ = """
Registry for point heads, which makes prediction for a given set of per-point features.
The registered object will be called with `obj(cfg, input_shape)`.
"""
The provided code snippet includes necessary dependencies for implementing the `build_point_head` function. Write a Python function `def build_point_head(cfg, input_channels)` to solve the following problem:
Build a point head defined by `cfg.MODEL.POINT_HEAD.NAME`.
Here is the function:
def build_point_head(cfg, input_channels):
"""
Build a point head defined by `cfg.MODEL.POINT_HEAD.NAME`.
"""
head_name = cfg.MODEL.POINT_HEAD.NAME
return POINT_HEAD_REGISTRY.get(head_name)(cfg, input_channels) | Build a point head defined by `cfg.MODEL.POINT_HEAD.NAME`. |
154,912 | from detectron2.config import CfgNode as CN
The provided code snippet includes necessary dependencies for implementing the `add_pointrend_config` function. Write a Python function `def add_pointrend_config(cfg)` to solve the following problem:
Add config for PointRend.
Here is the function:
def add_pointrend_config(cfg):
"""
Add config for PointRend.
"""
# We retry random cropping until no single category in semantic segmentation GT occupies more
# than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
# Color augmentation from SSD paper for semantic segmentation model during training.
cfg.INPUT.COLOR_AUG_SSD = False
# Names of the input feature maps to be used by a coarse mask head.
cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES = ("p2",)
cfg.MODEL.ROI_MASK_HEAD.FC_DIM = 1024
cfg.MODEL.ROI_MASK_HEAD.NUM_FC = 2
# The side size of a coarse mask head prediction.
cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION = 7
# True if point head is used.
cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = False
cfg.MODEL.POINT_HEAD = CN()
cfg.MODEL.POINT_HEAD.NAME = "StandardPointHead"
cfg.MODEL.POINT_HEAD.NUM_CLASSES = 80
# Names of the input feature maps to be used by a mask point head.
cfg.MODEL.POINT_HEAD.IN_FEATURES = ("p2",)
# Number of points sampled during training for a mask point head.
cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS = 14 * 14
# Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
# original paper.
cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO = 3
# Importance sampling parameter for PointRend point sampling during training. Parameter `beta` in
# the original paper.
cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO = 0.75
# Number of subdivision steps during inference.
cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS = 5
# Maximum number of points selected at each subdivision step (N).
cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS = 28 * 28
cfg.MODEL.POINT_HEAD.FC_DIM = 256
cfg.MODEL.POINT_HEAD.NUM_FC = 3
cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK = False
# If True, then coarse prediction features are used as input to each layer in PointRend's MLP.
cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER = True
cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME = "SemSegFPNHead" | Add config for PointRend. |
154,913 | import torch
from torch.nn import functional as F
from detectron2.layers import cat
from detectron2.structures import Boxes
The provided code snippet includes necessary dependencies for implementing the `generate_regular_grid_point_coords` function. Write a Python function `def generate_regular_grid_point_coords(R, side_size, device)` to solve the following problem:
Generate regular square grid of points in [0, 1] x [0, 1] coordinate space. Args: R (int): The number of grids to sample, one for each region. side_size (int): The side size of the regular grid. device (torch.device): Desired device of returned tensor. Returns: (Tensor): A tensor of shape (R, side_size^2, 2) that contains coordinates for the regular grids.
Here is the function:
def generate_regular_grid_point_coords(R, side_size, device):
"""
Generate regular square grid of points in [0, 1] x [0, 1] coordinate space.
Args:
R (int): The number of grids to sample, one for each region.
side_size (int): The side size of the regular grid.
device (torch.device): Desired device of returned tensor.
Returns:
(Tensor): A tensor of shape (R, side_size^2, 2) that contains coordinates
for the regular grids.
"""
aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device)
r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False)
return r.view(1, -1, 2).expand(R, -1, -1) | Generate regular square grid of points in [0, 1] x [0, 1] coordinate space. Args: R (int): The number of grids to sample, one for each region. side_size (int): The side size of the regular grid. device (torch.device): Desired device of returned tensor. Returns: (Tensor): A tensor of shape (R, side_size^2, 2) that contains coordinates for the regular grids. |
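A sketch pairing this with the point_sample wrapper defined in the neighboring PointRend rows (shapes are hypothetical): sample a feature map at a regular 7x7 grid of normalized points.
import torch
feat = torch.randn(2, 256, 14, 14)  # (N, C, H, W)
coords = generate_regular_grid_point_coords(2, 7, feat.device)  # (2, 49, 2) in [0, 1] x [0, 1]
print(point_sample(feat, coords, align_corners=False).shape)  # torch.Size([2, 256, 49])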
154,914 | import torch
from torch.nn import functional as F
from detectron2.layers import cat
from detectron2.structures import Boxes
def point_sample(input, point_coords, **kwargs):
"""
A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors.
Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside
[0, 1] x [0, 1] square.
Args:
input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid.
point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains
[0, 1] x [0, 1] normalized point coordinates.
Returns:
output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains
features for points in `point_coords`. The features are obtained via bilinear
interpolation from `input` the same way as :function:`torch.nn.functional.grid_sample`.
"""
add_dim = False
if point_coords.dim() == 3:
add_dim = True
point_coords = point_coords.unsqueeze(2)
output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
The provided code snippet includes necessary dependencies for implementing the `get_uncertain_point_coords_with_randomness` function. Write a Python function `def get_uncertain_point_coords_with_randomness( coarse_logits, uncertainty_func, num_points, oversample_ratio, importance_sample_ratio )` to solve the following problem:
Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties are calculated for each point using the 'uncertainty_func' function, which takes the point's logit prediction as input. See PointRend paper for details. Args: coarse_logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for class-specific or class-agnostic prediction. uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that contains logit predictions for P points and returns their uncertainties as a Tensor of shape (N, 1, P). num_points (int): The number of points P to sample. oversample_ratio (int): Oversampling parameter. importance_sample_ratio (float): Ratio of points that are sampled via importance sampling. Returns: point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P sampled points.
Here is the function:
def get_uncertain_point_coords_with_randomness(
coarse_logits, uncertainty_func, num_points, oversample_ratio, importance_sample_ratio
):
"""
Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties
are calculated for each point using the 'uncertainty_func' function, which takes the point's
logit prediction as input.
See PointRend paper for details.
Args:
coarse_logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for
class-specific or class-agnostic prediction.
uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that
contains logit predictions for P points and returns their uncertainties as a Tensor of
shape (N, 1, P).
num_points (int): The number of points P to sample.
oversample_ratio (int): Oversampling parameter.
importance_sample_ratio (float): Ratio of points that are sampled via importance sampling.
Returns:
point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P
sampled points.
"""
assert oversample_ratio >= 1
assert importance_sample_ratio <= 1 and importance_sample_ratio >= 0
num_boxes = coarse_logits.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(num_boxes, num_sampled, 2, device=coarse_logits.device)
point_logits = point_sample(coarse_logits, point_coords, align_corners=False)
# It is crucial to calculate uncertainty based on the sampled prediction value for the points.
# Calculating uncertainties of the coarse predictions first and sampling them for points leads
# to incorrect results.
# To illustrate this: assume uncertainty_func(logits)=-abs(logits), a sampled point between
# two coarse predictions with -1 and 1 logits has 0 logits, and therefore 0 uncertainty value.
# However, if we calculate uncertainties for the coarse predictions first,
# both will have -1 uncertainty, and the sampled point will get -1 uncertainty.
point_uncertainties = uncertainty_func(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(num_boxes, dtype=torch.long, device=coarse_logits.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
num_boxes, num_uncertain_points, 2
)
if num_random_points > 0:
point_coords = cat(
[
point_coords,
torch.rand(num_boxes, num_random_points, 2, device=coarse_logits.device),
],
dim=1,
)
return point_coords | Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties are calculated for each point using the 'uncertainty_func' function, which takes the point's logit prediction as input. See PointRend paper for details. Args: coarse_logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for class-specific or class-agnostic prediction. uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that contains logit predictions for P points and returns their uncertainties as a Tensor of shape (N, 1, P). num_points (int): The number of points P to sample. oversample_ratio (int): Oversampling parameter. importance_sample_ratio (float): Ratio of points that are sampled via importance sampling. Returns: point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P sampled points.
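A shape-check sketch with made-up inputs, using -|logit| as the uncertainty so points near the decision boundary score highest:
import torch
coarse = torch.randn(8, 1, 14, 14)  # class-agnostic coarse mask logits
pts = get_uncertain_point_coords_with_randomness(coarse, lambda logits: -torch.abs(logits), 196, 3, 0.75)
print(pts.shape)  # torch.Size([8, 196, 2])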
154,915 | import torch
from torch.nn import functional as F
from detectron2.layers import cat
from detectron2.structures import Boxes
The provided code snippet includes necessary dependencies for implementing the `get_uncertain_point_coords_on_grid` function. Write a Python function `def get_uncertain_point_coords_on_grid(uncertainty_map, num_points)` to solve the following problem:
Find `num_points` most uncertain points from `uncertainty_map` grid. Args: uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty values for a set of points on a regular H x W grid. num_points (int): The number of points P to select. Returns: point_indices (Tensor): A tensor of shape (N, P) that contains indices from [0, H x W) of the most uncertain points. point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the H x W grid.
Here is the function:
def get_uncertain_point_coords_on_grid(uncertainty_map, num_points):
"""
Find `num_points` most uncertain points from `uncertainty_map` grid.
Args:
uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty
values for a set of points on a regular H x W grid.
num_points (int): The number of points P to select.
Returns:
point_indices (Tensor): A tensor of shape (N, P) that contains indices from
[0, H x W) of the most uncertain points.
point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized
coordinates of the most uncertain points from the H x W grid.
"""
R, _, H, W = uncertainty_map.shape
h_step = 1.0 / float(H)
w_step = 1.0 / float(W)
num_points = min(H * W, num_points)
point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1]
point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device)
point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step
point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step
return point_indices, point_coords | Find `num_points` most uncertain points from `uncertainty_map` grid. Args: uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty values for a set of points on a regular H x W grid. num_points (int): The number of points P to select. Returns: point_indices (Tensor): A tensor of shape (N, P) that contains indices from [0, H x W) of the most uncertain points. point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the H x W grid. |
154,916 | import torch
from torch.nn import functional as F
from detectron2.layers import cat
from detectron2.structures import Boxes
def point_sample(input, point_coords, **kwargs):
"""
A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors.
Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside
[0, 1] x [0, 1] square.
Args:
input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid.
point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains
[0, 1] x [0, 1] normalized point coordinates.
Returns:
output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains
features for points in `point_coords`. The features are obtained via bilinear
interplation from `input` the same way as :function:`torch.nn.functional.grid_sample`.
"""
add_dim = False
if point_coords.dim() == 3:
add_dim = True
point_coords = point_coords.unsqueeze(2)
output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
def get_point_coords_wrt_image(boxes_coords, point_coords):
"""
Convert box-normalized [0, 1] x [0, 1] point coordinates to image-level coordinates.
Args:
boxes_coords (Tensor): A tensor of shape (R, 4) that contains bounding box coordinates.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
Returns:
point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains
image-normalized coordinates of P sampled points.
"""
with torch.no_grad():
point_coords_wrt_image = point_coords.clone()
point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * (
boxes_coords[:, None, 2] - boxes_coords[:, None, 0]
)
point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * (
boxes_coords[:, None, 3] - boxes_coords[:, None, 1]
)
point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0]
point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1]
return point_coords_wrt_image
The provided code snippet includes necessary dependencies for implementing the `point_sample_fine_grained_features` function. Write a Python function `def point_sample_fine_grained_features(features_list, feature_scales, boxes, point_coords)` to solve the following problem:
Get features from feature maps in `features_list` that correspond to specific point coordinates inside each bounding box from `boxes`. Args: features_list (list[Tensor]): A list of feature map tensors to get features from. feature_scales (list[float]): A list of scales for tensors in `features_list`. boxes (list[Boxes]): A list of I Boxes objects that contain R_1 + ... + R_I = R boxes all together. point_coords (Tensor): A tensor of shape (R, P, 2) that contains [0, 1] x [0, 1] box-normalized coordinates of the P sampled points. Returns: point_features (Tensor): A tensor of shape (R, C, P) that contains features sampled from all features maps in feature_list for P sampled points for all R boxes in `boxes`. point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains image-level coordinates of P points.
Here is the function:
def point_sample_fine_grained_features(features_list, feature_scales, boxes, point_coords):
"""
Get features from feature maps in `features_list` that correspond to specific point coordinates
inside each bounding box from `boxes`.
Args:
features_list (list[Tensor]): A list of feature map tensors to get features from.
feature_scales (list[float]): A list of scales for tensors in `features_list`.
boxes (list[Boxes]): A list of I Boxes objects that contain R_1 + ... + R_I = R boxes all
together.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
Returns:
point_features (Tensor): A tensor of shape (R, C, P) that contains features sampled
from all features maps in feature_list for P sampled points for all R boxes in `boxes`.
point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains image-level
coordinates of P points.
"""
cat_boxes = Boxes.cat(boxes)
num_boxes = [len(b) for b in boxes]
point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)
split_point_coords_wrt_image = torch.split(point_coords_wrt_image, num_boxes)
point_features = []
for idx_img, point_coords_wrt_image_per_image in enumerate(split_point_coords_wrt_image):
point_features_per_image = []
for idx_feature, feature_map in enumerate(features_list):
h, w = feature_map.shape[-2:]
scale = torch.tensor([w, h], device=feature_map.device) / feature_scales[idx_feature]
point_coords_scaled = point_coords_wrt_image_per_image / scale
point_features_per_image.append(
point_sample(
feature_map[idx_img].unsqueeze(0),
point_coords_scaled.unsqueeze(0),
align_corners=False,
)
.squeeze(0)
.transpose(1, 0)
)
point_features.append(cat(point_features_per_image, dim=1))
return cat(point_features, dim=0), point_coords_wrt_image | Get features from feature maps in `features_list` that correspond to specific point coordinates inside each bounding box from `boxes`. Args: features_list (list[Tensor]): A list of feature map tensors to get features from. feature_scales (list[float]): A list of scales for tensors in `features_list`. boxes (list[Boxes]): A list of I Boxes objects that contain R_1 + ... + R_I = R boxes all together. point_coords (Tensor): A tensor of shape (R, P, 2) that contains [0, 1] x [0, 1] box-normalized coordinates of the P sampled points. Returns: point_features (Tensor): A tensor of shape (R, C, P) that contains features sampled from all features maps in feature_list for P sampled points for all R boxes in `boxes`. point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains image-level coordinates of P points. |
154,918 | import numpy as np
import torch
from detectron2.layers import ShapeSpec, cat, interpolate
from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.mask_head import (
build_mask_head,
mask_rcnn_inference,
mask_rcnn_loss,
)
from detectron2.modeling.roi_heads.roi_heads import select_foreground_proposals
from .point_features import (
generate_regular_grid_point_coords,
get_uncertain_point_coords_on_grid,
get_uncertain_point_coords_with_randomness,
point_sample,
point_sample_fine_grained_features,
)
from .point_head import build_point_head, roi_mask_point_loss
The provided code snippet includes necessary dependencies for implementing the `calculate_uncertainty` function. Write a Python function `def calculate_uncertainty(logits, classes)` to solve the following problem:
We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the foreground class in `classes`. Args: logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is the number of foreground classes. The values are logits. classes (list): A list of length R that contains either predicted or ground truth class for each predicted mask. Returns: scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most uncertain locations having the highest uncertainty score.
Here is the function:
def calculate_uncertainty(logits, classes):
"""
    We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
        classes (list): A list of length R that contains either predicted or ground truth class
            for each predicted mask.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
if logits.shape[1] == 1:
gt_class_logits = logits.clone()
else:
gt_class_logits = logits[
torch.arange(logits.shape[0], device=logits.device), classes
].unsqueeze(1)
    return -(torch.abs(gt_class_logits)) | We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the foreground class in `classes`. Args: logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is the number of foreground classes. The values are logits. classes (list): A list of length R that contains either predicted or ground truth class for each predicted mask. Returns: scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most uncertain locations having the highest uncertainty score.
154,919 | import argparse
from concurrent.futures import ProcessPoolExecutor
from dotmap import DotMap
import glob
import json
import os
import os.path as osp
import shutil
import sys
from time import time
import bpy
from mathutils import Vector
import numpy as np
from numpy.random import Generator, MT19937, SeedSequence
The provided code snippet includes necessary dependencies for implementing the `hide_objects` function. Write a Python function `def hide_objects(obj_names)` to solve the following problem:
Hide objects with names given in `object_names`
Here is the function:
def hide_objects(obj_names):
"""Hide objects with names given in `object_names`"""
bpy.ops.object.select_all(action="DESELECT")
for obj in bpy.data.objects:
obj.select_set(obj.name in obj_names)
for sel in bpy.context.selected_objects:
sel.hide = True | Hide objects with names given in `object_names` |
154,920 | import argparse
from concurrent.futures import ProcessPoolExecutor
from dotmap import DotMap
import glob
import json
import os
import os.path as osp
import shutil
import sys
from time import time
import bpy
from mathutils import Vector
import numpy as np
from numpy.random import Generator, MT19937, SeedSequence
def add_lamps():
bpy.ops.object.light_add(type="SUN", location=(6, 2, 5))
lamp = bpy.context.object
lamp.rotation_euler = (-0.5, 0.5, 0)
bpy.ops.object.light_add(type="SUN", location=(6, -2, 5))
lamp = bpy.context.object
lamp.rotation_euler = (-0.5, -0.5, 0)
def add_light_env(filepath, strength=1, rot_vec_rad=(0, 0, 0), scale=(1, 1, 1)):
"""Add an HDRI as the environment map for lighting.
Can only use if using CYCLES as rendering engine."""
engine = bpy.context.scene.render.engine
assert engine == "CYCLES", "Rendering engine is not Cycles"
bpy.data.images.load(filepath, check_existing=True)
env = bpy.data.images[osp.basename(filepath)]
world = bpy.context.scene.world
world.use_nodes = True
node_tree = world.node_tree
nodes = node_tree.nodes
links = node_tree.links
bg_node = nodes.new("ShaderNodeBackground")
links.new(bg_node.outputs["Background"], nodes["World Output"].inputs["Surface"])
# Environment map
texcoord_node = nodes.new("ShaderNodeTexCoord")
env_node = nodes.new("ShaderNodeTexEnvironment")
env_node.image = env
mapping_node = nodes.new("ShaderNodeMapping")
links.new(texcoord_node.outputs["Generated"], mapping_node.inputs["Vector"])
links.new(mapping_node.outputs["Vector"], env_node.inputs["Vector"])
links.new(env_node.outputs["Color"], bg_node.inputs["Color"])
bg_node.inputs["Strength"].default_value = strength
print_info("LIGHT STRENGTH:", strength)
def set_cycles(args):
"""Set up PBR rendering with the CYCLES rendering engine.
More photorealistic, much slower."""
scene = bpy.context.scene
scene.render.engine = "CYCLES"
cycles = scene.cycles
cycles.use_progressive_refine = True
cycles.samples = args.n_samples
cycles.max_bounces = 8
cycles.caustics_reflective = True
cycles.caustics_refractive = False
cycles.diffuse_bounces = 8
cycles.glossy_bounces = 4
cycles.volume_bounces = 0
# Avoid grainy renderings (fireflies)
world = bpy.data.worlds["World"]
world.cycles.sample_as_light = True
cycles.blur_glossy = 2.0
cycles.sample_clamp_indirect = 10.0
world.use_nodes = True
if args.use_gpu:
if args.gpus is not None:
select_devices("CUDA", args.gpus)
bpy.context.preferences.addons[
"cycles"
].preferences.compute_device_type = "CUDA"
bpy.context.scene.cycles.device = "GPU"
# XXX the following needs to be called to register preference update
devices = bpy.context.preferences.addons["cycles"].preferences.get_devices()
bpy.context.scene.render.use_persistent_data = True
# so we don't have to recompute the MIS map for the same world layer
bpy.context.scene.world.cycles.sampling_method = "MANUAL"
bpy.context.scene.world.cycles.sample_map_resolution = 1024
bpy.context.scene.view_layers["View Layer"].cycles.use_denoising = True
scene.render.tile_x = 256 if args.use_gpu else 16
scene.render.tile_y = 256 if args.use_gpu else 16
scene.render.resolution_x = args.res
scene.render.resolution_y = args.res
scene.render.resolution_percentage = 100
scene.render.use_file_extension = True
scene.render.image_settings.file_format = "PNG"
scene.render.image_settings.color_depth = str(args.color_depth)
def set_eevee(args):
"""Set up the render for the Blender Eevee rendering engine.
Rendering this way is NOT PBR, but is much faster."""
scene = bpy.context.scene
scene.render.engine = "BLENDER_EEVEE"
print_info(scene.render.engine)
args.render_bg = False
scene.render.resolution_x = args.res
scene.render.resolution_y = args.res
scene.render.resolution_percentage = 100
scene.render.use_file_extension = True
scene.render.image_settings.file_format = "PNG"
scene.render.image_settings.color_depth = str(args.color_depth)
def delete_objects(obj_names):
"""Delete objects with names given in `object_names`"""
for obj in bpy.data.objects:
obj.select_set(obj.name in obj_names)
bpy.ops.object.delete()
# Remove meshes, textures, materials, etc to avoid memory leaks.
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
bpy.context.view_layer.update()
def setup_global_render(args):
"""Set the rendering settings over all instances"""
scene = bpy.context.scene
scene.render.filepath = "/tmp/{}".format(time()) # throw away the composite
_add_object_output(scene)
scene.render.film_transparent = True
if args.render_bg: # render bg separately
_add_background_output(scene)
if args.render_alpha:
_add_alpha_output(scene)
if args.render_depth:
_add_depth_output(scene)
def _add_background_layer():
scene = bpy.context.scene
bpy.ops.scene.view_layer_add()
print(scene.view_layers.keys())
new_layer_name = [key for key in scene.view_layers.keys() if key.endswith(".001")][
0
]
bg_view_layer = scene.view_layers[new_layer_name]
bg_view_layer.name = "Background Layer"
bg_view_layer.use_ao = False
bg_view_layer.use_solid = False
bg_view_layer.use_strand = False
bg_view_layer.use_pass_combined = False
bg_view_layer.use_pass_z = False
bg_view_layer.use_pass_environment = True
bpy.context.window.view_layer = scene.view_layers["View Layer"]
# make new render layers and output node
bg_render_layers = scene.node_tree.nodes.new(type="CompositorNodeRLayers")
bg_render_layers.name = "Background Render Layers"
bg_render_layers.layer = bg_view_layer.name
The provided code snippet includes necessary dependencies for implementing the `global_setup` function. Write a Python function `def global_setup(args)` to solve the following problem:
Set up the scene and lighting to render for all instances in this process.
Here is the function:
def global_setup(args):
"""Set up the scene and lighting to render for all instances in this process."""
delete_objects([obj.name for obj in bpy.data.objects])
bpy.context.scene.use_nodes = True
if args.use_pbr:
set_cycles(args)
if args.light_env is not None:
add_light_env(args.light_env, args.light_strength)
_add_background_layer()
else:
set_eevee(args)
add_lamps()
setup_global_render(args) | Set up the scene and lighting to render for all instances in this process. |
154,921 | import argparse
from concurrent.futures import ProcessPoolExecutor
from dotmap import DotMap
import glob
import json
import os
import os.path as osp
import shutil
import sys
from time import time
import bpy
from mathutils import Vector
import numpy as np
from numpy.random import Generator, MT19937, SeedSequence
def _add_compositing(scene):
tree = scene.node_tree
alpha_node = tree.nodes.new("CompositorNodeAlphaOver")
composite_node = tree.nodes["Composite"]
tree.links.new(
tree.nodes["Render Layers"].outputs["Image"], alpha_node.inputs[1]
) # image 1
tree.links.new(
tree.nodes["Background Render Layers"].outputs["Image"], alpha_node.inputs[2]
) # image 2
tree.links.new(alpha_node.outputs["Image"], composite_node.inputs["Image"]) | null |
154,922 | import argparse
from concurrent.futures import ProcessPoolExecutor
from dotmap import DotMap
import glob
import json
import os
import os.path as osp
import shutil
import sys
from time import time
import bpy
from mathutils import Vector
import numpy as np
from numpy.random import Generator, MT19937, SeedSequence
def parse_args():
# Blender assumes all arguments before ' -- ' are Blender arguments.
argv = sys.argv
if "--" in argv:
argv = argv[argv.index("--") + 1 :]
else:
argv = []
# Object IDs taken from the ShapeNet category JSON
OBJ_IDS = dict(
table="04379243",
chair="03001627",
mug="03797390",
bench="02828884",
lamp="03636649",
bowl="02880940",
)
parser = argparse.ArgumentParser()
parser.add_argument(
"--out_dir", required=True, help="Where to write the rendered images"
)
parser.add_argument(
"--src_model_dir",
required=True,
help="Directory where ShapeNet models are stored",
)
parser.add_argument(
"--object",
choices=OBJ_IDS.keys(),
default="chair",
help="Which ShapeNet class to use",
)
parser.add_argument(
"--model_path",
type=str,
default="models/model_normalized.obj",
help="Path to model, inside an instance of the ShapeNet class directory",
)
parser.add_argument(
"--val_frac",
type=float,
default=0.2,
help="Fraction of instances to use as validation",
)
parser.add_argument(
"--test_frac",
type=float,
default=0.2,
help="Fraction of instances to use as test",
)
parser.add_argument(
"--split",
choices=["train", "val", "test"],
default="train",
help="Which split to render",
)
parser.add_argument(
"--n_views", type=int, default=20, help="Number of views to render per instance"
)
parser.add_argument(
"--start_idx",
type=int,
default=0,
help="If rendering a subset of the instances, starting instance to render.",
)
parser.add_argument(
"--end_idx",
type=int,
default=-1,
help="If rendering a subset of the instances, ending instance to render.",
)
parser.add_argument(
"--n_objects", type=int, default=1, help="number of objects in scene"
)
parser.add_argument("--use_pbr", action="store_true", help="Whether to render with physically based rendering (Blender Cycles) or not.")
parser.add_argument(
"--light_env",
default=None,
help="If using PBR rendering and an HDRI light map, the path to the HDRI",
)
parser.add_argument(
"--light_strength",
type=float,
default=3,
help="If using HDRI light map, HDRI strength",
)
parser.add_argument(
"--render_alpha", action="store_true", help="select to render the object masks"
)
parser.add_argument(
"--render_depth", action="store_true", help="select to render the depth map"
)
parser.add_argument(
"--render_bg",
action="store_true",
help="select to render the background layer",
)
parser.add_argument(
"--res", type=int, default=128, help="Output resolution of images (res x res), default 128"
)
parser.add_argument(
"--n_samples", type=int, default=128, help="Number of anti-aliasing samples, default 128"
)
parser.add_argument(
"--color_depth", type=int, default=16, help="Color depth of images (default 16)"
)
parser.add_argument(
"--use_gpu",
action="store_true",
default=False,
help="number of views to be rendered",
)
parser.add_argument(
"--gpus",
nargs="*",
type=int,
help="number of views to be rendered",
)
parser.add_argument("--overwrite", action="store_true", default=False, help="Overwrite existing renders")
parser.add_argument("--pool", action="store_true", default=False, help="Render in parallel. Improves performance.")
args = parser.parse_args(argv)
obj_id = OBJ_IDS[args.object]
args.src_model_dir = osp.join(args.src_model_dir, obj_id)
args.out_dir = osp.join(
args.out_dir, "{}_{}obj".format(obj_id, args.n_objects), args.split
)
print(args)
return args | null |
154,923 | import argparse
from concurrent.futures import ProcessPoolExecutor
from dotmap import DotMap
import glob
import json
import os
import os.path as osp
import shutil
import sys
from time import time
import bpy
from mathutils import Vector
import numpy as np
from numpy.random import Generator, MT19937, SeedSequence
def render_views(
args,
model_dirs,
rng,
):
"""Render the model with the specified viewpoint."""
start = time()
assert len(model_dirs) >= 1
out_dir = osp.join(args.out_dir, osp.basename(model_dirs[0]))
print_info(out_dir, osp.isdir(out_dir))
if (
osp.isdir(out_dir)
and len(os.listdir(out_dir)) >= args.n_views
and not args.overwrite
):
print_info("images already written for {}".format(out_dir))
return False
os.makedirs(out_dir, exist_ok=True)
print_info("saving outputs to {}".format(out_dir))
start = time()
objs, camera, track_to, view_dist, lookat = setup_scene(args, model_dirs)
print_info("VIEW_DIST", view_dist)
frames = []
files = []
pitch_range = [0, np.deg2rad(80)]
euler_zs = 6 * np.pi * np.arange(args.n_views) / args.n_views
if args.split == "train":
# if training, we use binned uniform views around the hemisphere
# and add bounded random noise to camera location
euler_xs = rng.uniform(*pitch_range, size=(args.n_views,))
euler_zs += rng.uniform(np.pi / args.n_views, size=(args.n_views,))
else:
# if val or test, we use the Archimedes spiral introduced by SRN
euler_xs = np.arange(args.n_views) / args.n_views * np.diff(pitch_range)
for i in range(args.n_views):
rot_euler = np.array([euler_xs[i], 0, euler_zs[i]])
track_to.rotation_euler = rot_euler
filepath = osp.join(out_dir, "view_{:03d}".format(i))
files.extend(_render_single(filepath, camera, args))
# NOTE: camera matrix must be written AFTER render because view layer is updated lazily
camera_matrix = np.array(camera.matrix_world).tolist()
frame_data = DotMap(transform_matrix=camera_matrix)
frame_data.file_path = filepath
frames.append(frame_data)
_move_files(out_dir, files)
delete_objects([obj.name for obj in objs])
out_data = DotMap(frames=frames)
out_data.model_ids = [osp.basename(name) for name in model_dirs]
out_data.camera_angle_x = camera.data.angle_x
with open(osp.join(out_dir, "transforms.json"), "w") as f:
json.dump(out_data, f, indent=1, separators=(",", ":"))
delta = time() - start
print_info("rendering {} took {} seconds".format(model_dirs[0], delta))
print_info("time to render {}: {}".format(model_dirs[0], time() - start))
return True
def get_split(args):
object_dir = args.src_model_dir
val_frac = args.val_frac
test_frac = args.test_frac
models_all = [
subd for subd in glob.glob("{}/*".format(object_dir)) if osp.isdir(subd)
]
n_total = len(models_all)
print("total models in {}: {}".format(object_dir, n_total))
n_val = int(val_frac * n_total)
n_test = int(test_frac * n_total)
n_train = n_total - (n_val + n_test)
train_split_path = osp.join(object_dir, "train_split_{}.txt".format(n_train))
val_split_path = osp.join(object_dir, "val_split_{}.txt".format(n_val))
test_split_path = osp.join(object_dir, "test_split_{}.txt".format(n_test))
if (
osp.isfile(train_split_path)
and osp.isfile(val_split_path)
and osp.isfile(test_split_path)
):
print(
"splits {}, {}, {} already exist".format(
train_split_path, val_split_path, test_split_path
)
)
else:
val_end = n_train + n_val
permute = np.random.permutation(n_total)
train_models = [models_all[i] for i in permute[:n_train]]
val_models = [models_all[i] for i in permute[n_train:val_end]]
test_models = [models_all[i] for i in permute[val_end:]]
with open(train_split_path, "w") as f:
f.write("\n".join(train_models))
with open(val_split_path, "w") as f:
f.write("\n".join(val_models))
with open(test_split_path, "w") as f:
f.write("\n".join(test_models))
print(
"wrote splits to {}, {}, {}".format(
train_split_path, val_split_path, test_split_path
)
)
if args.split == "train":
return _load_split_txt(train_split_path)
elif args.split == "val":
return _load_split_txt(val_split_path)
elif args.split == "test":
return _load_split_txt(test_split_path)
else:
raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `_main_sequential` function. Write a Python function `def _main_sequential(args)` to solve the following problem:
Render everything in a single process
Here is the function:
def _main_sequential(args):
"""Render everything in a single process"""
model_dirs = get_split(args)
end_idx = args.end_idx if args.end_idx > 0 else len(model_dirs)
rng = np.random.default_rng(seed=9)
for model_dir in model_dirs[args.start_idx : end_idx]:
sel_dirs = [model_dir]
for _ in range(args.n_objects - 1):
sel_dirs.append(rng.choice(model_dirs))
render_views(args, sel_dirs, rng) | Render everything in a single process |
154,924 | import argparse
from concurrent.futures import ProcessPoolExecutor
from dotmap import DotMap
import glob
import json
import os
import os.path as osp
import shutil
import sys
from time import time
import bpy
from mathutils import Vector
import numpy as np
from numpy.random import Generator, MT19937, SeedSequence
def render_views(
args,
model_dirs,
rng,
):
"""Render the model with the specified viewpoint."""
start = time()
assert len(model_dirs) >= 1
out_dir = osp.join(args.out_dir, osp.basename(model_dirs[0]))
print_info(out_dir, osp.isdir(out_dir))
if (
osp.isdir(out_dir)
and len(os.listdir(out_dir)) >= args.n_views
and not args.overwrite
):
print_info("images already written for {}".format(out_dir))
return False
os.makedirs(out_dir, exist_ok=True)
print_info("saving outputs to {}".format(out_dir))
start = time()
objs, camera, track_to, view_dist, lookat = setup_scene(args, model_dirs)
print_info("VIEW_DIST", view_dist)
frames = []
files = []
pitch_range = [0, np.deg2rad(80)]
euler_zs = 6 * np.pi * np.arange(args.n_views) / args.n_views
if args.split == "train":
# if training, we use binned uniform views around the hemisphere
# and add bounded random noise to camera location
euler_xs = rng.uniform(*pitch_range, size=(args.n_views,))
euler_zs += rng.uniform(np.pi / args.n_views, size=(args.n_views,))
else:
# if val or test, we use the Archimedes spiral introduced by SRN
euler_xs = np.arange(args.n_views) / args.n_views * np.diff(pitch_range)
for i in range(args.n_views):
rot_euler = np.array([euler_xs[i], 0, euler_zs[i]])
track_to.rotation_euler = rot_euler
filepath = osp.join(out_dir, "view_{:03d}".format(i))
files.extend(_render_single(filepath, camera, args))
# NOTE: camera matrix must be written AFTER render because view layer is updated lazily
camera_matrix = np.array(camera.matrix_world).tolist()
frame_data = DotMap(transform_matrix=camera_matrix)
frame_data.file_path = filepath
frames.append(frame_data)
_move_files(out_dir, files)
delete_objects([obj.name for obj in objs])
out_data = DotMap(frames=frames)
out_data.model_ids = [osp.basename(name) for name in model_dirs]
out_data.camera_angle_x = camera.data.angle_x
with open(osp.join(out_dir, "transforms.json"), "w") as f:
json.dump(out_data, f, indent=1, separators=(",", ":"))
delta = time() - start
print_info("rendering {} took {} seconds".format(model_dirs[0], delta))
print_info("time to render {}: {}".format(model_dirs[0], time() - start))
return True
def get_split(args):
object_dir = args.src_model_dir
val_frac = args.val_frac
test_frac = args.test_frac
models_all = [
subd for subd in glob.glob("{}/*".format(object_dir)) if osp.isdir(subd)
]
n_total = len(models_all)
print("total models in {}: {}".format(object_dir, n_total))
n_val = int(val_frac * n_total)
n_test = int(test_frac * n_total)
n_train = n_total - (n_val + n_test)
train_split_path = osp.join(object_dir, "train_split_{}.txt".format(n_train))
val_split_path = osp.join(object_dir, "val_split_{}.txt".format(n_val))
test_split_path = osp.join(object_dir, "test_split_{}.txt".format(n_test))
if (
osp.isfile(train_split_path)
and osp.isfile(val_split_path)
and osp.isfile(test_split_path)
):
print(
"splits {}, {}, {} already exist".format(
train_split_path, val_split_path, test_split_path
)
)
else:
val_end = n_train + n_val
permute = np.random.permutation(n_total)
train_models = [models_all[i] for i in permute[:n_train]]
val_models = [models_all[i] for i in permute[n_train:val_end]]
test_models = [models_all[i] for i in permute[val_end:]]
with open(train_split_path, "w") as f:
f.write("\n".join(train_models))
with open(val_split_path, "w") as f:
f.write("\n".join(val_models))
with open(test_split_path, "w") as f:
f.write("\n".join(test_models))
print(
"wrote splits to {}, {}, {}".format(
train_split_path, val_split_path, test_split_path
)
)
if args.split == "train":
return _load_split_txt(train_split_path)
elif args.split == "val":
return _load_split_txt(val_split_path)
elif args.split == "test":
return _load_split_txt(test_split_path)
else:
raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `_main_parallel` function. Write a Python function `def _main_parallel(args)` to solve the following problem:
Spawn child processes after global setup to speed up rendering
Here is the function:
def _main_parallel(args):
"""Spawn child processes after global setup to speed up rendering"""
model_dirs = get_split(args)
end_idx = args.end_idx if args.end_idx > 0 else len(model_dirs)
n_instances = end_idx - args.start_idx
# need to pass in separate RNGs into the child processes.
seed_gen = SeedSequence(9)
rngs = [Generator(MT19937(sg)) for sg in seed_gen.spawn(n_instances)]
futures = []
with ProcessPoolExecutor(max_workers=16) as executor:
for model_dir, rng in zip(model_dirs[args.start_idx : end_idx], rngs):
sel_dirs = [model_dir]
for _ in range(args.n_objects - 1):
sel_dirs.append(rng.choice(model_dirs))
futures.append(
executor.submit(
render_views,
args,
sel_dirs,
rng,
)
)
for future in futures:
_ = future.result() | Spawn child processes after global setup to speed up rendering |
154,925 | import os
import os.path as osp
import argparse
import skimage.measure
from tqdm import tqdm
import warnings
import lpips
import numpy as np
import torch
import imageio
import json
args = parser.parse_args()
if args.dataset_format == "dvr":
list_name = args.list_name + ".lst"
img_dir_name = "image"
elif args.dataset_format == "srn":
list_name = ""
img_dir_name = "rgb"
elif args.dataset_format == "nerf":
warnings.warn("test split not implemented for NeRF synthetic data format")
list_name = ""
img_dir_name = ""
else:
raise NotImplementedError("Not supported data format " + args.dataset_format)
data_root = args.datadir
render_root = args.output
def run_map():
if args.multicat:
cats = os.listdir(data_root)
def fmt_obj_name(c, x):
return c + "_" + x
else:
cats = ["."]
def fmt_obj_name(c, x):
return x
use_exclude_lut = len(args.viewlist) > 0
if use_exclude_lut:
print("Excluding views from list", args.viewlist)
with open(args.viewlist, "r") as f:
tmp = [x.strip().split() for x in f.readlines()]
exclude_lut = {
x[0] + "/" + x[1]: torch.tensor(list(map(int, x[2:])), dtype=torch.long)
for x in tmp
}
base_exclude_views = list(map(int, args.primary.split()))
if args.exclude_dtu_bad:
base_exclude_views.extend(
[3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 36, 37, 38, 39]
)
if args.eval_view_list is not None:
with open(args.eval_view_list, "r") as f:
eval_views = list(map(int, f.readline().split()))
print("Only using views", eval_views)
else:
eval_views = None
all_objs = []
print("CATEGORICAL SUMMARY")
total_objs = 0
for cat in cats:
cat_root = osp.join(data_root, cat)
if not osp.isdir(cat_root):
continue
objs = sorted([x for x in os.listdir(cat_root)])
if len(list_name) > 0:
list_path = osp.join(cat_root, list_name)
with open(list_path, "r") as f:
split = set([x.strip() for x in f.readlines()])
objs = [x for x in objs if x in split]
objs_rend = [osp.join(render_root, fmt_obj_name(cat, x)) for x in objs]
objs = [osp.join(cat_root, x) for x in objs]
objs = [x for x in objs if osp.isdir(x)]
objs = list(zip(objs, objs_rend))
objs_avail = [x for x in objs if osp.exists(x[1])]
print(cat, "TOTAL", len(objs), "AVAILABLE", len(objs_avail))
# assert len(objs) == len(objs_avail)
total_objs += len(objs)
all_objs.extend(objs_avail)
print(">>> USING", len(all_objs), "OF", total_objs, "OBJECTS")
cuda = "cuda:" + str(args.gpu_id)
lpips_vgg = lpips.LPIPS(net="vgg").to(device=cuda)
def get_metrics(rgb, gt):
ssim = skimage.measure.compare_ssim(rgb, gt, multichannel=True, data_range=1)
psnr = skimage.measure.compare_psnr(rgb, gt, data_range=1)
return psnr, ssim
def isimage(path):
ext = osp.splitext(path)[1]
return ext == ".jpg" or ext == ".png"
def process_obj(path, rend_path):
if len(img_dir_name) > 0:
im_root = osp.join(path, img_dir_name)
else:
im_root = path
out_path = osp.join(rend_path, "metrics.txt")
if osp.exists(out_path) and not args.overwrite:
return
ims = [x for x in sorted(os.listdir(im_root)) if isimage(x)]
psnr_avg = 0.0
ssim_avg = 0.0
gts = []
preds = []
num_ims = 0
if use_exclude_lut:
lut_key = osp.basename(rend_path).replace("_", "/")
exclude_views = exclude_lut[lut_key]
else:
exclude_views = []
exclude_views.extend(base_exclude_views)
for im_name in ims:
im_path = osp.join(im_root, im_name)
im_name_id = int(osp.splitext(im_name)[0])
im_name_out = "{:06}.png".format(im_name_id)
im_rend_path = osp.join(rend_path, im_name_out)
if osp.exists(im_rend_path) and im_name_id not in exclude_views:
if eval_views is not None and im_name_id not in eval_views:
continue
gt = imageio.imread(im_path).astype(np.float32)[..., :3] / 255.0
pred = imageio.imread(im_rend_path).astype(np.float32) / 255.0
psnr, ssim = get_metrics(pred, gt)
psnr_avg += psnr
ssim_avg += ssim
gts.append(torch.from_numpy(gt).permute(2, 0, 1) * 2.0 - 1.0)
preds.append(torch.from_numpy(pred).permute(2, 0, 1) * 2.0 - 1.0)
num_ims += 1
gts = torch.stack(gts)
preds = torch.stack(preds)
lpips_all = []
preds_spl = torch.split(preds, args.lpips_batch_size, dim=0)
gts_spl = torch.split(gts, args.lpips_batch_size, dim=0)
with torch.no_grad():
for predi, gti in zip(preds_spl, gts_spl):
lpips_i = lpips_vgg(predi.to(device=cuda), gti.to(device=cuda))
lpips_all.append(lpips_i)
lpips = torch.cat(lpips_all)
lpips = lpips.mean().item()
psnr_avg /= num_ims
ssim_avg /= num_ims
out_txt = "psnr {}\nssim {}\nlpips {}".format(psnr_avg, ssim_avg, lpips)
with open(out_path, "w") as f:
f.write(out_txt)
for obj_path, obj_rend_path in tqdm(all_objs):
process_obj(obj_path, obj_rend_path) | null |
154,926 | import os
import os.path as osp
import argparse
import skimage.measure
from tqdm import tqdm
import warnings
import lpips
import numpy as np
import torch
import imageio
import json
args = parser.parse_args()
if args.dataset_format == "dvr":
list_name = args.list_name + ".lst"
img_dir_name = "image"
elif args.dataset_format == "srn":
list_name = ""
img_dir_name = "rgb"
elif args.dataset_format == "nerf":
warnings.warn("test split not implemented for NeRF synthetic data format")
list_name = ""
img_dir_name = ""
else:
raise NotImplementedError("Not supported data format " + args.dataset_format)
render_root = args.output
def run_reduce():
if args.multicat:
meta = json.load(open(osp.join(args.datadir, args.metadata), "r"))
cats = sorted(list(meta.keys()))
cat_description = {cat: meta[cat]["name"].split(",")[0] for cat in cats}
all_objs = []
objs = [x for x in os.listdir(render_root)]
objs = [osp.join(render_root, x) for x in objs if x[0] != "_"]
objs = [x for x in objs if osp.isdir(x)]
if args.dtu_sort:
objs = sorted(objs, key=lambda x: int(x[x.rindex("/") + 5 :]))
else:
objs = sorted(objs)
all_objs.extend(objs)
print(">>> PROCESSING", len(all_objs), "OBJECTS")
METRIC_NAMES = ["psnr", "ssim", "lpips"]
out_metrics_path = osp.join(render_root, "all_metrics.txt")
if args.multicat:
cat_sz = {}
for cat in cats:
cat_sz[cat] = 0
all_metrics = {}
for name in METRIC_NAMES:
if args.multicat:
for cat in cats:
all_metrics[cat + "." + name] = 0.0
all_metrics[name] = 0.0
should_print_all_objs = len(all_objs) < 100
for obj_root in tqdm(all_objs):
metrics_path = osp.join(obj_root, "metrics.txt")
with open(metrics_path, "r") as f:
metrics = [line.split() for line in f.readlines()]
if args.multicat:
cat_name = osp.basename(obj_root).split("_")[0]
cat_sz[cat_name] += 1
for metric, val in metrics:
all_metrics[cat_name + "." + metric] += float(val)
for metric, val in metrics:
all_metrics[metric] += float(val)
if should_print_all_objs:
print(obj_root, end=" ")
for metric, val in metrics:
print(val, end=" ")
print()
for name in METRIC_NAMES:
if args.multicat:
for cat in cats:
if cat_sz[cat] > 0:
all_metrics[cat + "." + name] /= cat_sz[cat]
all_metrics[name] /= len(all_objs)
print(name, all_metrics[name])
metrics_txt = []
if args.multicat:
for cat in cats:
if cat_sz[cat] > 0:
cat_txt = "{:12s}".format(cat_description[cat])
for name in METRIC_NAMES:
cat_txt += " {}: {:.6f}".format(name, all_metrics[cat + "." + name])
cat_txt += " n_inst: {}".format(cat_sz[cat])
metrics_txt.append(cat_txt)
total_txt = "---\n{:12s}".format("total")
else:
total_txt = ""
for name in METRIC_NAMES:
total_txt += " {}: {:.6f}".format(name, all_metrics[name])
metrics_txt.append(total_txt)
metrics_txt = "\n".join(metrics_txt)
with open(out_metrics_path, "w") as f:
f.write(metrics_txt)
print("WROTE", out_metrics_path)
print(metrics_txt) | null |
154,927 | import sys
import os
import torch
import numpy as np
import imageio
import skimage.measure
import util
from data import get_split_dataset
from render import NeRFRenderer
from model import make_model
import tqdm
def extra_args(parser):
parser.add_argument(
"--split",
type=str,
default="val",
help="Split of data to use train | val | test",
)
parser.add_argument(
"--source",
"-P",
type=str,
default="64",
help="Source view(s) in image, in increasing order. -1 to use random 1 view.",
)
parser.add_argument("--batch_size", type=int, default=4, help="Batch size")
parser.add_argument(
"--seed",
type=int,
default=1234,
help="Random seed for selecting target views of each object",
)
parser.add_argument("--coarse", action="store_true", help="Coarse network as fine")
return parser | null |
154,928 | import sys
import os
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
import util
import torch
import numpy as np
from model import make_model
from render import NeRFRenderer
import torchvision.transforms as T
import tqdm
import imageio
from PIL import Image
os.makedirs(args.output, exist_ok=True)
def extra_args(parser):
parser.add_argument(
"--input",
"-I",
type=str,
default=os.path.join(ROOT_DIR, "input"),
help="Image directory",
)
parser.add_argument(
"--output",
"-O",
type=str,
default=os.path.join(ROOT_DIR, "output"),
help="Output directory",
)
parser.add_argument("--size", type=int, default=128, help="Input image maxdim")
parser.add_argument(
"--out_size",
type=str,
default="128",
help="Output image size, either 1 or 2 number (w h)",
)
parser.add_argument("--focal", type=float, default=131.25, help="Focal length")
parser.add_argument("--radius", type=float, default=1.3, help="Camera distance")
parser.add_argument("--z_near", type=float, default=0.8)
parser.add_argument("--z_far", type=float, default=1.8)
parser.add_argument(
"--elevation",
"-e",
type=float,
default=0.0,
help="Elevation angle (negative is above)",
)
parser.add_argument(
"--num_views",
type=int,
default=24,
help="Number of video frames (rotated views)",
)
parser.add_argument("--fps", type=int, default=15, help="FPS of video")
parser.add_argument("--gif", action="store_true", help="Store gif instead of mp4")
parser.add_argument(
"--no_vid",
action="store_true",
help="Do not store video (only image frames will be written)",
)
return parser | null |
154,929 | import sys
import os
import torch
import numpy as np
import imageio
import skimage.measure
import util
from data import get_split_dataset
from model import make_model
from render import NeRFRenderer
import cv2
import tqdm
import ipdb
import warnings
def extra_args(parser):
parser.add_argument(
"--split",
type=str,
default="test",
help="Split of data to use train | val | test",
)
parser.add_argument(
"--source",
"-P",
type=str,
default="64",
help="Source view(s) for each object. Alternatively, specify -L to viewlist file and leave this blank.",
)
parser.add_argument(
"--eval_view_list", type=str, default=None, help="Path to eval view list"
)
parser.add_argument("--coarse", action="store_true", help="Coarse network as fine")
parser.add_argument(
"--no_compare_gt",
action="store_true",
help="Skip GT comparison (metric won't be computed) and only render images",
)
parser.add_argument(
"--multicat",
action="store_true",
help="Prepend category id to object id. Specify if model fits multiple categories.",
)
parser.add_argument(
"--viewlist",
"-L",
type=str,
default="",
help="Path to source view list e.g. src_dvr.txt; if specified, overrides source/P",
)
parser.add_argument(
"--output",
"-O",
type=str,
default="eval",
help="If specified, saves generated images to directory",
)
parser.add_argument(
"--include_src", action="store_true", help="Include source views in calculation"
)
parser.add_argument(
"--scale", type=float, default=1.0, help="Video scale relative to input size"
)
parser.add_argument("--write_depth", action="store_true", help="Write depth image")
parser.add_argument(
"--write_compare", action="store_true", help="Write GT comparison image"
)
parser.add_argument(
"--free_pose",
action="store_true",
help="Set to indicate poses may change between objects. In most of our datasets, the test set has fixed poses.",
)
return parser | null |
154,930 | import sys
import os
import torch
import torch.nn.functional as F
import numpy as np
import imageio
import util
import warnings
from data import get_split_dataset
from render import NeRFRenderer
from model import make_model
from scipy.interpolate import CubicSpline
import tqdm
def extra_args(parser):
parser.add_argument(
"--subset", "-S", type=int, default=0, help="Subset in data to use"
)
parser.add_argument(
"--split",
type=str,
default="train",
help="Split of data to use train | val | test",
)
parser.add_argument(
"--source",
"-P",
type=str,
default="64",
help="Source view(s) in image, in increasing order. -1 to do random",
)
parser.add_argument(
"--num_views",
type=int,
default=40,
help="Number of video frames (rotated views)",
)
parser.add_argument(
"--elevation",
type=float,
default=-10.0,
help="Elevation angle (negative is above)",
)
parser.add_argument(
"--scale", type=float, default=1.0, help="Video scale relative to input size"
)
parser.add_argument(
"--radius",
type=float,
default=0.0,
help="Distance of camera from origin, default is average of z_far, z_near of dataset (only for non-DTU)",
)
parser.add_argument("--fps", type=int, default=30, help="FPS of video")
return parser | null |
154,931 | import json
import typing as _t
from itertools import groupby
from mimetypes import MimeTypes
from operator import itemgetter
import pydantic
import pydantic_core
import typing_extensions as _te
from pydantic_core import core_schema
if _t.TYPE_CHECKING:
from . import json_schema
_mime_types = MimeTypes()
def get_content_type(file: ds.UploadFile) -> _t.Union[str, None]:
if file.content_type:
return file.content_type
elif file.filename:
return _mime_types.guess_type(file.filename)[0] | null |
154,932 | import json
import typing as _t
from itertools import groupby
from mimetypes import MimeTypes
from operator import itemgetter
import pydantic
import pydantic_core
import typing_extensions as _te
from pydantic_core import core_schema
if _t.TYPE_CHECKING:
from . import json_schema
def Textarea(rows: _t.Union[int, None] = None, cols: _t.Union[int, None] = None) -> _t.Any: # N802
return pydantic.Field(json_schema_extra={'format': 'textarea', 'rows': rows, 'cols': cols}) | null |
154,933 | from __future__ import annotations as _annotations
import re
import subprocess
from pathlib import Path
from typing import Any, cast
from pydantic import ImportString, TypeAdapter
from pydantic.json_schema import GenerateJsonSchema, JsonSchemaValue
from pydantic_core import core_schema, to_json
class CustomGenerateJsonSchema(GenerateJsonSchema):
def field_title_should_be_set(self, schema) -> bool:
return False
def default_schema(self, schema: core_schema.WithDefaultSchema) -> JsonSchemaValue:
inner = schema['schema']
if is_type_schema(schema):
return self.generate_inner(inner)
# if inner.get('type') == 'nullable' and inner.get('ref', '').startswith('fastui.class_name.ClassName:'):
# return self.generate_inner(inner)
return super().default_schema(schema)
def field_is_required(
self,
field: core_schema.ModelField | core_schema.DataclassField | core_schema.TypedDictField,
total: bool,
) -> bool:
inner = field['schema']
if inner['type'] == 'default' and is_type_schema(cast(core_schema.WithDefaultSchema, inner)):
return True
else:
return super().field_is_required(field, total)
def nullable_schema(self, schema: core_schema.NullableSchema) -> JsonSchemaValue:
null_schema = {'type': 'null'}
inner_json_schema = self.generate_inner(schema['schema'])
if inner_json_schema == null_schema:
return null_schema
else:
# since we use `exclude_none=True`, field can't be null
return inner_json_schema
def tagged_union_schema(self, schema: core_schema.TaggedUnionSchema) -> JsonSchemaValue:
if schema['discriminator'] == 'type':
if 'go-to' in schema['choices']:
schema['ref'] = 'fastui.events.AnyEvent'
return super().tagged_union_schema(schema)
def generate_json_schema(root_model: Any) -> JsonSchemaValue:
fastui_schema = TypeAdapter(root_model).json_schema(
by_alias=True, mode='serialization', schema_generator=CustomGenerateJsonSchema
)
# the following post-processing is a workaround for
# https://github.com/pydantic/pydantic/issues/8320
any_comp_def = fastui_schema['$defs']['Div']['properties']['components']['items'].copy()
any_comp_ref = {'$ref': '#/$defs/FastProps'}
def replace_any_comp(value: Any) -> Any:
if isinstance(value, dict):
if value == any_comp_def:
return any_comp_ref
else:
return {k: replace_any_comp(v) for k, v in value.items()}
elif isinstance(value, list):
return [replace_any_comp(v) for v in value]
else:
return value
fastui_schema['items'] = any_comp_ref
fastui_schema = replace_any_comp(fastui_schema)
fastui_schema['$defs']['FastProps'] = any_comp_def
fastui_schema.pop('description')
return fastui_schema | null |
154,934 | from __future__ import annotations as _annotations
import re
import subprocess
from pathlib import Path
from typing import Any, cast
from pydantic import ImportString, TypeAdapter
from pydantic.json_schema import GenerateJsonSchema, JsonSchemaValue
from pydantic_core import core_schema, to_json
def is_type_schema(schema: core_schema.WithDefaultSchema) -> bool:
inner = schema['schema']
if inner['type'] == 'literal':
expected = cast(core_schema.LiteralSchema, inner)['expected']
if len(expected) == 1 and expected[0] == schema.get('default', object()):
return True
return False | null |
154,935 | from __future__ import annotations as _annotations
import re
import subprocess
from pathlib import Path
from typing import Any, cast
from pydantic import ImportString, TypeAdapter
from pydantic.json_schema import GenerateJsonSchema, JsonSchemaValue
from pydantic_core import core_schema, to_json
TS_PREFIX = b"""\
/**
* This file was automatically generated by json-schema-to-typescript.
* DO NOT MODIFY IT BY HAND. Instead, modify python types, then run
* `fastui generate <python-object> <typescript-output-file>`.
*/"""
def json2ts(input_file: Path, output_file: Path): # pragma: no cover
args = (
'npx',
'json2ts',
str(input_file),
str(output_file),
'--additionalProperties',
'false',
'--no-style.semi',
'--style.singleQuote',
'--no-unknownAny',
)
try:
subprocess.run(args, check=True)
except (subprocess.CalledProcessError, FileNotFoundError) as e:
raise RuntimeError(
"Failed to run json2ts, you'll need to install `npx` and `json-schema-to-typescript`, "
f"then run the command:\n\n {' '.join(args)}\n\n"
) from e
else:
assert output_file.is_file()
# remove the root list type that we don't need
output = (
output_file.read_bytes()
.replace(b'export type FastUI = FastProps[]\n', b'')
.replace(b'/* eslint-disable */\n', b'')
)
output = re.sub(rb'/\*\*\s+\* This file was automatically generated.+?\*/', TS_PREFIX, output, flags=re.DOTALL)
output_file.write_bytes(output)
input_file.unlink() | null |
154,936 | import argparse
from pathlib import Path
from . import __version__, generate_typescript
def cli():
parser = argparse.ArgumentParser(prog='FastUI', description='FastUI CLI.')
parser.add_argument(
'--version',
action='version',
version=f'fastui {__version__}',
)
subparsers = parser.add_subparsers(dest='command', required=True)
generate_typescript_parser = subparsers.add_parser(
'generate', help='Generate typescript types from Python definitions of FastUI components.'
)
generate_typescript_parser.add_argument(
'python_object', metavar='python-object', type=str, help='Python object to generate types for.'
)
generate_typescript_parser.add_argument(
'typescript_output_file', metavar='typescript-output-file', type=Path, help='Path to output typescript file.'
)
args = parser.parse_args()
generate_typescript.main(args.python_object, args.typescript_output_file) | null |
154,937 | import json
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Tuple, Union
from .. import AnyComponent, FastUI, events
from .. import components as c
class AuthException(ABC, Exception):
"""
Base exception for all auth-related errors.
"""
def response_data(self) -> Tuple[int, str]:
raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `fastapi_auth_exception_handling` function. Write a Python function `def fastapi_auth_exception_handling(app: 'FastAPI') -> None` to solve the following problem:
Register an exception handler for any `AuthException` in a FastAPI app.
Here is the function:
def fastapi_auth_exception_handling(app: 'FastAPI') -> None:
"""
Register an exception handler for any `AuthException` in a FastAPI app.
"""
from fastapi import Request, Response
@app.exception_handler(AuthException)
def auth_exception_handler(_request: Request, e: AuthException) -> Response:
status_code, body = e.response_data()
return Response(body, media_type='application/json', status_code=status_code) | Register an exception handler for any `AuthException` in a FastAPI app. |
154,938 | import asyncio
import os
import signal
import typing as _t
from contextlib import asynccontextmanager
class DevReload:
def __init__(self, default_lifespan: _t.Union[types.Lifespan[FastAPI], None]):
self.default_lifespan = default_lifespan
self.stop = asyncio.Event()
async def lifespan(self, app: FastAPI):
signal.signal(signal.SIGTERM, self._on_signal)
if self.default_lifespan:
async with self.default_lifespan(app):
yield
else:
yield
async def dev_reload_endpoints(self):
return StreamingResponse(self.ping(), media_type='text/plain')
def _on_signal(self, *_args: _t.Any):
# print('setting stop', _args)
self.stop.set()
async def ping(self):
# print('connected', os.getpid())
yield b'fastui-dev-reload\n'
yield b'.'
while True:
try:
await asyncio.wait_for(self.stop.wait(), timeout=2)
except asyncio.TimeoutError:
yield b'.'
else:
yield b'%d' % os.getpid()
break
def dev_fastapi_app(reload_path: str = '/api/__dev__/reload', **fastapi_kwargs) -> FastAPI:
dev_reload = DevReload(fastapi_kwargs.pop('lifespan', None))
app = FastAPI(lifespan=dev_reload.lifespan)
app.get(reload_path, include_in_schema=False)(dev_reload.dev_reload_endpoints)
return app | null |
154,939 | import json
import re
import typing as _t
import typing_extensions as _ta
from pydantic import BaseModel
from .components.forms import (
FormField,
FormFieldBoolean,
FormFieldFile,
FormFieldInput,
FormFieldSelect,
FormFieldSelectSearch,
FormFieldTextarea,
InputHtmlType,
)
if _t.TYPE_CHECKING:
from .forms import SelectOption
else:
SelectOption = dict
JsonSchemaObject = _t.TypedDict(
'JsonSchemaObject',
{
'type': _ta.Required[_t.Literal['object']],
'properties': _t.Dict[str, JsonSchemaAny],
'$defs': JsonSchemaDefs,
'required': _t.List[str],
'title': str,
'description': str,
},
total=False,
)
def json_schema_obj_to_fields(
schema: JsonSchemaObject, loc: SchemeLocation, title: _t.List[str], defs: JsonSchemaDefs
) -> _t.Iterable[FormField]:
FormField = _t.Union[
FormFieldInput, FormFieldTextarea, FormFieldBoolean, FormFieldFile, FormFieldSelect, FormFieldSelectSearch
]
def model_json_schema_to_fields(model: _t.Type[BaseModel]) -> _t.List[FormField]:
schema = _t.cast(JsonSchemaObject, model.model_json_schema())
defs = schema.get('$defs', {})
return list(json_schema_obj_to_fields(schema, [], [], defs)) | null |
154,940 | from __future__ import annotations
import json
import re
from pathlib import Path
def replace_package_json(package_json: Path, new_version: str, deps: bool = False) -> tuple[Path, str]:
content = package_json.read_text()
content, r_count = re.subn(r'"version": *".*?"', f'"version": "{new_version}"', content, count=1)
    assert r_count == 1, f'Failed to update version in {package_json}, expect replacement count 1, got {r_count}'
if deps:
content, r_count = re.subn(r'"(@pydantic/.+?)": *".*?"', fr'"\1": "{new_version}"', content)
assert r_count == 1, f'Failed to update version in {package_json}, expect replacement count 1, got {r_count}'
return package_json, content | null |
154,941 | from __future__ import annotations as _annotations
import enum
from collections import defaultdict
from datetime import date
from typing import Annotated, Literal, TypeAlias
from fastapi import APIRouter, Request, UploadFile
from fastui import AnyComponent, FastUI
from fastui import components as c
from fastui.events import GoToEvent, PageEvent
from fastui.forms import FormFile, SelectSearchResponse, Textarea, fastui_form
from httpx import AsyncClient
from pydantic import BaseModel, EmailStr, Field, SecretStr, field_validator
from pydantic_core import PydanticCustomError
from .shared import demo_page
class SelectSearchResponse(pydantic.BaseModel):
async def search_view(request: Request, q: str) -> SelectSearchResponse:
path_ends = f'name/{q}' if q else 'all'
client: AsyncClient = request.app.state.httpx_client
r = await client.get(f'https://restcountries.com/v3.1/{path_ends}')
if r.status_code == 404:
options = []
else:
r.raise_for_status()
data = r.json()
if path_ends == 'all':
# if we got all, filter to the 20 most populous countries
data.sort(key=lambda x: x['population'], reverse=True)
data = data[0:20]
data.sort(key=lambda x: x['name']['common'])
regions = defaultdict(list)
for co in data:
regions[co['region']].append({'value': co['cca3'], 'label': co['name']['common']})
options = [{'label': k, 'options': v} for k, v in regions.items()]
return SelectSearchResponse(options=options) | null |
154,942 | from __future__ import annotations as _annotations
import enum
from collections import defaultdict
from datetime import date
from typing import Annotated, Literal, TypeAlias
from fastapi import APIRouter, Request, UploadFile
from fastui import AnyComponent, FastUI
from fastui import components as c
from fastui.events import GoToEvent, PageEvent
from fastui.forms import FormFile, SelectSearchResponse, Textarea, fastui_form
from httpx import AsyncClient
from pydantic import BaseModel, EmailStr, Field, SecretStr, field_validator
from pydantic_core import PydanticCustomError
from .shared import demo_page
FormKind: TypeAlias = Literal['login', 'select', 'big']
def form_content(kind: FormKind):
match kind:
case 'login':
return [
c.Heading(text='Login Form', level=2),
c.Paragraph(text='Simple login form with email and password.'),
c.ModelForm(model=LoginForm, display_mode='page', submit_url='/api/forms/login'),
]
case 'select':
return [
c.Heading(text='Select Form', level=2),
c.Paragraph(text='Form showing different ways of doing select.'),
c.ModelForm(model=SelectForm, display_mode='page', submit_url='/api/forms/select'),
]
case 'big':
return [
c.Heading(text='Large Form', level=2),
c.Paragraph(text='Form with a lot of fields.'),
c.ModelForm(model=BigModel, display_mode='page', submit_url='/api/forms/big'),
]
case _:
raise ValueError(f'Invalid kind {kind!r}')
class PageEvent(BaseModel):
name: str
push_path: Union[str, None] = Field(default=None, serialization_alias='pushPath')
context: Union[ContextType, None] = None
clear: Union[bool, None] = None
next_event: 'Union[AnyEvent, None]' = Field(default=None, serialization_alias='nextEvent')
type: Literal['page'] = 'page'
def demo_page(*components: AnyComponent, title: str | None = None) -> list[AnyComponent]:
return [
c.PageTitle(text=f'FastUI Demo — {title}' if title else 'FastUI Demo'),
c.Navbar(
title='FastUI Demo',
title_event=GoToEvent(url='/'),
start_links=[
c.Link(
components=[c.Text(text='Components')],
on_click=GoToEvent(url='/components'),
active='startswith:/components',
),
c.Link(
components=[c.Text(text='Tables')],
on_click=GoToEvent(url='/table/cities'),
active='startswith:/table',
),
c.Link(
components=[c.Text(text='Auth')],
on_click=GoToEvent(url='/auth/login/password'),
active='startswith:/auth',
),
c.Link(
components=[c.Text(text='Forms')],
on_click=GoToEvent(url='/forms/login'),
active='startswith:/forms',
),
],
),
c.Page(
components=[
*((c.Heading(text=title),) if title else ()),
*components,
],
),
c.Footer(
extra_text='FastUI Demo',
links=[
c.Link(
components=[c.Text(text='Github')], on_click=GoToEvent(url='https://github.com/pydantic/FastUI')
),
c.Link(components=[c.Text(text='PyPI')], on_click=GoToEvent(url='https://pypi.org/project/fastui/')),
c.Link(components=[c.Text(text='NPM')], on_click=GoToEvent(url='https://www.npmjs.com/org/pydantic/')),
],
),
]
def forms_view(kind: FormKind) -> list[AnyComponent]:
return demo_page(
c.LinkList(
links=[
c.Link(
components=[c.Text(text='Login Form')],
on_click=PageEvent(name='change-form', push_path='/forms/login', context={'kind': 'login'}),
active='/forms/login',
),
c.Link(
components=[c.Text(text='Select Form')],
on_click=PageEvent(name='change-form', push_path='/forms/select', context={'kind': 'select'}),
active='/forms/select',
),
c.Link(
components=[c.Text(text='Big Form')],
on_click=PageEvent(name='change-form', push_path='/forms/big', context={'kind': 'big'}),
active='/forms/big',
),
],
mode='tabs',
class_name='+ mb-4',
),
c.ServerLoad(
path='/forms/content/{kind}',
load_trigger=PageEvent(name='change-form'),
components=form_content(kind),
),
title='Forms',
) | null |
154,943 | from __future__ import annotations as _annotations
import enum
from collections import defaultdict
from datetime import date
from typing import Annotated, Literal, TypeAlias
from fastapi import APIRouter, Request, UploadFile
from fastui import AnyComponent, FastUI
from fastui import components as c
from fastui.events import GoToEvent, PageEvent
from fastui.forms import FormFile, SelectSearchResponse, Textarea, fastui_form
from httpx import AsyncClient
from pydantic import BaseModel, EmailStr, Field, SecretStr, field_validator
from pydantic_core import PydanticCustomError
from .shared import demo_page
class LoginForm(BaseModel):
email: EmailStr = Field(title='Email Address', description="Try 'x@y' to trigger server side validation")
password: SecretStr
class GoToEvent(BaseModel):
# can be a path or a full URL
url: Union[str, None] = None
query: Union[Dict[str, Union[str, float, None]], None] = None
target: Union[Literal['_blank'], None] = None
type: Literal['go-to'] = 'go-to'
def fastui_form(model: _t.Type[FormModel]) -> fastapi_params.Depends:
async def run_fastui_form(request: fastapi.Request):
async with request.form() as form_data:
model_data = unflatten(form_data)
try:
return model.model_validate(model_data)
except pydantic.ValidationError as e:
raise fastapi.HTTPException(
status_code=422,
detail={'form': e.errors(include_input=False, include_url=False, include_context=False)},
)
return fastapi.Depends(run_fastui_form)
async def login_form_post(form: Annotated[LoginForm, fastui_form(LoginForm)]):
print(form)
return [c.FireEvent(event=GoToEvent(url='/'))] | null |
154,944 | from __future__ import annotations as _annotations
import enum
from collections import defaultdict
from datetime import date
from typing import Annotated, Literal, TypeAlias
from fastapi import APIRouter, Request, UploadFile
from fastui import AnyComponent, FastUI
from fastui import components as c
from fastui.events import GoToEvent, PageEvent
from fastui.forms import FormFile, SelectSearchResponse, Textarea, fastui_form
from httpx import AsyncClient
from pydantic import BaseModel, EmailStr, Field, SecretStr, field_validator
from pydantic_core import PydanticCustomError
from .shared import demo_page
class SelectForm(BaseModel):
class GoToEvent(BaseModel):
def fastui_form(model: _t.Type[FormModel]) -> fastapi_params.Depends:
async def select_form_post(form: Annotated[SelectForm, fastui_form(SelectForm)]):
# print(form)
return [c.FireEvent(event=GoToEvent(url='/'))] | null |