id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
13,739 | import requests
import json
import re
import openai
from time import time, sleep
from datetime import datetime
from halo import Halo
import textwrap
import yaml
def get_messages(bus, layer):
    """Fetch messages for a bus/layer pair from the local message server.

    Returns the decoded message list on HTTP 200; otherwise logs a failure
    and returns None.
    """
    # NOTE(review): port 900 looks unusual for a local dev server — possibly
    # truncated from 9000 by formatting; confirm against the server config.
    endpoint = f'http://127.0.0.1:900/message?bus={bus}&layer={layer}'
    response = requests.get(endpoint)
    if response.status_code != 200:
        print('Failed to get messages')
        return None
    return response.json()['messages']
13,740 | import requests
import json
import re
import openai
from time import time, sleep
from datetime import datetime
from halo import Halo
import textwrap
import yaml
def format_messages(messages):
    """Render message dicts as newline-joined, human-readable lines.

    Each message dict must provide 'timestamp' (epoch seconds), 'bus',
    'layer', and 'message'.

    Args:
        messages: iterable of message dicts.

    Returns:
        One formatted line per message, joined with newlines ('' for empty input).
    """
    formatted_messages = []
    for message in messages:
        # Fix: the original bound this to `time`, shadowing the `time`
        # function imported at module level (`from time import time`).
        timestamp = datetime.fromtimestamp(message['timestamp']).strftime('%Y-%m-%d %H:%M:%S')
        bus = message['bus']
        layer = message['layer']
        text = message['message']
        formatted_messages.append(f'{timestamp} - {bus} - Layer {layer} - {text}')
    return '\n'.join(formatted_messages)
13,741 | import requests
import json
import re
import openai
from time import time, sleep
from datetime import datetime
from halo import Halo
import textwrap
import yaml
def chatbot(conversation, model="gpt-4", temperature=0, max_tokens=2000):
    """Send a chat conversation to the OpenAI API and return the reply.

    Args:
        conversation: list of {'role', 'content'} message dicts.
        model: chat model name.
        temperature: sampling temperature.
        max_tokens: completion token limit.

    Returns:
        (reply_text, total_tokens) on success; None on failure — callers
        should check before unpacking.
    """
    spinner = Halo(text='Thinking...', spinner='dots')
    spinner.start()
    try:
        response = openai.ChatCompletion.create(
            model=model,
            messages=conversation,
            temperature=temperature,
            max_tokens=max_tokens,
        )
        text = response['choices'][0]['message']['content'].strip()
        return text, response['usage']['total_tokens']
    except Exception as oops:
        print(f'\n\nError communicating with OpenAI: "{oops}"')
        sleep(5)
    finally:
        # Fix: the original only stopped the spinner on success, leaving it
        # animating forever after an API error; always stop it.
        spinner.stop()
13,742 | import requests
import json
import re
import openai
from time import time, sleep
from datetime import datetime
from halo import Halo
import textwrap
import yaml
def chat_print(text):
    """Pretty-print a layer's output, wrapping each line to 120 columns."""
    wrapped = [
        textwrap.fill(line, width=120, initial_indent=' ', subsequent_indent=' ')
        for line in text.split('\n')
    ]
    print('\n\n\nLAYER:\n\n%s' % '\n'.join(wrapped))
13,743 | from flask import Flask, request
import yaml
import time
import os
import glob
def post_message():
    """Flask handler: timestamp an incoming message, log it to YAML, ack 200."""
    message = request.json
    message['timestamp'] = time.time()
    log_path = f"logs/log_{message['timestamp']}_{message['bus']}_{message['layer']}.yaml"
    with open(log_path, 'w', encoding='utf-8') as file:
        yaml.dump(message, file)
    print(message['bus'], message['layer'], message['message'])
    return 'Message received', 200
13,744 | from flask import Flask, request
import yaml
import time
import os
import glob
def get_messages():
    """Flask handler: return up to 20 newest messages for a bus/layer query.

    'north' bus queries return north messages from layers above `layer`;
    any other bus value returns south messages from layers below it.
    """
    bus = request.args.get('bus')
    layer = int(request.args.get('layer'))
    messages = []
    for path in glob.glob('logs/*.yaml'):
        with open(path, 'r', encoding='utf-8') as f:
            messages.append(yaml.safe_load(f))
    if bus == 'north':
        filtered = [m for m in messages if m['bus'] == 'north' and m['layer'] > layer]
    else:
        filtered = [m for m in messages if m['bus'] == 'south' and m['layer'] < layer]
    filtered.sort(key=lambda m: m['timestamp'], reverse=True)
    return {'messages': filtered[:20]}, 200
13,745 | import sys
from typing import Any
import loguru
# Module-level logger instance (configured via `get_logger`).
logger = get_logger()
The provided code snippet includes necessary dependencies for implementing the `get_logger` function. Write a Python function `def get_logger(handler: Any = sys.stderr, **kwargs: dict[str, Any]) -> Any` to solve the following problem:
Get a logger instance. Lanarky uses `loguru` for logging. Args: handler: The handler to use for the logger. Returns: A loguru logger instance.
Here is the function:
def get_logger(handler: Any = sys.stderr, **kwargs: dict[str, Any]) -> Any:
    """Get a logger instance.

    Lanarky uses `loguru` for logging.

    Args:
        handler: The handler to use for the logger.

    Returns:
        A loguru logger instance.
    """
    # Drop loguru's default stderr sink, then attach only the requested one.
    configured = loguru.logger
    configured.remove()
    configured.add(handler, **kwargs)
    return configured
13,746 | import re
from typing import Any, Awaitable, Callable
from fastapi import Depends
from pydantic import BaseModel, create_model
from starlette.routing import compile_path
from lanarky.events import Events
from lanarky.logging import logger
from lanarky.utils import model_dump, model_fields
from lanarky.websockets import WebSocket, WebsocketSession
from .resources import ChatCompletion, ChatCompletionResource, Message, OpenAIResource
from .responses import HTTPStatusDetail, StreamingResponse, status
def compile_openai_resource_factory(endpoint: Callable[..., Any]) -> OpenAIResource:
    """Materialize an OpenAI resource from its factory callable.

    Args:
        endpoint: openai resource factory function.

    Returns:
        An OpenAIResource instance.
    """
    try:
        instance = endpoint()
    except TypeError:
        raise TypeError("set default values for all factory endpoint parameters")
    if isinstance(instance, OpenAIResource):
        return instance
    raise TypeError("factory endpoint must return a LanarkyOpenAIResource instance")
def compile_model_prefix(path: str, resource: OpenAIResource) -> str:
    """Derive a name prefix for generated pydantic models.

    Args:
        path: The path for the route.
        resource: An OpenAIResource instance.
    """
    # Strip path parameters such as '{item}' before building the prefix.
    static_path = re.sub(r"\{.*?\}", "", path)
    route_part = "".join(segment.capitalize() for segment in static_path.split("/"))
    return route_part + resource.__class__.__name__
def create_request_model(
    resource: ChatCompletionResource, prefix: str = ""
) -> BaseModel:
    """Build the pydantic request model for a resource endpoint.

    Note: Support limited to ChatCompletion resource.

    Args:
        resource: An OpenAIResource instance.
        prefix: A prefix for the model name.
    """
    if not isinstance(resource, ChatCompletionResource):
        raise TypeError("resource must be a ChatCompletion instance")
    model_name = (prefix or resource.__class__.__name__) + "Request"
    return create_model(model_name, messages=(list[Message], ...))
def model_dump(model: pydantic.BaseModel, **kwargs) -> dict[str, Any]:
    """Serialize a pydantic model into a plain dictionary.

    Args:
        model: A pydantic model.
    """
    # pydantic v2 renamed ``.dict()`` to ``.model_dump()``.
    dump = model.model_dump if PYDANTIC_V2 else model.dict
    return dump(**kwargs)
class OpenAIResource:
    """Base class for OpenAI resources."""

    def __init__(self, client: AsyncOpenAI = None):
        # Fall back to a default AsyncOpenAI client (presumably configured
        # from the environment — confirm against the openai SDK) when the
        # caller does not supply one.
        self._client = client or AsyncOpenAI()

    # Streaming interface implemented by concrete resources; yields chunks.
    async def stream_response(
        self, *args: Any, **kwargs: dict[str, Any]
    ) -> Generator[str, None, None]: ...
class StreamingResponse(_StreamingResponse):
    """StreamingResponse class for OpenAI resources."""

    def __init__(
        self,
        resource: OpenAIResource,
        messages: list[Message],
        *args: Any,
        **kwargs: dict[str, Any],
    ) -> None:
        """Constructor method.

        Args:
            resource: An OpenAIResource instance.
            messages: A list of `Message` instances.
            *args: Positional arguments to pass to the parent constructor.
            **kwargs: Keyword arguments to pass to the parent constructor.
        """
        super().__init__(*args, **kwargs)
        self.resource = resource
        self.messages = messages

    async def stream_response(self, send: Send) -> None:
        """Stream chat completions.

        If an exception occurs while iterating over the OpenAI resource, an
        internal server error is sent to the client.

        Args:
            send: The ASGI send callable.
        """
        # Open the HTTP response before any body bytes are produced.
        await send(
            {
                "type": "http.response.start",
                "status": self.status_code,
                "headers": self.raw_headers,
            }
        )
        try:
            # Forward each chunk from the resource as a COMPLETION SSE event.
            async for chunk in self.resource.stream_response(self.messages):
                event_body = ServerSentEvent(
                    data=chunk,
                    event=Events.COMPLETION,
                )
                await send(
                    {
                        "type": "http.response.body",
                        "body": ensure_bytes(event_body, None),
                        "more_body": True,
                    }
                )
        except Exception as e:
            # The status line has already been sent, so a 500 cannot be
            # issued; report the failure in-band as an ERROR event instead.
            logger.error(f"openai error: {e}")
            error_event_body = ServerSentEvent(
                data=dict(
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    detail=HTTPStatusDetail.INTERNAL_SERVER_ERROR,
                ),
                event=Events.ERROR,
            )
            await send(
                {
                    "type": "http.response.body",
                    "body": ensure_bytes(error_event_body, None),
                    "more_body": True,
                }
            )
        # Empty body with more_body=False terminates the response stream.
        await send({"type": "http.response.body", "body": b"", "more_body": False})
The provided code snippet includes necessary dependencies for implementing the `build_factory_api_endpoint` function. Write a Python function `def build_factory_api_endpoint( path: str, endpoint: Callable[..., Any] ) -> Callable[..., Awaitable[Any]]` to solve the following problem:
Build a factory endpoint for API routes. Args: path: The path for the route. endpoint: openai resource factory function.
Here is the function:
def build_factory_api_endpoint(
    path: str, endpoint: Callable[..., Any]
) -> Callable[..., Awaitable[Any]]:
    """Build a factory endpoint for API routes.

    Args:
        path: The path for the route.
        endpoint: openai resource factory function.
    """
    compiled_resource = compile_openai_resource_factory(endpoint)
    # `compile_path` returns (regex, path_format, convertors); index 1 is the
    # path_format used to name the request model.
    prefix = compile_model_prefix(compile_path(path)[1], compiled_resource)
    request_model = create_request_model(compiled_resource, prefix)

    async def factory_endpoint(
        request: request_model, resource: OpenAIResource = Depends(endpoint)
    ):
        return StreamingResponse(resource=resource, **model_dump(request))

    return factory_endpoint
13,747 | import re
from typing import Any, Awaitable, Callable
from fastapi import Depends
from pydantic import BaseModel, create_model
from starlette.routing import compile_path
from lanarky.events import Events
from lanarky.logging import logger
from lanarky.utils import model_dump, model_fields
from lanarky.websockets import WebSocket, WebsocketSession
from .resources import ChatCompletion, ChatCompletionResource, Message, OpenAIResource
from .responses import HTTPStatusDetail, StreamingResponse, status
def compile_openai_resource_factory(endpoint: Callable[..., Any]) -> OpenAIResource:
    """Materialize an OpenAI resource from its factory callable.

    Args:
        endpoint: openai resource factory function.

    Returns:
        An OpenAIResource instance.
    """
    try:
        instance = endpoint()
    except TypeError:
        raise TypeError("set default values for all factory endpoint parameters")
    if isinstance(instance, OpenAIResource):
        return instance
    raise TypeError("factory endpoint must return a LanarkyOpenAIResource instance")
def compile_model_prefix(path: str, resource: OpenAIResource) -> str:
    """Derive a name prefix for generated pydantic models.

    Args:
        path: The path for the route.
        resource: An OpenAIResource instance.
    """
    # Strip path parameters such as '{item}' before building the prefix.
    static_path = re.sub(r"\{.*?\}", "", path)
    route_part = "".join(segment.capitalize() for segment in static_path.split("/"))
    return route_part + resource.__class__.__name__
def create_request_model(
    resource: ChatCompletionResource, prefix: str = ""
) -> BaseModel:
    """Build the pydantic request model for a resource endpoint.

    Note: Support limited to ChatCompletion resource.

    Args:
        resource: An OpenAIResource instance.
        prefix: A prefix for the model name.
    """
    if not isinstance(resource, ChatCompletionResource):
        raise TypeError("resource must be a ChatCompletion instance")
    model_name = (prefix or resource.__class__.__name__) + "Request"
    return create_model(model_name, messages=(list[Message], ...))
class Events(StrEnum):
    """Event names used in streamed (SSE / websocket) payloads."""

    COMPLETION = "completion"  # a completion chunk
    ERROR = "error"  # an in-band error payload
    END = "end"  # end-of-stream marker
# Module-level logger instance (configured via `get_logger`).
logger = get_logger()
def model_dump(model: pydantic.BaseModel, **kwargs) -> dict[str, Any]:
"""Dump a pydantic model to a dictionary.
Args:
model: A pydantic model.
"""
if PYDANTIC_V2:
return model.model_dump(**kwargs)
else:
return model.dict(**kwargs)
from contextlib import asynccontextmanager


class WebsocketSession:
    """Class to handle websocket connections.

    Supports 3 data modes: JSON, TEXT, and BYTES.

    To know more about WebSockets, read the
    [FastAPI documentation](https://fastapi.tiangolo.com/advanced/websockets/).
    """

    # Fix: callers use ``async with WebsocketSession().connect(...) as session``
    # (see the factory endpoints in this file); a bare async generator does
    # not support ``async with``, so ``connect`` must be wrapped in
    # ``asynccontextmanager``.
    @asynccontextmanager
    async def connect(
        self, websocket: WebSocket, mode: DataMode = DataMode.JSON
    ) -> Generator:
        """Connect to a websocket and yield data from it.

        Args:
            websocket: The websocket to connect to.
            mode: The data mode to use. Defaults to DataMode.JSON.

        Yields:
            Any: data from client side.
        """
        await websocket.accept()
        try:
            if mode == DataMode.JSON:
                yield self.iter_json(websocket)
            elif mode == DataMode.TEXT:
                yield self.iter_text(websocket)
            elif mode == DataMode.BYTES:
                yield self.iter_bytes(websocket)
            else:
                raise ValueError(f"Invalid DataMode: {mode}")
        except WebSocketDisconnect:
            # A client disconnect is expected; log and exit cleanly.
            logger.info("Websocket disconnected")

    async def iter_text(self, websocket: WebSocket):
        """Yield text frames from the client until disconnect."""
        while True:
            yield await websocket.receive_text()

    async def iter_bytes(self, websocket: WebSocket):
        """Yield binary frames from the client until disconnect."""
        while True:
            yield await websocket.receive_bytes()

    async def iter_json(self, websocket: WebSocket):
        """Yield JSON payloads from the client until disconnect."""
        while True:
            yield await websocket.receive_json()
class OpenAIResource:
    """Base class for OpenAI resources."""

    def __init__(self, client: AsyncOpenAI = None):
        # Fall back to a default AsyncOpenAI client (presumably configured
        # from the environment — confirm against the openai SDK) when the
        # caller does not supply one.
        self._client = client or AsyncOpenAI()

    # Streaming interface implemented by concrete resources; yields chunks.
    async def stream_response(
        self, *args: Any, **kwargs: dict[str, Any]
    ) -> Generator[str, None, None]: ...
The provided code snippet includes necessary dependencies for implementing the `build_factory_websocket_endpoint` function. Write a Python function `def build_factory_websocket_endpoint( path: str, endpoint: Callable[..., Any] ) -> Callable[..., Awaitable[Any]]` to solve the following problem:
Build a factory endpoint for WebSocket routes. Args: path: The path for the route. endpoint: openai resource factory function.
Here is the function:
def build_factory_websocket_endpoint(
    path: str, endpoint: Callable[..., Any]
) -> Callable[..., Awaitable[Any]]:
    """Build a factory endpoint for WebSocket routes.

    Args:
        path: The path for the route.
        endpoint: openai resource factory function.
    """
    resource = compile_openai_resource_factory(endpoint)
    # index 1 of `compile_path` contains path_format output
    model_prefix = compile_model_prefix(compile_path(path)[1], resource)
    request_model = create_request_model(resource, model_prefix)

    async def factory_endpoint(
        websocket: WebSocket, resource: OpenAIResource = Depends(endpoint)
    ):
        # One session per connection; each incoming JSON payload is validated
        # against `request_model` and the completion streamed back chunkwise.
        async with WebsocketSession().connect(websocket) as session:
            async for data in session:
                try:
                    async for chunk in resource.stream_response(
                        **model_dump(request_model(**data))
                    ):
                        await websocket.send_json(
                            dict(
                                data=chunk,
                                event=Events.COMPLETION,
                            )
                        )
                except Exception as e:
                    # Report failures in-band so the connection stays open.
                    logger.error(f"openai error: {e}")
                    await websocket.send_json(
                        dict(
                            data=dict(
                                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                                detail=HTTPStatusDetail.INTERNAL_SERVER_ERROR,
                            ),
                            event=Events.ERROR,
                        )
                    )
                # Signal end-of-response for this request message.
                await websocket.send_json(dict(data="", event=Events.END))

    return factory_endpoint
13,748 | from typing import Any, Callable, Optional
from fastapi import params
from lanarky.utils import model_dump
from .resources import OpenAIResource
from .utils import create_request_model, create_response_model
def model_dump(model: pydantic.BaseModel, **kwargs) -> dict[str, Any]:
    """Serialize a pydantic model into a plain dictionary.

    Args:
        model: A pydantic model.
    """
    # pydantic v2 renamed ``.dict()`` to ``.model_dump()``.
    dump = model.model_dump if PYDANTIC_V2 else model.dict
    return dump(**kwargs)
class OpenAIResource:
    """Base class for OpenAI resources."""

    def __init__(self, client: AsyncOpenAI = None):
        # Fall back to a default AsyncOpenAI client (presumably configured
        # from the environment — confirm against the openai SDK) when the
        # caller does not supply one.
        self._client = client or AsyncOpenAI()

    # Streaming interface implemented by concrete resources; yields chunks.
    async def stream_response(
        self, *args: Any, **kwargs: dict[str, Any]
    ) -> Generator[str, None, None]: ...
def create_request_model(
    resource: ChatCompletionResource, prefix: str = ""
) -> BaseModel:
    """Build the pydantic request model for a resource endpoint.

    Note: Support limited to ChatCompletion resource.

    Args:
        resource: An OpenAIResource instance.
        prefix: A prefix for the model name.
    """
    if not isinstance(resource, ChatCompletionResource):
        raise TypeError("resource must be a ChatCompletion instance")
    model_name = (prefix or resource.__class__.__name__) + "Request"
    return create_model(model_name, messages=(list[Message], ...))
def create_response_model(
    resource: ChatCompletionResource, prefix: str = None
) -> BaseModel:
    """Build the pydantic response model for a resource endpoint.

    Note: Support limited to ChatCompletion resource.

    Args:
        resource: An OpenAIResource instance.
        prefix: A prefix for the model name.
    """
    if not isinstance(resource, ChatCompletionResource):
        raise TypeError("resource must be a ChatCompletion instance")
    # Mirror every field of the ChatCompletion schema as a required field.
    response_fields = {
        name: (field.annotation, ...)
        for name, field in model_fields(ChatCompletion).items()
    }
    model_name = (prefix or resource.__class__.__name__) + "Response"
    return create_model(model_name, **response_fields)
The provided code snippet includes necessary dependencies for implementing the `Depends` function. Write a Python function `def Depends( dependency: Optional[Callable[..., Any]], *, dependency_kwargs: dict[str, Any] = {}, use_cache: bool = True ) -> params.Depends` to solve the following problem:
Dependency injection for OpenAI. Args: dependency: a "dependable" resource factory callable. dependency_kwargs: kwargs to pass to resource dependency. use_cache: use_cache parameter of `fastapi.Depends`.
Here is the function:
def Depends(
    dependency: Optional[Callable[..., Any]],
    *,
    dependency_kwargs: Optional[dict[str, Any]] = None,
    use_cache: bool = True
) -> params.Depends:
    """Dependency injection for OpenAI.

    Args:
        dependency: a "dependable" resource factory callable.
        dependency_kwargs: kwargs to pass to resource dependency.
        use_cache: use_cache parameter of `fastapi.Depends`.
    """
    # Fix: the original used a mutable default argument (`{}`); normalize
    # None to a fresh empty dict instead.
    dependency_kwargs = dependency_kwargs or {}
    try:
        resource = dependency()
    except TypeError:
        raise TypeError("set default values for all dependency parameters")
    if not isinstance(resource, OpenAIResource):
        raise TypeError("dependency must return a OpenAIResource instance")
    request_model = create_request_model(resource)
    response_model = create_response_model(resource)

    async def resource_dependency(
        request: request_model,
        resource: OpenAIResource = params.Depends(dependency, use_cache=use_cache),
    ) -> response_model:
        # Explicit dependency kwargs override fields parsed from the request.
        resource_kwargs = {**model_dump(request), **dependency_kwargs}
        return await resource(**resource_kwargs)

    return params.Depends(resource_dependency, use_cache=use_cache)
13,749 | import re
from typing import Any, Awaitable, Callable
from fastapi import Depends
from langchain.agents import AgentExecutor
from langchain.chains.base import Chain
from langchain.schema.document import Document
from pydantic import BaseModel, create_model
from starlette.routing import compile_path
from lanarky.adapters.langchain.callbacks import (
FinalTokenStreamingCallbackHandler,
FinalTokenWebSocketCallbackHandler,
SourceDocumentsStreamingCallbackHandler,
SourceDocumentsWebSocketCallbackHandler,
TokenStreamingCallbackHandler,
TokenWebSocketCallbackHandler,
)
from lanarky.adapters.langchain.responses import HTTPStatusDetail, StreamingResponse
from lanarky.events import Events
from lanarky.logging import logger
from lanarky.utils import model_dump
from lanarky.websockets import WebSocket, WebsocketSession
def compile_chain_factory(endpoint: Callable[..., Any]):
    """Materialize a LangChain instance from its factory function.

    Args:
        endpoint: LangChain instance factory function.
    """
    try:
        chain_instance = endpoint()
    except TypeError:
        raise TypeError("set default values for all factory endpoint parameters")
    if isinstance(chain_instance, Chain):
        return chain_instance
    raise TypeError("factory endpoint must return a Chain instance")
def create_request_model(chain: Chain, prefix: str = "") -> BaseModel:
    """Build the pydantic request model for a LangChain instance.

    Args:
        chain: A LangChain instance.
        prefix: A prefix for the model name.
    """
    # TODO: add support for other input key types based on demand.
    # chat_history is a list of (human, ai) message pairs; all other input
    # keys are treated as plain strings.
    request_fields = {
        key: ((list[tuple[str, str]], ...) if key == "chat_history" else (str, ...))
        for key in chain.input_keys
    }
    model_name = (prefix or chain.__class__.__name__) + "Request"
    return create_model(model_name, **request_fields)
def compile_model_prefix(path: str, chain: Chain) -> str:
    """Derive a name prefix for generated pydantic models.

    Args:
        path: The path for the route.
        chain: A LangChain instance.
    """
    # Strip path parameters such as '{item}' before building the prefix.
    static_path = re.sub(r"\{.*?\}", "", path)
    route_part = "".join(segment.capitalize() for segment in static_path.split("/"))
    return route_part + chain.__class__.__name__
def get_streaming_callbacks(chain: Chain) -> list[Callable]:
    """Get streaming callbacks for a LangChain instance.

    Note: This function might not support all LangChain
    chain and agent types. Please open an issue on GitHub to
    request support for a specific type.

    Args:
        chain: A LangChain instance.
    """
    callbacks = []
    # Chains that expose retrieved documents get a dedicated callback so the
    # documents are streamed alongside the tokens.
    if "source_documents" in chain.output_keys:
        callbacks.append(SourceDocumentsStreamingCallbackHandler())
    # More than one non-document output key is ambiguous for streaming; warn
    # and stream only the first.
    if len(set(chain.output_keys) - {"source_documents"}) > 1:
        logger.warning(
            f"""multiple output keys found: {set(chain.output_keys) - {'source_documents'}}.
Only the first output key will be used for streaming tokens. For more complex API logic, define the endpoint function manually.
"""
        )
    # Agents stream only their final answer; plain chains stream every token
    # of the first output key.
    if isinstance(chain, AgentExecutor):
        callbacks.append(FinalTokenStreamingCallbackHandler())
    else:
        callbacks.extend(
            [
                TokenStreamingCallbackHandler(output_key=chain.output_keys[0]),
            ]
        )
    return callbacks
class StreamingResponse(_StreamingResponse):
    """StreamingResponse class for LangChain resources."""

    def __init__(
        self,
        chain: Chain,
        config: dict[str, Any],
        run_mode: ChainRunMode = ChainRunMode.ASYNC,
        *args: Any,
        **kwargs: dict[str, Any],
    ) -> None:
        """Constructor method.

        Args:
            chain: A LangChain instance.
            config: A config dict.
            run_mode: Whether to invoke the chain async or via an executor.
            *args: Positional arguments to pass to the parent constructor.
            **kwargs: Keyword arguments to pass to the parent constructor.

        Raises:
            ValueError: if `run_mode` is not a valid ChainRunMode member.
        """
        super().__init__(*args, **kwargs)
        self.chain = chain
        self.config = config
        if run_mode not in list(ChainRunMode):
            raise ValueError(
                f"Invalid run mode '{run_mode}'. Must be one of {list(ChainRunMode)}"
            )
        self.run_mode = run_mode

    async def stream_response(self, send: Send) -> None:
        """Stream LangChain outputs.

        If an exception occurs while iterating over the LangChain, an
        internal server error is sent to the client.

        Args:
            send: The ASGI send callable.
        """
        # Open the HTTP response before the chain produces any output.
        await send(
            {
                "type": "http.response.start",
                "status": self.status_code,
                "headers": self.raw_headers,
            }
        )
        # Streaming callbacks need the ASGI `send` callable to emit events.
        if "callbacks" in self.config:
            for callback in self.config["callbacks"]:
                if hasattr(callback, "send"):
                    callback.send = send
        try:
            # TODO: migrate to `.ainvoke` when adding support
            # for LCEL
            if self.run_mode == ChainRunMode.ASYNC:
                outputs = await self.chain.acall(**self.config)
            else:
                # SYNC mode: run the blocking chain call in the default
                # executor so the event loop is not blocked.
                loop = asyncio.get_event_loop()
                outputs = await loop.run_in_executor(
                    None, partial(self.chain, **self.config)
                )
            # Make chain outputs available to any background task.
            if self.background is not None:
                self.background.kwargs.update({"outputs": outputs})
        except Exception as e:
            # Headers are already sent, so report the failure in-band as an
            # ERROR server-sent event instead of a 500 status line.
            logger.error(f"chain runtime error: {e}")
            if self.background is not None:
                self.background.kwargs.update({"outputs": {}, "error": e})
            chunk = ServerSentEvent(
                data=dict(
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    detail=HTTPStatusDetail.INTERNAL_SERVER_ERROR,
                ),
                event=Events.ERROR,
            )
            await send(
                {
                    "type": "http.response.body",
                    "body": ensure_bytes(chunk, None),
                    "more_body": True,
                }
            )
        # Empty body with more_body=False terminates the response stream.
        await send({"type": "http.response.body", "body": b"", "more_body": False})
def model_dump(model: pydantic.BaseModel, **kwargs) -> dict[str, Any]:
    """Serialize a pydantic model into a plain dictionary.

    Args:
        model: A pydantic model.
    """
    # pydantic v2 renamed ``.dict()`` to ``.model_dump()``.
    dump = model.model_dump if PYDANTIC_V2 else model.dict
    return dump(**kwargs)
The provided code snippet includes necessary dependencies for implementing the `build_factory_api_endpoint` function. Write a Python function `def build_factory_api_endpoint( path: str, endpoint: Callable[..., Any] ) -> Callable[..., Awaitable[Any]]` to solve the following problem:
Build a factory endpoint for API routes. Args: path: The path for the route. endpoint: LangChain instance factory function.
Here is the function:
def build_factory_api_endpoint(
    path: str, endpoint: Callable[..., Any]
) -> Callable[..., Awaitable[Any]]:
    """Build a factory endpoint for API routes.

    Args:
        path: The path for the route.
        endpoint: LangChain instance factory function.
    """
    compiled_chain = compile_chain_factory(endpoint)
    # `compile_path` returns (regex, path_format, convertors); index 1 is the
    # path_format used to name the request model.
    prefix = compile_model_prefix(compile_path(path)[1], compiled_chain)
    request_model = create_request_model(compiled_chain, prefix)
    callbacks = get_streaming_callbacks(compiled_chain)

    async def factory_endpoint(
        request: request_model, chain: Chain = Depends(endpoint)
    ):
        return StreamingResponse(
            chain=chain, config={"inputs": model_dump(request), "callbacks": callbacks}
        )

    return factory_endpoint
13,750 | import re
from typing import Any, Awaitable, Callable
from fastapi import Depends
from langchain.agents import AgentExecutor
from langchain.chains.base import Chain
from langchain.schema.document import Document
from pydantic import BaseModel, create_model
from starlette.routing import compile_path
from lanarky.adapters.langchain.callbacks import (
FinalTokenStreamingCallbackHandler,
FinalTokenWebSocketCallbackHandler,
SourceDocumentsStreamingCallbackHandler,
SourceDocumentsWebSocketCallbackHandler,
TokenStreamingCallbackHandler,
TokenWebSocketCallbackHandler,
)
from lanarky.adapters.langchain.responses import HTTPStatusDetail, StreamingResponse
from lanarky.events import Events
from lanarky.logging import logger
from lanarky.utils import model_dump
from lanarky.websockets import WebSocket, WebsocketSession
def compile_chain_factory(endpoint: Callable[..., Any]):
    """Materialize a LangChain instance from its factory function.

    Args:
        endpoint: LangChain instance factory function.
    """
    try:
        chain_instance = endpoint()
    except TypeError:
        raise TypeError("set default values for all factory endpoint parameters")
    if isinstance(chain_instance, Chain):
        return chain_instance
    raise TypeError("factory endpoint must return a Chain instance")
def create_request_model(chain: Chain, prefix: str = "") -> BaseModel:
    """Build the pydantic request model for a LangChain instance.

    Args:
        chain: A LangChain instance.
        prefix: A prefix for the model name.
    """
    # TODO: add support for other input key types based on demand.
    # chat_history is a list of (human, ai) message pairs; all other input
    # keys are treated as plain strings.
    request_fields = {
        key: ((list[tuple[str, str]], ...) if key == "chat_history" else (str, ...))
        for key in chain.input_keys
    }
    model_name = (prefix or chain.__class__.__name__) + "Request"
    return create_model(model_name, **request_fields)
def compile_model_prefix(path: str, chain: Chain) -> str:
    """Derive a name prefix for generated pydantic models.

    Args:
        path: The path for the route.
        chain: A LangChain instance.
    """
    # Strip path parameters such as '{item}' before building the prefix.
    static_path = re.sub(r"\{.*?\}", "", path)
    route_part = "".join(segment.capitalize() for segment in static_path.split("/"))
    return route_part + chain.__class__.__name__
def get_websocket_callbacks(chain: Chain, websocket: WebSocket) -> list[Callable]:
    """Get websocket callbacks for a LangChain instance.

    Note: This function might not support all LangChain
    chain and agent types. Please open an issue on GitHub to
    request support for a specific type.

    Args:
        chain: A LangChain instance.
        websocket: A WebSocket instance.
    """
    callbacks = []
    # Chains that expose retrieved documents get a dedicated callback so the
    # documents are sent alongside the tokens.
    if "source_documents" in chain.output_keys:
        callbacks.append(SourceDocumentsWebSocketCallbackHandler(websocket=websocket))
    # More than one non-document output key is ambiguous; warn and send only
    # the first.
    if len(set(chain.output_keys) - {"source_documents"}) > 1:
        logger.warning(
            f"""multiple output keys found: {set(chain.output_keys) - {'source_documents'}}.
Only the first output key will be used for sending tokens. For more complex websocket logic, define the endpoint function manually.
"""
        )
    # Agents send only their final answer; plain chains send every token of
    # the first output key.
    if isinstance(chain, AgentExecutor):
        callbacks.append(FinalTokenWebSocketCallbackHandler(websocket=websocket))
    else:
        callbacks.extend(
            [
                TokenWebSocketCallbackHandler(
                    output_key=chain.output_keys[0], websocket=websocket
                ),
            ]
        )
    return callbacks
class Events(StrEnum):
    """Event names used in streamed (SSE / websocket) payloads."""

    COMPLETION = "completion"  # a completion chunk
    ERROR = "error"  # an in-band error payload
    END = "end"  # end-of-stream marker
# Module-level logger instance (configured via `get_logger`).
logger = get_logger()
def model_dump(model: pydantic.BaseModel, **kwargs) -> dict[str, Any]:
    """Serialize a pydantic model into a plain dictionary.

    Args:
        model: A pydantic model.
    """
    # pydantic v2 renamed ``.dict()`` to ``.model_dump()``.
    dump = model.model_dump if PYDANTIC_V2 else model.dict
    return dump(**kwargs)
from contextlib import asynccontextmanager


class WebsocketSession:
    """Class to handle websocket connections.

    Supports 3 data modes: JSON, TEXT, and BYTES.

    To know more about WebSockets, read the
    [FastAPI documentation](https://fastapi.tiangolo.com/advanced/websockets/).
    """

    # Fix: callers use ``async with WebsocketSession().connect(...) as session``
    # (see the factory endpoints in this file); a bare async generator does
    # not support ``async with``, so ``connect`` must be wrapped in
    # ``asynccontextmanager``.
    @asynccontextmanager
    async def connect(
        self, websocket: WebSocket, mode: DataMode = DataMode.JSON
    ) -> Generator:
        """Connect to a websocket and yield data from it.

        Args:
            websocket: The websocket to connect to.
            mode: The data mode to use. Defaults to DataMode.JSON.

        Yields:
            Any: data from client side.
        """
        await websocket.accept()
        try:
            if mode == DataMode.JSON:
                yield self.iter_json(websocket)
            elif mode == DataMode.TEXT:
                yield self.iter_text(websocket)
            elif mode == DataMode.BYTES:
                yield self.iter_bytes(websocket)
            else:
                raise ValueError(f"Invalid DataMode: {mode}")
        except WebSocketDisconnect:
            # A client disconnect is expected; log and exit cleanly.
            logger.info("Websocket disconnected")

    async def iter_text(self, websocket: WebSocket):
        """Yield text frames from the client until disconnect."""
        while True:
            yield await websocket.receive_text()

    async def iter_bytes(self, websocket: WebSocket):
        """Yield binary frames from the client until disconnect."""
        while True:
            yield await websocket.receive_bytes()

    async def iter_json(self, websocket: WebSocket):
        """Yield JSON payloads from the client until disconnect."""
        while True:
            yield await websocket.receive_json()
The provided code snippet includes necessary dependencies for implementing the `build_factory_websocket_endpoint` function. Write a Python function `def build_factory_websocket_endpoint( path: str, endpoint: Callable[..., Any] ) -> Callable[..., Awaitable[Any]]` to solve the following problem:
Build a factory endpoint for WebSocket routes. Args: path: The path for the route. endpoint: LangChain instance factory function.
Here is the function:
def build_factory_websocket_endpoint(
    path: str, endpoint: Callable[..., Any]
) -> Callable[..., Awaitable[Any]]:
    """Build a factory endpoint for WebSocket routes.

    Args:
        path: The path for the route.
        endpoint: LangChain instance factory function.
    """
    chain = compile_chain_factory(endpoint)
    # index 1 of `compile_path` contains path_format output
    model_prefix = compile_model_prefix(compile_path(path)[1], chain)
    request_model = create_request_model(chain, model_prefix)

    async def factory_endpoint(websocket: WebSocket, chain: Chain = Depends(endpoint)):
        callbacks = get_websocket_callbacks(chain, websocket)
        async with WebsocketSession().connect(websocket) as session:
            async for data in session:
                try:
                    await chain.acall(
                        inputs=model_dump(request_model(**data)),
                        callbacks=callbacks,
                    )
                except Exception as e:
                    logger.error(f"langchain error: {e}")
                    # Fix: every other error handler in this codebase sends a
                    # `status_code`/`detail` dict with the
                    # HTTPStatusDetail.INTERNAL_SERVER_ERROR member; the
                    # original called HTTPStatusDetail(code=..., message=...)
                    # as a constructor, which does not match that usage.
                    await websocket.send_json(
                        dict(
                            data=dict(
                                status_code=500,
                                detail=HTTPStatusDetail.INTERNAL_SERVER_ERROR,
                            ),
                            event=Events.ERROR,
                        )
                    )
                # Signal end-of-response for this request message.
                await websocket.send_json(dict(data="", event=Events.END))

    return factory_endpoint
13,751 | from typing import Any, Optional
from fastapi.websockets import WebSocket
from langchain.callbacks.base import AsyncCallbackHandler
from langchain.callbacks.streaming_stdout_final_only import (
FinalStreamingStdOutCallbackHandler,
)
from langchain.globals import get_llm_cache
from langchain.schema.document import Document
from pydantic import BaseModel
from starlette.types import Message, Send
from lanarky.events import Events, ServerSentEvent, ensure_bytes
from lanarky.utils import StrEnum, model_dump_json
class TokenStreamMode(StrEnum):
    """Streaming modes for token events: raw text or JSON-encoded payloads."""
    TEXT = "text"
    JSON = "json"
class TokenEventData(BaseModel):
    """Event data payload for tokens."""
    # token text emitted by the LLM; empty string by default
    token: str = ""
def model_dump_json(model: pydantic.BaseModel, **kwargs) -> str:
    """Serialize a pydantic model to a JSON string.

    Bridges the pydantic v1/v2 API split.

    Args:
        model: A pydantic model.
    """
    # v2 renamed `json()` to `model_dump_json()`
    serialize = model.model_dump_json if PYDANTIC_V2 else model.json
    return serialize(**kwargs)
The provided code snippet includes necessary dependencies for implementing the `get_token_data` function. Write a Python function `def get_token_data(token: str, mode: TokenStreamMode) -> str` to solve the following problem:
Get token data based on mode. Args: token: The token to use. mode: The stream mode.
Here is the function:
def get_token_data(token: str, mode: TokenStreamMode) -> str:
    """Render token data according to the stream mode.

    Args:
        token: The token to use.
        mode: The stream mode.

    Raises:
        ValueError: If ``mode`` is not a known ``TokenStreamMode``.
    """
    if mode not in list(TokenStreamMode):
        raise ValueError(f"Invalid stream mode: {mode}")
    if mode == TokenStreamMode.TEXT:
        return token
    # JSON mode wraps the token in a TokenEventData payload
    return model_dump_json(TokenEventData(token=token))
13,752 | from typing import Any, Callable, Optional
from fastapi import params
from langchain.chains.base import Chain
from lanarky.adapters.langchain.utils import create_request_model, create_response_model
from lanarky.utils import model_dump
def create_request_model(chain: Chain, prefix: str = "") -> BaseModel:
    """Build a pydantic request model for a LangChain instance.

    Args:
        chain: A LangChain instance.
        prefix: A prefix for the model name.
    """
    # TODO: add support for other input key types based on demand;
    # chat_history is a list of (human, ai) pairs, everything else is a string
    request_fields = {
        key: (list[tuple[str, str]], ...) if key == "chat_history" else (str, ...)
        for key in chain.input_keys
    }
    model_name = (prefix or chain.__class__.__name__) + "Request"
    return create_model(model_name, **request_fields)
def create_response_model(chain: Chain, prefix: Optional[str] = None) -> BaseModel:
    """Create a pydantic response model for a LangChain instance.
    Args:
        chain: A LangChain instance.
        prefix: A prefix for the model name; defaults to the chain class name.
    """
    response_fields = {}
    for key in chain.output_keys:
        # TODO: add support for other output key types
        # based on demand
        if key == "source_documents":
            # source documents are returned as full Document objects
            response_fields[key] = (list[Document], ...)
        else:
            response_fields[key] = (str, ...)
    # fall back to the chain class name when no prefix is given
    prefix = prefix or chain.__class__.__name__
    return create_model(f"{prefix}Response", **response_fields)
def model_dump(model: pydantic.BaseModel, **kwargs) -> dict[str, Any]:
    """Convert a pydantic model into a plain dictionary.

    Bridges the pydantic v1/v2 API split.

    Args:
        model: A pydantic model.
    """
    # v2 renamed `dict()` to `model_dump()`
    dump = model.model_dump if PYDANTIC_V2 else model.dict
    return dump(**kwargs)
The provided code snippet includes necessary dependencies for implementing the `Depends` function. Write a Python function `def Depends( dependency: Optional[Callable[..., Any]], *, dependency_kwargs: dict[str, Any] = {}, use_cache: bool = True ) -> params.Depends` to solve the following problem:
Dependency injection for LangChain. Args: dependency: a "dependable" chain factory callable. dependency_kwargs: kwargs to pass to chain dependency. use_cache: use_cache parameter of `fastapi.Depends`.
Here is the function:
def Depends(
    dependency: Optional[Callable[..., Any]],
    *,
    dependency_kwargs: Optional[dict[str, Any]] = None,
    use_cache: bool = True
) -> params.Depends:
    """Dependency injection for LangChain.

    Args:
        dependency: a "dependable" chain factory callable.
        dependency_kwargs: kwargs to pass to chain dependency.
        use_cache: use_cache parameter of `fastapi.Depends`.

    Raises:
        TypeError: if the dependency cannot be called without arguments or
            does not return a ``Chain`` instance.
    """
    # avoid a shared mutable default argument ({} was previously the default)
    dependency_kwargs = dependency_kwargs or {}
    try:
        chain = dependency()
    except TypeError:
        raise TypeError("set default values for all dependency parameters")
    if not isinstance(chain, Chain):
        raise TypeError("dependency must return a Chain instance")
    # build request/response schemas from the chain's declared keys
    request_model = create_request_model(chain)
    response_model = create_response_model(chain)

    async def chain_dependency(
        request: request_model,
        chain: Chain = params.Depends(dependency, use_cache=use_cache),
    ) -> response_model:
        return await chain.acall(inputs=model_dump(request), **dependency_kwargs)

    return params.Depends(chain_dependency, use_cache=use_cache)
13,753 | import json
import gradio as gr
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from lanarky import Lanarky
from lanarky.adapters.langchain.routing import LangchainAPIRouter
from lanarky.clients import StreamingClient
def chat() -> RetrievalQA:
    """Build a streaming RetrievalQA chain backed by the local FAISS index."""
    vectorstore = FAISS.load_local("db/", OpenAIEmbeddings())
    llm = ChatOpenAI(streaming=True)
    # return the two most relevant chunks alongside the answer
    retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
    return RetrievalQA.from_chain_type(
        llm,
        retriever=retriever,
        return_source_documents=True,
    )
# Collapsible HTML snippet used to render one retrieved source document
# inside the chat transcript.
SOURCE_DOCUMENT_TEMPLATE = """
<details><summary><b>Source {idx}</b></summary>{page_content}</details>
"""
class StreamingClient:
    """Test client for streaming server-sent events."""

    def __init__(
        self,
        base_url: str = "http://localhost:8000",
        client: Optional[httpx.Client] = None,
    ):
        """Constructor method.

        Args:
            base_url: The base URL of the server.
            client: The HTTP client to use.
        """
        self.base_url = base_url
        # fall back to a fresh httpx client when none is supplied
        if client is None:
            client = httpx.Client()
        self.client = client

    def stream_response(
        self, method: str, path: str, **kwargs: dict[str, Any]
    ) -> Iterator[ServerSentEvent]:
        """Stream data from the server.

        Args:
            method: The HTTP method to use.
            path: The path to stream from.
            **kwargs: The keyword arguments to pass to the HTTP client.
        """
        target = self.base_url + path
        # lazily yield each server-sent event as it arrives
        with connect_sse(self.client, method, target, **kwargs) as source:
            yield from source.iter_sse()
def mount_playground(app: Lanarky) -> Lanarky:
    """Build the Gradio retrieval-QA playground and mount it at the app root ("/")."""
    blocks = gr.Blocks(
        title="paulGPT",
        theme=gr.themes.Default(
            primary_hue=gr.themes.colors.teal, secondary_hue=gr.themes.colors.teal
        ),
        css="footer {visibility: hidden}",
    )
    with blocks:
        # force a light background once the page loads
        blocks.load(
            None,
            None,
            js="""
        () => {
            document.body.className = "white";
        }""",
        )
        gr.HTML(
            """<div align="center"><img src="https://lanarky.ajndkr.com/assets/logo-light-mode.png" width="350"></div>"""
        )
        chatbot = gr.Chatbot(height=500, show_label=False)
        with gr.Row():
            user_input = gr.Textbox(
                show_label=False, placeholder="Type a message...", scale=5
            )
            clear_btn = gr.Button("Clear")
        def chat(history):
            # stream completion tokens and source documents into the last chat entry
            history[-1][1] = ""
            for event in StreamingClient().stream_response(
                "POST", "/chat", json={"query": history[-1][0]}
            ):
                if event.event == "completion":
                    history[-1][1] += json.loads(event.data)["token"]
                    yield history
                elif event.event == "source_documents":
                    for idx, document in enumerate(
                        json.loads(event.data)["source_documents"]
                    ):
                        history[-1][1] += SOURCE_DOCUMENT_TEMPLATE.format(
                            idx=idx,
                            page_content=document["page_content"],
                        )
                    yield history
                elif event.event == "error":
                    raise gr.Error(event.data)
        # submit clears the textbox, appends the user turn, then streams the reply
        user_input.submit(
            lambda user_input, chatbot: ("", chatbot + [[user_input, None]]),
            [user_input, chatbot],
            [user_input, chatbot],
            queue=False,
        ).then(chat, chatbot, chatbot)
        clear_btn.click(lambda: None, None, chatbot, queue=False)
    return gr.mount_gradio_app(app, blocks.queue(), "/")
13,754 | import gradio as gr
from lanarky import Lanarky
from lanarky.adapters.openai.resources import ChatCompletionResource
from lanarky.adapters.openai.routing import OpenAIAPIRouter
from lanarky.clients import StreamingClient
def chat(system: str = "You are a sassy assistant") -> ChatCompletionResource:
    """Factory for a streaming chat-completion resource with the given system prompt."""
    return ChatCompletionResource(system=system, stream=True)
class StreamingClient:
    """Test client for streaming server-sent events."""
    def __init__(
        self,
        base_url: str = "http://localhost:8000",
        client: Optional[httpx.Client] = None,
    ):
        """Constructor method.
        Args:
            base_url: The base URL of the server.
            client: The HTTP client to use.
        """
        self.base_url = base_url
        # fall back to a fresh httpx client when none is supplied
        self.client = client or httpx.Client()
    def stream_response(
        self, method: str, path: str, **kwargs: dict[str, Any]
    ) -> Iterator[ServerSentEvent]:
        """Stream data from the server.
        Args:
            method: The HTTP method to use.
            path: The path to stream from.
            **kwargs: The keyword arguments to pass to the HTTP client.
        """
        url = self.base_url + path
        # lazily yield each server-sent event as it arrives
        with connect_sse(self.client, method, url, **kwargs) as event_source:
            for sse in event_source.iter_sse():
                yield sse
def mount_playground(app: Lanarky) -> Lanarky:
    """Build the ChatGPT-clone Gradio UI and mount it at the app root ("/")."""
    blocks = gr.Blocks(
        title="ChatGPT-clone",
        theme=gr.themes.Default(
            primary_hue=gr.themes.colors.teal,
            secondary_hue=gr.themes.colors.teal,
            text_size=gr.themes.sizes.text_lg,
        ),
        css="footer {visibility: hidden}",
    )
    with blocks:
        # force a light background once the page loads
        blocks.load(
            None,
            None,
            js="""
        () => {
            document.body.className = "white";
        }""",
        )
        gr.HTML(
            """<div align="center"><img src="https://lanarky.ajndkr.com/assets/logo-light-mode.png" width="350"></div>"""
        )
        system_message = gr.Textbox(
            value="You are a sassy assistant", label="System Prompt"
        )
        chatbot = gr.Chatbot(height=500, show_label=False)
        with gr.Row():
            user_input = gr.Textbox(
                show_label=False, placeholder="Type a message...", scale=5
            )
            clear_btn = gr.Button("Clear")
        def chat(history, system):
            # rebuild the OpenAI-style message list from the chat history
            messages = []
            for human, assistant in history:
                if human:
                    messages.append({"role": "user", "content": human})
                if assistant:
                    messages.append({"role": "assistant", "content": assistant})
            history[-1][1] = ""
            # stream completion tokens into the last chat entry
            for event in StreamingClient().stream_response(
                "POST", "/chat", json={"messages": messages}, params={"system": system}
            ):
                history[-1][1] += event.data
                yield history
        # submit clears the textbox, appends the user turn, then streams the reply
        user_input.submit(
            lambda user_input, chatbot: ("", chatbot + [[user_input, None]]),
            [user_input, chatbot],
            [user_input, chatbot],
            queue=False,
        ).then(chat, [chatbot, system_message], chatbot)
        clear_btn.click(lambda: None, None, chatbot, queue=False)
    return gr.mount_gradio_app(app, blocks.queue(), "/")
13,755 | import sys
from fastapi import FastAPI
# Python version string of the running interpreter, e.g. "3.11".
version = f"{sys.version_info.major}.{sys.version_info.minor}"
async def read_root():
    """Return a greeting that reports the running Python version."""
    return {
        "message": (
            f"Hello world! From FastAPI running on Uvicorn with Gunicorn."
            f" Using Python {version}"
        )
    }
13,756 | import os
import subprocess
import sys
build_push = os.environ.get("BUILD_PUSH")
def process_tag(*, env: dict):
    """Run the test (or build-and-push) script with tag-specific env vars.

    Args:
        env: environment variable overrides for the subprocess.

    Exits the interpreter with the script's return code on failure.
    """
    # overlay the tag-specific variables on the current environment
    use_env = {**os.environ, **env}
    script = "scripts/test.sh"
    # BUILD_PUSH selects the build-and-push script instead of the test script
    if build_push:
        script = "scripts/build-push.sh"
    return_code = subprocess.call(["bash", script], env=use_env)
    if return_code != 0:
        sys.exit(return_code)
13,757 | import os
import subprocess
import sys
# One entry per docker image tag: the tag name and the Python version it uses.
environments = [
    {"NAME": "latest", "PYTHON_VERSION": "3.11"},
    {"NAME": "python3.11", "PYTHON_VERSION": "3.11"},
    {"NAME": "python3.10", "PYTHON_VERSION": "3.10"},
    {"NAME": "python3.9", "PYTHON_VERSION": "3.9"},
    {"NAME": "python3.8", "PYTHON_VERSION": "3.8"},
    {"NAME": "python3.7", "PYTHON_VERSION": "3.7"},
    {"NAME": "python3.11-slim", "PYTHON_VERSION": "3.11"},
    {"NAME": "python3.10-slim", "PYTHON_VERSION": "3.10"},
    {"NAME": "python3.9-slim", "PYTHON_VERSION": "3.9"},
    {"NAME": "python3.8-slim", "PYTHON_VERSION": "3.8"},
]
def print_version_envs():
    """Print one `KEY='value'` line per image build environment."""
    for env in environments:
        print(" ".join(f"{key}='{value}'" for key, value in env.items()))
13,758 | import torch
import traceback
import socket
import json
from scene.cameras import MiniCam
# Network-GUI connection defaults; overwritten by `init`.
host = "127.0.0.1"
port = 6009
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def init(wish_host, wish_port):
    """Bind the module-level listener socket and start listening.

    The socket is made non-blocking (timeout 0) so `try_connect` can poll
    for clients without stalling the training loop.
    """
    global host, port, listener
    host = wish_host
    port = wish_port
    listener.bind((host, port))
    listener.listen()
    listener.settimeout(0)
13,759 | import torch
import traceback
import socket
import json
from scene.cameras import MiniCam
# Active GUI connection and peer address; set by `try_connect`.
conn = None
addr = None
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def try_connect():
    """Poll the non-blocking listener for a pending GUI connection.

    On success the accepted connection is stored in the module-level `conn`
    and switched back to blocking mode.
    """
    global conn, addr, listener
    try:
        conn, addr = listener.accept()
        print(f"\nConnected by {addr}")
        conn.settimeout(None)
    except Exception as inst:
        # best-effort poll: accept on a zero-timeout socket raises when
        # no client is waiting, which is the normal idle case here
        pass
13,760 | import torch
import traceback
import socket
import json
from scene.cameras import MiniCam
conn = None
def send(message_bytes, verify):
    """Send an optional payload followed by a length-prefixed verify string.

    Args:
        message_bytes: raw bytes to send first, or None to skip the payload.
        verify: ASCII string sent as a 4-byte little-endian length + body.
    """
    global conn
    # `is not None` (identity check) instead of `!= None`; also keeps empty
    # byte strings distinguishable from "no payload"
    if message_bytes is not None:
        conn.sendall(message_bytes)
    conn.sendall(len(verify).to_bytes(4, 'little'))
    conn.sendall(bytes(verify, 'ascii'))
13,761 | import torch
import traceback
import socket
import json
from scene.cameras import MiniCam
def read():
    """Read one length-prefixed JSON message from the GUI connection.

    The wire format is a 4-byte little-endian length header followed by a
    UTF-8 JSON payload. `socket.recv(n)` may legally return fewer than `n`
    bytes, so both reads loop until the expected byte count has arrived
    (the original single-recv version could truncate large messages).

    Raises:
        ConnectionError: if the peer closes the connection mid-message.
    """
    global conn

    def _recv_exact(num_bytes):
        # accumulate chunks until exactly `num_bytes` have been received
        buffer = b""
        while len(buffer) < num_bytes:
            chunk = conn.recv(num_bytes - len(buffer))
            if not chunk:
                raise ConnectionError("connection closed while reading message")
            buffer += chunk
        return buffer

    messageLength = int.from_bytes(_recv_exact(4), 'little')
    message = _recv_exact(messageLength)
    return json.loads(message.decode("utf-8"))
class MiniCam:
    """Lightweight camera record holding the parameters needed for rendering."""

    def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform):
        """Store resolution, field of view, clip planes and the view/projection matrices."""
        # image resolution in pixels
        self.image_width = width
        self.image_height = height
        # vertical / horizontal field of view
        self.FoVy = fovy
        self.FoVx = fovx
        # near / far clipping planes
        self.znear = znear
        self.zfar = zfar
        self.world_view_transform = world_view_transform
        self.full_proj_transform = full_proj_transform
        # camera center = translation row of the inverted view matrix
        inverse_view = torch.inverse(self.world_view_transform)
        self.camera_center = inverse_view[3][:3]
def receive():
    """Read one GUI message and convert it into render inputs.

    Returns a 6-tuple (custom_cam, do_training, do_shs_python,
    do_rot_scale_python, keep_alive, scaling_modifier); all elements are
    None when the client sent a zero resolution (no render requested).
    """
    message = read()
    width = message["resolution_x"]
    height = message["resolution_y"]
    if width != 0 and height != 0:
        try:
            do_training = bool(message["train"])
            fovy = message["fov_y"]
            fovx = message["fov_x"]
            znear = message["z_near"]
            zfar = message["z_far"]
            do_shs_python = bool(message["shs_python"])
            do_rot_scale_python = bool(message["rot_scale_python"])
            keep_alive = bool(message["keep_alive"])
            scaling_modifier = message["scaling_modifier"]
            # negate the Y and Z columns of the view matrix (axis-convention
            # flip between the viewer and the renderer)
            world_view_transform = torch.reshape(torch.tensor(message["view_matrix"]), (4, 4)).cuda()
            world_view_transform[:,1] = -world_view_transform[:,1]
            world_view_transform[:,2] = -world_view_transform[:,2]
            # only the Y column is negated for the projection matrix
            full_proj_transform = torch.reshape(torch.tensor(message["view_projection_matrix"]), (4, 4)).cuda()
            full_proj_transform[:,1] = -full_proj_transform[:,1]
            custom_cam = MiniCam(width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform)
        except Exception as e:
            # surface malformed messages with a full traceback, then re-raise
            print("")
            traceback.print_exc()
            raise e
        return custom_cam, do_training, do_shs_python, do_rot_scale_python, keep_alive, scaling_modifier
    else:
        return None, None, None, None, None, None
13,762 | import numpy as np
import collections
import struct
def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a quaternion ``(w, x, y, z)``.

    Builds the symmetric 4x4 matrix K from the rotation entries and takes
    the eigenvector of its largest eigenvalue; the sign is normalized so
    that the scalar component w is non-negative.
    """
    Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
    # only the lower triangle matters: np.linalg.eigh reads UPLO='L' by default
    K = np.array([
        [Rxx - Ryy - Rzz, 0, 0, 0],
        [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
        [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
        [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
    eigenvalues, eigenvectors = np.linalg.eigh(K)
    # pick the dominant eigenvector and reorder (x, y, z, w) -> (w, x, y, z)
    quat = eigenvectors[[3, 0, 1, 2], np.argmax(eigenvalues)]
    if quat[0] < 0:
        quat = -quat
    return quat
13,763 | import numpy as np
import collections
import struct
The provided code snippet includes necessary dependencies for implementing the `read_colmap_bin_array` function. Write a Python function `def read_colmap_bin_array(path)` to solve the following problem:
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py :param path: path to the colmap binary file. :return: nd array with the floating point values stored in the file
Here is the function:
def read_colmap_bin_array(path):
    """
    Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py

    :param path: path to the colmap binary file.
    :return: nd array with the floating point values stored in the file.
    :raises ValueError: if the header is malformed (fewer than three
        '&'-terminated fields); the original version looped forever here.
    """
    with open(path, "rb") as fid:
        # header is "width&height&channels&" followed by raw float32 data
        width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
                                                usecols=(0, 1, 2), dtype=int)
        fid.seek(0)
        num_delimiter = 0
        byte = fid.read(1)
        while True:
            if byte == b"&":
                num_delimiter += 1
                if num_delimiter >= 3:
                    break
            byte = fid.read(1)
            if not byte:
                # EOF before the third '&': truncated or invalid header
                raise ValueError(f"invalid COLMAP header in {path}")
        array = np.fromfile(fid, np.float32)
    # stored column-major (Fortran order); transpose to (height, width, channels)
    array = array.reshape((width, height, channels), order="F")
    return np.transpose(array, (1, 0, 2)).squeeze()
13,764 | import os
import sys
import json
from typing import NamedTuple
from pathlib import Path
import imageio
import torch
import numpy as np
from PIL import Image
from plyfile import PlyData, PlyElement
from scene.gaussian_model import BasicPointCloud
from scene.cameras import MiniCam, Camera
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics import getWorld2View2, focal2fov, fov2focal
from utils.graphics import getProjectionMatrix
from utils.trajectory import get_camerapaths
from utils.sh import SH2RGB
class SceneInfo(NamedTuple):
    """Immutable bundle describing a loaded scene: point cloud, camera splits, normalization and source PLY path."""
    point_cloud: BasicPointCloud
    train_cameras: list
    test_cameras: list
    preset_cameras: list
    # output of getNerfppNorm: {"translate": ..., "radius": ...}
    nerf_normalization: dict
    ply_path: str
def getNerfppNorm(cam_info):
    """Compute the NeRF++-style scene normalization for a set of cameras.

    Returns a dict with "translate" (moves the average camera center to the
    origin) and "radius" (max camera distance from that center, padded 10%).
    """
    # gather every camera center (world space) as a 3x1 column
    centers = []
    for cam in cam_info:
        W2C = getWorld2View2(cam.R, cam.T)
        C2W = np.linalg.inv(W2C)
        centers.append(C2W[:3, 3:4])
    stacked = np.hstack(centers)
    center = np.mean(stacked, axis=1, keepdims=True)
    distances = np.linalg.norm(stacked - center, axis=0, keepdims=True)
    diagonal = np.max(distances)
    return {"translate": -center.flatten(), "radius": diagonal * 1.1}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
    """Convert COLMAP extrinsics/intrinsics into CameraInfo records.

    Only SIMPLE_PINHOLE and PINHOLE camera models are supported; the image
    for each camera is loaded from `images_folder` by the basename stored
    in the extrinsics record.
    """
    cam_infos = []
    for idx, key in enumerate(cam_extrinsics):
        # single-line console progress indicator
        sys.stdout.write('\r')
        # the exact output you're looking for:
        sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
        sys.stdout.flush()
        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width
        uid = intr.id
        # COLMAP stores a world-to-camera quaternion; transpose the rotation
        R = np.transpose(qvec2rotmat(extr.qvec))
        T = np.array(extr.tvec)
        if intr.model=="SIMPLE_PINHOLE":
            # a single focal length is shared by both axes
            focal_length_x = intr.params[0]
            FovY = focal2fov(focal_length_x, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model=="PINHOLE":
            focal_length_x = intr.params[0]
            focal_length_y = intr.params[1]
            FovY = focal2fov(focal_length_y, height)
            FovX = focal2fov(focal_length_x, width)
        else:
            assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
        image_path = os.path.join(images_folder, os.path.basename(extr.name))
        image_name = os.path.basename(image_path).split(".")[0]
        image = Image.open(image_path)
        # NOTE(review): CameraInfo is not defined in this excerpt —
        # presumably a NamedTuple defined alongside SceneInfo; confirm.
        cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                              image_path=image_path, image_name=image_name, width=width, height=height)
        cam_infos.append(cam_info)
    sys.stdout.write('\n')
    return cam_infos
def fetchPly(path):
    """Load a PLY point cloud, subsampling to at most 100k random points.

    Returns a BasicPointCloud whose colors/normals are None when the
    corresponding PLY properties are missing.
    """
    plydata = PlyData.read(path)
    vertices = plydata['vertex']
    # FIX: the original indexed vertices['x'] (to build `idx`) before its
    # own `'x' in vertices` guard, crashing on PLYs without positions.
    if 'x' not in vertices:
        return BasicPointCloud(points=None, colors=None, normals=None)
    idx = np.random.choice(len(vertices['x']),size=(min(len(vertices['x']), 100_000),),replace=False)
    positions = np.vstack([vertices['x'][idx], vertices['y'][idx], vertices['z'][idx]]).T
    colors = np.vstack([vertices['red'][idx], vertices['green'][idx], vertices['blue'][idx]]).T / 255.0 if 'red' in vertices else None
    normals = np.vstack([vertices['nx'][idx], vertices['ny'][idx], vertices['nz'][idx]]).T if 'nx' in vertices else None
    return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
    """Write Nx3 positions and Nx3 uint8 colors to a PLY file.

    Normals are written as zeros since none are available.
    """
    # structured dtype: position, normal, color per vertex
    dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
             ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
             ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
    attributes = np.concatenate((xyz, np.zeros_like(xyz), rgb), axis=1)
    elements = np.empty(xyz.shape[0], dtype=dtype)
    elements[:] = list(map(tuple, attributes))
    # wrap the structured array in a PLY element and write it out
    vertex_element = PlyElement.describe(elements, 'vertex')
    PlyData([vertex_element]).write(path)
def readCamerasFromPreset(path, transformsfile):
    """Load preset cameras from a NeRF-style transforms JSON file.

    Each frame's camera-to-world matrix is converted to COLMAP's
    world-to-camera convention. Preset cameras have no captured image, so a
    black 512x512 placeholder is attached.
    """
    cam_infos = []

    with open(os.path.join(path, transformsfile)) as json_file:
        contents = json.load(json_file)
        # widen the preset FOV by 20%
        FOV = contents["camera_angle_x"]*1.2
        frames = contents["frames"]
        for idx, frame in enumerate(frames):
            # NeRF 'transform_matrix' is a camera-to-world transform
            c2w = np.array(frame["transform_matrix"])
            # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
            c2w[:3, 1:3] *= -1
            # get the world-to-camera transform and set R, T
            w2c = np.linalg.inv(np.concatenate((c2w, np.array([0,0,0,1]).reshape(1,4)), axis=0))
            R = np.transpose(w2c[:3,:3])  # R is stored transposed due to 'glm' in CUDA code
            T = w2c[:3, 3]
            # FIX: Image.fromarray(np.zeros((512,512)), "RGB") passed a 2-D
            # float array as RGB data; build an explicit black 3-channel
            # uint8 image instead.
            image = Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8), "RGB")
            FovY = focal2fov(fov2focal(FOV, 512), image.size[0])
            FovX = focal2fov(fov2focal(FOV, 512), image.size[1])
            cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                            image_path='None', image_name='None', width=image.size[1], height=image.size[0]))
    return cam_infos
def read_points3D_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)

    Parses a COLMAP points3D.txt file and returns (xyzs, rgbs, errors) as
    float arrays of shape (N, 3), (N, 3) and (N, 1).
    """
    # single pass: collect parsed rows, then pack them into float arrays
    xyz_rows = []
    rgb_rows = []
    error_rows = []
    with open(path, "r") as fid:
        for raw_line in fid:
            line = raw_line.strip()
            # skip blank lines and '#' comments
            if not line or line.startswith("#"):
                continue
            elems = line.split()
            # columns: POINT3D_ID X Y Z R G B ERROR TRACK...
            xyz_rows.append(tuple(map(float, elems[1:4])))
            rgb_rows.append(tuple(map(int, elems[4:7])))
            error_rows.append(float(elems[7]))
    num_points = len(xyz_rows)
    # fill float64 buffers so rgbs/errors keep the original dtype
    xyzs = np.empty((num_points, 3))
    rgbs = np.empty((num_points, 3))
    errors = np.empty((num_points, 1))
    for count in range(num_points):
        xyzs[count] = np.array(xyz_rows[count])
        rgbs[count] = np.array(rgb_rows[count])
        errors[count] = error_rows[count]
    return xyzs, rgbs, errors
def read_points3D_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)

    Returns (xyzs, rgbs, errors) arrays of shape (N, 3), (N, 3), (N, 1).
    """
    with open(path_to_model_file, "rb") as fid:
        num_points = read_next_bytes(fid, 8, "Q")[0]
        xyzs = np.empty((num_points, 3))
        rgbs = np.empty((num_points, 3))
        errors = np.empty((num_points, 1))
        for p_id in range(num_points):
            # fixed-size record: id, x, y, z, r, g, b, error
            binary_point_line_properties = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            # track data is read to advance the stream but is not returned
            track_length = read_next_bytes(
                fid, num_bytes=8, format_char_sequence="Q")[0]
            track_elems = read_next_bytes(
                fid, num_bytes=8*track_length,
                format_char_sequence="ii"*track_length)
            xyzs[p_id] = xyz
            rgbs[p_id] = rgb
            errors[p_id] = error
    return xyzs, rgbs, errors
def read_intrinsics_text(path):
    """
    Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py

    Parses a COLMAP cameras.txt file into a dict mapping camera_id -> Camera.
    """
    cameras = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                camera_id = int(elems[0])
                model = elems[1]
                assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE"
                width = int(elems[2])
                height = int(elems[3])
                # remaining columns are the model parameters (focal lengths etc.)
                params = np.array(tuple(map(float, elems[4:])))
                cameras[camera_id] = Camera(id=camera_id, model=model,
                                            width=width, height=height,
                                            params=params)
    return cameras
def read_extrinsics_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)

    Returns a dict mapping image_id -> Image (the COLMAP image record
    namedtuple, not PIL.Image).
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            # fixed-size record: id, quaternion (4d), translation (3d), camera id
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            # the image name is a NUL-terminated string
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":   # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            # 2D keypoints stored as (x, y, point3D_id) triples
            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                       format_char_sequence="ddq"*num_points2D)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    return images
def read_intrinsics_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)

    Returns a dict mapping camera_id -> Camera.
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_cameras):
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            # CAMERA_MODEL_IDS maps the numeric model id to its name/param count
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8*num_params,
                                     format_char_sequence="d"*num_params)
            cameras[camera_id] = Camera(id=camera_id,
                                        model=model_name,
                                        width=width,
                                        height=height,
                                        params=np.array(params))
        # sanity check: every declared camera was parsed
        assert len(cameras) == num_cameras
    return cameras
def read_extrinsics_text(path):
    """
    Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py

    Parses a COLMAP images.txt file into a dict mapping image_id -> Image
    (the COLMAP image record namedtuple, not PIL.Image).
    """
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                image_id = int(elems[0])
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                # the following line holds the 2D points as (x, y, point3D_id) triples
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])),
                                       tuple(map(float, elems[1::3]))])
                point3D_ids = np.array(tuple(map(int, elems[2::3])))
                images[image_id] = Image(
                    id=image_id, qvec=qvec, tvec=tvec,
                    camera_id=camera_id, name=image_name,
                    xys=xys, point3D_ids=point3D_ids)
    return images
def readColmapSceneInfo(path, images, eval, preset=None, llffhold=8):
    """Load a COLMAP reconstruction into a SceneInfo.

    Args:
        path: scene root containing `sparse/0` and the image folder.
        images: image folder name, or None for the default "images".
        eval: when True, split cameras into train/test sets.
        preset: optional name of a preset camera-path JSON to load.
        llffhold: legacy LLFF hold-out interval (unused by the current split).
    """
    # prefer the binary reconstruction, fall back to the text format;
    # the bare `except:` clauses were narrowed to `except Exception:` so
    # KeyboardInterrupt/SystemExit are no longer swallowed
    try:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
        cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
    except Exception:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
        cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)

    reading_dir = "images" if images is None else images
    cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
    cam_infos = sorted(cam_infos_unsorted.copy(), key=lambda x: x.image_name)

    if eval:
        # 2-of-5 split: indices 0 and 2 of every group of 5 go to train
        train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % 5 == 2 or idx % 5 == 0]
        test_cam_infos = [c for idx, c in enumerate(cam_infos) if not (idx % 5 == 2 or idx % 5 == 0)]
    else:
        train_cam_infos = cam_infos
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "sparse/0/points3D.ply")
    bin_path = os.path.join(path, "sparse/0/points3D.bin")
    txt_path = os.path.join(path, "sparse/0/points3D.txt")
    if not os.path.exists(ply_path):
        print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
        try:
            xyz, rgb, _ = read_points3D_binary(bin_path)
        except Exception:
            xyz, rgb, _ = read_points3D_text(txt_path)
        storePly(ply_path, xyz, rgb)
    try:
        pcd = fetchPly(ply_path)
    except Exception:
        # best effort: scenes can be loaded without a point cloud
        pcd = None

    if preset:
        # NOTE(review): hard-coded absolute path to the preset poses directory
        preset_cam_infos = readCamerasFromPreset('/home/chung/workspace/gaussian-splatting/poses_supplementary', f"{preset}.json")
    else:
        preset_cam_infos = None

    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           preset_cameras=preset_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path)
    return scene_info
13,765 | import os
import sys
import json
from typing import NamedTuple
from pathlib import Path
import imageio
import torch
import numpy as np
from PIL import Image
from plyfile import PlyData, PlyElement
from scene.gaussian_model import BasicPointCloud
from scene.cameras import MiniCam, Camera
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics import getWorld2View2, focal2fov, fov2focal
from utils.graphics import getProjectionMatrix
from utils.trajectory import get_camerapaths
from utils.sh import SH2RGB
class SceneInfo(NamedTuple):
def getNerfppNorm(cam_info):
def fetchPly(path):
def storePly(path, xyz, rgb):
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
def readCamerasFromPreset(path, transformsfile):
def SH2RGB(sh):
def readNerfSyntheticInfo(path, white_background, eval, preset=None, extension=".png"):
    """Load a NeRF-synthetic (Blender) dataset into a SceneInfo.

    Note: the `eval` parameter shadows the builtin; it is kept for interface
    compatibility. When False, test cameras are merged into the train set.
    """
    print("Reading Training Transforms")
    train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
    print("Reading Test Transforms")
    test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
    if preset:
        # NOTE(review): hard-coded absolute path to the preset poses directory
        preset_cam_infos = readCamerasFromPreset('/home/chung/workspace/gaussian-splatting/poses_supplementary', f"{preset}.json")
    else:
        preset_cam_infos = None
    if not eval:
        train_cam_infos.extend(test_cam_infos)
        test_cam_infos = []
    nerf_normalization = getNerfppNorm(train_cam_infos)
    ply_path = os.path.join(path, "points3d.ply")
    if not os.path.exists(ply_path):
        # Since this data set has no colmap data, we start with random points
        num_pts = 100_000
        print(f"Generating random point cloud ({num_pts})...")
        # We create random points inside the bounds of the synthetic Blender scenes
        xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
        shs = np.random.random((num_pts, 3)) / 255.0
        pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
        storePly(ply_path, xyz, SH2RGB(shs) * 255)
    # reload from disk so subsequent runs take the same code path
    try:
        pcd = fetchPly(ply_path)
    except:
        # best effort: fall back to no point cloud when the PLY is unreadable
        pcd = None
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           preset_cameras=preset_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path)
    return scene_info
13,766 | import os
import sys
import json
from typing import NamedTuple
from pathlib import Path
import imageio
import torch
import numpy as np
from PIL import Image
from plyfile import PlyData, PlyElement
from scene.gaussian_model import BasicPointCloud
from scene.cameras import MiniCam, Camera
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics import getWorld2View2, focal2fov, fov2focal
from utils.graphics import getProjectionMatrix
from utils.trajectory import get_camerapaths
from utils.sh import SH2RGB
class SceneInfo(NamedTuple):
def getNerfppNorm(cam_info):
def loadCamerasFromData(traindata, white_background):
def loadCameraPreset(traindata, presetdata):
def get_camerapaths():
def readDataInfo(traindata, white_background):
    """Build a SceneInfo directly from in-memory training data.

    Args:
        traindata: dict with camera/transform data plus 'pcd_points'
            (stored (3, N), transposed here to (N, 3)) and 'pcd_colors'.
        white_background: forwarded to the camera loader (blend onto white).

    Returns:
        SceneInfo with train cameras, preset cameras, scene normalization and
        an in-memory point cloud (no .ply on disk, hence ply_path='').
    """
    print("Reading Training Transforms")

    cams_train = loadCamerasFromData(traindata, white_background)
    cams_preset = loadCameraPreset(traindata, presetdata=get_camerapaths())
    normalization = getNerfppNorm(cams_train)

    # Point cloud comes straight from the training payload; points are stored
    # column-major (3, N) so transpose to (N, 3).
    cloud = BasicPointCloud(
        points=traindata['pcd_points'].T,
        colors=traindata['pcd_colors'],
        normals=None,
    )

    return SceneInfo(
        point_cloud=cloud,
        train_cameras=cams_train,
        test_cameras=[],
        preset_cameras=cams_preset,
        nerf_normalization=normalization,
        ply_path='',
    )
13,767 | from zoedepth.utils.misc import count_parameters, parallelize
from zoedepth.utils.config import get_config
from zoedepth.utils.arg_utils import parse_unknown
from zoedepth.trainers.builder import get_trainer
from zoedepth.models.builder import build_model
from zoedepth.data.data_mono import MixedNYUKITTI
import torch.utils.data.distributed
import torch.multiprocessing as mp
import torch
import numpy as np
from pprint import pprint
import argparse
import os
def fix_random_seed(seed: int):
def load_ckpt(config, model, checkpoint_dir="./checkpoints", ckpt_type="best"):
def count_parameters(model, include_all=False):
def parallelize(config, model, find_unused_parameters=True):
def get_trainer(config):
def build_model(config) -> DepthModel:
class MixedNYUKITTI(object):
def __init__(self, config, mode, device='cpu', **kwargs):
def main_worker(gpu, ngpus_per_node, config):
    """Per-process training entry point (one worker per GPU).

    Closes the wandb run on exit, even when setup or training raises.
    """
    try:
        fix_random_seed(43)  # fixed seed for reproducible runs

        config.gpu = gpu
        net = parallelize(config, load_ckpt(config, build_model(config)))

        params_m = f"{round(count_parameters(net)/1e6,2)}M"
        config.total_params = params_m
        print(f"Total parameters : {params_m}")

        loader_train = MixedNYUKITTI(config, "train").data
        loader_eval = MixedNYUKITTI(config, "online_eval").data

        trainer = get_trainer(config)(
            config, net, loader_train, loader_eval, device=config.gpu)
        trainer.train()
    finally:
        import wandb
        wandb.finish()
13,768 | import gradio as gr
from zoedepth.utils.misc import colorize
from PIL import Image
import tempfile
def predict_depth(model, image):
    """Run the model's PIL inference path and return the raw depth map."""
    return model.infer_pil(image)
def colorize(value, vmin=None, vmax=None, cmap='gray_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
    """Converts a depth map to a color image.

    Args:
        value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
        vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, the 2nd percentile of the valid values is used. Defaults to None.
        vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, the 85th percentile of the valid values is used. Defaults to None.
        cmap (str, optional): matplotlib colormap to use. Defaults to 'gray_r'.
        invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
        invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
        background_color (tuple[int], optional): 4-tuple RGB color to give to invalid pixels. Defaults to (128, 128, 128, 255).
        gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
        value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.

    Returns:
        numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
    """
    if isinstance(value, torch.Tensor):
        value = value.detach().cpu().numpy()

    value = value.squeeze()
    if invalid_mask is None:
        invalid_mask = value == invalid_val
    mask = np.logical_not(invalid_mask)

    # normalize to [0, 1] using robust percentiles of the valid pixels only
    # NOTE(review): assumes at least one valid pixel — value[mask] is empty otherwise
    vmin = np.percentile(value[mask],2) if vmin is None else vmin
    vmax = np.percentile(value[mask],85) if vmax is None else vmax
    if vmin != vmax:
        value = (value - vmin) / (vmax - vmin)  # vmin..vmax
    else:
        # Avoid 0-division
        value = value * 0.

    # grey out the invalid values: set NaN so the colormap's bad-value path applies
    # (presumably value is float here — TODO confirm; an int array would reject NaN)
    value[invalid_mask] = np.nan
    cmapper = matplotlib.cm.get_cmap(cmap)
    if value_transform:
        value = value_transform(value)
        # value = value / value.max()
    value = cmapper(value, bytes=True)  # (nxmx4)

    # img = value[:, :, :]
    img = value[...]
    # paint invalid pixels with the background color (overrides the colormap output)
    img[invalid_mask] = background_color

    # return img.transpose((2, 0, 1))
    if gamma_corrected:
        # gamma correction
        img = img / 255
        img = np.power(img, 2.2)
        img = img * 255
        img = img.astype(np.uint8)
    return img
def create_demo(model):
    """Build the Gradio "Depth Prediction" tab and wire up its callback.

    Args:
        model: depth model consumed by predict_depth() (exposes infer_pil).
    """
    gr.Markdown("### Depth Prediction demo")
    with gr.Row():
        # NOTE(review): .style(...) is a legacy Gradio API — confirm against the
        # gradio version this project pins.
        input_image = gr.Image(label="Input Image", type='pil', elem_id='img-display-input').style(height="auto")
        depth_image = gr.Image(label="Depth Map", elem_id='img-display-output')
    raw_file = gr.File(label="16-bit raw depth, multiplier:256")
    submit = gr.Button("Submit")

    def on_submit(image):
        # Predict, colorize for display, and persist a 16-bit PNG (depth*256)
        # to a temp file offered for download.
        depth = predict_depth(model, image)
        colored_depth = colorize(depth, cmap='gray_r')
        tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        raw_depth = Image.fromarray((depth*256).astype('uint16'))
        raw_depth.save(tmp.name)
        return [colored_depth, tmp.name]

    submit.click(on_submit, inputs=[input_image], outputs=[depth_image, raw_file])
    # examples = gr.Examples(examples=["examples/person_1.jpeg", "examples/person_2.jpeg", "examples/person-leaves.png", "examples/living-room.jpeg"],
    #                        inputs=[input_image])
13,769 | import gradio as gr
import numpy as np
import trimesh
from zoedepth.utils.geometry import depth_to_points, create_triangles
from functools import partial
import tempfile
def get_mesh(model, image, keep_edges=False):
    """Predict depth for `image` and export a colored 3D mesh as a .glb file.

    Returns the path of a temporary .glb file (caller owns cleanup). When
    keep_edges is False, faces crossing strong depth discontinuities are
    masked out to avoid stretched triangles across occlusion boundaries.
    """
    image.thumbnail((1024, 1024))  # cap resolution to keep the mesh small

    depth = predict_depth(model, image)
    verts = depth_to_points(depth[None]).reshape(-1, 3)

    # Pixel grid connectivity: each pixel links to its 4 neighbors; colors are
    # the image RGB values.
    rgb = np.array(image)
    rows, cols = rgb.shape[0], rgb.shape[1]
    if keep_edges:
        faces = create_triangles(rows, cols)
    else:
        faces = create_triangles(rows, cols, mask=~depth_edges_mask(depth))

    mesh = trimesh.Trimesh(vertices=verts, faces=faces,
                           vertex_colors=rgb.reshape(-1, 3))

    # Save as glb
    out = tempfile.NamedTemporaryFile(suffix='.glb', delete=False)
    mesh.export(out.name)
    return out.name
def create_demo(model):
    """Build the Gradio "Image to 3D mesh" tab and wire the submit button.

    Args:
        model: depth model bound into get_mesh via functools.partial.
    """
    gr.Markdown("### Image to 3D mesh")
    gr.Markdown("Convert a single 2D image to a 3D mesh")

    with gr.Row():
        image = gr.Image(label="Input Image", type='pil')
        result = gr.Model3D(label="3d mesh reconstruction", clear_color=[
                                 1.0, 1.0, 1.0, 1.0])

    checkbox = gr.Checkbox(label="Keep occlusion edges", value=False)
    submit = gr.Button("Submit")
    # partial binds the model; Gradio supplies (image, keep_edges) from the UI.
    submit.click(partial(get_mesh, model), inputs=[image, checkbox], outputs=[result])
    # examples = gr.Examples(examples=["examples/aerial_beach.jpeg", "examples/mountains.jpeg", "examples/person_1.jpeg", "examples/ancient-carved.jpeg"],
    #                        inputs=[image])
13,770 | import gradio as gr
import numpy as np
import trimesh
from zoedepth.utils.geometry import create_triangles
from functools import partial
import tempfile
def get_mesh(model, image, keep_edges=False):
    """Estimate panorama depth and export a spherical-projection mesh (.glb).

    The predicted depth is treated as a per-pixel radius. Returns the path of
    a temporary .glb file (caller owns cleanup).
    """
    image.thumbnail((1024, 1024))  # cap input resolution

    depth = predict_depth(model, image)
    world_pts = pano_depth_to_world_points(depth)
    verts = world_pts.reshape(-1, 3)

    # Pixel grid connectivity: each pixel links to its 4 neighbors; colors are
    # the image RGB values.
    rgb = np.array(image)
    rows, cols = rgb.shape[0], rgb.shape[1]
    faces = (create_triangles(rows, cols) if keep_edges
             else create_triangles(rows, cols, mask=~depth_edges_mask(depth)))

    mesh = trimesh.Trimesh(vertices=verts, faces=faces,
                           vertex_colors=rgb.reshape(-1, 3))

    # Save as glb
    tmp = tempfile.NamedTemporaryFile(suffix='.glb', delete=False)
    mesh.export(tmp.name)
    return tmp.name
def create_demo(model):
    """Build the Gradio "Panorama to 3D mesh" tab and wire the submit button.

    Args:
        model: depth model bound into get_mesh via functools.partial.
    """
    gr.Markdown("### Panorama to 3D mesh")
    gr.Markdown("Convert a 360 spherical panorama to a 3D mesh")
    gr.Markdown("ZoeDepth was not trained on panoramic images. It doesn't know anything about panoramas or spherical projection. Here, we just treat the estimated depth as radius and some projection errors are expected. Nonetheless, ZoeDepth still works surprisingly well on 360 reconstruction.")

    with gr.Row():
        input_image = gr.Image(label="Input Image", type='pil')
        result = gr.Model3D(label="3d mesh reconstruction", clear_color=[
                                 1.0, 1.0, 1.0, 1.0])

    # Edges kept by default: panoramas have fewer hard occlusion boundaries.
    checkbox = gr.Checkbox(label="Keep occlusion edges", value=True)
    submit = gr.Button("Submit")
    submit.click(partial(get_mesh, model), inputs=[input_image, checkbox], outputs=[result])
    # examples = gr.Examples(examples=["examples/pano_1.jpeg", "examples/pano_2.jpeg", "examples/pano_3.jpeg"],
    #                        inputs=[input_image])
13,771 | from zoedepth.utils.misc import count_parameters, parallelize
from zoedepth.utils.config import get_config
from zoedepth.utils.arg_utils import parse_unknown
from zoedepth.trainers.builder import get_trainer
from zoedepth.models.builder import build_model
from zoedepth.data.data_mono import DepthDataLoader
import torch.utils.data.distributed
import torch.multiprocessing as mp
import torch
import numpy as np
from pprint import pprint
import argparse
import os
def fix_random_seed(seed: int):
    """Seed every RNG in use (random, numpy, torch CPU/CUDA) for reproducibility.

    Args:
        seed: the seed applied to all generators.
    """
    import random

    import numpy
    import torch

    random.seed(seed)
    numpy.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Force deterministic cuDNN kernels. benchmark autotuning is disabled
    # because it may select different algorithms from run to run, which
    # defeats the fixed seed (the original code set benchmark = True, which
    # contradicts deterministic = True).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def load_ckpt(config, model, checkpoint_dir="./checkpoints", ckpt_type="best"):
    """Load model weights selected by the config, if any.

    Resolution order:
      1. config.checkpoint   — explicit path to a checkpoint file.
      2. config.ckpt_pattern — glob '*{pattern}*{ckpt_type}*' under
                               checkpoint_dir; first match in sorted order.
      3. neither attribute   — the model is returned untouched.

    Args:
        config: config object; may define 'checkpoint' or 'ckpt_pattern'.
        model: model whose weights are (possibly) replaced in place.
        checkpoint_dir: directory searched when a pattern is used.
        ckpt_type: checkpoint flavor matched by the glob (e.g. "best").

    Returns:
        The model, with weights loaded when a checkpoint was resolved.

    Raises:
        ValueError: if a pattern is given but matches no file.
    """
    import glob
    import os

    if hasattr(config, "checkpoint"):
        checkpoint = config.checkpoint
    elif hasattr(config, "ckpt_pattern"):
        pattern = config.ckpt_pattern
        matches = glob.glob(os.path.join(
            checkpoint_dir, f"*{pattern}*{ckpt_type}*"))
        if not (len(matches) > 0):
            raise ValueError(f"No matches found for the pattern {pattern}")
        # glob order is filesystem-dependent; sort so the pick is deterministic
        # (the original took matches[0] from the unsorted list).
        checkpoint = sorted(matches)[0]
    else:
        return model

    # Imported lazily so that configs without a checkpoint never pay for (or
    # fail on) the model_io import — the original imported it unconditionally.
    from zoedepth.models.model_io import load_wts
    model = load_wts(model, checkpoint)
    print("Loaded weights from {0}".format(checkpoint))
    return model
def count_parameters(model, include_all=False):
    """Count model parameters; only trainable ones unless include_all=True."""
    total = 0
    for param in model.parameters():
        if include_all or param.requires_grad:
            total += param.numel()
    return total
def parallelize(config, model, find_unused_parameters=True):
    """Place the model on GPU(s): single-GPU, DDP (distributed), or DP.

    Mutates config (multigpu/rank/batch_size/workers) as a side effect.

    Args:
        config: run config; reads gpu, distributed, rank, ngpus_per_node,
            dist_backend, dist_url, world_size, batch_size, num_workers.
        model: the model to move/wrap.
        find_unused_parameters: forwarded to DistributedDataParallel.

    Returns:
        The (possibly wrapped) model.
    """
    if config.gpu is not None:
        # Single explicit device: pin it and move the model there.
        torch.cuda.set_device(config.gpu)
        model = model.cuda(config.gpu)

    config.multigpu = False
    if config.distributed:
        # Use DDP
        config.multigpu = True
        # Global rank = node rank * GPUs-per-node + local GPU index.
        config.rank = config.rank * config.ngpus_per_node + config.gpu

        # NOTE(review): 'dist' and 'nn' are assumed to be module-level imports
        # (torch.distributed / torch.nn) — confirm at the top of the file.
        dist.init_process_group(backend=config.dist_backend, init_method=config.dist_url,
                                world_size=config.world_size, rank=config.rank)
        # Split the global batch size and the data workers across this node's GPUs.
        config.batch_size = int(config.batch_size / config.ngpus_per_node)
        # config.batch_size = 8
        config.workers = int(
            (config.num_workers + config.ngpus_per_node - 1) / config.ngpus_per_node)
        print("Device", config.gpu, "Rank", config.rank, "batch size",
              config.batch_size, "Workers", config.workers)
        torch.cuda.set_device(config.gpu)

        # Convert BatchNorm to SyncBatchNorm so statistics sync across ranks.
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = model.cuda(config.gpu)
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.gpu], output_device=config.gpu,
                                                          find_unused_parameters=find_unused_parameters)

    elif config.gpu is None:
        # Use DP
        config.multigpu = True
        model = model.cuda()
        model = torch.nn.DataParallel(model)

    return model
def get_trainer(config):
    """Resolve the Trainer class named by config.trainer.

    The class is looked up as zoedepth.trainers.{config.trainer}_trainer.Trainer.

    Args:
        config (dict): config dict; config.trainer must be a non-empty string.

    Raises:
        ValueError: if no module matching the trainer name exists.

    Returns:
        Trainer (inherited from zoedepth.trainers.BaseTrainer): the class,
        not an instance.
    """
    assert "trainer" in config and config.trainer is not None and config.trainer != '', "Trainer not specified. Config: {0}".format(
        config)
    module_path = f"zoedepth.trainers.{config.trainer}_trainer"
    try:
        trainer_module = import_module(module_path)
    except ModuleNotFoundError as e:
        raise ValueError(f"Trainer {config.trainer}_trainer not found.") from e
    return getattr(trainer_module, 'Trainer')
def build_model(config) -> DepthModel:
    """Construct the model named by config.model / config.version_name.

    Imports zoedepth.models.{config.model}, obtains the versioned model
    interface via its get_version() hook, and builds from the config. Use
    this to construct models for training and evaluation.

    Args:
        config (dict): config dict (see utils/config.py); must carry 'model'
            and 'version_name'.

    Raises:
        ValueError: unknown model module, or module lacks get_version().

    Returns:
        torch.nn.Module: the constructed model.
    """
    module_name = f"zoedepth.models.{config.model}"
    try:
        model_module = import_module(module_name)
    except ModuleNotFoundError as e:
        # surface the original import failure before re-raising
        print(e)
        raise ValueError(
            f"Model {config.model} not found. Refer above error for details.") from e

    try:
        version_getter = getattr(model_module, "get_version")
    except AttributeError as e:
        raise ValueError(
            f"Model {config.model} has no get_version function.") from e

    return version_getter(config.version_name).build_from_config(config)
class DepthDataLoader(object):
    """Factory that exposes the appropriate torch DataLoader as `self.data`."""

    def __init__(self, config, mode, device='cpu', transform=None, **kwargs):
        """
        Data loader for depth datasets

        Args:
            config (dict): Config dictionary. Refer to utils/config.py
            mode (str): "train", "online_eval" or "test"
            device (str, optional): Device to load the data on. Defaults to 'cpu'.
            transform (torchvision.transforms, optional): Transform to apply to the data. Defaults to None.
            **kwargs: 'shuffle_test' (bool) shuffles the online_eval loader.
        """

        self.config = config

        # Evaluation-only datasets: each branch builds a dedicated loader and
        # returns early (mode and transform are ignored for these).
        if config.dataset == 'ibims':
            self.data = get_ibims_loader(config, batch_size=1, num_workers=1)
            return

        if config.dataset == 'sunrgbd':
            self.data = get_sunrgbd_loader(
                data_dir_root=config.sunrgbd_root, batch_size=1, num_workers=1)
            return

        if config.dataset == 'diml_indoor':
            self.data = get_diml_indoor_loader(
                data_dir_root=config.diml_indoor_root, batch_size=1, num_workers=1)
            return

        if config.dataset == 'diml_outdoor':
            self.data = get_diml_outdoor_loader(
                data_dir_root=config.diml_outdoor_root, batch_size=1, num_workers=1)
            return

        if "diode" in config.dataset:
            # e.g. 'diode_indoor' -> config['diode_indoor_root']
            self.data = get_diode_loader(
                config[config.dataset+"_root"], batch_size=1, num_workers=1)
            return

        if config.dataset == 'hypersim_test':
            self.data = get_hypersim_loader(
                config.hypersim_test_root, batch_size=1, num_workers=1)
            return

        if config.dataset == 'vkitti':
            self.data = get_vkitti_loader(
                config.vkitti_root, batch_size=1, num_workers=1)
            return

        if config.dataset == 'vkitti2':
            self.data = get_vkitti2_loader(
                config.vkitti2_root, batch_size=1, num_workers=1)
            return

        if config.dataset == 'ddad':
            self.data = get_ddad_loader(config.ddad_root, resize_shape=(
                352, 1216), batch_size=1, num_workers=1)
            return

        # Input resizing is applied only when explicitly enabled.
        img_size = self.config.get("img_size", None)
        img_size = img_size if self.config.get(
            "do_input_resize", False) else None

        if transform is None:
            transform = preprocessing_transforms(mode, size=img_size)

        if mode == 'train':

            Dataset = DataLoadPreprocess
            self.training_samples = Dataset(
                config, mode, transform=transform, device=device)

            # DDP: shard the training set across ranks; shuffle only when
            # no sampler is used (sampler and shuffle are mutually exclusive).
            if config.distributed:
                self.train_sampler = torch.utils.data.distributed.DistributedSampler(
                    self.training_samples)
            else:
                self.train_sampler = None

            self.data = DataLoader(self.training_samples,
                                   batch_size=config.batch_size,
                                   shuffle=(self.train_sampler is None),
                                   num_workers=config.workers,
                                   pin_memory=True,
                                   persistent_workers=True,
                                #    prefetch_factor=2,
                                   sampler=self.train_sampler)

        elif mode == 'online_eval':
            self.testing_samples = DataLoadPreprocess(
                config, mode, transform=transform)

            if config.distributed:  # redundant. here only for readability and to be more explicit
                # Give whole test set to all processes (and report evaluation only on one) regardless
                self.eval_sampler = None
            else:
                self.eval_sampler = None

            self.data = DataLoader(self.testing_samples, 1,
                                   shuffle=kwargs.get("shuffle_test", False),
                                   num_workers=1,
                                   pin_memory=False,
                                   sampler=self.eval_sampler)

        elif mode == 'test':
            self.testing_samples = DataLoadPreprocess(
                config, mode, transform=transform)
            self.data = DataLoader(self.testing_samples,
                                   1, shuffle=False, num_workers=1)

        else:
            # Unknown mode: warn but leave self.data unset (attribute access
            # on .data will fail later).
            print(
                'mode should be one of \'train, test, online_eval\'. Got {}'.format(mode))
def main_worker(gpu, ngpus_per_node, config):
    """Per-process training entry point (one worker per GPU).

    Guarantees the wandb run is closed even if setup or training raises.
    """
    try:
        # honor an explicit config seed; fall back to the project default
        fix_random_seed(config.seed if 'seed' in config and config.seed else 43)

        config.gpu = gpu
        net = parallelize(config, load_ckpt(config, build_model(config)))

        params_m = f"{round(count_parameters(net)/1e6,2)}M"
        config.total_params = params_m
        print(f"Total parameters : {params_m}")

        loader_train = DepthDataLoader(config, "train").data
        loader_eval = DepthDataLoader(config, "online_eval").data

        trainer = get_trainer(config)(
            config, net, loader_train, loader_eval, device=config.gpu)
        trainer.train()
    finally:
        import wandb
        wandb.finish()
13,772 | from zoedepth.utils.config import get_config
from zoedepth.models.builder import build_model
import numpy as np
import torch
def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs):
    """Main entry point to get the config for the model.

    Args:
        model_name (str): name of the desired model.
        mode (str, optional): "train", "infer" or "eval". Defaults to 'train'.
        dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None.

    Keyword Args: key-value pairs of arguments to overwrite the default config.

    The order of precedence for overwriting the config is (Higher precedence first):
        # 1. overwrite_kwargs
        # 2. "config_version": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json
        # 3. "version_name": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json
        # 4. common_config: Default config for all models specified in COMMON_CONFIG

    Returns:
        easydict: The config dictionary for the model.
    """
    check_choices("Model", model_name, ["zoedepth", "zoedepth_nk"])
    check_choices("Mode", mode, ["train", "infer", "eval"])
    if mode == "train":
        check_choices("Dataset", dataset, ["nyu", "kitti", "mix", None])

    config = flatten({**COMMON_CONFIG, **COMMON_TRAINING_CONFIG})
    config = update_model_config(config, mode, model_name)

    # update with model version specific config
    version_name = overwrite_kwargs.get("version_name", config["version_name"])
    config = update_model_config(config, mode, model_name, version_name)

    # update with config version if specified
    config_version = overwrite_kwargs.get("config_version", None)
    if config_version is not None:
        print("Overwriting config with config_version", config_version)
        config = update_model_config(config, mode, model_name, config_version)

    # update with overwrite_kwargs
    # Combined args are useful for hyperparameter search
    overwrite_kwargs = split_combined_args(overwrite_kwargs)
    config = {**config, **overwrite_kwargs}

    # Casting to bool # TODO: Not necessary. Remove and test
    for key in KEYS_TYPE_BOOL:
        if key in config:
            config[key] = bool(config[key])

    # Model specific post processing of config
    parse_list(config, "n_attractors")

    # adjust n_bins for each bin configuration if bin_conf is given and n_bins is passed in overwrite_kwargs
    if 'bin_conf' in config and 'n_bins' in overwrite_kwargs:
        bin_conf = config['bin_conf']  # list of dicts
        n_bins = overwrite_kwargs['n_bins']
        new_bin_conf = []
        for conf in bin_conf:
            conf['n_bins'] = n_bins
            new_bin_conf.append(conf)
        config['bin_conf'] = new_bin_conf

    if mode == "train":
        orig_dataset = dataset
        if dataset == "mix":
            dataset = 'nyu'  # Use nyu as default for mix. Dataset config is changed accordingly while loading the dataloader
        if dataset is not None:
            config['project'] = f"MonoDepth3-{orig_dataset}"  # Set project for wandb

    if dataset is not None:
        config['dataset'] = dataset
        # dataset defaults sit below everything already in config
        config = {**DATASETS_CONFIG[dataset], **config}

    config['model'] = model_name
    typed_config = {k: infer_type(v) for k, v in config.items()}

    # add hostname to config
    # BUGFIX: hostname used to be written to `config` *after* typed_config had
    # already been built from it, so it never appeared in the returned dict.
    typed_config['hostname'] = platform.node()
    return edict(typed_config)
def build_model(config) -> DepthModel:
    """Construct the model named by config.model / config.version_name.

    Imports zoedepth.models.{config.model}, obtains the versioned model
    interface via its get_version() hook, and builds from the config. Use
    this to construct models for training and evaluation.

    Args:
        config (dict): config dict (see utils/config.py); must carry 'model'
            and 'version_name'.

    Raises:
        ValueError: unknown model module, or module lacks get_version().

    Returns:
        torch.nn.Module: the constructed model.
    """
    module_name = f"zoedepth.models.{config.model}"
    try:
        model_module = import_module(module_name)
    except ModuleNotFoundError as e:
        # surface the original import failure before re-raising
        print(e)
        raise ValueError(
            f"Model {config.model} not found. Refer above error for details.") from e

    try:
        version_getter = getattr(model_module, "get_version")
    except AttributeError as e:
        raise ValueError(
            f"Model {config.model} has no get_version function.") from e

    return version_getter(config.version_name).build_from_config(config)
The provided code snippet includes necessary dependencies for implementing the `ZoeD_N` function. Write a Python function `def ZoeD_N(pretrained=False, midas_model_type="DPT_BEiT_L_384", config_mode="infer", **kwargs)` to solve the following problem:
Zoe_M12_N model. This is the version of ZoeDepth that has a single metric head Args: pretrained (bool): If True, returns a model pre-trained on NYU-Depth-V2 midas_model_type (str): Midas model type. Should be one of the models as listed in torch.hub.list("intel-isl/MiDaS"). Default: DPT_BEiT_L_384 config_mode (str): Config mode. Should be one of "infer", "train" or "eval". Default: "infer" Keyword Args: **kwargs: Additional arguments to pass to the model The following arguments are supported: train_midas (bool): If True, returns a model that with trainable midas base. Default: False use_pretrained_midas (bool): If True, returns a model that uses pretrained midas base. Default: False n_bins (int): Number of bin centers. Defaults to 64. bin_centers_type (str): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". bin_embedding_dim (int): bin embedding dimension. Defaults to 128. min_depth (float): Lower bound for normed bin centers. Defaults to 1e-3. max_depth (float): Upper bound for normed bin centers. Defaults to 10. n_attractors (List[int]): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 1000. attractor_gamma (int): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str): Attraction aggregation "sum" or "mean". Defaults to 'mean'. attractor_type (str): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'inv'. min_temp (int): Lower bound for temperature of output probability distribution. Defaults to 0.0212. max_temp (int): Upper bound for temperature of output probability distribution. Defaults to 50. 
force_keep_ar (bool): If True, the model will keep the aspect ratio of the input image. Defaults to True.
Here is the function:
def ZoeD_N(pretrained=False, midas_model_type="DPT_BEiT_L_384", config_mode="infer", **kwargs):
    """Zoe_M12_N model — the single-metric-head variant of ZoeDepth.

    Args:
        pretrained (bool): load weights pre-trained on NYU-Depth-V2.
        midas_model_type (str): MiDaS backbone type (see
            torch.hub.list("intel-isl/MiDaS")); pretrained weights exist only
            for "DPT_BEiT_L_384".
        config_mode (str): one of "infer", "train" or "eval".

    Keyword Args:
        **kwargs: forwarded to get_config(). Supported keys include
            train_midas, use_pretrained_midas, n_bins, bin_centers_type,
            bin_embedding_dim, min_depth, max_depth, n_attractors,
            attractor_alpha, attractor_gamma, attractor_kind, attractor_type,
            min_temp, max_temp, force_keep_ar — see the zoedepth config docs
            for semantics and defaults.

    Raises:
        ValueError: pretrained=True with a backbone other than DPT_BEiT_L_384.

    Returns:
        The constructed ZoeDepth model.
    """
    if pretrained and midas_model_type != "DPT_BEiT_L_384":
        raise ValueError(
            f"Only DPT_BEiT_L_384 MiDaS model is supported for pretrained Zoe_N model, got: {midas_model_type}")

    weights_url = "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_N.pt"
    pretrained_resource = weights_url if pretrained else None

    config = get_config("zoedepth", config_mode,
                        pretrained_resource=pretrained_resource, **kwargs)
    return build_model(config)
force_keep_ar (bool): If True, the model will keep the aspect ratio of the input image. Defaults to True. |
13,773 | from zoedepth.utils.config import get_config
from zoedepth.models.builder import build_model
import numpy as np
import torch
def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs):
    """Main entry point to get the config for the model.

    Args:
        model_name (str): name of the desired model.
        mode (str, optional): "train", "infer" or "eval". Defaults to 'train'.
        dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None.

    Keyword Args: key-value pairs of arguments to overwrite the default config.

    The order of precedence for overwriting the config is (Higher precedence first):
        # 1. overwrite_kwargs
        # 2. "config_version": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json
        # 3. "version_name": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json
        # 4. common_config: Default config for all models specified in COMMON_CONFIG

    Returns:
        easydict: The config dictionary for the model.
    """
    check_choices("Model", model_name, ["zoedepth", "zoedepth_nk"])
    check_choices("Mode", mode, ["train", "infer", "eval"])
    if mode == "train":
        check_choices("Dataset", dataset, ["nyu", "kitti", "mix", None])

    config = flatten({**COMMON_CONFIG, **COMMON_TRAINING_CONFIG})
    config = update_model_config(config, mode, model_name)

    # update with model version specific config
    version_name = overwrite_kwargs.get("version_name", config["version_name"])
    config = update_model_config(config, mode, model_name, version_name)

    # update with config version if specified
    config_version = overwrite_kwargs.get("config_version", None)
    if config_version is not None:
        print("Overwriting config with config_version", config_version)
        config = update_model_config(config, mode, model_name, config_version)

    # update with overwrite_kwargs
    # Combined args are useful for hyperparameter search
    overwrite_kwargs = split_combined_args(overwrite_kwargs)
    config = {**config, **overwrite_kwargs}

    # Casting to bool # TODO: Not necessary. Remove and test
    for key in KEYS_TYPE_BOOL:
        if key in config:
            config[key] = bool(config[key])

    # Model specific post processing of config
    parse_list(config, "n_attractors")

    # adjust n_bins for each bin configuration if bin_conf is given and n_bins is passed in overwrite_kwargs
    if 'bin_conf' in config and 'n_bins' in overwrite_kwargs:
        bin_conf = config['bin_conf']  # list of dicts
        n_bins = overwrite_kwargs['n_bins']
        new_bin_conf = []
        for conf in bin_conf:
            conf['n_bins'] = n_bins
            new_bin_conf.append(conf)
        config['bin_conf'] = new_bin_conf

    if mode == "train":
        orig_dataset = dataset
        if dataset == "mix":
            dataset = 'nyu'  # Use nyu as default for mix. Dataset config is changed accordingly while loading the dataloader
        if dataset is not None:
            config['project'] = f"MonoDepth3-{orig_dataset}"  # Set project for wandb

    if dataset is not None:
        config['dataset'] = dataset
        # dataset defaults sit below everything already in config
        config = {**DATASETS_CONFIG[dataset], **config}

    config['model'] = model_name
    typed_config = {k: infer_type(v) for k, v in config.items()}

    # add hostname to config
    # BUGFIX: hostname used to be written to `config` *after* typed_config had
    # already been built from it, so it never appeared in the returned dict.
    typed_config['hostname'] = platform.node()
    return edict(typed_config)
def build_model(config) -> DepthModel:
    """Construct a model instance from a config dict.

    The config names the model (``config.model``) and its version
    (``config.version_name``); the matching module under ``zoedepth.models``
    must expose a ``get_version`` factory whose result builds the model via
    ``build_from_config``. Use this to construct models for training and
    evaluation.

    Args:
        config (dict): Config dict built in utils/config.py. Each model keeps
            its own config file(s) in its root model folder.

    Returns:
        torch.nn.Module: Model corresponding to the configured name/version.

    Raises:
        ValueError: If the model module cannot be imported or it has no
            ``get_version`` function.
    """
    target_module = f"zoedepth.models.{config.model}"
    try:
        model_module = import_module(target_module)
    except ModuleNotFoundError as err:
        # surface the original import failure before re-raising
        print(err)
        raise ValueError(
            f"Model {config.model} not found. Refer above error for details.") from err
    try:
        version_factory = model_module.get_version
    except AttributeError as err:
        raise ValueError(
            f"Model {config.model} has no get_version function.") from err
    return version_factory(config.version_name).build_from_config(config)
The provided code snippet includes necessary dependencies for implementing the `ZoeD_K` function. Write a Python function `def ZoeD_K(pretrained=False, midas_model_type="DPT_BEiT_L_384", config_mode="infer", **kwargs)` to solve the following problem:
Zoe_M12_K model. This is the version of ZoeDepth that has a single metric head Args: pretrained (bool): If True, returns a model pre-trained on NYU-Depth-V2 midas_model_type (str): Midas model type. Should be one of the models as listed in torch.hub.list("intel-isl/MiDaS"). Default: DPT_BEiT_L_384 config_mode (str): Config mode. Should be one of "infer", "train" or "eval". Default: "infer" Keyword Args: **kwargs: Additional arguments to pass to the model The following arguments are supported: train_midas (bool): If True, returns a model that with trainable midas base. Default: False use_pretrained_midas (bool): If True, returns a model that uses pretrained midas base. Default: False n_bins (int): Number of bin centers. Defaults to 64. bin_centers_type (str): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". bin_embedding_dim (int): bin embedding dimension. Defaults to 128. min_depth (float): Lower bound for normed bin centers. Defaults to 1e-3. max_depth (float): Upper bound for normed bin centers. Defaults to 10. n_attractors (List[int]): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 1000. attractor_gamma (int): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str): Attraction aggregation "sum" or "mean". Defaults to 'mean'. attractor_type (str): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'inv'. min_temp (int): Lower bound for temperature of output probability distribution. Defaults to 0.0212. max_temp (int): Upper bound for temperature of output probability distribution. Defaults to 50. 
force_keep_ar (bool): If True, the model will keep the aspect ratio of the input image. Defaults to True.
Here is the function:
def ZoeD_K(pretrained=False, midas_model_type="DPT_BEiT_L_384", config_mode="infer", **kwargs):
    """Build the Zoe_M12_K model: ZoeDepth with a single metric head (KITTI config).

    Args:
        pretrained (bool): If True, load weights from the official ZoeD_M12_K
            release URL.
        midas_model_type (str): MiDaS backbone type, as listed in
            torch.hub.list("intel-isl/MiDaS"). Only "DPT_BEiT_L_384" is
            accepted when ``pretrained`` is True. Default: "DPT_BEiT_L_384".
        config_mode (str): One of "infer", "train" or "eval". Default: "infer".

    Keyword Args:
        train_midas (bool): Make the MiDaS base trainable. Default: False.
        use_pretrained_midas (bool): Use a pretrained MiDaS base. Default: False.
        n_bins (int): Number of bin centers. Default: 64.
        bin_centers_type (str): "normed" (bounded via linear normalization) or
            "softplus" (unbounded). Default: "softplus".
        bin_embedding_dim (int): Bin embedding dimension. Default: 128.
        min_depth (float): Lower bound for normed bin centers. Default: 1e-3.
        max_depth (float): Upper bound for normed bin centers. Default: 10.
        n_attractors (List[int]): Bin attractors per decoder layer.
            Default: [16, 8, 4, 1].
        attractor_alpha (int): Proportional attractor strength. Default: 1000.
        attractor_gamma (int): Exponential attractor strength. Default: 2.
        attractor_kind (str): Attraction aggregation, "sum" or "mean".
            Default: "mean".
        attractor_type (str): "inv" (inverse) or "exp" (exponential) attractor.
            Default: "inv".
        min_temp (float): Lower bound for output distribution temperature.
            Default: 0.0212.
        max_temp (float): Upper bound for output distribution temperature.
            Default: 50.
        force_keep_ar (bool): Keep the input image aspect ratio. Default: True.

    Returns:
        The constructed ZoeDepth model.

    Raises:
        ValueError: If ``pretrained`` is True with a backbone other than
            "DPT_BEiT_L_384".
    """
    if pretrained and midas_model_type != "DPT_BEiT_L_384":
        raise ValueError(
            f"Only DPT_BEiT_L_384 MiDaS model is supported for pretrained Zoe_K model, got: {midas_model_type}")

    pretrained_resource = (
        "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt"
        if pretrained
        else None
    )
    config = get_config("zoedepth", config_mode,
                        pretrained_resource=pretrained_resource,
                        config_version="kitti", **kwargs)
    return build_model(config)
force_keep_ar (bool): If True, the model will keep the aspect ratio of the input image. Defaults to True. |
13,774 | from zoedepth.utils.config import get_config
from zoedepth.models.builder import build_model
import numpy as np
import torch
def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs):
    """Main entry point to get the config for the model.

    Args:
        model_name (str): name of the desired model.
        mode (str, optional): "train" or "infer". Defaults to 'train'.
        dataset (str, optional): If specified, the corresponding dataset
            configuration is loaded as well. Defaults to None.

    Keyword Args: key-value pairs of arguments to overwrite the default config.

    The order of precedence for overwriting the config is (Higher precedence first):
        # 1. overwrite_kwargs
        # 2. "config_version": Config file version if specified in overwrite_kwargs.
        #    The corresponding config loaded is config_{model_name}_{config_version}.json
        # 3. "version_name": Default model version specific config specified in
        #    overwrite_kwargs. The corresponding config loaded is
        #    config_{model_name}_{version_name}.json
        # 4. common_config: Default config for all models specified in COMMON_CONFIG

    Returns:
        easydict: The config dictionary for the model.
    """
    check_choices("Model", model_name, ["zoedepth", "zoedepth_nk"])
    check_choices("Mode", mode, ["train", "infer", "eval"])
    if mode == "train":
        check_choices("Dataset", dataset, ["nyu", "kitti", "mix", None])

    config = flatten({**COMMON_CONFIG, **COMMON_TRAINING_CONFIG})
    config = update_model_config(config, mode, model_name)

    # update with model version specific config
    version_name = overwrite_kwargs.get("version_name", config["version_name"])
    config = update_model_config(config, mode, model_name, version_name)

    # update with config version if specified
    config_version = overwrite_kwargs.get("config_version", None)
    if config_version is not None:
        print("Overwriting config with config_version", config_version)
        config = update_model_config(config, mode, model_name, config_version)

    # update with overwrite_kwargs
    # Combined args are useful for hyperparameter search
    overwrite_kwargs = split_combined_args(overwrite_kwargs)
    config = {**config, **overwrite_kwargs}

    # Casting to bool # TODO: Not necessary. Remove and test
    for key in KEYS_TYPE_BOOL:
        if key in config:
            config[key] = bool(config[key])

    # Model specific post processing of config
    parse_list(config, "n_attractors")

    # adjust n_bins for each bin configuration if bin_conf is given and
    # n_bins is passed in overwrite_kwargs
    if 'bin_conf' in config and 'n_bins' in overwrite_kwargs:
        n_bins = overwrite_kwargs['n_bins']
        # Copy each conf instead of mutating it in place: the original
        # mutated the shared default bin_conf dicts, leaking the override
        # into every later call.
        config['bin_conf'] = [{**conf, 'n_bins': n_bins}
                              for conf in config['bin_conf']]

    if mode == "train":
        orig_dataset = dataset
        if dataset == "mix":
            # Use nyu as default for mix. Dataset config is changed
            # accordingly while loading the dataloader.
            dataset = 'nyu'
        if dataset is not None:
            config['project'] = f"MonoDepth3-{orig_dataset}"  # Set project for wandb

    if dataset is not None:
        config['dataset'] = dataset
        config = {**DATASETS_CONFIG[dataset], **config}

    config['model'] = model_name
    # BUG FIX: hostname must be added BEFORE typed_config is built; the
    # original assigned it afterwards, so it never reached the returned dict.
    config['hostname'] = platform.node()
    typed_config = {k: infer_type(v) for k, v in config.items()}
    return edict(typed_config)
def build_model(config) -> DepthModel:
    """Instantiate the model named by ``config.model`` / ``config.version_name``.

    Imports ``zoedepth.models.<config.model>``, looks up its ``get_version``
    factory, and builds the model through ``build_from_config``. This is the
    function to use when constructing models for training and evaluation.

    Args:
        config (dict): Config dict (see utils/config.py). Each model keeps its
            own config file(s) in its root model folder.

    Returns:
        torch.nn.Module: Model for the requested name and version.

    Raises:
        ValueError: When the model module is missing or lacks ``get_version``.
    """
    try:
        module = import_module(f"zoedepth.models.{config.model}")
    except ModuleNotFoundError as e:
        # keep the original import error visible for debugging
        print(e)
        raise ValueError(
            f"Model {config.model} not found. Refer above error for details.") from e
    try:
        get_version = module.get_version
    except AttributeError as e:
        raise ValueError(
            f"Model {config.model} has no get_version function.") from e
    return get_version(config.version_name).build_from_config(config)
The provided code snippet includes necessary dependencies for implementing the `ZoeD_NK` function. Write a Python function `def ZoeD_NK(pretrained=False, midas_model_type="DPT_BEiT_L_384", config_mode="infer", **kwargs)` to solve the following problem:
ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts. Args: pretrained (bool): If True, returns a model pre-trained on NYU-Depth-V2 midas_model_type (str): Midas model type. Should be one of the models as listed in torch.hub.list("intel-isl/MiDaS"). Default: DPT_BEiT_L_384 Keyword Args: **kwargs: Additional arguments to pass to the model The following arguments are supported: train_midas (bool): If True, returns a model that with trainable midas base. Defaults to True use_pretrained_midas (bool): If True, returns a model that uses pretrained midas base. Defaults to True bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys: "name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float) The length of this list determines the number of metric heads. bin_centers_type (str): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". bin_embedding_dim (int): bin embedding dimension. Defaults to 128. n_attractors (List[int]): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 1000. attractor_gamma (int): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str): Attraction aggregation "sum" or "mean". Defaults to 'mean'. attractor_type (str): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'inv'. min_temp (int): Lower bound for temperature of output probability distribution. Defaults to 0.0212. 
max_temp (int): Upper bound for temperature of output probability distribution. Defaults to 50. memory_efficient (bool): Whether to use memory efficient version of attractor layers. Memory efficient version is slower but is recommended in case of multiple metric heads in order to save GPU memory. Defaults to True.
Here is the function:
def ZoeD_NK(pretrained=False, midas_model_type="DPT_BEiT_L_384", config_mode="infer", **kwargs):
    """Build the ZoeDepthNK model: two metric heads with a learned router to experts.

    Args:
        pretrained (bool): If True, load weights from the official ZoeD_M12_NK
            release URL.
        midas_model_type (str): MiDaS backbone type, as listed in
            torch.hub.list("intel-isl/MiDaS"). Only "DPT_BEiT_L_384" is
            accepted when ``pretrained`` is True. Default: "DPT_BEiT_L_384".
        config_mode (str): One of "infer", "train" or "eval". Default: "infer".

    Keyword Args:
        train_midas (bool): Make the MiDaS base trainable. Default: True.
        use_pretrained_midas (bool): Use a pretrained MiDaS base. Default: True.
        bin_conf (List[dict]): One dict per metric head with keys
            "name" (str, typically the dataset name), "n_bins" (int),
            "min_depth" (float), "max_depth" (float). The list length
            determines the number of metric heads.
        bin_centers_type (str): "normed" (bounded via linear normalization) or
            "softplus" (unbounded). Default: "softplus".
        bin_embedding_dim (int): Bin embedding dimension. Default: 128.
        n_attractors (List[int]): Bin attractors per decoder layer.
            Default: [16, 8, 4, 1].
        attractor_alpha (int): Proportional attractor strength. Default: 1000.
        attractor_gamma (int): Exponential attractor strength. Default: 2.
        attractor_kind (str): Attraction aggregation, "sum" or "mean".
            Default: "mean".
        attractor_type (str): "inv" (inverse) or "exp" (exponential) attractor.
            Default: "inv".
        min_temp (float): Lower bound for output distribution temperature.
            Default: 0.0212.
        max_temp (float): Upper bound for output distribution temperature.
            Default: 50.
        memory_efficient (bool): Use the slower, memory-efficient attractor
            layers; recommended with multiple metric heads to save GPU memory.
            Default: True.

    Returns:
        The constructed ZoeDepthNK model.

    Raises:
        ValueError: If ``pretrained`` is True with a backbone other than
            "DPT_BEiT_L_384".
    """
    if pretrained and midas_model_type != "DPT_BEiT_L_384":
        raise ValueError(
            f"Only DPT_BEiT_L_384 MiDaS model is supported for pretrained Zoe_NK model, got: {midas_model_type}")

    pretrained_resource = (
        "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_NK.pt"
        if pretrained
        else None
    )
    config = get_config("zoedepth_nk", config_mode,
                        pretrained_resource=pretrained_resource, **kwargs)
    return build_model(config)
max_temp (int): Upper bound for temperature of output probability distribution. Defaults to 50. memory_efficient (bool): Whether to use memory efficient version of attractor layers. Memory efficient version is slower but is recommended incase of multiple metric heads in order save GPU memory. Defaults to True. |
13,776 | import os
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
class DDAD(Dataset):
    """DDAD evaluation dataset: RGB pngs with matching ``*_depth.npy`` depth maps."""

    def __init__(self, data_dir_root, resize_shape):
        import glob

        # image paths are of the form <data_dir_root>/*_rgb.png with the
        # depth stored alongside as *_depth.npy
        self.image_files = glob.glob(os.path.join(data_dir_root, '*.png'))
        self.depth_files = [path.replace("_rgb.png", "_depth.npy")
                            for path in self.image_files]
        self.transform = ToTensor(resize_shape)

    def __getitem__(self, idx):
        rgb_path = self.image_files[idx]
        depth_path = self.depth_files[idx]

        image = np.asarray(Image.open(rgb_path), dtype=np.float32) / 255.0
        depth = np.load(depth_path)[..., None]  # meters; add channel dim

        sample = self.transform(dict(image=image, depth=depth))
        if idx == 0:
            # one-time sanity print of the transformed image shape
            print(sample["image"].shape)
        return sample

    def __len__(self):
        return len(self.image_files)
def get_ddad_loader(data_dir_root, resize_shape, batch_size=1, **kwargs):
    """Build a DataLoader over the DDAD evaluation split rooted at ``data_dir_root``."""
    return DataLoader(DDAD(data_dir_root, resize_shape), batch_size, **kwargs)
13,783 | import glob
import os
import h5py
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
class HyperSim(Dataset):
def __init__(self, data_dir_root):
def __getitem__(self, idx):
def __len__(self):
def get_hypersim_loader(data_dir_root, batch_size=1, **kwargs):
    """Build a DataLoader over the HyperSim test set rooted at ``data_dir_root``."""
    return DataLoader(HyperSim(data_dir_root), batch_size, **kwargs)
13,801 | import json
import os
from zoedepth.utils.easydict import EasyDict as edict
from zoedepth.utils.arg_utils import infer_type
import pathlib
import platform
DATASETS_CONFIG = {
"kitti": {
"dataset": "kitti",
"min_depth": 0.001,
"max_depth": 80,
"data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
"gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
"filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt",
"input_height": 352,
"input_width": 1216, # 704
"data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
"gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
"filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt",
"min_depth_eval": 1e-3,
"max_depth_eval": 80,
"do_random_rotate": True,
"degree": 1.0,
"do_kb_crop": True,
"garg_crop": True,
"eigen_crop": False,
"use_right": False
},
"kitti_test": {
"dataset": "kitti",
"min_depth": 0.001,
"max_depth": 80,
"data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
"gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
"filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt",
"input_height": 352,
"input_width": 1216,
"data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
"gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
"filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt",
"min_depth_eval": 1e-3,
"max_depth_eval": 80,
"do_random_rotate": False,
"degree": 1.0,
"do_kb_crop": True,
"garg_crop": True,
"eigen_crop": False,
"use_right": False
},
"nyu": {
"dataset": "nyu",
"avoid_boundary": False,
"min_depth": 1e-3, # originally 0.1
"max_depth": 10,
"data_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"),
"gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"),
"filenames_file": "./train_test_inputs/nyudepthv2_train_files_with_gt.txt",
"input_height": 480,
"input_width": 640,
"data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"),
"gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"),
"filenames_file_eval": "./train_test_inputs/nyudepthv2_test_files_with_gt.txt",
"min_depth_eval": 1e-3,
"max_depth_eval": 10,
"min_depth_diff": -10,
"max_depth_diff": 10,
"do_random_rotate": True,
"degree": 1.0,
"do_kb_crop": False,
"garg_crop": False,
"eigen_crop": True
},
"ibims": {
"dataset": "ibims",
"ibims_root": os.path.join(HOME_DIR, "shortcuts/datasets/ibims/ibims1_core_raw/"),
"eigen_crop": True,
"garg_crop": False,
"do_kb_crop": False,
"min_depth_eval": 0,
"max_depth_eval": 10,
"min_depth": 1e-3,
"max_depth": 10
},
"sunrgbd": {
"dataset": "sunrgbd",
"sunrgbd_root": os.path.join(HOME_DIR, "shortcuts/datasets/SUNRGBD/test/"),
"eigen_crop": True,
"garg_crop": False,
"do_kb_crop": False,
"min_depth_eval": 0,
"max_depth_eval": 8,
"min_depth": 1e-3,
"max_depth": 10
},
"diml_indoor": {
"dataset": "diml_indoor",
"diml_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_indoor_test/"),
"eigen_crop": True,
"garg_crop": False,
"do_kb_crop": False,
"min_depth_eval": 0,
"max_depth_eval": 10,
"min_depth": 1e-3,
"max_depth": 10
},
"diml_outdoor": {
"dataset": "diml_outdoor",
"diml_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_outdoor_test/"),
"eigen_crop": False,
"garg_crop": True,
"do_kb_crop": False,
"min_depth_eval": 2,
"max_depth_eval": 80,
"min_depth": 1e-3,
"max_depth": 80
},
"diode_indoor": {
"dataset": "diode_indoor",
"diode_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_indoor/"),
"eigen_crop": True,
"garg_crop": False,
"do_kb_crop": False,
"min_depth_eval": 1e-3,
"max_depth_eval": 10,
"min_depth": 1e-3,
"max_depth": 10
},
"diode_outdoor": {
"dataset": "diode_outdoor",
"diode_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_outdoor/"),
"eigen_crop": False,
"garg_crop": True,
"do_kb_crop": False,
"min_depth_eval": 1e-3,
"max_depth_eval": 80,
"min_depth": 1e-3,
"max_depth": 80
},
"hypersim_test": {
"dataset": "hypersim_test",
"hypersim_test_root": os.path.join(HOME_DIR, "shortcuts/datasets/hypersim_test/"),
"eigen_crop": True,
"garg_crop": False,
"do_kb_crop": False,
"min_depth_eval": 1e-3,
"max_depth_eval": 80,
"min_depth": 1e-3,
"max_depth": 10
},
"vkitti": {
"dataset": "vkitti",
"vkitti_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti_test/"),
"eigen_crop": False,
"garg_crop": True,
"do_kb_crop": True,
"min_depth_eval": 1e-3,
"max_depth_eval": 80,
"min_depth": 1e-3,
"max_depth": 80
},
"vkitti2": {
"dataset": "vkitti2",
"vkitti2_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti2/"),
"eigen_crop": False,
"garg_crop": True,
"do_kb_crop": True,
"min_depth_eval": 1e-3,
"max_depth_eval": 80,
"min_depth": 1e-3,
"max_depth": 80,
},
"ddad": {
"dataset": "ddad",
"ddad_root": os.path.join(HOME_DIR, "shortcuts/datasets/ddad/ddad_val/"),
"eigen_crop": False,
"garg_crop": True,
"do_kb_crop": True,
"min_depth_eval": 1e-3,
"max_depth_eval": 80,
"min_depth": 1e-3,
"max_depth": 80,
},
}
def change_dataset(config, new_dataset):
    """Overlay the settings for ``new_dataset`` onto ``config`` (mutates in place) and return it."""
    overrides = DATASETS_CONFIG[new_dataset]
    config.update(overrides)
    return config
13,803 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
class colors:
    """ANSI escape codes for terminal styling.

    Reset all styling with ``colors.reset``. Foreground codes live in the
    ``fg`` subclass and background codes in ``bg``; use them as
    ``colors.fg.red`` or ``colors.bg.green``. The generic modifiers
    (``bold``, ``disable``, ``underline``, ``reverse``, ``strikethrough``,
    ``invisible``) sit on the main class, e.g. ``colors.bold``.
    """
    reset = '\033[0m'
    bold = '\033[01m'
    disable = '\033[02m'
    underline = '\033[04m'
    reverse = '\033[07m'
    strikethrough = '\033[09m'
    invisible = '\033[08m'

    class fg:
        # standard (30-37) and bright (90-96) foreground colors
        black = '\033[30m'
        red = '\033[31m'
        green = '\033[32m'
        orange = '\033[33m'
        blue = '\033[34m'
        purple = '\033[35m'
        cyan = '\033[36m'
        lightgrey = '\033[37m'
        darkgrey = '\033[90m'
        lightred = '\033[91m'
        lightgreen = '\033[92m'
        yellow = '\033[93m'
        lightblue = '\033[94m'
        pink = '\033[95m'
        lightcyan = '\033[96m'

    class bg:
        # background colors (40-47)
        black = '\033[40m'
        red = '\033[41m'
        green = '\033[42m'
        orange = '\033[43m'
        blue = '\033[44m'
        purple = '\033[45m'
        cyan = '\033[46m'
        lightgrey = '\033[47m'
def printc(text, color):
    """Print *text* wrapped in the given ANSI *color* code, resetting styling afterwards."""
    print(f"{color}{text}{colors.reset}")
13,804 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
def get_image_from_url(url):
    """Download ``url`` and return its payload as an RGB PIL image."""
    payload = requests.get(url).content
    return Image.open(BytesIO(payload)).convert("RGB")
def url_to_torch(url, size=(384, 384)):
    """Fetch an image from ``url`` and return it as a float CHW tensor in [0, 1].

    Args:
        url (str): Image URL.
        size (tuple): (width, height) to resize to. Defaults to (384, 384).

    Returns:
        torch.Tensor: Tensor of shape (3, size[1], size[0]) with values in [0, 1].
    """
    img = get_image_from_url(url)
    # Image.ANTIALIAS was deprecated in Pillow 9 and removed in Pillow 10;
    # Image.LANCZOS is the same filter under its current name.
    img = img.resize(size, Image.LANCZOS)
    tensor = torch.from_numpy(np.asarray(img)).float()
    tensor = tensor.permute(2, 0, 1)
    tensor.div_(255)
    return tensor
13,808 | import argparse
from pprint import pprint
import torch
from zoedepth.utils.easydict import EasyDict as edict
from tqdm import tqdm
from zoedepth.data.data_mono import DepthDataLoader
from zoedepth.models.builder import build_model
from zoedepth.utils.arg_utils import parse_unknown
from zoedepth.utils.config import change_dataset, get_config, ALL_EVAL_DATASETS, ALL_INDOOR, ALL_OUTDOOR
from zoedepth.utils.misc import (RunningAverageDict, colors, compute_metrics,
count_parameters)
def infer(model, images, **kwargs):
    """Run ``model`` on ``images`` with horizontal-flip test-time augmentation.

    The model is evaluated on the batch and on its left-right mirror; the
    mirrored prediction is flipped back and the two results are averaged.

    Args:
        model: Callable whose output is a tensor, a list/tuple (last item is
            taken as the depth), or a dict with a 'metric_depth' (or 'out') key.
        images (torch.Tensor): Batch of shape (N, C, H, W).

    Returns:
        torch.Tensor: Mean of the direct and flip-augmented predictions.
    """
    def _extract_depth(pred):
        # Normalize the model's various output containers to a single tensor.
        if isinstance(pred, torch.Tensor):
            return pred
        if isinstance(pred, (list, tuple)):
            return pred[-1]
        if isinstance(pred, dict):
            return pred['metric_depth'] if 'metric_depth' in pred else pred['out']
        raise NotImplementedError(f"Unknown output type {type(pred)}")

    direct = _extract_depth(model(images, **kwargs))
    mirrored = _extract_depth(model(torch.flip(images, [3]), **kwargs))
    mirrored = torch.flip(mirrored, [3])  # undo the input mirror
    return 0.5 * (direct + mirrored)
class RunningAverageDict:
    """A dictionary of running averages, one ``RunningAverage`` per key."""

    def __init__(self):
        # Created lazily on the first update so get_value() can report
        # "no data yet" with None.
        self._dict = None

    def update(self, new_dict):
        """Fold the values of ``new_dict`` into the per-key running averages.

        A ``None`` argument is ignored. Keys not seen before get a fresh
        ``RunningAverage`` (the original initialized keys only on the very
        first call, so a later update introducing a new key raised KeyError).
        """
        if new_dict is None:
            return
        if self._dict is None:
            self._dict = dict()
        for key, value in new_dict.items():
            if key not in self._dict:
                self._dict[key] = RunningAverage()
            self._dict[key].append(value)

    def get_value(self):
        """Return ``{key: average}`` for all tracked keys, or None if never updated."""
        if self._dict is None:
            return None
        return {key: value.get_value() for key, value in self._dict.items()}
def colorize(value, vmin=None, vmax=None, cmap='gray_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
    """Converts a depth map to a color image.

    Args:
        value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
        vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, the 2nd percentile of valid pixels is used. Defaults to None.
        vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, the 85th percentile of valid pixels is used. Defaults to None.
        cmap (str, optional): matplotlib colormap to use. Defaults to 'gray_r'.
        invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
        invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
        background_color (tuple[int], optional): 4-tuple RGBA color to give to invalid pixels. Defaults to (128, 128, 128, 255).
        gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
        value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.

    Returns:
        numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
    """
    if isinstance(value, torch.Tensor):
        value = value.detach().cpu().numpy()

    value = value.squeeze()
    if invalid_mask is None:
        invalid_mask = value == invalid_val
    mask = np.logical_not(invalid_mask)

    # Normalize valid pixels into [0, 1] over a robust percentile window.
    vmin = np.percentile(value[mask], 2) if vmin is None else vmin
    vmax = np.percentile(value[mask], 85) if vmax is None else vmax
    if vmin != vmax:
        value = (value - vmin) / (vmax - vmin)  # vmin..vmax
    else:
        # Avoid 0-division
        value = value * 0.

    # Grey out the invalid values.
    # NOTE(review): NaN assignment assumes a float array here — confirm callers.
    value[invalid_mask] = np.nan
    # Fix: matplotlib.cm.get_cmap was deprecated in matplotlib 3.7 and removed
    # in 3.9; use the colormap registry with a fallback for older versions.
    try:
        cmapper = matplotlib.colormaps[cmap]
    except AttributeError:
        cmapper = matplotlib.cm.get_cmap(cmap)
    if value_transform:
        value = value_transform(value)
    value = cmapper(value, bytes=True)  # (H, W, 4) uint8

    img = value[...]
    img[invalid_mask] = background_color

    if gamma_corrected:
        # gamma correction
        img = img / 255
        img = np.power(img, 2.2)
        img = img * 255
        img = img.astype(np.uint8)
    return img
def compute_metrics(gt, pred, interpolate=True, garg_crop=False, eigen_crop=True, dataset='nyu', min_depth_eval=0.1, max_depth_eval=10, **kwargs):
    """Compute metrics of predicted depth maps. Applies cropping and masking as
    necessary or specified via arguments. Refer to compute_errors for more
    details on metrics.
    """
    # A `config` object in kwargs overrides the individual crop/range arguments.
    if 'config' in kwargs:
        config = kwargs['config']
        garg_crop = config.garg_crop
        eigen_crop = config.eigen_crop
        min_depth_eval = config.min_depth_eval
        max_depth_eval = config.max_depth_eval

    if gt.shape[-2:] != pred.shape[-2:] and interpolate:
        # Fix: the original referenced an undefined name `nn`; use the fully
        # qualified torch.nn.functional instead.
        pred = torch.nn.functional.interpolate(
            pred, gt.shape[-2:], mode='bilinear', align_corners=True)

    pred = pred.squeeze().cpu().numpy()
    # Clamp predictions to the evaluation range; sanitize inf/nan.
    pred[pred < min_depth_eval] = min_depth_eval
    pred[pred > max_depth_eval] = max_depth_eval
    pred[np.isinf(pred)] = max_depth_eval
    pred[np.isnan(pred)] = min_depth_eval

    gt_depth = gt.squeeze().cpu().numpy()
    valid_mask = np.logical_and(
        gt_depth > min_depth_eval, gt_depth < max_depth_eval)

    if garg_crop or eigen_crop:
        gt_height, gt_width = gt_depth.shape
        eval_mask = np.zeros(valid_mask.shape)

        if garg_crop:
            eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height),
                      int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1

        elif eigen_crop:
            if dataset == 'kitti':
                eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height),
                          int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
            else:
                # Eigen crop was defined for (480, 640) NYU-style images.
                eval_mask[45:471, 41:601] = 1
    else:
        eval_mask = np.ones(valid_mask.shape)
    valid_mask = np.logical_and(valid_mask, eval_mask)
    return compute_errors(gt_depth[valid_mask], pred[valid_mask])
def evaluate(model, test_loader, config, round_vals=True, round_precision=3):
    """Run flip-augmented inference over `test_loader` and accumulate metrics.

    Returns a dict of metric name -> value (rounded to `round_precision`
    decimals when `round_vals` is True). When `config.save_images` is set,
    colorized image/depth/pred PNGs are written there.

    NOTE(review): requires CUDA — every sample is moved with `.cuda()`.
    """
    model.eval()
    metrics = RunningAverageDict()
    for i, sample in tqdm(enumerate(test_loader), total=len(test_loader)):
        # Skip samples flagged as having no usable ground-truth depth.
        if 'has_valid_depth' in sample:
            if not sample['has_valid_depth']:
                continue
        image, depth = sample['image'], sample['depth']
        image, depth = image.cuda(), depth.cuda()
        depth = depth.squeeze().unsqueeze(0).unsqueeze(0)  # -> (1, 1, H, W)
        focal = sample.get('focal', torch.Tensor(
            [715.0873]).cuda())  # This magic number (focal) is only used for evaluating BTS model
        pred = infer(model, image, dataset=sample['dataset'][0], focal=focal)

        # Save image, depth, pred for visualization
        if "save_images" in config and config.save_images:
            import os
            from PIL import Image
            import torchvision.transforms as transforms
            from zoedepth.utils.misc import colorize

            os.makedirs(config.save_images, exist_ok=True)
            # Colorize depth/pred over a fixed 0..10 m range for comparability.
            d = colorize(depth.squeeze().cpu().numpy(), 0, 10)
            p = colorize(pred.squeeze().cpu().numpy(), 0, 10)
            im = transforms.ToPILImage()(image.squeeze().cpu())
            im.save(os.path.join(config.save_images, f"{i}_img.png"))
            Image.fromarray(d).save(os.path.join(config.save_images, f"{i}_depth.png"))
            Image.fromarray(p).save(os.path.join(config.save_images, f"{i}_pred.png"))

        metrics.update(compute_metrics(depth, pred, config=config))
    if round_vals:
        def r(m): return round(m, round_precision)
    else:
        def r(m): return m
    metrics = {k: r(v) for k, v in metrics.get_value().items()}
    return metrics
13,809 | import argparse
from pprint import pprint
import torch
from zoedepth.utils.easydict import EasyDict as edict
from tqdm import tqdm
from zoedepth.data.data_mono import DepthDataLoader
from zoedepth.models.builder import build_model
from zoedepth.utils.arg_utils import parse_unknown
from zoedepth.utils.config import change_dataset, get_config, ALL_EVAL_DATASETS, ALL_INDOOR, ALL_OUTDOOR
from zoedepth.utils.misc import (RunningAverageDict, colors, compute_metrics,
count_parameters)
# NOTE(review): this extract is truncated/garbled — `main` and `get_config`
# below are bare signatures with no bodies (syntactically invalid as-is);
# only `eval_model` survived intact. Confirm against the original evaluate
# script before reuse.
def main(config):
    def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs):
    def eval_model(model_name, pretrained_resource, dataset='nyu', **kwargs):
        # Load default pretrained resource defined in config if not set
        overwrite = {**kwargs, "pretrained_resource": pretrained_resource} if pretrained_resource else kwargs
        config = get_config(model_name, "eval", dataset, **overwrite)
        # config = change_dataset(config, dataset)  # change the dataset
        pprint(config)
        print(f"Evaluating {model_name} on {dataset}...")
        metrics = main(config)
        return metrics
13,810 | from errno import EEXIST
from os import makedirs, path
import os
def mkdir_p(folder_path):
    """Create *folder_path* like ``mkdir -p``: parents are created and an
    already-existing directory is not an error.

    Raises:
        OSError: if the path exists but is not a directory, or creation fails.
    """
    # os.makedirs(..., exist_ok=True) replaces the manual EEXIST/isdir check
    # that was only needed on Python < 3.2; it still raises when the path
    # exists as a non-directory.
    makedirs(folder_path, exist_ok=True)
13,811 | from errno import EEXIST
from os import makedirs, path
import os
def searchForMaxIteration(folder):
    """Return the highest iteration number among files named like ``*_<int>``.

    Filenames whose trailing "_"-separated token is not an integer (e.g.
    ``.DS_Store``) are skipped instead of crashing the scan.

    Raises:
        ValueError: if no filename carries an integer suffix (same exception
            type the original ``max()`` raised on an empty folder).
    """
    saved_iters = []
    for fname in os.listdir(folder):
        token = fname.split("_")[-1]
        try:
            saved_iters.append(int(token))
        except ValueError:
            continue  # not an iteration-numbered file; ignore it
    if not saved_iters:
        raise ValueError(f"no iteration-numbered files found in {folder!r}")
    return max(saved_iters)
13,812 | import json
import numpy as np
import torch
from scene.cameras import Camera, MiniCam
from utils.general import PILtoTorch
from utils.graphics import fov2focal, focal2fov, getWorld2View, getProjectionMatrix
# NOTE(review): truncated extract — the signatures below have no bodies here.
# The full definitions live in the original project's scene/cameras.py and
# utils/graphics.py; confirm there before reuse.
class MiniCam:
    def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform):
    def getWorld2View(R, t):
    def getProjectionMatrix(znear, zfar, fovX, fovY):
    def fov2focal(fov, pixels):
    def focal2fov(focal, pixels):
def load_json(path, H, W):
    """Build a MiniCam for every frame of a NeRF-style transforms JSON file."""
    with open(path) as json_file:
        contents = json.load(json_file)

    FoVx = contents["camera_angle_x"]
    FoVy = focal2fov(fov2focal(FoVx, W), H)
    znear, zfar = 0.01, 100.0

    cams = []
    for frame in contents["frames"]:
        # NeRF 'transform_matrix' is a camera-to-world transform; flip the
        # Y/Z axes to go from OpenGL/Blender (Y up, Z back) to COLMAP
        # (Y down, Z forward).
        c2w = np.array(frame["transform_matrix"])
        c2w[:3, 1:3] *= -1
        if c2w.shape[0] == 3:
            bottom = np.zeros((1, 4))
            bottom[0, -1] = 1
            c2w = np.concatenate((c2w, bottom), axis=0)

        # Invert to world-to-camera; R is stored transposed due to 'glm'
        # in the CUDA code.
        w2c = np.linalg.inv(c2w)
        R = np.transpose(w2c[:3, :3])
        T = w2c[:3, 3]

        view = torch.as_tensor(getWorld2View(R, T)).T.cuda()
        proj = getProjectionMatrix(znear, zfar, FoVx, FoVy).T.cuda()
        cams.append(MiniCam(W, H, FoVx, FoVy, znear, zfar, view, view @ proj))
    return cams
13,813 | import json
import numpy as np
import torch
from scene.cameras import Camera, MiniCam
from utils.general import PILtoTorch
from utils.graphics import fov2focal, focal2fov, getWorld2View, getProjectionMatrix
def loadCam(args, id, cam_info, resolution_scale):
    """Build a Camera from a CameraInfo, resizing its image per args.resolution.

    args.resolution in {1, 2, 4, 8} is a fixed downscale factor; -1 means
    auto (cap width at 1600 px); any other value is treated as a target width.
    """
    orig_w, orig_h = cam_info.image.size

    if args.resolution in [1, 2, 4, 8]:
        resolution = round(orig_w/(resolution_scale * args.resolution)), round(orig_h/(resolution_scale * args.resolution))
    else:  # should be a type that converts to float
        if args.resolution == -1:
            if orig_w > 1600:
                # WARNED is a module-level flag (defined elsewhere) so the
                # rescale notice is printed only once.
                global WARNED
                if not WARNED:
                    print("[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n "
                        "If this is not desired, please explicitly specify '--resolution/-r' as 1")
                    WARNED = True
                global_down = orig_w / 1600
            else:
                global_down = 1
        else:
            global_down = orig_w / args.resolution

        scale = float(global_down) * float(resolution_scale)
        resolution = (int(orig_w / scale), int(orig_h / scale))

    resized_image_rgb = PILtoTorch(cam_info.image, resolution)

    gt_image = resized_image_rgb[:3, ...]
    loaded_mask = None

    # NOTE(review): the channel count is tested on dim 1 but the alpha mask is
    # sliced from dim 0 — looks inconsistent for a CHW tensor; confirm the
    # layout PILtoTorch returns.
    if resized_image_rgb.shape[1] == 4:
        loaded_mask = resized_image_rgb[3:4, ...]

    return Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T,
                  FoVx=cam_info.FovX, FoVy=cam_info.FovY,
                  image=gt_image, gt_alpha_mask=loaded_mask,
                  image_name=cam_info.image_name, uid=id, data_device=args.data_device)
def cameraList_from_camInfos(cam_infos, resolution_scale, args):
    """Instantiate a Camera for each CameraInfo at the given resolution scale."""
    return [loadCam(args, idx, info, resolution_scale)
            for idx, info in enumerate(cam_infos)]
13,814 | import json
import numpy as np
import torch
from scene.cameras import Camera, MiniCam
from utils.general import PILtoTorch
from utils.graphics import fov2focal, focal2fov, getWorld2View, getProjectionMatrix
class Camera(nn.Module):
    """Training camera: pose (R, T), intrinsics (FoVx/FoVy), the ground-truth
    image and its Canny mask, plus derived view/projection matrices.

    NOTE(review): `nn`, `image2canny`, `getWorld2View2` are not among this
    extract's visible imports (the import list brings `getWorld2View`, not
    `getWorld2View2`) — they must come from elsewhere in the original module;
    confirm before reuse.
    """
    def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
                 image_name, uid,
                 trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
                 ):
        super(Camera, self).__init__()

        self.uid = uid
        self.colmap_id = colmap_id
        self.R = R
        self.T = T
        self.FoVx = FoVx
        self.FoVy = FoVy
        self.image_name = image_name

        # Fall back to the default CUDA device if the requested one is invalid.
        try:
            self.data_device = torch.device(data_device)
        except Exception as e:
            print(e)
            print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
            self.data_device = torch.device("cuda")

        # Clamp the GT image to [0, 1] and move it to the target device.
        self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
        # Inverted Canny edge mask of the GT image (edges are 0).
        self.canny_mask = image2canny(self.original_image.permute(1,2,0), 50, 150, isEdge1=False).detach().to(self.data_device)
        self.image_width = self.original_image.shape[2]
        self.image_height = self.original_image.shape[1]

        # Apply the alpha mask if provided (multiplying by ones otherwise
        # leaves the image unchanged).
        if gt_alpha_mask is not None:
            self.original_image *= gt_alpha_mask.to(self.data_device)
        else:
            self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)

        self.zfar = 100.0
        self.znear = 0.01

        self.trans = trans
        self.scale = scale

        # Matrices are stored transposed (matches the 'glm' convention noted
        # elsewhere in this file) and kept on the CUDA device.
        self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
        self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda()
        self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0)
        self.camera_center = self.world_view_transform.inverse()[3, :3]
def fov2focal(fov, pixels):
    """Convert a field of view (radians) and an image extent in pixels to a focal length."""
    half_fov = fov / 2
    return pixels / (2 * math.tan(half_fov))
def camera_to_JSON(id, camera : Camera):
    """Serialize a camera's pose and intrinsics into a JSON-friendly dict."""
    # Assemble the 4x4 matrix from R (stored transposed) and T, then invert it.
    Rt = np.zeros((4, 4))
    Rt[:3, :3] = camera.R.transpose()
    Rt[:3, 3] = camera.T
    Rt[3, 3] = 1.0

    W2C = np.linalg.inv(Rt)
    pos = W2C[:3, 3]
    rot = W2C[:3, :3]
    return {
        'id': id,
        'img_name': camera.image_name,
        'width': camera.width,
        'height': camera.height,
        'position': pos.tolist(),
        'rotation': [row.tolist() for row in rot],
        'fy': fov2focal(camera.FovY, camera.height),
        'fx': fov2focal(camera.FovX, camera.width),
    }
13,815 | import matplotlib
import matplotlib.cm
import numpy as np
import torch
The provided code snippet includes the necessary dependencies for implementing the `colorize` function. Write a Python function `def colorize(value, vmin=None, vmax=None, cmap='jet', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None)` to solve the following problem:
Converts a depth map to a color image. Args: value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed. vmin (float, optional): vmin-valued entries are mapped to the start color of cmap. If None, a low percentile of the valid pixels is used. Defaults to None. vmax (float, optional): vmax-valued entries are mapped to the end color of cmap. If None, a high percentile of the valid pixels is used. Defaults to None. cmap (str, optional): matplotlib colormap to use. Defaults to 'jet'. invalid_val (int, optional): Value of invalid pixels that should be colored as 'background_color'. Defaults to -99. invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None. background_color (tuple[int], optional): RGBA 4-tuple color to give to invalid pixels. Defaults to (128, 128, 128, 255). gamma_corrected (bool, optional): Apply gamma correction to the colored image. Defaults to False. value_transform (Callable, optional): Transform applied to valid pixels before coloring. Defaults to None. Returns: numpy.ndarray, dtype uint8: Colored depth map. Shape: (H, W, 4)
Here is the function:
def colorize(value, vmin=None, vmax=None, cmap='jet', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
    """Converts a depth map to a color image.

    Args:
        value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
        vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, the 2nd percentile of valid pixels is used. Defaults to None.
        vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, the 98th percentile of valid pixels is used. Defaults to None.
        cmap (str, optional): matplotlib colormap to use. Defaults to 'jet'.
        invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
        invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
        background_color (tuple[int], optional): 4-tuple RGBA color to give to invalid pixels. Defaults to (128, 128, 128, 255).
        gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
        value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.

    Returns:
        numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
    """
    if isinstance(value, torch.Tensor):
        value = value.detach().cpu().numpy()

    value = value.squeeze()
    if invalid_mask is None:
        invalid_mask = value == invalid_val
    mask = np.logical_not(invalid_mask)

    # Normalize valid pixels into [0, 1] over a robust percentile window.
    vmin = np.percentile(value[mask], 2) if vmin is None else vmin
    vmax = np.percentile(value[mask], 98) if vmax is None else vmax
    if vmin != vmax:
        value = (value - vmin) / (vmax - vmin)  # vmin..vmax
    else:
        # Avoid 0-division
        value = value * 0.

    # Grey out the invalid values.
    # NOTE(review): NaN assignment assumes a float array here — confirm callers.
    value[invalid_mask] = np.nan
    # Fix: matplotlib.cm.get_cmap was deprecated in matplotlib 3.7 and removed
    # in 3.9; use the colormap registry with a fallback for older versions.
    try:
        cmapper = matplotlib.colormaps[cmap]
    except AttributeError:
        cmapper = matplotlib.cm.get_cmap(cmap)
    if value_transform:
        value = value_transform(value)
    value = cmapper(value, bytes=True)  # (H, W, 4) uint8

    img = value[...]
    img[invalid_mask] = background_color

    if gamma_corrected:
        # gamma correction
        img = img / 255
        img = np.power(img, 2.2)
        img = img * 255
        img = img.astype(np.uint8)
    return img
13,816 | import torch
def mse(img1, img2):
    """Per-image mean squared error; output shape (N, 1)."""
    sq_diff = (img1 - img2) ** 2
    return sq_diff.view(img1.shape[0], -1).mean(dim=1, keepdim=True)
def psnr(img1, img2):
    """Per-image PSNR in dB assuming a signal peak of 1.0; output shape (N, 1)."""
    err = (img1 - img2) ** 2
    err = err.view(img1.shape[0], -1).mean(dim=1, keepdim=True)
    return 20 * torch.log10(1.0 / torch.sqrt(err))
13,817 | from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
# Module-level 3x3 cross-shaped (4-neighbour) convolution used as a fixed
# neighbour-sum filter; built once at import time with gradients disabled.
with torch.no_grad():
    kernelsize=3
    conv = torch.nn.Conv2d(1, 1, kernel_size=kernelsize, padding=(kernelsize//2))
    # Cross kernel: sums the 4 direct neighbours, excluding the centre pixel.
    kernel = torch.tensor([[0.,1.,0.],[1.,0.,1.],[0.,1.,0.]]).reshape(1,1,kernelsize,kernelsize)
    conv.weight.data = kernel #torch.ones((1,1,kernelsize,kernelsize))
    conv.bias.data = torch.tensor([0.])
    conv.requires_grad_(False)
    conv = conv.cuda()  # NOTE(review): requires a CUDA device at import time
def l1_loss(network_output, gt):
    """Mean absolute error between prediction and ground truth."""
    return (network_output - gt).abs().mean()
13,818 | from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
def l2_loss(network_output, gt):
    """Mean squared error between prediction and ground truth."""
    diff = network_output - gt
    return (diff * diff).mean()
13,819 | from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def create_window(window_size, channel):
    """Build a (channel, 1, window_size, window_size) Gaussian window for SSIM."""
    gauss_1d = gaussian(window_size, 1.5).unsqueeze(1)
    gauss_2d = gauss_1d.mm(gauss_1d.t()).float().unsqueeze(0).unsqueeze(0)
    expanded = gauss_2d.expand(channel, 1, window_size, window_size).contiguous()
    return Variable(expanded)
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
import numpy as np
import cv2
def ssim(img1, img2, window_size=11, size_average=True):
    """SSIM between two (N, C, H, W) batches using a Gaussian window."""
    num_channels = img1.size(-3)
    window = create_window(window_size, num_channels)

    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)

    return _ssim(img1, img2, window, window_size, num_channels, size_average)
13,820 | from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
# Module-level 3x3 cross-shaped (4-neighbour) convolution used as a fixed
# neighbour-sum filter; built once at import time with gradients disabled.
with torch.no_grad():
    kernelsize=3
    conv = torch.nn.Conv2d(1, 1, kernel_size=kernelsize, padding=(kernelsize//2))
    # Cross kernel: sums the 4 direct neighbours, excluding the centre pixel.
    kernel = torch.tensor([[0.,1.,0.],[1.,0.,1.],[0.,1.,0.]]).reshape(1,1,kernelsize,kernelsize)
    conv.weight.data = kernel #torch.ones((1,1,kernelsize,kernelsize))
    conv.bias.data = torch.tensor([0.])
    conv.requires_grad_(False)
    conv = conv.cuda()  # NOTE(review): requires a CUDA device at import time
The provided code snippet includes the necessary dependencies for implementing the `image2canny` function. Write a Python function `def image2canny(image, thres1, thres2, isEdge1=True)` to solve the following problem:
image: an (H, W, 3) tensor. Return a float edge mask computed with OpenCV's Canny detector; when isEdge1 is False the mask is inverted (edges become 0, background 1).
Here is the function:
def image2canny(image, thres1, thres2, isEdge1=True):
    """Canny edge mask of an (H, W, 3) tensor (values scaled by 255 before detection).

    Returns a float tensor in {0, 1}; inverted when isEdge1 is False.
    """
    as_uint8 = (image.detach().cpu().numpy() * 255.).astype(np.uint8)
    edges = torch.from_numpy(cv2.Canny(as_uint8, thres1, thres2) / 255.)
    if not isEdge1:
        edges = 1. - edges
    return edges.float()
13,821 | from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
# Module-level 3x3 cross-shaped (4-neighbour) convolution used as a fixed
# neighbour-sum filter; built once at import time with gradients disabled.
with torch.no_grad():
    kernelsize=3
    conv = torch.nn.Conv2d(1, 1, kernel_size=kernelsize, padding=(kernelsize//2))
    # Cross kernel: sums the 4 direct neighbours, excluding the centre pixel.
    kernel = torch.tensor([[0.,1.,0.],[1.,0.,1.],[0.,1.,0.]]).reshape(1,1,kernelsize,kernelsize)
    conv.weight.data = kernel #torch.ones((1,1,kernelsize,kernelsize))
    conv.bias.data = torch.tensor([0.])
    conv.requires_grad_(False)
    conv = conv.cuda()  # NOTE(review): requires a CUDA device at import time
The provided code snippet includes the necessary dependencies for implementing the `nearMean_map` function. Write a Python function `def nearMean_map(array, mask, kernelsize=3)` to solve the following problem:
array: (H, W) / mask: (H, W). Return the 4-neighbour average of `array` restricted to masked pixels (neighbour sums normalized by the count of masked neighbours).
Here is the function:
def nearMean_map(array, mask, kernelsize=3):
    """ array: (H,W) / mask: (H,W) — masked 4-neighbour average via the module-level `conv`. """
    masked_sum = conv((array * mask)[None, None])
    neighbour_count = conv((torch.ones_like(array) * mask)[None, None])
    # Small epsilon guards against division by zero where no neighbour is masked.
    return (masked_sum / (neighbour_count + 1e-8)).squeeze()
13,822 | import os
import numpy as np
import torch
def generate_seed(scale, viewangle):
    """Generate hand-crafted world-to-camera poses (3x4) sweeping small
    yaw/pitch wobbles around four yaw headings (0, viewangle/3,
    2*viewangle/3, viewangle), each wobble repeated with unit lateral shifts.

    Returns a (num_poses, 3, 4) numpy array; the first and last poses are
    the identity pose.

    NOTE(review): `scale` is currently unused — every `* scale` factor in the
    translation vectors is commented out.
    """
    # World 2 Camera
    #### rotate x,y
    render_poses = [np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]])]
    ang = 5  # wobble step in degrees

    # --- heading 0: yaw/pitch wobble about the start view, no translation ---
    for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.eye(3),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))) # Turn left
        posetemp[:3,3:4] = np.array([0,0,0]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)
    # Same wobble pre-rotated by -3*ang, shifted one unit along +x.
    for i,j in zip([-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(-3*ang/180*np.pi), 0, np.sin(-3*ang/180*np.pi)], [0, 1, 0], [-np.sin(-3*ang/180*np.pi), 0, np.cos(-3*ang/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])))
        posetemp[:3,3:4] = np.array([1,0,0]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)
    # Same wobble pre-rotated by +3*ang, shifted one unit along -x.
    for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(3*ang/180*np.pi), 0, np.sin(3*ang/180*np.pi)], [0, 1, 0], [-np.sin(3*ang/180*np.pi), 0, np.cos(3*ang/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])))
        posetemp[:3,3:4] = np.array([-1,0,0]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)

    # --- heading viewangle/3 ---
    rot_cam=viewangle/3
    for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))) # Turn left
        posetemp[:3,3:4] = np.array([0,0,0]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)
    for i,j in zip([-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
                np.matmul(np.array([[np.cos(-3*ang/180*np.pi), 0, np.sin(-3*ang/180*np.pi)], [0, 1, 0], [-np.sin(-3*ang/180*np.pi), 0, np.cos(-3*ang/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
        posetemp[:3,3:4] = np.array([0,0,1]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)
    for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
                np.matmul(np.array([[np.cos(3*ang/180*np.pi), 0, np.sin(3*ang/180*np.pi)], [0, 1, 0], [-np.sin(3*ang/180*np.pi), 0, np.cos(3*ang/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
        posetemp[:3,3:4] = np.array([0,0,-1]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)

    # --- heading 2*viewangle/3 ---
    rot_cam=viewangle*2/3
    for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))) # Turn left
        posetemp[:3,3:4] = np.array([0,0,0]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)
    for i,j in zip([-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
                np.matmul(np.array([[np.cos(-3*ang/180*np.pi), 0, np.sin(-3*ang/180*np.pi)], [0, 1, 0], [-np.sin(-3*ang/180*np.pi), 0, np.cos(-3*ang/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
        posetemp[:3,3:4] = np.array([-1,0,0]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)
    for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
                np.matmul(np.array([[np.cos(3*ang/180*np.pi), 0, np.sin(3*ang/180*np.pi)], [0, 1, 0], [-np.sin(3*ang/180*np.pi), 0, np.cos(3*ang/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
        posetemp[:3,3:4] = np.array([1,0,0]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)

    # --- heading viewangle ---
    rot_cam=viewangle
    for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))) # Turn left
        posetemp[:3,3:4] = np.array([0,0,0]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)
    for i,j in zip([-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
                np.matmul(np.array([[np.cos(-3*ang/180*np.pi), 0, np.sin(-3*ang/180*np.pi)], [0, 1, 0], [-np.sin(-3*ang/180*np.pi), 0, np.cos(-3*ang/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
        posetemp[:3,3:4] = np.array([0,0,-1]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)
    for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
        th, phi = i/180*np.pi, j/180*np.pi
        posetemp = np.zeros((3, 4))
        posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
                np.matmul(np.array([[np.cos(3*ang/180*np.pi), 0, np.sin(3*ang/180*np.pi)], [0, 1, 0], [-np.sin(3*ang/180*np.pi), 0, np.cos(3*ang/180*np.pi)]]),
                np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
        posetemp[:3,3:4] = np.array([0,0,1]).reshape(3,1) # * scale # Transition vector
        render_poses.append(posetemp)

    # Close the trajectory with the identity pose.
    render_poses.append(np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]]))

    render_poses = np.stack(render_poses, axis=0)
    return render_poses
13,823 | import os
import numpy as np
import torch
def generate_seed_360_half(viewangle, n_views):
    """Generate rotation-only camera poses sweeping half of ``viewangle`` to each side.

    The first ``n_views // 2`` poses rotate from 0 up to +viewangle/2 about the
    vertical axis; the next ``n_views // 2`` rotate from 0 down to -viewangle/2.
    All translation vectors are zero.

    Args:
        viewangle: total sweep angle in degrees (split evenly to both sides).
        n_views: requested number of poses; 2 * (n_views // 2) are produced.

    Returns:
        np.ndarray of shape (2 * (n_views // 2), 3, 4) camera poses.
    """
    N = n_views // 2
    halfangle = viewangle / 2
    render_poses = np.zeros((N*2, 3, 4))
    for i in range(N):
        th = (halfangle/N)*i/180*np.pi
        render_poses[i,:3,:3] = np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]])
        # Zero translation. (Was `np.random.randn(3,1)*0.0`, which produced the
        # same zeros but needlessly advanced the global NumPy RNG state.)
        render_poses[i,:3,3:4] = np.zeros((3, 1))
    for i in range(N):
        th = -(halfangle/N)*i/180*np.pi
        render_poses[i+N,:3,:3] = np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]])
        render_poses[i+N,:3,3:4] = np.zeros((3, 1))
    return render_poses
13,824 | import os
import numpy as np
import torch
def generate_seed_hemisphere_(degree, nviews):
    """Sample ``nviews`` camera poses tracing a circle of amplitude ``degree``
    (in degrees) around a pivot point at a fixed depth.

    Horizontal angle follows ``degree * sin`` and vertical angle ``degree * cos``
    over one full period; each pose orbits a manual pivot at depth 4.3.

    Returns:
        np.ndarray of shape (nviews, 3, 4) camera poses.
    """
    phase = np.linspace(0, 2 * np.pi, nviews)
    thlist = degree * np.sin(phase)
    philist = degree * np.cos(phase)
    assert len(thlist) == len(philist)
    pivot = 4.3  # manual central point for arc / you can change this value
    render_poses = np.zeros((len(thlist), 3, 4))
    for idx, (th_deg, phi_deg) in enumerate(zip(thlist, philist)):
        th = th_deg / 180 * np.pi
        phi = phi_deg / 180 * np.pi
        rot_horiz = np.array([[np.cos(th), 0, -np.sin(th)],
                              [0, 1, 0],
                              [np.sin(th), 0, np.cos(th)]])
        rot_vert = np.array([[1, 0, 0],
                             [0, np.cos(phi), -np.sin(phi)],
                             [0, np.sin(phi), np.cos(phi)]])
        render_poses[idx, :3, :3] = rot_horiz @ rot_vert
        # Translation keeps the camera on the arc around the pivot.
        render_poses[idx, :3, 3] = (
            np.array([pivot * np.sin(th), 0, pivot - pivot * np.cos(th)])
            + np.array([0, pivot * np.sin(phi), pivot - pivot * np.cos(phi)])
        )
    return render_poses
13,825 | import os
import numpy as np
import torch
def generate_seed_nothing():
    """Produce a single camera pose with zero rotation and zero translation.

    Equivalent to one step of the hemisphere/arc generators at th = phi = 0,
    so the rotation is the identity and the translation vanishes exactly.

    Returns:
        np.ndarray of shape (1, 3, 4).
    """
    thlist = np.array([0])
    philist = np.array([0])
    assert len(thlist) == len(philist)
    pivot = 4.3  # pivot depth kept for parity with the sibling generators
    render_poses = np.zeros((len(thlist), 3, 4))
    for idx in range(len(thlist)):
        th = thlist[idx] / 180 * np.pi
        phi = philist[idx] / 180 * np.pi
        rot_horiz = np.array([[np.cos(th), 0, -np.sin(th)], [0, 1, 0], [np.sin(th), 0, np.cos(th)]])
        rot_vert = np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])
        render_poses[idx, :3, :3] = rot_horiz @ rot_vert
        render_poses[idx, :3, 3] = (
            np.array([pivot * np.sin(th), 0, pivot - pivot * np.cos(th)])
            + np.array([0, pivot * np.sin(phi), pivot - pivot * np.cos(phi)])
        )
    return render_poses
13,826 | import os
import numpy as np
import torch
def generate_seed_lookaround():
    """Generate a 'look around' trajectory of rotation-only camera poses.

    The camera pans across the top row (pitch +22.5 deg) left to right, tilts
    down to the center row, pans right to left, tilts down to the bottom row
    (pitch -22.5 deg), and pans left to right again. Consecutive sections skip
    their duplicate first frame. All translation vectors are zero.

    Returns:
        list of np.ndarray, each of shape (3, 4).

    Note: the original body computed (and asserted on) a 21-entry thlist/philist
    pair that was immediately discarded; that dead code is removed here.
    """
    def _pose(th_deg, phi_deg):
        # Rotation-only pose: horizontal rotation by th_deg then vertical by
        # phi_deg (degrees); translation stays zero.
        th = th_deg / 180 * np.pi
        phi = phi_deg / 180 * np.pi
        pose = np.zeros((3, 4))
        pose[:3, :3] = np.matmul(
            np.array([[np.cos(th), 0, -np.sin(th)], [0, 1, 0], [np.sin(th), 0, np.cos(th)]]),
            np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))
        return pose

    degsum = 60
    render_poses = []
    # up row / left --> right
    for th in np.linspace(-degsum, degsum, 2 * degsum + 1):
        render_poses.append(_pose(th, 22.5))
    # right edge / up --> center (skip first frame: same as previous last frame)
    for phi in np.linspace(22.5, 0, 23)[1:]:
        render_poses.append(_pose(degsum, phi))
    # center row / right --> left (skip duplicate first frame)
    for th in np.linspace(degsum, -degsum, 2 * degsum + 1)[1:]:
        render_poses.append(_pose(th, 0))
    # left edge / center --> down (skip duplicate first frame)
    for phi in np.linspace(0, -22.5, 23)[1:]:
        render_poses.append(_pose(-degsum, phi))
    # bottom row / left --> right
    for th in np.linspace(-degsum, degsum, 2 * degsum + 1):
        render_poses.append(_pose(th, -22.5))
    return render_poses
13,827 | import os
import numpy as np
import torch
def generate_seed_lookdown():
    """Generate 14 rotation-only poses: a yaw sweep at pitch 0 deg followed by
    the same sweep at pitch -22.5 deg. Translations are all zero.

    Returns:
        np.ndarray of shape (14, 3, 4).
    """
    degsum = 60
    sweep = np.concatenate((np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:]))
    horiz_deg = np.concatenate((sweep, sweep))
    vert_deg = np.concatenate((np.zeros(7), np.full(7, -22.5)))
    assert len(horiz_deg) == len(vert_deg)
    render_poses = np.zeros((len(horiz_deg), 3, 4))
    for idx, (th_deg, phi_deg) in enumerate(zip(horiz_deg, vert_deg)):
        th = th_deg / 180 * np.pi
        phi = phi_deg / 180 * np.pi
        rot_horiz = np.array([[np.cos(th), 0, -np.sin(th)], [0, 1, 0], [np.sin(th), 0, np.cos(th)]])
        rot_vert = np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])
        render_poses[idx, :3, :3] = rot_horiz @ rot_vert
        # translation left at zero (array is zero-initialized)
    return render_poses
13,828 | import os
import numpy as np
import torch
def generate_seed_headbanging_circle(maxdeg, nviews_per_round, round=3, fullround=1):
    """Generate a spiral-in / hold / spiral-out 'head banging' trajectory.

    The angular radius ramps linearly from 0 to ``maxdeg`` over ``round``
    revolutions, holds at ``maxdeg`` for ``fullround`` revolutions, then ramps
    back down over ``round`` revolutions. Horizontal amplitude is stretched by
    a factor of 2.66 relative to the vertical. Translations are all zero.

    Note: ``round`` (shadowing the builtin) is kept as the public parameter name.

    Returns:
        np.ndarray of shape (nviews_per_round * (2*round + fullround), 3, 4).
    """
    total_rounds = round + fullround + round
    n_frames = nviews_per_round * total_rounds
    radius = np.concatenate((
        np.linspace(0, maxdeg, nviews_per_round * round),    # ramp up
        maxdeg * np.ones(nviews_per_round * fullround),      # hold
        np.linspace(maxdeg, 0, nviews_per_round * round),    # ramp down
    ))
    phase = np.linspace(0, 2 * np.pi * total_rounds, n_frames)
    thlist = 2.66 * radius * np.sin(phase)
    philist = radius * np.cos(phase)
    assert len(thlist) == len(philist)
    render_poses = np.zeros((len(thlist), 3, 4))
    for idx, (th_deg, phi_deg) in enumerate(zip(thlist, philist)):
        th = th_deg / 180 * np.pi
        phi = phi_deg / 180 * np.pi
        render_poses[idx, :3, :3] = np.matmul(
            np.array([[np.cos(th), 0, -np.sin(th)], [0, 1, 0], [np.sin(th), 0, np.cos(th)]]),
            np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))
        # translation left at zero (array is zero-initialized)
    return render_poses
13,829 | import os
import numpy as np
import torch
def generate_seed_360(viewangle, n_views):
    """Generate ``n_views`` poses rotating about the vertical axis through
    ``viewangle`` degrees in total (pose i at i * viewangle / n_views degrees).
    All translation vectors are zero.

    Returns:
        np.ndarray of shape (n_views, 3, 4).
    """
    N = n_views
    render_poses = np.zeros((N, 3, 4))
    for i in range(N):
        th = (viewangle/N)*i/180*np.pi
        render_poses[i,:3,:3] = np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]])
        # Zero translation. (Was `np.random.randn(3,1)*0.0`, which produced the
        # same zeros but needlessly advanced the global NumPy RNG state.)
        render_poses[i,:3,3:4] = np.zeros((3, 1))
    return render_poses
def generate_seed_preset():
    """Generate 21 rotation-only poses: a yaw sweep (0 -> +60 -> -60 deg pattern)
    repeated at pitches 0, -22.5 and +22.5 degrees. Translations are all zero.

    Returns:
        np.ndarray of shape (21, 3, 4).
    """
    degsum = 60
    sweep = np.concatenate((np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:]))
    horiz_deg = np.concatenate((sweep, sweep, sweep))
    vert_deg = np.concatenate((np.zeros(7), np.full(7, -22.5), np.full(7, 22.5)))
    assert len(horiz_deg) == len(vert_deg)
    render_poses = np.zeros((len(horiz_deg), 3, 4))
    for idx, (th_deg, phi_deg) in enumerate(zip(horiz_deg, vert_deg)):
        th = th_deg / 180 * np.pi
        phi = phi_deg / 180 * np.pi
        rot_horiz = np.array([[np.cos(th), 0, -np.sin(th)], [0, 1, 0], [np.sin(th), 0, np.cos(th)]])
        rot_vert = np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])
        render_poses[idx, :3, :3] = rot_horiz @ rot_vert
        # translation left at zero (array is zero-initialized)
    return render_poses
def generate_seed_newpreset():
    """Generate 14 rotation-only poses: a yaw sweep (0 -> +60 -> -60 deg pattern)
    at pitch 0 degrees, then the same sweep at pitch +22.5 degrees.
    Translations are all zero.

    Returns:
        np.ndarray of shape (14, 3, 4).
    """
    degsum = 60
    sweep = np.concatenate((np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:]))
    horiz_deg = np.concatenate((sweep, sweep))
    vert_deg = np.concatenate((np.zeros(7), np.full(7, 22.5)))
    assert len(horiz_deg) == len(vert_deg)
    render_poses = np.zeros((len(horiz_deg), 3, 4))
    for idx, (th_deg, phi_deg) in enumerate(zip(horiz_deg, vert_deg)):
        th = th_deg / 180 * np.pi
        phi = phi_deg / 180 * np.pi
        rot_horiz = np.array([[np.cos(th), 0, -np.sin(th)], [0, 1, 0], [np.sin(th), 0, np.cos(th)]])
        rot_vert = np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])
        render_poses[idx, :3, :3] = rot_horiz @ rot_vert
        # translation left at zero (array is zero-initialized)
    return render_poses
def generate_seed_horizon():
    """Generate 11 poses translating along -x from 0 to -5 (identity rotation).

    Returns:
        np.ndarray of shape (11, 3, 4).
    """
    shifts = np.linspace(0, 5, 11)
    render_poses = np.zeros((len(shifts), 3, 4))
    render_poses[:, :3, :3] = np.eye(3)
    render_poses[:, 0, 3] = -shifts  # move right in world == negative x translation
    return render_poses
def generate_seed_backward():
    """Generate 11 poses translating along +z from 0 to 5 (identity rotation).

    Returns:
        np.ndarray of shape (11, 3, 4).
    """
    shifts = np.linspace(0, 5, 11)
    render_poses = np.zeros((len(shifts), 3, 4))
    render_poses[:, :3, :3] = np.eye(3)
    render_poses[:, 2, 3] = shifts  # move backward == positive z translation
    return render_poses
def generate_seed_arc():
    """Generate camera poses along a small horizontal arc around a pivot at depth 4.3.

    NOTE(review): ``np.arange(0, degree, 5)`` has shape (1,) and
    ``np.arange(0, -degree, 5)[1:]`` is empty (positive step toward a negative
    stop yields an empty array), so the ``+`` broadcasts (1,) with (0,) into an
    EMPTY array — this function currently returns an empty (0, 3, 4) pose array.
    ``np.concatenate`` was likely intended instead of ``+``; confirm against the
    upstream project before changing behavior.
    """
    degree = 5
    # thlist = np.array([degree, 0, 0, 0, -degree])
    thlist = np.arange(0, degree, 5) + np.arange(0, -degree, 5)[1:]
    phi = 0
    render_poses = np.zeros((len(thlist), 3, 4))
    for i in range(len(thlist)):
        th = thlist[i]
        d = 4.3 # manual central point for arc / you can change this value
        render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
        render_poses[i,:3,3:4] = np.array([d*np.sin(th/180*np.pi), 0, d-d*np.cos(th/180*np.pi)]).reshape(3,1) + np.array([0, d*np.sin(phi/180*np.pi), d-d*np.cos(phi/180*np.pi)]).reshape(3,1)# Transition vector
        # render_poses[i,:3,3:4] = np.zeros((3,1))
    return render_poses
def generate_seed_hemisphere(center_depth, degree=5):
    """Generate five poses on a small arc around a pivot at depth ``center_depth``.

    Poses look at the pivot from +degree/-degree horizontally and vertically
    plus the central view (th, phi) in [(d,0), (0,-d), (0,0), (0,d), (-d,0)].

    Args:
        center_depth: depth of the (hemi)sphere pivot the camera orbits.
        degree: angular offset in degrees. Fix: the original body unconditionally
            overwrote this parameter with ``degree = 5``, making it dead; honoring
            it is backward-compatible because the default is 5.

    Returns:
        np.ndarray of shape (5, 3, 4).
    """
    thlist = np.array([degree, 0, 0, 0, -degree])
    philist = np.array([0, -degree, 0, degree, 0])
    assert len(thlist) == len(philist)
    render_poses = np.zeros((len(thlist), 3, 4))
    for i in range(len(thlist)):
        th = thlist[i]
        phi = philist[i]
        d = center_depth  # central point of (hemi)sphere / you can change this value
        render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
        render_poses[i,:3,3:4] = np.array([d*np.sin(th/180*np.pi), 0, d-d*np.cos(th/180*np.pi)]).reshape(3,1) + np.array([0, d*np.sin(phi/180*np.pi), d-d*np.cos(phi/180*np.pi)]).reshape(3,1)  # Transition vector
    return render_poses
def get_pcdGenPoses(pcdgenpath, argdict=None):
    """Dispatch a preset trajectory name to the matching pose generator.

    Args:
        pcdgenpath: one of 'rotate360', 'lookaround', 'moveright', 'moveback',
            'arc', 'lookdown', 'hemisphere'.
        argdict: optional extra arguments; only 'hemisphere' reads it
            ('center_depth'). Default changed from a shared mutable ``{}`` to
            ``None`` (resolved to a fresh dict) — behavior-compatible.

    Returns:
        The pose array/list produced by the selected generator.

    Raises:
        ValueError: if ``pcdgenpath`` is not a known preset.
    """
    if argdict is None:
        argdict = {}
    if pcdgenpath == 'rotate360':
        render_poses = generate_seed_360(360, 10)
    elif pcdgenpath == 'lookaround':
        render_poses = generate_seed_preset()
    elif pcdgenpath == 'moveright':
        render_poses = generate_seed_horizon()
    elif pcdgenpath == 'moveback':
        render_poses = generate_seed_backward()
    elif pcdgenpath == 'arc':
        render_poses = generate_seed_arc()
    elif pcdgenpath == 'lookdown':
        render_poses = generate_seed_newpreset()
    elif pcdgenpath == 'hemisphere':
        render_poses = generate_seed_hemisphere(argdict['center_depth'])
    else:
        # Fix: `raise("Invalid pcdgenpath")` raised a TypeError (a str is not an
        # exception); raise a meaningful exception type instead.
        raise ValueError("Invalid pcdgenpath")
    return render_poses
13,830 | import math
from typing import NamedTuple
import numpy as np
import torch
def geom_transform_points(points, transf_matrix):
    """Apply a 4x4 transform to (P, 3) points with a homogeneous divide.

    Points are multiplied as row vectors (``points_hom @ transf_matrix``), so
    ``transf_matrix`` is expected in the transposed (row-vector) convention —
    confirm against callers. A small epsilon guards the perspective divide.
    """
    num_points, _ = points.shape
    ones_col = torch.ones(num_points, 1, dtype=points.dtype, device=points.device)
    homogeneous = torch.cat([points, ones_col], dim=1)
    transformed = torch.matmul(homogeneous, transf_matrix.unsqueeze(0))
    w = transformed[..., 3:] + 0.0000001  # avoid division by zero
    return (transformed[..., :3] / w).squeeze(dim=0)
13,831 | import sys
import random
from datetime import datetime
import numpy as np
import torch
def inverse_sigmoid(x):
    """Logit function: the inverse of sigmoid, ``log(x / (1 - x))`` elementwise.

    Input values must lie strictly in (0, 1) to avoid inf/nan.
    """
    odds = x / (1 - x)
    return torch.log(odds)
13,832 | import sys
import random
from datetime import datetime
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `get_expon_lr_func` function. Write a Python function `def get_expon_lr_func( lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000 )` to solve the following problem:
Copied from Plenoxels Continuous learning rate decay function. Adapted from JaxNeRF The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. :param conf: config subtree 'lr' or similar :param max_steps: int, the number of steps during optimization. :return HoF which takes step as input
Here is the function:
def get_expon_lr_func(
    lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000
):
    """
    Copied from Plenoxels.
    Continuous learning-rate decay (adapted from JaxNeRF).

    The returned function yields ``lr_init`` at step 0 and ``lr_final`` at
    ``max_steps``, log-linearly interpolated in between (i.e. exponential
    decay). When ``lr_delay_steps > 0`` the rate is additionally scaled by a
    smooth ramp so it starts at ``lr_init * lr_delay_mult`` and eases back to
    the nominal schedule once ``step > lr_delay_steps``.

    :param max_steps: int, number of optimization steps.
    :return: function mapping an int step to a learning rate.
    """
    def scheduler(step):
        if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
            # Negative step or zero endpoints: parameter is disabled.
            return 0.0
        if lr_delay_steps > 0:
            # Reverse-cosine-style warmup ramp from lr_delay_mult up to 1.
            progress = np.clip(step / lr_delay_steps, 0, 1)
            delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(0.5 * np.pi * progress)
        else:
            delay_rate = 1.0
        t = np.clip(step / max_steps, 0, 1)
        log_lerp = np.exp((1 - t) * np.log(lr_init) + t * np.log(lr_final))
        return delay_rate * log_lerp
    return scheduler
13,833 | import sys
import random
from datetime import datetime
import numpy as np
import torch
def strip_lowerdiag(L):
    """Extract the 6 unique entries of each symmetric 3x3 matrix in a batch.

    NOTE(review): the body of this function was missing from this file (the
    bare ``def`` line was a syntax error); reconstructed from the reference
    3D Gaussian Splatting implementation, with the output allocated on the
    input's device instead of a hard-coded 'cuda'.

    Args:
        L: tensor of shape (N, 3, 3), assumed symmetric.

    Returns:
        Tensor of shape (N, 6) holding the upper-triangular entries
        [00, 01, 02, 11, 12, 22].
    """
    uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device=L.device)
    uncertainty[:, 0] = L[:, 0, 0]
    uncertainty[:, 1] = L[:, 0, 1]
    uncertainty[:, 2] = L[:, 0, 2]
    uncertainty[:, 3] = L[:, 1, 1]
    uncertainty[:, 4] = L[:, 1, 2]
    uncertainty[:, 5] = L[:, 2, 2]
    return uncertainty

def strip_symmetric(sym):
    """Alias for :func:`strip_lowerdiag` on a batch of symmetric matrices."""
    return strip_lowerdiag(sym)
13,834 | import sys
import random
from datetime import datetime
import numpy as np
import torch
def build_rotation(r):
    """Convert a batch of quaternions to rotation matrices.

    NOTE(review): the body of this function was missing from this file;
    reconstructed from the reference 3D Gaussian Splatting implementation,
    with the device taken from the input instead of a hard-coded 'cuda'.

    Args:
        r: tensor of shape (N, 4), quaternions in (w, x, y, z) order
           (normalized internally).

    Returns:
        Tensor of shape (N, 3, 3) rotation matrices.
    """
    norm = torch.sqrt(r[:, 0] * r[:, 0] + r[:, 1] * r[:, 1] + r[:, 2] * r[:, 2] + r[:, 3] * r[:, 3])
    q = r / norm[:, None]
    R = torch.zeros((q.size(0), 3, 3), device=r.device)
    w = q[:, 0]
    x = q[:, 1]
    y = q[:, 2]
    z = q[:, 3]
    R[:, 0, 0] = 1 - 2 * (y * y + z * z)
    R[:, 0, 1] = 2 * (x * y - w * z)
    R[:, 0, 2] = 2 * (x * z + w * y)
    R[:, 1, 0] = 2 * (x * y + w * z)
    R[:, 1, 1] = 1 - 2 * (x * x + z * z)
    R[:, 1, 2] = 2 * (y * z - w * x)
    R[:, 2, 0] = 2 * (x * z - w * y)
    R[:, 2, 1] = 2 * (y * z + w * x)
    R[:, 2, 2] = 1 - 2 * (x * x + y * y)
    return R

def build_scaling_rotation(s, r):
    """Build L = R @ diag(s): the scaled-rotation factor of a covariance.

    Args:
        s: (N, 3) per-axis scales.
        r: (N, 4) quaternions in (w, x, y, z) order.

    Returns:
        Tensor of shape (N, 3, 3). Device now follows ``s`` (was hard-coded
        'cuda'; CUDA callers are unaffected, CPU tensors now work too).
    """
    L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device=s.device)
    R = build_rotation(r)
    L[:, 0, 0] = s[:, 0]
    L[:, 1, 1] = s[:, 1]
    L[:, 2, 2] = s[:, 2]
    L = R @ L
    return L
13,835 | import sys
import random
from datetime import datetime
import numpy as np
import torch
def safe_state(silent):
    """Install deterministic, optionally-silent process-global state.

    Replaces ``sys.stdout`` with a proxy that drops all writes when ``silent``
    is True and otherwise appends a ``[DD/MM HH:MM:SS]`` timestamp to writes
    ending in a newline. Seeds ``random``, NumPy and PyTorch with 0 and selects
    CUDA device 0 (requires a CUDA-enabled build with at least one GPU).
    Mutates global state; intended to be called once at startup.
    """
    old_f = sys.stdout
    class F:
        # stdout proxy: filter/stamp writes, delegate flushing to the real stream.
        def __init__(self, silent):
            self.silent = silent
        def write(self, x):
            if not self.silent:
                if x.endswith("\n"):
                    # Stamp every newline in the chunk with the current time.
                    old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S")))))
                else:
                    old_f.write(x)
        def flush(self):
            old_f.flush()
    sys.stdout = F(silent)
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.set_device(torch.device("cuda:0"))
13,836 | import torch
# Hard-coded real spherical-harmonic (SH) basis constants for bands 0-4,
# consumed by `eval_sh` below (standard real-SH normalization,
# e.g. C0 = 1 / (2 * sqrt(pi))).
C0 = 0.28209479177387814  # band 0 (DC term)
C1 = 0.4886025119029199  # band 1 (linear terms)
# Band-2 coefficients (5 terms).
C2 = [
    1.0925484305920792,
    -1.0925484305920792,
    0.31539156525252005,
    -1.0925484305920792,
    0.5462742152960396
]
# Band-3 coefficients (7 terms).
C3 = [
    -0.5900435899266435,
    2.890611442640554,
    -0.4570457994644658,
    0.3731763325901154,
    -0.4570457994644658,
    1.445305721320277,
    -0.5900435899266435
]
# Band-4 coefficients (9 terms).
C4 = [
    2.5033429417967046,
    -1.7701307697799304,
    0.9461746957575601,
    -0.6690465435572892,
    0.10578554691520431,
    -0.6690465435572892,
    0.47308734787878004,
    -1.7701307697799304,
    0.6258357354491761,
]
The provided code snippet includes necessary dependencies for implementing the `eval_sh` function. Write a Python function `def eval_sh(deg, sh, dirs)` to solve the following problem:
Evaluate spherical harmonics at unit directions using hardcoded SH polynomials. Works with torch/np/jnp. ... Can be 0 or more batch dimensions. Args: deg: int SH deg. Currently, 0-3 supported sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2] dirs: jnp.ndarray unit directions [..., 3] Returns: [..., C]
Here is the function:
def eval_sh(deg, sh, dirs):
    """
    Evaluate spherical harmonics at unit directions
    using hardcoded SH polynomials.
    Works with torch/np/jnp.
    ... Can be 0 or more batch dimensions.
    Args:
        deg: int SH deg. Currently, 0-4 supported
        sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]
        dirs: jnp.ndarray unit directions [..., 3]
    Returns:
        [..., C]
    """
    assert deg <= 4 and deg >= 0
    # Each band d contributes 2d+1 coefficients; (deg+1)^2 in total.
    coeff = (deg + 1) ** 2
    assert sh.shape[-1] >= coeff
    # Band 0: constant (DC) term.
    result = C0 * sh[..., 0]
    if deg > 0:
        # Band 1: linear in the direction components.
        x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
        result = (result -
                C1 * y * sh[..., 1] +
                C1 * z * sh[..., 2] -
                C1 * x * sh[..., 3])
        if deg > 1:
            # Band 2: quadratic terms.
            xx, yy, zz = x * x, y * y, z * z
            xy, yz, xz = x * y, y * z, x * z
            result = (result +
                    C2[0] * xy * sh[..., 4] +
                    C2[1] * yz * sh[..., 5] +
                    C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
                    C2[3] * xz * sh[..., 7] +
                    C2[4] * (xx - yy) * sh[..., 8])
            if deg > 2:
                # Band 3: cubic terms.
                result = (result +
                C3[0] * y * (3 * xx - yy) * sh[..., 9] +
                C3[1] * xy * z * sh[..., 10] +
                C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
                C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
                C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
                C3[5] * z * (xx - yy) * sh[..., 14] +
                C3[6] * x * (xx - 3 * yy) * sh[..., 15])
                if deg > 3:
                    # Band 4: quartic terms.
                    result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
                            C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
                            C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
                            C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
                            C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
                            C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
                            C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
                            C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
                            C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
    return result
13,837 | import torch
# SH band-0 (DC) basis constant: 1 / (2 * sqrt(pi)).
C0 = 0.28209479177387814
def RGB2SH(rgb):
    """Convert RGB values to band-0 SH coefficients (0.5 maps to 0)."""
    centered = rgb - 0.5
    return centered / C0
13,838 | import os
import sys
import hashlib
import logging
from typing import Union
from urllib.parse import urlparse
import numpy as np
import torch
from torch.hub import download_url_to_file, get_dir
def handle_error(model_path: str, model_md5: str, e: str) -> None:
    """Report a model-loading failure and terminate the process.

    If the file's md5 differs from ``model_md5``, the corrupt file is deleted
    (best effort) so the next run re-downloads it; otherwise the original load
    error ``e`` is reported. Always exits with status -1.

    NOTE(review): callers pass the caught Exception object as ``e`` despite the
    ``str`` annotation; it is only string-interpolated, so that works.
    """
    _md5 = md5sum(model_path)
    if _md5 != model_md5:
        try:
            os.remove(model_path)
            logging.error(
                f"Model md5: {_md5}, expected md5: {model_md5}, wrong model "
                f"deleted. Please restart lama-cleaner. If you still have "
                f"errors, please try download model manually first https://"
                f"lama-cleaner-docs.vercel.app/install/download_model_"
                f"manually.\n")
        except OSError:
            # Fix: was a bare `except:` that swallowed everything (including
            # KeyboardInterrupt/SystemExit); only deletion failures belong here.
            logging.error(
                f"Model md5: {_md5}, expected md5: {model_md5}, please delete"
                f" {model_path} and restart lama-cleaner.")
    else:
        logging.error(
            f"Failed to load model {model_path}, please submit an issue at "
            f"https://github.com/ironjr/simple-lama/issues and include a "
            f"screenshot of the error:\n{e}")
    exit(-1)
def download_model(url: str, model_md5: str = None) -> str:
    """Download ``url`` into the local cache if absent, optionally verifying md5.

    On md5 mismatch the bad file is deleted (best effort) and the process exits
    with status -1 so a restart can re-download it.

    Returns:
        Path to the cached model file.
    """
    cached_file = get_cache_path_by_url(url)
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = None
        download_url_to_file(url, cached_file, hash_prefix, progress=True)
        if model_md5:
            _md5 = md5sum(cached_file)
            if model_md5 == _md5:
                logging.info(f"Download model success, md5: {_md5}")
            else:
                try:
                    os.remove(cached_file)
                    logging.error(
                        f"Model md5: {_md5}, expected md5: {model_md5}, wrong"
                        f" model deleted. Please restart lama-cleaner. If you"
                        f" still have errors, please try download model "
                        f"manually first https://lama-cleaner-docs.vercel"
                        f".app/install/download_model_manually.\n")
                except OSError:
                    # Fix: was a bare `except:`; only deletion failures belong here.
                    logging.error(
                        f"Model md5: {_md5}, expected md5: {model_md5}, "
                        f"please delete {cached_file} and restart lama-"
                        f"cleaner.")
                exit(-1)
    return cached_file
def load_jit_model(
        url_or_path: str,
        device: Union[torch.device, str],
        model_md5: str,
) -> torch.jit._script.RecursiveScriptModule:
    """Load a TorchScript model from a local path or URL (downloading if needed).

    The model is deserialized onto CPU, moved to ``device``, and returned in
    eval mode. On failure, ``handle_error`` reports the problem and exits the
    process. NOTE(review): ``handle_error`` is annotated to take ``e: str`` but
    receives the Exception object here; it is only string-interpolated.
    """
    if os.path.exists(url_or_path):
        model_path = url_or_path
    else:
        model_path = download_model(url_or_path, model_md5)
    logging.info(f"Loading model from: {model_path}")
    try:
        model = torch.jit.load(model_path, map_location="cpu").to(device)
    except Exception as e:
        # handle_error never returns (it exits), so `model` is always bound below.
        handle_error(model_path, model_md5, e)
    model.eval()
    return model
13,839 | import os
import sys
import hashlib
import logging
from typing import Union
from urllib.parse import urlparse
import numpy as np
import torch
from torch.hub import download_url_to_file, get_dir
def norm_img(np_img: np.ndarray) -> np.ndarray:
    """Convert an HW or HWC uint8-range image to CHW float32 in [0, 1]."""
    if np_img.ndim == 2:
        # Grayscale: add a trailing channel axis so the transpose below works.
        np_img = np_img[:, :, np.newaxis]
    chw = np_img.transpose((2, 0, 1))
    return chw.astype("float32") / 255
13,840 | import os
import sys
import hashlib
import logging
from typing import Union
from urllib.parse import urlparse
import numpy as np
import torch
from torch.hub import download_url_to_file, get_dir
def ceil_modulo(x: int, mod: int) -> int:
    """Round ``x`` up to the nearest multiple of ``mod``.

    NOTE(review): the body of this function was missing from this file (the
    bare ``def`` line was a syntax error); reconstructed from the standard
    LaMa / lama-cleaner implementation.
    """
    if x % mod == 0:
        return x
    return (x // mod + 1) * mod

def pad_img_to_modulo(img: np.ndarray, mod: int) -> np.ndarray:
    """Symmetrically pad an HW or HWC image so height and width are multiples of ``mod``.

    Grayscale input gains a trailing channel axis; padding is appended on the
    bottom/right using 'symmetric' (mirrored) values.
    """
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
    height, width = img.shape[:2]
    out_height = ceil_modulo(height, mod)
    out_width = ceil_modulo(width, mod)
    return np.pad(
        img,
        ((0, out_height - height), (0, out_width - width), (0, 0)),
        mode="symmetric",
    )
13,841 | import os
from setuptools import setup, find_packages
def version() -> str:
    """Read the package version from ``stable_whisper/_version.py``.

    Takes everything after the last ``=`` in the file and strips whitespace
    and quotes — assumes the file is a single ``__version__ = "..."``-style
    assignment (TODO confirm the file's format stays that simple).
    """
    with open(os.path.join(os.path.dirname(__file__), 'stable_whisper/_version.py')) as f:
        return f.read().split('=')[-1].strip().strip('"').strip("'")
13,842 | import os
from setuptools import setup, find_packages
def read_me() -> str:
    """Return the contents of README.md (UTF-8), e.g. for setuptools' long_description."""
    with open('README.md', 'r', encoding='utf-8') as f:
        return f.read()
13,843 | from typing import Optional, Union
import numpy as np
import torch
from ..audio import convert_demucs_kwargs, prep_audio
from ..non_whisper import transcribe_any
from ..utils import isolate_useful_options
# Map short Whisper model names to Hugging Face Hub repo ids.
# Names not in this table are passed through unchanged (treated as raw repo ids)
# via HF_MODELS.get(model_name, model_name) in load_hf_pipe.
HF_MODELS = {
    "tiny.en": "openai/whisper-tiny.en",
    "tiny": "openai/whisper-tiny",
    "base.en": "openai/whisper-base.en",
    "base": "openai/whisper-base",
    "small.en": "openai/whisper-small.en",
    "small": "openai/whisper-small",
    "medium.en": "openai/whisper-medium.en",
    "medium": "openai/whisper-medium",
    "large-v1": "openai/whisper-large-v1",
    "large-v2": "openai/whisper-large-v2",
    "large-v3": "openai/whisper-large-v3",
    "large": "openai/whisper-large-v3",  # bare "large" aliases the newest (v3) checkpoint
}
def get_device(device: str = None) -> str:
    """Resolve a device string: explicit choice, else CUDA, else MPS, else CPU."""
    if device:
        return device
    if torch.cuda.is_available():
        return 'cuda:0'
    mps_backend = getattr(torch.backends, 'mps', None)
    if mps_backend is not None and mps_backend.is_available():
        return 'mps'
    return 'cpu'
def load_hf_pipe(model_name: str, device: str = None, flash: bool = False):
    """Build a Hugging Face ``automatic-speech-recognition`` pipeline for Whisper.

    Args:
        model_name: short name resolved via ``HF_MODELS`` or a raw HF repo id.
        device: target device; resolved via ``get_device`` when None.
        flash: enable flash-attention-2 in the model; when off, the model is
            converted with BetterTransformer if supported.

    Returns:
        A ``transformers`` ASR pipeline (30s chunking, up to 128 new tokens).
    """
    from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
    device = get_device(device)
    # fp16 only when running with CUDA available on a non-CPU device; otherwise fp32.
    is_cpu = (device if isinstance(device, str) else getattr(device, 'type', None)) == 'cpu'
    dtype = torch.float32 if is_cpu or not torch.cuda.is_available() else torch.float16
    model_id = HF_MODELS.get(model_name, model_name)  # unknown names pass through as repo ids
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_id,
        torch_dtype=dtype,
        low_cpu_mem_usage=True,
        use_safetensors=True,
        use_flash_attention_2=flash
    ).to(device)
    processor = AutoProcessor.from_pretrained(model_id)
    if not flash:
        try:
            model = model.to_bettertransformer()
        except ValueError:
            # Model/architecture doesn't support BetterTransformer; keep as-is.
            pass
    pipe = pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        max_new_tokens=128,
        chunk_length_s=30,
        torch_dtype=dtype,
        device=device,
    )
    return pipe
13,844 | import os
import warnings
import argparse
from typing import Optional, List, Union
from os.path import splitext, split, join
import numpy as np
import torch
from ..result import WhisperResult
from ..utils import isolate_useful_options, str_to_valid_type, get_func_parameters
from ..audio import SUPPORTED_DENOISERS
from ..default import *
from whisper import DecodingOptions, available_models
from whisper.tokenizer import LANGUAGES, TO_LANGUAGE_CODE
from whisper.utils import optional_int, optional_float
import warnings
# Suppress Triton-related UserWarnings raised by the whisper module.
warnings.filterwarnings('ignore', module='whisper', message='.*Triton.*', category=UserWarning)
class WhisperResult:
    def __init__(
            self,
            result: Union[str, dict, list],
            force_order: bool = False,
            check_sorted: Union[bool, str] = True,
            show_unsorted: bool = True
    ):
        """Build a result container from a JSON path, a result dict, or (nested) lists of segment/word dicts.

        ``force_order`` clamps out-of-order timestamps in place instead of
        reporting them. ``check_sorted`` controls how unsorted timestamps are
        handled: True raises, False skips the check, a str path warns and dumps
        the data there. ``show_unsorted`` prints the offending parts first.
        """
        result, self.path = self._standardize_result(result)
        # Keep the original (pre-standardization) dict for serialization/round-trips.
        self.ori_dict = result.get('ori_dict') or result
        self.language = self.ori_dict.get('language')
        self._regroup_history = result.get('regroup_history', '')
        self._nonspeech_sections = result.get('nonspeech_sections', [])
        segments = (result.get('segments', self.ori_dict.get('segments')) or {}).copy()
        self.segments = [Segment(**s, ignore_unused_args=True) for s in segments] if segments else []
        self._forced_order = force_order
        if self._forced_order:
            self.force_order()
        self.raise_for_unsorted(check_sorted, show_unsorted)
        # Pass whether any segment carries word-level data; see remove_no_word_segments.
        self.remove_no_word_segments(any(seg.has_words for seg in self.segments))
    def __getitem__(self, index: int) -> Segment:
        """Return the segment at ``index``."""
        return self.segments[index]
    def __delitem__(self, index: int):
        """Delete the segment at ``index`` and renumber the ids of later segments."""
        del self.segments[index]
        self.reassign_ids(True, start=index)
    def duration(self):
        """Total spanned time: last segment's end minus first segment's start (0.0 if empty).

        NOTE(review): other methods here access this as ``self.duration`` without
        calling it (e.g. the tqdm total in ``suppress_silence``), so this was very
        likely a ``@property`` whose decorator was lost in extraction — confirm
        against the upstream source.
        """
        if not self.segments:
            return 0.0
        return _round_timestamp(self.segments[-1].end - self.segments[0].start)
    def _standardize_result(result: Union[str, dict, List[dict], List[List[dict]]]) -> Tuple[dict, Union[str, None]]:
        """Normalize ``result`` into a plain dict plus the JSON path it came from (or None).

        Accepts: a path to a saved result (loaded via ``load_result``), an
        already-standard dict, a list of segment dicts, or a list of lists of
        word dicts (segments are synthesized spanning each word group).

        NOTE(review): defined without ``self`` yet called as
        ``self._standardize_result(result)`` in ``__init__`` — very likely a
        ``@staticmethod`` whose decorator was lost in extraction; confirm
        against the upstream source.
        """
        path = None
        if isinstance(result, str):
            path = result
            result = load_result(path)
        if isinstance(result, dict):
            return result, path
        if not isinstance(result, list):
            raise TypeError(f'Expect result to be list but got {type(result)}')
        if not result or not result[0]:
            # Empty input normalizes to an empty result dict.
            return {}, path
        if isinstance(result[0], list):
            if not isinstance(result[0][0], dict):
                raise NotImplementedError(f'Got list of list of {type(result[0])} but expects list of list of dict')
            # List of lists of word dicts -> synthesize one segment per word group.
            result = dict(
                segments=[
                    dict(
                        start=words[0]['start'],
                        end=words[-1]['end'],
                        text=''.join(w['word'] for w in words),
                        words=words
                    )
                    for words in result if words
                ]
            )
        elif isinstance(result[0], dict):
            result = dict(segments=result)
        else:
            raise NotImplementedError(f'Got list of {type(result[0])} but expects list of list/dict')
        return result, path
    def force_order(self):
        """Clamp timestamps in place so every start/end is monotonically non-decreasing."""
        prev_ts_end = 0
        timestamps = self.all_words_or_segments()
        for i, ts in enumerate(timestamps, 1):
            if ts.start < prev_ts_end:
                # Start precedes the previous item's end: push it forward.
                ts.start = prev_ts_end
            if ts.start > ts.end:
                if prev_ts_end > ts.end:
                    # Several consecutive items extend past this end: collapse this
                    # one to zero duration and pull earlier items back so nothing
                    # reaches beyond it.
                    warnings.warn('Multiple consecutive timestamps are out of order. Some parts will have no duration.')
                    ts.start = ts.end
                    for j in range(i-2, -1, -1):
                        if timestamps[j].end > ts.end:
                            timestamps[j].end = ts.end
                        if timestamps[j].start > ts.end:
                            timestamps[j].start = ts.end
                else:
                    if ts.start != prev_ts_end:
                        ts.start = prev_ts_end
                    else:
                        # Start already pinned to the previous end: extend the end
                        # to the next item's start (or collapse at the tail).
                        ts.end = ts.start if i == len(timestamps) else timestamps[i].start
            prev_ts_end = ts.end
    def raise_for_unsorted(self, check_sorted: Union[bool, str] = True, show_unsorted: bool = True):
        """Verify all word/segment timestamps ascend; raise, warn, or dump based on ``check_sorted``.

        ``check_sorted``: False skips the check; True raises UnsortedException;
        a str is treated as a file path — a warning is emitted and the data is
        saved there as JSON instead. ``show_unsorted`` prints each conflicting
        part before raising/saving.
        """
        if check_sorted is False:
            return
        all_parts = self.all_words_or_segments()
        if not all_parts:
            return
        is_word = isinstance(all_parts[0], WordTiming)
        # Flatten to [start0, end0, start1, end1, ...]; any adjacent inversion means unsorted.
        timestamps = np.array(list(chain.from_iterable((p.start, p.end) for p in all_parts)))
        if len(timestamps) > 1 and (unsorted_mask := timestamps[:-1] > timestamps[1:]).any():
            if show_unsorted:
                def get_part_info(idx):
                    # Human-readable description of one word/segment plus its bounds.
                    curr_part = all_parts[idx]
                    seg_id = curr_part.segment_id if is_word else curr_part.id
                    word_id_str = f'Word ID: {curr_part.id}\n' if is_word else ''
                    return (
                        f'Segment ID: {seg_id}\n{word_id_str}'
                        f'Start: {curr_part.start}\nEnd: {curr_part.end}\n'
                        f'Text: "{curr_part.word if is_word else curr_part.text}"'
                    ), curr_part.start, curr_part.end
                for i, unsorted in enumerate(unsorted_mask, 2):
                    if unsorted:
                        # i indexes the flattened timestamps; i//2-1 is the owning part.
                        word_id = i//2-1
                        part_info, start, end = get_part_info(word_id)
                        if i % 2 == 1:
                            # Odd flat index: conflict between this end and the next start.
                            next_info, next_start, _ = get_part_info(word_id+1)
                            part_info += f'\nConflict: end ({end}) > next start ({next_start})\n{next_info}'
                        else:
                            # Even flat index: the part's own start is after its end.
                            part_info += f'\nConflict: start ({start}) > end ({end})'
                        print(part_info, end='\n\n')
            data = self.to_dict()
            if check_sorted is True:
                raise UnsortedException(data=data)
            warnings.warn('Timestamps are not in ascending order. '
                          'If data is produced by Stable-ts, please submit an issue with the saved data.')
            save_as_json(data, check_sorted)
    def update_all_segs_with_words(self):
        """Deprecated shim: segment attributes are now derived from ``words``; only ids need updating."""
        warnings.warn('Attributes that required updating are now properties based on the ``words`` except for ``id``. '
                      '``update_all_segs_with_words()`` is deprecated and will be removed in future versions. '
                      'Use ``.reassign_ids()`` to manually update ids',
                      stacklevel=2)
        self.reassign_ids()
def update_nonspeech_sections(self, silent_starts, silent_ends):
self._nonspeech_sections = [
dict(start=round(s, 3), end=round(e, 3)) for s, e in zip(silent_starts, silent_ends)
]
    def add_segments(self, index0: int, index1: int, inplace: bool = False, lock: bool = False):
        """Merge segment ``index1`` into segment ``index0`` and return the merged Segment.

        When ``lock`` is True and the base segment has words, the two words at
        the merge boundary are locked (presumably to keep later regrouping from
        splitting there — confirm WordTiming lock semantics). When ``inplace``,
        the merged segment replaces the lower-indexed one and the other is removed.
        """
        new_seg = self.segments[index0].add(self.segments[index1], copy_words=False)
        if lock and self.segments[index0].has_words:
            # Index of the first word contributed by segments[index1].
            lock_idx = len(self.segments[index0].words)
            new_seg.words[lock_idx - 1].lock_right()
            if lock_idx < len(new_seg.words):
                new_seg.words[lock_idx].lock_left()
        if inplace:
            i0, i1 = sorted([index0, index1])
            self.segments[i0] = new_seg
            del self.segments[i1]
        return new_seg
def rescale_time(self, scale_factor: float):
for s in self.segments:
s.rescale_time(scale_factor)
    def apply_min_dur(self, min_dur: float, inplace: bool = False):
        """
        Merge any word/segment with adjacent word/segment if its duration is less than ``min_dur``.

        Segments are scanned in reverse so indices stay valid after each
        in-place merge. A too-short segment merges with its only neighbor at
        either edge; in the middle, the merge direction is chosen by comparing
        the two neighbors' durations. Returns ``self`` when ``inplace`` is True,
        otherwise a deep copy.
        """
        result = self if inplace else deepcopy(self)
        max_i = len(result.segments) - 1
        if max_i == 0:
            # Single segment: nothing to merge at segment level.
            return result
        for i in reversed(range(len(result.segments))):
            if max_i == 0:
                break
            if result.segments[i].duration < min_dur:
                if i == max_i:
                    result.add_segments(i-1, i, inplace=True)
                elif i == 0:
                    result.add_segments(i, i+1, inplace=True)
                else:
                    if result.segments[i+1].duration < result.segments[i-1].duration:
                        result.add_segments(i-1, i, inplace=True)
                    else:
                        result.add_segments(i, i+1, inplace=True)
                max_i -= 1
        result.reassign_ids()
        # Repeat the same merging at word level inside each remaining segment.
        for s in result.segments:
            s.apply_min_dur(min_dur, inplace=True)
        return result
def offset_time(self, offset_seconds: float):
for s in self.segments:
s.offset_time(offset_seconds)
    def suppress_silence(
            self,
            silent_starts: np.ndarray,
            silent_ends: np.ndarray,
            min_word_dur: Optional[float] = None,
            word_level: bool = True,
            nonspeech_error: float = 0.3,
            use_word_position: bool = True,
            verbose: bool = True
    ) -> "WhisperResult":
        """
        Move any start/end timestamps in silence parts of audio to the boundaries of the silence.

        Parameters
        ----------
        silent_starts : numpy.ndarray
            An array starting timestamps of silent sections of audio.
        silent_ends : numpy.ndarray
            An array ending timestamps of silent sections of audio.
        min_word_dur : float or None, default None meaning use ``stable_whisper.default.DEFAULT_VALUES``
            Shortest duration each word is allowed to reach for adjustments.
        word_level : bool, default True
            Whether to apply adjustments at the word level (in addition to the segment level).
        nonspeech_error : float, default 0.3
            Relative error of non-speech sections that appear in between a word for adjustments.
        use_word_position : bool, default True
            Whether to use position of the word in its segment to determine whether to keep end or start timestamps if
            adjustments are required. If it is the first word, keep end. Else if it is the last word, keep the start.
        verbose : bool, default True
            Whether to use progressbar to show progress.

        Returns
        -------
        stable_whisper.result.WhisperResult
            The current instance after the changes.
        """
        min_word_dur = get_min_word_dur(min_word_dur)
        # progressbar tracks seconds of audio processed (segment end times)
        with tqdm(total=self.duration, unit='sec', disable=not verbose, desc='Adjustment') as tqdm_pbar:
            for s in self.segments:
                s.suppress_silence(
                    silent_starts,
                    silent_ends,
                    min_word_dur,
                    word_level=word_level,
                    nonspeech_error=nonspeech_error,
                    use_word_position=use_word_position
                )
                if verbose:
                    tqdm_pbar.update(s.end - tqdm_pbar.n)
            tqdm_pbar.update(tqdm_pbar.total - tqdm_pbar.n)
        return self
    def adjust_by_silence(
            self,
            audio: Union[torch.Tensor, np.ndarray, str, bytes],
            vad: bool = False,
            *,
            verbose: (bool, None) = False,
            sample_rate: int = None,
            vad_onnx: bool = False,
            vad_threshold: float = 0.35,
            q_levels: int = 20,
            k_size: int = 5,
            min_word_dur: Optional[float] = None,
            word_level: bool = True,
            nonspeech_error: float = 0.3,
            use_word_position: bool = True
    ) -> "WhisperResult":
        """
        Adjust timestamps base on detected speech gaps.

        This method combines :meth:`stable_whisper.result.WhisperResult.suppress_silence` with silence detection.

        Parameters
        ----------
        audio : str or numpy.ndarray or torch.Tensor or bytes
            Path/URL to the audio file, the audio waveform, or bytes of audio file.
        vad : bool, default False
            Whether to use Silero VAD to generate timestamp suppression mask.
            Silero VAD requires PyTorch 1.12.0+. Official repo, https://github.com/snakers4/silero-vad.
        verbose : bool or None, default False
            Whether to use progressbar to show progress.
            If ``vad = True`` and ``False``, mute messages about hitting local caches.
            Note that the message about first download cannot be muted.
        sample_rate : int, default None, meaning ``whisper.audio.SAMPLE_RATE``, 16kHZ
            The sample rate of ``audio``.
        vad_onnx : bool, default False
            Whether to use ONNX for Silero VAD.
        vad_threshold : float, default 0.35
            Threshold for detecting speech with Silero VAD. Low threshold reduces false positives for silence detection.
        q_levels : int, default 20
            Quantization levels for generating timestamp suppression mask; ignored if ``vad = true``.
            Acts as a threshold to marking sound as silent.
            Fewer levels will increase the threshold of volume at which to mark a sound as silent.
        k_size : int, default 5
            Kernel size for avg-pooling waveform to generate timestamp suppression mask; ignored if ``vad = true``.
            Recommend 5 or 3; higher sizes will reduce detection of silence.
        min_word_dur : float or None, default None meaning use ``stable_whisper.default.DEFAULT_VALUES``
            Shortest duration each word is allowed to reach from adjustments.
        word_level : bool, default True
            Whether to apply adjustments at the word level (in addition to the segment level).
        nonspeech_error : float, default 0.3
            Relative error of non-speech sections that appear in between a word for adjustments.
        use_word_position : bool, default True
            Whether to use position of the word in its segment to determine whether to keep end or start timestamps if
            adjustments are required. If it is the first word, keep end. Else if it is the last word, keep the start.

        Returns
        -------
        stable_whisper.result.WhisperResult
            The current instance after the changes.

        Notes
        -----
        This operation is already performed by :func:`stable_whisper.whisper_word_level.transcribe_stable` /
        :func:`stable_whisper.whisper_word_level.transcribe_minimal`/
        :func:`stable_whisper.non_whisper.transcribe_any` / :func:`stable_whisper.alignment.align`
        if ``suppress_silence = True``.
        """
        min_word_dur = get_min_word_dur(min_word_dur)
        if vad:
            # Silero VAD path: resample audio to the VAD's expected sample rate first
            audio = audio_to_tensor_resample(audio, sample_rate, VAD_SAMPLE_RATES[0])
            sample_rate = VAD_SAMPLE_RATES[0]
            silent_timings = get_vad_silence_func(
                onnx=vad_onnx,
                verbose=verbose
            )(audio, speech_threshold=vad_threshold, sr=sample_rate)
        else:
            # amplitude-based silence detection
            silent_timings = audio2timings(audio, q_levels=q_levels, k_size=k_size, sr=sample_rate)
        if silent_timings is None:
            return self
        self.suppress_silence(
            *silent_timings,
            min_word_dur=min_word_dur,
            word_level=word_level,
            nonspeech_error=nonspeech_error,
            use_word_position=use_word_position,
            verbose=verbose
        )
        self.update_nonspeech_sections(*silent_timings)
        return self
    def adjust_by_result(
            self,
            other_result: "WhisperResult",
            min_word_dur: Optional[float] = None,
            verbose: bool = False
    ):
        """
        Minimize the duration of words using timestamps of another result.

        Parameters
        ----------
        other_result : "WhisperResult"
            Timing data of the same words in a WhisperResult instance.
        min_word_dur : float or None, default None meaning use ``stable_whisper.default.DEFAULT_VALUES``
            Prevent changes to timestamps if the resultant word duration is less than ``min_word_dur``.
        verbose : bool, default False
            Whether to print out the timestamp changes.

        Raises
        ------
        NotImplementedError
            If either result lacks word timestamps.
        """
        if not (self.has_words and other_result.has_words):
            raise NotImplementedError('This operation can only be performed on results with word timestamps')
        # both results must contain the exact same word sequence for a 1:1 zip to be valid
        assert [w.word for w in self.all_words()] == [w.word for w in other_result.all_words()], \
            'The words in [other_result] do not match the current words.'
        min_word_dur = get_min_word_dur(min_word_dur)
        for word, other_word in zip(self.all_words(), other_result.all_words()):
            if word.end > other_word.start:
                # shrink the word to the intersection of the two intervals,
                # but only if the shrunken duration still meets ``min_word_dur``
                new_start = max(word.start, other_word.start)
                new_end = min(word.end, other_word.end)
                if new_end - new_start >= min_word_dur:
                    line = ''
                    if word.start != new_start:
                        if verbose:
                            line += f'[Start:{word.start:.3f}->{new_start:.3f}] '
                        word.start = new_start
                    if word.end != new_end:
                        if verbose:
                            line += f'[End:{word.end:.3f}->{new_end:.3f}] '
                        word.end = new_end
                    if line:
                        print(f'{line}"{word.word}"')
def reassign_ids(self, only_segments: bool = False, start: Optional[int] = None):
for i, s in enumerate(self.segments[start:], start or 0):
s.id = i
s.result = self
if not only_segments:
s.reassign_ids()
def remove_no_word_segments(self, ignore_ori=False, reassign_ids: bool = True):
for i in reversed(range(len(self.segments))):
if (ignore_ori or self.segments[i].ori_has_words) and not self.segments[i].has_words:
del self.segments[i]
if reassign_ids:
self.reassign_ids()
def get_locked_indices(self):
locked_indices = [i
for i, (left, right) in enumerate(zip(self.segments[1:], self.segments[:-1]))
if left.left_locked or right.right_locked]
return locked_indices
def get_gaps(self, as_ndarray=False):
s_ts = np.array([s.start for s in self.segments])
e_ts = np.array([s.end for s in self.segments])
gap = s_ts[1:] - e_ts[:-1]
return gap if as_ndarray else gap.tolist()
def get_gap_indices(self, min_gap: float = 0.1): # for merging
if len(self.segments) < 2:
return []
if min_gap is None:
min_gap = 0
indices = (self.get_gaps(True) <= min_gap).nonzero()[0].tolist()
return sorted(set(indices) - set(self.get_locked_indices()))
    def get_punctuation_indices(self, punctuation: Union[List[str], List[Tuple[str, str]], str]):  # for merging
        """
        Return indices of unlocked segment boundaries that match ``punctuation``.

        A plain string matches when a segment ends with it, or when a non-first segment starts
        with it (which marks the boundary *before* that segment). A ``(ending, beginning)`` pair
        matches when a segment ends with ``ending`` and the next one starts with ``beginning``.
        """
        if len(self.segments) < 2:
            return []
        if isinstance(punctuation, str):
            punctuation = [punctuation]
        indices = []
        for p in punctuation:
            if isinstance(p, str):
                for i, s in enumerate(self.segments[:-1]):
                    if s.text.endswith(p):
                        indices.append(i)
                    elif i != 0 and s.text.startswith(p):
                        # segment starts with the punctuation -> the boundary before it matches
                        indices.append(i-1)
            else:
                ending, beginning = p
                indices.extend([i for i, (s0, s1) in enumerate(zip(self.segments[:-1], self.segments[1:]))
                                if s0.text.endswith(ending) and s1.text.startswith(beginning)])
        # never return boundaries that were explicitly locked
        return sorted(set(indices) - set(self.get_locked_indices()))
def all_words(self):
return list(chain.from_iterable(s.words for s in self.segments))
def all_words_or_segments(self):
return self.all_words() if self.has_words else self.segments
def all_words_by_lock(self, only_text: bool = True, by_segment: bool = False, include_single: bool = False):
if by_segment:
return [
segment.words_by_lock(only_text=only_text, include_single=include_single)
for segment in self.segments
]
return _words_by_lock(self.all_words(), only_text=only_text, include_single=include_single)
def all_tokens(self):
return list(chain.from_iterable(s.tokens for s in self.all_words()))
def to_dict(self):
return dict(text=self.text,
segments=self.segments_to_dicts(),
language=self.language,
ori_dict=self.ori_dict,
regroup_history=self._regroup_history,
nonspeech_sections=self._nonspeech_sections)
def segments_to_dicts(self, reverse_text: Union[bool, tuple] = False):
return [s.to_dict(reverse_text=reverse_text) for s in self.segments]
    def _split_segments(self, get_indices, args: list = None, *, lock: bool = False, newline: bool = False):
        """
        Split each segment at the word indices returned by ``get_indices(segment, *args)``.

        When ``newline=True``, a line break is appended to the word at each split point instead
        of creating new segments. When ``lock=True``, the new boundaries are locked against
        future splits/merges. Segments without word timings cannot be split; if any are found,
        a warning is emitted and empty ones are cleaned up.
        """
        if args is None:
            args = []
        no_words = False
        # iterate in reverse so in-place insertions/deletions don't shift pending indices
        for i in reversed(range(0, len(self.segments))):
            no_words = no_words or not self.segments[i].has_words
            indices = sorted(set(get_indices(self.segments[i], *args)))
            if not indices:
                continue
            if newline:
                if indices[-1] == len(self.segments[i].words) - 1:
                    # a line break after the last word would be a no-op split; drop it
                    del indices[-1]
                    if not indices:
                        continue
                for word_idx in indices:
                    if self.segments[i].words[word_idx].word.endswith('\n'):
                        continue
                    self.segments[i].words[word_idx].word += '\n'
                    if lock:
                        self.segments[i].words[word_idx].lock_right()
                        if word_idx + 1 < len(self.segments[i].words):
                            self.segments[i].words[word_idx+1].lock_left()
            else:
                new_segments = self.segments[i].split(indices)
                if lock:
                    # lock only the newly created internal boundaries, not the outer edges
                    for s in new_segments:
                        if s == new_segments[0]:
                            s.lock_right()
                        elif s == new_segments[-1]:
                            s.lock_left()
                        else:
                            s.lock_both()
                del self.segments[i]
                for s in reversed(new_segments):
                    self.segments.insert(i, s)
        if no_words:
            warnings.warn('Found segment(s) without word timings. These segment(s) cannot be split.')
            self.remove_no_word_segments()
    def _merge_segments(self, indices: List[int],
                        *, max_words: int = None, max_chars: int = None, is_sum_max: bool = False, lock: bool = False):
        """
        Merge segment ``i`` with segment ``i+1`` for every ``i`` in ``indices``.

        A merge is skipped when it would exceed ``max_words``/``max_chars``: with
        ``is_sum_max=True`` the limit applies to the combined segment; otherwise the merge is
        skipped only when BOTH segments individually exceed the limit.
        """
        if len(indices) == 0:
            return
        # reverse order so each in-place merge doesn't invalidate the remaining indices
        for i in reversed(indices):
            seg = self.segments[i]
            if (
                    (
                            max_words and
                            seg.has_words and
                            (
                                (seg.word_count() + self.segments[i + 1].word_count() > max_words)
                                if is_sum_max else
                                (seg.word_count() > max_words and self.segments[i + 1].word_count() > max_words)
                            )
                    ) or
                    (
                            max_chars and
                            (
                                (seg.char_count() + self.segments[i + 1].char_count() > max_chars)
                                if is_sum_max else
                                (seg.char_count() > max_chars and self.segments[i + 1].char_count() > max_chars)
                            )
                    )
            ):
                continue
            self.add_segments(i, i + 1, inplace=True, lock=lock)
        self.remove_no_word_segments()
def get_content_by_time(
self,
time: Union[float, Tuple[float, float], dict],
within: bool = False,
segment_level: bool = False
) -> Union[List[WordTiming], List[Segment]]:
"""
Return content in the ``time`` range.
Parameters
----------
time : float or tuple of (float, float) or dict
Range of time to find content. For tuple of two floats, first value is the start time and second value is
the end time. For a single float value, it is treated as both the start and end time.
within : bool, default False
Whether to only find content fully overlaps with ``time`` range.
segment_level : bool, default False
Whether to look only on the segment level and return instances of :class:`stable_whisper.result.Segment`
instead of :class:`stable_whisper.result.WordTiming`.
Returns
-------
list of stable_whisper.result.WordTiming or list of stable_whisper.result.Segment
List of contents in the ``time`` range. The contents are instances of
:class:`stable_whisper.result.Segment` if ``segment_level = True`` else
:class:`stable_whisper.result.WordTiming`.
"""
if not segment_level and not self.has_words:
raise ValueError('Missing word timestamps in result. Use ``segment_level=True`` instead.')
contents = self.segments if segment_level else self.all_words()
if isinstance(time, (float, int)):
time = [time, time]
elif isinstance(time, dict):
time = [time['start'], time['end']]
start, end = time
if within:
def is_in_range(c):
return start <= c.start and end >= c.end
else:
def is_in_range(c):
return start <= c.end and end >= c.start
return [c for c in contents if is_in_range(c)]
def split_by_gap(
self,
max_gap: float = 0.1,
lock: bool = False,
newline: bool = False
) -> "WhisperResult":
"""
Split (in-place) any segment where the gap between two of its words is greater than ``max_gap``.
Parameters
----------
max_gap : float, default 0.1
Maximum second(s) allowed between two words if the same segment.
lock : bool, default False
Whether to prevent future splits/merges from altering changes made by this method.
newline: bool, default False
Whether to insert line break at the split points instead of splitting into separate segments.
Returns
-------
stable_whisper.result.WhisperResult
The current instance after the changes.
"""
self._split_segments(lambda x: x.get_gap_indices(max_gap), lock=lock, newline=newline)
if self._regroup_history:
self._regroup_history += '_'
self._regroup_history += f'sg={max_gap}+{int(lock)}+{int(newline)}'
return self
def merge_by_gap(
self,
min_gap: float = 0.1,
max_words: int = None,
max_chars: int = None,
is_sum_max: bool = False,
lock: bool = False
) -> "WhisperResult":
"""
Merge (in-place) any pair of adjacent segments if the gap between them <= ``min_gap``.
Parameters
----------
min_gap : float, default 0.1
Minimum second(s) allow between two segment.
max_words : int, optional
Maximum number of words allowed in each segment.
max_chars : int, optional
Maximum number of characters allowed in each segment.
is_sum_max : bool, default False
Whether ``max_words`` and ``max_chars`` is applied to the merged segment instead of the individual segments
to be merged.
lock : bool, default False
Whether to prevent future splits/merges from altering changes made by this method.
Returns
-------
stable_whisper.result.WhisperResult
The current instance after the changes.
"""
indices = self.get_gap_indices(min_gap)
self._merge_segments(indices,
max_words=max_words, max_chars=max_chars, is_sum_max=is_sum_max, lock=lock)
if self._regroup_history:
self._regroup_history += '_'
self._regroup_history += f'mg={min_gap}+{max_words or ""}+{max_chars or ""}+{int(is_sum_max)}+{int(lock)}'
return self
    def split_by_punctuation(
            self,
            punctuation: Union[List[str], List[Tuple[str, str]], str],
            lock: bool = False,
            newline: bool = False,
            min_words: Optional[int] = None,
            min_chars: Optional[int] = None,
            min_dur: Optional[int] = None
    ) -> "WhisperResult":
        """
        Split (in-place) segments at words that start/end with ``punctuation``.

        Parameters
        ----------
        punctuation : list of str of list of tuple of (str, str) or str
            Punctuation(s) to split segments by.
        lock : bool, default False
            Whether to prevent future splits/merges from altering changes made by this method.
        newline : bool, default False
            Whether to insert line break at the split points instead of splitting into separate segments.
        min_words : int, optional
            Split segments with words >= ``min_words``.
        min_chars : int, optional
            Split segments with characters >= ``min_chars``.
        min_dur : int, optional
            split segments with duration (in seconds) >= ``min_dur``.

        Returns
        -------
        stable_whisper.result.WhisperResult
            The current instance after the changes.
        """
        # True when the segment meets any of the size/duration thresholds for splitting
        def _over_max(x: Segment):
            return (
                    (min_words and len(x.words) >= min_words) or
                    (min_chars and x.char_count() >= min_chars) or
                    (min_dur and x.duration >= min_dur)
            )
        # ids of segments eligible for splitting; ``None`` means no threshold -> all eligible
        indices = set(s.id for s in self.segments if _over_max(s)) if any((min_words, min_chars, min_dur)) else None
        def _get_indices(x: Segment):
            return x.get_punctuation_indices(punctuation) if indices is None or x.id in indices else []
        self._split_segments(_get_indices, lock=lock, newline=newline)
        if self._regroup_history:
            self._regroup_history += '_'
        # encode punctuation list for the regroup history string (pairs joined by '*')
        punct_str = '/'.join(p if isinstance(p, str) else '*'.join(p) for p in punctuation)
        self._regroup_history += f'sp={punct_str}+{int(lock)}+{int(newline)}'
        # trailing '+' separators for unset optional args are stripped
        self._regroup_history += f'+{min_words or ""}+{min_chars or ""}+{min_dur or ""}'.rstrip('+')
        return self
def merge_by_punctuation(
self,
punctuation: Union[List[str], List[Tuple[str, str]], str],
max_words: int = None,
max_chars: int = None,
is_sum_max: bool = False,
lock: bool = False
) -> "WhisperResult":
"""
Merge (in-place) any two segments that has specific punctuations inbetween.
Parameters
----------
punctuation : list of str of list of tuple of (str, str) or str
Punctuation(s) to merge segments by.
max_words : int, optional
Maximum number of words allowed in each segment.
max_chars : int, optional
Maximum number of characters allowed in each segment.
is_sum_max : bool, default False
Whether ``max_words`` and ``max_chars`` is applied to the merged segment instead of the individual segments
to be merged.
lock : bool, default False
Whether to prevent future splits/merges from altering changes made by this method.
Returns
-------
stable_whisper.result.WhisperResult
The current instance after the changes.
"""
indices = self.get_punctuation_indices(punctuation)
self._merge_segments(indices,
max_words=max_words, max_chars=max_chars, is_sum_max=is_sum_max, lock=lock)
if self._regroup_history:
self._regroup_history += '_'
punct_str = '/'.join(p if isinstance(p, str) else '*'.join(p) for p in punctuation)
self._regroup_history += f'mp={punct_str}+{max_words or ""}+{max_chars or ""}+{int(is_sum_max)}+{int(lock)}'
return self
def merge_all_segments(self) -> "WhisperResult":
"""
Merge all segments into one segment.
Returns
-------
stable_whisper.result.WhisperResult
The current instance after the changes.
"""
if not self.segments:
return self
if self.has_words:
new_seg = self.segments[0].copy(self.all_words(), keep_result=True, copy_words=False)
else:
new_seg = self.segments[0]
new_seg._default_text += ''.join(s.text for s in self.segments[1:])
if all(s.tokens is not None for s in self.segments):
new_seg._default_tokens += list(chain.from_iterable(s.tokens for s in self.segments[1:]))
new_seg.end = self.segments[-1].end
self.segments = [new_seg]
self.reassign_ids()
if self._regroup_history:
self._regroup_history += '_'
self._regroup_history += 'ms'
return self
def split_by_length(
self,
max_chars: int = None,
max_words: int = None,
even_split: bool = True,
force_len: bool = False,
lock: bool = False,
include_lock: bool = False,
newline: bool = False
) -> "WhisperResult":
"""
Split (in-place) any segment that exceeds ``max_chars`` or ``max_words`` into smaller segments.
Parameters
----------
max_chars : int, optional
Maximum number of characters allowed in each segment.
max_words : int, optional
Maximum number of words allowed in each segment.
even_split : bool, default True
Whether to evenly split a segment in length if it exceeds ``max_chars`` or ``max_words``.
force_len : bool, default False
Whether to force a constant length for each segment except the last segment.
This will ignore all previous non-locked segment boundaries.
lock : bool, default False
Whether to prevent future splits/merges from altering changes made by this method.
include_lock: bool, default False
Whether to include previous lock before splitting based on max_words, if ``even_split = False``.
Splitting will be done after the first non-locked word > ``max_chars`` / ``max_words``.
newline: bool, default False
Whether to insert line break at the split points instead of splitting into separate segments.
Returns
-------
stable_whisper.result.WhisperResult
The current instance after the changes.
Notes
-----
If ``even_split = True``, segments can still exceed ``max_chars`` and locked words will be ignored to avoid
uneven splitting.
"""
if force_len:
self.merge_all_segments()
self._split_segments(
lambda x: x.get_length_indices(
max_chars=max_chars,
max_words=max_words,
even_split=even_split,
include_lock=include_lock
),
lock=lock,
newline=newline
)
if self._regroup_history:
self._regroup_history += '_'
self._regroup_history += (f'sl={max_chars or ""}+{max_words or ""}+{int(even_split)}+{int(force_len)}'
f'+{int(lock)}+{int(include_lock)}+{int(newline)}')
return self
def split_by_duration(
self,
max_dur: float,
even_split: bool = True,
force_len: bool = False,
lock: bool = False,
include_lock: bool = False,
newline: bool = False
) -> "WhisperResult":
"""
Split (in-place) any segment that exceeds ``max_dur`` into smaller segments.
Parameters
----------
max_dur : float
Maximum duration (in seconds) per segment.
even_split : bool, default True
Whether to evenly split a segment in length if it exceeds ``max_dur``.
force_len : bool, default False
Whether to force a constant length for each segment except the last segment.
This will ignore all previous non-locked segment boundaries.
lock : bool, default False
Whether to prevent future splits/merges from altering changes made by this method.
include_lock: bool, default False
Whether to include previous lock before splitting based on max_words, if ``even_split = False``.
Splitting will be done after the first non-locked word > ``max_dur``.
newline: bool, default False
Whether to insert line break at the split points instead of splitting into separate segments.
Returns
-------
stable_whisper.result.WhisperResult
The current instance after the changes.
Notes
-----
If ``even_split = True``, segments can still exceed ``max_dur`` and locked words will be ignored to avoid
uneven splitting.
"""
if force_len:
self.merge_all_segments()
self._split_segments(
lambda x: x.get_duration_indices(
max_dur=max_dur,
even_split=even_split,
include_lock=include_lock
),
lock=lock,
newline=newline
)
if self._regroup_history:
self._regroup_history += '_'
self._regroup_history += (f'sd={max_dur}+{int(even_split)}+{int(force_len)}'
f'+{int(lock)}+{int(include_lock)}+{int(newline)}')
return self
def clamp_max(
self,
medium_factor: float = 2.5,
max_dur: float = None,
clip_start: Optional[bool] = None,
verbose: bool = False
) -> "WhisperResult":
"""
Clamp all word durations above certain value.
This is most effective when applied before and after other regroup operations.
Parameters
----------
medium_factor : float, default 2.5
Clamp durations above (``medium_factor`` * medium duration) per segment.
If ``medium_factor = None/0`` or segment has less than 3 words, it will be ignored and use only ``max_dur``.
max_dur : float, optional
Clamp durations above ``max_dur``.
clip_start : bool or None, default None
Whether to clamp the start of a word. If ``None``, clamp the start of first word and end of last word per
segment.
verbose : bool, default False
Whether to print out the timestamp changes.
Returns
-------
stable_whisper.result.WhisperResult
The current instance after the changes.
"""
if not (medium_factor or max_dur):
raise ValueError('At least one of following arguments requires non-zero value: medium_factor; max_dur')
if not self.has_words:
warnings.warn('Cannot clamp due to missing/no word-timestamps')
return self
for seg in self.segments:
curr_max_dur = None
if medium_factor and len(seg.words) > 2:
durations = np.array([word.duration for word in seg.words])
durations.sort()
curr_max_dur = medium_factor * durations[len(durations)//2 + 1]
if max_dur and (not curr_max_dur or curr_max_dur > max_dur):
curr_max_dur = max_dur
if not curr_max_dur:
continue
if clip_start is None:
seg.words[0].clamp_max(curr_max_dur, clip_start=True, verbose=verbose)
seg.words[-1].clamp_max(curr_max_dur, clip_start=False, verbose=verbose)
else:
for i, word in enumerate(seg.words):
word.clamp_max(curr_max_dur, clip_start=clip_start, verbose=verbose)
if self._regroup_history:
self._regroup_history += '_'
self._regroup_history += f'cm={medium_factor}+{max_dur or ""}+{clip_start or ""}+{int(verbose)}'
return self
def lock(
self,
startswith: Union[str, List[str]] = None,
endswith: Union[str, List[str]] = None,
right: bool = True,
left: bool = False,
case_sensitive: bool = False,
strip: bool = True
) -> "WhisperResult":
"""
Lock words/segments with matching prefix/suffix to prevent splitting/merging.
Parameters
----------
startswith: str or list of str
Prefixes to lock.
endswith: str or list of str
Suffixes to lock.
right : bool, default True
Whether prevent splits/merges with the next word/segment.
left : bool, default False
Whether prevent splits/merges with the previous word/segment.
case_sensitive : bool, default False
Whether to match the case of the prefixes/suffixes with the words/segments.
strip : bool, default True
Whether to ignore spaces before and after both words/segments and prefixes/suffixes.
Returns
-------
stable_whisper.result.WhisperResult
The current instance after the changes.
"""
assert startswith or endswith, 'Must specify [startswith] or/and [endswith].'
startswith = [] if startswith is None else ([startswith] if isinstance(startswith, str) else startswith)
endswith = [] if endswith is None else ([endswith] if isinstance(endswith, str) else endswith)
if not case_sensitive:
startswith = [t.lower() for t in startswith]
endswith = [t.lower() for t in endswith]
if strip:
startswith = [t.strip() for t in startswith]
endswith = [t.strip() for t in endswith]
for part in self.all_words_or_segments():
text = part.word if hasattr(part, 'word') else part.text
if not case_sensitive:
text = text.lower()
if strip:
text = text.strip()
for prefix in startswith:
if text.startswith(prefix):
if right:
part.lock_right()
if left:
part.lock_left()
for suffix in endswith:
if text.endswith(suffix):
if right:
part.lock_right()
if left:
part.lock_left()
if self._regroup_history:
self._regroup_history += '_'
startswith_str = (startswith if isinstance(startswith, str) else '/'.join(startswith)) if startswith else ""
endswith_str = (endswith if isinstance(endswith, str) else '/'.join(endswith)) if endswith else ""
self._regroup_history += (f'l={startswith_str}+{endswith_str}'
f'+{int(right)}+{int(left)}+{int(case_sensitive)}+{int(strip)}')
return self
    def remove_word(
            self,
            word: Union[WordTiming, Tuple[int, int]],
            reassign_ids: bool = True,
            verbose: bool = True
    ) -> 'WhisperResult':
        """
        Remove a word.

        Parameters
        ----------
        word : WordTiming or tuple of (int, int)
            Instance of :class:`stable_whisper.result.WordTiming` or tuple of (segment index, word index).
        reassign_ids : bool, default True
            Whether to reassign segment and word ids (indices) after removing ``word``.
        verbose : bool, default True
            Whether to print detail of the removed word.

        Returns
        -------
        stable_whisper.result.WhisperResult
            The current instance after the changes.

        Raises
        ------
        ValueError
            If ``word`` is a :class:`WordTiming` that cannot be located in this result.
        """
        if isinstance(word, WordTiming):
            # the word's stored ids may be stale; refresh ids once and re-check identity
            if self[word.segment_id][word.id] is not word:
                self.reassign_ids()
                if self[word.segment_id][word.id] is not word:
                    raise ValueError('word not in result')
            seg_id, word_id = word.segment_id, word.id
        else:
            seg_id, word_id = word
        if verbose:
            print(f'Removed: {self[seg_id][word_id].to_dict()}')
        del self.segments[seg_id].words[word_id]
        if not reassign_ids:
            return self
        if self[seg_id].has_words:
            self[seg_id].reassign_ids()
        else:
            # removing the last word leaves an empty segment; drop it
            self.remove_no_word_segments()
        return self
    def remove_segment(
            self,
            segment: Union[Segment, int],
            reassign_ids: bool = True,
            verbose: bool = True
    ) -> 'WhisperResult':
        """
        Remove a segment.

        Parameters
        ----------
        segment : Segment or int
            Instance :class:`stable_whisper.result.Segment` or segment index.
        reassign_ids : bool, default True
            Whether to reassign segment IDs (indices) after removing ``segment``.
        verbose : bool, default True
            Whether to print detail of the removed word.

        Returns
        -------
        stable_whisper.result.WhisperResult
            The current instance after the changes.

        Raises
        ------
        ValueError
            If ``segment`` is a :class:`Segment` that cannot be located in this result.
        """
        if isinstance(segment, Segment):
            # the segment's stored id may be stale; refresh ids once and re-check identity
            if self[segment.id] is not segment:
                self.reassign_ids()
                if self[segment.id] is not segment:
                    raise ValueError('segment not in result')
            segment = segment.id
        if verbose:
            print(f'Removed: [id:{self[segment].id}] {self[segment].to_display_str(True)}')
        del self.segments[segment]
        if not reassign_ids:
            return self
        # only segments at/after the removed index need renumbering
        self.reassign_ids(True, start=segment)
        return self
    def remove_repetition(
            self,
            max_words: int = 1,
            case_sensitive: bool = False,
            strip: bool = True,
            ignore_punctuations: str = "\"',.?!",
            extend_duration: bool = True,
            verbose: bool = True
    ) -> 'WhisperResult':
        """
        Remove words that repeat consecutively.

        Parameters
        ----------
        max_words : int
            Maximum number of words to look for consecutively.
        case_sensitive : bool, default False
            Whether the case of words need to match to be considered as repetition.
        strip : bool, default True
            Whether to ignore spaces before and after each word.
        ignore_punctuations : bool, default '"',.?!'
            Ending punctuations to ignore.
        extend_duration: bool, default True
            Whether to extend the duration of the previous word to cover the duration of the repetition.
        verbose: bool, default True
            Whether to print detail of the removed repetitions.

        Returns
        -------
        stable_whisper.result.WhisperResult
            The current instance after the changes.
        """
        if not self.has_words:
            return self
        # check window sizes from 1 up to ``max_words`` words
        for count in range(1, max_words + 1):
            all_words = self.all_words()
            if len(all_words) < 2:
                return self
            # normalized strings used only for comparison; the word objects keep their text
            all_words_str = [w.word for w in all_words]
            if strip:
                all_words_str = [w.strip() for w in all_words_str]
            if ignore_punctuations:
                ptn = f'[{ignore_punctuations}]+$'
                all_words_str = [re.sub(ptn, '', w) for w in all_words_str]
            if not case_sensitive:
                all_words_str = [w.lower() for w in all_words_str]
            next_i = None
            changes = []
            # scan right-to-left; ``i`` is the exclusive end of the candidate window
            for i in reversed(range(count*2, len(all_words_str)+1)):
                if next_i is not None:
                    # after a match, skip ahead to the window ending where the match started
                    if next_i != i:
                        continue
                    else:
                        next_i = None
                s = i - count
                # repetition: the ``count`` words before ``s`` equal the ``count`` words [s, i)
                if all_words_str[s - count:s] != all_words_str[s:i]:
                    continue
                next_i = s
                if extend_duration:
                    # stretch the word preceding the removed run to cover its time span
                    all_words[s-1].end = all_words[i-1].end
                temp_changes = []
                for j in reversed(range(s, i)):
                    if verbose:
                        temp_changes.append(f'- {all_words[j].to_dict()}')
                    self.remove_word(all_words[j], False, verbose=False)
                if temp_changes:
                    changes.append(
                        f'Remove: [{format_timestamp(all_words[s].start)} -> {format_timestamp(all_words[i-1].end)}] '
                        + ''.join(_w.word for _w in all_words[s:i]) + '\n'
                        + '\n'.join(reversed(temp_changes)) + '\n'
                    )
                # if a removed duplicate had the longer raw text, keep that text by swapping it
                # into the surviving position (with the surviving word's timestamps)
                for i0, i1 in zip(range(s - count, s), range(s, i)):
                    if len(all_words[i0].word) < len(all_words[i1].word):
                        all_words[i1].start = all_words[i0].start
                        all_words[i1].end = all_words[i0].end
                        _sid, _wid = all_words[i0].segment_id, all_words[i0].id
                        self.segments[_sid].words[_wid] = all_words[i1]
            if changes:
                print('\n'.join(reversed(changes)))
            self.remove_no_word_segments(reassign_ids=False)
        self.reassign_ids()
        return self
def remove_words_by_str(
        self,
        words: Union[str, List[str], None],
        case_sensitive: bool = False,
        strip: bool = True,
        ignore_punctuations: str = "\"',.?!",
        min_prob: float = None,
        filters: Callable = None,
        verbose: bool = True
) -> 'WhisperResult':
    """
    Remove words that match ``words``.

    Parameters
    ----------
    words : str or list of str or None
        A word or list of words to remove. ``None`` for all words to be passed into ``filters``.
    case_sensitive : bool, default False
        Whether the case of words need to match to be considered as repetition.
    strip : bool, default True
        Whether to ignore spaces before and after each word.
    ignore_punctuations : bool, default '"',.?!'
        Ending punctuations to ignore.
    min_prob : float, optional
        Acts as the first filter the for the words that match ``words``. Words with probability < ``min_prob`` will
        be removed if ``filters`` is ``None``, else pass the words into ``filters``. Words without probability will
        be treated as having probability < ``min_prob``.
    filters : Callable, optional
        A function that takes an instance of :class:`stable_whisper.result.WordTiming` as its only argument.
        This function is custom filter for the words that match ``words`` and were not caught by ``min_prob``.
    verbose:
        Whether to print detail of the removed words.

    Returns
    -------
    stable_whisper.result.WhisperResult
        The current instance after the changes.
    """
    if not self.has_words:
        return self
    if isinstance(words, str):
        words = [words]
    all_words = self.all_words()
    all_words_str = [w.word for w in all_words]
    # ``words`` may be None (match-all mode, filtered only by ``min_prob``/``filters``);
    # the normalization steps below must therefore skip ``words`` when it is None.
    if strip:
        all_words_str = [w.strip() for w in all_words_str]
        if words is not None:
            words = [w.strip() for w in words]
    if ignore_punctuations:
        ptn = f'[{ignore_punctuations}]+$'
        all_words_str = [re.sub(ptn, '', w) for w in all_words_str]
        if words is not None:
            words = [re.sub(ptn, '', w) for w in words]
    if not case_sensitive:
        all_words_str = [w.lower() for w in all_words_str]
        if words is not None:
            words = [w.lower() for w in words]
    changes = []
    # iterate in reverse so removals do not shift the indices of words not yet visited
    for i, w in reversed(list(enumerate(all_words_str))):
        if not (words is None or any(w == _w for _w in words)):
            continue
        if (
                (min_prob is None or all_words[i].probability is None or min_prob > all_words[i].probability) and
                (filters is None or filters(all_words[i]))
        ):
            if verbose:
                changes.append(f'Removed: {all_words[i].to_dict()}')
            self.remove_word(all_words[i], False, verbose=False)
    if changes:
        print('\n'.join(reversed(changes)))
    self.remove_no_word_segments()
    return self
def fill_in_gaps(
        self,
        other_result: Union['WhisperResult', str],
        min_gap: float = 0.1,
        case_sensitive: bool = False,
        strip: bool = True,
        ignore_punctuations: str = "\"',.?!",
        verbose: bool = True
) -> 'WhisperResult':
    """
    Fill in segment gaps larger than ``min_gap`` with content from ``other_result`` at the times of gaps.

    Parameters
    ----------
    other_result : WhisperResult or str
        Another transcription result as an instance of :class:`stable_whisper.result.WhisperResult` or path to the
        JSON of the result.
    min_gap : float, default 0.1
        The minimum seconds of a gap between segments that must be exceeded to be filled in.
    case_sensitive : bool, default False
        Whether to consider the case of the first and last word of the gap to determine overlapping words to remove
        before filling in.
    strip : bool, default True
        Whether to ignore spaces before and after the first and last word of the gap to determine overlapping words
        to remove before filling in.
    ignore_punctuations : bool, default '"',.?!'
        Ending punctuations to ignore in the first and last word of the gap to determine overlapping words to
        remove before filling in.
    verbose:
        Whether to print detail of the filled content.

    Returns
    -------
    stable_whisper.result.WhisperResult
        The current instance after the changes.
    """
    if len(self.segments) < 2:
        return self
    if isinstance(other_result, str):
        # a string is treated as a path to a saved JSON result
        other_result = WhisperResult(other_result)

    # Build the word-normalization function used to compare the words bounding a gap.
    # Each layer wraps the previous: space-stripping -> punctuation-stripping -> case-folding.
    if strip:
        def strip_space(w):
            return w.strip()
    else:
        def strip_space(w):
            return w

    if ignore_punctuations:
        ptn = f'[{ignore_punctuations}]+$'

        def strip_punctuations(w):
            return re.sub(ptn, '', strip_space(w))
    else:
        strip_punctuations = strip_space

    # NOTE: deliberately rebinds the name ``strip``, shadowing the boolean parameter
    # (which is no longer needed past this point).
    if case_sensitive:
        strip = strip_punctuations
    else:
        def strip(w):
            return strip_punctuations(w).lower()

    # Pair up consecutive segments; the sentinel pairs (None, first) and (last, None)
    # let gaps before the first segment and after the last one be filled as well.
    seg_pairs = list(enumerate(zip(self.segments[:-1], self.segments[1:])))
    seg_pairs.insert(0, (-1, (None, self.segments[0])))
    seg_pairs.append((seg_pairs[-1][0]+1, (self.segments[-1], None)))

    changes = []
    # Iterate in reverse so inserting new segments does not shift the indices of
    # pairs that have not been processed yet.
    for i, (seg0, seg1) in reversed(seg_pairs):
        first_word = None if seg0 is None else seg0.words[-1]
        last_word = None if seg1 is None else seg1.words[0]
        # the gap runs from the end of the word before it to the start of the word after it
        start = (other_result[0].start if first_word is None else first_word.end)
        end = other_result[-1].end if last_word is None else last_word.start
        if end - start <= min_gap:
            continue
        gap_words = other_result.get_content_by_time((start, end))
        # Drop words from ``other_result`` that duplicate the words bounding the gap,
        # extending the boundary words to cover the duplicates' timings instead.
        if first_word is not None and gap_words and strip(first_word.word) == strip(gap_words[0].word):
            first_word.end = gap_words[0].end
            gap_words = gap_words[1:]
        if last_word is not None and gap_words and strip(last_word.word) == strip(gap_words[-1].word):
            last_word.start = gap_words[-1].start
            gap_words = gap_words[:-1]
        if not gap_words:
            continue
        # clip any timing overlap between the filled content and the following word
        if last_word is not None and last_word.start < gap_words[-1].end:
            last_word.start = gap_words[-1].end
        # ``copy([])`` presumably copies segment metadata with an empty word list — TODO confirm
        new_segments = [other_result[gap_words[0].segment_id].copy([])]
        for j, new_word in enumerate(gap_words):
            new_word_copy = new_word.copy(copy_tokens=True)
            if j == 0 and first_word is not None and first_word.end > gap_words[0].start:
                new_word_copy.start = first_word.end
            # start a fresh segment whenever the source segment changes
            if new_segments[-1].id != new_word.segment_id:
                new_segments.append(other_result[new_word.segment_id].copy([]))
            new_segments[-1].words.append(new_word_copy)
        if verbose:
            changes.append('\n'.join('Added: ' + s.to_display_str(True) for s in new_segments))
        self.segments = self.segments[:i+1] + new_segments + self.segments[i+1:]
    if changes:
        print('\n'.join(reversed(changes)))
    self.reassign_ids()
    return self
def regroup(
        self,
        regroup_algo: Union[str, bool] = None,
        verbose: bool = False,
        only_show: bool = False
) -> "WhisperResult":
    """
    Regroup (in-place) words into segments.

    Parameters
    ----------
    regroup_algo : str or bool, default 'da'
        String representation of a custom regrouping algorithm, or ``True`` to use the
        default algorithm 'da'. ``False`` skips regrouping entirely.
    verbose : bool, default False
        Whether to print every method and argument parsed from ``regroup_algo``.
    only_show : bool, default False
        Whether to only print the parsed methods and arguments without running them.

    Returns
    -------
    stable_whisper.result.WhisperResult
        The current instance after the changes.

    Notes
    -----
    Syntax for the string representation of a custom regrouping algorithm.
        Method keys:
            sg: split_by_gap
            sp: split_by_punctuation
            sl: split_by_length
            sd: split_by_duration
            mg: merge_by_gap
            mp: merge_by_punctuation
            ms: merge_all_segment
            cm: clamp_max
            l: lock
            us: unlock_all_segments
            da: default algorithm (cm_sp=,* /,_sg=.5_mg=.3+3_sp=.* /。/?/?)
            rw: remove_word
            rs: remove_segment
            rp: remove_repetition
            rws: remove_words_by_str
            fg: fill_in_gaps
        Metacharacters:
            = separates a method key and its arguments (not used if no argument)
            _ separates method keys (after arguments if there are any)
            + separates arguments for a method key
            / separates an argument into list of strings
            * separates an item in list of strings into a nested list of strings
        Notes:
            -arguments are parsed positionally
            -if no argument is provided, the default ones will be used
            -use 1 or 0 to represent True or False

    Examples
    --------
    ``mg=.2+10+++1`` parses to ``merge_by_gap(.2, 10, lock=True)``
    (``lock`` is the 5th argument, hence the two empty slots between the three ``+`` and ``1``).
    ``sp=.* /。/?/?+1`` parses to ``split_by_punctuation([('.', ' '), '。', '?', '?'], True)``.
    ``ms_sg=.5_mg=.15+3`` parses to ``merge_all_segments().split_by_gap(.5).merge_by_gap(.15, 3)``.
    """
    if regroup_algo is False:
        # regrouping explicitly disabled
        return self
    algo = 'da' if regroup_algo is None or regroup_algo is True else regroup_algo
    want_strings = verbose or only_show
    for method, kwargs, description in self.parse_regroup_algo(algo, include_str=want_strings):
        if description:
            print(description)
        if not only_show:
            method(**kwargs)
    return self
def parse_regroup_algo(self, regroup_algo: str, include_str: bool = True) -> List[Tuple[Callable, dict, str]]:
    """
    Parse a regrouping-algorithm string into a list of executable operations.

    Parameters
    ----------
    regroup_algo : str
        String representation of a regrouping algorithm
        (see :meth:`stable_whisper.result.WhisperResult.regroup` for the syntax).
    include_str : bool, default True
        Whether to also build a human-readable display string for each operation.

    Returns
    -------
    list of (Callable, dict, str)
        One ``(bound method, keyword arguments, display string)`` tuple per operation;
        the display string is ``None`` when ``include_str`` is ``False``.
    """
    # map each method key of the mini-language to the bound method it represents
    methods = dict(
        sg=self.split_by_gap,
        sp=self.split_by_punctuation,
        sl=self.split_by_length,
        sd=self.split_by_duration,
        mg=self.merge_by_gap,
        mp=self.merge_by_punctuation,
        ms=self.merge_all_segments,
        cm=self.clamp_max,
        us=self.unlock_all_segments,
        l=self.lock,
        rw=self.remove_word,
        rs=self.remove_segment,
        rp=self.remove_repetition,
        rws=self.remove_words_by_str,
        fg=self.fill_in_gaps,
    )
    if not regroup_algo:
        return []
    calls = regroup_algo.split('_')
    if 'da' in calls:
        # expand every 'da' key in place into the default algorithm's operation sequence
        default_calls = 'cm_sp=,* /,_sg=.5_mg=.3+3_sp=.* /。/?/?'.split('_')
        calls = chain.from_iterable(default_calls if method == 'da' else [method] for method in calls)
    operations = []
    for method in calls:
        # split off the argument string, e.g. 'mg=.2+10' -> ('mg', '.2+10')
        method, args = method.split('=', maxsplit=1) if '=' in method else (method, '')
        if method not in methods:
            raise NotImplementedError(f'{method} is not one of the available methods: {tuple(methods.keys())}')
        # '+'-separated positional argument values, each converted to None/list/number/str
        args = [] if len(args) == 0 else list(map(str_to_valid_type, args.split('+')))
        # map the positional values onto the method's parameter names (co_varnames[0] is
        # 'self'); None values are dropped so the method's own defaults apply
        kwargs = {k: v for k, v in zip(methods[method].__code__.co_varnames[1:], args) if v is not None}
        if include_str:
            kwargs_str = ', '.join(f'{k}="{v}"' if isinstance(v, str) else f'{k}={v}' for k, v in kwargs.items())
            op_str = f'{methods[method].__name__}({kwargs_str})'
        else:
            op_str = None
        operations.append((methods[method], kwargs, op_str))
    return operations
def find(self, pattern: str, word_level=True, flags=None) -> "WhisperResultMatches":
    """
    Search the segments/words of this result with a regular expression.

    Parameters
    ----------
    pattern : str
        RegEx pattern to search for.
    word_level : bool, default True
        Whether to search at word-level.
    flags : optional
        RegEx flags.

    Returns
    -------
    stable_whisper.result.WhisperResultMatches
        An instance of :class:`stable_whisper.result.WhisperResultMatches` holding the matches.
    """
    matches = WhisperResultMatches(self)
    return matches.find(pattern, word_level=word_level, flags=flags)
def text(self):
    """Concatenated text of all segments."""
    parts = [segment.text for segment in self.segments]
    return ''.join(parts)
def regroup_history(self):
    """String of the regrouping operations performed on this result.

    Uses the same syntax as ``regroup_algo`` for :meth:`stable_whisper.result.WhisperResult.regroup`.
    """
    return self._regroup_history
def nonspeech_sections(self):
    """The stored non-speech sections of this result."""
    return self._nonspeech_sections
def show_regroup_history(self):
    """Print details of all regrouping operations that have been performed on this result."""
    history = self._regroup_history
    if not history:
        print('Result has no history.')
    # an empty history parses into zero operations, so this loop then prints nothing
    for operation in self.parse_regroup_algo(history):
        print(f'.{operation[-1]}')
def __len__(self):
    """Return the number of segments in this result."""
    return len(self.segments)
def unlock_all_segments(self):
    """Unlock the words of every segment and return this instance."""
    for segment in self.segments:
        segment.unlock_all_words()
    return self
def reset(self):
    """Restore all values to their state at initialization, discarding later edits."""
    original = self.ori_dict
    self.language = original.get('language')
    self._regroup_history = ''
    raw_segments = original.get('segments') or ()
    self.segments = [Segment(**seg, ignore_unused_args=True) for seg in raw_segments]
    if self._forced_order:
        self.force_order()
    self.remove_no_word_segments(any(seg.has_words for seg in self.segments))
def has_words(self):
    """Whether this result is non-empty and every segment carries word-level data."""
    if not self.segments:
        return False
    return all(segment.has_words for segment in self.segments)
# Expose the module-level output writers as methods, so a result can be saved
# directly, e.g. ``result.to_srt_vtt(...)`` or ``result.save_as_json(...)``.
# NOTE(review): these appear to be class attributes of WhisperResult — confirm
# against the enclosing class definition.
to_srt_vtt = result_to_srt_vtt
to_ass = result_to_ass
to_tsv = result_to_tsv
to_txt = result_to_txt
save_as_json = save_as_json
def str_to_valid_type(val: str):
    """Convert a CLI argument string to a usable value.

    Empty string -> ``None``; '/'-separated -> list (with '*'-separated items
    becoming nested lists); otherwise a float/int when parseable, else the
    original string.
    """
    if not val:
        return None
    if '/' in val:
        items = val.split('/')
        return [item.split('*') if '*' in item else item for item in items]
    try:
        return float(val) if '.' in val else int(val)
    except ValueError:
        # not numeric; keep it as a plain string
        return val
def get_func_parameters(func):
    """Return the parameter names of ``func`` as a keys view, in declaration order."""
    signature = inspect.signature(func)
    return signature.parameters.keys()
def isolate_useful_options(options: dict, method, pop: bool = False) -> dict:
    """Return the subset of ``options`` whose keys are parameters of ``method``.

    When ``pop`` is True, the matching keys are removed from ``options``.
    """
    accessor = dict.pop if pop else dict.get
    useful = {}
    for key in inspect.signature(method).parameters.keys():
        if key in options:
            useful[key] = accessor(options, key)
    return useful
# Registry of supported denoiser backends. Each entry maps a denoiser name to:
#   'run':    function that processes audio with that denoiser
#   'load':   function that loads the denoiser's model
#   'access': availability check (judging by the ``is_*_available`` names — confirm)
SUPPORTED_DENOISERS = {
    'demucs': {'run': demucs_audio, 'load': load_demucs_model, 'access': is_demucs_available},
    'dfnet': {'run': dfnet_audio, 'load': load_dfnet_model, 'access': is_dfnet_available}
}
import os
def get_prepend_punctuations(default: Optional[str] = None) -> str:
    """Resolve the 'prepend_punctuations' setting; ``default`` is forwarded to ``get_val``
    (presumably used when no global value is set — see ``get_val``)."""
    return get_val('prepend_punctuations', default)
def get_append_punctuations(default: Optional[str] = None) -> str:
    """Resolve the 'append_punctuations' setting; ``default`` is forwarded to ``get_val``
    (presumably used when no global value is set — see ``get_val``)."""
    return get_val('append_punctuations', default)
def get_min_word_dur(default: Optional[float] = None) -> float:
    """Resolve the 'min_word_dur' setting; ``default`` is forwarded to ``get_val``
    (presumably used when no global value is set — see ``get_val``)."""
    return get_val('min_word_dur', default)
def is_allow_overwrite(filepath: str, default: Optional[bool] = None) -> bool:
    """
    Return whether writing to ``filepath`` is allowed.

    Parameters
    ----------
    filepath : str
        Path of the output file to check.
    default : bool, optional
        When given, it is returned as-is and no file check or prompt occurs.

    Returns
    -------
    bool
        ``True`` if ``filepath`` may be (over)written.

    Notes
    -----
    If the file exists and overwriting was not globally permitted
    (see ``set_global_overwrite_permission``), the user is prompted on stdin
    until a valid 'y'/'n' answer is given.
    """
    if default is not None:
        return default
    # nonexistent files and a global 'overwrite' permission need no confirmation
    if not os.path.isfile(filepath) or permissions.get('overwrite'):
        return True
    # loop instead of the previous unbounded recursion so repeated invalid
    # input cannot exhaust the call stack
    while True:
        resp = input(f'"{filepath}" already exist, overwrite (y/n)? ').lower()
        if resp in ('y', 'n'):
            return resp == 'y'
        print(f'Expected "y" or "n", but got {resp}.')
def set_global_overwrite_permission(overwrite: bool):
    """Globally allow (True) or disallow (False) overwriting existing files;
    when allowed, ``is_allow_overwrite`` skips the confirmation prompt."""
    permissions['overwrite'] = overwrite
def load_model(name: str, device: Optional[Union[str, torch.device]] = None,
               download_root: str = None, in_memory: bool = False,
               cpu_preload: bool = True, dq: bool = False) -> "Whisper":
    """
    Load an instance if :class:`whisper.model.Whisper`.

    Parameters
    ----------
    name : {'tiny', 'tiny.en', 'base', 'base.en', 'small', 'small.en', 'medium', 'medium.en', 'large-v1',
        'large-v2', 'large-v3', or 'large'}
        One of the official model names listed by :func:`whisper.available_models`, or
        path to a model checkpoint containing the model dimensions and the model state_dict.
    device : str or torch.device, optional
        PyTorch device to put the model into.
    download_root : str, optional
        Path to download the model files; by default, it uses "~/.cache/whisper".
    in_memory : bool, default False
        Whether to preload the model weights into host memory.
    cpu_preload : bool, default True
        Load model into CPU memory first then move model to specified device
        to reduce GPU memory usage when loading model
    dq : bool, default False
        Whether to apply Dynamic Quantization to model to reduced memory usage and increase inference speed
        but at the cost of a slight decrease in accuracy. Only for CPU.

    Returns
    -------
    model : "Whisper"
        The Whisper ASR model instance.

    Notes
    -----
    The overhead from ``dq = True`` might make inference slower for models smaller than 'large'.
    """
    # dynamic quantization is CPU-only, so ``dq`` forces the device to 'cpu'
    # even when a device was explicitly requested
    if device is None or dq:
        device = "cuda" if torch.cuda.is_available() and not dq else "cpu"
    if cpu_preload:
        # load weights into CPU memory first, then move the model to the target device
        model = whisper.load_model(name, device='cpu', download_root=download_root, in_memory=in_memory)
        cuda_index = None
        if isinstance(device, str) and device.startswith('cuda'):
            try:
                # extract the device index from strings like 'cuda:1'; plain 'cuda'
                # means default device (empty index list)
                cuda_index = [] if device == 'cuda' else [int(device.split(':')[-1])]
            except ValueError:
                # malformed index (e.g. 'cuda:abc'); fall back to generic .to(device)
                pass
        model = model.to(device=device) if cuda_index is None else model.cuda(*cuda_index)
    else:
        model = whisper.load_model(name, device=device, download_root=download_root, in_memory=in_memory)
    # apply stable-ts modifications to the loaded model
    modify_model(model)
    if dq:
        from ..quantization import ptdq_linear
        ptdq_linear(model)
    return model
def load_faster_whisper(model_size_or_path: str, **model_init_options):
    """
    Load an instance of :class:`faster_whisper.WhisperModel`.

    Parameters
    ----------
    model_size_or_path : {'tiny', 'tiny.en', 'base', 'base.en', 'small', 'small.en', 'medium', 'medium.en',
        'large-v1', 'large-v2', 'large-v3', or 'large'}
        Size of the model.
    model_init_options
        Additional options to use for initialization of :class:`faster_whisper.WhisperModel`.

    Returns
    -------
    faster_whisper.WhisperModel
        A modified instance with :func:`stable_whisper.whisper_word_level.faster_whisper.faster_transcribe`
        assigned to :meth:`faster_whisper.WhisperModel.transcribe_stable`.
    """
    from faster_whisper import WhisperModel
    model = WhisperModel(model_size_or_path, **model_init_options)
    # remember the requested size/path on the instance for later reference
    model.model_size_or_path = model_size_or_path
    # bind the stable-ts transcription and alignment functions as instance methods
    model.transcribe_stable = MethodType(faster_transcribe, model)
    from ..alignment import align
    model.align = MethodType(align, model)
    return model
def load_hf_whisper(model_name: str, device: str = None, flash: bool = False, pipeline=None):
    """Load an instance of :class:`WhisperHF` (Whisper running on Hugging Face Transformers).

    ``flash`` enables Flash Attention (per the --huggingface_whisper CLI help);
    ``pipeline`` is presumably an existing HF pipeline to reuse — confirm in ``WhisperHF``.
    """
    return WhisperHF(model_name, device, flash=flash, pipeline=pipeline)
def cli():
supported_denoisers = tuple(SUPPORTED_DENOISERS.keys())
str2val = {"true": True, "false": False, "1": True, "0": False}
def str2bool(string: str) -> bool:
string = string.lower()
if string in str2val:
return str2val[string]
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def valid_model_name(name):
if name in available_models() or os.path.exists(name):
return name
raise ValueError(
f"model should be one of {available_models()} or path to a model checkpoint"
)
def use_deprecated_args(
key: str, old_key: str, pop: bool = False, expected_default=None, new_default=None, eg: str = None,
):
new_val = args.pop(key) if pop else args[key]
old_val = args.pop(old_key) if pop else args[old_key]
if old_val != expected_default:
eg_str = eg if eg is None else f' (e.g. {eg})'
warnings.warn(f'{old_key} is deprecated and will be removed in future versions. '
f'Use {key}{eg_str}.', stacklevel=2)
if new_val == expected_default:
new_val = old_val
elif new_val == expected_default:
new_val = new_default
return new_val
def update_options_with_args(arg_key: Union[str, list], options: Optional[dict] = None, pop: bool = False):
extra_options = arg_key if isinstance(arg_key, list) else (args.pop(arg_key) if pop else args.get(arg_key))
if not extra_options:
return
extra_options = [kv.split('=', maxsplit=1) for kv in extra_options]
missing_val = [kv[0] for kv in extra_options if len(kv) == 1]
if missing_val:
raise ValueError(f'Following expected values for the following custom options: {missing_val}')
extra_options = dict((k, str_to_valid_type(v)) for k, v in extra_options)
if options is None:
return extra_options
options.update(extra_options)
def url_to_path(url: str):
if '://' in url:
from urllib.parse import urlparse
return urlparse(url).path.strip('/')
return url
OUTPUT_FORMATS_METHODS = {
"srt": "to_srt_vtt",
"ass": "to_ass",
"json": "save_as_json",
"vtt": "to_srt_vtt",
"tsv": "to_tsv",
"txt": "to_txt",
}
OUTPUT_FORMATS = set(OUTPUT_FORMATS_METHODS.keys())
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("inputs", nargs="+", type=str,
help="audio/video filepath/URL(s) to transcribe "
"or json file(s) to process into [output_format]")
parser.add_argument("--output", "-o", action="extend", nargs="+", type=str,
help="output filepaths(s);"
"if not specified, auto-named output file(s) will be saved to "
"[output_dir] or current dir if not specified.")
parser.add_argument("--model", '-m', default="base", type=valid_model_name,
help="name of the Whisper model to use")
parser.add_argument("--model_dir", type=str, default=None,
help="the path to save model files; uses ~/.cache/whisper by default")
parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu",
help="device to use for PyTorch inference")
parser.add_argument("--cpu_preload", type=str2bool, default=True,
help="load model into CPU memory first then move model to specified device; "
"this reduces GPU memory usage when loading model.")
parser.add_argument("--output_dir", "-d", type=str,
help="directory to save the outputs;"
"if a path in [output] does not have parent, that output will be save to this directory")
parser.add_argument("--output_format", "-f", type=str,
help="format of the output file(s); "
f"Supported Formats: {OUTPUT_FORMATS}; "
"use ',' to separate multiple formats")
parser.add_argument("--verbose", '-v', type=int, default=1, choices=(0, 1, 2),
help="whether to display the text being decoded to the console; "
"if 2, display all the details; "
"if 1, display progressbar; "
"if 0, display nothing")
parser.add_argument("--dynamic_quantization", "-dq", action='store_true',
help="whether to apply Dynamic Quantization to model "
"to reduced memory usage (~half less) and increase inference speed "
"at cost of slight decrease in accuracy; Only for CPU; "
"NOTE: overhead might make inference slower for models smaller than 'large'")
parser.add_argument("--task", type=str, default="transcribe",
choices=["transcribe", "translate"],
help="whether to perform X->X speech recognition ('transcribe') "
"or X->English translation ('translate')")
parser.add_argument("--language", '-l', type=str, default=None,
choices=sorted(LANGUAGES.keys()) + sorted([k.title() for k in TO_LANGUAGE_CODE.keys()]),
help="language spoken in the audio, specify None to perform language detection")
parser.add_argument("--prepend_punctuations", '-pp', type=str, default=get_prepend_punctuations(),
help="Punctuations to prepend to next word")
parser.add_argument("--append_punctuations", '-ap', type=str, default=get_append_punctuations(),
help="Punctuations to append to previous word")
parser.add_argument("--gap_padding", type=str, default=" ...",
help="padding prepend to each segments for word timing alignment;"
"used to reduce the probability of model predicting timestamps "
"earlier than the first utterance")
parser.add_argument("--word_timestamps", type=str2bool, default=True,
help="extract word-level timestamps using the cross-attention pattern and dynamic time warping,"
"and include the timestamps for each word in each segment;"
"disabling this will prevent segments from splitting/merging properly.")
parser.add_argument("--regroup", type=str, default="True",
help="whether to regroup all words into segments with more natural boundaries;"
"specify string for customizing the regrouping algorithm"
"ignored if [word_timestamps]=False.")
parser.add_argument('--ts_num', type=int, default=0,
help="number of extra inferences to perform to find the mean timestamps")
parser.add_argument('--ts_noise', type=float, default=0.1,
help="percentage of noise to add to audio_features to perform inferences for [ts_num]")
parser.add_argument('--suppress_silence', type=str2bool, default=True,
help="whether to suppress timestamp where audio is silent at segment-level"
"and word-level if [suppress_word_ts]=True")
parser.add_argument('--suppress_word_ts', type=str2bool, default=True,
help="whether to suppress timestamps where audio is silent at word-level; "
"ignored if [suppress_silence]=False")
parser.add_argument('--suppress_ts_tokens', type=str2bool, default=False,
help="whether to use silence mask to suppress silent timestamp tokens during inference; "
"increases word accuracy in some cases, but tends reduce 'verbatimness' of the transcript"
"ignored if [suppress_silence]=False")
parser.add_argument("--q_levels", type=int, default=20,
help="quantization levels for generating timestamp suppression mask; "
"acts as a threshold to marking sound as silent;"
"fewer levels will increase the threshold of volume at which to mark a sound as silent")
parser.add_argument("--k_size", type=int, default=5,
help="Kernel size for average pooling waveform to generate suppression mask; "
"recommend 5 or 3; higher sizes will reduce detection of silence")
parser.add_argument('--time_scale', type=float,
help="factor for scaling audio duration for inference;"
"greater than 1.0 'slows down' the audio; "
"less than 1.0 'speeds up' the audio; "
"1.0 is no scaling")
parser.add_argument('--vad', type=str2bool, default=False,
help='whether to use Silero VAD to generate timestamp suppression mask; '
'Silero VAD requires PyTorch 1.12.0+;'
'Official repo: https://github.com/snakers4/silero-vad')
parser.add_argument('--vad_threshold', type=float, default=0.35,
help='threshold for detecting speech with Silero VAD. (Default: 0.35); '
'low threshold reduces false positives for silence detection')
parser.add_argument('--vad_onnx', type=str2bool, default=False,
help='whether to use ONNX for Silero VAD')
parser.add_argument('--min_word_dur', type=float, default=get_min_word_dur(),
help="shortest duration each word is allowed to reach for silence suppression")
parser.add_argument('--nonspeech_error', type=float, default=0.1,
help="relative error of non-speech sections that appear in between a word for "
"silence suppression.")
parser.add_argument('--max_chars', type=int,
help="maximum number of character allowed in each segment")
parser.add_argument('--max_words', type=int,
help="maximum number of words allowed in each segment")
parser.add_argument('--demucs', type=str2bool,
help='whether to reprocess the audio track with Demucs to isolate vocals/remove noise; '
'Demucs official repo: https://github.com/facebookresearch/demucs;'
'DEPRECATED and replace with --denoiser "demucs"')
parser.add_argument('--demucs_output', action="extend", nargs="+", type=str,
help='path(s) to save the vocals isolated by Demucs as WAV file(s); '
'ignored if --demucs False; DEPRECATED and replace with --denoiser_output')
parser.add_argument('--denoiser', type=str, choices=supported_denoisers,
help='name of denoiser to reprocess the audio track to isolate vocals/remove noise')
parser.add_argument('--denoiser_output', action="extend", nargs="+", type=str,
help='path(s) to save the denoised audio as WAV file(s); '
'ignored if --denoiser is unspecified')
parser.add_argument('--only_voice_freq', '-ovf', action='store_true',
help='whether to only use sound between 200 - 5000 Hz, where majority of human speech are.')
parser.add_argument('--strip', type=str2bool, default=True,
help="whether to remove spaces before and after text on each segment for output")
parser.add_argument('--tag', type=str, action="extend", nargs="+",
help="a pair tags used to change the properties a word at its predicted time"
"SRT Default: '<font color=\"#00ff00\">', '</font>'"
"VTT Default: '<u>', '</u>'"
"ASS Default: '{\\1c&HFF00&}', '{\\r}'")
parser.add_argument('--segment_level', type=str2bool, default=True,
help="whether to use segment-level timestamps in output")
parser.add_argument('--word_level', type=str2bool, default=True,
help="whether to use word-level timestamps in output")
parser.add_argument('--reverse_text', type=str2bool, default=False,
help="whether to reverse the order of words for each segment of text output")
# ass output
parser.add_argument('--font', type=str, default='Arial',
help="word font for ASS output(s)")
parser.add_argument('--font_size', type=int, default=48,
help="word font size for ASS output(s)")
parser.add_argument('--karaoke', type=str2bool, default=False,
help="whether to use progressive filling highlights for karaoke effect (only for ASS outputs)")
parser.add_argument("--temperature", type=float, default=0,
help="temperature to use for sampling")
parser.add_argument("--best_of", type=optional_int,
help="number of candidates when sampling with non-zero temperature")
parser.add_argument("--beam_size", type=optional_int,
help="number of beams in beam search, only applicable when temperature is zero")
parser.add_argument("--patience", type=float, default=None,
help="optional patience value to use in beam decoding, "
"as in https://arxiv.org/abs/2204.05424, "
"the default (1.0) is equivalent to conventional beam search")
parser.add_argument("--length_penalty", type=float, default=None,
help="optional token length penalty coefficient (alpha) "
"as in https://arxiv.org/abs/1609.08144, uses simple length normalization by default")
parser.add_argument("--suppress_tokens", type=str, default="-1",
help="comma-separated list of token ids to suppress during sampling; "
"'-1' will suppress most special characters except common punctuations")
parser.add_argument("--initial_prompt", type=str, default=None,
help="optional text to provide as a prompt for the first window.")
parser.add_argument("--condition_on_previous_text", type=str2bool, default=True,
help="if True, provide the previous output of the model as a prompt for the next window; "
"disabling may make the text inconsistent across windows, "
"but the model becomes less prone to getting stuck in a failure loop")
parser.add_argument("--fp16", type=str2bool, default=True,
help="whether to perform inference in fp16; True by default")
parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2,
help="temperature to increase when falling back when the decoding fails to meet either of "
"the thresholds below")
parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4,
help="if the gzip compression ratio is higher than this value, treat the decoding as failed")
parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0,
help="if the average log probability is lower than this value, treat the decoding as failed")
parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6,
help="if the probability of the <|nospeech|> token is higher than this value AND the decoding "
"has failed due to `logprob_threshold`, consider the segment as silence")
parser.add_argument("--threads", type=optional_int, default=0,
help="number of threads used by torch for CPU inference; "
"supercedes MKL_NUM_THREADS/OMP_NUM_THREADS")
parser.add_argument('--mel_first', action='store_true',
help='process entire audio track into log-Mel spectrogram first instead in chunks; '
'DEPRECATED and replaced with --no_stream')
parser.add_argument('--no_stream', action='store_true',
help='whether to always load the entire audio track into memory')
parser.add_argument('--only_ffmpeg', action='store_true',
help='whether to use only FFmpeg (and not yt-dlp) for URls')
parser.add_argument('--overwrite', '-y', action='store_true',
help='overwrite all output files')
parser.add_argument('--debug', action='store_true',
help='print all input/output pair(s) and all arguments used for transcribing/translating')
parser.add_argument('--transcribe_method', '-tm', type=str, default='transcribe',
choices=('transcribe', 'transcribe_minimal'))
parser.add_argument('--align', '-a', action="extend", nargs='+', type=str,
help='path(s) to TXT file(s) or JSON previous result(s)')
parser.add_argument('--refine', '-r', action='store_true',
help='Refine timestamps to increase precision of timestamps')
parser.add_argument('--locate', '-lc', action="extend", nargs='+', type=str,
help='words to locate in the audio(s); skips transcription and output')
parser.add_argument('--refine_option', '-ro', action="extend", nargs='+', type=str,
help='Extra option(s) to use for refining timestamps; Replace True/False with 1/0; '
'E.g. --refine_option "steps=sese" --refine_options "rel_prob_decrease=0.05"')
parser.add_argument('--demucs_option', '-do', action="extend", nargs='+', type=str,
help='Extra option(s) to use for demucs; Replace True/False with 1/0; '
'E.g. --demucs_option "shifts=3" --demucs_options "overlap=0.5"; '
'DEPRECATED and replaced with --denoiser_option')
parser.add_argument('--denoiser_option', '-dno', action="extend", nargs='+', type=str,
help='Extra option(s) to use for denoiser; Replace True/False with 1/0; '
'E.g. --denoiser_option "shifts=3" --denoiser_option "overlap=0.5"')
parser.add_argument('--model_option', '-mo', action="extend", nargs='+', type=str,
help='Extra option(s) to use for loading model; Replace True/False with 1/0; '
'E.g. --model_option "download_root=./downloads"')
parser.add_argument('--transcribe_option', '-to', action="extend", nargs='+', type=str,
help='Extra option(s) to use for transcribing/alignment/locating; Replace True/False with 1/0; '
'E.g. --transcribe_option "ignore_compatibility=1"')
parser.add_argument('--save_option', '-so', action="extend", nargs='+', type=str,
help='Extra option(s) to use for text outputs; Replace True/False with 1/0; '
'E.g. --save_option "highlight_color=ffffff"')
parser.add_argument('--faster_whisper', '-fw', action='store_true',
help='whether to use faster-whisper (https://github.com/guillaumekln/faster-whisper); '
'note: some features may not be available')
parser.add_argument('--huggingface_whisper', '-hw', action='store_true',
help='whether to run Whisper on Hugging Face Transformers for more speed than faster-whisper'
' and even more speed with Flash Attention enabled on supported GPUs'
'(https://huggingface.co/openai/whisper-large-v3); '
'note: some features may not be available')
args = parser.parse_args().__dict__
debug = args.pop('debug')
if not args['language'] and (args['align'] or args['locate']):
raise ValueError('langauge is required for --align / --locate')
is_faster_whisper = args.pop('faster_whisper')
is_hf_whisper = args.pop('huggingface_whisper')
assert not (is_faster_whisper and is_hf_whisper), f'--huggingface_whisper cannot be used with --faster_whisper'
is_original_whisper = not (is_faster_whisper or is_hf_whisper)
model_name: str = args.pop("model")
model_dir: str = args.pop("model_dir")
inputs: List[Union[str, torch.Tensor]] = args.pop("inputs")
outputs: List[str] = args.pop("output")
output_dir: str = args.pop("output_dir")
output_format = args.pop("output_format")
overwrite: bool = args.pop("overwrite")
no_stream = use_deprecated_args('no_stream', 'mel_first', pop=True, expected_default=False)
args['stream'] = None if not no_stream else False
if overwrite:
set_global_overwrite_permission(True)
denoiser = use_deprecated_args('denoiser', 'demucs', pop=True, eg='--denoiser "demucs"')
args['denoiser'] = 'demucs' if denoiser is True else (denoiser or None)
denoiser_outputs = use_deprecated_args('denoiser_output', 'demucs_output', pop=True)
denoiser_options = use_deprecated_args('denoiser_option', 'demucs_option', pop=True)
args['denoiser_options'] = update_options_with_args(denoiser_options or '') or {}
regroup = args.pop('regroup')
max_chars = args.pop('max_chars')
max_words = args.pop('max_words')
args['verbose'] = False if args['verbose'] == 1 else (True if args['verbose'] == 2 else None)
show_curr_task = args['verbose'] is not None
strings_to_locate = args.pop('locate')
if dq := args.pop('dynamic_quantization', False):
args['device'] = 'cpu'
if args['reverse_text']:
args['reverse_text'] = (args.get('prepend_punctuations'), args.get('append_punctuations'))
if is_original_whisper:
model_type_name = 'Whisper'
from .original_whisper import load_model as load_model_func
model_name_kwarg = dict(name=model_name)
else:
if is_faster_whisper:
model_type_name = 'Faster-Whisper'
from .faster_whisper import load_faster_whisper as load_model_func
model_name_kwarg = dict(model_size_or_path=model_name)
else:
model_type_name = 'Hugging Face Whisper'
from .hf_whisper import load_hf_whisper as load_model_func
model_name_kwarg = dict(model_name=model_name)
if args.get('transcribe_method') == 'transcribe_minimal':
warnings.warn(f'{model_type_name} models already run on a version of transcribe_minimal. '
'--transcribe_method "transcribe_minimal" will be ignored.')
if args.get('refine'):
raise NotImplementedError(f'--refine is not supported for {model_type_name} models.')
if strings_to_locate:
raise NotImplementedError(f'--locate is not supported for {model_type_name} models.')
if is_faster_whisper:
args['transcribe_method'] = 'transcribe_stable'
if regroup:
try:
regroup = str2bool(regroup)
except ValueError:
pass
curr_output_formats: List[str] = output_format.split(',') if output_format else []
unsupported_formats = list(set(map(str.lower, curr_output_formats)) - OUTPUT_FORMATS)
if outputs:
unsupported_formats.extend(list(set(splitext(o)[-1].lower().strip('.') for o in outputs) - OUTPUT_FORMATS))
if len(unsupported_formats) != 0:
raise NotImplementedError(f'{unsupported_formats} are not supported. Supported formats: {OUTPUT_FORMATS}.')
if denoiser_outputs and len(denoiser_outputs) != len(inputs):
raise ValueError(f'--denoiser_outputs and inputs do not match in count. '
f'Got {len(denoiser_outputs)} and {len(inputs)}')
if tag := args.get('tag'):
assert tag == ['-1'] or len(tag) == 2, f'[tag] must be a pair of str but got {tag}'
def make_parent(filepath: str):
if parent := split(filepath)[0]:
os.makedirs(parent, exist_ok=True)
def is_json(file: str):
return file.endswith(".json")
def call_method_with_options(method, options: dict, include_first: bool = True):
def val_to_str(val) -> str:
if isinstance(val, (np.ndarray, torch.Tensor)):
return f'{val.__class__}(shape:{list(val.shape)})'
elif isinstance(val, str):
return f'"{val}"'
elif isinstance(val, bytes):
return f'{type(val)}(len:{len(val)})'
elif isinstance(val, torch.nn.Module):
return str(type(val))
elif isinstance(val, dict):
return str({k: val_to_str(v) for k, v in val.items()})
return str(val)
params = tuple(get_func_parameters(method))
if debug:
temp_options = {k: options.pop(k) for k in params if k in options}
temp_options.update(options)
options = temp_options
options_str = ',\n'.join(
f' {k}={val_to_str(v)}'
for k, v in options.items()
if include_first or k != params[0]
)
if options_str:
options_str = f'\n{options_str}\n'
else:
print(options, params)
print(f'{method.__qualname__}({options_str})')
return method(**options)
if alignments := args['align']:
if unsupported_align_fmts := \
[_ext for p in alignments if (_ext := splitext(p)[-1].lower()) not in ('.json', '.txt')]:
raise NotImplementedError(
f'Unsupported format(s) for alignment: {unsupported_align_fmts}'
)
if len(inputs) != len(alignments):
raise NotImplementedError(
f'Got {len(inputs)} audio file(s) but specified {len(alignments)} file(s) to align.'
)
else:
alignments = ['']*len(inputs)
def finalize_outputs(input_file: str, _output: str = None, _alignment: str = None) -> List[str]:
_curr_output_formats = curr_output_formats.copy()
basename, ext = splitext(_output or url_to_path(input_file))
ext = ext[1:]
if _output:
if ext.lower() in OUTPUT_FORMATS:
_curr_output_formats.append(ext)
else:
basename = _output
if not _curr_output_formats:
_curr_output_formats = ["srt" if is_json(input_file) or is_json(_alignment) else "json"]
_outputs = [f'{basename}.{ext}' for ext in set(_curr_output_formats)]
if output_dir:
_outputs = [join(output_dir, o) for o in _outputs]
return _outputs
if outputs:
if len(outputs) != len(inputs):
raise NotImplementedError(f'Got {len(inputs)} audio file(s) but specified {len(outputs)} output file(s).')
final_outputs = [finalize_outputs(i, o, a) for i, o, a in zip(inputs, outputs, alignments)]
else:
if not output_dir:
output_dir = '.'
final_outputs = [finalize_outputs(i, _alignment=a) for i, a in zip(inputs, alignments)]
if not overwrite:
for paths in final_outputs:
for path in paths:
if not is_allow_overwrite(path):
return
if model_name.endswith(".en") and args["language"] not in {"en", "English"}:
if args["language"] is not None:
warnings.warn(f"{model_name} is an English-only model but receipted "
f"'{args['language']}'; using English instead.")
args["language"] = "en"
temperature = args.pop("temperature")
increment = args.pop("temperature_increment_on_fallback")
if increment is not None:
temperature = tuple(np.arange(temperature, 1.0 + 1e-6, increment))
else:
temperature = [temperature]
args['temperature'] = temperature
threads = args.pop("threads")
if threads > 0:
torch.set_num_threads(threads)
if debug:
print('Input(s) -> Outputs(s)')
for i, (input_audio, output_paths, alignment) in enumerate(zip(inputs, final_outputs, alignments)):
dm_output = f' {denoiser_outputs[i]} ->' if denoiser_outputs else ''
alignment = f' + "{alignment}"' if alignment else ''
print(f'"{input_audio}"{alignment} ->{dm_output} {output_paths}')
print('')
if show_curr_task:
model_from_str = '' if model_dir is None else f' from {model_dir}'
model_loading_str = (
f'{model_type_name} {model_name} model {model_from_str}'
)
print(f'Loading {model_loading_str}\r', end='\n' if debug else '')
else:
model_loading_str = ''
alignments = args['align']
model = None
def _load_model():
nonlocal model
if model is None:
model_options = dict(
device=args.get('device'),
download_root=model_dir,
dq=dq,
)
model_options.update(model_name_kwarg)
model_options = isolate_useful_options(model_options, load_model_func)
update_options_with_args('model_option', model_options)
model = call_method_with_options(load_model_func, model_options)
if model_loading_str:
print(f'Loaded {model_loading_str} ')
return model
for i, (input_audio, output_paths) in enumerate(zip(inputs, final_outputs)):
skip_output = False
if isinstance(input_audio, str) and is_json(input_audio):
result = WhisperResult(input_audio)
else:
model = _load_model()
args['regroup'] = False
args['audio'] = input_audio
if denoiser_outputs:
args['denoiser_options']['save_path'] = denoiser_outputs[i]
transcribe_method = args.get('transcribe_method')
text = None
if alignments and (text := alignments[i]):
if text.endswith('.json'):
text = WhisperResult(text)
else:
with open(text, 'r', encoding='utf-8') as f:
text = f.read()
args['text'] = text
transcribe_method = 'align'
if strings_to_locate and (text := strings_to_locate[i]):
args['text'] = text
transcribe_method = 'locate'
skip_output = args['verbose'] = True
transcribe_method = getattr(model, transcribe_method)
transcribe_options = isolate_useful_options(args, transcribe_method)
if not text and not is_hf_whisper:
decoding_options = (
isolate_useful_options(args, model.transcribe if is_faster_whisper else DecodingOptions)
)
if is_faster_whisper:
if decoding_options['suppress_tokens']:
decoding_options['suppress_tokens'] = (
list(map(int, decoding_options['suppress_tokens'].split(',')))
)
for k in list(decoding_options.keys()):
if decoding_options[k] is None:
del decoding_options[k]
transcribe_options.update(decoding_options)
update_options_with_args('transcribe_option', transcribe_options)
result: WhisperResult = call_method_with_options(transcribe_method, transcribe_options)
if skip_output:
continue
if args['refine']:
model = _load_model()
refine_options = isolate_useful_options(args, model.refine)
refine_options['result'] = result
update_options_with_args('refine_option', refine_options)
call_method_with_options(model.refine, refine_options)
if args.get('word_timestamps'):
if regroup:
result.regroup(regroup, verbose=args['verbose'] or debug)
if max_chars or max_words:
result.split_by_length(max_chars=max_chars, max_words=max_words)
for path in output_paths:
make_parent(path)
save_method = getattr(result, OUTPUT_FORMATS_METHODS[splitext(path)[-1][1:]])
args['filepath'] = path
args['path'] = path
save_options = isolate_useful_options(args, save_method)
update_options_with_args('save_option', save_options)
call_method_with_options(save_method, save_options) | null |
13,845 | import warnings
import re
import torch
import numpy as np
from typing import Union, List, Tuple, Optional, Callable
from copy import deepcopy
from itertools import chain
from tqdm import tqdm
from .stabilization import suppress_silence, get_vad_silence_func, VAD_SAMPLE_RATES
from .stabilization.nonvad import audio2timings
from .text_output import *
from .utils import str_to_valid_type, format_timestamp, UnsortedException
from .audio.utils import audio_to_tensor_resample
from .default import get_min_word_dur, get_append_punctuations, get_prepend_punctuations
def _combine_attr(obj: object, other_obj: object, attr: str):
if (val := getattr(obj, attr)) is not None:
other_val = getattr(other_obj, attr)
if isinstance(val, list):
if other_val is None:
setattr(obj, attr, None)
else:
val.extend(other_val)
else:
new_val = None if other_val is None else ((val + other_val) / 2)
setattr(obj, attr, new_val) | null |
13,846 | import warnings
import re
import torch
import numpy as np
from typing import Union, List, Tuple, Optional, Callable
from copy import deepcopy
from itertools import chain
from tqdm import tqdm
from .stabilization import suppress_silence, get_vad_silence_func, VAD_SAMPLE_RATES
from .stabilization.nonvad import audio2timings
from .text_output import *
from .utils import str_to_valid_type, format_timestamp, UnsortedException
from .audio.utils import audio_to_tensor_resample
from .default import get_min_word_dur, get_append_punctuations, get_prepend_punctuations
def _increment_attr(obj: object, attr: str, val: Union[int, float]):
if (curr_val := getattr(obj, attr, None)) is not None:
setattr(obj, attr, curr_val + val) | null |
13,847 | import warnings
import re
import torch
import numpy as np
from typing import Union, List, Tuple, Optional, Callable
from copy import deepcopy
from itertools import chain
from tqdm import tqdm
from .stabilization import suppress_silence, get_vad_silence_func, VAD_SAMPLE_RATES
from .stabilization.nonvad import audio2timings
from .text_output import *
from .utils import str_to_valid_type, format_timestamp, UnsortedException
from .audio.utils import audio_to_tensor_resample
from .default import get_min_word_dur, get_append_punctuations, get_prepend_punctuations
def _round_timestamp(ts: Union[float, None]):
if not ts:
return ts
return round(ts, 3) | null |
13,848 | import warnings
import re
import torch
import numpy as np
from typing import Union, List, Tuple, Optional, Callable
from copy import deepcopy
from itertools import chain
from tqdm import tqdm
from .stabilization import suppress_silence, get_vad_silence_func, VAD_SAMPLE_RATES
from .stabilization.nonvad import audio2timings
from .text_output import *
from .utils import str_to_valid_type, format_timestamp, UnsortedException
from .audio.utils import audio_to_tensor_resample
from .default import get_min_word_dur, get_append_punctuations, get_prepend_punctuations
class WordTiming:
    """
    Timing and metadata of a single word within a transcription segment.

    Holds the word text, (by default) millisecond-rounded start/end timestamps,
    optional token IDs and probability, and left/right lock flags used by
    regrouping logic to keep a word attached to its neighbors.

    Note: ``start``, ``end``, ``segment_id`` and ``duration`` are properties —
    the decorators were missing, which made ``def start(self)`` shadowed by
    ``def start(self, val)`` and broke every ``self.start = ...`` assignment and
    the ``self.duration > max_dur`` comparison in :meth:`clamp_max`.
    """

    def __init__(
            self,
            word: str,
            start: float,
            end: float,
            probability: Optional[float] = None,
            tokens: Optional[List[int]] = None,
            left_locked: bool = False,
            right_locked: bool = False,
            segment_id: Optional[int] = None,
            id: Optional[int] = None,
            segment: Optional['Segment'] = None,
            round_ts: bool = True,
            ignore_unused_args: bool = False
    ):
        # ``segment_id`` is accepted only for backward compatibility; the id is
        # derived from ``segment`` (see the ``segment_id`` property).
        if not ignore_unused_args and segment_id is not None:
            warnings.warn('The parameter ``segment_id`` is ignored. '
                          'Specify the current segment instance with ``segment``.',
                          stacklevel=2)
        self.round_ts = round_ts
        self.word = word
        self._start = self.round(start)
        self._end = self.round(end)
        self.probability = probability
        self.tokens = tokens
        self.left_locked = left_locked
        self.right_locked = right_locked
        self.segment = segment
        self.id = id

    def __repr__(self):
        return f'WordTiming(start={self.start}, end={self.end}, word="{self.word}")'

    def __len__(self):
        return len(self.word)

    def __add__(self, other: 'WordTiming'):
        # Concatenate the word text and take the widest time span; probability
        # is averaged and tokens concatenated by ``_combine_attr`` (either
        # becomes None if missing on one side).
        self_copy = WordTiming(
            word=self.word + other.word,
            start=min(self.start, other.start),
            end=max(self.end, other.end),
            probability=self.probability,
            tokens=None if self.tokens is None else self.tokens.copy(),
            left_locked=self.left_locked or other.left_locked,
            right_locked=self.right_locked or other.right_locked,
            id=self.id,
            segment=self.segment
        )

        _combine_attr(self_copy, other, 'probability')
        _combine_attr(self_copy, other, 'tokens')

        return self_copy

    def __deepcopy__(self, memo=None):
        return self.copy(copy_tokens=True)

    def __copy__(self):
        return self.copy()

    def copy(
            self,
            keep_segment: bool = False,
            copy_tokens: bool = False
    ):
        """
        Return a copy of this instance.

        Parameters
        ----------
        keep_segment : bool, default False
            Whether the copy keeps the reference to the parent segment.
        copy_tokens : bool, default False
            Whether ``tokens`` is copied as a new list rather than shared.
        """
        return WordTiming(
            word=self.word,
            start=self.start,
            end=self.end,
            probability=self.probability,
            tokens=None if (self.tokens is None) else (self.tokens.copy() if copy_tokens else self.tokens),
            left_locked=self.left_locked,
            right_locked=self.right_locked,
            id=self.id,
            segment=self.segment if keep_segment else None,
            round_ts=self.round_ts
        )

    def round(self, timestamp: float) -> float:
        """Round ``timestamp`` to milliseconds when ``round_ts`` is enabled."""
        if not self.round_ts:
            return timestamp
        return _round_timestamp(timestamp)

    @property
    def start(self) -> float:
        return self._start

    @start.setter
    def start(self, val):
        self._start = self.round(val)

    @property
    def end(self) -> float:
        return self._end

    @end.setter
    def end(self, val):
        self._end = self.round(val)

    @property
    def segment_id(self):
        """ID of the parent segment (``None`` when detached)."""
        return None if self.segment is None else self.segment.id

    @property
    def duration(self) -> float:
        return self.round(self.end - self.start)

    def round_all_timestamps(self):
        warnings.warn('``.round_all_timestamps()`` is deprecated and will be removed in future versions. '
                      'Use ``.round_ts=True`` to round timestamps by default instead.',
                      stacklevel=2)
        self.round_ts = True

    def offset_time(self, offset_seconds: float):
        """Shift both timestamps by ``offset_seconds`` (in place)."""
        self.start = self.start + offset_seconds
        self.end = self.end + offset_seconds

    def to_dict(self) -> dict:
        """Return the word data as a plain dict (``tokens`` is copied)."""
        return dict(
            word=self.word,
            start=self.start,
            end=self.end,
            probability=self.probability,
            tokens=None if self.tokens is None else self.tokens.copy()
        )

    def lock_left(self):
        self.left_locked = True

    def lock_right(self):
        self.right_locked = True

    def lock_both(self):
        self.lock_left()
        self.lock_right()

    def unlock_both(self):
        self.left_locked = False
        self.right_locked = False

    def suppress_silence(self,
                         silent_starts: np.ndarray,
                         silent_ends: np.ndarray,
                         min_word_dur: Optional[float] = None,
                         nonspeech_error: float = 0.3,
                         keep_end: Optional[bool] = True):
        """Adjust timestamps away from the given silent sections (in place); returns ``self``."""
        suppress_silence(self, silent_starts, silent_ends, min_word_dur, nonspeech_error, keep_end)
        return self

    def rescale_time(self, scale_factor: float):
        """Multiply both timestamps by ``scale_factor`` (in place)."""
        self.start = self.start * scale_factor
        self.end = self.end * scale_factor

    def clamp_max(self, max_dur: float, clip_start: bool = False, verbose: bool = False):
        """
        Limit the duration to ``max_dur`` by moving ``start`` forward
        (``clip_start=True``) or ``end`` backward (default).
        """
        if self.duration > max_dur:
            if clip_start:
                new_start = round(self.end - max_dur, 3)
                if verbose:
                    print(f'Start: {self.start} -> {new_start}\nEnd: {self.end}\nText:"{self.word}"\n')
                self.start = new_start
            else:
                new_end = round(self.start + max_dur, 3)
                if verbose:
                    print(f'Start: {self.start}\nEnd: {self.end} -> {new_end}\nText:"{self.word}"\n')
                self.end = new_end

    def set_segment(self, segment: 'Segment'):
        warnings.warn('``.set_segment(current_segment_instance)`` is deprecated and will be removed in future versions.'
                      ' Use ``.segment = current_segment`` instead.',
                      stacklevel=2)
        self.segment = segment

    def get_segment(self) -> Union['Segment', None]:
        """
        Return instance of :class:`stable_whisper.result.Segment` that this instance is a part of.
        """
        warnings.warn('``.get_segment()`` will be removed in future versions. Use ``.segment`` instead.',
                      stacklevel=2)
        return self.segment
The provided code snippet includes necessary dependencies for implementing the `_words_by_lock` function. Write a Python function `def _words_by_lock(words: List[WordTiming], only_text: bool = False, include_single: bool = False)` to solve the following problem:
Return a nested list of words such that each sublist contains words that are locked together.
Here is the function:
def _words_by_lock(words: List[WordTiming], only_text: bool = False, include_single: bool = False):
"""
Return a nested list of words such that each sublist contains words that are locked together.
"""
all_words = []
for word in words:
if len(all_words) == 0 or not (all_words[-1][-1].right_locked or word.left_locked):
all_words.append([word])
else:
all_words[-1].append(word)
if only_text:
all_words = list(map(lambda ws: list(map(lambda w: w.word, ws)), all_words))
if not include_single:
all_words = [ws for ws in all_words if len(ws) > 1]
return all_words | Return a nested list of words such that each sublist contains words that are locked together. |
13,849 | import subprocess
import warnings
from typing import Union, Optional, BinaryIO, Tuple, Iterable
import numpy as np
import torch
import torchaudio
from whisper.audio import SAMPLE_RATE
def audio_to_tensor_resample(
        audio: Union[torch.Tensor, np.ndarray, str, bytes],
        original_sample_rate: Optional[int] = None,
        target_sample_rates: Optional[Union[int, Iterable[int]]] = None,
        **kwargs
) -> torch.Tensor:
    """
    Return ``audio`` as a float Tensor, decoding and resampling as needed.

    A ``str``/``bytes`` source is decoded with ``load_audio`` directly at the
    first target rate (if any); an array/tensor is resampled from
    ``original_sample_rate`` (``SAMPLE_RATE`` when unspecified) only when that
    rate is not already one of ``target_sample_rates``.
    """
    if isinstance(target_sample_rates, int) and target_sample_rates:
        target_sample_rates = [target_sample_rates]
    if isinstance(audio, (str, bytes)):
        # decode directly at the desired rate instead of resampling afterward
        if target_sample_rates:
            original_sample_rate = target_sample_rates[0]
        audio = load_audio(audio, sr=original_sample_rate, **kwargs)
    elif not original_sample_rate:
        original_sample_rate = SAMPLE_RATE
    if isinstance(audio, np.ndarray):
        audio = torch.from_numpy(audio)
    audio = audio.float()
    needs_resample = bool(target_sample_rates) and original_sample_rate not in target_sample_rates
    if needs_resample:
        audio = resample(audio, original_sample_rate, target_sample_rates[0])
    return audio
def standardize_audio(
        audio: Union[torch.Tensor, np.ndarray, str, bytes],
        resample_sr: Tuple[Optional[int], Union[int, Tuple[int]]] = None,
) -> torch.Tensor:
    """Deprecated alias of :func:`audio_to_tensor_resample`; ``resample_sr`` unpacks into its sample-rate arguments."""
    warnings.warn('This `standardize_audio()` is deprecated and will be removed. '
                  'Use `stable_whisper.audio.utils.audio_to_tensor_resample()` instead.')
    sr_args = resample_sr or ()
    return audio_to_tensor_resample(audio, *sr_args)
13,850 | import random
from typing import Union, Optional
import torch
from .utils import load_audio
from ..audio.utils import resample
from ..default import cached_model_instances
def load_demucs_model(cache: bool = True):
    """
    Return the 'htdemucs' Demucs model, reusing a cached instance when ``cache`` is enabled.
    """
    name = 'htdemucs'
    model_cache = cached_model_instances['demucs'] if cache else None
    if model_cache is not None and model_cache[name] is not None:
        return model_cache[name]
    is_demucs_available()
    from demucs.pretrained import get_model_from_args
    # get_model_from_args() expects an argparse-like namespace
    fake_args = type('args', (object,), dict(name=name, repo=None))
    model = get_model_from_args(fake_args).cpu().eval().models[0]
    if model_cache is not None:
        model_cache[name] = model
    return model
def apply_demucs_model(
        model,
        mix,
        shifts=0,
        split=True,
        overlap=0.25,
        transition_power=1.,
        progress=False,
        device=None,
        num_workers=0,
        pool=None
):
    """
    Apply a Demucs source-separation ``model`` to ``mix`` and return the mono vocals track.

    ``split`` processes the audio in overlapping segments blended with a triangular
    weight; ``shifts`` averages predictions over that many random time shifts.
    ``pool`` (or ``num_workers`` on CPU) controls concurrent segment inference.
    """
    # default to the device the input already lives on
    if device is None:
        device = mix.device
    else:
        device = torch.device(device)
    mix = mix.to(device)
    if pool is None:
        if num_workers > 0 and device.type == 'cpu':
            from concurrent.futures import ThreadPoolExecutor
            pool = ThreadPoolExecutor(num_workers)
        else:
            # serial executor with the same submit()/result() interface
            from demucs.utils import DummyPoolExecutor
            pool = DummyPoolExecutor()
    from demucs.apply import TensorChunk, tensor_chunk
    from demucs.utils import center_trim
    model = model.to(device)
    assert transition_power >= 1, "transition_power < 1 leads to weird behavior."
    # maximum random shift: half a second of samples
    max_shift = int(0.5 * model.samplerate)
    def _inference(_mix):
        # run the model on one (padded) chunk and trim back to the original length
        _length = _mix.shape[-1]
        valid_length = model.valid_length(_length) if hasattr(model, 'valid_length') else _length
        padded_mix = tensor_chunk(_mix).padded(valid_length).to(device)
        with torch.no_grad():
            out = model(padded_mix)
        return center_trim(out, _length)
    def _split(_mix):
        # chunked inference over overlapping segments, blended by a weight window
        batch, channels, length = _mix.shape
        out = torch.zeros(batch, len(model.sources), channels, length, device=device)
        sum_weight = torch.zeros(length, device=device)
        segment = int(model.samplerate * model.segment)
        stride = int((1 - overlap) * segment)
        offsets = range(0, length, stride)
        # We start from a triangle shaped weight, with maximal weight in the middle
        # of the segment. Then we normalize and take to the power `transition_power`.
        # Large values of transition power will lead to sharper transitions.
        weight = torch.cat(
            [torch.arange(1, segment // 2 + 1, device=device),
             torch.arange(segment - segment // 2, 0, -1, device=device)]
        )
        assert len(weight) == segment
        # If the overlap < 50%, this will translate to linear transition when
        # transition_power is 1.
        weight = (weight / weight.max()) ** transition_power
        futures = []
        for offset in offsets:
            chunk = TensorChunk(_mix, offset, segment)
            future = pool.submit(_inference, chunk)
            futures.append((future, offset))
            offset += segment  # NOTE(review): no effect — ``offset`` is rebound by the for loop; confirm intended
        samples_per_future = length / len(futures)
        for future, offset in futures:
            chunk_out = future.result()
            chunk_length = chunk_out.shape[-1]
            # accumulate weighted chunk output and its weights for later normalization
            out[..., offset:offset + segment] += (weight[:chunk_length] * chunk_out).to(mix.device)
            sum_weight[offset:offset + segment] += weight[:chunk_length].to(mix.device)
            update_pbar(samples_per_future)
        assert sum_weight.min() > 0
        out /= sum_weight
        return out
    orig_length = mix.shape[-1]
    if progress:
        import tqdm
        total_duration = round(orig_length / model.samplerate, 2)
        pbar = tqdm.tqdm(total=total_duration, unit='sec', desc='Demucs')
        def update_pbar(samples):
            # ``None`` finalizes the bar; otherwise advance by ``samples`` (per shift)
            if samples is None:
                pbar.update(pbar.total - pbar.n)
                return
            if shifts > 1:
                samples /= shifts
            seek_duration = min(round(pbar.n + (samples / model.samplerate), 2), total_duration)
            pbar.update(seek_duration - pbar.n)  # this keeps ``n`` rounded
    else:
        def update_pbar(samples):
            pass
    inference = _split if split else _inference
    if not shifts:
        output = inference(mix)
    else:
        # average predictions over random shifts to reduce boundary artifacts
        output = 0
        mix = tensor_chunk(mix).padded(orig_length + 2 * max_shift)
        for _ in range(shifts):
            shift_offset = random.randint(0, max_shift)
            shifted = TensorChunk(mix, shift_offset, orig_length + max_shift - shift_offset)
            shifted_out = inference(shifted)
            output += shifted_out[..., max_shift - shift_offset:]
        output /= shifts
    update_pbar(None)
    # first batch item, vocals source only, averaged over channels -> mono
    return output[0, model.sources.index('vocals')].mean(0)
def load_audio(
        file: Union[str, bytes, BinaryIO],
        sr: int = None,
        verbose: Optional[bool] = True,
        only_ffmpeg: bool = False,
):
    """
    Open an audio file and read as mono waveform then resample as necessary.

    Parameters
    ----------
    file : str or bytes or BinaryIO
        The audio file to open, bytes of file, or URL to audio/video.
    sr : int, default whisper.model.SAMPLE_RATE
        The sample rate to resample the audio if necessary.
    verbose : bool or None, default True
        Verbosity for yt-dlp and displaying content metadata when ``file`` is a URL. If not ``None``, display metadata.
        For yt-dlp: ``None`` is "--quiet"; ``True`` is "--progress"; ``False`` is "--progress" + "--quiet".
    only_ffmpeg : bool, default False
        Whether to use only FFmpeg (instead of yt-dlp) for URLs.

    Returns
    -------
    numpy.ndarray
        An array containing the audio waveform in float32.

    Raises
    ------
    RuntimeError
        If FFmpeg fails or produces no output.
    """
    if sr is None:
        sr = SAMPLE_RATE
    # resolve URLs / sources into a path, bytes, or file-like object
    file = load_source(file, verbose=verbose, only_ffmpeg=only_ffmpeg, return_dict=False)
    try:
        # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
        # Requires the ffmpeg CLI in PATH.
        cmd = [
            "ffmpeg",
            "-nostdin",
            "-threads", "0",
            "-i", file if isinstance(file, str) else "pipe:",
            "-f", "s16le",
            "-ac", "1",
            "-acodec", "pcm_s16le",
            "-ar", str(sr),
            "-"
        ]
        if isinstance(file, str):
            out = subprocess.run(cmd, capture_output=True, check=True).stdout
        else:
            # feed bytes / file-like input through stdin; silence ffmpeg chatter on the pipe
            cmd = cmd[:1] + ["-loglevel", "error"] + cmd[1:]
            stdin = subprocess.PIPE if isinstance(file, bytes) else file
            out = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=stdin)
            out = out.communicate(input=file if isinstance(file, bytes) else None)[0]
            if not out:
                # ``len()`` is only valid for bytes input, not file-like objects
                src_desc = f'bytes ({len(file)})' if isinstance(file, bytes) else 'stream'
                raise RuntimeError(f"Failed to load audio from {src_desc}.")
    except (subprocess.CalledProcessError, subprocess.SubprocessError) as e:
        # ``stderr`` is only captured on the subprocess.run path (CalledProcessError);
        # guard it so the original error is not masked by an AttributeError.
        stderr = getattr(e, 'stderr', None)
        detail = stderr.decode(errors='replace') if stderr else str(e)
        raise RuntimeError(f"Failed to load audio: {detail}") from e
    # s16le PCM -> float32 in [-1.0, 1.0)
    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def resample(audio: torch.Tensor, in_sr: int, out_sr: int, **kwargs) -> torch.Tensor:
    """Resample ``audio`` from ``in_sr`` Hz to ``out_sr`` Hz via ``torchaudio.functional.resample``."""
    return torchaudio.functional.resample(audio, in_sr, out_sr, **kwargs)
def save_audio_tensor(
        audio: Union[np.ndarray, torch.Tensor],
        path: str,
        sr: int,
        verbose: Optional[bool] = False,
        silent_timings: Optional[Union[np.ndarray, List[dict]]] = None,
        channel: Optional[str] = 'l',
        overwrite: Optional[bool] = None
):
    """
    Save ``audio`` to ``path`` with sections muted according to ``silent_timings`` on ``channel``.

    When ``silent_timings`` is given, the audio is mixed down to mono, a muted
    copy is placed on the left ('l') or right ('r') channel (or replaces the
    audio entirely when ``channel`` is ``None``), and the pair is saved as stereo.
    Only 1D/2D (or squeezable 3D) tensors are supported; others are skipped with a warning.
    """
    if not is_allow_overwrite(path, overwrite):
        return
    if channel is not None and channel not in ('l', 'r'):
        # NOTE(review): the message refers to ``split`` but the parameter is ``channel``
        raise ValueError(f'``split`` must be "l" or "r" but got "{channel}".')
    if isinstance(audio, np.ndarray):
        audio = torch.from_numpy(audio)
    else:
        audio = audio.cpu()
    dims = audio.ndim
    # drop a leading singleton batch dimension
    if dims == 3 and audio.shape[0] == 1:
        audio = audio[0]
    if 0 == dims or dims > 2:
        warnings.warn(f'{dims}D audio Tensor not supported.')
        return
    if dims == 1:
        audio = audio[None]
    if silent_timings is not None:
        # mix down to mono before muting so both output channels share content
        audio = audio.mean(dim=0) if audio.shape[0] == 2 else audio[0]
        audio_copy = audio.clone()
        for t in silent_timings:
            # timings may be dicts ({'start','end'} in seconds) or (start, end) pairs
            s, e = (t['start'], t['end']) if isinstance(t, dict) else t
            s = round(s * sr)
            e = round(e * sr)
            audio_copy[s:e] = 0
        if channel is None:
            audio = audio_copy[None]
        else:
            # muted copy on the requested channel, original on the other
            audio = (audio_copy, audio) if channel == 'l' else (audio, audio_copy)
            audio = torch.stack(audio, dim=0)
    try:
        torchaudio.save(path, audio, sr)
    except ValueError as e:
        warnings.warn(f'Failed to save audio to "{path}". Error: {e}', stacklevel=2)
    else:
        if verbose is not None:
            print(f'Saved: "{path}"')
The provided code snippet includes necessary dependencies for implementing the `demucs_audio` function. Write a Python function `def demucs_audio( audio: Union[torch.Tensor, str, bytes], input_sr: int = None, output_sr: int = None, model=None, device=None, verbose: bool = True, save_path: Optional[Union[str, callable]] = None, seed: Optional[int] = 1, **demucs_options ) -> torch.Tensor` to solve the following problem:
Isolates vocals / removes noise from ``audio`` with Demucs. Official repo: https://github.com/facebookresearch/demucs.
Here is the function:
def demucs_audio(
        audio: Union[torch.Tensor, str, bytes],
        input_sr: int = None,
        output_sr: int = None,
        model=None,
        device=None,
        verbose: bool = True,
        save_path: Optional[Union[str, callable]] = None,
        seed: Optional[int] = 1,
        **demucs_options
) -> torch.Tensor:
    """
    Isolate vocals / remove noise from ``audio`` with Demucs.

    Official repo: https://github.com/facebookresearch/demucs.

    ``str``/``bytes`` sources are decoded at the model's sample rate; tensors
    are resampled from ``input_sr``. The separated vocals are optionally
    resampled to ``output_sr`` and saved to ``save_path`` (a file path or a
    callable receiving the tensor). ``seed`` makes the random shifts in
    ``apply_demucs_model`` reproducible.
    """
    if model is None:
        model = load_demucs_model()
    if isinstance(audio, (str, bytes)):
        # decode directly at the model's expected rate
        audio = torch.from_numpy(load_audio(audio, model.samplerate))
    elif input_sr != model.samplerate:
        if input_sr is None:
            raise ValueError('No ``input_sr`` specified for audio tensor.')
        audio = resample(audio, input_sr, model.samplerate)
    audio_dims = audio.dim()
    assert audio_dims <= 3
    # expand to (batch, channels, samples) as the model expects
    if dims_missing := 3 - audio_dims:
        audio = audio[[None]*dims_missing]
    if audio.shape[-2] == 1:
        # duplicate mono to stereo for the model
        audio = audio.repeat_interleave(2, -2)
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    apply_kwarg = dict(
        model=model,
        mix=audio,
        device=device,
        split=True,
        overlap=.25,
        progress=verbose is not None,
    )
    apply_kwarg.update(demucs_options)
    if seed is not None:
        random.seed(seed)
    vocals = apply_demucs_model(**apply_kwarg)
    if device != 'cpu':
        torch.cuda.empty_cache()
    if output_sr is not None and model.samplerate != output_sr:
        vocals = resample(vocals, model.samplerate, output_sr)
    if save_path is not None:
        if isinstance(save_path, str):
            from .output import save_audio_tensor
            save_audio_tensor(vocals, save_path, output_sr or model.samplerate, verbose=verbose)
        else:
            save_path(vocals)
    return vocals
13,851 | from typing import Union, Optional
import torch
from .utils import load_audio
from ..audio.utils import resample
from ..default import cached_model_instances
def load_dfnet_model(cache: bool = True, **kwargs):
    """
    Load (or fetch from cache) a DeepFilterNet model, with a bound ``enhance``
    method and a ``samplerate`` attribute attached for convenience.
    """
    name = 'dfnet'
    model_cache = cached_model_instances['dfnet'] if cache else None
    if model_cache is not None and model_cache[name] is not None:
        return model_cache[name]
    is_dfnet_available()
    from types import MethodType
    from df.enhance import init_df, enhance
    model, df_state, _ = init_df(**kwargs)
    model.df_state = df_state

    # bind enhance() so callers need not pass the df_state explicitly
    def _enhance(_model, audio, **enhance_kwargs):
        return enhance(model=_model, df_state=_model.df_state, audio=audio, **enhance_kwargs)

    model.enhance = MethodType(_enhance, model)
    model.samplerate = df_state.sr()
    if model_cache is not None:
        model_cache[name] = model
    return model
def load_audio(
        file: Union[str, bytes, BinaryIO],
        sr: int = None,
        verbose: Optional[bool] = True,
        only_ffmpeg: bool = False,
):
    """
    Open an audio file and read as mono waveform then resample as necessary.

    Parameters
    ----------
    file : str or bytes or BinaryIO
        The audio file to open, bytes of file, or URL to audio/video.
    sr : int, default whisper.model.SAMPLE_RATE
        The sample rate to resample the audio if necessary.
    verbose : bool or None, default True
        Verbosity for yt-dlp and displaying content metadata when ``file`` is a URL. If not ``None``, display metadata.
        For yt-dlp: ``None`` is "--quiet"; ``True`` is "--progress"; ``False`` is "--progress" + "--quiet".
    only_ffmpeg : bool, default False
        Whether to use only FFmpeg (instead of yt-dlp) for URLs.

    Returns
    -------
    numpy.ndarray
        An array containing the audio waveform in float32.

    Raises
    ------
    RuntimeError
        If FFmpeg fails or produces no output.
    """
    if sr is None:
        sr = SAMPLE_RATE
    # resolve URLs / sources into a path, bytes, or file-like object
    file = load_source(file, verbose=verbose, only_ffmpeg=only_ffmpeg, return_dict=False)
    try:
        # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
        # Requires the ffmpeg CLI in PATH.
        cmd = [
            "ffmpeg",
            "-nostdin",
            "-threads", "0",
            "-i", file if isinstance(file, str) else "pipe:",
            "-f", "s16le",
            "-ac", "1",
            "-acodec", "pcm_s16le",
            "-ar", str(sr),
            "-"
        ]
        if isinstance(file, str):
            out = subprocess.run(cmd, capture_output=True, check=True).stdout
        else:
            # feed bytes / file-like input through stdin; silence ffmpeg chatter on the pipe
            cmd = cmd[:1] + ["-loglevel", "error"] + cmd[1:]
            stdin = subprocess.PIPE if isinstance(file, bytes) else file
            out = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=stdin)
            out = out.communicate(input=file if isinstance(file, bytes) else None)[0]
            if not out:
                # ``len()`` is only valid for bytes input, not file-like objects
                src_desc = f'bytes ({len(file)})' if isinstance(file, bytes) else 'stream'
                raise RuntimeError(f"Failed to load audio from {src_desc}.")
    except (subprocess.CalledProcessError, subprocess.SubprocessError) as e:
        # ``stderr`` is only captured on the subprocess.run path (CalledProcessError);
        # guard it so the original error is not masked by an AttributeError.
        stderr = getattr(e, 'stderr', None)
        detail = stderr.decode(errors='replace') if stderr else str(e)
        raise RuntimeError(f"Failed to load audio: {detail}") from e
    # s16le PCM -> float32 in [-1.0, 1.0)
    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def resample(audio: torch.Tensor, in_sr: int, out_sr: int, **kwargs) -> torch.Tensor:
    """Resample ``audio`` from ``in_sr`` Hz to ``out_sr`` Hz via ``torchaudio.functional.resample``."""
    return torchaudio.functional.resample(audio, in_sr, out_sr, **kwargs)
def save_audio_tensor(
        audio: Union[np.ndarray, torch.Tensor],
        path: str,
        sr: int,
        verbose: Optional[bool] = False,
        silent_timings: Optional[Union[np.ndarray, List[dict]]] = None,
        channel: Optional[str] = 'l',
        overwrite: Optional[bool] = None
):
    """
    Save ``audio`` to ``path`` with sections muted according to ``silent_timings`` on ``channel``.

    When ``silent_timings`` is given, the audio is mixed down to mono, a muted
    copy is placed on the left ('l') or right ('r') channel (or replaces the
    audio entirely when ``channel`` is ``None``), and the pair is saved as stereo.
    Only 1D/2D (or squeezable 3D) tensors are supported; others are skipped with a warning.
    """
    if not is_allow_overwrite(path, overwrite):
        return
    if channel is not None and channel not in ('l', 'r'):
        # NOTE(review): the message refers to ``split`` but the parameter is ``channel``
        raise ValueError(f'``split`` must be "l" or "r" but got "{channel}".')
    if isinstance(audio, np.ndarray):
        audio = torch.from_numpy(audio)
    else:
        audio = audio.cpu()
    dims = audio.ndim
    # drop a leading singleton batch dimension
    if dims == 3 and audio.shape[0] == 1:
        audio = audio[0]
    if 0 == dims or dims > 2:
        warnings.warn(f'{dims}D audio Tensor not supported.')
        return
    if dims == 1:
        audio = audio[None]
    if silent_timings is not None:
        # mix down to mono before muting so both output channels share content
        audio = audio.mean(dim=0) if audio.shape[0] == 2 else audio[0]
        audio_copy = audio.clone()
        for t in silent_timings:
            # timings may be dicts ({'start','end'} in seconds) or (start, end) pairs
            s, e = (t['start'], t['end']) if isinstance(t, dict) else t
            s = round(s * sr)
            e = round(e * sr)
            audio_copy[s:e] = 0
        if channel is None:
            audio = audio_copy[None]
        else:
            # muted copy on the requested channel, original on the other
            audio = (audio_copy, audio) if channel == 'l' else (audio, audio_copy)
            audio = torch.stack(audio, dim=0)
    try:
        torchaudio.save(path, audio, sr)
    except ValueError as e:
        warnings.warn(f'Failed to save audio to "{path}". Error: {e}', stacklevel=2)
    else:
        if verbose is not None:
            print(f'Saved: "{path}"')
The provided code snippet includes necessary dependencies for implementing the `dfnet_audio` function. Write a Python function `def dfnet_audio( audio: Union[torch.Tensor, str, bytes], input_sr: int = None, output_sr: int = None, model=None, device=None, verbose: bool = True, save_path: Optional[Union[str, callable]] = None, **dfnet_options ) -> torch.Tensor` to solve the following problem:
Remove noise from ``audio`` with DeepFilterNet. Official repo: https://github.com/Rikorose/DeepFilterNet.
Here is the function:
def dfnet_audio(
        audio: Union[torch.Tensor, str, bytes],
        input_sr: int = None,
        output_sr: int = None,
        model=None,
        device=None,
        verbose: bool = True,
        save_path: Optional[Union[str, callable]] = None,
        **dfnet_options
) -> torch.Tensor:
    """
    Remove noise from ``audio`` with DeepFilterNet.

    Official repo: https://github.com/Rikorose/DeepFilterNet.

    Parameters
    ----------
    audio : torch.Tensor or str or bytes
        Audio samples, or a path / raw bytes decodable by ``load_audio``.
    input_sr : int, optional
        Sample rate of ``audio`` when it is a tensor; required if it differs from the model's rate.
    output_sr : int, optional
        Resample the denoised audio to this rate before returning/saving.
    model : optional
        A preloaded DeepFilterNet model; loaded via ``load_dfnet_model()`` when omitted.
    device : str, optional
        Only used to decide whether to release cached CUDA memory afterwards.
    verbose : bool, default True
        Forwarded to ``save_audio_tensor`` when ``save_path`` is a path.
    save_path : str or callable, optional
        File path to save the result to, or a callable that receives the denoised tensor.
    dfnet_options
        Extra keyword arguments forwarded to ``model.enhance`` (``progress`` is not supported).

    Returns
    -------
    torch.Tensor
        The denoised (mono) audio.
    """
    if model is None:
        model = load_dfnet_model()
    if isinstance(audio, (str, bytes)):
        audio = torch.from_numpy(load_audio(audio, model.samplerate))
    elif input_sr != model.samplerate:
        if input_sr is None:
            raise ValueError('No ``input_sr`` specified for audio tensor.')
        audio = resample(audio, input_sr, model.samplerate)
    audio_dims = audio.dim()
    assert audio_dims <= 2
    # Prepend singleton dims until the tensor is 2D (channels, samples).
    # Avoids ``audio[[None]*n]``: indexing with a list of Nones is deprecated in PyTorch.
    while audio.dim() < 2:
        audio = audio.unsqueeze(0)
    # DeepFilterNet expects stereo input; duplicate a mono channel.
    if audio.shape[-2] == 1:
        audio = audio.repeat_interleave(2, -2)
    dfnet_options.pop('progress', None)  # not implemented
    # Enhance, then downmix the stereo result back to mono.
    denoised_audio = model.enhance(audio=audio, **dfnet_options).mean(dim=0)
    if device != 'cpu':
        # No-op when CUDA was never initialized.
        torch.cuda.empty_cache()
    if output_sr is not None and model.samplerate != output_sr:
        denoised_audio = resample(denoised_audio, model.samplerate, output_sr)
    if save_path is not None:
        if isinstance(save_path, str):
            from .output import save_audio_tensor
            save_audio_tensor(denoised_audio, save_path, output_sr or model.samplerate, verbose=verbose)
        else:
            save_path(denoised_audio)
    return denoised_audio
13,852 | import os
from typing import Optional
def set_val(key: str, val):
    """Store ``val`` as the library-wide default for ``key``.

    ``has_key`` validates that ``key`` is a recognized option before the write.
    """
    has_key(key)
    DEFAULT_KWARGS[key] = val
def get_val(key: str, default=None):
    """Return ``default`` when given, otherwise the stored library default for ``key``."""
    if default is not None:
        # An explicit caller-supplied value always wins over the stored default.
        return default
    has_key(key)
    return DEFAULT_KWARGS[key]
def set_get_val(key: str, new_val=None):
    """Update ``key`` with ``new_val`` when provided, then return the current value."""
    if new_val is None:
        return get_val(key)
    set_val(key, new_val)
    return get_val(key)
13,853 | from typing import Tuple
import numpy as np
import torch
from torch.nn import functional as F
from .utils import mask2timing, timing2mask
from ..audio.utils import audio_to_tensor_resample
from whisper.audio import N_SAMPLES_PER_TOKEN
def wav2mask(
        audio: (torch.Tensor, np.ndarray, str, bytes),
        q_levels: int = 20,
        k_size: int = 5,
        sr: int = None
) -> (Tuple[torch.Tensor, Tuple[np.ndarray, np.ndarray]], None):
    """
    Generate 1D mask from waveform for suppressing timestamp tokens.

    Parameters
    ----------
    audio : torch.Tensor or numpy.ndarray or str or bytes
        Waveform samples, or any source decodable by ``audio_to_tensor_resample``.
    q_levels : int, default 20
        Quantization levels applied to the smoothed loudness; 0 disables quantization.
    k_size : int, default 5
        Kernel size (must be odd) of the average pool used to smooth the loudness curve.
    sr : int, optional
        Sample rate of ``audio``; forwarded to ``audio_to_tensor_resample``.

    Returns
    -------
    torch.Tensor or None
        Boolean mask that is ``True`` where the audio is considered silent, or ``None`` when
        no loudness data is available or no silence is found.
    """
    audio = audio_to_tensor_resample(audio, sr, NONVAD_SAMPLE_RATES)
    # audio2loudness presumably returns a per-unit loudness curve in [0, 1] — TODO confirm.
    loudness_tensor = audio2loudness(audio)
    if loudness_tensor is None:
        return
    # Smooth the loudness curve with a same-length average pool (reflect padding keeps length).
    p = k_size // 2 if k_size else 0
    if p and p < loudness_tensor.shape[-1]:
        assert k_size % 2, f'kernel_size must be odd but got {k_size}'
        mask = torch.avg_pool1d(
            F.pad(
                loudness_tensor[None],
                (p, p),
                'reflect'
            ),
            kernel_size=k_size,
            stride=1
        )[0]
    else:
        mask = loudness_tensor.clone()
    # Quantize so near-zero loudness rounds down to 0 (treated as silence after .bool()).
    if q_levels:
        mask = mask.mul(q_levels).round()
    # After .bool(): True marks loud (non-silent) units.
    mask = mask.bool()
    if not mask.any():  # entirely silent: return an all-True silence mask
        return ~mask
    # Keep only loud sections longer than 0.1s, then invert so True marks silence.
    temp_timings = mask2timing(mask)
    s, e = temp_timings
    se_mask = (e - s) > 0.1
    s = s[se_mask]
    e = e[se_mask]
    mask = ~timing2mask(s, e, loudness_tensor.shape[-1])
    if not mask.any():  # no silence
        return
    return mask
def mask2timing(
        silence_mask: (np.ndarray, torch.Tensor),
        time_offset: float = 0.0,
        second_per_unit: Optional[float] = None,
        min_start: Optional[float] = None,
        max_end: Optional[float] = None
) -> (Tuple[np.ndarray, np.ndarray], None):
    """
    Return start/end timestamp arrays for every contiguous ``True`` run in ``silence_mask``.

    ``None`` is returned when the mask is missing, empty, or entirely ``False``.
    Units default to timestamp tokens (``TOKENS_PER_SECOND``) unless ``second_per_unit``
    is given. ``time_offset`` shifts all timestamps; ``min_start``/``max_end`` clamp the
    first start and last end into range.
    """
    if silence_mask is None or not silence_mask.any() or not len(silence_mask):
        return
    assert silence_mask.ndim == 1
    if isinstance(silence_mask, torch.Tensor):
        mask_arr = silence_mask.cpu().numpy().copy()
    elif isinstance(silence_mask, np.ndarray):
        mask_arr = silence_mask
    else:
        raise NotImplementedError(f'Expected torch.Tensor or numpy.ndarray, but got {type(silence_mask)}')
    # Pad with False on both sides so runs touching either edge still produce transitions.
    padded = np.concatenate(([False], mask_arr, [False]))
    # A start is a False->True transition; an end is a True->False transition.
    silent_starts = np.flatnonzero(np.logical_and(~padded[:-2], padded[1:-1]))
    silent_ends = np.flatnonzero(np.logical_and(padded[1:-1], ~padded[2:])) + 1
    # Convert unit indices to seconds.
    if second_per_unit is None:
        silent_starts = silent_starts / TOKENS_PER_SECOND
        silent_ends = silent_ends / TOKENS_PER_SECOND
    else:
        silent_starts = silent_starts * second_per_unit
        silent_ends = silent_ends * second_per_unit
    if time_offset:
        silent_starts += time_offset
        silent_ends += time_offset
    # Clamp the outermost timestamps into the allowed range.
    if min_start is not None and silent_starts[0] < min_start:
        assert min_start <= silent_ends[0]
        silent_starts[0] = min_start
    if max_end is not None and silent_ends[-1] > max_end:
        assert max_end >= silent_starts[-1]
        silent_ends[-1] = max_end
    return silent_starts, silent_ends
def audio2timings(
        audio: (torch.Tensor, np.ndarray, str, bytes),
        q_levels: int = 20,
        k_size: int = 5,
        sr: int = None
) -> (Tuple[np.ndarray, np.ndarray], None):
    """Convenience wrapper: compute the silence mask of ``audio`` and convert it to timings."""
    silence_mask = wav2mask(audio, q_levels=q_levels, k_size=k_size, sr=sr)
    return mask2timing(silence_mask)
13,854 | from typing import Tuple
import numpy as np
import torch
from torch.nn import functional as F
from .utils import mask2timing, timing2mask
from ..audio.utils import audio_to_tensor_resample
from whisper.audio import N_SAMPLES_PER_TOKEN
def visualize_mask(
        loudness_tensor: torch.Tensor,
        silence_mask: torch.Tensor = None,
        width: int = 1500,
        height: int = 200,
        output: str = None,
):
    """
    Render ``loudness_tensor`` as a waveform image, tinting silent regions per ``silence_mask``.

    Parameters
    ----------
    loudness_tensor : torch.Tensor
        1D loudness values drawn as symmetric bars around the vertical midline
        (assumed roughly in [-1, 1] — TODO confirm against ``audio2loudness``).
    silence_mask : torch.Tensor, optional
        Boolean mask aligned with ``loudness_tensor``; ``True`` marks silence.
    width : int, default 1500
        Image width in pixels; -1 uses one pixel per loudness unit.
    height : int, default 200
        Image height in pixels.
    output : str, optional
        PNG path to save to; when omitted the image is displayed instead.

    Raises
    ------
    NotImplementedError
        If ``loudness_tensor`` has fewer than 2 samples.
    ModuleNotFoundError
        If neither Pillow nor OpenCV is installed.
    """
    no_silence = silence_mask is None or not silence_mask.any()
    assert no_silence or silence_mask.shape[0] == loudness_tensor.shape[0]
    if loudness_tensor.shape[0] < 2:
        raise NotImplementedError(f'audio size, {loudness_tensor.shape[0]}, is too short to visualize')
    else:
        width = loudness_tensor.shape[0] if width == -1 else width
        im = torch.zeros((height, width, 3), dtype=torch.uint8)
        mid = round(height / 2)
        # Draw each loudness value as a white vertical bar centered on the midline.
        for i, j in enumerate(loudness_tensor.tolist()):
            j = round(abs(j) * mid)
            if j == 0 or width <= i:
                continue
            im[mid - j:mid + 1, i] = 255
            im[mid + 1:mid + j + 1, i] = 255
        if not no_silence:
            # Zero the G/B channels on silent columns so silence appears red (in RGB).
            im[:, silence_mask[:width], 1:] = 0
        im = im.cpu().numpy()
        if output and not output.endswith('.png'):
            output += '.png'
        # Prefer Pillow; fall back to OpenCV (which expects BGR channel order).
        try:
            from PIL import Image
        except ModuleNotFoundError:
            try:
                import cv2
            except ModuleNotFoundError:
                raise ModuleNotFoundError('Failed to import "PIL" or "cv2" to visualize suppression mask. '
                                          'Try "pip install Pillow" or "pip install opencv-python"')
            else:
                # RGB -> BGR for OpenCV.
                im = im[..., [2, 1, 0]]
                if isinstance(output, str):
                    cv2.imwrite(output, im)
                else:
                    cv2.imshow('image', im)
                    cv2.waitKey(0)
        else:
            im = Image.fromarray(im)
            if isinstance(output, str):
                im.save(output)
            else:
                im.show(im)
        if output:
            print(f'Save: {output}')
13,855 | import warnings
from typing import Optional, List
import torch
from tqdm import tqdm
from .utils import SetTorchThread
from ..default import cached_model_instances
# Module-level cache of loaded model instances, keyed by model family then variant
# (``silero_vad`` is keyed by the ``onnx`` flag). ``None`` means not yet loaded.
cached_model_instances = dict(
    demucs={
        'htdemucs': None
    },
    silero_vad={
        True: None,
        False: None
    },
    dfnet={
        'dfnet': None
    }
)
def load_silero_vad_model(onnx=False, verbose: Optional[bool] = False, cache: bool = True):
    """
    Load the Silero VAD model and its speech-timestamp helper from torch.hub.

    Returns a ``(model, get_speech_timestamps)`` tuple. When ``cache`` is True, a
    previously loaded instance for the same ``onnx`` flag is reused.
    """
    model_cache = cached_model_instances['silero_vad'] if cache else None
    cached = None if model_cache is None else model_cache[onnx]
    if cached is not None:
        return cached
    model, utils = torch.hub.load(
        repo_or_dir='snakers4/silero-vad:master',
        model='silero_vad',
        verbose=verbose,
        onnx=onnx,
        trust_repo=True
    )
    get_ts = utils[0]
    if model_cache is not None:
        model_cache[onnx] = (model, get_ts)
    # Silence a noisy TorchScript profiling warning emitted by the model.
    warnings.filterwarnings('ignore', message=r'operator \(\) profile_node.*', category=UserWarning)
    return model, get_ts
13,856 | import warnings
from typing import Optional, List
import torch
from tqdm import tqdm
from .utils import SetTorchThread
from ..default import cached_model_instances
class SetTorchThread:
    """Context manager that temporarily limits Torch to ``temp_thread_count`` CPU threads."""

    def __init__(self, temp_thread_count: int):
        self.temp_thread_count = temp_thread_count
        # Snapshot the current setting so ``__exit__`` can restore it.
        self.original_thread_count = torch.get_num_threads()

    def __enter__(self):
        torch.set_num_threads(self.temp_thread_count)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore regardless of whether the body raised.
        torch.set_num_threads(self.original_thread_count)
def compute_vad_probs(
        model,
        audio: torch.Tensor,
        sampling_rate: int,
        window: int,
        progress: bool = True
) -> List[float]:
    """
    Run ``model`` over ``audio`` in fixed-size windows and collect one speech probability per window.

    The trailing partial window is zero-padded to ``window`` samples. Inference runs with a
    single Torch thread (presumably faster for the tiny per-chunk model — TODO confirm).
    """
    duration = round(audio.shape[-1] / sampling_rate, 2)
    speech_probs = []
    pbar = tqdm(total=duration, unit='sec', desc='VAD', disable=not progress)
    with SetTorchThread(1), pbar:
        for current_start_sample in range(0, audio.shape[-1], window):
            chunk = audio[current_start_sample: current_start_sample + window]
            if len(chunk) < window:
                # Zero-pad the final partial window to the expected length.
                chunk = torch.nn.functional.pad(chunk, (0, int(window - len(chunk))))
            speech_probs.append(model(chunk.cpu(), sampling_rate).item())
            if not pbar.disable:
                # Report progress in seconds, clamped to the total duration.
                seek_duration = min(
                    round((current_start_sample + window) / sampling_rate, 2),
                    duration
                )
                pbar.update(seek_duration - pbar.n)
    return speech_probs
13,857 | import warnings
from typing import Optional, List
import torch
from tqdm import tqdm
from .utils import SetTorchThread
from ..default import cached_model_instances
# Sample rates and window sizes accepted by the Silero VAD model.
VAD_SAMPLE_RATES = (16000, 8000)
VAD_WINDOWS = (256, 512, 768, 1024, 1536)


def assert_sr_window(sr: int, window: int):
    """Raise ``AssertionError`` if ``sr`` or ``window`` is unsupported by Silero VAD.

    Uses explicit raises instead of ``assert`` statements so the validation still
    runs when Python is invoked with ``-O`` (which strips asserts); the exception
    type and messages are unchanged for existing callers.
    """
    if sr not in VAD_SAMPLE_RATES:
        raise AssertionError(f'{sr} not in {VAD_SAMPLE_RATES}')
    if window not in VAD_WINDOWS:
        raise AssertionError(f'{window} not in {VAD_WINDOWS}')
13,858 | import json
import os
import warnings
from typing import List, Tuple, Union, Callable
from itertools import chain
from .stabilization.utils import valid_ts
def segment2vttblock(segment: dict, strip=True) -> str:
    """Format one segment as a WEBVTT cue: a timing line followed by its text."""
    timing_line = f'{sec2vtt(segment["start"])} --> {sec2vtt(segment["end"])}'
    return '\n'.join((timing_line, finalize_text(segment["text"], strip)))
def to_vtt_word_level_segments(segments: List[dict], tag: Tuple[str, str] = None) -> List[dict]:
    """
    Convert word-level ``segments`` into segment dicts whose text embeds VTT inline cue timestamps.

    Each word boundary becomes a ``<timestamp>`` tag; when a word does not start exactly where
    the previous one ended, both the previous end and the new start are emitted, separated by
    a space. ``tag`` is accepted for interface compatibility but unused here — VTT players
    style inline cue timestamps natively.
    """
    def to_segment_string(segment: dict):
        segment_string = ''
        prev_end = 0
        for i, word in enumerate(segment['words']):
            if i != 0:
                curr_start = word['start']
                if prev_end == curr_start:
                    # Contiguous words share a single boundary timestamp.
                    segment_string += f"<{sec2vtt(curr_start)}>"
                else:
                    # Gap between words: move any separating space outside the tag pair.
                    # NOTE(review): mutates the word dict in place when stripping its leading space.
                    if segment_string.endswith(' '):
                        segment_string = segment_string[:-1]
                    elif segment['words'][i]['word'].startswith(' '):
                        segment['words'][i]['word'] = segment['words'][i]['word'][1:]
                    segment_string += f"<{sec2vtt(prev_end)}> <{sec2vtt(curr_start)}>"
            segment_string += word['word']
            prev_end = word['end']
        return segment_string

    return [
        dict(
            text=to_segment_string(s),
            start=s['start'],
            end=s['end']
        )
        for s in segments
    ]
def result_to_any(result: (dict, list),
                  filepath: str = None,
                  filetype: str = None,
                  segments2blocks: Callable = None,
                  segment_level=True,
                  word_level=True,
                  min_dur: float = 0.02,
                  tag: Tuple[str, str] = None,
                  default_tag: Tuple[str, str] = None,
                  strip=True,
                  reverse_text: Union[bool, tuple] = False,
                  to_word_level_string_callback: Callable = None):
    """
    Generate file from ``result`` to display segment-level and/or word-level timestamp.

    Shared backend for the SRT/VTT/ASS/TSV writers: normalizes the segments, resolves the
    output format from ``filetype``/``filepath``, applies word-level tagging, then either
    writes the rendered blocks to ``filepath`` or returns them as a string.

    Returns
    -------
    str
        String of the content if ``filepath`` is ``None``.
    """
    segments, segment_level, word_level = _preprocess_args(
        result, segment_level, word_level, min_dur, reverse_text=reverse_text
    )
    # Infer the format from the file extension when not given explicitly.
    # NOTE(review): filetype=None together with filepath=None would raise here — confirm
    # that all callers supply at least one of the two.
    if filetype is None:
        filetype = os.path.splitext(filepath)[-1][1:] or 'srt'
    if filetype.lower() not in SUPPORTED_FORMATS:
        raise NotImplementedError(f'{filetype} not supported')
    # Make sure the saved file carries the matching extension.
    if filepath and not filepath.lower().endswith(f'.{filetype}'):
        filepath += f'.{filetype}'
    if word_level and segment_level:
        # Combined mode: highlight each word within its segment using ``tag``.
        if tag is None:
            if default_tag is None:
                tag = ('<font color="#00ff00">', '</font>') if filetype == 'srt' else ('<u>', '</u>')
            else:
                tag = default_tag
        if to_word_level_string_callback is None:
            to_word_level_string_callback = to_word_level_segments
        segments = to_word_level_string_callback(segments, tag)
    elif word_level:
        # Word-only mode: one block per word.
        segments = to_word_level(segments)
    if not valid_ts(segments, warn=False):
        warnings.warn(message='Result contains out of order timestamp(s). Output file may not playback properly.')
    # Default block renderer produces SRT; other formats pass their own ``segments2blocks``.
    if segments2blocks is None:
        sub_str = '\n\n'.join(segment2srtblock(s, i, strip=strip) for i, s in enumerate(segments))
    else:
        sub_str = segments2blocks(segments)
    if filepath:
        _save_as_file(sub_str, filepath)
    else:
        return sub_str
The provided code snippet includes necessary dependencies for implementing the `result_to_srt_vtt` function. Write a Python function `def result_to_srt_vtt(result: (dict, list), filepath: str = None, segment_level=True, word_level=True, min_dur: float = 0.02, tag: Tuple[str, str] = None, vtt: bool = None, strip=True, reverse_text: Union[bool, tuple] = False)` to solve the following problem:
Generate SRT/VTT from ``result`` to display segment-level and/or word-level timestamp. Parameters ---------- result : dict or list or stable_whisper.result.WhisperResult Result of transcription. filepath : str, default None, meaning content will be returned as a ``str`` Path to save file. segment_level : bool, default True Whether to use segment-level timestamps in output. word_level : bool, default True Whether to use word-level timestamps in output. min_dur : float, default 0.2 Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments. tag: tuple of (str, str), default None, meaning ('<font color="#00ff00">', '</font>') if SRT else ('<u>', '</u>') Tag used to change the properties a word at its timestamp. vtt : bool, default None, meaning determined by extension of ``filepath`` or ``False`` if no valid extension. Whether to output VTT. strip : bool, default True Whether to remove spaces before and after text on each segment for output. reverse_text: bool or tuple, default False Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and ``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations. Returns ------- str String of the content if ``filepath`` is ``None``. Notes ----- ``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video player. VLC seems to not suffer from this issue. Examples -------- >>> import stable_whisper >>> model = stable_whisper.load_model('base') >>> result = model.transcribe('audio.mp3') >>> result.to_srt_vtt('audio.srt') Saved: audio.srt
Here is the function:
def result_to_srt_vtt(result: (dict, list),
                      filepath: str = None,
                      segment_level=True,
                      word_level=True,
                      min_dur: float = 0.02,
                      tag: Tuple[str, str] = None,
                      vtt: bool = None,
                      strip=True,
                      reverse_text: Union[bool, tuple] = False):
    """
    Generate SRT/VTT from ``result`` to display segment-level and/or word-level timestamps.

    Parameters
    ----------
    result : dict or list or stable_whisper.result.WhisperResult
        Result of transcription.
    filepath : str, default None, meaning content will be returned as a ``str``
        Path to save file.
    segment_level : bool, default True
        Whether to use segment-level timestamps in output.
    word_level : bool, default True
        Whether to use word-level timestamps in output.
    min_dur : float, default 0.02
        Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments.
    tag: tuple of (str, str), default None, meaning ('<font color="#00ff00">', '</font>') if SRT else ('<u>', '</u>')
        Tag used to change the properties of a word at its timestamp.
    vtt : bool, default None, meaning determined by extension of ``filepath`` or ``False`` if no valid extension.
        Whether to output VTT.
    strip : bool, default True
        Whether to remove spaces before and after text on each segment for output.
    reverse_text: bool or tuple, default False
        Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and
        ``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations.

    Returns
    -------
    str
        String of the content if ``filepath`` is ``None``.

    Notes
    -----
    ``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video players.
    VLC seems to not suffer from this issue.

    Examples
    --------
    >>> import stable_whisper
    >>> model = stable_whisper.load_model('base')
    >>> result = model.transcribe('audio.mp3')
    >>> result.to_srt_vtt('audio.srt')
    Saved: audio.srt
    """
    # SRT unless ``vtt`` is forced True or the path ends in .vtt.
    is_srt = (filepath is None or not filepath.lower().endswith('.vtt')) if vtt is None else not vtt
    if is_srt:
        segments2blocks = None
        to_word_level_string_callback = None
    else:
        def segments2blocks(segments):
            # VTT output needs the WEBVTT header; cues are separated by blank lines.
            return 'WEBVTT\n\n' + '\n\n'.join(segment2vttblock(s, strip=strip) for s in segments)

        # Use VTT inline cue timestamps only for default styling. With a custom ``tag``,
        # fall back (via None) to the generic tag-wrapping inside ``result_to_any``.
        # (Previously the ``tag`` tuple itself was assigned here; a tuple is not callable,
        # so any custom tag with VTT output crashed in ``result_to_any``.)
        to_word_level_string_callback = to_vtt_word_level_segments if tag is None else None
    return result_to_any(
        result=result,
        filepath=filepath,
        filetype=('vtt', 'srt')[is_srt],
        segments2blocks=segments2blocks,
        segment_level=segment_level,
        word_level=word_level,
        min_dur=min_dur,
        tag=tag,
        strip=strip,
        reverse_text=reverse_text,
        to_word_level_string_callback=to_word_level_string_callback
    )
13,859 | import json
import os
import warnings
from typing import List, Tuple, Union, Callable
from itertools import chain
from .stabilization.utils import valid_ts
def segment2tsvblock(segment: dict, strip=True) -> str:
    """Format one segment as a TSV row: start-milliseconds, end-milliseconds, text."""
    text = segment["text"].strip() if strip else segment["text"]
    return '\t'.join((
        f'{sec2milliseconds(segment["start"])}',
        f'{sec2milliseconds(segment["end"])}',
        text,
    ))
def result_to_any(result: (dict, list),
                  filepath: str = None,
                  filetype: str = None,
                  segments2blocks: Callable = None,
                  segment_level=True,
                  word_level=True,
                  min_dur: float = 0.02,
                  tag: Tuple[str, str] = None,
                  default_tag: Tuple[str, str] = None,
                  strip=True,
                  reverse_text: Union[bool, tuple] = False,
                  to_word_level_string_callback: Callable = None):
    """
    Generate file from ``result`` to display segment-level and/or word-level timestamp.

    Shared backend for the SRT/VTT/ASS/TSV writers: normalizes the segments, resolves the
    output format from ``filetype``/``filepath``, applies word-level tagging, then either
    writes the rendered blocks to ``filepath`` or returns them as a string.

    Returns
    -------
    str
        String of the content if ``filepath`` is ``None``.
    """
    segments, segment_level, word_level = _preprocess_args(
        result, segment_level, word_level, min_dur, reverse_text=reverse_text
    )
    # Infer the format from the file extension when not given explicitly.
    # NOTE(review): filetype=None together with filepath=None would raise here — confirm
    # that all callers supply at least one of the two.
    if filetype is None:
        filetype = os.path.splitext(filepath)[-1][1:] or 'srt'
    if filetype.lower() not in SUPPORTED_FORMATS:
        raise NotImplementedError(f'{filetype} not supported')
    # Make sure the saved file carries the matching extension.
    if filepath and not filepath.lower().endswith(f'.{filetype}'):
        filepath += f'.{filetype}'
    if word_level and segment_level:
        # Combined mode: highlight each word within its segment using ``tag``.
        if tag is None:
            if default_tag is None:
                tag = ('<font color="#00ff00">', '</font>') if filetype == 'srt' else ('<u>', '</u>')
            else:
                tag = default_tag
        if to_word_level_string_callback is None:
            to_word_level_string_callback = to_word_level_segments
        segments = to_word_level_string_callback(segments, tag)
    elif word_level:
        # Word-only mode: one block per word.
        segments = to_word_level(segments)
    if not valid_ts(segments, warn=False):
        warnings.warn(message='Result contains out of order timestamp(s). Output file may not playback properly.')
    # Default block renderer produces SRT; other formats pass their own ``segments2blocks``.
    if segments2blocks is None:
        sub_str = '\n\n'.join(segment2srtblock(s, i, strip=strip) for i, s in enumerate(segments))
    else:
        sub_str = segments2blocks(segments)
    if filepath:
        _save_as_file(sub_str, filepath)
    else:
        return sub_str
The provided code snippet includes necessary dependencies for implementing the `result_to_tsv` function. Write a Python function `def result_to_tsv(result: (dict, list), filepath: str = None, segment_level: bool = None, word_level: bool = None, min_dur: float = 0.02, strip=True, reverse_text: Union[bool, tuple] = False)` to solve the following problem:
Generate TSV from ``result`` to display segment-level and/or word-level timestamp. Parameters ---------- result : dict or list or stable_whisper.result.WhisperResult Result of transcription. filepath : str, default None, meaning content will be returned as a ``str`` Path to save file. segment_level : bool, default True Whether to use segment-level timestamps in output. word_level : bool, default True Whether to use word-level timestamps in output. min_dur : float, default 0.2 Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments. strip : bool, default True Whether to remove spaces before and after text on each segment for output. reverse_text: bool or tuple, default False Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and ``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations. Returns ------- str String of the content if ``filepath`` is ``None``. Notes ----- ``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video player. VLC seems to not suffer from this issue. Examples -------- >>> import stable_whisper >>> model = stable_whisper.load_model('base') >>> result = model.transcribe('audio.mp3') >>> result.to_tsv('audio.tsv') Saved: audio.tsv
Here is the function:
def result_to_tsv(result: (dict, list),
                  filepath: str = None,
                  segment_level: bool = None,
                  word_level: bool = None,
                  min_dur: float = 0.02,
                  strip=True,
                  reverse_text: Union[bool, tuple] = False):
    """
    Generate TSV from ``result`` to display segment-level and/or word-level timestamp.

    Parameters
    ----------
    result : dict or list or stable_whisper.result.WhisperResult
        Result of transcription.
    filepath : str, default None, meaning content will be returned as a ``str``
        Path to save file.
    segment_level : bool, default True
        Whether to use segment-level timestamps in output.
    word_level : bool, default True
        Whether to use word-level timestamps in output.
    min_dur : float, default 0.02
        Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments.
    strip : bool, default True
        Whether to remove spaces before and after text on each segment for output.
    reverse_text: bool or tuple, default False
        Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and
        ``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations.

    Returns
    -------
    str
        String of the content if ``filepath`` is ``None``.

    Notes
    -----
    ``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video players.
    VLC seems to not suffer from this issue.

    Examples
    --------
    >>> import stable_whisper
    >>> model = stable_whisper.load_model('base')
    >>> result = model.transcribe('audio.mp3')
    >>> result.to_tsv('audio.tsv')
    Saved: audio.tsv
    """
    # Default to segment-level output when neither level is specified.
    if segment_level is None and word_level is None:
        segment_level = True
    # TSV has no tag support, so exactly one level must be enabled. Explicit raise
    # (instead of ``assert``) so the validation survives ``python -O``; exception
    # type is unchanged for existing callers.
    if word_level is segment_level:
        raise AssertionError('[word_level] and [segment_level] cannot be the same '
                             'since [tag] is not supported for this format')

    def segments2blocks(segments):
        return '\n\n'.join(segment2tsvblock(s, strip=strip) for s in segments)

    return result_to_any(
        result=result,
        filepath=filepath,
        filetype='tsv',
        segments2blocks=segments2blocks,
        segment_level=segment_level,
        word_level=word_level,
        min_dur=min_dur,
        strip=strip,
        reverse_text=reverse_text
    )
13,860 | import json
import os
import warnings
from typing import List, Tuple, Union, Callable
from itertools import chain
from .stabilization.utils import valid_ts
def segment2assblock(segment: dict, idx: int, strip=True) -> str:
    """Render one segment as an ASS ``Dialogue`` event on layer ``idx``."""
    start = sec2ass(segment["start"])
    end = sec2ass(segment["end"])
    return f'Dialogue: {idx},{start},{end},Default,,0,0,0,,{finalize_text(segment["text"], strip)}'
def to_ass_word_level_segments(segments: List[dict], tag: Tuple[str, str], karaoke: bool = False) -> List[dict]:
    """
    Convert word-level ``segments`` into segment dicts whose text carries ASS karaoke tags.

    Each word is prefixed with ``{\\k<centiseconds>}`` (or ``{\\kf...}`` when ``karaoke``
    is True, for progressive fill). ``tag`` is accepted for interface compatibility but
    unused by this renderer.
    """
    k_code = r'{\kf' if karaoke else r'{\k'

    def to_segment_string(segment: dict):
        pieces = []
        for word in segment['words']:
            raw = word['word']
            # Move a leading space outside the karaoke tag.
            space, text = (' ', raw[1:]) if raw.startswith(' ') else ('', raw)
            duration = sec2centiseconds(word['end'] - word['start'])
            pieces.append(f'{space}{k_code}{duration}}}{text}')
        return ''.join(pieces)

    return [
        dict(text=to_segment_string(s), start=s['start'], end=s['end'])
        for s in segments
    ]
def result_to_any(result: (dict, list),
filepath: str = None,
filetype: str = None,
segments2blocks: Callable = None,
segment_level=True,
word_level=True,
min_dur: float = 0.02,
tag: Tuple[str, str] = None,
default_tag: Tuple[str, str] = None,
strip=True,
reverse_text: Union[bool, tuple] = False,
to_word_level_string_callback: Callable = None):
"""
Generate file from ``result`` to display segment-level and/or word-level timestamp.
Returns
-------
str
String of the content if ``filepath`` is ``None``.
"""
segments, segment_level, word_level = _preprocess_args(
result, segment_level, word_level, min_dur, reverse_text=reverse_text
)
if filetype is None:
filetype = os.path.splitext(filepath)[-1][1:] or 'srt'
if filetype.lower() not in SUPPORTED_FORMATS:
raise NotImplementedError(f'{filetype} not supported')
if filepath and not filepath.lower().endswith(f'.{filetype}'):
filepath += f'.{filetype}'
if word_level and segment_level:
if tag is None:
if default_tag is None:
tag = ('<font color="#00ff00">', '</font>') if filetype == 'srt' else ('<u>', '</u>')
else:
tag = default_tag
if to_word_level_string_callback is None:
to_word_level_string_callback = to_word_level_segments
segments = to_word_level_string_callback(segments, tag)
elif word_level:
segments = to_word_level(segments)
if not valid_ts(segments, warn=False):
warnings.warn(message='Result contains out of order timestamp(s). Output file may not playback properly.')
if segments2blocks is None:
sub_str = '\n\n'.join(segment2srtblock(s, i, strip=strip) for i, s in enumerate(segments))
else:
sub_str = segments2blocks(segments)
if filepath:
_save_as_file(sub_str, filepath)
else:
return sub_str
The provided code snippet includes necessary dependencies for implementing the `result_to_ass` function. Write a Python function `def result_to_ass(result: (dict, list), filepath: str = None, segment_level=True, word_level=True, min_dur: float = 0.02, tag: Union[Tuple[str, str], int] = None, font: str = None, font_size: int = 24, strip=True, highlight_color: str = None, karaoke=False, reverse_text: Union[bool, tuple] = False, **kwargs)` to solve the following problem:
Generate Advanced SubStation Alpha (ASS) file from ``result`` to display segment-level and/or word-level timestamp. Parameters ---------- result : dict or list or stable_whisper.result.WhisperResult Result of transcription. filepath : str, default None, meaning content will be returned as a ``str`` Path to save file. segment_level : bool, default True Whether to use segment-level timestamps in output. word_level : bool, default True Whether to use word-level timestamps in output. min_dur : float, default 0.2 Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments. tag: tuple of (str, str) or int, default None, meaning use default highlighting Tag used to change the properties a word at its timestamp. -1 for individual word highlight tag. font : str, default `Arial` Word font. font_size : int, default 48 Word font size. strip : bool, default True Whether to remove spaces before and after text on each segment for output. highlight_color : str, default '00ff00' Hexadecimal of the color use for default highlights as '<bb><gg><rr>'. karaoke : bool, default False Whether to use progressive filling highlights (for karaoke effect). reverse_text: bool or tuple, default False Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and ``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations. kwargs: Format styles: 'Name', 'Fontname', 'Fontsize', 'PrimaryColour', 'SecondaryColour', 'OutlineColour', 'BackColour', 'Bold', 'Italic', 'Underline', 'StrikeOut', 'ScaleX', 'ScaleY', 'Spacing', 'Angle', 'BorderStyle', 'Outline', 'Shadow', 'Alignment', 'MarginL', 'MarginR', 'MarginV', 'Encoding' Returns ------- str String of the content if ``filepath`` is ``None``. Notes ----- ``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video player. VLC seems to not suffer from this issue. 
Examples -------- >>> import stable_whisper >>> model = stable_whisper.load_model('base') >>> result = model.transcribe('audio.mp3') >>> result.to_ass('audio.ass') Saved: audio.ass
Here is the function:
def result_to_ass(result: (dict, list),
filepath: str = None,
segment_level=True,
word_level=True,
min_dur: float = 0.02,
tag: Union[Tuple[str, str], int] = None,
font: str = None,
font_size: int = 24,
strip=True,
highlight_color: str = None,
karaoke=False,
reverse_text: Union[bool, tuple] = False,
**kwargs):
"""
Generate Advanced SubStation Alpha (ASS) file from ``result`` to display segment-level and/or word-level timestamp.
Parameters
----------
result : dict or list or stable_whisper.result.WhisperResult
Result of transcription.
filepath : str, default None, meaning content will be returned as a ``str``
Path to save file.
segment_level : bool, default True
Whether to use segment-level timestamps in output.
word_level : bool, default True
Whether to use word-level timestamps in output.
min_dur : float, default 0.2
Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments.
tag: tuple of (str, str) or int, default None, meaning use default highlighting
Tag used to change the properties a word at its timestamp. -1 for individual word highlight tag.
font : str, default `Arial`
Word font.
font_size : int, default 48
Word font size.
strip : bool, default True
Whether to remove spaces before and after text on each segment for output.
highlight_color : str, default '00ff00'
Hexadecimal of the color use for default highlights as '<bb><gg><rr>'.
karaoke : bool, default False
Whether to use progressive filling highlights (for karaoke effect).
reverse_text: bool or tuple, default False
Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and
``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations.
kwargs:
Format styles:
'Name', 'Fontname', 'Fontsize', 'PrimaryColour', 'SecondaryColour', 'OutlineColour', 'BackColour', 'Bold',
'Italic', 'Underline', 'StrikeOut', 'ScaleX', 'ScaleY', 'Spacing', 'Angle', 'BorderStyle', 'Outline',
'Shadow', 'Alignment', 'MarginL', 'MarginR', 'MarginV', 'Encoding'
Returns
-------
str
String of the content if ``filepath`` is ``None``.
Notes
-----
``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video player. VLC
seems to not suffer from this issue.
Examples
--------
>>> import stable_whisper
>>> model = stable_whisper.load_model('base')
>>> result = model.transcribe('audio.mp3')
>>> result.to_ass('audio.ass')
Saved: audio.ass
"""
if tag == ['-1']: # CLI
tag = -1
if highlight_color is None:
highlight_color = '00ff00'
def segments2blocks(segments):
fmt_style_dict = {'Name': 'Default', 'Fontname': 'Arial', 'Fontsize': '48', 'PrimaryColour': '&Hffffff',
'SecondaryColour': '&Hffffff', 'OutlineColour': '&H0', 'BackColour': '&H0', 'Bold': '0',
'Italic': '0', 'Underline': '0', 'StrikeOut': '0', 'ScaleX': '100', 'ScaleY': '100',
'Spacing': '0', 'Angle': '0', 'BorderStyle': '1', 'Outline': '1', 'Shadow': '0',
'Alignment': '2', 'MarginL': '10', 'MarginR': '10', 'MarginV': '10', 'Encoding': '0'}
for k, v in filter(lambda x: 'colour' in x[0].lower() and not str(x[1]).startswith('&H'), kwargs.items()):
kwargs[k] = f'&H{kwargs[k]}'
fmt_style_dict.update((k, v) for k, v in kwargs.items() if k in fmt_style_dict)
if tag is None and 'PrimaryColour' not in kwargs:
fmt_style_dict['PrimaryColour'] = \
highlight_color if highlight_color.startswith('&H') else f'&H{highlight_color}'
if font:
fmt_style_dict.update(Fontname=font)
if font_size:
fmt_style_dict.update(Fontsize=font_size)
fmts = f'Format: {", ".join(map(str, fmt_style_dict.keys()))}'
styles = f'Style: {",".join(map(str, fmt_style_dict.values()))}'
sub_str = f'[Script Info]\nScriptType: v4.00+\nPlayResX: 384\nPlayResY: 288\nScaledBorderAndShadow: yes\n\n' \
f'[V4+ Styles]\n{fmts}\n{styles}\n\n' \
f'[Events]\nFormat: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n\n'
sub_str += '\n'.join(segment2assblock(s, i, strip=strip) for i, s in enumerate(segments))
return sub_str
if tag is not None and karaoke:
warnings.warn(f'``tag`` is not support for ``karaoke=True``; ``tag`` will be ignored.')
return result_to_any(
result=result,
filepath=filepath,
filetype='ass',
segments2blocks=segments2blocks,
segment_level=segment_level,
word_level=word_level,
min_dur=min_dur,
tag=None if tag == -1 else tag,
default_tag=(r'{\1c' + f'{highlight_color}&' + '}', r'{\r}'),
strip=strip,
reverse_text=reverse_text,
to_word_level_string_callback=(
(lambda s, t: to_ass_word_level_segments(s, t, karaoke=karaoke))
if karaoke or (word_level and segment_level and tag is None)
else None
)
) | Generate Advanced SubStation Alpha (ASS) file from ``result`` to display segment-level and/or word-level timestamp. Parameters ---------- result : dict or list or stable_whisper.result.WhisperResult Result of transcription. filepath : str, default None, meaning content will be returned as a ``str`` Path to save file. segment_level : bool, default True Whether to use segment-level timestamps in output. word_level : bool, default True Whether to use word-level timestamps in output. min_dur : float, default 0.2 Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments. tag: tuple of (str, str) or int, default None, meaning use default highlighting Tag used to change the properties a word at its timestamp. -1 for individual word highlight tag. font : str, default `Arial` Word font. font_size : int, default 48 Word font size. strip : bool, default True Whether to remove spaces before and after text on each segment for output. highlight_color : str, default '00ff00' Hexadecimal of the color use for default highlights as '<bb><gg><rr>'. karaoke : bool, default False Whether to use progressive filling highlights (for karaoke effect). reverse_text: bool or tuple, default False Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and ``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations. kwargs: Format styles: 'Name', 'Fontname', 'Fontsize', 'PrimaryColour', 'SecondaryColour', 'OutlineColour', 'BackColour', 'Bold', 'Italic', 'Underline', 'StrikeOut', 'ScaleX', 'ScaleY', 'Spacing', 'Angle', 'BorderStyle', 'Outline', 'Shadow', 'Alignment', 'MarginL', 'MarginR', 'MarginV', 'Encoding' Returns ------- str String of the content if ``filepath`` is ``None``. Notes ----- ``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video player. VLC seems to not suffer from this issue. 
Examples -------- >>> import stable_whisper >>> model = stable_whisper.load_model('base') >>> result = model.transcribe('audio.mp3') >>> result.to_ass('audio.ass') Saved: audio.ass |
13,861 | import json
import os
import warnings
from typing import List, Tuple, Union, Callable
from itertools import chain
from .stabilization.utils import valid_ts
def result_to_any(result: (dict, list),
filepath: str = None,
filetype: str = None,
segments2blocks: Callable = None,
segment_level=True,
word_level=True,
min_dur: float = 0.02,
tag: Tuple[str, str] = None,
default_tag: Tuple[str, str] = None,
strip=True,
reverse_text: Union[bool, tuple] = False,
to_word_level_string_callback: Callable = None):
"""
Generate file from ``result`` to display segment-level and/or word-level timestamp.
Returns
-------
str
String of the content if ``filepath`` is ``None``.
"""
segments, segment_level, word_level = _preprocess_args(
result, segment_level, word_level, min_dur, reverse_text=reverse_text
)
if filetype is None:
filetype = os.path.splitext(filepath)[-1][1:] or 'srt'
if filetype.lower() not in SUPPORTED_FORMATS:
raise NotImplementedError(f'{filetype} not supported')
if filepath and not filepath.lower().endswith(f'.{filetype}'):
filepath += f'.{filetype}'
if word_level and segment_level:
if tag is None:
if default_tag is None:
tag = ('<font color="#00ff00">', '</font>') if filetype == 'srt' else ('<u>', '</u>')
else:
tag = default_tag
if to_word_level_string_callback is None:
to_word_level_string_callback = to_word_level_segments
segments = to_word_level_string_callback(segments, tag)
elif word_level:
segments = to_word_level(segments)
if not valid_ts(segments, warn=False):
warnings.warn(message='Result contains out of order timestamp(s). Output file may not playback properly.')
if segments2blocks is None:
sub_str = '\n\n'.join(segment2srtblock(s, i, strip=strip) for i, s in enumerate(segments))
else:
sub_str = segments2blocks(segments)
if filepath:
_save_as_file(sub_str, filepath)
else:
return sub_str
The provided code snippet includes necessary dependencies for implementing the `result_to_txt` function. Write a Python function `def result_to_txt( result: (dict, list), filepath: str = None, min_dur: float = 0.02, strip=True, reverse_text: Union[bool, tuple] = False )` to solve the following problem:
Generate plain-text without timestamps from ``result``. Parameters ---------- result : dict or list or stable_whisper.result.WhisperResult Result of transcription. filepath : str, default None, meaning content will be returned as a ``str`` Path to save file. min_dur : float, default 0.2 Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments. strip : bool, default True Whether to remove spaces before and after text on each segment for output. reverse_text: bool or tuple, default False Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and ``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations. Returns ------- str String of the content if ``filepath`` is ``None``. Notes ----- ``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video player. VLC seems to not suffer from this issue. Examples -------- >>> import stable_whisper >>> model = stable_whisper.load_model('base') >>> result = model.transcribe('audio.mp3') >>> result.to_txt('audio.txt') Saved: audio.txt
Here is the function:
def result_to_txt(
result: (dict, list),
filepath: str = None,
min_dur: float = 0.02,
strip=True,
reverse_text: Union[bool, tuple] = False
):
"""
Generate plain-text without timestamps from ``result``.
Parameters
----------
result : dict or list or stable_whisper.result.WhisperResult
Result of transcription.
filepath : str, default None, meaning content will be returned as a ``str``
Path to save file.
min_dur : float, default 0.2
Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments.
strip : bool, default True
Whether to remove spaces before and after text on each segment for output.
reverse_text: bool or tuple, default False
Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and
``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations.
Returns
-------
str
String of the content if ``filepath`` is ``None``.
Notes
-----
``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video player. VLC
seems to not suffer from this issue.
Examples
--------
>>> import stable_whisper
>>> model = stable_whisper.load_model('base')
>>> result = model.transcribe('audio.mp3')
>>> result.to_txt('audio.txt')
Saved: audio.txt
"""
def segments2blocks(segments: dict, _strip=True) -> str:
return '\n'.join(f'{segment["text"].strip() if _strip else segment["text"]}' for segment in segments)
return result_to_any(
result=result,
filepath=filepath,
filetype='txt',
segments2blocks=segments2blocks,
segment_level=True,
word_level=False,
min_dur=min_dur,
strip=strip,
reverse_text=reverse_text
) | Generate plain-text without timestamps from ``result``. Parameters ---------- result : dict or list or stable_whisper.result.WhisperResult Result of transcription. filepath : str, default None, meaning content will be returned as a ``str`` Path to save file. min_dur : float, default 0.2 Minimum duration allowed for any word/segment before the word/segments are merged with adjacent word/segments. strip : bool, default True Whether to remove spaces before and after text on each segment for output. reverse_text: bool or tuple, default False Whether to reverse the order of words for each segment or provide the ``prepend_punctuations`` and ``append_punctuations`` as tuple pair instead of ``True`` which is for the default punctuations. Returns ------- str String of the content if ``filepath`` is ``None``. Notes ----- ``reverse_text`` will not fix RTL text not displaying tags properly which is an issue with some video player. VLC seems to not suffer from this issue. Examples -------- >>> import stable_whisper >>> model = stable_whisper.load_model('base') >>> result = model.transcribe('audio.mp3') >>> result.to_txt('audio.txt') Saved: audio.txt |
13,862 | import json
import os
import warnings
from typing import List, Tuple, Union, Callable
from itertools import chain
from .stabilization.utils import valid_ts
def _save_as_file(content: str, path: str):
with open(path, 'w', encoding='utf-8') as f:
f.write(content)
print(f'Saved: {os.path.abspath(path)}')
The provided code snippet includes necessary dependencies for implementing the `save_as_json` function. Write a Python function `def save_as_json(result: dict, path: str, ensure_ascii: bool = False, **kwargs)` to solve the following problem:
Save ``result`` as JSON file to ``path``. Parameters ---------- result : dict or list or stable_whisper.result.WhisperResult Result of transcription. path : str Path to save file. ensure_ascii : bool, default False Whether to escape non-ASCII characters. Examples -------- >>> import stable_whisper >>> model = stable_whisper.load_model('base') >>> result = model.transcribe('audio.mp3') >>> result.save_as_json('audio.json') Saved: audio.json
Here is the function:
def save_as_json(result: dict, path: str, ensure_ascii: bool = False, **kwargs):
"""
Save ``result`` as JSON file to ``path``.
Parameters
----------
result : dict or list or stable_whisper.result.WhisperResult
Result of transcription.
path : str
Path to save file.
ensure_ascii : bool, default False
Whether to escape non-ASCII characters.
Examples
--------
>>> import stable_whisper
>>> model = stable_whisper.load_model('base')
>>> result = model.transcribe('audio.mp3')
>>> result.save_as_json('audio.json')
Saved: audio.json
"""
if not isinstance(result, dict) and callable(getattr(result, 'to_dict')):
result = result.to_dict()
if not path.lower().endswith('.json'):
path += '.json'
result = json.dumps(result, allow_nan=True, ensure_ascii=ensure_ascii, **kwargs)
_save_as_file(result, path) | Save ``result`` as JSON file to ``path``. Parameters ---------- result : dict or list or stable_whisper.result.WhisperResult Result of transcription. path : str Path to save file. ensure_ascii : bool, default False Whether to escape non-ASCII characters. Examples -------- >>> import stable_whisper >>> model = stable_whisper.load_model('base') >>> result = model.transcribe('audio.mp3') >>> result.save_as_json('audio.json') Saved: audio.json |
13,863 | import json
import os
import warnings
from typing import List, Tuple, Union, Callable
from itertools import chain
from .stabilization.utils import valid_ts
The provided code snippet includes necessary dependencies for implementing the `load_result` function. Write a Python function `def load_result(json_path: str) -> dict` to solve the following problem:
Return a ``dict`` of the contents in ``json_path``.
Here is the function:
def load_result(json_path: str) -> dict:
"""
Return a ``dict`` of the contents in ``json_path``.
"""
with open(json_path, 'r', encoding='utf-8') as f:
return json.load(f) | Return a ``dict`` of the contents in ``json_path``. |
13,864 | import os
import subprocess as sp
import warnings
from typing import List
The provided code snippet includes necessary dependencies for implementing the `encode_video_comparison` function. Write a Python function `def encode_video_comparison( audiofile: str, subtitle_files: List[str], output_videopath: str = None, *, labels: List[str] = None, height: int = 90, width: int = 720, color: str = 'black', fontsize: int = 70, border_color: str = 'white', label_color: str = 'white', label_size: int = 14, fps: int = 25, video_codec: str = None, audio_codec: str = None, overwrite=False, only_cmd: bool = False, verbose=True ) -> (str, None)` to solve the following problem:
Encode multiple subtitle files into one video with the subtitles vertically stacked. Parameters ---------- audiofile : str Path of audio file. subtitle_files : list of str List of paths for subtitle file. output_videopath : str, optional Output video path. labels : list of str, default, None, meaning use ``subtitle_files`` as labels List of labels for ``subtitle_files``. height : int, default 90 Height for each subtitle section. width : int, default 720 Width for each subtitle section. color : str, default 'black' Background color of the video. fontsize: int, default 70 Font size for subtitles. border_color : str, default 'white' Border color for separating the sections of subtitle. label_color : str, default 'white' Color of labels. label_size : int, default 14 Font size of labels. fps : int, default 25 Frame-rate of the video. video_codec : str, optional Video codec opf the video. audio_codec : str, optional Audio codec opf the video. overwrite : bool, default False Whether to overwrite existing video files with the same path as the output video. only_cmd : bool, default False Whether to skip encoding and only return the full command generate from the specified options. verbose : bool, default True Whether to display ffmpeg processing info. Returns ------- str or None Encoding command as a string if ``only_cmd = True``.
Here is the function:
def encode_video_comparison(
audiofile: str,
subtitle_files: List[str],
output_videopath: str = None,
*,
labels: List[str] = None,
height: int = 90,
width: int = 720,
color: str = 'black',
fontsize: int = 70,
border_color: str = 'white',
label_color: str = 'white',
label_size: int = 14,
fps: int = 25,
video_codec: str = None,
audio_codec: str = None,
overwrite=False,
only_cmd: bool = False,
verbose=True
) -> (str, None):
"""
Encode multiple subtitle files into one video with the subtitles vertically stacked.
Parameters
----------
audiofile : str
Path of audio file.
subtitle_files : list of str
List of paths for subtitle file.
output_videopath : str, optional
Output video path.
labels : list of str, default, None, meaning use ``subtitle_files`` as labels
List of labels for ``subtitle_files``.
height : int, default 90
Height for each subtitle section.
width : int, default 720
Width for each subtitle section.
color : str, default 'black'
Background color of the video.
fontsize: int, default 70
Font size for subtitles.
border_color : str, default 'white'
Border color for separating the sections of subtitle.
label_color : str, default 'white'
Color of labels.
label_size : int, default 14
Font size of labels.
fps : int, default 25
Frame-rate of the video.
video_codec : str, optional
Video codec opf the video.
audio_codec : str, optional
Audio codec opf the video.
overwrite : bool, default False
Whether to overwrite existing video files with the same path as the output video.
only_cmd : bool, default False
Whether to skip encoding and only return the full command generate from the specified options.
verbose : bool, default True
Whether to display ffmpeg processing info.
Returns
-------
str or None
Encoding command as a string if ``only_cmd = True``.
"""
vc = '' if video_codec is None else f' -c:v {video_codec}'
ac = '' if audio_codec is None else f' -c:a {audio_codec}'
background = f'-f lavfi -i color=size={width}x{height}:rate={fps}:color={color}'
border = f'-f lavfi -i color=size={width}x3:rate={fps}:color={border_color}'
audio = f'-i "{audiofile}"'
cfilters0 = []
assert labels is None or len(labels) == len(subtitle_files)
for i, sub in enumerate(subtitle_files):
label = sub if labels is None else labels[i]
label = label.replace("'", '"')
fil = f"[0]drawtext=text='{label}':fontcolor={label_color}:fontsize={label_size}:x=10:y=10[a{i}]," \
f"[a{i}]subtitles='{sub}':force_style='Fontsize={fontsize}'[b{i}]"
cfilters0.append(fil)
cfilters1 = (
'[1]'.join(
f'[b{i}]' for i in range(len(cfilters0))
)
+
f'vstack=inputs={len(cfilters0) * 2 - 1}'
)
final_fil = ','.join(cfilters0) + f';{cfilters1}'
ow = '-y' if overwrite else '-n'
if output_videopath is None:
name = os.path.split(os.path.splitext(audiofile)[0])[1]
output_videopath = f'{name}_sub_comparison.mp4'
cmd = (f'ffmpeg {ow} {background} {border} {audio} '
f'-filter_complex "{final_fil}"{vc}{ac} -shortest "{output_videopath}"')
if only_cmd:
return cmd
if verbose:
print(cmd)
rc = sp.run(cmd, capture_output=not verbose).returncode
if rc == 0:
if verbose:
print(f'Encoded: {output_videopath}')
else:
warnings.warn(f'Failed to encode {output_videopath}') | Encode multiple subtitle files into one video with the subtitles vertically stacked. Parameters ---------- audiofile : str Path of audio file. subtitle_files : list of str List of paths for subtitle file. output_videopath : str, optional Output video path. labels : list of str, default, None, meaning use ``subtitle_files`` as labels List of labels for ``subtitle_files``. height : int, default 90 Height for each subtitle section. width : int, default 720 Width for each subtitle section. color : str, default 'black' Background color of the video. fontsize: int, default 70 Font size for subtitles. border_color : str, default 'white' Border color for separating the sections of subtitle. label_color : str, default 'white' Color of labels. label_size : int, default 14 Font size of labels. fps : int, default 25 Frame-rate of the video. video_codec : str, optional Video codec opf the video. audio_codec : str, optional Audio codec opf the video. overwrite : bool, default False Whether to overwrite existing video files with the same path as the output video. only_cmd : bool, default False Whether to skip encoding and only return the full command generate from the specified options. verbose : bool, default True Whether to display ffmpeg processing info. Returns ------- str or None Encoding command as a string if ``only_cmd = True``. |
13,865 | from typing import TYPE_CHECKING, List, Union
from dataclasses import replace
import torch
import numpy as np
from whisper.decoding import DecodingTask, DecodingOptions, DecodingResult
def _suppress_ts(ts_logits: torch.Tensor, ts_token_mask: torch.Tensor = None):
if ts_token_mask is not None:
ts_logits[:, ts_token_mask] = -np.inf | null |
13,866 | from features import SignalGenerator, dilated_factor
from scipy.interpolate import interp1d
import torch
import numpy as np
import json
import os
def convert_continuos_f0(f0, f0_size):
# get start and end of f0
if (f0 == 0).all():
return np.zeros((f0_size,))
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
# padding start and end of f0 sequence
cf0 = f0
start_idx = np.where(cf0 == start_f0)[0][0]
end_idx = np.where(cf0 == end_f0)[0][-1]
cf0[:start_idx] = start_f0
cf0[end_idx:] = end_f0
# get non-zero frame index
nz_frames = np.where(cf0 != 0)[0]
# perform linear interpolation
f = interp1d(nz_frames, cf0[nz_frames], bounds_error=False, fill_value=0.0)
cf0_ = f(np.arange(0, f0_size))
# print(cf0.shape, cf0_.shape, f0.shape, f0_size)
# print(cf0_)
return f(np.arange(0, f0_size)) | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.