id
stringlengths 19
21
| content
stringlengths 722
86.7k
|
|---|---|
evocodebench_data_101
|
import logging
from typing import List, Optional, Any
from agents.agent_lifecycle import AgentLifecycle
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from integrations.openaiwrapper import OpenAIAPIWrapper
logger= logging.getLogger()
class MicroAgentManager:
    """
    Manages the creation and retrieval of micro agents.

    Thin facade over :class:`AgentLifecycle` and
    :class:`AgentPersistenceManager`: construction wires the two together and
    immediately loads any persisted agents from the database.
    """

    def __init__(self, openai_wrapper: OpenAIAPIWrapper, max_agents: int = 20, db_filename: str = "agents.db"):
        """
        Args:
            openai_wrapper: Wrapper around the OpenAI API shared by all agents.
            max_agents: Upper bound on the number of managed agents.
            db_filename: Path of the SQLite database used for persistence.
        """
        self.max_agents = max_agents
        self.openai_wrapper = openai_wrapper
        self.agent_persistence = AgentPersistenceManager(db_filename)
        self.agent_lifecycle = AgentLifecycle(self.openai_wrapper, self.agent_persistence, max_agents)
        self.load_agents()

    def stop_all_agents(self) -> None:
        """Stops all agents."""
        self.agent_lifecycle.stop_all_agents()

    def cleanup_agents(self):
        """Remove all agents with status stopped = True."""
        self.agent_lifecycle.cleanup_agents()

    def load_agents(self):
        """Loads agents from the database and appends them to the lifecycle list."""
        loaded_agents = self.agent_persistence.load_all_agents(self.agent_lifecycle, self.openai_wrapper)
        self.agent_lifecycle.agents.extend(loaded_agents)
        logger.info(f"Loaded {len(loaded_agents)} agents from the database.")

    def get_agents(self) -> List[Any]:
        """Returns the list of agents, dropping stopped agents first."""
        self.cleanup_agents()
        return self.agent_lifecycle.agents

    def create_agents(self) -> None:
        """Creates prime agents and logs the process."""
        logger.info("Creating agents...")
        try:
            self.agent_lifecycle.create_prime_agent()
            logger.info("Agents created successfully.")
        except Exception as e:
            logger.exception(f"Error in creating agents: {e}")
            raise

    def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
        """
        Retrieves an existing agent or creates a new one based on the given purpose.
        """
        logger.info(f"Getting or creating agent for purpose: {purpose}")
        try:
            agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
            logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
            return agent
        except Exception as e:
            # FIX: previously called logging.exception on the root logger;
            # use the module-level logger for consistency with the rest
            # of this class.
            logger.exception(f"Error in getting or creating agent: {e}")
            raise

    def display_agent_status(self):
        """Displays the current status of all agents."""
        for agent in self.get_agents():
            logger.info(f"Agent {agent.purpose}: Status = {agent.current_status}, Evolve Count = {agent.evolve_count}")

    def display_active_agent_tree(self):
        """Displays a tree view of active agent relationships."""
        for agent in self.get_agents():
            if agent.active_agents:
                logger.info(f"Agent {agent.purpose} is calling: {agent.active_agents}")
            else:
                logger.info(f"Agent {agent.purpose} is currently idle.")
|
evocodebench_data_102
|
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
    """
    Owns the list of :class:`MicroAgent` instances: creation, similarity-based
    lookup, eviction of least-used agents, persistence, and bulk
    stop/reset/cleanup operations.
    """

    def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
        self.agents: List[MicroAgent] = []
        self.openai_wrapper = openai_wrapper
        self.agent_persistence = agent_persistence_manager
        self.max_agents = max_agents

    def stop_all_agents(self) -> None:
        """Stops all agents."""
        for agent in self.agents:
            agent.stop()

    def reset_all_agents(self) -> None:
        """Resets all agents."""
        for agent in self.agents:
            agent.reset()

    def cleanup_agents(self):
        """Remove all agents with status stopped = True in an efficient manner."""
        self.agents = [agent for agent in self.agents if not agent.stopped]

    def create_prime_agent(self) -> None:
        """Creates the prime agent and adds it to the agent list."""
        prime_agent = MicroAgent(
            PRIME_PROMPT, PRIME_NAME, 0, self,
            self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
        )
        self.agents.append(prime_agent)

    def add_agent(self, agent: MicroAgent) -> None:
        """Adds an agent to the list of agents."""
        self.agents.append(agent)

    def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
        """Return working, non-bootstrap agents that are children of ``agent``.

        BUG FIX: the original removed elements from ``available_agents`` while
        iterating over it, which skips elements and can leave non-children in
        the result; it also shadowed ``agent`` inside the comprehension.
        A single filtering pass is both correct and clearer.
        """
        agent_id = agent.id
        return [
            candidate for candidate in self.agents
            if candidate.purpose != "Bootstrap Agent"
            and candidate.working_agent
            and candidate.parent_id == agent_id
        ]

    def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
        """
        Retrieves or creates an agent based on the given purpose.
        Optionally creates a new agent regardless of similarity if force_new is True.
        """
        if not force_new:
            agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
            purpose_embedding = agent_similarity.get_embedding(purpose)
            closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
            similarity_threshold = agent_similarity.calculate_similarity_threshold()
            if highest_similarity >= similarity_threshold:
                # Reuse the existing agent and track its popularity.
                closest_agent.usage_count += 1
                return closest_agent
        return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)

    def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
        """Helper method to create and add a new agent, evicting one if at capacity."""
        if len(self.agents) >= self.max_agents:
            self._remove_least_used_agent()
        new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
        new_agent.usage_count = 1
        self.agents.append(new_agent)
        return new_agent

    def _remove_least_used_agent(self):
        """Removes the least used agent."""
        least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
        self.agents.remove(least_used_agent)

    def save_agent(self, agent: MicroAgent) -> None:
        """Saves the given agent with error handling."""
        try:
            self.agent_persistence.save_agent(agent)
        except Exception as e:
            logger.exception(f"Error in saving agent: {e}")
            raise

    def remove_agent(self, agent: MicroAgent) -> None:
        """Removes the given agent with error handling."""
        try:
            self.agent_persistence.remove_agent(agent)
        except Exception as e:
            # FIX: the original log message said "saving" (copy-paste error).
            logger.exception(f"Error in removing agent: {e}")
            raise

    def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
        """
        Generates a prompt for the LLM based on the given goal and sample input.

        Returns an empty string on failure rather than raising, so agent
        creation degrades gracefully when the API is unavailable.
        """
        messages = [
            {"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
            {"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
        ]
        try:
            return self.openai_wrapper.chat_completion(messages=messages)
        except Exception as e:
            logger.exception(f"Error generating LLM prompt: {e}")
            return ""
|
evocodebench_data_103
|
import sqlite3
import json
from integrations.agent_persistence import AbstractAgentPersistence
class SQLiteAgentPersistence(AbstractAgentPersistence):
    """SQLite-backed implementation of the agent persistence interface."""

    def __init__(self, filename="agents.db"):
        self.filename = filename
        self._initialize_database()

    def _initialize_database(self):
        """
        Initialize the SQLite database with the required schema.
        """
        schema = """
                CREATE TABLE IF NOT EXISTS agents (
                    id TEXT PRIMARY KEY,
                    purpose TEXT,
                    data TEXT
                )
            """
        with sqlite3.connect(self.filename) as conn:
            conn.execute(schema)

    def remove_agent(self, purpose):
        """
        Remove an agent from the SQLite database.

        NOTE(review): despite the parameter name, the row is deleted by its
        ``id`` column — confirm callers pass the agent id here.
        """
        with sqlite3.connect(self.filename) as conn:
            conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))

    def save_agent(self, agent_dict):
        """
        Save the serialized agent to an SQLite database.
        """
        record = (agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
        with sqlite3.connect(self.filename) as conn:
            # REPLACE acts as an upsert keyed on the primary-key ``id``.
            conn.execute("REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)", record)

    def fetch_agent(self, purpose):
        """
        Fetch a serialized agent based on its purpose from the SQLite database.
        """
        with sqlite3.connect(self.filename) as conn:
            row = conn.execute(
                "SELECT data FROM agents WHERE purpose = ?", (purpose,)
            ).fetchone()
        if row is None:
            return None
        return json.loads(row[0])

    def load_all_purposes(self):
        """
        Load all agent purposes from the SQLite database.
        """
        with sqlite3.connect(self.filename) as conn:
            rows = conn.execute("SELECT purpose FROM agents").fetchall()
        return [purpose for (purpose,) in rows]
|
evocodebench_data_104
|
import sqlite3
import json
from integrations.agent_persistence import AbstractAgentPersistence
class SQLiteAgentPersistence(AbstractAgentPersistence):
    """Persist serialized agents in a local SQLite database file."""

    def __init__(self, filename="agents.db"):
        self.filename = filename
        self._initialize_database()

    def _connect(self):
        """Open a connection to the backing database file."""
        return sqlite3.connect(self.filename)

    def _initialize_database(self):
        """
        Initialize the SQLite database with the required schema.
        """
        with self._connect() as conn:
            conn.execute(
                "CREATE TABLE IF NOT EXISTS agents ("
                " id TEXT PRIMARY KEY,"
                " purpose TEXT,"
                " data TEXT"
                " )"
            )

    def remove_agent(self, purpose):
        """
        Remove an agent from the SQLite database.
        """
        # NOTE(review): deletes by ``id`` although the argument is named
        # ``purpose`` — verify against the call sites.
        with self._connect() as conn:
            conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))

    def save_agent(self, agent_dict):
        """
        Save the serialized agent to an SQLite database.
        """
        with self._connect() as conn:
            conn.execute(
                "REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
                (agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict)),
            )

    def fetch_agent(self, purpose):
        """
        Fetch a serialized agent based on its purpose from the SQLite database.
        """
        with self._connect() as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT data FROM agents WHERE purpose = ?", (purpose,))
            row = cursor.fetchone()
            return json.loads(row[0]) if row else None

    def load_all_purposes(self):
        """
        Load all agent purposes from the SQLite database.
        """
        with self._connect() as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT purpose FROM agents")
            return [row[0] for row in cursor.fetchall()]
|
evocodebench_data_105
|
import sqlite3
import json
from integrations.agent_persistence import AbstractAgentPersistence
class SQLiteAgentPersistence(AbstractAgentPersistence):
    """Store agents as JSON blobs in a single-table SQLite database."""

    def __init__(self, filename="agents.db"):
        self.filename = filename
        self._initialize_database()

    def _initialize_database(self):
        """
        Initialize the SQLite database with the required schema.
        """
        with sqlite3.connect(self.filename) as db:
            db.execute(
                """
                CREATE TABLE IF NOT EXISTS agents (
                    id TEXT PRIMARY KEY,
                    purpose TEXT,
                    data TEXT
                )
                """
            )

    def remove_agent(self, purpose):
        """
        Remove an agent from the SQLite database.

        NOTE(review): the WHERE clause targets ``id``, not ``purpose`` —
        confirm the value passed by callers before renaming anything.
        """
        with sqlite3.connect(self.filename) as db:
            db.execute("DELETE FROM agents WHERE id = ?", (purpose,))

    def save_agent(self, agent_dict):
        """
        Save the serialized agent to an SQLite database.
        """
        payload = json.dumps(agent_dict)
        with sqlite3.connect(self.filename) as db:
            db.execute(
                "REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
                (agent_dict['id'], agent_dict['purpose'], payload),
            )

    def fetch_agent(self, purpose):
        """
        Fetch a serialized agent based on its purpose from the SQLite database.
        """
        with sqlite3.connect(self.filename) as db:
            found = db.execute(
                "SELECT data FROM agents WHERE purpose = ?", (purpose,)
            ).fetchone()
        return json.loads(found[0]) if found else None

    def load_all_purposes(self):
        """
        Load all agent purposes from the SQLite database.
        """
        with sqlite3.connect(self.filename) as db:
            return [r[0] for r in db.execute("SELECT purpose FROM agents").fetchall()]
|
evocodebench_data_106
|
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
    """
    Decorator factory that memoizes a callable's results in a SQLite database.

    The cache key is derived from ``func_name`` and the call arguments; on a
    hit the stored result is returned without invoking the callable.
    """
    def decorator(target):
        @functools.wraps(target)
        def memoized(*call_args, **call_kwargs):
            with SQLiteMemoization(filename) as store:
                return store.fetch_or_compute(target, func_name, *call_args, **call_kwargs)
        return memoized
    return decorator
class SQLiteMemoization:
    """
    Context manager that persists ``hash(function name + args) -> JSON result``
    pairs in a SQLite table; used by :func:`memoize_to_sqlite`.
    """

    def __init__(self, filename):
        self.filename = filename
        self.connection = None

    def __enter__(self):
        self.connection = sqlite3.connect(self.filename)
        self._initialize_database()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.connection.close()
        self.connection = None

    def _initialize_database(self):
        # ``hash`` is the PRIMARY KEY, which SQLite indexes automatically;
        # the original's extra explicit index on the same column was
        # redundant and has been dropped.
        self.connection.execute(
            "CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
        )

    def fetch_or_compute(self, func, func_name, *args, **kwargs):
        """Return the cached result for this call, computing and caching on a miss."""
        arg_hash = self._compute_hash(func_name, *args, **kwargs)
        result = self._fetch_from_cache(arg_hash)
        # NOTE: a cached result of ``None`` is indistinguishable from a miss,
        # so ``None`` results are recomputed (and re-cached) on every call.
        if result is not None:
            return result
        return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)

    def _compute_hash(self, func_name, *args, **kwargs):
        # Key covers the function name plus the repr of all arguments, so it
        # is only stable for argument types with deterministic reprs.
        data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
        return hashlib.sha256(data).hexdigest()

    def _fetch_from_cache(self, arg_hash):
        cursor = self.connection.cursor()
        cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
        row = cursor.fetchone()
        return json.loads(row[0]) if row else None

    def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
        result = func(*args, **kwargs)
        self._cache_result(arg_hash, result)
        return result

    def _cache_result(self, arg_hash, result):
        # BUG FIX: a plain INSERT raised sqlite3.IntegrityError when a key was
        # written twice (e.g. after a ``None`` result was recomputed); use an
        # upsert so re-caching is always safe.
        self.connection.execute(
            "INSERT OR REPLACE INTO cache (hash, result) VALUES (?, ?)",
            (arg_hash, json.dumps(result)),
        )
        self.connection.commit()
|
evocodebench_data_107
|
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
    """
    Memoization decorator backed by a SQLite database.

    Results of the wrapped callable are looked up (and stored) under a key
    built from ``func_name`` and the call arguments.
    """
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*a, **kw):
            memoizer = SQLiteMemoization(filename)
            with memoizer:
                return memoizer.fetch_or_compute(fn, func_name, *a, **kw)
        return wrapper
    return decorator
class SQLiteMemoization:
    """SQLite-backed result cache used by ``memoize_to_sqlite`` (context manager)."""

    def __init__(self, filename):
        self.filename = filename
        self.connection = None

    def __enter__(self):
        self.connection = sqlite3.connect(self.filename)
        self._initialize_database()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.connection.close()
        self.connection = None

    def _initialize_database(self):
        conn = self.connection
        conn.execute(
            "CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
        )
        conn.execute(
            "CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
        )

    def fetch_or_compute(self, func, func_name, *args, **kwargs):
        """Return the cached value for this call, or compute and store it."""
        key = self._compute_hash(func_name, *args, **kwargs)
        cached = self._fetch_from_cache(key)
        if cached is None:
            return self._compute_and_cache_result(func, key, *args, **kwargs)
        return cached

    def _compute_hash(self, func_name, *args, **kwargs):
        # Key is the SHA-256 of the function name plus the argument reprs.
        fingerprint = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
        return hashlib.sha256(fingerprint).hexdigest()

    def _fetch_from_cache(self, arg_hash):
        row = self.connection.execute(
            "SELECT result FROM cache WHERE hash = ?", (arg_hash,)
        ).fetchone()
        return json.loads(row[0]) if row else None

    def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
        value = func(*args, **kwargs)
        self._cache_result(arg_hash, value)
        return value

    def _cache_result(self, arg_hash, result):
        cursor = self.connection.cursor()
        cursor.execute(
            "INSERT INTO cache (hash, result) VALUES (?, ?)",
            (arg_hash, json.dumps(result)),
        )
        self.connection.commit()
|
evocodebench_data_108
|
import os
from contextlib import redirect_stdout
import argparse
from copy import deepcopy
from XAgent.config import CONFIG, ARGS
from command import CommandLine, CommandLineParam
def parse_args() -> argparse.Namespace:
    """
    Parse the command line arguments and return them as an argparse.Namespace object.

    Returns:
        argparse.Namespace: An object containing command line arguments and their values.
    """
    cli = argparse.ArgumentParser()
    # Required task description plus optional execution/tuning flags.
    cli.add_argument("--task", type=str, required=True, help="The task description.")
    cli.add_argument("--upload-files", nargs='+', dest="upload_files", help="List of files to upload.")
    cli.add_argument("--model", type=str, help="Model identifier for the task.")
    cli.add_argument("--record-dir", type=str, dest="record_dir", help="Directory to record task execution logs.")
    cli.add_argument("--mode", type=str, default="auto", help="Operational mode: 'auto' or 'manual'.")
    cli.add_argument("--quiet", action="store_true", default=False, help="Run in quiet mode; minimal output.")
    cli.add_argument("--max-subtask-chain-length", type=int, dest="max_subtask_chain_length",
                     help="Maximum length of subtask chain.")
    cli.add_argument("--enable-ask-human-for-help", action="store_true", dest="enable_ask_human_for_help",
                     help="Flag to enable asking for human assistance.")
    cli.add_argument("--max-plan-refine-chain-length", type=int, dest="max_plan_refine_chain_length",
                     help="Maximum length of plan refinement chain.")
    cli.add_argument("--max-plan-tree-depth", type=int, dest="max_plan_tree_depth",
                     help="Maximum depth of the plan tree.")
    cli.add_argument("--max-plan-tree-width", type=int, dest="max_plan_tree_width",
                     help="Maximum width of the plan tree.")
    cli.add_argument("--max-retry-times", type=int, dest="max_retry_times", help="Maximum number of retry attempts.")
    # The config file default can be overridden via the CONFIG_FILE env var.
    cli.add_argument("--config-file", type=str, default=os.getenv('CONFIG_FILE', 'assets/config.yml'),
                     dest="config_file", help="Path to the configuration file.")
    return cli.parse_args()
def execute_command_line_process(args: argparse.Namespace, quiet_mode: bool = False) -> None:
    """
    Execute the command line process based on the parsed arguments. If quiet mode is enabled,
    redirect stdout to a file specified by the recorder's record_root_dir.

    Args:
        args (argparse.Namespace): Parsed command line arguments.
        quiet_mode (bool): Whether to run in quiet mode, outputting to a file instead of the terminal.
    """
    args_dict = vars(args)
    # Copy every explicitly-provided argument into the global ARGS store.
    for name, value in args_dict.items():
        if value is None:
            continue
        if name == 'model':
            # --model overrides only the model inside the default completion
            # kwargs, leaving the other completion settings intact.
            ARGS['default_completion_kwargs'] = deepcopy(CONFIG['default_completion_kwargs'])
            ARGS['default_completion_kwargs']['model'] = value
        else:
            ARGS[name] = value
    if not quiet_mode:
        start_command_line(args_dict)
        return
    # Quiet mode: everything the CLI prints goes into an ANSI log file under
    # the recorder's root directory instead of the terminal.
    from XAgent.running_recorder import recorder
    record_file_path = os.path.join(recorder.record_root_dir, "command_line.ansi")
    with open(record_file_path, "w", encoding="utf-8") as file, redirect_stdout(file):
        start_command_line(args_dict)
def start_command_line(args_dict: dict) -> None:
    """
    Start the command line interface with the provided arguments.

    Args:
        args_dict (dict): A dictionary of command line arguments.
    """
    launch_params = CommandLineParam(
        task=args_dict['task'],
        upload_files=args_dict.get('upload_files'),
        role="Assistant",
        mode=args_dict["mode"],
    )
    CommandLine(launch_params).start()
if __name__ == '__main__':
    # Script entry point: parse the CLI, expose the chosen config file to
    # modules that read CONFIG_FILE from the environment, then run.
    args = parse_args()
    os.environ['CONFIG_FILE'] = args.config_file
    # The quiet_mode argument is passed directly to the function
    execute_command_line_process(args, quiet_mode=args.quiet)
|
evocodebench_data_109
|
import json
import openai
from XAgent.logs import logger
from XAgent.config import CONFIG, get_apiconfig_by_model, get_model_name
from tenacity import (
retry,
stop_after_attempt,
wait_exponential,
retry_if_not_exception_type,
wait_chain,
wait_none,
)
import importlib.metadata as metadata
if metadata.version("openai") < "1.0":
    # Legacy OpenAI SDK (v0.x): error classes live in openai.error and the
    # module-level openai.ChatCompletion API is used.
    # NOTE(review): this is a plain *string* comparison of version numbers;
    # it happens to work for current release strings but is fragile — prefer
    # comparing parsed version tuples. Confirm before relying on it.
    from openai.error import AuthenticationError, PermissionError, InvalidRequestError
    # Exception types that must NOT be retried (fatal auth/request errors);
    # retry_if_not_exception_type below retries everything *except* these.
    RETRY_ERRORS = (
        AuthenticationError,
        PermissionError,
        InvalidRequestError,
        AssertionError,
    )
    @retry(
        retry=retry_if_not_exception_type(RETRY_ERRORS),
        stop=stop_after_attempt(CONFIG.max_retry_times + 3),
        # First three retries are immediate; afterwards back off
        # exponentially between 61s and 293s.
        wait=wait_chain(
            *[wait_none() for _ in range(3)] + [wait_exponential(min=61, max=293)]
        ),
        reraise=True,
    )
    def chatcompletion_request(**kwargs):
        """Handle operation of OpenAI chat completion.
        This function operates OpenAI chat completion with provided
        arguments. It gets the model name, applies a JSON web token, if the
        response indicates the context length has been exceeded, it attempts
        to get a higher-capacity language model if it exists in the configuration
        and reattempts the operation. Otherwise, it will raise an error message.
        Args:
            **kwargs: Variable length argument list including (model:str, etc.).
        Returns:
            dict: chat completion response.
        Raises:
            InvalidRequestError: If any error occurs during chat completion operation or
                context length limit exceeded and no fallback models available.
        """
        model_name = get_model_name(
            kwargs.pop("model", CONFIG.default_completion_kwargs["model"])
        )
        logger.debug("chatcompletion: using " + model_name)
        chatcompletion_kwargs = get_apiconfig_by_model(model_name)
        if "azure_endpoint" in chatcompletion_kwargs:
            # The v0 SDK expects the Azure endpoint under ``api_base``.
            api_base = chatcompletion_kwargs.pop("azure_endpoint", None)
            chatcompletion_kwargs.update({"api_base": api_base})
        # Caller-supplied kwargs override anything from the per-model config.
        chatcompletion_kwargs.update(kwargs)
        try:
            response = openai.ChatCompletion.create(**chatcompletion_kwargs)
            # Round-trip through JSON to get a plain dict.
            response = json.loads(str(response))
            # Treat a truncated completion as a context-length failure so the
            # fallback logic below can pick a larger model.
            if response["choices"][0]["finish_reason"] == "length":
                raise InvalidRequestError("maximum context length exceeded", None)
        except InvalidRequestError as e:
            # NOTE(review): ``_message`` is a private attribute of the v0
            # error class — confirm it exists on the installed SDK version.
            if "maximum context length" in e._message:
                # Escalate to the largest configured variant of the model.
                if model_name == "gpt-4":
                    if "gpt-4-32k" in CONFIG.api_keys:
                        model_name = "gpt-4-32k"
                    elif "gpt-4-1106-preview" in CONFIG.api_keys:
                        model_name = "gpt-4-1106-preview"
                    else:
                        model_name = "gpt-3.5-turbo-16k"
                elif model_name == "gpt-3.5-turbo":
                    if "gpt-3.5-turbo-1106" in CONFIG.api_keys:
                        model_name = "gpt-3.5-turbo-1106"
                    else:
                        model_name = "gpt-3.5-turbo-16k"
                else:
                    # No fallback available for this model family.
                    raise e
                print("max context length reached, retrying with " + model_name)
                # Rebuild the request against the fallback model and retry once.
                chatcompletion_kwargs = get_apiconfig_by_model(model_name)
                chatcompletion_kwargs.update(kwargs)
                chatcompletion_kwargs.pop("schema_error_retry", None)
                response = openai.ChatCompletion.create(**chatcompletion_kwargs)
                response = json.loads(str(response))
            else:
                raise e
        return response
else:
    # Modern OpenAI SDK (v1.x): client-object API, error classes exported at
    # the package top level.
    from openai import AuthenticationError, PermissionDeniedError, BadRequestError
    # Exception types that must NOT be retried (fatal auth/request errors);
    # retry_if_not_exception_type below retries everything *except* these.
    RETRY_ERRORS = (
        AuthenticationError,
        PermissionDeniedError,
        BadRequestError,
        AssertionError,
    )
    @retry(
        retry=retry_if_not_exception_type(RETRY_ERRORS),
        stop=stop_after_attempt(CONFIG.max_retry_times + 3),
        # First three retries are immediate; afterwards back off
        # exponentially between 61s and 293s.
        wait=wait_chain(
            *[wait_none() for _ in range(3)] + [wait_exponential(min=61, max=293)]
        ),
        reraise=True,
    )
    def chatcompletion_request(**kwargs):
        """Handle operation of OpenAI v1.x.x chat completion.
        This function operates OpenAI v1.x.x chat completion with provided
        arguments. It gets the model name, applies a JSON web token, if the
        response indicates the context length has been exceeded, it attempts
        to get a higher-capacity language model if it exists in the configuration
        and reattempts the operation. Otherwise, it will raise an error message.
        Args:
            **kwargs: Variable length argument list including (model:str, etc.).
        Returns:
            response (dict): A dictionary containing the response from the Chat API.
                The structure of the dictionary is based on the API response format.
        Raises:
            BadRequestError: If any error occurs during chat completion operation or
                context length limit exceeded and no fallback models available.
        """
        model_name = get_model_name(
            kwargs.pop("model", CONFIG.default_completion_kwargs["model"])
        )
        logger.debug("chatcompletion: using " + model_name)
        chatcompletion_kwargs = get_apiconfig_by_model(model_name)
        request_timeout = kwargs.pop("request_timeout", 60)
        # Presence of ``api_version`` marks an Azure-style configuration.
        if "api_version" in chatcompletion_kwargs:
            # Accept either ``base_url`` (v1 naming) or legacy ``api_base``.
            if "base_url" in chatcompletion_kwargs:
                base_url = chatcompletion_kwargs.pop("base_url", None)
            else:
                base_url = chatcompletion_kwargs.pop("api_base", None)
            azure_endpoint = chatcompletion_kwargs.pop("azure_endpoint", base_url)
            api_version = chatcompletion_kwargs.pop("api_version", None)
            api_key = chatcompletion_kwargs.pop("api_key", None)
            chatcompletion_kwargs.pop("api_type", None)
            # Azure configs may name the deployment ``engine``; normalize to
            # the v1 ``model`` keyword.
            if "engine" in chatcompletion_kwargs:
                model = chatcompletion_kwargs.pop("engine", None)
            else:
                model = chatcompletion_kwargs.pop("model", None)
            chatcompletion_kwargs.update({"model": model})
            # Caller kwargs override the per-model config.
            chatcompletion_kwargs.update(kwargs)
            client = openai.AzureOpenAI(
                api_key=api_key,
                azure_endpoint=azure_endpoint,
                api_version=api_version,
                timeout=request_timeout,
            )
        else:
            if "base_url" in chatcompletion_kwargs:
                base_url = chatcompletion_kwargs.pop("base_url", None)
            else:
                base_url = chatcompletion_kwargs.pop("api_base", None)
            api_key = chatcompletion_kwargs.pop("api_key", None)
            organization = chatcompletion_kwargs.pop("organization", None)
            chatcompletion_kwargs.update(kwargs)
            client = openai.OpenAI(
                api_key=api_key,
                organization=organization,
                base_url=base_url,
                timeout=request_timeout
            )
        try:
            completions = client.chat.completions.create(**chatcompletion_kwargs)
            # model_dump() converts the pydantic response object into a dict.
            response = completions.model_dump()
            # Treat a truncated completion as a context-length failure so the
            # fallback logic below can pick a larger model.
            if response["choices"][0]["finish_reason"] == "length":
                raise BadRequestError(
                    message="maximum context length exceeded", response=None, body=None
                )
        except BadRequestError as e:
            if "maximum context length" in e.message:
                # Escalate to the largest configured variant of the model.
                if model_name == "gpt-4" and "gpt-4-32k" in CONFIG.api_keys:
                    model_name = "gpt-4-32k"
                elif model_name == "gpt-4" and "gpt-4-1106-preview" in CONFIG.api_keys:
                    model_name = "gpt-4-1106-preview"
                else:
                    if "gpt-3.5-turbo-1106" in CONFIG.api_keys:
                        model_name = "gpt-3.5-turbo-1106"
                    else:
                        model_name = "gpt-3.5-turbo-16k"
                print(f"max context length reached, retrying with {model_name}")
                # Rebuild the request against the fallback model and retry
                # once. NOTE(review): the retry reuses the previously built
                # ``client`` rather than re-deriving it from the new model's
                # config — confirm that is intentional.
                chatcompletion_kwargs = get_apiconfig_by_model(model_name)
                request_timeout = kwargs.pop("request_timeout", 60)
                if "base_url" in chatcompletion_kwargs:
                    base_url = chatcompletion_kwargs.pop("base_url", None)
                else:
                    base_url = chatcompletion_kwargs.pop("api_base", None)
                api_key = chatcompletion_kwargs.pop("api_key", None)
                chatcompletion_kwargs.update(kwargs)
                chatcompletion_kwargs.pop("schema_error_retry", None)
                completions = client.chat.completions.create(**chatcompletion_kwargs)
                response = completions.model_dump()
            else:
                raise e
        return response
|
evocodebench_data_110
|
import os
from time import time
from typing import Any, Optional
from litdata.constants import _BOTO3_AVAILABLE, _IS_IN_STUDIO
if _BOTO3_AVAILABLE:
import boto3
import botocore
from botocore.credentials import InstanceMetadataProvider
from botocore.utils import InstanceMetadataFetcher
class S3Client:
    """Lazily created boto3 S3 client whose credentials are periodically
    re-fetched when running in Studio without a shared credentials file."""

    # TODO: Generalize to support more cloud providers.

    def __init__(self, refetch_interval: int = 3300) -> None:
        # Seconds after which the underlying client is rebuilt (3300s = 55min).
        self._refetch_interval = refetch_interval
        self._last_time: Optional[float] = None
        self._client: Optional[Any] = None

    def _create_client(self) -> None:
        credentials_file = os.getenv("AWS_SHARED_CREDENTIALS_FILE")
        config_file = os.getenv("AWS_CONFIG_FILE")
        uses_shared_credentials = (
            credentials_file == config_file == "/.credentials/.aws_credentials"
        )
        retry_config = botocore.config.Config(retries={"max_attempts": 1000, "mode": "adaptive"})
        if uses_shared_credentials or not _IS_IN_STUDIO:
            self._client = boto3.client("s3", config=retry_config)
            return
        # In Studio without shared credentials: pull temporary credentials
        # from the EC2 instance metadata service.
        fetcher = InstanceMetadataFetcher(timeout=3600, num_attempts=5)
        credentials = InstanceMetadataProvider(iam_role_fetcher=fetcher).load()
        self._client = boto3.client(
            "s3",
            aws_access_key_id=credentials.access_key,
            aws_secret_access_key=credentials.secret_key,
            aws_session_token=credentials.token,
            config=retry_config,
        )

    @property
    def client(self) -> Any:
        if self._client is None:
            self._create_client()
            self._last_time = time()
        # Re-generate credentials for EC2
        if self._last_time is None or (time() - self._last_time) > self._refetch_interval:
            self._create_client()
            self._last_time = time()
        return self._client
|
evocodebench_data_111
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
from logging import Logger
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from torch.utils.data import IterableDataset
from litdata.constants import (
_DEFAULT_CACHE_DIR,
_INDEX_FILENAME,
)
from litdata.streaming import Cache
from litdata.streaming.item_loader import BaseItemLoader
from litdata.streaming.resolver import Dir, _resolve_dir
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer
from litdata.streaming.shuffle import FullShuffle, NoShuffle, Shuffle
from litdata.utilities.env import _DistributedEnv, _is_in_dataloader_worker, _WorkerEnv
logger = Logger(__name__)
class StreamingDataset(IterableDataset):
"""The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class."""
def __init__(
    self,
    input_dir: Union[str, "Dir"],
    item_loader: Optional[BaseItemLoader] = None,
    shuffle: bool = False,
    drop_last: Optional[bool] = None,
    seed: int = 42,
    serializers: Optional[Dict[str, Serializer]] = None,
    max_cache_size: Union[int, str] = "100GB",
) -> None:
    """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class.
    Arguments:
        input_dir: Path to the folder where the input data is stored.
        item_loader: The logic to load an item from a chunk.
        shuffle: Whether to shuffle the data.
        drop_last: If `True`, drops the last items to ensure that
            all processes/workers return the same amount of data.
            The argument `drop_last` is set to `True` in a distributed setting
            and `False` otherwise.
        seed: Random seed for shuffling.
        serializers: The serializers used to serialize and deserialize the chunks.
        max_cache_size: The maximum cache size used by the StreamingDataset.
    """
    super().__init__()
    if not isinstance(shuffle, bool):
        raise ValueError(f"Shuffle should be a boolean. Found {shuffle}")
    input_dir = _resolve_dir(input_dir)
    self.input_dir = input_dir
    self.item_loader = item_loader
    self.shuffle: bool = shuffle
    self.distributed_env = _DistributedEnv.detect()
    if self.distributed_env.world_size > 1:
        if drop_last is False:
            # FIX: Logger.warn is a deprecated alias of Logger.warning.
            logger.warning(
                "You're operating within a distributed environment and have disabled the `drop_last` option. "
                "Please note that this configuration may lead to training interruptions if your system depends "
                "on distributed collectives."
            )
        else:
            # In a distributed setting drop_last defaults to True so that all
            # ranks see the same number of samples.
            drop_last = True
    self.drop_last = drop_last or False
    self.seed = seed
    self.max_cache_size = max_cache_size
    # Lazily-created collaborators (built in __iter__/__len__).
    self.cache: Optional[Cache] = None
    self.worker_env: Optional[_WorkerEnv] = None
    # Per-worker chunk assignment and iteration bookkeeping.
    self.worker_chunks: List[int] = []
    self.worker_intervals: List[List[int]] = []
    self.current_indexes: List[int] = []
    self.chunk_index = 0
    self.num_chunks: Optional[int] = None
    self.global_index = 0
    self.index = 0
    self.has_triggered_download = False
    self.min_items_per_replica: Optional[int] = None
    self.current_epoch = 1
    self.random_state = None
    self.shuffler: Optional[Shuffle] = None
    self.serializers = serializers
    # Populated when resuming from a checkpoint; see set_epoch().
    self._state_dict: Optional[Dict[str, Any]] = None
def set_shuffle(self, shuffle: bool) -> None:
    """Enable or disable shuffling for subsequent iterations."""
    self.shuffle = shuffle
def set_epoch(self, current_epoch: int) -> None:
    """Set the current epoch to the dataset on epoch starts.

    When using the StreamingDataLoader, this is done automatically.
    """
    # A reloaded state dict owns the epoch — don't override it here.
    # The StreamingDataloader would clean this out.
    if self._state_dict is not None:
        return
    self.current_epoch = current_epoch
    def _create_cache(self, worker_env: _WorkerEnv) -> Cache:
        """Build the `Cache` backing this dataset and verify it is usable.

        Args:
            worker_env: The detected worker environment (not read in this body;
                kept for interface symmetry with the callers).

        Raises:
            ValueError: If the resolved directory holds no index file, i.e. the
                data was never optimized into the expected chunk format.
        """
        # Special paths (empty/None, or /teamspace/...) are redirected to a
        # local cache directory derived from the input path or URL.
        if _should_replace_path(self.input_dir.path):
            cache_path = _try_create_cache_dir(
                input_dir=self.input_dir.path if self.input_dir.path else self.input_dir.url
            )
            if cache_path is not None:
                self.input_dir.path = cache_path
        cache = Cache(
            input_dir=self.input_dir,
            item_loader=self.item_loader,
            chunk_bytes=1,
            serializers=self.serializers,
            max_cache_size=self.max_cache_size,
        )
        # Eagerly load the chunk config so `cache.filled` reflects reality.
        cache._reader._try_load_config()
        if not cache.filled:
            raise ValueError(
                f"The provided dataset `{self.input_dir}` doesn't contain any {_INDEX_FILENAME} file."
                " HINT: Did you successfully optimize a dataset to the provided `input_dir`?"
            )
        return cache
def _create_shuffler(self, cache: Cache) -> Shuffle:
seed = self.seed
drop_last = self.drop_last
if self._state_dict is not None:
state: Dict[str, Any] = self._state_dict
seed = state["seed"]
drop_last = state["drop_last"]
return FullShuffle(cache, seed, drop_last) if self.shuffle else NoShuffle(cache, seed, drop_last)
    def __len__(self) -> int:
        """Return the number of samples this replica yields for the current epoch."""
        # `__len__` may be called before `__iter__` (e.g. by a dataloader),
        # so lazily build a shuffler from a freshly detected worker env.
        if self.shuffler is None:
            cache = self._create_cache(worker_env=_WorkerEnv.detect())
            self.shuffler = self._create_shuffler(cache)
        return self.shuffler.get_len(self.distributed_env, self.current_epoch)
def __iter__(self) -> "StreamingDataset":
# When the StreamingDataset is used within map or optimize, let's refetch the distributed env.
if os.getenv("DATA_OPTIMIZER_GLOBAL_RANK"):
self.distributed_env = _DistributedEnv.detect()
self.worker_env = _WorkerEnv.detect()
self.cache = self._create_cache(worker_env=self.worker_env)
self.shuffler = self._create_shuffler(self.cache)
# Handle restart
if self._state_dict:
self._validate_state_dict()
state: Dict[str, Any] = self._state_dict
self.current_epoch = state["current_epoch"]
chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
self.distributed_env, self.current_epoch
)
chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
intervals_replica = intervals_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
# Handle restart
if self._state_dict:
self._resume(chunks_replica, intervals_replica)
else:
chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
self.distributed_env, self.current_epoch
)
chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
intervals_replica = intervals_per_replica[
self.distributed_env.global_rank % self.distributed_env.world_size
]
self.worker_chunks = []
self.worker_intervals = []
for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica)):
if i % self.worker_env.world_size != self.worker_env.rank:
continue
self.worker_chunks.append(chunk_index)
self.worker_intervals.append(chunk_interval)
self.num_chunks = len(self.worker_chunks)
self.current_indexes = []
self.chunk_index = 0
self.global_index = 0
self.index = 0
self.has_triggered_download = False
self.last_time = time()
return self
    def _resume(self, chunks_replica: List[int], intervals_replica: List[Any]) -> None:
        """Fast-forward the iteration state to the checkpointed position.

        Args:
            chunks_replica: Chunk ids assigned to this replica for the epoch.
            intervals_replica: Matching (begin, end) sample intervals.
        """
        assert self._state_dict
        assert self.worker_env
        assert self.shuffler
        state: Dict[str, Any] = self._state_dict
        num_workers = state["num_workers"]
        batch_size = state["batch_size"]
        # TODO: Implement elastic sampling where the number of workers, ranks can change.
        num_samples_yielded = self._state_dict["num_samples_yielded"]
        # replay sampling from each worker / chunks using the batch size
        workers_chunks, workers_intervals = _associate_chunks_to_workers(
            num_workers, self.worker_env, chunks_replica, intervals_replica
        )
        indexes = _replay_sampling(num_samples_yielded, batch_size, num_workers)
        chunks_index, indexes = _replay_chunks_sampling(workers_intervals, indexes)
        # select the chunks and intervals associated to this worker
        worker_rank = self.worker_env.rank
        self.num_chunks = len(workers_intervals[worker_rank])
        self.chunk_index = chunks_index[worker_rank]
        self.worker_chunks = workers_chunks[worker_rank]
        self.worker_intervals = workers_intervals[worker_rank]
        # replay the indexes for the current chunks
        interval = self.worker_intervals[self.chunk_index]
        current_indexes = np.arange(interval[0], interval[1])
        # re-shuffle the indexes (same shuffler args => same order as the interrupted run)
        current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
        # skip any indexes already consumed
        current_indexes = current_indexes[indexes[worker_rank] :]
        self.current_indexes = current_indexes
        self.global_index = num_samples_yielded
        # bump the chunk_index (`__next__` reads the current chunk at chunk_index - 1)
        self.chunk_index += 1
def __getitem__(self, index: Union[ChunkedIndex, int]) -> Any:
if self.cache is None:
self.worker_env = _WorkerEnv.detect()
self.cache = self._create_cache(worker_env=self.worker_env)
self.shuffler = self._create_shuffler(self.cache)
if isinstance(index, int):
index = ChunkedIndex(index, self.cache._get_chunk_index_from_index(index))
return self.cache[index]
def __next__(self) -> Any:
# Prevent to create more batch on a given process
if self.global_index >= len(self):
self.current_epoch += 1
raise StopIteration
# Lazily re-populate the interval to reduce memory usage.
if len(self.current_indexes) == 0:
if self.chunk_index == self.num_chunks:
self.current_epoch += 1
raise StopIteration
# reset index
self.index = 0
interval = self.worker_intervals[self.chunk_index]
current_indexes = np.arange(interval[0], interval[1])
assert self.shuffler is not None
assert self.num_chunks is not None
self.current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
self.chunk_index += 1
# Get the first index
index = self.current_indexes.pop(0)
# Call the `__getitem__` method.
data = self.__getitem__(
ChunkedIndex(
index=index,
chunk_index=self.worker_chunks[self.chunk_index - 1],
# We provide the chunks indexes only one the first
chunk_indexes=None if self.has_triggered_download else self.worker_chunks,
is_last_index=(self.chunk_index - 1) == len(self.worker_intervals) and len(self.current_indexes) == 1,
)
)
self.has_triggered_download = True
self.global_index += 1
self.index += 1
return data
def state_dict(self, num_samples_yielded: int, num_workers: int, batch_size: int) -> Dict[str, Any]:
if _is_in_dataloader_worker():
raise RuntimeError("The method `state_dict` should only be called in the main process.")
if self._state_dict is not None:
self._state_dict["num_samples_yielded"] = num_samples_yielded
return self._state_dict
state = {
"num_samples_yielded": num_samples_yielded,
"num_workers": num_workers,
"batch_size": batch_size,
"current_epoch": self.current_epoch,
"input_dir_path": self.input_dir.path,
"input_dir_url": self.input_dir.url,
"item_loader": self.item_loader.state_dict() if self.item_loader else None,
"drop_last": self.drop_last,
"seed": self.seed,
"world_size": self.distributed_env.world_size,
"shuffle": self.shuffle,
}
return state
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
if state_dict:
# the state is restored within the workers
self._state_dict = state_dict
    def _validate_state_dict(self) -> None:
        """Check a restored state against the current dataset configuration.

        Raises:
            ValueError: if any restored field (shuffle, num_workers, input dir
                path/URL, seed, item loader state, or drop_last) disagrees
                with the values this dataset currently uses.
        """
        assert self._state_dict
        assert self.worker_env
        assert self.cache
        state: Dict[str, Any] = self._state_dict
        if state["shuffle"] != self.shuffle:
            raise ValueError(
                "The provided `shuffle` state doesn't match the current one. "
                f"Found `{self.shuffle}` instead of `{state['shuffle']}`."
            )
        if state["num_workers"] != self.worker_env.world_size:
            raise ValueError(
                "The provided `num_workers` state doesn't match the current one. "
                f"Found `{self.worker_env.world_size}` instead of `{state['num_workers']}`."
            )
        # Note: We need to check whether the path has been resolved to its associated cache.
        # In this case, validate the cache folder is the same.
        if _should_replace_path(state["input_dir_path"]):
            cache_path = _try_create_cache_dir(
                input_dir=state["input_dir_path"] if state["input_dir_path"] else state["input_dir_url"]
            )
            if cache_path != self.input_dir.path:
                raise ValueError(
                    "The provided `input_dir` path state doesn't match the current one. "
                    f"Found `{self.input_dir.path}` instead of `{cache_path}`."
                )
        elif state["input_dir_path"] != self.input_dir.path:
            raise ValueError(
                "The provided `input_dir` path state doesn't match the current one. "
                f"Found `{self.input_dir.path}` instead of `{state['input_dir_path']}`."
            )
        if state["input_dir_url"] != self.input_dir.url:
            raise ValueError(
                "The provided `input_dir` URL state doesn't match the current one. "
                f"Found `{self.input_dir.url}` instead of `{state['input_dir_url']}`."
            )
        if state["seed"] != self.seed:
            raise ValueError(
                "The provided `seed` state doesn't match the current one. "
                f"Found `{self.seed}` instead of `{state['seed']}`."
            )
        if self.item_loader and state["item_loader"] != self.item_loader.state_dict():
            raise ValueError(
                "The provided `item_loader` state doesn't match the current one. "
                f"Found `{self.item_loader.state_dict()}` instead of `{state['item_loader']}`."
            )
        if state["drop_last"] != self.drop_last:
            raise ValueError(
                "The provided `drop_last` state doesn't match the current one. "
                f"Found `{self.drop_last}` instead of `{state['drop_last']}`."
            )
def _try_create_cache_dir(input_dir: Optional[str]) -> Optional[str]:
    """Create (if needed) and return the cache directory for ``input_dir``.

    The directory name is an MD5 hash of the input path (used only as a stable
    fingerprint, not for security). On the Lightning cloud (both cluster and
    project env vars set) the cache lives under ``/cache/chunks``; otherwise
    under the default local cache root.
    """
    hash_object = hashlib.md5((input_dir or "").encode())
    # DRY: pick the root, then create/return once (original duplicated this).
    if "LIGHTNING_CLUSTER_ID" not in os.environ or "LIGHTNING_CLOUD_PROJECT_ID" not in os.environ:
        root = _DEFAULT_CACHE_DIR
    else:
        root = os.path.join("/cache", "chunks")
    cache_dir = os.path.join(root, hash_object.hexdigest())
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def _should_replace_path(path: Optional[str]) -> bool:
"""Whether the input path is a special path to be replaced."""
if path is None or path == "":
return True
return path.startswith("/teamspace/datasets/") or path.startswith("/teamspace/s3_connections/")
def is_integer(value: str) -> bool:
    """Return True if ``value`` parses as a base-10 integer via ``int()``.

    Only conversion failures count as "not an integer"; the original broad
    ``except Exception`` could hide unrelated bugs.
    """
    try:
        int(value)
    except (TypeError, ValueError):
        return False
    return True
def _associate_chunks_to_workers(
    num_workers: int, worker_env: _WorkerEnv, chunks_replica: List[int], intervals_replica: List[Any]
) -> Any:
    """Round-robin the replica's chunks (and their intervals) across workers."""
    workers_chunks = {}
    workers_intervals = {}
    for worker_idx in range(num_workers):
        assigned = [
            (chunk_index, chunk_interval)
            for position, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica))
            if position % worker_env.world_size == worker_idx
        ]
        workers_chunks[worker_idx] = [chunk for chunk, _ in assigned]
        workers_intervals[worker_idx] = [interval for _, interval in assigned]
    return workers_chunks, workers_intervals
def _replay_sampling(num_samples_yielded: int, batch_size: int, num_workers: int) -> Dict[int, int]:
"""This function replays the sampling from the dataloader."""
divisible_num_batches_yielded = num_samples_yielded // (num_workers * batch_size)
indexes = {}
for worker_idx in range(num_workers):
indexes[worker_idx] = divisible_num_batches_yielded * batch_size
num_samples_yielded = num_samples_yielded - (num_workers * divisible_num_batches_yielded * batch_size)
# take care of the reminder
worker_idx = 0 # reset the worker_idx
while True:
if num_samples_yielded >= batch_size:
indexes[worker_idx] += batch_size
worker_idx = (worker_idx + 1) % num_workers
num_samples_yielded -= batch_size
else:
indexes[worker_idx] += num_samples_yielded
break
return indexes
def _replay_chunks_sampling(
workers_intervals: Dict[int, List[Any]], indexes: Dict[int, int]
) -> Tuple[Dict[int, int], Dict[int, int]]:
chunks_index = {}
for worker_idx in range(len(workers_intervals)):
chunks_index[worker_idx] = 0
for worker_idx, intervals in workers_intervals.items():
for interval in intervals:
size = interval[-1] - interval[0]
if indexes[worker_idx] >= size:
indexes[worker_idx] -= size
chunks_index[worker_idx] += 1
return chunks_index, indexes
|
evocodebench_data_112
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
from logging import Logger
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from torch.utils.data import IterableDataset
from litdata.constants import (
_DEFAULT_CACHE_DIR,
_INDEX_FILENAME,
)
from litdata.streaming import Cache
from litdata.streaming.item_loader import BaseItemLoader
from litdata.streaming.resolver import Dir, _resolve_dir
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer
from litdata.streaming.shuffle import FullShuffle, NoShuffle, Shuffle
from litdata.utilities.env import _DistributedEnv, _is_in_dataloader_worker, _WorkerEnv
import logging

# Fix: use the logging manager instead of instantiating `Logger` directly, so
# this logger participates in the standard `logging.getLogger` hierarchy and
# inherits the root configuration/handlers.
logger = logging.getLogger(__name__)
class StreamingDataset(IterableDataset):
    """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class."""

    def __init__(
        self,
        input_dir: Union[str, "Dir"],
        item_loader: Optional[BaseItemLoader] = None,
        shuffle: bool = False,
        drop_last: Optional[bool] = None,
        seed: int = 42,
        serializers: Optional[Dict[str, Serializer]] = None,
        max_cache_size: Union[int, str] = "100GB",
    ) -> None:
        """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class.

        Arguments:
            input_dir: Path to the folder where the input data is stored.
            item_loader: The logic to load an item from a chunk.
            shuffle: Whether to shuffle the data.
            drop_last: If `True`, drops the last items to ensure that
                all processes/workers return the same amount of data.
                The argument `drop_last` is set to `True` in a distributed setting
                and `False` otherwise.
            seed: Random seed for shuffling.
            serializers: The serializers used to serialize and deserialize the chunks.
            max_cache_size: The maximum cache size used by the StreamingDataset.
        """
        super().__init__()
        if not isinstance(shuffle, bool):
            raise ValueError(f"Shuffle should be a boolean. Found {shuffle}")
        input_dir = _resolve_dir(input_dir)
        self.input_dir = input_dir
        self.item_loader = item_loader
        self.shuffle: bool = shuffle
        self.distributed_env = _DistributedEnv.detect()
        # In a distributed run, `drop_last` defaults to True so all ranks yield
        # the same number of samples (collectives could hang otherwise).
        if self.distributed_env.world_size > 1:
            if drop_last is False:
                # Fix: `Logger.warn` is deprecated; use `warning`.
                logger.warning(
                    "You're operating within a distributed environment and have disabled the `drop_last` option. "
                    "Please note that this configuration may lead to training interruptions if your system depends "
                    "on distributed collectives."
                )
            else:
                drop_last = True
        self.drop_last = drop_last or False
        self.seed = seed
        self.max_cache_size = max_cache_size
        # Lazily-initialised iteration state (populated in `__iter__`).
        self.cache: Optional[Cache] = None
        self.worker_env: Optional[_WorkerEnv] = None
        self.worker_chunks: List[int] = []
        self.worker_intervals: List[List[int]] = []
        self.current_indexes: List[int] = []
        self.chunk_index = 0
        self.num_chunks: Optional[int] = None
        self.global_index = 0
        self.index = 0
        self.has_triggered_download = False
        self.min_items_per_replica: Optional[int] = None
        self.current_epoch = 1
        self.random_state = None
        self.shuffler: Optional[Shuffle] = None
        self.serializers = serializers
        # Checkpoint state restored via `load_state_dict`, applied in `__iter__`.
        self._state_dict: Optional[Dict[str, Any]] = None

    def set_shuffle(self, shuffle: bool) -> None:
        """Turn shuffling on or off for subsequent iterations."""
        self.shuffle = shuffle

    def set_epoch(self, current_epoch: int) -> None:
        """Record the epoch number on epoch start (the dataloader calls this).

        If a checkpoint state was reloaded, the restored epoch must not be
        overridden (the StreamingDataloader clears the state later), so this
        is a no-op until then.
        """
        if self._state_dict is not None:
            return
        self.current_epoch = current_epoch

    def _create_cache(self, worker_env: _WorkerEnv) -> Cache:
        """Build the `Cache` backing this dataset and verify it contains an index.

        Raises:
            ValueError: If the resolved directory holds no index file.
        """
        # Special paths (empty/None, or /teamspace/...) are redirected to a
        # local cache directory derived from the input path or URL.
        if _should_replace_path(self.input_dir.path):
            cache_path = _try_create_cache_dir(
                input_dir=self.input_dir.path if self.input_dir.path else self.input_dir.url
            )
            if cache_path is not None:
                self.input_dir.path = cache_path
        cache = Cache(
            input_dir=self.input_dir,
            item_loader=self.item_loader,
            chunk_bytes=1,
            serializers=self.serializers,
            max_cache_size=self.max_cache_size,
        )
        # Eagerly load the chunk config so `cache.filled` reflects reality.
        cache._reader._try_load_config()
        if not cache.filled:
            raise ValueError(
                f"The provided dataset `{self.input_dir}` doesn't contain any {_INDEX_FILENAME} file."
                " HINT: Did you successfully optimize a dataset to the provided `input_dir`?"
            )
        return cache

    def _create_shuffler(self, cache: Cache) -> Shuffle:
        """Build the shuffler, preferring seed/drop_last from a restored state."""
        state = self._state_dict
        if state is None:
            seed, drop_last = self.seed, self.drop_last
        else:
            seed, drop_last = state["seed"], state["drop_last"]
        shuffler_cls = FullShuffle if self.shuffle else NoShuffle
        return shuffler_cls(cache, seed, drop_last)

    def __len__(self) -> int:
        """Return the number of samples this replica yields for the current epoch."""
        # May be called before `__iter__`, so lazily build a shuffler.
        if self.shuffler is None:
            cache = self._create_cache(worker_env=_WorkerEnv.detect())
            self.shuffler = self._create_shuffler(cache)
        return self.shuffler.get_len(self.distributed_env, self.current_epoch)

    def __iter__(self) -> "StreamingDataset":
        """Assign chunks to this worker and reset iteration counters."""
        # When the StreamingDataset is used within map or optimize, let's refetch the distributed env.
        if os.getenv("DATA_OPTIMIZER_GLOBAL_RANK"):
            self.distributed_env = _DistributedEnv.detect()
        self.worker_env = _WorkerEnv.detect()
        self.cache = self._create_cache(worker_env=self.worker_env)
        self.shuffler = self._create_shuffler(self.cache)
        # Handle restart: restore the epoch *before* computing the chunk
        # assignment so the shuffler replays the interrupted run's ordering.
        if self._state_dict:
            self._validate_state_dict()
            state: Dict[str, Any] = self._state_dict
            self.current_epoch = state["current_epoch"]
        # The replica's chunks/intervals are identical in the fresh-start and
        # restart paths, so compute them once (the original duplicated this).
        chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
            self.distributed_env, self.current_epoch
        )
        replica_rank = self.distributed_env.global_rank % self.distributed_env.world_size
        chunks_replica = chunks_per_replica[replica_rank]
        intervals_replica = intervals_per_replica[replica_rank]
        # Handle restart
        if self._state_dict:
            self._resume(chunks_replica, intervals_replica)
        else:
            # Round-robin the replica's chunks across the dataloader workers.
            self.worker_chunks = []
            self.worker_intervals = []
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica)):
                if i % self.worker_env.world_size != self.worker_env.rank:
                    continue
                self.worker_chunks.append(chunk_index)
                self.worker_intervals.append(chunk_interval)
            self.num_chunks = len(self.worker_chunks)
            self.current_indexes = []
            self.chunk_index = 0
            self.global_index = 0
            self.index = 0
        self.has_triggered_download = False
        self.last_time = time()
        return self

    def _resume(self, chunks_replica: List[int], intervals_replica: List[Any]) -> None:
        """Fast-forward the iteration state to the checkpointed position."""
        assert self._state_dict
        assert self.worker_env
        assert self.shuffler
        state: Dict[str, Any] = self._state_dict
        num_workers = state["num_workers"]
        batch_size = state["batch_size"]
        # TODO: Implement elastic sampling where the number of workers, ranks can change.
        num_samples_yielded = self._state_dict["num_samples_yielded"]
        # replay sampling from each worker / chunks using the batch size
        workers_chunks, workers_intervals = _associate_chunks_to_workers(
            num_workers, self.worker_env, chunks_replica, intervals_replica
        )
        indexes = _replay_sampling(num_samples_yielded, batch_size, num_workers)
        chunks_index, indexes = _replay_chunks_sampling(workers_intervals, indexes)
        # select the chunks and intervals associated to this worker
        worker_rank = self.worker_env.rank
        self.num_chunks = len(workers_intervals[worker_rank])
        self.chunk_index = chunks_index[worker_rank]
        self.worker_chunks = workers_chunks[worker_rank]
        self.worker_intervals = workers_intervals[worker_rank]
        # replay the indexes for the current chunks
        interval = self.worker_intervals[self.chunk_index]
        current_indexes = np.arange(interval[0], interval[1])
        # re-shuffle the indexes (same args => same order as the interrupted run)
        current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
        # skip any indexes already consumed
        current_indexes = current_indexes[indexes[worker_rank] :]
        self.current_indexes = current_indexes
        self.global_index = num_samples_yielded
        # bump the chunk_index (`__next__` reads the current chunk at chunk_index - 1)
        self.chunk_index += 1

    def __getitem__(self, index: Union[ChunkedIndex, int]) -> Any:
        """Fetch one item, lazily creating the cache on first access."""
        if self.cache is None:
            env = _WorkerEnv.detect()
            self.worker_env = env
            self.cache = self._create_cache(worker_env=env)
            self.shuffler = self._create_shuffler(self.cache)
        if isinstance(index, int):
            index = ChunkedIndex(index, self.cache._get_chunk_index_from_index(index))
        return self.cache[index]

    def __next__(self) -> Any:
        """Yield the next sample for this worker, loading chunk indexes lazily.

        Raises:
            StopIteration: once this process has yielded its share of samples
                or exhausted its assigned chunks; bumps ``current_epoch``.
        """
        # Prevent to create more batch on a given process
        if self.global_index >= len(self):
            self.current_epoch += 1
            raise StopIteration
        # Lazily re-populate the interval to reduce memory usage.
        if len(self.current_indexes) == 0:
            if self.chunk_index == self.num_chunks:
                self.current_epoch += 1
                raise StopIteration
            # reset index
            self.index = 0
            interval = self.worker_intervals[self.chunk_index]
            current_indexes = np.arange(interval[0], interval[1])
            assert self.shuffler is not None
            assert self.num_chunks is not None
            self.current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
            self.chunk_index += 1
        # Get the first index
        index = self.current_indexes.pop(0)
        data = self.__getitem__(
            ChunkedIndex(
                index=index,
                chunk_index=self.worker_chunks[self.chunk_index - 1],
                # We provide the chunk indexes only on the first call (see `has_triggered_download`).
                chunk_indexes=None if self.has_triggered_download else self.worker_chunks,
                # Fix: the last chunk sits at position len(...) - 1; the original
                # compared against len(...), so `is_last_index` could never be True.
                is_last_index=(self.chunk_index - 1) == len(self.worker_intervals) - 1
                and len(self.current_indexes) == 1,
            )
        )
        self.has_triggered_download = True
        self.global_index += 1
        self.index += 1
        return data

    def state_dict(self, num_samples_yielded: int, num_workers: int, batch_size: int) -> Dict[str, Any]:
        """Capture the state needed to resume iteration (main process only).

        Raises:
            RuntimeError: when called from within a dataloader worker.
        """
        if _is_in_dataloader_worker():
            raise RuntimeError("The method `state_dict` should only be called in the main process.")
        # When the dataset itself was restored from a state, just refresh the
        # progress counter and hand the same state back.
        if self._state_dict is not None:
            self._state_dict["num_samples_yielded"] = num_samples_yielded
            return self._state_dict
        return {
            "num_samples_yielded": num_samples_yielded,
            "num_workers": num_workers,
            "batch_size": batch_size,
            "current_epoch": self.current_epoch,
            "input_dir_path": self.input_dir.path,
            "input_dir_url": self.input_dir.url,
            "item_loader": self.item_loader.state_dict() if self.item_loader else None,
            "drop_last": self.drop_last,
            "seed": self.seed,
            "world_size": self.distributed_env.world_size,
            "shuffle": self.shuffle,
        }

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Stash a non-empty checkpoint state; it is applied inside the workers."""
        if not state_dict:
            return
        self._state_dict = state_dict

    def _validate_state_dict(self) -> None:
        """Raise `ValueError` if the restored state disagrees with the current config."""
        assert self._state_dict
        assert self.worker_env
        assert self.cache
        state: Dict[str, Any] = self._state_dict
        if state["shuffle"] != self.shuffle:
            raise ValueError(
                "The provided `shuffle` state doesn't match the current one. "
                f"Found `{self.shuffle}` instead of `{state['shuffle']}`."
            )
        if state["num_workers"] != self.worker_env.world_size:
            raise ValueError(
                "The provided `num_workers` state doesn't match the current one. "
                f"Found `{self.worker_env.world_size}` instead of `{state['num_workers']}`."
            )
        # Note: We need to check whether the path has been resolved to its associated cache.
        # In this case, validate the cache folder is the same.
        if _should_replace_path(state["input_dir_path"]):
            cache_path = _try_create_cache_dir(
                input_dir=state["input_dir_path"] if state["input_dir_path"] else state["input_dir_url"]
            )
            if cache_path != self.input_dir.path:
                raise ValueError(
                    "The provided `input_dir` path state doesn't match the current one. "
                    f"Found `{self.input_dir.path}` instead of `{cache_path}`."
                )
        elif state["input_dir_path"] != self.input_dir.path:
            raise ValueError(
                "The provided `input_dir` path state doesn't match the current one. "
                f"Found `{self.input_dir.path}` instead of `{state['input_dir_path']}`."
            )
        if state["input_dir_url"] != self.input_dir.url:
            raise ValueError(
                "The provided `input_dir` URL state doesn't match the current one. "
                f"Found `{self.input_dir.url}` instead of `{state['input_dir_url']}`."
            )
        if state["seed"] != self.seed:
            raise ValueError(
                "The provided `seed` state doesn't match the current one. "
                f"Found `{self.seed}` instead of `{state['seed']}`."
            )
        if self.item_loader and state["item_loader"] != self.item_loader.state_dict():
            raise ValueError(
                "The provided `item_loader` state doesn't match the current one. "
                f"Found `{self.item_loader.state_dict()}` instead of `{state['item_loader']}`."
            )
        if state["drop_last"] != self.drop_last:
            raise ValueError(
                "The provided `drop_last` state doesn't match the current one. "
                f"Found `{self.drop_last}` instead of `{state['drop_last']}`."
            )
def _try_create_cache_dir(input_dir: Optional[str]) -> Optional[str]:
    """Create (if needed) and return the cache directory for ``input_dir``.

    The directory name is an MD5 hash of the input path (used only as a stable
    fingerprint, not for security). On the Lightning cloud (both cluster and
    project env vars set) the cache lives under ``/cache/chunks``; otherwise
    under the default local cache root.
    """
    hash_object = hashlib.md5((input_dir or "").encode())
    # DRY: pick the root, then create/return once (original duplicated this).
    if "LIGHTNING_CLUSTER_ID" not in os.environ or "LIGHTNING_CLOUD_PROJECT_ID" not in os.environ:
        root = _DEFAULT_CACHE_DIR
    else:
        root = os.path.join("/cache", "chunks")
    cache_dir = os.path.join(root, hash_object.hexdigest())
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def _should_replace_path(path: Optional[str]) -> bool:
"""Whether the input path is a special path to be replaced."""
if path is None or path == "":
return True
return path.startswith("/teamspace/datasets/") or path.startswith("/teamspace/s3_connections/")
def is_integer(value: str) -> bool:
    """Return True if ``value`` parses as a base-10 integer via ``int()``.

    Only conversion failures count as "not an integer"; the original broad
    ``except Exception`` could hide unrelated bugs.
    """
    try:
        int(value)
    except (TypeError, ValueError):
        return False
    return True
def _associate_chunks_to_workers(
    num_workers: int, worker_env: _WorkerEnv, chunks_replica: List[int], intervals_replica: List[Any]
) -> Any:
    """Round-robin the replica's chunks (and their intervals) across workers."""
    workers_chunks = {}
    workers_intervals = {}
    for worker_idx in range(num_workers):
        assigned = [
            (chunk_index, chunk_interval)
            for position, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica))
            if position % worker_env.world_size == worker_idx
        ]
        workers_chunks[worker_idx] = [chunk for chunk, _ in assigned]
        workers_intervals[worker_idx] = [interval for _, interval in assigned]
    return workers_chunks, workers_intervals
def _replay_sampling(num_samples_yielded: int, batch_size: int, num_workers: int) -> Dict[int, int]:
"""This function replays the sampling from the dataloader."""
divisible_num_batches_yielded = num_samples_yielded // (num_workers * batch_size)
indexes = {}
for worker_idx in range(num_workers):
indexes[worker_idx] = divisible_num_batches_yielded * batch_size
num_samples_yielded = num_samples_yielded - (num_workers * divisible_num_batches_yielded * batch_size)
# take care of the reminder
worker_idx = 0 # reset the worker_idx
while True:
if num_samples_yielded >= batch_size:
indexes[worker_idx] += batch_size
worker_idx = (worker_idx + 1) % num_workers
num_samples_yielded -= batch_size
else:
indexes[worker_idx] += num_samples_yielded
break
return indexes
def _replay_chunks_sampling(
workers_intervals: Dict[int, List[Any]], indexes: Dict[int, int]
) -> Tuple[Dict[int, int], Dict[int, int]]:
chunks_index = {}
for worker_idx in range(len(workers_intervals)):
chunks_index[worker_idx] = 0
for worker_idx, intervals in workers_intervals.items():
for interval in intervals:
size = interval[-1] - interval[0]
if indexes[worker_idx] >= size:
indexes[worker_idx] -= size
chunks_index[worker_idx] += 1
return chunks_index, indexes
|
evocodebench_data_113
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
from logging import Logger
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from torch.utils.data import IterableDataset
from litdata.constants import (
_DEFAULT_CACHE_DIR,
_INDEX_FILENAME,
)
from litdata.streaming import Cache
from litdata.streaming.item_loader import BaseItemLoader
from litdata.streaming.resolver import Dir, _resolve_dir
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer
from litdata.streaming.shuffle import FullShuffle, NoShuffle, Shuffle
from litdata.utilities.env import _DistributedEnv, _is_in_dataloader_worker, _WorkerEnv
import logging

# Fix: use the logging manager instead of instantiating `Logger` directly, so
# this logger participates in the standard `logging.getLogger` hierarchy and
# inherits the root configuration/handlers.
logger = logging.getLogger(__name__)
class StreamingDataset(IterableDataset):
"""The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class."""
def __init__(
self,
input_dir: Union[str, "Dir"],
item_loader: Optional[BaseItemLoader] = None,
shuffle: bool = False,
drop_last: Optional[bool] = None,
seed: int = 42,
serializers: Optional[Dict[str, Serializer]] = None,
max_cache_size: Union[int, str] = "100GB",
) -> None:
"""The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class.
Arguments:
input_dir: Path to the folder where the input data is stored.
item_loader: The logic to load an item from a chunk.
shuffle: Whether to shuffle the data.
drop_last: If `True`, drops the last items to ensure that
all processes/workers return the same amount of data.
The argument `drop_last` is set to `True` in a distributed setting
and `False` otherwise.
seed: Random seed for shuffling.
serializers: The serializers used to serialize and deserialize the chunks.
max_cache_size: The maximum cache size used by the StreamingDataset.
"""
super().__init__()
if not isinstance(shuffle, bool):
raise ValueError(f"Shuffle should be a boolean. Found {shuffle}")
input_dir = _resolve_dir(input_dir)
self.input_dir = input_dir
self.item_loader = item_loader
self.shuffle: bool = shuffle
self.distributed_env = _DistributedEnv.detect()
if self.distributed_env.world_size > 1:
if drop_last is False:
logger.warn(
"You're operating within a distributed environment and have disabled the `drop_last` option. "
"Please note that this configuration may lead to training interruptions if your system depends "
"on distributed collectives."
)
else:
drop_last = True
self.drop_last = drop_last or False
self.seed = seed
self.max_cache_size = max_cache_size
self.cache: Optional[Cache] = None
self.worker_env: Optional[_WorkerEnv] = None
self.worker_chunks: List[int] = []
self.worker_intervals: List[List[int]] = []
self.current_indexes: List[int] = []
self.chunk_index = 0
self.num_chunks: Optional[int] = None
self.global_index = 0
self.index = 0
self.has_triggered_download = False
self.min_items_per_replica: Optional[int] = None
self.current_epoch = 1
self.random_state = None
self.shuffler: Optional[Shuffle] = None
self.serializers = serializers
self._state_dict: Optional[Dict[str, Any]] = None
def set_shuffle(self, shuffle: bool) -> None:
self.shuffle = shuffle
def set_epoch(self, current_epoch: int) -> None:
"""Set the current epoch to the dataset on epoch starts.
When using the StreamingDataLoader, this is done automatically
"""
# If the state dict has been reloaded, don't override the current epoch
# The StreamingDataloader would clean this out
if self._state_dict is None:
self.current_epoch = current_epoch
def _create_cache(self, worker_env: _WorkerEnv) -> Cache:
if _should_replace_path(self.input_dir.path):
cache_path = _try_create_cache_dir(
input_dir=self.input_dir.path if self.input_dir.path else self.input_dir.url
)
if cache_path is not None:
self.input_dir.path = cache_path
cache = Cache(
input_dir=self.input_dir,
item_loader=self.item_loader,
chunk_bytes=1,
serializers=self.serializers,
max_cache_size=self.max_cache_size,
)
cache._reader._try_load_config()
if not cache.filled:
raise ValueError(
f"The provided dataset `{self.input_dir}` doesn't contain any {_INDEX_FILENAME} file."
" HINT: Did you successfully optimize a dataset to the provided `input_dir`?"
)
return cache
def _create_shuffler(self, cache: Cache) -> Shuffle:
seed = self.seed
drop_last = self.drop_last
if self._state_dict is not None:
state: Dict[str, Any] = self._state_dict
seed = state["seed"]
drop_last = state["drop_last"]
return FullShuffle(cache, seed, drop_last) if self.shuffle else NoShuffle(cache, seed, drop_last)
def __len__(self) -> int:
if self.shuffler is None:
cache = self._create_cache(worker_env=_WorkerEnv.detect())
self.shuffler = self._create_shuffler(cache)
return self.shuffler.get_len(self.distributed_env, self.current_epoch)
    def __iter__(self) -> "StreamingDataset":
        """Prepare per-worker iteration state and return self.

        Detects the worker/distributed environments, builds the cache and shuffler,
        then either resumes from a restored state dict or partitions this replica's
        chunks round-robin across the dataloader workers.
        """
        # When the StreamingDataset is used within map or optimize, let's refetch the distributed env.
        if os.getenv("DATA_OPTIMIZER_GLOBAL_RANK"):
            self.distributed_env = _DistributedEnv.detect()
        self.worker_env = _WorkerEnv.detect()
        self.cache = self._create_cache(worker_env=self.worker_env)
        self.shuffler = self._create_shuffler(self.cache)
        # Handle restart
        if self._state_dict:
            self._validate_state_dict()
            state: Dict[str, Any] = self._state_dict
            self.current_epoch = state["current_epoch"]
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            # Handle restart
            # NOTE(review): this inner check is redundant — the enclosing branch already
            # guarantees `self._state_dict` is truthy.
            if self._state_dict:
                self._resume(chunks_replica, intervals_replica)
        else:
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[
                self.distributed_env.global_rank % self.distributed_env.world_size
            ]
            # Round-robin this replica's chunks across the process's dataloader workers.
            self.worker_chunks = []
            self.worker_intervals = []
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica)):
                if i % self.worker_env.world_size != self.worker_env.rank:
                    continue
                self.worker_chunks.append(chunk_index)
                self.worker_intervals.append(chunk_interval)
            self.num_chunks = len(self.worker_chunks)
            self.current_indexes = []
            self.chunk_index = 0
            self.global_index = 0
            self.index = 0
        self.has_triggered_download = False
        self.last_time = time()
        return self
    def _resume(self, chunks_replica: List[int], intervals_replica: List[Any]) -> None:
        """Restore this worker's iteration position from the loaded state dict.

        Replays the dataloader's round-robin batch sampling to recover how many
        samples each worker consumed, then fast-forwards the chunk and index cursors.
        """
        assert self._state_dict
        assert self.worker_env
        assert self.shuffler
        state: Dict[str, Any] = self._state_dict
        num_workers = state["num_workers"]
        batch_size = state["batch_size"]
        # TODO: Implement elastic sampling where the number of workers, ranks can change.
        num_samples_yielded = self._state_dict["num_samples_yielded"]
        # replay sampling from each worker / chunks using the batch size
        workers_chunks, workers_intervals = _associate_chunks_to_workers(
            num_workers, self.worker_env, chunks_replica, intervals_replica
        )
        indexes = _replay_sampling(num_samples_yielded, batch_size, num_workers)
        chunks_index, indexes = _replay_chunks_sampling(workers_intervals, indexes)
        # select the chunks and intervals associated to this worker
        worker_rank = self.worker_env.rank
        self.num_chunks = len(workers_intervals[worker_rank])
        self.chunk_index = chunks_index[worker_rank]
        self.worker_chunks = workers_chunks[worker_rank]
        self.worker_intervals = workers_intervals[worker_rank]
        # replay the indexes for the current chunks
        interval = self.worker_intervals[self.chunk_index]
        current_indexes = np.arange(interval[0], interval[1])
        # re-shuffle the indexes (same seed/epoch => same permutation as the interrupted run)
        current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
        # skip any indexes already consumed
        current_indexes = current_indexes[indexes[worker_rank] :]
        self.current_indexes = current_indexes
        self.global_index = num_samples_yielded
        # bump the chunk_index
        self.chunk_index += 1
def __getitem__(self, index: Union[ChunkedIndex, int]) -> Any:
if self.cache is None:
self.worker_env = _WorkerEnv.detect()
self.cache = self._create_cache(worker_env=self.worker_env)
self.shuffler = self._create_shuffler(self.cache)
if isinstance(index, int):
index = ChunkedIndex(index, self.cache._get_chunk_index_from_index(index))
return self.cache[index]
def __next__(self) -> Any:
# Prevent to create more batch on a given process
if self.global_index >= len(self):
self.current_epoch += 1
raise StopIteration
# Lazily re-populate the interval to reduce memory usage.
if len(self.current_indexes) == 0:
if self.chunk_index == self.num_chunks:
self.current_epoch += 1
raise StopIteration
# reset index
self.index = 0
interval = self.worker_intervals[self.chunk_index]
current_indexes = np.arange(interval[0], interval[1])
assert self.shuffler is not None
assert self.num_chunks is not None
self.current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
self.chunk_index += 1
# Get the first index
index = self.current_indexes.pop(0)
# Call the `__getitem__` method.
data = self.__getitem__(
ChunkedIndex(
index=index,
chunk_index=self.worker_chunks[self.chunk_index - 1],
# We provide the chunks indexes only one the first
chunk_indexes=None if self.has_triggered_download else self.worker_chunks,
is_last_index=(self.chunk_index - 1) == len(self.worker_intervals) and len(self.current_indexes) == 1,
)
)
self.has_triggered_download = True
self.global_index += 1
self.index += 1
return data
def state_dict(self, num_samples_yielded: int, num_workers: int, batch_size: int) -> Dict[str, Any]:
if _is_in_dataloader_worker():
raise RuntimeError("The method `state_dict` should only be called in the main process.")
if self._state_dict is not None:
self._state_dict["num_samples_yielded"] = num_samples_yielded
return self._state_dict
state = {
"num_samples_yielded": num_samples_yielded,
"num_workers": num_workers,
"batch_size": batch_size,
"current_epoch": self.current_epoch,
"input_dir_path": self.input_dir.path,
"input_dir_url": self.input_dir.url,
"item_loader": self.item_loader.state_dict() if self.item_loader else None,
"drop_last": self.drop_last,
"seed": self.seed,
"world_size": self.distributed_env.world_size,
"shuffle": self.shuffle,
}
return state
    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Store a resumption state; it is applied lazily when iteration starts."""
        if state_dict:
            # the state is restored within the workers
            self._state_dict = state_dict
    def _validate_state_dict(self) -> None:
        """Raise ValueError if the restored state is incompatible with the current configuration."""
        assert self._state_dict
        assert self.worker_env
        assert self.cache
        state: Dict[str, Any] = self._state_dict
        if state["shuffle"] != self.shuffle:
            raise ValueError(
                "The provided `shuffle` state doesn't match the current one. "
                f"Found `{self.shuffle}` instead of `{state['shuffle']}`."
            )
        if state["num_workers"] != self.worker_env.world_size:
            raise ValueError(
                "The provided `num_workers` state doesn't match the current one. "
                f"Found `{self.worker_env.world_size}` instead of `{state['num_workers']}`."
            )
        # Note: We need to check whether the path has been resolved to its associated cache.
        # In this case, validate the cache folder is the same.
        if _should_replace_path(state["input_dir_path"]):
            cache_path = _try_create_cache_dir(
                input_dir=state["input_dir_path"] if state["input_dir_path"] else state["input_dir_url"]
            )
            if cache_path != self.input_dir.path:
                raise ValueError(
                    "The provided `input_dir` path state doesn't match the current one. "
                    f"Found `{self.input_dir.path}` instead of `{cache_path}`."
                )
        elif state["input_dir_path"] != self.input_dir.path:
            raise ValueError(
                "The provided `input_dir` path state doesn't match the current one. "
                f"Found `{self.input_dir.path}` instead of `{state['input_dir_path']}`."
            )
        if state["input_dir_url"] != self.input_dir.url:
            raise ValueError(
                "The provided `input_dir` URL state doesn't match the current one. "
                f"Found `{self.input_dir.url}` instead of `{state['input_dir_url']}`."
            )
        if state["seed"] != self.seed:
            raise ValueError(
                "The provided `seed` state doesn't match the current one. "
                f"Found `{self.seed}` instead of `{state['seed']}`."
            )
        if self.item_loader and state["item_loader"] != self.item_loader.state_dict():
            raise ValueError(
                "The provided `item_loader` state doesn't match the current one. "
                f"Found `{self.item_loader.state_dict()}` instead of `{state['item_loader']}`."
            )
        if state["drop_last"] != self.drop_last:
            raise ValueError(
                "The provided `drop_last` state doesn't match the current one. "
                f"Found `{self.drop_last}` instead of `{state['drop_last']}`."
            )
def _try_create_cache_dir(input_dir: Optional[str]) -> Optional[str]:
    """Create (if needed) and return the local cache directory keyed by a hash of ``input_dir``."""
    digest = hashlib.md5((input_dir or "").encode()).hexdigest()
    # On a Lightning cluster, chunks live under /cache; elsewhere use the default cache root.
    on_cluster = "LIGHTNING_CLUSTER_ID" in os.environ and "LIGHTNING_CLOUD_PROJECT_ID" in os.environ
    root = os.path.join("/cache", "chunks") if on_cluster else _DEFAULT_CACHE_DIR
    cache_dir = os.path.join(root, digest)
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def _should_replace_path(path: Optional[str]) -> bool:
"""Whether the input path is a special path to be replaced."""
if path is None or path == "":
return True
return path.startswith("/teamspace/datasets/") or path.startswith("/teamspace/s3_connections/")
def is_integer(value: str) -> bool:
    """Return True if ``value`` can be parsed as a base-10 integer."""
    try:
        int(value)
    except (TypeError, ValueError):
        # Narrowed from a bare `except Exception` so unrelated errors are not swallowed.
        return False
    return True
def _associate_chunks_to_workers(
    num_workers: int, worker_env: _WorkerEnv, chunks_replica: List[int], intervals_replica: List[Any]
) -> Any:
    """Partition the replica's chunks round-robin across dataloader workers.

    Returns two dicts keyed by worker index: the chunk ids and their intervals.
    """
    # NOTE(review): distribution keys off `worker_env.world_size` while iterating
    # `num_workers` workers — assumed equal; confirm with the callers.
    workers_chunks: Dict[int, List[int]] = {}
    workers_intervals: Dict[int, List[Any]] = {}
    for worker_idx in range(num_workers):
        assigned = [
            (chunk_index, chunk_interval)
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica))
            if i % worker_env.world_size == worker_idx
        ]
        workers_chunks[worker_idx] = [chunk for chunk, _ in assigned]
        workers_intervals[worker_idx] = [interval for _, interval in assigned]
    return workers_chunks, workers_intervals
def _replay_sampling(num_samples_yielded: int, batch_size: int, num_workers: int) -> Dict[int, int]:
"""This function replays the sampling from the dataloader."""
divisible_num_batches_yielded = num_samples_yielded // (num_workers * batch_size)
indexes = {}
for worker_idx in range(num_workers):
indexes[worker_idx] = divisible_num_batches_yielded * batch_size
num_samples_yielded = num_samples_yielded - (num_workers * divisible_num_batches_yielded * batch_size)
# take care of the reminder
worker_idx = 0 # reset the worker_idx
while True:
if num_samples_yielded >= batch_size:
indexes[worker_idx] += batch_size
worker_idx = (worker_idx + 1) % num_workers
num_samples_yielded -= batch_size
else:
indexes[worker_idx] += num_samples_yielded
break
return indexes
def _replay_chunks_sampling(
workers_intervals: Dict[int, List[Any]], indexes: Dict[int, int]
) -> Tuple[Dict[int, int], Dict[int, int]]:
chunks_index = {}
for worker_idx in range(len(workers_intervals)):
chunks_index[worker_idx] = 0
for worker_idx, intervals in workers_intervals.items():
for interval in intervals:
size = interval[-1] - interval[0]
if indexes[worker_idx] >= size:
indexes[worker_idx] -= size
chunks_index[worker_idx] += 1
return chunks_index, indexes
|
evocodebench_data_114
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
from logging import Logger
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
from torch.utils.data import IterableDataset

from litdata.constants import (
    _DEFAULT_CACHE_DIR,
    _INDEX_FILENAME,
)
from litdata.streaming import Cache
from litdata.streaming.item_loader import BaseItemLoader
from litdata.streaming.resolver import Dir, _resolve_dir
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer
from litdata.streaming.shuffle import FullShuffle, NoShuffle, Shuffle
from litdata.utilities.env import _DistributedEnv, _is_in_dataloader_worker, _WorkerEnv
# Use the logging module's factory so the logger is registered in the logging
# hierarchy (direct `Logger(...)` instantiation bypasses configuration/handlers).
logger = logging.getLogger(__name__)
class StreamingDataset(IterableDataset):
    """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class."""

    def __init__(
        self,
        input_dir: Union[str, "Dir"],
        item_loader: Optional[BaseItemLoader] = None,
        shuffle: bool = False,
        drop_last: Optional[bool] = None,
        seed: int = 42,
        serializers: Optional[Dict[str, Serializer]] = None,
        max_cache_size: Union[int, str] = "100GB",
    ) -> None:
        """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class.

        Arguments:
            input_dir: Path to the folder where the input data is stored.
            item_loader: The logic to load an item from a chunk.
            shuffle: Whether to shuffle the data.
            drop_last: If `True`, drops the last items to ensure that
                all processes/workers return the same amount of data.
                The argument `drop_last` is set to `True` in a distributed setting
                and `False` otherwise.
            seed: Random seed for shuffling.
            serializers: The serializers used to serialize and deserialize the chunks.
            max_cache_size: The maximum cache size used by the StreamingDataset.
        """
        super().__init__()
        if not isinstance(shuffle, bool):
            raise ValueError(f"Shuffle should be a boolean. Found {shuffle}")
        input_dir = _resolve_dir(input_dir)
        self.input_dir = input_dir
        self.item_loader = item_loader
        self.shuffle: bool = shuffle
        self.distributed_env = _DistributedEnv.detect()
        # In a distributed run, default `drop_last` to True so every rank yields the
        # same number of samples (required by distributed collectives).
        if self.distributed_env.world_size > 1:
            if drop_last is False:
                # Fix: `logger.warn` is a deprecated alias of `logger.warning`.
                logger.warning(
                    "You're operating within a distributed environment and have disabled the `drop_last` option. "
                    "Please note that this configuration may lead to training interruptions if your system depends "
                    "on distributed collectives."
                )
            else:
                drop_last = True
        self.drop_last = drop_last or False
        self.seed = seed
        self.max_cache_size = max_cache_size
        # Per-worker state, initialized lazily in `__iter__` / `__getitem__`.
        self.cache: Optional[Cache] = None
        self.worker_env: Optional[_WorkerEnv] = None
        self.worker_chunks: List[int] = []
        self.worker_intervals: List[List[int]] = []
        self.current_indexes: List[int] = []
        self.chunk_index = 0
        self.num_chunks: Optional[int] = None
        self.global_index = 0
        self.index = 0
        self.has_triggered_download = False
        self.min_items_per_replica: Optional[int] = None
        self.current_epoch = 1
        self.random_state = None
        self.shuffler: Optional[Shuffle] = None
        self.serializers = serializers
        # Populated by `load_state_dict` when resuming; consumed in `__iter__`.
        self._state_dict: Optional[Dict[str, Any]] = None

    def set_shuffle(self, shuffle: bool) -> None:
        """Enable or disable shuffling for subsequent iterations."""
        self.shuffle = shuffle

    def set_epoch(self, current_epoch: int) -> None:
        """Set the current epoch to the dataset on epoch starts.

        When using the StreamingDataLoader, this is done automatically.
        """
        # If the state dict has been reloaded, don't override the current epoch.
        # The StreamingDataloader would clean this out.
        if self._state_dict is None:
            self.current_epoch = current_epoch

    def _create_cache(self, worker_env: _WorkerEnv) -> Cache:
        """Build the Cache backing this dataset, redirecting special input paths to a local cache dir."""
        if _should_replace_path(self.input_dir.path):
            cache_path = _try_create_cache_dir(
                input_dir=self.input_dir.path if self.input_dir.path else self.input_dir.url
            )
            if cache_path is not None:
                self.input_dir.path = cache_path
        cache = Cache(
            input_dir=self.input_dir,
            item_loader=self.item_loader,
            chunk_bytes=1,
            serializers=self.serializers,
            max_cache_size=self.max_cache_size,
        )
        cache._reader._try_load_config()
        if not cache.filled:
            raise ValueError(
                f"The provided dataset `{self.input_dir}` doesn't contain any {_INDEX_FILENAME} file."
                " HINT: Did you successfully optimize a dataset to the provided `input_dir`?"
            )
        return cache

    def _create_shuffler(self, cache: Cache) -> Shuffle:
        """Instantiate the shuffling strategy, preferring seed/drop_last restored from a state dict."""
        seed = self.seed
        drop_last = self.drop_last
        if self._state_dict is not None:
            state: Dict[str, Any] = self._state_dict
            seed = state["seed"]
            drop_last = state["drop_last"]
        return FullShuffle(cache, seed, drop_last) if self.shuffle else NoShuffle(cache, seed, drop_last)

    def __len__(self) -> int:
        """Number of samples this replica yields for the current epoch."""
        if self.shuffler is None:
            cache = self._create_cache(worker_env=_WorkerEnv.detect())
            self.shuffler = self._create_shuffler(cache)
        return self.shuffler.get_len(self.distributed_env, self.current_epoch)

    def __iter__(self) -> "StreamingDataset":
        """Prepare per-worker iteration state (cache, shuffler, chunk assignment) and return self."""
        # When the StreamingDataset is used within map or optimize, let's refetch the distributed env.
        if os.getenv("DATA_OPTIMIZER_GLOBAL_RANK"):
            self.distributed_env = _DistributedEnv.detect()
        self.worker_env = _WorkerEnv.detect()
        self.cache = self._create_cache(worker_env=self.worker_env)
        self.shuffler = self._create_shuffler(self.cache)
        # Handle restart
        if self._state_dict:
            self._validate_state_dict()
            state: Dict[str, Any] = self._state_dict
            self.current_epoch = state["current_epoch"]
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            # (A second redundant `if self._state_dict:` guard was removed here:
            # this branch already guarantees it is truthy.)
            self._resume(chunks_replica, intervals_replica)
        else:
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[
                self.distributed_env.global_rank % self.distributed_env.world_size
            ]
            # Round-robin this replica's chunks across the process's dataloader workers.
            self.worker_chunks = []
            self.worker_intervals = []
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica)):
                if i % self.worker_env.world_size != self.worker_env.rank:
                    continue
                self.worker_chunks.append(chunk_index)
                self.worker_intervals.append(chunk_interval)
            self.num_chunks = len(self.worker_chunks)
            self.current_indexes = []
            self.chunk_index = 0
            self.global_index = 0
            self.index = 0
        self.has_triggered_download = False
        self.last_time = time()
        return self

    def _resume(self, chunks_replica: List[int], intervals_replica: List[Any]) -> None:
        """Restore this worker's iteration position from the loaded state dict."""
        assert self._state_dict
        assert self.worker_env
        assert self.shuffler
        state: Dict[str, Any] = self._state_dict
        num_workers = state["num_workers"]
        batch_size = state["batch_size"]
        # TODO: Implement elastic sampling where the number of workers, ranks can change.
        num_samples_yielded = self._state_dict["num_samples_yielded"]
        # replay sampling from each worker / chunks using the batch size
        workers_chunks, workers_intervals = _associate_chunks_to_workers(
            num_workers, self.worker_env, chunks_replica, intervals_replica
        )
        indexes = _replay_sampling(num_samples_yielded, batch_size, num_workers)
        chunks_index, indexes = _replay_chunks_sampling(workers_intervals, indexes)
        # select the chunks and intervals associated to this worker
        worker_rank = self.worker_env.rank
        self.num_chunks = len(workers_intervals[worker_rank])
        self.chunk_index = chunks_index[worker_rank]
        self.worker_chunks = workers_chunks[worker_rank]
        self.worker_intervals = workers_intervals[worker_rank]
        # replay the indexes for the current chunks
        interval = self.worker_intervals[self.chunk_index]
        current_indexes = np.arange(interval[0], interval[1])
        # re-shuffle the indexes (same seed/epoch => same permutation as the interrupted run)
        current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
        # skip any indexes already consumed
        current_indexes = current_indexes[indexes[worker_rank] :]
        self.current_indexes = current_indexes
        self.global_index = num_samples_yielded
        # bump the chunk_index
        self.chunk_index += 1

    def __getitem__(self, index: Union[ChunkedIndex, int]) -> Any:
        """Fetch one sample; lazily builds the cache and shuffler on first use."""
        if self.cache is None:
            self.worker_env = _WorkerEnv.detect()
            self.cache = self._create_cache(worker_env=self.worker_env)
            self.shuffler = self._create_shuffler(self.cache)
        if isinstance(index, int):
            index = ChunkedIndex(index, self.cache._get_chunk_index_from_index(index))
        return self.cache[index]

    def __next__(self) -> Any:
        """Return the next sample for this worker, refilling per-chunk indexes lazily."""
        # Prevent this process from yielding more samples than its per-epoch share.
        if self.global_index >= len(self):
            self.current_epoch += 1
            raise StopIteration
        # Lazily re-populate the indexes of the next chunk to reduce memory usage.
        if len(self.current_indexes) == 0:
            if self.chunk_index == self.num_chunks:
                self.current_epoch += 1
                raise StopIteration
            # reset the within-chunk index
            self.index = 0
            interval = self.worker_intervals[self.chunk_index]
            current_indexes = np.arange(interval[0], interval[1])
            assert self.shuffler is not None
            assert self.num_chunks is not None
            self.current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
            self.chunk_index += 1
        # Get the first index
        index = self.current_indexes.pop(0)
        # Call the `__getitem__` method.
        data = self.__getitem__(
            ChunkedIndex(
                index=index,
                chunk_index=self.worker_chunks[self.chunk_index - 1],
                # We provide the chunks indexes only on the first fetch.
                chunk_indexes=None if self.has_triggered_download else self.worker_chunks,
                # Fix: the original compared against `len(self.worker_intervals)`, which can
                # never be true (chunk_index never exceeds the chunk count), so
                # `is_last_index` was always False. Compare against the last valid index.
                is_last_index=(self.chunk_index - 1) == (len(self.worker_intervals) - 1)
                and len(self.current_indexes) == 1,
            )
        )
        self.has_triggered_download = True
        self.global_index += 1
        self.index += 1
        return data

    def state_dict(self, num_samples_yielded: int, num_workers: int, batch_size: int) -> Dict[str, Any]:
        """Capture the state needed to resume iteration; only valid from the main process."""
        if _is_in_dataloader_worker():
            raise RuntimeError("The method `state_dict` should only be called in the main process.")
        # When resuming from a restored state, keep it and refresh only the sample counter.
        if self._state_dict is not None:
            self._state_dict["num_samples_yielded"] = num_samples_yielded
            return self._state_dict
        state = {
            "num_samples_yielded": num_samples_yielded,
            "num_workers": num_workers,
            "batch_size": batch_size,
            "current_epoch": self.current_epoch,
            "input_dir_path": self.input_dir.path,
            "input_dir_url": self.input_dir.url,
            "item_loader": self.item_loader.state_dict() if self.item_loader else None,
            "drop_last": self.drop_last,
            "seed": self.seed,
            "world_size": self.distributed_env.world_size,
            "shuffle": self.shuffle,
        }
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Store a resumption state; it is applied lazily when iteration starts."""
        if state_dict:
            # the state is restored within the workers
            self._state_dict = state_dict

    def _validate_state_dict(self) -> None:
        """Raise ValueError if the restored state is incompatible with the current configuration."""
        assert self._state_dict
        assert self.worker_env
        assert self.cache
        state: Dict[str, Any] = self._state_dict
        if state["shuffle"] != self.shuffle:
            raise ValueError(
                "The provided `shuffle` state doesn't match the current one. "
                f"Found `{self.shuffle}` instead of `{state['shuffle']}`."
            )
        if state["num_workers"] != self.worker_env.world_size:
            raise ValueError(
                "The provided `num_workers` state doesn't match the current one. "
                f"Found `{self.worker_env.world_size}` instead of `{state['num_workers']}`."
            )
        # Note: We need to check whether the path has been resolved to its associated cache.
        # In this case, validate the cache folder is the same.
        if _should_replace_path(state["input_dir_path"]):
            cache_path = _try_create_cache_dir(
                input_dir=state["input_dir_path"] if state["input_dir_path"] else state["input_dir_url"]
            )
            if cache_path != self.input_dir.path:
                raise ValueError(
                    "The provided `input_dir` path state doesn't match the current one. "
                    f"Found `{self.input_dir.path}` instead of `{cache_path}`."
                )
        elif state["input_dir_path"] != self.input_dir.path:
            raise ValueError(
                "The provided `input_dir` path state doesn't match the current one. "
                f"Found `{self.input_dir.path}` instead of `{state['input_dir_path']}`."
            )
        if state["input_dir_url"] != self.input_dir.url:
            raise ValueError(
                "The provided `input_dir` URL state doesn't match the current one. "
                f"Found `{self.input_dir.url}` instead of `{state['input_dir_url']}`."
            )
        if state["seed"] != self.seed:
            raise ValueError(
                "The provided `seed` state doesn't match the current one. "
                f"Found `{self.seed}` instead of `{state['seed']}`."
            )
        if self.item_loader and state["item_loader"] != self.item_loader.state_dict():
            raise ValueError(
                "The provided `item_loader` state doesn't match the current one. "
                f"Found `{self.item_loader.state_dict()}` instead of `{state['item_loader']}`."
            )
        if state["drop_last"] != self.drop_last:
            raise ValueError(
                "The provided `drop_last` state doesn't match the current one. "
                f"Found `{self.drop_last}` instead of `{state['drop_last']}`."
            )
def _try_create_cache_dir(input_dir: Optional[str]) -> Optional[str]:
    """Create (if needed) and return the local cache directory keyed by a hash of ``input_dir``."""
    digest = hashlib.md5((input_dir or "").encode()).hexdigest()
    # On a Lightning cluster, chunks live under /cache; elsewhere use the default cache root.
    on_cluster = "LIGHTNING_CLUSTER_ID" in os.environ and "LIGHTNING_CLOUD_PROJECT_ID" in os.environ
    root = os.path.join("/cache", "chunks") if on_cluster else _DEFAULT_CACHE_DIR
    cache_dir = os.path.join(root, digest)
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def _should_replace_path(path: Optional[str]) -> bool:
"""Whether the input path is a special path to be replaced."""
if path is None or path == "":
return True
return path.startswith("/teamspace/datasets/") or path.startswith("/teamspace/s3_connections/")
def is_integer(value: str) -> bool:
    """Return True if ``value`` can be parsed as a base-10 integer."""
    try:
        int(value)
    except (TypeError, ValueError):
        # Narrowed from a bare `except Exception` so unrelated errors are not swallowed.
        return False
    return True
def _associate_chunks_to_workers(
    num_workers: int, worker_env: _WorkerEnv, chunks_replica: List[int], intervals_replica: List[Any]
) -> Any:
    """Partition the replica's chunks round-robin across dataloader workers.

    Returns two dicts keyed by worker index: the chunk ids and their intervals.
    """
    # NOTE(review): distribution keys off `worker_env.world_size` while iterating
    # `num_workers` workers — assumed equal; confirm with the callers.
    workers_chunks: Dict[int, List[int]] = {}
    workers_intervals: Dict[int, List[Any]] = {}
    for worker_idx in range(num_workers):
        assigned = [
            (chunk_index, chunk_interval)
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica))
            if i % worker_env.world_size == worker_idx
        ]
        workers_chunks[worker_idx] = [chunk for chunk, _ in assigned]
        workers_intervals[worker_idx] = [interval for _, interval in assigned]
    return workers_chunks, workers_intervals
def _replay_sampling(num_samples_yielded: int, batch_size: int, num_workers: int) -> Dict[int, int]:
"""This function replays the sampling from the dataloader."""
divisible_num_batches_yielded = num_samples_yielded // (num_workers * batch_size)
indexes = {}
for worker_idx in range(num_workers):
indexes[worker_idx] = divisible_num_batches_yielded * batch_size
num_samples_yielded = num_samples_yielded - (num_workers * divisible_num_batches_yielded * batch_size)
# take care of the reminder
worker_idx = 0 # reset the worker_idx
while True:
if num_samples_yielded >= batch_size:
indexes[worker_idx] += batch_size
worker_idx = (worker_idx + 1) % num_workers
num_samples_yielded -= batch_size
else:
indexes[worker_idx] += num_samples_yielded
break
return indexes
def _replay_chunks_sampling(
workers_intervals: Dict[int, List[Any]], indexes: Dict[int, int]
) -> Tuple[Dict[int, int], Dict[int, int]]:
chunks_index = {}
for worker_idx in range(len(workers_intervals)):
chunks_index[worker_idx] = 0
for worker_idx, intervals in workers_intervals.items():
for interval in intervals:
size = interval[-1] - interval[0]
if indexes[worker_idx] >= size:
indexes[worker_idx] -= size
chunks_index[worker_idx] += 1
return chunks_index, indexes
|
evocodebench_data_115
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
from abc import ABC
from typing import Any, Dict, List
from urllib import parse
from filelock import FileLock, Timeout
from litdata.constants import _INDEX_FILENAME
from litdata.streaming.client import S3Client
class Downloader(ABC):
    """Base class for fetching chunk files from a remote directory into a local cache."""

    def __init__(self, remote_dir: str, cache_dir: str, chunks: List[Dict[str, Any]]):
        self._remote_dir = remote_dir
        self._cache_dir = cache_dir
        self._chunks = chunks

    def download_chunk_from_index(self, chunk_index: int) -> None:
        """Download the chunk identified by its index in the chunk metadata list."""
        filename = self._chunks[chunk_index]["filename"]
        self.download_file(
            os.path.join(self._remote_dir, filename),
            os.path.join(self._cache_dir, filename),
        )

    def download_file(self, remote_chunkpath: str, local_chunkpath: str) -> None:
        """Fetch a single remote file to a local path. No-op here; subclasses override."""
        pass
class S3Downloader(Downloader):
    """Downloader for `s3://` URIs, preferring the `s5cmd` CLI and falling back to boto3."""

    def __init__(self, remote_dir: str, cache_dir: str, chunks: List[Dict[str, Any]]):
        super().__init__(remote_dir, cache_dir, chunks)
        # NOTE(review): `s5cmd` invoked with no arguments may exit non-zero on some
        # versions (usage error), which would make this probe report "unavailable"
        # even when the binary exists — confirm against the installed s5cmd.
        self._s5cmd_available = os.system("s5cmd > /dev/null 2>&1") == 0
        if not self._s5cmd_available:
            self._client = S3Client()

    def download_file(self, remote_filepath: str, local_filepath: str) -> None:
        """Download one S3 object to ``local_filepath``, guarding against concurrent downloaders."""
        obj = parse.urlparse(remote_filepath)
        if obj.scheme != "s3":
            raise ValueError(f"Expected obj.scheme to be `s3`, instead, got {obj.scheme} for remote={remote_filepath}")
        if os.path.exists(local_filepath):
            return
        try:
            # Index files tolerate a short wait; chunk files use timeout=0 so only one
            # process downloads while the others move on immediately (see except below).
            with FileLock(local_filepath + ".lock", timeout=3 if obj.path.endswith(_INDEX_FILENAME) else 0):
                if self._s5cmd_available:
                    # NOTE(review): the subprocess return code is not checked, so a failed
                    # `s5cmd cp` is silent here and only surfaces later as a missing file.
                    proc = subprocess.Popen(
                        f"s5cmd cp {remote_filepath} {local_filepath}",
                        shell=True,
                        stdout=subprocess.PIPE,
                    )
                    proc.wait()
                else:
                    from boto3.s3.transfer import TransferConfig
                    extra_args: Dict[str, Any] = {}
                    # try:
                    # with FileLock(local_filepath + ".lock", timeout=1):
                    if not os.path.exists(local_filepath):
                        # Issue: https://github.com/boto/boto3/issues/3113
                        self._client.client.download_file(
                            obj.netloc,
                            obj.path.lstrip("/"),
                            local_filepath,
                            ExtraArgs=extra_args,
                            Config=TransferConfig(use_threads=False),
                        )
        except Timeout:
            # another process is responsible to download that file, continue
            pass
class LocalDownloader(Downloader):
    """Downloader for data already on the local filesystem: copies files into the cache."""

    def download_file(self, remote_filepath: str, local_filepath: str) -> None:
        if not os.path.exists(remote_filepath):
            raise FileNotFoundError(f"The provided remote_path doesn't exist: {remote_filepath}")
        # Skip the copy when source and destination coincide or the target already exists.
        needs_copy = remote_filepath != local_filepath and not os.path.exists(local_filepath)
        if needs_copy:
            shutil.copy(remote_filepath, local_filepath)
class LocalDownloaderWithCache(LocalDownloader):
    """Local downloader accepting `local:`-prefixed URIs; strips the scheme before copying."""

    def download_file(self, remote_filepath: str, local_filepath: str) -> None:
        # Drop the `local:` scheme marker; the remainder is a plain filesystem path.
        stripped = remote_filepath.replace("local:", "")
        super().download_file(stripped, local_filepath)
# Maps URI prefix -> downloader implementation. The empty-string prefix matches
# everything, so it acts as the local-filesystem fallback (dict order matters).
_DOWNLOADERS = {"s3://": S3Downloader, "local:": LocalDownloaderWithCache, "": LocalDownloader}
def get_downloader_cls(remote_dir: str, cache_dir: str, chunks: List[Dict[str, Any]]) -> Downloader:
    """Instantiate the downloader whose URI prefix matches ``remote_dir``.

    Note: despite the name, this returns a Downloader *instance*, not a class.
    """
    for k, cls in _DOWNLOADERS.items():
        if str(remote_dir).startswith(k):
            return cls(remote_dir, cache_dir, chunks)
    # Unreachable in practice: the "" prefix above matches any string.
    raise ValueError(f"The provided `remote_dir` {remote_dir} doesn't have a downloader associated.")
|
evocodebench_data_116
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
from logging import Logger
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
from torch.utils.data import IterableDataset

from litdata.constants import (
    _DEFAULT_CACHE_DIR,
    _INDEX_FILENAME,
)
from litdata.streaming import Cache
from litdata.streaming.item_loader import BaseItemLoader
from litdata.streaming.resolver import Dir, _resolve_dir
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer
from litdata.streaming.shuffle import FullShuffle, NoShuffle, Shuffle
from litdata.utilities.env import _DistributedEnv, _is_in_dataloader_worker, _WorkerEnv
# Register the module logger in the logging hierarchy so it inherits the root
# configuration. A directly-instantiated `Logger(__name__)` is detached from the
# hierarchy: it has no parent, so its records never propagate to root handlers.
logger = logging.getLogger(__name__)
class StreamingDataset(IterableDataset):
    """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class."""

    def __init__(
        self,
        input_dir: Union[str, "Dir"],
        item_loader: Optional[BaseItemLoader] = None,
        shuffle: bool = False,
        drop_last: Optional[bool] = None,
        seed: int = 42,
        serializers: Optional[Dict[str, Serializer]] = None,
        max_cache_size: Union[int, str] = "100GB",
    ) -> None:
        """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class.

        Arguments:
            input_dir: Path to the folder where the input data is stored.
            item_loader: The logic to load an item from a chunk.
            shuffle: Whether to shuffle the data.
            drop_last: If `True`, drops the last items to ensure that
                all processes/workers return the same amount of data.
                The argument `drop_last` is set to `True` in a distributed setting
                and `False` otherwise.
            seed: Random seed for shuffling.
            serializers: The serializers used to serialize and deserialize the chunks.
            max_cache_size: The maximum cache size used by the StreamingDataset.

        Raises:
            ValueError: If ``shuffle`` is not a boolean.
        """
        super().__init__()
        if not isinstance(shuffle, bool):
            raise ValueError(f"Shuffle should be a boolean. Found {shuffle}")
        input_dir = _resolve_dir(input_dir)
        self.input_dir = input_dir
        self.item_loader = item_loader
        self.shuffle: bool = shuffle
        self.distributed_env = _DistributedEnv.detect()
        # In a distributed run, default `drop_last` to True so all ranks yield the
        # same number of samples (uneven counts can hang distributed collectives).
        if self.distributed_env.world_size > 1:
            if drop_last is False:
                # NOTE(review): `logger.warn` is deprecated; prefer `logger.warning`.
                logger.warn(
                    "You're operating within a distributed environment and have disabled the `drop_last` option. "
                    "Please note that this configuration may lead to training interruptions if your system depends "
                    "on distributed collectives."
                )
            else:
                drop_last = True
        self.drop_last = drop_last or False
        self.seed = seed
        self.max_cache_size = max_cache_size
        # Per-worker iteration state; populated lazily in `__iter__`.
        self.cache: Optional[Cache] = None
        self.worker_env: Optional[_WorkerEnv] = None
        # Chunk ids assigned to this worker and their sample intervals.
        self.worker_chunks: List[int] = []
        self.worker_intervals: List[List[int]] = []
        # Remaining sample indexes within the chunk currently being consumed.
        self.current_indexes: List[int] = []
        # Position within `worker_chunks` of the next chunk to open.
        self.chunk_index = 0
        self.num_chunks: Optional[int] = None
        # Samples yielded this epoch (worker-local) and within the current chunk.
        self.global_index = 0
        self.index = 0
        self.has_triggered_download = False
        self.min_items_per_replica: Optional[int] = None
        self.current_epoch = 1
        self.random_state = None
        self.shuffler: Optional[Shuffle] = None
        self.serializers = serializers
        # Checkpoint state set by `load_state_dict`; consumed on the next `__iter__`.
        self._state_dict: Optional[Dict[str, Any]] = None

    def set_shuffle(self, shuffle: bool) -> None:
        """Enable or disable shuffling for subsequent iterations."""
        self.shuffle = shuffle

    def set_epoch(self, current_epoch: int) -> None:
        """Set the current epoch to the dataset on epoch starts.
        When using the StreamingDataLoader, this is done automatically
        """
        # If the state dict has been reloaded, don't override the current epoch
        # The StreamingDataloader would clean this out
        if self._state_dict is None:
            self.current_epoch = current_epoch

    def _create_cache(self, worker_env: _WorkerEnv) -> Cache:
        """Build the `Cache` backing this dataset, resolving special input paths first.

        Raises:
            ValueError: If the resolved directory doesn't contain an index file.
        """
        # Special paths (e.g. `/teamspace/...` or empty) are redirected to a local
        # cache folder derived deterministically from the input location.
        if _should_replace_path(self.input_dir.path):
            cache_path = _try_create_cache_dir(
                input_dir=self.input_dir.path if self.input_dir.path else self.input_dir.url
            )
            if cache_path is not None:
                self.input_dir.path = cache_path
        cache = Cache(
            input_dir=self.input_dir,
            item_loader=self.item_loader,
            chunk_bytes=1,
            serializers=self.serializers,
            max_cache_size=self.max_cache_size,
        )
        cache._reader._try_load_config()
        if not cache.filled:
            raise ValueError(
                f"The provided dataset `{self.input_dir}` doesn't contain any {_INDEX_FILENAME} file."
                " HINT: Did you successfully optimize a dataset to the provided `input_dir`?"
            )
        return cache

    def _create_shuffler(self, cache: Cache) -> Shuffle:
        """Return the shuffling strategy, preferring seed/drop_last from a restored state."""
        seed = self.seed
        drop_last = self.drop_last
        if self._state_dict is not None:
            state: Dict[str, Any] = self._state_dict
            seed = state["seed"]
            drop_last = state["drop_last"]
        return FullShuffle(cache, seed, drop_last) if self.shuffle else NoShuffle(cache, seed, drop_last)

    def __len__(self) -> int:
        # The shuffler owns the per-epoch length computation (accounts for drop_last).
        if self.shuffler is None:
            cache = self._create_cache(worker_env=_WorkerEnv.detect())
            self.shuffler = self._create_shuffler(cache)
        return self.shuffler.get_len(self.distributed_env, self.current_epoch)

    def __iter__(self) -> "StreamingDataset":
        """Prepare the per-worker chunk assignment and reset iteration counters."""
        # When the StreamingDataset is used within map or optimize, let's refetch the distributed env.
        if os.getenv("DATA_OPTIMIZER_GLOBAL_RANK"):
            self.distributed_env = _DistributedEnv.detect()
        self.worker_env = _WorkerEnv.detect()
        self.cache = self._create_cache(worker_env=self.worker_env)
        self.shuffler = self._create_shuffler(self.cache)
        # Handle restart
        if self._state_dict:
            self._validate_state_dict()
            state: Dict[str, Any] = self._state_dict
            self.current_epoch = state["current_epoch"]
        # Chunks (and their sample intervals) assigned to this replica.
        chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
            self.distributed_env, self.current_epoch
        )
        chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
        intervals_replica = intervals_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
        # Handle restart
        if self._state_dict:
            # `_resume` restores worker_chunks/intervals, chunk_index and counters.
            self._resume(chunks_replica, intervals_replica)
        else:
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[
                self.distributed_env.global_rank % self.distributed_env.world_size
            ]
            # Round-robin the replica's chunks across the dataloader workers.
            self.worker_chunks = []
            self.worker_intervals = []
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica)):
                if i % self.worker_env.world_size != self.worker_env.rank:
                    continue
                self.worker_chunks.append(chunk_index)
                self.worker_intervals.append(chunk_interval)
            self.num_chunks = len(self.worker_chunks)
            # Fresh epoch: start from the first assigned chunk.
            self.current_indexes = []
            self.chunk_index = 0
            self.global_index = 0
            self.index = 0
        self.has_triggered_download = False
        self.last_time = time()
        return self

    def _resume(self, chunks_replica: List[int], intervals_replica: List[Any]) -> None:
        """Fast-forward this worker's iteration state to match a restored checkpoint."""
        assert self._state_dict
        assert self.worker_env
        assert self.shuffler
        state: Dict[str, Any] = self._state_dict
        num_workers = state["num_workers"]
        batch_size = state["batch_size"]
        # TODO: Implement elastic sampling where the number of workers, ranks can change.
        num_samples_yielded = self._state_dict["num_samples_yielded"]
        # replay sampling from each worker / chunks using the batch size
        workers_chunks, workers_intervals = _associate_chunks_to_workers(
            num_workers, self.worker_env, chunks_replica, intervals_replica
        )
        indexes = _replay_sampling(num_samples_yielded, batch_size, num_workers)
        chunks_index, indexes = _replay_chunks_sampling(workers_intervals, indexes)
        # select the chunks and intervals associated to this worker
        worker_rank = self.worker_env.rank
        self.num_chunks = len(workers_intervals[worker_rank])
        self.chunk_index = chunks_index[worker_rank]
        self.worker_chunks = workers_chunks[worker_rank]
        self.worker_intervals = workers_intervals[worker_rank]
        # replay the indexes for the current chunks
        interval = self.worker_intervals[self.chunk_index]
        current_indexes = np.arange(interval[0], interval[1])
        # re-shuffle the indexes
        current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
        # skip any indexes already consumed
        current_indexes = current_indexes[indexes[worker_rank] :]
        self.current_indexes = current_indexes
        self.global_index = num_samples_yielded
        # bump the chunk_index
        self.chunk_index += 1

    def __getitem__(self, index: Union[ChunkedIndex, int]) -> Any:
        """Random access: fetch the sample at `index` through the cache."""
        if self.cache is None:
            self.worker_env = _WorkerEnv.detect()
            self.cache = self._create_cache(worker_env=self.worker_env)
            self.shuffler = self._create_shuffler(self.cache)
        if isinstance(index, int):
            # Wrap plain ints so the cache knows which chunk holds the sample.
            index = ChunkedIndex(index, self.cache._get_chunk_index_from_index(index))
        return self.cache[index]

    def __next__(self) -> Any:
        """Return the next sample for this worker, advancing chunk state as needed.

        Raises:
            StopIteration: Once the worker has exhausted its per-epoch quota or chunks.
        """
        # Prevent to create more batch on a given process
        if self.global_index >= len(self):
            self.current_epoch += 1
            raise StopIteration
        # Lazily re-populate the interval to reduce memory usage.
        if len(self.current_indexes) == 0:
            if self.chunk_index == self.num_chunks:
                self.current_epoch += 1
                raise StopIteration
            # reset index
            self.index = 0
            interval = self.worker_intervals[self.chunk_index]
            current_indexes = np.arange(interval[0], interval[1])
            assert self.shuffler is not None
            assert self.num_chunks is not None
            # NOTE(review): `pop(0)` below implies the shuffler returns a list, not
            # an ndarray (ndarray has no `pop`) — confirm the `Shuffle.__call__` contract.
            self.current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
            self.chunk_index += 1
        # Get the first index
        index = self.current_indexes.pop(0)
        # Call the `__getitem__` method.
        data = self.__getitem__(
            ChunkedIndex(
                index=index,
                chunk_index=self.worker_chunks[self.chunk_index - 1],
                # We provide the chunk indexes only on the first download trigger.
                chunk_indexes=None if self.has_triggered_download else self.worker_chunks,
                # NOTE(review): `(self.chunk_index - 1) == len(self.worker_intervals)` looks
                # like an off-by-one — `chunk_index - 1` never reaches `len(worker_intervals)`,
                # so this is always False; compare with `len(self.worker_intervals) - 1`?
                is_last_index=(self.chunk_index - 1) == len(self.worker_intervals) and len(self.current_indexes) == 1,
            )
        )
        self.has_triggered_download = True
        self.global_index += 1
        self.index += 1
        return data

    def state_dict(self, num_samples_yielded: int, num_workers: int, batch_size: int) -> Dict[str, Any]:
        """Snapshot the dataset progress so iteration can later be resumed.

        Arguments:
            num_samples_yielded: Total samples yielded so far.
            num_workers: Number of dataloader workers in use.
            batch_size: The dataloader batch size (needed to replay sampling on resume).

        Raises:
            RuntimeError: If called from inside a dataloader worker process.
        """
        if _is_in_dataloader_worker():
            raise RuntimeError("The method `state_dict` should only be called in the main process.")
        # A previously-restored state is kept as-is, only refreshing the progress counter.
        if self._state_dict is not None:
            self._state_dict["num_samples_yielded"] = num_samples_yielded
            return self._state_dict
        state = {
            "num_samples_yielded": num_samples_yielded,
            "num_workers": num_workers,
            "batch_size": batch_size,
            "current_epoch": self.current_epoch,
            "input_dir_path": self.input_dir.path,
            "input_dir_url": self.input_dir.url,
            "item_loader": self.item_loader.state_dict() if self.item_loader else None,
            "drop_last": self.drop_last,
            "seed": self.seed,
            "world_size": self.distributed_env.world_size,
            "shuffle": self.shuffle,
        }
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Store a checkpoint state; it is applied lazily on the next `__iter__`."""
        if state_dict:
            # the state is restored within the workers
            self._state_dict = state_dict

    def _validate_state_dict(self) -> None:
        """Check that the restored state is compatible with the current configuration.

        Raises:
            ValueError: If any restored setting conflicts with the current one.
        """
        assert self._state_dict
        assert self.worker_env
        assert self.cache
        state: Dict[str, Any] = self._state_dict
        if state["shuffle"] != self.shuffle:
            raise ValueError(
                "The provided `shuffle` state doesn't match the current one. "
                f"Found `{self.shuffle}` instead of `{state['shuffle']}`."
            )
        if state["num_workers"] != self.worker_env.world_size:
            raise ValueError(
                "The provided `num_workers` state doesn't match the current one. "
                f"Found `{self.worker_env.world_size}` instead of `{state['num_workers']}`."
            )
        # Note: We need to check whether the path has been resolved to its associated cache.
        # In this case, validate the cache folder is the same.
        if _should_replace_path(state["input_dir_path"]):
            cache_path = _try_create_cache_dir(
                input_dir=state["input_dir_path"] if state["input_dir_path"] else state["input_dir_url"]
            )
            if cache_path != self.input_dir.path:
                raise ValueError(
                    "The provided `input_dir` path state doesn't match the current one. "
                    f"Found `{self.input_dir.path}` instead of `{cache_path}`."
                )
        elif state["input_dir_path"] != self.input_dir.path:
            raise ValueError(
                "The provided `input_dir` path state doesn't match the current one. "
                f"Found `{self.input_dir.path}` instead of `{state['input_dir_path']}`."
            )
        if state["input_dir_url"] != self.input_dir.url:
            raise ValueError(
                "The provided `input_dir` URL state doesn't match the current one. "
                f"Found `{self.input_dir.url}` instead of `{state['input_dir_url']}`."
            )
        if state["seed"] != self.seed:
            raise ValueError(
                "The provided `seed` state doesn't match the current one. "
                f"Found `{self.seed}` instead of `{state['seed']}`."
            )
        if self.item_loader and state["item_loader"] != self.item_loader.state_dict():
            raise ValueError(
                "The provided `item_loader` state doesn't match the current one. "
                f"Found `{self.item_loader.state_dict()}` instead of `{state['item_loader']}`."
            )
        if state["drop_last"] != self.drop_last:
            raise ValueError(
                "The provided `drop_last` state doesn't match the current one. "
                f"Found `{self.drop_last}` instead of `{state['drop_last']}`."
            )
def _try_create_cache_dir(input_dir: Optional[str]) -> Optional[str]:
    """Create (if needed) and return the local cache directory for ``input_dir``."""
    # The folder name is an MD5 digest of the input path: stable across runs and
    # filesystem-safe (not used for any security purpose).
    dir_hash = hashlib.md5((input_dir or "").encode()).hexdigest()
    on_lightning_cloud = "LIGHTNING_CLUSTER_ID" in os.environ and "LIGHTNING_CLOUD_PROJECT_ID" in os.environ
    base_dir = os.path.join("/cache", "chunks") if on_lightning_cloud else _DEFAULT_CACHE_DIR
    cache_dir = os.path.join(base_dir, dir_hash)
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def _should_replace_path(path: Optional[str]) -> bool:
"""Whether the input path is a special path to be replaced."""
if path is None or path == "":
return True
return path.startswith("/teamspace/datasets/") or path.startswith("/teamspace/s3_connections/")
def is_integer(value: str) -> bool:
    """Return ``True`` if ``value`` can be parsed as a base-10 integer.

    Arguments:
        value: The string (or other object) to test.

    Returns:
        ``True`` when ``int(value)`` succeeds, ``False`` otherwise.
    """
    try:
        int(value)
        return True
    except (TypeError, ValueError):
        # `int()` only raises these two for unparsable input; catching the
        # broad `Exception` would also hide unrelated bugs (e.g. KeyboardInterrupt
        # subclasses excluded, but custom __int__ errors swallowed).
        return False
def _associate_chunks_to_workers(
    num_workers: int, worker_env: _WorkerEnv, chunks_replica: List[int], intervals_replica: List[Any]
) -> Any:
    """Round-robin the replica's chunks (and their intervals) across dataloader workers.

    Returns:
        A pair of dicts keyed by worker index: the chunk ids per worker and the
        matching intervals per worker.
    """
    # NOTE(review): the modulus uses `worker_env.world_size` while the outer loop
    # uses `num_workers`; this assumes the two agree — confirm with callers.
    workers_chunks: Dict[int, List[int]] = {}
    workers_intervals: Dict[int, List[Any]] = {}
    for worker_idx in range(num_workers):
        assigned = [
            (chunk_index, chunk_interval)
            for position, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica))
            if position % worker_env.world_size == worker_idx
        ]
        workers_chunks[worker_idx] = [chunk for chunk, _ in assigned]
        workers_intervals[worker_idx] = [interval for _, interval in assigned]
    return workers_chunks, workers_intervals
def _replay_sampling(num_samples_yielded: int, batch_size: int, num_workers: int) -> Dict[int, int]:
"""This function replays the sampling from the dataloader."""
divisible_num_batches_yielded = num_samples_yielded // (num_workers * batch_size)
indexes = {}
for worker_idx in range(num_workers):
indexes[worker_idx] = divisible_num_batches_yielded * batch_size
num_samples_yielded = num_samples_yielded - (num_workers * divisible_num_batches_yielded * batch_size)
# take care of the reminder
worker_idx = 0 # reset the worker_idx
while True:
if num_samples_yielded >= batch_size:
indexes[worker_idx] += batch_size
worker_idx = (worker_idx + 1) % num_workers
num_samples_yielded -= batch_size
else:
indexes[worker_idx] += num_samples_yielded
break
return indexes
def _replay_chunks_sampling(
workers_intervals: Dict[int, List[Any]], indexes: Dict[int, int]
) -> Tuple[Dict[int, int], Dict[int, int]]:
chunks_index = {}
for worker_idx in range(len(workers_intervals)):
chunks_index[worker_idx] = 0
for worker_idx, intervals in workers_intervals.items():
for interval in intervals:
size = interval[-1] - interval[0]
if indexes[worker_idx] >= size:
indexes[worker_idx] -= size
chunks_index[worker_idx] += 1
return chunks_index, indexes
|
evocodebench_data_117
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
from abc import ABC
from typing import Any, Dict, List
from urllib import parse
from filelock import FileLock, Timeout
from litdata.constants import _INDEX_FILENAME
from litdata.streaming.client import S3Client
class Downloader(ABC):
    """Base class fetching chunk files from a remote directory into a local cache."""

    def __init__(self, remote_dir: str, cache_dir: str, chunks: List[Dict[str, Any]]):
        self._remote_dir = remote_dir
        self._cache_dir = cache_dir
        self._chunks = chunks

    def download_chunk_from_index(self, chunk_index: int) -> None:
        """Download the chunk at position ``chunk_index`` of the chunk list into the cache."""
        filename = self._chunks[chunk_index]["filename"]
        self.download_file(
            os.path.join(self._remote_dir, filename),
            os.path.join(self._cache_dir, filename),
        )

    def download_file(self, remote_chunkpath: str, local_chunkpath: str) -> None:
        # Subclasses override this with the transport-specific logic; the base
        # implementation is a no-op.
        pass
class S3Downloader(Downloader):
    """Downloads chunk files from S3, preferring the `s5cmd` CLI when available."""

    def __init__(self, remote_dir: str, cache_dir: str, chunks: List[Dict[str, Any]]):
        super().__init__(remote_dir, cache_dir, chunks)
        # `os.system` returns the exit status: 0 means the `s5cmd` binary ran.
        self._s5cmd_available = os.system("s5cmd > /dev/null 2>&1") == 0
        if not self._s5cmd_available:
            # Fall back to boto3 through the project's S3 client wrapper.
            self._client = S3Client()

    def download_file(self, remote_filepath: str, local_filepath: str) -> None:
        """Download `remote_filepath` (an `s3://` URI) to `local_filepath`.

        Raises:
            ValueError: If the remote path is not an `s3://` URI.
        """
        obj = parse.urlparse(remote_filepath)
        if obj.scheme != "s3":
            raise ValueError(f"Expected obj.scheme to be `s3`, instead, got {obj.scheme} for remote={remote_filepath}")
        # Skip the download entirely when the file is already cached.
        if os.path.exists(local_filepath):
            return
        try:
            # Serialize concurrent downloads of the same file across processes.
            # The index file gets a short timeout (another process may legitimately
            # hold the lock); chunk files use 0 so a loser gives up immediately.
            with FileLock(local_filepath + ".lock", timeout=3 if obj.path.endswith(_INDEX_FILENAME) else 0):
                if self._s5cmd_available:
                    # NOTE(review): paths are interpolated into a shell command
                    # (`shell=True`); assumed safe for trusted, internally-generated
                    # paths — confirm no user-controlled input reaches here.
                    proc = subprocess.Popen(
                        f"s5cmd cp {remote_filepath} {local_filepath}",
                        shell=True,
                        stdout=subprocess.PIPE,
                    )
                    proc.wait()
                else:
                    from boto3.s3.transfer import TransferConfig
                    extra_args: Dict[str, Any] = {}
                    # try:
                    # with FileLock(local_filepath + ".lock", timeout=1):
                    if not os.path.exists(local_filepath):
                        # Threads are disabled to work around a boto3 deadlock.
                        # Issue: https://github.com/boto/boto3/issues/3113
                        self._client.client.download_file(
                            obj.netloc,
                            obj.path.lstrip("/"),
                            local_filepath,
                            ExtraArgs=extra_args,
                            Config=TransferConfig(use_threads=False),
                        )
        except Timeout:
            # another process is responsible to download that file, continue
            pass
class LocalDownloader(Downloader):
    """Copies chunk files from a local source directory into the cache."""

    def download_file(self, remote_filepath: str, local_filepath: str) -> None:
        """Copy ``remote_filepath`` to ``local_filepath`` unless it is already there."""
        if not os.path.exists(remote_filepath):
            raise FileNotFoundError(f"The provided remote_path doesn't exist: {remote_filepath}")
        # Skip the copy when source and destination are the same path or the
        # destination already exists (another worker may have copied it).
        needs_copy = remote_filepath != local_filepath and not os.path.exists(local_filepath)
        if needs_copy:
            shutil.copy(remote_filepath, local_filepath)
class LocalDownloaderWithCache(LocalDownloader):
    """Like :class:`LocalDownloader`, but strips the ``local:`` scheme prefix first."""

    def download_file(self, remote_filepath: str, local_filepath: str) -> None:
        stripped_path = remote_filepath.replace("local:", "")
        super().download_file(stripped_path, local_filepath)
# Maps URI prefixes to downloader implementations. Iteration order matters:
# the empty-string prefix matches every path, so it must remain last as the
# catch-all local fallback.
_DOWNLOADERS = {"s3://": S3Downloader, "local:": LocalDownloaderWithCache, "": LocalDownloader}
def get_downloader_cls(remote_dir: str, cache_dir: str, chunks: List[Dict[str, Any]]) -> Downloader:
    """Instantiate the downloader whose URI prefix matches ``remote_dir``."""
    remote = str(remote_dir)
    for prefix, downloader_cls in _DOWNLOADERS.items():
        if remote.startswith(prefix):
            return downloader_cls(remote_dir, cache_dir, chunks)
    # Unreachable in practice: the "" prefix above matches everything; kept as a guard.
    raise ValueError(f"The provided `remote_dir` {remote_dir} doesn't have a downloader associated.")
|
evocodebench_data_118
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
    """The base interface for any serializers.
    A Serializer serialize and deserialize to and from bytes.
    """
    @abstractmethod
    def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
        """Convert `data` to bytes, plus an optional format tag (e.g. "no_header_tensor:1")."""
        pass
    @abstractmethod
    def deserialize(self, data: bytes) -> Any:
        """Reconstruct the original item from bytes produced by `serialize`."""
        pass
    @abstractmethod
    def can_serialize(self, data: Any) -> bool:
        """Whether this serializer knows how to handle `data`."""
        pass
    def setup(self, metadata: Any) -> None:
        """Optional hook: configure the serializer from stored metadata (default: no-op)."""
        pass
class PILSerializer(Serializer):
    """The PILSerializer serialize and deserialize PIL Image to and from bytes."""

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Pack as: uint32 header [width, height, mode-length] + mode string + raw pixels."""
        encoded_mode = item.mode.encode("utf-8")
        width, height = item.size
        header = np.array([width, height, len(encoded_mode)], np.uint32)
        return header.tobytes() + encoded_mode + item.tobytes(), None

    @classmethod
    def deserialize(cls, data: bytes) -> Any:
        """Rebuild the PIL image from the header + mode + raw-pixel layout above."""
        header_size = 3 * 4  # three uint32 values
        width, height, mode_size = np.frombuffer(data[:header_size], np.uint32)
        mode_end = header_size + mode_size
        mode = data[header_size:mode_end].decode("utf-8")
        size = width, height
        raw = data[mode_end:]
        return Image.frombytes(mode, size, raw)  # pyright: ignore

    def can_serialize(self, item: Any) -> bool:
        # JPEG images are handled by the dedicated JPEGSerializer instead.
        return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """The JPEGSerializer serialize and deserialize JPEG image to and from bytes."""

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Store the JPEG-encoded bytes, re-encoding non-JPEG PIL images to JPEG.

        Raises:
            ValueError: If a JPEG image has no `filename` attribute set.
            TypeError: If `item` is not a PIL image.
        """
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # read the content of the file directly
                with open(item.filename, "rb") as f:
                    return f.read(), None
            else:
                # No backing file: re-encode the in-memory image as JPEG.
                item_bytes = io.BytesIO()
                item.save(item_bytes, format="JPEG")
                item_bytes = item_bytes.getvalue()
                return item_bytes, None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Re-encode any other PIL image as RGB JPEG (lossy, but uniform on read).
            buff = io.BytesIO()
            item.convert("RGB").save(buff, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")

    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode to a uint8 tensor when torchvision is available, else a PIL image."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        # NOTE(review): this fallback parses `data` with PILSerializer's raw
        # header layout, while `serialize` writes encoded JPEG streams — confirm
        # which writer produced the payloads reaching this path.
        img = PILSerializer.deserialize(data)
        if _TORCH_VISION_AVAILABLE:
            img = pil_to_tensor(img)
        return img

    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
    """Pass-through serializer for raw ``bytes`` payloads."""

    def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
        # Already bytes: no conversion and no format metadata needed.
        return item, None

    def deserialize(self, item: bytes) -> bytes:
        return item

    def can_serialize(self, item: bytes) -> bool:
        return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """The TensorSerializer serialize and deserialize tensor to and from bytes."""

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> compact integer index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Layout: uint32 [dtype index][rank][dim_0..dim_{rank-1}] header, then raw C-order data."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> torch.Tensor:
        """Parse the uint32 header written by `serialize`, then view the payload as a tensor."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Payload starts right after the last dimension. `shape_idx` reuses the
        # loop's final value; rank > 1 is guaranteed by `can_serialize`, so the
        # loop always ran at least once for data written by this serializer.
        tensor = torch.frombuffer(data[8 + 4 * (shape_idx + 1) : len(data)], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # Exact-type check excludes tensor subclasses; rank > 1 only (1-D tensors
        # are handled by NoHeaderTensorSerializer).
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
    """Serialize 1-D tensors as raw bytes; the dtype travels in the format tag, not the payload."""

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> compact integer index used in the tag.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
        self._dtype: Optional[torch.dtype] = None

    def setup(self, data_format: str) -> None:
        # The tag looks like "no_header_tensor:<dtype-index>".
        self._dtype = _TORCH_DTYPES_MAPPING[int(data_format.split(":")[1])]

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        return item.numpy().tobytes(order="C"), f"no_header_tensor:{dtype_indice}"

    def deserialize(self, data: bytes) -> torch.Tensor:
        # `setup` must have provided the dtype before any deserialization.
        assert self._dtype
        return torch.frombuffer(data, dtype=self._dtype)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # Exact-type 1-D tensors only; higher ranks go to TensorSerializer.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """The NumpySerializer serialize and deserialize numpy to and from bytes."""

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> compact integer index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Layout: uint32 [dtype index][rank][dim_0..dim_{rank-1}] header, then raw C-order data."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        """Parse the uint32 header written by `serialize`, then view the payload as an array."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        shape = []
        # deserialize the shape header
        # Note: The start position of the shape value: 8 (dtype + shape length) + 4 * shape_idx
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # deserialize the numpy array bytes
        tensor = np.frombuffer(data[8 + 4 * (shape_idx + 1) : len(data)], dtype=dtype)
        # NOTE(review): `tensor.shape` is a tuple while `shape` is a list, so this
        # comparison is always False and the reshape below always runs — harmless
        # (reshape to the same shape returns a view), but the fast path is dead;
        # consider comparing against `tuple(shape)`.
        if tensor.shape == shape:
            return tensor
        return np.reshape(tensor, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        # Exact-type check excludes ndarray subclasses; rank > 1 only (1-D arrays
        # are handled by NoHeaderNumpySerializer).
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """Serialize 1-D numpy arrays as raw bytes; the dtype travels in the format tag, not the payload."""

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> compact integer index used in the tag.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
        self._dtype: Optional[np.dtype] = None

    def setup(self, data_format: str) -> None:
        # The tag looks like "no_header_numpy:<dtype-index>".
        self._dtype = _NUMPY_DTYPES_MAPPING[int(data_format.split(":")[1])]

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        dtype_indice: int = self._dtype_to_indices[item.dtype]
        return item.tobytes(order="C"), f"no_header_numpy:{dtype_indice}"

    def deserialize(self, data: bytes) -> np.ndarray:
        # `setup` must have provided the dtype before any deserialization.
        assert self._dtype
        return np.frombuffer(data, dtype=self._dtype)

    def can_serialize(self, item: np.ndarray) -> bool:
        # Exact-type 1-D arrays only; higher ranks go to NumpySerializer.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
    """Fallback serializer: pickle arbitrary Python objects to and from bytes."""

    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None

    def deserialize(self, data: bytes) -> Any:
        # NOTE(review): unpickling is only safe on trusted, locally-written chunks;
        # never feed this untrusted data.
        return pickle.loads(data)

    def can_serialize(self, _: Any) -> bool:
        # Accepts everything — registered last so specific serializers win first.
        return True
class FileSerializer(Serializer):
    """Store a file's raw contents, tagging the payload with the file extension."""

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        _, file_extension = os.path.splitext(filepath)
        extension_tag = file_extension.replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"file:{extension_tag}"

    def deserialize(self, data: bytes) -> Any:
        # The raw bytes are returned as-is; interpretation is up to the caller.
        return data

    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store video files as raw bytes and decode them with torchvision on read."""

    # File extensions accepted by `can_serialize`.
    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        """Read the video file's raw bytes, tagging the payload with its extension."""
        _, file_extension = os.path.splitext(filepath)
        with open(filepath, "rb") as f:
            file_extension = file_extension.replace(".", "").lower()
            return f.read(), f"video:{file_extension}"

    def deserialize(self, data: bytes) -> Any:
        """Decode the stored bytes via `torchvision.io.read_video`.

        Raises:
            ModuleNotFoundError: If torchvision or av is not installed.
        """
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io
        # `read_video` needs a file on disk, so round-trip through a temp file.
        # NOTE(review): the temp file is always named `file.mp4` regardless of the
        # original extension — presumably the decoder sniffs the container; confirm.
        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")

    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data) and any(data.endswith(ext) for ext in self._EXTENSIONS)
class StringSerializer(Serializer):
    """Encode/decode Python strings as UTF-8 bytes."""

    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None

    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")

    def can_serialize(self, data: str) -> bool:
        # Strings that point at an existing file are handled by the file serializers.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Store a scalar as a fixed-width numpy value of the configured dtype."""

    def __init__(self, dtype: type) -> None:
        self.dtype = dtype
        # Number of bytes a single scalar of this dtype occupies on disk.
        self.size = self.dtype().nbytes

    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        # Convert to the target dtype, then dump the raw bytes; no format tag needed.
        return self.dtype(obj).tobytes(), None

    def deserialize(self, data: bytes) -> Any:
        return np.frombuffer(data, self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Store Python ints as fixed-width int64 scalars."""

    def __init__(self) -> None:
        super().__init__(np.int64)

    def can_serialize(self, data: int) -> bool:
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Store Python floats as fixed-width float64 scalars."""

    def __init__(self) -> None:
        super().__init__(np.float64)

    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Default serializer registry. Order matters: more specific serializers precede
# generic ones (e.g. 1-D "no_header" variants before the shaped ones) and the
# catch-all `pickle` entry — whose `can_serialize` always returns True — stays last.
# NOTE(review): first-match selection is inferred from this ordering and the
# per-serializer `can_serialize` guards; confirm against the consuming Cache code.
_SERIALIZERS = OrderedDict(**{
    "str": StringSerializer(),
    "int": IntegerSerializer(),
    "float": FloatSerializer(),
    "video": VideoSerializer(),
    "tif": FileSerializer(),
    "file": FileSerializer(),
    "pil": PILSerializer(),
    "jpeg": JPEGSerializer(),
    "bytes": BytesSerializer(),
    "no_header_numpy": NoHeaderNumpySerializer(),
    "numpy": NumpySerializer(),
    "no_header_tensor": NoHeaderTensorSerializer(),
    "tensor": TensorSerializer(),
    "pickle": PickleSerializer(),
})
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Merge user-provided serializers with the defaults.

    User entries come first (so they are tried before the defaults) and take
    precedence when they share a key with a default serializer — previously
    `update(_SERIALIZERS)` silently clobbered same-named user serializers.

    Arguments:
        serializers: Optional mapping of name -> serializer supplied by the user.

    Returns:
        A fresh ordered mapping containing the user serializers followed by the
        default ones; the shared `_SERIALIZERS` registry is never returned
        directly, so callers may safely mutate the result.
    """
    merged: Dict[str, Serializer] = OrderedDict(serializers or {})
    for name, serializer in _SERIALIZERS.items():
        # Keep the user's serializer if they registered one under the same name.
        merged.setdefault(name, serializer)
    return merged
|
evocodebench_data_119
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
    """The base interface for any serializers.
    A Serializer serialize and deserialize to and from bytes.
    """
    @abstractmethod
    def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
        """Convert ``data`` to bytes; also return an optional data-format string."""
        pass
    @abstractmethod
    def deserialize(self, data: bytes) -> Any:
        """Reconstruct the original item from its serialized bytes."""
        pass
    @abstractmethod
    def can_serialize(self, data: Any) -> bool:
        """Return True when this serializer knows how to handle ``data``."""
        pass
    def setup(self, metadata: Any) -> None:
        """Optional hook to configure the serializer from stored metadata (no-op by default)."""
        pass
class PILSerializer(Serializer):
    """Serialize and deserialize PIL images as raw pixel bytes with a small header."""

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        # Payload layout: three uint32 values [width, height, mode length],
        # then the UTF-8 encoded mode string, then the raw pixel data.
        encoded_mode = item.mode.encode("utf-8")
        header = np.array([item.size[0], item.size[1], len(encoded_mode)], np.uint32)
        return header.tobytes() + encoded_mode + item.tobytes(), None

    @classmethod
    def deserialize(cls, data: bytes) -> Any:
        header_end = 3 * 4  # three uint32 header fields
        width, height, mode_size = np.frombuffer(data[:header_end], np.uint32)
        mode_end = header_end + mode_size
        mode = data[header_end:mode_end].decode("utf-8")
        pixels = data[mode_end:]
        return Image.frombytes(mode, (width, height), pixels)  # pyright: ignore

    def can_serialize(self, item: Any) -> bool:
        # JPEG files are handled by the dedicated JPEGSerializer.
        return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize and deserialize JPEG images to and from raw encoded bytes."""

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Return the encoded JPEG bytes for ``item`` (no extra header is added)."""
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # read the content of the file directly
                with open(item.filename, "rb") as f:
                    return f.read(), None
            else:
                # No backing file on disk: re-encode the in-memory image as JPEG.
                item_bytes = io.BytesIO()
                item.save(item_bytes, format="JPEG")
                item_bytes = item_bytes.getvalue()
                return item_bytes, None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Other PIL image types are converted to RGB and re-encoded as JPEG.
            buff = io.BytesIO()
            item.convert("RGB").save(buff, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")

    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode encoded image bytes, preferring torchvision's fast JPEG decoder."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        # `data` holds raw encoded image-file bytes (see `serialize`), so decode it
        # with PIL directly. The previous fallback went through
        # `PILSerializer.deserialize`, which expects PILSerializer's own
        # width/height/mode header and would misread encoded file bytes.
        img = Image.open(io.BytesIO(data))
        if _TORCH_VISION_AVAILABLE:
            img = pil_to_tensor(img)
        return img

    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
    """The BytesSerializer serialize and deserialize raw bytes (stored as-is)."""
    def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
        # Bytes are already in wire format; pass them through unchanged.
        return item, None
    def deserialize(self, item: bytes) -> bytes:
        return item
    def can_serialize(self, item: bytes) -> bool:
        return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """Serialize and deserialize multi-dimensional tensors with a self-describing header.

    Payload layout: uint32 dtype index, uint32 ndim, one uint32 per dimension,
    then the raw C-order tensor bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping from torch dtype to its stable on-disk index.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> torch.Tensor:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Start of the raw tensor bytes: the header is 8 bytes plus 4 bytes per
        # dimension. Using `shape_size` (rather than the leaked loop variable)
        # also avoids a NameError on a zero-dimensional payload.
        tensor = torch.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # 1-D tensors are handled by NoHeaderTensorSerializer instead.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
    """Serialize and deserialize 1-D tensors without a per-item header.

    The dtype travels in the data-format string ("no_header_tensor:<idx>")
    instead of being embedded in every payload, and is restored once via `setup`.
    """

    def __init__(self) -> None:
        super().__init__()
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
        self._dtype: Optional[torch.dtype] = None

    def setup(self, data_format: str) -> None:
        # The format string looks like "no_header_tensor:<dtype index>".
        index = int(data_format.split(":")[1])
        self._dtype = _TORCH_DTYPES_MAPPING[index]

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        index = self._dtype_to_indices[item.dtype]
        return item.numpy().tobytes(order="C"), f"no_header_tensor:{index}"

    def deserialize(self, data: bytes) -> torch.Tensor:
        assert self._dtype
        return torch.frombuffer(data, dtype=self._dtype)

    def can_serialize(self, item: torch.Tensor) -> bool:
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """Serialize and deserialize multi-dimensional numpy arrays with a header.

    Payload layout: uint32 dtype index, uint32 ndim, one uint32 per dimension,
    then the raw C-order array bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping from numpy dtype to its stable on-disk index.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # deserialize the shape header
        # Note: The start position of the shape value: 8 (dtype + shape length) + 4 * shape_idx
        # `shape` must be a tuple: `ndarray.shape` is a tuple, and the previous
        # list comparison (tuple == list) was always False, forcing a redundant reshape.
        shape = tuple(
            np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item()
            for shape_idx in range(shape_size)
        )
        # deserialize the numpy array bytes. The header is 8 bytes plus 4 bytes
        # per dimension; using `shape_size` (rather than a leaked loop variable)
        # also avoids a NameError on a zero-dimensional payload.
        tensor = np.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        if tensor.shape == shape:
            return tensor
        return np.reshape(tensor, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        # 1-D arrays are handled by NoHeaderNumpySerializer instead.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """Serialize and deserialize 1-D numpy arrays without a per-item header.

    The dtype is carried in the data-format string ("no_header_numpy:<idx>")
    and restored once through `setup` instead of being written into every payload.
    """

    def __init__(self) -> None:
        super().__init__()
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
        self._dtype: Optional[np.dtype] = None

    def setup(self, data_format: str) -> None:
        # The format string looks like "no_header_numpy:<dtype index>".
        index = int(data_format.split(":")[1])
        self._dtype = _NUMPY_DTYPES_MAPPING[index]

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        index: int = self._dtype_to_indices[item.dtype]
        return item.tobytes(order="C"), f"no_header_numpy:{index}"

    def deserialize(self, data: bytes) -> np.ndarray:
        assert self._dtype
        return np.frombuffer(data, dtype=self._dtype)

    def can_serialize(self, item: np.ndarray) -> bool:
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
    """The PickleSerializer serialize and deserialize python objects to and from bytes."""
    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None
    def deserialize(self, data: bytes) -> Any:
        # SECURITY: `pickle.loads` can execute arbitrary code embedded in the payload.
        # Only deserialize data that comes from a trusted source.
        return pickle.loads(data)
    def can_serialize(self, _: Any) -> bool:
        # Catch-all fallback: pickle handles (almost) any Python object.
        return True
class FileSerializer(Serializer):
    """Store an on-disk file's raw content, tagging it with its extension."""

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        # Data format is "file:<lowercased extension without the dot>".
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"file:{extension}"

    def deserialize(self, data: bytes) -> Any:
        # Returned as-is: callers decide how to interpret the raw file bytes.
        return data

    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Serialize supported video files as raw bytes; decode them via torchvision."""
    # File extensions accepted by `can_serialize`.
    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")
    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        """Read the video file and tag the payload with "video:<extension>"."""
        _, file_extension = os.path.splitext(filepath)
        with open(filepath, "rb") as f:
            file_extension = file_extension.replace(".", "").lower()
            return f.read(), f"video:{file_extension}"
    def deserialize(self, data: bytes) -> Any:
        """Decode the stored bytes with `torchvision.io.read_video` (requires torchvision + av)."""
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io
        # `read_video` needs a real file path, so round-trip through a temp file.
        # NOTE(review): the temp file is always named "file.mp4" regardless of the
        # original container format — confirm torchvision sniffs content, not suffix.
        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")
    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data) and any(data.endswith(ext) for ext in self._EXTENSIONS)
class StringSerializer(Serializer):
    """Store Python strings as UTF-8 encoded bytes."""
    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None
    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")
    def can_serialize(self, data: str) -> bool:
        # Strings naming an existing file are left to the file/video serializers.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Shared helper for storing a single numeric scalar of a fixed numpy dtype."""

    def __init__(self, dtype: type) -> None:
        self.dtype = dtype
        # Byte width of one scalar of this dtype (e.g. 8 for int64/float64).
        self.size = self.dtype().nbytes

    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        # Cast to the target dtype, then emit its raw native-endian bytes.
        scalar = self.dtype(obj)
        return scalar.tobytes(), None

    def deserialize(self, data: bytes) -> Any:
        return np.frombuffer(data, self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Store a Python int as a fixed-width numpy int64 scalar."""
    def __init__(self) -> None:
        super().__init__(np.int64)
    def can_serialize(self, data: int) -> bool:
        # NOTE(review): `bool` subclasses `int`, so True/False also match here, and
        # ints outside the int64 range will fail the numpy cast — confirm intended.
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Store a Python float as a fixed-width numpy float64 scalar."""
    def __init__(self) -> None:
        super().__init__(np.float64)
    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Registry of the built-in serializers, keyed by data-format name.
# NOTE(review): insertion order looks deliberate (e.g. "file" before "pil"),
# presumably because serializers are probed in order via `can_serialize`
# elsewhere — confirm before reordering.
_SERIALIZERS = OrderedDict(**{
    "str": StringSerializer(),
    "int": IntegerSerializer(),
    "float": FloatSerializer(),
    "video": VideoSerializer(),
    "tif": FileSerializer(),
    "file": FileSerializer(),
    "pil": PILSerializer(),
    "jpeg": JPEGSerializer(),
    "bytes": BytesSerializer(),
    "no_header_numpy": NoHeaderNumpySerializer(),
    "numpy": NumpySerializer(),
    "no_header_tensor": NoHeaderTensorSerializer(),
    "tensor": TensorSerializer(),
    "pickle": PickleSerializer(),
})
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Merge user-provided serializers with the built-in registry.

    On duplicate keys the built-ins win, because `_SERIALIZERS` is applied last;
    user entries keep their insertion order ahead of the built-ins. When no
    custom serializers are given, the shared module-level registry itself is
    returned (not a copy).
    """
    if not serializers:
        return _SERIALIZERS
    merged = OrderedDict(**serializers)
    merged.update(_SERIALIZERS)
    return merged
|
evocodebench_data_120
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
"""The base interface for any serializers.
A Serializer serialize and deserialize to and from bytes.
"""
@abstractmethod
def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
pass
@abstractmethod
def deserialize(self, data: bytes) -> Any:
pass
@abstractmethod
def can_serialize(self, data: Any) -> bool:
pass
def setup(self, metadata: Any) -> None:
pass
class PILSerializer(Serializer):
"""The PILSerializer serialize and deserialize PIL Image to and from bytes."""
def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
mode = item.mode.encode("utf-8")
width, height = item.size
raw = item.tobytes()
ints = np.array([width, height, len(mode)], np.uint32)
return ints.tobytes() + mode + raw, None
@classmethod
def deserialize(cls, data: bytes) -> Any:
idx = 3 * 4
width, height, mode_size = np.frombuffer(data[:idx], np.uint32)
idx2 = idx + mode_size
mode = data[idx:idx2].decode("utf-8")
size = width, height
raw = data[idx2:]
return Image.frombytes(mode, size, raw) # pyright: ignore
def can_serialize(self, item: Any) -> bool:
return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize and deserialize JPEG images to and from raw encoded bytes."""

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Return the encoded JPEG bytes for ``item`` (no extra header is added)."""
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # read the content of the file directly
                with open(item.filename, "rb") as f:
                    return f.read(), None
            else:
                # No backing file on disk: re-encode the in-memory image as JPEG.
                item_bytes = io.BytesIO()
                item.save(item_bytes, format="JPEG")
                item_bytes = item_bytes.getvalue()
                return item_bytes, None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Other PIL image types are converted to RGB and re-encoded as JPEG.
            buff = io.BytesIO()
            item.convert("RGB").save(buff, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")

    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode encoded image bytes, preferring torchvision's fast JPEG decoder."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        # `data` holds raw encoded image-file bytes (see `serialize`), so decode it
        # with PIL directly. The previous fallback went through
        # `PILSerializer.deserialize`, which expects PILSerializer's own
        # width/height/mode header and would misread encoded file bytes.
        img = Image.open(io.BytesIO(data))
        if _TORCH_VISION_AVAILABLE:
            img = pil_to_tensor(img)
        return img

    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
"""The BytesSerializer serialize and deserialize integer to and from bytes."""
def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
return item, None
def deserialize(self, item: bytes) -> bytes:
return item
def can_serialize(self, item: bytes) -> bool:
return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """Serialize and deserialize multi-dimensional tensors with a self-describing header.

    Payload layout: uint32 dtype index, uint32 ndim, one uint32 per dimension,
    then the raw C-order tensor bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping from torch dtype to its stable on-disk index.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> torch.Tensor:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Start of the raw tensor bytes: the header is 8 bytes plus 4 bytes per
        # dimension. Using `shape_size` (rather than the leaked loop variable)
        # also avoids a NameError on a zero-dimensional payload.
        tensor = torch.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # 1-D tensors are handled by NoHeaderTensorSerializer instead.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
"""The TensorSerializer serialize and deserialize tensor to and from bytes."""
def __init__(self) -> None:
super().__init__()
self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
self._dtype: Optional[torch.dtype] = None
def setup(self, data_format: str) -> None:
self._dtype = _TORCH_DTYPES_MAPPING[int(data_format.split(":")[1])]
def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
dtype_indice = self._dtype_to_indices[item.dtype]
return item.numpy().tobytes(order="C"), f"no_header_tensor:{dtype_indice}"
def deserialize(self, data: bytes) -> torch.Tensor:
assert self._dtype
return torch.frombuffer(data, dtype=self._dtype)
def can_serialize(self, item: torch.Tensor) -> bool:
return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """Serialize and deserialize multi-dimensional numpy arrays with a header.

    Payload layout: uint32 dtype index, uint32 ndim, one uint32 per dimension,
    then the raw C-order array bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping from numpy dtype to its stable on-disk index.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # deserialize the shape header
        # Note: The start position of the shape value: 8 (dtype + shape length) + 4 * shape_idx
        # `shape` must be a tuple: `ndarray.shape` is a tuple, and the previous
        # list comparison (tuple == list) was always False, forcing a redundant reshape.
        shape = tuple(
            np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item()
            for shape_idx in range(shape_size)
        )
        # deserialize the numpy array bytes. The header is 8 bytes plus 4 bytes
        # per dimension; using `shape_size` (rather than a leaked loop variable)
        # also avoids a NameError on a zero-dimensional payload.
        tensor = np.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        if tensor.shape == shape:
            return tensor
        return np.reshape(tensor, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        # 1-D arrays are handled by NoHeaderNumpySerializer instead.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
"""The NoHeaderNumpySerializer serialize and deserialize numpy to and from bytes."""
def __init__(self) -> None:
super().__init__()
self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
self._dtype: Optional[np.dtype] = None
def setup(self, data_format: str) -> None:
self._dtype = _NUMPY_DTYPES_MAPPING[int(data_format.split(":")[1])]
def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
dtype_indice: int = self._dtype_to_indices[item.dtype]
return item.tobytes(order="C"), f"no_header_numpy:{dtype_indice}"
def deserialize(self, data: bytes) -> np.ndarray:
assert self._dtype
return np.frombuffer(data, dtype=self._dtype)
def can_serialize(self, item: np.ndarray) -> bool:
return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
"""The PickleSerializer serialize and deserialize python objects to and from bytes."""
def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
return pickle.dumps(item), None
def deserialize(self, data: bytes) -> Any:
return pickle.loads(data)
def can_serialize(self, _: Any) -> bool:
return True
class FileSerializer(Serializer):
def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
_, file_extension = os.path.splitext(filepath)
with open(filepath, "rb") as f:
file_extension = file_extension.replace(".", "").lower()
return f.read(), f"file:{file_extension}"
def deserialize(self, data: bytes) -> Any:
return data
def can_serialize(self, data: Any) -> bool:
return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
_EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")
def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
_, file_extension = os.path.splitext(filepath)
with open(filepath, "rb") as f:
file_extension = file_extension.replace(".", "").lower()
return f.read(), f"video:{file_extension}"
def deserialize(self, data: bytes) -> Any:
if not _TORCH_VISION_AVAILABLE:
raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
if not _AV_AVAILABLE:
raise ModuleNotFoundError("av is required. Run `pip install av`")
# Add support for a better deserialization mechanism for videos
# TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
import torchvision.io
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, "file.mp4")
with open(fname, "wb") as stream:
stream.write(data)
return torchvision.io.read_video(fname, pts_unit="sec")
def can_serialize(self, data: Any) -> bool:
return isinstance(data, str) and os.path.isfile(data) and any(data.endswith(ext) for ext in self._EXTENSIONS)
class StringSerializer(Serializer):
def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
return obj.encode("utf-8"), None
def deserialize(self, data: bytes) -> str:
return data.decode("utf-8")
def can_serialize(self, data: str) -> bool:
return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
"""Store scalar."""
def __init__(self, dtype: type) -> None:
self.dtype = dtype
self.size = self.dtype().nbytes
def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
return self.dtype(obj).tobytes(), None
def deserialize(self, data: bytes) -> Any:
return np.frombuffer(data, self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
def __init__(self) -> None:
super().__init__(np.int64)
def can_serialize(self, data: int) -> bool:
return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
def __init__(self) -> None:
super().__init__(np.float64)
def can_serialize(self, data: float) -> bool:
return isinstance(data, float)
_SERIALIZERS = OrderedDict(**{
"str": StringSerializer(),
"int": IntegerSerializer(),
"float": FloatSerializer(),
"video": VideoSerializer(),
"tif": FileSerializer(),
"file": FileSerializer(),
"pil": PILSerializer(),
"jpeg": JPEGSerializer(),
"bytes": BytesSerializer(),
"no_header_numpy": NoHeaderNumpySerializer(),
"numpy": NumpySerializer(),
"no_header_tensor": NoHeaderTensorSerializer(),
"tensor": TensorSerializer(),
"pickle": PickleSerializer(),
})
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
if serializers:
serializers = OrderedDict(**serializers)
serializers.update(_SERIALIZERS)
else:
serializers = _SERIALIZERS
return serializers
|
evocodebench_data_121
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
"""The base interface for any serializers.
A Serializer serialize and deserialize to and from bytes.
"""
@abstractmethod
def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
pass
@abstractmethod
def deserialize(self, data: bytes) -> Any:
pass
@abstractmethod
def can_serialize(self, data: Any) -> bool:
pass
def setup(self, metadata: Any) -> None:
pass
class PILSerializer(Serializer):
"""The PILSerializer serialize and deserialize PIL Image to and from bytes."""
def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
mode = item.mode.encode("utf-8")
width, height = item.size
raw = item.tobytes()
ints = np.array([width, height, len(mode)], np.uint32)
return ints.tobytes() + mode + raw, None
@classmethod
def deserialize(cls, data: bytes) -> Any:
idx = 3 * 4
width, height, mode_size = np.frombuffer(data[:idx], np.uint32)
idx2 = idx + mode_size
mode = data[idx:idx2].decode("utf-8")
size = width, height
raw = data[idx2:]
return Image.frombytes(mode, size, raw) # pyright: ignore
def can_serialize(self, item: Any) -> bool:
return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize and deserialize JPEG images to and from raw encoded bytes."""

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Return the encoded JPEG bytes for ``item`` (no extra header is added)."""
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # read the content of the file directly
                with open(item.filename, "rb") as f:
                    return f.read(), None
            else:
                # No backing file on disk: re-encode the in-memory image as JPEG.
                item_bytes = io.BytesIO()
                item.save(item_bytes, format="JPEG")
                item_bytes = item_bytes.getvalue()
                return item_bytes, None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Other PIL image types are converted to RGB and re-encoded as JPEG.
            buff = io.BytesIO()
            item.convert("RGB").save(buff, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")

    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode encoded image bytes, preferring torchvision's fast JPEG decoder."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        # `data` holds raw encoded image-file bytes (see `serialize`), so decode it
        # with PIL directly. The previous fallback went through
        # `PILSerializer.deserialize`, which expects PILSerializer's own
        # width/height/mode header and would misread encoded file bytes.
        img = Image.open(io.BytesIO(data))
        if _TORCH_VISION_AVAILABLE:
            img = pil_to_tensor(img)
        return img

    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
"""The BytesSerializer serialize and deserialize integer to and from bytes."""
def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
return item, None
def deserialize(self, item: bytes) -> bytes:
return item
def can_serialize(self, item: bytes) -> bool:
return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """Serialize and deserialize multi-dimensional tensors with a self-describing header.

    Payload layout: uint32 dtype index, uint32 ndim, one uint32 per dimension,
    then the raw C-order tensor bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping from torch dtype to its stable on-disk index.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> torch.Tensor:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Start of the raw tensor bytes: the header is 8 bytes plus 4 bytes per
        # dimension. Using `shape_size` (rather than the leaked loop variable)
        # also avoids a NameError on a zero-dimensional payload.
        tensor = torch.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # 1-D tensors are handled by NoHeaderTensorSerializer instead.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
"""The TensorSerializer serialize and deserialize tensor to and from bytes."""
def __init__(self) -> None:
super().__init__()
self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
self._dtype: Optional[torch.dtype] = None
def setup(self, data_format: str) -> None:
self._dtype = _TORCH_DTYPES_MAPPING[int(data_format.split(":")[1])]
def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
dtype_indice = self._dtype_to_indices[item.dtype]
return item.numpy().tobytes(order="C"), f"no_header_tensor:{dtype_indice}"
def deserialize(self, data: bytes) -> torch.Tensor:
assert self._dtype
return torch.frombuffer(data, dtype=self._dtype)
def can_serialize(self, item: torch.Tensor) -> bool:
return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """Serialize and deserialize multi-dimensional numpy arrays with a header.

    Payload layout: uint32 dtype index, uint32 ndim, one uint32 per dimension,
    then the raw C-order array bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping from numpy dtype to its stable on-disk index.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # deserialize the shape header
        # Note: The start position of the shape value: 8 (dtype + shape length) + 4 * shape_idx
        # `shape` must be a tuple: `ndarray.shape` is a tuple, and the previous
        # list comparison (tuple == list) was always False, forcing a redundant reshape.
        shape = tuple(
            np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item()
            for shape_idx in range(shape_size)
        )
        # deserialize the numpy array bytes. The header is 8 bytes plus 4 bytes
        # per dimension; using `shape_size` (rather than a leaked loop variable)
        # also avoids a NameError on a zero-dimensional payload.
        tensor = np.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        if tensor.shape == shape:
            return tensor
        return np.reshape(tensor, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        # 1-D arrays are handled by NoHeaderNumpySerializer instead.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """Serialize 1-D numpy arrays without a shape/dtype header.

    The dtype travels in the data-format tag (``no_header_numpy:<idx>``) and
    is restored through ``setup`` before deserialization.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> index used in the data-format tag.
        self._dtype_to_indices = {value: key for key, value in _NUMPY_DTYPES_MAPPING.items()}
        self._dtype: Optional[np.dtype] = None

    def setup(self, data_format: str) -> None:
        _, _, index = data_format.partition(":")
        self._dtype = _NUMPY_DTYPES_MAPPING[int(index)]

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        index: int = self._dtype_to_indices[item.dtype]
        return item.tobytes(order="C"), f"no_header_numpy:{index}"

    def deserialize(self, data: bytes) -> np.ndarray:
        dtype = self._dtype
        assert dtype
        return np.frombuffer(data, dtype=dtype)

    def can_serialize(self, item: np.ndarray) -> bool:
        if not isinstance(item, np.ndarray) or type(item) != np.ndarray:
            return False
        return len(item.shape) == 1
class PickleSerializer(Serializer):
    """Catch-all serializer backed by :mod:`pickle`.

    SECURITY NOTE(review): ``pickle.loads`` can execute arbitrary code; only
    deserialize data from trusted sources.
    """

    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None

    def deserialize(self, data: bytes) -> Any:
        return pickle.loads(data)

    def can_serialize(self, _: Any) -> bool:
        # Accepts anything, so it is registered after all specific serializers.
        return True
class FileSerializer(Serializer):
    """Store an on-disk file as raw bytes.

    The data-format tag records the lower-cased extension (dot removed);
    ``deserialize`` hands the stored bytes straight back.
    """

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"file:{extension}"

    def deserialize(self, data: bytes) -> Any:
        return data

    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store video files as raw bytes and decode them with torchvision.

    Deserialization writes the payload to a temporary mp4 file and decodes it
    with ``torchvision.io.read_video`` (which requires the ``av`` backend).
    """

    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"video:{extension}"

    def deserialize(self, data: bytes) -> Any:
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io

        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")

    def can_serialize(self, data: Any) -> bool:
        if not isinstance(data, str) or not os.path.isfile(data):
            return False
        # str.endswith accepts a tuple of suffixes.
        return data.endswith(self._EXTENSIONS)
class StringSerializer(Serializer):
    """Serialize plain text as UTF-8 bytes."""

    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None

    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")

    def can_serialize(self, data: str) -> bool:
        # Strings naming an existing file are handled by the file serializers.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Serialize a single scalar as a fixed-width numpy value."""

    def __init__(self, dtype: type) -> None:
        # Concrete numpy scalar type (e.g. np.int64) and its byte width.
        self.dtype = dtype
        self.size = self.dtype().nbytes

    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        return self.dtype(obj).tobytes(), None

    def deserialize(self, data: bytes) -> Any:
        # Returns a numpy scalar of self.dtype.
        return np.frombuffer(data, self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Serialize Python ints as 8-byte ``np.int64`` values."""

    def __init__(self) -> None:
        super().__init__(np.int64)

    def can_serialize(self, data: int) -> bool:
        # NOTE(review): bool is a subclass of int, so True/False also match.
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Serialize Python floats as 8-byte ``np.float64`` values."""

    def __init__(self) -> None:
        super().__init__(np.float64)

    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Default serializer registry. PickleSerializer accepts any object, so it is
# registered last; presumably lookup iterates this mapping in order, meaning
# the more specific serializers must come first (verify against the caller).
_SERIALIZERS = OrderedDict(
    [
        ("str", StringSerializer()),
        ("int", IntegerSerializer()),
        ("float", FloatSerializer()),
        ("video", VideoSerializer()),
        ("tif", FileSerializer()),
        ("file", FileSerializer()),
        ("pil", PILSerializer()),
        ("jpeg", JPEGSerializer()),
        ("bytes", BytesSerializer()),
        ("no_header_numpy", NoHeaderNumpySerializer()),
        ("numpy", NumpySerializer()),
        ("no_header_tensor", NoHeaderTensorSerializer()),
        ("tensor", TensorSerializer()),
        ("pickle", PickleSerializer()),
    ]
)
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Return the serializer registry, with user serializers taking priority.

    Custom serializers come first (their position decides lookup priority) and
    the defaults are appended afterwards.

    Args:
        serializers: Optional user-provided serializers, keyed by name.

    Returns:
        An ordered mapping of serializer name to serializer instance.
    """
    if serializers:
        combined = OrderedDict(**serializers)
        # Fix: use setdefault so a user-provided serializer is not silently
        # replaced by the default registered under the same key.
        for name, serializer in _SERIALIZERS.items():
            combined.setdefault(name, serializer)
        return combined
    # Return a copy so callers cannot mutate the shared default registry
    # (the serializer instances themselves are still shared).
    return OrderedDict(_SERIALIZERS)
|
evocodebench_data_122
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
    """Abstract contract for converting items to and from raw bytes.

    Concrete serializers implement the ``serialize``/``deserialize`` pair plus
    a ``can_serialize`` predicate used to pick a serializer for an item.
    """

    @abstractmethod
    def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
        """Encode ``data`` into bytes plus an optional data-format tag."""
        ...

    @abstractmethod
    def deserialize(self, data: bytes) -> Any:
        """Decode ``data`` back into an item."""
        ...

    @abstractmethod
    def can_serialize(self, data: Any) -> bool:
        """Return True when this serializer can handle ``data``."""
        ...

    def setup(self, metadata: Any) -> None:
        """Optional hook to configure the serializer from stored metadata; no-op by default."""
class PILSerializer(Serializer):
    """Serialize and deserialize PIL images via a small binary header.

    Layout: three uint32 values (width, height, mode length), then the mode
    string, then the raw pixel bytes.
    """

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        encoded_mode = item.mode.encode("utf-8")
        width, height = item.size
        header = np.array([width, height, len(encoded_mode)], np.uint32)
        return header.tobytes() + encoded_mode + item.tobytes(), None

    @classmethod
    def deserialize(cls, data: bytes) -> Any:
        header_end = 3 * 4  # three uint32 fields
        width, height, mode_size = np.frombuffer(data[:header_end], np.uint32)
        mode_end = header_end + mode_size
        mode = data[header_end:mode_end].decode("utf-8")
        return Image.frombytes(mode, (width, height), data[mode_end:])  # pyright: ignore

    def can_serialize(self, item: Any) -> bool:
        # JPEG files are routed to the dedicated JPEGSerializer instead.
        return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize images to JPEG-encoded bytes and decode them back.

    Existing JPEG files are stored verbatim; other PIL image types are
    re-encoded as JPEG. Deserialization prefers torchvision's ``decode_jpeg``
    (returning a uint8 tensor) and falls back to PIL.
    """
    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # read the content of the file directly
                with open(item.filename, "rb") as f:
                    return f.read(), None
            else:
                # In-memory JPEG with no backing file: re-encode via a buffer.
                item_bytes = io.BytesIO()
                item.save(item_bytes, format="JPEG")
                item_bytes = item_bytes.getvalue()
                return item_bytes, None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Transcode other supported image formats to JPEG (lossy).
            buff = io.BytesIO()
            item.convert("RGB").save(buff, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")
    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        if _TORCH_VISION_AVAILABLE:
            # Fast path: decode with torchvision, returning a uint8 tensor.
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        img = PILSerializer.deserialize(data)
        if _TORCH_VISION_AVAILABLE:
            # Keep the return type consistent (tensor) when torchvision exists.
            img = pil_to_tensor(img)
        return img
    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
    """Pass-through serializer for items that are already raw ``bytes``."""

    def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
        # Nothing to encode and no data-format tag required.
        return item, None

    def deserialize(self, item: bytes) -> bytes:
        return item

    def can_serialize(self, item: bytes) -> bool:
        return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """Serialize multi-dimensional tensors with a self-describing header.

    Byte layout: uint32 dtype index, uint32 ndim, ``ndim`` uint32 dims, then
    the raw C-ordered tensor bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> torch.Tensor:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Fix: derive the payload offset from shape_size rather than the stale
        # loop variable, which raised NameError when shape_size == 0.
        tensor = torch.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # 1-D tensors go through NoHeaderTensorSerializer instead.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
    """Serialize 1-D tensors without a shape/dtype header.

    The dtype travels in the data-format tag (``no_header_tensor:<idx>``) and
    is restored through ``setup`` before deserialization.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> index used in the data-format tag.
        self._dtype_to_indices = {value: key for key, value in _TORCH_DTYPES_MAPPING.items()}
        self._dtype: Optional[torch.dtype] = None

    def setup(self, data_format: str) -> None:
        _, _, index = data_format.partition(":")
        self._dtype = _TORCH_DTYPES_MAPPING[int(index)]

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        index = self._dtype_to_indices[item.dtype]
        return item.numpy().tobytes(order="C"), f"no_header_tensor:{index}"

    def deserialize(self, data: bytes) -> torch.Tensor:
        dtype = self._dtype
        assert dtype
        return torch.frombuffer(data, dtype=dtype)

    def can_serialize(self, item: torch.Tensor) -> bool:
        if not isinstance(item, torch.Tensor) or type(item) != torch.Tensor:
            return False
        return len(item.shape) == 1
class NumpySerializer(Serializer):
    """Serialize multi-dimensional numpy arrays with a self-describing header.

    Byte layout: uint32 dtype index, uint32 ndim, ``ndim`` uint32 dims, then
    the raw C-ordered array bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Header: 4 bytes dtype index + 4 bytes ndim + 4 bytes per dimension.
        # Fix: derive the payload offset from shape_size rather than the stale
        # loop variable, which raised NameError when shape_size == 0.
        payload_start = 8 + 4 * shape_size
        shape = tuple(
            np.frombuffer(data[8 + 4 * idx : 12 + 4 * idx], np.uint32).item() for idx in range(shape_size)
        )
        tensor = np.frombuffer(data[payload_start:], dtype=dtype)
        # Fix: compare against a tuple — ndarray.shape is a tuple, so the old
        # list comparison never matched and the guard was dead code.
        if tensor.shape == shape:
            return tensor
        return np.reshape(tensor, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        # 1-D arrays go through NoHeaderNumpySerializer instead.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """Serialize 1-D numpy arrays without a shape/dtype header.

    The dtype travels in the data-format tag (``no_header_numpy:<idx>``) and
    is restored through ``setup`` before deserialization.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> index used in the data-format tag.
        self._dtype_to_indices = {value: key for key, value in _NUMPY_DTYPES_MAPPING.items()}
        self._dtype: Optional[np.dtype] = None

    def setup(self, data_format: str) -> None:
        _, _, index = data_format.partition(":")
        self._dtype = _NUMPY_DTYPES_MAPPING[int(index)]

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        index: int = self._dtype_to_indices[item.dtype]
        return item.tobytes(order="C"), f"no_header_numpy:{index}"

    def deserialize(self, data: bytes) -> np.ndarray:
        dtype = self._dtype
        assert dtype
        return np.frombuffer(data, dtype=dtype)

    def can_serialize(self, item: np.ndarray) -> bool:
        if not isinstance(item, np.ndarray) or type(item) != np.ndarray:
            return False
        return len(item.shape) == 1
class PickleSerializer(Serializer):
    """Catch-all serializer backed by :mod:`pickle`.

    SECURITY NOTE(review): ``pickle.loads`` can execute arbitrary code; only
    deserialize data from trusted sources.
    """

    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None

    def deserialize(self, data: bytes) -> Any:
        return pickle.loads(data)

    def can_serialize(self, _: Any) -> bool:
        # Accepts anything, so it is registered after all specific serializers.
        return True
class FileSerializer(Serializer):
    """Store an on-disk file as raw bytes.

    The data-format tag records the lower-cased extension (dot removed);
    ``deserialize`` hands the stored bytes straight back.
    """

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"file:{extension}"

    def deserialize(self, data: bytes) -> Any:
        return data

    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store video files as raw bytes and decode them with torchvision.

    Deserialization writes the payload to a temporary mp4 file and decodes it
    with ``torchvision.io.read_video`` (which requires the ``av`` backend).
    """

    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"video:{extension}"

    def deserialize(self, data: bytes) -> Any:
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io

        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")

    def can_serialize(self, data: Any) -> bool:
        if not isinstance(data, str) or not os.path.isfile(data):
            return False
        # str.endswith accepts a tuple of suffixes.
        return data.endswith(self._EXTENSIONS)
class StringSerializer(Serializer):
    """Serialize plain text as UTF-8 bytes."""

    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None

    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")

    def can_serialize(self, data: str) -> bool:
        # Strings naming an existing file are handled by the file serializers.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Serialize a single scalar as a fixed-width numpy value."""

    def __init__(self, dtype: type) -> None:
        # Concrete numpy scalar type (e.g. np.int64) and its byte width.
        self.dtype = dtype
        self.size = self.dtype().nbytes

    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        return self.dtype(obj).tobytes(), None

    def deserialize(self, data: bytes) -> Any:
        # Returns a numpy scalar of self.dtype.
        return np.frombuffer(data, self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Serialize Python ints as 8-byte ``np.int64`` values."""

    def __init__(self) -> None:
        super().__init__(np.int64)

    def can_serialize(self, data: int) -> bool:
        # NOTE(review): bool is a subclass of int, so True/False also match.
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Serialize Python floats as 8-byte ``np.float64`` values."""

    def __init__(self) -> None:
        super().__init__(np.float64)

    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Default serializer registry. PickleSerializer accepts any object, so it is
# registered last; presumably lookup iterates this mapping in order, meaning
# the more specific serializers must come first (verify against the caller).
_SERIALIZERS = OrderedDict(
    [
        ("str", StringSerializer()),
        ("int", IntegerSerializer()),
        ("float", FloatSerializer()),
        ("video", VideoSerializer()),
        ("tif", FileSerializer()),
        ("file", FileSerializer()),
        ("pil", PILSerializer()),
        ("jpeg", JPEGSerializer()),
        ("bytes", BytesSerializer()),
        ("no_header_numpy", NoHeaderNumpySerializer()),
        ("numpy", NumpySerializer()),
        ("no_header_tensor", NoHeaderTensorSerializer()),
        ("tensor", TensorSerializer()),
        ("pickle", PickleSerializer()),
    ]
)
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Return the serializer registry, with user serializers taking priority.

    Custom serializers come first (their position decides lookup priority) and
    the defaults are appended afterwards.

    Args:
        serializers: Optional user-provided serializers, keyed by name.

    Returns:
        An ordered mapping of serializer name to serializer instance.
    """
    if serializers:
        combined = OrderedDict(**serializers)
        # Fix: use setdefault so a user-provided serializer is not silently
        # replaced by the default registered under the same key.
        for name, serializer in _SERIALIZERS.items():
            combined.setdefault(name, serializer)
        return combined
    # Return a copy so callers cannot mutate the shared default registry
    # (the serializer instances themselves are still shared).
    return OrderedDict(_SERIALIZERS)
|
evocodebench_data_123
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
    """Abstract contract for converting items to and from raw bytes.

    Concrete serializers implement the ``serialize``/``deserialize`` pair plus
    a ``can_serialize`` predicate used to pick a serializer for an item.
    """

    @abstractmethod
    def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
        """Encode ``data`` into bytes plus an optional data-format tag."""
        ...

    @abstractmethod
    def deserialize(self, data: bytes) -> Any:
        """Decode ``data`` back into an item."""
        ...

    @abstractmethod
    def can_serialize(self, data: Any) -> bool:
        """Return True when this serializer can handle ``data``."""
        ...

    def setup(self, metadata: Any) -> None:
        """Optional hook to configure the serializer from stored metadata; no-op by default."""
class PILSerializer(Serializer):
    """Serialize and deserialize PIL images via a small binary header.

    Layout: three uint32 values (width, height, mode length), then the mode
    string, then the raw pixel bytes.
    """

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        encoded_mode = item.mode.encode("utf-8")
        width, height = item.size
        header = np.array([width, height, len(encoded_mode)], np.uint32)
        return header.tobytes() + encoded_mode + item.tobytes(), None

    @classmethod
    def deserialize(cls, data: bytes) -> Any:
        header_end = 3 * 4  # three uint32 fields
        width, height, mode_size = np.frombuffer(data[:header_end], np.uint32)
        mode_end = header_end + mode_size
        mode = data[header_end:mode_end].decode("utf-8")
        return Image.frombytes(mode, (width, height), data[mode_end:])  # pyright: ignore

    def can_serialize(self, item: Any) -> bool:
        # JPEG files are routed to the dedicated JPEGSerializer instead.
        return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize images to JPEG-encoded bytes and decode them back.

    Existing JPEG files are stored verbatim; other PIL image types are
    re-encoded as JPEG. Deserialization prefers torchvision's ``decode_jpeg``
    (returning a uint8 tensor) and falls back to PIL.
    """
    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # read the content of the file directly
                with open(item.filename, "rb") as f:
                    return f.read(), None
            else:
                # In-memory JPEG with no backing file: re-encode via a buffer.
                item_bytes = io.BytesIO()
                item.save(item_bytes, format="JPEG")
                item_bytes = item_bytes.getvalue()
                return item_bytes, None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Transcode other supported image formats to JPEG (lossy).
            buff = io.BytesIO()
            item.convert("RGB").save(buff, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")
    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        if _TORCH_VISION_AVAILABLE:
            # Fast path: decode with torchvision, returning a uint8 tensor.
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        img = PILSerializer.deserialize(data)
        if _TORCH_VISION_AVAILABLE:
            # Keep the return type consistent (tensor) when torchvision exists.
            img = pil_to_tensor(img)
        return img
    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
    """Pass-through serializer for items that are already raw ``bytes``."""

    def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
        # Nothing to encode and no data-format tag required.
        return item, None

    def deserialize(self, item: bytes) -> bytes:
        return item

    def can_serialize(self, item: bytes) -> bool:
        return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """Serialize multi-dimensional tensors with a self-describing header.

    Byte layout: uint32 dtype index, uint32 ndim, ``ndim`` uint32 dims, then
    the raw C-ordered tensor bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> torch.Tensor:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Fix: derive the payload offset from shape_size rather than the stale
        # loop variable, which raised NameError when shape_size == 0.
        tensor = torch.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # 1-D tensors go through NoHeaderTensorSerializer instead.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
    """Serialize 1-D tensors without a shape/dtype header.

    The dtype travels in the data-format tag (``no_header_tensor:<idx>``) and
    is restored through ``setup`` before deserialization.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> index used in the data-format tag.
        self._dtype_to_indices = {value: key for key, value in _TORCH_DTYPES_MAPPING.items()}
        self._dtype: Optional[torch.dtype] = None

    def setup(self, data_format: str) -> None:
        _, _, index = data_format.partition(":")
        self._dtype = _TORCH_DTYPES_MAPPING[int(index)]

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        index = self._dtype_to_indices[item.dtype]
        return item.numpy().tobytes(order="C"), f"no_header_tensor:{index}"

    def deserialize(self, data: bytes) -> torch.Tensor:
        dtype = self._dtype
        assert dtype
        return torch.frombuffer(data, dtype=dtype)

    def can_serialize(self, item: torch.Tensor) -> bool:
        if not isinstance(item, torch.Tensor) or type(item) != torch.Tensor:
            return False
        return len(item.shape) == 1
class NumpySerializer(Serializer):
    """Serialize multi-dimensional numpy arrays with a self-describing header.

    Byte layout: uint32 dtype index, uint32 ndim, ``ndim`` uint32 dims, then
    the raw C-ordered array bytes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Header: 4 bytes dtype index + 4 bytes ndim + 4 bytes per dimension.
        # Fix: derive the payload offset from shape_size rather than the stale
        # loop variable, which raised NameError when shape_size == 0.
        payload_start = 8 + 4 * shape_size
        shape = tuple(
            np.frombuffer(data[8 + 4 * idx : 12 + 4 * idx], np.uint32).item() for idx in range(shape_size)
        )
        tensor = np.frombuffer(data[payload_start:], dtype=dtype)
        # Fix: compare against a tuple — ndarray.shape is a tuple, so the old
        # list comparison never matched and the guard was dead code.
        if tensor.shape == shape:
            return tensor
        return np.reshape(tensor, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        # 1-D arrays go through NoHeaderNumpySerializer instead.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """Serialize 1-D numpy arrays without a shape/dtype header.

    The dtype travels in the data-format tag (``no_header_numpy:<idx>``) and
    is restored through ``setup`` before deserialization.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> index used in the data-format tag.
        self._dtype_to_indices = {value: key for key, value in _NUMPY_DTYPES_MAPPING.items()}
        self._dtype: Optional[np.dtype] = None

    def setup(self, data_format: str) -> None:
        _, _, index = data_format.partition(":")
        self._dtype = _NUMPY_DTYPES_MAPPING[int(index)]

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        index: int = self._dtype_to_indices[item.dtype]
        return item.tobytes(order="C"), f"no_header_numpy:{index}"

    def deserialize(self, data: bytes) -> np.ndarray:
        dtype = self._dtype
        assert dtype
        return np.frombuffer(data, dtype=dtype)

    def can_serialize(self, item: np.ndarray) -> bool:
        if not isinstance(item, np.ndarray) or type(item) != np.ndarray:
            return False
        return len(item.shape) == 1
class PickleSerializer(Serializer):
    """Catch-all serializer backed by :mod:`pickle`.

    SECURITY NOTE(review): ``pickle.loads`` can execute arbitrary code; only
    deserialize data from trusted sources.
    """

    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None

    def deserialize(self, data: bytes) -> Any:
        return pickle.loads(data)

    def can_serialize(self, _: Any) -> bool:
        # Accepts anything, so it is registered after all specific serializers.
        return True
class FileSerializer(Serializer):
    """Store an on-disk file as raw bytes.

    The data-format tag records the lower-cased extension (dot removed);
    ``deserialize`` hands the stored bytes straight back.
    """

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"file:{extension}"

    def deserialize(self, data: bytes) -> Any:
        return data

    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store video files as raw bytes and decode them with torchvision.

    Deserialization writes the payload to a temporary mp4 file and decodes it
    with ``torchvision.io.read_video`` (which requires the ``av`` backend).
    """

    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"video:{extension}"

    def deserialize(self, data: bytes) -> Any:
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io

        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")

    def can_serialize(self, data: Any) -> bool:
        if not isinstance(data, str) or not os.path.isfile(data):
            return False
        # str.endswith accepts a tuple of suffixes.
        return data.endswith(self._EXTENSIONS)
class StringSerializer(Serializer):
    """Serialize plain text as UTF-8 bytes."""

    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None

    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")

    def can_serialize(self, data: str) -> bool:
        # Strings naming an existing file are handled by the file serializers.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Serialize a single scalar as a fixed-width numpy value."""

    def __init__(self, dtype: type) -> None:
        # Concrete numpy scalar type (e.g. np.int64) and its byte width.
        self.dtype = dtype
        self.size = self.dtype().nbytes

    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        return self.dtype(obj).tobytes(), None

    def deserialize(self, data: bytes) -> Any:
        # Returns a numpy scalar of self.dtype.
        return np.frombuffer(data, self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Serialize Python ints as 8-byte ``np.int64`` values."""

    def __init__(self) -> None:
        super().__init__(np.int64)

    def can_serialize(self, data: int) -> bool:
        # NOTE(review): bool is a subclass of int, so True/False also match.
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Serialize Python floats as 8-byte ``np.float64`` values."""

    def __init__(self) -> None:
        super().__init__(np.float64)

    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Default serializer registry. PickleSerializer accepts any object, so it is
# registered last; presumably lookup iterates this mapping in order, meaning
# the more specific serializers must come first (verify against the caller).
_SERIALIZERS = OrderedDict(
    [
        ("str", StringSerializer()),
        ("int", IntegerSerializer()),
        ("float", FloatSerializer()),
        ("video", VideoSerializer()),
        ("tif", FileSerializer()),
        ("file", FileSerializer()),
        ("pil", PILSerializer()),
        ("jpeg", JPEGSerializer()),
        ("bytes", BytesSerializer()),
        ("no_header_numpy", NoHeaderNumpySerializer()),
        ("numpy", NumpySerializer()),
        ("no_header_tensor", NoHeaderTensorSerializer()),
        ("tensor", TensorSerializer()),
        ("pickle", PickleSerializer()),
    ]
)
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Return the serializer registry, with user serializers taking priority.

    Custom serializers come first (their position decides lookup priority) and
    the defaults are appended afterwards.

    Args:
        serializers: Optional user-provided serializers, keyed by name.

    Returns:
        An ordered mapping of serializer name to serializer instance.
    """
    if serializers:
        combined = OrderedDict(**serializers)
        # Fix: use setdefault so a user-provided serializer is not silently
        # replaced by the default registered under the same key.
        for name, serializer in _SERIALIZERS.items():
            combined.setdefault(name, serializer)
        return combined
    # Return a copy so callers cannot mutate the shared default registry
    # (the serializer instances themselves are still shared).
    return OrderedDict(_SERIALIZERS)
|
evocodebench_data_124
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
    """Abstract contract for converting items to and from raw bytes.

    Concrete serializers implement the ``serialize``/``deserialize`` pair plus
    a ``can_serialize`` predicate used to pick a serializer for an item.
    """

    @abstractmethod
    def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
        """Encode ``data`` into bytes plus an optional data-format tag."""
        ...

    @abstractmethod
    def deserialize(self, data: bytes) -> Any:
        """Decode ``data`` back into an item."""
        ...

    @abstractmethod
    def can_serialize(self, data: Any) -> bool:
        """Return True when this serializer can handle ``data``."""
        ...

    def setup(self, metadata: Any) -> None:
        """Optional hook to configure the serializer from stored metadata; no-op by default."""
class PILSerializer(Serializer):
    """Serialize and deserialize PIL images via a small binary header.

    Layout: three uint32 values (width, height, mode length), then the mode
    string, then the raw pixel bytes.
    """

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        encoded_mode = item.mode.encode("utf-8")
        width, height = item.size
        header = np.array([width, height, len(encoded_mode)], np.uint32)
        return header.tobytes() + encoded_mode + item.tobytes(), None

    @classmethod
    def deserialize(cls, data: bytes) -> Any:
        header_end = 3 * 4  # three uint32 fields
        width, height, mode_size = np.frombuffer(data[:header_end], np.uint32)
        mode_end = header_end + mode_size
        mode = data[header_end:mode_end].decode("utf-8")
        return Image.frombytes(mode, (width, height), data[mode_end:])  # pyright: ignore

    def can_serialize(self, item: Any) -> bool:
        # JPEG files are routed to the dedicated JPEGSerializer instead.
        return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize images to raw JPEG-encoded bytes and decode them back.

    JPEG files already on disk are copied verbatim (no quality loss); other
    PIL image types are converted to RGB and re-encoded as JPEG.
    """

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # Read the already-encoded file directly to avoid a lossy re-encode.
                with open(item.filename, "rb") as f:
                    return f.read(), None
            # In-memory JPEG (filename == ""): re-encode through a buffer.
            item_bytes = io.BytesIO()
            item.save(item_bytes, format="JPEG")
            return item_bytes.getvalue(), None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Non-JPEG images are converted; quality=100 minimizes the re-encode loss.
            buff = io.BytesIO()
            item.convert("RGB").save(buff, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")

    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode encoded image bytes, preferring torchvision's fast JPEG decoder."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contain PNG images with a JPEG
                # extension, so we fall back to PIL's decoder below.
                pass
        # Fix: ``data`` holds an *encoded* image file (that is what serialize()
        # writes), so it must go through an image decoder. The previous
        # ``PILSerializer.deserialize(data)`` call misread these bytes as
        # PILSerializer's raw header+pixels layout.
        img = Image.open(io.BytesIO(data))
        if _TORCH_VISION_AVAILABLE:
            img = pil_to_tensor(img)
        return img

    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
    """Pass-through serializer: raw ``bytes`` payloads are stored unchanged."""

    def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
        # Already bytes -- nothing to encode.
        return item, None

    def deserialize(self, item: bytes) -> bytes:
        return item

    def can_serialize(self, item: bytes) -> bool:
        return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """The TensorSerializer serialize and deserialize tensor to and from bytes.

    Layout: ``uint32 dtype_index | uint32 ndim | uint32 * ndim shape | raw data``.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch.dtype -> integer index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Encode the tensor with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes(), np.uint32(len(item.shape)).tobytes()]
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> torch.Tensor:
        """Decode the header + payload produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Read the whole shape header in one pass. Fix: the payload offset is
        # 8 + 4 * shape_size -- the original ``8 + 4 * (shape_idx + 1)`` raised
        # NameError for a 0-dim header because the loop variable was never bound.
        payload_start = 8 + 4 * shape_size
        shape = torch.Size(np.frombuffer(data[8:payload_start], np.uint32).tolist())
        tensor = torch.frombuffer(data[payload_start:], dtype=dtype)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # Plain (non-subclassed) tensors with ndim > 1; 1-D tensors are handled
        # by NoHeaderTensorSerializer instead.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
    """Serialize 1-D tensors as raw bytes; the dtype travels in the format tag."""

    def __init__(self) -> None:
        super().__init__()
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
        # Populated by setup() from the stored "no_header_tensor:<idx>" tag.
        self._dtype: Optional[torch.dtype] = None

    def setup(self, data_format: str) -> None:
        """Recover the tensor dtype from the ``no_header_tensor:<idx>`` tag."""
        index = int(data_format.split(":")[1])
        self._dtype = _TORCH_DTYPES_MAPPING[index]

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        tag = f"no_header_tensor:{self._dtype_to_indices[item.dtype]}"
        return item.numpy().tobytes(order="C"), tag

    def deserialize(self, data: bytes) -> torch.Tensor:
        assert self._dtype
        return torch.frombuffer(data, dtype=self._dtype)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # Exactly 1-D plain tensors; >1-D tensors carry a header (TensorSerializer).
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """The NumpySerializer serialize and deserialize numpy to and from bytes.

    Layout: ``uint32 dtype_index | uint32 ndim | uint32 * ndim shape | raw data``.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> integer index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Encode the array with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes(), np.uint32(len(item.shape)).tobytes()]
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        """Decode the header + payload produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Read the whole shape header in one pass. Fix: the payload offset is
        # 8 + 4 * shape_size -- the original ``8 + 4 * (shape_idx + 1)`` raised
        # NameError for a 0-dim header because the loop variable was never bound.
        payload_start = 8 + 4 * shape_size
        shape = tuple(np.frombuffer(data[8:payload_start], np.uint32).tolist())
        array = np.frombuffer(data[payload_start:], dtype=dtype)
        # Fix: compare against a *tuple* -- the original compared ``array.shape``
        # (a tuple) with a list, which is always False and forced a reshape even
        # when the shapes already matched.
        if array.shape == shape:
            return array
        return np.reshape(array, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        # Plain ndarrays with ndim > 1; 1-D arrays go through NoHeaderNumpySerializer.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """Serialize 1-D numpy arrays as raw bytes; the dtype travels in the format tag."""

    def __init__(self) -> None:
        super().__init__()
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
        # Populated by setup() from the stored "no_header_numpy:<idx>" tag.
        self._dtype: Optional[np.dtype] = None

    def setup(self, data_format: str) -> None:
        """Recover the array dtype from the ``no_header_numpy:<idx>`` tag."""
        index = int(data_format.split(":")[1])
        self._dtype = _NUMPY_DTYPES_MAPPING[index]

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        tag = f"no_header_numpy:{self._dtype_to_indices[item.dtype]}"
        return item.tobytes(order="C"), tag

    def deserialize(self, data: bytes) -> np.ndarray:
        assert self._dtype
        return np.frombuffer(data, dtype=self._dtype)

    def can_serialize(self, item: np.ndarray) -> bool:
        # Exactly 1-D plain ndarrays; >1-D arrays carry a header (NumpySerializer).
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
    """Catch-all serializer: arbitrary Python objects via :mod:`pickle`."""

    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None

    def deserialize(self, data: bytes) -> Any:
        # NOTE(review): pickle.loads is unsafe on untrusted bytes; chunks are
        # presumably locally written datasets -- confirm that assumption holds.
        return pickle.loads(data)

    def can_serialize(self, _: Any) -> bool:
        # Accepts everything, so it must be registered last in _SERIALIZERS.
        return True
class FileSerializer(Serializer):
    """Store a file referenced by path as raw bytes, tagged with its extension."""

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        extension = os.path.splitext(filepath)[1].lstrip(".").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"file:{extension}"

    def deserialize(self, data: bytes) -> Any:
        # The consumer receives the raw file content; no decoding is attempted.
        return data

    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store a video file referenced by path as raw bytes; decode via torchvision."""
    # File extensions this serializer claims in can_serialize().
    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")
    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        """Read the file verbatim and tag the chunk with its (lowercased) extension."""
        _, file_extension = os.path.splitext(filepath)
        with open(filepath, "rb") as f:
            file_extension = file_extension.replace(".", "").lower()
            return f.read(), f"video:{file_extension}"
    def deserialize(self, data: bytes) -> Any:
        """Decode the stored bytes via ``torchvision.io.read_video``.

        The bytes are spilled to a temporary file because ``read_video`` only
        accepts a path. Requires both torchvision and av at runtime.

        NOTE(review): the temp file is always named ``file.mp4`` regardless of
        the original extension; decoding non-mp4 formats relies on the backend
        sniffing the content -- confirm this works for every _EXTENSIONS entry.
        """
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io
        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")
    def can_serialize(self, data: Any) -> bool:
        """Claim existing file paths whose name ends in a known video extension."""
        return isinstance(data, str) and os.path.isfile(data) and any(data.endswith(ext) for ext in self._EXTENSIONS)
class StringSerializer(Serializer):
    """UTF-8 round-trip for plain strings."""

    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None

    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")

    def can_serialize(self, data: str) -> bool:
        # Strings naming an existing file are claimed by the file/video serializers.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Store a scalar as a single fixed-width numpy value."""

    def __init__(self, dtype: type) -> None:
        # ``dtype`` is a numpy scalar type such as np.int64 or np.float64.
        self.dtype = dtype
        # Byte width of one encoded scalar (e.g. 8 for int64/float64).
        self.size = self.dtype().nbytes

    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        value = self.dtype(obj)
        return value.tobytes(), None

    def deserialize(self, data: bytes) -> Any:
        # frombuffer yields a length-1 array; return its single scalar.
        decoded = np.frombuffer(data, self.dtype)
        return decoded[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Store a scalar ``int`` as an 8-byte ``np.int64``."""
    def __init__(self) -> None:
        super().__init__(np.int64)
    def can_serialize(self, data: int) -> bool:
        # NOTE(review): bool is a subclass of int, so True/False are claimed
        # here too and round-trip as int64 0/1 -- confirm this is intended.
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Store a scalar ``float`` as an 8-byte ``np.float64``."""
    def __init__(self) -> None:
        super().__init__(np.float64)
    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Registry of built-in serializers. Order matters: the first serializer whose
# can_serialize() accepts an item wins, so the catch-all pickle entry is last.
_SERIALIZERS = OrderedDict(
    [
        ("str", StringSerializer()),
        ("int", IntegerSerializer()),
        ("float", FloatSerializer()),
        ("video", VideoSerializer()),
        ("tif", FileSerializer()),
        ("file", FileSerializer()),
        ("pil", PILSerializer()),
        ("jpeg", JPEGSerializer()),
        ("bytes", BytesSerializer()),
        ("no_header_numpy", NoHeaderNumpySerializer()),
        ("numpy", NumpySerializer()),
        ("no_header_tensor", NoHeaderTensorSerializer()),
        ("tensor", TensorSerializer()),
        ("pickle", PickleSerializer()),
    ]
)
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Return the serializer registry, merging user overrides with the defaults.

    Args:
        serializers: Optional user-provided serializers, keyed by format name.

    Returns:
        An ordered mapping where user serializers come first and keep lookup
        priority over the built-in defaults.
    """
    if not serializers:
        # Hand out a copy so callers cannot mutate the module-level registry.
        return OrderedDict(_SERIALIZERS)
    merged = OrderedDict(**serializers)
    # Fix: only append defaults the user did not override. The previous
    # unconditional update() clobbered any user entry sharing a default key
    # (e.g. a custom "pil" serializer was silently replaced).
    for name, serializer in _SERIALIZERS.items():
        merged.setdefault(name, serializer)
    return merged
|
evocodebench_data_125
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
"""The base interface for any serializers.
A Serializer serialize and deserialize to and from bytes.
"""
@abstractmethod
def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
pass
@abstractmethod
def deserialize(self, data: bytes) -> Any:
pass
@abstractmethod
def can_serialize(self, data: Any) -> bool:
pass
def setup(self, metadata: Any) -> None:
pass
class PILSerializer(Serializer):
"""The PILSerializer serialize and deserialize PIL Image to and from bytes."""
def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
mode = item.mode.encode("utf-8")
width, height = item.size
raw = item.tobytes()
ints = np.array([width, height, len(mode)], np.uint32)
return ints.tobytes() + mode + raw, None
@classmethod
def deserialize(cls, data: bytes) -> Any:
idx = 3 * 4
width, height, mode_size = np.frombuffer(data[:idx], np.uint32)
idx2 = idx + mode_size
mode = data[idx:idx2].decode("utf-8")
size = width, height
raw = data[idx2:]
return Image.frombytes(mode, size, raw) # pyright: ignore
def can_serialize(self, item: Any) -> bool:
return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize images to raw JPEG-encoded bytes and decode them back.

    JPEG files already on disk are copied verbatim (no quality loss); other
    PIL image types are converted to RGB and re-encoded as JPEG.
    """

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # Read the already-encoded file directly to avoid a lossy re-encode.
                with open(item.filename, "rb") as f:
                    return f.read(), None
            # In-memory JPEG (filename == ""): re-encode through a buffer.
            item_bytes = io.BytesIO()
            item.save(item_bytes, format="JPEG")
            return item_bytes.getvalue(), None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Non-JPEG images are converted; quality=100 minimizes the re-encode loss.
            buff = io.BytesIO()
            item.convert("RGB").save(buff, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")

    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode encoded image bytes, preferring torchvision's fast JPEG decoder."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contain PNG images with a JPEG
                # extension, so we fall back to PIL's decoder below.
                pass
        # Fix: ``data`` holds an *encoded* image file (that is what serialize()
        # writes), so it must go through an image decoder. The previous
        # ``PILSerializer.deserialize(data)`` call misread these bytes as
        # PILSerializer's raw header+pixels layout.
        img = Image.open(io.BytesIO(data))
        if _TORCH_VISION_AVAILABLE:
            img = pil_to_tensor(img)
        return img

    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
"""The BytesSerializer serialize and deserialize integer to and from bytes."""
def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
return item, None
def deserialize(self, item: bytes) -> bytes:
return item
def can_serialize(self, item: bytes) -> bool:
return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """The TensorSerializer serialize and deserialize tensor to and from bytes.

    Layout: ``uint32 dtype_index | uint32 ndim | uint32 * ndim shape | raw data``.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch.dtype -> integer index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Encode the tensor with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes(), np.uint32(len(item.shape)).tobytes()]
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> torch.Tensor:
        """Decode the header + payload produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Read the whole shape header in one pass. Fix: the payload offset is
        # 8 + 4 * shape_size -- the original ``8 + 4 * (shape_idx + 1)`` raised
        # NameError for a 0-dim header because the loop variable was never bound.
        payload_start = 8 + 4 * shape_size
        shape = torch.Size(np.frombuffer(data[8:payload_start], np.uint32).tolist())
        tensor = torch.frombuffer(data[payload_start:], dtype=dtype)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # Plain (non-subclassed) tensors with ndim > 1; 1-D tensors are handled
        # by NoHeaderTensorSerializer instead.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
"""The TensorSerializer serialize and deserialize tensor to and from bytes."""
def __init__(self) -> None:
super().__init__()
self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
self._dtype: Optional[torch.dtype] = None
def setup(self, data_format: str) -> None:
self._dtype = _TORCH_DTYPES_MAPPING[int(data_format.split(":")[1])]
def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
dtype_indice = self._dtype_to_indices[item.dtype]
return item.numpy().tobytes(order="C"), f"no_header_tensor:{dtype_indice}"
def deserialize(self, data: bytes) -> torch.Tensor:
assert self._dtype
return torch.frombuffer(data, dtype=self._dtype)
def can_serialize(self, item: torch.Tensor) -> bool:
return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """The NumpySerializer serialize and deserialize numpy to and from bytes.

    Layout: ``uint32 dtype_index | uint32 ndim | uint32 * ndim shape | raw data``.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> integer index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Encode the array with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes(), np.uint32(len(item.shape)).tobytes()]
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        """Decode the header + payload produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Read the whole shape header in one pass. Fix: the payload offset is
        # 8 + 4 * shape_size -- the original ``8 + 4 * (shape_idx + 1)`` raised
        # NameError for a 0-dim header because the loop variable was never bound.
        payload_start = 8 + 4 * shape_size
        shape = tuple(np.frombuffer(data[8:payload_start], np.uint32).tolist())
        array = np.frombuffer(data[payload_start:], dtype=dtype)
        # Fix: compare against a *tuple* -- the original compared ``array.shape``
        # (a tuple) with a list, which is always False and forced a reshape even
        # when the shapes already matched.
        if array.shape == shape:
            return array
        return np.reshape(array, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        # Plain ndarrays with ndim > 1; 1-D arrays go through NoHeaderNumpySerializer.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
"""The NoHeaderNumpySerializer serialize and deserialize numpy to and from bytes."""
def __init__(self) -> None:
super().__init__()
self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
self._dtype: Optional[np.dtype] = None
def setup(self, data_format: str) -> None:
self._dtype = _NUMPY_DTYPES_MAPPING[int(data_format.split(":")[1])]
def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
dtype_indice: int = self._dtype_to_indices[item.dtype]
return item.tobytes(order="C"), f"no_header_numpy:{dtype_indice}"
def deserialize(self, data: bytes) -> np.ndarray:
assert self._dtype
return np.frombuffer(data, dtype=self._dtype)
def can_serialize(self, item: np.ndarray) -> bool:
return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
"""The PickleSerializer serialize and deserialize python objects to and from bytes."""
def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
return pickle.dumps(item), None
def deserialize(self, data: bytes) -> Any:
return pickle.loads(data)
def can_serialize(self, _: Any) -> bool:
return True
class FileSerializer(Serializer):
def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
_, file_extension = os.path.splitext(filepath)
with open(filepath, "rb") as f:
file_extension = file_extension.replace(".", "").lower()
return f.read(), f"file:{file_extension}"
def deserialize(self, data: bytes) -> Any:
return data
def can_serialize(self, data: Any) -> bool:
return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
_EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")
def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
_, file_extension = os.path.splitext(filepath)
with open(filepath, "rb") as f:
file_extension = file_extension.replace(".", "").lower()
return f.read(), f"video:{file_extension}"
def deserialize(self, data: bytes) -> Any:
if not _TORCH_VISION_AVAILABLE:
raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
if not _AV_AVAILABLE:
raise ModuleNotFoundError("av is required. Run `pip install av`")
# Add support for a better deserialization mechanism for videos
# TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
import torchvision.io
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, "file.mp4")
with open(fname, "wb") as stream:
stream.write(data)
return torchvision.io.read_video(fname, pts_unit="sec")
def can_serialize(self, data: Any) -> bool:
return isinstance(data, str) and os.path.isfile(data) and any(data.endswith(ext) for ext in self._EXTENSIONS)
class StringSerializer(Serializer):
def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
return obj.encode("utf-8"), None
def deserialize(self, data: bytes) -> str:
return data.decode("utf-8")
def can_serialize(self, data: str) -> bool:
return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
"""Store scalar."""
def __init__(self, dtype: type) -> None:
self.dtype = dtype
self.size = self.dtype().nbytes
def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
return self.dtype(obj).tobytes(), None
def deserialize(self, data: bytes) -> Any:
return np.frombuffer(data, self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
def __init__(self) -> None:
super().__init__(np.int64)
def can_serialize(self, data: int) -> bool:
return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
def __init__(self) -> None:
super().__init__(np.float64)
def can_serialize(self, data: float) -> bool:
return isinstance(data, float)
_SERIALIZERS = OrderedDict(**{
"str": StringSerializer(),
"int": IntegerSerializer(),
"float": FloatSerializer(),
"video": VideoSerializer(),
"tif": FileSerializer(),
"file": FileSerializer(),
"pil": PILSerializer(),
"jpeg": JPEGSerializer(),
"bytes": BytesSerializer(),
"no_header_numpy": NoHeaderNumpySerializer(),
"numpy": NumpySerializer(),
"no_header_tensor": NoHeaderTensorSerializer(),
"tensor": TensorSerializer(),
"pickle": PickleSerializer(),
})
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Return the serializer registry, merging user overrides with the defaults.

    Args:
        serializers: Optional user-provided serializers, keyed by format name.

    Returns:
        An ordered mapping where user serializers come first and keep lookup
        priority over the built-in defaults.
    """
    if not serializers:
        # Hand out a copy so callers cannot mutate the module-level registry.
        return OrderedDict(_SERIALIZERS)
    merged = OrderedDict(**serializers)
    # Fix: only append defaults the user did not override. The previous
    # unconditional update() clobbered any user entry sharing a default key
    # (e.g. a custom "pil" serializer was silently replaced).
    for name, serializer in _SERIALIZERS.items():
        merged.setdefault(name, serializer)
    return merged
|
evocodebench_data_126
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
"""The base interface for any serializers.
A Serializer serialize and deserialize to and from bytes.
"""
@abstractmethod
def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
pass
@abstractmethod
def deserialize(self, data: bytes) -> Any:
pass
@abstractmethod
def can_serialize(self, data: Any) -> bool:
pass
def setup(self, metadata: Any) -> None:
pass
class PILSerializer(Serializer):
"""The PILSerializer serialize and deserialize PIL Image to and from bytes."""
def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
mode = item.mode.encode("utf-8")
width, height = item.size
raw = item.tobytes()
ints = np.array([width, height, len(mode)], np.uint32)
return ints.tobytes() + mode + raw, None
@classmethod
def deserialize(cls, data: bytes) -> Any:
idx = 3 * 4
width, height, mode_size = np.frombuffer(data[:idx], np.uint32)
idx2 = idx + mode_size
mode = data[idx:idx2].decode("utf-8")
size = width, height
raw = data[idx2:]
return Image.frombytes(mode, size, raw) # pyright: ignore
def can_serialize(self, item: Any) -> bool:
return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize images to raw JPEG-encoded bytes and decode them back.

    JPEG files already on disk are copied verbatim (no quality loss); other
    PIL image types are converted to RGB and re-encoded as JPEG.
    """

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # Read the already-encoded file directly to avoid a lossy re-encode.
                with open(item.filename, "rb") as f:
                    return f.read(), None
            # In-memory JPEG (filename == ""): re-encode through a buffer.
            item_bytes = io.BytesIO()
            item.save(item_bytes, format="JPEG")
            return item_bytes.getvalue(), None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Non-JPEG images are converted; quality=100 minimizes the re-encode loss.
            buff = io.BytesIO()
            item.convert("RGB").save(buff, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")

    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode encoded image bytes, preferring torchvision's fast JPEG decoder."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contain PNG images with a JPEG
                # extension, so we fall back to PIL's decoder below.
                pass
        # Fix: ``data`` holds an *encoded* image file (that is what serialize()
        # writes), so it must go through an image decoder. The previous
        # ``PILSerializer.deserialize(data)`` call misread these bytes as
        # PILSerializer's raw header+pixels layout.
        img = Image.open(io.BytesIO(data))
        if _TORCH_VISION_AVAILABLE:
            img = pil_to_tensor(img)
        return img

    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
"""The BytesSerializer serialize and deserialize integer to and from bytes."""
def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
return item, None
def deserialize(self, item: bytes) -> bytes:
return item
def can_serialize(self, item: bytes) -> bool:
return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """The TensorSerializer serialize and deserialize tensor to and from bytes.

    Layout: ``uint32 dtype_index | uint32 ndim | uint32 * ndim shape | raw data``.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch.dtype -> integer index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Encode the tensor with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes(), np.uint32(len(item.shape)).tobytes()]
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> torch.Tensor:
        """Decode the header + payload produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Read the whole shape header in one pass. Fix: the payload offset is
        # 8 + 4 * shape_size -- the original ``8 + 4 * (shape_idx + 1)`` raised
        # NameError for a 0-dim header because the loop variable was never bound.
        payload_start = 8 + 4 * shape_size
        shape = torch.Size(np.frombuffer(data[8:payload_start], np.uint32).tolist())
        tensor = torch.frombuffer(data[payload_start:], dtype=dtype)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)

    def can_serialize(self, item: torch.Tensor) -> bool:
        # Plain (non-subclassed) tensors with ndim > 1; 1-D tensors are handled
        # by NoHeaderTensorSerializer instead.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
"""The TensorSerializer serialize and deserialize tensor to and from bytes."""
def __init__(self) -> None:
super().__init__()
self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
self._dtype: Optional[torch.dtype] = None
def setup(self, data_format: str) -> None:
self._dtype = _TORCH_DTYPES_MAPPING[int(data_format.split(":")[1])]
def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
dtype_indice = self._dtype_to_indices[item.dtype]
return item.numpy().tobytes(order="C"), f"no_header_tensor:{dtype_indice}"
def deserialize(self, data: bytes) -> torch.Tensor:
assert self._dtype
return torch.frombuffer(data, dtype=self._dtype)
def can_serialize(self, item: torch.Tensor) -> bool:
return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """The NumpySerializer serialize and deserialize numpy to and from bytes.

    Layout: ``uint32 dtype_index | uint32 ndim | uint32 * ndim shape | raw data``.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> integer index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Encode the array with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes(), np.uint32(len(item.shape)).tobytes()]
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        """Decode the header + payload produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Read the whole shape header in one pass. Fix: the payload offset is
        # 8 + 4 * shape_size -- the original ``8 + 4 * (shape_idx + 1)`` raised
        # NameError for a 0-dim header because the loop variable was never bound.
        payload_start = 8 + 4 * shape_size
        shape = tuple(np.frombuffer(data[8:payload_start], np.uint32).tolist())
        array = np.frombuffer(data[payload_start:], dtype=dtype)
        # Fix: compare against a *tuple* -- the original compared ``array.shape``
        # (a tuple) with a list, which is always False and forced a reshape even
        # when the shapes already matched.
        if array.shape == shape:
            return array
        return np.reshape(array, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        # Plain ndarrays with ndim > 1; 1-D arrays go through NoHeaderNumpySerializer.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
"""The NoHeaderNumpySerializer serialize and deserialize numpy to and from bytes."""
def __init__(self) -> None:
super().__init__()
self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
self._dtype: Optional[np.dtype] = None
def setup(self, data_format: str) -> None:
self._dtype = _NUMPY_DTYPES_MAPPING[int(data_format.split(":")[1])]
def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
dtype_indice: int = self._dtype_to_indices[item.dtype]
return item.tobytes(order="C"), f"no_header_numpy:{dtype_indice}"
def deserialize(self, data: bytes) -> np.ndarray:
assert self._dtype
return np.frombuffer(data, dtype=self._dtype)
def can_serialize(self, item: np.ndarray) -> bool:
return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
"""The PickleSerializer serialize and deserialize python objects to and from bytes."""
def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
return pickle.dumps(item), None
def deserialize(self, data: bytes) -> Any:
return pickle.loads(data)
def can_serialize(self, _: Any) -> bool:
return True
class FileSerializer(Serializer):
def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
_, file_extension = os.path.splitext(filepath)
with open(filepath, "rb") as f:
file_extension = file_extension.replace(".", "").lower()
return f.read(), f"file:{file_extension}"
def deserialize(self, data: bytes) -> Any:
return data
def can_serialize(self, data: Any) -> bool:
return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store a raw video file, tagged with its extension; decode via torchvision."""
    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")
    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        """Read the whole video file and tag the payload as ``video:<extension>``."""
        _, file_extension = os.path.splitext(filepath)
        with open(filepath, "rb") as f:
            file_extension = file_extension.replace(".", "").lower()
            return f.read(), f"video:{file_extension}"
    def deserialize(self, data: bytes) -> Any:
        """Decode the stored bytes with ``torchvision.io.read_video``."""
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io
        # ``read_video`` wants a path, so round-trip the bytes through a temp file.
        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")
    def can_serialize(self, data: Any) -> bool:
        """Handle paths to existing files whose extension is a known video format.

        Fix: require the ``.`` before the extension — a bare ``endswith("mp4")``
        also matched unrelated names such as ``"clip.amp4"``. Matching is
        case-insensitive, consistent with ``serialize`` lowercasing the tag.
        """
        return (
            isinstance(data, str)
            and os.path.isfile(data)
            and any(data.lower().endswith(f".{ext}") for ext in self._EXTENSIONS)
        )
class StringSerializer(Serializer):
    """Store plain strings as UTF-8 bytes."""
    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None
    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")
    def can_serialize(self, data: str) -> bool:
        # Strings naming an existing file are left to the file/video serializers.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Store a scalar as the raw bytes of a single numpy value."""
    def __init__(self, dtype: type) -> None:
        self.dtype = dtype
        # Byte width of one scalar of this dtype.
        self.size = self.dtype().nbytes
    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        """Pack ``obj`` into one fixed-width scalar."""
        value = self.dtype(obj)
        return value.tobytes(), None
    def deserialize(self, data: bytes) -> Any:
        """Read back the single scalar stored in ``data``."""
        return np.frombuffer(data, dtype=self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Store Python ints as int64 scalars."""
    def __init__(self) -> None:
        super().__init__(np.int64)
    def can_serialize(self, data: int) -> bool:
        # NOTE(review): ``isinstance(True, int)`` is True, so bools are stored as
        # int64 and come back as numbers — confirm this is intended.
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Store Python floats as float64 scalars."""
    def __init__(self) -> None:
        super().__init__(np.float64)
    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Registry of the built-in serializers. ``pickle`` accepts everything
# (``can_serialize`` returns True), so it is registered last as the catch-all;
# presumably the first serializer whose ``can_serialize`` matches wins — confirm
# against the caller of ``_get_serializers``.
_SERIALIZERS = OrderedDict(**{
    "str": StringSerializer(),
    "int": IntegerSerializer(),
    "float": FloatSerializer(),
    "video": VideoSerializer(),
    "tif": FileSerializer(),
    "file": FileSerializer(),
    "pil": PILSerializer(),
    "jpeg": JPEGSerializer(),
    "bytes": BytesSerializer(),
    "no_header_numpy": NoHeaderNumpySerializer(),
    "numpy": NumpySerializer(),
    "no_header_tensor": NoHeaderTensorSerializer(),
    "tensor": TensorSerializer(),
    "pickle": PickleSerializer(),
})
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Merge user-provided serializers with the built-in registry.

    User entries come first in the resulting order, but ``update(_SERIALIZERS)``
    means a built-in serializer REPLACES a user entry registered under the same
    key — NOTE(review): confirm this override direction is intended.
    """
    if serializers:
        serializers = OrderedDict(**serializers)
        serializers.update(_SERIALIZERS)
    else:
        # NOTE(review): returns the shared module-level registry itself; callers
        # mutating it would affect every other caller.
        serializers = _SERIALIZERS
    return serializers
|
evocodebench_data_127
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
    """The base interface for any serializers.
    A Serializer serialize and deserialize to and from bytes.
    """
    @abstractmethod
    def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
        """Convert ``data`` to bytes, plus an optional ``name:metadata`` format tag."""
        pass
    @abstractmethod
    def deserialize(self, data: bytes) -> Any:
        """Reconstruct the original item from its serialized ``data``."""
        pass
    @abstractmethod
    def can_serialize(self, data: Any) -> bool:
        """Return whether this serializer is able to handle ``data``."""
        pass
    def setup(self, metadata: Any) -> None:
        """Optional hook: configure the serializer from the stored data format before deserializing."""
        pass
class PILSerializer(Serializer):
    """Serialize and deserialize a PIL Image to and from bytes.

    Layout: ``uint32 width | uint32 height | uint32 mode length | mode (utf-8) | raw pixels``.
    """
    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Encode the image as a small header followed by its raw pixel buffer."""
        encoded_mode = item.mode.encode("utf-8")
        header = np.array([item.size[0], item.size[1], len(encoded_mode)], np.uint32)
        return header.tobytes() + encoded_mode + item.tobytes(), None
    @classmethod
    def deserialize(cls, data: bytes) -> Any:
        """Rebuild the image from the header + pixel layout written by :meth:`serialize`."""
        header_end = 3 * 4  # three uint32 fields
        width, height, mode_size = np.frombuffer(data[:header_end], np.uint32)
        mode_end = header_end + mode_size
        mode = data[header_end:mode_end].decode("utf-8")
        return Image.frombytes(mode, (width, height), data[mode_end:])  # pyright: ignore
    def can_serialize(self, item: Any) -> bool:
        """Accept any PIL image except JPEG-backed ones, which ``JPEGSerializer`` handles."""
        return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize and deserialize a JPEG image to and from bytes."""
    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Return the JPEG-encoded bytes of ``item``, re-encoding non-JPEG images."""
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # The image is backed by a file on disk: stream its raw bytes untouched.
                with open(item.filename, "rb") as f:
                    return f.read(), None
            # In-memory JPEG: re-encode it through a buffer.
            buffer = io.BytesIO()
            item.save(buffer, format="JPEG")
            return buffer.getvalue(), None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Any other PIL image is converted to RGB and re-encoded as JPEG.
            buffer = io.BytesIO()
            item.convert("RGB").save(buff := buffer, quality=100, format="JPEG")
            buff.seek(0)
            return buff.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")
    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode JPEG bytes, preferring torchvision's fast path over PIL."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        img = PILSerializer.deserialize(data)
        return pil_to_tensor(img) if _TORCH_VISION_AVAILABLE else img
    def can_serialize(self, item: Any) -> bool:
        """Only JPEG-backed PIL images are handled here."""
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
    """The BytesSerializer serialize and deserialize bytes to and from bytes (pass-through)."""
    def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
        """Bytes are stored as-is: no transformation is needed."""
        return item, None
    def deserialize(self, item: bytes) -> bytes:
        """Return the stored bytes unchanged."""
        return item
    def can_serialize(self, item: bytes) -> bool:
        return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """Serialize and deserialize a multi-dimensional ``torch.Tensor`` to and from bytes.

    Layout: ``uint32 dtype index | uint32 ndim | uint32 * ndim shape | raw C-order data``.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Encode the tensor with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None
    def deserialize(self, data: bytes) -> torch.Tensor:
        """Decode a tensor previously produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # One uint32 per dimension, starting at offset 8.
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Fix: derive the payload offset from ``shape_size`` instead of relying on the
        # loop variable leaking out of the for-loop (NameError for 0-dim input).
        payload_start = 8 + 4 * shape_size
        tensor = torch.frombuffer(data[payload_start:], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)
    def can_serialize(self, item: torch.Tensor) -> bool:
        """Handle exactly ``torch.Tensor`` (no subclasses) with ndim > 1."""
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
    """The NoHeaderTensorSerializer serialize and deserialize a 1-D tensor to and from bytes.

    The dtype is carried in the data-format tag instead of a per-item header,
    so :meth:`setup` must run before :meth:`deserialize`.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> index encoded in the data-format tag.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
        # Populated by ``setup`` from the stored data format.
        self._dtype: Optional[torch.dtype] = None
    def setup(self, data_format: str) -> None:
        """Recover the tensor dtype from a ``no_header_tensor:<index>`` format string."""
        self._dtype = _TORCH_DTYPES_MAPPING[int(data_format.split(":")[1])]
    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Store only the raw C-order buffer; the dtype index travels in the tag."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        return item.numpy().tobytes(order="C"), f"no_header_tensor:{dtype_indice}"
    def deserialize(self, data: bytes) -> torch.Tensor:
        assert self._dtype
        return torch.frombuffer(data, dtype=self._dtype)
    def can_serialize(self, item: torch.Tensor) -> bool:
        """Handle exactly ``torch.Tensor`` (no subclasses) with a single dimension."""
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """Serialize and deserialize a multi-dimensional ``np.ndarray`` to and from bytes.

    Layout: ``uint32 dtype index | uint32 ndim | uint32 * ndim shape | raw C-order data``.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Encode the array with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None
    def deserialize(self, data: bytes) -> np.ndarray:
        """Decode an array previously produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Deserialize the shape header: one uint32 per dimension starting at offset 8.
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Fix: derive the payload offset from ``shape_size`` instead of relying on the
        # loop variable leaking out of the for-loop (NameError for 0-dim input).
        tensor = np.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        # Fix: compare against a tuple — ``ndarray.shape`` is a tuple and
        # ``tuple == list`` is always False, so this fast-path never triggered.
        if tensor.shape == tuple(shape):
            return tensor
        return np.reshape(tensor, shape)
    def can_serialize(self, item: np.ndarray) -> bool:
        """Handle exactly ``np.ndarray`` (no subclasses) with ndim > 1."""
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """The NoHeaderNumpySerializer serialize and deserialize a 1-D numpy array to and from bytes.

    The dtype is carried in the data-format tag instead of a per-item header,
    so :meth:`setup` must run before :meth:`deserialize`.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> index encoded in the data-format tag.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
        # Populated by ``setup`` from the stored data format.
        self._dtype: Optional[np.dtype] = None
    def setup(self, data_format: str) -> None:
        """Recover the array dtype from a ``no_header_numpy:<index>`` format string."""
        self._dtype = _NUMPY_DTYPES_MAPPING[int(data_format.split(":")[1])]
    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Store only the raw C-order buffer; the dtype index travels in the tag."""
        dtype_indice: int = self._dtype_to_indices[item.dtype]
        return item.tobytes(order="C"), f"no_header_numpy:{dtype_indice}"
    def deserialize(self, data: bytes) -> np.ndarray:
        assert self._dtype
        return np.frombuffer(data, dtype=self._dtype)
    def can_serialize(self, item: np.ndarray) -> bool:
        """Handle exactly ``np.ndarray`` (no subclasses) with a single dimension."""
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
    """The PickleSerializer serialize and deserialize python objects to and from bytes.

    Catch-all fallback: ``can_serialize`` accepts everything; it is registered
    last in ``_SERIALIZERS``.
    """
    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None
    def deserialize(self, data: bytes) -> Any:
        # SECURITY: ``pickle.loads`` executes arbitrary code; only feed it data
        # produced by a trusted writer.
        return pickle.loads(data)
    def can_serialize(self, _: Any) -> bool:
        return True
class FileSerializer(Serializer):
    """Store the raw content of an arbitrary file, tagged with its extension."""
    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        """Read the whole file and tag the payload as ``file:<extension>``."""
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"file:{extension}"
    def deserialize(self, data: bytes) -> Any:
        """The raw bytes are returned untouched; decoding is left to the caller."""
        return data
    def can_serialize(self, data: Any) -> bool:
        """Handle any string that names an existing file."""
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store a raw video file, tagged with its extension; decode via torchvision."""
    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")
    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        """Read the whole video file and tag the payload as ``video:<extension>``."""
        _, file_extension = os.path.splitext(filepath)
        with open(filepath, "rb") as f:
            file_extension = file_extension.replace(".", "").lower()
            return f.read(), f"video:{file_extension}"
    def deserialize(self, data: bytes) -> Any:
        """Decode the stored bytes with ``torchvision.io.read_video``."""
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io
        # ``read_video`` wants a path, so round-trip the bytes through a temp file.
        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")
    def can_serialize(self, data: Any) -> bool:
        """Handle paths to existing files whose extension is a known video format.

        Fix: require the ``.`` before the extension — a bare ``endswith("mp4")``
        also matched unrelated names such as ``"clip.amp4"``. Matching is
        case-insensitive, consistent with ``serialize`` lowercasing the tag.
        """
        return (
            isinstance(data, str)
            and os.path.isfile(data)
            and any(data.lower().endswith(f".{ext}") for ext in self._EXTENSIONS)
        )
class StringSerializer(Serializer):
    """Store plain strings as UTF-8 bytes."""
    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None
    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")
    def can_serialize(self, data: str) -> bool:
        # Strings naming an existing file are left to the file/video serializers.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Store a scalar as the raw bytes of a single numpy value."""
    def __init__(self, dtype: type) -> None:
        self.dtype = dtype
        # Byte width of one scalar of this dtype.
        self.size = self.dtype().nbytes
    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        """Pack ``obj`` into one fixed-width scalar."""
        value = self.dtype(obj)
        return value.tobytes(), None
    def deserialize(self, data: bytes) -> Any:
        """Read back the single scalar stored in ``data``."""
        return np.frombuffer(data, dtype=self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Store Python ints as int64 scalars."""
    def __init__(self) -> None:
        super().__init__(np.int64)
    def can_serialize(self, data: int) -> bool:
        # NOTE(review): ``isinstance(True, int)`` is True, so bools are stored as
        # int64 and come back as numbers — confirm this is intended.
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Store Python floats as float64 scalars."""
    def __init__(self) -> None:
        super().__init__(np.float64)
    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Registry of the built-in serializers. ``pickle`` accepts everything
# (``can_serialize`` returns True), so it is registered last as the catch-all;
# presumably the first serializer whose ``can_serialize`` matches wins — confirm
# against the caller of ``_get_serializers``.
_SERIALIZERS = OrderedDict(**{
    "str": StringSerializer(),
    "int": IntegerSerializer(),
    "float": FloatSerializer(),
    "video": VideoSerializer(),
    "tif": FileSerializer(),
    "file": FileSerializer(),
    "pil": PILSerializer(),
    "jpeg": JPEGSerializer(),
    "bytes": BytesSerializer(),
    "no_header_numpy": NoHeaderNumpySerializer(),
    "numpy": NumpySerializer(),
    "no_header_tensor": NoHeaderTensorSerializer(),
    "tensor": TensorSerializer(),
    "pickle": PickleSerializer(),
})
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Merge user-provided serializers with the built-in registry.

    User entries come first in the resulting order, but ``update(_SERIALIZERS)``
    means a built-in serializer REPLACES a user entry registered under the same
    key — NOTE(review): confirm this override direction is intended.
    """
    if serializers:
        serializers = OrderedDict(**serializers)
        serializers.update(_SERIALIZERS)
    else:
        # NOTE(review): returns the shared module-level registry itself; callers
        # mutating it would affect every other caller.
        serializers = _SERIALIZERS
    return serializers
|
evocodebench_data_128
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
    """The base interface for any serializers.
    A Serializer serialize and deserialize to and from bytes.
    """
    @abstractmethod
    def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
        """Convert ``data`` to bytes, plus an optional ``name:metadata`` format tag."""
        pass
    @abstractmethod
    def deserialize(self, data: bytes) -> Any:
        """Reconstruct the original item from its serialized ``data``."""
        pass
    @abstractmethod
    def can_serialize(self, data: Any) -> bool:
        """Return whether this serializer is able to handle ``data``."""
        pass
    def setup(self, metadata: Any) -> None:
        """Optional hook: configure the serializer from the stored data format before deserializing."""
        pass
class PILSerializer(Serializer):
    """Serialize and deserialize a PIL Image to and from bytes.

    Layout: ``uint32 width | uint32 height | uint32 mode length | mode (utf-8) | raw pixels``.
    """
    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Encode the image as a small header followed by its raw pixel buffer."""
        encoded_mode = item.mode.encode("utf-8")
        header = np.array([item.size[0], item.size[1], len(encoded_mode)], np.uint32)
        return header.tobytes() + encoded_mode + item.tobytes(), None
    @classmethod
    def deserialize(cls, data: bytes) -> Any:
        """Rebuild the image from the header + pixel layout written by :meth:`serialize`."""
        header_end = 3 * 4  # three uint32 fields
        width, height, mode_size = np.frombuffer(data[:header_end], np.uint32)
        mode_end = header_end + mode_size
        mode = data[header_end:mode_end].decode("utf-8")
        return Image.frombytes(mode, (width, height), data[mode_end:])  # pyright: ignore
    def can_serialize(self, item: Any) -> bool:
        """Accept any PIL image except JPEG-backed ones, which ``JPEGSerializer`` handles."""
        return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize and deserialize a JPEG image to and from bytes."""
    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Return the JPEG-encoded bytes of ``item``, re-encoding non-JPEG images."""
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # The image is backed by a file on disk: stream its raw bytes untouched.
                with open(item.filename, "rb") as f:
                    return f.read(), None
            # In-memory JPEG: re-encode it through a buffer.
            buffer = io.BytesIO()
            item.save(buffer, format="JPEG")
            return buffer.getvalue(), None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Any other PIL image is converted to RGB and re-encoded as JPEG.
            buffer = io.BytesIO()
            item.convert("RGB").save(buffer, quality=100, format="JPEG")
            buffer.seek(0)
            return buffer.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")
    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode JPEG bytes, preferring torchvision's fast path over PIL."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        img = PILSerializer.deserialize(data)
        return pil_to_tensor(img) if _TORCH_VISION_AVAILABLE else img
    def can_serialize(self, item: Any) -> bool:
        """Only JPEG-backed PIL images are handled here."""
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
    """The BytesSerializer serialize and deserialize bytes to and from bytes (pass-through)."""
    def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
        """Bytes are stored as-is: no transformation is needed."""
        return item, None
    def deserialize(self, item: bytes) -> bytes:
        """Return the stored bytes unchanged."""
        return item
    def can_serialize(self, item: bytes) -> bool:
        return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """Serialize and deserialize a multi-dimensional ``torch.Tensor`` to and from bytes.

    Layout: ``uint32 dtype index | uint32 ndim | uint32 * ndim shape | raw C-order data``.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Encode the tensor with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None
    def deserialize(self, data: bytes) -> torch.Tensor:
        """Decode a tensor previously produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # One uint32 per dimension, starting at offset 8.
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Fix: derive the payload offset from ``shape_size`` instead of relying on the
        # loop variable leaking out of the for-loop (NameError for 0-dim input).
        payload_start = 8 + 4 * shape_size
        tensor = torch.frombuffer(data[payload_start:], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)
    def can_serialize(self, item: torch.Tensor) -> bool:
        """Handle exactly ``torch.Tensor`` (no subclasses) with ndim > 1."""
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
    """The NoHeaderTensorSerializer serialize and deserialize a 1-D tensor to and from bytes.

    The dtype is carried in the data-format tag instead of a per-item header,
    so :meth:`setup` must run before :meth:`deserialize`.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> index encoded in the data-format tag.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
        # Populated by ``setup`` from the stored data format.
        self._dtype: Optional[torch.dtype] = None
    def setup(self, data_format: str) -> None:
        """Recover the tensor dtype from a ``no_header_tensor:<index>`` format string."""
        self._dtype = _TORCH_DTYPES_MAPPING[int(data_format.split(":")[1])]
    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Store only the raw C-order buffer; the dtype index travels in the tag."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        return item.numpy().tobytes(order="C"), f"no_header_tensor:{dtype_indice}"
    def deserialize(self, data: bytes) -> torch.Tensor:
        assert self._dtype
        return torch.frombuffer(data, dtype=self._dtype)
    def can_serialize(self, item: torch.Tensor) -> bool:
        """Handle exactly ``torch.Tensor`` (no subclasses) with a single dimension."""
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """Serialize and deserialize a multi-dimensional ``np.ndarray`` to and from bytes.

    Layout: ``uint32 dtype index | uint32 ndim | uint32 * ndim shape | raw C-order data``.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Encode the array with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None
    def deserialize(self, data: bytes) -> np.ndarray:
        """Decode an array previously produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Deserialize the shape header: one uint32 per dimension starting at offset 8.
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Fix: derive the payload offset from ``shape_size`` instead of relying on the
        # loop variable leaking out of the for-loop (NameError for 0-dim input).
        tensor = np.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        # Fix: compare against a tuple — ``ndarray.shape`` is a tuple and
        # ``tuple == list`` is always False, so this fast-path never triggered.
        if tensor.shape == tuple(shape):
            return tensor
        return np.reshape(tensor, shape)
    def can_serialize(self, item: np.ndarray) -> bool:
        """Handle exactly ``np.ndarray`` (no subclasses) with ndim > 1."""
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """The NoHeaderNumpySerializer serialize and deserialize a 1-D numpy array to and from bytes.

    The dtype is carried in the data-format tag instead of a per-item header,
    so :meth:`setup` must run before :meth:`deserialize`.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> index encoded in the data-format tag.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
        # Populated by ``setup`` from the stored data format.
        self._dtype: Optional[np.dtype] = None
    def setup(self, data_format: str) -> None:
        """Recover the array dtype from a ``no_header_numpy:<index>`` format string."""
        self._dtype = _NUMPY_DTYPES_MAPPING[int(data_format.split(":")[1])]
    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Store only the raw C-order buffer; the dtype index travels in the tag."""
        dtype_indice: int = self._dtype_to_indices[item.dtype]
        return item.tobytes(order="C"), f"no_header_numpy:{dtype_indice}"
    def deserialize(self, data: bytes) -> np.ndarray:
        assert self._dtype
        return np.frombuffer(data, dtype=self._dtype)
    def can_serialize(self, item: np.ndarray) -> bool:
        """Handle exactly ``np.ndarray`` (no subclasses) with a single dimension."""
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
    """The PickleSerializer serialize and deserialize python objects to and from bytes.

    Catch-all fallback: ``can_serialize`` accepts everything; it is registered
    last in ``_SERIALIZERS``.
    """
    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None
    def deserialize(self, data: bytes) -> Any:
        # SECURITY: ``pickle.loads`` executes arbitrary code; only feed it data
        # produced by a trusted writer.
        return pickle.loads(data)
    def can_serialize(self, _: Any) -> bool:
        return True
class FileSerializer(Serializer):
    """Store the raw content of an arbitrary file, tagged with its extension."""
    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        """Read the whole file and tag the payload as ``file:<extension>``."""
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as f:
            return f.read(), f"file:{extension}"
    def deserialize(self, data: bytes) -> Any:
        """The raw bytes are returned untouched; decoding is left to the caller."""
        return data
    def can_serialize(self, data: Any) -> bool:
        """Handle any string that names an existing file."""
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store a raw video file, tagged with its extension; decode via torchvision."""
    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")
    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        """Read the whole video file and tag the payload as ``video:<extension>``."""
        _, file_extension = os.path.splitext(filepath)
        with open(filepath, "rb") as f:
            file_extension = file_extension.replace(".", "").lower()
            return f.read(), f"video:{file_extension}"
    def deserialize(self, data: bytes) -> Any:
        """Decode the stored bytes with ``torchvision.io.read_video``."""
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io
        # ``read_video`` wants a path, so round-trip the bytes through a temp file.
        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")
    def can_serialize(self, data: Any) -> bool:
        """Handle paths to existing files whose extension is a known video format.

        Fix: require the ``.`` before the extension — a bare ``endswith("mp4")``
        also matched unrelated names such as ``"clip.amp4"``. Matching is
        case-insensitive, consistent with ``serialize`` lowercasing the tag.
        """
        return (
            isinstance(data, str)
            and os.path.isfile(data)
            and any(data.lower().endswith(f".{ext}") for ext in self._EXTENSIONS)
        )
class StringSerializer(Serializer):
    """Store plain strings as UTF-8 bytes."""
    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None
    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")
    def can_serialize(self, data: str) -> bool:
        # Strings naming an existing file are left to the file/video serializers.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Store a scalar as the raw bytes of a single numpy value."""
    def __init__(self, dtype: type) -> None:
        self.dtype = dtype
        # Byte width of one scalar of this dtype.
        self.size = self.dtype().nbytes
    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        """Pack ``obj`` into one fixed-width scalar."""
        value = self.dtype(obj)
        return value.tobytes(), None
    def deserialize(self, data: bytes) -> Any:
        """Read back the single scalar stored in ``data``."""
        return np.frombuffer(data, dtype=self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Store Python ints as int64 scalars."""
    def __init__(self) -> None:
        super().__init__(np.int64)
    def can_serialize(self, data: int) -> bool:
        # NOTE(review): ``isinstance(True, int)`` is True, so bools are stored as
        # int64 and come back as numbers — confirm this is intended.
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Store Python floats as float64 scalars."""
    def __init__(self) -> None:
        super().__init__(np.float64)
    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Registry of the built-in serializers. ``pickle`` accepts everything
# (``can_serialize`` returns True), so it is registered last as the catch-all;
# presumably the first serializer whose ``can_serialize`` matches wins — confirm
# against the caller of ``_get_serializers``.
_SERIALIZERS = OrderedDict(**{
    "str": StringSerializer(),
    "int": IntegerSerializer(),
    "float": FloatSerializer(),
    "video": VideoSerializer(),
    "tif": FileSerializer(),
    "file": FileSerializer(),
    "pil": PILSerializer(),
    "jpeg": JPEGSerializer(),
    "bytes": BytesSerializer(),
    "no_header_numpy": NoHeaderNumpySerializer(),
    "numpy": NumpySerializer(),
    "no_header_tensor": NoHeaderTensorSerializer(),
    "tensor": TensorSerializer(),
    "pickle": PickleSerializer(),
})
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Merge user-provided serializers with the built-in registry.

    User entries come first in the resulting order, but ``update(_SERIALIZERS)``
    means a built-in serializer REPLACES a user entry registered under the same
    key — NOTE(review): confirm this override direction is intended.
    """
    if serializers:
        serializers = OrderedDict(**serializers)
        serializers.update(_SERIALIZERS)
    else:
        # NOTE(review): returns the shared module-level registry itself; callers
        # mutating it would affect every other caller.
        serializers = _SERIALIZERS
    return serializers
|
evocodebench_data_129
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
    """The base interface for any serializers.
    A Serializer serialize and deserialize to and from bytes.
    """
    @abstractmethod
    def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
        """Convert ``data`` to bytes, plus an optional ``name:metadata`` format tag."""
        pass
    @abstractmethod
    def deserialize(self, data: bytes) -> Any:
        """Reconstruct the original item from its serialized ``data``."""
        pass
    @abstractmethod
    def can_serialize(self, data: Any) -> bool:
        """Return whether this serializer is able to handle ``data``."""
        pass
    def setup(self, metadata: Any) -> None:
        """Optional hook: configure the serializer from the stored data format before deserializing."""
        pass
class PILSerializer(Serializer):
    """Serialize and deserialize a PIL Image to and from bytes.

    Layout: ``uint32 width | uint32 height | uint32 mode length | mode (utf-8) | raw pixels``.
    """
    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Encode the image as a small header followed by its raw pixel buffer."""
        encoded_mode = item.mode.encode("utf-8")
        header = np.array([item.size[0], item.size[1], len(encoded_mode)], np.uint32)
        return header.tobytes() + encoded_mode + item.tobytes(), None
    @classmethod
    def deserialize(cls, data: bytes) -> Any:
        """Rebuild the image from the header + pixel layout written by :meth:`serialize`."""
        header_end = 3 * 4  # three uint32 fields
        width, height, mode_size = np.frombuffer(data[:header_end], np.uint32)
        mode_end = header_end + mode_size
        mode = data[header_end:mode_end].decode("utf-8")
        return Image.frombytes(mode, (width, height), data[mode_end:])  # pyright: ignore
    def can_serialize(self, item: Any) -> bool:
        """Accept any PIL image except JPEG-backed ones, which ``JPEGSerializer`` handles."""
        return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize and deserialize a JPEG image to and from bytes."""
    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        """Return the JPEG-encoded bytes of ``item``, re-encoding non-JPEG images."""
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            if item.filename and os.path.isfile(item.filename):
                # The image is backed by a file on disk: stream its raw bytes untouched.
                with open(item.filename, "rb") as f:
                    return f.read(), None
            # In-memory JPEG: re-encode it through a buffer.
            buffer = io.BytesIO()
            item.save(buffer, format="JPEG")
            return buffer.getvalue(), None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Any other PIL image is converted to RGB and re-encoded as JPEG.
            buffer = io.BytesIO()
            item.convert("RGB").save(buffer, quality=100, format="JPEG")
            buffer.seek(0)
            return buffer.read(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")
    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        """Decode JPEG bytes, preferring torchvision's fast path over PIL."""
        if _TORCH_VISION_AVAILABLE:
            array = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(array)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        img = PILSerializer.deserialize(data)
        return pil_to_tensor(img) if _TORCH_VISION_AVAILABLE else img
    def can_serialize(self, item: Any) -> bool:
        """Only JPEG-backed PIL images are handled here."""
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
    """The BytesSerializer serialize and deserialize bytes to and from bytes (pass-through)."""
    def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
        """Bytes are stored as-is: no transformation is needed."""
        return item, None
    def deserialize(self, item: bytes) -> bytes:
        """Return the stored bytes unchanged."""
        return item
    def can_serialize(self, item: bytes) -> bool:
        return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """Serialize and deserialize a multi-dimensional ``torch.Tensor`` to and from bytes.

    Layout: ``uint32 dtype index | uint32 ndim | uint32 * ndim shape | raw C-order data``.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch dtype -> index stored in the header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Encode the tensor with a small header describing dtype and shape."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None
    def deserialize(self, data: bytes) -> torch.Tensor:
        """Decode a tensor previously produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # One uint32 per dimension, starting at offset 8.
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Fix: derive the payload offset from ``shape_size`` instead of relying on the
        # loop variable leaking out of the for-loop (NameError for 0-dim input).
        payload_start = 8 + 4 * shape_size
        tensor = torch.frombuffer(data[payload_start:], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)
    def can_serialize(self, item: torch.Tensor) -> bool:
        """Handle exactly ``torch.Tensor`` (no subclasses) with ndim > 1."""
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
    """Serialize 1-D tensors without a header; the dtype travels in the data-format string."""

    def __init__(self) -> None:
        super().__init__()
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
        # Resolved lazily by `setup` from the "no_header_tensor:<idx>" data format.
        self._dtype: Optional[torch.dtype] = None

    def setup(self, data_format: str) -> None:
        _, _, index = data_format.partition(":")
        self._dtype = _TORCH_DTYPES_MAPPING[int(index)]

    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        return item.numpy().tobytes(order="C"), f"no_header_tensor:{dtype_indice}"

    def deserialize(self, data: bytes) -> torch.Tensor:
        assert self._dtype
        return torch.frombuffer(data, dtype=self._dtype)

    def can_serialize(self, item: torch.Tensor) -> bool:
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """The NumpySerializer serialize and deserialize numpy to and from bytes.

    Layout: ``uint32 dtype index | uint32 ndim | uint32 per dim | raw C-order data``.
    """

    def __init__(self) -> None:
        super().__init__()
        # Reverse lookup: numpy dtype -> index used in the serialized header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Encode the array's dtype, shape and raw data into a single bytes blob."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None

    def deserialize(self, data: bytes) -> np.ndarray:
        """Rebuild an array from the header + payload produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Deserialize the shape header: one uint32 per dimension, starting at byte 8.
        shape = tuple(
            np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item()
            for shape_idx in range(shape_size)
        )
        # Fix: derive the payload offset from the header size; the previous code used
        # the last loop index (undefined for an empty shape header) and compared the
        # array's tuple shape against a list (always unequal, forcing a reshape).
        array = np.frombuffer(data[8 + 4 * shape_size :], dtype=dtype)
        if array.shape == shape:
            return array
        return np.reshape(array, shape)

    def can_serialize(self, item: np.ndarray) -> bool:
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """Serialize 1-D numpy arrays without a header; the dtype travels in the data-format string."""

    def __init__(self) -> None:
        super().__init__()
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
        # Resolved lazily by `setup` from the "no_header_numpy:<idx>" data format.
        self._dtype: Optional[np.dtype] = None

    def setup(self, data_format: str) -> None:
        _, _, index = data_format.partition(":")
        self._dtype = _NUMPY_DTYPES_MAPPING[int(index)]

    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        dtype_indice: int = self._dtype_to_indices[item.dtype]
        return item.tobytes(order="C"), f"no_header_numpy:{dtype_indice}"

    def deserialize(self, data: bytes) -> np.ndarray:
        assert self._dtype
        return np.frombuffer(data, dtype=self._dtype)

    def can_serialize(self, item: np.ndarray) -> bool:
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
    """Fallback serializer: pickle arbitrary Python objects to and from bytes."""

    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None

    def deserialize(self, data: bytes) -> Any:
        # NOTE(review): pickle.loads executes arbitrary code; only deserialize
        # chunks produced by a trusted writer.
        return pickle.loads(data)

    def can_serialize(self, _: Any) -> bool:
        # Pickle accepts (almost) anything, so this serializer is the catch-all.
        return True
class FileSerializer(Serializer):
    """Store the raw content of a file on disk, tagging it with its extension."""

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as stream:
            return stream.read(), f"file:{extension}"

    def deserialize(self, data: bytes) -> Any:
        # Returned as-is: the consumer decides how to interpret the raw bytes.
        return data

    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store common video container files and decode them back with torchvision."""

    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")

    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as stream:
            return stream.read(), f"video:{extension}"

    def deserialize(self, data: bytes) -> Any:
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io

        # torchvision's reader works on file paths, so spill the bytes to a temp file.
        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")

    def can_serialize(self, data: Any) -> bool:
        # str.endswith accepts a tuple of suffixes, replacing the explicit any() chain.
        return isinstance(data, str) and os.path.isfile(data) and data.endswith(self._EXTENSIONS)
class StringSerializer(Serializer):
    """Encode and decode Python strings as UTF-8 bytes."""

    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        return obj.encode("utf-8"), None

    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")

    def can_serialize(self, data: str) -> bool:
        # Strings that point at an existing file are handled by FileSerializer instead.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Store a scalar value as the raw bytes of a fixed-width numpy dtype."""

    def __init__(self, dtype: type) -> None:
        # The numpy scalar type (e.g. np.int64) used for the fixed-width encoding.
        self.dtype = dtype
        # Number of bytes one encoded scalar occupies.
        self.size = self.dtype().nbytes

    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        return self.dtype(obj).tobytes(), None

    def deserialize(self, data: bytes) -> Any:
        return np.frombuffer(data, self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Store Python ints as fixed-width ``np.int64`` scalars."""

    def __init__(self) -> None:
        super().__init__(np.int64)

    def can_serialize(self, data: int) -> bool:
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Store Python floats as fixed-width ``np.float64`` scalars."""

    def __init__(self) -> None:
        super().__init__(np.float64)

    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Registry of the built-in serializers. NOTE(review): the OrderedDict suggests the
# iteration order matters for serializer selection (specific serializers before the
# pickle catch-all) — confirm against the call sites that probe `can_serialize`.
_SERIALIZERS = OrderedDict(
    [
        ("str", StringSerializer()),
        ("int", IntegerSerializer()),
        ("float", FloatSerializer()),
        ("video", VideoSerializer()),
        ("tif", FileSerializer()),
        ("file", FileSerializer()),
        ("pil", PILSerializer()),
        ("jpeg", JPEGSerializer()),
        ("bytes", BytesSerializer()),
        ("no_header_numpy", NoHeaderNumpySerializer()),
        ("numpy", NumpySerializer()),
        ("no_header_tensor", NoHeaderTensorSerializer()),
        ("tensor", TensorSerializer()),
        ("pickle", PickleSerializer()),
    ]
)
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Merge user-provided serializers with the built-in registry.

    User entries come first in iteration order, but built-in serializers override
    user entries that reuse a built-in key (``update`` wins on key collisions).
    """
    if not serializers:
        return _SERIALIZERS
    merged = OrderedDict(**serializers)
    merged.update(_SERIALIZERS)
    return merged
|
evocodebench_data_130
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import inspect
import logging
import os
from copy import deepcopy
from importlib import reload
from itertools import cycle
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from torch.utils.data import Dataset, IterableDataset
from torch.utils.data._utils.collate import default_collate
from torch.utils.data._utils.fetch import _BaseDatasetFetcher
from torch.utils.data.dataloader import (
DataLoader,
_BaseDataLoaderIter,
_DatasetKind,
_MultiProcessingDataLoaderIter,
_SingleProcessDataLoaderIter,
)
from torch.utils.data.sampler import BatchSampler, Sampler
from litdata.constants import _DEFAULT_CHUNK_BYTES, _TORCH_GREATER_EQUAL_2_1_0, _VIZ_TRACKER_AVAILABLE
from litdata.streaming import Cache
from litdata.streaming.combined import (
__NUM_SAMPLES_YIELDED_KEY__,
__SAMPLES_KEY__,
CombinedStreamingDataset,
)
from litdata.streaming.dataset import StreamingDataset
from litdata.streaming.sampler import CacheBatchSampler
from litdata.utilities.env import _DistributedEnv
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten
logger = logging.Logger(__name__)
def _equal_items(data_1: Any, data_2: Any) -> bool:
    """Return True when two pytrees flatten to the same number of pairwise-equal leaves.

    NOTE(review): the tree specs are discarded, so differently-structured trees with
    equal leaf sequences compare equal — confirm this is intended.
    """
    leaves_1, _ = tree_flatten(data_1)
    leaves_2, _ = tree_flatten(data_2)
    return len(leaves_1) == len(leaves_2) and all(
        _equal_item(left, right) for left, right in zip(leaves_1, leaves_2)
    )
def _equal_item(d1: Any, d2: Any) -> bool:
if not isinstance(d1, type(d2)):
return False
equality = d1 == d2
if isinstance(equality, torch.Tensor):
return bool(equality.all().item())
if equality is True:
return True
return False
class CacheDataset(Dataset):
    def __init__(
        self,
        dataset: Any,
        cache_dir: str,
        chunk_bytes: Optional[int],
        chunk_size: Optional[int],
        compression: Optional[str],
    ):
        """The `CacheDataset` is a dataset wrapper to provide a beginner experience with the Cache.

        Arguments:
            dataset: The dataset of the user
            cache_dir: The folder where the chunks are written to.
            chunk_bytes: The maximal number of bytes to write within a chunk.
            chunk_size: The maximal number of items to write to a chunk.
            compression: The compression algorithm to use to reduce the size of the chunk.
        """
        self._dataset = dataset
        self._cache = Cache(cache_dir, chunk_bytes=chunk_bytes, chunk_size=chunk_size, compression=compression)
        # Flipped to True once the first item proves __getitem__ is deterministic.
        self._is_deterministic = False

    def __len__(self) -> int:
        # Once the cache is fully written, it becomes the source of truth.
        return len(self._cache) if self._cache.filled else len(self._dataset)

    def __getitem__(self, index: int) -> Any:
        if self._cache.filled:
            return self._cache[index]
        item = self._dataset[index]
        if not self._is_deterministic:
            # Fetch the same index twice: caching is only valid for deterministic items.
            probe = self._dataset[index]
            if not _equal_items(item, probe):
                raise ValueError(
                    f"Your dataset items aren't deterministic. Found {item} and {probe} for index {index}."
                    " HINT: Use the `litdata.cache.Cache` directly within your dataset."
                )
            self._is_deterministic = True
        self._cache[index] = item
        return item
class CacheCollateFn:
    """This CacheCollateFn is used to accelerate the processing of the data generated using the Cache.

    During the chunking phase, there is no need to return any data from the DataLoader reducing some time.
    Additionally, if the user makes their __getitem__ asynchronous, the collate executes them in parallel.
    """

    def __init__(self, collate_fn: Optional[Callable] = None) -> None:
        # Fall back to PyTorch's default collate when the user doesn't provide one.
        self.collate_fn = collate_fn or default_collate

    def __call__(self, items: List[Any]) -> Any:
        # All-None batches are chunking placeholders: nothing to collate.
        if all(item is None for item in items):
            return None
        # If the __getitem__ method is asynchronous, await all the items in parallel.
        if all(inspect.iscoroutine(item) for item in items):
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                items = loop.run_until_complete(asyncio.gather(*items))
            finally:
                # Fix: close the freshly created event loop; the original leaked one
                # loop (and its selector/fds) per collated batch.
                asyncio.set_event_loop(None)
                loop.close()
        return self.collate_fn([item for item in items if item is not None])
class _SingleProcessDataLoaderIterPatch(_SingleProcessDataLoaderIter):
    """This is overriden to inform the cache is done chunking."""

    def _next_data(self) -> Any:
        try:
            # Skip the None placeholders emitted while chunking.
            batch = None
            while batch is None:
                batch = super()._next_data()
            return batch
        except StopIteration:
            # The epoch is over: finalize every Cache attached to the dataset.
            for attribute in self._dataset_fetcher.dataset.__dict__.values():
                if isinstance(attribute, Cache):
                    attribute.done()
                    if not attribute.filled:
                        # Single-process run: merge the chunks of the lone worker.
                        attribute.merge(1)
            raise StopIteration()
class WorkerLoop:
    """Wrap the PyTorch DataLoader WorkerLoop to perform caching and profiling.

    An instance replaces ``torch.utils.data._utils.worker._worker_loop`` (see
    ``_MultiProcessingDataLoaderIterPatch``) so each worker can finalize its
    Cache and optionally record a viztracer trace.
    """
    def __init__(self, global_rank: int, profile: bool = False) -> None:
        # Distributed rank of the owning process; profiling is restricted to rank 0.
        self._global_rank = global_rank
        # Whether to record a viztracer trace (worker 0 of rank 0 only).
        self._profile = profile
    def __call__(
        self,
        dataset_kind: Any,
        dataset: Any,
        index_queue: Any,
        data_queue: Any,
        done_event: Any,
        auto_collation: Any,
        collate_fn: Any,
        drop_last: Any,
        base_seed: Any,
        init_fn: Any,
        worker_id: Any,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """Run the real PyTorch worker loop, then finalize caches (and the tracer).

        The signature mirrors ``torch.utils.data._utils.worker._worker_loop``.
        """
        from torch.utils.data._utils import worker
        from litdata.streaming.cache import Cache
        # Only worker 0 of global rank 0 is profiled, and only when viztracer exists.
        enable_profiling = self._global_rank == 0 and worker_id == 0 and _VIZ_TRACKER_AVAILABLE and self._profile
        if enable_profiling:
            from viztracer import VizTracer
            tracer = VizTracer(output_file=os.path.join(os.getcwd(), "trace.json"))
            tracer.start()
        # Reload to remove the patching
        reloaded_worker = reload(worker)
        # Intercept fetcher creation so we can reach the worker's dataset afterwards.
        create_fetcher = _DatasetKind.create_fetcher
        fetcher = None
        def create_fetcher_fn(*args: Any, **kwargs: Any) -> "_BaseDatasetFetcher":
            nonlocal fetcher
            fetcher = create_fetcher(*args, **kwargs)
            return fetcher
        _DatasetKind.create_fetcher = create_fetcher_fn  # type: ignore
        # Delegate to the pristine (reloaded) worker loop; blocks until shutdown.
        reloaded_worker._worker_loop(
            dataset_kind,
            dataset,
            index_queue,
            data_queue,
            done_event,
            auto_collation,
            collate_fn,
            drop_last,
            base_seed,
            init_fn,
            worker_id,
            *args,
            **kwargs,
        )
        # After the loop exits, mark every Cache attached to the dataset as done.
        if dataset_kind == _DatasetKind.Map:
            assert fetcher
            for v in fetcher.dataset.__dict__.values():
                if isinstance(v, Cache):
                    v.done()
        if enable_profiling:
            tracer.stop()
            tracer.save()
class _MultiProcessingDataLoaderIterPatch(_MultiProcessingDataLoaderIter):
    """Multi-process iterator that patches the worker loop to finalize caches."""

    def __init__(self, loader: DataLoader) -> None:
        self._cache = loader._cache
        self._num_workers = loader.num_workers
        # Patch PyTorch worker loop to call the `cache.done()` method.
        from torch.utils.data._utils import worker

        worker._worker_loop = WorkerLoop(loader._global_rank, loader._profile)
        super().__init__(loader)

    def _shutdown_workers(self) -> None:
        super()._shutdown_workers()
        # If the cache isn't complete yet, merge the per-worker chunk indexes.
        if not self._cache.filled:
            self._cache.merge(self._num_workers)

    def _next_data(self) -> Any:
        # Skip the None placeholders produced while chunking; StopIteration simply
        # propagates (the original wrapped it in a no-op re-raise).
        batch = None
        while batch is None:
            batch = super()._next_data()
        return batch
class CacheDataLoader(DataLoader):
    __doc__ = DataLoader.__doc__

    def __init__(
        self,
        dataset: Any,
        *args: Any,
        sampler: Optional[Sampler] = None,
        batch_sampler: Optional[BatchSampler] = None,
        num_workers: int = 0,
        shuffle: bool = False,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = None,
        drop_last: bool = False,
        cache_dir: Optional[str] = None,
        chunk_bytes: Optional[int] = _DEFAULT_CHUNK_BYTES,
        compression: Optional[str] = None,
        profile: bool = False,
        collate_fn: Optional[Callable] = None,
        **kwargs: Any,
    ) -> None:
        """A DataLoader that fills a chunked Cache on the first pass over the dataset.

        Raises:
            ValueError: if a sampler/batch_sampler is provided, the dataset is
                iterable, several caches are attached, or no cache can be resolved.
            ModuleNotFoundError: if profiling is requested without viztracer.
        """
        # The internal CacheBatchSampler owns sampling; user samplers are rejected.
        if sampler:
            raise ValueError(
                "The CacheDataLoader relies on its own internal sampler. Passing a sampler isn't supported."
            )
        if batch_sampler:
            raise ValueError(
                "The CacheDataLoader relies on its own internal sampler. Passing a batch_sampler isn't supported."
            )
        if isinstance(dataset, IterableDataset):
            raise ValueError("Only map-based dataset are supported by the CacheDataLoader for now.")
        if profile and not _VIZ_TRACKER_AVAILABLE:
            raise ModuleNotFoundError("To enable DataLoader profiling, run `pip install viztracer`.")
        cache_list = [v for v in dataset.__dict__.values() if isinstance(v, Cache)]
        if len(cache_list) > 1:
            raise ValueError(
                "We found several Cache used as attributes from your dataset. Only one is support for now."
            )
        if len(cache_list) == 0:
            if cache_dir is None:
                raise ValueError("You should provide a `cache_dir` filepath to the CacheDataLoader.")
            # The user's dataset has no Cache attached: wrap it so items get cached.
            dataset = CacheDataset(dataset, cache_dir, chunk_bytes, batch_size, compression)
            cache = dataset._cache
        else:
            cache = cache_list[0]
        if not cache.filled and shuffle:
            # Fix: the original message read "during the caching phase phase".
            logger.info("Shuffle is ignored during the caching phase.")
        self._cache = cache
        distributed_env = _DistributedEnv.detect()
        self._global_rank = distributed_env.global_rank
        # Chunk-aware sampler coordinating batches across ranks and workers.
        batch_sampler = CacheBatchSampler(
            len(dataset),
            distributed_env.world_size,
            self._global_rank,
            num_workers,
            batch_size or 1,
            drop_last,
            shuffle,
            cache,
        )
        self._profile = profile
        # NOTE(review): `generator` is accepted for API compatibility but unused here.
        super().__init__(
            dataset,
            *args,
            batch_sampler=batch_sampler,  # type: ignore
            collate_fn=CacheCollateFn(collate_fn),
            num_workers=num_workers,
            **kwargs,
        )

    def _get_iterator(self) -> "_BaseDataLoaderIter":
        """Overriden to ensure the `Cache.done()` method is triggered on iteration done."""
        if self.num_workers == 0:
            return _SingleProcessDataLoaderIterPatch(self)
        self.check_worker_number_rationality()
        return _MultiProcessingDataLoaderIterPatch(self)
def _wrapper(fetcher: Any, func: Callable, tracer: Any, profile: int, profile_dir: str) -> Callable:
counter = 0
def wrap(*args: Any, **kwargs: Any) -> Any:
nonlocal counter
result = func(*args, **kwargs)
if tracer.enable and counter == profile:
tracer.stop()
tracer.save()
print(
f"Saved {os.path.join(profile_dir, 'result.json')} file after {profile} batches."
"Use chrome://tracing/ to view it."
)
fetcher.fetch = func
counter += 1
return result
return wrap
class _ProfileWorkerLoop:
    """Wrap the PyTorch DataLoader WorkerLoop to add profiling.

    Installed over ``torch.utils.data._utils.worker._worker_loop`` by
    ``_StreamingMultiProcessingDataLoaderIter``; only worker 0 records a trace.
    """
    def __init__(self, profile: Union[int, bool], profile_dir: Optional[str] = None):
        # True -> profile the whole run; an int -> stop after that many batches.
        self._profile = profile
        # Where result.json is written; defaults to the current working directory.
        self._profile_dir = profile_dir if profile_dir else os.getcwd()
    def __call__(
        self,
        dataset_kind: Any,
        dataset: Any,
        index_queue: Any,
        data_queue: Any,
        done_event: Any,
        auto_collation: Any,
        collate_fn: Any,
        drop_last: Any,
        base_seed: Any,
        init_fn: Any,
        worker_id: Any,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """Run the real worker loop under a viztracer session (worker 0 only).

        The signature mirrors ``torch.utils.data._utils.worker._worker_loop``.
        """
        from torch.utils.data._utils import worker
        from viztracer import VizTracer
        if worker_id == 0:
            # Start from a clean result file for this run.
            output_file = os.path.join(self._profile_dir, "result.json")
            if os.path.exists(output_file):
                os.remove(output_file)
            tracer = VizTracer(output_file=output_file, verbose=0)
            tracer.start()
        # Reload to remove the patching
        reloaded_worker = reload(worker)
        # Intercept fetcher creation so the fetch call itself can be wrapped.
        create_fetcher = _DatasetKind.create_fetcher
        fetcher = None
        def create_fetcher_fn(*args: Any, **kwargs: Any) -> "_BaseDatasetFetcher":
            nonlocal fetcher
            fetcher = create_fetcher(*args, **kwargs)
            # NOTE(review): isinstance(True, int) is True in Python, so a bool
            # profile also takes this branch — confirm that is intended.
            if worker_id == 0 and isinstance(self._profile, int):
                fetcher.fetch = _wrapper(fetcher, fetcher.fetch, tracer, self._profile, self._profile_dir)
            return fetcher
        _DatasetKind.create_fetcher = create_fetcher_fn  # type: ignore
        # Delegate to the pristine (reloaded) worker loop; blocks until shutdown.
        reloaded_worker._worker_loop(
            dataset_kind,
            dataset,
            index_queue,
            data_queue,
            done_event,
            auto_collation,
            collate_fn,
            drop_last,
            base_seed,
            init_fn,
            worker_id,
            *args,
            **kwargs,
        )
        # When profiling the whole run (bool), the trace is saved on shutdown;
        # for an int budget, _wrapper already stopped and saved the tracer.
        if worker_id == 0 and isinstance(self._profile, bool):
            tracer.stop()
            tracer.save()
class _StreamingMultiProcessingDataLoaderIter(_MultiProcessingDataLoaderIter):
    """Multi-process iterator that can resume dispatching on the right worker.

    When the StreamingDataLoader is restored from a state dict, the first few
    indexes must go to the workers that come after the last one used before the
    interruption; `_indexes` holds those worker slots.
    """
    def __init__(self, loader: DataLoader) -> None:
        self._loader = loader
        # Worker slots still to be primed when resuming; empty on a fresh epoch.
        self._indexes = (
            list(range(self._loader._latest_worker_idx, self._loader.num_workers))
            if self._loader._latest_worker_idx > 0
            else []
        )
        self._num_workers = loader.num_workers
        distributed_env = _DistributedEnv.detect()
        # Install the profiling worker loop on rank 0 when requested and available.
        if self._loader._profile_batches and distributed_env.global_rank == 0 and _VIZ_TRACKER_AVAILABLE:
            from torch.utils.data._utils import worker
            worker._worker_loop = _ProfileWorkerLoop(self._loader._profile_batches, self._loader._profile_dir)
        super().__init__(loader)
    def _try_put_index(self) -> None:
        """Dispatch the next index, honoring the resume worker order when restoring."""
        # Used to restart on the right DataLoader worker
        if self._loader.restore and self._indexes:
            # Mirrors the parent implementation, but targets an explicit worker
            # queue instead of the internal round-robin cursor.
            assert self._tasks_outstanding < self._prefetch_factor * self._num_workers
            try:
                index = self._next_index()
            except StopIteration:
                return
            worker_queue_idx = self._indexes.pop(0)
            self._index_queues[worker_queue_idx].put((self._send_idx, index))
            self._task_info[self._send_idx] = (worker_queue_idx,)
            self._tasks_outstanding += 1
            self._send_idx += 1
        else:
            super()._try_put_index()
class StreamingDataLoader(DataLoader):
    r"""The StreamingDataLoader combines a dataset and a sampler, and provides an iterable over the given dataset.

    The :class:`~litdata.streaming.dataloader.StreamingDataLoader` supports either a
    StreamingDataset and CombinedStreamingDataset datasets with single- or multi-process loading,
    customizing
    loading order and optional automatic batching (collation) and memory pinning.

    See :py:mod:`torch.utils.data` documentation page for more details.

    Args:
        dataset (Dataset): dataset from which to load the data.
        batch_size (int, optional): how many samples per batch to load
            (default: ``1``).
        shuffle (bool, optional): set to ``True`` to have the data reshuffled
            at every epoch (default: ``False``).
        num_workers (int, optional): how many subprocesses to use for data
            loading. ``0`` means that the data will be loaded in the main process.
            (default: ``0``)
        collate_fn (Callable, optional): merges a list of samples to form a
            mini-batch of Tensor(s).  Used when using batched loading from a
            map-style dataset.
        pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
            into device/CUDA pinned memory before returning them.  If your data elements
            are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
            see the example below.
        timeout (numeric, optional): if positive, the timeout value for collecting a batch
            from workers. Should always be non-negative. (default: ``0``)
        worker_init_fn (Callable, optional): If not ``None``, this will be called on each
            worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
            input, after seeding and before data loading. (default: ``None``)
        multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If
            ``None``, the default `multiprocessing context`_ of your operating system will
            be used. (default: ``None``)
        generator (torch.Generator, optional): If not ``None``, this RNG will be used
            by RandomSampler to generate random indexes and multiprocessing to generate
            ``base_seed`` for workers. (default: ``None``)
        prefetch_factor (int, optional, keyword-only arg): Number of batches loaded
            in advance by each worker. ``2`` means there will be a total of
            2 * num_workers batches prefetched across all workers. (default value depends
            on the set value for num_workers. If value of num_workers=0 default is ``None``.
            Otherwise, if value of ``num_workers > 0`` default is ``2``).
        persistent_workers (bool, optional): If ``True``, the data loader will not shut down
            the worker processes after a dataset has been consumed once. This allows to
            maintain the workers `Dataset` instances alive. (default: ``False``)
        pin_memory_device (str, optional): the device to :attr:`pin_memory` to if ``pin_memory`` is
            ``True``.
        profile_batches (int, bool, optional): Whether to record data loading profile and generate a result.json file.
        profile_dir (str, optional): Where to store the recorded trace when profile_batches is enabled.
    """

    # NOTE(review): this assignment replaces the class docstring above with
    # DataLoader's — presumably intentional for help()/sphinx; confirm.
    __doc__ = DataLoader.__doc__

    def __init__(
        self,
        dataset: Union[StreamingDataset, CombinedStreamingDataset],
        *args: Any,
        batch_size: int = 1,
        num_workers: int = 0,
        profile_batches: Union[bool, int] = False,
        profile_dir: Optional[str] = None,
        prefetch_factor: Optional[int] = None,
        shuffle: Optional[bool] = None,
        **kwargs: Any,
    ) -> None:  # pyright: ignore
        if not isinstance(dataset, (StreamingDataset, CombinedStreamingDataset)):
            raise RuntimeError(
                "The provided dataset should be either an instance of StreamingDataset or CombinedStreamingDataset."
                f" Found {dataset}."
            )
        # Shuffling is delegated to the dataset itself, not to a sampler.
        if shuffle is not None:
            dataset.set_shuffle(shuffle)
        shuffle = None
        if profile_batches and not _VIZ_TRACKER_AVAILABLE:
            raise ModuleNotFoundError("To use profile_batches, viztracer is required. Run `pip install viztracer`")
        if profile_batches and num_workers == 0:
            raise ValueError("Profiling is supported only with num_workers >= 1.")
        self.current_epoch = 0
        self.batch_size = batch_size
        self.num_workers = num_workers
        self._profile_batches = profile_batches
        self._profile_dir = profile_dir
        # Progress counters used to build/restore the state dict.
        self._num_samples_yielded_streaming = 0
        self._num_samples_yielded_combined: Dict[int, List[Any]] = {}
        self.rng_state: Optional[Any] = None
        # Round-robin over worker slots; tracks which worker produced the last batch.
        self._worker_idx = cycle(list(range(self.num_workers if self.num_workers > 0 else 1)))
        self._worker_idx_iter: Optional[Any] = None
        self._latest_worker_idx = 0
        # True while resuming from a state dict; reset at the end of __iter__.
        self.restore = False
        super().__init__(
            dataset,
            *args,
            batch_size=batch_size,
            num_workers=num_workers,
            prefetch_factor=(10 if num_workers > 0 else None) if prefetch_factor is None else prefetch_factor,
            **kwargs,
        )  # type: ignore

    def __iter__(self) -> Any:
        """Iterate over batches, tracking per-worker progress for checkpointing."""
        if not self.restore:
            # Fresh epoch: reset the worker cursor and the progress counters.
            self._latest_worker_idx = 0
            self._worker_idx = cycle(list(range(self.num_workers if self.num_workers > 0 else 1)))
            self._worker_idx_iter = iter(self._worker_idx)
            self.current_epoch += 1
            self._num_samples_yielded_combined = {}
            self._num_samples_yielded_streaming = 0
        self.dataset.set_epoch(self.current_epoch)
        if isinstance(self.dataset, StreamingDataset):
            assert self.batch_size
            for batch in super().__iter__():
                self._latest_worker_idx = next(self._worker_idx_iter)  # type: ignore
                self._num_samples_yielded_streaming += self.batch_size
                yield batch
        else:
            # Combined datasets piggy-back their per-dataset progress on the batch.
            self.dataset._set_use_streaming_dataloader(True)
            assert self.batch_size
            # TODO: Inject a custom collate function to avoid collating the __NUM_SAMPLES_YIELDED__ key
            for batch in super().__iter__():
                self._latest_worker_idx = next(self._worker_idx_iter)  # type: ignore
                if isinstance(batch, dict) and __NUM_SAMPLES_YIELDED_KEY__ in batch:
                    self._num_samples_yielded_combined[self._latest_worker_idx] = [
                        sample[-1].item() if self.batch_size > 1 else sample.item()
                        for sample in batch[__NUM_SAMPLES_YIELDED_KEY__]
                    ]
                    yield batch[__SAMPLES_KEY__]
                else:
                    yield batch
        self.restore = False

    def state_dict(self) -> Dict[str, Any]:
        """Return a resumable snapshot of the loader's (and dataset's) progress."""
        if isinstance(self.dataset, StreamingDataset):
            assert self.batch_size
            return {
                "dataset": self.dataset.state_dict(
                    self._num_samples_yielded_streaming, self.num_workers, self.batch_size
                ),
                "current_epoch": self.current_epoch,
                "num_samples_yielded": self._num_samples_yielded_streaming,
                "latest_worker_idx": self._latest_worker_idx,
            }
        # Sum the per-worker counters into one total per sub-dataset.
        # NOTE(review): this indexes [0] of the counters — it raises IndexError if
        # state_dict is called before any batch was yielded; confirm callers avoid that.
        num_samples_yieled = [0 for _ in range(len(list(self._num_samples_yielded_combined.values())[0]))]
        for worker_idx in self._num_samples_yielded_combined:
            for dataset_idx, samples_yieled in enumerate(self._num_samples_yielded_combined[worker_idx]):
                num_samples_yieled[dataset_idx] += samples_yieled
        return {
            "dataset": self.dataset.state_dict(self.num_workers, self.batch_size, num_samples_yieled),
            "current_epoch": self.current_epoch if self.restore else self.current_epoch - 1,
            "latest_worker_idx": self._latest_worker_idx,
            "num_samples_yielded": deepcopy(self._num_samples_yielded_combined),
        }

    def load_state_dict(self, obj: Dict[str, Any]) -> None:
        """Load a dict containing training state (called from non-worker process).

        This is called on each copy of the dataset when resuming.

        Args:
            obj (Any): The state.
        """
        self.current_epoch = obj["current_epoch"]
        if isinstance(self.dataset, StreamingDataset):
            self._num_samples_yielded_streaming = obj["num_samples_yielded"]
        else:
            self._num_samples_yielded_combined = obj["num_samples_yielded"]
        # Used to restart on the next DataLoader worker from the previous run.
        self._latest_worker_idx = obj["latest_worker_idx"] + 1
        # Advance the round-robin cursor past the workers already consumed.
        self._worker_idx_iter = iter(self._worker_idx)
        for _ in range(self._latest_worker_idx):
            next(self._worker_idx_iter)
        # Inform we are resuming and disable resetting the StreamingDataLoader state.
        # This is toggle back to False when the `__iter__` method of the StreamingDataLoader completes.
        self.restore = True
        if isinstance(self.dataset, CombinedStreamingDataset):
            self.dataset._set_use_streaming_dataloader(True)
            self.dataset.load_state_dict(obj)
        elif isinstance(self.dataset, StreamingDataset):
            self.dataset.load_state_dict(obj["dataset"])
        else:
            raise RuntimeError("The provided dataset should be a `StreamingDataset` or a `CombinedStreamingDataset`.")

    def _get_iterator(self) -> "_BaseDataLoaderIter":
        """Overriden to ensure the `Cache.done()` method is triggered on iteration done."""
        if self.num_workers == 0:
            return _SingleProcessDataLoaderIter(self)
        self.check_worker_number_rationality()
        return _StreamingMultiProcessingDataLoaderIter(self)
|
evocodebench_data_131
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import tempfile
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from lightning_utilities.core.imports import RequirementCache
from litdata.constants import _NUMPY_DTYPES_MAPPING, _TORCH_DTYPES_MAPPING
_PIL_AVAILABLE = RequirementCache("PIL")
_TORCH_VISION_AVAILABLE = RequirementCache("torchvision")
_AV_AVAILABLE = RequirementCache("av")
if _PIL_AVAILABLE:
from PIL import Image
from PIL.GifImagePlugin import GifImageFile
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PngImagePlugin import PngImageFile
from PIL.WebPImagePlugin import WebPImageFile
else:
Image = None
JpegImageFile = None
PngImageFile = None
if _TORCH_VISION_AVAILABLE:
from torchvision.io import decode_jpeg
from torchvision.transforms.functional import pil_to_tensor
class Serializer(ABC):
    """The base interface for any serializers.

    A Serializer serialize and deserialize to and from bytes.
    """
    @abstractmethod
    def serialize(self, data: Any) -> Tuple[bytes, Optional[str]]:
        """Convert ``data`` to bytes, optionally returning a data-format tag (e.g. ``"file:jpg"``)."""
        pass
    @abstractmethod
    def deserialize(self, data: bytes) -> Any:
        """Rebuild the original item from the bytes produced by :meth:`serialize`."""
        pass
    @abstractmethod
    def can_serialize(self, data: Any) -> bool:
        """Return True when this serializer knows how to handle ``data``."""
        pass
    def setup(self, metadata: Any) -> None:
        """Optional hook: configure the serializer from stored metadata (no-op by default)."""
        pass
class PILSerializer(Serializer):
    """The PILSerializer serialize and deserialize PIL Image to and from bytes."""

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        # Layout: three uint32 (width, height, mode length) + mode string + raw pixels.
        encoded_mode = item.mode.encode("utf-8")
        width, height = item.size
        header = np.array([width, height, len(encoded_mode)], np.uint32).tobytes()
        return header + encoded_mode + item.tobytes(), None

    @classmethod
    def deserialize(cls, data: bytes) -> Any:
        header_end = 3 * 4
        width, height, mode_size = np.frombuffer(data[:header_end], np.uint32)
        mode_end = header_end + mode_size
        mode = data[header_end:mode_end].decode("utf-8")
        return Image.frombytes(mode, (width, height), data[mode_end:])  # pyright: ignore

    def can_serialize(self, item: Any) -> bool:
        # JPEGs are excluded here: they are handled by the dedicated JPEGSerializer.
        return bool(_PIL_AVAILABLE) and isinstance(item, Image.Image) and not isinstance(item, JpegImageFile)
class JPEGSerializer(Serializer):
    """Serialize and deserialize JPEG images to and from raw bytes."""

    def serialize(self, item: Image) -> Tuple[bytes, Optional[str]]:
        if isinstance(item, JpegImageFile):
            if not hasattr(item, "filename"):
                raise ValueError(
                    "The JPEG Image's filename isn't defined. HINT: Open the image in your Dataset __getitem__ method."
                )
            # Prefer the original file bytes when the image still lives on disk.
            if item.filename and os.path.isfile(item.filename):
                with open(item.filename, "rb") as stream:
                    return stream.read(), None
            # Otherwise re-encode the in-memory image as JPEG.
            encoded = io.BytesIO()
            item.save(encoded, format="JPEG")
            return encoded.getvalue(), None
        if isinstance(item, (PngImageFile, WebPImageFile, GifImageFile, Image.Image)):
            # Non-JPEG images are converted to RGB and re-encoded as JPEG.
            encoded = io.BytesIO()
            item.convert("RGB").save(encoded, quality=100, format="JPEG")
            return encoded.getvalue(), None
        raise TypeError(f"The provided item should be of type {JpegImageFile}. Found {item}.")

    def deserialize(self, data: bytes) -> Union[JpegImageFile, torch.Tensor]:
        if _TORCH_VISION_AVAILABLE:
            # Fast path: decode straight into a tensor with torchvision.
            buffer = torch.frombuffer(data, dtype=torch.uint8)
            try:
                return decode_jpeg(buffer)
            except RuntimeError:
                # Note: Some datasets like Imagenet contains some PNG images with JPEG extension, so we fallback to PIL
                pass
        image = PILSerializer.deserialize(data)
        return pil_to_tensor(image) if _TORCH_VISION_AVAILABLE else image

    def can_serialize(self, item: Any) -> bool:
        return bool(_PIL_AVAILABLE) and isinstance(item, JpegImageFile)
class BytesSerializer(Serializer):
    """Pass-through serializer: raw ``bytes`` payloads are stored and returned unchanged."""
    def serialize(self, item: bytes) -> Tuple[bytes, Optional[str]]:
        # Already bytes — nothing to encode, and no data-format metadata is needed.
        return item, None
    def deserialize(self, item: bytes) -> bytes:
        # Identity: the stored bytes ARE the item.
        return item
    def can_serialize(self, item: bytes) -> bool:
        return isinstance(item, bytes)
class TensorSerializer(Serializer):
    """Serialize and deserialize multi-dimensional tensors to and from bytes.

    Binary layout: ``uint32 dtype-index | uint32 ndim | ndim * uint32 dims | raw C-order data``.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch.dtype -> integer index stored in the binary header.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        """Encode *item* as a self-describing byte string (dtype + shape header, then raw data)."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.numpy().tobytes(order="C"))
        return b"".join(data), None
    def deserialize(self, data: bytes) -> torch.Tensor:
        """Rebuild the tensor from bytes produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _TORCH_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Shape entry i lives at bytes [8 + 4i, 8 + 4(i + 1)).
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Payload offset derived from `shape_size` rather than the loop variable: identical for
        # ndim >= 1, and avoids a NameError if a 0-dim header is ever encountered.
        tensor = torch.frombuffer(data[8 + 4 * shape_size : len(data)], dtype=dtype)
        shape = torch.Size(shape)
        if tensor.shape == shape:
            return tensor
        return torch.reshape(tensor, shape)
    def can_serialize(self, item: torch.Tensor) -> bool:
        # Only plain multi-dimensional tensors; 1-D tensors go to NoHeaderTensorSerializer.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) > 1
class NoHeaderTensorSerializer(Serializer):
    """The NoHeaderTensorSerializer serialize and deserialize 1-D tensors without a per-item header.

    The dtype is carried once in the data-format string (``no_header_tensor:<idx>``) and must be
    restored through :meth:`setup` before :meth:`deserialize` can be used.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: torch.dtype -> integer index used in the data-format string.
        self._dtype_to_indices = {v: k for k, v in _TORCH_DTYPES_MAPPING.items()}
        # Populated by `setup` from the stored data format.
        self._dtype: Optional[torch.dtype] = None
    def setup(self, data_format: str) -> None:
        """Recover the dtype from a ``no_header_tensor:<idx>`` data-format string."""
        self._dtype = _TORCH_DTYPES_MAPPING[int(data_format.split(":")[1])]
    def serialize(self, item: torch.Tensor) -> Tuple[bytes, Optional[str]]:
        dtype_indice = self._dtype_to_indices[item.dtype]
        return item.numpy().tobytes(order="C"), f"no_header_tensor:{dtype_indice}"
    def deserialize(self, data: bytes) -> torch.Tensor:
        # `setup` must have run first so the dtype is known.
        assert self._dtype
        return torch.frombuffer(data, dtype=self._dtype)
    def can_serialize(self, item: torch.Tensor) -> bool:
        # Exactly 1-D plain tensors; multi-dimensional ones use TensorSerializer.
        return isinstance(item, torch.Tensor) and type(item) == torch.Tensor and len(item.shape) == 1
class NumpySerializer(Serializer):
    """Serialize and deserialize multi-dimensional numpy arrays to and from bytes.

    Binary layout: ``uint32 dtype-index | uint32 ndim | ndim * uint32 dims | raw C-order data``.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> integer index stored in the binary header.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        """Encode *item* as a self-describing byte string (dtype + shape header, then raw data)."""
        dtype_indice = self._dtype_to_indices[item.dtype]
        data = [np.uint32(dtype_indice).tobytes()]
        data.append(np.uint32(len(item.shape)).tobytes())
        for dim in item.shape:
            data.append(np.uint32(dim).tobytes())
        data.append(item.tobytes(order="C"))
        return b"".join(data), None
    def deserialize(self, data: bytes) -> np.ndarray:
        """Rebuild the array from bytes produced by :meth:`serialize`."""
        dtype_indice = np.frombuffer(data[0:4], np.uint32).item()
        dtype = _NUMPY_DTYPES_MAPPING[dtype_indice]
        shape_size = np.frombuffer(data[4:8], np.uint32).item()
        # Deserialize the shape header: entry i lives at bytes [8 + 4i, 8 + 4(i + 1)).
        shape = []
        for shape_idx in range(shape_size):
            shape.append(np.frombuffer(data[8 + 4 * shape_idx : 8 + 4 * (shape_idx + 1)], np.uint32).item())
        # Payload offset derived from `shape_size` rather than the loop variable: identical for
        # ndim >= 1, and avoids a NameError if a 0-dim header is ever encountered.
        tensor = np.frombuffer(data[8 + 4 * shape_size : len(data)], dtype=dtype)
        # Compare as tuples: `ndarray.shape` is a tuple and `tuple == list` is always False,
        # which previously forced a redundant reshape on every call.
        if tensor.shape == tuple(shape):
            return tensor
        return np.reshape(tensor, shape)
    def can_serialize(self, item: np.ndarray) -> bool:
        # Only plain multi-dimensional arrays; 1-D arrays go to NoHeaderNumpySerializer.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) > 1
class NoHeaderNumpySerializer(Serializer):
    """The NoHeaderNumpySerializer serialize and deserialize 1-D numpy arrays without a per-item header.

    The dtype is carried once in the data-format string (``no_header_numpy:<idx>``) and must be
    restored through :meth:`setup` before :meth:`deserialize` can be used.
    """
    def __init__(self) -> None:
        super().__init__()
        # Reverse mapping: numpy dtype -> integer index used in the data-format string.
        self._dtype_to_indices = {v: k for k, v in _NUMPY_DTYPES_MAPPING.items()}
        # Populated by `setup` from the stored data format.
        self._dtype: Optional[np.dtype] = None
    def setup(self, data_format: str) -> None:
        """Recover the dtype from a ``no_header_numpy:<idx>`` data-format string."""
        self._dtype = _NUMPY_DTYPES_MAPPING[int(data_format.split(":")[1])]
    def serialize(self, item: np.ndarray) -> Tuple[bytes, Optional[str]]:
        dtype_indice: int = self._dtype_to_indices[item.dtype]
        return item.tobytes(order="C"), f"no_header_numpy:{dtype_indice}"
    def deserialize(self, data: bytes) -> np.ndarray:
        # `setup` must have run first so the dtype is known.
        assert self._dtype
        return np.frombuffer(data, dtype=self._dtype)
    def can_serialize(self, item: np.ndarray) -> bool:
        # Exactly 1-D plain arrays; multi-dimensional ones use NumpySerializer.
        return isinstance(item, np.ndarray) and type(item) == np.ndarray and len(item.shape) == 1
class PickleSerializer(Serializer):
    """The PickleSerializer serialize and deserialize python objects to and from bytes."""
    def serialize(self, item: Any) -> Tuple[bytes, Optional[str]]:
        return pickle.dumps(item), None
    def deserialize(self, data: bytes) -> Any:
        # SECURITY: pickle.loads can execute arbitrary code — only deserialize chunks from trusted sources.
        return pickle.loads(data)
    def can_serialize(self, _: Any) -> bool:
        # Catch-all fallback: accepts anything, so it must stay last in the serializer registry.
        return True
class FileSerializer(Serializer):
    """Store an on-disk file verbatim; the data-format tag records its extension (``file:<ext>``)."""
    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        # Normalize the extension (drop the dot, lowercase) for the data-format tag.
        extension = os.path.splitext(filepath)[1].replace(".", "").lower()
        with open(filepath, "rb") as stream:
            content = stream.read()
        return content, f"file:{extension}"
    def deserialize(self, data: bytes) -> Any:
        # Raw bytes are returned as-is; interpreting them is left to the caller.
        return data
    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data)
class VideoSerializer(Serializer):
    """Store a video file's bytes verbatim and decode them with torchvision on read."""
    # Lowercase filename extensions accepted as video files.
    _EXTENSIONS = ("mp4", "ogv", "mjpeg", "avi", "mov", "h264", "mpg", "webm", "wmv")
    def serialize(self, filepath: str) -> Tuple[bytes, Optional[str]]:
        _, file_extension = os.path.splitext(filepath)
        with open(filepath, "rb") as f:
            # Normalize the extension (drop the dot, lowercase) for the data-format tag.
            file_extension = file_extension.replace(".", "").lower()
            return f.read(), f"video:{file_extension}"
    def deserialize(self, data: bytes) -> Any:
        if not _TORCH_VISION_AVAILABLE:
            raise ModuleNotFoundError("torchvision is required. Run `pip install torchvision`")
        if not _AV_AVAILABLE:
            raise ModuleNotFoundError("av is required. Run `pip install av`")
        # Add support for a better deserialization mechanism for videos
        # TODO: Investigate https://pytorch.org/audio/main/generated/torchaudio.io.StreamReader.html
        import torchvision.io
        # read_video needs a real file path, so spill the bytes to a temporary file first.
        with tempfile.TemporaryDirectory() as dirname:
            fname = os.path.join(dirname, "file.mp4")
            with open(fname, "wb") as stream:
                stream.write(data)
            return torchvision.io.read_video(fname, pts_unit="sec")
    def can_serialize(self, data: Any) -> bool:
        return isinstance(data, str) and os.path.isfile(data) and any(data.endswith(ext) for ext in self._EXTENSIONS)
class StringSerializer(Serializer):
    """Encode and decode python strings as UTF-8 bytes."""
    def serialize(self, obj: str) -> Tuple[bytes, Optional[str]]:
        encoded = obj.encode("utf-8")
        return encoded, None
    def deserialize(self, data: bytes) -> str:
        return data.decode("utf-8")
    def can_serialize(self, data: str) -> bool:
        # Strings pointing at an existing file are handled by File/Video serializers instead.
        return isinstance(data, str) and not os.path.isfile(data)
class NumericSerializer:
    """Store a python scalar as a single fixed-width numpy value of ``dtype``."""
    def __init__(self, dtype: type) -> None:
        self.dtype = dtype
        # Fixed byte width of one scalar; the writer uses this instead of len(payload).
        self.size = self.dtype().nbytes
    def serialize(self, obj: Any) -> Tuple[bytes, Optional[str]]:
        scalar = self.dtype(obj)
        return scalar.tobytes(), None
    def deserialize(self, data: bytes) -> Any:
        return np.frombuffer(data, self.dtype)[0]
class IntegerSerializer(NumericSerializer, Serializer):
    """Store python ints as fixed-width ``np.int64`` scalars."""
    def __init__(self) -> None:
        super().__init__(np.int64)
    def can_serialize(self, data: int) -> bool:
        # NOTE(review): bool is a subclass of int, so True/False are also stored as int64 — confirm intended.
        return isinstance(data, int)
class FloatSerializer(NumericSerializer, Serializer):
    """Store python floats as fixed-width ``np.float64`` scalars."""
    def __init__(self) -> None:
        super().__init__(np.float64)
    def can_serialize(self, data: float) -> bool:
        return isinstance(data, float)
# Built-in serializer registry. ORDER MATTERS: `BinaryWriter._serialize` uses the first
# serializer whose `can_serialize` accepts the item, so specific entries must precede
# generic ones, and "pickle" (whose `can_serialize` always returns True) must stay last.
_SERIALIZERS = OrderedDict(**{
    "str": StringSerializer(),
    "int": IntegerSerializer(),
    "float": FloatSerializer(),
    "video": VideoSerializer(),
    "tif": FileSerializer(),
    "file": FileSerializer(),
    "pil": PILSerializer(),
    "jpeg": JPEGSerializer(),
    "bytes": BytesSerializer(),
    "no_header_numpy": NoHeaderNumpySerializer(),
    "numpy": NumpySerializer(),
    "no_header_tensor": NoHeaderTensorSerializer(),
    "tensor": TensorSerializer(),
    "pickle": PickleSerializer(),
})
def _get_serializers(serializers: Optional[Dict[str, Serializer]]) -> Dict[str, Serializer]:
    """Merge user-provided serializers with the built-in registry.

    User entries come first in iteration order; when a user key collides with a built-in
    key, the final ``update`` call makes the built-in serializer win.
    """
    if not serializers:
        # No user serializers: hand back the shared built-in registry.
        return _SERIALIZERS
    merged = OrderedDict(**serializers)
    merged.update(_SERIALIZERS)
    return merged
|
evocodebench_data_132
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from dataclasses import dataclass
from time import sleep
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from litdata.constants import _INDEX_FILENAME, _TORCH_GREATER_EQUAL_2_1_0
from litdata.processing.utilities import get_worker_rank
from litdata.streaming.compression import _COMPRESSORS, Compressor
from litdata.streaming.serializers import Serializer, _get_serializers
from litdata.utilities.env import _DistributedEnv, _WorkerEnv
from litdata.utilities.format import _convert_bytes_to_int, _human_readable_bytes
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import PyTree, tree_flatten, treespec_dumps
@dataclass
class Item:
    """A serialized sample buffered in memory until its chunk is flushed to disk."""
    index: int  # global sample index within the dataset
    data: bytes  # serialized payload
    bytes: int  # payload size in bytes
    dim: Optional[int] = None  # first-dimension length for single-tensor items, else None
    def __len__(self) -> int:
        # Length of an Item is its payload size, used when sizing chunks.
        return self.bytes
class BinaryWriter:
    """Serializes items into fixed-layout binary chunk files plus a per-rank JSON index."""
    def __init__(
        self,
        cache_dir: str,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
        follow_tensor_dimension: bool = True,
        serializers: Optional[Dict[str, Serializer]] = None,
    ):
        """The BinaryWriter enables to chunk dataset into an efficient streaming format for cloud training.
        Arguments:
            cache_dir: The path to where the chunks will be saved.
            chunk_bytes: The maximum number of bytes within a chunk.
            chunk_size: The maximum number of items within a chunk.
            compression: The compression algorithm to use.
            serializers: Provide your own serializers.
        Raises:
            FileNotFoundError: If ``cache_dir`` doesn't exist.
            ValueError: If neither/both of ``chunk_size`` and ``chunk_bytes`` are set, or compression is unknown.
        """
        self._cache_dir = cache_dir
        if (isinstance(self._cache_dir, str) and not os.path.exists(self._cache_dir)) or self._cache_dir is None:
            raise FileNotFoundError(f"The provided cache directory `{self._cache_dir}` doesn't exist.")
        # Exactly one of chunk_size / chunk_bytes must be provided.
        if (chunk_size is None and chunk_bytes is None) or (chunk_size and chunk_bytes):
            raise ValueError("Either one of the `chunk_size` or the `chunk_bytes` need to be provided.")
        self._serializers: Dict[str, Serializer] = _get_serializers(serializers)
        # Serializers keyed by the data-format name they actually emitted during serialization.
        self._serializers_extra: Dict[str, Serializer] = {}
        self._chunk_size = chunk_size
        # Accept human-readable byte strings (e.g. "64MB") as well as ints.
        self._chunk_bytes = _convert_bytes_to_int(chunk_bytes) if isinstance(chunk_bytes, str) else chunk_bytes
        self._compression = compression
        # Inferred from the first item and pinned for all subsequent ones.
        self._data_format: Optional[List[str]] = None
        self._data_spec: Optional[PyTree] = None
        if self._compression:
            if len(_COMPRESSORS) == 0:
                raise ValueError("No compresion algorithms are installed.")
            if self._compression not in _COMPRESSORS:
                raise ValueError(
                    f"The provided compression {self._compression} isn't available in {sorted(_COMPRESSORS)}"
                )
            self._compressor: Compressor = _COMPRESSORS[self._compression]
        # In-memory buffer of serialized items awaiting a chunk flush, keyed by sample index.
        self._serialized_items: Dict[int, Item] = {}
        self._chunk_index = 0
        # Index boundaries of the next chunk to flush; computed by `_should_write`.
        self._min_index: Optional[int] = None
        self._max_index: Optional[int] = None
        self._chunks_info: List[Dict[str, Any]] = []
        self._worker_env: Optional[_WorkerEnv] = None
        self._rank: Optional[int] = None
        self._is_done = False
        self._distributed_env = _DistributedEnv.detect()
        self._follow_tensor_dimension = follow_tensor_dimension
    @property
    def filled(self) -> bool:
        """Returns whether the caching phase is done."""
        if self._is_done:
            return True
        files = os.listdir(self._cache_dir)
        index_files = [f for f in files if f.endswith(_INDEX_FILENAME)]
        worker_env = _WorkerEnv.detect()
        data_optimiser_num_workers = os.getenv("DATA_OPTIMIZER_NUM_WORKERS", None)
        if data_optimiser_num_workers is not None:
            self._is_done = len(index_files) == int(data_optimiser_num_workers)
        else:
            # Done once every (process x dataloader-worker) has written its own index file.
            self._is_done = len(index_files) == self._distributed_env.world_size * worker_env.world_size
        return self._is_done
    @property
    def rank(self) -> int:
        """Returns the rank of the writer."""
        if self._rank is None:
            # The Data Optimizer environment variable takes precedence when set.
            rank = os.getenv("DATA_OPTIMIZER_GLOBAL_RANK", None)
            if rank:
                self._rank = int(rank)
            else:
                self._worker_env = _WorkerEnv.detect()
                self._rank = self._distributed_env.global_rank * self._worker_env.world_size + self._worker_env.rank
        return self._rank
    def get_config(self) -> Dict[str, Any]:
        """Returns the config of the writer (stored in every per-rank index file)."""
        out = {
            "compression": self._compression,
            "chunk_size": self._chunk_size,
            "chunk_bytes": self._chunk_bytes,
            "data_format": self._data_format,
            "data_spec": treespec_dumps(self._data_spec) if self._data_spec else None,
        }
        return out
    def serialize(self, items: Any) -> Tuple[bytes, Optional[int]]:
        """Serialize a dictionary into its binary format."""
        # Flatten the items provided by the users
        flattened, data_spec = tree_flatten(items)
        is_single_tensor = len(flattened) == 1 and isinstance(flattened[0], torch.Tensor)
        # Collect the sizes and associated bytes for each item
        sizes: List[int] = []
        data: List[bytes] = []
        if self._data_format is None:
            # First item: infer the data format once and pin it for the rest of the dataset.
            data_format: List[str] = []
            for item in flattened:
                data_format.append(self._serialize(item, sizes, data))
            worker_rank = get_worker_rank()
            if worker_rank is not None:
                print(f"Rank {worker_rank} inferred the following `{data_format}` data format.")
            self._data_format = data_format
            self._data_spec = data_spec
        else:
            # tiny optimization to avoid looping over all the data format
            self._serialize_with_data_format(flattened, sizes, data, self._data_format)
        # If there is a single element and it is a tensor, enable continous array.
        if is_single_tensor:
            return data[0], flattened[0].shape[0]
        # Concatenante into a single byte array
        head = np.array(sizes, np.uint32).tobytes()
        body = b"".join(data)
        return head + body, None
    def _serialize(self, item: Any, sizes: List[int], data: List[bytes]) -> str:
        """Serialize a given item and append its size and bytes to the sizes and data array."""
        # First serializer whose `can_serialize` accepts the item wins — registry order matters.
        for serializer_name, serializer in self._serializers.items():
            if serializer.can_serialize(item):
                serialized_item, name = serializer.serialize(item)
                data.append(serialized_item)
                sizes.append(serializer.size if hasattr(serializer, "size") else len(serialized_item))
                name = name or serializer_name
                # Remember the serializer under its emitted data-format name for later fast lookup.
                if name and name not in self._serializers_extra:
                    self._serializers_extra[name] = serializer
                return name
        raise ValueError(f"The provided item isn't serializable. Found {item}")
    def _serialize_with_data_format(
        self, item: Any, sizes: List[int], data: List[bytes], data_format: List[str]
    ) -> None:
        """Serialize a given item and append its size and bytes to the sizes and data array."""
        assert data_format
        for element, item_format in zip(item, data_format):
            serializer = self._serializers_extra[item_format]
            serialized_item, _ = serializer.serialize(element)
            data.append(serialized_item)
            sizes.append(serializer.size if hasattr(serializer, "size") else len(serialized_item))
    def _create_chunk(self, filename: str, on_done: bool = False) -> bytes:
        """Create a binary chunk from all the binarized items."""
        items = []
        if on_done:
            # Final flush: take everything left, which must be a contiguous index range.
            indices = sorted(self._serialized_items.keys())
            for i in range(len(indices) - 1):
                assert indices[i] == indices[i + 1] - 1, indices
            items = [self._serialized_items.pop(index) for index in indices]
        else:
            assert self._max_index is not None, (self._max_index, self._min_index)
            assert self._min_index is not None, (self._max_index, self._min_index)
            if self._max_index == self._min_index:
                # A single item is larger than the target chunk size; allow the chunk to be bigger than the target size
                items.append(self._serialized_items.pop(self._max_index))
            items.extend(self._serialized_items.pop(index) for index in range(self._min_index, self._max_index))
        if len(items) == 0:
            raise RuntimeError(
                "The items shouldn't have an empty length. Something went wrong."
                f" Found {self._pretty_serialized_items()} with boundaries: {self._min_index}, {self._max_index}."
            )
        # Chunk layout: uint32 item count | uint32 offsets | concatenated payloads.
        num_items = np.uint32(len(items))
        sizes = list(map(len, items))
        offsets = np.array([0] + sizes).cumsum().astype(np.uint32)
        # Shift offsets past the header so they are absolute positions within the chunk.
        offsets += len(num_items.tobytes()) + len(offsets.tobytes())
        sample_data = b"".join([item.data for item in items])
        data = num_items.tobytes() + offsets.tobytes() + sample_data
        current_chunk_bytes = sum([item.bytes for item in items])
        if self._chunk_bytes and current_chunk_bytes > self._chunk_bytes:
            warnings.warn(
                f"An item was larger than the target chunk size ({_human_readable_bytes(self._chunk_bytes)})."
                f" The current chunk will be {_human_readable_bytes(current_chunk_bytes)} in size.",
                UserWarning,
            )
        if self._chunk_size:
            assert num_items.item() <= self._chunk_size
        # For single-tensor items, `dim` accumulates the first-dimension lengths across the chunk.
        dim: Optional[int] = None
        if items[0].dim:
            dim = sum([item.dim if item.dim is not None else 0 for item in items])
        chunk_info = {
            "chunk_bytes": current_chunk_bytes,
            "chunk_size": num_items.item(),
            "filename": filename,
            "dim": dim,
        }
        self._chunks_info.append(chunk_info)
        return data
    def get_chunk_filename(self) -> str:
        """Return the filename of the next chunk, embedding rank, chunk index and compression."""
        if self._compression:
            return f"chunk-{self.rank}-{self._chunk_index}.{self._compression}.bin"
        return f"chunk-{self.rank}-{self._chunk_index}.bin"
    def write_chunk(self, on_done: bool = False) -> str:
        """Write a chunk to the filesystem."""
        filename = self.get_chunk_filename()
        self.write_chunk_to_file(self._create_chunk(filename, on_done=on_done), filename)
        self._chunk_index += 1
        return os.path.join(self._cache_dir, filename)
    def __setitem__(self, index: int, items: Any) -> None:
        """Store an item to a chunk.
        The index needs to be provided in order.
        This is handled by the samplers automatically. This ensures we can map an index to a shard from an interval.
        """
        self.add_item(index, items)
    def add_item(self, index: int, items: Any) -> Optional[str]:
        """Serialize ``items`` under ``index`` and flush a chunk when the budget is reached.

        Returns the written chunk's filepath when a flush happened, otherwise None.
        """
        # Track the minimum index provided to the writer
        # Serialize the items and store an Item object.
        if index in self._serialized_items:
            raise ValueError(f"The provided index {index} already exists in the cache.")
        data, dim = self.serialize(items)
        self._serialized_items[index] = Item(
            index=index,
            data=data,
            bytes=len(data),
            dim=dim,
        )
        if self._should_write():
            filepath = os.path.join(self._cache_dir, self.get_chunk_filename())
            self.write_chunk()
            # Reset the boundaries so the next flush recomputes them from scratch.
            self._min_index = None
            self._max_index = None
            return filepath
    def _should_write(self) -> bool:
        # TODO: Misleading method name, it modifies `self._min_index` and `self._max_index`!
        if not self._serialized_items:
            return False
        indexes = list(self._serialized_items.keys())
        self._min_index = index = indexes[0] if len(indexes) == 1 else min(*indexes)
        num_bytes = 0
        num_items = 0
        # Walk forward from the smallest buffered index while the items are contiguous.
        while True:
            item = self._serialized_items.get(index, None)
            if item:
                num_bytes += item.bytes
                num_items += item.dim if item.dim else 1
                index += 1
                # Flush once either budget (bytes or item count) is exceeded.
                if (self._chunk_bytes and self._chunk_bytes < num_bytes) or (
                    self._chunk_size and num_items > self._chunk_size
                ):
                    self._max_index = index - 1
                    return True
            else:
                return False
    def write_chunk_to_file(
        self,
        raw_data: bytes,
        filename: str,
    ) -> None:
        """Write chunk bytes to a file."""
        # Whether to compress the raw bytes
        if self._compression:
            raw_data = self._compressor.compress(raw_data)
        # Write the binary chunk file
        with open(os.path.join(self._cache_dir, filename), "wb") as out:
            out.write(raw_data)
    def write_chunks_index(self) -> str:
        """Write the chunks index to a JSON file."""
        if len(self._chunks_info) == 0:
            return ""
        filepath = os.path.join(self._cache_dir, f"{self.rank}.{_INDEX_FILENAME}")
        config = self.get_config()
        with open(filepath, "w") as out:
            json.dump({"chunks": self._chunks_info, "config": config}, out, sort_keys=True)
        return filepath
    def done(self) -> List[str]:
        """Called when StopIteration is triggered."""
        filepaths: List[str] = []
        if self.filled:
            return filepaths
        # Try writing down an chunks
        while self._should_write():
            filepaths.append(self.write_chunk())
        # If any elements is left, try writing one last chunk
        if self._serialized_items:
            filepaths.append(self.write_chunk(True))
        # Write down the index file
        self.write_chunks_index()
        self._is_done = True
        return filepaths
    def merge(self, num_workers: int = 1, node_rank: Optional[int] = None) -> None:
        """Once all the workers have written their own index, the merge function is responsible to read and merge them
        into a single index."""
        num_workers = num_workers or 1
        # Only for non rank 0
        if self.rank != 0:
            # Non-zero ranks just wait for rank 0 to produce the merged index file.
            while not os.path.exists(os.path.join(self._cache_dir, _INDEX_FILENAME)):
                sleep(0.01)
            return
        # Wait for all indexes to be available
        is_done = False
        while not is_done:
            files = os.listdir(self._cache_dir)
            # Return if the index already exists
            if _INDEX_FILENAME in files:
                return
            index_files = [f for f in files if f.endswith(_INDEX_FILENAME)]
            # When using the Data Optimizer, we don't use multi processes.
            is_done = len(index_files) == self._distributed_env.world_size * num_workers
            sleep(0.01)
        self._merge_no_wait(node_rank=node_rank)
    def _merge_no_wait(self, node_rank: Optional[int] = None) -> None:
        """Once all the workers have written their own index, the merge function is responsible to read and merge them
        into a single index."""
        files = os.listdir(self._cache_dir)
        index_files = [f for f in files if f.endswith(_INDEX_FILENAME)]
        chunks_info = []
        config = None
        for index_filename in sorted(index_files):
            chunk_path = os.path.join(self._cache_dir, index_filename)
            with open(chunk_path) as f:
                data = json.load(f)
                # All per-worker indexes must agree on the writer configuration.
                if config is None:
                    config = data["config"]
                elif config != data["config"]:
                    raise Exception(
                        "The config isn't consistent between chunks. This shouldn't have happened."
                        f"Found {config} {data['config']}."
                    )
                chunks_info.extend(data["chunks"])
            os.remove(chunk_path)
        if node_rank is None:
            with open(os.path.join(self._cache_dir, _INDEX_FILENAME), "w") as f:
                json.dump({"chunks": chunks_info, "config": config}, f, sort_keys=True)
        else:
            with open(os.path.join(self._cache_dir, f"{node_rank}-{_INDEX_FILENAME}"), "w") as f:
                json.dump({"chunks": chunks_info, "config": config}, f, sort_keys=True)
    def _should_raise(self, data_format_1: List[str], data_format_2: List[str]) -> bool:
        """Return True when two inferred data formats are incompatible."""
        if len(data_format_1) != len(data_format_2):
            return True
        def is_non_valid(f1: str, f2: str) -> bool:
            # "pil" and "jpeg" both decode to images, so they are treated as interchangeable.
            if f1 in ["pil", "jpeg"] and f2 in ["pil", "jpeg"]:
                return False
            return f1 != f2
        return any(is_non_valid(f1, f2) for f1, f2 in zip(data_format_1, data_format_2))
    def _pretty_serialized_items(self) -> Dict[int, Item]:
        """Return a payload-stripped copy of the buffered items, for readable error messages."""
        out = {}
        for key, value in self._serialized_items.items():
            # drop `data` as it would make logs unreadable.
            out[key] = Item(
                index=value.index,
                bytes=value.bytes,
                dim=value.dim,
                data=b"",
            )
        return out
|
evocodebench_data_133
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import inspect
import logging
import os
from copy import deepcopy
from importlib import reload
from itertools import cycle
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from torch.utils.data import Dataset, IterableDataset
from torch.utils.data._utils.collate import default_collate
from torch.utils.data._utils.fetch import _BaseDatasetFetcher
from torch.utils.data.dataloader import (
DataLoader,
_BaseDataLoaderIter,
_DatasetKind,
_MultiProcessingDataLoaderIter,
_SingleProcessDataLoaderIter,
)
from torch.utils.data.sampler import BatchSampler, Sampler
from litdata.constants import _DEFAULT_CHUNK_BYTES, _TORCH_GREATER_EQUAL_2_1_0, _VIZ_TRACKER_AVAILABLE
from litdata.streaming import Cache
from litdata.streaming.combined import (
__NUM_SAMPLES_YIELDED_KEY__,
__SAMPLES_KEY__,
CombinedStreamingDataset,
)
from litdata.streaming.dataset import StreamingDataset
from litdata.streaming.sampler import CacheBatchSampler
from litdata.utilities.env import _DistributedEnv
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten
# Use getLogger so this logger joins the logging hierarchy and honours global logging
# configuration; instantiating logging.Logger directly bypasses the manager registry.
logger = logging.getLogger(__name__)
def _equal_items(data_1: Any, data_2: Any) -> bool:
    """Return True when both pytrees flatten to the same number of pairwise-equal leaves."""
    flat_1, _ = tree_flatten(data_1)
    flat_2, _ = tree_flatten(data_2)
    if len(flat_1) != len(flat_2):
        return False
    return all(_equal_item(left, right) for left, right in zip(flat_1, flat_2))
def _equal_item(d1: Any, d2: Any) -> bool:
if not isinstance(d1, type(d2)):
return False
equality = d1 == d2
if isinstance(equality, torch.Tensor):
return bool(equality.all().item())
if equality is True:
return True
return False
class CacheDataset(Dataset):
    def __init__(
        self,
        dataset: Any,
        cache_dir: str,
        chunk_bytes: Optional[int],
        chunk_size: Optional[int],
        compression: Optional[str],
    ):
        """The `CacheDataset` is a dataset wrapper to provide a beginner experience with the Cache.
        Arguments:
            dataset: The dataset of the user
            cache_dir: The folder where the chunks are written to.
            chunk_bytes: The maximal number of bytes to write within a chunk.
            chunk_size: The maximal number of items to write to a chunk.
            compression: The compression algorithm to use to reduce the size of the chunk.
        """
        self._dataset = dataset
        self._cache = Cache(cache_dir, chunk_bytes=chunk_bytes, chunk_size=chunk_size, compression=compression)
        # Flipped to True once a single index has produced the same item twice (spot check).
        self._is_deterministic = False
    def __len__(self) -> int:
        # Before the cache is filled, the wrapped dataset defines the length.
        return len(self._cache) if self._cache.filled else len(self._dataset)
    def __getitem__(self, index: int) -> Any:
        data_1 = self._cache[index] if self._cache.filled else self._dataset[index]
        if not self._cache.filled:
            # One-time determinism check: fetch the same index again and compare.
            if not self._is_deterministic:
                data2 = self._dataset[index]
                if not _equal_items(data_1, data2):
                    raise ValueError(
                        f"Your dataset items aren't deterministic. Found {data_1} and {data2} for index {index}."
                        " HINT: Use the `litdata.cache.Cache` directly within your dataset."
                    )
                self._is_deterministic = True
            self._cache[index] = data_1
        return data_1
class CacheCollateFn:
    """This CacheCollateFn is used to accelerate the processing of the data generated using the Cache.

    During the chunking phase, there is no need to return any data from the DataLoader reducing some time.
    Additionally, if the user makes their __getitem__ asynchronous, the collate executes them in parallel.
    """
    def __init__(self, collate_fn: Optional[Callable] = None) -> None:
        # Fall back to PyTorch's default batching when no custom collate is given.
        self.collate_fn = collate_fn or default_collate
    def __call__(self, items: List[Any]) -> Any:
        # All-None batches occur while the cache is being filled; skip them entirely.
        if all(item is None for item in items):
            return None
        # If the __getitem__ method is asynchronous, collect all the items.
        if all(inspect.iscoroutine(item) for item in items):
            loop = asyncio.new_event_loop()
            try:
                asyncio.set_event_loop(loop)
                items = loop.run_until_complete(asyncio.gather(*items))
            finally:
                # Close the per-call loop; the original left it open, leaking an
                # event loop (and its selector fd) on every async batch.
                asyncio.set_event_loop(None)
                loop.close()
        return self.collate_fn([item for item in items if item is not None])
class _SingleProcessDataLoaderIterPatch(_SingleProcessDataLoaderIter):
    """This is overriden to inform the cache is done chunking."""
    def _next_data(self) -> Any:
        try:
            data = None
            # The collate function returns None while chunking; skip those batches.
            while data is None:
                data = super()._next_data()
            return data
        except StopIteration:
            # End of epoch: flush any pending chunk and merge the per-worker index files.
            for v in self._dataset_fetcher.dataset.__dict__.values():
                if isinstance(v, Cache):
                    v.done()
                    if not v.filled:
                        v.merge(1)
            raise StopIteration()
class WorkerLoop:
    """Wrap the PyTorch DataLoader WorkerLoop to perform caching and profiling."""
    def __init__(self, global_rank: int, profile: bool = False) -> None:
        self._global_rank = global_rank
        self._profile = profile
    def __call__(
        self,
        dataset_kind: Any,
        dataset: Any,
        index_queue: Any,
        data_queue: Any,
        done_event: Any,
        auto_collation: Any,
        collate_fn: Any,
        drop_last: Any,
        base_seed: Any,
        init_fn: Any,
        worker_id: Any,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """Run the (unpatched) torch worker loop, then finalize any Cache held by the dataset."""
        from torch.utils.data._utils import worker
        from litdata.streaming.cache import Cache
        # Only trace from global rank 0 / worker 0 to avoid duplicate trace files.
        enable_profiling = self._global_rank == 0 and worker_id == 0 and _VIZ_TRACKER_AVAILABLE and self._profile
        if enable_profiling:
            from viztracer import VizTracer
            tracer = VizTracer(output_file=os.path.join(os.getcwd(), "trace.json"))
            tracer.start()
        # Reload to remove the patching
        reloaded_worker = reload(worker)
        # Intercept fetcher creation so we can reach the dataset instance afterwards.
        create_fetcher = _DatasetKind.create_fetcher
        fetcher = None
        def create_fetcher_fn(*args: Any, **kwargs: Any) -> "_BaseDatasetFetcher":
            nonlocal fetcher
            fetcher = create_fetcher(*args, **kwargs)
            return fetcher
        _DatasetKind.create_fetcher = create_fetcher_fn  # type: ignore
        reloaded_worker._worker_loop(
            dataset_kind,
            dataset,
            index_queue,
            data_queue,
            done_event,
            auto_collation,
            collate_fn,
            drop_last,
            base_seed,
            init_fn,
            worker_id,
            *args,
            **kwargs,
        )
        if dataset_kind == _DatasetKind.Map:
            assert fetcher
            # Flush every Cache the dataset holds now that the worker loop finished.
            for v in fetcher.dataset.__dict__.values():
                if isinstance(v, Cache):
                    v.done()
        if enable_profiling:
            tracer.stop()
            tracer.save()
class _MultiProcessingDataLoaderIterPatch(_MultiProcessingDataLoaderIter):
    """Multi-worker iterator that patches the torch worker loop to finalize caches on shutdown."""
    def __init__(self, loader: DataLoader) -> None:
        self._cache = loader._cache
        self._num_workers = loader.num_workers
        # Patch PyTorch worker loop to call the `cache.done()` method.
        from torch.utils.data._utils import worker
        worker._worker_loop = WorkerLoop(loader._global_rank, loader._profile)
        super().__init__(loader)
    def _shutdown_workers(self) -> None:
        super()._shutdown_workers()
        # If the cache isn't filled, trigger an index merge across the workers' index files.
        if not self._cache.filled:
            self._cache.merge(self._num_workers)
    def _next_data(self) -> Any:
        try:
            data = None
            # The collate function returns None while chunking; skip those batches.
            while data is None:
                data = super()._next_data()
            return data
        except StopIteration as e:
            raise e
class CacheDataLoader(DataLoader):
    __doc__ = DataLoader.__doc__

    def __init__(
        self,
        dataset: Any,
        *args: Any,
        sampler: Optional[Sampler] = None,
        batch_sampler: Optional[BatchSampler] = None,
        num_workers: int = 0,
        shuffle: bool = False,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = None,
        drop_last: bool = False,
        cache_dir: Optional[str] = None,
        chunk_bytes: Optional[int] = _DEFAULT_CHUNK_BYTES,
        compression: Optional[str] = None,
        profile: bool = False,
        collate_fn: Optional[Callable] = None,
        **kwargs: Any,
    ) -> None:
        """DataLoader that caches the wrapped map-style dataset into streaming chunks on first pass.

        Raises:
            ValueError: For user samplers, iterable datasets, multiple Caches, or a missing `cache_dir`.
            ModuleNotFoundError: When profiling is requested without viztracer installed.
        """
        # The internal CacheBatchSampler is mandatory: reject user-provided samplers up front.
        if sampler:
            raise ValueError(
                "The CacheDataLoader relies on its own internal sampler. Passing a sampler isn't supported."
            )
        if batch_sampler:
            raise ValueError(
                "The CacheDataLoader relies on its own internal sampler. Passing a batch_sampler isn't supported."
            )
        if isinstance(dataset, IterableDataset):
            raise ValueError("Only map-based dataset are supported by the CacheDataLoader for now.")
        if profile and not _VIZ_TRACKER_AVAILABLE:
            raise ModuleNotFoundError("To enable DataLoader profiling, run `pip install viztracer`.")
        # Locate a Cache the user may already hold as a dataset attribute.
        cache_list = [v for v in dataset.__dict__.values() if isinstance(v, Cache)]
        if len(cache_list) > 1:
            raise ValueError(
                "We found several Cache used as attributes from your dataset. Only one is support for now."
            )
        if len(cache_list) == 0:
            if cache_dir is None:
                raise ValueError("You should provide a `cache_dir` filepath to the CacheDataLoader.")
            # No user-managed cache: wrap the dataset; chunk_size defaults to the batch size.
            dataset = CacheDataset(dataset, cache_dir, chunk_bytes, batch_size, compression)
            cache = dataset._cache
        else:
            cache = cache_list[0]
        if not cache.filled and shuffle:
            # Fixed duplicated word in the original message ("caching phase phase").
            logger.info("Shuffle is ignored during the caching phase.")
        self._cache = cache
        distributed_env = _DistributedEnv.detect()
        self._global_rank = distributed_env.global_rank
        batch_sampler = CacheBatchSampler(
            len(dataset),
            distributed_env.world_size,
            self._global_rank,
            num_workers,
            batch_size or 1,
            drop_last,
            shuffle,
            cache,
        )
        self._profile = profile
        super().__init__(
            dataset,
            *args,
            batch_sampler=batch_sampler,  # type: ignore
            collate_fn=CacheCollateFn(collate_fn),
            num_workers=num_workers,
            **kwargs,
        )

    def _get_iterator(self) -> "_BaseDataLoaderIter":
        """Overriden to ensure the `Cache.done()` method is triggered on iteration done."""
        if self.num_workers == 0:
            return _SingleProcessDataLoaderIterPatch(self)
        self.check_worker_number_rationality()
        return _MultiProcessingDataLoaderIterPatch(self)
def _wrapper(fetcher: Any, func: Callable, tracer: Any, profile: int, profile_dir: str) -> Callable:
counter = 0
def wrap(*args: Any, **kwargs: Any) -> Any:
nonlocal counter
result = func(*args, **kwargs)
if tracer.enable and counter == profile:
tracer.stop()
tracer.save()
print(
f"Saved {os.path.join(profile_dir, 'result.json')} file after {profile} batches."
"Use chrome://tracing/ to view it."
)
fetcher.fetch = func
counter += 1
return result
return wrap
class _ProfileWorkerLoop:
    """Wrap the PyTorch DataLoader WorkerLoop to add profiling.

    `profile` is either ``True`` (trace the whole worker lifetime) or an ``int``
    (dump the trace after that many batches via `_wrapper`).
    """
    def __init__(self, profile: Union[int, bool], profile_dir: Optional[str] = None):
        self._profile = profile
        # Default the trace output location to the current working directory.
        self._profile_dir = profile_dir if profile_dir else os.getcwd()
    def __call__(
        self,
        dataset_kind: Any,
        dataset: Any,
        index_queue: Any,
        data_queue: Any,
        done_event: Any,
        auto_collation: Any,
        collate_fn: Any,
        drop_last: Any,
        base_seed: Any,
        init_fn: Any,
        worker_id: Any,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        # Signature mirrors `torch.utils.data._utils.worker._worker_loop`; every
        # argument is forwarded to it unchanged.
        from torch.utils.data._utils import worker
        from viztracer import VizTracer
        if worker_id == 0:
            # Only worker 0 is traced; drop any stale trace from a previous run.
            output_file = os.path.join(self._profile_dir, "result.json")
            if os.path.exists(output_file):
                os.remove(output_file)
            tracer = VizTracer(output_file=output_file, verbose=0)
            tracer.start()
        # Reload to remove the patching
        reloaded_worker = reload(worker)
        create_fetcher = _DatasetKind.create_fetcher
        fetcher = None
        def create_fetcher_fn(*args: Any, **kwargs: Any) -> "_BaseDatasetFetcher":
            # Intercept fetcher creation so `fetch` can be wrapped for batch-count profiling.
            # `tracer` only exists for worker 0, which the guard below ensures.
            nonlocal fetcher
            fetcher = create_fetcher(*args, **kwargs)
            if worker_id == 0 and isinstance(self._profile, int):
                # NOTE(review): bool is a subclass of int, so `profile=True` also takes
                # this branch in addition to the shutdown save below — confirm intended.
                fetcher.fetch = _wrapper(fetcher, fetcher.fetch, tracer, self._profile, self._profile_dir)
            return fetcher
        _DatasetKind.create_fetcher = create_fetcher_fn  # type: ignore
        # Run the (unpatched) torch worker loop with the arguments passed to us.
        reloaded_worker._worker_loop(
            dataset_kind,
            dataset,
            index_queue,
            data_queue,
            done_event,
            auto_collation,
            collate_fn,
            drop_last,
            base_seed,
            init_fn,
            worker_id,
            *args,
            **kwargs,
        )
        if worker_id == 0 and isinstance(self._profile, bool):
            # Whole-lifetime profiling (profile=True): dump the trace at worker shutdown.
            tracer.stop()
            tracer.save()
class _StreamingMultiProcessingDataLoaderIter(_MultiProcessingDataLoaderIter):
    """Multiprocessing DataLoader iterator that can resume dispatching on a specific worker.

    After a restore, indices are first handed to the workers following the one that
    produced the last checkpointed batch, so the resumed ordering matches an
    uninterrupted run.
    """
    def __init__(self, loader: DataLoader) -> None:
        self._loader = loader
        # Workers still awaiting their first index after a restore; empty for a fresh run.
        self._indexes = (
            list(range(self._loader._latest_worker_idx, self._loader.num_workers))
            if self._loader._latest_worker_idx > 0
            else []
        )
        self._num_workers = loader.num_workers
        distributed_env = _DistributedEnv.detect()
        # Patch the torch worker loop for profiling BEFORE super().__init__ spawns
        # the worker processes; only global rank 0 records a trace.
        if self._loader._profile_batches and distributed_env.global_rank == 0 and _VIZ_TRACKER_AVAILABLE:
            from torch.utils.data._utils import worker
            worker._worker_loop = _ProfileWorkerLoop(self._loader._profile_batches, self._loader._profile_dir)
        super().__init__(loader)
    def _try_put_index(self) -> None:
        # Used to restart on the right DataLoader worker
        if self._loader.restore and self._indexes:
            assert self._tasks_outstanding < self._prefetch_factor * self._num_workers
            try:
                index = self._next_index()
            except StopIteration:
                return
            # Dispatch to the next pending worker instead of torch's round-robin.
            worker_queue_idx = self._indexes.pop(0)
            self._index_queues[worker_queue_idx].put((self._send_idx, index))
            self._task_info[self._send_idx] = (worker_queue_idx,)
            self._tasks_outstanding += 1
            self._send_idx += 1
        else:
            # Normal operation: defer to the base implementation.
            super()._try_put_index()
class StreamingDataLoader(DataLoader):
    r"""The StreamingDataLoader combines a dataset and a sampler, and provides an iterable over the given dataset.

    The :class:`~litdata.streaming.dataloader.StreamingDataLoader` supports either a
    StreamingDataset and CombinedStreamingDataset datasets with single- or multi-process loading,
    customizing
    loading order and optional automatic batching (collation) and memory pinning.
    See :py:mod:`torch.utils.data` documentation page for more details.

    Args:
        dataset (Dataset): dataset from which to load the data.
        batch_size (int, optional): how many samples per batch to load
            (default: ``1``).
        shuffle (bool, optional): set to ``True`` to have the data reshuffled
            at every epoch (default: ``False``).
        num_workers (int, optional): how many subprocesses to use for data
            loading. ``0`` means that the data will be loaded in the main process.
            (default: ``0``)
        collate_fn (Callable, optional): merges a list of samples to form a
            mini-batch of Tensor(s). Used when using batched loading from a
            map-style dataset.
        pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
            into device/CUDA pinned memory before returning them. If your data elements
            are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
            see the example below.
        timeout (numeric, optional): if positive, the timeout value for collecting a batch
            from workers. Should always be non-negative. (default: ``0``)
        worker_init_fn (Callable, optional): If not ``None``, this will be called on each
            worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
            input, after seeding and before data loading. (default: ``None``)
        multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If
            ``None``, the default `multiprocessing context`_ of your operating system will
            be used. (default: ``None``)
        generator (torch.Generator, optional): If not ``None``, this RNG will be used
            by RandomSampler to generate random indexes and multiprocessing to generate
            ``base_seed`` for workers. (default: ``None``)
        prefetch_factor (int, optional, keyword-only arg): Number of batches loaded
            in advance by each worker. ``2`` means there will be a total of
            2 * num_workers batches prefetched across all workers. (default value depends
            on the set value for num_workers. If value of num_workers=0 default is ``None``.
            Otherwise, if value of ``num_workers > 0`` default is ``2``).
        persistent_workers (bool, optional): If ``True``, the data loader will not shut down
            the worker processes after a dataset has been consumed once. This allows to
            maintain the workers `Dataset` instances alive. (default: ``False``)
        pin_memory_device (str, optional): the device to :attr:`pin_memory` to if ``pin_memory`` is
            ``True``.
        profile_batches (int, bool, optional): Whether to record data loading profile and generate a result.json file.
        profile_dir (str, optional): Where to store the recorded trace when profile_batches is enabled.
    """
    # NOTE: this re-assignment replaces the docstring above with the base
    # DataLoader's docstring at runtime.
    __doc__ = DataLoader.__doc__
    def __init__(
        self,
        dataset: Union[StreamingDataset, CombinedStreamingDataset],
        *args: Any,
        batch_size: int = 1,
        num_workers: int = 0,
        profile_batches: Union[bool, int] = False,
        profile_dir: Optional[str] = None,
        prefetch_factor: Optional[int] = None,
        shuffle: Optional[bool] = None,
        **kwargs: Any,
    ) -> None:  # pyright: ignore
        if not isinstance(dataset, (StreamingDataset, CombinedStreamingDataset)):
            raise RuntimeError(
                "The provided dataset should be either an instance of StreamingDataset or CombinedStreamingDataset."
                f" Found {dataset}."
            )
        # Shuffle is consumed here (forwarded to the dataset) and NOT passed to the
        # base DataLoader, which relies on the dataset's own shuffling.
        if shuffle is not None:
            dataset.set_shuffle(shuffle)
        shuffle = None
        if profile_batches and not _VIZ_TRACKER_AVAILABLE:
            raise ModuleNotFoundError("To use profile_batches, viztracer is required. Run `pip install viztracer`")
        if profile_batches and num_workers == 0:
            raise ValueError("Profiling is supported only with num_workers >= 1.")
        self.current_epoch = 0
        self.batch_size = batch_size
        self.num_workers = num_workers
        self._profile_batches = profile_batches
        self._profile_dir = profile_dir
        # Progress counters for checkpointing: a scalar for StreamingDataset, a
        # worker-rank -> per-dataset counts mapping for CombinedStreamingDataset.
        self._num_samples_yielded_streaming = 0
        self._num_samples_yielded_combined: Dict[int, List[Any]] = {}
        self.rng_state: Optional[Any] = None
        # Round-robin over worker ids, used to attribute each batch to a worker.
        self._worker_idx = cycle(list(range(self.num_workers if self.num_workers > 0 else 1)))
        self._worker_idx_iter: Optional[Any] = None
        self._latest_worker_idx = 0
        self.restore = False
        super().__init__(
            dataset,
            *args,
            batch_size=batch_size,
            num_workers=num_workers,
            prefetch_factor=(10 if num_workers > 0 else None) if prefetch_factor is None else prefetch_factor,
            **kwargs,
        )  # type: ignore
    def __iter__(self) -> Any:
        """Iterate over batches while tracking progress so the loader state can be checkpointed."""
        if not self.restore:
            # Fresh epoch: reset the worker round-robin and progress counters.
            self._latest_worker_idx = 0
            self._worker_idx = cycle(list(range(self.num_workers if self.num_workers > 0 else 1)))
            self._worker_idx_iter = iter(self._worker_idx)
            self.current_epoch += 1
            self._num_samples_yielded_combined = {}
            self._num_samples_yielded_streaming = 0
        self.dataset.set_epoch(self.current_epoch)
        if isinstance(self.dataset, StreamingDataset):
            assert self.batch_size
            for batch in super().__iter__():
                # Track which worker produced the batch so a resume can continue after it.
                self._latest_worker_idx = next(self._worker_idx_iter)  # type: ignore
                self._num_samples_yielded_streaming += self.batch_size
                yield batch
        else:
            self.dataset._set_use_streaming_dataloader(True)
            assert self.batch_size
            # TODO: Inject a custom collate function to avoid collating the __NUM_SAMPLES_YIELDED__ key
            for batch in super().__iter__():
                self._latest_worker_idx = next(self._worker_idx_iter)  # type: ignore
                if isinstance(batch, dict) and __NUM_SAMPLES_YIELDED_KEY__ in batch:
                    # Strip the progress counters injected by the combined dataset and
                    # record them per worker for `state_dict`.
                    self._num_samples_yielded_combined[self._latest_worker_idx] = [
                        sample[-1].item() if self.batch_size > 1 else sample.item()
                        for sample in batch[__NUM_SAMPLES_YIELDED_KEY__]
                    ]
                    yield batch[__SAMPLES_KEY__]
                else:
                    yield batch
        # The epoch completed fully; the next `__iter__` starts from scratch.
        self.restore = False
    def state_dict(self) -> Dict[str, Any]:
        """Return a checkpointable snapshot of the loader and its dataset progress."""
        if isinstance(self.dataset, StreamingDataset):
            assert self.batch_size
            return {
                "dataset": self.dataset.state_dict(
                    self._num_samples_yielded_streaming, self.num_workers, self.batch_size
                ),
                "current_epoch": self.current_epoch,
                "num_samples_yielded": self._num_samples_yielded_streaming,
                "latest_worker_idx": self._latest_worker_idx,
            }
        # Combined case: aggregate per-worker counters into per-dataset totals.
        # NOTE(review): assumes at least one worker has reported progress; an empty
        # `_num_samples_yielded_combined` would raise IndexError here — confirm callers.
        num_samples_yieled = [0 for _ in range(len(list(self._num_samples_yielded_combined.values())[0]))]
        for worker_idx in self._num_samples_yielded_combined:
            for dataset_idx, samples_yieled in enumerate(self._num_samples_yielded_combined[worker_idx]):
                num_samples_yieled[dataset_idx] += samples_yieled
        return {
            "dataset": self.dataset.state_dict(self.num_workers, self.batch_size, num_samples_yieled),
            # When mid-epoch (`__iter__` already bumped the epoch), report the previous one.
            "current_epoch": self.current_epoch if self.restore else self.current_epoch - 1,
            "latest_worker_idx": self._latest_worker_idx,
            "num_samples_yielded": deepcopy(self._num_samples_yielded_combined),
        }
    def load_state_dict(self, obj: Dict[str, Any]) -> None:
        """Load a dict containing training state (called from non-worker process).

        This is called on each copy of the dataset when resuming.

        Args:
            obj (Any): The state.
        """
        self.current_epoch = obj["current_epoch"]
        if isinstance(self.dataset, StreamingDataset):
            self._num_samples_yielded_streaming = obj["num_samples_yielded"]
        else:
            self._num_samples_yielded_combined = obj["num_samples_yielded"]
        # Used to restart on the next DataLoader worker from the previous run.
        self._latest_worker_idx = obj["latest_worker_idx"] + 1
        # Advance the round-robin iterator to that worker.
        self._worker_idx_iter = iter(self._worker_idx)
        for _ in range(self._latest_worker_idx):
            next(self._worker_idx_iter)
        # Inform we are resuming and disable resetting the StreamingDataLoader state.
        # This is toggled back to False when the `__iter__` method of the StreamingDataLoader completes.
        self.restore = True
        if isinstance(self.dataset, CombinedStreamingDataset):
            self.dataset._set_use_streaming_dataloader(True)
            self.dataset.load_state_dict(obj)
        elif isinstance(self.dataset, StreamingDataset):
            self.dataset.load_state_dict(obj["dataset"])
        else:
            raise RuntimeError("The provided dataset should be a `StreamingDataset` or a `CombinedStreamingDataset`.")
    def _get_iterator(self) -> "_BaseDataLoaderIter":
        """Overriden to ensure the `Cache.done()` method is triggered on iteration done."""
        if self.num_workers == 0:
            return _SingleProcessDataLoaderIter(self)
        self.check_worker_number_rationality()
        return _StreamingMultiProcessingDataLoaderIter(self)
|
evocodebench_data_134
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Any, Dict, Iterator, List, Optional, Sequence
from torch.utils.data import IterableDataset
from litdata.streaming.dataset import StreamingDataset
from litdata.utilities.env import _WorkerEnv
# Key under which the iterator reports per-dataset sample counts to the StreamingDataLoader.
__NUM_SAMPLES_YIELDED_KEY__ = "__NUM_SAMPLES_YIELDED__"
# Key under which the actual sample payload is returned when the streaming-dataloader protocol is active.
__SAMPLES_KEY__ = "__SAMPLES__"
class CombinedStreamingDataset(IterableDataset):
    """The `CombinedStreamingDataset` enables to stream data from multiple StreamingDataset with the sampling ratio of
    your choice.

    Additionally, the `CombinedStreamingDataset` keeps track of the number of samples fetched to enable resumability
    of the datasets.

    Note that due to the random sampling, the number of samples returned from the iterator is variable and a function
    of the given seed. The combined dataset will raise a StopIteration as soon as any of the datasets is exhausted.
    """
    def __init__(
        self, datasets: List[StreamingDataset], seed: int = 42, weights: Optional[Sequence[float]] = None
    ) -> None:
        """Store the datasets and normalize the sampling weights (uniform when omitted)."""
        self._check_datasets(datasets)
        self._seed = seed
        self._datasets = datasets
        self._weights = weights
        num_datasets = len(datasets)
        if weights is None:
            # No weights provided: sample uniformly across all datasets.
            self._weights = [1 / float(num_datasets)] * num_datasets
        else:
            # Normalize user-provided weights so they sum to 1.
            self._weights = [w / sum(weights) for w in weights]
        self._iterator: Optional[_CombinedDatasetIterator] = None
        self._use_streaming_dataloader = False
        # After `load_state_dict`, maps worker rank -> per-dataset sample counts (see `__iter__`).
        self._num_samples_yielded: Optional[Dict[int, List[int]]] = None
        self._current_epoch = 0
    def set_epoch(self, current_epoch: int) -> None:
        """Set the current epoch to the datasets on epoch starts.

        When using the StreamingDataLoader, this is done automatically.
        """
        self._current_epoch = current_epoch
        for dataset in self._datasets:
            dataset.set_epoch(current_epoch)
    def set_shuffle(self, shuffle: bool) -> None:
        """Set the current shuffle to the datasets."""
        for dataset in self._datasets:
            dataset.set_shuffle(shuffle)
    def _check_datasets(self, datasets: List[StreamingDataset]) -> None:
        # Fail fast on anything that isn't a StreamingDataset.
        if any(not isinstance(d, StreamingDataset) for d in datasets):
            raise RuntimeError("The provided datasets should be instances of the StreamingDataset.")
    def _set_use_streaming_dataloader(self, use_streaming_dataloader: bool) -> None:
        # Used to prevent returning num_samples_yielded when using PyTorch DataLoader
        self._use_streaming_dataloader = use_streaming_dataloader
    def __iter__(self) -> Iterator[Any]:
        """Create a fresh weighted-sampling iterator, resuming from saved counts when present."""
        assert self._weights
        worker_env = _WorkerEnv.detect()
        num_samples_yielded = None
        # Restore this worker's progress if a state was loaded for its rank.
        if self._num_samples_yielded is not None and worker_env.rank in self._num_samples_yielded:
            num_samples_yielded = self._num_samples_yielded[worker_env.rank]
        self._iterator = _CombinedDatasetIterator(
            self._datasets,
            self._seed,
            self._weights,
            self._use_streaming_dataloader,
            num_samples_yielded,
        )
        return self._iterator
    def state_dict(
        self, num_workers: int, batch_size: int, num_samples_yielded: Optional[List[int]] = None
    ) -> Dict[str, Any]:
        """Return the per-dataset states, using the live iterator's counts when available."""
        if self._iterator is None:
            if num_samples_yielded is None:
                return {}
            return _state_dict(self._datasets, num_samples_yielded, num_workers, batch_size)
        return self._iterator.state_dict(num_workers, batch_size)
    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Restore each wrapped dataset's state from ``state_dict``.

        Raises:
            RuntimeError: If the state doesn't cover exactly the current datasets.
        """
        if not state_dict:
            return
        if len(state_dict["dataset"]) != len(self._datasets):
            raise RuntimeError(f"The provided state doesn't match the current number of datasets: {self._datasets}.")
        for dataset_idx, dataset in enumerate(self._datasets):
            if str(dataset_idx) not in state_dict["dataset"]:
                raise RuntimeError(f"The provided state doesn't contain the index {dataset_idx}.")
            dataset.load_state_dict(state_dict["dataset"][str(dataset_idx)])
        # Used to iterate over the sampler to avoid sampling the same samples
        if self._use_streaming_dataloader:
            self._num_samples_yielded = state_dict["num_samples_yielded"]
class _CombinedDatasetIterator(Iterator):
    """Iterator that interleaves several StreamingDatasets by weighted random choice.

    A seeded RNG picks which dataset supplies each sample, and per-dataset counts
    are tracked so an interrupted run can be resumed deterministically.
    """
    def __init__(
        self,
        datasets: List[StreamingDataset],
        seed: int,
        weights: Sequence[float],
        use_streaming_dataloader: bool,
        num_samples_yielded: Optional[Any] = None,
    ) -> None:
        self._datasets = datasets
        self._dataset_iters = [iter(ds) for ds in datasets]
        self._dataset_indexes = list(range(len(datasets)))
        self._weights = weights
        self._rng = random.Random(seed)
        if num_samples_yielded is None:
            # Fresh run: nothing consumed yet.
            self._num_samples_yielded = [0] * len(datasets)
        else:
            # Resuming: restore the counts and fast-forward the RNG so the upcoming
            # dataset choices match what an uninterrupted run would have produced.
            self._num_samples_yielded = num_samples_yielded
            for _ in range(sum(num_samples_yielded)):
                self._rng.choices(self._dataset_indexes, weights=self._weights, k=1)
        self._use_streaming_dataloader = use_streaming_dataloader
    def __next__(self) -> Any:
        """Draw the next sample from a randomly (weight-proportionally) chosen dataset."""
        (chosen,) = self._rng.choices(self._dataset_indexes, weights=self._weights, k=1)
        # Record the draw before fetching so resume bookkeeping stays in sync.
        self._num_samples_yielded[chosen] += 1
        sample = next(self._dataset_iters[chosen])
        if not self._use_streaming_dataloader:
            return sample
        # Streaming-dataloader protocol: bundle the sample with the progress counters.
        return {
            __SAMPLES_KEY__: sample,
            __NUM_SAMPLES_YIELDED_KEY__: self._num_samples_yielded,
        }
    def state_dict(self, num_workers: int = 0, batch_size: int = 1) -> Dict[str, Any]:
        """Collect the state of every wrapped dataset, keyed by dataset index."""
        return _state_dict(self._datasets, self._num_samples_yielded, num_workers, batch_size)
def _state_dict(
    datasets: List[StreamingDataset], num_samples_yielded: List[int], num_workers: int = 0, batch_size: int = 1
) -> Dict[str, Any]:
    """Build a mapping from dataset index (as a string) to that dataset's state_dict."""
    states: Dict[str, Any] = {}
    for idx, ds in enumerate(datasets):
        states[str(idx)] = ds.state_dict(
            num_samples_yielded=num_samples_yielded[idx], num_workers=num_workers, batch_size=batch_size
        )
    return states
|
evocodebench_data_135
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Any, Dict, Iterator, List, Optional, Sequence
from torch.utils.data import IterableDataset
from litdata.streaming.dataset import StreamingDataset
from litdata.utilities.env import _WorkerEnv
# Key under which the iterator reports per-dataset sample counts to the StreamingDataLoader.
__NUM_SAMPLES_YIELDED_KEY__ = "__NUM_SAMPLES_YIELDED__"
# Key under which the actual sample payload is returned when the streaming-dataloader protocol is active.
__SAMPLES_KEY__ = "__SAMPLES__"
class CombinedStreamingDataset(IterableDataset):
    """The `CombinedStreamingDataset` enables to stream data from multiple StreamingDataset with the sampling ratio of
    your choice.

    Additionally, the `CombinedStreamingDataset` keeps track of the number of samples fetched to enable resumability
    of the datasets.

    Note that due to the random sampling, the number of samples returned from the iterator is variable and a function
    of the given seed. The combined dataset will raise a StopIteration as soon as any of the datasets is exhausted.
    """
    def __init__(
        self, datasets: List[StreamingDataset], seed: int = 42, weights: Optional[Sequence[float]] = None
    ) -> None:
        """Store the datasets and normalize the sampling weights (uniform when omitted)."""
        self._check_datasets(datasets)
        self._seed = seed
        self._datasets = datasets
        self._weights = weights
        num_datasets = len(datasets)
        if weights is None:
            # No weights provided: sample uniformly across all datasets.
            self._weights = [1 / float(num_datasets)] * num_datasets
        else:
            # Normalize user-provided weights so they sum to 1.
            self._weights = [w / sum(weights) for w in weights]
        self._iterator: Optional[_CombinedDatasetIterator] = None
        self._use_streaming_dataloader = False
        # After `load_state_dict`, maps worker rank -> per-dataset sample counts (see `__iter__`).
        self._num_samples_yielded: Optional[Dict[int, List[int]]] = None
        self._current_epoch = 0
    def set_epoch(self, current_epoch: int) -> None:
        """Set the current epoch to the datasets on epoch starts.

        When using the StreamingDataLoader, this is done automatically.
        """
        self._current_epoch = current_epoch
        for dataset in self._datasets:
            dataset.set_epoch(current_epoch)
    def set_shuffle(self, shuffle: bool) -> None:
        """Set the current shuffle to the datasets."""
        for dataset in self._datasets:
            dataset.set_shuffle(shuffle)
    def _check_datasets(self, datasets: List[StreamingDataset]) -> None:
        # Fail fast on anything that isn't a StreamingDataset.
        if any(not isinstance(d, StreamingDataset) for d in datasets):
            raise RuntimeError("The provided datasets should be instances of the StreamingDataset.")
    def _set_use_streaming_dataloader(self, use_streaming_dataloader: bool) -> None:
        # Used to prevent returning num_samples_yielded when using PyTorch DataLoader
        self._use_streaming_dataloader = use_streaming_dataloader
    def __iter__(self) -> Iterator[Any]:
        """Create a fresh weighted-sampling iterator, resuming from saved counts when present."""
        assert self._weights
        worker_env = _WorkerEnv.detect()
        num_samples_yielded = None
        # Restore this worker's progress if a state was loaded for its rank.
        if self._num_samples_yielded is not None and worker_env.rank in self._num_samples_yielded:
            num_samples_yielded = self._num_samples_yielded[worker_env.rank]
        self._iterator = _CombinedDatasetIterator(
            self._datasets,
            self._seed,
            self._weights,
            self._use_streaming_dataloader,
            num_samples_yielded,
        )
        return self._iterator
    def state_dict(
        self, num_workers: int, batch_size: int, num_samples_yielded: Optional[List[int]] = None
    ) -> Dict[str, Any]:
        """Return the per-dataset states, using the live iterator's counts when available."""
        if self._iterator is None:
            if num_samples_yielded is None:
                return {}
            return _state_dict(self._datasets, num_samples_yielded, num_workers, batch_size)
        return self._iterator.state_dict(num_workers, batch_size)
    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Restore each wrapped dataset's state from ``state_dict``.

        Raises:
            RuntimeError: If the state doesn't cover exactly the current datasets.
        """
        if not state_dict:
            return
        if len(state_dict["dataset"]) != len(self._datasets):
            raise RuntimeError(f"The provided state doesn't match the current number of datasets: {self._datasets}.")
        for dataset_idx, dataset in enumerate(self._datasets):
            if str(dataset_idx) not in state_dict["dataset"]:
                raise RuntimeError(f"The provided state doesn't contain the index {dataset_idx}.")
            dataset.load_state_dict(state_dict["dataset"][str(dataset_idx)])
        # Used to iterate over the sampler to avoid sampling the same samples
        if self._use_streaming_dataloader:
            self._num_samples_yielded = state_dict["num_samples_yielded"]
class _CombinedDatasetIterator(Iterator):
    """Iterator that interleaves several StreamingDatasets by weighted random choice.

    A seeded RNG picks which dataset supplies each sample, and per-dataset counts
    are tracked so an interrupted run can be resumed deterministically.
    """
    def __init__(
        self,
        datasets: List[StreamingDataset],
        seed: int,
        weights: Sequence[float],
        use_streaming_dataloader: bool,
        num_samples_yielded: Optional[Any] = None,
    ) -> None:
        self._datasets = datasets
        self._dataset_iters = [iter(ds) for ds in datasets]
        self._dataset_indexes = list(range(len(datasets)))
        self._weights = weights
        self._rng = random.Random(seed)
        if num_samples_yielded is None:
            # Fresh run: nothing consumed yet.
            self._num_samples_yielded = [0] * len(datasets)
        else:
            # Resuming: restore the counts and fast-forward the RNG so the upcoming
            # dataset choices match what an uninterrupted run would have produced.
            self._num_samples_yielded = num_samples_yielded
            for _ in range(sum(num_samples_yielded)):
                self._rng.choices(self._dataset_indexes, weights=self._weights, k=1)
        self._use_streaming_dataloader = use_streaming_dataloader
    def __next__(self) -> Any:
        """Draw the next sample from a randomly (weight-proportionally) chosen dataset."""
        (chosen,) = self._rng.choices(self._dataset_indexes, weights=self._weights, k=1)
        # Record the draw before fetching so resume bookkeeping stays in sync.
        self._num_samples_yielded[chosen] += 1
        sample = next(self._dataset_iters[chosen])
        if not self._use_streaming_dataloader:
            return sample
        # Streaming-dataloader protocol: bundle the sample with the progress counters.
        return {
            __SAMPLES_KEY__: sample,
            __NUM_SAMPLES_YIELDED_KEY__: self._num_samples_yielded,
        }
    def state_dict(self, num_workers: int = 0, batch_size: int = 1) -> Dict[str, Any]:
        """Collect the state of every wrapped dataset, keyed by dataset index."""
        return _state_dict(self._datasets, self._num_samples_yielded, num_workers, batch_size)
def _state_dict(
    datasets: List[StreamingDataset], num_samples_yielded: List[int], num_workers: int = 0, batch_size: int = 1
) -> Dict[str, Any]:
    """Build a mapping from dataset index (as a string) to that dataset's state_dict."""
    states: Dict[str, Any] = {}
    for idx, ds in enumerate(datasets):
        states[str(idx)] = ds.state_dict(
            num_samples_yielded=num_samples_yielded[idx], num_workers=num_workers, batch_size=batch_size
        )
    return states
|
evocodebench_data_136
|
import datetime
import os
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from time import sleep
from typing import Optional, Union
from urllib import parse
from lightning_cloud.openapi import V1CloudSpace
from lightning_cloud.rest_client import LightningClient
# To avoid adding lightning_utilities as a dependency for now.
try:
    import boto3
    import botocore
    _BOTO3_AVAILABLE = True
except Exception:
    # boto3/botocore are optional; S3-related helpers only run when this flag is True.
    _BOTO3_AVAILABLE = False
try:
    from lightning_sdk import Machine, Studio
    _LIGHTNING_SDK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    # Fallback stub so `Machine` can still appear in annotations/defaults without the SDK.
    class Machine:  # type: ignore
        pass
    _LIGHTNING_SDK_AVAILABLE = False
@dataclass
class Dir:
    """Holds a directory path and possibly its associated remote URL."""
    # Local filesystem path (absolute once resolved); None for purely remote locations.
    path: Optional[str] = None
    # Remote backing location (e.g. an ``s3://`` URI), if any.
    url: Optional[str] = None
def _resolve_dir(dir_path: Optional[Union[str, Dir]]) -> Dir:
    """Normalize ``dir_path`` into a :class:`Dir`, resolving Lightning teamspace mounts.

    Handles ``Dir`` passthrough, ``None``, remote ``s3://``/``local:`` URIs, time
    templates, and the special ``/teamspace/...`` mount points.
    """
    if isinstance(dir_path, Dir):
        # Re-wrap to guarantee plain-string fields on the returned object.
        return Dir(path=str(dir_path.path) if dir_path.path else None, url=str(dir_path.url) if dir_path.url else None)
    if dir_path is None:
        return Dir()
    if not isinstance(dir_path, str):
        raise ValueError(f"`dir_path` must be a `Dir` or a string, got: {dir_path}")
    assert isinstance(dir_path, str)
    # Remote locations are returned as URL-only directories.
    if dir_path.startswith(("s3://", "local:")):
        return Dir(path=None, url=dir_path)
    dir_path = _resolve_time_template(dir_path)
    resolved = str(Path(dir_path).absolute().resolve())
    if resolved.startswith("/teamspace/studios/this_studio"):
        return Dir(path=resolved, url=None)
    if resolved.startswith("/.project"):
        # Keep the original (unresolved) form for project-relative paths.
        resolved = dir_path
    segments = resolved.split("/")
    if resolved.startswith("/teamspace/studios") and len(segments) > 3:
        return _resolve_studio(resolved, segments[3], None)
    if resolved.startswith("/teamspace/s3_connections") and len(segments) > 3:
        return _resolve_s3_connections(resolved)
    if resolved.startswith("/teamspace/datasets") and len(segments) > 3:
        return _resolve_datasets(resolved)
    return Dir(path=resolved, url=None)
def _match_studio(target_id: Optional[str], target_name: Optional[str], cloudspace: V1CloudSpace) -> bool:
    """Return True when ``cloudspace`` matches the requested Studio id or (display) name.

    Name comparisons are case-insensitive; ``None`` targets never match.
    """
    if target_id is not None and cloudspace.id == target_id:
        return True
    if target_name is None:
        return False
    wanted = target_name.lower()
    if cloudspace.name is not None and cloudspace.name.lower() == wanted:
        return True
    return cloudspace.display_name is not None and cloudspace.display_name.lower() == wanted
def _resolve_studio(dir_path: str, target_name: Optional[str], target_id: Optional[str]) -> Dir:
    """Resolve a ``/teamspace/studios/<name>/...`` path to its backing S3 location.

    Args:
        dir_path: The absolute teamspace path being resolved.
        target_name: Studio name to match (may be None when matching by id).
        target_id: Studio id to match (may be None when matching by name).

    Returns:
        A Dir pairing the original path with the Studio's S3 content URL.

    Raises:
        RuntimeError: If the cluster/project ids aren't in the environment.
        ValueError: If no matching Studio or cluster is found.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    cluster_id = os.getenv("LIGHTNING_CLUSTER_ID", None)
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    if cluster_id is None:
        raise RuntimeError("The `cluster_id` couldn't be found from the environement variables.")
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    clusters = client.cluster_service_list_project_clusters(project_id).clusters
    cloudspaces = client.cloud_space_service_list_cloud_spaces(project_id=project_id, cluster_id=cluster_id).cloudspaces
    target_cloud_space = [cloudspace for cloudspace in cloudspaces if _match_studio(target_id, target_name, cloudspace)]
    if not target_cloud_space:
        raise ValueError(f"We didn't find any matching Studio for the provided name `{target_name}`.")
    target_cluster = [cluster for cluster in clusters if cluster.id == target_cloud_space[0].cluster_id]
    if not target_cluster:
        raise ValueError(
            f"We didn't find a matching cluster associated with the id {target_cloud_space[0].cluster_id}."
        )
    # The Studio's content lives in the cluster bucket; everything after the studio
    # name segment (index 4+) is appended to the well-known code/content prefix.
    bucket_name = target_cluster[0].spec.aws_v1.bucket_name
    return Dir(
        path=dir_path,
        url=os.path.join(
            f"s3://{bucket_name}/projects/{project_id}/cloudspaces/{target_cloud_space[0].id}/code/content",
            *dir_path.split("/")[4:],
        ),
    )
def _resolve_s3_connections(dir_path: str) -> Dir:
    """Resolve a ``/teamspace/s3_connections/<name>/...`` path to its S3 source URL.

    Raises:
        RuntimeError: If the project id isn't available in the environment.
        ValueError: If no data connection matches the name in the path.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    # The connection name is the path segment right after `/teamspace/s3_connections/`.
    target_name = dir_path.split("/")[3]
    data_connections = client.data_connection_service_list_data_connections(project_id).data_connections
    data_connection = [dc for dc in data_connections if dc.name == target_name]
    if not data_connection:
        raise ValueError(f"We didn't find any matching data connection with the provided name `{target_name}`.")
    return Dir(path=dir_path, url=os.path.join(data_connection[0].aws.source, *dir_path.split("/")[4:]))
def _resolve_datasets(dir_path: str) -> Dir:
    """Resolve a ``/teamspace/datasets/...`` path to its S3 dataset location.

    Raises:
        RuntimeError: If the cluster/project/cloud-space ids aren't in the environment.
        ValueError: If the current Studio or its cluster can't be found.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    cluster_id = os.getenv("LIGHTNING_CLUSTER_ID", None)
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    cloud_space_id = os.getenv("LIGHTNING_CLOUD_SPACE_ID", None)
    if cluster_id is None:
        raise RuntimeError("The `cluster_id` couldn't be found from the environement variables.")
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    if cloud_space_id is None:
        raise RuntimeError("The `cloud_space_id` couldn't be found from the environement variables.")
    clusters = client.cluster_service_list_project_clusters(project_id).clusters
    # Find the Studio this code is currently running in.
    target_cloud_space = [
        cloudspace
        for cloudspace in client.cloud_space_service_list_cloud_spaces(
            project_id=project_id, cluster_id=cluster_id
        ).cloudspaces
        if cloudspace.id == cloud_space_id
    ]
    if not target_cloud_space:
        raise ValueError(f"We didn't find any matching Studio for the provided id `{cloud_space_id}`.")
    target_cluster = [cluster for cluster in clusters if cluster.id == target_cloud_space[0].cluster_id]
    if not target_cluster:
        raise ValueError(
            f"We didn't find a matching cluster associated with the id {target_cloud_space[0].cluster_id}."
        )
    # Datasets live under the cluster bucket at `projects/<id>/datasets/`.
    return Dir(
        path=dir_path,
        url=os.path.join(
            f"s3://{target_cluster[0].spec.aws_v1.bucket_name}/projects/{project_id}/datasets/",
            *dir_path.split("/")[3:],
        ),
    )
def _assert_dir_is_empty(output_dir: Dir, append: bool = False, overwrite: bool = False) -> None:
    """Raise if the remote S3 location behind ``output_dir`` already contains objects.

    Args:
        output_dir: The resolved output directory; only its ``url`` is inspected.
        append: Reserved; appending to existing datasets isn't supported yet.
        overwrite: Reserved; overwriting existing datasets isn't supported yet.

    Raises:
        ValueError: If ``output_dir`` isn't a ``Dir`` or its URL isn't an ``s3://`` URI.
        RuntimeError: If the S3 prefix already contains at least one object.
    """
    if not isinstance(output_dir, Dir):
        raise ValueError("The provided output_dir isn't a Dir Object.")
    if output_dir.url is None:
        # Purely local output: nothing to validate remotely.
        return
    obj = parse.urlparse(output_dir.url)
    if obj.scheme != "s3":
        # Bug fix: report the URL that failed validation — `output_dir.path` is
        # typically None for remote locations, producing a useless message.
        raise ValueError(f"The provided folder should start with s3://. Found {output_dir.url}.")
    s3 = boto3.client("s3")
    objects = s3.list_objects_v2(
        Bucket=obj.netloc,
        Delimiter="/",
        Prefix=obj.path.lstrip("/").rstrip("/") + "/",
    )
    # We aren't allowing more data to be added to an existing dataset.
    # TODO: Add support for `append` and `overwrite`.
    if objects["KeyCount"] > 0:
        raise RuntimeError(
            f"The provided output_dir `{output_dir.path}` already contains data and datasets are meant to be immutable."
            " HINT: Did you consider changing the `output_dir` with your own versioning as a suffix?"
        )
def _assert_dir_has_index_file(output_dir: Dir) -> None:
    """Guard against overwriting an already-optimized dataset at ``output_dir``.

    If the S3 prefix contains an ``index.json``, the location is treated as an
    immutable optimized dataset and an error is raised. If it contains other
    objects but no index, those stale objects are deleted so a fresh dataset can
    be written.

    Raises:
        ValueError: If ``output_dir`` isn't a ``Dir`` or its URL isn't an ``s3://`` URI.
        RuntimeError: If the prefix already contains an ``index.json``.
    """
    if not isinstance(output_dir, Dir):
        raise ValueError("The provided output_dir isn't a Dir Object.")
    if output_dir.url is None:
        # Purely local output: nothing to validate remotely.
        return
    # Renamed from `obj` to avoid being shadowed by the S3 object loop below.
    parsed = parse.urlparse(output_dir.url)
    if parsed.scheme != "s3":
        # Bug fix: report the URL that failed validation — `output_dir.path` is
        # typically None for remote locations, producing a useless message.
        raise ValueError(f"The provided folder should start with s3://. Found {output_dir.url}.")
    s3 = boto3.client("s3")
    prefix = parsed.path.lstrip("/").rstrip("/") + "/"
    objects = s3.list_objects_v2(
        Bucket=parsed.netloc,
        Delimiter="/",
        Prefix=prefix,
    )
    # No files are found in this folder
    if objects["KeyCount"] == 0:
        return
    # Check the index file exists
    try:
        s3.head_object(Bucket=parsed.netloc, Key=os.path.join(prefix, "index.json"))
        has_index_file = True
    except botocore.exceptions.ClientError:
        has_index_file = False
    if has_index_file:
        raise RuntimeError(
            f"The provided output_dir `{output_dir.path}` already contains an optimized immutable datasets."
            " HINT: Did you consider changing the `output_dir` with your own versioning as a suffix?"
        )
    # No index file: remove any stale partial objects under the prefix before writing.
    bucket_name = parsed.netloc
    s3 = boto3.resource("s3")
    for s3_object in s3.Bucket(bucket_name).objects.filter(Prefix=prefix):
        s3.Object(bucket_name, s3_object.key).delete()
def _get_lightning_cloud_url() -> str:
# detect local development
if os.getenv("VSCODE_PROXY_URI", "").startswith("http://localhost:9800"):
return "http://localhost:9800"
# DO NOT CHANGE!
return os.getenv("LIGHTNING_CLOUD_URL", "https://lightning.ai")
def _resolve_time_template(path: str) -> str:
match = re.search("^.*{%.*}$", path)
if match is None:
return path
pattern = path.split("{")[1].split("}")[0]
return path.replace("{" + pattern + "}", datetime.datetime.now().strftime(pattern))
def _execute(
    name: str,
    num_nodes: int,
    machine: Optional[Machine] = None,
    command: Optional[str] = None,
) -> None:
    """Remotely execute the current operator as a Studio data-prep job.

    Args:
        name: The display name of the job.
        num_nodes: The number of machine instances to run on.
        machine: The machine type; defaults to the Studio's current machine.
        command: The command to run; defaults to re-running the current script.

    Raises:
        ModuleNotFoundError: If the `lightning_sdk` package isn't installed.
        RuntimeError: If the remote job ends in a failed state.
    """
    if not _LIGHTNING_SDK_AVAILABLE:
        raise ModuleNotFoundError("The `lightning_sdk` is required.")
    # Forward relevant environment variables into the remote command line.
    lightning_skip_install = os.getenv("LIGHTNING_SKIP_INSTALL", "")
    if lightning_skip_install:
        lightning_skip_install = f" LIGHTNING_SKIP_INSTALL={lightning_skip_install} "
    lightning_branch = os.getenv("LIGHTNING_BRANCH", "")
    if lightning_branch:
        lightning_branch = f" LIGHTNING_BRANCH={lightning_branch} "
    studio = Studio()
    job = studio._studio_api.create_data_prep_machine_job(
        command or f"cd {os.getcwd()} &&{lightning_skip_install}{lightning_branch} python {' '.join(sys.argv)}",
        name=name,
        num_instances=num_nodes,
        studio_id=studio._studio.id,
        teamspace_id=studio._teamspace.id,
        cluster_id=studio._studio.cluster_id,
        machine=machine or studio._studio_api.get_machine(studio._studio.id, studio._teamspace.id),
    )
    has_printed = False
    # Poll the job status until it reaches a terminal state.
    while True:
        curr_job = studio._studio_api._client.lightningapp_instance_service_get_lightningapp_instance(
            project_id=studio._teamspace.id, id=job.id
        )
        if not has_printed:
            # Print the job URL once so the user can follow progress in the UI.
            cloud_url = os.getenv("LIGHTNING_CLOUD_URL", "https://lightning.ai").replace(":443", "")
            job_url = f"{cloud_url}/{studio.owner}/{studio._teamspace.name}"
            job_url += f"/studios/{studio.name}/app?app_id=data-prep&job_name={curr_job.name}"
            print(f"Find your job at {job_url}")
            has_printed = True
        if curr_job.status.phase == "LIGHTNINGAPP_INSTANCE_STATE_FAILED":
            raise RuntimeError(f"job {curr_job.name} failed!")
        if curr_job.status.phase in ["LIGHTNINGAPP_INSTANCE_STATE_STOPPED", "LIGHTNINGAPP_INSTANCE_STATE_COMPLETED"]:
            break
        sleep(1)
|
evocodebench_data_137
|
import datetime
import os
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from time import sleep
from typing import Optional, Union
from urllib import parse
from lightning_cloud.openapi import V1CloudSpace
from lightning_cloud.rest_client import LightningClient
# To avoid adding lightning_utilities as a dependency for now.
try:
import boto3
import botocore
_BOTO3_AVAILABLE = True
except Exception:
_BOTO3_AVAILABLE = False
try:
from lightning_sdk import Machine, Studio
_LIGHTNING_SDK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
class Machine: # type: ignore
pass
_LIGHTNING_SDK_AVAILABLE = False
@dataclass
class Dir:
    """Holds a directory path and possibly its associated remote URL."""

    # Local filesystem path (absolute once resolved), or None for remote-only dirs.
    path: Optional[str] = None
    # Remote location (e.g. `s3://bucket/prefix`), or None for local-only dirs.
    url: Optional[str] = None
def _resolve_dir(dir_path: Optional[Union[str, Dir]]) -> Dir:
    """Normalize a user-provided directory into a ``Dir`` (local path and/or remote URL).

    Supports plain local paths, `s3://` and `local:` URLs, and `/teamspace/...`
    mount points which are resolved to their backing S3 location.

    Raises:
        ValueError: If ``dir_path`` is neither None, a string, nor a ``Dir``.
    """
    if isinstance(dir_path, Dir):
        # Copy into a fresh Dir, coercing path/url to plain strings.
        return Dir(path=str(dir_path.path) if dir_path.path else None, url=str(dir_path.url) if dir_path.url else None)
    if dir_path is None:
        return Dir()
    if not isinstance(dir_path, str):
        raise ValueError(f"`dir_path` must be a `Dir` or a string, got: {dir_path}")
    assert isinstance(dir_path, str)
    if dir_path.startswith("s3://"):
        return Dir(path=None, url=dir_path)
    if dir_path.startswith("local:"):
        return Dir(path=None, url=dir_path)
    # Expand any `{%...}` strftime template before resolving the path.
    dir_path = _resolve_time_template(dir_path)
    dir_path_absolute = str(Path(dir_path).absolute().resolve())
    if dir_path_absolute.startswith("/teamspace/studios/this_studio"):
        # The current Studio's own filesystem: no remote URL needed.
        return Dir(path=dir_path_absolute, url=None)
    if dir_path_absolute.startswith("/.project"):
        # NOTE(review): presumably falls back to the original (unresolved) form
        # for project-root paths — confirm against callers.
        dir_path_absolute = dir_path
    if dir_path_absolute.startswith("/teamspace/studios") and len(dir_path_absolute.split("/")) > 3:
        return _resolve_studio(dir_path_absolute, dir_path_absolute.split("/")[3], None)
    if dir_path_absolute.startswith("/teamspace/s3_connections") and len(dir_path_absolute.split("/")) > 3:
        return _resolve_s3_connections(dir_path_absolute)
    if dir_path_absolute.startswith("/teamspace/datasets") and len(dir_path_absolute.split("/")) > 3:
        return _resolve_datasets(dir_path_absolute)
    return Dir(path=dir_path_absolute, url=None)
def _match_studio(target_id: Optional[str], target_name: Optional[str], cloudspace: V1CloudSpace) -> bool:
    """Tell whether ``cloudspace`` matches the requested Studio id or (case-insensitive) name."""
    if target_id is not None and cloudspace.id == target_id:
        return True
    if target_name is None:
        return False
    wanted = target_name.lower()
    # Both the internal name and the user-facing display name may match.
    candidates = (cloudspace.name, cloudspace.display_name)
    return any(candidate is not None and candidate.lower() == wanted for candidate in candidates)
def _resolve_studio(dir_path: str, target_name: Optional[str], target_id: Optional[str]) -> Dir:
    """Resolve a `/teamspace/studios/<name>/...` path to its backing S3 URL.

    Raises:
        RuntimeError: If the cluster/project ids are missing from the environment.
        ValueError: If no matching Studio or cluster is found.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    cluster_id = os.getenv("LIGHTNING_CLUSTER_ID", None)
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    if cluster_id is None:
        raise RuntimeError("The `cluster_id` couldn't be found from the environement variables.")
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    clusters = client.cluster_service_list_project_clusters(project_id).clusters
    cloudspaces = client.cloud_space_service_list_cloud_spaces(project_id=project_id, cluster_id=cluster_id).cloudspaces
    target_cloud_space = [cloudspace for cloudspace in cloudspaces if _match_studio(target_id, target_name, cloudspace)]
    if not target_cloud_space:
        raise ValueError(f"We didn't find any matching Studio for the provided name `{target_name}`.")
    target_cluster = [cluster for cluster in clusters if cluster.id == target_cloud_space[0].cluster_id]
    if not target_cluster:
        raise ValueError(
            f"We didn't find a matching cluster associated with the id {target_cloud_space[0].cluster_id}."
        )
    bucket_name = target_cluster[0].spec.aws_v1.bucket_name
    # Map the components after `/teamspace/studios/<name>` onto the Studio's
    # content folder within the cluster bucket.
    return Dir(
        path=dir_path,
        url=os.path.join(
            f"s3://{bucket_name}/projects/{project_id}/cloudspaces/{target_cloud_space[0].id}/code/content",
            *dir_path.split("/")[4:],
        ),
    )
def _resolve_s3_connections(dir_path: str) -> Dir:
    """Resolve a `/teamspace/s3_connections/<name>/...` path to the connection's S3 source.

    Raises:
        RuntimeError: If the project id is missing from the environment.
        ValueError: If no data connection matches the requested name.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    # The connection name is the third path component: /teamspace/s3_connections/<name>.
    target_name = dir_path.split("/")[3]
    data_connections = client.data_connection_service_list_data_connections(project_id).data_connections
    data_connection = [dc for dc in data_connections if dc.name == target_name]
    if not data_connection:
        raise ValueError(f"We didn't find any matching data connection with the provided name `{target_name}`.")
    return Dir(path=dir_path, url=os.path.join(data_connection[0].aws.source, *dir_path.split("/")[4:]))
def _resolve_datasets(dir_path: str) -> Dir:
    """Resolve a `/teamspace/datasets/...` path to the project's datasets S3 prefix.

    Raises:
        RuntimeError: If cluster/project/cloud-space ids are missing from the environment.
        ValueError: If the current Studio or its cluster can't be found.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    cluster_id = os.getenv("LIGHTNING_CLUSTER_ID", None)
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    cloud_space_id = os.getenv("LIGHTNING_CLOUD_SPACE_ID", None)
    if cluster_id is None:
        raise RuntimeError("The `cluster_id` couldn't be found from the environement variables.")
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    if cloud_space_id is None:
        raise RuntimeError("The `cloud_space_id` couldn't be found from the environement variables.")
    clusters = client.cluster_service_list_project_clusters(project_id).clusters
    target_cloud_space = [
        cloudspace
        for cloudspace in client.cloud_space_service_list_cloud_spaces(
            project_id=project_id, cluster_id=cluster_id
        ).cloudspaces
        if cloudspace.id == cloud_space_id
    ]
    if not target_cloud_space:
        raise ValueError(f"We didn't find any matching Studio for the provided id `{cloud_space_id}`.")
    target_cluster = [cluster for cluster in clusters if cluster.id == target_cloud_space[0].cluster_id]
    if not target_cluster:
        raise ValueError(
            f"We didn't find a matching cluster associated with the id {target_cloud_space[0].cluster_id}."
        )
    # Keep everything after `/teamspace/` so the dataset name is part of the S3 key.
    return Dir(
        path=dir_path,
        url=os.path.join(
            f"s3://{target_cluster[0].spec.aws_v1.bucket_name}/projects/{project_id}/datasets/",
            *dir_path.split("/")[3:],
        ),
    )
def _assert_dir_is_empty(output_dir: Dir, append: bool = False, overwrite: bool = False) -> None:
    """Raise when the S3 prefix behind ``output_dir`` already contains objects."""
    if not isinstance(output_dir, Dir):
        raise ValueError("The provided output_dir isn't a Dir Object.")
    if output_dir.url is None:
        # Nothing to verify for a purely local directory.
        return
    parsed = parse.urlparse(output_dir.url)
    if parsed.scheme != "s3":
        raise ValueError(f"The provided folder should start with s3://. Found {output_dir.path}.")
    folder_prefix = parsed.path.lstrip("/").rstrip("/") + "/"
    client = boto3.client("s3")
    listing = client.list_objects_v2(
        Bucket=parsed.netloc,
        Delimiter="/",
        Prefix=folder_prefix,
    )
    # Datasets are immutable: adding data to a populated prefix is refused.
    # TODO: Add support for `append` and `overwrite`.
    if listing["KeyCount"] > 0:
        raise RuntimeError(
            f"The provided output_dir `{output_dir.path}` already contains data and datasets are meant to be immutable."
            " HINT: Did you consider changing the `output_dir` with your own versioning as a suffix?"
        )
def _assert_dir_has_index_file(output_dir: Dir) -> None:
    """Guard against writing into an S3 prefix that already holds an optimized dataset.

    If the remote prefix contains an `index.json`, the dataset is considered
    immutable and a ``RuntimeError`` is raised. If the prefix holds data but no
    index file (e.g. leftovers from an incomplete run), the objects are deleted
    so the new run starts from a clean folder.

    Raises:
        ValueError: If ``output_dir`` isn't a ``Dir`` or its URL isn't an ``s3://`` URL.
        RuntimeError: If an optimized dataset already exists at the prefix.
    """
    if not isinstance(output_dir, Dir):
        raise ValueError("The provided output_dir isn't a Dir Object.")
    # A purely local Dir has nothing to validate remotely.
    if output_dir.url is None:
        return
    obj = parse.urlparse(output_dir.url)
    if obj.scheme != "s3":
        raise ValueError(f"The provided folder should start with s3://. Found {output_dir.path}.")
    s3 = boto3.client("s3")
    prefix = obj.path.lstrip("/").rstrip("/") + "/"
    objects = s3.list_objects_v2(
        Bucket=obj.netloc,
        Delimiter="/",
        Prefix=prefix,
    )
    # No files are found in this folder
    if objects["KeyCount"] == 0:
        return
    # Check the index file exists
    try:
        s3.head_object(Bucket=obj.netloc, Key=os.path.join(prefix, "index.json"))
        has_index_file = True
    except botocore.exceptions.ClientError:
        has_index_file = False
    if has_index_file:
        raise RuntimeError(
            f"The provided output_dir `{output_dir.path}` already contains an optimized immutable datasets."
            " HINT: Did you consider changing the `output_dir` with your own versioning as a suffix?"
        )
    # Data exists but no index file: wipe the prefix before writing fresh data.
    bucket_name = obj.netloc
    s3 = boto3.resource("s3")
    for obj in s3.Bucket(bucket_name).objects.filter(Prefix=prefix):
        s3.Object(bucket_name, obj.key).delete()
def _get_lightning_cloud_url() -> str:
    """Return the Lightning cloud URL, preferring the local dev server when detected."""
    # detect local development
    if os.getenv("VSCODE_PROXY_URI", "").startswith("http://localhost:9800"):
        return "http://localhost:9800"
    # DO NOT CHANGE!
    return os.getenv("LIGHTNING_CLOUD_URL", "https://lightning.ai")
def _resolve_time_template(path: str) -> str:
match = re.search("^.*{%.*}$", path)
if match is None:
return path
pattern = path.split("{")[1].split("}")[0]
return path.replace("{" + pattern + "}", datetime.datetime.now().strftime(pattern))
def _execute(
    name: str,
    num_nodes: int,
    machine: Optional[Machine] = None,
    command: Optional[str] = None,
) -> None:
    """Remotely execute the current operator as a Studio data-prep job.

    Args:
        name: The display name of the job.
        num_nodes: The number of machine instances to run on.
        machine: The machine type; defaults to the Studio's current machine.
        command: The command to run; defaults to re-running the current script.

    Raises:
        ModuleNotFoundError: If the `lightning_sdk` package isn't installed.
        RuntimeError: If the remote job ends in a failed state.
    """
    if not _LIGHTNING_SDK_AVAILABLE:
        raise ModuleNotFoundError("The `lightning_sdk` is required.")
    # Forward relevant environment variables into the remote command line.
    lightning_skip_install = os.getenv("LIGHTNING_SKIP_INSTALL", "")
    if lightning_skip_install:
        lightning_skip_install = f" LIGHTNING_SKIP_INSTALL={lightning_skip_install} "
    lightning_branch = os.getenv("LIGHTNING_BRANCH", "")
    if lightning_branch:
        lightning_branch = f" LIGHTNING_BRANCH={lightning_branch} "
    studio = Studio()
    job = studio._studio_api.create_data_prep_machine_job(
        command or f"cd {os.getcwd()} &&{lightning_skip_install}{lightning_branch} python {' '.join(sys.argv)}",
        name=name,
        num_instances=num_nodes,
        studio_id=studio._studio.id,
        teamspace_id=studio._teamspace.id,
        cluster_id=studio._studio.cluster_id,
        machine=machine or studio._studio_api.get_machine(studio._studio.id, studio._teamspace.id),
    )
    has_printed = False
    # Poll the job status until it reaches a terminal state.
    while True:
        curr_job = studio._studio_api._client.lightningapp_instance_service_get_lightningapp_instance(
            project_id=studio._teamspace.id, id=job.id
        )
        if not has_printed:
            # Print the job URL once so the user can follow progress in the UI.
            cloud_url = os.getenv("LIGHTNING_CLOUD_URL", "https://lightning.ai").replace(":443", "")
            job_url = f"{cloud_url}/{studio.owner}/{studio._teamspace.name}"
            job_url += f"/studios/{studio.name}/app?app_id=data-prep&job_name={curr_job.name}"
            print(f"Find your job at {job_url}")
            has_printed = True
        if curr_job.status.phase == "LIGHTNINGAPP_INSTANCE_STATE_FAILED":
            raise RuntimeError(f"job {curr_job.name} failed!")
        if curr_job.status.phase in ["LIGHTNINGAPP_INSTANCE_STATE_STOPPED", "LIGHTNINGAPP_INSTANCE_STATE_COMPLETED"]:
            break
        sleep(1)
|
evocodebench_data_138
|
import datetime
import os
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from time import sleep
from typing import Optional, Union
from urllib import parse
from lightning_cloud.openapi import V1CloudSpace
from lightning_cloud.rest_client import LightningClient
# To avoid adding lightning_utilities as a dependency for now.
try:
import boto3
import botocore
_BOTO3_AVAILABLE = True
except Exception:
_BOTO3_AVAILABLE = False
try:
from lightning_sdk import Machine, Studio
_LIGHTNING_SDK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
class Machine: # type: ignore
pass
_LIGHTNING_SDK_AVAILABLE = False
@dataclass
class Dir:
    """Holds a directory path and possibly its associated remote URL."""

    # Local filesystem path (absolute once resolved), or None for remote-only dirs.
    path: Optional[str] = None
    # Remote location (e.g. `s3://bucket/prefix`), or None for local-only dirs.
    url: Optional[str] = None
def _resolve_dir(dir_path: Optional[Union[str, Dir]]) -> Dir:
    """Normalize a user-provided directory into a ``Dir`` (local path and/or remote URL).

    Supports plain local paths, `s3://` and `local:` URLs, and `/teamspace/...`
    mount points which are resolved to their backing S3 location.

    Raises:
        ValueError: If ``dir_path`` is neither None, a string, nor a ``Dir``.
    """
    if isinstance(dir_path, Dir):
        # Copy into a fresh Dir, coercing path/url to plain strings.
        return Dir(path=str(dir_path.path) if dir_path.path else None, url=str(dir_path.url) if dir_path.url else None)
    if dir_path is None:
        return Dir()
    if not isinstance(dir_path, str):
        raise ValueError(f"`dir_path` must be a `Dir` or a string, got: {dir_path}")
    assert isinstance(dir_path, str)
    if dir_path.startswith("s3://"):
        return Dir(path=None, url=dir_path)
    if dir_path.startswith("local:"):
        return Dir(path=None, url=dir_path)
    # Expand any `{%...}` strftime template before resolving the path.
    dir_path = _resolve_time_template(dir_path)
    dir_path_absolute = str(Path(dir_path).absolute().resolve())
    if dir_path_absolute.startswith("/teamspace/studios/this_studio"):
        # The current Studio's own filesystem: no remote URL needed.
        return Dir(path=dir_path_absolute, url=None)
    if dir_path_absolute.startswith("/.project"):
        # NOTE(review): presumably falls back to the original (unresolved) form
        # for project-root paths — confirm against callers.
        dir_path_absolute = dir_path
    if dir_path_absolute.startswith("/teamspace/studios") and len(dir_path_absolute.split("/")) > 3:
        return _resolve_studio(dir_path_absolute, dir_path_absolute.split("/")[3], None)
    if dir_path_absolute.startswith("/teamspace/s3_connections") and len(dir_path_absolute.split("/")) > 3:
        return _resolve_s3_connections(dir_path_absolute)
    if dir_path_absolute.startswith("/teamspace/datasets") and len(dir_path_absolute.split("/")) > 3:
        return _resolve_datasets(dir_path_absolute)
    return Dir(path=dir_path_absolute, url=None)
def _match_studio(target_id: Optional[str], target_name: Optional[str], cloudspace: V1CloudSpace) -> bool:
    """Return True when ``cloudspace`` matches the requested id or (case-insensitive) name."""
    if cloudspace.name is not None and target_name is not None and cloudspace.name.lower() == target_name.lower():
        return True
    if target_id is not None and cloudspace.id == target_id:
        return True
    # The display name may differ from the internal name, so check it too.
    if (
        cloudspace.display_name is not None
        and target_name is not None
        and cloudspace.display_name.lower() == target_name.lower()
    ):
        return True
    return False
def _resolve_studio(dir_path: str, target_name: Optional[str], target_id: Optional[str]) -> Dir:
    """Resolve a `/teamspace/studios/<name>/...` path to its backing S3 URL.

    Raises:
        RuntimeError: If the cluster/project ids are missing from the environment.
        ValueError: If no matching Studio or cluster is found.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    cluster_id = os.getenv("LIGHTNING_CLUSTER_ID", None)
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    if cluster_id is None:
        raise RuntimeError("The `cluster_id` couldn't be found from the environement variables.")
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    clusters = client.cluster_service_list_project_clusters(project_id).clusters
    cloudspaces = client.cloud_space_service_list_cloud_spaces(project_id=project_id, cluster_id=cluster_id).cloudspaces
    target_cloud_space = [cloudspace for cloudspace in cloudspaces if _match_studio(target_id, target_name, cloudspace)]
    if not target_cloud_space:
        raise ValueError(f"We didn't find any matching Studio for the provided name `{target_name}`.")
    target_cluster = [cluster for cluster in clusters if cluster.id == target_cloud_space[0].cluster_id]
    if not target_cluster:
        raise ValueError(
            f"We didn't find a matching cluster associated with the id {target_cloud_space[0].cluster_id}."
        )
    bucket_name = target_cluster[0].spec.aws_v1.bucket_name
    # Map the components after `/teamspace/studios/<name>` onto the Studio's
    # content folder within the cluster bucket.
    return Dir(
        path=dir_path,
        url=os.path.join(
            f"s3://{bucket_name}/projects/{project_id}/cloudspaces/{target_cloud_space[0].id}/code/content",
            *dir_path.split("/")[4:],
        ),
    )
def _resolve_s3_connections(dir_path: str) -> Dir:
    """Map a `/teamspace/s3_connections/<name>/...` path onto the connection's S3 source URL."""
    client = LightningClient(max_tries=2)
    # The project id must be injected through the environment.
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    parts = dir_path.split("/")
    target_name = parts[3]
    matches = [
        connection
        for connection in client.data_connection_service_list_data_connections(project_id).data_connections
        if connection.name == target_name
    ]
    if not matches:
        raise ValueError(f"We didn't find any matching data connection with the provided name `{target_name}`.")
    remote_url = os.path.join(matches[0].aws.source, *parts[4:])
    return Dir(path=dir_path, url=remote_url)
def _resolve_datasets(dir_path: str) -> Dir:
    """Resolve a `/teamspace/datasets/...` path to the project's datasets S3 prefix.

    Raises:
        RuntimeError: If cluster/project/cloud-space ids are missing from the environment.
        ValueError: If the current Studio or its cluster can't be found.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    cluster_id = os.getenv("LIGHTNING_CLUSTER_ID", None)
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    cloud_space_id = os.getenv("LIGHTNING_CLOUD_SPACE_ID", None)
    if cluster_id is None:
        raise RuntimeError("The `cluster_id` couldn't be found from the environement variables.")
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    if cloud_space_id is None:
        raise RuntimeError("The `cloud_space_id` couldn't be found from the environement variables.")
    clusters = client.cluster_service_list_project_clusters(project_id).clusters
    target_cloud_space = [
        cloudspace
        for cloudspace in client.cloud_space_service_list_cloud_spaces(
            project_id=project_id, cluster_id=cluster_id
        ).cloudspaces
        if cloudspace.id == cloud_space_id
    ]
    if not target_cloud_space:
        raise ValueError(f"We didn't find any matching Studio for the provided id `{cloud_space_id}`.")
    target_cluster = [cluster for cluster in clusters if cluster.id == target_cloud_space[0].cluster_id]
    if not target_cluster:
        raise ValueError(
            f"We didn't find a matching cluster associated with the id {target_cloud_space[0].cluster_id}."
        )
    # Keep everything after `/teamspace/` so the dataset name is part of the S3 key.
    return Dir(
        path=dir_path,
        url=os.path.join(
            f"s3://{target_cluster[0].spec.aws_v1.bucket_name}/projects/{project_id}/datasets/",
            *dir_path.split("/")[3:],
        ),
    )
def _assert_dir_is_empty(output_dir: Dir, append: bool = False, overwrite: bool = False) -> None:
    """Raise if the S3 prefix behind ``output_dir`` already contains any objects.

    Args:
        output_dir: The resolved output directory (local path + optional remote URL).
        append: Currently unused (see TODO below).
        overwrite: Currently unused (see TODO below).

    Raises:
        ValueError: If ``output_dir`` isn't a ``Dir`` or its URL isn't an ``s3://`` URL.
        RuntimeError: If the remote prefix already holds data.
    """
    if not isinstance(output_dir, Dir):
        raise ValueError("The provided output_dir isn't a Dir Object.")
    # A purely local Dir has nothing to check remotely.
    if output_dir.url is None:
        return
    obj = parse.urlparse(output_dir.url)
    if obj.scheme != "s3":
        raise ValueError(f"The provided folder should start with s3://. Found {output_dir.path}.")
    s3 = boto3.client("s3")
    objects = s3.list_objects_v2(
        Bucket=obj.netloc,
        Delimiter="/",
        Prefix=obj.path.lstrip("/").rstrip("/") + "/",
    )
    # We aren't allowing to add more data to an existing dataset prefix.
    # TODO: Add support for `append` and `overwrite`.
    if objects["KeyCount"] > 0:
        raise RuntimeError(
            f"The provided output_dir `{output_dir.path}` already contains data and datasets are meant to be immutable."
            " HINT: Did you consider changing the `output_dir` with your own versioning as a suffix?"
        )
def _assert_dir_has_index_file(output_dir: Dir) -> None:
    """Guard against writing into an S3 prefix that already holds an optimized dataset.

    If the remote prefix contains an `index.json`, the dataset is considered
    immutable and a ``RuntimeError`` is raised. If the prefix holds data but no
    index file (e.g. leftovers from an incomplete run), the objects are deleted
    so the new run starts from a clean folder.

    Raises:
        ValueError: If ``output_dir`` isn't a ``Dir`` or its URL isn't an ``s3://`` URL.
        RuntimeError: If an optimized dataset already exists at the prefix.
    """
    if not isinstance(output_dir, Dir):
        raise ValueError("The provided output_dir isn't a Dir Object.")
    # A purely local Dir has nothing to validate remotely.
    if output_dir.url is None:
        return
    obj = parse.urlparse(output_dir.url)
    if obj.scheme != "s3":
        raise ValueError(f"The provided folder should start with s3://. Found {output_dir.path}.")
    s3 = boto3.client("s3")
    prefix = obj.path.lstrip("/").rstrip("/") + "/"
    objects = s3.list_objects_v2(
        Bucket=obj.netloc,
        Delimiter="/",
        Prefix=prefix,
    )
    # No files are found in this folder
    if objects["KeyCount"] == 0:
        return
    # Check the index file exists
    try:
        s3.head_object(Bucket=obj.netloc, Key=os.path.join(prefix, "index.json"))
        has_index_file = True
    except botocore.exceptions.ClientError:
        has_index_file = False
    if has_index_file:
        raise RuntimeError(
            f"The provided output_dir `{output_dir.path}` already contains an optimized immutable datasets."
            " HINT: Did you consider changing the `output_dir` with your own versioning as a suffix?"
        )
    # Data exists but no index file: wipe the prefix before writing fresh data.
    bucket_name = obj.netloc
    s3 = boto3.resource("s3")
    for obj in s3.Bucket(bucket_name).objects.filter(Prefix=prefix):
        s3.Object(bucket_name, obj.key).delete()
def _get_lightning_cloud_url() -> str:
# detect local development
if os.getenv("VSCODE_PROXY_URI", "").startswith("http://localhost:9800"):
return "http://localhost:9800"
# DO NOT CHANGE!
return os.getenv("LIGHTNING_CLOUD_URL", "https://lightning.ai")
def _resolve_time_template(path: str) -> str:
    """Expand a trailing ``{%...}`` strftime template in ``path`` using the current time."""
    match = re.search("^.*{%.*}$", path)
    if match is None:
        return path
    # The text between the first `{` and the next `}` is used as the strftime pattern.
    pattern = path.split("{")[1].split("}")[0]
    return path.replace("{" + pattern + "}", datetime.datetime.now().strftime(pattern))
def _execute(
    name: str,
    num_nodes: int,
    machine: Optional[Machine] = None,
    command: Optional[str] = None,
) -> None:
    """Remotely execute the current operator as a Studio data-prep job.

    Args:
        name: The display name of the job.
        num_nodes: The number of machine instances to run on.
        machine: The machine type; defaults to the Studio's current machine.
        command: The command to run; defaults to re-running the current script.

    Raises:
        ModuleNotFoundError: If the `lightning_sdk` package isn't installed.
        RuntimeError: If the remote job ends in a failed state.
    """
    if not _LIGHTNING_SDK_AVAILABLE:
        raise ModuleNotFoundError("The `lightning_sdk` is required.")
    # Forward relevant environment variables into the remote command line.
    lightning_skip_install = os.getenv("LIGHTNING_SKIP_INSTALL", "")
    if lightning_skip_install:
        lightning_skip_install = f" LIGHTNING_SKIP_INSTALL={lightning_skip_install} "
    lightning_branch = os.getenv("LIGHTNING_BRANCH", "")
    if lightning_branch:
        lightning_branch = f" LIGHTNING_BRANCH={lightning_branch} "
    studio = Studio()
    job = studio._studio_api.create_data_prep_machine_job(
        command or f"cd {os.getcwd()} &&{lightning_skip_install}{lightning_branch} python {' '.join(sys.argv)}",
        name=name,
        num_instances=num_nodes,
        studio_id=studio._studio.id,
        teamspace_id=studio._teamspace.id,
        cluster_id=studio._studio.cluster_id,
        machine=machine or studio._studio_api.get_machine(studio._studio.id, studio._teamspace.id),
    )
    has_printed = False
    # Poll the job status until it reaches a terminal state.
    while True:
        curr_job = studio._studio_api._client.lightningapp_instance_service_get_lightningapp_instance(
            project_id=studio._teamspace.id, id=job.id
        )
        if not has_printed:
            # Print the job URL once so the user can follow progress in the UI.
            cloud_url = os.getenv("LIGHTNING_CLOUD_URL", "https://lightning.ai").replace(":443", "")
            job_url = f"{cloud_url}/{studio.owner}/{studio._teamspace.name}"
            job_url += f"/studios/{studio.name}/app?app_id=data-prep&job_name={curr_job.name}"
            print(f"Find your job at {job_url}")
            has_printed = True
        if curr_job.status.phase == "LIGHTNINGAPP_INSTANCE_STATE_FAILED":
            raise RuntimeError(f"job {curr_job.name} failed!")
        if curr_job.status.phase in ["LIGHTNINGAPP_INSTANCE_STATE_STOPPED", "LIGHTNINGAPP_INSTANCE_STATE_COMPLETED"]:
            break
        sleep(1)
|
evocodebench_data_139
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from dataclasses import dataclass
from time import sleep
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from litdata.constants import _INDEX_FILENAME, _TORCH_GREATER_EQUAL_2_1_0
from litdata.processing.utilities import get_worker_rank
from litdata.streaming.compression import _COMPRESSORS, Compressor
from litdata.streaming.serializers import Serializer, _get_serializers
from litdata.utilities.env import _DistributedEnv, _WorkerEnv
from litdata.utilities.format import _convert_bytes_to_int, _human_readable_bytes
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import PyTree, tree_flatten, treespec_dumps
@dataclass
class Item:
    """A single serialized sample buffered until it is flushed into a chunk."""

    # Position of the sample within the dataset.
    index: int
    # The serialized payload.
    data: bytes
    # Size of the payload in bytes.
    bytes: int
    # Leading dimension for single-tensor samples (see `BinaryWriter.serialize`), else None.
    dim: Optional[int] = None

    def __len__(self) -> int:
        # The "length" of an item is its payload size in bytes.
        return self.bytes
class BinaryWriter:
def __init__(
self,
cache_dir: str,
chunk_size: Optional[int] = None,
chunk_bytes: Optional[Union[int, str]] = None,
compression: Optional[str] = None,
follow_tensor_dimension: bool = True,
serializers: Optional[Dict[str, Serializer]] = None,
):
"""The BinaryWriter enables to chunk dataset into an efficient streaming format for cloud training.
Arguments:
cache_dir: The path to where the chunks will be saved.
chunk_bytes: The maximum number of bytes within a chunk.
chunk_size: The maximum number of items within a chunk.
compression: The compression algorithm to use.
serializers: Provide your own serializers.
"""
self._cache_dir = cache_dir
if (isinstance(self._cache_dir, str) and not os.path.exists(self._cache_dir)) or self._cache_dir is None:
raise FileNotFoundError(f"The provided cache directory `{self._cache_dir}` doesn't exist.")
if (chunk_size is None and chunk_bytes is None) or (chunk_size and chunk_bytes):
raise ValueError("Either one of the `chunk_size` or the `chunk_bytes` need to be provided.")
self._serializers: Dict[str, Serializer] = _get_serializers(serializers)
self._serializers_extra: Dict[str, Serializer] = {}
self._chunk_size = chunk_size
self._chunk_bytes = _convert_bytes_to_int(chunk_bytes) if isinstance(chunk_bytes, str) else chunk_bytes
self._compression = compression
self._data_format: Optional[List[str]] = None
self._data_spec: Optional[PyTree] = None
if self._compression:
if len(_COMPRESSORS) == 0:
raise ValueError("No compresion algorithms are installed.")
if self._compression not in _COMPRESSORS:
raise ValueError(
f"The provided compression {self._compression} isn't available in {sorted(_COMPRESSORS)}"
)
self._compressor: Compressor = _COMPRESSORS[self._compression]
self._serialized_items: Dict[int, Item] = {}
self._chunk_index = 0
self._min_index: Optional[int] = None
self._max_index: Optional[int] = None
self._chunks_info: List[Dict[str, Any]] = []
self._worker_env: Optional[_WorkerEnv] = None
self._rank: Optional[int] = None
self._is_done = False
self._distributed_env = _DistributedEnv.detect()
self._follow_tensor_dimension = follow_tensor_dimension
    @property
    def filled(self) -> bool:
        """Returns whether the caching phase is done.

        The phase is considered done once the expected number of index files
        is present in the cache directory. The result is cached in
        ``self._is_done`` once it becomes True.
        """
        if self._is_done:
            return True
        files = os.listdir(self._cache_dir)
        index_files = [f for f in files if f.endswith(_INDEX_FILENAME)]
        worker_env = _WorkerEnv.detect()
        data_optimiser_num_workers = os.getenv("DATA_OPTIMIZER_NUM_WORKERS", None)
        if data_optimiser_num_workers is not None:
            # Under the data optimizer, the expected writer count is explicit.
            self._is_done = len(index_files) == int(data_optimiser_num_workers)
        else:
            # Otherwise, expect one index file per (distributed rank x dataloader worker).
            self._is_done = len(index_files) == self._distributed_env.world_size * worker_env.world_size
        return self._is_done
    @property
    def rank(self) -> int:
        """Returns the rank of the writer.

        Prefers the `DATA_OPTIMIZER_GLOBAL_RANK` environment variable when set;
        otherwise derives a global rank from the distributed and worker
        environments. The value is computed once and cached.
        """
        if self._rank is None:
            rank = os.getenv("DATA_OPTIMIZER_GLOBAL_RANK", None)
            if rank:
                self._rank = int(rank)
            else:
                self._worker_env = _WorkerEnv.detect()
                self._rank = self._distributed_env.global_rank * self._worker_env.world_size + self._worker_env.rank
        return self._rank
def get_config(self) -> Dict[str, Any]:
"""Returns the config of the writer."""
out = {
"compression": self._compression,
"chunk_size": self._chunk_size,
"chunk_bytes": self._chunk_bytes,
"data_format": self._data_format,
"data_spec": treespec_dumps(self._data_spec) if self._data_spec else None,
}
return out
    def serialize(self, items: Any) -> Tuple[bytes, Optional[int]]:
        """Serialize a dictionary into its binary format.

        Returns:
            A tuple of the serialized bytes and, for single-tensor items,
            the tensor's leading dimension (otherwise None).
        """
        # Flatten the items provided by the users
        flattened, data_spec = tree_flatten(items)
        is_single_tensor = len(flattened) == 1 and isinstance(flattened[0], torch.Tensor)
        # Collect the sizes and associated bytes for each item
        sizes: List[int] = []
        data: List[bytes] = []
        if self._data_format is None:
            # First sample: infer the data format and remember it for later samples.
            data_format: List[str] = []
            for item in flattened:
                data_format.append(self._serialize(item, sizes, data))
            worker_rank = get_worker_rank()
            if worker_rank is not None:
                print(f"Rank {worker_rank} inferred the following `{data_format}` data format.")
            self._data_format = data_format
            self._data_spec = data_spec
        else:
            # tiny optimization to avoid looping over all the data format
            self._serialize_with_data_format(flattened, sizes, data, self._data_format)
        # If there is a single element and it is a tensor, enable continous array.
        if is_single_tensor:
            return data[0], flattened[0].shape[0]
        # Concatenante into a single byte array
        head = np.array(sizes, np.uint32).tobytes()
        body = b"".join(data)
        return head + body, None
    def _serialize(self, item: Any, sizes: List[int], data: List[bytes]) -> str:
        """Serialize a given item and append its size and bytes to the sizes and data array.

        Returns:
            The name of the serializer that handled the item.

        Raises:
            ValueError: If no registered serializer can handle the item.
        """
        for serializer_name, serializer in self._serializers.items():
            if serializer.can_serialize(item):
                serialized_item, name = serializer.serialize(item)
                data.append(serialized_item)
                # Serializers exposing `size` report a fixed size; otherwise use the payload length.
                sizes.append(serializer.size if hasattr(serializer, "size") else len(serialized_item))
                name = name or serializer_name
                # Remember the serializer under its resolved name for the fast path.
                if name and name not in self._serializers_extra:
                    self._serializers_extra[name] = serializer
                return name
        raise ValueError(f"The provided item isn't serializable. Found {item}")
def _serialize_with_data_format(
self, item: Any, sizes: List[int], data: List[bytes], data_format: List[str]
) -> None:
"""Serialize a given item and append its size and bytes to the sizes and data array."""
assert data_format
for element, item_format in zip(item, data_format):
serializer = self._serializers_extra[item_format]
serialized_item, _ = serializer.serialize(element)
data.append(serialized_item)
sizes.append(serializer.size if hasattr(serializer, "size") else len(serialized_item))
    def _create_chunk(self, filename: str, on_done: bool = False) -> bytes:
        """Create a binary chunk from all the binarized items.

        Chunk layout: [num items (uint32)] [absolute item offsets (uint32 array)] [item payloads].

        Args:
            filename: The filename recorded in the chunk metadata.
            on_done: Whether this is the final flush (consume every buffered item).

        Returns:
            The raw chunk bytes.

        Raises:
            RuntimeError: If no items are available to build the chunk.
        """
        items = []
        if on_done:
            # Final flush: the remaining indices must form a contiguous range.
            indices = sorted(self._serialized_items.keys())
            for i in range(len(indices) - 1):
                assert indices[i] == indices[i + 1] - 1, indices
            items = [self._serialized_items.pop(index) for index in indices]
        else:
            assert self._max_index is not None, (self._max_index, self._min_index)
            assert self._min_index is not None, (self._max_index, self._min_index)
            if self._max_index == self._min_index:
                # A single item is larger than the target chunk size; allow the chunk to be bigger than the target size
                items.append(self._serialized_items.pop(self._max_index))
            items.extend(self._serialized_items.pop(index) for index in range(self._min_index, self._max_index))
        if len(items) == 0:
            raise RuntimeError(
                "The items shouldn't have an empty length. Something went wrong."
                f" Found {self._pretty_serialized_items()} with boundaries: {self._min_index}, {self._max_index}."
            )
        num_items = np.uint32(len(items))
        sizes = list(map(len, items))
        # Offsets are relative to the start of the chunk, hence shifted by the header size.
        offsets = np.array([0] + sizes).cumsum().astype(np.uint32)
        offsets += len(num_items.tobytes()) + len(offsets.tobytes())
        sample_data = b"".join([item.data for item in items])
        data = num_items.tobytes() + offsets.tobytes() + sample_data
        current_chunk_bytes = sum([item.bytes for item in items])
        if self._chunk_bytes and current_chunk_bytes > self._chunk_bytes:
            warnings.warn(
                f"An item was larger than the target chunk size ({_human_readable_bytes(self._chunk_bytes)})."
                f" The current chunk will be {_human_readable_bytes(current_chunk_bytes)} in size.",
                UserWarning,
            )
        if self._chunk_size:
            assert num_items.item() <= self._chunk_size
        # Track the accumulated leading dimension across items, when available.
        dim: Optional[int] = None
        if items[0].dim:
            dim = sum([item.dim if item.dim is not None else 0 for item in items])
        chunk_info = {
            "chunk_bytes": current_chunk_bytes,
            "chunk_size": num_items.item(),
            "filename": filename,
            "dim": dim,
        }
        self._chunks_info.append(chunk_info)
        return data
def get_chunk_filename(self) -> str:
    """Return the filename of the chunk currently being written.

    The name encodes the writer rank and the running chunk index; when a
    compression codec is configured, its name is inserted before ``.bin``.
    """
    stem = f"chunk-{self.rank}-{self._chunk_index}"
    suffix = f".{self._compression}.bin" if self._compression else ".bin"
    return stem + suffix
def write_chunk(self, on_done: bool = False) -> str:
    """Materialize the pending items into a chunk file on disk.

    Args:
        on_done: Forwarded to ``_create_chunk``; True flushes all remaining items.

    Returns:
        The local path of the chunk file that was written.
    """
    chunk_name = self.get_chunk_filename()
    raw_bytes = self._create_chunk(chunk_name, on_done=on_done)
    self.write_chunk_to_file(raw_bytes, chunk_name)
    # Advance so the next chunk gets a fresh filename.
    self._chunk_index += 1
    return os.path.join(self._cache_dir, chunk_name)
def __setitem__(self, index: int, items: Any) -> None:
    """Store an item to a chunk.

    The index needs to be provided in order — the samplers handle this
    automatically, which ensures any index can be mapped back to a shard
    from an interval.
    """
    self.add_item(index, items)
def add_item(self, index: int, items: Any) -> Optional[str]:
    """Serialize ``items``, buffer it under ``index``, and flush a chunk when full.

    Args:
        index: Global sample index; must not have been provided before.
        items: The raw sample to serialize.

    Returns:
        The path of the chunk file if a full chunk was flushed, otherwise ``None``.

    Raises:
        ValueError: If ``index`` was already added to the buffer.
    """
    # Serialize the items and store an Item object keyed by its sample index.
    if index in self._serialized_items:
        raise ValueError(f"The provided index {index} already exists in the cache.")
    data, dim = self.serialize(items)
    self._serialized_items[index] = Item(
        index=index,
        data=data,
        bytes=len(data),
        dim=dim,
    )
    if self._should_write():
        # Compute the path first: write_chunk() increments `_chunk_index`,
        # which would change the filename afterwards.
        filepath = os.path.join(self._cache_dir, self.get_chunk_filename())
        self.write_chunk()
        # Reset the window boundaries set by `_should_write` for the next chunk.
        self._min_index = None
        self._max_index = None
        return filepath
def _should_write(self) -> bool:
    """Return True when enough contiguous items are buffered to fill a chunk.

    TODO: Misleading method name — as a side effect this also sets
    ``self._min_index`` and ``self._max_index`` (the flush window).
    """
    if not self._serialized_items:
        return False
    keys = list(self._serialized_items.keys())
    # Start scanning from the smallest buffered index.
    cursor = keys[0] if len(keys) == 1 else min(*keys)
    self._min_index = cursor
    total_bytes = 0
    total_items = 0
    while True:
        entry = self._serialized_items.get(cursor, None)
        if entry is None:
            # Gap in the indices: not enough contiguous data yet.
            return False
        total_bytes += entry.bytes
        # Items with a known `dim` contribute that many units (e.g. tokens).
        total_items += entry.dim if entry.dim else 1
        cursor += 1
        if (self._chunk_bytes and self._chunk_bytes < total_bytes) or (
            self._chunk_size and total_items > self._chunk_size
        ):
            # The item that crossed the threshold marks the window's end.
            self._max_index = cursor - 1
            return True
def write_chunk_to_file(
    self,
    raw_data: bytes,
    filename: str,
) -> None:
    """Persist one chunk's raw bytes under the cache directory.

    Args:
        raw_data: The serialized chunk payload.
        filename: Target filename within the cache directory.
    """
    if self._compression:
        # Compress the payload before it hits the disk.
        raw_data = self._compressor.compress(raw_data)
    target = os.path.join(self._cache_dir, filename)
    with open(target, "wb") as chunk_file:
        chunk_file.write(raw_data)
def write_chunks_index(self) -> str:
    """Dump this rank's chunk metadata to a JSON index file.

    Returns:
        The path of the index file, or ``""`` when no chunk was written.
    """
    if len(self._chunks_info) == 0:
        # Nothing was written by this rank: no index to produce.
        return ""
    index_path = os.path.join(self._cache_dir, f"{self.rank}.{_INDEX_FILENAME}")
    payload = {"chunks": self._chunks_info, "config": self.get_config()}
    with open(index_path, "w") as out:
        json.dump(payload, out, sort_keys=True)
    return index_path
def done(self) -> List[str]:
    """Flush all remaining buffered items and write this rank's index file.

    Called when StopIteration is triggered.

    Returns:
        The paths of any chunk files written during the flush.
    """
    written_paths: List[str] = []
    if self.filled:
        # The dataset was already fully written previously: nothing to do.
        return written_paths
    # Flush every full chunk that is still pending.
    while self._should_write():
        written_paths.append(self.write_chunk())
    # Whatever remains forms one final (possibly undersized) chunk.
    if self._serialized_items:
        written_paths.append(self.write_chunk(True))
    # Persist this rank's chunk metadata.
    self.write_chunks_index()
    self._is_done = True
    return written_paths
def merge(self, num_workers: int = 1, node_rank: Optional[int] = None) -> None:
    """Once all the workers have written their own index, the merge function is responsible to read and merge them
    into a single index.

    Only rank 0 performs the merge; every other rank blocks until the merged
    index file shows up on the shared cache directory.

    Args:
        num_workers: Number of writer processes expected per rank.
        node_rank: When set, a per-node index is written instead of the global one.
    """
    num_workers = num_workers or 1
    # Only for non rank 0
    if self.rank != 0:
        # Busy-wait until rank 0 has produced the merged index.
        while not os.path.exists(os.path.join(self._cache_dir, _INDEX_FILENAME)):
            sleep(0.01)
        return
    # Wait for all indexes to be available
    is_done = False
    while not is_done:
        files = os.listdir(self._cache_dir)
        # Return if the index already exists
        if _INDEX_FILENAME in files:
            return
        index_files = [f for f in files if f.endswith(_INDEX_FILENAME)]
        # When using the Data Optimizer, we don't use multi processes.
        is_done = len(index_files) == self._distributed_env.world_size * num_workers
        sleep(0.01)
    self._merge_no_wait(node_rank=node_rank)
def _merge_no_wait(self, node_rank: Optional[int] = None) -> None:
    """Once all the workers have written their own index, the merge function is responsible to read and merge them
    into a single index.

    Unlike :meth:`merge`, this assumes every per-worker index is already on disk.
    """
    files = os.listdir(self._cache_dir)
    index_files = [f for f in files if f.endswith(_INDEX_FILENAME)]
    chunks_info = []
    config = None
    # Sorted order keeps the merged chunk list deterministic across runs.
    for index_filename in sorted(index_files):
        chunk_path = os.path.join(self._cache_dir, index_filename)
        with open(chunk_path) as f:
            data = json.load(f)
            if config is None:
                config = data["config"]
            elif config != data["config"]:
                # Every worker must have serialized with the same config.
                raise Exception(
                    "The config isn't consistent between chunks. This shouldn't have happened."
                    f"Found {config} {data['config']}."
                )
            chunks_info.extend(data["chunks"])
        # The per-worker index has been merged, so drop it.
        os.remove(chunk_path)
    if node_rank is None:
        with open(os.path.join(self._cache_dir, _INDEX_FILENAME), "w") as f:
            json.dump({"chunks": chunks_info, "config": config}, f, sort_keys=True)
    else:
        # Multi-node run: write a per-node index to be merged again later.
        with open(os.path.join(self._cache_dir, f"{node_rank}-{_INDEX_FILENAME}"), "w") as f:
            json.dump({"chunks": chunks_info, "config": config}, f, sort_keys=True)
def _should_raise(self, data_format_1: List[str], data_format_2: List[str]) -> bool:
    """Return True when the two per-sample data formats are incompatible."""
    if len(data_format_1) != len(data_format_2):
        return True

    def differs(fmt_a: str, fmt_b: str) -> bool:
        # "pil" and "jpeg" are treated as interchangeable image encodings.
        interchangeable = {"pil", "jpeg"}
        if fmt_a in interchangeable and fmt_b in interchangeable:
            return False
        return fmt_a != fmt_b

    return any(differs(a, b) for a, b in zip(data_format_1, data_format_2))
def _pretty_serialized_items(self) -> Dict[int, Item]:
    """Return a log-friendly copy of the buffered items with `data` blanked out."""
    return {
        # Drop the payload bytes: they would make log output unreadable.
        key: Item(index=value.index, bytes=value.bytes, dim=value.dim, data=b"")
        for key, value in self._serialized_items.items()
    }
|
evocodebench_data_140
|
import datetime
import os
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from time import sleep
from typing import Optional, Union
from urllib import parse
from lightning_cloud.openapi import V1CloudSpace
from lightning_cloud.rest_client import LightningClient
# To avoid adding lightning_utilities as a dependency for now.
try:
    # Optional AWS dependencies: only required by the s3:// resolution helpers.
    import boto3
    import botocore
    _BOTO3_AVAILABLE = True
except Exception:
    _BOTO3_AVAILABLE = False
try:
    from lightning_sdk import Machine, Studio
    _LIGHTNING_SDK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    # Fallback stub so annotations referencing `Machine` keep working without the SDK.
    class Machine:  # type: ignore
        pass
    _LIGHTNING_SDK_AVAILABLE = False
@dataclass
class Dir:
    """Holds a directory path and possibly its associated remote URL."""

    # Local filesystem path, if any.
    path: Optional[str] = None
    # Remote location backing the path (e.g. an s3:// URL), if any.
    url: Optional[str] = None
def _resolve_dir(dir_path: Optional[Union[str, Dir]]) -> Dir:
    """Normalize ``dir_path`` into a :class:`Dir`, resolving Lightning teamspace paths.

    Depending on the path prefix, the directory is mapped to a Studio, an S3
    connection, or a datasets bucket URL; plain local paths are made absolute.

    Raises:
        ValueError: If ``dir_path`` is neither ``None``, a string, nor a ``Dir``.
    """
    if isinstance(dir_path, Dir):
        # Return a copy so the caller's Dir isn't shared/mutated.
        return Dir(path=str(dir_path.path) if dir_path.path else None, url=str(dir_path.url) if dir_path.url else None)
    if dir_path is None:
        return Dir()
    if not isinstance(dir_path, str):
        raise ValueError(f"`dir_path` must be a `Dir` or a string, got: {dir_path}")
    assert isinstance(dir_path, str)
    # Remote URLs pass through untouched.
    if dir_path.startswith("s3://"):
        return Dir(path=None, url=dir_path)
    if dir_path.startswith("local:"):
        return Dir(path=None, url=dir_path)
    # Expand a trailing `{%...}` strftime template if present.
    dir_path = _resolve_time_template(dir_path)
    dir_path_absolute = str(Path(dir_path).absolute().resolve())
    # Paths inside the current Studio remain purely local.
    if dir_path_absolute.startswith("/teamspace/studios/this_studio"):
        return Dir(path=dir_path_absolute, url=None)
    # NOTE(review): `/.project` paths fall back to the original (possibly
    # relative) form before the teamspace checks — confirm this is intentional.
    if dir_path_absolute.startswith("/.project"):
        dir_path_absolute = dir_path
    if dir_path_absolute.startswith("/teamspace/studios") and len(dir_path_absolute.split("/")) > 3:
        return _resolve_studio(dir_path_absolute, dir_path_absolute.split("/")[3], None)
    if dir_path_absolute.startswith("/teamspace/s3_connections") and len(dir_path_absolute.split("/")) > 3:
        return _resolve_s3_connections(dir_path_absolute)
    if dir_path_absolute.startswith("/teamspace/datasets") and len(dir_path_absolute.split("/")) > 3:
        return _resolve_datasets(dir_path_absolute)
    return Dir(path=dir_path_absolute, url=None)
def _match_studio(target_id: Optional[str], target_name: Optional[str], cloudspace: V1CloudSpace) -> bool:
    """Return True when the cloudspace matches the requested id, name, or display name."""
    name = cloudspace.name
    if name is not None and target_name is not None and name.lower() == target_name.lower():
        return True
    if target_id is not None and cloudspace.id == target_id:
        return True
    display = cloudspace.display_name
    # Fall back to the human-facing display name, case-insensitively.
    return display is not None and target_name is not None and display.lower() == target_name.lower()
def _resolve_studio(dir_path: str, target_name: Optional[str], target_id: Optional[str]) -> Dir:
    """Map a ``/teamspace/studios/<studio>/...`` path to the Studio's backing S3 URL.

    Raises:
        RuntimeError: If the cluster/project ids aren't exposed in the environment.
        ValueError: If no matching Studio or cluster is found.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    cluster_id = os.getenv("LIGHTNING_CLUSTER_ID", None)
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    if cluster_id is None:
        raise RuntimeError("The `cluster_id` couldn't be found from the environement variables.")
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    clusters = client.cluster_service_list_project_clusters(project_id).clusters
    cloudspaces = client.cloud_space_service_list_cloud_spaces(project_id=project_id, cluster_id=cluster_id).cloudspaces
    # Match by id, name, or display name against every cloudspace in the project.
    target_cloud_space = [cloudspace for cloudspace in cloudspaces if _match_studio(target_id, target_name, cloudspace)]
    if not target_cloud_space:
        raise ValueError(f"We didn't find any matching Studio for the provided name `{target_name}`.")
    target_cluster = [cluster for cluster in clusters if cluster.id == target_cloud_space[0].cluster_id]
    if not target_cluster:
        raise ValueError(
            f"We didn't find a matching cluster associated with the id {target_cloud_space[0].cluster_id}."
        )
    bucket_name = target_cluster[0].spec.aws_v1.bucket_name
    # Drop the `/teamspace/studios/<studio>` prefix (first 4 components) and
    # re-root the remainder under the Studio's content folder in the bucket.
    return Dir(
        path=dir_path,
        url=os.path.join(
            f"s3://{bucket_name}/projects/{project_id}/cloudspaces/{target_cloud_space[0].id}/code/content",
            *dir_path.split("/")[4:],
        ),
    )
def _resolve_s3_connections(dir_path: str) -> Dir:
    """Map a ``/teamspace/s3_connections/<name>/...`` path to the connection's S3 source URL.

    Raises:
        RuntimeError: If the project id isn't exposed in the environment.
        ValueError: If no data connection matches the path's name component.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    # The connection name is the 4th path component: /teamspace/s3_connections/<name>.
    target_name = dir_path.split("/")[3]
    data_connections = client.data_connection_service_list_data_connections(project_id).data_connections
    data_connection = [dc for dc in data_connections if dc.name == target_name]
    if not data_connection:
        raise ValueError(f"We didn't find any matching data connection with the provided name `{target_name}`.")
    # Append the rest of the user path to the connection's S3 source.
    return Dir(path=dir_path, url=os.path.join(data_connection[0].aws.source, *dir_path.split("/")[4:]))
def _resolve_datasets(dir_path: str) -> Dir:
    """Map a ``/teamspace/datasets/...`` path to the project's datasets bucket URL.

    Raises:
        RuntimeError: If cluster/project/cloudspace ids aren't in the environment.
        ValueError: If no matching Studio or cluster is found.
    """
    client = LightningClient(max_tries=2)
    # Get the ids from env variables
    cluster_id = os.getenv("LIGHTNING_CLUSTER_ID", None)
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    cloud_space_id = os.getenv("LIGHTNING_CLOUD_SPACE_ID", None)
    if cluster_id is None:
        raise RuntimeError("The `cluster_id` couldn't be found from the environement variables.")
    if project_id is None:
        raise RuntimeError("The `project_id` couldn't be found from the environement variables.")
    if cloud_space_id is None:
        raise RuntimeError("The `cloud_space_id` couldn't be found from the environement variables.")
    clusters = client.cluster_service_list_project_clusters(project_id).clusters
    # Locate the current Studio's cloudspace to find which cluster (bucket) backs it.
    target_cloud_space = [
        cloudspace
        for cloudspace in client.cloud_space_service_list_cloud_spaces(
            project_id=project_id, cluster_id=cluster_id
        ).cloudspaces
        if cloudspace.id == cloud_space_id
    ]
    if not target_cloud_space:
        raise ValueError(f"We didn't find any matching Studio for the provided id `{cloud_space_id}`.")
    target_cluster = [cluster for cluster in clusters if cluster.id == target_cloud_space[0].cluster_id]
    if not target_cluster:
        raise ValueError(
            f"We didn't find a matching cluster associated with the id {target_cloud_space[0].cluster_id}."
        )
    # Re-root the path (minus `/teamspace/datasets`) under the project's datasets prefix.
    return Dir(
        path=dir_path,
        url=os.path.join(
            f"s3://{target_cluster[0].spec.aws_v1.bucket_name}/projects/{project_id}/datasets/",
            *dir_path.split("/")[3:],
        ),
    )
def _assert_dir_is_empty(output_dir: Dir, append: bool = False, overwrite: bool = False) -> None:
    """Raise if the remote ``output_dir`` already contains any objects.

    Args:
        output_dir: Target directory; only the ``url`` (S3) component is checked.
        append: Currently unused (reserved for future support).
        overwrite: Currently unused (reserved for future support).

    Raises:
        ValueError: If ``output_dir`` isn't a ``Dir`` or its URL isn't ``s3://``.
        RuntimeError: If the S3 prefix already contains objects.
    """
    if not isinstance(output_dir, Dir):
        raise ValueError("The provided output_dir isn't a Dir Object.")
    # Purely local outputs are not validated.
    if output_dir.url is None:
        return
    obj = parse.urlparse(output_dir.url)
    if obj.scheme != "s3":
        raise ValueError(f"The provided folder should start with s3://. Found {output_dir.path}.")
    s3 = boto3.client("s3")
    objects = s3.list_objects_v2(
        Bucket=obj.netloc,
        Delimiter="/",
        Prefix=obj.path.lstrip("/").rstrip("/") + "/",
    )
    # We aren't allowing to add more data
    # TODO: Add support for `append` and `overwrite`.
    if objects["KeyCount"] > 0:
        raise RuntimeError(
            f"The provided output_dir `{output_dir.path}` already contains data and datasets are meant to be immutable."
            " HINT: Did you consider changing the `output_dir` with your own versioning as a suffix?"
        )
def _assert_dir_has_index_file(output_dir: Dir) -> None:
    """Validate that ``output_dir`` doesn't already hold an optimized dataset.

    If the remote folder contains an ``index.json``, the dataset is considered
    immutable and a :class:`RuntimeError` is raised. Otherwise, any leftover
    objects under the prefix are deleted so the new dataset starts clean.

    Fix: the original reused the name ``obj`` for both the parsed URL and the
    S3 loop variable, and rebound ``s3`` from a client to a resource — the
    shadowing is bug-prone and is removed here; behavior is unchanged.

    Args:
        output_dir: Target directory; only the ``url`` (S3) component is checked.

    Raises:
        ValueError: If ``output_dir`` isn't a ``Dir`` or its URL isn't ``s3://``.
        RuntimeError: If an optimized dataset already exists at the location.
    """
    if not isinstance(output_dir, Dir):
        raise ValueError("The provided output_dir isn't a Dir Object.")
    # Purely local outputs are not validated.
    if output_dir.url is None:
        return
    parsed_url = parse.urlparse(output_dir.url)
    if parsed_url.scheme != "s3":
        raise ValueError(f"The provided folder should start with s3://. Found {output_dir.path}.")
    s3_client = boto3.client("s3")
    prefix = parsed_url.path.lstrip("/").rstrip("/") + "/"
    objects = s3_client.list_objects_v2(
        Bucket=parsed_url.netloc,
        Delimiter="/",
        Prefix=prefix,
    )
    # No files are found in this folder
    if objects["KeyCount"] == 0:
        return
    # Check whether the index file exists
    try:
        s3_client.head_object(Bucket=parsed_url.netloc, Key=os.path.join(prefix, "index.json"))
        has_index_file = True
    except botocore.exceptions.ClientError:
        has_index_file = False
    if has_index_file:
        raise RuntimeError(
            f"The provided output_dir `{output_dir.path}` already contains an optimized immutable datasets."
            " HINT: Did you consider changing the `output_dir` with your own versioning as a suffix?"
        )
    # No index file: clear any leftover objects before writing the new dataset.
    bucket_name = parsed_url.netloc
    s3_resource = boto3.resource("s3")
    for s3_object in s3_resource.Bucket(bucket_name).objects.filter(Prefix=prefix):
        s3_resource.Object(bucket_name, s3_object.key).delete()
def _get_lightning_cloud_url() -> str:
    """Return the Lightning Cloud base URL, preferring a local dev server."""
    # detect local development
    proxy_uri = os.getenv("VSCODE_PROXY_URI", "")
    if proxy_uri.startswith("http://localhost:9800"):
        return "http://localhost:9800"
    # DO NOT CHANGE!
    return os.getenv("LIGHTNING_CLOUD_URL", "https://lightning.ai")
def _resolve_time_template(path: str) -> str:
    """Expand a trailing ``{%...}`` strftime template in ``path`` with the current time."""
    if re.search("^.*{%.*}$", path) is None:
        # No template at the end of the path: nothing to substitute.
        return path
    fmt = path.split("{")[1].split("}")[0]
    stamp = datetime.datetime.now().strftime(fmt)
    return path.replace("{" + fmt + "}", stamp)
def _execute(
    name: str,
    num_nodes: int,
    machine: Optional[Machine] = None,
    command: Optional[str] = None,
) -> None:
    """Remotely execute the current operator.

    Submits a data-prep machine job to Lightning Studios that replays the
    current command (or ``command``) on ``num_nodes`` machines, then polls
    until the job reaches a terminal phase.

    Args:
        name: Display name for the job.
        num_nodes: Number of machine instances to run on.
        machine: Machine type; defaults to the current Studio's machine.
        command: Command to run remotely; defaults to re-running ``sys.argv``.

    Raises:
        ModuleNotFoundError: If `lightning_sdk` isn't installed.
        RuntimeError: If the submitted job fails.
    """
    if not _LIGHTNING_SDK_AVAILABLE:
        raise ModuleNotFoundError("The `lightning_sdk` is required.")
    # Propagate the relevant env toggles into the remote command line.
    lightning_skip_install = os.getenv("LIGHTNING_SKIP_INSTALL", "")
    if lightning_skip_install:
        lightning_skip_install = f" LIGHTNING_SKIP_INSTALL={lightning_skip_install} "
    lightning_branch = os.getenv("LIGHTNING_BRANCH", "")
    if lightning_branch:
        lightning_branch = f" LIGHTNING_BRANCH={lightning_branch} "
    studio = Studio()
    job = studio._studio_api.create_data_prep_machine_job(
        command or f"cd {os.getcwd()} &&{lightning_skip_install}{lightning_branch} python {' '.join(sys.argv)}",
        name=name,
        num_instances=num_nodes,
        studio_id=studio._studio.id,
        teamspace_id=studio._teamspace.id,
        cluster_id=studio._studio.cluster_id,
        machine=machine or studio._studio_api.get_machine(studio._studio.id, studio._teamspace.id),
    )
    has_printed = False
    # Poll the job status until it reaches a terminal phase.
    while True:
        curr_job = studio._studio_api._client.lightningapp_instance_service_get_lightningapp_instance(
            project_id=studio._teamspace.id, id=job.id
        )
        if not has_printed:
            # Print the job URL once so the user can follow progress in the UI.
            cloud_url = os.getenv("LIGHTNING_CLOUD_URL", "https://lightning.ai").replace(":443", "")
            job_url = f"{cloud_url}/{studio.owner}/{studio._teamspace.name}"
            job_url += f"/studios/{studio.name}/app?app_id=data-prep&job_name={curr_job.name}"
            print(f"Find your job at {job_url}")
            has_printed = True
        if curr_job.status.phase == "LIGHTNINGAPP_INSTANCE_STATE_FAILED":
            raise RuntimeError(f"job {curr_job.name} failed!")
        if curr_job.status.phase in ["LIGHTNINGAPP_INSTANCE_STATE_STOPPED", "LIGHTNINGAPP_INSTANCE_STATE_COMPLETED"]:
            break
        sleep(1)
|
evocodebench_data_141
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import multiprocessing
import os
import warnings
from logging import Logger
from queue import Empty
from threading import Thread
from typing import Any, Dict, List, Optional, Tuple, Union
from litdata.constants import _TORCH_GREATER_EQUAL_2_1_0
from litdata.streaming.config import ChunksConfig
from litdata.streaming.item_loader import BaseItemLoader, PyTreeLoader
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer, _get_serializers
from litdata.utilities.env import _DistributedEnv, _WorkerEnv
# Silence noisy warnings emitted when tensors wrap shared (read-only) buffers.
warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")
# NOTE(review): this guard looks like a leftover of a conditional import — confirm.
if _TORCH_GREATER_EQUAL_2_1_0:
    pass
logger = Logger(__name__)
# Sentinel value pushed on the download queue to request thread shutdown.
_END_TOKEN = "END"
# Note: The timeout here should not be too short. We need to prevent the caller from aggressively
# querying the queue and consuming too many CPU cycles.
_DEFAULT_TIMEOUT = 0.1
_LONG_DEFAULT_TIMEOUT = 5
class PrepareChunksThread(Thread):
    """This thread is responsible to download the chunks associated to a given worker.

    Chunk indexes to fetch are pushed on ``_to_download_queue``; indexes of fully
    consumed chunks are pushed on ``_to_delete_queue`` so the local cache can be
    trimmed when a ``max_cache_size`` is configured.
    """

    def __init__(
        self,
        config: ChunksConfig,
        item_loader: BaseItemLoader,
        distributed_env: _DistributedEnv,
        max_cache_size: Optional[int] = None,
        max_pre_download: int = 2,
    ) -> None:
        # Daemon thread: must not block interpreter shutdown.
        super().__init__(daemon=True)
        self._config = config
        self._item_loader = item_loader
        # Upper bound on the number of chunks fetched ahead of consumption.
        self._max_pre_download = max_pre_download
        self._pre_download_counter = 0
        self._distributed_env = distributed_env
        # Chunks flagged for deletion, evicted oldest-first once the cache is full.
        self._chunks_index_to_be_deleted: List[int] = []
        self._max_cache_size = max_cache_size
        self._parent_cache_dir = os.path.dirname(self._config._cache_dir)
        self._to_download_queue: multiprocessing.Queue = multiprocessing.Queue()
        self._to_delete_queue: multiprocessing.Queue = multiprocessing.Queue()
        # Check whether a dataset slice fits on the node
        num_bytes_per_nodes = self._config.num_bytes // self._distributed_env.num_nodes
        # If the node's slice doesn't fit in the cache, delete chunks as soon as processed.
        self._delete_chunks_when_processed = num_bytes_per_nodes > max_cache_size if max_cache_size else False
        self._has_exited = False

    def download(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to download for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_download_queue.put(chunk_index)

    def delete(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to delete for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_delete_queue.put(chunk_index)

    def _delete(self, chunk_index: int) -> None:
        """Inform the item loader of the chunk to delete."""
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.delete(chunk_index, chunk_filepath)

    def stop(self) -> None:
        """Ask the thread to terminate by enqueueing the end sentinel."""
        self._to_download_queue.put(_END_TOKEN)

    def _maybe_delete_chunks(self) -> None:
        # When pre-downloading is saturated, wait longer for a consumed-chunk report.
        reached_pre_download = self._pre_download_counter == self._max_pre_download
        # we have already pre-downloaded some chunks, we just need to wait for them to be processed.
        chunk_index = _get_from_queue(
            self._to_delete_queue, timeout=_LONG_DEFAULT_TIMEOUT if reached_pre_download else _DEFAULT_TIMEOUT
        )
        if chunk_index is not None:
            # A pre-downloaded chunk has been consumed: free one download slot.
            self._pre_download_counter -= 1
            # Store the current chunk index
            self._chunks_index_to_be_deleted.append(chunk_index)
        # Get the current cache size and decide whether we need to start cleanup. Otherwise, keep track of it
        while self._max_cache_size and self._chunks_index_to_be_deleted and self._can_delete_chunk():
            # Delete the oldest chunk
            self._delete(self._chunks_index_to_be_deleted.pop(0))
        return

    def _can_delete_chunk(self) -> bool:
        # Decide whether it's time to evict a chunk from the local cache.
        if self._delete_chunks_when_processed:
            return self._pre_download_counter >= self._max_pre_download - 1
        return self._max_cache_size is not None and _get_folder_size(self._parent_cache_dir) >= self._max_cache_size

    def _pre_load_chunk(self, chunk_index: int) -> None:
        # Warm up the item loader with a chunk that is already on disk.
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.pre_load_chunk(chunk_index, chunk_filepath)

    def run(self) -> None:
        # Main loop: download requested chunks and trim the cache, until stopped.
        while True:
            if self._pre_download_counter < self._max_pre_download:
                chunk_index = _get_from_queue(self._to_download_queue)
                if chunk_index == _END_TOKEN:
                    self._has_exited = True
                    return
                if chunk_index is not None:
                    self._config.download_chunk_from_index(chunk_index)
                    # Preload item if possible to gain some time but only
                    # if this is one of the pre-downloaded chunk
                    if self._pre_download_counter > 0:
                        self._pre_load_chunk(chunk_index)
                    # Avoid downloading too many chunks in advance at the risk of over using the disk space
                    self._pre_download_counter += 1
            if self._max_cache_size:
                self._maybe_delete_chunks()
class BinaryReader:
    """Reads samples back from the binary chunk files produced by the writer."""

    def __init__(
        self,
        cache_dir: str,
        max_cache_size: Optional[Union[int, str]] = None,
        remote_input_dir: Optional[str] = None,
        compression: Optional[str] = None,
        item_loader: Optional[BaseItemLoader] = None,
        serializers: Optional[Dict[str, Serializer]] = None,
    ) -> None:
        """The BinaryReader enables to read chunked dataset in an efficient way.

        Arguments:
            cache_dir: The path to cache folder.
            remote_input_dir: The path to a remote folder where the data are located.
                The scheme needs to be added to the path.
            compression: The algorithm to decompress the chunks.
            item_loader: The chunk sampler to create sub arrays from a chunk.
            max_cache_size: The maximum cache size used by the reader when fetching the chunks.
            serializers: Provide your own serializers.

        Raises:
            FileNotFoundError: If ``cache_dir`` doesn't exist.
        """
        super().__init__()
        warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")
        self._cache_dir = cache_dir
        self._remote_input_dir = remote_input_dir
        if not os.path.exists(self._cache_dir):
            raise FileNotFoundError(f"The provided cache_dir `{self._cache_dir}` doesn't exist.")
        self._compression = compression
        self._intervals: Optional[List[str]] = None
        self._serializers: Dict[str, Serializer] = _get_serializers(serializers)
        self._distributed_env = _DistributedEnv.detect()
        # Lazily computed by the `rank` property (requires worker-env detection).
        self._rank: Optional[int] = None
        # Lazily loaded once the index files are available on disk.
        self._config: Optional[ChunksConfig] = None
        # Background downloader; created only when reading remote/compressed data.
        self._prepare_thread: Optional[PrepareChunksThread] = None
        self._item_loader = item_loader or PyTreeLoader()
        self._last_chunk_index: Optional[int] = None
        # NOTE(review): the annotation allows a str `max_cache_size`, but `int(...)`
        # would fail on values such as "10GB" — presumably callers pass ints; confirm.
        self._max_cache_size = int(os.getenv("MAX_CACHE_SIZE", max_cache_size or 0))

    def _get_chunk_index_from_index(self, index: int) -> int:
        """Map a global sample index to the index of the chunk holding it."""
        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self._config._get_chunk_index_from_index(index)  # type: ignore

    def _try_load_config(self) -> Optional[ChunksConfig]:
        """Try to load the chunks config if the index files are available."""
        self._config = ChunksConfig.load(self._cache_dir, self._serializers, self._remote_input_dir, self._item_loader)
        return self._config

    @property
    def config(self) -> ChunksConfig:
        # Accessor that asserts the config has already been loaded.
        if self._config is None:
            raise RuntimeError("The config should be defined.")
        return self._config

    @property
    def rank(self) -> int:
        """Returns the global rank of this reader across nodes and dataloader workers."""
        if self._rank is None:
            self._worker_env = _WorkerEnv.detect()
            self._rank = self._distributed_env.global_rank * self._worker_env.world_size + self._worker_env.rank
        return self._rank

    def read(self, index: ChunkedIndex) -> Any:
        """Read an item for the given from a chunk.

        If the chunk isn't available locally or in memory, it will be downloaded.

        Prefetching should reduce the wait time to be the batch available.

        Raises:
            ValueError: If ``index`` isn't a :class:`ChunkedIndex`.
        """
        if not isinstance(index, ChunkedIndex):
            raise ValueError("The Reader.read(...) method expects a chunked Index.")
        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        if self._config and (self._config._remote_dir or self._config._compressor):
            # Create and start the prepare chunks thread
            if self._prepare_thread is None and self._config:
                self._prepare_thread = PrepareChunksThread(
                    self._config, self._item_loader, self._distributed_env, self._max_cache_size
                )
                self._prepare_thread.start()
                if index.chunk_indexes:
                    # Queue the whole epoch's chunk schedule up-front.
                    self._prepare_thread.download(index.chunk_indexes)
            # If the chunk_index is new, request for it to be downloaded.
            if index.chunk_index != self._last_chunk_index:
                assert self._prepare_thread
                self._prepare_thread.download([index.chunk_index])
            if self._last_chunk_index is None:
                self._last_chunk_index = index.chunk_index
        # Fetch the element
        chunk_filepath, begin, _ = self.config[index]
        item = self._item_loader.load_item_from_chunk(index.index, index.chunk_index, chunk_filepath, begin)
        # We need to request deletion after the latest element has been loaded.
        # Otherwise, this could trigger segmentation fault error depending on the item loader used.
        if self._config and self._config._remote_dir and index.chunk_index != self._last_chunk_index:
            assert self._prepare_thread
            assert self._last_chunk_index is not None
            # inform the chunk has been completely consumed
            self._prepare_thread.delete([self._last_chunk_index])
            # track the new chunk index as the latest one
            self._last_chunk_index = index.chunk_index
        if index.is_last_index and self._prepare_thread:
            # inform the thread it is time to stop
            self._prepare_thread.stop()
            self._prepare_thread = None
        return item

    def get_length(self) -> int:
        """Get the number of samples across all chunks."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return len(self.config)

    def get_chunk_intervals(self) -> List[Tuple[int, int]]:
        """Get the index interval of each chunk."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self.config.intervals

    def __getstate__(self) -> Dict[str, Any]:
        # Threads can't be pickled: drop the downloader when serializing the reader.
        state = self.__dict__.copy()
        state["_prepare_thread"] = None
        return state
def _get_folder_size(path: str) -> int:
    """Sum the sizes of every file under ``path``.

    Robust against files being deleted concurrently while walking.
    """
    total = 0
    for root, _, filenames in os.walk(str(path)):
        for name in filenames:
            # A file may vanish between listing and stat'ing; just skip it.
            with contextlib.suppress(FileNotFoundError):
                total += os.stat(os.path.join(root, name)).st_size
    return total
def _get_from_queue(queue: multiprocessing.Queue, timeout: float = _DEFAULT_TIMEOUT) -> Optional[Any]:
    """Pop one element from ``queue``, returning None on timeout or benign shutdown errors."""
    try:
        return queue.get(timeout=timeout)
    except Empty:
        # Nothing arrived within the timeout window.
        return None
    except EOFError as err:
        logger.debug(err)
        return None
    except OSError as err:
        # handle closed queue before the thread terminates
        benign = "handle is closed" in str(err) or "Bad file descriptor" in str(err)
        if not benign:
            raise
        logger.debug(err)
        return None
|
evocodebench_data_142
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import multiprocessing
import os
import warnings
from logging import Logger
from queue import Empty
from threading import Thread
from typing import Any, Dict, List, Optional, Tuple, Union
from litdata.constants import _TORCH_GREATER_EQUAL_2_1_0
from litdata.streaming.config import ChunksConfig
from litdata.streaming.item_loader import BaseItemLoader, PyTreeLoader
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer, _get_serializers
from litdata.utilities.env import _DistributedEnv, _WorkerEnv
# Silence noisy warnings emitted when tensors wrap shared (read-only) buffers.
warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")
# NOTE(review): this guard looks like a leftover of a conditional import — confirm.
if _TORCH_GREATER_EQUAL_2_1_0:
    pass
logger = Logger(__name__)
# Sentinel value pushed on the download queue to request thread shutdown.
_END_TOKEN = "END"
# Note: The timeout here should not be too short. We need to prevent the caller from aggressively
# querying the queue and consuming too many CPU cycles.
_DEFAULT_TIMEOUT = 0.1
_LONG_DEFAULT_TIMEOUT = 5
class PrepareChunksThread(Thread):
    """This thread is responsible to download the chunks associated to a given worker.

    Chunk indexes to fetch are pushed on ``_to_download_queue``; indexes of fully
    consumed chunks are pushed on ``_to_delete_queue`` so the local cache can be
    trimmed when a ``max_cache_size`` is configured.
    """

    def __init__(
        self,
        config: ChunksConfig,
        item_loader: BaseItemLoader,
        distributed_env: _DistributedEnv,
        max_cache_size: Optional[int] = None,
        max_pre_download: int = 2,
    ) -> None:
        # Daemon thread: must not block interpreter shutdown.
        super().__init__(daemon=True)
        self._config = config
        self._item_loader = item_loader
        # Upper bound on the number of chunks fetched ahead of consumption.
        self._max_pre_download = max_pre_download
        self._pre_download_counter = 0
        self._distributed_env = distributed_env
        # Chunks flagged for deletion, evicted oldest-first once the cache is full.
        self._chunks_index_to_be_deleted: List[int] = []
        self._max_cache_size = max_cache_size
        self._parent_cache_dir = os.path.dirname(self._config._cache_dir)
        self._to_download_queue: multiprocessing.Queue = multiprocessing.Queue()
        self._to_delete_queue: multiprocessing.Queue = multiprocessing.Queue()
        # Check whether a dataset slice fits on the node
        num_bytes_per_nodes = self._config.num_bytes // self._distributed_env.num_nodes
        # If the node's slice doesn't fit in the cache, delete chunks as soon as processed.
        self._delete_chunks_when_processed = num_bytes_per_nodes > max_cache_size if max_cache_size else False
        self._has_exited = False

    def download(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to download for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_download_queue.put(chunk_index)

    def delete(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to delete for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_delete_queue.put(chunk_index)

    def _delete(self, chunk_index: int) -> None:
        """Inform the item loader of the chunk to delete."""
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.delete(chunk_index, chunk_filepath)

    def stop(self) -> None:
        """Ask the thread to terminate by enqueueing the end sentinel."""
        self._to_download_queue.put(_END_TOKEN)

    def _maybe_delete_chunks(self) -> None:
        # When pre-downloading is saturated, wait longer for a consumed-chunk report.
        reached_pre_download = self._pre_download_counter == self._max_pre_download
        # we have already pre-downloaded some chunks, we just need to wait for them to be processed.
        chunk_index = _get_from_queue(
            self._to_delete_queue, timeout=_LONG_DEFAULT_TIMEOUT if reached_pre_download else _DEFAULT_TIMEOUT
        )
        if chunk_index is not None:
            # A pre-downloaded chunk has been consumed: free one download slot.
            self._pre_download_counter -= 1
            # Store the current chunk index
            self._chunks_index_to_be_deleted.append(chunk_index)
        # Get the current cache size and decide whether we need to start cleanup. Otherwise, keep track of it
        while self._max_cache_size and self._chunks_index_to_be_deleted and self._can_delete_chunk():
            # Delete the oldest chunk
            self._delete(self._chunks_index_to_be_deleted.pop(0))
        return

    def _can_delete_chunk(self) -> bool:
        # Decide whether it's time to evict a chunk from the local cache.
        if self._delete_chunks_when_processed:
            return self._pre_download_counter >= self._max_pre_download - 1
        return self._max_cache_size is not None and _get_folder_size(self._parent_cache_dir) >= self._max_cache_size

    def _pre_load_chunk(self, chunk_index: int) -> None:
        # Warm up the item loader with a chunk that is already on disk.
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.pre_load_chunk(chunk_index, chunk_filepath)

    def run(self) -> None:
        # Main loop: download requested chunks and trim the cache, until stopped.
        while True:
            if self._pre_download_counter < self._max_pre_download:
                chunk_index = _get_from_queue(self._to_download_queue)
                if chunk_index == _END_TOKEN:
                    self._has_exited = True
                    return
                if chunk_index is not None:
                    self._config.download_chunk_from_index(chunk_index)
                    # Preload item if possible to gain some time but only
                    # if this is one of the pre-downloaded chunk
                    if self._pre_download_counter > 0:
                        self._pre_load_chunk(chunk_index)
                    # Avoid downloading too many chunks in advance at the risk of over using the disk space
                    self._pre_download_counter += 1
            if self._max_cache_size:
                self._maybe_delete_chunks()
class BinaryReader:
    """Read items back from a chunked binary dataset, downloading remote chunks on demand."""

    def __init__(
        self,
        cache_dir: str,
        max_cache_size: Optional[Union[int, str]] = None,
        remote_input_dir: Optional[str] = None,
        compression: Optional[str] = None,
        item_loader: Optional[BaseItemLoader] = None,
        serializers: Optional[Dict[str, Serializer]] = None,
    ) -> None:
        """The BinaryReader enables to read chunked dataset in an efficient way.

        Arguments:
            cache_dir: The path to cache folder.
            remote_input_dir: The path to a remote folder where the data are located.
                The scheme needs to be added to the path.
            compression: The algorithm to decompress the chunks.
            item_loader: The chunk sampler to create sub arrays from a chunk.
            max_cache_size: The maximum cache size used by the reader when fetching the chunks.
            serializers: Provide your own serializers.

        Raises:
            FileNotFoundError: If ``cache_dir`` does not exist.
        """
        super().__init__()
        warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")
        self._cache_dir = cache_dir
        self._remote_input_dir = remote_input_dir
        if not os.path.exists(self._cache_dir):
            raise FileNotFoundError(f"The provided cache_dir `{self._cache_dir}` doesn't exist.")
        self._compression = compression
        # NOTE(review): never assigned elsewhere in this module and typed List[str],
        # while chunk intervals are tuples — confirm whether this attribute is still used.
        self._intervals: Optional[List[str]] = None
        self._serializers: Dict[str, Serializer] = _get_serializers(serializers)
        self._distributed_env = _DistributedEnv.detect()
        # Computed lazily by the `rank` property.
        self._rank: Optional[int] = None
        # Loaded lazily from the cache dir by `_try_load_config`.
        self._config: Optional[ChunksConfig] = None
        # Background downloader thread; created on first `read` for remote/compressed data.
        self._prepare_thread: Optional[PrepareChunksThread] = None
        self._item_loader = item_loader or PyTreeLoader()
        # Index of the chunk served by the most recent `read` call.
        self._last_chunk_index: Optional[int] = None
        # NOTE(review): `int(...)` raises for non-numeric strings (e.g. "50MB") even though
        # the annotation allows `str` — confirm callers convert such values beforehand.
        self._max_cache_size = int(os.getenv("MAX_CACHE_SIZE", max_cache_size or 0))

    def _get_chunk_index_from_index(self, index: int) -> int:
        """Return the index of the chunk containing the given global sample index."""
        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self._config._get_chunk_index_from_index(index)  # type: ignore

    def _try_load_config(self) -> Optional[ChunksConfig]:
        """Try to load the chunks config if the index files are available."""
        self._config = ChunksConfig.load(self._cache_dir, self._serializers, self._remote_input_dir, self._item_loader)
        return self._config

    @property
    def config(self) -> ChunksConfig:
        # Raises when accessed before the config could be loaded.
        if self._config is None:
            raise RuntimeError("The config should be defined.")
        return self._config

    @property
    def rank(self) -> int:
        """Returns the rank of the writer."""
        if self._rank is None:
            # Combine the distributed rank with the dataloader-worker rank into a global one.
            self._worker_env = _WorkerEnv.detect()
            self._rank = self._distributed_env.global_rank * self._worker_env.world_size + self._worker_env.rank
        return self._rank

    def read(self, index: ChunkedIndex) -> Any:
        """Read an item for the given from a chunk.

        If the chunk isn't available locally or in memory, it will be downloaded.

        Prefetching should reduce the wait time to be the batch available.
        """
        if not isinstance(index, ChunkedIndex):
            raise ValueError("The Reader.read(...) method expects a chunked Index.")

        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")

        if self._config and (self._config._remote_dir or self._config._compressor):
            # Create and start the prepare chunks thread
            if self._prepare_thread is None and self._config:
                self._prepare_thread = PrepareChunksThread(
                    self._config, self._item_loader, self._distributed_env, self._max_cache_size
                )
                self._prepare_thread.start()
                if index.chunk_indexes:
                    # Pre-announce every chunk this worker will need this epoch.
                    self._prepare_thread.download(index.chunk_indexes)

            # If the chunk_index is new, request for it to be downloaded.
            if index.chunk_index != self._last_chunk_index:
                assert self._prepare_thread
                self._prepare_thread.download([index.chunk_index])

            if self._last_chunk_index is None:
                self._last_chunk_index = index.chunk_index

        # Fetch the element
        chunk_filepath, begin, _ = self.config[index]
        item = self._item_loader.load_item_from_chunk(index.index, index.chunk_index, chunk_filepath, begin)

        # We need to request deletion after the latest element has been loaded.
        # Otherwise, this could trigger segmentation fault error depending on the item loader used.
        if self._config and self._config._remote_dir and index.chunk_index != self._last_chunk_index:
            assert self._prepare_thread
            assert self._last_chunk_index is not None

            # inform the chunk has been completely consumed
            self._prepare_thread.delete([self._last_chunk_index])

            # track the new chunk index as the latest one
            self._last_chunk_index = index.chunk_index

        if index.is_last_index and self._prepare_thread:
            # inform the thread it is time to stop
            self._prepare_thread.stop()
            self._prepare_thread = None

        return item

    def get_length(self) -> int:
        """Get the number of samples across all chunks."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return len(self.config)

    def get_chunk_intervals(self) -> List[Tuple[int, int]]:
        """Get the index interval of each chunk."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self.config.intervals

    def __getstate__(self) -> Dict[str, Any]:
        """Exclude the background thread from the pickled state."""
        state = self.__dict__.copy()
        state["_prepare_thread"] = None
        return state
def _get_folder_size(path: str) -> int:
    """Return the total size in bytes of every file under ``path``.

    Robust to files being deleted concurrently while the tree is traversed.
    """
    total_bytes = 0
    for root, _, names in os.walk(str(path)):
        for name in names:
            filepath = os.path.join(root, name)
            try:
                total_bytes += os.stat(filepath).st_size
            except FileNotFoundError:
                # The file was removed between listing and stat'ing it.
                continue
    return total_bytes
def _get_from_queue(queue: multiprocessing.Queue, timeout: float = _DEFAULT_TIMEOUT) -> Optional[Any]:
    """Fetch a single item from ``queue``.

    Returns ``None`` when nothing arrives within ``timeout`` seconds, or when the
    queue is concurrently being torn down (closed handle / EOF).
    """
    try:
        item = queue.get(timeout=timeout)
    except Empty:
        return None
    except EOFError as err:
        logger.debug(err)
        return None
    except OSError as err:
        # A queue closed while the thread terminates can surface as OSError;
        # only that case is treated as benign.
        if "handle is closed" not in str(err) and "Bad file descriptor" not in str(err):
            raise err
        logger.debug(err)
        return None
    else:
        return item
|
evocodebench_data_143
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import multiprocessing
import os
import warnings
from logging import Logger
from queue import Empty
from threading import Thread
from typing import Any, Dict, List, Optional, Tuple, Union
from litdata.constants import _TORCH_GREATER_EQUAL_2_1_0
from litdata.streaming.config import ChunksConfig
from litdata.streaming.item_loader import BaseItemLoader, PyTreeLoader
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer, _get_serializers
from litdata.utilities.env import _DistributedEnv, _WorkerEnv
# Globally suppress the "given buffer is not writable" warning.
warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")

if _TORCH_GREATER_EQUAL_2_1_0:
    pass

# NOTE(review): `Logger(__name__)` builds a logger outside the `logging` hierarchy
# (it inherits no handlers or levels); `logging.getLogger(__name__)` is the
# conventional form — confirm before changing.
logger = Logger(__name__)

# Sentinel pushed on the download queue to ask `PrepareChunksThread.run` to exit.
_END_TOKEN = "END"

# Note: The timeout here should not be too short. We need to prevent the caller from aggressively
# querying the queue and consuming too many CPU cycles.
_DEFAULT_TIMEOUT = 0.1
_LONG_DEFAULT_TIMEOUT = 5
class PrepareChunksThread(Thread):
    """This thread is responsible to download the chunks associated to a given worker."""

    def __init__(
        self,
        config: ChunksConfig,
        item_loader: BaseItemLoader,
        distributed_env: _DistributedEnv,
        max_cache_size: Optional[int] = None,
        max_pre_download: int = 2,
    ) -> None:
        """Create the downloader thread.

        Arguments:
            config: The chunks configuration used to resolve chunk filepaths and downloads.
            item_loader: The loader notified of chunk pre-loads and deletions.
            distributed_env: The distributed environment, used to size the per-node data slice.
            max_cache_size: Optional on-disk cache budget in bytes.
            max_pre_download: Maximum number of chunks downloaded ahead of consumption.
        """
        # Daemon: must not keep the interpreter alive at shutdown.
        super().__init__(daemon=True)
        self._config = config
        self._item_loader = item_loader
        self._max_pre_download = max_pre_download
        # Number of chunks downloaded but not yet consumed by the reader.
        self._pre_download_counter = 0
        self._distributed_env = distributed_env
        # FIFO of chunk indices whose deletion was requested but not yet executed.
        self._chunks_index_to_be_deleted: List[int] = []
        self._max_cache_size = max_cache_size
        self._parent_cache_dir = os.path.dirname(self._config._cache_dir)
        # Work queues drained by `run`.
        self._to_download_queue: multiprocessing.Queue = multiprocessing.Queue()
        self._to_delete_queue: multiprocessing.Queue = multiprocessing.Queue()

        # Check whether a dataset slice fits on the node
        num_bytes_per_nodes = self._config.num_bytes // self._distributed_env.num_nodes
        self._delete_chunks_when_processed = num_bytes_per_nodes > max_cache_size if max_cache_size else False
        self._has_exited = False

    def download(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to download for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_download_queue.put(chunk_index)

    def delete(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to delete for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_delete_queue.put(chunk_index)

    def _delete(self, chunk_index: int) -> None:
        """Inform the item loader of the chunk to delete."""
        # index=-1: only the chunk filepath is needed here, not a specific sample.
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.delete(chunk_index, chunk_filepath)

    def stop(self) -> None:
        """Request the thread to exit by enqueueing the end token on the download queue."""
        self._to_download_queue.put(_END_TOKEN)

    def _maybe_delete_chunks(self) -> None:
        """Consume one pending deletion request (if any) and evict chunks while the cache is over budget."""
        reached_pre_download = self._pre_download_counter == self._max_pre_download
        # we have already pre-downloaded some chunks, we just need to wait for them to be processed.
        chunk_index = _get_from_queue(
            self._to_delete_queue, timeout=_LONG_DEFAULT_TIMEOUT if reached_pre_download else _DEFAULT_TIMEOUT
        )
        if chunk_index is not None:
            # A chunk was fully consumed by the reader: free one pre-download slot.
            self._pre_download_counter -= 1

            # Store the current chunk index
            self._chunks_index_to_be_deleted.append(chunk_index)

        # Get the current cache size and decide whether we need to start cleanup. Otherwise, keep track of it
        while self._max_cache_size and self._chunks_index_to_be_deleted and self._can_delete_chunk():
            # Delete the oldest chunk
            self._delete(self._chunks_index_to_be_deleted.pop(0))

        return

    def _can_delete_chunk(self) -> bool:
        """Return whether the oldest pending chunk may be deleted right now."""
        if self._delete_chunks_when_processed:
            # Dataset slice doesn't fit the cache: evict once the pre-download budget is nearly used up.
            return self._pre_download_counter >= self._max_pre_download - 1
        # Otherwise evict only once the on-disk folder exceeds the configured budget.
        return self._max_cache_size is not None and _get_folder_size(self._parent_cache_dir) >= self._max_cache_size

    def _pre_load_chunk(self, chunk_index: int) -> None:
        """Ask the item loader to warm up the given chunk ahead of reads."""
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.pre_load_chunk(chunk_index, chunk_filepath)

    def run(self) -> None:
        """Main loop: download requested chunks and trigger cache cleanup until the end token arrives."""
        while True:
            if self._pre_download_counter < self._max_pre_download:
                chunk_index = _get_from_queue(self._to_download_queue)
                if chunk_index == _END_TOKEN:
                    self._has_exited = True
                    return

                if chunk_index is not None:
                    self._config.download_chunk_from_index(chunk_index)

                    # Preload item if possible to gain some time but only
                    # if this is one of the pre-downloaded chunk
                    if self._pre_download_counter > 0:
                        self._pre_load_chunk(chunk_index)

                    # Avoid downloading too many chunks in advance at the risk of over using the disk space
                    self._pre_download_counter += 1

            if self._max_cache_size:
                self._maybe_delete_chunks()
class BinaryReader:
    """Read items back from a chunked binary dataset, downloading remote chunks on demand."""

    def __init__(
        self,
        cache_dir: str,
        max_cache_size: Optional[Union[int, str]] = None,
        remote_input_dir: Optional[str] = None,
        compression: Optional[str] = None,
        item_loader: Optional[BaseItemLoader] = None,
        serializers: Optional[Dict[str, Serializer]] = None,
    ) -> None:
        """The BinaryReader enables to read chunked dataset in an efficient way.

        Arguments:
            cache_dir: The path to cache folder.
            remote_input_dir: The path to a remote folder where the data are located.
                The scheme needs to be added to the path.
            compression: The algorithm to decompress the chunks.
            item_loader: The chunk sampler to create sub arrays from a chunk.
            max_cache_size: The maximum cache size used by the reader when fetching the chunks.
            serializers: Provide your own serializers.

        Raises:
            FileNotFoundError: If ``cache_dir`` does not exist.
        """
        super().__init__()
        warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")
        self._cache_dir = cache_dir
        self._remote_input_dir = remote_input_dir
        if not os.path.exists(self._cache_dir):
            raise FileNotFoundError(f"The provided cache_dir `{self._cache_dir}` doesn't exist.")
        self._compression = compression
        # NOTE(review): never assigned elsewhere in this module and typed List[str],
        # while chunk intervals are tuples — confirm whether this attribute is still used.
        self._intervals: Optional[List[str]] = None
        self._serializers: Dict[str, Serializer] = _get_serializers(serializers)
        self._distributed_env = _DistributedEnv.detect()
        # Computed lazily by the `rank` property.
        self._rank: Optional[int] = None
        # Loaded lazily from the cache dir by `_try_load_config`.
        self._config: Optional[ChunksConfig] = None
        # Background downloader thread; created on first `read` for remote/compressed data.
        self._prepare_thread: Optional[PrepareChunksThread] = None
        self._item_loader = item_loader or PyTreeLoader()
        # Index of the chunk served by the most recent `read` call.
        self._last_chunk_index: Optional[int] = None
        # NOTE(review): `int(...)` raises for non-numeric strings (e.g. "50MB") even though
        # the annotation allows `str` — confirm callers convert such values beforehand.
        self._max_cache_size = int(os.getenv("MAX_CACHE_SIZE", max_cache_size or 0))

    def _get_chunk_index_from_index(self, index: int) -> int:
        """Return the index of the chunk containing the given global sample index."""
        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self._config._get_chunk_index_from_index(index)  # type: ignore

    def _try_load_config(self) -> Optional[ChunksConfig]:
        """Try to load the chunks config if the index files are available."""
        self._config = ChunksConfig.load(self._cache_dir, self._serializers, self._remote_input_dir, self._item_loader)
        return self._config

    @property
    def config(self) -> ChunksConfig:
        # Raises when accessed before the config could be loaded.
        if self._config is None:
            raise RuntimeError("The config should be defined.")
        return self._config

    @property
    def rank(self) -> int:
        """Returns the rank of the writer."""
        if self._rank is None:
            # Combine the distributed rank with the dataloader-worker rank into a global one.
            self._worker_env = _WorkerEnv.detect()
            self._rank = self._distributed_env.global_rank * self._worker_env.world_size + self._worker_env.rank
        return self._rank

    def read(self, index: ChunkedIndex) -> Any:
        """Read an item for the given from a chunk.

        If the chunk isn't available locally or in memory, it will be downloaded.

        Prefetching should reduce the wait time to be the batch available.
        """
        if not isinstance(index, ChunkedIndex):
            raise ValueError("The Reader.read(...) method expects a chunked Index.")

        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")

        if self._config and (self._config._remote_dir or self._config._compressor):
            # Create and start the prepare chunks thread
            if self._prepare_thread is None and self._config:
                self._prepare_thread = PrepareChunksThread(
                    self._config, self._item_loader, self._distributed_env, self._max_cache_size
                )
                self._prepare_thread.start()
                if index.chunk_indexes:
                    # Pre-announce every chunk this worker will need this epoch.
                    self._prepare_thread.download(index.chunk_indexes)

            # If the chunk_index is new, request for it to be downloaded.
            if index.chunk_index != self._last_chunk_index:
                assert self._prepare_thread
                self._prepare_thread.download([index.chunk_index])

            if self._last_chunk_index is None:
                self._last_chunk_index = index.chunk_index

        # Fetch the element
        chunk_filepath, begin, _ = self.config[index]
        item = self._item_loader.load_item_from_chunk(index.index, index.chunk_index, chunk_filepath, begin)

        # We need to request deletion after the latest element has been loaded.
        # Otherwise, this could trigger segmentation fault error depending on the item loader used.
        if self._config and self._config._remote_dir and index.chunk_index != self._last_chunk_index:
            assert self._prepare_thread
            assert self._last_chunk_index is not None

            # inform the chunk has been completely consumed
            self._prepare_thread.delete([self._last_chunk_index])

            # track the new chunk index as the latest one
            self._last_chunk_index = index.chunk_index

        if index.is_last_index and self._prepare_thread:
            # inform the thread it is time to stop
            self._prepare_thread.stop()
            self._prepare_thread = None

        return item

    def get_length(self) -> int:
        """Get the number of samples across all chunks."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return len(self.config)

    def get_chunk_intervals(self) -> List[Tuple[int, int]]:
        """Get the index interval of each chunk."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self.config.intervals

    def __getstate__(self) -> Dict[str, Any]:
        """Exclude the background thread from the pickled state."""
        state = self.__dict__.copy()
        state["_prepare_thread"] = None
        return state
def _get_folder_size(path: str) -> int:
    """Return the total size in bytes of every file under ``path``.

    Robust to files being deleted concurrently while the tree is traversed.
    """
    total_bytes = 0
    for root, _, names in os.walk(str(path)):
        for name in names:
            filepath = os.path.join(root, name)
            try:
                total_bytes += os.stat(filepath).st_size
            except FileNotFoundError:
                # The file was removed between listing and stat'ing it.
                continue
    return total_bytes
def _get_from_queue(queue: multiprocessing.Queue, timeout: float = _DEFAULT_TIMEOUT) -> Optional[Any]:
    """Fetch a single item from ``queue``.

    Returns ``None`` when nothing arrives within ``timeout`` seconds, or when the
    queue is concurrently being torn down (closed handle / EOF).
    """
    try:
        item = queue.get(timeout=timeout)
    except Empty:
        return None
    except EOFError as err:
        logger.debug(err)
        return None
    except OSError as err:
        # A queue closed while the thread terminates can surface as OSError;
        # only that case is treated as benign.
        if "handle is closed" not in str(err) and "Bad file descriptor" not in str(err):
            raise err
        logger.debug(err)
        return None
    else:
        return item
|
evocodebench_data_144
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import multiprocessing
import os
import warnings
from logging import Logger
from queue import Empty
from threading import Thread
from typing import Any, Dict, List, Optional, Tuple, Union
from litdata.constants import _TORCH_GREATER_EQUAL_2_1_0
from litdata.streaming.config import ChunksConfig
from litdata.streaming.item_loader import BaseItemLoader, PyTreeLoader
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer, _get_serializers
from litdata.utilities.env import _DistributedEnv, _WorkerEnv
# Globally suppress the "given buffer is not writable" warning.
warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")

if _TORCH_GREATER_EQUAL_2_1_0:
    pass

# NOTE(review): `Logger(__name__)` builds a logger outside the `logging` hierarchy
# (it inherits no handlers or levels); `logging.getLogger(__name__)` is the
# conventional form — confirm before changing.
logger = Logger(__name__)

# Sentinel pushed on the download queue to ask `PrepareChunksThread.run` to exit.
_END_TOKEN = "END"

# Note: The timeout here should not be too short. We need to prevent the caller from aggressively
# querying the queue and consuming too many CPU cycles.
_DEFAULT_TIMEOUT = 0.1
_LONG_DEFAULT_TIMEOUT = 5
class PrepareChunksThread(Thread):
    """This thread is responsible to download the chunks associated to a given worker."""

    def __init__(
        self,
        config: ChunksConfig,
        item_loader: BaseItemLoader,
        distributed_env: _DistributedEnv,
        max_cache_size: Optional[int] = None,
        max_pre_download: int = 2,
    ) -> None:
        """Create the downloader thread.

        Arguments:
            config: The chunks configuration used to resolve chunk filepaths and downloads.
            item_loader: The loader notified of chunk pre-loads and deletions.
            distributed_env: The distributed environment, used to size the per-node data slice.
            max_cache_size: Optional on-disk cache budget in bytes.
            max_pre_download: Maximum number of chunks downloaded ahead of consumption.
        """
        # Daemon: must not keep the interpreter alive at shutdown.
        super().__init__(daemon=True)
        self._config = config
        self._item_loader = item_loader
        self._max_pre_download = max_pre_download
        # Number of chunks downloaded but not yet consumed by the reader.
        self._pre_download_counter = 0
        self._distributed_env = distributed_env
        # FIFO of chunk indices whose deletion was requested but not yet executed.
        self._chunks_index_to_be_deleted: List[int] = []
        self._max_cache_size = max_cache_size
        self._parent_cache_dir = os.path.dirname(self._config._cache_dir)
        # Work queues drained by `run`.
        self._to_download_queue: multiprocessing.Queue = multiprocessing.Queue()
        self._to_delete_queue: multiprocessing.Queue = multiprocessing.Queue()

        # Check whether a dataset slice fits on the node
        num_bytes_per_nodes = self._config.num_bytes // self._distributed_env.num_nodes
        self._delete_chunks_when_processed = num_bytes_per_nodes > max_cache_size if max_cache_size else False
        self._has_exited = False

    def download(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to download for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_download_queue.put(chunk_index)

    def delete(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to delete for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_delete_queue.put(chunk_index)

    def _delete(self, chunk_index: int) -> None:
        """Inform the item loader of the chunk to delete."""
        # index=-1: only the chunk filepath is needed here, not a specific sample.
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.delete(chunk_index, chunk_filepath)

    def stop(self) -> None:
        """Request the thread to exit by enqueueing the end token on the download queue."""
        self._to_download_queue.put(_END_TOKEN)

    def _maybe_delete_chunks(self) -> None:
        """Consume one pending deletion request (if any) and evict chunks while the cache is over budget."""
        reached_pre_download = self._pre_download_counter == self._max_pre_download
        # we have already pre-downloaded some chunks, we just need to wait for them to be processed.
        chunk_index = _get_from_queue(
            self._to_delete_queue, timeout=_LONG_DEFAULT_TIMEOUT if reached_pre_download else _DEFAULT_TIMEOUT
        )
        if chunk_index is not None:
            # A chunk was fully consumed by the reader: free one pre-download slot.
            self._pre_download_counter -= 1

            # Store the current chunk index
            self._chunks_index_to_be_deleted.append(chunk_index)

        # Get the current cache size and decide whether we need to start cleanup. Otherwise, keep track of it
        while self._max_cache_size and self._chunks_index_to_be_deleted and self._can_delete_chunk():
            # Delete the oldest chunk
            self._delete(self._chunks_index_to_be_deleted.pop(0))

        return

    def _can_delete_chunk(self) -> bool:
        """Return whether the oldest pending chunk may be deleted right now."""
        if self._delete_chunks_when_processed:
            # Dataset slice doesn't fit the cache: evict once the pre-download budget is nearly used up.
            return self._pre_download_counter >= self._max_pre_download - 1
        # Otherwise evict only once the on-disk folder exceeds the configured budget.
        return self._max_cache_size is not None and _get_folder_size(self._parent_cache_dir) >= self._max_cache_size

    def _pre_load_chunk(self, chunk_index: int) -> None:
        """Ask the item loader to warm up the given chunk ahead of reads."""
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.pre_load_chunk(chunk_index, chunk_filepath)

    def run(self) -> None:
        """Main loop: download requested chunks and trigger cache cleanup until the end token arrives."""
        while True:
            if self._pre_download_counter < self._max_pre_download:
                chunk_index = _get_from_queue(self._to_download_queue)
                if chunk_index == _END_TOKEN:
                    self._has_exited = True
                    return

                if chunk_index is not None:
                    self._config.download_chunk_from_index(chunk_index)

                    # Preload item if possible to gain some time but only
                    # if this is one of the pre-downloaded chunk
                    if self._pre_download_counter > 0:
                        self._pre_load_chunk(chunk_index)

                    # Avoid downloading too many chunks in advance at the risk of over using the disk space
                    self._pre_download_counter += 1

            if self._max_cache_size:
                self._maybe_delete_chunks()
class BinaryReader:
    """Read items back from a chunked binary dataset, downloading remote chunks on demand."""

    def __init__(
        self,
        cache_dir: str,
        max_cache_size: Optional[Union[int, str]] = None,
        remote_input_dir: Optional[str] = None,
        compression: Optional[str] = None,
        item_loader: Optional[BaseItemLoader] = None,
        serializers: Optional[Dict[str, Serializer]] = None,
    ) -> None:
        """The BinaryReader enables to read chunked dataset in an efficient way.

        Arguments:
            cache_dir: The path to cache folder.
            remote_input_dir: The path to a remote folder where the data are located.
                The scheme needs to be added to the path.
            compression: The algorithm to decompress the chunks.
            item_loader: The chunk sampler to create sub arrays from a chunk.
            max_cache_size: The maximum cache size used by the reader when fetching the chunks.
            serializers: Provide your own serializers.

        Raises:
            FileNotFoundError: If ``cache_dir`` does not exist.
        """
        super().__init__()
        warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")
        self._cache_dir = cache_dir
        self._remote_input_dir = remote_input_dir
        if not os.path.exists(self._cache_dir):
            raise FileNotFoundError(f"The provided cache_dir `{self._cache_dir}` doesn't exist.")
        self._compression = compression
        # NOTE(review): never assigned elsewhere in this module and typed List[str],
        # while chunk intervals are tuples — confirm whether this attribute is still used.
        self._intervals: Optional[List[str]] = None
        self._serializers: Dict[str, Serializer] = _get_serializers(serializers)
        self._distributed_env = _DistributedEnv.detect()
        # Computed lazily by the `rank` property.
        self._rank: Optional[int] = None
        # Loaded lazily from the cache dir by `_try_load_config`.
        self._config: Optional[ChunksConfig] = None
        # Background downloader thread; created on first `read` for remote/compressed data.
        self._prepare_thread: Optional[PrepareChunksThread] = None
        self._item_loader = item_loader or PyTreeLoader()
        # Index of the chunk served by the most recent `read` call.
        self._last_chunk_index: Optional[int] = None
        # NOTE(review): `int(...)` raises for non-numeric strings (e.g. "50MB") even though
        # the annotation allows `str` — confirm callers convert such values beforehand.
        self._max_cache_size = int(os.getenv("MAX_CACHE_SIZE", max_cache_size or 0))

    def _get_chunk_index_from_index(self, index: int) -> int:
        """Return the index of the chunk containing the given global sample index."""
        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self._config._get_chunk_index_from_index(index)  # type: ignore

    def _try_load_config(self) -> Optional[ChunksConfig]:
        """Try to load the chunks config if the index files are available."""
        self._config = ChunksConfig.load(self._cache_dir, self._serializers, self._remote_input_dir, self._item_loader)
        return self._config

    @property
    def config(self) -> ChunksConfig:
        # Raises when accessed before the config could be loaded.
        if self._config is None:
            raise RuntimeError("The config should be defined.")
        return self._config

    @property
    def rank(self) -> int:
        """Returns the rank of the writer."""
        if self._rank is None:
            # Combine the distributed rank with the dataloader-worker rank into a global one.
            self._worker_env = _WorkerEnv.detect()
            self._rank = self._distributed_env.global_rank * self._worker_env.world_size + self._worker_env.rank
        return self._rank

    def read(self, index: ChunkedIndex) -> Any:
        """Read an item for the given from a chunk.

        If the chunk isn't available locally or in memory, it will be downloaded.

        Prefetching should reduce the wait time to be the batch available.
        """
        if not isinstance(index, ChunkedIndex):
            raise ValueError("The Reader.read(...) method expects a chunked Index.")

        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")

        if self._config and (self._config._remote_dir or self._config._compressor):
            # Create and start the prepare chunks thread
            if self._prepare_thread is None and self._config:
                self._prepare_thread = PrepareChunksThread(
                    self._config, self._item_loader, self._distributed_env, self._max_cache_size
                )
                self._prepare_thread.start()
                if index.chunk_indexes:
                    # Pre-announce every chunk this worker will need this epoch.
                    self._prepare_thread.download(index.chunk_indexes)

            # If the chunk_index is new, request for it to be downloaded.
            if index.chunk_index != self._last_chunk_index:
                assert self._prepare_thread
                self._prepare_thread.download([index.chunk_index])

            if self._last_chunk_index is None:
                self._last_chunk_index = index.chunk_index

        # Fetch the element
        chunk_filepath, begin, _ = self.config[index]
        item = self._item_loader.load_item_from_chunk(index.index, index.chunk_index, chunk_filepath, begin)

        # We need to request deletion after the latest element has been loaded.
        # Otherwise, this could trigger segmentation fault error depending on the item loader used.
        if self._config and self._config._remote_dir and index.chunk_index != self._last_chunk_index:
            assert self._prepare_thread
            assert self._last_chunk_index is not None

            # inform the chunk has been completely consumed
            self._prepare_thread.delete([self._last_chunk_index])

            # track the new chunk index as the latest one
            self._last_chunk_index = index.chunk_index

        if index.is_last_index and self._prepare_thread:
            # inform the thread it is time to stop
            self._prepare_thread.stop()
            self._prepare_thread = None

        return item

    def get_length(self) -> int:
        """Get the number of samples across all chunks."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return len(self.config)

    def get_chunk_intervals(self) -> List[Tuple[int, int]]:
        """Get the index interval of each chunk."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self.config.intervals

    def __getstate__(self) -> Dict[str, Any]:
        """Exclude the background thread from the pickled state."""
        state = self.__dict__.copy()
        state["_prepare_thread"] = None
        return state
def _get_folder_size(path: str) -> int:
    """Return the total size in bytes of every file under ``path``.

    Robust to files being deleted concurrently while the tree is traversed.
    """
    total_bytes = 0
    for root, _, names in os.walk(str(path)):
        for name in names:
            filepath = os.path.join(root, name)
            try:
                total_bytes += os.stat(filepath).st_size
            except FileNotFoundError:
                # The file was removed between listing and stat'ing it.
                continue
    return total_bytes
def _get_from_queue(queue: multiprocessing.Queue, timeout: float = _DEFAULT_TIMEOUT) -> Optional[Any]:
    """Fetch a single item from ``queue``.

    Returns ``None`` when nothing arrives within ``timeout`` seconds, or when the
    queue is concurrently being torn down (closed handle / EOF).
    """
    try:
        item = queue.get(timeout=timeout)
    except Empty:
        return None
    except EOFError as err:
        logger.debug(err)
        return None
    except OSError as err:
        # A queue closed while the thread terminates can surface as OSError;
        # only that case is treated as benign.
        if "handle is closed" not in str(err) and "Bad file descriptor" not in str(err):
            raise err
        logger.debug(err)
        return None
    else:
        return item
|
evocodebench_data_145
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import multiprocessing
import os
import warnings
from logging import Logger
from queue import Empty
from threading import Thread
from typing import Any, Dict, List, Optional, Tuple, Union
from litdata.constants import _TORCH_GREATER_EQUAL_2_1_0
from litdata.streaming.config import ChunksConfig
from litdata.streaming.item_loader import BaseItemLoader, PyTreeLoader
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer, _get_serializers
from litdata.utilities.env import _DistributedEnv, _WorkerEnv
# Globally suppress the "given buffer is not writable" warning.
warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")

if _TORCH_GREATER_EQUAL_2_1_0:
    pass

# NOTE(review): `Logger(__name__)` builds a logger outside the `logging` hierarchy
# (it inherits no handlers or levels); `logging.getLogger(__name__)` is the
# conventional form — confirm before changing.
logger = Logger(__name__)

# Sentinel pushed on the download queue to ask `PrepareChunksThread.run` to exit.
_END_TOKEN = "END"

# Note: The timeout here should not be too short. We need to prevent the caller from aggressively
# querying the queue and consuming too many CPU cycles.
_DEFAULT_TIMEOUT = 0.1
_LONG_DEFAULT_TIMEOUT = 5
class PrepareChunksThread(Thread):
"""This thread is responsible to download the chunks associated to a given worker."""
def __init__(
self,
config: ChunksConfig,
item_loader: BaseItemLoader,
distributed_env: _DistributedEnv,
max_cache_size: Optional[int] = None,
max_pre_download: int = 2,
) -> None:
super().__init__(daemon=True)
self._config = config
self._item_loader = item_loader
self._max_pre_download = max_pre_download
self._pre_download_counter = 0
self._distributed_env = distributed_env
self._chunks_index_to_be_deleted: List[int] = []
self._max_cache_size = max_cache_size
self._parent_cache_dir = os.path.dirname(self._config._cache_dir)
self._to_download_queue: multiprocessing.Queue = multiprocessing.Queue()
self._to_delete_queue: multiprocessing.Queue = multiprocessing.Queue()
# Check whether a dataset slice fits on the node
num_bytes_per_nodes = self._config.num_bytes // self._distributed_env.num_nodes
self._delete_chunks_when_processed = num_bytes_per_nodes > max_cache_size if max_cache_size else False
self._has_exited = False
def download(self, chunk_indexes: List[int]) -> None:
"""Receive the list of the chunk indices to download for the current epoch."""
for chunk_index in chunk_indexes:
self._to_download_queue.put(chunk_index)
def delete(self, chunk_indexes: List[int]) -> None:
"""Receive the list of the chunk indices to delete for the current epoch."""
for chunk_index in chunk_indexes:
self._to_delete_queue.put(chunk_index)
def _delete(self, chunk_index: int) -> None:
"""Inform the item loader of the chunk to delete."""
chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
self._item_loader.delete(chunk_index, chunk_filepath)
def stop(self) -> None:
"""Receive the list of the chunk indices to download for the current epoch."""
self._to_download_queue.put(_END_TOKEN)
def _maybe_delete_chunks(self) -> None:
reached_pre_download = self._pre_download_counter == self._max_pre_download
# we have already pre-downloaded some chunks, we just need to wait for them to be processed.
chunk_index = _get_from_queue(
self._to_delete_queue, timeout=_LONG_DEFAULT_TIMEOUT if reached_pre_download else _DEFAULT_TIMEOUT
)
if chunk_index is not None:
self._pre_download_counter -= 1
# Store the current chunk index
self._chunks_index_to_be_deleted.append(chunk_index)
# Get the current cache size and decide whether we need to start cleanup. Otherwise, keep track of it
while self._max_cache_size and self._chunks_index_to_be_deleted and self._can_delete_chunk():
# Delete the oldest chunk
self._delete(self._chunks_index_to_be_deleted.pop(0))
return
def _can_delete_chunk(self) -> bool:
if self._delete_chunks_when_processed:
return self._pre_download_counter >= self._max_pre_download - 1
return self._max_cache_size is not None and _get_folder_size(self._parent_cache_dir) >= self._max_cache_size
def _pre_load_chunk(self, chunk_index: int) -> None:
chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
self._item_loader.pre_load_chunk(chunk_index, chunk_filepath)
    def run(self) -> None:
        """Background loop: download queued chunks and reclaim disk space until stopped."""
        while True:
            if self._pre_download_counter < self._max_pre_download:
                chunk_index = _get_from_queue(self._to_download_queue)
                if chunk_index == _END_TOKEN:
                    # Producer asked us to stop: exit the thread cleanly.
                    self._has_exited = True
                    return
                if chunk_index is not None:
                    self._config.download_chunk_from_index(chunk_index)
                    # Preload item if possible to gain some time but only
                    # if this is one of the pre-downloaded chunk
                    if self._pre_download_counter > 0:
                        self._pre_load_chunk(chunk_index)
                    # Avoid downloading too many chunks in advance at the risk of over using the disk space
                    self._pre_download_counter += 1
            if self._max_cache_size:
                self._maybe_delete_chunks()
class BinaryReader:
    """Reads samples out of a chunked (optionally remote and/or compressed) dataset."""
    def __init__(
        self,
        cache_dir: str,
        max_cache_size: Optional[Union[int, str]] = None,
        remote_input_dir: Optional[str] = None,
        compression: Optional[str] = None,
        item_loader: Optional[BaseItemLoader] = None,
        serializers: Optional[Dict[str, Serializer]] = None,
    ) -> None:
        """The BinaryReader enables to read chunked dataset in an efficient way.
        Arguments:
            cache_dir: The path to cache folder.
            remote_input_dir: The path to a remote folder where the data are located.
                The scheme needs to be added to the path.
            compression: The algorithm to decompress the chunks.
            item_loader: The chunk sampler to create sub arrays from a chunk.
            max_cache_size: The maximum cache size used by the reader when fetching the chunks.
            serializers: Provide your own serializers.
        """
        super().__init__()
        # Some item loaders hand out read-only buffers; silence torch's warning about it.
        warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")
        self._cache_dir = cache_dir
        self._remote_input_dir = remote_input_dir
        if not os.path.exists(self._cache_dir):
            raise FileNotFoundError(f"The provided cache_dir `{self._cache_dir}` doesn't exist.")
        self._compression = compression
        self._intervals: Optional[List[str]] = None
        self._serializers: Dict[str, Serializer] = _get_serializers(serializers)
        self._distributed_env = _DistributedEnv.detect()
        # Rank, config and the prepare thread are created lazily on first use.
        self._rank: Optional[int] = None
        self._config: Optional[ChunksConfig] = None
        self._prepare_thread: Optional[PrepareChunksThread] = None
        self._item_loader = item_loader or PyTreeLoader()
        self._last_chunk_index: Optional[int] = None
        # The MAX_CACHE_SIZE env var takes precedence over the constructor argument.
        self._max_cache_size = int(os.getenv("MAX_CACHE_SIZE", max_cache_size or 0))
    def _get_chunk_index_from_index(self, index: int) -> int:
        """Map a global sample index to the index of the chunk containing it."""
        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self._config._get_chunk_index_from_index(index)  # type: ignore
    def _try_load_config(self) -> Optional[ChunksConfig]:
        """Try to load the chunks config if the index files are available."""
        self._config = ChunksConfig.load(self._cache_dir, self._serializers, self._remote_input_dir, self._item_loader)
        return self._config
    @property
    def config(self) -> ChunksConfig:
        # The config must have been loaded (see `_try_load_config`) before use.
        if self._config is None:
            raise RuntimeError("The config should be defined.")
        return self._config
    @property
    def rank(self) -> int:
        """Returns the rank of the writer."""
        if self._rank is None:
            # Lazily combine the distributed rank with the dataloader-worker rank.
            self._worker_env = _WorkerEnv.detect()
            self._rank = self._distributed_env.global_rank * self._worker_env.world_size + self._worker_env.rank
        return self._rank
    def read(self, index: ChunkedIndex) -> Any:
        """Read an item for the given from a chunk.
        If the chunk isn't available locally or in memory, it will be downloaded.
        Prefetching should reduce the wait time to be the batch available.
        """
        if not isinstance(index, ChunkedIndex):
            raise ValueError("The Reader.read(...) method expects a chunked Index.")
        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        if self._config and (self._config._remote_dir or self._config._compressor):
            # Create and start the prepare chunks thread
            if self._prepare_thread is None and self._config:
                self._prepare_thread = PrepareChunksThread(
                    self._config, self._item_loader, self._distributed_env, self._max_cache_size
                )
                self._prepare_thread.start()
                if index.chunk_indexes:
                    self._prepare_thread.download(index.chunk_indexes)
            # If the chunk_index is new, request for it to be downloaded.
            if index.chunk_index != self._last_chunk_index:
                assert self._prepare_thread
                self._prepare_thread.download([index.chunk_index])
            if self._last_chunk_index is None:
                self._last_chunk_index = index.chunk_index
        # Fetch the element
        chunk_filepath, begin, _ = self.config[index]
        item = self._item_loader.load_item_from_chunk(index.index, index.chunk_index, chunk_filepath, begin)
        # We need to request deletion after the latest element has been loaded.
        # Otherwise, this could trigger segmentation fault error depending on the item loader used.
        if self._config and self._config._remote_dir and index.chunk_index != self._last_chunk_index:
            assert self._prepare_thread
            assert self._last_chunk_index is not None
            # inform the chunk has been completely consumed
            self._prepare_thread.delete([self._last_chunk_index])
            # track the new chunk index as the latest one
            self._last_chunk_index = index.chunk_index
        if index.is_last_index and self._prepare_thread:
            # inform the thread it is time to stop
            self._prepare_thread.stop()
            self._prepare_thread = None
        return item
    def get_length(self) -> int:
        """Get the number of samples across all chunks."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return len(self.config)
    def get_chunk_intervals(self) -> List[Tuple[int, int]]:
        """Get the index interval of each chunk."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self.config.intervals
    def __getstate__(self) -> Dict[str, Any]:
        # Threads can't be pickled: drop the prepare thread from the state.
        state = self.__dict__.copy()
        state["_prepare_thread"] = None
        return state
def _get_folder_size(path: str) -> int:
"""Collect the size of each files within a folder.
This method is robust to file deletion races
"""
size = 0
for dirpath, _, filenames in os.walk(str(path)):
for filename in filenames:
with contextlib.suppress(FileNotFoundError):
size += os.stat(os.path.join(dirpath, filename)).st_size
return size
def _get_from_queue(queue: multiprocessing.Queue, timeout: float = _DEFAULT_TIMEOUT) -> Optional[Any]:
    """Fetch a single item from ``queue``.

    Returns ``None`` when the queue is empty after ``timeout`` seconds or when
    the queue has already been closed (which surfaces as OSError / EOFError).
    """
    try:
        return queue.get(timeout=timeout)
    except Empty:
        return None
    except EOFError as err:
        logger.debug(err)
        return None
    except OSError as err:
        # A closed queue raises OSError; anything else is a genuine failure.
        message = str(err)
        if "handle is closed" in message or "Bad file descriptor" in message:
            logger.debug(err)
            return None
        raise
|
evocodebench_data_146
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pickle
from logging import Logger
from typing import Any, Callable, Dict, Optional
from urllib.parse import urljoin
import requests
import urllib3
# for backwards compatibility
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
# NOTE(review): instantiating `Logger` directly bypasses the `logging` manager's
# registry (handlers/level configured via `logging.getLogger` won't apply) —
# confirm this is intentional; the usual form is `logging.getLogger(__name__)`.
logger = Logger(__name__)
# Retry budget: ~4 days of retries (2880 attempts, exponential backoff capped by urllib3).
_CONNECTION_RETRY_TOTAL = 2880
_CONNECTION_RETRY_BACKOFF_FACTOR = 0.5
_DEFAULT_REQUEST_TIMEOUT = 30  # seconds
class _CustomRetryAdapter(HTTPAdapter):
    """HTTPAdapter that applies a default per-request timeout.

    The timeout is captured from the ``timeout`` constructor kwarg and injected
    into every ``send`` call that doesn't carry its own.
    """
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pop before delegating: HTTPAdapter's __init__ doesn't accept `timeout`.
        self.timeout = kwargs.pop("timeout", _DEFAULT_REQUEST_TIMEOUT)
        super().__init__(*args, **kwargs)
    def send(self, request: Any, *args: Any, **kwargs: Any) -> Any:
        # An explicitly passed timeout (even None) is preserved.
        kwargs["timeout"] = kwargs.get("timeout", self.timeout)
        return super().send(request, **kwargs)
def _response(r: Any, *args: Any, **kwargs: Any) -> Any:
return r.raise_for_status()
class _HTTPClient:
    """A wrapper class around the requests library which handles chores like logging, retries, and timeouts
    automatically."""
    def __init__(
        self,
        base_url: str,
        auth_token: Optional[str] = None,
        log_callback: Optional[Callable] = None,
        use_retry: bool = True,
    ) -> None:
        # NOTE(review): `log_callback` is accepted but never used in this class — confirm.
        self.base_url = base_url
        retry_strategy = Retry(
            # wait time between retries increases exponentially according to: backoff_factor * (2 ** (retry - 1))
            # but the maximum wait time is 120 secs. By setting a large value (2880), we'll make sure clients
            # are going to be alive for a very long time (~ 4 days) but retries every 120 seconds
            total=_CONNECTION_RETRY_TOTAL,
            backoff_factor=_CONNECTION_RETRY_BACKOFF_FACTOR,
            status_forcelist=[
                408,  # Request Timeout
                429,  # Too Many Requests
                500,  # Internal Server Error
                502,  # Bad Gateway
                503,  # Service Unavailable
                504,  # Gateway Timeout
            ],
        )
        adapter = _CustomRetryAdapter(max_retries=retry_strategy, timeout=_DEFAULT_REQUEST_TIMEOUT)
        self.session = requests.Session()
        # Every response goes through `_response`, which raises on HTTP error statuses.
        self.session.hooks = {"response": _response}
        if use_retry:
            self.session.mount("http://", adapter)
            self.session.mount("https://", adapter)
        if auth_token:
            self.session.headers.update({"Authorization": f"Bearer {auth_token}"})
    def get(self, path: str) -> Any:
        """GET ``path`` resolved against ``base_url``."""
        url = urljoin(self.base_url, path)
        return self.session.get(url)
    def post(
        self, path: str, *, query_params: Optional[Dict] = None, data: Optional[bytes] = None, json: Any = None
    ) -> Any:
        """POST to ``path`` resolved against ``base_url``."""
        url = urljoin(self.base_url, path)
        return self.session.post(url, data=data, params=query_params, json=json)
    def delete(self, path: str) -> Any:
        """DELETE ``path`` resolved against ``base_url``."""
        url = urljoin(self.base_url, path)
        return self.session.delete(url)
class _ImmutableDistributedMap:
    """The _ImmutableDistributedMap enables to create a distributed key value pair in the cloud.
    The first process to perform the set operation defines its value.
    """
    def __init__(self) -> None:
        token = _get_token()
        lightning_app_external_url = os.getenv("LIGHTNING_APP_EXTERNAL_URL")
        if lightning_app_external_url is None:
            raise RuntimeError("The `LIGHTNING_APP_EXTERNAL_URL` should be set.")
        # No retries on either client so the fallback to the private URL happens quickly.
        self.public_client: _HTTPClient = _HTTPClient(lightning_app_external_url, auth_token=token, use_retry=False)
        lightning_app_state_url = os.getenv("LIGHTNING_APP_STATE_URL")
        if lightning_app_state_url is None:
            raise RuntimeError("The `LIGHTNING_APP_STATE_URL` should be set.")
        self.private_client: _HTTPClient = _HTTPClient(lightning_app_state_url, auth_token=token, use_retry=False)
    def set_and_get(self, key: str, value: Any) -> Any:
        """Publish ``value`` under ``key`` and return the winning (first-set) value."""
        # Pickle protocol 0 keeps the payload decodable as text for the JSON transport.
        payload = {"key": key, "value": pickle.dumps(value, 0).decode()}
        # Try the public address first
        try:
            resp = self.public_client.post("/broadcast", json=payload)
        except (requests.exceptions.ConnectionError, urllib3.exceptions.MaxRetryError):
            # fallback to the private one
            resp = self.private_client.post("/broadcast", json=payload)
        if resp.status_code != 200:
            raise RuntimeError(f"Failed to broadcast the following {key=} {value=}.")
        return pickle.loads(bytes(resp.json()["value"], "utf-8"))
def broadcast_object(key: str, obj: Any) -> Any:
    """Broadcast ``obj`` across machines under ``key``; a no-op outside the cloud."""
    if os.getenv("LIGHTNING_APP_EXTERNAL_URL") is None:
        # Not running on the Lightning platform: nothing to synchronize.
        return obj
    return _ImmutableDistributedMap().set_and_get(key, obj)
def _get_token() -> Optional[str]:
"""This function tries to retrieve a temporary token."""
if os.getenv("LIGHTNING_CLOUD_URL") is None:
return None
payload = {"apiKey": os.getenv("LIGHTNING_API_KEY"), "username": os.getenv("LIGHTNING_USERNAME")}
url_login = os.getenv("LIGHTNING_CLOUD_URL", "") + "/v1/auth/login"
res = requests.post(url_login, data=json.dumps(payload))
if "token" not in res.json():
raise RuntimeError(
f"You haven't properly setup your environment variables with {url_login} and data: \n{payload}"
)
return res.json()["token"]
|
evocodebench_data_147
|
from typing import Any, List, Tuple
import numpy as np
from litdata.utilities.env import _DistributedEnv
def _intra_node_chunk_shuffle(
    distributed_env: _DistributedEnv,
    chunks_per_ranks: List[List[int]],
    seed: int,
    current_epoch: int,
) -> List[int]:
    """Shuffle chunk indices within each node and return them flattened.

    Chunks are grouped by the node hosting their rank, each node's group is
    permuted with a deterministic epoch-dependent seed, and the groups are
    concatenated back into a single flat list.
    """
    num_nodes = distributed_env.num_nodes
    ranks_per_node = distributed_env.world_size // num_nodes
    per_node: Any = [[] for _ in range(num_nodes)]
    # Group the per-rank chunks by the node that owns each rank.
    for rank, rank_chunks in enumerate(chunks_per_ranks):
        node = 0 if num_nodes == 1 else rank // ranks_per_node
        per_node[node].extend(rank_chunks)
    # Permute every node's chunks; the seed is shared so all ranks agree.
    permuted = [
        np.random.RandomState(seed=seed + current_epoch).permutation(node_chunks)
        for node_chunks in per_node
    ]
    return [index for node_chunks in permuted for index in node_chunks]
def _associate_chunks_and_internals_to_ranks(
    distributed_env: _DistributedEnv,
    indexes: Any,
    chunk_intervals: Any,
    drop_last: bool,
) -> Tuple[List[List[int]], List[Any]]:
    """Split chunks (and their sample intervals) across ranks as evenly as possible.

    Each rank is budgeted ``num_items // world_size`` samples; when ``drop_last``
    is False the last rank also absorbs the remainder. A chunk whose interval
    spans a rank boundary is split: both ranks reference the chunk index, each
    with its own sub-interval.

    Returns:
        ``(chunks_per_ranks, intervals_per_ranks)``, both indexed by rank.
    """
    num_items = sum([(interval[-1] - interval[0]) for interval in chunk_intervals])
    num_items_per_ranks: List[int] = [
        num_items // distributed_env.world_size + num_items % distributed_env.world_size
        if rank == distributed_env.world_size - 1 and not drop_last
        else num_items // distributed_env.world_size
        for rank in range(distributed_env.world_size)
    ]
    chunks_per_ranks: List[List[int]] = [[] for _ in range(distributed_env.world_size)]
    intervals_per_ranks: List[List[List[int]]] = [[] for _ in range(distributed_env.world_size)]
    # 4. Assign the chunk & intervals to each rank
    for chunk_index, chunk_interval in zip(indexes, chunk_intervals):
        rank = 0
        while True:
            if rank == len(num_items_per_ranks):
                # Every rank's budget is exhausted; remaining samples are dropped.
                break
            items_left_to_assign = num_items_per_ranks[rank]
            if items_left_to_assign == 0:
                rank += 1
                continue
            items_in_chunk = chunk_interval[-1] - chunk_interval[0]
            if items_in_chunk == 0:
                # Empty (or fully consumed) chunk: move on to the next one.
                break
            if items_in_chunk > items_left_to_assign:
                # The chunk overflows this rank: split its interval at the budget boundary
                # and carry the remainder over to the next rank.
                chunks_per_ranks[rank].append(chunk_index)
                begin, end = chunk_interval
                intervals_per_ranks[rank].append([begin, begin + items_left_to_assign])
                chunk_interval = (begin + items_left_to_assign, end)
                num_items_per_ranks[rank] = 0
                rank += 1
            else:
                chunks_per_ranks[rank].append(chunk_index)
                intervals_per_ranks[rank].append(chunk_interval)
                num_items_per_ranks[rank] -= items_in_chunk
                break
    return chunks_per_ranks, intervals_per_ranks
|
evocodebench_data_148
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import inspect
import os
from datetime import datetime
from functools import partial
from pathlib import Path
from types import FunctionType
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
from litdata.constants import _IS_IN_STUDIO, _TORCH_GREATER_EQUAL_2_1_0
from litdata.processing.data_processor import DataChunkRecipe, DataProcessor, DataTransformRecipe
from litdata.processing.readers import BaseReader
from litdata.processing.utilities import optimize_dns_context
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import (
Dir,
_assert_dir_has_index_file,
_assert_dir_is_empty,
_execute,
_resolve_dir,
)
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten
def _get_indexed_paths(data: Any) -> Dict[int, str]:
flattened_item, _ = tree_flatten(data)
indexed_paths = {
index: element
for index, element in enumerate(flattened_item)
if isinstance(element, str) and os.path.exists(element)
}
return indexed_paths
def _get_input_dir(inputs: Sequence[Any]) -> Optional[str]:
    """Infer the common input directory (first 4 path components) from the inputs.

    Returns ``None`` when neither of the first two inputs carries a filepath;
    raises when the first input has none while the second does, since every
    element should have filepaths if any contains one.
    """
    indexed_paths = _get_indexed_paths(inputs[0])
    if not indexed_paths:
        # Check whether the second element has any input_path
        if not _get_indexed_paths(inputs[1]):
            return None
        # Every element should have filepaths if any contains one.
        raise ValueError(f"The provided item {inputs[0]} didn't contain any filepaths.")
    first_path = list(indexed_paths.values())[0]
    absolute_path = str(Path(first_path).resolve())
    if "/.project" in absolute_path:
        # Studio project path: keep the original (unresolved) components.
        return "/" + os.path.join(*str(first_path).split("/")[:4])
    return "/" + os.path.join(*str(absolute_path).split("/")[:4])
def _get_default_num_workers() -> int:
if torch.cuda.is_available():
return torch.cuda.device_count()
return os.cpu_count() or 1
class LambdaDataTransformRecipe(DataTransformRecipe):
    """Adapts a user callback ``fn(item, output_dir, ...)`` into a DataTransformRecipe."""
    def __init__(self, fn: Callable[[str, Any], None], inputs: Sequence[Any]):
        super().__init__()
        self._fn = fn
        self._inputs = inputs
        self._device: Optional[str] = None
        # Inspect the actual callable (callable objects are unwrapped to their __call__).
        _fn = self._fn if isinstance(self._fn, FunctionType) else self._fn.__call__  # type: ignore
        params = inspect.signature(_fn).parameters
        # Optional kwargs are only forwarded when `fn` declares them.
        self._contains_device = "device" in params
        self._contains_is_last = "is_last" in params
    def prepare_structure(self, _: Optional[str]) -> Any:
        # The provided inputs are the work items themselves.
        return self._inputs
    def prepare_item(self, item_metadata: Any, output_dir: str, is_last: bool) -> None:
        """Invoke the user callback on one item, forwarding the optional kwargs it accepts."""
        if self._contains_device and self._device is None:
            self._find_device()
        kwargs: Dict[str, Any] = {}
        if self._contains_device:
            kwargs["device"] = self._device
        if self._contains_is_last:
            kwargs["is_last"] = is_last
        if isinstance(self._fn, (FunctionType, partial)):
            self._fn(item_metadata, output_dir, **kwargs)
        elif callable(self._fn):
            self._fn.__call__(item_metadata, output_dir, **kwargs)  # type: ignore
        else:
            raise ValueError(f"The provided {self._fn} isn't supported.")
    def _find_device(self) -> None:
        # Pick a CUDA device for this worker: round-robin over GPUs by global rank.
        global_rank = os.getenv("DATA_OPTIMIZER_GLOBAL_RANK", None)
        if torch.cuda.is_available() and global_rank:
            num_gpus = torch.cuda.device_count()
            device = int(global_rank) % num_gpus
            self._device = f"cuda:{device}"
class LambdaDataChunkRecipe(DataChunkRecipe):
    """Adapts a user callback into a DataChunkRecipe, supporting plain and generator callables."""
    def __init__(
        self,
        fn: Callable[[Any], None],
        inputs: Sequence[Any],
        chunk_size: Optional[int],
        chunk_bytes: Optional[Union[int, str]],
        compression: Optional[str],
    ):
        super().__init__(chunk_size=chunk_size, chunk_bytes=chunk_bytes, compression=compression)
        self._fn = fn
        self._inputs = inputs
    def prepare_structure(self, input_dir: Optional[str]) -> Any:
        # The provided inputs are the work items themselves.
        return self._inputs
    def prepare_item(self, item_metadata: Any) -> Any:
        """Yield the result(s) of the user callback for one item.

        Generator callables are re-yielded element by element; plain callables
        contribute a single yielded value.
        """
        if isinstance(self._fn, partial):
            # NOTE(review): a `partial` is always `yield from`-ed, which assumes
            # it wraps a generator (or returns an iterable) — confirm.
            yield from self._fn(item_metadata)
        elif isinstance(self._fn, FunctionType):
            if inspect.isgeneratorfunction(self._fn):
                yield from self._fn(item_metadata)
            else:
                yield self._fn(item_metadata)
        elif callable(self._fn):
            if inspect.isgeneratorfunction(self._fn.__call__):  # type: ignore
                yield from self._fn.__call__(item_metadata)  # type: ignore
            else:
                yield self._fn.__call__(item_metadata)  # type: ignore
        else:
            raise ValueError(f"The provided {self._fn} isn't supported.")
def map(
    fn: Callable[[str, Any], None],
    inputs: Sequence[Any],
    output_dir: Union[str, Dir],
    weights: Optional[List[int]] = None,
    num_workers: Optional[int] = None,
    fast_dev_run: Union[bool, int] = False,
    num_nodes: Optional[int] = None,
    machine: Optional[str] = None,
    num_downloaders: Optional[int] = None,
    num_uploaders: Optional[int] = None,
    reorder_files: bool = True,
    error_when_not_empty: bool = False,
    reader: Optional[BaseReader] = None,
    batch_size: Optional[int] = None,
) -> None:
    """This function maps a callable over a collection of files possibly in a distributed way.
    Arguments:
        fn: A function to be executed over each input element
        inputs: A sequence of input to be processed by the `fn` function.
            Each input should contain at least a valid filepath.
        output_dir: The folder where the processed data should be stored.
        weights: Provide an associated weight to each input. This is used to balance work among workers.
        num_workers: The number of workers to use during processing
        fast_dev_run: Whether to use process only a sub part of the inputs
        num_nodes: When doing remote execution, the number of nodes to use. Only supported on https://lightning.ai/.
        machine: When doing remote execution, the machine to use. Only supported on https://lightning.ai/.
        num_downloaders: The number of downloaders per worker.
        num_uploaders: The number of uploaders per workers.
        reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
            Set this to ``False`` if the order in which samples are processed should be preserved.
        error_when_not_empty: Whether we should error if the output folder isn't empty.
        reader: An optional reader used to load the inputs.
        batch_size: Group the inputs into batches of batch_size length.
    """
    # --- Validate the inputs / environment combination up front. ---
    if isinstance(inputs, StreamingDataLoader) and batch_size is not None:
        raise ValueError("When providing a streaming dataloader, pass the batch_size to the dataloader directly.")
    if isinstance(inputs, StreamingDataLoader) and weights is not None:
        raise ValueError("When providing a streaming dataloader, weights isn't supported.")
    if not isinstance(inputs, (Sequence, StreamingDataLoader)):
        raise ValueError(f"The provided inputs should be non empty sequence or a streaming dataloader. Found {inputs}.")
    if len(inputs) == 0:
        raise ValueError(f"The provided inputs should be non empty. Found {inputs}.")
    if not _IS_IN_STUDIO and (machine is not None or num_nodes is not None):
        raise ValueError(
            "Only https://lightning.ai/ supports multiple nodes or selecting a machine."
            " Create an account to try it out."
        )
    if not _IS_IN_STUDIO:
        print(
            "Create an account on https://lightning.ai/ to transform your data faster using "
            "multiple nodes and large machines."
        )
    # Run locally, or on this node when already inside a multi-node cloud job.
    if num_nodes is None or int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 0)) > 0:
        _output_dir: Dir = _resolve_dir(output_dir)
        if _output_dir.url and "cloudspaces" in _output_dir.url:
            raise ValueError(
                f"The provided `output_dir` isn't valid. Found {_output_dir.path if _output_dir else None}."
                " HINT: You can either use `/teamspace/s3_connections/...` or `/teamspace/datasets/...`."
            )
        if error_when_not_empty:
            _assert_dir_is_empty(_output_dir)
        if not isinstance(inputs, StreamingDataLoader):
            input_dir = _resolve_dir(_get_input_dir(inputs))
            if isinstance(batch_size, int) and batch_size > 1:
                # Re-group the inputs into lists of `batch_size` elements.
                inputs = [inputs[pos : pos + batch_size] for pos in range(0, len(inputs), batch_size)]
        else:
            input_dir = Dir()
        data_processor = DataProcessor(
            input_dir=input_dir,
            output_dir=_output_dir,
            num_workers=num_workers or _get_default_num_workers(),
            fast_dev_run=fast_dev_run,
            num_downloaders=num_downloaders,
            num_uploaders=num_uploaders,
            reorder_files=reorder_files,
            weights=weights,
            reader=reader,
        )
        with optimize_dns_context(True):
            return data_processor.run(LambdaDataTransformRecipe(fn, inputs))
    # Otherwise launch the job remotely on the Lightning platform.
    return _execute(
        f"data-prep-map-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}",
        num_nodes,
        machine,
    )
def optimize(
    fn: Callable[[Any], Any],
    inputs: Sequence[Any],
    output_dir: str,
    weights: Optional[List[int]] = None,
    chunk_size: Optional[int] = None,
    chunk_bytes: Optional[Union[int, str]] = None,
    compression: Optional[str] = None,
    num_workers: Optional[int] = None,
    fast_dev_run: bool = False,
    num_nodes: Optional[int] = None,
    machine: Optional[str] = None,
    num_downloaders: Optional[int] = None,
    num_uploaders: Optional[int] = None,
    reorder_files: bool = True,
    reader: Optional[BaseReader] = None,
    batch_size: Optional[int] = None,
) -> None:
    """This function converts a dataset into chunks possibly in a distributed way.
    Arguments:
        fn: A function to be executed over each input element
        inputs: A sequence of input to be processed by the `fn` function.
            Each input should contain at least a valid filepath.
        output_dir: The folder where the processed data should be stored.
        weights: Provide an associated weight to each input. This is used to balance work among workers.
        chunk_size: The maximum number of elements to hold within a chunk.
        chunk_bytes: The maximum number of bytes to hold within a chunk.
        compression: The compression algorithm to use over the chunks.
        num_workers: The number of workers to use during processing
        fast_dev_run: Whether to use process only a sub part of the inputs
        num_nodes: When doing remote execution, the number of nodes to use. Only supported on https://lightning.ai/.
        machine: When doing remote execution, the machine to use. Only supported on https://lightning.ai/.
        num_downloaders: The number of downloaders per worker.
        num_uploaders: The numbers of uploaders per worker.
        reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
            Set this to ``False`` if the order in which samples are processed should be preserved.
        reader: An optional reader used to load the inputs.
        batch_size: Group the inputs into batches of batch_size length.
    """
    # --- Validate the inputs / environment combination up front. ---
    if isinstance(inputs, StreamingDataLoader) and batch_size is not None:
        raise ValueError("When providing a streaming dataloader, pass the batch_size to the dataloader directly.")
    if isinstance(inputs, StreamingDataLoader) and weights is not None:
        raise ValueError("When providing a streaming dataloader, weights isn't supported.")
    if not isinstance(inputs, (Sequence, StreamingDataLoader)):
        raise ValueError(f"The provided inputs should be non empty sequence or a streaming dataloader. Found {inputs}.")
    if len(inputs) == 0:
        raise ValueError(f"The provided inputs should be non empty. Found {inputs}.")
    if chunk_size is None and chunk_bytes is None:
        raise ValueError("Either `chunk_size` or `chunk_bytes` needs to be defined.")
    if not _IS_IN_STUDIO and (machine is not None or num_nodes is not None):
        raise ValueError(
            "Only https://lightning.ai/ supports multiple nodes or selecting a machine."
            "Create an account to try it out."
        )
    if not _IS_IN_STUDIO:
        print(
            "Create an account on https://lightning.ai/ to optimize your data faster "
            "using multiple nodes and large machines."
        )
    # Run locally, or on this node when already inside a multi-node cloud job.
    if num_nodes is None or int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 0)) > 0:
        _output_dir: Dir = _resolve_dir(output_dir)
        if _output_dir.url is not None and "cloudspaces" in _output_dir.url:
            raise ValueError(
                f"The provided `output_dir` isn't valid. Found {_output_dir.path}."
                " HINT: You can either use `/teamspace/s3_connections/...` or `/teamspace/datasets/...`."
            )
        _assert_dir_has_index_file(_output_dir)
        if not isinstance(inputs, StreamingDataLoader):
            input_dir = _resolve_dir(_get_input_dir(inputs))
            if isinstance(batch_size, int) and batch_size > 1:
                # Re-group the inputs into lists of `batch_size` elements.
                inputs = [inputs[pos : pos + batch_size] for pos in range(0, len(inputs), batch_size)]
        else:
            input_dir = Dir()
        data_processor = DataProcessor(
            input_dir=input_dir,
            output_dir=_output_dir,
            num_workers=num_workers or _get_default_num_workers(),
            fast_dev_run=fast_dev_run,
            num_downloaders=num_downloaders,
            num_uploaders=num_uploaders,
            reorder_files=reorder_files,
            reader=reader,
        )
        with optimize_dns_context(True):
            data_processor.run(
                LambdaDataChunkRecipe(
                    fn,
                    inputs,
                    chunk_size=chunk_size,
                    chunk_bytes=chunk_bytes,
                    compression=compression,
                )
            )
        return None
    # Otherwise launch the job remotely on the Lightning platform.
    return _execute(
        f"data-prep-optimize-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}",
        num_nodes,
        machine,
    )
def _listdir(folder: str) -> Tuple[str, List[str]]:
return folder, os.listdir(folder)
class walk:
    """This class is an optimized version of os.walk for listing files and folders from cloud filesystem.
    Note: The order of files and folders yielded aren't depth-first anymore due to the asynchronous listing call.
    """
    def __init__(self, folder: str, max_workers: Optional[int] = os.cpu_count()) -> None:
        # FIFO work queue of folders still waiting to be listed.
        self.folders = [folder]
        self.max_workers = max_workers or 1
        self.futures: List[concurrent.futures.Future] = []
        if not _IS_IN_STUDIO:
            print("This method is optimized to run on https://lightning.ai/. Don't use it otherwise.")
    def __iter__(self) -> Any:
        """This function queues the folders to perform listdir across multiple workers."""
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            while len(self.folders):
                folder = self.folders.pop(0)
                future = executor.submit(_listdir, folder)
                self.futures.append(future)
            while self.futures:
                for future in concurrent.futures.as_completed(self.futures):
                    filenames = []
                    folders = []
                    folder, files_or_folders = future.result()
                    self.futures = [f for f in self.futures if f != future]
                    for file_or_folder in files_or_folders:
                        if os.path.isfile(os.path.join(folder, file_or_folder)):
                            filenames.append(file_or_folder)
                        else:
                            # NOTE(review): every non-file entry (dirs, but also e.g.
                            # broken symlinks) is treated as a folder and re-queued — confirm.
                            folders.append(file_or_folder)
                            self.folders.append(os.path.join(folder, file_or_folder))
                    yield folder, folders, filenames
                    # Keep the executor fed while bounding the in-flight listings.
                    while len(self.folders) and len(self.futures) <= self.max_workers * 2:
                        folder = self.folders.pop(0)
                        future = executor.submit(_listdir, folder)
                        self.futures.append(future)
        return
|
evocodebench_data_149
|
import io
import os
import urllib
from contextlib import contextmanager
from subprocess import DEVNULL, Popen
from typing import Any, Callable, List, Optional, Tuple, Union
from litdata.constants import _IS_IN_STUDIO, _LIGHTNING_CLOUD_LATEST
if _LIGHTNING_CLOUD_LATEST:
from lightning_cloud.openapi import (
ProjectIdDatasetsBody,
V1DatasetType,
)
from lightning_cloud.openapi.rest import ApiException
from lightning_cloud.rest_client import LightningClient
def _create_dataset(
    input_dir: Optional[str],
    storage_dir: str,
    dataset_type: V1DatasetType,
    empty: Optional[bool] = None,
    size: Optional[int] = None,
    num_bytes: Optional[str] = None,
    data_format: Optional[Union[str, Tuple[str]]] = None,
    compression: Optional[str] = None,
    num_chunks: Optional[int] = None,
    num_bytes_per_chunk: Optional[List[int]] = None,
    name: Optional[str] = None,
    version: Optional[int] = None,
) -> None:
    """Create a dataset with metadata information about its source and destination.

    No-op outside the Lightning cloud (when `LIGHTNING_CLOUD_PROJECT_ID` is unset).
    An "already exists" API error is swallowed, making the call idempotent.
    """
    project_id = os.getenv("LIGHTNING_CLOUD_PROJECT_ID", None)
    cluster_id = os.getenv("LIGHTNING_CLUSTER_ID", None)
    user_id = os.getenv("LIGHTNING_USER_ID", None)
    cloud_space_id = os.getenv("LIGHTNING_CLOUD_SPACE_ID", None)
    lightning_app_id = os.getenv("LIGHTNING_CLOUD_APP_ID", None)
    if project_id is None:
        return
    if not storage_dir:
        raise ValueError("The storage_dir should be defined.")
    client = LightningClient(retry=False)
    try:
        client.dataset_service_create_dataset(
            body=ProjectIdDatasetsBody(
                # The dataset is attributed either to a cloud space or to an app, not both.
                cloud_space_id=cloud_space_id if lightning_app_id is None else None,
                cluster_id=cluster_id,
                creator_id=user_id,
                empty=empty,
                input_dir=input_dir,
                lightning_app_id=lightning_app_id,
                name=name,
                size=size,
                num_bytes=num_bytes,
                data_format=str(data_format) if data_format else data_format,
                compression=compression,
                num_chunks=num_chunks,
                num_bytes_per_chunk=num_bytes_per_chunk,
                storage_dir=storage_dir,
                type=dataset_type,
                version=version,
            ),
            project_id=project_id,
        )
    except ApiException as ex:
        # Re-running the same job recreates the same dataset: tolerate duplicates.
        if "already exists" in str(ex.body):
            pass
        else:
            raise ex
def get_worker_rank() -> Optional[str]:
    """Return this worker's global rank from the environment, or ``None`` when unset."""
    return os.environ.get("DATA_OPTIMIZER_GLOBAL_RANK")
def catch(func: Callable) -> Callable:
    """Wrap ``func`` to return ``(result, None)`` on success or ``(None, exc)`` on failure.

    The wrapper never raises for ordinary exceptions; it captures them so the
    caller can inspect the error alongside the (absent) result.
    """
    import functools  # local import: the module header isn't guaranteed to import functools

    # `wraps` preserves the wrapped function's name/docstring for logging & debugging.
    @functools.wraps(func)
    def _wrapper(*args: Any, **kwargs: Any) -> Tuple[Any, Optional[Exception]]:
        try:
            return func(*args, **kwargs), None
        except Exception as e:
            return None, e
    return _wrapper
# Credit to the https://github.com/rom1504/img2dataset Github repo
# The code was taken from there. It has a MIT License.
def make_request(
    url: str,
    timeout: int = 10,
    user_agent_token: str = "pytorch-lightning",
) -> io.BytesIO:
    """Download the resource at ``url`` and return its bytes wrapped in a BytesIO.

    Arguments:
        url: The resource to fetch.
        timeout: Socket timeout in seconds for the request.
        user_agent_token: Project token appended to the User-Agent header.
    """
    # `import urllib` alone does not guarantee the `urllib.request` submodule is
    # loaded; import it explicitly so the calls below cannot AttributeError.
    import urllib.request
    user_agent_string = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0"
    if user_agent_token:
        user_agent_string += f" (compatible; {user_agent_token}; +https://github.com/Lightning-AI/pytorch-lightning)"
    request = urllib.request.Request(url, data=None, headers={"User-Agent": user_agent_string})
    with urllib.request.urlopen(request, timeout=timeout) as r:  # noqa: S310
        img_stream = io.BytesIO(r.read())
    return img_stream
@contextmanager
def optimize_dns_context(enable: bool) -> Any:
    """Context manager that toggles the Studio DNS optimization around its body.

    The optimization is always switched off on exit. Using ``finally`` (rather
    than ``except Exception``) also covers BaseException such as
    KeyboardInterrupt, which would previously have left the override enabled.
    """
    optimize_dns(enable)
    try:
        yield
    finally:
        optimize_dns(False)  # always disable the optimize DNS
def optimize_dns(enable: bool) -> None:
    """Toggle the studio DNS optimization by re-running the privileged helper when needed."""
    if not _IS_IN_STUDIO:
        return
    with open("/etc/resolv.conf") as resolv:
        content = resolv.readlines()
    has_default_resolver = any("127.0.0.53" in line for line in content)
    has_local_resolver = any("127.0.0.1" in line for line in content)
    # Only spawn the sudo helper when the file is not already in the desired state.
    if (enable and has_default_resolver) or (not enable and has_local_resolver):
        cmd = (
            f"sudo /home/zeus/miniconda3/envs/cloudspace/bin/python"
            f" -c 'from litdata.processing.utilities import _optimize_dns; _optimize_dns({enable})'"
        )
        Popen(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL).wait()  # E501
def _optimize_dns(enable: bool) -> None:
    """Rewrite /etc/resolv.conf, swapping the nameserver between 127.0.0.1 and 127.0.0.53."""
    with open("/etc/resolv.conf") as resolv:
        original = resolv.readlines()
    # Replace any loopback nameserver line with the requested resolver; keep other lines.
    target = "nameserver 127.0.0.1\n" if enable else "nameserver 127.0.0.53\n"
    updated = [target if "nameserver 127" in line else line for line in original]
    with open("/etc/resolv.conf", "w") as resolv:
        resolv.writelines(updated)
|
evocodebench_data_150
|
from typing import Any, List, Tuple
import numpy as np
from litdata.utilities.env import _DistributedEnv
def _intra_node_chunk_shuffle(
    distributed_env: _DistributedEnv,
    chunks_per_ranks: List[List[int]],
    seed: int,
    current_epoch: int,
) -> List[int]:
    """Shuffle the chunk indexes within each node and return them flattened."""
    num_nodes = distributed_env.num_nodes
    ranks_per_node = distributed_env.world_size // num_nodes
    # Group every rank's chunks under the node that owns the rank.
    per_node: Any = [[] for _ in range(num_nodes)]
    for rank, rank_chunks in enumerate(chunks_per_ranks):
        node = 0 if num_nodes == 1 else rank // ranks_per_node
        per_node[node].extend(rank_chunks)
    # Permute each node's chunks with a deterministic, epoch-dependent seed.
    # The same seed is used for every node so all processes agree on the ordering.
    rng_seed = seed + current_epoch
    shuffled = [np.random.RandomState(seed=rng_seed).permutation(chunks) for chunks in per_node]
    return [index for chunks in shuffled for index in chunks]
def _associate_chunks_and_internals_to_ranks(
    distributed_env: _DistributedEnv,
    indexes: Any,
    chunk_intervals: Any,
    drop_last: bool,
) -> Tuple[List[List[int]], List[Any]]:
    """Partition chunks (and the item intervals within them) across all ranks.

    Every rank targets ``num_items // world_size`` items; when ``drop_last`` is
    False the last rank also absorbs the remainder. A chunk holding more items
    than a rank still needs is split: the rank takes a prefix of its interval
    and the rest carries over to the following rank(s).

    Args:
        distributed_env: Provides ``world_size``, the total number of ranks.
        indexes: Chunk indexes, aligned element-wise with ``chunk_intervals``.
        chunk_intervals: Per-chunk ``[begin, end)`` item intervals.
        drop_last: Whether leftover items are dropped rather than assigned to
            the last rank.

    Returns:
        ``(chunks_per_ranks, intervals_per_ranks)`` where entry ``r`` lists the
        chunk indexes and matching sub-intervals assigned to rank ``r``.
    """
    num_items = sum([(interval[-1] - interval[0]) for interval in chunk_intervals])
    # Item budget per rank; the remainder goes to the last rank unless drop_last.
    num_items_per_ranks: List[int] = [
        num_items // distributed_env.world_size + num_items % distributed_env.world_size
        if rank == distributed_env.world_size - 1 and not drop_last
        else num_items // distributed_env.world_size
        for rank in range(distributed_env.world_size)
    ]
    chunks_per_ranks: List[List[int]] = [[] for _ in range(distributed_env.world_size)]
    intervals_per_ranks: List[List[List[int]]] = [[] for _ in range(distributed_env.world_size)]
    # 4. Assign the chunk & intervals to each rank
    for chunk_index, chunk_interval in zip(indexes, chunk_intervals):
        rank = 0
        while True:
            if rank == len(num_items_per_ranks):
                # Every rank is full; remaining items are dropped (drop_last case).
                break
            items_left_to_assign = num_items_per_ranks[rank]
            if items_left_to_assign == 0:
                # This rank's budget is exhausted; try the next rank.
                rank += 1
                continue
            items_in_chunk = chunk_interval[-1] - chunk_interval[0]
            if items_in_chunk == 0:
                # Nothing (left) in this chunk; move on to the next chunk.
                break
            if items_in_chunk > items_left_to_assign:
                # Split: this rank takes a prefix of the interval, and the
                # remainder is carried over (chunk_interval is re-bound) to
                # the following ranks on the next loop iteration.
                chunks_per_ranks[rank].append(chunk_index)
                begin, end = chunk_interval
                intervals_per_ranks[rank].append([begin, begin + items_left_to_assign])
                chunk_interval = (begin + items_left_to_assign, end)
                num_items_per_ranks[rank] = 0
                rank += 1
            else:
                # The whole (remaining) chunk fits on this rank.
                chunks_per_ranks[rank].append(chunk_index)
                intervals_per_ranks[rank].append(chunk_interval)
                num_items_per_ranks[rank] -= items_in_chunk
                break
    return chunks_per_ranks, intervals_per_ranks
|
evocodebench_data_151
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import inspect
import os
from datetime import datetime
from functools import partial
from pathlib import Path
from types import FunctionType
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
from litdata.constants import _IS_IN_STUDIO, _TORCH_GREATER_EQUAL_2_1_0
from litdata.processing.data_processor import DataChunkRecipe, DataProcessor, DataTransformRecipe
from litdata.processing.readers import BaseReader
from litdata.processing.utilities import optimize_dns_context
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import (
Dir,
_assert_dir_has_index_file,
_assert_dir_is_empty,
_execute,
_resolve_dir,
)
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten
def _get_indexed_paths(data: Any) -> Dict[int, str]:
    """Map flattened-leaf positions to the existing filesystem paths found in ``data``."""
    leaves, _ = tree_flatten(data)
    paths: Dict[int, str] = {}
    for position, leaf in enumerate(leaves):
        if isinstance(leaf, str) and os.path.exists(leaf):
            paths[position] = leaf
    return paths
def _get_input_dir(inputs: Sequence[Any]) -> Optional[str]:
    """Infer the shared input directory (first four path components) from the first input."""
    indexed_paths = _get_indexed_paths(inputs[0])
    if not indexed_paths:
        # Accept a path-less first element only when the second one has no paths
        # either (i.e. there is no input directory at all).
        if not _get_indexed_paths(inputs[1]):
            return None
        # Every element should have filepaths if any contains one.
        raise ValueError(f"The provided item {inputs[0]} didn't contain any filepaths.")
    first_path = list(indexed_paths.values())[0]
    absolute_path = str(Path(first_path).resolve())
    if "/.project" in absolute_path:
        # Inside a project checkout, keep the original (non-resolved) path root.
        return "/" + os.path.join(*str(first_path).split("/")[:4])
    return "/" + os.path.join(*str(absolute_path).split("/")[:4])
def _get_default_num_workers() -> int:
    """Default worker count: one per GPU when CUDA is available, otherwise one per CPU core."""
    return torch.cuda.device_count() if torch.cuda.is_available() else (os.cpu_count() or 1)
class LambdaDataTransformRecipe(DataTransformRecipe):
    """Adapts a user callable into a ``DataTransformRecipe``.

    The callable's signature is inspected once so the optional ``device`` and
    ``is_last`` keyword arguments are forwarded only when the user asked for them.
    """

    def __init__(self, fn: Callable[[str, Any], None], inputs: Sequence[Any]):
        super().__init__()
        self._fn = fn
        self._inputs = inputs
        self._device: Optional[str] = None
        # Callable objects are inspected through __call__; plain functions directly.
        target = self._fn if isinstance(self._fn, FunctionType) else self._fn.__call__  # type: ignore
        signature_params = inspect.signature(target).parameters
        self._contains_device = "device" in signature_params
        self._contains_is_last = "is_last" in signature_params

    def prepare_structure(self, _: Optional[str]) -> Any:
        """Return the user-provided inputs unchanged."""
        return self._inputs

    def prepare_item(self, item_metadata: Any, output_dir: str, is_last: bool) -> None:
        """Invoke the user callable on one item, forwarding optional kwargs."""
        if self._contains_device and self._device is None:
            self._find_device()
        extra_kwargs: Dict[str, Any] = {}
        if self._contains_device:
            extra_kwargs["device"] = self._device
        if self._contains_is_last:
            extra_kwargs["is_last"] = is_last
        if isinstance(self._fn, (FunctionType, partial)):
            self._fn(item_metadata, output_dir, **extra_kwargs)
        elif callable(self._fn):
            self._fn.__call__(item_metadata, output_dir, **extra_kwargs)  # type: ignore
        else:
            raise ValueError(f"The provided {self._fn} isn't supported.")

    def _find_device(self) -> None:
        """Pick a CUDA device round-robin over the worker's global rank."""
        global_rank = os.getenv("DATA_OPTIMIZER_GLOBAL_RANK", None)
        if torch.cuda.is_available() and global_rank:
            device_index = int(global_rank) % torch.cuda.device_count()
            self._device = f"cuda:{device_index}"
class LambdaDataChunkRecipe(DataChunkRecipe):
    """Adapts a user callable (function, partial, or callable object) into a chunk recipe."""

    def __init__(
        self,
        fn: Callable[[Any], None],
        inputs: Sequence[Any],
        chunk_size: Optional[int],
        chunk_bytes: Optional[Union[int, str]],
        compression: Optional[str],
    ):
        super().__init__(chunk_size=chunk_size, chunk_bytes=chunk_bytes, compression=compression)
        self._fn = fn
        self._inputs = inputs

    def prepare_structure(self, input_dir: Optional[str]) -> Any:
        """Return the user-provided inputs unchanged."""
        return self._inputs

    def prepare_item(self, item_metadata: Any) -> Any:
        """Run the user callable on one item, yielding its result(s).

        Generator callables are delegated to with ``yield from``; plain
        callables have their single return value yielded.
        """
        fn = self._fn
        if isinstance(fn, partial):
            yield from fn(item_metadata)
        elif isinstance(fn, FunctionType):
            if inspect.isgeneratorfunction(fn):
                yield from fn(item_metadata)
            else:
                yield fn(item_metadata)
        elif callable(fn):
            call = fn.__call__  # type: ignore
            if inspect.isgeneratorfunction(call):
                yield from call(item_metadata)
            else:
                yield call(item_metadata)
        else:
            raise ValueError(f"The provided {self._fn} isn't supported.")
def map(
    fn: Callable[[str, Any], None],
    inputs: Sequence[Any],
    output_dir: Union[str, Dir],
    weights: Optional[List[int]] = None,
    num_workers: Optional[int] = None,
    fast_dev_run: Union[bool, int] = False,
    num_nodes: Optional[int] = None,
    machine: Optional[str] = None,
    num_downloaders: Optional[int] = None,
    num_uploaders: Optional[int] = None,
    reorder_files: bool = True,
    error_when_not_empty: bool = False,
    reader: Optional[BaseReader] = None,
    batch_size: Optional[int] = None,
) -> None:
    """Map a callable over a collection of inputs, possibly in a distributed way.

    Args:
        fn: A function to be executed over each input element.
        inputs: A sequence of inputs to be processed by the `fn` function.
            Each input should contain at least a valid filepath.
        output_dir: The folder where the processed data should be stored.
        weights: Provide an associated weight to each input. This is used to balance work among workers.
        num_workers: The number of workers to use during processing.
        fast_dev_run: Whether to process only a sub part of the inputs.
        num_nodes: When doing remote execution, the number of nodes to use. Only supported on https://lightning.ai/.
        machine: When doing remote execution, the machine to use. Only supported on https://lightning.ai/.
        num_downloaders: The number of downloaders per worker.
        num_uploaders: The number of uploaders per worker.
        reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
            Set this to ``False`` if the order in which samples are processed should be preserved.
        error_when_not_empty: Whether we should error if the output folder isn't empty.
        reader: Forwarded to the ``DataProcessor`` to customize how the inputs are read.
        batch_size: Group the inputs into batches of batch_size length.
    """
    if isinstance(inputs, StreamingDataLoader) and batch_size is not None:
        raise ValueError("When providing a streaming dataloader, pass the batch_size to the dataloader directly.")
    if isinstance(inputs, StreamingDataLoader) and weights is not None:
        raise ValueError("When providing a streaming dataloader, weights isn't supported.")
    if not isinstance(inputs, (Sequence, StreamingDataLoader)):
        raise ValueError(f"The provided inputs should be non empty sequence or a streaming dataloader. Found {inputs}.")
    if len(inputs) == 0:
        raise ValueError(f"The provided inputs should be non empty. Found {inputs}.")
    if not _IS_IN_STUDIO and (machine is not None or num_nodes is not None):
        raise ValueError(
            "Only https://lightning.ai/ supports multiple nodes or selecting a machine."
            " Create an account to try it out."
        )
    if not _IS_IN_STUDIO:
        print(
            "Create an account on https://lightning.ai/ to transform your data faster using "
            "multiple nodes and large machines."
        )
    # Run locally when no multi-node run was requested, or when this process is
    # already one of the provisioned nodes (DATA_OPTIMIZER_NUM_NODES is set there).
    if num_nodes is None or int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 0)) > 0:
        _output_dir: Dir = _resolve_dir(output_dir)
        if _output_dir.url and "cloudspaces" in _output_dir.url:
            raise ValueError(
                f"The provided `output_dir` isn't valid. Found {_output_dir.path if _output_dir else None}."
                " HINT: You can either use `/teamspace/s3_connections/...` or `/teamspace/datasets/...`."
            )
        if error_when_not_empty:
            _assert_dir_is_empty(_output_dir)
        if not isinstance(inputs, StreamingDataLoader):
            input_dir = _resolve_dir(_get_input_dir(inputs))
            # Optional batching: items are grouped into contiguous slices of batch_size.
            if isinstance(batch_size, int) and batch_size > 1:
                inputs = [inputs[pos : pos + batch_size] for pos in range(0, len(inputs), batch_size)]
        else:
            input_dir = Dir()
        data_processor = DataProcessor(
            input_dir=input_dir,
            output_dir=_output_dir,
            num_workers=num_workers or _get_default_num_workers(),
            fast_dev_run=fast_dev_run,
            num_downloaders=num_downloaders,
            num_uploaders=num_uploaders,
            reorder_files=reorder_files,
            weights=weights,
            reader=reader,
        )
        with optimize_dns_context(True):
            return data_processor.run(LambdaDataTransformRecipe(fn, inputs))
    # Otherwise launch the run remotely on the requested nodes/machine.
    return _execute(
        f"data-prep-map-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}",
        num_nodes,
        machine,
    )
def optimize(
    fn: Callable[[Any], Any],
    inputs: Sequence[Any],
    output_dir: str,
    weights: Optional[List[int]] = None,
    chunk_size: Optional[int] = None,
    chunk_bytes: Optional[Union[int, str]] = None,
    compression: Optional[str] = None,
    num_workers: Optional[int] = None,
    fast_dev_run: bool = False,
    num_nodes: Optional[int] = None,
    machine: Optional[str] = None,
    num_downloaders: Optional[int] = None,
    num_uploaders: Optional[int] = None,
    reorder_files: bool = True,
    reader: Optional[BaseReader] = None,
    batch_size: Optional[int] = None,
) -> None:
    """Convert a dataset into chunks, possibly in a distributed way.

    Either ``chunk_size`` or ``chunk_bytes`` must be provided.

    Args:
        fn: A function to be executed over each input element.
        inputs: A sequence of inputs to be processed by the `fn` function.
            Each input should contain at least a valid filepath.
        output_dir: The folder where the processed data should be stored.
        weights: Provide an associated weight to each input. This is used to balance work among workers.
        chunk_size: The maximum number of elements to hold within a chunk.
        chunk_bytes: The maximum number of bytes to hold within a chunk.
        compression: The compression algorithm to use over the chunks.
        num_workers: The number of workers to use during processing.
        fast_dev_run: Whether to process only a sub part of the inputs.
        num_nodes: When doing remote execution, the number of nodes to use. Only supported on https://lightning.ai/.
        machine: When doing remote execution, the machine to use. Only supported on https://lightning.ai/.
        num_downloaders: The number of downloaders per worker.
        num_uploaders: The number of uploaders per worker.
        reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
            Set this to ``False`` if the order in which samples are processed should be preserved.
        reader: Forwarded to the ``DataProcessor`` to customize how the inputs are read.
        batch_size: Group the inputs into batches of batch_size length.
    """
    if isinstance(inputs, StreamingDataLoader) and batch_size is not None:
        raise ValueError("When providing a streaming dataloader, pass the batch_size to the dataloader directly.")
    if isinstance(inputs, StreamingDataLoader) and weights is not None:
        raise ValueError("When providing a streaming dataloader, weights isn't supported.")
    if not isinstance(inputs, (Sequence, StreamingDataLoader)):
        raise ValueError(f"The provided inputs should be non empty sequence or a streaming dataloader. Found {inputs}.")
    if len(inputs) == 0:
        raise ValueError(f"The provided inputs should be non empty. Found {inputs}.")
    if chunk_size is None and chunk_bytes is None:
        raise ValueError("Either `chunk_size` or `chunk_bytes` needs to be defined.")
    if not _IS_IN_STUDIO and (machine is not None or num_nodes is not None):
        raise ValueError(
            "Only https://lightning.ai/ supports multiple nodes or selecting a machine."
            "Create an account to try it out."
        )
    if not _IS_IN_STUDIO:
        print(
            "Create an account on https://lightning.ai/ to optimize your data faster "
            "using multiple nodes and large machines."
        )
    # Run locally when no multi-node run was requested, or when this process is
    # already one of the provisioned nodes (DATA_OPTIMIZER_NUM_NODES is set there).
    if num_nodes is None or int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 0)) > 0:
        _output_dir: Dir = _resolve_dir(output_dir)
        if _output_dir.url is not None and "cloudspaces" in _output_dir.url:
            raise ValueError(
                f"The provided `output_dir` isn't valid. Found {_output_dir.path}."
                " HINT: You can either use `/teamspace/s3_connections/...` or `/teamspace/datasets/...`."
            )
        # Unlike `map`, the output dir must not already hold an optimized dataset index.
        _assert_dir_has_index_file(_output_dir)
        if not isinstance(inputs, StreamingDataLoader):
            input_dir = _resolve_dir(_get_input_dir(inputs))
            # Optional batching: items are grouped into contiguous slices of batch_size.
            if isinstance(batch_size, int) and batch_size > 1:
                inputs = [inputs[pos : pos + batch_size] for pos in range(0, len(inputs), batch_size)]
        else:
            input_dir = Dir()
        data_processor = DataProcessor(
            input_dir=input_dir,
            output_dir=_output_dir,
            num_workers=num_workers or _get_default_num_workers(),
            fast_dev_run=fast_dev_run,
            num_downloaders=num_downloaders,
            num_uploaders=num_uploaders,
            reorder_files=reorder_files,
            reader=reader,
        )
        with optimize_dns_context(True):
            data_processor.run(
                LambdaDataChunkRecipe(
                    fn,
                    inputs,
                    chunk_size=chunk_size,
                    chunk_bytes=chunk_bytes,
                    compression=compression,
                )
            )
        return None
    # Otherwise launch the run remotely on the requested nodes/machine.
    return _execute(
        f"data-prep-optimize-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}",
        num_nodes,
        machine,
    )
def _listdir(folder: str) -> Tuple[str, List[str]]:
    """Return ``folder`` together with the names of its direct children."""
    entries = os.listdir(folder)
    return folder, entries
class walk:
    """This class is an optimized version of os.walk for listing files and folders from cloud filesystem.

    Note: The order of files and folders yielded aren't depth-first anymore due to the asynchronous listing call.
    """
    def __init__(self, folder: str, max_workers: Optional[int] = os.cpu_count()) -> None:
        # Pending folders to list; starts with the root and grows as subfolders are found.
        self.folders = [folder]
        self.max_workers = max_workers or 1
        # In-flight listdir futures.
        self.futures: List[concurrent.futures.Future] = []
        if not _IS_IN_STUDIO:
            print("This method is optimized to run on https://lightning.ai/. Don't use it otherwise.")
    def __iter__(self) -> Any:
        """This function queues the folders to perform listdir across multiple workers."""
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            # Seed the pool with the initial folder(s).
            while len(self.folders):
                folder = self.folders.pop(0)
                future = executor.submit(_listdir, folder)
                self.futures.append(future)
            while self.futures:
                # NOTE: as_completed iterates a snapshot of self.futures; futures
                # submitted below are picked up by the enclosing `while` loop.
                for future in concurrent.futures.as_completed(self.futures):
                    filenames = []
                    folders = []
                    folder, files_or_folders = future.result()
                    self.futures = [f for f in self.futures if f != future]
                    for file_or_folder in files_or_folders:
                        if os.path.isfile(os.path.join(folder, file_or_folder)):
                            filenames.append(file_or_folder)
                        else:
                            folders.append(file_or_folder)
                            # Non-files are queued for their own listing pass.
                            self.folders.append(os.path.join(folder, file_or_folder))
                    yield folder, folders, filenames
                    # Keep the pool fed, but bound the number of in-flight listings.
                    while len(self.folders) and len(self.futures) <= self.max_workers * 2:
                        folder = self.folders.pop(0)
                        future = executor.submit(_listdir, folder)
                        self.futures.append(future)
        return
|
evocodebench_data_152
|
import concurrent
import json
import logging
import os
import random
import shutil
import signal
import tempfile
import traceback
import types
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Process, Queue
from pathlib import Path
from queue import Empty
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from urllib import parse
import numpy as np
import torch
from tqdm.auto import tqdm as _tqdm
from litdata.constants import (
_BOTO3_AVAILABLE,
_DEFAULT_FAST_DEV_RUN_ITEMS,
_INDEX_FILENAME,
_IS_IN_STUDIO,
_LIGHTNING_CLOUD_LATEST,
_TORCH_GREATER_EQUAL_2_1_0,
)
from litdata.processing.readers import BaseReader, StreamingDataLoaderReader
from litdata.processing.utilities import _create_dataset
from litdata.streaming import Cache
from litdata.streaming.cache import Dir
from litdata.streaming.client import S3Client
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import _resolve_dir
from litdata.utilities.broadcast import broadcast_object
from litdata.utilities.packing import _pack_greedily
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten, tree_unflatten, treespec_loads
if _LIGHTNING_CLOUD_LATEST:
from lightning_cloud.openapi import V1DatasetType
if _BOTO3_AVAILABLE:
import botocore
# Use getLogger so the logger is registered in the logging hierarchy and picks up
# the application's handlers/levels via propagation. Instantiating logging.Logger
# directly creates a detached logger that bypasses the manager entirely.
logger = logging.getLogger(__name__)
def _get_num_nodes() -> int:
    """Returns the number of nodes (defaults to 1 when the env var is unset)."""
    return int(os.environ.get("DATA_OPTIMIZER_NUM_NODES", 1))
def _get_node_rank() -> int:
    """Returns the current node rank of the instance (defaults to 0 when unset)."""
    return int(os.environ.get("DATA_OPTIMIZER_NODE_RANK", 0))
def _get_fast_dev_run() -> int:
    """Returns whether fast dev mode is enabled (enabled by default)."""
    flag = os.environ.get("DATA_OPTIMIZER_FAST_DEV_RUN", 1)
    return bool(int(flag))
def _get_default_cache() -> str:
    """Return the base cache folder: ``/cache`` in studios, the system temp dir otherwise."""
    if _IS_IN_STUDIO:
        return "/cache"
    return tempfile.gettempdir()
def _get_cache_dir(name: Optional[str] = None) -> str:
    """Returns the cache directory used by the Cache to store the chunks."""
    base = os.getenv("DATA_OPTIMIZER_CACHE_FOLDER", f"{_get_default_cache()}/chunks")
    return base if name is None else os.path.join(base, name.lstrip("/"))
def _get_cache_data_dir(name: Optional[str] = None) -> str:
    """Returns the cache data directory used by the DataProcessor workers to download the files."""
    base = os.getenv("DATA_OPTIMIZER_DATA_CACHE_FOLDER", f"{_get_default_cache()}/data")
    if name is None:
        return base
    return os.path.join(base, name.lstrip("/"))
def _wait_for_file_to_exist(s3: S3Client, obj: parse.ParseResult, sleep_time: int = 2) -> Any:
    """Poll S3 until the object at ``obj`` exists and return its HEAD metadata."""
    while True:
        try:
            return s3.client.head_object(Bucket=obj.netloc, Key=obj.path.lstrip("/"))
        except botocore.exceptions.ClientError as err:
            # Keep polling while the object is simply missing; anything else is fatal.
            if "the HeadObject operation: Not Found" not in str(err):
                raise err
            sleep(sleep_time)
def _wait_for_disk_usage_higher_than_threshold(input_dir: str, threshold_in_gb: int = 25, sleep_time: int = 3) -> None:
    """Block until the filesystem holding ``input_dir`` has more than ``threshold_in_gb`` (decimal GB) free."""
    free_gb = shutil.disk_usage(input_dir).free / 1000 / 1000 / 1000
    while free_gb <= threshold_in_gb:
        sleep(sleep_time)
        free_gb = shutil.disk_usage(input_dir).free / 1000 / 1000 / 1000
    return
def _download_data_target(input_dir: Dir, cache_dir: str, queue_in: Queue, queue_out: Queue) -> None:
    """This function is used to download data from a remote directory to a cache directory to optimise reading.

    Runs as a long-lived worker process: consumes ``(index, paths)`` jobs from
    ``queue_in``, makes the files locally available under ``cache_dir``, and
    reports the ready ``index`` on ``queue_out``. A ``None`` job terminates it.
    """
    s3 = S3Client()
    while True:
        # 2. Fetch from the queue
        r: Optional[Tuple[int, List[str]]] = queue_in.get()
        # 3. Terminate the process if we received a termination signal
        if r is None:
            queue_out.put(None)
            return
        # 4. Unpack
        index, paths = r
        # 5. Check whether all the files are already downloaded
        if input_dir.path and all(
            os.path.exists(p.replace(input_dir.path, cache_dir) if input_dir else p) for p in paths
        ):
            queue_out.put(index)
            continue
        if input_dir.url is not None or input_dir.path is not None:
            if input_dir.url:
                # 6. Wait for the removers to catch up when we are downloading data.
                _wait_for_disk_usage_higher_than_threshold("/", 25)
            # 7. Download all the required paths to unblock the current index
            for path in paths:
                # NOTE(review): `local_path` is only bound when input_dir.path is
                # set; if input_dir.url is set without a path, the s3 branch below
                # would hit an unbound `local_path` — presumably url implies path
                # is resolved upstream; confirm.
                if input_dir.path:
                    local_path = path.replace(input_dir.path, cache_dir)
                if input_dir.url and input_dir.path:
                    path = path.replace(input_dir.path, input_dir.url)
                obj = parse.urlparse(path)
                if obj.scheme == "s3":
                    dirpath = os.path.dirname(local_path)
                    os.makedirs(dirpath, exist_ok=True)
                    with open(local_path, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif os.path.isfile(path):
                    # Studio-local files are readable in place; anything else is copied.
                    if not path.startswith("/teamspace/studios/this_studio"):
                        os.makedirs(os.path.dirname(local_path), exist_ok=True)
                        shutil.copyfile(path, local_path)
                else:
                    raise ValueError(f"The provided {input_dir.url} isn't supported.")
        # 7. Inform the worker the current files are available
        queue_out.put(index)
def _remove_target(input_dir: Dir, cache_dir: str, queue_in: Queue) -> None:
    """This function is used to delete files from the cache directory to minimise disk space."""
    while True:
        # Collect the next batch of paths to delete; ``None`` is the termination sentinel.
        paths = queue_in.get()
        if paths is None:
            return
        for path in paths:
            if input_dir:
                # Map the original input path onto its cached copy before deleting.
                if input_dir.path is not None and not path.startswith(cache_dir):
                    path = path.replace(input_dir.path, cache_dir)
                if os.path.exists(path):
                    os.remove(path)
            elif os.path.exists(path) and "s3_connections" not in path:
                os.remove(path)
def _upload_fn(upload_queue: Queue, remove_queue: Queue, cache_dir: str, output_dir: Dir) -> None:
    """This function is used to upload optimised chunks from a local to remote dataset directory.

    Runs as a long-lived worker process: consumes either a filepath or a
    ``(tmpdir, filepath)`` pair from ``upload_queue``, copies/uploads it to
    ``output_dir`` (S3 or local), then hands the local file to ``remove_queue``.
    A ``None`` filepath terminates it.
    """
    obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
    if obj.scheme == "s3":
        s3 = S3Client()
    while True:
        data: Optional[Union[str, Tuple[str, str]]] = upload_queue.get()
        tmpdir = None
        if isinstance(data, str) or data is None:
            local_filepath = data
        else:
            tmpdir, local_filepath = data
        # Terminate the process if we received a termination signal
        if local_filepath is None:
            return
        # Upload the file to the target cloud storage
        if not local_filepath.startswith(cache_dir):
            local_filepath = os.path.join(cache_dir, local_filepath)
        if obj.scheme == "s3":
            try:
                # With a tmpdir, preserve the relative layout below it; otherwise flatten to the basename.
                if tmpdir is None:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
                else:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), local_filepath.replace(tmpdir, "")[1:])
                s3.client.upload_file(
                    local_filepath,
                    obj.netloc,
                    output_filepath,
                )
            except Exception as e:
                # Best-effort: an upload failure is reported but does not kill the worker.
                print(e)
        elif output_dir.path:
            if tmpdir is None:
                output_filepath = os.path.join(output_dir.path, os.path.basename(local_filepath))
            else:
                output_filepath = os.path.join(output_dir.path, local_filepath.replace(tmpdir, "")[1:])
            os.makedirs(os.path.dirname(output_filepath), exist_ok=True)
            shutil.move(local_filepath, output_filepath)
        else:
            raise ValueError(f"The provided {output_dir.path} isn't supported.")
        # Inform the remover to delete the file
        if remove_queue and os.path.exists(local_filepath):
            remove_queue.put([local_filepath])
def _map_items_to_workers_sequentially(num_workers: int, user_items: List[Any]) -> List[List[Any]]:
    """Split ``user_items`` into contiguous slices, one per worker on THIS node.

    Items are spread as evenly as possible over ``num_nodes * num_workers``
    global workers (the first ``len(user_items) % total_workers`` workers get
    one extra item), then only the slices belonging to the current node are
    returned.

    Args:
        num_workers: Number of workers per node.
        user_items: The full list of items to process across all nodes.

    Returns:
        A list of ``num_workers`` item lists for the current node's workers.

    Raises:
        RuntimeError: If the per-node assignment does not yield ``num_workers`` slices.
    """
    total_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    total_workers = total_nodes * num_workers
    base_count = len(user_items) // total_workers
    extra_items = len(user_items) % total_workers
    all_assignments: List[List[Any]] = []
    start = 0
    for worker_idx in range(total_workers):
        count = base_count + 1 if worker_idx < extra_items else base_count
        all_assignments.append(user_items[start : start + count])
        start += count
    # BUG FIX: the previous version returned the assignments for *all* nodes and
    # then raised RuntimeError whenever total_nodes > 1 (len(result) was
    # total_workers, not num_workers). Return only this node's slice instead.
    # (Also removed in-function `import os` / `from typing import ...` that
    # shadowed the module-level imports.)
    result = all_assignments[node_rank * num_workers : (node_rank + 1) * num_workers]
    if len(result) != num_workers:
        raise RuntimeError("Improper assignment of items to workers")
    return result
def _map_items_to_workers_weighted(
    num_workers: int,
    user_items: List[Any],
    weights: Optional[List[int]] = None,
    file_size: bool = True,
) -> List[List[Any]]:
    """Greedily pack items onto all global workers by weight and return this node's (shuffled) shares."""
    # Unweighted items count equally.
    if weights is None:
        weights = [1] * len(user_items)
    node_rank = _get_node_rank()
    world_size = _get_num_nodes() * num_workers
    worker_items, worker_weights = _pack_greedily(items=user_items, weights=weights, num_bins=world_size)
    worker_ids_this_node = range(node_rank * num_workers, (node_rank + 1) * num_workers)
    # Log the load of each worker that belongs to this node.
    for worker_id, size in worker_weights.items():
        if worker_id not in worker_ids_this_node:
            continue
        if file_size:
            print(f"Worker {worker_id} gets {size / 1e6:.1f} MB ({len(worker_items[worker_id])} files)")
        else:
            print(f"Worker {worker_id} gets ({len(worker_items[worker_id])}) items for a total weight of {size}.")
    return [np.random.permutation(worker_items[worker_id]).tolist() for worker_id in worker_ids_this_node]
def _get_num_bytes(item: Any, base_path: str) -> int:
    """Sum the on-disk sizes (bytes) of every existing file path found in ``item``."""
    leaves, _ = tree_flatten(item)
    total = 0
    for leaf in leaves:
        if not isinstance(leaf, str):
            continue
        resolved = Path(leaf).resolve()
        if not resolved.exists():
            continue
        size = os.path.getsize(resolved)
        # A zero-byte input file indicates a broken dataset.
        if size == 0:
            raise RuntimeError(f"The file {resolved} has 0 bytes!")
        total += size
    return total
def _get_item_filesizes(items: List[Any], base_path: str = "") -> List[int]:
    """Computes the total size in bytes of all file paths for every datastructure in the given list."""
    cpu_count = os.cpu_count() or 1
    max_workers = cpu_count * 2 if cpu_count > 4 else cpu_count
    # The stat() calls are I/O bound, so a thread pool parallelises them well.
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(_get_num_bytes, item, base_path) for item in items]
        # Collect in submission order so results align with ``items``.
        return [future.result() for future in futures]
def _to_path(element: str) -> str:
    """Keep studio teamspace paths verbatim; resolve everything else to an absolute path."""
    if _IS_IN_STUDIO and element.startswith("/teamspace"):
        return element
    return str(Path(element).resolve())
def _is_path(input_dir: Optional[str], element: Any) -> bool:
    """Return whether ``element`` looks like a filepath belonging to this run's inputs."""
    if not isinstance(element, str):
        return False
    if _IS_IN_STUDIO and input_dir is not None:
        # Accept paths under the input dir, either as given or once made absolute.
        if element.startswith(input_dir):
            return True
        absolute = str(Path(element).absolute())
        if absolute.startswith(input_dir):
            return True
    return os.path.exists(element)
class BaseWorker:
    def __init__(
        self,
        worker_index: int,
        num_workers: int,
        node_rank: int,
        data_recipe: "DataRecipe",
        input_dir: Dir,
        output_dir: Dir,
        items: List[Any],
        progress_queue: Queue,
        error_queue: Queue,
        stop_queue: Queue,
        num_downloaders: int,
        num_uploaders: int,
        remove: bool,
        reader: Optional[BaseReader] = None,
    ) -> None:
        """The BaseWorker is responsible to process the user data.

        Stores the worker's configuration and creates the internal queues used
        to coordinate the downloader/uploader/remover helper processes.
        """
        self.worker_index = worker_index
        self.num_workers = num_workers
        self.node_rank = node_rank
        self.data_recipe = data_recipe
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.items = items
        self.num_items = len(self.items)
        self.num_downloaders = num_downloaders
        self.num_uploaders = num_uploaders
        self.remove = remove
        self.reader = reader
        # Per-item filepaths, filled by _collect_paths; index-aligned with self.items.
        self.paths: List[List[str]] = []
        # Helper processes, started during setup.
        self.remover: Optional[Process] = None
        self.downloaders: List[Process] = []
        self.uploaders: List[Process] = []
        self.to_download_queues: List[Queue] = []
        self.to_upload_queues: List[Queue] = []
        self.stop_queue = stop_queue
        # Indices whose files are locally available and can be processed.
        self.ready_to_process_queue: Queue = Queue()
        self.remove_queue: Queue = Queue()
        self.progress_queue: Queue = progress_queue
        self.error_queue: Queue = error_queue
        # Number of items processed so far; drives progress reporting and upload round-robin.
        self._counter = 0
        self._last_time = time()
        self._index_counter = 0
def run(self) -> None:
try:
self._setup()
self._loop()
except Exception:
traceback_format = traceback.format_exc()
print(traceback_format)
self.error_queue.put(traceback_format)
print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is done.")
    def _setup(self) -> None:
        """Prepare the worker: env vars, local caches, path remapping, then helper processes.

        The order matters: caches must exist before paths are remapped into them,
        and paths must be collected before the downloaders that consume them start.
        """
        self._set_environ_variables()
        self._create_cache()
        self._collect_paths()
        self._start_downloaders()
        self._start_uploaders()
        self._start_remover()
    def _loop(self) -> None:
        """Main processing loop.

        Consumes ready item indexes from the downloaders, dispatches each one to
        the recipe, and performs the shutdown handshake (flush chunks, drain
        uploaders, stop the remover, final progress update) once every
        downloader has sent its ``None`` terminator.
        """
        num_downloader_finished = 0
        while True:
            index = self.ready_to_process_queue.get()
            if index is None:
                # One downloader finished; only shut down once ALL of them did.
                num_downloader_finished += 1
                if num_downloader_finished == self.num_downloaders:
                    print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is terminating.")
                    if isinstance(self.data_recipe, DataChunkRecipe):
                        self._handle_data_chunk_recipe_end()
                    if self.output_dir.url if self.output_dir.url else self.output_dir.path:
                        # Inform the uploaders they are done working
                        for i in range(self.num_uploaders):
                            self.to_upload_queues[i].put(None)
                        # Wait for them all to be finished
                        for uploader in self.uploaders:
                            uploader.join()
                    if self.remove:
                        assert self.remover
                        self.remove_queue.put(None)
                        self.remover.join()
                    if self.progress_queue:
                        self.progress_queue.put((self.worker_index, self._counter))
                    return
                continue
            # Dispatch the item to the recipe-specific handler.
            if isinstance(self.data_recipe, DataChunkRecipe):
                self._handle_data_chunk_recipe(index)
            else:
                self._handle_data_transform_recipe(index)
            self._counter += 1
            # Don't send the last progress update, so the main thread awaits for the uploader and remover
            if self.progress_queue and (time() - self._last_time) > 1 and self._counter < (self.num_items - 2):
                self.progress_queue.put((self.worker_index, self._counter))
                self._last_time = time()
            # Hand the consumed item's cached files to the remover.
            if self.remove and self.input_dir.path is not None and self.reader is None:
                self.remove_queue.put(self.paths[index])
            # Non-blocking check for an external stop request.
            try:
                self.stop_queue.get(timeout=0.0001)
                return
            except Empty:
                pass
def _set_environ_variables(self) -> None:
# set the optimizer global rank and world_size
os.environ["DATA_OPTIMIZER_GLOBAL_RANK"] = str(_get_node_rank() * self.num_workers + self.worker_index)
os.environ["DATA_OPTIMIZER_NUM_WORKERS"] = str(self.num_workers)
def _create_cache(self) -> None:
self.cache_data_dir = _get_cache_data_dir()
os.makedirs(self.cache_data_dir, exist_ok=True)
self.cache_chunks_dir = _get_cache_dir()
os.makedirs(self.cache_chunks_dir, exist_ok=True)
if isinstance(self.data_recipe, DataTransformRecipe):
return
self.cache = Cache(
self.cache_chunks_dir,
chunk_bytes=self.data_recipe.chunk_bytes,
chunk_size=self.data_recipe.chunk_size,
compression=self.data_recipe.compression,
)
self.cache._reader._rank = _get_node_rank() * self.num_workers + self.worker_index
def _try_upload(self, data: Optional[Union[str, Tuple[str, str]]]) -> None:
if not data or (self.output_dir.url if self.output_dir.url else self.output_dir.path) is None:
return
if isinstance(data, str):
assert os.path.exists(data), data
else:
assert os.path.exists(data[-1]), data
self.to_upload_queues[self._counter % self.num_uploaders].put(data)
    def _collect_paths(self) -> None:
        """Extract the filepaths from every item and remap them into the local cache.

        When there is no local input directory (or a custom reader fetches the data), the
        item indexes are queued as ready immediately and nothing is collected. Otherwise,
        each filepath found in an item is recorded in `self.paths` (for the downloaders)
        and rewritten inside the item to its future location in `cache_data_dir`.
        """
        if self.input_dir.path is None or self.reader is not None:
            # Nothing to download: mark every item ready and send one stop sentinel
            # per downloader so they terminate right away.
            for index in range(len(self.items)):
                self.ready_to_process_queue.put(index)
            for _ in range(self.num_downloaders):
                self.ready_to_process_queue.put(None)
            return
        items = []
        for item in self.items:
            flattened_item, spec = tree_flatten(item)
            # For speed reasons, we assume starting with `self.input_dir` is enough to be a real file.
            # Other alternative would be too slow.
            # TODO: Try using dictionary for higher accurary.
            indexed_paths = {
                index: _to_path(element)
                for index, element in enumerate(flattened_item)
                if _is_path(self.input_dir.path, element)
            }
            if len(indexed_paths) == 0:
                raise ValueError(
                    f"The provided item {item} didn't contain any filepaths. The input_dir is {self.input_dir.path}."
                )
            paths = []
            for index, path in indexed_paths.items():
                paths.append(path)
                # Remap the source path to the cache location the downloader will use,
                # unless the data already lives in the local studio filesystem.
                if self.input_dir and not self.input_dir.path.startswith("/teamspace/studios/this_studio"):
                    path = path.replace(self.input_dir.path, self.cache_data_dir)
                flattened_item[index] = path
            self.paths.append(paths)
            items.append(tree_unflatten(flattened_item, spec))
        self.items = items
    def _start_downloaders(self) -> None:
        """Spawn the downloader processes and distribute the collected filepaths among them.

        Each downloader receives `(item_index, filepaths)` tuples round-robin followed by a
        `None` stop sentinel. No-op when the data is already local or a custom reader is
        responsible for fetching it.
        """
        if self.input_dir.path is None or self.reader is not None:
            return
        for _ in range(self.num_downloaders):
            to_download_queue: Queue = Queue()
            p = Process(
                target=_download_data_target,
                args=(
                    self.input_dir,
                    self.cache_data_dir,
                    to_download_queue,
                    self.ready_to_process_queue,
                ),
            )
            p.start()
            self.downloaders.append(p)
            self.to_download_queues.append(to_download_queue)
        # Round-robin the work, then signal termination to every downloader.
        for index, paths in enumerate(self.paths):
            self.to_download_queues[index % self.num_downloaders].put((index, paths))
        for downloader_index in range(self.num_downloaders):
            self.to_download_queues[downloader_index].put(None)
    def _start_remover(self) -> None:
        """Spawn the process that deletes local input files once processed.

        No-op unless removal was requested via `self.remove`.
        """
        if not self.remove:
            return
        self.remover = Process(
            target=_remove_target,
            args=(
                self.input_dir,
                self.cache_data_dir,
                self.remove_queue,
            ),
        )
        self.remover.start()
    def _start_uploaders(self) -> None:
        """Spawn the uploader processes that push produced files to the output directory.

        Each uploader gets its own input queue (fed round-robin by `_try_upload`).
        No-op when neither an output path nor an output URL is configured.
        """
        if self.output_dir.path is None and self.output_dir.url is None:
            return
        for _ in range(self.num_uploaders):
            to_upload_queue: Queue = Queue()
            p = Process(
                target=_upload_fn,
                args=(
                    to_upload_queue,
                    self.remove_queue,
                    self.cache_chunks_dir,
                    self.output_dir,
                ),
            )
            p.start()
            self.uploaders.append(p)
            self.to_upload_queues.append(to_upload_queue)
    def _handle_data_chunk_recipe(self, index: int) -> None:
        """Run the chunk recipe's `prepare_item` on item `index` and persist the result.

        `prepare_item` may return a single value or a generator; every non-None value is
        appended to the chunk cache, and any completed chunk file is handed to the
        uploaders. Any failure is re-raised wrapped with the offending item for context.
        """
        try:
            current_item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
            item_data_or_generator = self.data_recipe.prepare_item(current_item)
            if isinstance(item_data_or_generator, types.GeneratorType):
                for item_data in item_data_or_generator:
                    if item_data is not None:
                        chunk_filepath = self.cache._add_item(self._index_counter, item_data)
                        self._try_upload(chunk_filepath)
                        self._index_counter += 1
            elif item_data_or_generator is not None:
                chunk_filepath = self.cache._add_item(self._index_counter, item_data_or_generator)
                self._try_upload(chunk_filepath)
                self._index_counter += 1
        except Exception as e:
            raise RuntimeError(f"Failed processing {self.items[index]}") from e
    def _handle_data_chunk_recipe_end(self) -> None:
        """Flush the chunk cache at end of processing and queue the remaining files for upload."""
        chunks_filepaths = self.cache.done()
        if chunks_filepaths and len(self.to_upload_queues):
            for i, chunk_filepath in enumerate(chunks_filepaths):
                if isinstance(chunk_filepath, str) and os.path.exists(chunk_filepath):
                    self.to_upload_queues[i % self.num_uploaders].put(chunk_filepath)
    def _handle_data_transform_recipe(self, index: int) -> None:
        """Run the transform recipe on item `index` and upload every file it wrote.

        The recipe writes its outputs into a fresh temporary directory; it must return
        None — returning data is a usage error for transform recipes.
        """
        # Don't use a context manager to avoid deleting files that are being uploaded.
        output_dir = tempfile.mkdtemp()
        item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
        # The third argument tells the recipe whether this is the last item of this worker.
        item_data = self.data_recipe.prepare_item(item, str(output_dir), len(self.items) - 1 == index)
        if item_data is not None:
            raise ValueError(
                "When using a `DataTransformRecipe`, the `prepare_item` shouldn't return anything."
                " Simply store your files under the output_dir."
            )
        filepaths = []
        for directory, _, filenames in os.walk(output_dir):
            for filename in filenames:
                filepaths.append(os.path.join(directory, filename))
        for filepath in filepaths:
            self._try_upload((output_dir, filepath))
class DataWorkerProcess(BaseWorker, Process):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """The DataWorkerProcess is responsible to process the user data inside processes."""
        # Initialize both bases explicitly rather than via super(): BaseWorker receives the
        # worker configuration while Process takes no arguments (its `run` comes from BaseWorker).
        BaseWorker.__init__(self, *args, **kwargs)
        Process.__init__(self)
@dataclass
class _Result:
size: Optional[int] = None
num_bytes: Optional[str] = None
data_format: Optional[str] = None
compression: Optional[str] = None
num_chunks: Optional[int] = None
num_bytes_per_chunk: Optional[List[int]] = None
T = TypeVar("T")  # Generic item-metadata type: produced by `prepare_structure`, consumed by `prepare_item`.
class DataRecipe:
    """Base interface for the recipes executed by the data processor."""

    def __init__(self) -> None:
        # Optional human-readable name for the recipe.
        self._name: Optional[str] = None

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the list of item metadata to process."""

    @abstractmethod
    def prepare_item(self, *args: Any, **kwargs: Any) -> Any:
        """Process a single item."""

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Hook invoked once all items were processed; returns a summary of the run."""
        return _Result(size=size)
class DataChunkRecipe(DataRecipe):
    """Recipe whose `prepare_item` results are serialized into chunked binary files.

    Arguments:
        chunk_size: The maximum number of items to hold within a chunk.
        chunk_bytes: The maximum number of bytes to hold within a chunk.
        compression: The compression algorithm to apply to the chunks.

    Raises:
        ValueError: If both `chunk_size` and `chunk_bytes` are provided.
    """

    def __init__(
        self,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
    ):
        super().__init__()
        if chunk_size is not None and chunk_bytes is not None:
            raise ValueError("Only one of `chunk_size` or `chunk_bytes` can be provided.")
        self.chunk_size = chunk_size
        # BUGFIX: fall back to the 64 MB default only when *neither* limit was provided.
        # The previous expression (`1 << 26 if chunk_size is None else chunk_bytes`)
        # silently discarded a user-supplied `chunk_bytes` whenever `chunk_size` was None.
        self.chunk_bytes = 1 << 26 if chunk_size is None and chunk_bytes is None else chunk_bytes
        self.compression = compression

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T) -> Any:
        """The return of this `prepare_item` method is persisted in chunked binary files."""

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Merge the per-worker chunk indexes, upload the final index, and return run statistics."""
        num_nodes = _get_num_nodes()
        cache_dir = _get_cache_dir()
        chunks = [file for file in os.listdir(cache_dir) if file.endswith(".bin")]
        if chunks and delete_cached_files and output_dir.path is not None:
            raise RuntimeError(f"All the chunks should have been deleted. Found {chunks}")
        merge_cache = Cache(cache_dir, chunk_bytes=1)
        node_rank = _get_node_rank()
        merge_cache._merge_no_wait(node_rank if num_nodes > 1 else None)
        self._upload_index(output_dir, cache_dir, num_nodes, node_rank)
        if num_nodes == node_rank + 1:
            # Only the last node aggregates the statistics from the merged index file.
            with open(os.path.join(cache_dir, _INDEX_FILENAME)) as f:
                config = json.load(f)
            size = sum([c["dim"] if c["dim"] is not None else c["chunk_size"] for c in config["chunks"]])
            num_bytes = sum([c["chunk_bytes"] for c in config["chunks"]])
            if config["config"] is not None:
                data_format = tree_unflatten(
                    config["config"]["data_format"], treespec_loads(config["config"]["data_spec"])
                )
            else:
                data_format = None
            num_chunks = len(config["chunks"])
            # The platform can't store more than 1024 entries.
            # Note: This isn't really used right now, so it is fine to skip if too big.
            num_bytes_per_chunk = [c["chunk_size"] for c in config["chunks"]] if num_chunks < 1024 else []
            return _Result(
                size=size,
                num_bytes=num_bytes,
                data_format=data_format,
                compression=config["config"]["compression"] if config["config"] else None,
                num_chunks=len(config["chunks"]),
                num_bytes_per_chunk=num_bytes_per_chunk,
            )
        return _Result(
            size=size,
        )

    def _upload_index(self, output_dir: Dir, cache_dir: str, num_nodes: int, node_rank: Optional[int]) -> None:
        """Upload this node's index file and, on the last node, merge all node indexes."""
        if output_dir.path is None and output_dir.url is None:
            return
        obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
        if num_nodes > 1:
            local_filepath = os.path.join(cache_dir, f"{node_rank}-{_INDEX_FILENAME}")
        else:
            local_filepath = os.path.join(cache_dir, _INDEX_FILENAME)
        if obj.scheme == "s3":
            s3 = S3Client()
            s3.client.upload_file(
                local_filepath, obj.netloc, os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
            )
        elif output_dir.path and os.path.isdir(output_dir.path):
            shutil.copyfile(local_filepath, os.path.join(output_dir.path, os.path.basename(local_filepath)))
        if num_nodes == 1 or node_rank is None:
            return
        # Merge the index files generated by each node.
        # Note: When using the Data Optimizer, they should be a single process on each node executing this section
        # So no risk to get race conditon.
        if num_nodes == node_rank + 1:
            # Get the index file locally
            for node_rank in range(num_nodes - 1):
                output_dir_path = output_dir.url if output_dir.url else output_dir.path
                assert output_dir_path
                remote_filepath = os.path.join(output_dir_path, f"{node_rank}-{_INDEX_FILENAME}")
                node_index_filepath = os.path.join(cache_dir, os.path.basename(remote_filepath))
                if obj.scheme == "s3":
                    obj = parse.urlparse(remote_filepath)
                    _wait_for_file_to_exist(s3, obj)
                    with open(node_index_filepath, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif output_dir.path and os.path.isdir(output_dir.path):
                    shutil.copyfile(remote_filepath, node_index_filepath)
            merge_cache = Cache(cache_dir, chunk_bytes=1)
            merge_cache._merge_no_wait()
            self._upload_index(output_dir, cache_dir, 1, None)
class DataTransformRecipe(DataRecipe):
    """Recipe whose `prepare_item` writes its outputs as files instead of returning data."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T, output_dir: str, is_last: bool) -> None:
        """Use your item metadata to process your files and save the file outputs into `output_dir`."""
class DataProcessor:
    """Orchestrates the distributed processing of a dataset through a :class:`DataRecipe`.

    Spawns one worker process per chunk of the input, tracks their progress through
    queues, and finalizes the recipe once every item has been processed.
    """

    def __init__(
        self,
        input_dir: Union[str, Dir],
        output_dir: Optional[Union[str, Dir]] = None,
        num_workers: Optional[int] = None,
        num_downloaders: Optional[int] = None,
        num_uploaders: Optional[int] = None,
        delete_cached_files: bool = True,
        fast_dev_run: Optional[Union[bool, int]] = None,
        random_seed: Optional[int] = 42,
        reorder_files: bool = True,
        weights: Optional[List[int]] = None,
        reader: Optional[BaseReader] = None,
    ):
        """The `DataProcessor` provides an efficient way to process data across multiple machines into chunks to make
        training faster.

        Arguments:
            input_dir: The path to where the input data are stored.
            output_dir: The path to where the output data are stored.
            num_workers: The number of worker threads to use.
            num_downloaders: The number of file downloaders to use.
            num_uploaders: The number of file uploaders to use.
            delete_cached_files: Whether to delete the cached files.
            fast_dev_run: Whether to run a quick dev run.
            random_seed: The random seed to be set before shuffling the data.
            reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
                Set this to ``False`` if the order in which samples are processed should be preserved.
            weights: Provide a list of weights associated to the inputs.
                This is used to evenly split the work among the workers.
            reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.

        """
        self.input_dir = _resolve_dir(input_dir)
        self.output_dir = _resolve_dir(output_dir)
        # Default: one worker for fast dev runs, otherwise 4 workers per CPU core.
        self.num_workers = num_workers or (1 if fast_dev_run else (os.cpu_count() or 1) * 4)
        self.num_downloaders = num_downloaders or 2
        self.num_uploaders = num_uploaders or 5
        self.delete_cached_files = delete_cached_files
        self.fast_dev_run = _get_fast_dev_run() if fast_dev_run is None else fast_dev_run
        self.workers: Any = []
        self.workers_tracker: Dict[int, int] = {}
        self.progress_queue: Optional[Queue] = None
        self.error_queue: Queue = Queue()
        self.stop_queues: List[Queue] = []
        self.reorder_files = reorder_files
        self.weights = weights
        self.reader = reader

        if self.reader is not None and self.weights is not None:
            raise ValueError("Either the reader or the weights needs to be defined.")

        # Ensure the input dir is the same across all nodes
        self.input_dir = broadcast_object("input_dir", self.input_dir)

        if self.output_dir:
            # Ensure the output dir is the same across all nodes
            self.output_dir = broadcast_object("output_dir", self.output_dir)
            print(f"Storing the files under {self.output_dir.path}")

        self.random_seed = random_seed

    def run(self, data_recipe: DataRecipe) -> None:
        """The `DataProcessor.run(...)` method triggers the data recipe processing over your dataset."""
        if not isinstance(data_recipe, DataRecipe):
            raise ValueError("The provided value should be a data recipe.")

        t0 = time()
        print(f"Setup started with fast_dev_run={self.fast_dev_run}.")

        # Force random seed to be fixed
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)

        # Call the setup method of the user
        user_items: List[Any] = data_recipe.prepare_structure(self.input_dir.path if self.input_dir else None)
        if not isinstance(user_items, (list, StreamingDataLoader)):
            raise ValueError("The `prepare_structure` should return a list of item metadata.")

        if isinstance(user_items, StreamingDataLoader):
            self.reader = StreamingDataLoaderReader(user_items)

        if self.reader:
            user_items = self.reader.remap_items(user_items, self.num_workers)

        # Split the items among workers: by explicit weights, by file size, or sequentially.
        if self.weights is not None:
            if len(self.weights) != len(user_items):
                raise ValueError("The provided weights length should match the inputs' length.")
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=self.weights, file_size=False
            )
        elif self.reorder_files and self.input_dir.path:
            # TODO: Only do this on node 0, and broadcast the item sizes to the other nodes.
            item_sizes = _get_item_filesizes(user_items, base_path=self.input_dir.path)
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=item_sizes
            )
        else:
            workers_user_items = _map_items_to_workers_sequentially(num_workers=self.num_workers, user_items=user_items)

        print(f"Setup finished in {round(time() - t0, 3)} seconds. Found {len(user_items)} items to process.")

        if self.fast_dev_run:
            items_to_keep = self.fast_dev_run if type(self.fast_dev_run) is int else _DEFAULT_FAST_DEV_RUN_ITEMS
            workers_user_items = [w[:items_to_keep] for w in workers_user_items]
            print(f"Fast dev run is enabled. Limiting to {items_to_keep} items per process.")

        num_items = sum([len(items) for items in workers_user_items])

        self._cleanup_cache()

        print(f"Starting {self.num_workers} workers with {num_items} items.")

        if self.input_dir is None and self.src_resolver is not None and self.input_dir:
            self.input_dir = self.src_resolver(self.input_dir)
            print(f"The remote_dir is `{self.input_dir}`.")

        signal.signal(signal.SIGINT, self._signal_handler)

        self._create_process_workers(data_recipe, workers_user_items)

        print("Workers are ready ! Starting data processing...")

        current_total = 0
        has_failed = False
        pbar = _tqdm(
            desc="Progress",
            total=num_items,
            smoothing=0,
            position=-1,
            mininterval=1,
            leave=True,
            dynamic_ncols=True,
        )
        # Main monitoring loop: drain the error queue (exits on error) and the
        # progress queue (updates the progress bar) until all items are processed.
        while True:
            try:
                error = self.error_queue.get(timeout=0.001)
                self._exit_on_error(error)
            except Empty:
                assert self.progress_queue
                try:
                    index, counter = self.progress_queue.get(timeout=0.001)
                except Empty:
                    continue
                self.workers_tracker[index] = counter
                new_total = sum(self.workers_tracker.values())

            pbar.update(new_total - current_total)
            current_total = new_total
            if current_total == num_items:
                break

            # Exit early if all the workers are done.
            # This means there were some kinda of errors.
            if all(not w.is_alive() for w in self.workers):
                has_failed = True
                break

        pbar.close()

        num_nodes = _get_num_nodes()
        node_rank = _get_node_rank()
        # TODO: Understand why it hangs.
        if num_nodes == 1:
            for w in self.workers:
                w.join(0)

        print("Workers are finished.")
        result = data_recipe._done(len(user_items), self.delete_cached_files, self.output_dir)

        if num_nodes == node_rank + 1 and self.output_dir.url and _IS_IN_STUDIO:
            assert self.output_dir.path
            # Register the produced dataset on the platform (last node only).
            _create_dataset(
                input_dir=self.input_dir.path,
                storage_dir=self.output_dir.path,
                dataset_type=V1DatasetType.CHUNKED
                if isinstance(data_recipe, DataChunkRecipe)
                else V1DatasetType.TRANSFORMED,
                empty=False,
                size=result.size,
                num_bytes=result.num_bytes,
                data_format=result.data_format,
                compression=result.compression,
                num_chunks=result.num_chunks,
                num_bytes_per_chunk=result.num_bytes_per_chunk,
            )

        print("Finished data processing!")

        # TODO: Understand why it is required to avoid long shutdown.
        if _get_num_nodes() > 1:
            os._exit(int(has_failed))

    def _exit_on_error(self, error: str) -> None:
        """Join all workers and re-raise the first reported worker error."""
        for w in self.workers:
            w.join(0)
        raise RuntimeError(f"We found the following error {error}.")

    def _create_process_workers(self, data_recipe: DataRecipe, workers_user_items: List[List[Any]]) -> None:
        """Spawn one `DataWorkerProcess` per item split, each with its own stop queue."""
        self.progress_queue = Queue()
        workers: List[DataWorkerProcess] = []
        stop_queues: List[Queue] = []
        for worker_idx, worker_user_items in enumerate(workers_user_items):
            stop_queues.append(Queue())
            worker = DataWorkerProcess(
                worker_idx,
                self.num_workers,
                _get_node_rank(),
                data_recipe,
                self.input_dir,
                self.output_dir,
                worker_user_items,
                self.progress_queue,
                self.error_queue,
                stop_queues[-1],
                self.num_downloaders,
                self.num_uploaders,
                self.delete_cached_files,
                self.reader,
            )
            worker.start()
            workers.append(worker)

        # Note: Don't store within the loop as weakref aren't serializable
        self.workers = workers
        self.stop_queues = stop_queues

    def _signal_handler(self, signal: Any, frame: Any) -> None:
        """On termination, we stop all the processes to avoid leaking RAM."""
        for stop_queue in self.stop_queues:
            stop_queue.put(None)

        for w in self.workers:
            w.join(0)

        os._exit(0)

    def _cleanup_cache(self) -> None:
        """Recreate both cache folders from scratch to drop artifacts from previous runs."""
        cache_dir = _get_cache_dir()

        # Cleanup the cache dir folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)

        os.makedirs(cache_dir, exist_ok=True)

        cache_data_dir = _get_cache_data_dir()

        # Cleanup the cache data folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_data_dir):
            shutil.rmtree(cache_data_dir, ignore_errors=True)

        os.makedirs(cache_data_dir, exist_ok=True)
|
evocodebench_data_153
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import inspect
import os
from datetime import datetime
from functools import partial
from pathlib import Path
from types import FunctionType
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
from litdata.constants import _IS_IN_STUDIO, _TORCH_GREATER_EQUAL_2_1_0
from litdata.processing.data_processor import DataChunkRecipe, DataProcessor, DataTransformRecipe
from litdata.processing.readers import BaseReader
from litdata.processing.utilities import optimize_dns_context
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import (
Dir,
_assert_dir_has_index_file,
_assert_dir_is_empty,
_execute,
_resolve_dir,
)
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten
def _get_indexed_paths(data: Any) -> Dict[int, str]:
flattened_item, _ = tree_flatten(data)
indexed_paths = {
index: element
for index, element in enumerate(flattened_item)
if isinstance(element, str) and os.path.exists(element)
}
return indexed_paths
def _get_input_dir(inputs: Sequence[Any]) -> Optional[str]:
    """Infer the common input directory (the first three path components) from the inputs' filepaths.

    Returns None when the first two inputs contain no filepaths at all. Raises when only
    the second one carries filepaths, since every element should have them if any does.
    """
    first_paths = _get_indexed_paths(inputs[0])
    if not first_paths:
        # Check whether the second element has any input_path
        if not _get_indexed_paths(inputs[1]):
            return None
        # Every element should have filepaths if any contains one.
        raise ValueError(f"The provided item {inputs[0]} didn't contain any filepaths.")
    first_path = list(first_paths.values())[0]
    absolute_path = str(Path(first_path).resolve())
    # Studio `.project` symlinks: keep the original (unresolved) path components.
    source = first_path if "/.project" in absolute_path else absolute_path
    return "/" + os.path.join(*source.split("/")[:4])
def _get_default_num_workers() -> int:
if torch.cuda.is_available():
return torch.cuda.device_count()
return os.cpu_count() or 1
class LambdaDataTransformRecipe(DataTransformRecipe):
    """Adapts a user-provided callable into a :class:`DataTransformRecipe`.

    The callable's signature is inspected once to determine whether the optional
    `device` and `is_last` keyword arguments should be forwarded on every call.
    """

    def __init__(self, fn: Callable[[str, Any], None], inputs: Sequence[Any]):
        super().__init__()
        self._fn = fn
        self._inputs = inputs
        self._device: Optional[str] = None

        # Inspect the plain function, or `__call__` for callable objects.
        _fn = self._fn if isinstance(self._fn, FunctionType) else self._fn.__call__  # type: ignore
        params = inspect.signature(_fn).parameters
        self._contains_device = "device" in params
        self._contains_is_last = "is_last" in params

    def prepare_structure(self, _: Optional[str]) -> Any:
        # The user supplies the items directly; there is nothing to discover on disk.
        return self._inputs

    def prepare_item(self, item_metadata: Any, output_dir: str, is_last: bool) -> None:
        """Invoke the user callable on one item, forwarding `device`/`is_last` only if it accepts them."""
        if self._contains_device and self._device is None:
            # Resolve the CUDA device lazily, once per worker.
            self._find_device()

        kwargs: Dict[str, Any] = {}

        if self._contains_device:
            kwargs["device"] = self._device

        if self._contains_is_last:
            kwargs["is_last"] = is_last

        if isinstance(self._fn, (FunctionType, partial)):
            self._fn(item_metadata, output_dir, **kwargs)

        elif callable(self._fn):
            self._fn.__call__(item_metadata, output_dir, **kwargs)  # type: ignore
        else:
            raise ValueError(f"The provided {self._fn} isn't supported.")

    def _find_device(self) -> None:
        """Assign this worker a CUDA device round-robin based on its optimizer global rank."""
        global_rank = os.getenv("DATA_OPTIMIZER_GLOBAL_RANK", None)
        if torch.cuda.is_available() and global_rank:
            num_gpus = torch.cuda.device_count()
            device = int(global_rank) % num_gpus
            self._device = f"cuda:{device}"
class LambdaDataChunkRecipe(DataChunkRecipe):
    """Adapts a user-provided callable into a :class:`DataChunkRecipe`.

    `prepare_item` is itself a generator: plain return values are yielded once, while
    generator functions are fully drained with `yield from`.
    """

    def __init__(
        self,
        fn: Callable[[Any], None],
        inputs: Sequence[Any],
        chunk_size: Optional[int],
        chunk_bytes: Optional[Union[int, str]],
        compression: Optional[str],
    ):
        super().__init__(chunk_size=chunk_size, chunk_bytes=chunk_bytes, compression=compression)
        self._fn = fn
        self._inputs = inputs

    def prepare_structure(self, input_dir: Optional[str]) -> Any:
        # The user supplies the items directly; there is nothing to discover on disk.
        return self._inputs

    def prepare_item(self, item_metadata: Any) -> Any:
        """Yield the value(s) produced by the user callable for one item.

        Note: the `partial` branch always uses `yield from`, so a partial is assumed
        to wrap a generator function.
        """
        if isinstance(self._fn, partial):
            yield from self._fn(item_metadata)

        elif isinstance(self._fn, FunctionType):
            if inspect.isgeneratorfunction(self._fn):
                yield from self._fn(item_metadata)
            else:
                yield self._fn(item_metadata)
        elif callable(self._fn):
            if inspect.isgeneratorfunction(self._fn.__call__):  # type: ignore
                yield from self._fn.__call__(item_metadata)  # type: ignore
            else:
                yield self._fn.__call__(item_metadata)  # type: ignore
        else:
            raise ValueError(f"The provided {self._fn} isn't supported.")
def map(
    fn: Callable[[str, Any], None],
    inputs: Sequence[Any],
    output_dir: Union[str, Dir],
    weights: Optional[List[int]] = None,
    num_workers: Optional[int] = None,
    fast_dev_run: Union[bool, int] = False,
    num_nodes: Optional[int] = None,
    machine: Optional[str] = None,
    num_downloaders: Optional[int] = None,
    num_uploaders: Optional[int] = None,
    reorder_files: bool = True,
    error_when_not_empty: bool = False,
    reader: Optional[BaseReader] = None,
    batch_size: Optional[int] = None,
) -> None:
    """This function maps a callable over a collection of files, possibly in a distributed way.

    Arguments:
        fn: A function to be executed over each input element
        inputs: A sequence of input to be processed by the `fn` function.
            Each input should contain at least a valid filepath.
        output_dir: The folder where the processed data should be stored.
        weights: Provide an associated weight to each input. This is used to balance work among workers.
        num_workers: The number of workers to use during processing
        fast_dev_run: Whether to use process only a sub part of the inputs
        num_nodes: When doing remote execution, the number of nodes to use. Only supported on https://lightning.ai/.
        machine: When doing remote execution, the machine to use. Only supported on https://lightning.ai/.
        num_downloaders: The number of downloaders per worker.
        num_uploaders: The number of uploaders per workers.
        reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
            Set this to ``False`` if the order in which samples are processed should be preserved.
        error_when_not_empty: Whether we should error if the output folder isn't empty.
        reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.
        batch_size: Group the inputs into batches of batch_size length.

    """
    if isinstance(inputs, StreamingDataLoader) and batch_size is not None:
        raise ValueError("When providing a streaming dataloader, pass the batch_size to the dataloader directly.")

    if isinstance(inputs, StreamingDataLoader) and weights is not None:
        raise ValueError("When providing a streaming dataloader, weights isn't supported.")

    if not isinstance(inputs, (Sequence, StreamingDataLoader)):
        raise ValueError(f"The provided inputs should be non empty sequence or a streaming dataloader. Found {inputs}.")

    if len(inputs) == 0:
        raise ValueError(f"The provided inputs should be non empty. Found {inputs}.")

    if not _IS_IN_STUDIO and (machine is not None or num_nodes is not None):
        raise ValueError(
            "Only https://lightning.ai/ supports multiple nodes or selecting a machine."
            " Create an account to try it out."
        )

    if not _IS_IN_STUDIO:
        print(
            "Create an account on https://lightning.ai/ to transform your data faster using "
            "multiple nodes and large machines."
        )

    # Run locally either when no remote execution was requested or when we *are* one
    # of the remotely launched nodes (DATA_OPTIMIZER_NUM_NODES is set by the launcher).
    if num_nodes is None or int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 0)) > 0:
        _output_dir: Dir = _resolve_dir(output_dir)

        if _output_dir.url and "cloudspaces" in _output_dir.url:
            raise ValueError(
                f"The provided `output_dir` isn't valid. Found {_output_dir.path if _output_dir else None}."
                " HINT: You can either use `/teamspace/s3_connections/...` or `/teamspace/datasets/...`."
            )

        if error_when_not_empty:
            _assert_dir_is_empty(_output_dir)

        if not isinstance(inputs, StreamingDataLoader):
            input_dir = _resolve_dir(_get_input_dir(inputs))

            if isinstance(batch_size, int) and batch_size > 1:
                # Group the inputs into fixed-size batches so each `fn` call sees a batch.
                inputs = [inputs[pos : pos + batch_size] for pos in range(0, len(inputs), batch_size)]
        else:
            input_dir = Dir()

        data_processor = DataProcessor(
            input_dir=input_dir,
            output_dir=_output_dir,
            num_workers=num_workers or _get_default_num_workers(),
            fast_dev_run=fast_dev_run,
            num_downloaders=num_downloaders,
            num_uploaders=num_uploaders,
            reorder_files=reorder_files,
            weights=weights,
            reader=reader,
        )
        with optimize_dns_context(True):
            return data_processor.run(LambdaDataTransformRecipe(fn, inputs))
    # Otherwise, launch the job remotely on the requested nodes/machine.
    return _execute(
        f"data-prep-map-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}",
        num_nodes,
        machine,
    )
def optimize(
    fn: Callable[[Any], Any],
    inputs: Sequence[Any],
    output_dir: str,
    weights: Optional[List[int]] = None,
    chunk_size: Optional[int] = None,
    chunk_bytes: Optional[Union[int, str]] = None,
    compression: Optional[str] = None,
    num_workers: Optional[int] = None,
    fast_dev_run: bool = False,
    num_nodes: Optional[int] = None,
    machine: Optional[str] = None,
    num_downloaders: Optional[int] = None,
    num_uploaders: Optional[int] = None,
    reorder_files: bool = True,
    reader: Optional[BaseReader] = None,
    batch_size: Optional[int] = None,
) -> None:
    """This function converts a dataset into chunks, possibly in a distributed way.

    Arguments:
        fn: A function to be executed over each input element
        inputs: A sequence of input to be processed by the `fn` function.
            Each input should contain at least a valid filepath.
        output_dir: The folder where the processed data should be stored.
        weights: Provide an associated weight to each input. This is used to balance work among workers.
        chunk_size: The maximum number of elements to hold within a chunk.
        chunk_bytes: The maximum number of bytes to hold within a chunk.
        compression: The compression algorithm to use over the chunks.
        num_workers: The number of workers to use during processing
        fast_dev_run: Whether to use process only a sub part of the inputs
        num_nodes: When doing remote execution, the number of nodes to use. Only supported on https://lightning.ai/.
        machine: When doing remote execution, the machine to use. Only supported on https://lightning.ai/.
        num_downloaders: The number of downloaders per worker.
        num_uploaders: The number of uploaders per worker.
        reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
            Set this to ``False`` if the order in which samples are processed should be preserved.
        reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.
        batch_size: Group the inputs into batches of batch_size length.

    """
    if isinstance(inputs, StreamingDataLoader) and batch_size is not None:
        raise ValueError("When providing a streaming dataloader, pass the batch_size to the dataloader directly.")

    if isinstance(inputs, StreamingDataLoader) and weights is not None:
        raise ValueError("When providing a streaming dataloader, weights isn't supported.")

    if not isinstance(inputs, (Sequence, StreamingDataLoader)):
        raise ValueError(f"The provided inputs should be non empty sequence or a streaming dataloader. Found {inputs}.")

    if len(inputs) == 0:
        raise ValueError(f"The provided inputs should be non empty. Found {inputs}.")

    if chunk_size is None and chunk_bytes is None:
        raise ValueError("Either `chunk_size` or `chunk_bytes` needs to be defined.")

    if not _IS_IN_STUDIO and (machine is not None or num_nodes is not None):
        raise ValueError(
            "Only https://lightning.ai/ supports multiple nodes or selecting a machine."
            "Create an account to try it out."
        )

    if not _IS_IN_STUDIO:
        print(
            "Create an account on https://lightning.ai/ to optimize your data faster "
            "using multiple nodes and large machines."
        )

    # Run locally either when no remote execution was requested or when we *are* one
    # of the remotely launched nodes (DATA_OPTIMIZER_NUM_NODES is set by the launcher).
    if num_nodes is None or int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 0)) > 0:
        _output_dir: Dir = _resolve_dir(output_dir)

        if _output_dir.url is not None and "cloudspaces" in _output_dir.url:
            raise ValueError(
                f"The provided `output_dir` isn't valid. Found {_output_dir.path}."
                " HINT: You can either use `/teamspace/s3_connections/...` or `/teamspace/datasets/...`."
            )

        _assert_dir_has_index_file(_output_dir)

        if not isinstance(inputs, StreamingDataLoader):
            input_dir = _resolve_dir(_get_input_dir(inputs))

            if isinstance(batch_size, int) and batch_size > 1:
                # Group the inputs into fixed-size batches so each `fn` call sees a batch.
                inputs = [inputs[pos : pos + batch_size] for pos in range(0, len(inputs), batch_size)]
        else:
            input_dir = Dir()

        data_processor = DataProcessor(
            input_dir=input_dir,
            output_dir=_output_dir,
            num_workers=num_workers or _get_default_num_workers(),
            fast_dev_run=fast_dev_run,
            num_downloaders=num_downloaders,
            num_uploaders=num_uploaders,
            reorder_files=reorder_files,
            reader=reader,
        )
        with optimize_dns_context(True):
            data_processor.run(
                LambdaDataChunkRecipe(
                    fn,
                    inputs,
                    chunk_size=chunk_size,
                    chunk_bytes=chunk_bytes,
                    compression=compression,
                )
            )
        return None
    # Otherwise, launch the job remotely on the requested nodes/machine.
    return _execute(
        f"data-prep-optimize-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}",
        num_nodes,
        machine,
    )
def _listdir(folder: str) -> Tuple[str, List[str]]:
return folder, os.listdir(folder)
class walk:
    """This class is an optimized version of os.walk for listing files and folders from cloud filesystem.

    Note: The order of files and folders yielded aren't depth-first anymore due to the asynchronous listing call.

    """

    def __init__(self, folder: str, max_workers: Optional[int] = os.cpu_count()) -> None:
        # Pending folders to list; starts with the root folder only.
        self.folders = [folder]
        self.max_workers = max_workers or 1
        # In-flight listdir futures.
        self.futures: List[concurrent.futures.Future] = []

        if not _IS_IN_STUDIO:
            print("This method is optimized to run on https://lightning.ai/. Don't use it otherwise.")

    def __iter__(self) -> Any:
        """This function queues the folders to perform listdir across multiple workers."""
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            # Seed the executor with all currently known folders (initially just the root).
            while len(self.folders):
                folder = self.folders.pop(0)
                future = executor.submit(_listdir, folder)
                self.futures.append(future)

            while self.futures:
                for future in concurrent.futures.as_completed(self.futures):
                    filenames = []
                    folders = []

                    folder, files_or_folders = future.result()
                    # Remove the completed future from the in-flight list.
                    self.futures = [f for f in self.futures if f != future]

                    for file_or_folder in files_or_folders:
                        if os.path.isfile(os.path.join(folder, file_or_folder)):
                            filenames.append(file_or_folder)
                        else:
                            folders.append(file_or_folder)
                            self.folders.append(os.path.join(folder, file_or_folder))

                    yield folder, folders, filenames

                    # Keep the executor busy, but bound the number of in-flight listings.
                    while len(self.folders) and len(self.futures) <= self.max_workers * 2:
                        folder = self.folders.pop(0)
                        future = executor.submit(_listdir, folder)
                        self.futures.append(future)
        return
|
evocodebench_data_154
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import inspect
import os
from datetime import datetime
from functools import partial
from pathlib import Path
from types import FunctionType
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
from litdata.constants import _IS_IN_STUDIO, _TORCH_GREATER_EQUAL_2_1_0
from litdata.processing.data_processor import DataChunkRecipe, DataProcessor, DataTransformRecipe
from litdata.processing.readers import BaseReader
from litdata.processing.utilities import optimize_dns_context
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import (
Dir,
_assert_dir_has_index_file,
_assert_dir_is_empty,
_execute,
_resolve_dir,
)
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten
def _get_indexed_paths(data: Any) -> Dict[int, str]:
flattened_item, _ = tree_flatten(data)
indexed_paths = {
index: element
for index, element in enumerate(flattened_item)
if isinstance(element, str) and os.path.exists(element)
}
return indexed_paths
def _get_input_dir(inputs: Sequence[Any]) -> Optional[str]:
    """Infer the shared input directory from the filepaths contained in the first input items.

    Returns ``None`` when the inputs don't reference any filepath, otherwise the
    first four components of the first detected path (e.g. ``/teamspace/datasets/<name>``).

    Raises:
        ValueError: If the second item contains filepaths while the first one doesn't,
            since every element is expected to carry filepaths whenever any of them does.
    """
    indexed_paths = _get_indexed_paths(inputs[0])
    if len(indexed_paths) == 0:
        # The first element has no filepath. Peek at the second one — when it exists —
        # to distinguish "no filepaths at all" from "inconsistent items".
        # Fix: previously `inputs[1]` raised IndexError for a single-element sequence.
        if len(inputs) < 2 or len(_get_indexed_paths(inputs[1])) == 0:
            return None
        # Every element should have filepaths if any contains one.
        raise ValueError(f"The provided item {inputs[0]} didn't contain any filepaths.")
    absolute_path = str(Path(list(indexed_paths.values())[0]).resolve())
    if "/.project" in absolute_path:
        # Studio resolves through `.project` symlinks; fall back to the raw (unresolved) path.
        return "/" + os.path.join(*str(list(indexed_paths.values())[0]).split("/")[:4])
    return "/" + os.path.join(*str(absolute_path).split("/")[:4])
def _get_default_num_workers() -> int:
if torch.cuda.is_available():
return torch.cuda.device_count()
return os.cpu_count() or 1
class LambdaDataTransformRecipe(DataTransformRecipe):
    """Adapter turning a user callable into a :class:`DataTransformRecipe`.

    The callable's signature is inspected once to learn whether it accepts the
    optional ``device`` and ``is_last`` keyword arguments.
    """

    def __init__(self, fn: Callable[[str, Any], None], inputs: Sequence[Any]):
        super().__init__()
        self._fn = fn
        self._inputs = inputs
        self._device: Optional[str] = None
        # Plain functions are inspected directly; callables via their __call__.
        target = self._fn if isinstance(self._fn, FunctionType) else self._fn.__call__  # type: ignore
        signature_params = inspect.signature(target).parameters
        self._contains_device = "device" in signature_params
        self._contains_is_last = "is_last" in signature_params

    def prepare_structure(self, _: Optional[str]) -> Any:
        return self._inputs

    def prepare_item(self, item_metadata: Any, output_dir: str, is_last: bool) -> None:
        if self._contains_device and self._device is None:
            self._find_device()
        kwargs: Dict[str, Any] = {}
        if self._contains_device:
            kwargs["device"] = self._device
        if self._contains_is_last:
            kwargs["is_last"] = is_last
        if isinstance(self._fn, (FunctionType, partial)):
            self._fn(item_metadata, output_dir, **kwargs)
            return
        if callable(self._fn):
            self._fn.__call__(item_metadata, output_dir, **kwargs)  # type: ignore
            return
        raise ValueError(f"The provided {self._fn} isn't supported.")

    def _find_device(self) -> None:
        # Pin the worker to one GPU, derived from its global rank (set by the worker process).
        global_rank = os.getenv("DATA_OPTIMIZER_GLOBAL_RANK", None)
        if torch.cuda.is_available() and global_rank:
            num_gpus = torch.cuda.device_count()
            self._device = f"cuda:{int(global_rank) % num_gpus}"
class LambdaDataChunkRecipe(DataChunkRecipe):
    """Adapter turning a user callable (plain function, partial, generator or callable object)
    into a :class:`DataChunkRecipe` whose ``prepare_item`` always yields its results."""

    def __init__(
        self,
        fn: Callable[[Any], None],
        inputs: Sequence[Any],
        chunk_size: Optional[int],
        chunk_bytes: Optional[Union[int, str]],
        compression: Optional[str],
    ):
        super().__init__(chunk_size=chunk_size, chunk_bytes=chunk_bytes, compression=compression)
        self._fn = fn
        self._inputs = inputs

    def prepare_structure(self, input_dir: Optional[str]) -> Any:
        return self._inputs

    def prepare_item(self, item_metadata: Any) -> Any:
        # Dispatch on the callable flavour; every branch yields so callers can
        # treat the result uniformly as a generator.
        if isinstance(self._fn, partial):
            yield from self._fn(item_metadata)
            return
        if isinstance(self._fn, FunctionType):
            if inspect.isgeneratorfunction(self._fn):
                yield from self._fn(item_metadata)
            else:
                yield self._fn(item_metadata)
            return
        if not callable(self._fn):
            raise ValueError(f"The provided {self._fn} isn't supported.")
        call = self._fn.__call__  # type: ignore
        if inspect.isgeneratorfunction(call):
            yield from call(item_metadata)
        else:
            yield call(item_metadata)
def map(
    fn: Callable[[str, Any], None],
    inputs: Sequence[Any],
    output_dir: Union[str, Dir],
    weights: Optional[List[int]] = None,
    num_workers: Optional[int] = None,
    fast_dev_run: Union[bool, int] = False,
    num_nodes: Optional[int] = None,
    machine: Optional[str] = None,
    num_downloaders: Optional[int] = None,
    num_uploaders: Optional[int] = None,
    reorder_files: bool = True,
    error_when_not_empty: bool = False,
    reader: Optional[BaseReader] = None,
    batch_size: Optional[int] = None,
) -> None:
    """This function maps a callable over a collection of files possibly in a distributed way.
    Arguments:
        fn: A function to be executed over each input element
        inputs: A sequence of input to be processed by the `fn` function.
            Each input should contain at least a valid filepath.
        output_dir: The folder where the processed data should be stored.
        weights: Provide an associated weight to each input. This is used to balance work among workers.
        num_workers: The number of workers to use during processing
        fast_dev_run: Whether to use process only a sub part of the inputs
        num_nodes: When doing remote execution, the number of nodes to use. Only supported on https://lightning.ai/.
        machine: When doing remote execution, the machine to use. Only supported on https://lightning.ai/.
        num_downloaders: The number of downloaders per worker.
        num_uploaders: The number of uploaders per workers.
        reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
            Set this to ``False`` if the order in which samples are processed should be preserved.
        error_when_not_empty: Whether we should error if the output folder isn't empty.
        reader: An optional reader handed to the ``DataProcessor`` to load the items.
        batch_size: Group the inputs into batches of batch_size length.
    """
    # --- Validate the inputs before doing any work -------------------------
    if isinstance(inputs, StreamingDataLoader) and batch_size is not None:
        raise ValueError("When providing a streaming dataloader, pass the batch_size to the dataloader directly.")
    if isinstance(inputs, StreamingDataLoader) and weights is not None:
        raise ValueError("When providing a streaming dataloader, weights isn't supported.")
    if not isinstance(inputs, (Sequence, StreamingDataLoader)):
        raise ValueError(f"The provided inputs should be non empty sequence or a streaming dataloader. Found {inputs}.")
    if len(inputs) == 0:
        raise ValueError(f"The provided inputs should be non empty. Found {inputs}.")
    if not _IS_IN_STUDIO and (machine is not None or num_nodes is not None):
        raise ValueError(
            "Only https://lightning.ai/ supports multiple nodes or selecting a machine."
            " Create an account to try it out."
        )
    if not _IS_IN_STUDIO:
        print(
            "Create an account on https://lightning.ai/ to transform your data faster using "
            "multiple nodes and large machines."
        )
    # Run locally when no remote node count is requested, or when we are already
    # inside a launched multi-node job (DATA_OPTIMIZER_NUM_NODES > 0).
    if num_nodes is None or int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 0)) > 0:
        _output_dir: Dir = _resolve_dir(output_dir)
        if _output_dir.url and "cloudspaces" in _output_dir.url:
            raise ValueError(
                f"The provided `output_dir` isn't valid. Found {_output_dir.path if _output_dir else None}."
                " HINT: You can either use `/teamspace/s3_connections/...` or `/teamspace/datasets/...`."
            )
        if error_when_not_empty:
            _assert_dir_is_empty(_output_dir)
        if not isinstance(inputs, StreamingDataLoader):
            input_dir = _resolve_dir(_get_input_dir(inputs))
            # Optionally group the inputs so each worker call receives a batch.
            if isinstance(batch_size, int) and batch_size > 1:
                inputs = [inputs[pos : pos + batch_size] for pos in range(0, len(inputs), batch_size)]
        else:
            input_dir = Dir()
        data_processor = DataProcessor(
            input_dir=input_dir,
            output_dir=_output_dir,
            num_workers=num_workers or _get_default_num_workers(),
            fast_dev_run=fast_dev_run,
            num_downloaders=num_downloaders,
            num_uploaders=num_uploaders,
            reorder_files=reorder_files,
            weights=weights,
            reader=reader,
        )
        with optimize_dns_context(True):
            return data_processor.run(LambdaDataTransformRecipe(fn, inputs))
    # Remote execution: submit the job to lightning.ai.
    return _execute(
        f"data-prep-map-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}",
        num_nodes,
        machine,
    )
def optimize(
    fn: Callable[[Any], Any],
    inputs: Sequence[Any],
    output_dir: str,
    weights: Optional[List[int]] = None,
    chunk_size: Optional[int] = None,
    chunk_bytes: Optional[Union[int, str]] = None,
    compression: Optional[str] = None,
    num_workers: Optional[int] = None,
    fast_dev_run: bool = False,
    num_nodes: Optional[int] = None,
    machine: Optional[str] = None,
    num_downloaders: Optional[int] = None,
    num_uploaders: Optional[int] = None,
    reorder_files: bool = True,
    reader: Optional[BaseReader] = None,
    batch_size: Optional[int] = None,
) -> None:
    """This function converts a dataset into chunks possibly in a distributed way.
    Arguments:
        fn: A function to be executed over each input element
        inputs: A sequence of input to be processed by the `fn` function.
            Each input should contain at least a valid filepath.
        output_dir: The folder where the processed data should be stored.
        weights: Provide an associated weight to each input. This is used to balance work among workers.
        chunk_size: The maximum number of elements to hold within a chunk.
        chunk_bytes: The maximum number of bytes to hold within a chunk.
        compression: The compression algorithm to use over the chunks.
        num_workers: The number of workers to use during processing
        fast_dev_run: Whether to use process only a sub part of the inputs
        num_nodes: When doing remote execution, the number of nodes to use. Only supported on https://lightning.ai/.
        machine: When doing remote execution, the machine to use. Only supported on https://lightning.ai/.
        num_downloaders: The number of downloaders per worker.
        num_uploaders: The numbers of uploaders per worker.
        reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
            Set this to ``False`` if the order in which samples are processed should be preserved.
        reader: An optional reader handed to the ``DataProcessor`` to load the items.
        batch_size: Group the inputs into batches of batch_size length.
    """
    # --- Validate the inputs before doing any work -------------------------
    if isinstance(inputs, StreamingDataLoader) and batch_size is not None:
        raise ValueError("When providing a streaming dataloader, pass the batch_size to the dataloader directly.")
    if isinstance(inputs, StreamingDataLoader) and weights is not None:
        raise ValueError("When providing a streaming dataloader, weights isn't supported.")
    if not isinstance(inputs, (Sequence, StreamingDataLoader)):
        raise ValueError(f"The provided inputs should be non empty sequence or a streaming dataloader. Found {inputs}.")
    if len(inputs) == 0:
        raise ValueError(f"The provided inputs should be non empty. Found {inputs}.")
    if chunk_size is None and chunk_bytes is None:
        raise ValueError("Either `chunk_size` or `chunk_bytes` needs to be defined.")
    if not _IS_IN_STUDIO and (machine is not None or num_nodes is not None):
        # Fix: restored the missing space between the two concatenated sentences
        # (previously rendered as "...machine.Create an account..."), matching `map`.
        raise ValueError(
            "Only https://lightning.ai/ supports multiple nodes or selecting a machine."
            " Create an account to try it out."
        )
    if not _IS_IN_STUDIO:
        print(
            "Create an account on https://lightning.ai/ to optimize your data faster "
            "using multiple nodes and large machines."
        )
    # Run locally when no remote node count is requested, or when we are already
    # inside a launched multi-node job (DATA_OPTIMIZER_NUM_NODES > 0).
    if num_nodes is None or int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 0)) > 0:
        _output_dir: Dir = _resolve_dir(output_dir)
        if _output_dir.url is not None and "cloudspaces" in _output_dir.url:
            raise ValueError(
                f"The provided `output_dir` isn't valid. Found {_output_dir.path}."
                " HINT: You can either use `/teamspace/s3_connections/...` or `/teamspace/datasets/...`."
            )
        _assert_dir_has_index_file(_output_dir)
        if not isinstance(inputs, StreamingDataLoader):
            input_dir = _resolve_dir(_get_input_dir(inputs))
            # Optionally group the inputs so each worker call receives a batch.
            if isinstance(batch_size, int) and batch_size > 1:
                inputs = [inputs[pos : pos + batch_size] for pos in range(0, len(inputs), batch_size)]
        else:
            input_dir = Dir()
        data_processor = DataProcessor(
            input_dir=input_dir,
            output_dir=_output_dir,
            num_workers=num_workers or _get_default_num_workers(),
            fast_dev_run=fast_dev_run,
            num_downloaders=num_downloaders,
            num_uploaders=num_uploaders,
            reorder_files=reorder_files,
            # Fix: `weights` was accepted, documented and validated but silently
            # dropped; forward it like `map` does so work is balanced as documented.
            weights=weights,
            reader=reader,
        )
        with optimize_dns_context(True):
            data_processor.run(
                LambdaDataChunkRecipe(
                    fn,
                    inputs,
                    chunk_size=chunk_size,
                    chunk_bytes=chunk_bytes,
                    compression=compression,
                )
            )
        return None
    # Remote execution: submit the job to lightning.ai.
    return _execute(
        f"data-prep-optimize-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}",
        num_nodes,
        machine,
    )
def _listdir(folder: str) -> Tuple[str, List[str]]:
return folder, os.listdir(folder)
class walk:
    """This class is an optimized version of os.walk for listing files and folders from cloud filesystem.
    Note: The order of files and folders yielded aren't depth-first anymore due to the asynchronous listing call.
    """
    def __init__(self, folder: str, max_workers: Optional[int] = os.cpu_count()) -> None:
        # Queue of folders still awaiting a listdir call, seeded with the root.
        # NOTE: the default `os.cpu_count()` is bound once at import time.
        self.folders = [folder]
        self.max_workers = max_workers or 1
        # In-flight listdir futures; pruned as each one completes.
        self.futures: List[concurrent.futures.Future] = []
        if not _IS_IN_STUDIO:
            print("This method is optimized to run on https://lightning.ai/. Don't use it otherwise.")
    def __iter__(self) -> Any:
        """This function queues the folders to perform listdir across multiple workers."""
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            # Submit the folders discovered so far (initially just the root).
            while len(self.folders):
                folder = self.folders.pop(0)
                future = executor.submit(_listdir, folder)
                self.futures.append(future)
            while self.futures:
                for future in concurrent.futures.as_completed(self.futures):
                    filenames = []
                    folders = []
                    folder, files_or_folders = future.result()
                    # Drop the completed future so the outer loop can terminate.
                    self.futures = [f for f in self.futures if f != future]
                    for file_or_folder in files_or_folders:
                        if os.path.isfile(os.path.join(folder, file_or_folder)):
                            filenames.append(file_or_folder)
                        else:
                            # Sub-folders are queued for their own asynchronous listing.
                            folders.append(file_or_folder)
                            self.folders.append(os.path.join(folder, file_or_folder))
                    yield folder, folders, filenames
                    # Throttle: keep at most ~2x max_workers listings in flight.
                    while len(self.folders) and len(self.futures) <= self.max_workers * 2:
                        folder = self.folders.pop(0)
                        future = executor.submit(_listdir, folder)
                        self.futures.append(future)
            return
|
evocodebench_data_155
|
import concurrent
import json
import logging
import os
import random
import shutil
import signal
import tempfile
import traceback
import types
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Process, Queue
from pathlib import Path
from queue import Empty
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from urllib import parse
import numpy as np
import torch
from tqdm.auto import tqdm as _tqdm
from litdata.constants import (
_BOTO3_AVAILABLE,
_DEFAULT_FAST_DEV_RUN_ITEMS,
_INDEX_FILENAME,
_IS_IN_STUDIO,
_LIGHTNING_CLOUD_LATEST,
_TORCH_GREATER_EQUAL_2_1_0,
)
from litdata.processing.readers import BaseReader, StreamingDataLoaderReader
from litdata.processing.utilities import _create_dataset
from litdata.streaming import Cache
from litdata.streaming.cache import Dir
from litdata.streaming.client import S3Client
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import _resolve_dir
from litdata.utilities.broadcast import broadcast_object
from litdata.utilities.packing import _pack_greedily
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten, tree_unflatten, treespec_loads
if _LIGHTNING_CLOUD_LATEST:
from lightning_cloud.openapi import V1DatasetType
if _BOTO3_AVAILABLE:
import botocore
logger = logging.Logger(__name__)
def _get_num_nodes() -> int:
"""Returns the number of nodes."""
return int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 1))
def _get_node_rank() -> int:
"""Returns the current node rank of the instance."""
return int(os.getenv("DATA_OPTIMIZER_NODE_RANK", 0))
def _get_fast_dev_run() -> int:
"""Returns whether fast dev mode is enabled."""
return bool(int(os.getenv("DATA_OPTIMIZER_FAST_DEV_RUN", 1)))
def _get_default_cache() -> str:
    """Root cache folder: the Studio-provided ``/cache`` on lightning.ai, else the OS temp dir."""
    if _IS_IN_STUDIO:
        return "/cache"
    return tempfile.gettempdir()
def _get_cache_dir(name: Optional[str] = None) -> str:
    """Returns the cache directory used by the Cache to store the chunks."""
    base = os.getenv("DATA_OPTIMIZER_CACHE_FOLDER", f"{_get_default_cache()}/chunks")
    return base if name is None else os.path.join(base, name.lstrip("/"))
def _get_cache_data_dir(name: Optional[str] = None) -> str:
    """Returns the cache data directory used by the DataProcessor workers to download the files."""
    base = os.getenv("DATA_OPTIMIZER_DATA_CACHE_FOLDER", f"{_get_default_cache()}/data")
    if name is None:
        return base
    return os.path.join(base, name.lstrip("/"))
def _wait_for_file_to_exist(s3: S3Client, obj: parse.ParseResult, sleep_time: int = 2) -> Any:
    """Poll S3 until the object described by *obj* exists, then return its HEAD metadata."""
    while True:
        try:
            return s3.client.head_object(Bucket=obj.netloc, Key=obj.path.lstrip("/"))
        except botocore.exceptions.ClientError as exc:
            # A "Not Found" on HeadObject means the upload isn't visible yet: keep polling.
            if "the HeadObject operation: Not Found" not in str(exc):
                raise exc
            sleep(sleep_time)
def _wait_for_disk_usage_higher_than_threshold(input_dir: str, threshold_in_gb: int = 25, sleep_time: int = 3) -> None:
usage = shutil.disk_usage(input_dir)
while (usage.free / 1000 / 1000 / 1000) <= threshold_in_gb:
sleep(sleep_time)
usage = shutil.disk_usage(input_dir)
return
def _download_data_target(input_dir: Dir, cache_dir: str, queue_in: Queue, queue_out: Queue) -> None:
    """This function is used to download data from a remote directory to a cache directory to optimise reading.

    Runs as a worker-process loop: receives ``(index, paths)`` batches on *queue_in*
    and reports each index on *queue_out* once its files are locally available.
    A ``None`` on *queue_in* is the termination signal.
    """
    s3 = S3Client()
    while True:
        # 1. Fetch from the queue
        r: Optional[Tuple[int, List[str]]] = queue_in.get()
        # 2. Terminate the process if we received a termination signal
        if r is None:
            queue_out.put(None)
            return
        # 3. Unpack
        index, paths = r
        # 4. Check whether all the files are already downloaded
        if input_dir.path and all(
            os.path.exists(p.replace(input_dir.path, cache_dir) if input_dir else p) for p in paths
        ):
            queue_out.put(index)
            continue
        if input_dir.url is not None or input_dir.path is not None:
            if input_dir.url:
                # 5. Wait for the removers to catch up when we are downloading data.
                _wait_for_disk_usage_higher_than_threshold("/", 25)
            # 6. Download all the required paths to unblock the current index
            # NOTE(review): `local_path` is only bound when input_dir.path is set;
            # the s3 branch below assumes it — confirm callers always provide
            # input_dir.path alongside an s3 URL.
            for path in paths:
                if input_dir.path:
                    local_path = path.replace(input_dir.path, cache_dir)
                if input_dir.url and input_dir.path:
                    path = path.replace(input_dir.path, input_dir.url)
                obj = parse.urlparse(path)
                if obj.scheme == "s3":
                    dirpath = os.path.dirname(local_path)
                    os.makedirs(dirpath, exist_ok=True)
                    with open(local_path, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif os.path.isfile(path):
                    # Files under the Studio's own filesystem aren't copied.
                    if not path.startswith("/teamspace/studios/this_studio"):
                        os.makedirs(os.path.dirname(local_path), exist_ok=True)
                        shutil.copyfile(path, local_path)
                else:
                    raise ValueError(f"The provided {input_dir.url} isn't supported.")
        # 7. Inform the worker the current files are available
        queue_out.put(index)
def _remove_target(input_dir: Dir, cache_dir: str, queue_in: Queue) -> None:
    """Consume batches of filepaths from *queue_in* and delete them until ``None`` arrives."""
    while True:
        # Collect the next batch (None is the termination sentinel).
        paths = queue_in.get()
        if paths is None:
            return
        # Delete each path sequentially.
        for path in paths:
            if not input_dir:
                # Without an input_dir, never touch connection mounts.
                if os.path.exists(path) and "s3_connections" not in path:
                    os.remove(path)
                continue
            # Map the original input path back into its local cache copy.
            if input_dir.path is not None and not path.startswith(cache_dir):
                path = path.replace(input_dir.path, cache_dir)
            if os.path.exists(path):
                os.remove(path)
def _upload_fn(upload_queue: Queue, remove_queue: Queue, cache_dir: str, output_dir: Dir) -> None:
    """This function is used to upload optimised chunks from a local to remote dataset directory.

    Runs as a worker-process loop: each queue entry is either a filepath or a
    ``(tmpdir, filepath)`` pair; ``None`` (as the filepath) terminates the loop.
    """
    obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
    if obj.scheme == "s3":
        s3 = S3Client()
    while True:
        data: Optional[Union[str, Tuple[str, str]]] = upload_queue.get()
        tmpdir = None
        if isinstance(data, str) or data is None:
            local_filepath = data
        else:
            tmpdir, local_filepath = data
        # Terminate the process if we received a termination signal
        if local_filepath is None:
            return
        # Upload the file to the target cloud storage
        if not local_filepath.startswith(cache_dir):
            local_filepath = os.path.join(cache_dir, local_filepath)
        if obj.scheme == "s3":
            try:
                # With a tmpdir, preserve the file's relative layout under the destination.
                if tmpdir is None:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
                else:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), local_filepath.replace(tmpdir, "")[1:])
                s3.client.upload_file(
                    local_filepath,
                    obj.netloc,
                    output_filepath,
                )
            except Exception as e:
                # Best-effort upload: log the failure and keep consuming the queue.
                print(e)
        elif output_dir.path:
            if tmpdir is None:
                output_filepath = os.path.join(output_dir.path, os.path.basename(local_filepath))
            else:
                output_filepath = os.path.join(output_dir.path, local_filepath.replace(tmpdir, "")[1:])
            os.makedirs(os.path.dirname(output_filepath), exist_ok=True)
            shutil.move(local_filepath, output_filepath)
        else:
            raise ValueError(f"The provided {output_dir.path} isn't supported.")
        # Inform the remover to delete the file
        if remove_queue and os.path.exists(local_filepath):
            remove_queue.put([local_filepath])
def _map_items_to_workers_sequentially(num_workers: int, user_items: List[Any]) -> List[List[Any]]:
    """Split *user_items* into contiguous per-worker slices for the current node.

    Items are divided as evenly as possible among ``num_nodes * num_workers``
    workers (earlier workers absorb the remainder), and only the ``num_workers``
    slices belonging to this node (``DATA_OPTIMIZER_NODE_RANK``) are returned,
    preserving the original item order.

    Fixes: the previous version computed ``node_rank`` but never used it — it
    returned all nodes' slices and then raised ``RuntimeError`` whenever more
    than one node was configured; it also carried dead in-function imports.
    """
    total_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    total_workers = total_nodes * num_workers
    items_per_worker = len(user_items) // total_workers
    extra_items = len(user_items) % total_workers
    start = 0
    all_workers: List[List[Any]] = []
    for worker_idx in range(total_workers):
        # The first `extra_items` workers receive one extra item each.
        worker_count = items_per_worker + 1 if worker_idx < extra_items else items_per_worker
        all_workers.append(user_items[start : start + worker_count])
        start += worker_count
    # Keep only the slices assigned to this node.
    result = all_workers[node_rank * num_workers : (node_rank + 1) * num_workers]
    if len(result) != num_workers:
        raise RuntimeError("Improper assignment of items to workers")
    return result
def _map_items_to_workers_weighted(
    num_workers: int,
    user_items: List[Any],
    weights: Optional[List[int]] = None,
    file_size: bool = True,
) -> List[List[Any]]:
    """Greedily balance *user_items* by weight across all workers of all nodes and
    return this node's shares, each shuffled."""
    if weights is None:
        weights = [1] * len(user_items)
    num_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    world_size = num_nodes * num_workers
    worker_items, worker_weights = _pack_greedily(items=user_items, weights=weights, num_bins=world_size)
    worker_ids_this_node = range(node_rank * num_workers, (node_rank + 1) * num_workers)
    # Report the load assigned to each of this node's workers.
    for worker_id, size in worker_weights.items():
        if worker_id not in worker_ids_this_node:
            continue
        num_files = len(worker_items[worker_id])
        if file_size:
            print(f"Worker {worker_id} gets {size / 1e6:.1f} MB ({num_files} files)")
        else:
            print(f"Worker {worker_id} gets ({num_files}) items for a total weight of {size}.")
    return [np.random.permutation(worker_items[worker_id]).tolist() for worker_id in worker_ids_this_node]
def _get_num_bytes(item: Any, base_path: str) -> int:
flattened_item, _ = tree_flatten(item)
num_bytes = 0
for element in flattened_item:
if isinstance(element, str):
element = Path(element).resolve()
if not element.exists():
continue
file_bytes = os.path.getsize(element)
if file_bytes == 0:
raise RuntimeError(f"The file {element} has 0 bytes!")
num_bytes += file_bytes
return num_bytes
def _get_item_filesizes(items: List[Any], base_path: str = "") -> List[int]:
    """Computes the total size in bytes of all file paths for every datastructure in the given list."""
    cpu_count = os.cpu_count() or 1
    # Oversubscribe larger machines: size lookups are I/O bound.
    max_workers = cpu_count * 2 if cpu_count > 4 else cpu_count
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(_get_num_bytes, item, base_path) for item in items]
        # Collect in submission order so sizes stay aligned with `items`.
        return [future.result() for future in futures]
def _to_path(element: str) -> str:
    """Keep Studio teamspace paths as-is; resolve everything else to an absolute path."""
    if _IS_IN_STUDIO and element.startswith("/teamspace"):
        return element
    return str(Path(element).resolve())
def _is_path(input_dir: Optional[str], element: Any) -> bool:
    """Return whether *element* is a string denoting a real filepath.

    Inside a Studio, anything under *input_dir* counts as a path even when the
    file hasn't been downloaded locally yet.
    """
    if not isinstance(element, str):
        return False
    candidate = element
    if _IS_IN_STUDIO and input_dir is not None:
        if candidate.startswith(input_dir):
            return True
        # Retry with the absolutized form of the same path.
        candidate = str(Path(candidate).absolute())
        if candidate.startswith(input_dir):
            return True
    return os.path.exists(candidate)
class BaseWorker:
def __init__(
self,
worker_index: int,
num_workers: int,
node_rank: int,
data_recipe: "DataRecipe",
input_dir: Dir,
output_dir: Dir,
items: List[Any],
progress_queue: Queue,
error_queue: Queue,
stop_queue: Queue,
num_downloaders: int,
num_uploaders: int,
remove: bool,
reader: Optional[BaseReader] = None,
) -> None:
"""The BaseWorker is responsible to process the user data."""
self.worker_index = worker_index
self.num_workers = num_workers
self.node_rank = node_rank
self.data_recipe = data_recipe
self.input_dir = input_dir
self.output_dir = output_dir
self.items = items
self.num_items = len(self.items)
self.num_downloaders = num_downloaders
self.num_uploaders = num_uploaders
self.remove = remove
self.reader = reader
self.paths: List[List[str]] = []
self.remover: Optional[Process] = None
self.downloaders: List[Process] = []
self.uploaders: List[Process] = []
self.to_download_queues: List[Queue] = []
self.to_upload_queues: List[Queue] = []
self.stop_queue = stop_queue
self.ready_to_process_queue: Queue = Queue()
self.remove_queue: Queue = Queue()
self.progress_queue: Queue = progress_queue
self.error_queue: Queue = error_queue
self._counter = 0
self._last_time = time()
self._index_counter = 0
def run(self) -> None:
try:
self._setup()
self._loop()
except Exception:
traceback_format = traceback.format_exc()
print(traceback_format)
self.error_queue.put(traceback_format)
print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is done.")
def _setup(self) -> None:
self._set_environ_variables()
self._create_cache()
self._collect_paths()
self._start_downloaders()
self._start_uploaders()
self._start_remover()
def _loop(self) -> None:
num_downloader_finished = 0
while True:
index = self.ready_to_process_queue.get()
if index is None:
num_downloader_finished += 1
if num_downloader_finished == self.num_downloaders:
print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is terminating.")
if isinstance(self.data_recipe, DataChunkRecipe):
self._handle_data_chunk_recipe_end()
if self.output_dir.url if self.output_dir.url else self.output_dir.path:
# Inform the uploaders they are doing working
for i in range(self.num_uploaders):
self.to_upload_queues[i].put(None)
# Wait for them all to be finished
for uploader in self.uploaders:
uploader.join()
if self.remove:
assert self.remover
self.remove_queue.put(None)
self.remover.join()
if self.progress_queue:
self.progress_queue.put((self.worker_index, self._counter))
return
continue
if isinstance(self.data_recipe, DataChunkRecipe):
self._handle_data_chunk_recipe(index)
else:
self._handle_data_transform_recipe(index)
self._counter += 1
# Don't send the last progress update, so the main thread awaits for the uploader and remover
if self.progress_queue and (time() - self._last_time) > 1 and self._counter < (self.num_items - 2):
self.progress_queue.put((self.worker_index, self._counter))
self._last_time = time()
if self.remove and self.input_dir.path is not None and self.reader is None:
self.remove_queue.put(self.paths[index])
try:
self.stop_queue.get(timeout=0.0001)
return
except Empty:
pass
def _set_environ_variables(self) -> None:
# set the optimizer global rank and world_size
os.environ["DATA_OPTIMIZER_GLOBAL_RANK"] = str(_get_node_rank() * self.num_workers + self.worker_index)
os.environ["DATA_OPTIMIZER_NUM_WORKERS"] = str(self.num_workers)
def _create_cache(self) -> None:
self.cache_data_dir = _get_cache_data_dir()
os.makedirs(self.cache_data_dir, exist_ok=True)
self.cache_chunks_dir = _get_cache_dir()
os.makedirs(self.cache_chunks_dir, exist_ok=True)
if isinstance(self.data_recipe, DataTransformRecipe):
return
self.cache = Cache(
self.cache_chunks_dir,
chunk_bytes=self.data_recipe.chunk_bytes,
chunk_size=self.data_recipe.chunk_size,
compression=self.data_recipe.compression,
)
self.cache._reader._rank = _get_node_rank() * self.num_workers + self.worker_index
def _try_upload(self, data: Optional[Union[str, Tuple[str, str]]]) -> None:
if not data or (self.output_dir.url if self.output_dir.url else self.output_dir.path) is None:
return
if isinstance(data, str):
assert os.path.exists(data), data
else:
assert os.path.exists(data[-1]), data
self.to_upload_queues[self._counter % self.num_uploaders].put(data)
def _collect_paths(self) -> None:
if self.input_dir.path is None or self.reader is not None:
for index in range(len(self.items)):
self.ready_to_process_queue.put(index)
for _ in range(self.num_downloaders):
self.ready_to_process_queue.put(None)
return
items = []
for item in self.items:
flattened_item, spec = tree_flatten(item)
# For speed reasons, we assume starting with `self.input_dir` is enough to be a real file.
# Other alternative would be too slow.
# TODO: Try using dictionary for higher accurary.
indexed_paths = {
index: _to_path(element)
for index, element in enumerate(flattened_item)
if _is_path(self.input_dir.path, element)
}
if len(indexed_paths) == 0:
raise ValueError(
f"The provided item {item} didn't contain any filepaths. The input_dir is {self.input_dir.path}."
)
paths = []
for index, path in indexed_paths.items():
paths.append(path)
if self.input_dir and not self.input_dir.path.startswith("/teamspace/studios/this_studio"):
path = path.replace(self.input_dir.path, self.cache_data_dir)
flattened_item[index] = path
self.paths.append(paths)
items.append(tree_unflatten(flattened_item, spec))
self.items = items
def _start_downloaders(self) -> None:
if self.input_dir.path is None or self.reader is not None:
return
for _ in range(self.num_downloaders):
to_download_queue: Queue = Queue()
p = Process(
target=_download_data_target,
args=(
self.input_dir,
self.cache_data_dir,
to_download_queue,
self.ready_to_process_queue,
),
)
p.start()
self.downloaders.append(p)
self.to_download_queues.append(to_download_queue)
for index, paths in enumerate(self.paths):
self.to_download_queues[index % self.num_downloaders].put((index, paths))
for downloader_index in range(self.num_downloaders):
self.to_download_queues[downloader_index].put(None)
def _start_remover(self) -> None:
if not self.remove:
return
self.remover = Process(
target=_remove_target,
args=(
self.input_dir,
self.cache_data_dir,
self.remove_queue,
),
)
self.remover.start()
def _start_uploaders(self) -> None:
if self.output_dir.path is None and self.output_dir.url is None:
return
for _ in range(self.num_uploaders):
to_upload_queue: Queue = Queue()
p = Process(
target=_upload_fn,
args=(
to_upload_queue,
self.remove_queue,
self.cache_chunks_dir,
self.output_dir,
),
)
p.start()
self.uploaders.append(p)
self.to_upload_queues.append(to_upload_queue)
def _handle_data_chunk_recipe(self, index: int) -> None:
    """Run the recipe's ``prepare_item`` on item ``index`` and persist its output.

    ``prepare_item`` may return either a single value or a generator; every
    non-None result is appended to the cache, and any chunk file completed by
    that append is scheduled for upload. Any failure is re-raised with the
    offending item attached for easier debugging.
    """
    try:
        # A custom reader, when present, is responsible for materializing the item.
        current_item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
        item_data_or_generator = self.data_recipe.prepare_item(current_item)
        if isinstance(item_data_or_generator, types.GeneratorType):
            for item_data in item_data_or_generator:
                if item_data is not None:
                    chunk_filepath = self.cache._add_item(self._index_counter, item_data)
                    self._try_upload(chunk_filepath)
                    self._index_counter += 1
        elif item_data_or_generator is not None:
            chunk_filepath = self.cache._add_item(self._index_counter, item_data_or_generator)
            self._try_upload(chunk_filepath)
            self._index_counter += 1
    except Exception as e:
        raise RuntimeError(f"Failed processing {self.items[index]}") from e
def _handle_data_chunk_recipe_end(self) -> None:
    """Flush the cache and hand any remaining chunk files to the uploaders."""
    remaining_chunks = self.cache.done()
    if not remaining_chunks or not len(self.to_upload_queues):
        return
    for position, filepath in enumerate(remaining_chunks):
        if isinstance(filepath, str) and os.path.exists(filepath):
            # Spread the leftover chunks round-robin across the uploaders.
            target_queue = self.to_upload_queues[position % self.num_uploaders]
            target_queue.put(filepath)
def _handle_data_transform_recipe(self, index: int) -> None:
    """Run a ``DataTransformRecipe`` on item ``index`` and upload whatever it wrote."""
    # Don't use a context manager to avoid deleting files that are being uploaded.
    scratch_dir = tempfile.mkdtemp()
    if self.reader is None:
        item = self.items[index]
    else:
        item = self.reader.read(self.items[index])
    is_last_item = len(self.items) - 1 == index
    returned = self.data_recipe.prepare_item(item, str(scratch_dir), is_last_item)
    if returned is not None:
        raise ValueError(
            "When using a `DataTransformRecipe`, the `prepare_item` shouldn't return anything."
            " Simply store your files under the output_dir."
        )
    # Collect every file the recipe stored, then queue each one for upload.
    produced_files = [
        os.path.join(directory, filename)
        for directory, _, filenames in os.walk(scratch_dir)
        for filename in filenames
    ]
    for produced in produced_files:
        self._try_upload((scratch_dir, produced))
class DataWorkerProcess(BaseWorker, Process):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """The DataWorkerProcess is responsible to process the user data inside processes."""
        # Both bases need explicit initialization: BaseWorker stores the work
        # description, Process prepares the multiprocessing machinery.
        BaseWorker.__init__(self, *args, **kwargs)
        Process.__init__(self)
@dataclass
class _Result:
size: Optional[int] = None
num_bytes: Optional[str] = None
data_format: Optional[str] = None
compression: Optional[str] = None
num_chunks: Optional[int] = None
num_bytes_per_chunk: Optional[List[int]] = None
# Generic item-metadata type produced by `prepare_structure` and consumed by `prepare_item`.
T = TypeVar("T")
class DataRecipe:
    """Base interface for data-processing recipes.

    NOTE(review): uses ``@abstractmethod`` without inheriting ``abc.ABC``, so
    instantiation of incomplete subclasses is not actually blocked — presumably
    intentional (used only as a marker); confirm.
    """

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        pass

    @abstractmethod
    def prepare_item(self, *args: Any, **kwargs: Any) -> Any:
        pass

    def __init__(self) -> None:
        # Optional human-readable name; not used within this class itself.
        self._name: Optional[str] = None

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        # Default completion hook: report only the dataset size.
        return _Result(size=size)
class DataChunkRecipe(DataRecipe):
    """Recipe whose per-item outputs are serialized into chunked binary files.

    At most one of ``chunk_size`` (items per chunk) or ``chunk_bytes`` (bytes
    per chunk) may be provided; with neither, chunks default to 64 MB.
    """

    def __init__(
        self,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
    ):
        super().__init__()
        if chunk_size is not None and chunk_bytes is not None:
            raise ValueError("Only one of `chunk_size` or `chunk_bytes` can be provided.")

        self.chunk_size = chunk_size
        # BUGFIX: default to 64 MB only when *neither* limit was given. The
        # previous condition (`chunk_size is None`) silently replaced any
        # user-supplied `chunk_bytes` with the default, since both values can
        # never be set at the same time.
        self.chunk_bytes = 1 << 26 if chunk_size is None and chunk_bytes is None else chunk_bytes
        self.compression = compression

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T) -> Any:
        """The return of this `prepare_item` method is persisted in chunked binary files."""

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Merge per-worker indexes, upload the final index and summarize the run.

        Returns a ``_Result`` with full statistics on the last node, or only
        the dataset size on other nodes.
        """
        num_nodes = _get_num_nodes()
        cache_dir = _get_cache_dir()

        chunks = [file for file in os.listdir(cache_dir) if file.endswith(".bin")]
        if chunks and delete_cached_files and output_dir.path is not None:
            raise RuntimeError(f"All the chunks should have been deleted. Found {chunks}")

        merge_cache = Cache(cache_dir, chunk_bytes=1)
        node_rank = _get_node_rank()
        # Namespace the merged index per node when running multi-node.
        merge_cache._merge_no_wait(node_rank if num_nodes > 1 else None)
        self._upload_index(output_dir, cache_dir, num_nodes, node_rank)

        # Only the last node reads the final index to compute aggregate statistics.
        if num_nodes == node_rank + 1:
            with open(os.path.join(cache_dir, _INDEX_FILENAME)) as f:
                config = json.load(f)

            size = sum([c["dim"] if c["dim"] is not None else c["chunk_size"] for c in config["chunks"]])
            num_bytes = sum([c["chunk_bytes"] for c in config["chunks"]])
            if config["config"] is not None:
                data_format = tree_unflatten(
                    config["config"]["data_format"], treespec_loads(config["config"]["data_spec"])
                )
            else:
                data_format = None
            num_chunks = len(config["chunks"])

            # The platform can't store more than 1024 entries.
            # Note: This isn't really used right now, so it is fine to skip if too big.
            num_bytes_per_chunk = [c["chunk_size"] for c in config["chunks"]] if num_chunks < 1024 else []

            return _Result(
                size=size,
                num_bytes=num_bytes,
                data_format=data_format,
                compression=config["config"]["compression"] if config["config"] else None,
                num_chunks=len(config["chunks"]),
                num_bytes_per_chunk=num_bytes_per_chunk,
            )
        return _Result(
            size=size,
        )

    def _upload_index(self, output_dir: Dir, cache_dir: str, num_nodes: int, node_rank: Optional[int]) -> None:
        """This method upload the index file to the remote cloud directory."""
        if output_dir.path is None and output_dir.url is None:
            return

        obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
        # Multi-node runs upload a per-node index file; single-node the final one.
        if num_nodes > 1:
            local_filepath = os.path.join(cache_dir, f"{node_rank}-{_INDEX_FILENAME}")
        else:
            local_filepath = os.path.join(cache_dir, _INDEX_FILENAME)

        if obj.scheme == "s3":
            s3 = S3Client()
            s3.client.upload_file(
                local_filepath, obj.netloc, os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
            )
        elif output_dir.path and os.path.isdir(output_dir.path):
            shutil.copyfile(local_filepath, os.path.join(output_dir.path, os.path.basename(local_filepath)))

        if num_nodes == 1 or node_rank is None:
            return

        # Merge the index files generated by each node.
        # Note: When using the Data Optimizer, they should be a single process on each node executing this section
        # So no risk to get race condition.
        if num_nodes == node_rank + 1:
            # Fetch every other node's index file locally.
            for rank in range(num_nodes - 1):
                output_dir_path = output_dir.url if output_dir.url else output_dir.path
                assert output_dir_path
                remote_filepath = os.path.join(output_dir_path, f"{rank}-{_INDEX_FILENAME}")
                node_index_filepath = os.path.join(cache_dir, os.path.basename(remote_filepath))
                if obj.scheme == "s3":
                    obj = parse.urlparse(remote_filepath)
                    # Other nodes may still be uploading; block until visible.
                    _wait_for_file_to_exist(s3, obj)
                    with open(node_index_filepath, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif output_dir.path and os.path.isdir(output_dir.path):
                    shutil.copyfile(remote_filepath, node_index_filepath)

            # Merge all per-node indexes into the final one and re-upload it
            # as a single-node index (recursing with num_nodes=1 terminates).
            merge_cache = Cache(cache_dir, chunk_bytes=1)
            merge_cache._merge_no_wait()
            self._upload_index(output_dir, cache_dir, 1, None)
class DataTransformRecipe(DataRecipe):
    """Recipe whose ``prepare_item`` writes transformed files directly to disk
    instead of returning data to be chunked."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T, output_dir: str, is_last: bool) -> None:
        """Use your item metadata to process your files and save the file outputs into `output_dir`."""
class DataProcessor:
    """Orchestrates multi-process execution of a ``DataRecipe`` over a dataset:
    assigns items to workers, spawns the worker processes, monitors progress
    and errors, and finalizes/uploads the result."""

    def __init__(
        self,
        input_dir: Union[str, Dir],
        output_dir: Optional[Union[str, Dir]] = None,
        num_workers: Optional[int] = None,
        num_downloaders: Optional[int] = None,
        num_uploaders: Optional[int] = None,
        delete_cached_files: bool = True,
        fast_dev_run: Optional[Union[bool, int]] = None,
        random_seed: Optional[int] = 42,
        reorder_files: bool = True,
        weights: Optional[List[int]] = None,
        reader: Optional[BaseReader] = None,
    ):
        """The `DatasetOptimiser` provides an efficient way to process data across multiple machine into chunks to make
        training faster.

        Arguments:
            input_dir: The path to where the input data are stored.
            output_dir: The path to where the output data are stored.
            num_workers: The number of worker threads to use.
            num_downloaders: The number of file downloaders to use.
            num_uploaders: The number of file uploaders to use.
            delete_cached_files: Whether to delete the cached files.
            fast_dev_run: Whether to run a quick dev run.
            random_seed: The random seed to be set before shuffling the data.
            reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
                Set this to ``False`` if the order in which samples are processed should be preserved.
            weights: Provide a list of weights associated to the inputs.
                This is used to evenly split the work among the workers.
            reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.
        """
        self.input_dir = _resolve_dir(input_dir)
        self.output_dir = _resolve_dir(output_dir)
        # Default to 4 workers per CPU, except during a fast dev run.
        self.num_workers = num_workers or (1 if fast_dev_run else (os.cpu_count() or 1) * 4)
        self.num_downloaders = num_downloaders or 2
        self.num_uploaders = num_uploaders or 5
        self.delete_cached_files = delete_cached_files
        self.fast_dev_run = _get_fast_dev_run() if fast_dev_run is None else fast_dev_run
        self.workers: Any = []
        # Maps worker index -> number of items it has processed so far.
        self.workers_tracker: Dict[int, int] = {}
        self.progress_queue: Optional[Queue] = None
        self.error_queue: Queue = Queue()
        self.stop_queues: List[Queue] = []
        self.reorder_files = reorder_files
        self.weights = weights
        self.reader = reader

        # A reader and explicit weights are mutually exclusive work-splitting strategies.
        if self.reader is not None and self.weights is not None:
            raise ValueError("Either the reader or the weights needs to be defined.")

        # Ensure the input dir is the same across all nodes
        self.input_dir = broadcast_object("input_dir", self.input_dir)

        if self.output_dir:
            # Ensure the output dir is the same across all nodes
            self.output_dir = broadcast_object("output_dir", self.output_dir)
            print(f"Storing the files under {self.output_dir.path}")

        self.random_seed = random_seed

    def run(self, data_recipe: DataRecipe) -> None:
        """The `DataProcessor.run(...)` method triggers the data recipe processing over your dataset."""
        if not isinstance(data_recipe, DataRecipe):
            raise ValueError("The provided value should be a data recipe.")

        t0 = time()
        print(f"Setup started with fast_dev_run={self.fast_dev_run}.")

        # Force random seed to be fixed
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)

        # Call the setup method of the user
        user_items: List[Any] = data_recipe.prepare_structure(self.input_dir.path if self.input_dir else None)
        if not isinstance(user_items, (list, StreamingDataLoader)):
            raise ValueError("The `prepare_structure` should return a list of item metadata.")

        # A StreamingDataLoader result is wrapped in a reader so workers can
        # fetch slices of it instead of holding the whole loader.
        if isinstance(user_items, StreamingDataLoader):
            self.reader = StreamingDataLoaderReader(user_items)

        if self.reader:
            user_items = self.reader.remap_items(user_items, self.num_workers)

        # Pick the work-splitting strategy: explicit weights, file sizes, or sequential.
        if self.weights is not None:
            if len(self.weights) != len(user_items):
                raise ValueError("The provided weights length should match the inputs' length.")
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=self.weights, file_size=False
            )
        elif self.reorder_files and self.input_dir.path:
            # TODO: Only do this on node 0, and broadcast the item sizes to the other nodes.
            item_sizes = _get_item_filesizes(user_items, base_path=self.input_dir.path)
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=item_sizes
            )
        else:
            workers_user_items = _map_items_to_workers_sequentially(num_workers=self.num_workers, user_items=user_items)

        print(f"Setup finished in {round(time() - t0, 3)} seconds. Found {len(user_items)} items to process.")

        if self.fast_dev_run:
            # Note: `type(...) is int` deliberately excludes `True` (a bool), so
            # `fast_dev_run=True` falls back to the default item count.
            items_to_keep = self.fast_dev_run if type(self.fast_dev_run) is int else _DEFAULT_FAST_DEV_RUN_ITEMS
            workers_user_items = [w[:items_to_keep] for w in workers_user_items]
            print(f"Fast dev run is enabled. Limiting to {items_to_keep} items per process.")

        num_items = sum([len(items) for items in workers_user_items])

        self._cleanup_cache()

        print(f"Starting {self.num_workers} workers with {num_items} items.")

        # NOTE(review): this condition can never be True (`self.input_dir is None`
        # and a truthy `self.input_dir` are contradictory) and `self.src_resolver`
        # is never assigned anywhere — dead code; confirm before removing.
        if self.input_dir is None and self.src_resolver is not None and self.input_dir:
            self.input_dir = self.src_resolver(self.input_dir)
            print(f"The remote_dir is `{self.input_dir}`.")

        signal.signal(signal.SIGINT, self._signal_handler)

        self._create_process_workers(data_recipe, workers_user_items)

        print("Workers are ready ! Starting data processing...")

        current_total = 0
        has_failed = False
        pbar = _tqdm(
            desc="Progress",
            total=num_items,
            smoothing=0,
            position=-1,
            mininterval=1,
            leave=True,
            dynamic_ncols=True,
        )

        # Monitoring loop: surface worker errors, fold progress updates into
        # the bar, and stop once every item is done or all workers died.
        while True:
            try:
                error = self.error_queue.get(timeout=0.001)
                self._exit_on_error(error)
            except Empty:
                assert self.progress_queue
                try:
                    index, counter = self.progress_queue.get(timeout=0.001)
                except Empty:
                    continue
                self.workers_tracker[index] = counter
                new_total = sum(self.workers_tracker.values())

            pbar.update(new_total - current_total)
            current_total = new_total
            if current_total == num_items:
                break

            # Exit early if all the workers are done.
            # This means there were some kinda of errors.
            if all(not w.is_alive() for w in self.workers):
                has_failed = True
                break

        pbar.close()

        num_nodes = _get_num_nodes()
        node_rank = _get_node_rank()
        # TODO: Understand why it hangs.
        if num_nodes == 1:
            for w in self.workers:
                w.join(0)

        print("Workers are finished.")
        result = data_recipe._done(len(user_items), self.delete_cached_files, self.output_dir)

        # Only the last node registers the finished dataset with the platform.
        if num_nodes == node_rank + 1 and self.output_dir.url and _IS_IN_STUDIO:
            assert self.output_dir.path
            _create_dataset(
                input_dir=self.input_dir.path,
                storage_dir=self.output_dir.path,
                dataset_type=V1DatasetType.CHUNKED
                if isinstance(data_recipe, DataChunkRecipe)
                else V1DatasetType.TRANSFORMED,
                empty=False,
                size=result.size,
                num_bytes=result.num_bytes,
                data_format=result.data_format,
                compression=result.compression,
                num_chunks=result.num_chunks,
                num_bytes_per_chunk=result.num_bytes_per_chunk,
            )

        print("Finished data processing!")

        # TODO: Understand why it is required to avoid long shutdown.
        if _get_num_nodes() > 1:
            os._exit(int(has_failed))

    def _exit_on_error(self, error: str) -> None:
        """Join all workers briefly, then re-raise the worker error in the main process."""
        for w in self.workers:
            w.join(0)
        raise RuntimeError(f"We found the following error {error}.")

    def _create_process_workers(self, data_recipe: DataRecipe, workers_user_items: List[List[Any]]) -> None:
        """Spawn one `DataWorkerProcess` per item slice and start them all."""
        self.progress_queue = Queue()
        workers: List[DataWorkerProcess] = []
        stop_queues: List[Queue] = []
        for worker_idx, worker_user_items in enumerate(workers_user_items):
            stop_queues.append(Queue())
            worker = DataWorkerProcess(
                worker_idx,
                self.num_workers,
                _get_node_rank(),
                data_recipe,
                self.input_dir,
                self.output_dir,
                worker_user_items,
                self.progress_queue,
                self.error_queue,
                stop_queues[-1],
                self.num_downloaders,
                self.num_uploaders,
                self.delete_cached_files,
                self.reader,
            )
            worker.start()
            workers.append(worker)

        # Note: Don't store within the loop as weakref aren't serializable
        self.workers = workers
        self.stop_queues = stop_queues

    def _signal_handler(self, signal: Any, frame: Any) -> None:
        """On termination, we stop all the processes to avoid leaking RAM."""
        # Ask every worker to stop, give them a moment, then hard-exit.
        for stop_queue in self.stop_queues:
            stop_queue.put(None)

        for w in self.workers:
            w.join(0)

        os._exit(0)

    def _cleanup_cache(self) -> None:
        """Recreate both cache folders so no stale files from a previous run remain."""
        cache_dir = _get_cache_dir()

        # Cleanup the cache dir folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)

        os.makedirs(cache_dir, exist_ok=True)

        cache_data_dir = _get_cache_data_dir()

        # Cleanup the cache data folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_data_dir):
            shutil.rmtree(cache_data_dir, ignore_errors=True)

        os.makedirs(cache_data_dir, exist_ok=True)
|
evocodebench_data_156
|
import concurrent
import json
import logging
import os
import random
import shutil
import signal
import tempfile
import traceback
import types
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Process, Queue
from pathlib import Path
from queue import Empty
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from urllib import parse
import numpy as np
import torch
from tqdm.auto import tqdm as _tqdm
from litdata.constants import (
_BOTO3_AVAILABLE,
_DEFAULT_FAST_DEV_RUN_ITEMS,
_INDEX_FILENAME,
_IS_IN_STUDIO,
_LIGHTNING_CLOUD_LATEST,
_TORCH_GREATER_EQUAL_2_1_0,
)
from litdata.processing.readers import BaseReader, StreamingDataLoaderReader
from litdata.processing.utilities import _create_dataset
from litdata.streaming import Cache
from litdata.streaming.cache import Dir
from litdata.streaming.client import S3Client
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import _resolve_dir
from litdata.utilities.broadcast import broadcast_object
from litdata.utilities.packing import _pack_greedily
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten, tree_unflatten, treespec_loads
if _LIGHTNING_CLOUD_LATEST:
from lightning_cloud.openapi import V1DatasetType
if _BOTO3_AVAILABLE:
import botocore
# Use getLogger so the logger participates in the standard logging hierarchy
# (handlers/levels configured by the application). Instantiating `logging.Logger`
# directly creates a detached logger that ignores `logging` configuration.
logger = logging.getLogger(__name__)
def _get_num_nodes() -> int:
"""Returns the number of nodes."""
return int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 1))
def _get_node_rank() -> int:
"""Returns the current node rank of the instance."""
return int(os.getenv("DATA_OPTIMIZER_NODE_RANK", 0))
def _get_fast_dev_run() -> int:
"""Returns whether fast dev mode is enabled."""
return bool(int(os.getenv("DATA_OPTIMIZER_FAST_DEV_RUN", 1)))
def _get_default_cache() -> str:
    """Return the base cache location: a fixed path in Studio, else the OS temp dir."""
    if _IS_IN_STUDIO:
        return "/cache"
    return tempfile.gettempdir()
def _get_cache_dir(name: Optional[str] = None) -> str:
    """Returns the cache directory used by the Cache to store the chunks."""
    base_dir = os.getenv("DATA_OPTIMIZER_CACHE_FOLDER", f"{_get_default_cache()}/chunks")
    return base_dir if name is None else os.path.join(base_dir, name.lstrip("/"))
def _get_cache_data_dir(name: Optional[str] = None) -> str:
    """Returns the cache data directory used by the DataProcessor workers to download the files."""
    base_dir = os.getenv("DATA_OPTIMIZER_DATA_CACHE_FOLDER", f"{_get_default_cache()}/data")
    if name is None:
        return base_dir
    return os.path.join(base_dir, name.lstrip("/"))
def _wait_for_file_to_exist(s3: S3Client, obj: parse.ParseResult, sleep_time: int = 2) -> Any:
    """Block until the S3 object exists, polling HEAD every ``sleep_time`` seconds.

    Returns the HEAD response once the object is found; any client error other
    than Not Found is re-raised immediately.
    """
    while True:
        try:
            return s3.client.head_object(Bucket=obj.netloc, Key=obj.path.lstrip("/"))
        except botocore.exceptions.ClientError as e:
            # Not-found is expected while another node is still uploading; retry.
            if "the HeadObject operation: Not Found" in str(e):
                sleep(sleep_time)
            else:
                raise e
def _wait_for_disk_usage_higher_than_threshold(input_dir: str, threshold_in_gb: int = 25, sleep_time: int = 3) -> None:
usage = shutil.disk_usage(input_dir)
while (usage.free / 1000 / 1000 / 1000) <= threshold_in_gb:
sleep(sleep_time)
usage = shutil.disk_usage(input_dir)
return
def _download_data_target(input_dir: Dir, cache_dir: str, queue_in: Queue, queue_out: Queue) -> None:
    """This function is used to download data from a remote directory to a cache directory to optimise reading."""
    # 1. Create a single S3 client reused for every download in this process.
    s3 = S3Client()

    while True:
        # 2. Fetch from the queue
        r: Optional[Tuple[int, List[str]]] = queue_in.get()

        # 3. Terminate the process if we received a termination signal
        if r is None:
            queue_out.put(None)
            return

        # 4. Unpack
        index, paths = r

        # 5. Check whether all the files are already downloaded
        if input_dir.path and all(
            os.path.exists(p.replace(input_dir.path, cache_dir) if input_dir else p) for p in paths
        ):
            queue_out.put(index)
            continue

        if input_dir.url is not None or input_dir.path is not None:
            if input_dir.url:
                # 6. Wait for the removers to catch up when we are downloading data.
                _wait_for_disk_usage_higher_than_threshold("/", 25)

            # 7. Download all the required paths to unblock the current index
            for path in paths:
                if input_dir.path:
                    local_path = path.replace(input_dir.path, cache_dir)

                if input_dir.url and input_dir.path:
                    # Translate the local-style path into its remote URL.
                    path = path.replace(input_dir.path, input_dir.url)

                obj = parse.urlparse(path)

                if obj.scheme == "s3":
                    dirpath = os.path.dirname(local_path)
                    os.makedirs(dirpath, exist_ok=True)
                    with open(local_path, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif os.path.isfile(path):
                    # Studio-local files are readable in place; everything else is copied.
                    if not path.startswith("/teamspace/studios/this_studio"):
                        os.makedirs(os.path.dirname(local_path), exist_ok=True)
                        shutil.copyfile(path, local_path)
                else:
                    raise ValueError(f"The provided {input_dir.url} isn't supported.")

        # 7. Inform the worker the current files are available
        queue_out.put(index)
def _remove_target(input_dir: Dir, cache_dir: str, queue_in: Queue) -> None:
    """This function is used to delete files from the cache directory to minimise disk space."""
    while True:
        # 1. Collect paths
        paths = queue_in.get()

        # 2. Terminate the process if we received a termination signal
        if paths is None:
            return

        # 3. Iterate through the paths and delete them sequentially.
        for path in paths:
            if input_dir:
                if not path.startswith(cache_dir) and input_dir.path is not None:
                    # Map the original input path to its cached copy before deleting.
                    path = path.replace(input_dir.path, cache_dir)

                if os.path.exists(path):
                    os.remove(path)

            elif os.path.exists(path) and "s3_connections" not in path:
                os.remove(path)
def _upload_fn(upload_queue: Queue, remove_queue: Queue, cache_dir: str, output_dir: Dir) -> None:
    """This function is used to upload optimised chunks from a local to remote dataset directory."""
    obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)

    if obj.scheme == "s3":
        s3 = S3Client()

    while True:
        # Queue items are either a plain filepath or a (tmpdir, filepath) pair
        # produced by transform recipes.
        data: Optional[Union[str, Tuple[str, str]]] = upload_queue.get()

        tmpdir = None
        if isinstance(data, str) or data is None:
            local_filepath = data
        else:
            tmpdir, local_filepath = data

        # Terminate the process if we received a termination signal
        if local_filepath is None:
            return

        # Upload the file to the target cloud storage
        if not local_filepath.startswith(cache_dir):
            local_filepath = os.path.join(cache_dir, local_filepath)

        if obj.scheme == "s3":
            try:
                if tmpdir is None:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
                else:
                    # Preserve the file's location relative to the tmpdir.
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), local_filepath.replace(tmpdir, "")[1:])
                s3.client.upload_file(
                    local_filepath,
                    obj.netloc,
                    output_filepath,
                )
            except Exception as e:
                # NOTE(review): upload failures are only printed, not retried or
                # re-raised — presumably best-effort; confirm.
                print(e)
        elif output_dir.path:
            if tmpdir is None:
                output_filepath = os.path.join(output_dir.path, os.path.basename(local_filepath))
            else:
                output_filepath = os.path.join(output_dir.path, local_filepath.replace(tmpdir, "")[1:])
            os.makedirs(os.path.dirname(output_filepath), exist_ok=True)
            shutil.move(local_filepath, output_filepath)
        else:
            raise ValueError(f"The provided {output_dir.path} isn't supported.")

        # Inform the remover to delete the file
        if remove_queue and os.path.exists(local_filepath):
            remove_queue.put([local_filepath])
def _map_items_to_workers_sequentially(num_workers: int, user_items: List[Any]) -> List[List[Any]]:
    """Split ``user_items`` into contiguous per-worker slices for this node.

    Items are distributed over all workers of all nodes (extra items go to the
    lowest-indexed workers, as before), and only the ``num_workers`` slices
    belonging to the current node are returned.

    Fixes: the previous version built the slices for *every* node's workers
    and then raised ``RuntimeError("Improper assignment of items to workers")``
    whenever more than one node was configured (``len(result)`` was
    ``total_nodes * num_workers``, never ``num_workers``). It also shadowed
    the module-level ``os``/``typing`` imports with dead local imports.
    """
    total_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    total_workers = total_nodes * num_workers

    base_count, extra_items = divmod(len(user_items), total_workers)

    assignments: List[List[Any]] = []
    start = 0
    for worker_index in range(total_workers):
        # The first `extra_items` workers absorb one extra item each.
        end = start + base_count + (1 if worker_index < extra_items else 0)
        assignments.append(user_items[start:end])
        start = end

    # Return only the slices owned by this node's workers.
    return assignments[node_rank * num_workers : (node_rank + 1) * num_workers]
def _map_items_to_workers_weighted(
    num_workers: int,
    user_items: List[Any],
    weights: Optional[List[int]] = None,
    file_size: bool = True,
) -> List[List[Any]]:
    """Greedily pack items into per-worker bins by weight and return this node's bins.

    With ``weights=None`` every item weighs 1. ``file_size`` only affects how
    the per-worker summary is printed (bytes vs. abstract weight).
    """
    # Associate the items to the workers based on number of nodes and node rank.
    weights = [1] * len(user_items) if weights is None else weights
    num_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    world_size = num_nodes * num_workers
    worker_items, worker_weights = _pack_greedily(items=user_items, weights=weights, num_bins=world_size)
    worker_ids_this_node = range(node_rank * num_workers, (node_rank + 1) * num_workers)

    for worker_id, size in worker_weights.items():
        if worker_id not in worker_ids_this_node:
            continue

        if file_size:
            print(f"Worker {worker_id} gets {size / 1e6:.1f} MB ({len(worker_items[worker_id])} files)")
        else:
            print(f"Worker {worker_id} gets ({len(worker_items[worker_id])}) items for a total weight of {size}.")

    # Shuffle each worker's bin so per-worker processing order is randomized.
    return [np.random.permutation(worker_items[worker_id]).tolist() for worker_id in worker_ids_this_node]
def _get_num_bytes(item: Any, base_path: str) -> int:
flattened_item, _ = tree_flatten(item)
num_bytes = 0
for element in flattened_item:
if isinstance(element, str):
element = Path(element).resolve()
if not element.exists():
continue
file_bytes = os.path.getsize(element)
if file_bytes == 0:
raise RuntimeError(f"The file {element} has 0 bytes!")
num_bytes += file_bytes
return num_bytes
def _get_item_filesizes(items: List[Any], base_path: str = "") -> List[int]:
    """Computes the total size in bytes of all file paths for every datastructure in the given list."""
    cpu_count = os.cpu_count() or 1
    # Threads accelerate the many small stat() calls performed per item.
    max_workers = cpu_count * 2 if cpu_count > 4 else cpu_count
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Executor.map preserves input order and re-raises worker exceptions,
        # matching the original submit/result loop.
        return list(executor.map(lambda item: _get_num_bytes(item, base_path), items))
def _to_path(element: str) -> str:
    """Resolve `element` to an absolute path, except Studio teamspace paths which stay as-is."""
    if _IS_IN_STUDIO and element.startswith("/teamspace"):
        return element
    return str(Path(element).resolve())
def _is_path(input_dir: Optional[str], element: Any) -> bool:
    """Return True when `element` looks like an actual filesystem path."""
    if not isinstance(element, str):
        return False

    candidate = element
    if _IS_IN_STUDIO and input_dir is not None:
        if candidate.startswith(input_dir):
            return True
        # A relative path under the input dir also counts once made absolute;
        # the absolute form is what falls through to the existence check.
        candidate = str(Path(candidate).absolute())
        if candidate.startswith(input_dir):
            return True

    return os.path.exists(candidate)
class BaseWorker:
def __init__(
self,
worker_index: int,
num_workers: int,
node_rank: int,
data_recipe: "DataRecipe",
input_dir: Dir,
output_dir: Dir,
items: List[Any],
progress_queue: Queue,
error_queue: Queue,
stop_queue: Queue,
num_downloaders: int,
num_uploaders: int,
remove: bool,
reader: Optional[BaseReader] = None,
) -> None:
"""The BaseWorker is responsible to process the user data."""
self.worker_index = worker_index
self.num_workers = num_workers
self.node_rank = node_rank
self.data_recipe = data_recipe
self.input_dir = input_dir
self.output_dir = output_dir
self.items = items
self.num_items = len(self.items)
self.num_downloaders = num_downloaders
self.num_uploaders = num_uploaders
self.remove = remove
self.reader = reader
self.paths: List[List[str]] = []
self.remover: Optional[Process] = None
self.downloaders: List[Process] = []
self.uploaders: List[Process] = []
self.to_download_queues: List[Queue] = []
self.to_upload_queues: List[Queue] = []
self.stop_queue = stop_queue
self.ready_to_process_queue: Queue = Queue()
self.remove_queue: Queue = Queue()
self.progress_queue: Queue = progress_queue
self.error_queue: Queue = error_queue
self._counter = 0
self._last_time = time()
self._index_counter = 0
def run(self) -> None:
try:
self._setup()
self._loop()
except Exception:
traceback_format = traceback.format_exc()
print(traceback_format)
self.error_queue.put(traceback_format)
print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is done.")
def _setup(self) -> None:
self._set_environ_variables()
self._create_cache()
self._collect_paths()
self._start_downloaders()
self._start_uploaders()
self._start_remover()
def _loop(self) -> None:
num_downloader_finished = 0
while True:
index = self.ready_to_process_queue.get()
if index is None:
num_downloader_finished += 1
if num_downloader_finished == self.num_downloaders:
print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is terminating.")
if isinstance(self.data_recipe, DataChunkRecipe):
self._handle_data_chunk_recipe_end()
if self.output_dir.url if self.output_dir.url else self.output_dir.path:
# Inform the uploaders they are doing working
for i in range(self.num_uploaders):
self.to_upload_queues[i].put(None)
# Wait for them all to be finished
for uploader in self.uploaders:
uploader.join()
if self.remove:
assert self.remover
self.remove_queue.put(None)
self.remover.join()
if self.progress_queue:
self.progress_queue.put((self.worker_index, self._counter))
return
continue
if isinstance(self.data_recipe, DataChunkRecipe):
self._handle_data_chunk_recipe(index)
else:
self._handle_data_transform_recipe(index)
self._counter += 1
# Don't send the last progress update, so the main thread awaits for the uploader and remover
if self.progress_queue and (time() - self._last_time) > 1 and self._counter < (self.num_items - 2):
self.progress_queue.put((self.worker_index, self._counter))
self._last_time = time()
if self.remove and self.input_dir.path is not None and self.reader is None:
self.remove_queue.put(self.paths[index])
try:
self.stop_queue.get(timeout=0.0001)
return
except Empty:
pass
def _set_environ_variables(self) -> None:
# set the optimizer global rank and world_size
os.environ["DATA_OPTIMIZER_GLOBAL_RANK"] = str(_get_node_rank() * self.num_workers + self.worker_index)
os.environ["DATA_OPTIMIZER_NUM_WORKERS"] = str(self.num_workers)
def _create_cache(self) -> None:
self.cache_data_dir = _get_cache_data_dir()
os.makedirs(self.cache_data_dir, exist_ok=True)
self.cache_chunks_dir = _get_cache_dir()
os.makedirs(self.cache_chunks_dir, exist_ok=True)
if isinstance(self.data_recipe, DataTransformRecipe):
return
self.cache = Cache(
self.cache_chunks_dir,
chunk_bytes=self.data_recipe.chunk_bytes,
chunk_size=self.data_recipe.chunk_size,
compression=self.data_recipe.compression,
)
self.cache._reader._rank = _get_node_rank() * self.num_workers + self.worker_index
def _try_upload(self, data: Optional[Union[str, Tuple[str, str]]]) -> None:
if not data or (self.output_dir.url if self.output_dir.url else self.output_dir.path) is None:
return
if isinstance(data, str):
assert os.path.exists(data), data
else:
assert os.path.exists(data[-1]), data
self.to_upload_queues[self._counter % self.num_uploaders].put(data)
def _collect_paths(self) -> None:
if self.input_dir.path is None or self.reader is not None:
for index in range(len(self.items)):
self.ready_to_process_queue.put(index)
for _ in range(self.num_downloaders):
self.ready_to_process_queue.put(None)
return
items = []
for item in self.items:
flattened_item, spec = tree_flatten(item)
# For speed reasons, we assume starting with `self.input_dir` is enough to be a real file.
# Other alternative would be too slow.
# TODO: Try using dictionary for higher accurary.
indexed_paths = {
index: _to_path(element)
for index, element in enumerate(flattened_item)
if _is_path(self.input_dir.path, element)
}
if len(indexed_paths) == 0:
raise ValueError(
f"The provided item {item} didn't contain any filepaths. The input_dir is {self.input_dir.path}."
)
paths = []
for index, path in indexed_paths.items():
paths.append(path)
if self.input_dir and not self.input_dir.path.startswith("/teamspace/studios/this_studio"):
path = path.replace(self.input_dir.path, self.cache_data_dir)
flattened_item[index] = path
self.paths.append(paths)
items.append(tree_unflatten(flattened_item, spec))
self.items = items
def _start_downloaders(self) -> None:
if self.input_dir.path is None or self.reader is not None:
return
for _ in range(self.num_downloaders):
to_download_queue: Queue = Queue()
p = Process(
target=_download_data_target,
args=(
self.input_dir,
self.cache_data_dir,
to_download_queue,
self.ready_to_process_queue,
),
)
p.start()
self.downloaders.append(p)
self.to_download_queues.append(to_download_queue)
for index, paths in enumerate(self.paths):
self.to_download_queues[index % self.num_downloaders].put((index, paths))
for downloader_index in range(self.num_downloaders):
self.to_download_queues[downloader_index].put(None)
def _start_remover(self) -> None:
if not self.remove:
return
self.remover = Process(
target=_remove_target,
args=(
self.input_dir,
self.cache_data_dir,
self.remove_queue,
),
)
self.remover.start()
def _start_uploaders(self) -> None:
if self.output_dir.path is None and self.output_dir.url is None:
return
for _ in range(self.num_uploaders):
to_upload_queue: Queue = Queue()
p = Process(
target=_upload_fn,
args=(
to_upload_queue,
self.remove_queue,
self.cache_chunks_dir,
self.output_dir,
),
)
p.start()
self.uploaders.append(p)
self.to_upload_queues.append(to_upload_queue)
def _handle_data_chunk_recipe(self, index: int) -> None:
try:
current_item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
item_data_or_generator = self.data_recipe.prepare_item(current_item)
if isinstance(item_data_or_generator, types.GeneratorType):
for item_data in item_data_or_generator:
if item_data is not None:
chunk_filepath = self.cache._add_item(self._index_counter, item_data)
self._try_upload(chunk_filepath)
self._index_counter += 1
elif item_data_or_generator is not None:
chunk_filepath = self.cache._add_item(self._index_counter, item_data_or_generator)
self._try_upload(chunk_filepath)
self._index_counter += 1
except Exception as e:
raise RuntimeError(f"Failed processing {self.items[index]}") from e
def _handle_data_chunk_recipe_end(self) -> None:
chunks_filepaths = self.cache.done()
if chunks_filepaths and len(self.to_upload_queues):
for i, chunk_filepath in enumerate(chunks_filepaths):
if isinstance(chunk_filepath, str) and os.path.exists(chunk_filepath):
self.to_upload_queues[i % self.num_uploaders].put(chunk_filepath)
def _handle_data_transform_recipe(self, index: int) -> None:
# Don't use a context manager to avoid deleting files that are being uploaded.
output_dir = tempfile.mkdtemp()
item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
item_data = self.data_recipe.prepare_item(item, str(output_dir), len(self.items) - 1 == index)
if item_data is not None:
raise ValueError(
"When using a `DataTransformRecipe`, the `prepare_item` shouldn't return anything."
" Simply store your files under the output_dir."
)
filepaths = []
for directory, _, filenames in os.walk(output_dir):
for filename in filenames:
filepaths.append(os.path.join(directory, filename))
for filepath in filepaths:
self._try_upload((output_dir, filepath))
class DataWorkerProcess(BaseWorker, Process):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """The DataWorkerProcess is responsible to process the user data inside processes."""
        # Initialize both bases explicitly: BaseWorker stores the work description,
        # Process sets up the multiprocessing machinery (its `run` is overridden by BaseWorker.run).
        BaseWorker.__init__(self, *args, **kwargs)
        Process.__init__(self)
@dataclass
class _Result:
    """Summary of a finished processing run, reported by `DataRecipe._done`.

    All fields default to None; the chunk-specific fields are only populated by
    `DataChunkRecipe._done` on the last node.
    """

    size: Optional[int] = None
    # Total number of bytes across all chunks. Fixed annotation: the previous
    # `Optional[str]` was wrong — `DataChunkRecipe._done` assigns `sum(...)`, an int.
    num_bytes: Optional[int] = None
    data_format: Optional[str] = None
    compression: Optional[str] = None
    num_chunks: Optional[int] = None
    num_bytes_per_chunk: Optional[List[int]] = None
# Generic item-metadata type used by the recipe interfaces below.
T = TypeVar("T")
class DataRecipe:
    """Base interface for data recipes: subclasses describe the dataset structure and
    how each item is processed."""

    def __init__(self) -> None:
        self._name: Optional[str] = None

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the list of item metadata to process."""
        pass

    @abstractmethod
    def prepare_item(self, *args: Any, **kwargs: Any) -> Any:
        """Process a single item; subclasses define the exact semantics."""
        pass

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        # Default completion hook: only report how many items were processed.
        return _Result(size=size)
class DataChunkRecipe(DataRecipe):
    """Recipe whose `prepare_item` output is serialized into chunked binary files."""

    def __init__(
        self,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
    ):
        """Configure the chunking strategy.

        Args:
            chunk_size: Maximum number of items per chunk (mutually exclusive with `chunk_bytes`).
            chunk_bytes: Maximum number of bytes per chunk. Defaults to 64 MB when neither
                limit is provided.
            compression: Optional compression algorithm name for the chunk writer.

        Raises:
            ValueError: If both `chunk_size` and `chunk_bytes` are provided.
        """
        super().__init__()
        if chunk_size is not None and chunk_bytes is not None:
            raise ValueError("Either one of the `chunk_size` or the `chunk_bytes` need to be provided.")
        self.chunk_size = chunk_size
        # Bug fix: the previous expression (`1 << 26 if chunk_size is None else chunk_bytes`)
        # discarded a user-provided `chunk_bytes` whenever `chunk_size` was None.
        # Default to 64 MB only when neither limit is set.
        self.chunk_bytes = 1 << 26 if chunk_size is None and chunk_bytes is None else chunk_bytes
        self.compression = compression

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T) -> Any:
        """The return of this `prepare_item` method is persisted in chunked binary files."""

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Merge per-worker indexes, upload the final index, and summarize the run."""
        num_nodes = _get_num_nodes()
        cache_dir = _get_cache_dir()
        chunks = [file for file in os.listdir(cache_dir) if file.endswith(".bin")]
        if chunks and delete_cached_files and output_dir.path is not None:
            raise RuntimeError(f"All the chunks should have been deleted. Found {chunks}")
        merge_cache = Cache(cache_dir, chunk_bytes=1)
        node_rank = _get_node_rank()
        merge_cache._merge_no_wait(node_rank if num_nodes > 1 else None)
        self._upload_index(output_dir, cache_dir, num_nodes, node_rank)
        if num_nodes == node_rank + 1:
            # Only the last node aggregates the final statistics from the merged index.
            with open(os.path.join(cache_dir, _INDEX_FILENAME)) as f:
                config = json.load(f)
            size = sum([c["dim"] if c["dim"] is not None else c["chunk_size"] for c in config["chunks"]])
            num_bytes = sum([c["chunk_bytes"] for c in config["chunks"]])
            if config["config"] is not None:
                data_format = tree_unflatten(
                    config["config"]["data_format"], treespec_loads(config["config"]["data_spec"])
                )
            else:
                data_format = None
            num_chunks = len(config["chunks"])
            # The platform can't store more than 1024 entries.
            # Note: This isn't really used right now, so it is fine to skip if too big.
            num_bytes_per_chunk = [c["chunk_size"] for c in config["chunks"]] if num_chunks < 1024 else []
            return _Result(
                size=size,
                num_bytes=num_bytes,
                data_format=data_format,
                compression=config["config"]["compression"] if config["config"] else None,
                num_chunks=len(config["chunks"]),
                num_bytes_per_chunk=num_bytes_per_chunk,
            )
        return _Result(
            size=size,
        )

    def _upload_index(self, output_dir: Dir, cache_dir: str, num_nodes: int, node_rank: Optional[int]) -> None:
        """Upload this node's index file and, on the last node, merge all nodes' indexes."""
        if output_dir.path is None and output_dir.url is None:
            return
        obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
        if num_nodes > 1:
            local_filepath = os.path.join(cache_dir, f"{node_rank}-{_INDEX_FILENAME}")
        else:
            local_filepath = os.path.join(cache_dir, _INDEX_FILENAME)
        if obj.scheme == "s3":
            s3 = S3Client()
            s3.client.upload_file(
                local_filepath, obj.netloc, os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
            )
        elif output_dir.path and os.path.isdir(output_dir.path):
            shutil.copyfile(local_filepath, os.path.join(output_dir.path, os.path.basename(local_filepath)))
        if num_nodes == 1 or node_rank is None:
            return
        # Merge the index files generated by each node.
        # Note: When using the Data Optimizer, they should be a single process on each node executing this section
        # So no risk to get race conditon.
        if num_nodes == node_rank + 1:
            # Get the index file locally
            for node_rank in range(num_nodes - 1):
                output_dir_path = output_dir.url if output_dir.url else output_dir.path
                assert output_dir_path
                remote_filepath = os.path.join(output_dir_path, f"{node_rank}-{_INDEX_FILENAME}")
                node_index_filepath = os.path.join(cache_dir, os.path.basename(remote_filepath))
                if obj.scheme == "s3":
                    obj = parse.urlparse(remote_filepath)
                    _wait_for_file_to_exist(s3, obj)
                    with open(node_index_filepath, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif output_dir.path and os.path.isdir(output_dir.path):
                    shutil.copyfile(remote_filepath, node_index_filepath)
            merge_cache = Cache(cache_dir, chunk_bytes=1)
            merge_cache._merge_no_wait()
            self._upload_index(output_dir, cache_dir, 1, None)
class DataTransformRecipe(DataRecipe):
    """Recipe variant whose `prepare_item` writes output files to disk instead of
    returning data to be chunked."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T, output_dir: str, is_last: bool) -> None:
        """Use your item metadata to process your files and save the file outputs into `output_dir`."""
class DataProcessor:
    """Orchestrates multi-process execution of a `DataRecipe` over a dataset.

    Splits the items across worker processes, tracks progress via queues, and
    finalizes/uploads the result when all workers are done.
    """

    def __init__(
        self,
        input_dir: Union[str, Dir],
        output_dir: Optional[Union[str, Dir]] = None,
        num_workers: Optional[int] = None,
        num_downloaders: Optional[int] = None,
        num_uploaders: Optional[int] = None,
        delete_cached_files: bool = True,
        fast_dev_run: Optional[Union[bool, int]] = None,
        random_seed: Optional[int] = 42,
        reorder_files: bool = True,
        weights: Optional[List[int]] = None,
        reader: Optional[BaseReader] = None,
    ):
        """The `DatasetOptimiser` provides an efficient way to process data across multiple machine into chunks to make
        training faster.

        Arguments:
            input_dir: The path to where the input data are stored.
            output_dir: The path to where the output data are stored.
            num_workers: The number of worker threads to use.
            num_downloaders: The number of file downloaders to use.
            num_uploaders: The number of file uploaders to use.
            delete_cached_files: Whether to delete the cached files.
            fast_dev_run: Whether to run a quick dev run.
            random_seed: The random seed to be set before shuffling the data.
            reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
                Set this to ``False`` if the order in which samples are processed should be preserved.
            weights: Provide a list of weights associated to the inputs.
                This is used to evenly split the work among the workers.
            reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.
        """
        self.input_dir = _resolve_dir(input_dir)
        self.output_dir = _resolve_dir(output_dir)
        # Defaults: a single worker for dev runs, otherwise 4 workers per CPU.
        self.num_workers = num_workers or (1 if fast_dev_run else (os.cpu_count() or 1) * 4)
        self.num_downloaders = num_downloaders or 2
        self.num_uploaders = num_uploaders or 5
        self.delete_cached_files = delete_cached_files
        self.fast_dev_run = _get_fast_dev_run() if fast_dev_run is None else fast_dev_run
        self.workers: Any = []
        self.workers_tracker: Dict[int, int] = {}
        self.progress_queue: Optional[Queue] = None
        self.error_queue: Queue = Queue()
        self.stop_queues: List[Queue] = []
        self.reorder_files = reorder_files
        self.weights = weights
        self.reader = reader
        # NOTE(review): this raises when BOTH are provided, but the message reads as if
        # neither was — confirm the intended wording.
        if self.reader is not None and self.weights is not None:
            raise ValueError("Either the reader or the weights needs to be defined.")
        # Ensure the input dir is the same across all nodes
        self.input_dir = broadcast_object("input_dir", self.input_dir)
        if self.output_dir:
            # Ensure the output dir is the same across all nodes
            self.output_dir = broadcast_object("output_dir", self.output_dir)
            print(f"Storing the files under {self.output_dir.path}")
        self.random_seed = random_seed

    def run(self, data_recipe: DataRecipe) -> None:
        """The `DataProcessor.run(...)` method triggers the data recipe processing over your dataset."""
        if not isinstance(data_recipe, DataRecipe):
            raise ValueError("The provided value should be a data recipe.")
        t0 = time()
        print(f"Setup started with fast_dev_run={self.fast_dev_run}.")
        # Force random seed to be fixed
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)
        # Call the setup method of the user
        user_items: List[Any] = data_recipe.prepare_structure(self.input_dir.path if self.input_dir else None)
        if not isinstance(user_items, (list, StreamingDataLoader)):
            raise ValueError("The `prepare_structure` should return a list of item metadata.")
        if isinstance(user_items, StreamingDataLoader):
            self.reader = StreamingDataLoaderReader(user_items)
        if self.reader:
            user_items = self.reader.remap_items(user_items, self.num_workers)
        # Partition strategy: explicit weights > file-size-based packing > sequential split.
        if self.weights is not None:
            if len(self.weights) != len(user_items):
                raise ValueError("The provided weights length should match the inputs' length.")
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=self.weights, file_size=False
            )
        elif self.reorder_files and self.input_dir.path:
            # TODO: Only do this on node 0, and broadcast the item sizes to the other nodes.
            item_sizes = _get_item_filesizes(user_items, base_path=self.input_dir.path)
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=item_sizes
            )
        else:
            workers_user_items = _map_items_to_workers_sequentially(num_workers=self.num_workers, user_items=user_items)
        print(f"Setup finished in {round(time() - t0, 3)} seconds. Found {len(user_items)} items to process.")
        if self.fast_dev_run:
            items_to_keep = self.fast_dev_run if type(self.fast_dev_run) is int else _DEFAULT_FAST_DEV_RUN_ITEMS
            workers_user_items = [w[:items_to_keep] for w in workers_user_items]
            print(f"Fast dev run is enabled. Limiting to {items_to_keep} items per process.")
        num_items = sum([len(items) for items in workers_user_items])
        self._cleanup_cache()
        print(f"Starting {self.num_workers} workers with {num_items} items.")
        # NOTE(review): this condition can never be True (`input_dir is None` AND `input_dir`
        # truthy), and `self.src_resolver` is never assigned in this class — this branch looks
        # dead/broken; confirm the original intent.
        if self.input_dir is None and self.src_resolver is not None and self.input_dir:
            self.input_dir = self.src_resolver(self.input_dir)
            print(f"The remote_dir is `{self.input_dir}`.")
        signal.signal(signal.SIGINT, self._signal_handler)
        self._create_process_workers(data_recipe, workers_user_items)
        print("Workers are ready ! Starting data processing...")
        current_total = 0
        has_failed = False
        pbar = _tqdm(
            desc="Progress",
            total=num_items,
            smoothing=0,
            position=-1,
            mininterval=1,
            leave=True,
            dynamic_ncols=True,
        )
        # Main supervision loop: drain the error queue first, then the progress queue.
        while True:
            try:
                error = self.error_queue.get(timeout=0.001)
                self._exit_on_error(error)
            except Empty:
                assert self.progress_queue
                try:
                    index, counter = self.progress_queue.get(timeout=0.001)
                except Empty:
                    continue
                self.workers_tracker[index] = counter
                new_total = sum(self.workers_tracker.values())
                pbar.update(new_total - current_total)
                current_total = new_total
                if current_total == num_items:
                    break
                # Exit early if all the workers are done.
                # This means there were some kinda of errors.
                if all(not w.is_alive() for w in self.workers):
                    has_failed = True
                    break
        pbar.close()
        num_nodes = _get_num_nodes()
        node_rank = _get_node_rank()
        # TODO: Understand why it hangs.
        if num_nodes == 1:
            for w in self.workers:
                w.join(0)
        print("Workers are finished.")
        result = data_recipe._done(len(user_items), self.delete_cached_files, self.output_dir)
        # Only the last node registers the dataset on the platform (Studio-only).
        if num_nodes == node_rank + 1 and self.output_dir.url and _IS_IN_STUDIO:
            assert self.output_dir.path
            _create_dataset(
                input_dir=self.input_dir.path,
                storage_dir=self.output_dir.path,
                dataset_type=V1DatasetType.CHUNKED
                if isinstance(data_recipe, DataChunkRecipe)
                else V1DatasetType.TRANSFORMED,
                empty=False,
                size=result.size,
                num_bytes=result.num_bytes,
                data_format=result.data_format,
                compression=result.compression,
                num_chunks=result.num_chunks,
                num_bytes_per_chunk=result.num_bytes_per_chunk,
            )
        print("Finished data processing!")
        # TODO: Understand why it is required to avoid long shutdown.
        if _get_num_nodes() > 1:
            os._exit(int(has_failed))

    def _exit_on_error(self, error: str) -> None:
        """Reap the workers, then propagate the reported worker failure to the caller."""
        for w in self.workers:
            w.join(0)
        raise RuntimeError(f"We found the following error {error}.")

    def _create_process_workers(self, data_recipe: DataRecipe, workers_user_items: List[List[Any]]) -> None:
        """Spawn one `DataWorkerProcess` per item partition and record their stop queues."""
        self.progress_queue = Queue()
        workers: List[DataWorkerProcess] = []
        stop_queues: List[Queue] = []
        for worker_idx, worker_user_items in enumerate(workers_user_items):
            stop_queues.append(Queue())
            worker = DataWorkerProcess(
                worker_idx,
                self.num_workers,
                _get_node_rank(),
                data_recipe,
                self.input_dir,
                self.output_dir,
                worker_user_items,
                self.progress_queue,
                self.error_queue,
                stop_queues[-1],
                self.num_downloaders,
                self.num_uploaders,
                self.delete_cached_files,
                self.reader,
            )
            worker.start()
            workers.append(worker)
        # Note: Don't store within the loop as weakref aren't serializable
        self.workers = workers
        self.stop_queues = stop_queues

    def _signal_handler(self, signal: Any, frame: Any) -> None:
        """On termination, we stop all the processes to avoid leaking RAM."""
        for stop_queue in self.stop_queues:
            stop_queue.put(None)
        for w in self.workers:
            w.join(0)
        os._exit(0)

    def _cleanup_cache(self) -> None:
        """Recreate empty chunk/data cache directories for a fresh run."""
        cache_dir = _get_cache_dir()
        # Cleanup the cache dir folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)
        os.makedirs(cache_dir, exist_ok=True)
        cache_data_dir = _get_cache_data_dir()
        # Cleanup the cache data folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_data_dir):
            shutil.rmtree(cache_data_dir, ignore_errors=True)
        os.makedirs(cache_data_dir, exist_ok=True)
|
evocodebench_data_157
|
import concurrent
import json
import logging
import os
import random
import shutil
import signal
import tempfile
import traceback
import types
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Process, Queue
from pathlib import Path
from queue import Empty
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from urllib import parse
import numpy as np
import torch
from tqdm.auto import tqdm as _tqdm
from litdata.constants import (
_BOTO3_AVAILABLE,
_DEFAULT_FAST_DEV_RUN_ITEMS,
_INDEX_FILENAME,
_IS_IN_STUDIO,
_LIGHTNING_CLOUD_LATEST,
_TORCH_GREATER_EQUAL_2_1_0,
)
from litdata.processing.readers import BaseReader, StreamingDataLoaderReader
from litdata.processing.utilities import _create_dataset
from litdata.streaming import Cache
from litdata.streaming.cache import Dir
from litdata.streaming.client import S3Client
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import _resolve_dir
from litdata.utilities.broadcast import broadcast_object
from litdata.utilities.packing import _pack_greedily
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten, tree_unflatten, treespec_loads
if _LIGHTNING_CLOUD_LATEST:
from lightning_cloud.openapi import V1DatasetType
if _BOTO3_AVAILABLE:
import botocore
# Module-level logger. Use `logging.getLogger` (not `logging.Logger(...)`) so the logger
# is registered in the logging hierarchy and picks up application-level configuration.
logger = logging.getLogger(__name__)
def _get_num_nodes() -> int:
    """Returns the number of nodes (defaults to 1 when the env var is unset)."""
    return int(os.environ.get("DATA_OPTIMIZER_NUM_NODES", 1))
def _get_node_rank() -> int:
    """Returns the current node rank of the instance (defaults to 0)."""
    return int(os.environ.get("DATA_OPTIMIZER_NODE_RANK", 0))
def _get_fast_dev_run() -> bool:
    """Returns whether fast dev mode is enabled (defaults to on).

    Fixed annotation: the function returns `bool(...)` but was annotated `-> int`.
    """
    return bool(int(os.getenv("DATA_OPTIMIZER_FAST_DEV_RUN", 1)))
def _get_default_cache() -> str:
    """Return the base cache location: `/cache` on the Studio platform, else the system temp dir."""
    if _IS_IN_STUDIO:
        return "/cache"
    return tempfile.gettempdir()
def _get_cache_dir(name: Optional[str] = None) -> str:
    """Returns the cache directory used by the Cache to store the chunks."""
    base = os.getenv("DATA_OPTIMIZER_CACHE_FOLDER", f"{_get_default_cache()}/chunks")
    # Without a name, the base directory itself is the cache dir.
    return base if name is None else os.path.join(base, name.lstrip("/"))
def _get_cache_data_dir(name: Optional[str] = None) -> str:
    """Returns the cache data directory used by the DataProcessor workers to download the files."""
    cache_dir = os.getenv("DATA_OPTIMIZER_DATA_CACHE_FOLDER", f"{_get_default_cache()}/data")
    if name is None:
        # `os.path.join(cache_dir)` was a no-op; return the directory directly
        # (consistent with `_get_cache_dir`).
        return cache_dir
    return os.path.join(cache_dir, name.lstrip("/"))
def _wait_for_file_to_exist(s3: S3Client, obj: parse.ParseResult, sleep_time: int = 2) -> Any:
    """Poll S3 until `obj` exists and return the HeadObject response.

    Any client error other than a HeadObject "Not Found" is re-raised immediately.
    """
    while True:
        try:
            return s3.client.head_object(Bucket=obj.netloc, Key=obj.path.lstrip("/"))
        except botocore.exceptions.ClientError as e:
            # Only swallow "not found" — everything else is a real failure.
            if "the HeadObject operation: Not Found" in str(e):
                sleep(sleep_time)
            else:
                raise e
def _wait_for_disk_usage_higher_than_threshold(input_dir: str, threshold_in_gb: int = 25, sleep_time: int = 3) -> None:
    """Block until the filesystem containing `input_dir` has more than `threshold_in_gb` GB free."""
    while True:
        free_gb = shutil.disk_usage(input_dir).free / 1000 / 1000 / 1000
        if free_gb > threshold_in_gb:
            return
        sleep(sleep_time)
def _download_data_target(input_dir: Dir, cache_dir: str, queue_in: Queue, queue_out: Queue) -> None:
    """This function is used to download data from a remote directory to a cache directory to optimise reading."""
    s3 = S3Client()
    while True:
        # 2. Fetch from the queue
        r: Optional[Tuple[int, List[str]]] = queue_in.get()
        # 3. Terminate the process if we received a termination signal
        if r is None:
            queue_out.put(None)
            return
        # 4. Unpack
        index, paths = r
        # 5. Check whether all the files are already downloaded
        if input_dir.path and all(
            os.path.exists(p.replace(input_dir.path, cache_dir) if input_dir else p) for p in paths
        ):
            queue_out.put(index)
            continue
        if input_dir.url is not None or input_dir.path is not None:
            if input_dir.url:
                # 6. Wait for the removers to catch up when we are downloading data.
                _wait_for_disk_usage_higher_than_threshold("/", 25)
            # 7. Download all the required paths to unblock the current index
            for path in paths:
                if input_dir.path:
                    local_path = path.replace(input_dir.path, cache_dir)
                # NOTE(review): when only `input_dir.url` is set (no `input_dir.path`),
                # `local_path` is never assigned before being used below — confirm callers
                # always provide `input_dir.path` alongside a url.
                if input_dir.url and input_dir.path:
                    path = path.replace(input_dir.path, input_dir.url)
                obj = parse.urlparse(path)
                if obj.scheme == "s3":
                    dirpath = os.path.dirname(local_path)
                    os.makedirs(dirpath, exist_ok=True)
                    with open(local_path, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif os.path.isfile(path):
                    # Studio-local files can be read in place; everything else is copied into the cache.
                    if not path.startswith("/teamspace/studios/this_studio"):
                        os.makedirs(os.path.dirname(local_path), exist_ok=True)
                        shutil.copyfile(path, local_path)
                else:
                    raise ValueError(f"The provided {input_dir.url} isn't supported.")
        # 7. Inform the worker the current files are available
        queue_out.put(index)
def _remove_target(input_dir: Dir, cache_dir: str, queue_in: Queue) -> None:
    """This function is used to delete files from the cache directory to minimise disk space."""
    while True:
        # 1. Collect paths
        paths = queue_in.get()
        # 2. Terminate the process if we received a termination signal
        if paths is None:
            return
        # 3. Iterate through the paths and delete them sequentially.
        for path in paths:
            if input_dir:
                # Map input-dir paths back to their cached copy before deleting.
                if not path.startswith(cache_dir) and input_dir.path is not None:
                    path = path.replace(input_dir.path, cache_dir)
                if os.path.exists(path):
                    os.remove(path)
            elif os.path.exists(path) and "s3_connections" not in path:
                # Without an input dir, delete the raw path (but never connection metadata).
                os.remove(path)
def _upload_fn(upload_queue: Queue, remove_queue: Queue, cache_dir: str, output_dir: Dir) -> None:
    """This function is used to upload optimised chunks from a local to remote dataset directory."""
    obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
    if obj.scheme == "s3":
        s3 = S3Client()
    while True:
        # Either a chunk filepath, a (tmpdir, filepath) pair, or None to terminate.
        data: Optional[Union[str, Tuple[str, str]]] = upload_queue.get()
        tmpdir = None
        if isinstance(data, str) or data is None:
            local_filepath = data
        else:
            tmpdir, local_filepath = data
        # Terminate the process if we received a termination signal
        if local_filepath is None:
            return
        # Upload the file to the target cloud storage
        if not local_filepath.startswith(cache_dir):
            local_filepath = os.path.join(cache_dir, local_filepath)
        if obj.scheme == "s3":
            try:
                if tmpdir is None:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
                else:
                    # Preserve the file's relative location inside the temp dir.
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), local_filepath.replace(tmpdir, "")[1:])
                s3.client.upload_file(
                    local_filepath,
                    obj.netloc,
                    output_filepath,
                )
            except Exception as e:
                # NOTE(review): upload errors are only printed, so a failed upload is silently
                # skipped — confirm this best-effort behavior is intended.
                print(e)
        elif output_dir.path:
            if tmpdir is None:
                output_filepath = os.path.join(output_dir.path, os.path.basename(local_filepath))
            else:
                output_filepath = os.path.join(output_dir.path, local_filepath.replace(tmpdir, "")[1:])
            os.makedirs(os.path.dirname(output_filepath), exist_ok=True)
            shutil.move(local_filepath, output_filepath)
        else:
            raise ValueError(f"The provided {output_dir.path} isn't supported.")
        # Inform the remover to delete the file
        if remove_queue and os.path.exists(local_filepath):
            remove_queue.put([local_filepath])
def _map_items_to_workers_sequentially(num_workers: int, user_items: List[Any]) -> List[List[Any]]:
    """Split `user_items` into contiguous slices, one per worker of the current node.

    Items are divided as evenly as possible across the `num_nodes * num_workers` global
    workers (earlier workers receive one extra item when the division is not exact) and
    only the slices belonging to this node's workers are returned.

    Raises:
        RuntimeError: If the resulting assignment doesn't yield `num_workers` slices.
    """
    total_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    total_workers = total_nodes * num_workers
    base, extra = divmod(len(user_items), total_workers)
    all_slices: List[List[Any]] = []
    start = 0
    for worker_index in range(total_workers):
        count = base + 1 if worker_index < extra else base
        all_slices.append(user_items[start : start + count])
        start += count
    # Bug fix: the previous version returned the slices for *all* nodes and therefore
    # always raised whenever more than one node was used. Return only this node's workers.
    result = all_slices[node_rank * num_workers : (node_rank + 1) * num_workers]
    if len(result) != num_workers:
        raise RuntimeError("Improper assignment of items to workers")
    return result
def _map_items_to_workers_weighted(
    num_workers: int,
    user_items: List[Any],
    weights: Optional[List[int]] = None,
    file_size: bool = True,
) -> List[List[Any]]:
    """Greedily pack `user_items` into `num_nodes * num_workers` bins by weight and return
    the shuffled bins assigned to this node's workers."""
    if weights is None:
        weights = [1] * len(user_items)
    node_rank = _get_node_rank()
    world_size = _get_num_nodes() * num_workers
    worker_items, worker_weights = _pack_greedily(items=user_items, weights=weights, num_bins=world_size)
    worker_ids_this_node = range(node_rank * num_workers, (node_rank + 1) * num_workers)
    # Report the load assigned to each of this node's workers.
    for worker_id, size in worker_weights.items():
        if worker_id not in worker_ids_this_node:
            continue
        if file_size:
            print(f"Worker {worker_id} gets {size / 1e6:.1f} MB ({len(worker_items[worker_id])} files)")
        else:
            print(f"Worker {worker_id} gets ({len(worker_items[worker_id])}) items for a total weight of {size}.")
    # Shuffle within each worker so item order doesn't follow the packing order.
    return [np.random.permutation(worker_items[worker_id]).tolist() for worker_id in worker_ids_this_node]
def _get_num_bytes(item: Any, base_path: str) -> int:
    """Sum the on-disk sizes (in bytes) of every existing file path found in `item`.

    Note: `base_path` is currently unused but kept for interface compatibility.
    """
    flattened, _ = tree_flatten(item)
    total = 0
    for value in flattened:
        if not isinstance(value, str):
            continue
        resolved = Path(value).resolve()
        if not resolved.exists():
            continue
        size = os.path.getsize(resolved)
        if size == 0:
            raise RuntimeError(f"The file {resolved} has 0 bytes!")
        total += size
    return total
def _get_item_filesizes(items: List[Any], base_path: str = "") -> List[int]:
    """Computes the total size in bytes of all file paths for every datastructure in the given list."""
    # Import explicitly: the top-level `import concurrent` does not guarantee that the
    # `concurrent.futures` submodule has been loaded.
    from concurrent.futures import ThreadPoolExecutor

    cpu_count = os.cpu_count() or 1
    max_workers = cpu_count * 2 if cpu_count > 4 else cpu_count
    # Parallelize to accelerate retrieving the number of file bytes to read for each item.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # `Executor.map` preserves input order, matching the original future-by-future collection.
        return list(executor.map(lambda item: _get_num_bytes(item, base_path), items))
def _to_path(element: str) -> str:
    """Normalize a path: keep Studio `/teamspace` paths as-is, otherwise resolve to absolute."""
    if _IS_IN_STUDIO and element.startswith("/teamspace"):
        return element
    return str(Path(element).resolve())
def _is_path(input_dir: Optional[str], element: Any) -> bool:
    """Return True when `element` looks like a real filesystem path for the given input dir."""
    if not isinstance(element, str):
        return False
    candidate = element
    if _IS_IN_STUDIO and input_dir is not None:
        # Accept the raw string under `input_dir`, then retry with its absolute form.
        if candidate.startswith(input_dir):
            return True
        candidate = str(Path(candidate).absolute())
        if candidate.startswith(input_dir):
            return True
    # Fall back to an existence check (on the absolute form when computed above).
    return os.path.exists(candidate)
class BaseWorker:
    def __init__(
        self,
        worker_index: int,
        num_workers: int,
        node_rank: int,
        data_recipe: "DataRecipe",
        input_dir: Dir,
        output_dir: Dir,
        items: List[Any],
        progress_queue: Queue,
        error_queue: Queue,
        stop_queue: Queue,
        num_downloaders: int,
        num_uploaders: int,
        remove: bool,
        reader: Optional[BaseReader] = None,
    ) -> None:
        """The BaseWorker is responsible to process the user data."""
        # Identity and configuration of this worker within the node / cluster.
        self.worker_index = worker_index
        self.num_workers = num_workers
        self.node_rank = node_rank
        self.data_recipe = data_recipe
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.items = items
        self.num_items = len(self.items)
        self.num_downloaders = num_downloaders
        self.num_uploaders = num_uploaders
        self.remove = remove
        self.reader = reader
        # Per-item original file paths, filled by `_collect_paths`.
        self.paths: List[List[str]] = []
        # Helper processes and their queues (created lazily in `_setup`).
        self.remover: Optional[Process] = None
        self.downloaders: List[Process] = []
        self.uploaders: List[Process] = []
        self.to_download_queues: List[Queue] = []
        self.to_upload_queues: List[Queue] = []
        self.stop_queue = stop_queue
        self.ready_to_process_queue: Queue = Queue()
        self.remove_queue: Queue = Queue()
        self.progress_queue: Queue = progress_queue
        self.error_queue: Queue = error_queue
        # Progress and chunk-index bookkeeping.
        self._counter = 0
        self._last_time = time()
        self._index_counter = 0
def run(self) -> None:
try:
self._setup()
self._loop()
except Exception:
traceback_format = traceback.format_exc()
print(traceback_format)
self.error_queue.put(traceback_format)
print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is done.")
    def _setup(self) -> None:
        """Prepare the worker: env vars, caches, path collection, then helper processes.

        Order matters: paths must be collected before the downloaders are started,
        and uploaders must exist before any chunk can be produced.
        """
        self._set_environ_variables()
        self._create_cache()
        self._collect_paths()
        self._start_downloaders()
        self._start_uploaders()
        self._start_remover()
    def _loop(self) -> None:
        """Main worker loop: process item indexes as downloaders mark them ready.

        A `None` from the ready queue is one downloader finishing; once all downloaders
        are done, the uploaders/remover are drained and joined and the final progress
        update is sent.
        """
        num_downloader_finished = 0
        while True:
            index = self.ready_to_process_queue.get()
            if index is None:
                num_downloader_finished += 1
                if num_downloader_finished == self.num_downloaders:
                    print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is terminating.")
                    if isinstance(self.data_recipe, DataChunkRecipe):
                        self._handle_data_chunk_recipe_end()
                    if self.output_dir.url if self.output_dir.url else self.output_dir.path:
                        # Inform the uploaders they are done working
                        for i in range(self.num_uploaders):
                            self.to_upload_queues[i].put(None)
                        # Wait for them all to be finished
                        for uploader in self.uploaders:
                            uploader.join()
                    if self.remove:
                        assert self.remover
                        self.remove_queue.put(None)
                        self.remover.join()
                    if self.progress_queue:
                        self.progress_queue.put((self.worker_index, self._counter))
                    return
                continue
            if isinstance(self.data_recipe, DataChunkRecipe):
                self._handle_data_chunk_recipe(index)
            else:
                self._handle_data_transform_recipe(index)
            self._counter += 1
            # Don't send the last progress update, so the main thread awaits for the uploader and remover
            if self.progress_queue and (time() - self._last_time) > 1 and self._counter < (self.num_items - 2):
                self.progress_queue.put((self.worker_index, self._counter))
                self._last_time = time()
            # Schedule deletion of the processed item's cached files.
            if self.remove and self.input_dir.path is not None and self.reader is None:
                self.remove_queue.put(self.paths[index])
            # A message on the stop queue means the main process requested shutdown.
            try:
                self.stop_queue.get(timeout=0.0001)
                return
            except Empty:
                pass
def _set_environ_variables(self) -> None:
# set the optimizer global rank and world_size
os.environ["DATA_OPTIMIZER_GLOBAL_RANK"] = str(_get_node_rank() * self.num_workers + self.worker_index)
os.environ["DATA_OPTIMIZER_NUM_WORKERS"] = str(self.num_workers)
    def _create_cache(self) -> None:
        """Create the local data/chunk cache directories and, for chunk recipes, the chunk writer."""
        self.cache_data_dir = _get_cache_data_dir()
        os.makedirs(self.cache_data_dir, exist_ok=True)
        self.cache_chunks_dir = _get_cache_dir()
        os.makedirs(self.cache_chunks_dir, exist_ok=True)
        if isinstance(self.data_recipe, DataTransformRecipe):
            # Transform recipes write their own output files; no chunk cache is needed.
            return
        self.cache = Cache(
            self.cache_chunks_dir,
            chunk_bytes=self.data_recipe.chunk_bytes,
            chunk_size=self.data_recipe.chunk_size,
            compression=self.data_recipe.compression,
        )
        # Tag the cache with this worker's global rank so chunk files don't collide.
        self.cache._reader._rank = _get_node_rank() * self.num_workers + self.worker_index
def _try_upload(self, data: Optional[Union[str, Tuple[str, str]]]) -> None:
if not data or (self.output_dir.url if self.output_dir.url else self.output_dir.path) is None:
return
if isinstance(data, str):
assert os.path.exists(data), data
else:
assert os.path.exists(data[-1]), data
self.to_upload_queues[self._counter % self.num_uploaders].put(data)
    def _collect_paths(self) -> None:
        """Extract file paths from every item and rewrite them to their cache location.

        Populates `self.paths` (original paths per item, consumed by the downloaders) and
        rebuilds `self.items` with paths redirected into the worker's data cache. When there
        is no input dir (or a custom reader), items are enqueued for processing directly.
        """
        if self.input_dir.path is None or self.reader is not None:
            # Nothing to download: mark every item ready and send one sentinel per downloader.
            for index in range(len(self.items)):
                self.ready_to_process_queue.put(index)
            for _ in range(self.num_downloaders):
                self.ready_to_process_queue.put(None)
            return
        items = []
        for item in self.items:
            flattened_item, spec = tree_flatten(item)
            # For speed reasons, we assume starting with `self.input_dir` is enough to be a real file.
            # Other alternative would be too slow.
            # TODO: Try using dictionary for higher accurary.
            indexed_paths = {
                index: _to_path(element)
                for index, element in enumerate(flattened_item)
                if _is_path(self.input_dir.path, element)
            }
            if len(indexed_paths) == 0:
                raise ValueError(
                    f"The provided item {item} didn't contain any filepaths. The input_dir is {self.input_dir.path}."
                )
            paths = []
            for index, path in indexed_paths.items():
                paths.append(path)
                # Studio-local files are read in place; anything else points into the data cache.
                if self.input_dir and not self.input_dir.path.startswith("/teamspace/studios/this_studio"):
                    path = path.replace(self.input_dir.path, self.cache_data_dir)
                flattened_item[index] = path
            self.paths.append(paths)
            items.append(tree_unflatten(flattened_item, spec))
        self.items = items
def _start_downloaders(self) -> None:
if self.input_dir.path is None or self.reader is not None:
return
for _ in range(self.num_downloaders):
to_download_queue: Queue = Queue()
p = Process(
target=_download_data_target,
args=(
self.input_dir,
self.cache_data_dir,
to_download_queue,
self.ready_to_process_queue,
),
)
p.start()
self.downloaders.append(p)
self.to_download_queues.append(to_download_queue)
for index, paths in enumerate(self.paths):
self.to_download_queues[index % self.num_downloaders].put((index, paths))
for downloader_index in range(self.num_downloaders):
self.to_download_queues[downloader_index].put(None)
    def _start_remover(self) -> None:
        """Spawn the single remover process (only when ``remove`` is enabled)."""
        if not self.remove:
            return
        self.remover = Process(
            target=_remove_target,
            args=(
                self.input_dir,
                self.cache_data_dir,
                self.remove_queue,
            ),
        )
        self.remover.start()
    def _start_uploaders(self) -> None:
        """Spawn the uploader processes (skipped when there is no output location)."""
        if self.output_dir.path is None and self.output_dir.url is None:
            return
        for _ in range(self.num_uploaders):
            to_upload_queue: Queue = Queue()
            p = Process(
                target=_upload_fn,
                args=(
                    to_upload_queue,
                    self.remove_queue,
                    self.cache_chunks_dir,
                    self.output_dir,
                ),
            )
            p.start()
            self.uploaders.append(p)
            self.to_upload_queues.append(to_upload_queue)
    def _handle_data_chunk_recipe(self, index: int) -> None:
        """Run a ``DataChunkRecipe`` on one item and queue any produced chunk for upload.

        ``prepare_item`` may return a single value or a generator; each non-None value is
        appended to the cache under a monotonically increasing index.
        """
        try:
            current_item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
            item_data_or_generator = self.data_recipe.prepare_item(current_item)
            if isinstance(item_data_or_generator, types.GeneratorType):
                for item_data in item_data_or_generator:
                    if item_data is not None:
                        chunk_filepath = self.cache._add_item(self._index_counter, item_data)
                        self._try_upload(chunk_filepath)
                        self._index_counter += 1
            elif item_data_or_generator is not None:
                chunk_filepath = self.cache._add_item(self._index_counter, item_data_or_generator)
                self._try_upload(chunk_filepath)
                self._index_counter += 1
        except Exception as e:
            # Wrap to identify which item failed; the original exception is chained.
            raise RuntimeError(f"Failed processing {self.items[index]}") from e
    def _handle_data_chunk_recipe_end(self) -> None:
        """Flush the cache and queue any remaining (partial) chunk files for upload."""
        chunks_filepaths = self.cache.done()
        if chunks_filepaths and len(self.to_upload_queues):
            for i, chunk_filepath in enumerate(chunks_filepaths):
                if isinstance(chunk_filepath, str) and os.path.exists(chunk_filepath):
                    self.to_upload_queues[i % self.num_uploaders].put(chunk_filepath)
    def _handle_data_transform_recipe(self, index: int) -> None:
        """Run a ``DataTransformRecipe`` on one item: the recipe writes files under a
        fresh temp dir, and every produced file is queued for upload."""
        # Don't use a context manager to avoid deleting files that are being uploaded.
        output_dir = tempfile.mkdtemp()
        item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
        item_data = self.data_recipe.prepare_item(item, str(output_dir), len(self.items) - 1 == index)
        if item_data is not None:
            raise ValueError(
                "When using a `DataTransformRecipe`, the `prepare_item` shouldn't return anything."
                " Simply store your files under the output_dir."
            )
        filepaths = []
        for directory, _, filenames in os.walk(output_dir):
            for filename in filenames:
                filepaths.append(os.path.join(directory, filename))
        for filepath in filepaths:
            self._try_upload((output_dir, filepath))
class DataWorkerProcess(BaseWorker, Process):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """The DataWorkerProcess is responsible to process the user data inside processes."""
        # Both bases need explicit initialisation: ``Process.__init__`` is not cooperative
        # with ``BaseWorker``'s signature, so ``super()`` cannot be used here.
        BaseWorker.__init__(self, *args, **kwargs)
        Process.__init__(self)
@dataclass
class _Result:
    """Summary of a processing run, returned by ``DataRecipe._done``."""

    size: Optional[int] = None  # number of items in the dataset
    num_bytes: Optional[int] = None  # total chunk bytes (was annotated ``str``; ``_done`` assigns an int sum)
    data_format: Optional[str] = None  # NOTE(review): ``_done`` may assign a pytree, not a str — confirm
    compression: Optional[str] = None
    num_chunks: Optional[int] = None
    num_bytes_per_chunk: Optional[List[int]] = None
# Generic item type produced by ``DataRecipe.prepare_structure``.
T = TypeVar("T")
class DataRecipe:
    """Base interface for data recipes: declares the dataset structure and how each
    item is prepared by the workers."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the list of items (e.g. file metadata) to process."""
        pass

    @abstractmethod
    def prepare_item(self, *args: Any, **kwargs: Any) -> Any:
        """Process a single item; semantics are defined by subclasses."""
        pass

    def __init__(self) -> None:
        # Optional display name; not used by the base class itself.
        self._name: Optional[str] = None

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Hook invoked after all workers finished; returns a summary of the run."""
        return _Result(size=size)
class DataChunkRecipe(DataRecipe):
    """Recipe whose ``prepare_item`` returns data that is persisted into chunked binary files."""

    def __init__(
        self,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
    ):
        """
        Arguments:
            chunk_size: Maximum number of items per chunk (mutually exclusive with ``chunk_bytes``).
            chunk_bytes: Maximum number of bytes per chunk; defaults to 64 MB when neither is given.
            compression: Optional compression algorithm name applied to the chunks.
        """
        super().__init__()
        if chunk_size is not None and chunk_bytes is not None:
            # Bug fix: the old message claimed one value was *missing*, but this branch
            # fires precisely when both were provided.
            raise ValueError("Only one of the `chunk_size` or the `chunk_bytes` can be provided.")
        self.chunk_size = chunk_size
        # Bug fix: previously ``chunk_bytes`` was forced to 64 MB whenever ``chunk_size``
        # was None — silently discarding a caller-provided ``chunk_bytes``. Only default
        # when neither sizing option was given.
        self.chunk_bytes = 1 << 26 if chunk_size is None and chunk_bytes is None else chunk_bytes
        self.compression = compression

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T) -> Any:
        """The return of this `prepare_item` method is persisted in chunked binary files."""

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Merge the per-worker chunk indexes and summarise the resulting dataset."""
        num_nodes = _get_num_nodes()
        cache_dir = _get_cache_dir()
        chunks = [file for file in os.listdir(cache_dir) if file.endswith(".bin")]
        if chunks and delete_cached_files and output_dir.path is not None:
            raise RuntimeError(f"All the chunks should have been deleted. Found {chunks}")
        merge_cache = Cache(cache_dir, chunk_bytes=1)
        node_rank = _get_node_rank()
        merge_cache._merge_no_wait(node_rank if num_nodes > 1 else None)
        self._upload_index(output_dir, cache_dir, num_nodes, node_rank)
        if num_nodes == node_rank + 1:
            # Only the last node reads the merged index and computes the aggregate summary.
            with open(os.path.join(cache_dir, _INDEX_FILENAME)) as f:
                config = json.load(f)
            size = sum([c["dim"] if c["dim"] is not None else c["chunk_size"] for c in config["chunks"]])
            num_bytes = sum([c["chunk_bytes"] for c in config["chunks"]])
            if config["config"] is not None:
                data_format = tree_unflatten(
                    config["config"]["data_format"], treespec_loads(config["config"]["data_spec"])
                )
            else:
                data_format = None
            num_chunks = len(config["chunks"])
            # The platform can't store more than 1024 entries.
            # Note: This isn't really used right now, so it is fine to skip if too big.
            num_bytes_per_chunk = [c["chunk_size"] for c in config["chunks"]] if num_chunks < 1024 else []
            return _Result(
                size=size,
                num_bytes=num_bytes,
                data_format=data_format,
                compression=config["config"]["compression"] if config["config"] else None,
                num_chunks=len(config["chunks"]),
                num_bytes_per_chunk=num_bytes_per_chunk,
            )
        return _Result(
            size=size,
        )

    def _upload_index(self, output_dir: Dir, cache_dir: str, num_nodes: int, node_rank: Optional[int]) -> None:
        """This method upload the index file to the remote cloud directory."""
        if output_dir.path is None and output_dir.url is None:
            return
        obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
        if num_nodes > 1:
            # Per-node index files are suffixed with the node rank and merged later.
            local_filepath = os.path.join(cache_dir, f"{node_rank}-{_INDEX_FILENAME}")
        else:
            local_filepath = os.path.join(cache_dir, _INDEX_FILENAME)
        if obj.scheme == "s3":
            s3 = S3Client()
            s3.client.upload_file(
                local_filepath, obj.netloc, os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
            )
        elif output_dir.path and os.path.isdir(output_dir.path):
            shutil.copyfile(local_filepath, os.path.join(output_dir.path, os.path.basename(local_filepath)))
        if num_nodes == 1 or node_rank is None:
            return
        # Merge the index files generated by each node.
        # Note: When using the Data Optimizer, they should be a single process on each node executing this section
        # So no risk to get race conditon.
        if num_nodes == node_rank + 1:
            # Get the index file locally
            for node_rank in range(num_nodes - 1):
                output_dir_path = output_dir.url if output_dir.url else output_dir.path
                assert output_dir_path
                remote_filepath = os.path.join(output_dir_path, f"{node_rank}-{_INDEX_FILENAME}")
                node_index_filepath = os.path.join(cache_dir, os.path.basename(remote_filepath))
                if obj.scheme == "s3":
                    obj = parse.urlparse(remote_filepath)
                    _wait_for_file_to_exist(s3, obj)
                    with open(node_index_filepath, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif output_dir.path and os.path.isdir(output_dir.path):
                    shutil.copyfile(remote_filepath, node_index_filepath)
            merge_cache = Cache(cache_dir, chunk_bytes=1)
            merge_cache._merge_no_wait()
            # Re-run with num_nodes=1 to upload the final merged index.
            self._upload_index(output_dir, cache_dir, 1, None)
class DataTransformRecipe(DataRecipe):
    """Recipe whose ``prepare_item`` writes output files directly under ``output_dir``
    instead of returning data to be chunked."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T, output_dir: str, is_last: bool) -> None:
        """Use your item metadata to process your files and save the file outputs into `output_dir`."""
class DataProcessor:
    """Drives the end-to-end processing: splits items across worker processes, tracks
    progress, and finalises the dataset once every worker is done."""

    def __init__(
        self,
        input_dir: Union[str, Dir],
        output_dir: Optional[Union[str, Dir]] = None,
        num_workers: Optional[int] = None,
        num_downloaders: Optional[int] = None,
        num_uploaders: Optional[int] = None,
        delete_cached_files: bool = True,
        fast_dev_run: Optional[Union[bool, int]] = None,
        random_seed: Optional[int] = 42,
        reorder_files: bool = True,
        weights: Optional[List[int]] = None,
        reader: Optional[BaseReader] = None,
    ):
        """The `DatasetOptimiser` provides an efficient way to process data across multiple machine into chunks to make
        training faster.

        Arguments:
            input_dir: The path to where the input data are stored.
            output_dir: The path to where the output data are stored.
            num_workers: The number of worker threads to use.
            num_downloaders: The number of file downloaders to use.
            num_uploaders: The number of file uploaders to use.
            delete_cached_files: Whether to delete the cached files.
            fast_dev_run: Whether to run a quick dev run.
            random_seed: The random seed to be set before shuffling the data.
            reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
                Set this to ``False`` if the order in which samples are processed should be preserved.
            weights: Provide a list of weights associated to the inputs.
                This is used to evenly split the work among the workers.
            reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.

        """
        self.input_dir = _resolve_dir(input_dir)
        self.output_dir = _resolve_dir(output_dir)
        self.num_workers = num_workers or (1 if fast_dev_run else (os.cpu_count() or 1) * 4)
        self.num_downloaders = num_downloaders or 2
        self.num_uploaders = num_uploaders or 5
        self.delete_cached_files = delete_cached_files
        self.fast_dev_run = _get_fast_dev_run() if fast_dev_run is None else fast_dev_run
        self.workers: Any = []
        self.workers_tracker: Dict[int, int] = {}  # worker_index -> items processed so far
        self.progress_queue: Optional[Queue] = None
        self.error_queue: Queue = Queue()
        self.stop_queues: List[Queue] = []
        self.reorder_files = reorder_files
        self.weights = weights
        self.reader = reader
        if self.reader is not None and self.weights is not None:
            raise ValueError("Either the reader or the weights needs to be defined.")
        # Ensure the input dir is the same across all nodes
        self.input_dir = broadcast_object("input_dir", self.input_dir)
        if self.output_dir:
            # Ensure the output dir is the same across all nodes
            self.output_dir = broadcast_object("output_dir", self.output_dir)
            print(f"Storing the files under {self.output_dir.path}")
        self.random_seed = random_seed

    def run(self, data_recipe: DataRecipe) -> None:
        """The `DataProcessor.run(...)` method triggers the data recipe processing over your dataset."""
        if not isinstance(data_recipe, DataRecipe):
            raise ValueError("The provided value should be a data recipe.")
        t0 = time()
        print(f"Setup started with fast_dev_run={self.fast_dev_run}.")
        # Force random seed to be fixed
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)
        # Call the setup method of the user
        user_items: List[Any] = data_recipe.prepare_structure(self.input_dir.path if self.input_dir else None)
        if not isinstance(user_items, (list, StreamingDataLoader)):
            raise ValueError("The `prepare_structure` should return a list of item metadata.")
        if isinstance(user_items, StreamingDataLoader):
            self.reader = StreamingDataLoaderReader(user_items)
        if self.reader:
            user_items = self.reader.remap_items(user_items, self.num_workers)
        if self.weights is not None:
            if len(self.weights) != len(user_items):
                raise ValueError("The provided weights length should match the inputs' length.")
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=self.weights, file_size=False
            )
        elif self.reorder_files and self.input_dir.path:
            # TODO: Only do this on node 0, and broadcast the item sizes to the other nodes.
            item_sizes = _get_item_filesizes(user_items, base_path=self.input_dir.path)
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=item_sizes
            )
        else:
            workers_user_items = _map_items_to_workers_sequentially(num_workers=self.num_workers, user_items=user_items)
        print(f"Setup finished in {round(time() - t0, 3)} seconds. Found {len(user_items)} items to process.")
        if self.fast_dev_run:
            items_to_keep = self.fast_dev_run if type(self.fast_dev_run) is int else _DEFAULT_FAST_DEV_RUN_ITEMS
            workers_user_items = [w[:items_to_keep] for w in workers_user_items]
            print(f"Fast dev run is enabled. Limiting to {items_to_keep} items per process.")
        num_items = sum([len(items) for items in workers_user_items])
        self._cleanup_cache()
        print(f"Starting {self.num_workers} workers with {num_items} items.")
        # NOTE(review): this branch was unreachable as written (requires ``input_dir`` to be
        # both None and truthy) and ``src_resolver`` is never assigned in ``__init__``.
        # ``getattr`` prevents a latent AttributeError if ``input_dir`` ever resolves to None.
        if self.input_dir is None and getattr(self, "src_resolver", None) is not None and self.input_dir:
            self.input_dir = self.src_resolver(self.input_dir)
            print(f"The remote_dir is `{self.input_dir}`.")
        signal.signal(signal.SIGINT, self._signal_handler)
        self._create_process_workers(data_recipe, workers_user_items)
        print("Workers are ready ! Starting data processing...")
        current_total = 0
        has_failed = False
        pbar = _tqdm(
            desc="Progress",
            total=num_items,
            smoothing=0,
            position=-1,
            mininterval=1,
            leave=True,
            dynamic_ncols=True,
        )
        while True:
            try:
                error = self.error_queue.get(timeout=0.001)
                self._exit_on_error(error)
            except Empty:
                assert self.progress_queue
                try:
                    index, counter = self.progress_queue.get(timeout=0.001)
                except Empty:
                    continue
                self.workers_tracker[index] = counter
                new_total = sum(self.workers_tracker.values())
            pbar.update(new_total - current_total)
            current_total = new_total
            if current_total == num_items:
                break
            # Exit early if all the workers are done.
            # This means there were some kinda of errors.
            if all(not w.is_alive() for w in self.workers):
                has_failed = True
                break
        pbar.close()
        num_nodes = _get_num_nodes()
        node_rank = _get_node_rank()
        # TODO: Understand why it hangs.
        if num_nodes == 1:
            for w in self.workers:
                w.join(0)
        print("Workers are finished.")
        result = data_recipe._done(len(user_items), self.delete_cached_files, self.output_dir)
        if num_nodes == node_rank + 1 and self.output_dir.url and _IS_IN_STUDIO:
            assert self.output_dir.path
            _create_dataset(
                input_dir=self.input_dir.path,
                storage_dir=self.output_dir.path,
                dataset_type=V1DatasetType.CHUNKED
                if isinstance(data_recipe, DataChunkRecipe)
                else V1DatasetType.TRANSFORMED,
                empty=False,
                size=result.size,
                num_bytes=result.num_bytes,
                data_format=result.data_format,
                compression=result.compression,
                num_chunks=result.num_chunks,
                num_bytes_per_chunk=result.num_bytes_per_chunk,
            )
        print("Finished data processing!")
        # TODO: Understand why it is required to avoid long shutdown.
        if _get_num_nodes() > 1:
            os._exit(int(has_failed))

    def _exit_on_error(self, error: str) -> None:
        """Join all workers briefly, then surface the forwarded worker traceback."""
        for w in self.workers:
            w.join(0)
        raise RuntimeError(f"We found the following error {error}.")

    def _create_process_workers(self, data_recipe: DataRecipe, workers_user_items: List[List[Any]]) -> None:
        """Start one ``DataWorkerProcess`` per item slice, each with its own stop queue."""
        self.progress_queue = Queue()
        workers: List[DataWorkerProcess] = []
        stop_queues: List[Queue] = []
        for worker_idx, worker_user_items in enumerate(workers_user_items):
            stop_queues.append(Queue())
            worker = DataWorkerProcess(
                worker_idx,
                self.num_workers,
                _get_node_rank(),
                data_recipe,
                self.input_dir,
                self.output_dir,
                worker_user_items,
                self.progress_queue,
                self.error_queue,
                stop_queues[-1],
                self.num_downloaders,
                self.num_uploaders,
                self.delete_cached_files,
                self.reader,
            )
            worker.start()
            workers.append(worker)
        # Note: Don't store within the loop as weakref aren't serializable
        self.workers = workers
        self.stop_queues = stop_queues

    def _signal_handler(self, signal: Any, frame: Any) -> None:
        """On termination (SIGINT), stop all worker processes to avoid leaking RAM."""
        for stop_queue in self.stop_queues:
            stop_queue.put(None)
        for w in self.workers:
            w.join(0)
        os._exit(0)

    def _cleanup_cache(self) -> None:
        """Recreate empty chunk/data cache folders so stale files from a previous run can't leak in."""
        cache_dir = _get_cache_dir()
        # Cleanup the cache dir folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)
        os.makedirs(cache_dir, exist_ok=True)
        cache_data_dir = _get_cache_data_dir()
        # Cleanup the cache data folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_data_dir):
            shutil.rmtree(cache_data_dir, ignore_errors=True)
        os.makedirs(cache_data_dir, exist_ok=True)
|
evocodebench_data_158
|
import concurrent
import json
import logging
import os
import random
import shutil
import signal
import tempfile
import traceback
import types
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Process, Queue
from pathlib import Path
from queue import Empty
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from urllib import parse
import numpy as np
import torch
from tqdm.auto import tqdm as _tqdm
from litdata.constants import (
_BOTO3_AVAILABLE,
_DEFAULT_FAST_DEV_RUN_ITEMS,
_INDEX_FILENAME,
_IS_IN_STUDIO,
_LIGHTNING_CLOUD_LATEST,
_TORCH_GREATER_EQUAL_2_1_0,
)
from litdata.processing.readers import BaseReader, StreamingDataLoaderReader
from litdata.processing.utilities import _create_dataset
from litdata.streaming import Cache
from litdata.streaming.cache import Dir
from litdata.streaming.client import S3Client
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import _resolve_dir
from litdata.utilities.broadcast import broadcast_object
from litdata.utilities.packing import _pack_greedily
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten, tree_unflatten, treespec_loads
if _LIGHTNING_CLOUD_LATEST:
from lightning_cloud.openapi import V1DatasetType
if _BOTO3_AVAILABLE:
import botocore
# Bug fix: ``logging.Logger(__name__)`` creates an orphan logger outside logging's
# registry, so application-level logging configuration never applies to it.
# ``getLogger`` returns the registered, configurable module logger instead.
logger = logging.getLogger(__name__)
def _get_num_nodes() -> int:
"""Returns the number of nodes."""
return int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 1))
def _get_node_rank() -> int:
"""Returns the current node rank of the instance."""
return int(os.getenv("DATA_OPTIMIZER_NODE_RANK", 0))
def _get_fast_dev_run() -> int:
"""Returns whether fast dev mode is enabled."""
return bool(int(os.getenv("DATA_OPTIMIZER_FAST_DEV_RUN", 1)))
def _get_default_cache() -> str:
    """Return the root cache folder: a fixed ``/cache`` inside Studio, the system temp dir otherwise."""
    if _IS_IN_STUDIO:
        return "/cache"
    return tempfile.gettempdir()
def _get_cache_dir(name: Optional[str] = None) -> str:
    """Returns the cache directory used by the Cache to store the chunks."""
    base = os.getenv("DATA_OPTIMIZER_CACHE_FOLDER", f"{_get_default_cache()}/chunks")
    return base if name is None else os.path.join(base, name.lstrip("/"))
def _get_cache_data_dir(name: Optional[str] = None) -> str:
    """Returns the cache data directory used by the DataProcessor workers to download the files."""
    base = os.getenv("DATA_OPTIMIZER_DATA_CACHE_FOLDER", f"{_get_default_cache()}/data")
    return base if name is None else os.path.join(base, name.lstrip("/"))
def _wait_for_file_to_exist(s3: S3Client, obj: parse.ParseResult, sleep_time: int = 2) -> Any:
    """Poll S3 until ``obj`` exists and return its HEAD metadata.

    "Not Found" errors are retried (covers S3 eventual consistency / in-flight uploads);
    any other client error is re-raised.
    """
    while True:
        try:
            return s3.client.head_object(Bucket=obj.netloc, Key=obj.path.lstrip("/"))
        except botocore.exceptions.ClientError as e:
            if "the HeadObject operation: Not Found" in str(e):
                sleep(sleep_time)
            else:
                raise e
def _wait_for_disk_usage_higher_than_threshold(input_dir: str, threshold_in_gb: int = 25, sleep_time: int = 3) -> None:
usage = shutil.disk_usage(input_dir)
while (usage.free / 1000 / 1000 / 1000) <= threshold_in_gb:
sleep(sleep_time)
usage = shutil.disk_usage(input_dir)
return
def _download_data_target(input_dir: Dir, cache_dir: str, queue_in: Queue, queue_out: Queue) -> None:
    """This function is used to download data from a remote directory to a cache directory to optimise reading."""
    s3 = S3Client()
    while True:
        # 2. Fetch from the queue
        r: Optional[Tuple[int, List[str]]] = queue_in.get()
        # 3. Terminate the process if we received a termination signal
        if r is None:
            queue_out.put(None)
            return
        # 4. Unpack
        index, paths = r
        # 5. Check whether all the files are already downloaded
        if input_dir.path and all(
            os.path.exists(p.replace(input_dir.path, cache_dir) if input_dir else p) for p in paths
        ):
            queue_out.put(index)
            continue
        if input_dir.url is not None or input_dir.path is not None:
            if input_dir.url:
                # 6. Wait for the removers to catch up when we are downloading data.
                _wait_for_disk_usage_higher_than_threshold("/", 25)
            # 7. Download all the required paths to unblock the current index
            for path in paths:
                if input_dir.path:
                    local_path = path.replace(input_dir.path, cache_dir)
                # NOTE(review): when ``input_dir.url`` is set but ``input_dir.path`` is None,
                # ``local_path`` below would be unbound — confirm that combination cannot occur.
                if input_dir.url and input_dir.path:
                    path = path.replace(input_dir.path, input_dir.url)
                obj = parse.urlparse(path)
                if obj.scheme == "s3":
                    dirpath = os.path.dirname(local_path)
                    os.makedirs(dirpath, exist_ok=True)
                    with open(local_path, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif os.path.isfile(path):
                    # Studio-local files are readable in place; everything else is copied.
                    if not path.startswith("/teamspace/studios/this_studio"):
                        os.makedirs(os.path.dirname(local_path), exist_ok=True)
                        shutil.copyfile(path, local_path)
                else:
                    raise ValueError(f"The provided {input_dir.url} isn't supported.")
        # 7. Inform the worker the current files are available
        queue_out.put(index)
def _remove_target(input_dir: Dir, cache_dir: str, queue_in: Queue) -> None:
    """This function is used to delete files from the cache directory to minimise disk space."""
    while True:
        # 1. Collect paths
        paths = queue_in.get()
        # 2. Terminate the process if we received a termination signal
        if paths is None:
            return
        # 3. Iterate through the paths and delete them sequentially.
        for path in paths:
            if input_dir:
                # Remap an input path to its cached copy before deleting.
                if not path.startswith(cache_dir) and input_dir.path is not None:
                    path = path.replace(input_dir.path, cache_dir)
                if os.path.exists(path):
                    os.remove(path)
            elif os.path.exists(path) and "s3_connections" not in path:
                os.remove(path)
def _upload_fn(upload_queue: Queue, remove_queue: Queue, cache_dir: str, output_dir: Dir) -> None:
    """This function is used to upload optimised chunks from a local to remote dataset directory."""
    obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
    if obj.scheme == "s3":
        s3 = S3Client()
    while True:
        # ``data`` is either a chunk filepath or a ``(tmpdir, filepath)`` pair from transform recipes.
        data: Optional[Union[str, Tuple[str, str]]] = upload_queue.get()
        tmpdir = None
        if isinstance(data, str) or data is None:
            local_filepath = data
        else:
            tmpdir, local_filepath = data
        # Terminate the process if we received a termination signal
        if local_filepath is None:
            return
        # Upload the file to the target cloud storage
        if not local_filepath.startswith(cache_dir):
            local_filepath = os.path.join(cache_dir, local_filepath)
        if obj.scheme == "s3":
            try:
                # Keep the relative layout under ``tmpdir`` when uploading transform outputs.
                if tmpdir is None:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
                else:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), local_filepath.replace(tmpdir, "")[1:])
                s3.client.upload_file(
                    local_filepath,
                    obj.netloc,
                    output_filepath,
                )
            except Exception as e:
                # NOTE(review): failures are only printed (best effort) — the file is not retried.
                print(e)
        elif output_dir.path:
            if tmpdir is None:
                output_filepath = os.path.join(output_dir.path, os.path.basename(local_filepath))
            else:
                output_filepath = os.path.join(output_dir.path, local_filepath.replace(tmpdir, "")[1:])
            os.makedirs(os.path.dirname(output_filepath), exist_ok=True)
            shutil.move(local_filepath, output_filepath)
        else:
            raise ValueError(f"The provided {output_dir.path} isn't supported.")
        # Inform the remover to delete the file
        if remove_queue and os.path.exists(local_filepath):
            remove_queue.put([local_filepath])
def _map_items_to_workers_sequentially(num_workers: int, user_items: List[Any]) -> List[List[Any]]:
    """Split ``user_items`` into contiguous per-worker slices and return this node's share.

    Items are distributed across ``num_nodes * num_workers`` global workers; the first
    ``len(user_items) % total_workers`` workers each get one extra item. Only the
    ``num_workers`` slices belonging to the current node are returned, order preserved.

    Args:
        num_workers: Number of workers per node.
        user_items: All items to distribute.

    Returns:
        A list of ``num_workers`` item lists for this node.
    """
    total_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    total_workers = total_nodes * num_workers
    items_per_worker, extra_items = divmod(len(user_items), total_workers)
    all_workers: List[List[Any]] = []
    start = 0
    for i in range(total_workers):
        worker_items = items_per_worker + 1 if i < extra_items else items_per_worker
        end = start + worker_items
        all_workers.append(user_items[start:end])
        start = end
    # Bug fix: previously the slices of ALL nodes were returned, which then tripped the
    # sanity check (raising RuntimeError) whenever more than one node was used. Each node
    # must only receive its own workers' items.
    result = all_workers[node_rank * num_workers : (node_rank + 1) * num_workers]
    if len(result) != num_workers:
        raise RuntimeError("Improper assignment of items to workers")
    return result
def _map_items_to_workers_weighted(
    num_workers: int,
    user_items: List[Any],
    weights: Optional[List[int]] = None,
    file_size: bool = True,
) -> List[List[Any]]:
    """Greedily pack items into weight-balanced bins and return this node's ``num_workers`` bins.

    Args:
        num_workers: Number of workers per node.
        user_items: All items to distribute.
        weights: Per-item weight (defaults to 1 each), e.g. file sizes in bytes.
        file_size: Whether the weights represent bytes (only affects the log message).
    """
    # Associate the items to the workers based on number of nodes and node rank.
    weights = [1] * len(user_items) if weights is None else weights
    num_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    world_size = num_nodes * num_workers
    worker_items, worker_weights = _pack_greedily(items=user_items, weights=weights, num_bins=world_size)
    worker_ids_this_node = range(node_rank * num_workers, (node_rank + 1) * num_workers)
    for worker_id, size in worker_weights.items():
        if worker_id not in worker_ids_this_node:
            continue
        if file_size:
            print(f"Worker {worker_id} gets {size / 1e6:.1f} MB ({len(worker_items[worker_id])} files)")
        else:
            print(f"Worker {worker_id} gets ({len(worker_items[worker_id])}) items for a total weight of {size}.")
    # Shuffle within each worker bin to decorrelate processing order from the packing.
    return [np.random.permutation(worker_items[worker_id]).tolist() for worker_id in worker_ids_this_node]
def _get_num_bytes(item: Any, base_path: str) -> int:
flattened_item, _ = tree_flatten(item)
num_bytes = 0
for element in flattened_item:
if isinstance(element, str):
element = Path(element).resolve()
if not element.exists():
continue
file_bytes = os.path.getsize(element)
if file_bytes == 0:
raise RuntimeError(f"The file {element} has 0 bytes!")
num_bytes += file_bytes
return num_bytes
def _get_item_filesizes(items: List[Any], base_path: str = "") -> List[int]:
    """Computes the total size in bytes of all file paths for every datastructure in the given list."""
    cpu_count = os.cpu_count() or 1
    max_workers = cpu_count * 2 if cpu_count > 4 else cpu_count
    # A thread pool suffices: the work is dominated by filesystem stat calls (I/O bound).
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(_get_num_bytes, item, base_path) for item in items]
        return [future.result() for future in futures]
def _to_path(element: str) -> str:
    """Normalise a path: keep Studio teamspace paths verbatim, otherwise resolve to absolute."""
    if _IS_IN_STUDIO and element.startswith("/teamspace"):
        return element
    return str(Path(element).resolve())
def _is_path(input_dir: Optional[str], element: Any) -> bool:
    """Return whether ``element`` looks like a filepath (checked against ``input_dir`` in Studio)."""
    if not isinstance(element, str):
        return False
    if _IS_IN_STUDIO and input_dir is not None:
        if element.startswith(input_dir):
            return True
        absolute = str(Path(element).absolute())
        # Either the absolute form lives under ``input_dir``, or it must exist on disk.
        return absolute.startswith(input_dir) or os.path.exists(absolute)
    return os.path.exists(element)
class BaseWorker:
    def __init__(
        self,
        worker_index: int,
        num_workers: int,
        node_rank: int,
        data_recipe: "DataRecipe",
        input_dir: Dir,
        output_dir: Dir,
        items: List[Any],
        progress_queue: Queue,
        error_queue: Queue,
        stop_queue: Queue,
        num_downloaders: int,
        num_uploaders: int,
        remove: bool,
        reader: Optional[BaseReader] = None,
    ) -> None:
        """The BaseWorker is responsible to process the user data."""
        self.worker_index = worker_index
        self.num_workers = num_workers
        self.node_rank = node_rank
        self.data_recipe = data_recipe
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.items = items
        self.num_items = len(self.items)
        self.num_downloaders = num_downloaders
        self.num_uploaders = num_uploaders
        self.remove = remove  # whether local copies are deleted after processing
        self.reader = reader
        self.paths: List[List[str]] = []  # per-item filepaths, filled by ``_collect_paths``
        self.remover: Optional[Process] = None
        self.downloaders: List[Process] = []
        self.uploaders: List[Process] = []
        self.to_download_queues: List[Queue] = []
        self.to_upload_queues: List[Queue] = []
        self.stop_queue = stop_queue
        self.ready_to_process_queue: Queue = Queue()  # item indexes whose files are locally available
        self.remove_queue: Queue = Queue()  # filepaths queued for deletion
        self.progress_queue: Queue = progress_queue
        self.error_queue: Queue = error_queue
        self._counter = 0  # items processed by this worker so far
        self._last_time = time()  # timestamp of the last progress report
        self._index_counter = 0  # running item index used when writing to the cache
def run(self) -> None:
try:
self._setup()
self._loop()
except Exception:
traceback_format = traceback.format_exc()
print(traceback_format)
self.error_queue.put(traceback_format)
print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is done.")
    def _setup(self) -> None:
        """Initialise the worker: env vars, cache folders, path collection, and helper processes."""
        self._set_environ_variables()
        self._create_cache()
        self._collect_paths()
        self._start_downloaders()
        self._start_uploaders()
        self._start_remover()
    def _loop(self) -> None:
        """Main worker loop: consume ready item indexes, run the recipe on each, and
        orchestrate the shutdown of uploaders/remover once all downloaders finished."""
        num_downloader_finished = 0
        while True:
            index = self.ready_to_process_queue.get()
            if index is None:
                # Each downloader emits one ``None`` when it runs out of work.
                num_downloader_finished += 1
                if num_downloader_finished == self.num_downloaders:
                    print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is terminating.")
                    if isinstance(self.data_recipe, DataChunkRecipe):
                        self._handle_data_chunk_recipe_end()
                    if self.output_dir.url if self.output_dir.url else self.output_dir.path:
                        # Inform the uploaders they are doing working
                        for i in range(self.num_uploaders):
                            self.to_upload_queues[i].put(None)
                        # Wait for them all to be finished
                        for uploader in self.uploaders:
                            uploader.join()
                    if self.remove:
                        assert self.remover
                        self.remove_queue.put(None)
                        self.remover.join()
                    if self.progress_queue:
                        # Final progress report so the main process sees the true count.
                        self.progress_queue.put((self.worker_index, self._counter))
                    return
                continue
            if isinstance(self.data_recipe, DataChunkRecipe):
                self._handle_data_chunk_recipe(index)
            else:
                self._handle_data_transform_recipe(index)
            self._counter += 1
            # Don't send the last progress update, so the main thread awaits for the uploader and remover
            if self.progress_queue and (time() - self._last_time) > 1 and self._counter < (self.num_items - 2):
                self.progress_queue.put((self.worker_index, self._counter))
                self._last_time = time()
            if self.remove and self.input_dir.path is not None and self.reader is None:
                self.remove_queue.put(self.paths[index])
            try:
                # Non-blocking check for an external stop request (e.g. SIGINT handler).
                self.stop_queue.get(timeout=0.0001)
                return
            except Empty:
                pass
def _set_environ_variables(self) -> None:
# set the optimizer global rank and world_size
os.environ["DATA_OPTIMIZER_GLOBAL_RANK"] = str(_get_node_rank() * self.num_workers + self.worker_index)
os.environ["DATA_OPTIMIZER_NUM_WORKERS"] = str(self.num_workers)
    def _create_cache(self) -> None:
        """Create the local cache folders and, for chunk recipes, the ``Cache`` writer."""
        self.cache_data_dir = _get_cache_data_dir()
        os.makedirs(self.cache_data_dir, exist_ok=True)
        self.cache_chunks_dir = _get_cache_dir()
        os.makedirs(self.cache_chunks_dir, exist_ok=True)
        if isinstance(self.data_recipe, DataTransformRecipe):
            # Transform recipes write output files themselves; no chunk cache needed.
            return
        self.cache = Cache(
            self.cache_chunks_dir,
            chunk_bytes=self.data_recipe.chunk_bytes,
            chunk_size=self.data_recipe.chunk_size,
            compression=self.data_recipe.compression,
        )
        # Tag the cache with this worker's global rank so chunk filenames don't collide.
        self.cache._reader._rank = _get_node_rank() * self.num_workers + self.worker_index
    def _try_upload(self, data: Optional[Union[str, Tuple[str, str]]]) -> None:
        """Queue a produced file for upload (no-op when there is no output location).

        ``data`` is either a chunk filepath or a ``(tmpdir, filepath)`` pair from transform recipes.
        """
        if not data or (self.output_dir.url if self.output_dir.url else self.output_dir.path) is None:
            return
        if isinstance(data, str):
            assert os.path.exists(data), data
        else:
            assert os.path.exists(data[-1]), data
        # Round-robin over the uploader processes.
        self.to_upload_queues[self._counter % self.num_uploaders].put(data)
    def _collect_paths(self) -> None:
        """Resolve the filepaths contained in each item and remap them to the local cache.

        When there is no input directory (or a custom reader is used), no download step is
        needed, so item indexes are pushed straight to the ready-to-process queue.
        """
        if self.input_dir.path is None or self.reader is not None:
            for index in range(len(self.items)):
                self.ready_to_process_queue.put(index)
            # One ``None`` sentinel per downloader so ``_loop`` can detect termination.
            for _ in range(self.num_downloaders):
                self.ready_to_process_queue.put(None)
            return
        items = []
        for item in self.items:
            flattened_item, spec = tree_flatten(item)
            # For speed reasons, we assume starting with `self.input_dir` is enough to be a real file.
            # Other alternative would be too slow.
            # TODO: Try using dictionary for higher accurary.
            indexed_paths = {
                index: _to_path(element)
                for index, element in enumerate(flattened_item)
                if _is_path(self.input_dir.path, element)
            }
            if len(indexed_paths) == 0:
                raise ValueError(
                    f"The provided item {item} didn't contain any filepaths. The input_dir is {self.input_dir.path}."
                )
            paths = []
            for index, path in indexed_paths.items():
                paths.append(path)
                # Remap input paths to the local cache, except for studio-local files
                # which are readable in place.
                if self.input_dir and not self.input_dir.path.startswith("/teamspace/studios/this_studio"):
                    path = path.replace(self.input_dir.path, self.cache_data_dir)
                flattened_item[index] = path
            self.paths.append(paths)
            items.append(tree_unflatten(flattened_item, spec))
        self.items = items
    def _start_downloaders(self) -> None:
        """Spawn the downloader processes and distribute the per-item path lists round-robin."""
        if self.input_dir.path is None or self.reader is not None:
            # Nothing to download: items were queued directly by ``_collect_paths``.
            return
        for _ in range(self.num_downloaders):
            to_download_queue: Queue = Queue()
            p = Process(
                target=_download_data_target,
                args=(
                    self.input_dir,
                    self.cache_data_dir,
                    to_download_queue,
                    self.ready_to_process_queue,
                ),
            )
            p.start()
            self.downloaders.append(p)
            self.to_download_queues.append(to_download_queue)
        for index, paths in enumerate(self.paths):
            self.to_download_queues[index % self.num_downloaders].put((index, paths))
        # One termination sentinel per downloader.
        for downloader_index in range(self.num_downloaders):
            self.to_download_queues[downloader_index].put(None)
def _start_remover(self) -> None:
    """Spawn the file-remover process (only when cached-file deletion is enabled)."""
    if not self.remove:
        return

    self.remover = Process(
        target=_remove_target,
        args=(
            self.input_dir,
            self.cache_data_dir,
            self.remove_queue,
        ),
    )
    self.remover.start()
def _start_uploaders(self) -> None:
    """Spawn the uploader processes (skipped when there is no output target)."""
    if self.output_dir.path is None and self.output_dir.url is None:
        return

    for _ in range(self.num_uploaders):
        to_upload_queue: Queue = Queue()
        p = Process(
            target=_upload_fn,
            args=(
                to_upload_queue,
                self.remove_queue,
                self.cache_chunks_dir,
                self.output_dir,
            ),
        )
        p.start()
        self.uploaders.append(p)
        self.to_upload_queues.append(to_upload_queue)
def _handle_data_chunk_recipe(self, index: int) -> None:
    """Run the chunk recipe on item `index`, writing results to the cache and queuing uploads.

    `prepare_item` may return a single value or a generator; each non-None result is
    added to the cache under a monotonically increasing index.
    """
    try:
        current_item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
        item_data_or_generator = self.data_recipe.prepare_item(current_item)
        if isinstance(item_data_or_generator, types.GeneratorType):
            for item_data in item_data_or_generator:
                if item_data is not None:
                    # `_add_item` returns a chunk filepath only when a chunk was completed.
                    chunk_filepath = self.cache._add_item(self._index_counter, item_data)
                    self._try_upload(chunk_filepath)
                    self._index_counter += 1
        elif item_data_or_generator is not None:
            chunk_filepath = self.cache._add_item(self._index_counter, item_data_or_generator)
            self._try_upload(chunk_filepath)
            self._index_counter += 1
    except Exception as e:
        # Wrap so the failing item is visible in the error queue / traceback.
        raise RuntimeError(f"Failed processing {self.items[index]}") from e
def _handle_data_chunk_recipe_end(self) -> None:
    """Flush the cache's remaining partial chunks and queue them for upload round-robin."""
    chunks_filepaths = self.cache.done()

    if chunks_filepaths and len(self.to_upload_queues):
        for i, chunk_filepath in enumerate(chunks_filepaths):
            if isinstance(chunk_filepath, str) and os.path.exists(chunk_filepath):
                self.to_upload_queues[i % self.num_uploaders].put(chunk_filepath)
def _handle_data_transform_recipe(self, index: int) -> None:
    """Run the transform recipe on item `index` and queue every produced file for upload."""
    # Don't use a context manager to avoid deleting files that are being uploaded.
    output_dir = tempfile.mkdtemp()
    item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
    # The third argument tells the recipe whether this is the last item.
    item_data = self.data_recipe.prepare_item(item, str(output_dir), len(self.items) - 1 == index)
    if item_data is not None:
        raise ValueError(
            "When using a `DataTransformRecipe`, the `prepare_item` shouldn't return anything."
            " Simply store your files under the output_dir."
        )

    # Collect everything the recipe wrote under the temp dir and hand it to the uploaders.
    filepaths = []
    for directory, _, filenames in os.walk(output_dir):
        for filename in filenames:
            filepaths.append(os.path.join(directory, filename))

    for filepath in filepaths:
        self._try_upload((output_dir, filepath))
class DataWorkerProcess(BaseWorker, Process):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """The DataWorkerProcess is responsible to process the user data inside processes."""
        # Initialize both bases explicitly: BaseWorker wires the queues and state,
        # Process makes the worker runnable via `.start()`.
        BaseWorker.__init__(self, *args, **kwargs)
        Process.__init__(self)
@dataclass
class _Result:
size: Optional[int] = None
num_bytes: Optional[str] = None
data_format: Optional[str] = None
compression: Optional[str] = None
num_chunks: Optional[int] = None
num_bytes_per_chunk: Optional[List[int]] = None
# Generic type variable for the item metadata handled by the recipe interfaces below.
T = TypeVar("T")
class DataRecipe:
    """Base interface for the recipes executed by the `DataProcessor`."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        # Subclasses return the list of item metadata to process.
        pass

    @abstractmethod
    def prepare_item(self, *args: Any, **kwargs: Any) -> Any:
        # Subclasses transform a single item.
        pass

    def __init__(self) -> None:
        # Optional human-readable name for this recipe run.
        self._name: Optional[str] = None

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        # Default completion hook: only report the number of processed items.
        return _Result(size=size)
class DataChunkRecipe(DataRecipe):
    """Recipe whose `prepare_item` outputs are persisted into chunked binary files."""

    def __init__(
        self,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
    ):
        """Configure the chunk writer.

        Arguments:
            chunk_size: Maximum number of items per chunk (mutually exclusive with `chunk_bytes`).
            chunk_bytes: Maximum chunk size in bytes; defaults to 64 MB when neither is given.
            compression: Optional compression codec name.
        """
        super().__init__()
        if chunk_size is not None and chunk_bytes is not None:
            # Message fixed: this branch fires when BOTH are provided.
            raise ValueError("Only one of `chunk_size` or `chunk_bytes` can be provided, not both.")
        self.chunk_size = chunk_size
        # Bug fix: a user-provided `chunk_bytes` was previously discarded and replaced
        # by the 64 MB default whenever `chunk_size` was None. Only fall back to the
        # default when neither parameter was given.
        self.chunk_bytes = 1 << 26 if chunk_size is None and chunk_bytes is None else chunk_bytes
        self.compression = compression
    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T) -> Any:
        """The return of this `prepare_item` method is persisted in chunked binary files."""
    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Merge the per-worker chunk indexes, upload the index, and summarize the run.

        Only the last node (rank == num_nodes - 1) reads the merged index back and
        returns full statistics; all other nodes report only `size`.
        """
        num_nodes = _get_num_nodes()
        cache_dir = _get_cache_dir()

        # With deletion enabled and an output path, every chunk should already be gone.
        chunks = [file for file in os.listdir(cache_dir) if file.endswith(".bin")]
        if chunks and delete_cached_files and output_dir.path is not None:
            raise RuntimeError(f"All the chunks should have been deleted. Found {chunks}")

        merge_cache = Cache(cache_dir, chunk_bytes=1)
        node_rank = _get_node_rank()
        # Multi-node runs merge under a per-node rank prefix so nodes don't collide.
        merge_cache._merge_no_wait(node_rank if num_nodes > 1 else None)
        self._upload_index(output_dir, cache_dir, num_nodes, node_rank)

        if num_nodes == node_rank + 1:
            with open(os.path.join(cache_dir, _INDEX_FILENAME)) as f:
                config = json.load(f)

            size = sum([c["dim"] if c["dim"] is not None else c["chunk_size"] for c in config["chunks"]])
            num_bytes = sum([c["chunk_bytes"] for c in config["chunks"]])
            if config["config"] is not None:
                data_format = tree_unflatten(
                    config["config"]["data_format"], treespec_loads(config["config"]["data_spec"])
                )
            else:
                data_format = None
            num_chunks = len(config["chunks"])

            # The platform can't store more than 1024 entries.
            # Note: This isn't really used right now, so it is fine to skip if too big.
            num_bytes_per_chunk = [c["chunk_size"] for c in config["chunks"]] if num_chunks < 1024 else []
            return _Result(
                size=size,
                num_bytes=num_bytes,
                data_format=data_format,
                compression=config["config"]["compression"] if config["config"] else None,
                num_chunks=len(config["chunks"]),
                num_bytes_per_chunk=num_bytes_per_chunk,
            )
        return _Result(
            size=size,
        )
    def _upload_index(self, output_dir: Dir, cache_dir: str, num_nodes: int, node_rank: Optional[int]) -> None:
        """Upload this node's index file, then (on the last node) merge all nodes' indexes.

        The merge step downloads every other node's `{rank}-index` file, re-merges
        them locally, and recurses once with ``num_nodes=1`` to upload the final index.
        """
        if output_dir.path is None and output_dir.url is None:
            return

        obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
        # Multi-node runs write a per-node index file; single node writes the final one.
        if num_nodes > 1:
            local_filepath = os.path.join(cache_dir, f"{node_rank}-{_INDEX_FILENAME}")
        else:
            local_filepath = os.path.join(cache_dir, _INDEX_FILENAME)

        if obj.scheme == "s3":
            s3 = S3Client()
            s3.client.upload_file(
                local_filepath, obj.netloc, os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
            )
        elif output_dir.path and os.path.isdir(output_dir.path):
            shutil.copyfile(local_filepath, os.path.join(output_dir.path, os.path.basename(local_filepath)))

        if num_nodes == 1 or node_rank is None:
            return

        # Merge the index files generated by each node.
        # Note: When using the Data Optimizer, they should be a single process on each node executing this section
        # So no risk to get race conditon.
        if num_nodes == node_rank + 1:
            # Get the index file locally
            for node_rank in range(num_nodes - 1):
                output_dir_path = output_dir.url if output_dir.url else output_dir.path
                assert output_dir_path
                remote_filepath = os.path.join(output_dir_path, f"{node_rank}-{_INDEX_FILENAME}")
                node_index_filepath = os.path.join(cache_dir, os.path.basename(remote_filepath))
                if obj.scheme == "s3":
                    obj = parse.urlparse(remote_filepath)
                    _wait_for_file_to_exist(s3, obj)
                    with open(node_index_filepath, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif output_dir.path and os.path.isdir(output_dir.path):
                    shutil.copyfile(remote_filepath, node_index_filepath)

            merge_cache = Cache(cache_dir, chunk_bytes=1)
            merge_cache._merge_no_wait()
            self._upload_index(output_dir, cache_dir, 1, None)
class DataTransformRecipe(DataRecipe):
    """Recipe whose `prepare_item` writes transformed files directly into an output directory."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T, output_dir: str, is_last: bool) -> None:
        """Use your item metadata to process your files and save the file outputs into `output_dir`."""
class DataProcessor:
    def __init__(
        self,
        input_dir: Union[str, Dir],
        output_dir: Optional[Union[str, Dir]] = None,
        num_workers: Optional[int] = None,
        num_downloaders: Optional[int] = None,
        num_uploaders: Optional[int] = None,
        delete_cached_files: bool = True,
        fast_dev_run: Optional[Union[bool, int]] = None,
        random_seed: Optional[int] = 42,
        reorder_files: bool = True,
        weights: Optional[List[int]] = None,
        reader: Optional[BaseReader] = None,
    ):
        """The `DataProcessor` provides an efficient way to process data across multiple machine into chunks to make
        training faster.

        Arguments:
            input_dir: The path to where the input data are stored.
            output_dir: The path to where the output data are stored.
            num_workers: The number of worker threads to use.
            num_downloaders: The number of file downloaders to use.
            num_uploaders: The number of file uploaders to use.
            delete_cached_files: Whether to delete the cached files.
            fast_dev_run: Whether to run a quick dev run.
            random_seed: The random seed to be set before shuffling the data.
            reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
                Set this to ``False`` if the order in which samples are processed should be preserved.
            weights: Provide a list of weights associated to the inputs.
                This is used to evenly split the work among the workers.
            reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.
        """
        self.input_dir = _resolve_dir(input_dir)
        self.output_dir = _resolve_dir(output_dir)
        # Default worker count: a single worker in fast-dev-run, otherwise 4 per CPU.
        self.num_workers = num_workers or (1 if fast_dev_run else (os.cpu_count() or 1) * 4)
        self.num_downloaders = num_downloaders or 2
        self.num_uploaders = num_uploaders or 5
        self.delete_cached_files = delete_cached_files
        self.fast_dev_run = _get_fast_dev_run() if fast_dev_run is None else fast_dev_run
        self.workers: Any = []
        # Maps worker index -> number of items that worker has processed so far.
        self.workers_tracker: Dict[int, int] = {}
        self.progress_queue: Optional[Queue] = None
        self.error_queue: Queue = Queue()
        self.stop_queues: List[Queue] = []
        self.reorder_files = reorder_files
        self.weights = weights
        self.reader = reader

        # `weights` and `reader` are mutually exclusive item-assignment strategies.
        if self.reader is not None and self.weights is not None:
            raise ValueError("Either the reader or the weights needs to be defined.")

        # Ensure the input dir is the same across all nodes
        self.input_dir = broadcast_object("input_dir", self.input_dir)

        if self.output_dir:
            # Ensure the output dir is the same across all nodes
            self.output_dir = broadcast_object("output_dir", self.output_dir)
            print(f"Storing the files under {self.output_dir.path}")

        self.random_seed = random_seed
    def run(self, data_recipe: DataRecipe) -> None:
        """The `DataProcessor.run(...)` method triggers the data recipe processing over your dataset.

        Steps: seed RNGs, collect the items from the recipe, assign them to workers
        (weighted, by file size, or sequentially), spawn the worker processes, drive
        a progress bar from the workers' queues, then finalize via `data_recipe._done`.
        """
        if not isinstance(data_recipe, DataRecipe):
            raise ValueError("The provided value should be a data recipe.")

        t0 = time()
        print(f"Setup started with fast_dev_run={self.fast_dev_run}.")

        # Force random seed to be fixed
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)

        # Call the setup method of the user
        user_items: List[Any] = data_recipe.prepare_structure(self.input_dir.path if self.input_dir else None)
        if not isinstance(user_items, (list, StreamingDataLoader)):
            raise ValueError("The `prepare_structure` should return a list of item metadata.")

        # A StreamingDataLoader result is wrapped into a reader-based flow.
        if isinstance(user_items, StreamingDataLoader):
            self.reader = StreamingDataLoaderReader(user_items)

        if self.reader:
            user_items = self.reader.remap_items(user_items, self.num_workers)

        # Choose the worker-assignment strategy: explicit weights > file sizes > sequential.
        if self.weights is not None:
            if len(self.weights) != len(user_items):
                raise ValueError("The provided weights length should match the inputs' length.")
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=self.weights, file_size=False
            )
        elif self.reorder_files and self.input_dir.path:
            # TODO: Only do this on node 0, and broadcast the item sizes to the other nodes.
            item_sizes = _get_item_filesizes(user_items, base_path=self.input_dir.path)
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=item_sizes
            )
        else:
            workers_user_items = _map_items_to_workers_sequentially(num_workers=self.num_workers, user_items=user_items)

        print(f"Setup finished in {round(time() - t0, 3)} seconds. Found {len(user_items)} items to process.")

        if self.fast_dev_run:
            items_to_keep = self.fast_dev_run if type(self.fast_dev_run) is int else _DEFAULT_FAST_DEV_RUN_ITEMS
            workers_user_items = [w[:items_to_keep] for w in workers_user_items]
            print(f"Fast dev run is enabled. Limiting to {items_to_keep} items per process.")

        num_items = sum([len(items) for items in workers_user_items])

        self._cleanup_cache()

        print(f"Starting {self.num_workers} workers with {num_items} items.")

        # NOTE(review): this condition can never be True (`self.input_dir is None and ... and self.input_dir`),
        # and `self.src_resolver` is never assigned in `__init__` — looks like dead/stale code; confirm.
        if self.input_dir is None and self.src_resolver is not None and self.input_dir:
            self.input_dir = self.src_resolver(self.input_dir)
            print(f"The remote_dir is `{self.input_dir}`.")

        signal.signal(signal.SIGINT, self._signal_handler)

        self._create_process_workers(data_recipe, workers_user_items)

        print("Workers are ready ! Starting data processing...")

        current_total = 0
        has_failed = False

        pbar = _tqdm(
            desc="Progress",
            total=num_items,
            smoothing=0,
            position=-1,
            mininterval=1,
            leave=True,
            dynamic_ncols=True,
        )

        while True:
            try:
                # Any worker error aborts the whole run.
                error = self.error_queue.get(timeout=0.001)
                self._exit_on_error(error)
            except Empty:
                assert self.progress_queue
                try:
                    index, counter = self.progress_queue.get(timeout=0.001)
                except Empty:
                    continue
                self.workers_tracker[index] = counter
                new_total = sum(self.workers_tracker.values())

            pbar.update(new_total - current_total)
            current_total = new_total
            if current_total == num_items:
                break

            # Exit early if all the workers are done.
            # This means there were some kinda of errors.
            if all(not w.is_alive() for w in self.workers):
                has_failed = True
                break

        pbar.close()

        num_nodes = _get_num_nodes()
        node_rank = _get_node_rank()
        # TODO: Understand why it hangs.
        if num_nodes == 1:
            for w in self.workers:
                w.join(0)

        print("Workers are finished.")
        result = data_recipe._done(len(user_items), self.delete_cached_files, self.output_dir)

        # Only the last node registers the dataset, and only when running inside a Studio.
        if num_nodes == node_rank + 1 and self.output_dir.url and _IS_IN_STUDIO:
            assert self.output_dir.path
            _create_dataset(
                input_dir=self.input_dir.path,
                storage_dir=self.output_dir.path,
                dataset_type=V1DatasetType.CHUNKED
                if isinstance(data_recipe, DataChunkRecipe)
                else V1DatasetType.TRANSFORMED,
                empty=False,
                size=result.size,
                num_bytes=result.num_bytes,
                data_format=result.data_format,
                compression=result.compression,
                num_chunks=result.num_chunks,
                num_bytes_per_chunk=result.num_bytes_per_chunk,
            )

        print("Finished data processing!")

        # TODO: Understand why it is required to avoid long shutdown.
        if _get_num_nodes() > 1:
            os._exit(int(has_failed))
def _exit_on_error(self, error: str) -> None:
for w in self.workers:
w.join(0)
raise RuntimeError(f"We found the following error {error}.")
    def _create_process_workers(self, data_recipe: DataRecipe, workers_user_items: List[List[Any]]) -> None:
        """Spawn one `DataWorkerProcess` per item partition and start them all."""
        self.progress_queue = Queue()
        workers: List[DataWorkerProcess] = []
        stop_queues: List[Queue] = []
        for worker_idx, worker_user_items in enumerate(workers_user_items):
            stop_queues.append(Queue())
            worker = DataWorkerProcess(
                worker_idx,
                self.num_workers,
                _get_node_rank(),
                data_recipe,
                self.input_dir,
                self.output_dir,
                worker_user_items,
                self.progress_queue,
                self.error_queue,
                stop_queues[-1],
                self.num_downloaders,
                self.num_uploaders,
                self.delete_cached_files,
                self.reader,
            )
            worker.start()
            workers.append(worker)

        # Note: Don't store within the loop as weakref aren't serializable
        self.workers = workers
        self.stop_queues = stop_queues
    def _signal_handler(self, signal: Any, frame: Any) -> None:
        """On temrination, we stop all the processes to avoid leaking RAM."""
        # Ask every worker to stop, reap them, then hard-exit the main process.
        for stop_queue in self.stop_queues:
            stop_queue.put(None)

        for w in self.workers:
            w.join(0)

        os._exit(0)
def _cleanup_cache(self) -> None:
    """Wipe and recreate the chunk and data cache folders.

    This prevents corrupted files from a previous run from leaking into this one.
    """
    for directory in (_get_cache_dir(), _get_cache_data_dir()):
        if os.path.exists(directory):
            shutil.rmtree(directory, ignore_errors=True)
        os.makedirs(directory, exist_ok=True)
|
evocodebench_data_159
|
import concurrent
import concurrent.futures
import json
import logging
import os
import random
import shutil
import signal
import tempfile
import traceback
import types
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Process, Queue
from pathlib import Path
from queue import Empty
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from urllib import parse

import numpy as np
import torch
from tqdm.auto import tqdm as _tqdm

from litdata.constants import (
    _BOTO3_AVAILABLE,
    _DEFAULT_FAST_DEV_RUN_ITEMS,
    _INDEX_FILENAME,
    _IS_IN_STUDIO,
    _LIGHTNING_CLOUD_LATEST,
    _TORCH_GREATER_EQUAL_2_1_0,
)
from litdata.processing.readers import BaseReader, StreamingDataLoaderReader
from litdata.processing.utilities import _create_dataset
from litdata.streaming import Cache
from litdata.streaming.cache import Dir
from litdata.streaming.client import S3Client
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import _resolve_dir
from litdata.utilities.broadcast import broadcast_object
from litdata.utilities.packing import _pack_greedily
if _TORCH_GREATER_EQUAL_2_1_0:
    from torch.utils._pytree import tree_flatten, tree_unflatten, treespec_loads

if _LIGHTNING_CLOUD_LATEST:
    from lightning_cloud.openapi import V1DatasetType

if _BOTO3_AVAILABLE:
    import botocore

# Bug fix: `logging.Logger(__name__)` instantiates a detached logger outside the
# logging hierarchy (it ignores `logging.basicConfig`, handlers, and propagation).
# `logging.getLogger(__name__)` returns the properly registered module logger.
logger = logging.getLogger(__name__)
def _get_num_nodes() -> int:
"""Returns the number of nodes."""
return int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 1))
def _get_node_rank() -> int:
"""Returns the current node rank of the instance."""
return int(os.getenv("DATA_OPTIMIZER_NODE_RANK", 0))
def _get_fast_dev_run() -> int:
"""Returns whether fast dev mode is enabled."""
return bool(int(os.getenv("DATA_OPTIMIZER_FAST_DEV_RUN", 1)))
def _get_default_cache() -> str:
    """Return the base cache directory: `/cache` inside a Studio, else the system temp dir."""
    if _IS_IN_STUDIO:
        return "/cache"
    return tempfile.gettempdir()
def _get_cache_dir(name: Optional[str] = None) -> str:
    """Returns the cache directory used by the Cache to store the chunks."""
    base = os.getenv("DATA_OPTIMIZER_CACHE_FOLDER", f"{_get_default_cache()}/chunks")
    return base if name is None else os.path.join(base, name.lstrip("/"))
def _get_cache_data_dir(name: Optional[str] = None) -> str:
    """Returns the cache data directory used by the DataProcessor workers to download the files."""
    cache_dir = os.getenv("DATA_OPTIMIZER_DATA_CACHE_FOLDER", f"{_get_default_cache()}/data")
    if name is None:
        # Previously returned `os.path.join(cache_dir)` — a no-op wrapper; return
        # directly, consistent with `_get_cache_dir`.
        return cache_dir
    return os.path.join(cache_dir, name.lstrip("/"))
def _wait_for_file_to_exist(s3: S3Client, obj: parse.ParseResult, sleep_time: int = 2) -> Any:
    """Poll S3 until the object exists and return its HEAD metadata.

    Any client error other than a "Not Found" on HeadObject is re-raised.
    """
    while True:
        try:
            return s3.client.head_object(Bucket=obj.netloc, Key=obj.path.lstrip("/"))
        except botocore.exceptions.ClientError as e:
            if "the HeadObject operation: Not Found" in str(e):
                sleep(sleep_time)
            else:
                raise e
def _wait_for_disk_usage_higher_than_threshold(input_dir: str, threshold_in_gb: int = 25, sleep_time: int = 3) -> None:
usage = shutil.disk_usage(input_dir)
while (usage.free / 1000 / 1000 / 1000) <= threshold_in_gb:
sleep(sleep_time)
usage = shutil.disk_usage(input_dir)
return
def _download_data_target(input_dir: Dir, cache_dir: str, queue_in: Queue, queue_out: Queue) -> None:
    """This function is used to download data from a remote directory to a cache directory to optimise reading.

    Runs as a long-lived worker: pulls (index, paths) tuples from `queue_in`,
    downloads/copies the files into `cache_dir`, and posts the ready index on
    `queue_out`. A ``None`` input is the termination signal and is forwarded.
    """
    s3 = S3Client()
    while True:
        # 2. Fetch from the queue
        r: Optional[Tuple[int, List[str]]] = queue_in.get()

        # 3. Terminate the process if we received a termination signal
        if r is None:
            queue_out.put(None)
            return

        # 4. Unpack
        index, paths = r

        # 5. Check whether all the files are already downloaded
        if input_dir.path and all(
            os.path.exists(p.replace(input_dir.path, cache_dir) if input_dir else p) for p in paths
        ):
            queue_out.put(index)
            continue

        if input_dir.url is not None or input_dir.path is not None:
            if input_dir.url:
                # 6. Wait for the removers to catch up when we are downloading data.
                _wait_for_disk_usage_higher_than_threshold("/", 25)

            # 7. Download all the required paths to unblock the current index
            for path in paths:
                # NOTE(review): `local_path` is only bound when `input_dir.path` is set;
                # an URL-only input dir would hit a NameError below — confirm intended.
                if input_dir.path:
                    local_path = path.replace(input_dir.path, cache_dir)

                if input_dir.url and input_dir.path:
                    path = path.replace(input_dir.path, input_dir.url)

                obj = parse.urlparse(path)

                if obj.scheme == "s3":
                    dirpath = os.path.dirname(local_path)
                    os.makedirs(dirpath, exist_ok=True)
                    with open(local_path, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif os.path.isfile(path):
                    # Files already on the Studio filesystem are used in place.
                    if not path.startswith("/teamspace/studios/this_studio"):
                        os.makedirs(os.path.dirname(local_path), exist_ok=True)
                        shutil.copyfile(path, local_path)
                else:
                    raise ValueError(f"The provided {input_dir.url} isn't supported.")

        # 7. Inform the worker the current files are available
        queue_out.put(index)
def _remove_target(input_dir: Dir, cache_dir: str, queue_in: Queue) -> None:
    """This function is used to delete files from the cache directory to minimise disk space.

    Runs as a long-lived worker: receives lists of paths and deletes them; a
    ``None`` input terminates the process.
    """
    while True:
        # 1. Collect paths
        paths = queue_in.get()

        # 2. Terminate the process if we received a termination signal
        if paths is None:
            return

        # 3. Iterate through the paths and delete them sequentially.
        for path in paths:
            if input_dir:
                # Map source paths back onto their cached copies before deleting.
                if not path.startswith(cache_dir) and input_dir.path is not None:
                    path = path.replace(input_dir.path, cache_dir)

                if os.path.exists(path):
                    os.remove(path)

            elif os.path.exists(path) and "s3_connections" not in path:
                os.remove(path)
def _upload_fn(upload_queue: Queue, remove_queue: Queue, cache_dir: str, output_dir: Dir) -> None:
    """This function is used to upload optimised chunks from a local to remote dataset directory.

    Runs as a long-lived worker: receives either a chunk filepath or a
    (tmpdir, filepath) tuple, pushes it to S3 or moves it to the output path,
    then notifies the remover. A ``None`` filepath terminates the process.
    """
    obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)

    if obj.scheme == "s3":
        s3 = S3Client()

    while True:
        data: Optional[Union[str, Tuple[str, str]]] = upload_queue.get()

        tmpdir = None
        if isinstance(data, str) or data is None:
            local_filepath = data
        else:
            tmpdir, local_filepath = data

        # Terminate the process if we received a termination signal
        if local_filepath is None:
            return

        # Upload the file to the target cloud storage
        if not local_filepath.startswith(cache_dir):
            local_filepath = os.path.join(cache_dir, local_filepath)

        if obj.scheme == "s3":
            try:
                # Transform-recipe uploads preserve the path relative to the tmpdir.
                if tmpdir is None:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
                else:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), local_filepath.replace(tmpdir, "")[1:])
                s3.client.upload_file(
                    local_filepath,
                    obj.netloc,
                    output_filepath,
                )
            except Exception as e:
                # Best-effort: a failed upload is reported but doesn't kill the worker.
                print(e)
        elif output_dir.path:
            if tmpdir is None:
                output_filepath = os.path.join(output_dir.path, os.path.basename(local_filepath))
            else:
                output_filepath = os.path.join(output_dir.path, local_filepath.replace(tmpdir, "")[1:])
            os.makedirs(os.path.dirname(output_filepath), exist_ok=True)
            shutil.move(local_filepath, output_filepath)
        else:
            raise ValueError(f"The provided {output_dir.path} isn't supported.")

        # Inform the remover to delete the file
        if remove_queue and os.path.exists(local_filepath):
            remove_queue.put([local_filepath])
def _map_items_to_workers_sequentially(num_workers: int, user_items: List[Any]) -> List[List[Any]]:
    """Assign `user_items` to this node's workers as contiguous, near-equal slices.

    Items are split across ``num_nodes * num_workers`` workers; the first
    ``len(user_items) % total_workers`` workers each receive one extra item.
    Only the slices belonging to the current node are returned.

    Fixes two defects in the previous version:
    - it built the assignments for *all* nodes and then raised ``RuntimeError``
      whenever more than one node was used (``len(result) != num_workers``);
    - it pointlessly re-imported ``os`` and typing names inside the function.
    """
    total_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    total_workers = total_nodes * num_workers

    base_count, extra_items = divmod(len(user_items), total_workers)

    all_assignments: List[List[Any]] = []
    start = 0
    for worker_id in range(total_workers):
        count = base_count + 1 if worker_id < extra_items else base_count
        all_assignments.append(user_items[start : start + count])
        start += count

    # Keep only the slices belonging to this node's workers.
    begin = node_rank * num_workers
    node_assignments = all_assignments[begin : begin + num_workers]
    if len(node_assignments) != num_workers:
        raise RuntimeError("Improper assignment of items to workers")
    return node_assignments
def _map_items_to_workers_weighted(
    num_workers: int,
    user_items: List[Any],
    weights: Optional[List[int]] = None,
    file_size: bool = True,
) -> List[List[Any]]:
    """Greedily pack items into ``num_nodes * num_workers`` bins by weight and return this node's bins.

    Each returned worker list is shuffled with numpy's global RNG (the caller
    seeds it beforehand). ``file_size`` only changes the log message format.
    """
    # Associate the items to the workers based on number of nodes and node rank.
    weights = [1] * len(user_items) if weights is None else weights
    num_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    world_size = num_nodes * num_workers

    worker_items, worker_weights = _pack_greedily(items=user_items, weights=weights, num_bins=world_size)
    worker_ids_this_node = range(node_rank * num_workers, (node_rank + 1) * num_workers)

    for worker_id, size in worker_weights.items():
        if worker_id not in worker_ids_this_node:
            continue

        if file_size:
            print(f"Worker {worker_id} gets {size / 1e6:.1f} MB ({len(worker_items[worker_id])} files)")
        else:
            print(f"Worker {worker_id} gets ({len(worker_items[worker_id])}) items for a total weight of {size}.")

    return [np.random.permutation(worker_items[worker_id]).tolist() for worker_id in worker_ids_this_node]
def _get_num_bytes(item: Any, base_path: str) -> int:
flattened_item, _ = tree_flatten(item)
num_bytes = 0
for element in flattened_item:
if isinstance(element, str):
element = Path(element).resolve()
if not element.exists():
continue
file_bytes = os.path.getsize(element)
if file_bytes == 0:
raise RuntimeError(f"The file {element} has 0 bytes!")
num_bytes += file_bytes
return num_bytes
def _get_item_filesizes(items: List[Any], base_path: str = "") -> List[int]:
    """Computes the total size in bytes of all file paths for every datastructure in the given list.

    Sizes are returned in the same order as `items`. The previous version relied
    on `concurrent.futures` being reachable through a bare `import concurrent`,
    which does not guarantee the submodule is loaded; the module now imports
    `concurrent.futures` explicitly at the top of the file.
    """
    item_sizes = []

    cpu_count = os.cpu_count() or 1
    # Over-subscribe threads on larger machines since the work is I/O bound (stat calls).
    max_workers = cpu_count * 2 if cpu_count > 4 else cpu_count

    # Parallelize to accelerate retrieving the number of file bytes to read for each item
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(_get_num_bytes, item, base_path) for item in items]
        for future in futures:
            item_sizes.append(future.result())
    return item_sizes
def _to_path(element: str) -> str:
    """Keep Studio teamspace paths as-is; otherwise resolve to an absolute path."""
    if _IS_IN_STUDIO and element.startswith("/teamspace"):
        return element
    return str(Path(element).resolve())
def _is_path(input_dir: Optional[str], element: Any) -> bool:
    """Heuristically decide whether `element` is a filepath.

    In a Studio with a known input dir, a prefix match (raw or absolutized)
    counts as a path; otherwise fall back to an existence check.
    """
    if not isinstance(element, str):
        return False

    candidate = element
    if _IS_IN_STUDIO and input_dir is not None:
        # Cheap prefix check first, then retry with the absolutized form.
        if candidate.startswith(input_dir):
            return True
        candidate = str(Path(candidate).absolute())
        if candidate.startswith(input_dir):
            return True

    return os.path.exists(candidate)
class BaseWorker:
    def __init__(
        self,
        worker_index: int,
        num_workers: int,
        node_rank: int,
        data_recipe: "DataRecipe",
        input_dir: Dir,
        output_dir: Dir,
        items: List[Any],
        progress_queue: Queue,
        error_queue: Queue,
        stop_queue: Queue,
        num_downloaders: int,
        num_uploaders: int,
        remove: bool,
        reader: Optional[BaseReader] = None,
    ) -> None:
        """The BaseWorker is responsible to process the user data."""
        self.worker_index = worker_index
        self.num_workers = num_workers
        self.node_rank = node_rank
        self.data_recipe = data_recipe
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.items = items
        self.num_items = len(self.items)
        self.num_downloaders = num_downloaders
        self.num_uploaders = num_uploaders
        self.remove = remove
        self.reader = reader
        # Per-item lists of source filepaths (filled by `_collect_paths`).
        self.paths: List[List[str]] = []
        self.remover: Optional[Process] = None
        self.downloaders: List[Process] = []
        self.uploaders: List[Process] = []
        self.to_download_queues: List[Queue] = []
        self.to_upload_queues: List[Queue] = []
        self.stop_queue = stop_queue
        # Indexes of items whose files are downloaded and ready for processing.
        self.ready_to_process_queue: Queue = Queue()
        self.remove_queue: Queue = Queue()
        self.progress_queue: Queue = progress_queue
        self.error_queue: Queue = error_queue
        # Items processed so far; drives progress reporting and upload round-robin.
        self._counter = 0
        self._last_time = time()
        # Monotonic index used when adding results to the chunk cache.
        self._index_counter = 0
def run(self) -> None:
    """Worker entry point: set everything up, then process the assigned items.

    Any unhandled exception is printed and forwarded to the main process via
    the error queue instead of crashing silently.
    """
    try:
        self._setup()
        self._loop()
    except Exception:
        formatted = traceback.format_exc()
        print(formatted)
        self.error_queue.put(formatted)
    global_rank = _get_node_rank() * self.num_workers + self.worker_index
    print(f"Worker {str(global_rank)} is done.")
def _setup(self) -> None:
    """Prepare the worker: env vars, cache dirs, path collection, then helper processes.

    The order matters: paths must be collected before the downloaders are fed.
    """
    setup_steps = (
        self._set_environ_variables,
        self._create_cache,
        self._collect_paths,
        self._start_downloaders,
        self._start_uploaders,
        self._start_remover,
    )
    for step in setup_steps:
        step()
def _loop(self) -> None:
    """Main processing loop: consume ready item indexes until all downloaders finish.

    Each downloader posts a ``None`` sentinel when done; once all sentinels are
    seen, the uploaders and remover are drained/joined and the final progress
    count is reported before returning.
    """
    num_downloader_finished = 0

    while True:
        index = self.ready_to_process_queue.get()

        if index is None:
            num_downloader_finished += 1
            if num_downloader_finished == self.num_downloaders:
                print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is terminating.")

                if isinstance(self.data_recipe, DataChunkRecipe):
                    self._handle_data_chunk_recipe_end()

                if self.output_dir.url if self.output_dir.url else self.output_dir.path:
                    # Inform the uploaders they are doing working
                    for i in range(self.num_uploaders):
                        self.to_upload_queues[i].put(None)

                    # Wait for them all to be finished
                    for uploader in self.uploaders:
                        uploader.join()

                if self.remove:
                    assert self.remover
                    self.remove_queue.put(None)
                    self.remover.join()

                if self.progress_queue:
                    self.progress_queue.put((self.worker_index, self._counter))
                return
            continue

        if isinstance(self.data_recipe, DataChunkRecipe):
            self._handle_data_chunk_recipe(index)
        else:
            self._handle_data_transform_recipe(index)

        self._counter += 1

        # Don't send the last progress update, so the main thread awaits for the uploader and remover
        if self.progress_queue and (time() - self._last_time) > 1 and self._counter < (self.num_items - 2):
            self.progress_queue.put((self.worker_index, self._counter))
            self._last_time = time()

        if self.remove and self.input_dir.path is not None and self.reader is None:
            self.remove_queue.put(self.paths[index])

        # Non-blocking check for an external stop request.
        try:
            self.stop_queue.get(timeout=0.0001)
            return
        except Empty:
            pass
def _set_environ_variables(self) -> None:
    """Expose this worker's global rank and the per-node worker count via env vars."""
    # set the optimizer global rank and world_size
    global_rank = _get_node_rank() * self.num_workers + self.worker_index
    os.environ["DATA_OPTIMIZER_GLOBAL_RANK"] = str(global_rank)
    os.environ["DATA_OPTIMIZER_NUM_WORKERS"] = str(self.num_workers)
def _create_cache(self) -> None:
    """Create the local cache directories and, for chunk recipes, the `Cache` writer."""
    self.cache_data_dir = _get_cache_data_dir()
    os.makedirs(self.cache_data_dir, exist_ok=True)

    self.cache_chunks_dir = _get_cache_dir()
    os.makedirs(self.cache_chunks_dir, exist_ok=True)

    # Transform recipes write files directly; no chunk cache is needed.
    if isinstance(self.data_recipe, DataTransformRecipe):
        return

    self.cache = Cache(
        self.cache_chunks_dir,
        chunk_bytes=self.data_recipe.chunk_bytes,
        chunk_size=self.data_recipe.chunk_size,
        compression=self.data_recipe.compression,
    )
    # Tag the cache with this worker's global rank so chunk filenames don't collide.
    self.cache._reader._rank = _get_node_rank() * self.num_workers + self.worker_index
def _try_upload(self, data: Optional[Union[str, Tuple[str, str]]]) -> None:
    """Queue `data` for upload (round-robin across uploaders); no-op without an output target.

    `data` is either a chunk filepath or a (tmpdir, filepath) tuple; the file
    must already exist on disk.
    """
    upload_target = self.output_dir.url if self.output_dir.url else self.output_dir.path
    if not data or upload_target is None:
        return

    if isinstance(data, str):
        assert os.path.exists(data), data
    else:
        assert os.path.exists(data[-1]), data

    queue_idx = self._counter % self.num_uploaders
    self.to_upload_queues[queue_idx].put(data)
def _collect_paths(self) -> None:
    """Resolve the filepaths inside every item and remap them onto the local data cache.

    When there is no input dir path (or a reader handles I/O), no download is needed:
    all item indexes are marked ready immediately and downloader sentinels are queued.
    """
    if self.input_dir.path is None or self.reader is not None:
        for index in range(len(self.items)):
            self.ready_to_process_queue.put(index)
        # One ``None`` sentinel per downloader so each one terminates cleanly.
        for _ in range(self.num_downloaders):
            self.ready_to_process_queue.put(None)
        return

    items = []
    for item in self.items:
        flattened_item, spec = tree_flatten(item)

        # For speed reasons, we assume starting with `self.input_dir` is enough to be a real file.
        # Other alternative would be too slow.
        # TODO: Try using dictionary for higher accurary.
        indexed_paths = {
            index: _to_path(element)
            for index, element in enumerate(flattened_item)
            if _is_path(self.input_dir.path, element)
        }

        if len(indexed_paths) == 0:
            raise ValueError(
                f"The provided item {item} didn't contain any filepaths. The input_dir is {self.input_dir.path}."
            )

        paths = []
        for index, path in indexed_paths.items():
            paths.append(path)
            # Outside the Studio local filesystem the file will be downloaded,
            # so the item must point at the cached copy instead of the source.
            if self.input_dir and not self.input_dir.path.startswith("/teamspace/studios/this_studio"):
                path = path.replace(self.input_dir.path, self.cache_data_dir)
            flattened_item[index] = path

        self.paths.append(paths)
        items.append(tree_unflatten(flattened_item, spec))

    self.items = items
def _start_downloaders(self) -> None:
    """Spawn the downloader processes and distribute item paths to them round-robin.

    No-op when there is nothing to download (no input path, or a reader is used).
    """
    if self.input_dir.path is None or self.reader is not None:
        return

    for _ in range(self.num_downloaders):
        to_download_queue: Queue = Queue()
        p = Process(
            target=_download_data_target,
            args=(
                self.input_dir,
                self.cache_data_dir,
                to_download_queue,
                self.ready_to_process_queue,
            ),
        )
        p.start()
        self.downloaders.append(p)
        self.to_download_queues.append(to_download_queue)

    # Round-robin the (index, paths) work units across the downloaders.
    for index, paths in enumerate(self.paths):
        self.to_download_queues[index % self.num_downloaders].put((index, paths))

    # One ``None`` sentinel per downloader signals end of work.
    for downloader_index in range(self.num_downloaders):
        self.to_download_queues[downloader_index].put(None)
def _start_remover(self) -> None:
if not self.remove:
return
self.remover = Process(
target=_remove_target,
args=(
self.input_dir,
self.cache_data_dir,
self.remove_queue,
),
)
self.remover.start()
def _start_uploaders(self) -> None:
if self.output_dir.path is None and self.output_dir.url is None:
return
for _ in range(self.num_uploaders):
to_upload_queue: Queue = Queue()
p = Process(
target=_upload_fn,
args=(
to_upload_queue,
self.remove_queue,
self.cache_chunks_dir,
self.output_dir,
),
)
p.start()
self.uploaders.append(p)
self.to_upload_queues.append(to_upload_queue)
    def _handle_data_chunk_recipe(self, index: int) -> None:
        """Run the user's `prepare_item` on item `index` and store the result in the chunk cache.

        Generator results are consumed value by value; each yielded value gets its own
        monotonically increasing cache index. Any completed chunk file is scheduled for upload.
        """
        try:
            current_item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
            item_data_or_generator = self.data_recipe.prepare_item(current_item)
            if isinstance(item_data_or_generator, types.GeneratorType):
                for item_data in item_data_or_generator:
                    if item_data is not None:
                        chunk_filepath = self.cache._add_item(self._index_counter, item_data)
                        self._try_upload(chunk_filepath)
                        self._index_counter += 1
            elif item_data_or_generator is not None:
                chunk_filepath = self.cache._add_item(self._index_counter, item_data_or_generator)
                self._try_upload(chunk_filepath)
                self._index_counter += 1
        except Exception as e:
            # Surface which item failed; the original traceback is chained as the cause.
            raise RuntimeError(f"Failed processing {self.items[index]}") from e
def _handle_data_chunk_recipe_end(self) -> None:
chunks_filepaths = self.cache.done()
if chunks_filepaths and len(self.to_upload_queues):
for i, chunk_filepath in enumerate(chunks_filepaths):
if isinstance(chunk_filepath, str) and os.path.exists(chunk_filepath):
self.to_upload_queues[i % self.num_uploaders].put(chunk_filepath)
    def _handle_data_transform_recipe(self, index: int) -> None:
        """Run the user's transform on item `index` and upload every file it wrote to `output_dir`."""
        # Don't use a context manager to avoid deleting files that are being uploaded.
        output_dir = tempfile.mkdtemp()
        item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
        # The last positional flag tells the recipe whether this is the final item.
        item_data = self.data_recipe.prepare_item(item, str(output_dir), len(self.items) - 1 == index)
        if item_data is not None:
            raise ValueError(
                "When using a `DataTransformRecipe`, the `prepare_item` shouldn't return anything."
                " Simply store your files under the output_dir."
            )
        filepaths = []
        for directory, _, filenames in os.walk(output_dir):
            for filename in filenames:
                filepaths.append(os.path.join(directory, filename))

        for filepath in filepaths:
            self._try_upload((output_dir, filepath))
class DataWorkerProcess(BaseWorker, Process):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """The DataWorkerProcess is responsible to process the user data inside processes."""
        # Both bases need explicit initialization: BaseWorker wires up the queues/state,
        # Process makes the instance startable (Process.start() -> BaseWorker.run()).
        BaseWorker.__init__(self, *args, **kwargs)
        Process.__init__(self)
@dataclass
class _Result:
size: Optional[int] = None
num_bytes: Optional[str] = None
data_format: Optional[str] = None
compression: Optional[str] = None
num_chunks: Optional[int] = None
num_bytes_per_chunk: Optional[List[int]] = None
# Generic item type produced by `DataRecipe.prepare_structure`.
T = TypeVar("T")
class DataRecipe:
    """Base class for data recipes: defines the structure/prepare hooks executed by the workers."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        pass

    @abstractmethod
    def prepare_item(self, *args: Any, **kwargs: Any) -> Any:
        pass

    def __init__(self) -> None:
        # Optional display name; not assigned by this base class.
        self._name: Optional[str] = None

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Hook called once processing finished; subclasses may aggregate and upload results."""
        return _Result(size=size)
class DataChunkRecipe(DataRecipe):
    """A recipe whose `prepare_item` outputs are serialized into chunked binary files."""

    def __init__(
        self,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
    ):
        """Configure chunking.

        Arguments:
            chunk_size: Maximum number of items per chunk (mutually exclusive with `chunk_bytes`).
            chunk_bytes: Maximum number of bytes per chunk (mutually exclusive with `chunk_size`).
            compression: Optional compression algorithm name.

        """
        super().__init__()
        if chunk_size is not None and chunk_bytes is not None:
            raise ValueError("Either one of the `chunk_size` or the `chunk_bytes` need to be provided.")

        self.chunk_size = chunk_size
        # Bug fix: fall back to the 64MB default only when *neither* option was provided.
        # Previously a user-supplied `chunk_bytes` was silently replaced by the default
        # whenever `chunk_size` was None (i.e. always, given the exclusivity check above).
        self.chunk_bytes = 1 << 26 if chunk_size is None and chunk_bytes is None else chunk_bytes
        self.compression = compression

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.

        """

    @abstractmethod
    def prepare_item(self, item_metadata: T) -> Any:
        """The return of this `prepare_item` method is persisted in chunked binary files."""

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Merge the per-worker chunk indexes, upload the final index and summarize the dataset."""
        num_nodes = _get_num_nodes()
        cache_dir = _get_cache_dir()

        chunks = [file for file in os.listdir(cache_dir) if file.endswith(".bin")]
        if chunks and delete_cached_files and output_dir.path is not None:
            raise RuntimeError(f"All the chunks should have been deleted. Found {chunks}")

        merge_cache = Cache(cache_dir, chunk_bytes=1)
        node_rank = _get_node_rank()
        # Multi-node runs write a rank-prefixed index per node; a single node merges directly.
        merge_cache._merge_no_wait(node_rank if num_nodes > 1 else None)
        self._upload_index(output_dir, cache_dir, num_nodes, node_rank)

        if num_nodes == node_rank + 1:
            # Only the last node computes the final summary from the merged index.
            with open(os.path.join(cache_dir, _INDEX_FILENAME)) as f:
                config = json.load(f)

            size = sum([c["dim"] if c["dim"] is not None else c["chunk_size"] for c in config["chunks"]])
            num_bytes = sum([c["chunk_bytes"] for c in config["chunks"]])
            if config["config"] is not None:
                data_format = tree_unflatten(
                    config["config"]["data_format"], treespec_loads(config["config"]["data_spec"])
                )
            else:
                data_format = None
            num_chunks = len(config["chunks"])

            # The platform can't store more than 1024 entries.
            # Note: This isn't really used right now, so it is fine to skip if too big.
            num_bytes_per_chunk = [c["chunk_size"] for c in config["chunks"]] if num_chunks < 1024 else []

            return _Result(
                size=size,
                num_bytes=num_bytes,
                data_format=data_format,
                compression=config["config"]["compression"] if config["config"] else None,
                num_chunks=len(config["chunks"]),
                num_bytes_per_chunk=num_bytes_per_chunk,
            )
        return _Result(
            size=size,
        )

    def _upload_index(self, output_dir: Dir, cache_dir: str, num_nodes: int, node_rank: Optional[int]) -> None:
        """This method upload the index file to the remote cloud directory."""
        if output_dir.path is None and output_dir.url is None:
            return

        obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
        if num_nodes > 1:
            local_filepath = os.path.join(cache_dir, f"{node_rank}-{_INDEX_FILENAME}")
        else:
            local_filepath = os.path.join(cache_dir, _INDEX_FILENAME)

        if obj.scheme == "s3":
            s3 = S3Client()
            s3.client.upload_file(
                local_filepath, obj.netloc, os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
            )
        elif output_dir.path and os.path.isdir(output_dir.path):
            shutil.copyfile(local_filepath, os.path.join(output_dir.path, os.path.basename(local_filepath)))

        if num_nodes == 1 or node_rank is None:
            return

        # Merge the index files generated by each node.
        # Note: When using the Data Optimizer, they should be a single process on each node executing this section
        # So no risk to get race conditon.
        if num_nodes == node_rank + 1:
            # Get the index file locally
            for node_rank in range(num_nodes - 1):
                output_dir_path = output_dir.url if output_dir.url else output_dir.path
                assert output_dir_path
                remote_filepath = os.path.join(output_dir_path, f"{node_rank}-{_INDEX_FILENAME}")
                node_index_filepath = os.path.join(cache_dir, os.path.basename(remote_filepath))
                if obj.scheme == "s3":
                    obj = parse.urlparse(remote_filepath)
                    _wait_for_file_to_exist(s3, obj)
                    with open(node_index_filepath, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif output_dir.path and os.path.isdir(output_dir.path):
                    shutil.copyfile(remote_filepath, node_index_filepath)

            merge_cache = Cache(cache_dir, chunk_bytes=1)
            merge_cache._merge_no_wait()
            self._upload_index(output_dir, cache_dir, 1, None)
class DataTransformRecipe(DataRecipe):
    """A recipe whose `prepare_item` writes output files directly instead of returning data."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.

        """

    @abstractmethod
    def prepare_item(self, item_metadata: T, output_dir: str, is_last: bool) -> None:
        """Use your item metadata to process your files and save the file outputs into `output_dir`."""
class DataProcessor:
    """Orchestrates a distributed processing run: splits the items across workers and nodes,
    spawns the worker processes and tracks progress until completion."""

    def __init__(
        self,
        input_dir: Union[str, Dir],
        output_dir: Optional[Union[str, Dir]] = None,
        num_workers: Optional[int] = None,
        num_downloaders: Optional[int] = None,
        num_uploaders: Optional[int] = None,
        delete_cached_files: bool = True,
        fast_dev_run: Optional[Union[bool, int]] = None,
        random_seed: Optional[int] = 42,
        reorder_files: bool = True,
        weights: Optional[List[int]] = None,
        reader: Optional[BaseReader] = None,
    ):
        """The `DatasetOptimiser` provides an efficient way to process data across multiple machine into chunks to make
        training faster.

        Arguments:
            input_dir: The path to where the input data are stored.
            output_dir: The path to where the output data are stored.
            num_workers: The number of worker threads to use.
            num_downloaders: The number of file downloaders to use.
            num_uploaders: The number of file uploaders to use.
            delete_cached_files: Whether to delete the cached files.
            fast_dev_run: Whether to run a quick dev run.
            random_seed: The random seed to be set before shuffling the data.
            reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
                Set this to ``False`` if the order in which samples are processed should be preserved.
            weights: Provide a list of weights associated to the inputs.
                This is used to evenly split the work among the workers.
            reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.

        """
        self.input_dir = _resolve_dir(input_dir)
        self.output_dir = _resolve_dir(output_dir)
        self.num_workers = num_workers or (1 if fast_dev_run else (os.cpu_count() or 1) * 4)
        self.num_downloaders = num_downloaders or 2
        self.num_uploaders = num_uploaders or 5
        self.delete_cached_files = delete_cached_files
        self.fast_dev_run = _get_fast_dev_run() if fast_dev_run is None else fast_dev_run
        self.workers: Any = []
        self.workers_tracker: Dict[int, int] = {}
        self.progress_queue: Optional[Queue] = None
        self.error_queue: Queue = Queue()
        self.stop_queues: List[Queue] = []
        self.reorder_files = reorder_files
        self.weights = weights
        self.reader = reader

        if self.reader is not None and self.weights is not None:
            raise ValueError("Either the reader or the weights needs to be defined.")

        # Ensure the input dir is the same across all nodes
        self.input_dir = broadcast_object("input_dir", self.input_dir)

        if self.output_dir:
            # Ensure the output dir is the same across all nodes
            self.output_dir = broadcast_object("output_dir", self.output_dir)
            print(f"Storing the files under {self.output_dir.path}")

        self.random_seed = random_seed

    def run(self, data_recipe: DataRecipe) -> None:
        """The `DataProcessor.run(...)` method triggers the data recipe processing over your dataset."""
        if not isinstance(data_recipe, DataRecipe):
            raise ValueError("The provided value should be a data recipe.")

        t0 = time()
        print(f"Setup started with fast_dev_run={self.fast_dev_run}.")

        # Force random seed to be fixed
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)

        # Call the setup method of the user
        user_items: List[Any] = data_recipe.prepare_structure(self.input_dir.path if self.input_dir else None)
        if not isinstance(user_items, (list, StreamingDataLoader)):
            raise ValueError("The `prepare_structure` should return a list of item metadata.")

        # A StreamingDataLoader result is wrapped in a reader so workers can read slices of it.
        if isinstance(user_items, StreamingDataLoader):
            self.reader = StreamingDataLoaderReader(user_items)

        if self.reader:
            user_items = self.reader.remap_items(user_items, self.num_workers)

        if self.weights is not None:
            if len(self.weights) != len(user_items):
                raise ValueError("The provided weights length should match the inputs' length.")
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=self.weights, file_size=False
            )
        elif self.reorder_files and self.input_dir.path:
            # TODO: Only do this on node 0, and broadcast the item sizes to the other nodes.
            item_sizes = _get_item_filesizes(user_items, base_path=self.input_dir.path)
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=item_sizes
            )
        else:
            workers_user_items = _map_items_to_workers_sequentially(num_workers=self.num_workers, user_items=user_items)

        print(f"Setup finished in {round(time() - t0, 3)} seconds. Found {len(user_items)} items to process.")

        if self.fast_dev_run:
            items_to_keep = self.fast_dev_run if type(self.fast_dev_run) is int else _DEFAULT_FAST_DEV_RUN_ITEMS
            workers_user_items = [w[:items_to_keep] for w in workers_user_items]
            print(f"Fast dev run is enabled. Limiting to {items_to_keep} items per process.")

        num_items = sum([len(items) for items in workers_user_items])

        self._cleanup_cache()

        print(f"Starting {self.num_workers} workers with {num_items} items.")

        # NOTE(review): this branch can never execute — `self.input_dir is None` and a truthy
        # `self.input_dir` are contradictory, and `src_resolver` is never assigned in __init__.
        # Looks like dead code; confirm before removing.
        if self.input_dir is None and self.src_resolver is not None and self.input_dir:
            self.input_dir = self.src_resolver(self.input_dir)
            print(f"The remote_dir is `{self.input_dir}`.")

        signal.signal(signal.SIGINT, self._signal_handler)

        self._create_process_workers(data_recipe, workers_user_items)

        print("Workers are ready ! Starting data processing...")

        current_total = 0
        has_failed = False
        pbar = _tqdm(
            desc="Progress",
            total=num_items,
            smoothing=0,
            position=-1,
            mininterval=1,
            leave=True,
            dynamic_ncols=True,
        )

        while True:
            try:
                # Any worker error aborts the whole run.
                error = self.error_queue.get(timeout=0.001)
                self._exit_on_error(error)
            except Empty:
                assert self.progress_queue
                try:
                    index, counter = self.progress_queue.get(timeout=0.001)
                except Empty:
                    continue
                self.workers_tracker[index] = counter
                new_total = sum(self.workers_tracker.values())

                pbar.update(new_total - current_total)
                current_total = new_total
                if current_total == num_items:
                    break

                # Exit early if all the workers are done.
                # This means there were some kinda of errors.
                if all(not w.is_alive() for w in self.workers):
                    has_failed = True
                    break

        pbar.close()

        num_nodes = _get_num_nodes()
        node_rank = _get_node_rank()
        # TODO: Understand why it hangs.
        if num_nodes == 1:
            for w in self.workers:
                w.join(0)

        print("Workers are finished.")
        result = data_recipe._done(len(user_items), self.delete_cached_files, self.output_dir)

        # Only the last node registers the dataset with the platform, and only inside a Studio.
        if num_nodes == node_rank + 1 and self.output_dir.url and _IS_IN_STUDIO:
            assert self.output_dir.path
            _create_dataset(
                input_dir=self.input_dir.path,
                storage_dir=self.output_dir.path,
                dataset_type=V1DatasetType.CHUNKED
                if isinstance(data_recipe, DataChunkRecipe)
                else V1DatasetType.TRANSFORMED,
                empty=False,
                size=result.size,
                num_bytes=result.num_bytes,
                data_format=result.data_format,
                compression=result.compression,
                num_chunks=result.num_chunks,
                num_bytes_per_chunk=result.num_bytes_per_chunk,
            )

        print("Finished data processing!")

        # TODO: Understand why it is required to avoid long shutdown.
        if _get_num_nodes() > 1:
            os._exit(int(has_failed))

    def _exit_on_error(self, error: str) -> None:
        """Reap the workers and re-raise the forwarded worker error in the main process."""
        for w in self.workers:
            w.join(0)
        raise RuntimeError(f"We found the following error {error}.")

    def _create_process_workers(self, data_recipe: DataRecipe, workers_user_items: List[List[Any]]) -> None:
        """Spawn one `DataWorkerProcess` per item partition, each with its own stop queue."""
        self.progress_queue = Queue()
        workers: List[DataWorkerProcess] = []
        stop_queues: List[Queue] = []
        for worker_idx, worker_user_items in enumerate(workers_user_items):
            stop_queues.append(Queue())
            worker = DataWorkerProcess(
                worker_idx,
                self.num_workers,
                _get_node_rank(),
                data_recipe,
                self.input_dir,
                self.output_dir,
                worker_user_items,
                self.progress_queue,
                self.error_queue,
                stop_queues[-1],
                self.num_downloaders,
                self.num_uploaders,
                self.delete_cached_files,
                self.reader,
            )
            worker.start()
            workers.append(worker)

        # Note: Don't store within the loop as weakref aren't serializable
        self.workers = workers
        self.stop_queues = stop_queues

    def _signal_handler(self, signal: Any, frame: Any) -> None:
        """On termination, we stop all the processes to avoid leaking RAM."""
        for stop_queue in self.stop_queues:
            stop_queue.put(None)
        for w in self.workers:
            w.join(0)
        os._exit(0)

    def _cleanup_cache(self) -> None:
        """Recreate empty chunk/data cache folders before a run."""
        cache_dir = _get_cache_dir()

        # Cleanup the cache dir folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)

        os.makedirs(cache_dir, exist_ok=True)

        cache_data_dir = _get_cache_data_dir()

        # Cleanup the cache data folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_data_dir):
            shutil.rmtree(cache_data_dir, ignore_errors=True)

        os.makedirs(cache_data_dir, exist_ok=True)
|
evocodebench_data_160
|
import concurrent
import concurrent.futures  # `import concurrent` alone does not bind the `futures` submodule
import json
import logging
import os
import random
import shutil
import signal
import tempfile
import traceback
import types
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Process, Queue
from pathlib import Path
from queue import Empty
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from urllib import parse

import numpy as np
import torch
from tqdm.auto import tqdm as _tqdm

from litdata.constants import (
    _BOTO3_AVAILABLE,
    _DEFAULT_FAST_DEV_RUN_ITEMS,
    _INDEX_FILENAME,
    _IS_IN_STUDIO,
    _LIGHTNING_CLOUD_LATEST,
    _TORCH_GREATER_EQUAL_2_1_0,
)
from litdata.processing.readers import BaseReader, StreamingDataLoaderReader
from litdata.processing.utilities import _create_dataset
from litdata.streaming import Cache
from litdata.streaming.cache import Dir
from litdata.streaming.client import S3Client
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import _resolve_dir
from litdata.utilities.broadcast import broadcast_object
from litdata.utilities.packing import _pack_greedily

if _TORCH_GREATER_EQUAL_2_1_0:
    from torch.utils._pytree import tree_flatten, tree_unflatten, treespec_loads

if _LIGHTNING_CLOUD_LATEST:
    from lightning_cloud.openapi import V1DatasetType

if _BOTO3_AVAILABLE:
    import botocore
# Use getLogger so the logger participates in the logging hierarchy and global
# configuration; instantiating logging.Logger directly bypasses the manager registry.
logger = logging.getLogger(__name__)
def _get_num_nodes() -> int:
"""Returns the number of nodes."""
return int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 1))
def _get_node_rank() -> int:
"""Returns the current node rank of the instance."""
return int(os.getenv("DATA_OPTIMIZER_NODE_RANK", 0))
def _get_fast_dev_run() -> int:
"""Returns whether fast dev mode is enabled."""
return bool(int(os.getenv("DATA_OPTIMIZER_FAST_DEV_RUN", 1)))
def _get_default_cache() -> str:
    """Return the base cache location: `/cache` inside a Studio, the temp dir elsewhere."""
    if _IS_IN_STUDIO:
        return "/cache"
    return tempfile.gettempdir()
def _get_cache_dir(name: Optional[str] = None) -> str:
    """Returns the cache directory used by the Cache to store the chunks."""
    base = os.getenv("DATA_OPTIMIZER_CACHE_FOLDER", f"{_get_default_cache()}/chunks")
    return base if name is None else os.path.join(base, name.lstrip("/"))
def _get_cache_data_dir(name: Optional[str] = None) -> str:
    """Returns the cache data directory used by the DataProcessor workers to download the files."""
    base = os.getenv("DATA_OPTIMIZER_DATA_CACHE_FOLDER", f"{_get_default_cache()}/data")
    if name is None:
        return os.path.join(base)
    return os.path.join(base, name.lstrip("/"))
def _wait_for_file_to_exist(s3: S3Client, obj: parse.ParseResult, sleep_time: int = 2) -> Any:
    """Poll S3 until `obj` exists, returning its HEAD metadata.

    Retries only on a HeadObject "Not Found" client error; anything else is re-raised.
    """
    while True:
        try:
            return s3.client.head_object(Bucket=obj.netloc, Key=obj.path.lstrip("/"))
        except botocore.exceptions.ClientError as e:
            if "the HeadObject operation: Not Found" in str(e):
                sleep(sleep_time)
            else:
                raise e
def _wait_for_disk_usage_higher_than_threshold(input_dir: str, threshold_in_gb: int = 25, sleep_time: int = 3) -> None:
usage = shutil.disk_usage(input_dir)
while (usage.free / 1000 / 1000 / 1000) <= threshold_in_gb:
sleep(sleep_time)
usage = shutil.disk_usage(input_dir)
return
def _download_data_target(input_dir: Dir, cache_dir: str, queue_in: Queue, queue_out: Queue) -> None:
    """This function is used to download data from a remote directory to a cache directory to optimise reading."""
    s3 = S3Client()

    while True:
        # 2. Fetch from the queue
        r: Optional[Tuple[int, List[str]]] = queue_in.get()

        # 3. Terminate the process if we received a termination signal
        if r is None:
            queue_out.put(None)
            return

        # 4. Unpack
        index, paths = r

        # 5. Check whether all the files are already downloaded
        if input_dir.path and all(
            os.path.exists(p.replace(input_dir.path, cache_dir) if input_dir else p) for p in paths
        ):
            queue_out.put(index)
            continue

        if input_dir.url is not None or input_dir.path is not None:
            if input_dir.url:
                # 6. Wait for the removers to catch up when we are downloading data.
                _wait_for_disk_usage_higher_than_threshold("/", 25)

            # 7. Download all the required paths to unblock the current index
            for path in paths:
                # NOTE(review): `local_path` is only assigned when `input_dir.path` is set;
                # a URL-only input_dir would hit an unbound local below — confirm callers
                # always provide a path alongside the URL.
                if input_dir.path:
                    local_path = path.replace(input_dir.path, cache_dir)

                if input_dir.url and input_dir.path:
                    path = path.replace(input_dir.path, input_dir.url)

                obj = parse.urlparse(path)

                if obj.scheme == "s3":
                    dirpath = os.path.dirname(local_path)
                    os.makedirs(dirpath, exist_ok=True)
                    with open(local_path, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif os.path.isfile(path):
                    # Studio `this_studio` files are already local; no copy needed.
                    if not path.startswith("/teamspace/studios/this_studio"):
                        os.makedirs(os.path.dirname(local_path), exist_ok=True)
                        shutil.copyfile(path, local_path)
                else:
                    raise ValueError(f"The provided {input_dir.url} isn't supported.")

            # 7. Inform the worker the current files are available
            queue_out.put(index)
def _remove_target(input_dir: Dir, cache_dir: str, queue_in: Queue) -> None:
    """This function is used to delete files from the cache directory to minimise disk space."""
    while True:
        # 1. Collect paths
        paths = queue_in.get()

        # 2. Terminate the process if we received a termination signal
        if paths is None:
            return

        # 3. Iterate through the paths and delete them sequentially.
        for path in paths:
            if input_dir:
                # Map input paths back to their cached counterpart before deleting.
                if not path.startswith(cache_dir) and input_dir.path is not None:
                    path = path.replace(input_dir.path, cache_dir)

                if os.path.exists(path):
                    os.remove(path)

            elif os.path.exists(path) and "s3_connections" not in path:
                os.remove(path)
def _upload_fn(upload_queue: Queue, remove_queue: Queue, cache_dir: str, output_dir: Dir) -> None:
    """This function is used to upload optimised chunks from a local to remote dataset directory."""
    obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)

    if obj.scheme == "s3":
        s3 = S3Client()

    while True:
        data: Optional[Union[str, Tuple[str, str]]] = upload_queue.get()

        tmpdir = None

        if isinstance(data, str) or data is None:
            local_filepath = data
        else:
            # Transform recipes send `(tmpdir, filepath)` pairs so relative layout is preserved.
            tmpdir, local_filepath = data

        # Terminate the process if we received a termination signal
        if local_filepath is None:
            return

        # Upload the file to the target cloud storage
        if not local_filepath.startswith(cache_dir):
            local_filepath = os.path.join(cache_dir, local_filepath)

        if obj.scheme == "s3":
            try:
                if tmpdir is None:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
                else:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), local_filepath.replace(tmpdir, "")[1:])
                s3.client.upload_file(
                    local_filepath,
                    obj.netloc,
                    output_filepath,
                )
            except Exception as e:
                # Best-effort upload: log and keep the uploader alive for the next file.
                print(e)
        elif output_dir.path:
            if tmpdir is None:
                output_filepath = os.path.join(output_dir.path, os.path.basename(local_filepath))
            else:
                output_filepath = os.path.join(output_dir.path, local_filepath.replace(tmpdir, "")[1:])
            os.makedirs(os.path.dirname(output_filepath), exist_ok=True)
            shutil.move(local_filepath, output_filepath)
        else:
            raise ValueError(f"The provided {output_dir.path} isn't supported.")

        # Inform the remover to delete the file
        if remove_queue and os.path.exists(local_filepath):
            remove_queue.put([local_filepath])
def _map_items_to_workers_sequentially(num_workers: int, user_items: List[Any]) -> List[List[Any]]:
    """Split `user_items` into contiguous per-worker slices for the current node.

    Items are distributed across all workers of all nodes (earlier workers absorb the
    remainder one item each), then only the slices belonging to this node's workers
    are returned.

    Arguments:
        num_workers: Number of workers on this node.
        user_items: The full list of items to process.

    Returns:
        A list of `num_workers` lists holding this node's share of the items.
    """
    world_size = _get_num_nodes() * num_workers
    node_rank = _get_node_rank()

    base, remainder = divmod(len(user_items), world_size)

    # Build the contiguous slice for every worker in the world.
    all_slices = []
    start = 0
    for worker_idx in range(world_size):
        count = base + 1 if worker_idx < remainder else base
        all_slices.append(user_items[start : start + count])
        start += count

    # Bug fix: previously the slices of *all* nodes were returned, which also made the
    # sanity check below raise on every multi-node run. Keep only this node's workers.
    result = all_slices[node_rank * num_workers : (node_rank + 1) * num_workers]
    if len(result) != num_workers:
        raise RuntimeError("Improper assignment of items to workers")
    return result
def _map_items_to_workers_weighted(
    num_workers: int,
    user_items: List[Any],
    weights: Optional[List[int]] = None,
    file_size: bool = True,
) -> List[List[Any]]:
    """Greedily pack items into `num_nodes * num_workers` bins by weight and return the
    (shuffled) bins belonging to this node's workers."""
    # Associate the items to the workers based on number of nodes and node rank.
    weights = [1] * len(user_items) if weights is None else weights
    num_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    world_size = num_nodes * num_workers

    worker_items, worker_weights = _pack_greedily(items=user_items, weights=weights, num_bins=world_size)
    worker_ids_this_node = range(node_rank * num_workers, (node_rank + 1) * num_workers)

    for worker_id, size in worker_weights.items():
        if worker_id not in worker_ids_this_node:
            continue
        # When the weights are file sizes, report them in megabytes for readability.
        if file_size:
            print(f"Worker {worker_id} gets {size / 1e6:.1f} MB ({len(worker_items[worker_id])} files)")
        else:
            print(f"Worker {worker_id} gets ({len(worker_items[worker_id])}) items for a total weight of {size}.")

    return [np.random.permutation(worker_items[worker_id]).tolist() for worker_id in worker_ids_this_node]
def _get_num_bytes(item: Any, base_path: str) -> int:
flattened_item, _ = tree_flatten(item)
num_bytes = 0
for element in flattened_item:
if isinstance(element, str):
element = Path(element).resolve()
if not element.exists():
continue
file_bytes = os.path.getsize(element)
if file_bytes == 0:
raise RuntimeError(f"The file {element} has 0 bytes!")
num_bytes += file_bytes
return num_bytes
def _get_item_filesizes(items: List[Any], base_path: str = "") -> List[int]:
    """Computes the total size in bytes of all file paths for every datastructure in the given list."""
    cpu_count = os.cpu_count() or 1
    worker_count = cpu_count * 2 if cpu_count > 4 else cpu_count

    # Parallelize to accelerate retrieving the number of file bytes to read for each item
    with concurrent.futures.ThreadPoolExecutor(max_workers=worker_count) as executor:
        futures = [executor.submit(_get_num_bytes, item, base_path) for item in items]
        return [future.result() for future in futures]
def _to_path(element: str) -> str:
    """Keep Studio teamspace paths untouched; resolve everything else to an absolute path."""
    if _IS_IN_STUDIO and element.startswith("/teamspace"):
        return element
    return str(Path(element).resolve())
def _is_path(input_dir: Optional[str], element: Any) -> bool:
    """Return True when `element` looks like a real filepath.

    Inside a Studio with a known input dir, a prefix match against `input_dir` (raw or
    absolutized) short-circuits; otherwise the path must exist on disk. Note the
    absolutized form is also what the final existence check sees.
    """
    if not isinstance(element, str):
        return False

    if _IS_IN_STUDIO and input_dir is not None:
        if element.startswith(input_dir):
            return True

        element = str(Path(element).absolute())
        if element.startswith(input_dir):
            return True

    return os.path.exists(element)
class BaseWorker:
    def __init__(
        self,
        worker_index: int,
        num_workers: int,
        node_rank: int,
        data_recipe: "DataRecipe",
        input_dir: Dir,
        output_dir: Dir,
        items: List[Any],
        progress_queue: Queue,
        error_queue: Queue,
        stop_queue: Queue,
        num_downloaders: int,
        num_uploaders: int,
        remove: bool,
        reader: Optional[BaseReader] = None,
    ) -> None:
        """The BaseWorker is responsible to process the user data.

        Wires together the per-worker state: the items to process, the queues used to
        communicate with the downloader/uploader/remover helper processes, and the
        progress counters reported back to the main process.
        """
        self.worker_index = worker_index
        self.num_workers = num_workers
        self.node_rank = node_rank
        self.data_recipe = data_recipe
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.items = items
        self.num_items = len(self.items)
        self.num_downloaders = num_downloaders
        self.num_uploaders = num_uploaders
        self.remove = remove
        self.reader = reader
        # Per-item lists of filepaths, filled by `_collect_paths`.
        self.paths: List[List[str]] = []
        self.remover: Optional[Process] = None
        self.downloaders: List[Process] = []
        self.uploaders: List[Process] = []
        self.to_download_queues: List[Queue] = []
        self.to_upload_queues: List[Queue] = []
        self.stop_queue = stop_queue
        self.ready_to_process_queue: Queue = Queue()
        self.remove_queue: Queue = Queue()
        self.progress_queue: Queue = progress_queue
        self.error_queue: Queue = error_queue
        # Number of items processed so far by this worker.
        self._counter = 0
        # Timestamp of the last progress report (throttles queue traffic).
        self._last_time = time()
        # Global index handed to the cache when adding chunk items.
        self._index_counter = 0
    def run(self) -> None:
        """Entry point of the worker process: set up the helpers, process items, report errors."""
        try:
            self._setup()
            self._loop()
        except Exception:
            # Forward the full traceback to the main process instead of dying silently.
            traceback_format = traceback.format_exc()
            print(traceback_format)
            self.error_queue.put(traceback_format)
        print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is done.")
    def _setup(self) -> None:
        """Prepare the worker: env vars and caches first, then helper processes."""
        self._set_environ_variables()
        self._create_cache()
        self._collect_paths()
        self._start_downloaders()
        self._start_uploaders()
        self._start_remover()
    def _loop(self) -> None:
        """Consume item indices until every downloader has signalled completion.

        A `None` from the ready queue means one downloader finished. Once all are done,
        the cache is flushed, the uploaders and remover are drained and joined, and a
        final progress update is sent before returning.
        """
        num_downloader_finished = 0

        while True:
            index = self.ready_to_process_queue.get()

            if index is None:
                num_downloader_finished += 1
                if num_downloader_finished == self.num_downloaders:
                    print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is terminating.")

                    if isinstance(self.data_recipe, DataChunkRecipe):
                        self._handle_data_chunk_recipe_end()

                    if self.output_dir.url if self.output_dir.url else self.output_dir.path:
                        # Inform the uploaders they are doing working
                        for i in range(self.num_uploaders):
                            self.to_upload_queues[i].put(None)

                        # Wait for them all to be finished
                        for uploader in self.uploaders:
                            uploader.join()

                    if self.remove:
                        assert self.remover
                        self.remove_queue.put(None)
                        self.remover.join()

                    if self.progress_queue:
                        self.progress_queue.put((self.worker_index, self._counter))
                    return
                continue

            if isinstance(self.data_recipe, DataChunkRecipe):
                self._handle_data_chunk_recipe(index)
            else:
                self._handle_data_transform_recipe(index)

            self._counter += 1

            # Don't send the last progress update, so the main thread awaits for the uploader and remover
            if self.progress_queue and (time() - self._last_time) > 1 and self._counter < (self.num_items - 2):
                self.progress_queue.put((self.worker_index, self._counter))
                self._last_time = time()

            # Schedule the just-processed cached files for deletion.
            if self.remove and self.input_dir.path is not None and self.reader is None:
                self.remove_queue.put(self.paths[index])

            # Non-blocking check for an external stop request.
            try:
                self.stop_queue.get(timeout=0.0001)
                return
            except Empty:
                pass
    def _set_environ_variables(self) -> None:
        """Expose this worker's global rank and the per-node worker count to subprocesses."""
        # set the optimizer global rank and world_size
        os.environ["DATA_OPTIMIZER_GLOBAL_RANK"] = str(_get_node_rank() * self.num_workers + self.worker_index)
        os.environ["DATA_OPTIMIZER_NUM_WORKERS"] = str(self.num_workers)
    def _create_cache(self) -> None:
        """Create the local data/chunk cache directories and, for chunk recipes, the `Cache`."""
        self.cache_data_dir = _get_cache_data_dir()
        os.makedirs(self.cache_data_dir, exist_ok=True)

        self.cache_chunks_dir = _get_cache_dir()
        os.makedirs(self.cache_chunks_dir, exist_ok=True)

        if isinstance(self.data_recipe, DataTransformRecipe):
            # Transform recipes write files directly; no chunk cache is needed.
            return

        self.cache = Cache(
            self.cache_chunks_dir,
            chunk_bytes=self.data_recipe.chunk_bytes,
            chunk_size=self.data_recipe.chunk_size,
            compression=self.data_recipe.compression,
        )
        # Tag the cache with this worker's global rank so chunk filenames don't collide.
        self.cache._reader._rank = _get_node_rank() * self.num_workers + self.worker_index
    def _try_upload(self, data: Optional[Union[str, Tuple[str, str]]]) -> None:
        """Queue a produced file for upload if an output destination is configured.

        `data` is either a chunk filepath or a `(tmpdir, filepath)` pair from a transform recipe.
        """
        if not data or (self.output_dir.url if self.output_dir.url else self.output_dir.path) is None:
            return

        if isinstance(data, str):
            assert os.path.exists(data), data
        else:
            assert os.path.exists(data[-1]), data

        # Round-robin over the uploader queues based on the processed-item counter.
        self.to_upload_queues[self._counter % self.num_uploaders].put(data)
    def _collect_paths(self) -> None:
        """Map every filepath inside each item to its local cache location and enqueue work.

        When there is no input path or a custom reader handles access, nothing needs
        downloading: all indices are queued directly, followed by one stop sentinel
        per downloader.
        """
        if self.input_dir.path is None or self.reader is not None:
            for index in range(len(self.items)):
                self.ready_to_process_queue.put(index)
            for _ in range(self.num_downloaders):
                self.ready_to_process_queue.put(None)
            return

        items = []
        for item in self.items:
            flattened_item, spec = tree_flatten(item)

            # For speed reasons, we assume starting with `self.input_dir` is enough to be a real file.
            # Other alternative would be too slow.
            # TODO: Try using dictionary for higher accurary.
            indexed_paths = {
                index: _to_path(element)
                for index, element in enumerate(flattened_item)
                if _is_path(self.input_dir.path, element)
            }

            if len(indexed_paths) == 0:
                raise ValueError(
                    f"The provided item {item} didn't contain any filepaths. The input_dir is {self.input_dir.path}."
                )

            paths = []
            for index, path in indexed_paths.items():
                paths.append(path)
                # Rewrite the path to point at the worker's local data cache
                # (Studio `this_studio` paths are already local and stay untouched).
                if self.input_dir and not self.input_dir.path.startswith("/teamspace/studios/this_studio"):
                    path = path.replace(self.input_dir.path, self.cache_data_dir)
                flattened_item[index] = path

            self.paths.append(paths)
            items.append(tree_unflatten(flattened_item, spec))

        self.items = items
    def _start_downloaders(self) -> None:
        """Spawn the downloader processes and distribute (index, paths) work round-robin.

        No-op when the data is already local (no input path) or a custom reader handles access.
        """
        if self.input_dir.path is None or self.reader is not None:
            return

        for _ in range(self.num_downloaders):
            to_download_queue: Queue = Queue()
            p = Process(
                target=_download_data_target,
                args=(
                    self.input_dir,
                    self.cache_data_dir,
                    to_download_queue,
                    self.ready_to_process_queue,
                ),
            )
            p.start()
            self.downloaders.append(p)
            self.to_download_queues.append(to_download_queue)

        for index, paths in enumerate(self.paths):
            self.to_download_queues[index % self.num_downloaders].put((index, paths))

        # A trailing None is the termination sentinel for each downloader.
        for downloader_index in range(self.num_downloaders):
            self.to_download_queues[downloader_index].put(None)
def _start_remover(self) -> None:
    """Launch the background process that deletes already-processed input files."""
    if not self.remove:
        return
    remover_args = (
        self.input_dir,
        self.cache_data_dir,
        self.remove_queue,
    )
    process = Process(target=_remove_target, args=remover_args)
    process.start()
    self.remover = process
def _start_uploaders(self) -> None:
    """Spawn the uploader processes that push finished chunks to the output location."""
    if self.output_dir.path is None and self.output_dir.url is None:
        # No destination configured; nothing to upload.
        return
    for _ in range(self.num_uploaders):
        queue: Queue = Queue()
        uploader = Process(
            target=_upload_fn,
            args=(
                queue,
                self.remove_queue,
                self.cache_chunks_dir,
                self.output_dir,
            ),
        )
        uploader.start()
        self.uploaders.append(uploader)
        self.to_upload_queues.append(queue)
def _handle_data_chunk_recipe(self, index: int) -> None:
    """Run ``prepare_item`` on one item and append the result(s) to the chunk cache.

    Supports both plain return values and generators; each produced value is added
    to the cache and any completed chunk file is handed to the uploaders.
    """
    try:
        current_item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
        item_data_or_generator = self.data_recipe.prepare_item(current_item)
        if isinstance(item_data_or_generator, types.GeneratorType):
            # Generator recipes can emit several records per input item.
            for item_data in item_data_or_generator:
                if item_data is not None:
                    chunk_filepath = self.cache._add_item(self._index_counter, item_data)
                    self._try_upload(chunk_filepath)
                    self._index_counter += 1
        elif item_data_or_generator is not None:
            chunk_filepath = self.cache._add_item(self._index_counter, item_data_or_generator)
            self._try_upload(chunk_filepath)
            self._index_counter += 1
    except Exception as e:
        # Surface which item failed; the original exception is chained as the cause.
        raise RuntimeError(f"Failed processing {self.items[index]}") from e
def _handle_data_chunk_recipe_end(self) -> None:
    """Flush the cache and round-robin any remaining chunk files to the uploaders."""
    remaining = self.cache.done()
    if not remaining or not len(self.to_upload_queues):
        return
    for position, filepath in enumerate(remaining):
        if isinstance(filepath, str) and os.path.exists(filepath):
            self.to_upload_queues[position % self.num_uploaders].put(filepath)
def _handle_data_transform_recipe(self, index: int) -> None:
    """Run a transform recipe on one item and upload every file it wrote.

    The recipe must write its outputs under the provided temporary directory and
    return ``None``.
    """
    # Don't use a context manager to avoid deleting files that are being uploaded.
    output_dir = tempfile.mkdtemp()
    item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
    # The third argument tells the recipe whether this is the worker's last item.
    item_data = self.data_recipe.prepare_item(item, str(output_dir), len(self.items) - 1 == index)
    if item_data is not None:
        raise ValueError(
            "When using a `DataTransformRecipe`, the `prepare_item` shouldn't return anything."
            " Simply store your files under the output_dir."
        )
    # Collect every file produced under output_dir (recursively) and upload it.
    filepaths = []
    for directory, _, filenames in os.walk(output_dir):
        for filename in filenames:
            filepaths.append(os.path.join(directory, filename))
    for filepath in filepaths:
        self._try_upload((output_dir, filepath))
class DataWorkerProcess(BaseWorker, Process):
    """A ``BaseWorker`` that runs inside its own ``multiprocessing.Process``."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """The DataWorkerProcess is responsible to process the user data inside processes."""
        # Both bases are initialized explicitly: BaseWorker receives the worker
        # configuration, Process is initialized without arguments.
        BaseWorker.__init__(self, *args, **kwargs)
        Process.__init__(self)
@dataclass
class _Result:
    """Summary of a completed processing run (returned by ``DataRecipe._done``)."""

    # Total number of processed items.
    size: Optional[int] = None
    # Total bytes across all chunks; holds the int sum computed in
    # DataChunkRecipe._done (annotation corrected from Optional[str]).
    num_bytes: Optional[int] = None
    # Data format description of the stored items.
    data_format: Optional[str] = None
    # Compression algorithm name, if any.
    compression: Optional[str] = None
    # Number of chunk files produced.
    num_chunks: Optional[int] = None
    # Per-chunk item counts (empty when there are 1024+ chunks).
    num_bytes_per_chunk: Optional[List[int]] = None
T = TypeVar("T")
class DataRecipe:
    """Base interface for recipes consumed by the ``DataProcessor``.

    NOTE(review): the class does not inherit from ``abc.ABC``, so the
    ``@abstractmethod`` markers below are not enforced at instantiation.
    """

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        # Should return the ordered list of items (e.g. file metadata) to process.
        pass

    @abstractmethod
    def prepare_item(self, *args: Any, **kwargs: Any) -> Any:
        # Should process a single item.
        pass

    def __init__(self) -> None:
        # Optional recipe name.
        self._name: Optional[str] = None

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Hook called once processing completes; returns a minimal summary."""
        return _Result(size=size)
class DataChunkRecipe(DataRecipe):
    """Recipe whose ``prepare_item`` results are serialized into chunked binary files."""

    def __init__(
        self,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
    ):
        """Store the chunking configuration.

        Args:
            chunk_size: Maximum number of items per chunk.
            chunk_bytes: Maximum number of bytes per chunk (defaults to 64 MB
                when ``chunk_size`` isn't provided).
            compression: Optional compression algorithm name.
        """
        super().__init__()
        if chunk_size is not None and chunk_bytes is not None:
            # NOTE(review): this rejects providing *both* options, but the message reads
            # as if neither was provided — consider rewording the message.
            raise ValueError("Either one of the `chunk_size` or the `chunk_bytes` need to be provided.")
        self.chunk_size = chunk_size
        # Default to 1 << 26 (64 MB) per chunk when chunking is byte-based.
        self.chunk_bytes = 1 << 26 if chunk_size is None else chunk_bytes
        self.compression = compression

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T) -> Any:
        """The return of this `prepare_item` method is persisted in chunked binary files."""

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Merge the per-worker chunk indexes and summarize the run in a ``_Result``."""
        num_nodes = _get_num_nodes()
        cache_dir = _get_cache_dir()

        chunks = [file for file in os.listdir(cache_dir) if file.endswith(".bin")]
        if chunks and delete_cached_files and output_dir.path is not None:
            raise RuntimeError(f"All the chunks should have been deleted. Found {chunks}")

        merge_cache = Cache(cache_dir, chunk_bytes=1)
        node_rank = _get_node_rank()
        merge_cache._merge_no_wait(node_rank if num_nodes > 1 else None)
        self._upload_index(output_dir, cache_dir, num_nodes, node_rank)

        # Only the last node computes the full summary from the merged index file.
        if num_nodes == node_rank + 1:
            with open(os.path.join(cache_dir, _INDEX_FILENAME)) as f:
                config = json.load(f)
            size = sum([c["dim"] if c["dim"] is not None else c["chunk_size"] for c in config["chunks"]])
            num_bytes = sum([c["chunk_bytes"] for c in config["chunks"]])
            if config["config"] is not None:
                data_format = tree_unflatten(
                    config["config"]["data_format"], treespec_loads(config["config"]["data_spec"])
                )
            else:
                data_format = None
            num_chunks = len(config["chunks"])

            # The platform can't store more than 1024 entries.
            # Note: This isn't really used right now, so it is fine to skip if too big.
            num_bytes_per_chunk = [c["chunk_size"] for c in config["chunks"]] if num_chunks < 1024 else []
            return _Result(
                size=size,
                num_bytes=num_bytes,
                data_format=data_format,
                compression=config["config"]["compression"] if config["config"] else None,
                num_chunks=len(config["chunks"]),
                num_bytes_per_chunk=num_bytes_per_chunk,
            )
        return _Result(
            size=size,
        )

    def _upload_index(self, output_dir: Dir, cache_dir: str, num_nodes: int, node_rank: Optional[int]) -> None:
        """Upload this node's index file to the output directory and, on the last
        node, fetch every node's index and merge them into the final one."""
        if output_dir.path is None and output_dir.url is None:
            return

        obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
        # Multi-node runs namespace the index file by the node rank.
        if num_nodes > 1:
            local_filepath = os.path.join(cache_dir, f"{node_rank}-{_INDEX_FILENAME}")
        else:
            local_filepath = os.path.join(cache_dir, _INDEX_FILENAME)

        if obj.scheme == "s3":
            s3 = S3Client()
            s3.client.upload_file(
                local_filepath, obj.netloc, os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
            )
        elif output_dir.path and os.path.isdir(output_dir.path):
            shutil.copyfile(local_filepath, os.path.join(output_dir.path, os.path.basename(local_filepath)))

        if num_nodes == 1 or node_rank is None:
            return

        # Merge the index files generated by each node.
        # Note: When using the Data Optimizer, there should be a single process on each
        # node executing this section, so there is no risk of a race condition.
        if num_nodes == node_rank + 1:
            # Fetch every other node's index file locally.
            for node_rank in range(num_nodes - 1):
                output_dir_path = output_dir.url if output_dir.url else output_dir.path
                assert output_dir_path
                remote_filepath = os.path.join(output_dir_path, f"{node_rank}-{_INDEX_FILENAME}")
                node_index_filepath = os.path.join(cache_dir, os.path.basename(remote_filepath))
                if obj.scheme == "s3":
                    obj = parse.urlparse(remote_filepath)
                    _wait_for_file_to_exist(s3, obj)
                    with open(node_index_filepath, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif output_dir.path and os.path.isdir(output_dir.path):
                    shutil.copyfile(remote_filepath, node_index_filepath)

            merge_cache = Cache(cache_dir, chunk_bytes=1)
            merge_cache._merge_no_wait()
            # Re-run with num_nodes=1 to upload the merged, final index.
            self._upload_index(output_dir, cache_dir, 1, None)
class DataTransformRecipe(DataRecipe):
    """Recipe whose ``prepare_item`` writes transformed files directly into an
    output directory instead of returning data to be chunked."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T, output_dir: str, is_last: bool) -> None:
        """Use your item metadata to process your files and save the file outputs into `output_dir`."""
class DataProcessor:
    """Orchestrates worker, downloader, uploader and remover processes to run a
    ``DataRecipe`` over a dataset, optionally across multiple nodes."""

    def __init__(
        self,
        input_dir: Union[str, Dir],
        output_dir: Optional[Union[str, Dir]] = None,
        num_workers: Optional[int] = None,
        num_downloaders: Optional[int] = None,
        num_uploaders: Optional[int] = None,
        delete_cached_files: bool = True,
        fast_dev_run: Optional[Union[bool, int]] = None,
        random_seed: Optional[int] = 42,
        reorder_files: bool = True,
        weights: Optional[List[int]] = None,
        reader: Optional[BaseReader] = None,
    ):
        """The `DatasetOptimiser` provides an efficient way to process data across multiple machines into chunks to make
        training faster.

        Arguments:
            input_dir: The path to where the input data are stored.
            output_dir: The path to where the output data are stored.
            num_workers: The number of worker threads to use.
            num_downloaders: The number of file downloaders to use.
            num_uploaders: The number of file uploaders to use.
            delete_cached_files: Whether to delete the cached files.
            fast_dev_run: Whether to run a quick dev run.
            random_seed: The random seed to be set before shuffling the data.
            reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
                Set this to ``False`` if the order in which samples are processed should be preserved.
            weights: Provide a list of weights associated to the inputs.
                This is used to evenly split the work among the workers.
            reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.
        """
        self.input_dir = _resolve_dir(input_dir)
        self.output_dir = _resolve_dir(output_dir)
        # Defaults: a single worker in fast-dev-run, otherwise 4 per CPU.
        self.num_workers = num_workers or (1 if fast_dev_run else (os.cpu_count() or 1) * 4)
        self.num_downloaders = num_downloaders or 2
        self.num_uploaders = num_uploaders or 5
        self.delete_cached_files = delete_cached_files
        self.fast_dev_run = _get_fast_dev_run() if fast_dev_run is None else fast_dev_run
        self.workers: Any = []
        # Maps worker index -> number of items it has processed so far.
        self.workers_tracker: Dict[int, int] = {}
        self.progress_queue: Optional[Queue] = None
        self.error_queue: Queue = Queue()
        self.stop_queues: List[Queue] = []
        self.reorder_files = reorder_files
        self.weights = weights
        self.reader = reader

        if self.reader is not None and self.weights is not None:
            raise ValueError("Either the reader or the weights needs to be defined.")

        # Ensure the input dir is the same across all nodes
        self.input_dir = broadcast_object("input_dir", self.input_dir)

        if self.output_dir:
            # Ensure the output dir is the same across all nodes
            self.output_dir = broadcast_object("output_dir", self.output_dir)
            print(f"Storing the files under {self.output_dir.path}")

        self.random_seed = random_seed

    def run(self, data_recipe: DataRecipe) -> None:
        """The `DataProcessor.run(...)` method triggers the data recipe processing over your dataset."""
        if not isinstance(data_recipe, DataRecipe):
            raise ValueError("The provided value should be a data recipe.")

        t0 = time()
        print(f"Setup started with fast_dev_run={self.fast_dev_run}.")

        # Force random seed to be fixed
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)

        # Call the setup method of the user
        user_items: List[Any] = data_recipe.prepare_structure(self.input_dir.path if self.input_dir else None)
        if not isinstance(user_items, (list, StreamingDataLoader)):
            raise ValueError("The `prepare_structure` should return a list of item metadata.")

        if isinstance(user_items, StreamingDataLoader):
            self.reader = StreamingDataLoaderReader(user_items)

        if self.reader:
            user_items = self.reader.remap_items(user_items, self.num_workers)

        # Split the work: by explicit weights, by file size, or sequentially.
        if self.weights is not None:
            if len(self.weights) != len(user_items):
                raise ValueError("The provided weights length should match the inputs' length.")
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=self.weights, file_size=False
            )
        elif self.reorder_files and self.input_dir.path:
            # TODO: Only do this on node 0, and broadcast the item sizes to the other nodes.
            item_sizes = _get_item_filesizes(user_items, base_path=self.input_dir.path)
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=item_sizes
            )
        else:
            workers_user_items = _map_items_to_workers_sequentially(num_workers=self.num_workers, user_items=user_items)

        print(f"Setup finished in {round(time() - t0, 3)} seconds. Found {len(user_items)} items to process.")

        if self.fast_dev_run:
            items_to_keep = self.fast_dev_run if type(self.fast_dev_run) is int else _DEFAULT_FAST_DEV_RUN_ITEMS
            workers_user_items = [w[:items_to_keep] for w in workers_user_items]
            print(f"Fast dev run is enabled. Limiting to {items_to_keep} items per process.")

        num_items = sum([len(items) for items in workers_user_items])

        self._cleanup_cache()

        print(f"Starting {self.num_workers} workers with {num_items} items.")

        # NOTE(review): this condition can never hold (`self.input_dir is None` and a
        # truthy `self.input_dir` are mutually exclusive) and `self.src_resolver` is
        # never assigned in __init__ — looks like dead code; confirm before removing.
        if self.input_dir is None and self.src_resolver is not None and self.input_dir:
            self.input_dir = self.src_resolver(self.input_dir)
            print(f"The remote_dir is `{self.input_dir}`.")

        signal.signal(signal.SIGINT, self._signal_handler)

        self._create_process_workers(data_recipe, workers_user_items)

        print("Workers are ready ! Starting data processing...")

        current_total = 0
        has_failed = False
        pbar = _tqdm(
            desc="Progress",
            total=num_items,
            smoothing=0,
            position=-1,
            mininterval=1,
            leave=True,
            dynamic_ncols=True,
        )

        while True:
            # Drain worker errors first; `_exit_on_error` always raises, so the
            # progress handling below runs only when the error queue is empty.
            try:
                error = self.error_queue.get(timeout=0.001)
                self._exit_on_error(error)
            except Empty:
                assert self.progress_queue
                try:
                    index, counter = self.progress_queue.get(timeout=0.001)
                except Empty:
                    continue
                self.workers_tracker[index] = counter
                new_total = sum(self.workers_tracker.values())
            pbar.update(new_total - current_total)
            current_total = new_total
            if current_total == num_items:
                break

            # Exit early if all the workers are done.
            # This means there was some kind of error.
            if all(not w.is_alive() for w in self.workers):
                has_failed = True
                break

        pbar.close()

        num_nodes = _get_num_nodes()
        node_rank = _get_node_rank()
        # TODO: Understand why it hangs.
        if num_nodes == 1:
            for w in self.workers:
                w.join(0)

        print("Workers are finished.")
        result = data_recipe._done(len(user_items), self.delete_cached_files, self.output_dir)

        # On the last node inside a Studio, register the dataset on the platform.
        if num_nodes == node_rank + 1 and self.output_dir.url and _IS_IN_STUDIO:
            assert self.output_dir.path
            _create_dataset(
                input_dir=self.input_dir.path,
                storage_dir=self.output_dir.path,
                dataset_type=V1DatasetType.CHUNKED
                if isinstance(data_recipe, DataChunkRecipe)
                else V1DatasetType.TRANSFORMED,
                empty=False,
                size=result.size,
                num_bytes=result.num_bytes,
                data_format=result.data_format,
                compression=result.compression,
                num_chunks=result.num_chunks,
                num_bytes_per_chunk=result.num_bytes_per_chunk,
            )

        print("Finished data processing!")

        # TODO: Understand why it is required to avoid long shutdown.
        if _get_num_nodes() > 1:
            os._exit(int(has_failed))

    def _exit_on_error(self, error: str) -> None:
        """Join all workers and surface a worker traceback as a RuntimeError."""
        for w in self.workers:
            w.join(0)
        raise RuntimeError(f"We found the following error {error}.")

    def _create_process_workers(self, data_recipe: DataRecipe, workers_user_items: List[List[Any]]) -> None:
        """Spawn one ``DataWorkerProcess`` per item list and start them all."""
        self.progress_queue = Queue()
        workers: List[DataWorkerProcess] = []
        stop_queues: List[Queue] = []
        for worker_idx, worker_user_items in enumerate(workers_user_items):
            stop_queues.append(Queue())
            worker = DataWorkerProcess(
                worker_idx,
                self.num_workers,
                _get_node_rank(),
                data_recipe,
                self.input_dir,
                self.output_dir,
                worker_user_items,
                self.progress_queue,
                self.error_queue,
                stop_queues[-1],
                self.num_downloaders,
                self.num_uploaders,
                self.delete_cached_files,
                self.reader,
            )
            worker.start()
            workers.append(worker)

        # Note: Don't store within the loop as weakref aren't serializable
        self.workers = workers
        self.stop_queues = stop_queues

    def _signal_handler(self, signal: Any, frame: Any) -> None:
        """On termination, we stop all the processes to avoid leaking RAM."""
        for stop_queue in self.stop_queues:
            stop_queue.put(None)
        for w in self.workers:
            w.join(0)
        os._exit(0)

    def _cleanup_cache(self) -> None:
        """Recreate the chunk and data cache folders from scratch."""
        cache_dir = _get_cache_dir()

        # Cleanup the cache dir folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)

        os.makedirs(cache_dir, exist_ok=True)

        cache_data_dir = _get_cache_data_dir()

        # Cleanup the cache data folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_data_dir):
            shutil.rmtree(cache_data_dir, ignore_errors=True)

        os.makedirs(cache_data_dir, exist_ok=True)
|
evocodebench_data_161
|
import concurrent
import json
import logging
import os
import random
import shutil
import signal
import tempfile
import traceback
import types
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Process, Queue
from pathlib import Path
from queue import Empty
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from urllib import parse
import numpy as np
import torch
from tqdm.auto import tqdm as _tqdm
from litdata.constants import (
_BOTO3_AVAILABLE,
_DEFAULT_FAST_DEV_RUN_ITEMS,
_INDEX_FILENAME,
_IS_IN_STUDIO,
_LIGHTNING_CLOUD_LATEST,
_TORCH_GREATER_EQUAL_2_1_0,
)
from litdata.processing.readers import BaseReader, StreamingDataLoaderReader
from litdata.processing.utilities import _create_dataset
from litdata.streaming import Cache
from litdata.streaming.cache import Dir
from litdata.streaming.client import S3Client
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import _resolve_dir
from litdata.utilities.broadcast import broadcast_object
from litdata.utilities.packing import _pack_greedily
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten, tree_unflatten, treespec_loads
if _LIGHTNING_CLOUD_LATEST:
from lightning_cloud.openapi import V1DatasetType
if _BOTO3_AVAILABLE:
import botocore
# Module-level logger. Use logging.getLogger so the logger is registered in the
# logging hierarchy and can be configured by the application; instantiating
# logging.Logger(...) directly would create an unmanaged, orphan logger.
logger = logging.getLogger(__name__)
def _get_num_nodes() -> int:
"""Returns the number of nodes."""
return int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 1))
def _get_node_rank() -> int:
"""Returns the current node rank of the instance."""
return int(os.getenv("DATA_OPTIMIZER_NODE_RANK", 0))
def _get_fast_dev_run() -> int:
"""Returns whether fast dev mode is enabled."""
return bool(int(os.getenv("DATA_OPTIMIZER_FAST_DEV_RUN", 1)))
def _get_default_cache() -> str:
    """Return the root cache folder: a fixed path inside Studio, else the system temp dir."""
    if _IS_IN_STUDIO:
        return "/cache"
    return tempfile.gettempdir()
def _get_cache_dir(name: Optional[str] = None) -> str:
    """Return the directory where the Cache stores chunks, optionally namespaced by ``name``."""
    base = os.getenv("DATA_OPTIMIZER_CACHE_FOLDER", f"{_get_default_cache()}/chunks")
    return base if name is None else os.path.join(base, name.lstrip("/"))
def _get_cache_data_dir(name: Optional[str] = None) -> str:
    """Return the directory where workers download input files, optionally namespaced by ``name``."""
    base = os.getenv("DATA_OPTIMIZER_DATA_CACHE_FOLDER", f"{_get_default_cache()}/data")
    return base if name is None else os.path.join(base, name.lstrip("/"))
def _wait_for_file_to_exist(s3: S3Client, obj: parse.ParseResult, sleep_time: int = 2) -> Any:
    """Poll S3 with HeadObject until ``obj`` exists, then return the HeadObject response.

    Retries every ``sleep_time`` seconds while the object is missing; any other
    client error is re-raised.
    """
    while True:
        try:
            return s3.client.head_object(Bucket=obj.netloc, Key=obj.path.lstrip("/"))
        except botocore.exceptions.ClientError as e:
            # Only swallow "Not Found" (object not yet visible); propagate everything else.
            if "the HeadObject operation: Not Found" in str(e):
                sleep(sleep_time)
            else:
                raise e
def _wait_for_disk_usage_higher_than_threshold(input_dir: str, threshold_in_gb: int = 25, sleep_time: int = 3) -> None:
usage = shutil.disk_usage(input_dir)
while (usage.free / 1000 / 1000 / 1000) <= threshold_in_gb:
sleep(sleep_time)
usage = shutil.disk_usage(input_dir)
return
def _download_data_target(input_dir: Dir, cache_dir: str, queue_in: Queue, queue_out: Queue) -> None:
    """This function is used to download data from a remote directory to a cache directory to optimise reading."""
    s3 = S3Client()
    while True:
        # 2. Fetch from the queue
        r: Optional[Tuple[int, List[str]]] = queue_in.get()

        # 3. Terminate the process if we received a termination signal
        if r is None:
            queue_out.put(None)
            return

        # 4. Unpack
        index, paths = r

        # 5. Check whether all the files are already downloaded
        if input_dir.path and all(
            os.path.exists(p.replace(input_dir.path, cache_dir) if input_dir else p) for p in paths
        ):
            queue_out.put(index)
            continue

        if input_dir.url is not None or input_dir.path is not None:
            if input_dir.url:
                # 6. Wait for the removers to catch up when we are downloading data.
                _wait_for_disk_usage_higher_than_threshold("/", 25)

            # 7. Download all the required paths to unblock the current index
            for path in paths:
                # NOTE(review): `local_path` is only assigned when input_dir.path is set;
                # with a url-only input_dir the s3 branch below would hit an unbound
                # local — confirm whether url-only inputs ever reach this loop.
                if input_dir.path:
                    local_path = path.replace(input_dir.path, cache_dir)

                if input_dir.url and input_dir.path:
                    path = path.replace(input_dir.path, input_dir.url)

                obj = parse.urlparse(path)

                if obj.scheme == "s3":
                    dirpath = os.path.dirname(local_path)
                    os.makedirs(dirpath, exist_ok=True)
                    with open(local_path, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif os.path.isfile(path):
                    # Studio teamspace files are readable in place; don't copy them.
                    if not path.startswith("/teamspace/studios/this_studio"):
                        os.makedirs(os.path.dirname(local_path), exist_ok=True)
                        shutil.copyfile(path, local_path)
                else:
                    raise ValueError(f"The provided {input_dir.url} isn't supported.")

        # 7. Inform the worker the current files are available
        queue_out.put(index)
def _remove_target(input_dir: Dir, cache_dir: str, queue_in: Queue) -> None:
    """Consume lists of file paths from ``queue_in`` and delete them.

    A ``None`` item acts as the termination sentinel.
    """
    while (batch := queue_in.get()) is not None:
        for target in batch:
            if input_dir:
                # Remap paths under the input dir to their cached copies before deleting.
                if not target.startswith(cache_dir) and input_dir.path is not None:
                    target = target.replace(input_dir.path, cache_dir)
                if os.path.exists(target):
                    os.remove(target)
            elif os.path.exists(target) and "s3_connections" not in target:
                os.remove(target)
def _upload_fn(upload_queue: Queue, remove_queue: Queue, cache_dir: str, output_dir: Dir) -> None:
    """This function is used to upload optimised chunks from a local to remote dataset directory."""
    obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)

    # The S3 client is only needed (and only created) for s3 destinations.
    if obj.scheme == "s3":
        s3 = S3Client()

    while True:
        # Items are either a chunk filepath or a (tmpdir, filepath) pair from transform recipes.
        data: Optional[Union[str, Tuple[str, str]]] = upload_queue.get()

        tmpdir = None
        if isinstance(data, str) or data is None:
            local_filepath = data
        else:
            tmpdir, local_filepath = data

        # Terminate the process if we received a termination signal
        if local_filepath is None:
            return

        # Upload the file to the target cloud storage
        if not local_filepath.startswith(cache_dir):
            local_filepath = os.path.join(cache_dir, local_filepath)

        if obj.scheme == "s3":
            try:
                # With a tmpdir, keep the file's path relative to that tmpdir.
                if tmpdir is None:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
                else:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), local_filepath.replace(tmpdir, "")[1:])
                s3.client.upload_file(
                    local_filepath,
                    obj.netloc,
                    output_filepath,
                )
            except Exception as e:
                # Best effort: upload failures are logged, not fatal.
                print(e)
        elif output_dir.path:
            if tmpdir is None:
                output_filepath = os.path.join(output_dir.path, os.path.basename(local_filepath))
            else:
                output_filepath = os.path.join(output_dir.path, local_filepath.replace(tmpdir, "")[1:])
            os.makedirs(os.path.dirname(output_filepath), exist_ok=True)
            shutil.move(local_filepath, output_filepath)
        else:
            raise ValueError(f"The provided {output_dir.path} isn't supported.")

        # Inform the remover to delete the file
        if remove_queue and os.path.exists(local_filepath):
            remove_queue.put([local_filepath])
def _map_items_to_workers_sequentially(num_workers: int, user_items: List[Any]) -> List[List[Any]]:
    """Split ``user_items`` in order across all workers of all nodes and return
    only the chunks belonging to this node's workers.

    Items are spread as evenly as possible: the first
    ``len(user_items) % total_workers`` workers (globally) get one extra item.

    Args:
        num_workers: Number of workers per node.
        user_items: The full, ordered list of items to process.

    Returns:
        A list of ``num_workers`` item lists for the current node.

    Raises:
        RuntimeError: If the per-node assignment doesn't contain exactly
            ``num_workers`` entries.
    """
    total_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    total_workers = total_nodes * num_workers
    base_count, extra_items = divmod(len(user_items), total_workers)
    assignments: List[List[Any]] = []
    start = 0
    for worker_idx in range(total_workers):
        count = base_count + (1 if worker_idx < extra_items else 0)
        assignments.append(user_items[start : start + count])
        start += count
    # Fix: the previous implementation returned the assignment for *every* node,
    # which made the sanity check below fail whenever more than one node was used.
    # Keep only this node's slice of the global assignment.
    result = assignments[node_rank * num_workers : (node_rank + 1) * num_workers]
    if len(result) != num_workers:
        raise RuntimeError("Improper assignment of items to workers")
    return result
def _map_items_to_workers_weighted(
    num_workers: int,
    user_items: List[Any],
    weights: Optional[List[int]] = None,
    file_size: bool = True,
) -> List[List[Any]]:
    """Greedily pack ``user_items`` into one bin per worker (across all nodes) to
    balance ``weights``, and return the shuffled item lists for this node's workers.

    Args:
        num_workers: Number of workers per node.
        user_items: All items to distribute.
        weights: Per-item weight; defaults to uniform weights of 1.
        file_size: When True, the per-worker load is printed as megabytes,
            otherwise as a plain weight total.
    """
    # Associate the items to the workers based on number of nodes and node rank.
    weights = [1] * len(user_items) if weights is None else weights
    num_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    world_size = num_nodes * num_workers

    worker_items, worker_weights = _pack_greedily(items=user_items, weights=weights, num_bins=world_size)
    worker_ids_this_node = range(node_rank * num_workers, (node_rank + 1) * num_workers)

    for worker_id, size in worker_weights.items():
        if worker_id not in worker_ids_this_node:
            continue
        if file_size:
            print(f"Worker {worker_id} gets {size / 1e6:.1f} MB ({len(worker_items[worker_id])} files)")
        else:
            print(f"Worker {worker_id} gets ({len(worker_items[worker_id])}) items for a total weight of {size}.")

    # Shuffle inside each worker so processing order doesn't follow the packing order.
    return [np.random.permutation(worker_items[worker_id]).tolist() for worker_id in worker_ids_this_node]
def _get_num_bytes(item: Any, base_path: str) -> int:
flattened_item, _ = tree_flatten(item)
num_bytes = 0
for element in flattened_item:
if isinstance(element, str):
element = Path(element).resolve()
if not element.exists():
continue
file_bytes = os.path.getsize(element)
if file_bytes == 0:
raise RuntimeError(f"The file {element} has 0 bytes!")
num_bytes += file_bytes
return num_bytes
def _get_item_filesizes(items: List[Any], base_path: str = "") -> List[int]:
    """Computes the total size in bytes of all file paths for every datastructure in the given list."""
    cpu_count = os.cpu_count() or 1
    # Over-subscribe the thread pool on larger machines; the work is I/O bound.
    max_workers = cpu_count * 2 if cpu_count > 4 else cpu_count
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
        pending = [pool.submit(_get_num_bytes, item, base_path) for item in items]
        # Preserve the input order by resolving futures in submission order.
        return [future.result() for future in pending]
def _to_path(element: str) -> str:
    """Return ``element`` unchanged for Studio teamspace paths, otherwise its resolved absolute path."""
    if _IS_IN_STUDIO and element.startswith("/teamspace"):
        return element
    return str(Path(element).resolve())
def _is_path(input_dir: Optional[str], element: Any) -> bool:
    """Return True when ``element`` is a string that looks like a real file path
    (or, in Studio, any path located under ``input_dir``)."""
    if not isinstance(element, str):
        return False
    if _IS_IN_STUDIO and input_dir is not None:
        if element.startswith(input_dir):
            return True
        # Also try the absolutised form, and keep it for the existence check below.
        element = str(Path(element).absolute())
        if element.startswith(input_dir):
            return True
    return os.path.exists(element)
class BaseWorker:
def __init__(
    self,
    worker_index: int,
    num_workers: int,
    node_rank: int,
    data_recipe: "DataRecipe",
    input_dir: Dir,
    output_dir: Dir,
    items: List[Any],
    progress_queue: Queue,
    error_queue: Queue,
    stop_queue: Queue,
    num_downloaders: int,
    num_uploaders: int,
    remove: bool,
    reader: Optional[BaseReader] = None,
) -> None:
    """The BaseWorker is responsible to process the user data.

    Args:
        worker_index: Index of this worker on the current node.
        num_workers: Number of workers per node.
        node_rank: Rank of the node this worker runs on.
        data_recipe: Recipe describing how to prepare each item.
        input_dir: Where the input data lives (local path and/or remote url).
        output_dir: Where the processed chunks should be written.
        items: The subset of user items assigned to this worker.
        progress_queue: Queue used to report progress to the main process.
        error_queue: Queue used to report tracebacks to the main process.
        stop_queue: Queue polled for an early-termination signal.
        num_downloaders: Number of downloader helper processes.
        num_uploaders: Number of uploader helper processes.
        remove: Whether processed input files should be deleted.
        reader: Optional reader used to load items instead of raw paths.
    """
    self.worker_index = worker_index
    self.num_workers = num_workers
    self.node_rank = node_rank
    self.data_recipe = data_recipe
    self.input_dir = input_dir
    self.output_dir = output_dir
    self.items = items
    self.num_items = len(self.items)
    self.num_downloaders = num_downloaders
    self.num_uploaders = num_uploaders
    self.remove = remove
    self.reader = reader
    # Per-item lists of input file paths (filled by _collect_paths).
    self.paths: List[List[str]] = []
    # Helper processes and their queues (created lazily in _setup).
    self.remover: Optional[Process] = None
    self.downloaders: List[Process] = []
    self.uploaders: List[Process] = []
    self.to_download_queues: List[Queue] = []
    self.to_upload_queues: List[Queue] = []
    self.stop_queue = stop_queue
    self.ready_to_process_queue: Queue = Queue()
    self.remove_queue: Queue = Queue()
    self.progress_queue: Queue = progress_queue
    self.error_queue: Queue = error_queue
    # Number of items processed so far and timestamp of the last progress report.
    self._counter = 0
    self._last_time = time()
    # Monotonic index used as the key when adding records to the cache.
    self._index_counter = 0
def run(self) -> None:
    """Worker entry point: set up helper processes, then run the main loop.

    Any exception is forwarded to the main process through ``error_queue``
    instead of being raised.
    """
    try:
        self._setup()
        self._loop()
    except Exception:
        trace = traceback.format_exc()
        print(trace)
        self.error_queue.put(trace)
    print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is done.")
def _setup(self) -> None:
    """Prepare the worker: env vars, cache dirs, path collection, helper processes.

    Order matters: paths must be collected before the downloaders start, since
    `_start_downloaders` enqueues `self.paths`.
    """
    self._set_environ_variables()
    self._create_cache()
    self._collect_paths()
    self._start_downloaders()
    self._start_uploaders()
    self._start_remover()
def _loop(self) -> None:
    """Main worker loop: process item indices as the downloaders release them.

    Each downloader sends a final ``None``; once all of them have, the worker
    flushes the cache, shuts down the uploaders/remover, sends a last progress
    update and returns. A message on ``stop_queue`` also terminates the loop.
    """
    num_downloader_finished = 0

    while True:
        index = self.ready_to_process_queue.get()

        if index is None:
            num_downloader_finished += 1
            # Only shut down once every downloader has signalled completion.
            if num_downloader_finished == self.num_downloaders:
                print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is terminating.")

                if isinstance(self.data_recipe, DataChunkRecipe):
                    self._handle_data_chunk_recipe_end()

                if self.output_dir.url if self.output_dir.url else self.output_dir.path:
                    # Inform the uploaders they are done working
                    for i in range(self.num_uploaders):
                        self.to_upload_queues[i].put(None)

                    # Wait for them all to be finished
                    for uploader in self.uploaders:
                        uploader.join()

                if self.remove:
                    assert self.remover
                    self.remove_queue.put(None)
                    self.remover.join()

                if self.progress_queue:
                    self.progress_queue.put((self.worker_index, self._counter))
                return
            continue

        if isinstance(self.data_recipe, DataChunkRecipe):
            self._handle_data_chunk_recipe(index)
        else:
            self._handle_data_transform_recipe(index)

        self._counter += 1

        # Don't send the last progress update, so the main thread awaits for the uploader and remover
        if self.progress_queue and (time() - self._last_time) > 1 and self._counter < (self.num_items - 2):
            self.progress_queue.put((self.worker_index, self._counter))
            self._last_time = time()

        if self.remove and self.input_dir.path is not None and self.reader is None:
            self.remove_queue.put(self.paths[index])

        # Early-exit check: the main process may request termination at any time.
        try:
            self.stop_queue.get(timeout=0.0001)
            return
        except Empty:
            pass
def _set_environ_variables(self) -> None:
    """Expose this worker's global rank and the per-node worker count via env vars."""
    # set the optimizer global rank and world_size
    global_rank = _get_node_rank() * self.num_workers + self.worker_index
    os.environ["DATA_OPTIMIZER_GLOBAL_RANK"] = str(global_rank)
    os.environ["DATA_OPTIMIZER_NUM_WORKERS"] = str(self.num_workers)
def _create_cache(self) -> None:
    """Create the local cache folders and, for chunk recipes, the Cache writer."""
    self.cache_data_dir = _get_cache_data_dir()
    os.makedirs(self.cache_data_dir, exist_ok=True)

    self.cache_chunks_dir = _get_cache_dir()
    os.makedirs(self.cache_chunks_dir, exist_ok=True)

    if isinstance(self.data_recipe, DataTransformRecipe):
        # Transform recipes write plain files; no chunk cache is needed.
        return

    self.cache = Cache(
        self.cache_chunks_dir,
        chunk_bytes=self.data_recipe.chunk_bytes,
        chunk_size=self.data_recipe.chunk_size,
        compression=self.data_recipe.compression,
    )
    # NOTE(review): assigns this worker's global rank to the cache's reader —
    # presumably so chunk filenames are namespaced per worker; confirm.
    self.cache._reader._rank = _get_node_rank() * self.num_workers + self.worker_index
def _try_upload(self, data: Optional[Union[str, Tuple[str, str]]]) -> None:
    """Round-robin ``data`` (a chunk filepath, or a (tmpdir, filepath) pair) to an uploader queue."""
    destination = self.output_dir.url if self.output_dir.url else self.output_dir.path
    if not data or destination is None:
        return
    # Sanity-check that the file really exists before handing it to an uploader.
    filepath = data if isinstance(data, str) else data[-1]
    assert os.path.exists(filepath), data
    self.to_upload_queues[self._counter % self.num_uploaders].put(data)
def _collect_paths(self) -> None:
    """Extract the file paths out of every assigned item and remap them to the local cache.

    When there is nothing to download (no input path, or a reader is used), all item
    indices are enqueued directly for processing, followed by one ``None`` sentinel
    per downloader.
    """
    if self.input_dir.path is None or self.reader is not None:
        for index in range(len(self.items)):
            self.ready_to_process_queue.put(index)
        # One sentinel per downloader so each one eventually forwards a termination signal.
        for _ in range(self.num_downloaders):
            self.ready_to_process_queue.put(None)
        return
    items = []
    for item in self.items:
        flattened_item, spec = tree_flatten(item)
        # For speed reasons, we assume starting with `self.input_dir` is enough to be a real file.
        # Other alternative would be too slow.
        # TODO: Try using dictionary for higher accuracy.
        indexed_paths = {
            index: _to_path(element)
            for index, element in enumerate(flattened_item)
            if _is_path(self.input_dir.path, element)
        }
        if len(indexed_paths) == 0:
            raise ValueError(
                f"The provided item {item} didn't contain any filepaths. The input_dir is {self.input_dir.path}."
            )
        paths = []
        for index, path in indexed_paths.items():
            paths.append(path)
            # Outside Studio's local storage, the item should reference the downloaded copy.
            if self.input_dir and not self.input_dir.path.startswith("/teamspace/studios/this_studio"):
                path = path.replace(self.input_dir.path, self.cache_data_dir)
            flattened_item[index] = path
        self.paths.append(paths)
        items.append(tree_unflatten(flattened_item, spec))
    # Replace the items with their cache-remapped equivalents.
    self.items = items
def _start_downloaders(self) -> None:
    """Spawn the downloader processes and distribute the collected paths to them."""
    if self.input_dir.path is None or self.reader is not None:
        # Nothing to download.
        return
    for _ in range(self.num_downloaders):
        queue: Queue = Queue()
        worker = Process(
            target=_download_data_target,
            args=(
                self.input_dir,
                self.cache_data_dir,
                queue,
                self.ready_to_process_queue,
            ),
        )
        worker.start()
        self.downloaders.append(worker)
        self.to_download_queues.append(queue)
    # Round-robin the indexed path lists over the downloader queues.
    for item_index, item_paths in enumerate(self.paths):
        self.to_download_queues[item_index % self.num_downloaders].put((item_index, item_paths))
    # One termination sentinel per downloader.
    for queue in self.to_download_queues:
        queue.put(None)
def _start_remover(self) -> None:
    """Launch the background process that deletes cached files, when removal is enabled."""
    if not self.remove:
        return
    self.remover = Process(
        target=_remove_target,
        args=(self.input_dir, self.cache_data_dir, self.remove_queue),
    )
    self.remover.start()
def _start_uploaders(self) -> None:
    """Spawn the uploader processes, each fed by its own dedicated queue."""
    # No local path and no remote URL configured: nothing will ever be uploaded.
    if self.output_dir.path is None and self.output_dir.url is None:
        return
    for _ in range(self.num_uploaders):
        queue: Queue = Queue()
        proc = Process(
            target=_upload_fn,
            args=(
                queue,
                self.remove_queue,
                self.cache_chunks_dir,
                self.output_dir,
            ),
        )
        proc.start()
        self.to_upload_queues.append(queue)
        self.uploaders.append(proc)
def _handle_data_chunk_recipe(self, index: int) -> None:
    """Run the chunk recipe on the item at `index` and store/upload the produced data.

    `prepare_item` may return either a single piece of data or a generator of pieces;
    every non-None piece is written to the cache under a monotonically increasing
    index, and any chunk file completed by that write is scheduled for upload.
    """
    try:
        # Read through the optional reader indirection when one is configured.
        current_item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
        item_data_or_generator = self.data_recipe.prepare_item(current_item)
        if isinstance(item_data_or_generator, types.GeneratorType):
            # Generator recipe: persist each yielded (non-None) element separately.
            for item_data in item_data_or_generator:
                if item_data is not None:
                    chunk_filepath = self.cache._add_item(self._index_counter, item_data)
                    self._try_upload(chunk_filepath)
                    self._index_counter += 1
        elif item_data_or_generator is not None:
            chunk_filepath = self.cache._add_item(self._index_counter, item_data_or_generator)
            self._try_upload(chunk_filepath)
            self._index_counter += 1
    except Exception as e:
        # Surface which item failed; the original exception is chained as the cause.
        raise RuntimeError(f"Failed processing {self.items[index]}") from e
def _handle_data_chunk_recipe_end(self) -> None:
    """Flush the cache and enqueue any remaining chunk files for upload."""
    remaining = self.cache.done()
    if not remaining or not self.to_upload_queues:
        return
    for position, filepath in enumerate(remaining):
        # Only forward real, still-existing files; `cache.done()` may yield other entries.
        if isinstance(filepath, str) and os.path.exists(filepath):
            self.to_upload_queues[position % self.num_uploaders].put(filepath)
def _handle_data_transform_recipe(self, index: int) -> None:
    """Run the transform recipe on one item and upload every file it produced."""
    # A plain mkdtemp (no context manager) keeps the directory alive while uploads run.
    tmp_dir = tempfile.mkdtemp()
    raw_item = self.items[index]
    item = raw_item if self.reader is None else self.reader.read(raw_item)
    is_last = index == len(self.items) - 1
    returned = self.data_recipe.prepare_item(item, str(tmp_dir), is_last)
    if returned is not None:
        raise ValueError(
            "When using a `DataTransformRecipe`, the `prepare_item` shouldn't return anything."
            " Simply store your files under the output_dir."
        )
    # Collect everything the recipe wrote under the temp dir and schedule uploads.
    produced = [
        os.path.join(root, name)
        for root, _, names in os.walk(tmp_dir)
        for name in names
    ]
    for filepath in produced:
        self._try_upload((tmp_dir, filepath))
class DataWorkerProcess(BaseWorker, Process):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """The DataWorkerProcess is responsible to process the user data inside processes."""
        # Initialize both bases explicitly: BaseWorker receives the worker configuration,
        # while Process must be initialized with no arguments before `start()` works.
        BaseWorker.__init__(self, *args, **kwargs)
        Process.__init__(self)
@dataclass
class _Result:
size: Optional[int] = None
num_bytes: Optional[str] = None
data_format: Optional[str] = None
compression: Optional[str] = None
num_chunks: Optional[int] = None
num_bytes_per_chunk: Optional[List[int]] = None
# Generic item-metadata type: produced by `prepare_structure`, consumed by `prepare_item`.
T = TypeVar("T")
class DataRecipe:
    """Base contract for the data-processing recipes executed by the DataProcessor."""

    def __init__(self) -> None:
        # Optional human-readable name for the recipe.
        self._name: Optional[str] = None

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the list of item metadata to be processed."""
        pass

    @abstractmethod
    def prepare_item(self, *args: Any, **kwargs: Any) -> Any:
        """Process a single item; the exact semantics are defined by concrete recipes."""
        pass

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Default completion hook: report only the number of processed items."""
        return _Result(size=size)
class DataChunkRecipe(DataRecipe):
    """Recipe whose `prepare_item` outputs are persisted into chunked binary files."""

    def __init__(
        self,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
    ):
        """Store the chunking configuration.

        Args:
            chunk_size: Maximum number of items per chunk (mutually exclusive with `chunk_bytes`).
            chunk_bytes: Maximum number of bytes per chunk (mutually exclusive with `chunk_size`).
            compression: Optional compression algorithm name.

        Raises:
            ValueError: If both `chunk_size` and `chunk_bytes` are provided.
        """
        super().__init__()
        if chunk_size is not None and chunk_bytes is not None:
            raise ValueError("Either one of the `chunk_size` or the `chunk_bytes` need to be provided.")
        self.chunk_size = chunk_size
        # Bug fix: fall back to the 64MB default only when *neither* limit was given.
        # Previously `1 << 26 if chunk_size is None else chunk_bytes` replaced a
        # user-provided `chunk_bytes` with the default and left it None otherwise.
        self.chunk_bytes = 1 << 26 if chunk_size is None and chunk_bytes is None else chunk_bytes
        self.compression = compression

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.
        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T) -> Any:
        """The return of this `prepare_item` method is persisted in chunked binary files."""

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Merge the per-worker chunk indexes, upload the index, and summarize the run.

        Only the last node (rank == num_nodes - 1) reads the merged index back and
        reports the full statistics; other nodes report only `size`.
        """
        num_nodes = _get_num_nodes()
        cache_dir = _get_cache_dir()
        chunks = [file for file in os.listdir(cache_dir) if file.endswith(".bin")]
        if chunks and delete_cached_files and output_dir.path is not None:
            raise RuntimeError(f"All the chunks should have been deleted. Found {chunks}")
        merge_cache = Cache(cache_dir, chunk_bytes=1)
        node_rank = _get_node_rank()
        # In the multi-node case, merge under a rank-specific name so the indexes
        # can later be combined by the final node.
        merge_cache._merge_no_wait(node_rank if num_nodes > 1 else None)
        self._upload_index(output_dir, cache_dir, num_nodes, node_rank)
        if num_nodes == node_rank + 1:
            with open(os.path.join(cache_dir, _INDEX_FILENAME)) as f:
                config = json.load(f)
            size = sum([c["dim"] if c["dim"] is not None else c["chunk_size"] for c in config["chunks"]])
            num_bytes = sum([c["chunk_bytes"] for c in config["chunks"]])
            if config["config"] is not None:
                data_format = tree_unflatten(
                    config["config"]["data_format"], treespec_loads(config["config"]["data_spec"])
                )
            else:
                data_format = None
            num_chunks = len(config["chunks"])
            # The platform can't store more than 1024 entries.
            # Note: This isn't really used right now, so it is fine to skip if too big.
            num_bytes_per_chunk = [c["chunk_size"] for c in config["chunks"]] if num_chunks < 1024 else []
            return _Result(
                size=size,
                num_bytes=num_bytes,
                data_format=data_format,
                compression=config["config"]["compression"] if config["config"] else None,
                num_chunks=len(config["chunks"]),
                num_bytes_per_chunk=num_bytes_per_chunk,
            )
        return _Result(
            size=size,
        )

    def _upload_index(self, output_dir: Dir, cache_dir: str, num_nodes: int, node_rank: Optional[int]) -> None:
        """This method upload the index file to the remote cloud directory."""
        if output_dir.path is None and output_dir.url is None:
            return
        obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
        if num_nodes > 1:
            local_filepath = os.path.join(cache_dir, f"{node_rank}-{_INDEX_FILENAME}")
        else:
            local_filepath = os.path.join(cache_dir, _INDEX_FILENAME)
        if obj.scheme == "s3":
            s3 = S3Client()
            s3.client.upload_file(
                local_filepath, obj.netloc, os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
            )
        elif output_dir.path and os.path.isdir(output_dir.path):
            shutil.copyfile(local_filepath, os.path.join(output_dir.path, os.path.basename(local_filepath)))
        if num_nodes == 1 or node_rank is None:
            return
        # Merge the index files generated by each node.
        # Note: When using the Data Optimizer, they should be a single process on each node executing this section
        # So no risk to get race conditon.
        if num_nodes == node_rank + 1:
            # Get the index file locally
            for node_rank in range(num_nodes - 1):
                output_dir_path = output_dir.url if output_dir.url else output_dir.path
                assert output_dir_path
                remote_filepath = os.path.join(output_dir_path, f"{node_rank}-{_INDEX_FILENAME}")
                node_index_filepath = os.path.join(cache_dir, os.path.basename(remote_filepath))
                if obj.scheme == "s3":
                    obj = parse.urlparse(remote_filepath)
                    _wait_for_file_to_exist(s3, obj)
                    with open(node_index_filepath, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif output_dir.path and os.path.isdir(output_dir.path):
                    shutil.copyfile(remote_filepath, node_index_filepath)
            merge_cache = Cache(cache_dir, chunk_bytes=1)
            merge_cache._merge_no_wait()
            self._upload_index(output_dir, cache_dir, 1, None)
class DataTransformRecipe(DataRecipe):
    """Recipe that transforms items into files written under an output directory.

    `prepare_item` returns nothing; its outputs are collected from disk afterwards.
    """

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.
        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T, output_dir: str, is_last: bool) -> None:
        """Use your item metadata to process your files and save the file outputs into `output_dir`."""
class DataProcessor:
    """Orchestrates a DataRecipe over many worker processes, with optional
    download/upload/removal side processes and multi-node coordination."""

    def __init__(
        self,
        input_dir: Union[str, Dir],
        output_dir: Optional[Union[str, Dir]] = None,
        num_workers: Optional[int] = None,
        num_downloaders: Optional[int] = None,
        num_uploaders: Optional[int] = None,
        delete_cached_files: bool = True,
        fast_dev_run: Optional[Union[bool, int]] = None,
        random_seed: Optional[int] = 42,
        reorder_files: bool = True,
        weights: Optional[List[int]] = None,
        reader: Optional[BaseReader] = None,
    ):
        """The `DatasetOptimiser` provides an efficient way to process data across multiple machine into chunks to make
        training faster.
        Arguments:
            input_dir: The path to where the input data are stored.
            output_dir: The path to where the output data are stored.
            num_workers: The number of worker threads to use.
            num_downloaders: The number of file downloaders to use.
            num_uploaders: The number of file uploaders to use.
            delete_cached_files: Whether to delete the cached files.
            fast_dev_run: Whether to run a quick dev run.
            random_seed: The random seed to be set before shuffling the data.
            reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
                Set this to ``False`` if the order in which samples are processed should be preserved.
            weights: Provide a list of weights associated to the inputs.
                This is used to evenly split the work among the workers.
            reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.
        """
        self.input_dir = _resolve_dir(input_dir)
        self.output_dir = _resolve_dir(output_dir)
        # Default worker count: 1 in fast-dev-run, otherwise 4 per CPU core.
        self.num_workers = num_workers or (1 if fast_dev_run else (os.cpu_count() or 1) * 4)
        self.num_downloaders = num_downloaders or 2
        self.num_uploaders = num_uploaders or 5
        self.delete_cached_files = delete_cached_files
        self.fast_dev_run = _get_fast_dev_run() if fast_dev_run is None else fast_dev_run
        self.workers: Any = []
        self.workers_tracker: Dict[int, int] = {}
        self.progress_queue: Optional[Queue] = None
        self.error_queue: Queue = Queue()
        self.stop_queues: List[Queue] = []
        self.reorder_files = reorder_files
        self.weights = weights
        self.reader = reader
        if self.reader is not None and self.weights is not None:
            raise ValueError("Either the reader or the weights needs to be defined.")
        # Ensure the input dir is the same across all nodes
        self.input_dir = broadcast_object("input_dir", self.input_dir)
        if self.output_dir:
            # Ensure the output dir is the same across all nodes
            self.output_dir = broadcast_object("output_dir", self.output_dir)
            print(f"Storing the files under {self.output_dir.path}")
        self.random_seed = random_seed

    def run(self, data_recipe: DataRecipe) -> None:
        """The `DataProcessor.run(...)` method triggers the data recipe processing over your dataset."""
        if not isinstance(data_recipe, DataRecipe):
            raise ValueError("The provided value should be a data recipe.")
        t0 = time()
        print(f"Setup started with fast_dev_run={self.fast_dev_run}.")
        # Force random seed to be fixed
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)
        # Call the setup method of the user
        user_items: List[Any] = data_recipe.prepare_structure(self.input_dir.path if self.input_dir else None)
        if not isinstance(user_items, (list, StreamingDataLoader)):
            raise ValueError("The `prepare_structure` should return a list of item metadata.")
        if isinstance(user_items, StreamingDataLoader):
            self.reader = StreamingDataLoaderReader(user_items)
        if self.reader:
            user_items = self.reader.remap_items(user_items, self.num_workers)
        # Split the work across workers: explicit weights, file sizes, or sequentially.
        if self.weights is not None:
            if len(self.weights) != len(user_items):
                raise ValueError("The provided weights length should match the inputs' length.")
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=self.weights, file_size=False
            )
        elif self.reorder_files and self.input_dir.path:
            # TODO: Only do this on node 0, and broadcast the item sizes to the other nodes.
            item_sizes = _get_item_filesizes(user_items, base_path=self.input_dir.path)
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=item_sizes
            )
        else:
            workers_user_items = _map_items_to_workers_sequentially(num_workers=self.num_workers, user_items=user_items)
        print(f"Setup finished in {round(time() - t0, 3)} seconds. Found {len(user_items)} items to process.")
        if self.fast_dev_run:
            items_to_keep = self.fast_dev_run if type(self.fast_dev_run) is int else _DEFAULT_FAST_DEV_RUN_ITEMS
            workers_user_items = [w[:items_to_keep] for w in workers_user_items]
            print(f"Fast dev run is enabled. Limiting to {items_to_keep} items per process.")
        num_items = sum([len(items) for items in workers_user_items])
        self._cleanup_cache()
        print(f"Starting {self.num_workers} workers with {num_items} items.")
        # NOTE(review): `self.src_resolver` is never assigned in this class and the
        # condition requires `self.input_dir` to be both None and truthy, so this
        # branch looks unreachable — confirm before relying on it.
        if self.input_dir is None and self.src_resolver is not None and self.input_dir:
            self.input_dir = self.src_resolver(self.input_dir)
            print(f"The remote_dir is `{self.input_dir}`.")
        signal.signal(signal.SIGINT, self._signal_handler)
        self._create_process_workers(data_recipe, workers_user_items)
        print("Workers are ready ! Starting data processing...")
        current_total = 0
        has_failed = False
        pbar = _tqdm(
            desc="Progress",
            total=num_items,
            smoothing=0,
            position=-1,
            mininterval=1,
            leave=True,
            dynamic_ncols=True,
        )
        # Poll the error queue and the progress queue until all items are processed,
        # an error surfaces, or every worker has died.
        while True:
            try:
                error = self.error_queue.get(timeout=0.001)
                self._exit_on_error(error)
            except Empty:
                assert self.progress_queue
                try:
                    index, counter = self.progress_queue.get(timeout=0.001)
                except Empty:
                    continue
                self.workers_tracker[index] = counter
                new_total = sum(self.workers_tracker.values())
                pbar.update(new_total - current_total)
                current_total = new_total
                if current_total == num_items:
                    break
                # Exit early if all the workers are done.
                # This means there were some kinda of errors.
                if all(not w.is_alive() for w in self.workers):
                    has_failed = True
                    break
        pbar.close()
        num_nodes = _get_num_nodes()
        node_rank = _get_node_rank()
        # TODO: Understand why it hangs.
        if num_nodes == 1:
            for w in self.workers:
                w.join(0)
        print("Workers are finished.")
        result = data_recipe._done(len(user_items), self.delete_cached_files, self.output_dir)
        # Only the final node registers the dataset, and only when running in Studio
        # with a remote output URL.
        if num_nodes == node_rank + 1 and self.output_dir.url and _IS_IN_STUDIO:
            assert self.output_dir.path
            _create_dataset(
                input_dir=self.input_dir.path,
                storage_dir=self.output_dir.path,
                dataset_type=V1DatasetType.CHUNKED
                if isinstance(data_recipe, DataChunkRecipe)
                else V1DatasetType.TRANSFORMED,
                empty=False,
                size=result.size,
                num_bytes=result.num_bytes,
                data_format=result.data_format,
                compression=result.compression,
                num_chunks=result.num_chunks,
                num_bytes_per_chunk=result.num_bytes_per_chunk,
            )
        print("Finished data processing!")
        # TODO: Understand why it is required to avoid long shutdown.
        if _get_num_nodes() > 1:
            os._exit(int(has_failed))

    def _exit_on_error(self, error: str) -> None:
        """Join all workers (non-blocking) and re-raise the reported worker error."""
        for w in self.workers:
            w.join(0)
        raise RuntimeError(f"We found the following error {error}.")

    def _create_process_workers(self, data_recipe: DataRecipe, workers_user_items: List[List[Any]]) -> None:
        """Start one DataWorkerProcess per item partition, each with its own stop queue."""
        self.progress_queue = Queue()
        workers: List[DataWorkerProcess] = []
        stop_queues: List[Queue] = []
        for worker_idx, worker_user_items in enumerate(workers_user_items):
            stop_queues.append(Queue())
            worker = DataWorkerProcess(
                worker_idx,
                self.num_workers,
                _get_node_rank(),
                data_recipe,
                self.input_dir,
                self.output_dir,
                worker_user_items,
                self.progress_queue,
                self.error_queue,
                stop_queues[-1],
                self.num_downloaders,
                self.num_uploaders,
                self.delete_cached_files,
                self.reader,
            )
            worker.start()
            workers.append(worker)
        # Note: Don't store within the loop as weakref aren't serializable
        self.workers = workers
        self.stop_queues = stop_queues

    def _signal_handler(self, signal: Any, frame: Any) -> None:
        """On termination, we stop all the processes to avoid leaking RAM."""
        for stop_queue in self.stop_queues:
            stop_queue.put(None)
        for w in self.workers:
            w.join(0)
        os._exit(0)

    def _cleanup_cache(self) -> None:
        """Recreate the cache directories so stale files from a previous run can't leak in."""
        cache_dir = _get_cache_dir()
        # Cleanup the cache dir folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)
        os.makedirs(cache_dir, exist_ok=True)
        cache_data_dir = _get_cache_data_dir()
        # Cleanup the cache data folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_data_dir):
            shutil.rmtree(cache_data_dir, ignore_errors=True)
        os.makedirs(cache_data_dir, exist_ok=True)
|
evocodebench_data_162
|
from typing import Literal
import torch
from torch import nn
class NetworkWithSkipLayers(torch.nn.Module):
    """MLP with skip connections: after each skip block, the raw network input is
    concatenated onto the block output before feeding the next stage."""

    def __init__(self, skip_layers, output_layers) -> None:
        super().__init__()
        self.skip_layers = skip_layers
        self.output_layers = output_layers

    def forward(self, x):
        current = x
        for block in self.skip_layers:
            block_out = block(current)
            # Re-inject the original input alongside the block output.
            current = torch.concat([x, block_out], dim=-1)
        return self.output_layers(current)
class NetworkFactory:
    """Builds MLPs with either tinycudann (fused CUDA kernels) or plain PyTorch."""

    # Class-level counter so every tcnn network created gets a distinct seed.
    seed: int = 1337

    def __init__(self, tcnn: bool = True):
        """Args:
            tcnn: When True, build networks with tinycudann; otherwise use torch.nn.
        """
        self.tcnn = tcnn

    def _get_seed(self):
        # Return the current seed, then bump the shared counter (the `finally`
        # runs after the return value has been captured).
        try:
            return NetworkFactory.seed
        finally:
            NetworkFactory.seed += 1

    def get_linear(self, in_features: int, out_features: int):
        """Build a single linear layer (n_layers=1, no output activation)."""
        return self.get_network(
            n_input_dims=in_features,
            n_output_dims=out_features,
            n_layers=1,
            n_neurons=out_features,
            activation="ReLU",
            output_activation="None",
        )

    def get_network(
        self,
        n_input_dims: int,
        n_output_dims: int,
        n_layers: int,
        n_neurons: int,
        activation: Literal["ReLU", "None"],
        output_activation: Literal["ReLU", "Sigmoid", "None"],
    ):
        """Build an MLP with `n_layers` linear layers (`n_layers - 1` hidden layers
        of width `n_neurons`) and the given hidden/output activations."""
        assert n_layers > 0 and n_neurons > 0
        if self.tcnn is True:
            import tinycudann as tcnn

            # FullyFusedMLP is limited to narrow layers; fall back to CutlassMLP otherwise.
            otype = "FullyFusedMLP"
            if n_neurons > 128:
                otype = "CutlassMLP"
            return tcnn.Network(
                n_input_dims=n_input_dims,
                n_output_dims=n_output_dims,
                network_config={
                    "otype": otype,
                    "activation": activation,
                    "output_activation": output_activation,
                    "n_neurons": n_neurons,
                    "n_hidden_layers": n_layers - 1,
                },
                seed=self._get_seed(),
            )
        # PyTorch fallback
        model_list = []
        # hidden layers
        in_features = n_input_dims
        for _ in range(n_layers - 1):
            model_list += self._get_torch_layer(in_features, n_neurons, activation)
            in_features = n_neurons  # next layer's in_features
        # output layer
        model_list += self._get_torch_layer(in_features, n_output_dims, output_activation)
        return nn.Sequential(*model_list)

    def get_network_with_skip_layers(
        self,
        n_input_dims: int,
        n_output_dims: int,
        n_layers: int,
        n_neurons: int,
        activation: Literal["ReLU", "None"],
        output_activation: Literal["ReLU", "Sigmoid", "None"],
        skips: "list[int] | None" = None,
    ):
        """Build an MLP whose raw input is re-concatenated after the layer indices in `skips`.

        Args:
            skips: Layer indices after which the input is re-injected. Defaults to no
                skip connections. (Bug fix: previously a mutable `[]` default argument;
                replaced by a None sentinel, with identical default behavior.)
        """
        skips = [] if skips is None else skips
        original_n_input_dims = n_input_dims
        # build skip layers
        skip_layer_list = []
        initialized_layers = 0
        n_input_dims = original_n_input_dims
        for i in skips:
            n_layers_to_create = i - initialized_layers
            skip_layer_list.append(
                self.get_network(
                    n_input_dims=n_input_dims,
                    n_output_dims=n_neurons,
                    n_layers=n_layers_to_create,
                    n_neurons=n_neurons,
                    activation=activation,
                    output_activation=activation,
                )
            )
            # After each skip block, the raw input is concatenated onto its output.
            n_input_dims = n_neurons + original_n_input_dims
            initialized_layers += n_layers_to_create
        skip_layers = nn.ModuleList(skip_layer_list)
        # build left layers
        output = self.get_network(
            n_input_dims=n_input_dims,
            n_output_dims=n_output_dims,
            n_layers=n_layers - initialized_layers,
            n_neurons=n_neurons,
            activation=activation,
            output_activation=output_activation,
        )
        return NetworkWithSkipLayers(skip_layers, output)

    def _get_torch_activation(self, name: str):
        """Map an activation name to a torch module (None for no activation)."""
        if name == "None":
            return None
        if name == "ReLU":
            return nn.ReLU()
        if name == "Sigmoid":
            return nn.Sigmoid()
        raise ValueError("unsupported activation type {}".format(name))

    def _get_torch_layer(self, in_features: int, out_features: int, activation_name: str) -> list:
        """Return [Linear] or [Linear, activation] for one network stage."""
        model_list = []
        layer = nn.Linear(in_features, out_features)
        activation = self._get_torch_activation(activation_name)
        model_list.append(layer)
        if activation is not None:
            model_list.append(activation)
        return model_list
|
evocodebench_data_163
|
from typing import List, Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import GeometryRefinementError
from iris.utils import math
class Smoothing(Algorithm):
    """Implementation of contour smoothing algorithm.
    Algorithm steps:
    1) Map iris/pupil points to polar space based on estimated iris/pupil centers.
    2) Smooth iris/pupil contour by applying 1D convolution with rolling median kernel approach.
    3) Map points back to cartesian space from polar space.
    """

    class Parameters(Algorithm.Parameters):
        """Smoothing parameters class."""

        # Angular sampling step (degrees) used when re-sampling contours.
        dphi: float = Field(..., gt=0.0, lt=360.0)
        # Rolling-median kernel size expressed in degrees of arc.
        kernel_size: float = Field(..., gt=0.0, lt=360.0)
        # Angular gap (degrees) above which a contour is cut into separate arcs.
        gap_threshold: float = Field(..., gt=0.0, lt=360.0)

    __parameters_type__ = Parameters

    def __init__(self, dphi: float = 1.0, kernel_size: float = 10.0, gap_threshold: float = 10.0) -> None:
        """Assign parameters.
        Args:
            dphi (float, optional): phi angle delta used to sample points while doing smoothing by interpolation. Defaults to 1.0.
            kernel_size (float, optional): Rolling median kernel size expressed in radians. Final kernel size is computed as a quotient of kernel_size and dphi. Defaults to 10.0.
            gap_threshold (float, optional): Gap threshold distance. Defaults to None. Defaults to 10.0.
        """
        super().__init__(dphi=dphi, kernel_size=kernel_size, gap_threshold=gap_threshold)

    @property
    def kernel_offset(self) -> int:
        """Kernel offset (distance from kernel center to border) property used when smoothing with rolling median. If a quotient is less then 1 then kernel size equal to 1 is returned.
        Returns:
            int: Kernel size.
        """
        return max(1, int((np.radians(self.params.kernel_size) / np.radians(self.params.dphi))) // 2)

    def run(self, polygons: GeometryPolygons, eye_centers: EyeCenters) -> GeometryPolygons:
        """Perform smoothing refinement.
        Args:
            polygons (GeometryPolygons): Contours to refine.
            eye_centers (EyeCenters): Eye center used when performing a coordinates mapping from cartesian space to polar space.
        Returns:
            GeometryPolygons: Smoothed contours.
        """
        pupil_arcs = self._smooth(polygons.pupil_array, (eye_centers.pupil_x, eye_centers.pupil_y))
        iris_arcs = self._smooth(polygons.iris_array, (eye_centers.iris_x, eye_centers.iris_y))
        # The eyeball contour is passed through unchanged.
        return GeometryPolygons(pupil_array=pupil_arcs, iris_array=iris_arcs, eyeball_array=polygons.eyeball_array)

    def _smooth(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
        """Smooth a single contour.
        Args:
            polygon (np.ndarray): Contour to smooth.
            center_xy (Tuple[float, float]): Contour's center.
        Returns:
            np.ndarray: Smoothed contour's vertices.
        """
        arcs, num_gaps = self._cut_into_arcs(polygon, center_xy)
        # A gap-free contour is treated as a closed circular shape; otherwise each
        # arc (with at least 2 vertices) is smoothed separately and stacked back.
        arcs = (
            self._smooth_circular_shape(arcs[0], center_xy)
            if num_gaps == 0
            else np.vstack([self._smooth_arc(arc, center_xy) for arc in arcs if len(arc) >= 2])
        )
        return arcs

    def _cut_into_arcs(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> Tuple[List[np.ndarray], int]:
        """Cut contour into arcs.
        Args:
            polygon (np.ndarray): Contour polygon.
            center_xy (Tuple[float, float]): Polygon's center.
        Returns:
            Tuple[List[np.ndarray], int]: Tuple with: (list of list of vertices, number of gaps detected in a contour).
        """
        rho, phi = math.cartesian2polar(polygon[:, 0], polygon[:, 1], *center_xy)
        phi, rho = self._sort_two_arrays(phi, rho)
        differences = np.abs(phi - np.roll(phi, -1))
        # True distance between first and last point
        differences[-1] = 2 * np.pi - differences[-1]
        gap_indices = np.argwhere(differences > np.radians(self.params.gap_threshold)).flatten()
        if gap_indices.size < 2:
            return [polygon], gap_indices.size
        gap_indices += 1
        phi, rho = np.split(phi, gap_indices), np.split(rho, gap_indices)
        arcs = [
            np.column_stack(math.polar2cartesian(rho_coords, phi_coords, *center_xy))
            for rho_coords, phi_coords in zip(rho, phi)
        ]
        # Connect arc which lies between 0 and 2π.
        if len(arcs) == gap_indices.size + 1:
            arcs[0] = np.vstack([arcs[0], arcs[-1]])
            arcs = arcs[:-1]
        return arcs, gap_indices.size

    def _smooth_arc(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
        """Smooth a single contour arc.
        Args:
            vertices (np.ndarray): Arc's vertices.
            center_xy (Tuple[float, float]): Center of an entire contour.
        Returns:
            np.ndarray: Smoothed arc's vertices.
        """
        rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
        phi, rho = self._sort_two_arrays(phi, rho)
        idx = self._find_start_index(phi)
        offset = phi[idx]
        # Shift the arc so it starts at the largest gap; this avoids smoothing
        # across the arc's open end when wrapping angles modulo 2π.
        relative_phi = (phi - offset) % (2 * np.pi)
        smoothed_relative_phi, smoothed_rho = self._smooth_array(relative_phi, rho)
        smoothed_phi = (smoothed_relative_phi + offset) % (2 * np.pi)
        x_smoothed, y_smoothed = math.polar2cartesian(smoothed_rho, smoothed_phi, *center_xy)
        return np.column_stack([x_smoothed, y_smoothed])

    def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
        """Smooth arc in a form of a circular shape.
        Args:
            vertices (np.ndarray): Arc's vertices.
            center_xy (Tuple[float, float]): Center of an entire contour.
        Returns:
            np.ndarray: Smoothed arc's vertices.
        """
        rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
        # Tile the contour over [-2π, 4π) so smoothing wraps correctly across 0/2π,
        # then keep only the central [0, 2π) band.
        padded_phi = np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])
        padded_rho = np.concatenate([rho, rho, rho])
        smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)
        mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)
        rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]
        x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)
        return np.column_stack([x_smoothed, y_smoothed])

    def _smooth_array(self, phis: np.ndarray, rhos: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Smooth coordinates expressed in polar space.
        Args:
            phis (np.ndarray): phi values.
            rhos (np.ndarray): rho values.
        Returns:
            Tuple[np.ndarray, np.ndarray]: Tuple with smoothed coordinates (phis, rhos).
        """
        # Resample onto a regular phi grid, then apply the rolling median; the
        # phi edges are trimmed to match the median's valid (interior) region.
        interpolated_phi = np.arange(min(phis), max(phis), np.radians(self.params.dphi))
        interpolated_rho = np.interp(interpolated_phi, xp=phis, fp=rhos, period=2 * np.pi)
        smoothed_rho = self._rolling_median(interpolated_rho, self.kernel_offset)
        smoothed_phi = interpolated_phi[self.kernel_offset : -self.kernel_offset]
        return smoothed_phi, smoothed_rho

    def _sort_two_arrays(self, first_list: np.ndarray, second_list: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Sort both numpy arrays based on values from the first_list.
        Args:
            first_list (np.ndarray): First array.
            second_list (np.ndarray): Second array.
        Returns:
            Tuple[np.ndarray, np.ndarray]: Tuple with (sorted first array, sorted second array).
        """
        zipped_lists = zip(first_list, second_list)
        sorted_pairs = sorted(zipped_lists)
        sorted_tuples = zip(*sorted_pairs)
        first_list, second_list = [list(sorted_tuple) for sorted_tuple in sorted_tuples]
        return np.array(first_list), np.array(second_list)

    def _find_start_index(self, phi: np.ndarray) -> int:
        """Find the start index by checking the largest gap. phi needs to be sorted.
        Args:
            phi (np.ndarray): phi angle values.
        Raises:
            GeometryRefinementError: Raised if phi values are not sorted ascendingly.
        Returns:
            int: Index value.
        """
        if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
            raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
        # Pad with the wrapped-around neighbors so the gap across 0/2π is considered too.
        phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
        phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
        dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
        largest_gap_index = np.argmax(dphi)
        return int(largest_gap_index)

    def _rolling_median(self, signal: np.ndarray, kernel_offset: int) -> np.ndarray:
        """Compute rolling median of a 1D signal.
        Args:
            signal (np.ndarray): Signal values.
            kernel_offset (int): Kernel offset (distance from kernel center to border).
        Raises:
            GeometryRefinementError: Raised if signal is not 1D.
        Returns:
            np.ndarray: Rolling median result (edges trimmed by `kernel_offset`).
        """
        if signal.ndim != 1:
            raise GeometryRefinementError("Smoothing._rolling_median only works for 1d arrays.")
        stacked_signals: List[np.ndarray] = []
        for i in range(-kernel_offset, kernel_offset + 1):
            stacked_signals.append(np.roll(signal, i))
        stacked_signals = np.stack(stacked_signals)
        rolling_median = np.median(stacked_signals, axis=0)
        # Drop the borders where the rolled windows wrap around the array ends.
        rolling_median = rolling_median[kernel_offset:-kernel_offset]
        return rolling_median
|
evocodebench_data_164
|
from typing import List, Optional, Tuple
import numpy as np
from iris.io.dataclasses import IrisTemplate
from iris.io.errors import MatcherError
def normalized_HD(irisbitcount: int, maskbitcount: int, sqrt_totalbitcount: float, nm_dist: float) -> float:
    """Perform normalized HD calculation.

    The raw fractional distance is pulled toward `nm_dist` by a factor that
    shrinks as the number of compared bits grows, and the result is clamped at zero.

    Args:
        irisbitcount (int): nonmatched iriscode bit count.
        maskbitcount (int): common maskcode bit count.
        sqrt_totalbitcount (float): square root of bit counts.
        nm_dist (float): nonmatch distance used for normalized HD.

    Returns:
        float: normalized Hamming distance.
    """
    raw_hd = irisbitcount / maskbitcount
    attenuation = min(1.0, np.sqrt(maskbitcount) / sqrt_totalbitcount)
    return max(0, nm_dist - (nm_dist - raw_hd) * attenuation)
def count_sqrt_totalbits(
    toal_codesize: int,
    half_width: List[int],
    weights: Optional[List[np.ndarray]] = None,
) -> Tuple[float, float, float]:
    """Count total amount of sqrt bits.

    Args:
        toal_codesize (int): total size of iriscodes.
        half_width (List[int]): half width of iriscodes.
        weights (Optional[List[np.ndarray]]): list of weights table. Optional parameter for weighted HD. Defaults to None.

    Returns:
        Tuple[float, float, float]: square root of bit counts from whole iris, top iris and bottom iris.
    """
    if weights:
        # Weighted case: sum each weight table (and its column halves) directly.
        total = np.sqrt(np.sum([np.sum(w) for w in weights]))
        bottom = np.sqrt(np.sum([np.sum(w[:, :hw, ...]) for w, hw in zip(weights, half_width)]))
        top = np.sqrt(np.sum([np.sum(w[:, hw:, ...]) for w, hw in zip(weights, half_width)]))
    else:
        # Unweighted case: total scaled by 3/4 per the original formula; the
        # top/bottom halves each carry half of the total bit count.
        total = np.sqrt(toal_codesize * 3 / 4)
        bottom = total / np.sqrt(2)
        top = bottom
    return total, top, bottom
def count_nonmatchbits(
    irisbits: np.ndarray,
    maskbits: np.ndarray,
    half_width: List[int],
    weights: Optional[List[np.ndarray]] = None,
) -> Tuple[int, int, int, int]:
    """Count nonmatch bits for Hamming distance.

    Args:
        irisbits (np.ndarray): nonmatch irisbits.
        maskbits (np.ndarray): common maskbits.
        half_width (List[int]): list of half of code width.
        weights (Optional[np.ndarray]): list of weights table. Optional parameter for weighted HD. Defaults to None.

    Returns:
        Tuple[int, int, int, int]: nonmatch iriscode bit count and common maskcode bit count from top iris and bottom iris.
    """

    def _half_counts(make_slice):
        # Sum the (optionally weighted) nonmatch bits and valid mask bits over one
        # column half of every code, selected by `make_slice(half_width)`.
        if weights:
            iris = np.sum(
                [
                    np.sum(np.multiply(x[:, make_slice(hw), ...] & y[:, make_slice(hw), ...], z[:, make_slice(hw), ...]))
                    for x, y, hw, z in zip(irisbits, maskbits, half_width, weights)
                ]
            )
            mask = np.sum(
                [
                    np.sum(np.multiply(x[:, make_slice(hw), ...], z[:, make_slice(hw), ...]))
                    for x, hw, z in zip(maskbits, half_width, weights)
                ]
            )
        else:
            iris = np.sum(
                [np.sum(x[:, make_slice(hw), ...] & y[:, make_slice(hw), ...]) for x, y, hw in zip(irisbits, maskbits, half_width)]
            )
            mask = np.sum([np.sum(x[:, make_slice(hw), ...]) for x, hw in zip(maskbits, half_width)])
        return iris, mask

    # Top half: columns [hw:, ...]; bottom half: columns [:hw, ...].
    irisbitcount_top, maskbitcount_top = _half_counts(lambda hw: slice(hw, None))
    irisbitcount_bot, maskbitcount_bot = _half_counts(lambda hw: slice(None, hw))
    return irisbitcount_top, maskbitcount_top, irisbitcount_bot, maskbitcount_bot
def hamming_distance(
    template_probe: IrisTemplate,
    template_gallery: IrisTemplate,
    rotation_shift: int,
    nm_dist: Optional[float] = None,
    weights: Optional[List[np.ndarray]] = None,
) -> Tuple[float, int]:
    """Compute Hamming distance.

    The probe code is rotated column-wise between -rotation_shift and +rotation_shift
    and the minimum distance over all rotations is returned, together with the rotation
    that produced it.

    Args:
        template_probe (IrisTemplate): Iris template from probe.
        template_gallery (IrisTemplate): Iris template from gallery.
        rotation_shift (int): rotation allowed in matching, converted to columns.
        nm_dist (Optional[float] = None): nonmatch distance, optional parameter for normalized HD. Defaults to None.
        weights (Optional[List[np.ndarray]] = None): list of weights tables. Optional parameter for weighted HD. Defaults to None.

    Returns:
        Tuple[float, int]: minimum Hamming distance and corresponding rotation shift.
    """
    # Validate that probe and gallery codes are shape-compatible; record half of each
    # code's column count (codes are split into top/bottom halves downstream).
    half_codewidth = []
    for probe_code, gallery_code in zip(template_probe.iris_codes, template_gallery.iris_codes):
        if probe_code.shape != gallery_code.shape:
            raise MatcherError("probe and gallery iris codes are of different sizes")
        if (probe_code.shape[1] % 2) != 0:
            raise MatcherError("number of columns of iris codes need to be even")
        half_codewidth.append(int(probe_code.shape[1] / 2))
    # Each weights table must be aligned element-wise with its iris code.
    if weights:
        for probe_code, w in zip(template_probe.iris_codes, weights):
            if probe_code.shape != w.shape:
                raise MatcherError("weights table and iris codes are of different sizes")
    # Precompute normalization factors once; they do not depend on the rotation.
    # NOTE(review): truthiness check means nm_dist == 0.0 is treated as "not provided" —
    # presumably intended, confirm.
    if nm_dist:
        if weights:
            sqrt_totalbitcount, sqrt_totalbitcount_top, sqrt_totalbitcount_bot = count_sqrt_totalbits(
                np.sum([np.size(a) for a in template_probe.iris_codes]), half_codewidth, weights
            )
        else:
            sqrt_totalbitcount, sqrt_totalbitcount_top, sqrt_totalbitcount_bot = count_sqrt_totalbits(
                np.sum([np.size(a) for a in template_probe.iris_codes]), half_codewidth
            )
    # Calculate the Hamming distance between probe and gallery template.
    match_dist = 1
    match_rot = 0
    for shiftby in range(-rotation_shift, rotation_shift + 1):
        # Element-wise XOR (via !=) marks nonmatching iris bits at this rotation.
        irisbits = [
            np.roll(probe_code, shiftby, axis=1) != gallery_code
            for probe_code, gallery_code in zip(template_probe.iris_codes, template_gallery.iris_codes)
        ]
        # Element-wise AND keeps only positions visible in both masks.
        maskbits = [
            np.roll(probe_code, shiftby, axis=1) & gallery_code
            for probe_code, gallery_code in zip(template_probe.mask_codes, template_gallery.mask_codes)
        ]
        if weights:
            irisbitcount_top, maskbitcount_top, irisbitcount_bot, maskbitcount_bot = count_nonmatchbits(
                irisbits, maskbits, half_codewidth, weights
            )
        else:
            irisbitcount_top, maskbitcount_top, irisbitcount_bot, maskbitcount_bot = count_nonmatchbits(
                irisbits, maskbits, half_codewidth
            )
        maskbitcount = maskbitcount_top + maskbitcount_bot
        # No commonly visible bits at this rotation: distance is undefined, skip it.
        if maskbitcount == 0:
            continue
        if nm_dist:
            # Normalized HD: blend the whole-code distance with a visibility-weighted
            # average of the per-half distances (halves with no visible bits count as 1).
            normdist_top = (
                normalized_HD(irisbitcount_top, maskbitcount_top, sqrt_totalbitcount_top, nm_dist)
                if maskbitcount_top > 0
                else 1
            )
            normdist_bot = (
                normalized_HD(irisbitcount_bot, maskbitcount_bot, sqrt_totalbitcount_bot, nm_dist)
                if maskbitcount_bot > 0
                else 1
            )
            w_top = np.sqrt(maskbitcount_top)
            w_bot = np.sqrt(maskbitcount_bot)
            Hdist = (
                normalized_HD((irisbitcount_top + irisbitcount_bot), maskbitcount, sqrt_totalbitcount, nm_dist) / 2
                + (normdist_top * w_top + normdist_bot * w_bot) / (w_top + w_bot) / 2
            )
        else:
            # Plain fractional HD: nonmatching bits over commonly visible bits.
            Hdist = (irisbitcount_top + irisbitcount_bot) / maskbitcount
        # Keep the best distance; on a tie, prefer the zero-rotation alignment.
        if (Hdist < match_dist) or (Hdist == match_dist and shiftby == 0):
            match_dist = Hdist
            match_rot = shiftby
    return match_dist, match_rot
|
evocodebench_data_165
|
from typing import Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import EyeCentersEstimationError
class BisectorsMethod(Algorithm):
    """Implementation of eye's center estimation algorithm using bisectors method for finding a circle center.

    This algorithm samples a given number of bisectors from the pupil and iris polygons, and averages their intersection
    to produce the polygon center. This method is robust against noise in the polygons, making it a good choice for
    non-perfect shapes. It is also robust to polygons missing parts of the circle arc, making it a good choice for
    partially-occluded shapes.

    LIMITATIONS:

    The iris and pupil can be approximated to circles, when the user is properly gazing at the camera.
    This requires that the cases of off-gaze have already been filtered out.
    """

    class Parameters(Algorithm.Parameters):
        """Default Parameters for BisectorsMethod algorithm."""

        # Number of perpendicular bisectors sampled per polygon.
        num_bisectors: int = Field(..., gt=0)
        # Minimum distance between a sampled point pair, as a fraction of the shape diameter.
        min_distance_between_sector_points: float = Field(..., gt=0.0, lt=1.0)
        # Cap on sampling rounds while collecting enough valid pairs.
        max_iterations: int = Field(..., gt=0)

    __parameters_type__ = Parameters

    def __init__(
        self,
        num_bisectors: int = 100,
        min_distance_between_sector_points: float = 0.75,
        max_iterations: int = 50,
    ) -> None:
        """Assign parameters.

        Args:
            num_bisectors (int, optional): Number of bisectors.. Defaults to 100.
            min_distance_between_sector_points (float, optional): Minimum distance between sectors expressed as a fractional value of a circular shape diameter. Defaults to 0.75.
            max_iterations (int, optional): Max iterations for bisector search.. Defaults to 50.
        """
        super().__init__(
            num_bisectors=num_bisectors,
            min_distance_between_sector_points=min_distance_between_sector_points,
            max_iterations=max_iterations,
        )

    def run(self, geometries: GeometryPolygons) -> EyeCenters:
        """Estimate eye's iris and pupil centers.

        Args:
            geometries (GeometryPolygons): Geometry polygons.

        Returns:
            EyeCenters: Eye's centers object.
        """
        pupil_center_x, pupil_center_y = self._find_center_coords(geometries.pupil_array, geometries.pupil_diameter)
        iris_center_x, iris_center_y = self._find_center_coords(geometries.iris_array, geometries.iris_diameter)
        return EyeCenters(pupil_x=pupil_center_x, pupil_y=pupil_center_y, iris_x=iris_center_x, iris_y=iris_center_y)

    def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
        """Find center coordinates of a polygon.

        Args:
            polygon (np.ndarray): np.ndarray.
            diameter (float): diameter of the polygon.

        Returns:
            Tuple[float, float]: Tuple with the center location coordinates (x, y).
        """
        # Convert the fractional distance threshold into pixels for this polygon.
        min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
        first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
            polygon, min_distance_between_sector_points_in_px
        )
        return self._find_best_intersection(first_bisectors_point, second_bisectors_point)

    def _calculate_perpendicular_bisectors(
        self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.
        A pair of points is used if their distance is larger then min_distance_between_sector_points_in_px.

        Args:
            polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.
            min_distance_between_sector_points_in_px (float): Minimum distance between sector points.

        Raises:
            EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!

        Returns:
            Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.
        """
        # NOTE(review): seeds NumPy's *global* RNG for deterministic sampling — this
        # affects other np.random users in the same process; confirm acceptable.
        np.random.seed(142857)
        bisectors_first_points = np.empty([0, 2])
        bisectors_second_points = np.empty([0, 2])
        # Accumulate valid point pairs until num_bisectors are collected or iterations run out.
        for _ in range(self.params.max_iterations):
            random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))
            first_drawn_points = polygon[random_indices[:, 0]]
            second_drawn_points = polygon[random_indices[:, 1]]
            norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)
            # Keep only pairs that are far enough apart for a stable bisector direction.
            mask = norms > min_distance_between_sector_points_in_px
            bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])
            bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])
            if len(bisectors_first_points) >= self.params.num_bisectors:
                break
        else:
            # for/else: reached only when the loop exhausted max_iterations without a break.
            raise EyeCentersEstimationError(
                "Not able to find enough random pairs of points on the arc with a large enough distance!"
            )
        bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]
        bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]
        bisectors_center = (bisectors_first_points + bisectors_second_points) / 2
        # Flip xs with ys and flip sign of one of them to create a 90deg rotation
        inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)
        inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]
        # Add perpendicular vector to center and normalize
        norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)
        inv_bisectors_center_slope[:, 0] /= norm
        inv_bisectors_center_slope[:, 1] /= norm
        # Each bisector is represented by two points one unit apart, straddling the midpoint.
        first_bisectors_point = bisectors_center - inv_bisectors_center_slope
        second_bisectors_point = bisectors_center + inv_bisectors_center_slope
        return first_bisectors_point, second_bisectors_point

    def _find_best_intersection(self, fst_points: np.ndarray, sec_points: np.ndarray) -> Tuple[float, float]:
        """fst_points and sec_points are NxD arrays defining N lines. D is the dimension of the space.
        This function returns the least squares intersection of the N lines from the system given by eq. 13 in
        http://cal.cs.illinois.edu/~johannes/research/LS_line_intersect.pdf.

        Args:
            fst_points (np.ndarray): First bisectors points.
            sec_points (np.ndarray): Second bisectors points.

        Returns:
            Tuple[float, float]: Best intersection point.

        Reference:
            [1] http://cal.cs.illinois.edu/~johannes/research/LS_line_intersect.pdf
        """
        norm_bisectors = (sec_points - fst_points) / np.linalg.norm(sec_points - fst_points, axis=1)[:, np.newaxis]
        # Generate the array of all projectors I - n*n.T
        projections = np.eye(norm_bisectors.shape[1]) - norm_bisectors[:, :, np.newaxis] * norm_bisectors[:, np.newaxis]
        # Generate R matrix and q vector
        R = projections.sum(axis=0)
        q = (projections @ fst_points[:, :, np.newaxis]).sum(axis=0)
        # Solve the least squares problem for the intersection point p: Rp = q
        p = np.linalg.lstsq(R, q, rcond=None)[0]
        intersection_x, intersection_y = p
        return intersection_x.item(), intersection_y.item()
|
evocodebench_data_166
|
import abc
from copy import deepcopy
from typing import Any, List
import pydantic
from pydantic import Extra
from iris.callbacks.callback_interface import Callback
class ImmutableModel(pydantic.BaseModel):
    """Specifies configurations for validating classes which objects should be immutable."""

    class Config:
        """Configuration options for classes which objects are meant to be immutable."""

        arbitrary_types_allowed = True
        allow_mutation = False
        validate_all = True
        smart_union = True
        extra = Extra.forbid

    def serialize(self) -> Any:
        """Serialize the object. By default, this method raises a RuntimeError to notify the user that the method wasn't implemented.

        Raises:
            RuntimeError: Always.
        """
        raise RuntimeError(f"{self.__class__.__name__}.serialize not implemented!")

    @staticmethod
    def deserialize(obj: Any) -> Any:
        """Deserialize the object. By default, this method raises a RuntimeError to notify the user that the method wasn't implemented.

        Note: this is a static method, so ``obj`` is the serialized payload to
        deserialize, not the model instance. (The parameter was previously named
        ``self``, which was misleading on a ``@staticmethod``.)

        Args:
            obj (Any): Serialized payload to deserialize.

        Raises:
            RuntimeError: Always.
        """
        raise RuntimeError(f"{obj.__class__.__name__}.deserialize not implemented!")
class Algorithm(abc.ABC):
    """Base class of every node of the iris recognition pipeline."""

    class Parameters(ImmutableModel):
        """Default parameters."""

        pass

    __parameters_type__ = Parameters

    def __init__(self, **kwargs: Any) -> None:
        """Init function.

        A ``callbacks`` keyword argument, when present, is deep-copied into the
        node's callback list and removed before parameter validation.
        """
        if "callbacks" in kwargs:
            self._callbacks: List[Callback] = deepcopy(kwargs.pop("callbacks"))
        else:
            self._callbacks = []
        self.params = self.__parameters_type__(**kwargs)

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        """Make an object a functor.

        Returns:
            Any: Object specified by an interface.
        """
        return self.execute(*args, **kwargs)

    def execute(self, *args: Any, **kwargs: Any) -> Any:
        """Execute method and wrapped with hooks if such are specified.

        Returns:
            Any: Object specified by an interface.
        """
        # Notify every callback before running, run once, then notify with the result.
        for hook in self._callbacks:
            hook.on_execute_start(*args, **kwargs)
        result = self.run(*args, **kwargs)
        for hook in self._callbacks:
            hook.on_execute_end(result)
        return result

    def run(self, *args: Any, **kwargs: Any) -> Any:
        """Implement method design pattern. Not overwritten by subclass will raise an error.

        Raises:
            NotImplementedError: Raised if subclass doesn't implement `run` method.

        Returns:
            Any: Return value by concrate implementation of the `run` method.
        """
        raise NotImplementedError(f"{self.__class__.__name__}.run method not implemented!")
|
evocodebench_data_167
|
import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
    """Runtime type checker and instantiator for JSON-compatible data.

    ``check_type`` validates a deserialized value against a typing annotation,
    while ``instantiate`` converts JSON-compatible data into an instance of the
    requested type (base types, containers, dataclasses and pydantic models).
    """

    def __init__(self):
        # Extract types from collections and collections.abc
        collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
        abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
        # Filter out types that have dictionary-like methods
        self.dict_like_types = {
            cls for cls in collection_types.union(abc_collection_types)
            if hasattr(cls, 'keys') and hasattr(cls, 'items')
        }
        self.list_like_types = {
            cls for cls in collection_types.union(abc_collection_types)
            if hasattr(cls, 'append') and hasattr(cls, 'pop')
        }
        self.set_like_types = {
            cls for cls in collection_types.union(abc_collection_types)
            if hasattr(cls, 'add') and hasattr(cls, 'discard')
        }
        # Add the general Sequence to list-like types
        # if python version is 3.9 or above, use collections.abc.Sequence
        if hasattr(collections.abc, 'Sequence'):
            self.list_like_types.add(collections.abc.Sequence)
        else:
            self.list_like_types.add(collections.Sequence)
        self.list_like_types.add(typing.List)
        # Add the general Mapping to dict-like types
        if hasattr(collections.abc, 'Mapping'):
            self.dict_like_types.add(collections.abc.Mapping)
        else:
            self.dict_like_types.add(collections.Mapping)
        self.dict_like_types.add(typing.Dict)
        # Add the general Set to set-like types
        if hasattr(collections.abc, 'Set'):
            self.set_like_types.add(collections.abc.Set)
        else:
            self.set_like_types.add(collections.Set)
        self.set_like_types.add(typing.Set)
        # Add the general Tuple to tuple-like types
        self.tuple_like_types = {
            cls for cls in collection_types.union(abc_collection_types)
            if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
        }
        self.tuple_like_types.add(typing.Tuple)

    def is_base_type(self, _type: Any) -> bool:
        """Determine if a type is a base type (int, float, str, bool or None)."""
        return _type in {int, float, str, bool, None}

    def validate_base_type(self, value: Any, typ: Any) -> bool:
        """Validate base types (None means "value must be None")."""
        if typ is None:
            return value is None
        return isinstance(value, typ)

    def validate_output(self, output: str, type_definition: Any) -> bool:
        """Deserialize ``output`` from JSON and validate it against ``type_definition``."""
        try:
            deserialized_output = json.loads(output)
        except json.JSONDecodeError:
            return False
        return self.check_type(deserialized_output, type_definition)

    def check_type(self, value: Any, type_definition: Any) -> bool:
        """
        Validate a value against a type definition.

        Args:
            value: Any object or primitive value
            type_definition: The type definition to validate against

        Returns:
            Whether the value is valid for the type definition
        """
        if type_definition is Any:
            return True

        if self.is_base_type(type_definition):
            return self.validate_base_type(value, type_definition)

        origin = get_origin(type_definition) or type_definition
        args = get_args(type_definition)

        # Handle base types
        if self.is_base_type(origin):
            return self.validate_base_type(value, origin)

        if origin == Literal:
            return value in args

        if origin == Union:
            return any(self.check_type(value, union_type) for union_type in args)

        # Handle tuples
        if origin == tuple:
            if not isinstance(value, tuple):
                return False
            item_type = args[0] if args else Any
            return all(self.check_type(v, item_type) for v in value)

        # Handle lists
        if origin == list:
            if not isinstance(value, list):
                return False
            item_type = args[0] if args else Any
            return all(self.check_type(v, item_type) for v in value)

        # Handle more complex types that are collections and list-like.
        # Guard with isclass so non-class origins cannot crash issubclass.
        if origin is list or (inspect.isclass(origin) and issubclass(origin, tuple(self.list_like_types))):
            if not any(isinstance(value, t) for t in self.list_like_types):
                return False
            item_type = args[0] if args else Any
            return all(self.check_type(v, item_type) for v in value)

        # Handle sets
        if origin == set:
            if not isinstance(value, set):
                return False
            item_type = args[0] if args else Any
            return all(self.check_type(v, item_type) for v in value)

        # Handle datetime: consider the value valid if it can be used as keyword
        # arguments to construct the corresponding datetime/date/time object.
        if origin in [datetime.datetime, datetime.date, datetime.time]:
            try:
                obj = origin(**value)
                return True
            except Exception:
                return False

        # Handle dictionaries (guarded the same way as list-like types above).
        if origin is dict or (inspect.isclass(origin) and issubclass(origin, tuple(self.dict_like_types))):
            if not isinstance(value, (dict, Mapping)):
                return False

            if args:
                if len(args) == 1:
                    key_type = args[0]
                    value_type = Any  # General assumption; specific dict-like types might differ
                elif len(args) == 2:
                    key_type, value_type = args
                else:
                    key_type = value_type = Any
            else:
                key_type = value_type = Any

            return all(
                self.check_type(k, key_type) and self.check_type(v, value_type)
                for k, v in value.items()
            )

        # Handle pydantic models
        if self.is_pydantic_model(origin):
            try:
                if isinstance(value, origin):
                    return True
                # Otherwise the value must be a dict of field values.
                if not isinstance(value, dict):
                    return False
                # Required fields are those whose annotation is not Optional (Union[..., None]).
                required_fields = [
                    field for field, field_type in origin.__annotations__.items()
                    if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))
                ]
                # Check that all required fields are present and correctly typed.
                for arg in required_fields:
                    if arg not in value:
                        return False
                    if not self.check_type(value[arg], origin.__annotations__[arg]):
                        return False
                # Check that all remaining provided fields are correctly typed.
                for arg, obj in value.items():
                    if arg in required_fields:
                        continue
                    if not self.check_type(value[arg], origin.__annotations__[arg]):
                        return False
                return True
            except Exception as e:
                print(e)
                return False

        # Handle dataclasses: valid when the dict round-trips through the dataclass.
        if self.is_dataclass_instance(origin):
            try:
                obj = origin(**value)
                return dataclasses.asdict(obj) == value
            except Exception:
                return False

        # Handle arbitrary class types
        if inspect.isclass(origin) and not self.is_base_type(origin):
            # Ensure the value is an instance of the class
            if not isinstance(value, origin):
                return False
            # Gather type hints from the class and its bases
            type_hints = {}
            for cls in reversed(origin.__mro__):
                type_hints.update(get_type_hints(cls))
            # Validate each attribute of the class
            for attr, attr_type in type_hints.items():
                attr_value = getattr(value, attr, None)
                if not self.check_type(attr_value, attr_type):
                    return False
            return True

        return False

    @staticmethod
    def is_pydantic_model(cls):
        """Heuristic check for pydantic models (duck-typed on ``parse_obj``)."""
        return hasattr(cls, 'parse_obj')

    @staticmethod
    def is_dataclass_instance(cls):
        """Heuristic check for dataclass types."""
        return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')

    @staticmethod
    def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
        """Determine if the class is a subclass of a generic type."""
        try:
            return issubclass(cls, generic) and cls is not generic
        except TypeError:
            if not hasattr(cls, '__origin__'):
                return False
            return cls.__origin__ is generic

    @staticmethod
    def _is_generic(cls: Type) -> bool:
        """Check if the provided type is a generic."""
        return hasattr(cls, "__origin__")

    def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
        """
        Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
        retains the type arguments.

        :return: Type chain
        """
        if get_args(target_type):
            return get_args(target_type)
        for base in target_type.__bases__:
            args = self._get_recursive_args(base)
            if args:
                return args
        return ()

    def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
        """
        Navigate up the MRO to find the first generic base and its arguments.
        """
        # First, check if target_type is a type annotation.
        # If so, directly return its origin and arguments.
        origin = get_origin(target_type)
        args = get_args(target_type)
        if origin and args:
            return origin, args

        # If target_type is a real class, then navigate its MRO.
        if hasattr(target_type, '__mro__'):
            if hasattr(target_type, '__orig_bases__'):
                for base in target_type.__orig_bases__:
                    if get_args(base):
                        return base, get_args(base)
            for base in target_type.__mro__:
                if get_args(base):
                    return base, get_args(base)
        return None, ()

    def _is_list_like(self, target_type: Type) -> bool:
        """Determine if the target type is list-like."""
        if target_type in {list, typing.List}:
            return True
        if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
            return True
        return False

    def _is_tuple_like(self, target_type: Type) -> bool:
        """Determine if the target type is tuple-like."""
        if target_type in {tuple, typing.Tuple}:
            return True
        if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
            return True
        return False

    def _is_dict_like(self, target_type: Type) -> bool:
        """Determine if the target type is dict-like."""
        if target_type in {dict, typing.Dict}:
            return True
        if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
            return True
        return False

    def _is_set_like(self, target_type: Type) -> bool:
        """Determine if the target type is set-like."""
        if target_type in {set, typing.Set}:
            return True
        if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
            return True
        return False

    def instantiate(self, data: Any, target_type: Type) -> Any:
        """
        Attempts to convert a JSON-compatible data structure into an instance of the specified type.

        Args:
            data: JSON-compatible data structure to instantiate the target type.
            target_type: The type to instantiate from the given data.

        Returns:
            An instance of the target type initialized with the data.

        Raises:
            TypeError: If the data cannot be converted into the target type.
        """
        # Handle None type
        if data is None:
            return None

        origin = get_origin(target_type) or target_type

        # If the target type is a built-in, attempt to instantiate and return
        if self.is_base_type(target_type) or target_type is Any:
            # If the parsed data is a string and target type is str, return it directly
            if isinstance(data, str) and target_type is str:
                return data
            # If any, return the data directly
            if target_type is Any:
                return data
            try:
                return target_type(data)
            except (ValueError, TypeError):
                # Handle the special case where the string represents a float but we want an integer
                if target_type is int:
                    try:
                        return int(float(data))
                    except (ValueError, TypeError):
                        pass
                # BUGFIX: previously returned int(float(data)) here, silently truncating floats.
                if target_type is float:
                    try:
                        return float(data)
                    except (ValueError, TypeError):
                        pass
                raise TypeError(f"Failed to instantiate {target_type} from provided data.")

        # Special handling for datetime: data is expected to be keyword arguments.
        if origin == datetime.datetime:
            try:
                return datetime.datetime(**data)
            except Exception:
                raise TypeError(f"Failed to instantiate {target_type} from provided data.")

        # Check if origin is Union; if so, instantiate the first member type that works.
        if origin == Union:
            for arg in get_args(target_type):
                try:
                    return self.instantiate(data, arg)
                except Exception:
                    continue
            raise TypeError(f"Failed to instantiate {target_type} from provided data.")

        # If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
        if isinstance(data, dict):
            if inspect.isclass(target_type) and not self.is_base_type(target_type):
                # Special handling for dataclasses
                if is_dataclass(target_type):
                    fields = [f.name for f in dataclasses.fields(target_type)]
                    type_hints = get_type_hints(target_type)
                    filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
                                     k in fields}
                    return target_type(**filtered_data)

                # Special handling for Pydantic models
                if issubclass(target_type, BaseModel):
                    # instantiate the sub attributes
                    for attr, attr_type in target_type.__annotations__.items():
                        if attr in data:
                            data[attr] = self.instantiate(data[attr], attr_type)
                    try:
                        return target_type.model_validate(data)
                    except AttributeError as e:
                        # backwards compatibility with pydantic < 2
                        return target_type.parse_obj(data)

                # For general classes, attempt instantiation
                try:
                    return target_type(**data)
                except TypeError:
                    raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")

            # Handle dictionary-like types
            # Check if the target type is or inherits from defaultdict
            if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
                key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
                instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
                                      data.items()}

                # For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
                # but you might want to adapt this based on your needs.
                return defaultdict(int, instantiated_items)

            # Handle dict subclasses like OrderedDict.
            # the first check needs to be done to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
                key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
                instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
                return origin(instantiated_items)

            # Handle other dictionary-like types
            elif origin is dict or self._is_subclass_of_generic(origin, dict):
                key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
                instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
                                     data.items()}

                # If the target_type is a subclass of dict, return an instance of target_type
                if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
                    return target_type(instantiated_dict)
                else:
                    return dict(instantiated_dict)

        # Tuples aren't supported in JSONable types, so we look for lists instead
        if isinstance(data, list):
            try:
                # If the origin or target type is a list-like type, or if it implements a list-like collections type
                # e.g Sequence[int]
                if origin is list or self._is_subclass_of_generic(origin, list):
                    base, item_types = self._find_generic_base_and_args(target_type)

                    item_type = item_types[0] if item_types else Any

                    instantiated_items = []

                    for item in data:
                        # For each item, validate and instantiate it
                        try:
                            instantiated_item = self.instantiate(item, item_type)
                        except ValueError:
                            raise TypeError(
                                f"Item of type {type(item).__name__} does not match expected type {item_type}.")
                        safe = self.check_type(instantiated_item, item_type)
                        if not safe:
                            raise TypeError(
                                f"Item of type {type(item).__name__} does not match expected type {item_type}.")
                        instantiated_items.append(instantiated_item)

                    # If target_type is a subclass of list, return an instance of target_type
                    if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
                        return target_type(instantiated_items)

                    return instantiated_items

                # Handle tuples
                if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
                    base, item_types = self._find_generic_base_and_args(target_type)

                    instantiated_items = []

                    # If there are no subscripted types, assume Any
                    if not item_types:
                        item_types = (Any,) * len(data)

                    for i, item in enumerate(data):
                        # For each item, validate and instantiate it
                        instantiated_item = self.instantiate(item, item_types[i])
                        instantiated_items.append(instantiated_item)
                        # If the instantiated item does not match the expected type, raise an exception.
                        # BUGFIX: skip the isinstance check for Any (isinstance with typing.Any raises TypeError).
                        _type = item_types[i]
                        if _type is not Any and not isinstance(instantiated_item, _type):
                            raise TypeError(
                                f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i]}.")

                    # Convert the list of instantiated items to a tuple
                    instantiated_tuple = tuple(instantiated_items)

                    # If target_type is a subclass of tuple, return an instance of target_type
                    if self._is_subclass_of_generic(target_type, tuple):
                        return target_type(instantiated_tuple)

                    return instantiated_tuple

                # Handle sets
                if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
                    base, item_type = self._find_generic_base_and_args(target_type)

                    # If there is no subscripted type, assume Any.
                    # BUGFIX: keep the fallback subscriptable — the previous fallback (bare Any)
                    # crashed on item_type[0] below for unparameterized set targets.
                    if not item_type:
                        item_type = (Any,)

                    instantiated_items = set()

                    for item in data:
                        # For each item, validate and instantiate it
                        instantiated_item = self.instantiate(item, item_type[0])
                        instantiated_items.add(instantiated_item)

                        # If the instantiated item does not match the expected type, raise an exception.
                        # Skip the isinstance check for Any (isinstance with typing.Any raises TypeError).
                        if item_type[0] is not Any and not isinstance(instantiated_item, item_type[0]):
                            raise TypeError(
                                f"Item of type {type(item).__name__} does not match expected type {item_type[0]}.")

                    # If target_type is a subclass of set, return an instance of target_type
                    if self._is_subclass_of_generic(target_type, set):
                        return target_type(instantiated_items)

                    return instantiated_items

                # Handle deques
                # BUGFIX: the subclass check previously tested against ``set`` (copy-paste error).
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
                    item_type = get_args(target_type)[0] if get_args(target_type) else Any
                    return deque(self.instantiate(item, item_type) for item in data)

                if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
                    item_type = get_args(target_type)[0] if get_args(target_type) else Any
                    return frozenset(self.instantiate(item, item_type) for item in data)
            except TypeError as e:
                print(e)
                raise TypeError(f"Failed to instantiate {target_type} from list. {e}")

        # If none of the above, return the data as-is
        return data
|
evocodebench_data_168
|
import inspect
from typing import get_type_hints, Literal, get_origin, Tuple, Callable, Optional, Dict, Union
import json
from tanuki.models.embedding import Embedding
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_type import FunctionType
from tanuki.utils import get_source
# Global registries mapping function names to callables, split by function kind
# (populated via Register.add_function).
alignable_symbolic_functions = {}
alignable_embedding_functions = {}
class Register:
    """Namespace over the global registries of alignable functions.

    Functions are stored in one of two module-level dicts depending on their
    FunctionType: ``alignable_symbolic_functions`` or
    ``alignable_embedding_functions``. All methods are static.
    """

    def __init__(self):
        pass

    @staticmethod
    def get(func_name) -> Tuple[FunctionType, Callable]:
        """Return the (FunctionType, callable) registered under ``func_name``.

        Implicitly returns None when the name is in neither registry; callers
        are expected to check membership beforehand.
        """
        if func_name in alignable_symbolic_functions:
            return FunctionType.SYMBOLIC, alignable_symbolic_functions[func_name]
        if func_name in alignable_embedding_functions:
            return FunctionType.EMBEDDABLE, alignable_embedding_functions[func_name]

    @staticmethod
    def function_names_to_patch(*args, type: Optional[FunctionType] = None):
        """
        Get the registered function names that should be patched, either globally (if len(args)==0) or as members of
        an instance.

        :param args: Optional instance to check; when given, only names that
            exist as attributes on that instance are returned.
        :param type: Optional FunctionType filter; None means both kinds.
        :return: List of function names.
        """
        if len(args) == 1:
            instance = args[0]
            # Select which registries to scan for attributes on the instance.
            if type == FunctionType.SYMBOLIC:
                registries = [alignable_symbolic_functions]
            elif type == FunctionType.EMBEDDABLE:
                registries = [alignable_embedding_functions]
            else:
                registries = [alignable_symbolic_functions, alignable_embedding_functions]
            function_names = []
            for registry in registries:
                for key in registry.keys():
                    if hasattr(instance, key):
                        function_names.append(key)
            return function_names
        else:
            if type == FunctionType.SYMBOLIC:
                return list(alignable_symbolic_functions.keys())
            elif type == FunctionType.EMBEDDABLE:
                return list(alignable_embedding_functions.keys())
            else:
                return list(alignable_symbolic_functions.keys()) + list(alignable_embedding_functions.keys())

    @staticmethod
    def functions_to_patch(*args, type: Optional[FunctionType] = None) -> Dict[str, Callable]:
        """Return a name -> callable mapping for the patchable functions.

        :param args: Optional instance, forwarded to function_names_to_patch.
        :param type: Optional FunctionType filter; None means both kinds.
        """
        function_names = Register.function_names_to_patch(*args, type=type)
        if type == FunctionType.SYMBOLIC:
            return {key: alignable_symbolic_functions[key] for key in function_names}
        elif type == FunctionType.EMBEDDABLE:
            return {key: alignable_embedding_functions[key] for key in function_names}
        else:
            # BUGFIX: the original tried to add two dicts with `+` (TypeError)
            # and looked every name up in the symbolic registry only (KeyError
            # for embedding names). Merge by looking each name up in the
            # registry that actually contains it.
            merged: Dict[str, Callable] = {}
            for key in function_names:
                if key in alignable_symbolic_functions:
                    merged[key] = alignable_symbolic_functions[key]
                else:
                    merged[key] = alignable_embedding_functions[key]
            return merged

    @staticmethod
    def add_function(func, function_description: FunctionDescription):
        """Register ``func`` in the registry matching its description's type."""
        if function_description.type == FunctionType.SYMBOLIC:
            alignable_symbolic_functions[func.__name__] = func
        elif function_description.type == FunctionType.EMBEDDABLE:
            alignable_embedding_functions[func.__name__] = func

    @staticmethod
    def load_function_description_from_name(*args) -> FunctionDescription:
        """
        Load a function description from a function name, either from the
        global registries (one argument: func_name) or from an instance
        (two arguments: instance, func_name).

        :return: FunctionDescription for the resolved function.
        :raises ValueError: on a wrong argument count, or when a global name
            is not registered.
        """
        if len(args) == 1:
            instance = None
            func_name = args[0]
        elif len(args) == 2:
            instance = args[0]
            func_name = args[1]
        else:
            raise ValueError("Invalid number of arguments")

        if not instance:
            if func_name in alignable_symbolic_functions:
                func_object = alignable_symbolic_functions[func_name]
            elif func_name in alignable_embedding_functions:
                func_object = alignable_embedding_functions[func_name]
            else:
                # BUGFIX: previously fell through with func_object unbound,
                # surfacing as a confusing NameError.
                raise ValueError(f"Function '{func_name}' is not registered")
        else:
            func_object = getattr(instance, func_name)
        return Register.load_function_description(func_object)

    @staticmethod
    def load_function_description(func_object) -> FunctionDescription:
        """
        Create a function description from a function object that can be used to register the function.

        :param func_object: The function to describe.
        :return: FunctionDescription capturing name, docstring, input/output
            type hints, source of user-defined parameter/return classes, and
            the inferred FunctionType.
        """
        sig = inspect.signature(func_object)
        type_hints = get_type_hints(func_object)

        # Extract input type hints and output type hint
        input_type_hints = {k: v for k, v in type_hints.items() if k in sig.parameters}
        output_type_hint = type_hints.get('return')

        # Fetch the docstring
        docstring = func_object.__doc__.strip() if func_object.__doc__ else ""

        def get_class_definition(class_type):
            """Helper function to get class definition source if not a built-in type"""
            if hasattr(class_type, "__origin__"):  # Check if it's a generic type
                origin_type = class_type.__origin__
                if origin_type is Literal:  # Handle Literal case
                    return [literal for literal in class_type.__args__]
                elif hasattr(class_type, "__args__"):  # Access inner types
                    return [get_class_definition(arg) for arg in class_type.__args__ if arg is not None]
            elif inspect.isclass(class_type) and class_type.__module__ != "builtins":
                return get_source(class_type)
            return class_type.__name__

        # Extract class definitions for input and output types
        input_class_definitions = {
            param_name: get_class_definition(param_type)
            for param_name, param_type in input_type_hints.items()
        }

        output_class_definition = None
        function_type = FunctionType.SYMBOLIC
        # check if the output type hint is a class or a Union of types
        is_union = hasattr(output_type_hint, "__origin__") and output_type_hint.__origin__ == Union
        if inspect.isclass(output_type_hint) or is_union:
            if is_union:
                # get all the types in the union
                union_types = output_type_hint.__args__
                output_type_descriptions = {}
                for output_type in union_types:
                    # check if it is the NoneType class
                    if output_type is type(None):
                        output_type_descriptions["NoneType"] = "None"
                    elif inspect.isclass(output_type):
                        # Check if the base class of the output type hint is Embedding
                        base_class = get_origin(output_type) or output_type
                        if issubclass(base_class, Embedding):
                            output_class_definition = None
                            function_type = FunctionType.EMBEDDABLE
                            break
                        class_type_description = get_class_definition(output_type)
                        if isinstance(class_type_description, str):
                            # less horrible prompt formatting when dumped to json
                            class_type_description = class_type_description.replace('"', "'")
                        output_type_descriptions[output_type.__name__] = class_type_description
                if function_type == FunctionType.SYMBOLIC:
                    # BUGFIX: only build the union description when no Embedding
                    # member short-circuited the loop; previously this line
                    # clobbered the None set for embeddable functions.
                    output_class_definition = f"Union of following classes {json.dumps(output_type_descriptions)}"
            else:  # it's a plain class
                # Check if the base class of the output type hint is Embedding
                base_class = get_origin(output_type_hint) or output_type_hint
                if issubclass(base_class, Embedding):
                    output_class_definition = None
                    function_type = FunctionType.EMBEDDABLE
                else:
                    output_class_definition = get_class_definition(output_type_hint)

        return FunctionDescription(
            name=func_object.__name__,
            docstring=docstring,
            input_type_hints=input_type_hints,
            output_type_hint=output_type_hint,
            input_class_definitions=input_class_definitions,
            output_class_definition=output_class_definition,
            type=function_type
        )
|
evocodebench_data_169
|
import hashlib
import logging
import math
import numpy as np
from bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilter:
    """Bloom filter with double hashing and a pluggable persistence backend.

    Probe index i is (hash1 + i * hash2) mod size, so each operation computes
    only two digests regardless of hash_count.
    """

    def __init__(self,
                 persistence: "IBloomFilterPersistence",
                 size=None,
                 hash_count=None,
                 expected_number_of_elements=None,
                 false_positive_probability=None):
        """Create a filter from explicit (size, hash_count), or derive them
        from (expected_number_of_elements, false_positive_probability).

        Raises
        ------
        ValueError
            If persistence is missing, or the supplied parameters do not
            fully determine both size and hash_count.
        """
        if not persistence:
            raise ValueError("Persistence cannot be None, it must be an instance of IBloomFilterPersistence")
        if not size and not hash_count and not expected_number_of_elements and not false_positive_probability:
            # BUGFIX: the closing parenthesis was missing from this message.
            raise ValueError("Must specify either (size, hash_count) or (expected_number_of_elements, false_positive_probability)")
        if expected_number_of_elements and false_positive_probability:
            size, hash_count = BloomFilter.optimal_bloom_filter_params(expected_number_of_elements, false_positive_probability)
        # BUGFIX: the original check used `and`, so a partially specified pair
        # (e.g. size given but hash_count None) slipped through and failed
        # later with a TypeError; require both to be set.
        if not size or not hash_count:
            raise ValueError("Size and hash_count not set. This should never happen.")
        self.size = size
        self.hash_count = hash_count
        self.bit_array, self.indices = self.init_bit_array(size)
        self.persistence = persistence

    def init_bit_array(self, size):
        """Return a zeroed bitarray of ``size`` bits and an int32 index array."""
        _bit_array = bitarray(size)
        _bit_array.setall(0)
        _indices = np.zeros(size, dtype=np.int32)
        return _bit_array, _indices

    def hash_functions(self, string):
        """Return the two base digests (SHA-256, MD5) used for double hashing."""
        # h1(x)
        hash1 = int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16)
        # h2(x)
        hash2 = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16)
        return hash1, hash2

    def lookup(self, string):
        """Return False if ``string`` is definitely absent, True if it may be present."""
        hash1, hash2 = self.hash_functions(string)
        for seed in range(self.hash_count):
            index = (hash1 + seed * hash2) % self.size
            if self.bit_array[index] == 0:
                return False
        return True

    def add(self, string):
        """Insert ``string`` by setting its hash_count probe bits."""
        hash1, hash2 = self.hash_functions(string)
        for seed in range(self.hash_count):
            index = (hash1 + seed * hash2) % self.size
            self.bit_array[index] = 1

    def save(self):
        """Persist the current bit array through the persistence backend."""
        self.persistence.save(self.bit_array)

    def load(self):
        """Load the bit array; reinitialize and re-save if its length is
        inconsistent with self.size (treated as corruption)."""
        self.bit_array = self.persistence.load()
        length_in_bytes = int(len(self.bit_array)/8)
        expected_length = math.ceil(self.size / 8)
        if length_in_bytes != expected_length:
            logging.warning("Bit array length does not match expected size, and so might be corrupted. Reinitializing.")
            self.bit_array, self.indices = self.init_bit_array(self.size)
            self.save()

    @staticmethod
    def optimal_bloom_filter_params(n, p):
        """
        Calculate the optimal bit array size (m) and number of hash functions (k)
        for a Bloom filter.
        n: expected number of items to be stored
        p: acceptable false positive probability
        Returns a tuple (m, k)
        """
        m = - (n * math.log(p)) / (math.log(2) ** 2)
        k = (m / n) * math.log(2)
        return int(math.ceil(m)), int(math.ceil(k))
|
evocodebench_data_170
|
import hashlib
import logging
import math
import numpy as np
from bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilter:
    """Bloom filter with double hashing and a pluggable persistence backend.

    Probe index i is (hash1 + i * hash2) mod size, so each operation computes
    only two digests regardless of hash_count.
    """

    def __init__(self,
                 persistence: "IBloomFilterPersistence",
                 size=None,
                 hash_count=None,
                 expected_number_of_elements=None,
                 false_positive_probability=None):
        """Create a filter from explicit (size, hash_count), or derive them
        from (expected_number_of_elements, false_positive_probability).

        Raises
        ------
        ValueError
            If persistence is missing, or the supplied parameters do not
            fully determine both size and hash_count.
        """
        if not persistence:
            raise ValueError("Persistence cannot be None, it must be an instance of IBloomFilterPersistence")
        if not size and not hash_count and not expected_number_of_elements and not false_positive_probability:
            # BUGFIX: the closing parenthesis was missing from this message.
            raise ValueError("Must specify either (size, hash_count) or (expected_number_of_elements, false_positive_probability)")
        if expected_number_of_elements and false_positive_probability:
            size, hash_count = BloomFilter.optimal_bloom_filter_params(expected_number_of_elements, false_positive_probability)
        # BUGFIX: the original check used `and`, so a partially specified pair
        # (e.g. size given but hash_count None) slipped through and failed
        # later with a TypeError; require both to be set.
        if not size or not hash_count:
            raise ValueError("Size and hash_count not set. This should never happen.")
        self.size = size
        self.hash_count = hash_count
        self.bit_array, self.indices = self.init_bit_array(size)
        self.persistence = persistence

    def init_bit_array(self, size):
        """Return a zeroed bitarray of ``size`` bits and an int32 index array."""
        _bit_array = bitarray(size)
        _bit_array.setall(0)
        _indices = np.zeros(size, dtype=np.int32)
        return _bit_array, _indices

    def hash_functions(self, string):
        """Return the two base digests (SHA-256, MD5) used for double hashing."""
        # h1(x)
        hash1 = int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16)
        # h2(x)
        hash2 = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16)
        return hash1, hash2

    def lookup(self, string):
        """Return False if ``string`` is definitely absent, True if it may be present."""
        hash1, hash2 = self.hash_functions(string)
        for seed in range(self.hash_count):
            index = (hash1 + seed * hash2) % self.size
            if self.bit_array[index] == 0:
                return False
        return True

    def add(self, string):
        """Insert ``string`` by setting its hash_count probe bits."""
        hash1, hash2 = self.hash_functions(string)
        for seed in range(self.hash_count):
            index = (hash1 + seed * hash2) % self.size
            self.bit_array[index] = 1

    def save(self):
        """Persist the current bit array through the persistence backend."""
        self.persistence.save(self.bit_array)

    def load(self):
        """Load the bit array; reinitialize and re-save if its length is
        inconsistent with self.size (treated as corruption)."""
        self.bit_array = self.persistence.load()
        length_in_bytes = int(len(self.bit_array)/8)
        expected_length = math.ceil(self.size / 8)
        if length_in_bytes != expected_length:
            logging.warning("Bit array length does not match expected size, and so might be corrupted. Reinitializing.")
            self.bit_array, self.indices = self.init_bit_array(self.size)
            self.save()

    @staticmethod
    def optimal_bloom_filter_params(n, p):
        """
        Calculate the optimal bit array size (m) and number of hash functions (k)
        for a Bloom filter.
        n: expected number of items to be stored
        p: acceptable false positive probability
        Returns a tuple (m, k)
        """
        m = - (n * math.log(p)) / (math.log(2) ** 2)
        k = (m / n) * math.log(2)
        return int(math.ceil(m)), int(math.ceil(k))
|
evocodebench_data_171
|
import hashlib
import logging
import math
import numpy as np
from bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilter:
    """Bloom filter with double hashing and a pluggable persistence backend.

    Probe index i is (hash1 + i * hash2) mod size, so each operation computes
    only two digests regardless of hash_count.
    """

    def __init__(self,
                 persistence: "IBloomFilterPersistence",
                 size=None,
                 hash_count=None,
                 expected_number_of_elements=None,
                 false_positive_probability=None):
        """Create a filter from explicit (size, hash_count), or derive them
        from (expected_number_of_elements, false_positive_probability).

        Raises
        ------
        ValueError
            If persistence is missing, or the supplied parameters do not
            fully determine both size and hash_count.
        """
        if not persistence:
            raise ValueError("Persistence cannot be None, it must be an instance of IBloomFilterPersistence")
        if not size and not hash_count and not expected_number_of_elements and not false_positive_probability:
            # BUGFIX: the closing parenthesis was missing from this message.
            raise ValueError("Must specify either (size, hash_count) or (expected_number_of_elements, false_positive_probability)")
        if expected_number_of_elements and false_positive_probability:
            size, hash_count = BloomFilter.optimal_bloom_filter_params(expected_number_of_elements, false_positive_probability)
        # BUGFIX: the original check used `and`, so a partially specified pair
        # (e.g. size given but hash_count None) slipped through and failed
        # later with a TypeError; require both to be set.
        if not size or not hash_count:
            raise ValueError("Size and hash_count not set. This should never happen.")
        self.size = size
        self.hash_count = hash_count
        self.bit_array, self.indices = self.init_bit_array(size)
        self.persistence = persistence

    def init_bit_array(self, size):
        """Return a zeroed bitarray of ``size`` bits and an int32 index array."""
        _bit_array = bitarray(size)
        _bit_array.setall(0)
        _indices = np.zeros(size, dtype=np.int32)
        return _bit_array, _indices

    def hash_functions(self, string):
        """Return the two base digests (SHA-256, MD5) used for double hashing."""
        # h1(x)
        hash1 = int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16)
        # h2(x)
        hash2 = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16)
        return hash1, hash2

    def lookup(self, string):
        """Return False if ``string`` is definitely absent, True if it may be present."""
        hash1, hash2 = self.hash_functions(string)
        for seed in range(self.hash_count):
            index = (hash1 + seed * hash2) % self.size
            if self.bit_array[index] == 0:
                return False
        return True

    def add(self, string):
        """Insert ``string`` by setting its hash_count probe bits."""
        hash1, hash2 = self.hash_functions(string)
        for seed in range(self.hash_count):
            index = (hash1 + seed * hash2) % self.size
            self.bit_array[index] = 1

    def save(self):
        """Persist the current bit array through the persistence backend."""
        self.persistence.save(self.bit_array)

    def load(self):
        """Load the bit array; reinitialize and re-save if its length is
        inconsistent with self.size (treated as corruption)."""
        self.bit_array = self.persistence.load()
        length_in_bytes = int(len(self.bit_array)/8)
        expected_length = math.ceil(self.size / 8)
        if length_in_bytes != expected_length:
            logging.warning("Bit array length does not match expected size, and so might be corrupted. Reinitializing.")
            self.bit_array, self.indices = self.init_bit_array(self.size)
            self.save()

    @staticmethod
    def optimal_bloom_filter_params(n, p):
        """
        Calculate the optimal bit array size (m) and number of hash functions (k)
        for a Bloom filter.
        n: expected number of items to be stored
        p: acceptable false positive probability
        Returns a tuple (m, k)
        """
        m = - (n * math.log(p)) / (math.log(2) ** 2)
        k = (m / n) * math.log(2)
        return int(math.ceil(m)), int(math.ceil(k))
|
evocodebench_data_172
|
from pydantic import BaseModel
from typing import Dict, List
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
from tanuki.language_models.llm_configs import DEFAULT_TEACHER_MODELS, DEFAULT_STUDENT_MODELS
from tanuki.constants import DEFAULT_TEACHER_MODEL_NAMES, DEFAULT_DISTILLED_MODEL_NAME, \
DISTILLED_MODEL, TEACHER_MODEL
from tanuki.language_models.llm_configs.model_config_factory import ModelConfigFactory
# Shared factory instance used by FunctionConfig.load_from_dict to rebuild
# model configs from their serialized dict form.
config_factory = ModelConfigFactory()
class FunctionConfig(BaseModel):
    """
    The function config to execute the inference for the function and distillation.
    Parameters
    ----------
    distilled_model : BaseModelConfig -- the distilled model config
    current_model_stats : Dict -- the current model stats
    last_training_run : Dict -- the last training run
    current_training_run : Dict -- the current training run
    teacher_models : List[BaseModelConfig] -- the teacher models
    nr_of_training_runs : int -- the number of training runs
    """
    # Student model currently served; replaced once a finetuning run succeeds
    # (see update_with_finetuned_response).
    distilled_model: BaseModelConfig = DEFAULT_STUDENT_MODELS[DEFAULT_DISTILLED_MODEL_NAME]
    # NOTE(review): mutable field defaults rely on pydantic copying defaults
    # per instance — confirm against the pydantic version in use.
    current_model_stats : Dict = {
        "trained_on_datapoints": 0,
        "running_faults": []}
    last_training_run : Dict = {"trained_on_datapoints": 0}
    current_training_run : Dict = {}
    teacher_models : List[BaseModelConfig] = [DEFAULT_TEACHER_MODELS[teacher_model_name] for teacher_model_name in DEFAULT_TEACHER_MODEL_NAMES]
    nr_of_training_runs : int = 0

    def load_from_dict(self, json_dict):
        """
        Load the function config from a dict
        Args:
            json_dict: The dict to load the function config from
        Returns:
            The function config (self, for chaining)
        """
        self.distilled_model = config_factory.create_config(json_dict["distilled_model"], DISTILLED_MODEL)
        self.current_model_stats = json_dict["current_model_stats"]
        self.last_training_run = json_dict["last_training_run"]
        self.current_training_run = json_dict["current_training_run"]
        self.nr_of_training_runs = json_dict["nr_of_training_runs"]
        # Only override the default teacher models when the dict supplies a
        # non-empty list; otherwise the class-level defaults stay in effect.
        if "teacher_models" in json_dict and len(json_dict["teacher_models"]) > 0:
            self.teacher_models = [config_factory.create_config(teacher_model, TEACHER_MODEL) for teacher_model in json_dict["teacher_models"]]
        return self

    def to_dict(self):
        """
        Convert the function config to a dict
        Returns:
            The dict
        """
        try:
            # pydantic v2 API
            config_dictionary = self.model_dump()
        except AttributeError as e:
            # pydantic v1 fallback
            config_dictionary = self.dict()
        return config_dictionary

    def update_with_finetuned_response(self, response):
        """
        Update the function config with the finetuned response
        Args:
            response: The finetuned response
        """
        if response.status == "failed":
            # Discard the failed run; keep serving the current distilled model.
            self.current_training_run = {}
        else:
            # Promote the freshly finetuned model and roll the run bookkeeping
            # forward (current run becomes the last run, stats are reset).
            self.distilled_model = response.fine_tuned_model
            self.last_training_run = self.current_training_run
            self.current_model_stats = {
                "trained_on_datapoints": self.current_training_run[
                    "trained_on_datapoints"],
                "running_faults": []}
            self.nr_of_training_runs += 1
            self.current_training_run = {}
|
evocodebench_data_173
|
from typing import List
import logging
import time
# import abstract base class
from openai import OpenAI
from openai.types import CreateEmbeddingResponse
from openai.types.fine_tuning import FineTuningJob
from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API
from tanuki.models.embedding import Embedding
from tanuki.language_models.embedding_api_abc import Embedding_API
from tanuki.language_models.llm_api_abc import LLM_API
import os
from tanuki.constants import DEFAULT_DISTILLED_MODEL_NAME
from tanuki.language_models.llm_configs.openai_config import OpenAIConfig
from tanuki.models.finetune_job import FinetuneJob
import copy
# Chat-completions endpoint used by OpenAI_API.generate via raw HTTP
# (requests); embeddings and finetuning go through the official client.
OPENAI_URL = "https://api.openai.com/v1/chat/completions"
import requests
# Generation kwargs accepted by OpenAI_API.generate; anything else is logged
# as unused and ignored.
LLM_GENERATION_PARAMETERS = ["temperature", "top_p", "max_new_tokens", "frequency_penalty", "presence_penalty"]
class OpenAI_API(LLM_API, Embedding_API, LLM_Finetune_API):
    """OpenAI-backed implementation of the LLM, embedding and finetuning APIs.

    Chat generation goes through the raw HTTP endpoint (OPENAI_URL) with
    retries; embeddings and finetuning use the official OpenAI client, which
    is created lazily in check_api_key().
    """

    def __init__(self) -> None:
        # initialise the abstract base class
        super().__init__()
        # The key may legitimately be absent here; check_api_key() re-reads
        # the environment and raises only when an API call is attempted.
        self.api_key = os.environ.get("OPENAI_API_KEY")
        # Lazily constructed OpenAI client (see check_api_key).
        self.client = None

    def embed(self, texts: List[str], model: OpenAIConfig, **kwargs) -> List[Embedding]:
        """
        Generate embeddings for the provided texts using the specified OpenAI model.
        Lightweight wrapper over the OpenAI client.
        :param texts: A list of texts to embed.
        :param model: The model to use for embeddings.
        :return: A list of embeddings, or None if the API call failed.
        """
        self.check_api_key()
        try:
            response: CreateEmbeddingResponse = self.client.embeddings.create(
                input=texts,
                model=model.model_name,
                **kwargs
            )
            assert response.object == "list"
            assert len(response.data) == len(texts)
            embeddings = []
            for embedding_response in response.data:
                assert embedding_response.object == "embedding"
                embeddings.append(Embedding(embedding_response.embedding))
            return embeddings
        except Exception as e:
            # NOTE(review): errors are swallowed and None is returned (best
            # effort); callers must handle a None result.
            print(f"An error occurred: {e}")
            return None

    def generate(self, model, system_message, prompt, **kwargs):
        """
        The main generation function, given the args, kwargs, function_modeler, function description and model type, generate a response
        Args
            model (OpenAIConfig): The model to use for generation.
            system_message (str): The system message to use for generation.
            prompt (str): The prompt to use for generation.
            kwargs (dict): Additional generation parameters.
        """
        self.check_api_key()
        temperature = kwargs.get("temperature", 0.1)
        top_p = kwargs.get("top_p", 1)
        frequency_penalty = kwargs.get("frequency_penalty", 0)
        presence_penalty = kwargs.get("presence_penalty", 0)
        max_new_tokens = kwargs.get("max_new_tokens")
        # check if there are any generation parameters that are not supported
        unsupported_params = [param for param in kwargs.keys() if param not in LLM_GENERATION_PARAMETERS]
        if len(unsupported_params) > 0:
            # log warning
            logging.warning(f"Unused generation parameters sent as input: {unsupported_params}."\
                            f"For OpenAI, only the following parameters are supported: {LLM_GENERATION_PARAMETERS}")
        params = {
            "model": model.model_name,
            "temperature": temperature,
            "max_tokens": max_new_tokens,
            "top_p": top_p,
            "frequency_penalty": frequency_penalty,
            "presence_penalty": presence_penalty,
        }
        # Prepend the start token so the model's output is anchored for the
        # parsing below.
        if model.parsing_helper_tokens["start_token"]:
            prompt += model.parsing_helper_tokens["start_token"]
        messages = [
            {
                "role": "system",
                "content": system_message
            },
            {
                "role": "user",
                "content": prompt
            }
        ]
        params["messages"] = messages
        counter = 0
        choice = None
        # initiate response so exception logic doesnt error out when checking for error in response
        response = {}
        # Up to six attempts (counter 0..5) with exponential backoff of
        # 2 ** counter seconds between failures.
        while counter <= 5:
            try:
                openai_headers = {
                    "Authorization": f"Bearer {self.api_key}",
                    "Content-Type": "application/json",
                }
                response = requests.post(
                    OPENAI_URL, headers=openai_headers, json=params, timeout=50
                )
                response = response.json()
                # strip stray single quotes that may wrap the content
                choice = response["choices"][0]["message"]["content"].strip("'")
                break
            except Exception as e:
                # An invalid API key will never succeed on retry: fail fast.
                if ("error" in response and
                        "code" in response["error"] and
                        response["error"]["code"] == 'invalid_api_key'):
                    raise Exception(f"The supplied OpenAI API key {self.api_key} is invalid")
                if counter == 5:
                    raise Exception(f"OpenAI API failed to generate a response: {e}")
                counter += 1
                time.sleep(2 ** counter)
                continue
        if not choice:
            raise Exception("OpenAI API failed to generate a response")
        if model.parsing_helper_tokens["end_token"]:
            # remove the end token from the choice
            choice = choice.split(model.parsing_helper_tokens["end_token"])[0]
            # check if starting token is in choice
            if model.parsing_helper_tokens["start_token"] in choice:
                # remove the starting token from the choice
                choice = choice.split(model.parsing_helper_tokens["start_token"])[-1]
        return choice

    def list_finetuned(self, model_config, limit=100, **kwargs) -> List[FinetuneJob]:
        """Return up to ``limit`` finetuning jobs known to OpenAI, wrapped as FinetuneJob objects."""
        self.check_api_key()
        response = self.client.fine_tuning.jobs.list(limit=limit)
        jobs = []
        for job in response.data:
            finetune_job = self.create_finetune_job(job, model_config)
            jobs.append(finetune_job)
        return jobs

    def get_finetuned(self, job_id, model_config: OpenAIConfig) -> FinetuneJob:
        """Fetch a single finetuning job by id and wrap it as a FinetuneJob."""
        self.check_api_key()
        response = self.client.fine_tuning.jobs.retrieve(job_id)
        finetune_job = self.create_finetune_job(response, model_config= model_config)
        return finetune_job

    def finetune(self, file, suffix, model_config, **kwargs) -> FinetuneJob:
        """Upload ``file`` as training data and submit a finetuning job for it."""
        self.check_api_key()
        # Use the stream as a file
        response = self.client.files.create(file=file, purpose='fine-tune')
        training_file_id = response.id
        if not model_config.base_model_for_sft:
            model_config.base_model_for_sft = DEFAULT_DISTILLED_MODEL_NAME
        # submit the finetuning job
        finetuning_response: FineTuningJob = self.client.fine_tuning.jobs.create(training_file=training_file_id,
                                                                                model=model_config.base_model_for_sft,
                                                                                suffix=suffix)
        finetune_job = self.create_finetune_job(finetuning_response, model_config)
        return finetune_job

    def create_finetune_job(self, response: FineTuningJob, model_config: OpenAIConfig) -> FinetuneJob:
        """Build a FinetuneJob from an OpenAI response, with the model name
        swapped to the fine-tuned model."""
        # Deep copy so the caller's config object is not mutated.
        finetuned_model_config = copy.deepcopy(model_config)
        finetuned_model_config.model_name = response.fine_tuned_model
        finetune_job = FinetuneJob(response.id, response.status, finetuned_model_config)
        return finetune_job

    def check_api_key(self):
        """Ensure an API key is available and the OpenAI client is built.

        Raises
        ------
        ValueError
            If no API key can be found in the environment.
        """
        # check if api key is not none
        if not self.api_key:
            # try to get the api key from the environment, maybe it has been set later
            self.api_key = os.getenv("OPENAI_API_KEY")
        if not self.api_key:
            raise ValueError("OpenAI API key is not set")
        if not self.client:
            self.client = OpenAI(api_key=self.api_key)
|
evocodebench_data_174
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# Riskfolio-Lib, Copyright (c) 2020-2023, Dany Cajas, Licensed under BSD 3 clause.
# Statsmodels, Copyright (C) 2006, Jonathan E. Taylor, Licensed under BSD 3 clause.
from enum import auto
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.optimize as sco
import scipy.spatial.distance as scd
import scipy.special as scs
from scipy.sparse import csr_matrix
from skfolio.utils.tools import AutoEnum
# Explicit public API of this module.
# NOTE(review): is_positive_definite is defined below but not exported here —
# confirm whether that omission is intentional.
__all__ = [
    "NBinsMethod",
    "n_bins_freedman",
    "n_bins_knuth",
    "is_cholesky_dec",
    "assert_is_square",
    "assert_is_symmetric",
    "assert_is_distance",
    "cov_nearest",
    "cov_to_corr",
    "corr_to_cov",
    "commutation_matrix",
    "compute_optimal_n_clusters",
    "rand_weights",
    "rand_weights_dirichlet",
]
class NBinsMethod(AutoEnum):
    """Enumeration of the supported histogram bin-count estimation methods.

    Members
    -------
    FREEDMAN : str
        Freedman-Diaconis rule (see :func:`n_bins_freedman`).
    KNUTH : str
        Knuth's rule (see :func:`n_bins_knuth`).
    """

    FREEDMAN = auto()
    KNUTH = auto()
def n_bins_freedman(x: np.ndarray) -> int:
    """Compute the optimal histogram bin count using the Freedman-Diaconis rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin count.

    References
    ----------
    .. [1] "On the histogram as a density estimator: L2 theory".
        Freedman & Diaconis (1981).
    """
    if x.ndim != 1:
        raise ValueError("`x` must be a 1d-array")
    n_obs = len(x)
    quartile_1, quartile_3 = np.percentile(x, [25, 75])
    # Freedman-Diaconis bin width: 2 * IQR / n^(1/3).
    bin_width = 2 * (quartile_3 - quartile_1) / (n_obs ** (1 / 3))
    if bin_width == 0:
        # Degenerate IQR (e.g. constant data): fall back to 5 bins.
        return 5
    n_bins = max(1, np.ceil((np.max(x) - np.min(x)) / bin_width))
    return int(round(n_bins))
def n_bins_knuth(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using Knuth's rule [1]_.
    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.
    Returns
    -------
    n_bins : int
        The optimal bin size.
    References
    ----------
    .. [1] "Optimal Data-Based Binning for Histograms".
        Knuth.
    """
    x = np.sort(x)
    n = len(x)
    def func(y: float):
        # Objective minimized by fmin: the negated log-posterior of an
        # equal-width histogram with int(y) bins, per Knuth's rule [1]_.
        y = y[0]  # fmin passes the parameter as a length-1 array
        if y <= 0:
            # Non-positive bin counts are invalid: +inf pushes the
            # optimizer away from them.
            return np.inf
        bin_edges = np.linspace(x[0], x[-1], int(y) + 1)
        hist, _ = np.histogram(x, bin_edges)
        return -(
            n * np.log(y)
            + scs.gammaln(0.5 * y)
            - y * scs.gammaln(0.5)
            - scs.gammaln(n + 0.5 * y)
            + np.sum(scs.gammaln(hist + 0.5))
        )
    # Seed the search with the Freedman-Diaconis estimate.
    n_bins_init = n_bins_freedman(x)
    n_bins = sco.fmin(func, n_bins_init, disp=0)[0]
    return int(round(n_bins))
def rand_weights_dirichlet(n: int) -> np.array:
    """Draw n random weights summing to one from a Dirichlet(1, ..., 1)
    distribution (uniform distribution over the simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    concentration = np.ones(n)
    return np.random.dirichlet(concentration)
def rand_weights(n: int, zeros: int = 0) -> np.array:
    """Draw n random weights summing to one from a uniform distribution
    (non-uniform distribution over the simplex), optionally forcing some
    weights to zero.

    Parameters
    ----------
    n : int
        Number of weights.
    zeros : int, default=0
        The number of weights to randomly set to zeros.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    weights = np.random.rand(n)
    if zeros > 0:
        # Zero out a random subset of positions before normalizing.
        dropped = np.random.choice(n, zeros, replace=False)
        weights[dropped] = 0
    return weights / sum(weights)
def is_cholesky_dec(x: np.ndarray) -> bool:
    """Return True if a Cholesky decomposition of the matrix can be computed.

    The matrix must be Hermitian (symmetric if real-valued) and
    positive-definite; no explicit Hermitian check is performed.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if Cholesky decomposition can be applied to the matrix, False
        otherwise.
    """
    # Around 100 times faster than checking for positive eigenvalues with
    # np.linalg.eigh.
    try:
        np.linalg.cholesky(x)
        return True
    # BUGFIX: catch the public np.linalg.LinAlgError; the legacy
    # np.linalg.linalg submodule alias was removed in NumPy 2.0, so the old
    # `np.linalg.linalg.LinAlgError` path raises AttributeError there.
    except np.linalg.LinAlgError:
        return False
def is_positive_definite(x: np.ndarray) -> bool:
    """Return True if the matrix is positive definite.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if the matrix is positive definite, False otherwise.
    """
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues > 0)
def assert_is_square(x: np.ndarray) -> None:
    """Raise an error if the matrix is not square.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError
        If the matrix is not square.
    """
    is_square = x.ndim == 2 and x.shape[0] == x.shape[1]
    if not is_square:
        raise ValueError("The matrix must be square")
def assert_is_symmetric(x: np.ndarray) -> None:
    """Raise an error if the matrix is not symmetric.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Raises
    ------
    ValueError
        If the matrix is not symmetric (or not square).
    """
    assert_is_square(x)
    symmetric = np.allclose(x, x.T)
    if not symmetric:
        raise ValueError("The matrix must be symmetric")
def assert_is_distance(x: np.ndarray) -> None:
    """Raise an error if the matrix is not a distance matrix.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError
        If the matrix is not symmetric or has a non-zero diagonal.
    """
    assert_is_symmetric(x)
    diagonal = np.diag(x)
    if not np.allclose(diagonal, np.zeros(x.shape[0]), atol=1e-5):
        raise ValueError(
            "The distance matrix must have diagonal elements close to zeros"
        )
def cov_to_corr(cov: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Convert a covariance matrix to a correlation matrix.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    Returns
    -------
    corr, std : tuple[ndarray of shape (n, n), ndarray of shape (n, )]
        Correlation matrix and standard-deviation vector.
    """
    if cov.ndim != 2:
        raise ValueError(f"`cov` must be a 2D array, got a {cov.ndim}D array")
    variances = np.diag(cov)
    std = np.sqrt(variances)
    # Divide each column, then each row, by the standard deviations.
    corr = cov / std / std[:, None]
    return corr, std
def corr_to_cov(corr: np.ndarray, std: np.ndarray):
    """Convert a correlation matrix to a covariance matrix given its
    standard-deviation vector.

    Parameters
    ----------
    corr : ndarray of shape (n, n)
        Correlation matrix.
    std : ndarray of shape (n, )
        Standard-deviation vector.

    Returns
    -------
    cov : ndarray of shape (n, n)
        Covariance matrix.
    """
    if std.ndim != 1:
        raise ValueError(f"`std` must be a 1D array, got a {std.ndim}D array")
    if corr.ndim != 2:
        raise ValueError(f"`corr` must be a 2D array, got a {corr.ndim}D array")
    # Scale each column, then each row, by the standard deviations.
    return corr * std * std[:, None]
# Eigenvalue floor used by cov_nearest's clipping branch; values below this
# are raised to it (see the comment inside cov_nearest about smaller floors).
_CLIPPING_VALUE = 1e-13


def cov_nearest(cov: np.ndarray, higham: bool = False, higham_max_iteration: int = 100):
    """Compute the nearest covariance matrix that is positive definite and with a
    cholesky decomposition than can be computed. The variance is left unchanged.
    First, it converts the covariance matrix to a correlation matrix.
    Then, it finds the nearest correlation matrix and converts it back to a covariance
    matrix using the initial standard deviation.
    Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due
    to floating point error and inversely, Cholesky decomposition can success for
    non-SPD matrix. Therefore, we need to test for both. We always start by testing
    for Cholesky decomposition which is significantly faster than checking for positive
    eigenvalues.
    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.
    higham : bool, default=False
        If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,
        otherwise the eigenvalues are clipped to threshold above zeros (1e-13).
        The default (`False`) is to use the clipping method as the Higham & Nick
        algorithm can be slow for large datasets.
    higham_max_iteration : int, default=100
        Maximum number of iteration of the Higham & Nick (2002) algorithm.
        The default value is `100`.
    Returns
    -------
    cov : ndarray
        The nearest covariance matrix.
    References
    ----------
    .. [1] "Computing the nearest correlation matrix - a problem from finance"
        IMA Journal of Numerical Analysis
        Higham & Nick (2002)
    """
    assert_is_square(cov)
    assert_is_symmetric(cov)
    # Around 100 times faster than checking eigenvalues with np.linalg.eigh
    if is_cholesky_dec(cov) and is_positive_definite(cov):
        return cov
    corr, std = cov_to_corr(cov)
    if higham:
        eps = np.finfo(np.float64).eps * 5
        diff = np.zeros(corr.shape)
        x = corr.copy()
        for _ in range(higham_max_iteration):
            # Correction step (cf. Higham 2002): remove the previous
            # adjustment before projecting onto the PSD cone via eigenvalue
            # flooring.
            x_adj = x - diff
            eig_vals, eig_vecs = np.linalg.eigh(x_adj)
            x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T
            diff = x - x_adj
            # Project back onto correlation matrices (unit diagonal), then
            # stop as soon as the rebuilt covariance passes both checks.
            np.fill_diagonal(x, 1)
            cov = corr_to_cov(x, std)
            if is_cholesky_dec(cov) and is_positive_definite(cov):
                break
        else:
            # for/else: the loop exhausted higham_max_iteration without
            # converging.
            raise ValueError("Unable to find the nearest positive definite matrix")
    else:
        eig_vals, eig_vecs = np.linalg.eigh(corr)
        # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to
        # consider the matrix non-psd is some corner cases (see test/test_stats.py)
        x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T
        x, _ = cov_to_corr(x)
        cov = corr_to_cov(x, std)
    return cov
def commutation_matrix(x):
    """Compute the commutation matrix.

    The returned sparse matrix K satisfies K @ vec(x) == vec(x.T), where
    vec stacks columns (Fortran order).

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    K : ndarray of shape (m * n, m * n)
        The commutation matrix.
    """
    n_rows, n_cols = x.shape
    size = n_rows * n_cols
    rows = np.arange(size)
    # Column index of the single 1-entry in each row, obtained by reading the
    # row indices in column-major order.
    cols = rows.reshape((n_rows, n_cols), order="F").ravel()
    ones = np.ones(size, dtype=np.int8)
    return csr_matrix((ones, (rows, cols)), shape=(size, size))
def compute_optimal_n_clusters(distance: np.ndarray, linkage_matrix: np.ndarray) -> int:
    r"""Compute the optimal number of clusters based on Two-Order Difference to Gap
    Statistic [1]_.

    The Two-Order Difference to Gap Statistic has been developed to improve the
    performance and stability of the Tibshiranis Gap statistic.
    It applies the two-order difference of the within-cluster dispersion to replace the
    reference null distribution in the Gap statistic.

    The number of cluster :math:`k` is determined by:

    .. math:: \begin{cases}
                \begin{aligned}
                &\max_{k} & & W_{k+2} + W_{k} - 2 W_{k+1} \\
                &\text{s.t.} & & 1 \ge c \ge max\bigl(8, \sqrt{n}\bigr) \\
                \end{aligned}
              \end{cases}

    with :math:`n` the sample size and :math:`W_{k}` the within-cluster dispersions
    defined as:

    .. math:: W_{k} = \sum_{i=1}^{k} \frac{D_{i}}{2|C_{i}|}

    where :math:`|C_{i}|` is the cardinality of cluster :math:`i` and :math:`D_{i}` its
    density defined as:

    .. math:: D_{i} = \sum_{u \in C_{i}} \sum_{v \in C_{i}} d(u,v)

    with :math:`d(u,v)` the distance between u and v.

    Parameters
    ----------
    distance : ndarray of shape (n, n)
        Distance matrix.

    linkage_matrix : ndarray of shape (n - 1, 4)
        Linkage matrix.

    Returns
    -------
    value : int
        Optimal number of clusters.

    References
    ----------
    .. [1] "Application of two-order difference to gap statistic".
        Yue, Wang & Wei (2009)
    """
    # One column per cut level of the dendrogram; presumably column j holds
    # the labels after j merges — TODO confirm against scipy's cut_tree
    # column ordering.
    cut_tree = sch.cut_tree(linkage_matrix)
    n = cut_tree.shape[1]
    # Evaluate at most max(8, sqrt(n)) candidate cluster counts.
    max_clusters = max(8, round(np.sqrt(n)))
    dispersion = []
    for k in range(max_clusters):
        # Column n - k - 1 should correspond to a partition into k + 1
        # clusters — NOTE(review): verify the ordering assumption above.
        level = cut_tree[:, n - k - 1]
        cluster_density = []
        for i in range(np.max(level) + 1):
            cluster_idx = np.argwhere(level == i).flatten()
            # Condensed pairwise distances inside cluster i (checks=False
            # skips symmetry / zero-diagonal validation of the sub-matrix).
            cluster_dists = scd.squareform(
                distance[cluster_idx, :][:, cluster_idx], checks=False
            )
            if cluster_dists.shape[0] != 0:
                # Mean intra-cluster distance; nan_to_num guards degenerate
                # clusters whose mean would be NaN.
                cluster_density.append(np.nan_to_num(cluster_dists.mean()))
        # Within-cluster dispersion W_k: sum of mean intra-cluster distances.
        dispersion.append(np.sum(cluster_density))
    dispersion = np.array(dispersion)
    # Two-order difference: W_{k+2} + W_k - 2 * W_{k+1}.
    gaps = np.roll(dispersion, -2) + dispersion - 2 * np.roll(dispersion, -1)
    # np.roll wraps around, so the last two entries are meaningless: drop them.
    gaps = gaps[:-2]
    # k=0 represents one cluster
    k = np.argmax(gaps) + 2
    return k
|
evocodebench_data_175
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# Riskfolio-Lib, Copyright (c) 2020-2023, Dany Cajas, Licensed under BSD 3 clause.
# Statsmodels, Copyright (C) 2006, Jonathan E. Taylor, Licensed under BSD 3 clause.
from enum import auto
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.optimize as sco
import scipy.spatial.distance as scd
import scipy.special as scs
from scipy.sparse import csr_matrix
from skfolio.utils.tools import AutoEnum
__all__ = [
"NBinsMethod",
"n_bins_freedman",
"n_bins_knuth",
"is_cholesky_dec",
"assert_is_square",
"assert_is_symmetric",
"assert_is_distance",
"cov_nearest",
"cov_to_corr",
"corr_to_cov",
"commutation_matrix",
"compute_optimal_n_clusters",
"rand_weights",
"rand_weights_dirichlet",
]
class NBinsMethod(AutoEnum):
    """Enumeration of the methods used to compute the optimal number of
    histogram bins.

    Attributes
    ----------
    FREEDMAN : str
        Freedman-Diaconis rule (see `n_bins_freedman`).

    KNUTH : str
        Knuth's rule (see `n_bins_knuth`).
    """

    FREEDMAN = auto()
    KNUTH = auto()
def n_bins_freedman(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using the Freedman-Diaconis
    rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "On the histogram as a density estimator: L2 theory".
        Freedman & Diaconis (1981).
    """
    if x.ndim != 1:
        raise ValueError("`x` must be a 1d-array")
    n_observations = len(x)
    q1, q3 = np.percentile(x, [25, 75])
    # Freedman-Diaconis bin width: 2 * IQR / n^(1/3).
    bin_width = 2 * (q3 - q1) / (n_observations ** (1 / 3))
    if bin_width == 0:
        # Degenerate IQR (e.g. near-constant data): fall back to 5 bins.
        return 5
    data_range = np.max(x) - np.min(x)
    return int(round(max(1, np.ceil(data_range / bin_width))))
def n_bins_knuth(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using Knuth's rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "Optimal Data-Based Binning for Histograms".
        Knuth.
    """
    x = np.sort(x)
    n = len(x)

    def _neg_log_posterior(m: float):
        # Negative of Knuth's marginal log-posterior for `m` equal-width bins;
        # fmin passes a length-1 array.
        m = m[0]
        if m <= 0:
            return np.inf
        edges = np.linspace(x[0], x[-1], int(m) + 1)
        counts, _ = np.histogram(x, edges)
        return -(
            n * np.log(m)
            + scs.gammaln(0.5 * m)
            - m * scs.gammaln(0.5)
            - scs.gammaln(n + 0.5 * m)
            + np.sum(scs.gammaln(counts + 0.5))
        )

    # Seed the optimizer with the Freedman-Diaconis estimate.
    initial_guess = n_bins_freedman(x)
    optimum = sco.fmin(_neg_log_posterior, initial_guess, disp=0)[0]
    return int(round(optimum))
def rand_weights_dirichlet(n: int) -> np.array:
    """Produce n random weights summing to one, drawn from a Dirichlet
    distribution (i.e. uniformly over the unit simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    # A flat Dirichlet (all concentration parameters equal to 1) is the
    # uniform distribution over the simplex.
    alpha = np.ones(n)
    return np.random.dirichlet(alpha)
def rand_weights(n: int, zeros: int = 0) -> np.array:
    """Produce n random weights that sum to one from a uniform distribution
    (a non-uniform distribution over the simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    zeros : int, default=0
        The number of weights to randomly set to zeros.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    weights = np.random.rand(n)
    if zeros > 0:
        # Zero-out `zeros` randomly chosen positions before normalizing.
        masked_idx = np.random.choice(n, zeros, replace=False)
        weights[masked_idx] = 0
    return weights / sum(weights)
def is_cholesky_dec(x: np.ndarray) -> bool:
    """Return True if a Cholesky decomposition of the matrix can be computed.

    The matrix must be Hermitian (symmetric if real-valued) and
    positive-definite. No checking is performed to verify whether the matrix
    is Hermitian or not.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if Cholesky decomposition can be applied to the matrix, False
        otherwise.
    """
    # Around 100 times faster than checking for positive eigenvalues with
    # np.linalg.eigh.
    try:
        np.linalg.cholesky(x)
        return True
    except np.linalg.LinAlgError:
        # Catch the public exception name: `np.linalg.linalg` was a private
        # alias removed in NumPy 2.0.
        return False
def is_positive_definite(x: np.ndarray) -> bool:
    """Return True if the matrix is positive definite.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if the matrix is positive definite, False otherwise.
    """
    # Positive definite <=> every eigenvalue is strictly positive.
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues > 0)
def assert_is_square(x: np.ndarray) -> None:
    """Raise a ValueError if the matrix is not square.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not square.
    """
    is_square = x.ndim == 2 and x.shape[0] == x.shape[1]
    if not is_square:
        raise ValueError("The matrix must be square")
def assert_is_symmetric(x: np.ndarray) -> None:
    """Raise a ValueError if the matrix is not symmetric.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not symmetric.
    """
    assert_is_square(x)
    # Symmetry check with floating-point tolerance.
    if not np.allclose(x, np.transpose(x)):
        raise ValueError("The matrix must be symmetric")
def assert_is_distance(x: np.ndarray) -> None:
    """Raises an error if the matrix is not a distance matrix.

    A distance matrix must be square, symmetric and have a diagonal close to
    zero.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not a distance matrix.
    """
    assert_is_symmetric(x)
    # A distance d(i, i) must be 0; allow small numerical noise (atol=1e-5).
    if not np.allclose(np.diag(x), np.zeros(x.shape[0]), atol=1e-5):
        raise ValueError(
            "The distance matrix must have diagonal elements close to zeros"
        )
def cov_to_corr(cov: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Convert a covariance matrix to a correlation matrix.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    Returns
    -------
    corr, std : tuple[ndarray of shape (n, n), ndarray of shape (n, )]
        Correlation matrix and standard-deviation vector
    """
    if cov.ndim != 2:
        raise ValueError(f"`cov` must be a 2D array, got a {cov.ndim}D array")
    variances = np.diag(cov)
    std = np.sqrt(variances)
    # corr[i, j] = cov[i, j] / (std[i] * std[j]), via broadcasting.
    corr = (cov / std) / std[:, None]
    return corr, std
def corr_to_cov(corr: np.ndarray, std: np.ndarray):
    """Convert a correlation matrix to a covariance matrix given its
    standard-deviation vector.

    Parameters
    ----------
    corr : ndarray of shape (n, n)
        Correlation matrix.

    std : ndarray of shape (n, )
        Standard-deviation vector.

    Returns
    -------
    cov : ndarray of shape (n, n)
        Covariance matrix
    """
    if std.ndim != 1:
        raise ValueError(f"`std` must be a 1D array, got a {std.ndim}D array")
    if corr.ndim != 2:
        raise ValueError(f"`corr` must be a 2D array, got a {corr.ndim}D array")
    # cov[i, j] = corr[i, j] * std[i] * std[j], via broadcasting.
    return (corr * std) * std[:, None]
_CLIPPING_VALUE = 1e-13
def cov_nearest(cov: np.ndarray, higham: bool = False, higham_max_iteration: int = 100):
    """Compute the nearest covariance matrix that is positive definite and with a
    cholesky decomposition than can be computed. The variance is left unchanged.

    First, it converts the covariance matrix to a correlation matrix.
    Then, it finds the nearest correlation matrix and converts it back to a covariance
    matrix using the initial standard deviation.

    Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due
    to floating point error and inversely, Cholesky decomposition can success for
    non-SPD matrix. Therefore, we need to test for both. We always start by testing
    for Cholesky decomposition which is significantly faster than checking for positive
    eigenvalues.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    higham : bool, default=False
        If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,
        otherwise the eigenvalues are clipped to threshold above zeros (1e-13).
        The default (`False`) is to use the clipping method as the Higham & Nick
        algorithm can be slow for large datasets.

    higham_max_iteration : int, default=100
        Maximum number of iteration of the Higham & Nick (2002) algorithm.
        The default value is `100`.

    Returns
    -------
    cov : ndarray
        The nearest covariance matrix.

    References
    ----------
    .. [1] "Computing the nearest correlation matrix - a problem from finance"
        IMA Journal of Numerical Analysis
        Higham & Nick (2002)
    """
    assert_is_square(cov)
    assert_is_symmetric(cov)
    # Around 100 times faster than checking eigenvalues with np.linalg.eigh
    if is_cholesky_dec(cov) and is_positive_definite(cov):
        return cov
    # Work on the correlation matrix so the variances are preserved exactly.
    corr, std = cov_to_corr(cov)
    if higham:
        eps = np.finfo(np.float64).eps * 5
        # `diff` holds the correction term of the alternating-projections
        # scheme from [1]_ — presumably Dykstra's correction; TODO confirm
        # against the reference paper.
        diff = np.zeros(corr.shape)
        x = corr.copy()
        for _ in range(higham_max_iteration):
            x_adj = x - diff
            eig_vals, eig_vecs = np.linalg.eigh(x_adj)
            # Project onto the PSD cone by flooring the eigenvalues at eps.
            x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T
            diff = x - x_adj
            # Project back onto the set of correlation matrices (unit diagonal).
            np.fill_diagonal(x, 1)
            cov = corr_to_cov(x, std)
            if is_cholesky_dec(cov) and is_positive_definite(cov):
                break
        else:
            # Loop exhausted without producing a usable matrix.
            raise ValueError("Unable to find the nearest positive definite matrix")
    else:
        eig_vals, eig_vecs = np.linalg.eigh(corr)
        # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to
        # consider the matrix non-psd is some corner cases (see test/test_stats.py)
        x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T
        # Renormalize so the result has a unit diagonal before converting back.
        x, _ = cov_to_corr(x)
        cov = corr_to_cov(x, std)
    return cov
def commutation_matrix(x):
    """Compute the commutation matrix.

    The commutation matrix ``K`` is the sparse permutation matrix satisfying
    ``K @ vec(A) == vec(A.T)`` for any matrix ``A`` of the same shape as `x`,
    with ``vec`` taken in column-major order.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    K : ndarray of shape (m * n, m * n)
        The commutation matrix.
    """
    n_rows, n_cols = x.shape
    size = n_rows * n_cols
    rows = np.arange(size)
    # A column-major reshape followed by a row-major ravel yields the
    # permutation that maps vec(A) onto vec(A.T).
    cols = rows.reshape((n_rows, n_cols), order="F").ravel()
    ones = np.ones(size, dtype=np.int8)
    return csr_matrix((ones, (rows, cols)), shape=(size, size))
def compute_optimal_n_clusters(distance: np.ndarray, linkage_matrix: np.ndarray) -> int:
    r"""Compute the optimal number of clusters based on Two-Order Difference to Gap
    Statistic [1]_.

    The Two-Order Difference to Gap Statistic has been developed to improve the
    performance and stability of the Tibshiranis Gap statistic.
    It applies the two-order difference of the within-cluster dispersion to replace the
    reference null distribution in the Gap statistic.

    The number of cluster :math:`k` is determined by:

    .. math:: \begin{cases}
                \begin{aligned}
                &\max_{k} & & W_{k+2} + W_{k} - 2 W_{k+1} \\
                &\text{s.t.} & & 1 \ge c \ge max\bigl(8, \sqrt{n}\bigr) \\
                \end{aligned}
              \end{cases}

    with :math:`n` the sample size and :math:`W_{k}` the within-cluster dispersions
    defined as:

    .. math:: W_{k} = \sum_{i=1}^{k} \frac{D_{i}}{2|C_{i}|}

    where :math:`|C_{i}|` is the cardinality of cluster :math:`i` and :math:`D_{i}` its
    density defined as:

    .. math:: D_{i} = \sum_{u \in C_{i}} \sum_{v \in C_{i}} d(u,v)

    with :math:`d(u,v)` the distance between u and v.

    Parameters
    ----------
    distance : ndarray of shape (n, n)
        Distance matrix.

    linkage_matrix : ndarray of shape (n - 1, 4)
        Linkage matrix.

    Returns
    -------
    value : int
        Optimal number of clusters.

    References
    ----------
    .. [1] "Application of two-order difference to gap statistic".
        Yue, Wang & Wei (2009)
    """
    # One column per cut level of the dendrogram; presumably column j holds
    # the labels after j merges — TODO confirm against scipy's cut_tree
    # column ordering.
    cut_tree = sch.cut_tree(linkage_matrix)
    n = cut_tree.shape[1]
    # Evaluate at most max(8, sqrt(n)) candidate cluster counts.
    max_clusters = max(8, round(np.sqrt(n)))
    dispersion = []
    for k in range(max_clusters):
        # Column n - k - 1 should correspond to a partition into k + 1
        # clusters — NOTE(review): verify the ordering assumption above.
        level = cut_tree[:, n - k - 1]
        cluster_density = []
        for i in range(np.max(level) + 1):
            cluster_idx = np.argwhere(level == i).flatten()
            # Condensed pairwise distances inside cluster i (checks=False
            # skips symmetry / zero-diagonal validation of the sub-matrix).
            cluster_dists = scd.squareform(
                distance[cluster_idx, :][:, cluster_idx], checks=False
            )
            if cluster_dists.shape[0] != 0:
                # Mean intra-cluster distance; nan_to_num guards degenerate
                # clusters whose mean would be NaN.
                cluster_density.append(np.nan_to_num(cluster_dists.mean()))
        # Within-cluster dispersion W_k: sum of mean intra-cluster distances.
        dispersion.append(np.sum(cluster_density))
    dispersion = np.array(dispersion)
    # Two-order difference: W_{k+2} + W_k - 2 * W_{k+1}.
    gaps = np.roll(dispersion, -2) + dispersion - 2 * np.roll(dispersion, -1)
    # np.roll wraps around, so the last two entries are meaningless: drop them.
    gaps = gaps[:-2]
    # k=0 represents one cluster
    k = np.argmax(gaps) + 2
    return k
|
evocodebench_data_176
|
import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.models.language_model_output import LanguageModelOutput
from tanuki.utils import approximate_token_count
from tanuki.validator import Validator
from tanuki.models.api_manager import APIManager
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
import logging
class LanguageModelManager(object):
    """
    The LanguageModelManager is responsible for managing the language models and their outputs operationally,
    this includes:
    - Generating outputs from the language models
    - Repairing outputs from the language models
    - Saving outputs from the language models
    - Finetuning the language models from the saved outputs
    """

    def __init__(self,
                 function_modeler: FunctionModeler,
                 api_provider: APIManager,
                 generation_token_limit=512,) -> None:
        """Store collaborators and initialize per-function bookkeeping.

        Parameters
        ----------
        function_modeler : FunctionModeler
            Supplies models, align statements and finetune persistence.
        api_provider : APIManager
            Maps a provider name to the LLM API used for generation.
        generation_token_limit : int, default=512
            Fallback ``max_new_tokens`` when the caller supplies none.
        """
        self.api_provider = api_provider
        self.function_modeler = function_modeler
        self.default_generation_length = generation_token_limit
        # func_hash -> {"model": str, "examples": list[str]}; used only to
        # log model switches and align-statement counts.
        self.initialized_functions = {}
        # NOTE(review): never written in this class — presumably reserved for
        # future token accounting; confirm before relying on it.
        self.token_counts = {}

    def __call__(self,
                 args,
                 function_description: FunctionDescription,
                 kwargs,
                 validator: Validator,
                 generation_parameters: dict) -> Any:
        """Generate, validate and (if needed) repair one function output.

        Returns the parsed output instantiated to the function's declared
        output type hint. Raises TypeError when the output cannot be repaired
        into a valid instance of that type.
        """
        # add the generation length if not there
        if "max_new_tokens" not in generation_parameters:
            generation_parameters["max_new_tokens"] = self.default_generation_length
        output = self.generate(args, kwargs, function_description, generation_parameters)
        # start parsing the object, very hacky way for the time being
        choice_parsed = self._parse_choice(output)
        valid = validator.check_type(choice_parsed, function_description.output_type_hint)
        if not valid:
            choice, choice_parsed, successful_repair = self.repair_output(args,
                                                                          kwargs,
                                                                          function_description,
                                                                          output.generated_response,
                                                                          validator,
                                                                          generation_parameters)
            if not successful_repair:
                raise TypeError(
                    f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'")
            output.generated_response = choice
            output.distilled_model = False
        datapoint = FunctionExample(args, kwargs, output.generated_response)
        # Persist the datapoint for future finetuning unless it came from an
        # already-distilled (student) model.
        if output.suitable_for_finetuning and not output.distilled_model:
            self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description,
                                                                 datapoint, repaired=not valid)
        instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint)
        return instantiated

    def _parse_choice(self, output):
        """Best-effort parse of the raw model response: JSON first, then
        ``eval``, finally the raw string unchanged.

        NOTE(review): ``eval`` on model output is a code-execution risk and
        the bare excepts hide parse errors — consider ``ast.literal_eval``
        and narrower exception handling.
        """
        try:
            # json load
            choice_parsed = json.loads(output.generated_response)
        except:
            # if it fails, it's not a json object, try eval
            try:
                choice_parsed = eval(output.generated_response)
            except:
                choice_parsed = output.generated_response
        return choice_parsed

    def generate(self, args, kwargs, function_description, llm_parameters={}):
        """
        The main generation function, given the args, kwargs, function description and model type, generate a response and check if the datapoint can be saved to the finetune dataset

        NOTE(review): the mutable default ``llm_parameters={}`` is shared
        across calls; it is only read here, but callers should not rely on
        that.
        """
        func_hash = function_description.__hash__()
        prompt, model, save_to_finetune, is_distilled_model = self.get_generation_case(args, kwargs,
                                                                                      function_description,
                                                                                      llm_parameters,
                                                                                      func_hash)
        # loggings
        current_function_setup = self.initialized_functions.get(func_hash, None)  # getting the current function setup - model and align statements
        if current_function_setup:
            generator_model = current_function_setup["model"]
            if is_distilled_model:
                logging.info(f"Generating function outputs for {function_description.name} with a finetuned model: {model.model_name}.")
                self.initialized_functions[func_hash]["model"] = model.model_name
            elif generator_model == "":
                # Empty string means no model has been logged for this
                # function yet.
                logging.info(f"Found {len(current_function_setup['examples'])} align statements for {function_description.name}. Generating function outputs with {model.model_name}.")
                self.initialized_functions[func_hash]["model"] = model.model_name
            elif generator_model != model.model_name:
                logging.info(f"Switching output generation from {generator_model} to {model.model_name} for function {function_description.name}.")
                self.initialized_functions[func_hash]["model"] = model.model_name
        choice = self._synthesise_answer(prompt, model, llm_parameters)
        output = LanguageModelOutput(choice, save_to_finetune, is_distilled_model)
        return output

    def _synthesise_answer(self, prompt, model, llm_parameters):
        """
        Synthesise an answer given the prompt, model, model_type and llm_parameters

        Args:
            prompt (str): The prompt to send to the model
            model (BaseModelConfig): The model to use for generation
            llm_parameters (dict): The parameters to use for generation
        return:
            choice (str): The generated response
        """
        system_message = model.system_message
        # Dispatch to the configured provider's API wrapper.
        return self.api_provider[model.provider].generate(model, system_message, prompt, **llm_parameters)

    def get_generation_case(self, args, kwargs, function_description, llm_parameters, func_hash):
        """
        Get the generation case with the correct prompt and model
        First get the current model, then if distilled model, do zero-shot prompt and return False as suitable_for_finetune
        If not distilled model, check if suitable for finetuning, create the prompt and return the correct model given the token count
        """
        f = str(function_description.__dict__.__repr__())
        distilled_model, teacher_models = self.function_modeler.get_models(function_description)
        # An empty model name marks "no distilled model available".
        is_distilled_model = distilled_model.model_name != ""
        suitable_for_distillation, input_prompt_token_count = self.suitable_for_finetuning_token_check(args, kwargs, f,
                                                                                                      distilled_model)
        if func_hash not in self.initialized_functions:
            # initialise the initialized_functions dict
            self.initialized_functions[func_hash] = {"model": "", "examples": []}
        # no examples needed, using a finetuned model. Dont save to finetune dataset
        if is_distilled_model and suitable_for_distillation:
            prompt = self.construct_prompt(f, args, kwargs, [], distilled_model)
            return prompt, distilled_model, suitable_for_distillation, True
        else:
            aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=16)
            examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
                        aligns]
            # update the examples in the initialized_functions dict
            self.initialized_functions[func_hash]["examples"] = examples
            examples_token_count = sum([approximate_token_count(example) for example in examples])
            generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
            model = self.choose_model_from_tokens(teacher_models,
                                                  examples_token_count + input_prompt_token_count + generation_tokens,
                                                  len(examples))
            if model:
                # Re-render examples with the model's parsing delimiters
                # around each output.
                examples_with_parsing_tokens = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput:{model.parsing_helper_tokens['start_token']}{align['output']}{model.parsing_helper_tokens['end_token']}" for align in
                                                aligns]
                prompt = self.construct_prompt(f, args, kwargs, examples_with_parsing_tokens, model)
                return prompt, model, suitable_for_distillation, False
            else:
                raise ValueError(
                    "The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")

    def suitable_for_finetuning_token_check(self, args, kwargs, f, distilled_model: BaseModelConfig):
        """
        Check if the inputs are suitable for finetuning, i.e are below the finetuning token count
        """
        # check if finetunable
        finetuning_prompt = f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
        input_prompt_token_count = approximate_token_count(finetuning_prompt)
        # Negative token counts are "not yet computed" sentinels: fill them
        # in lazily.
        if distilled_model.system_message_token_count < 0:
            distilled_model.system_message_token_count = approximate_token_count(distilled_model.system_message)
        if distilled_model.instruction_token_count < 0:
            distilled_model.instruction_token_count = approximate_token_count(distilled_model.instructions)
        suitable_for_finetune = input_prompt_token_count + distilled_model.instruction_token_count + distilled_model.system_message_token_count < distilled_model.context_length
        return suitable_for_finetune, input_prompt_token_count

    def construct_prompt(self, f, args, kwargs, examples, model):
        """
        Construct a prompt given the model, function description, args, kwargs and examples

        Args:
            model (BaseModelConfig): The model to use for generation
            f (str): The function description
            args (tuple): The args of the function
            kwargs (tuple): The kwargs of the function
            examples (list): The examples of the function

        Returns:
            content (str): The prompt to send to the model
        """
        if examples:
            final_examples = "\n".join(
                [f"{align}" for align in
                 examples])
            example_input = f"Examples:{final_examples}\n"
        else:
            example_input = ""
        instruction_prompt = model.instructions
        content = f"{instruction_prompt}\nFunction: {f}\n{example_input}---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
        return content

    def repair_generate(self, args, kwargs, f, failed_outputs_list, aligns, models, llm_parameters):
        """
        Repair the output given the input, function description, failed outputs list, examples and models

        Returns the new raw model response, or None when no model has a
        large enough context window for the combined prompt.
        """
        # get the token counts
        examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
                    aligns]
        examples_token_count = sum([approximate_token_count(example) for example in examples])
        failed_examples_token_count = sum([approximate_token_count(failed_output[0]) + approximate_token_count(failed_output[1]) for failed_output in failed_outputs_list])
        input_prompt_token_count = approximate_token_count(f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:")
        generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
        model = self.choose_model_from_tokens(models,
                                              examples_token_count+input_prompt_token_count+generation_tokens+failed_examples_token_count,
                                              len(examples))
        if model:
            prompt = self.generate_repair_prompt(args, kwargs, f, failed_outputs_list, examples, model)
            logging.info(f"Previous output failed type validation, attempting to repair with {model.model_name}")
            choice = self._synthesise_answer(prompt, model, llm_parameters)
            return choice
        else:
            return None

    def generate_repair_prompt(self, args, kwargs, f, failed_outputs_list, examples, model):
        """
        Generate a repair prompt given the args, kwargs, function description, failed outputs list and examples
        """
        if examples:
            final_examples = "\n".join(
                [f"{model.parsing_helper_tokens['start_token']}{align}{model.parsing_helper_tokens['end_token']}" for align in
                 examples])
            successful_examples = f"Examples:{final_examples}\n"
        else:
            successful_examples = ""
        failed_examples = ""
        for failed_output in failed_outputs_list:
            failed_examples += f"Output: {failed_output[0]}\nError: {failed_output[1]}\n\n"
        end_token_addition = ""
        if model.parsing_helper_tokens["end_token"]:
            end_token_addition = f"Make sure to add the {model.parsing_helper_tokens['end_token']} token at the end of the output."
        prompt = f"{model.repair_instruction}{end_token_addition}\nFUNCTION DESCRIPTION: {f}\n{successful_examples}---{model.parsing_helper_tokens['start_token']}Inputs:\nArgs: {args}\nKwargs: {kwargs}\nFAILED EXAMPLES: {failed_examples}Correct output:"
        return prompt

    def choose_model_from_tokens(self, models, input_token_count, nr_of_examples=0):
        """
        Choose a model from the models given the token count and number of examples

        Args:
            models (list): The models to choose from
            input_token_count (int): The token count of the input
            nr_of_examples (int): The number of examples

        Returns:
            model (BaseModelConfig): The chosen model
        """
        # Returns the first model whose context window fits the prompt;
        # `models` is assumed to be in preference order.
        for model in models:
            # check if input token count is less than the context length
            # If the model config has custom messages, then use those, otherwise use the default ones
            if model.system_message_token_count < 0:
                model.system_message_token_count = approximate_token_count(model.system_message)
            if model.instruction_token_count < 0:
                model.instruction_token_count = approximate_token_count(model.instructions)
            # NOTE(review): input_token_count is mutated across loop
            # iterations, so delimiter padding from one model carries over to
            # the next candidate — confirm this is intended.
            if model.parsing_helper_tokens["start_token"]:
                input_token_count += 2*nr_of_examples
            if model.parsing_helper_tokens["end_token"]:
                input_token_count += 2*nr_of_examples
            total_token_count = input_token_count + model.instruction_token_count + model.system_message_token_count
            if total_token_count < model.context_length:
                return model
        return None

    def repair_output(self,
                      args: tuple,
                      kwargs: dict,
                      function_description: FunctionDescription,
                      choice,
                      validator: Validator,
                      generation_parameters: dict) -> tuple:
        """
        Repair an output, that failed type validation by generating a new output using the teacher model and the error

        Args:
            args (tuple): The args of the function
            kwargs (dict): The kwargs of the function
            function_description (FunctionDescription): The function description
            choice: The output that failed type validation, type is arbitrary
            validator (Validator): The validator object

        Returns:
            choice (str): The choice that was generated by the language model
            choice_parsed: The parsed choice, type is arbitrary
            valid (bool): Whether the output was correctly repaired was valid
        """
        # get the teacher models
        teacher_models = self.function_modeler.get_models(function_description)[1]
        valid = False
        # At most 5 repair attempts before giving up.
        retry_index = 5
        f = str(function_description.__dict__.__repr__() + "\n")
        error = f"Output type was not valid. Expected an valid object of type {function_description.output_type_hint}, got '{choice}'"
        # instantiate the failed outputs list
        failed_outputs_list = [(choice, error)]
        while retry_index > 0 and not valid:
            # get the alignments
            aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=5)
            # Generate the reparied LLM output
            choice = self.repair_generate(args,
                                          kwargs,
                                          f,
                                          failed_outputs_list,
                                          aligns,
                                          teacher_models,
                                          generation_parameters)
            if not choice:
                # if no choice then the input was too long for the model
                # no specific error but the retry index goes down
                retry_index -= 1
                continue
            # start parsing the object
            # NOTE(review): duplicates _parse_choice but on a raw string —
            # same eval/bare-except caveats apply.
            try:
                # json load
                choice_parsed = json.loads(choice)
            except:
                # if it fails, it's not a json object, try eval
                try:
                    choice_parsed = eval(choice)
                except:
                    choice_parsed = choice
            valid = validator.check_type(choice_parsed, function_description.output_type_hint)
            if not valid:
                # if it's not valid, add it to the failed outputs list
                error = f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{choice}'"
                failed_outputs_list.append((choice, error))
                retry_index -= 1
        if valid:
            logging.info(f"Successfully repaired output.")
        return choice, choice_parsed, valid
|
evocodebench_data_177
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# Riskfolio-Lib, Copyright (c) 2020-2023, Dany Cajas, Licensed under BSD 3 clause.
# Statsmodels, Copyright (C) 2006, Jonathan E. Taylor, Licensed under BSD 3 clause.
from enum import auto
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.optimize as sco
import scipy.spatial.distance as scd
import scipy.special as scs
from scipy.sparse import csr_matrix
from skfolio.utils.tools import AutoEnum
__all__ = [
"NBinsMethod",
"n_bins_freedman",
"n_bins_knuth",
"is_cholesky_dec",
"assert_is_square",
"assert_is_symmetric",
"assert_is_distance",
"cov_nearest",
"cov_to_corr",
"corr_to_cov",
"commutation_matrix",
"compute_optimal_n_clusters",
"rand_weights",
"rand_weights_dirichlet",
]
class NBinsMethod(AutoEnum):
    """Enumeration of the methods used to compute the optimal number of
    histogram bins.

    Attributes
    ----------
    FREEDMAN : str
        Freedman-Diaconis rule (see `n_bins_freedman`).

    KNUTH : str
        Knuth's rule (see `n_bins_knuth`).
    """

    FREEDMAN = auto()
    KNUTH = auto()
def n_bins_freedman(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using the Freedman-Diaconis
    rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "On the histogram as a density estimator: L2 theory".
        Freedman & Diaconis (1981).
    """
    if x.ndim != 1:
        raise ValueError("`x` must be a 1d-array")
    n_observations = len(x)
    q1, q3 = np.percentile(x, [25, 75])
    # Freedman-Diaconis bin width: 2 * IQR / n^(1/3).
    bin_width = 2 * (q3 - q1) / (n_observations ** (1 / 3))
    if bin_width == 0:
        # Degenerate IQR (e.g. near-constant data): fall back to 5 bins.
        return 5
    data_range = np.max(x) - np.min(x)
    return int(round(max(1, np.ceil(data_range / bin_width))))
def n_bins_knuth(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using Knuth's rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "Optimal Data-Based Binning for Histograms".
        Knuth.
    """
    x = np.sort(x)
    n = len(x)

    def _neg_log_posterior(m: float):
        # Negative of Knuth's marginal log-posterior for `m` equal-width bins;
        # fmin passes a length-1 array.
        m = m[0]
        if m <= 0:
            return np.inf
        edges = np.linspace(x[0], x[-1], int(m) + 1)
        counts, _ = np.histogram(x, edges)
        return -(
            n * np.log(m)
            + scs.gammaln(0.5 * m)
            - m * scs.gammaln(0.5)
            - scs.gammaln(n + 0.5 * m)
            + np.sum(scs.gammaln(counts + 0.5))
        )

    # Seed the optimizer with the Freedman-Diaconis estimate.
    initial_guess = n_bins_freedman(x)
    optimum = sco.fmin(_neg_log_posterior, initial_guess, disp=0)[0]
    return int(round(optimum))
def rand_weights_dirichlet(n: int) -> np.array:
    """Produce n random weights summing to one, drawn from a Dirichlet
    distribution (i.e. uniformly over the unit simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    # A flat Dirichlet (all concentration parameters equal to 1) is the
    # uniform distribution over the simplex.
    alpha = np.ones(n)
    return np.random.dirichlet(alpha)
def rand_weights(n: int, zeros: int = 0) -> np.array:
    """Produce n random weights that sum to one from a uniform distribution
    (a non-uniform distribution over the simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    zeros : int, default=0
        The number of weights to randomly set to zeros.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    weights = np.random.rand(n)
    if zeros > 0:
        # Zero-out `zeros` randomly chosen positions before normalizing.
        masked_idx = np.random.choice(n, zeros, replace=False)
        weights[masked_idx] = 0
    return weights / sum(weights)
def is_cholesky_dec(x: np.ndarray) -> bool:
    """Return True if a Cholesky decomposition of the matrix can be computed.

    The matrix must be Hermitian (symmetric if real-valued) and
    positive-definite. No checking is performed to verify whether the matrix
    is Hermitian or not.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if Cholesky decomposition can be applied to the matrix, False
        otherwise.
    """
    # Around 100 times faster than checking for positive eigenvalues with
    # np.linalg.eigh.
    try:
        np.linalg.cholesky(x)
        return True
    except np.linalg.LinAlgError:
        # Catch the public exception name: `np.linalg.linalg` was a private
        # alias removed in NumPy 2.0.
        return False
def is_positive_definite(x: np.ndarray) -> bool:
    """Return True if the matrix is positive definite.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if the matrix is positive definite, False otherwise.
    """
    # Positive definite <=> every eigenvalue is strictly positive.
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues > 0)
def assert_is_square(x: np.ndarray) -> None:
    """Raise a ValueError when the matrix is not square.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not square.
    """
    is_square = x.ndim == 2 and x.shape[0] == x.shape[1]
    if not is_square:
        raise ValueError("The matrix must be square")
def assert_is_symmetric(x: np.ndarray) -> None:
    """Raise a ValueError when the matrix is not symmetric.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not symmetric.
    """
    # A symmetric matrix is necessarily square.
    assert_is_square(x)
    symmetric = np.allclose(x, x.T)
    if not symmetric:
        raise ValueError("The matrix must be symmetric")
def assert_is_distance(x: np.ndarray) -> None:
    """Raises an error if the matrix is not a distance matrix.

    A distance matrix must be symmetric (hence square) with a diagonal of
    (near-)zero elements.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not a distance matrix.
    """
    assert_is_symmetric(x)
    # The 1e-5 absolute tolerance absorbs floating-point noise on the diagonal.
    if not np.allclose(np.diag(x), np.zeros(x.shape[0]), atol=1e-5):
        raise ValueError(
            "The distance matrix must have diagonal elements close to zeros"
        )
def cov_to_corr(cov: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Convert a covariance matrix into its correlation matrix.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    Returns
    -------
    corr, std : tuple[ndarray of shape (n, n), ndarray of shape (n, )]
        Correlation matrix and standard-deviation vector
    """
    if cov.ndim != 2:
        raise ValueError(f"`cov` must be a 2D array, got a {cov.ndim}D array")
    stds = np.sqrt(np.diag(cov))
    # Divide each column then each row by the matching standard deviation.
    scaled = cov / stds
    corr = scaled / stds[:, None]
    return corr, stds
def corr_to_cov(corr: np.ndarray, std: np.ndarray):
    """Convert a correlation matrix back into a covariance matrix, given its
    standard-deviation vector.

    Parameters
    ----------
    corr : ndarray of shape (n, n)
        Correlation matrix.

    std : ndarray of shape (n, )
        Standard-deviation vector.

    Returns
    -------
    cov : ndarray of shape (n, n)
        Covariance matrix
    """
    if std.ndim != 1:
        raise ValueError(f"`std` must be a 1D array, got a {std.ndim}D array")
    if corr.ndim != 2:
        raise ValueError(f"`corr` must be a 2D array, got a {corr.ndim}D array")
    # Multiply each column then each row by the matching standard deviation.
    scaled = corr * std
    return scaled * std[:, None]
# Smallest eigenvalue kept by the clipping branch of `cov_nearest`; clipping
# below this value can make scipy treat the result as non-PSD in corner cases.
_CLIPPING_VALUE = 1e-13
def cov_nearest(cov: np.ndarray, higham: bool = False, higham_max_iteration: int = 100):
    """Compute the nearest covariance matrix that is positive definite and with a
    cholesky decomposition than can be computed. The variance is left unchanged.

    First, it converts the covariance matrix to a correlation matrix.
    Then, it finds the nearest correlation matrix and converts it back to a covariance
    matrix using the initial standard deviation.

    Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due
    to floating point error and inversely, Cholesky decomposition can success for
    non-SPD matrix. Therefore, we need to test for both. We always start by testing
    for Cholesky decomposition which is significantly faster than checking for positive
    eigenvalues.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    higham : bool, default=False
        If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,
        otherwise the eigenvalues are clipped to threshold above zeros (1e-13).
        The default (`False`) is to use the clipping method as the Higham & Nick
        algorithm can be slow for large datasets.

    higham_max_iteration : int, default=100
        Maximum number of iteration of the Higham & Nick (2002) algorithm.
        The default value is `100`.

    Returns
    -------
    cov : ndarray
        The nearest covariance matrix.

    References
    ----------
    .. [1] "Computing the nearest correlation matrix - a problem from finance"
        IMA Journal of Numerical Analysis
        Higham & Nick (2002)
    """
    assert_is_square(cov)
    assert_is_symmetric(cov)
    # Around 100 times faster than checking eigenvalues with np.linalg.eigh
    if is_cholesky_dec(cov) and is_positive_definite(cov):
        return cov
    # Work on the correlation matrix so the variances are preserved exactly.
    corr, std = cov_to_corr(cov)
    if higham:
        eps = np.finfo(np.float64).eps * 5
        # `diff` carries the correction term between the PSD projection and its
        # input (the alternating-projections scheme of Higham (2002)).
        diff = np.zeros(corr.shape)
        x = corr.copy()
        for _ in range(higham_max_iteration):
            x_adj = x - diff
            # Project onto the PSD cone by flooring the eigenvalues at `eps`.
            eig_vals, eig_vecs = np.linalg.eigh(x_adj)
            x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T
            diff = x - x_adj
            # Project onto the set of matrices with a unit diagonal.
            np.fill_diagonal(x, 1)
            cov = corr_to_cov(x, std)
            if is_cholesky_dec(cov) and is_positive_definite(cov):
                break
        else:
            # Loop exhausted without producing a usable matrix.
            raise ValueError("Unable to find the nearest positive definite matrix")
    else:
        eig_vals, eig_vecs = np.linalg.eigh(corr)
        # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to
        # consider the matrix non-psd is some corner cases (see test/test_stats.py)
        x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T
        # Re-normalize the diagonal before restoring the original variances.
        x, _ = cov_to_corr(x)
        cov = corr_to_cov(x, std)
    return cov
def commutation_matrix(x):
    """Build the sparse commutation matrix K mapping the column-major
    vectorization of a matrix to the column-major vectorization of its
    transpose: ``K @ vec(A) == vec(A.T)``.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    K : ndarray of shape (m * n, m * n)
        The commutation matrix.
    """
    (m, n) = x.shape
    size = m * n
    rows = np.arange(size)
    # Column index of the single 1 in each row: re-read the row indices in
    # Fortran order, then flatten back in C order.
    cols = rows.reshape((m, n), order="F").ravel()
    ones = np.ones(size, dtype=np.int8)
    return csr_matrix((ones, (rows, cols)), shape=(size, size))
def compute_optimal_n_clusters(distance: np.ndarray, linkage_matrix: np.ndarray) -> int:
    r"""Compute the optimal number of clusters based on Two-Order Difference to Gap
    Statistic [1]_.

    The Two-Order Difference to Gap Statistic has been developed to improve the
    performance and stability of the Tibshiranis Gap statistic.
    It applies the two-order difference of the within-cluster dispersion to replace the
    reference null distribution in the Gap statistic.

    The number of cluster :math:`k` is determined by:

    .. math:: \begin{cases}
                \begin{aligned}
                &\max_{k} & & W_{k+2} + W_{k} - 2 W_{k+1} \\
                &\text{s.t.} & & \max\bigl(8, \sqrt{n}\bigr) \ge k \ge 1 \\
                \end{aligned}
              \end{cases}

    with :math:`n` the sample size and :math:`W_{k}` the within-cluster dispersions
    defined as:

    .. math:: W_{k} = \sum_{i=1}^{k} \frac{D_{i}}{2|C_{i}|}

    where :math:`|C_{i}|` is the cardinality of cluster :math:`i` and :math:`D_{i}` its
    density defined as:

    .. math:: D_{i} = \sum_{u \in C_{i}} \sum_{v \in C_{i}} d(u,v)

    with :math:`d(u,v)` the distance between u and v.

    Parameters
    ----------
    distance : ndarray of shape (n, n)
        Distance matrix.

    linkage_matrix : ndarray of shape (n - 1, 4)
        Linkage matrix.

    Returns
    -------
    value : int
        Optimal number of clusters.

    References
    ----------
    .. [1] "Application of two-order difference to gap statistic".
        Yue, Wang & Wei (2009)
    """
    # cut_tree[:, j] holds the cluster labels when the tree is cut at level j.
    cut_tree = sch.cut_tree(linkage_matrix)
    n = cut_tree.shape[1]
    max_clusters = max(8, round(np.sqrt(n)))
    dispersion = []
    for k in range(max_clusters):
        # Column n-k-1 corresponds to a cut with k+1 clusters.
        level = cut_tree[:, n - k - 1]
        cluster_density = []
        for i in range(np.max(level) + 1):
            cluster_idx = np.argwhere(level == i).flatten()
            # Condensed pairwise distances within cluster i.
            cluster_dists = scd.squareform(
                distance[cluster_idx, :][:, cluster_idx], checks=False
            )
            if cluster_dists.shape[0] != 0:
                cluster_density.append(np.nan_to_num(cluster_dists.mean()))
        dispersion.append(np.sum(cluster_density))
    dispersion = np.array(dispersion)
    # Second-order difference W_{k+2} + W_k - 2 W_{k+1}; the last two entries
    # wrap around after np.roll and are discarded.
    gaps = np.roll(dispersion, -2) + dispersion - 2 * np.roll(dispersion, -1)
    gaps = gaps[:-2]
    # k=0 represents one cluster
    k = np.argmax(gaps) + 2
    return k
|
evocodebench_data_178
|
"""Datasets module."""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# scikit-portfolio, Copyright (c) 2022, Carlo Nicolini, Licensed under MIT Licence.
# scikit-learn, Copyright (c) 2007-2010 David Cournapeau, Fabian Pedregosa, Olivier
# Grisel Licensed under BSD 3 clause.
import gzip
import os
import shutil
import urllib.request as ur
from importlib import resources
from pathlib import Path
import joblib
import pandas as pd
# Importable package holding the gzip-compressed csv datasets shipped with skfolio.
DATA_MODULE = "skfolio.datasets.data"
def get_data_home(data_home: str | Path | None = None) -> str:
"""Return the path of the skfolio data directory.
This folder is used by some large dataset loaders to avoid downloading the
data several times.
By default, the data directory is set to a folder named 'skfolio_data' in the
user home folder.
Alternatively, it can be set by the 'SKFOLIO_DATA' environment
variable or programmatically by giving an explicit folder path. The '~'
symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
Parameters
----------
data_home : str, optional
The path to skfolio data directory. If `None`, the default path
is `~/skfolio_data`.
Returns
-------
data_home: str or path-like, optional
The path to skfolio data directory.
"""
if data_home is None:
data_home = os.environ.get("SKFOLIO_DATA", os.path.join("~", "skfolio_data"))
data_home = os.path.expanduser(data_home)
os.makedirs(data_home, exist_ok=True)
return data_home
def clear_data_home(data_home: str | Path | None = None) -> None:
    """Remove the data home cache directory and everything inside it.

    Parameters
    ----------
    data_home : str or path-like, optional
        The path to scikit-learn data directory. If `None`, the default path
        is `~/skfolio_data`.
    """
    # Resolve (and create, if absent) the directory before deleting it.
    target = get_data_home(data_home)
    shutil.rmtree(target)
def load_gzip_compressed_csv_data(
    data_filename: str,
    data_module: str = DATA_MODULE,
    encoding="utf-8",
    datetime_index: bool = True,
) -> pd.DataFrame:
    """Loads gzip-compressed csv files with `importlib.resources`.

    1) Open resource file with `importlib.resources`
    2) Decompress csv file with `gzip.open`
    3) Load decompressed data with `pd.read_csv`

    Parameters
    ----------
    data_filename : str
        Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from
        `data_module/data_file_name`. For example `'SPX500.csv.gz'`.

    data_module : str or module, default='skfolio.datasets.data'
        Module where data lives. The default is `'skfolio.datasets.data'`.

    encoding : str, default="utf-8"
        Name of the encoding that the gzip-decompressed file will be
        decoded with. The default is 'utf-8'.

    datetime_index: bool, default=True
        If this is set to True, the DataFrame index is converted to datetime with
        format="%Y-%m-%d".
        The default is `True`.

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        DataFrame with each row representing one observation and each column
        representing the asset price of a given observation.
    """
    path = resources.files(data_module).joinpath(data_filename)
    # Use context managers for both handles so the gzip wrapper is closed
    # deterministically (previously the gzip object was never closed and only
    # the underlying binary file was).
    with path.open("rb") as compressed_file:
        with gzip.open(compressed_file, mode="rt", encoding=encoding) as csv_file:
            df = pd.read_csv(csv_file, sep=",", index_col=0)
    if datetime_index:
        df.index = pd.to_datetime(df.index, format="%Y-%m-%d")
    return df
def download_dataset(
    data_filename: str,
    data_home: str | Path | None = None,
    download_if_missing: bool = True,
) -> pd.DataFrame:
    """Download and save locally a dataset from the remote GitHub dataset folder.

    The parsed DataFrame is cached on disk as a compressed joblib file
    (`'*.pkz'`) inside the data home folder, so subsequent calls do not hit the
    network.

    Parameters
    ----------
    data_filename : str
        Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from a remote
        GitHub dataset folder.

    data_home : str or path-like, optional
        Specify another download and cache folder for the datasets. By default,
        all skfolio data is stored in `~/skfolio_data` sub-folders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.
        The default is `True`.

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        DataFrame with each row representing one observation and each column
        representing the asset price of a given observation.
    """
    url = f"https://github.com/skfolio/skfolio/raw/main/datasets/{data_filename}.csv.gz"
    data_home = get_data_home(data_home=data_home)
    filepath = os.path.join(data_home, f"{data_filename}.pkz")
    # Serve the cached pickled DataFrame when it already exists.
    if os.path.exists(filepath):
        return joblib.load(filepath)
    if not download_if_missing:
        raise OSError("Data not found and `download_if_missing` is False")
    archive_path = os.path.join(data_home, os.path.basename(url))
    ur.urlretrieve(url, archive_path)
    # NOTE(review): `archive_path` is an absolute filesystem path passed as the
    # resource name; `load_gzip_compressed_csv_data` resolves it through
    # `importlib.resources` joinpath, which happens to accept absolute paths —
    # confirm this stays true for the supported Python versions.
    df = load_gzip_compressed_csv_data(archive_path)
    joblib.dump(df, filepath, compress=6)
    # The raw archive is no longer needed once the pickle cache is written.
    os.remove(archive_path)
    return df
def load_sp500_dataset() -> pd.DataFrame:
    """Load the daily prices of 20 assets from the S&P 500 Index composition.

    The dataset covers 1990-01-02 through 2022-12-28 and comes from the Yahoo
    public API. The price is the adjusted close: the closing price after
    adjustments for all applicable splits and dividend distributions, using the
    split and dividend multipliers of the Center for Research in Security
    Prices (CRSP) standards.

    ==============   ==================
    Observations     8313
    Assets           20
    ==============   ==================

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_sp500_dataset
    >>> prices = load_sp500_dataset()
    >>> prices.head()
                    AAPL     AMD       BAC  ...       UNH       WMT      XOM
    1990-01-02  0.332589  4.1250  11.65625  ...  0.382813  5.890625  12.5000
    1990-01-03  0.334821  4.0000  11.75000  ...  0.375000  5.890625  12.3750
    1990-01-04  0.335938  3.9375  11.50000  ...  0.371094  5.859375  12.2500
    1990-01-05  0.337054  3.8125  11.25000  ...  0.355469  5.796875  12.1875
    1990-01-08  0.339286  3.8125  11.31250  ...  0.347656  5.875000  12.3750
    """
    return load_gzip_compressed_csv_data("sp500_dataset.csv.gz")
def load_sp500_index() -> pd.DataFrame:
    """Load the daily prices of the S&P 500 Index.

    The dataset covers 1990-01-02 through 2022-12-28 and comes from the Yahoo
    public API. The price is the adjusted close: the closing price after
    adjustments for all applicable splits and dividend distributions, using the
    split and dividend multipliers of the Center for Research in Security
    Prices (CRSP) standards.

    ==============   ==================
    Observations     8313
    Assets           1
    ==============   ==================

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_sp500_index
    >>> prices = load_sp500_index()
    >>> prices.head()
                 SP500
    Date
    1990-01-02  359.69
    1990-01-03  358.76
    1990-01-04  355.67
    1990-01-05  352.20
    1990-01-08  353.79
    """
    return load_gzip_compressed_csv_data("sp500_index.csv.gz")
def load_factors_dataset() -> pd.DataFrame:
    """Load the prices of 5 factor ETFs.

    This dataset is composed of the daily prices of 5 ETF representing common factors
    starting from 2014-01-02 up to 2022-12-28.

    The factors are:

        * "MTUM": Momentum
        * "QUAL": Quality
        * "SIZE": Size
        * "VLUE": Value
        * "USMV": Low volatility

    The data comes from the Yahoo public API.
    The price is the adjusted close which is the closing price after adjustments for
    all applicable splits and dividend distributions.
    The adjustment uses appropriate split and dividend multipliers, adhering to
    the Center for Research in Security Prices (CRSP) standards.

    ==============   ==================
    Observations     2264
    Assets           5
    ==============   ==================

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_factors_dataset
    >>> prices = load_factors_dataset()
    >>> prices.head()
                  MTUM    QUAL    SIZE    USMV    VLUE
    Date
    2014-01-02  52.704  48.351  48.986  29.338  47.054
    2014-01-03  52.792  48.256  48.722  29.330  46.999
    2014-01-06  52.677  48.067  48.722  29.263  46.991
    2014-01-07  53.112  48.455  48.731  29.430  47.253
    2014-01-08  53.502  48.437  48.731  29.422  47.253
    """
    data_filename = "factors_dataset.csv.gz"
    df = load_gzip_compressed_csv_data(data_filename)
    return df
def load_ftse100_dataset(data_home=None, download_if_missing=True) -> pd.DataFrame:
    """Load the daily prices of 64 assets from the FTSE 100 Index composition.

    The dataset covers 2000-01-04 through 2023-05-31 and comes from the Yahoo
    public API. The price is the adjusted close: the closing price after
    adjustments for all applicable splits and dividend distributions, using the
    split and dividend multipliers of the Center for Research in Security
    Prices (CRSP) standards. The data contains NaN.

    ==============   ==================
    Observations     5960
    Assets           64
    ==============   ==================

    Parameters
    ----------
    data_home : str, optional
        Specify another download and cache folder for the datasets.
        By default, all skfolio data is stored in `~/skfolio_data` subfolders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_ftse100_dataset
    >>> prices = load_ftse100_dataset()
    >>> prices.head()
                  AAL.L    ABF.L    AHT.L  ANTO.L  ...  VOD.L   WEIR.L    WPP.L    WTB.L
    Date                                           ...
    2000-01-04  535.354  205.926   97.590  40.313  ... 72.562  115.240  512.249  382.907
    2000-01-05  540.039  209.185   96.729  40.313  ... 69.042  118.483  462.080  381.972
    2000-01-06  553.289  229.048   95.581  40.452  ... 66.950  124.220  458.119  386.337
    2000-01-07  572.829  222.220   95.581  40.452  ... 70.716  121.725  475.283  405.046
    2000-01-10  578.852  224.548   92.711  40.685  ... 74.285  121.476  498.254  392.885
    """
    return download_dataset(
        "ftse100_dataset", data_home=data_home, download_if_missing=download_if_missing
    )
def load_nasdaq_dataset(data_home=None, download_if_missing=True) -> pd.DataFrame:
    """Load the daily prices of 1455 assets from the NASDAQ Composite Index.

    The dataset covers 2018-01-02 through 2023-05-31 and comes from the Yahoo
    public API. The price is the adjusted close: the closing price after
    adjustments for all applicable splits and dividend distributions, using the
    split and dividend multipliers of the Center for Research in Security
    Prices (CRSP) standards.

    ==============   ==================
    Observations     1362
    Assets           1455
    ==============   ==================

    Parameters
    ----------
    data_home : str, optional
        Specify another download and cache folder for the datasets.
        By default, all skfolio data is stored in `~/skfolio_data` subfolders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_nasdaq_dataset
    >>> prices = load_nasdaq_dataset()
    >>> prices.head()
                   AAL   AAOI    AAON    AAPL  ...  ZVRA   ZYME    ZYNE   ZYXI
    Date                                       ...
    2018-01-02  51.648  37.91  35.621  41.310  ...  66.4  7.933  12.995  2.922
    2018-01-03  51.014  37.89  36.247  41.303  ...  72.8  7.965  13.460  2.913
    2018-01-04  51.336  38.38  36.103  41.495  ...  78.4  8.430  12.700  2.869
    2018-01-05  51.316  38.89  36.681  41.967  ...  77.6  8.400  12.495  2.780
    2018-01-08  50.809  38.37  36.103  41.811  ...  82.4  8.310  12.550  2.825
    """
    return download_dataset(
        "nasdaq_dataset", data_home=data_home, download_if_missing=download_if_missing
    )
|
evocodebench_data_179
|
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple
import torch
from torch import nn
from detectron2.structures import Boxes, Instances, ROIMasks
from detectron2.utils.registry import _convert_target_to_string, locate
from .torchscript_patch import patch_builtin_len
@dataclass
class Schema:
    """
    A Schema describes how to flatten a possibly hierarchical object into a
    tuple of primitive objects, so it can be used as inputs/outputs of PyTorch's
    tracing, and how to rebuild the original object from that flat tuple.

    PyTorch cannot trace a function that produces rich output structures
    (e.g. dict, Instances, Boxes), so such outputs are flattened into a tuple of
    tensors for tracing. The ``flatten`` classmethod returns the flattened tuple
    together with the schema needed to reconstruct the object; calling the
    schema instance on the flattened values rebuilds the original object.

    A Schema is a dataclass and therefore serializes easily.
    """

    # inspired by FetchMapper in tensorflow/python/client/session.py

    @classmethod
    def flatten(cls, obj):
        raise NotImplementedError

    def __call__(self, values):
        raise NotImplementedError

    @staticmethod
    def _concat(values):
        # Concatenate a sequence of tuples, remembering each tuple's length so
        # `_split` can invert the operation.
        sizes = []
        parts = []
        for item in values:
            assert isinstance(item, tuple), "Flattened results must be a tuple"
            parts.append(item)
            sizes.append(len(item))
        return sum(parts, ()), sizes

    @staticmethod
    def _split(values, sizes):
        # Inverse of `_concat`: cut `values` back into chunks of the given sizes.
        if len(sizes):
            expected_len = sum(sizes)
            assert (
                len(values) == expected_len
            ), f"Values has length {len(values)} but expect length {expected_len}."
        chunks = []
        offset = 0
        for size in sizes:
            chunks.append(values[offset : offset + size])
            offset += size
        return chunks
@dataclass
class ListSchema(Schema):
    """Schema for lists: keeps one sub-schema and one flattened length per
    element."""

    schemas: List[Schema]  # the schemas that define how to flatten each element in the list
    sizes: List[int]  # the flattened length of each element

    def __call__(self, values):
        chunks = self._split(values, self.sizes)
        if len(chunks) != len(self.schemas):
            raise ValueError(
                f"Values has length {len(chunks)} but schemas "
                f"has length {len(self.schemas)}!"
            )
        return [schema(chunk) for schema, chunk in zip(self.schemas, chunks)]

    @classmethod
    def flatten(cls, obj):
        flattened = [flatten_to_tuple(element) for element in obj]
        values, sizes = cls._concat([f[0] for f in flattened])
        return values, cls([f[1] for f in flattened], sizes)
@dataclass
class TupleSchema(ListSchema):
    """Schema for tuples: identical to ListSchema except that rebuilding yields
    a tuple."""

    def __call__(self, values):
        rebuilt = super().__call__(values)
        return tuple(rebuilt)
@dataclass
class IdentitySchema(Schema):
    """Schema for primitive leaves: the object itself is the single flattened
    value."""

    def __call__(self, values):
        return values[0]

    @classmethod
    def flatten(cls, obj):
        return (obj,), cls()
@dataclass
class DictSchema(ListSchema):
    """Schema for str-keyed mappings: values are flattened as a list ordered by
    sorted key."""

    keys: List[str]

    def __call__(self, values):
        rebuilt = super().__call__(values)
        return dict(zip(self.keys, rebuilt))

    @classmethod
    def flatten(cls, obj):
        # Reject non-string keys before sorting so the error is explicit.
        for key in obj.keys():
            if not isinstance(key, str):
                raise KeyError("Only support flattening dictionaries if keys are str.")
        sorted_keys = sorted(obj.keys())
        ret, schema = ListSchema.flatten([obj[k] for k in sorted_keys])
        return ret, cls(schema.schemas, schema.sizes, sorted_keys)
@dataclass
class InstancesSchema(DictSchema):
    """Schema for ``Instances``: the fields are flattened as a dict and the
    image size is appended as the last flattened value."""

    def __call__(self, values):
        fields, image_size = values[:-1], values[-1]
        fields = super().__call__(fields)
        return Instances(image_size, **fields)

    @classmethod
    def flatten(cls, obj):
        ret, schema = super().flatten(obj.get_fields())
        size = obj.image_size
        # Image size must be a tensor so it can travel through tracing.
        if not isinstance(size, torch.Tensor):
            size = torch.tensor(size)
        return ret + (size,), schema
@dataclass
class TensorWrapSchema(Schema):
    """
    Schema for classes that are simple wrappers of a single tensor, e.g.
    Boxes, RotatedBoxes, BitMasks.
    """

    class_name: str  # fully-qualified name used to re-locate the wrapper class

    def __call__(self, values):
        klass = locate(self.class_name)
        return klass(values[0])

    @classmethod
    def flatten(cls, obj):
        return (obj.tensor,), cls(_convert_target_to_string(type(obj)))
# if more custom structures needed in the future, can allow
# passing in extra schemas for custom types
def flatten_to_tuple(obj):
    """
    Flatten an object so it can be used for PyTorch tracing.
    Also returns how to rebuild the original object from the flattened outputs.

    Returns:
        res (tuple): the flattened results that can be used as tracing outputs
        schema: an object with a ``__call__`` method such that ``schema(res) == obj``.
            It is a pure dataclass that can be serialized.
    """
    # Ordered dispatch table: the first matching type wins; str/bytes must come
    # before tuple handling, and anything unknown is treated as a primitive leaf.
    dispatch = [
        ((str, bytes), IdentitySchema),
        (list, ListSchema),
        (tuple, TupleSchema),
        (collections.abc.Mapping, DictSchema),
        (Instances, InstancesSchema),
        ((Boxes, ROIMasks), TensorWrapSchema),
    ]
    schema_cls = next(
        (schema for klass, schema in dispatch if isinstance(obj, klass)),
        IdentitySchema,
    )
    return schema_cls.flatten(obj)
class TracingAdapter(nn.Module):
    """
    A model may take rich input/output format (e.g. dict or custom classes),
    but `torch.jit.trace` requires tuple of tensors as input/output.
    This adapter flattens input/output format of a model so it becomes traceable.
    It also records the necessary schema to rebuild model's inputs/outputs from flattened
    inputs/outputs.

    Example:
    ::
        outputs = model(inputs)  # inputs/outputs may be rich structure
        adapter = TracingAdapter(model, inputs)

        # can now trace the model, with adapter.flattened_inputs, or another
        # tuple of tensors with the same length and meaning
        traced = torch.jit.trace(adapter, adapter.flattened_inputs)

        # traced model can only produce flattened outputs (tuple of tensors)
        flattened_outputs = traced(*adapter.flattened_inputs)
        # adapter knows the schema to convert it back (new_outputs == outputs)
        new_outputs = adapter.outputs_schema(flattened_outputs)
    """

    flattened_inputs: Tuple[torch.Tensor] = None
    """
    Flattened version of inputs given to this class's constructor.
    """

    inputs_schema: Schema = None
    """
    Schema of the inputs given to this class's constructor.
    """

    outputs_schema: Schema = None
    """
    Schema of the output produced by calling the given model with inputs.
    """

    def __init__(
        self,
        model: nn.Module,
        inputs,
        inference_func: Optional[Callable] = None,
        allow_non_tensor: bool = False,
    ):
        """
        Args:
            model: an nn.Module
            inputs: An input argument or a tuple of input arguments used to call model.
                After flattening, it has to only consist of tensors.
            inference_func: a callable that takes (model, *inputs), calls the
                model with inputs, and return outputs. By default it
                is ``lambda model, *inputs: model(*inputs)``. Can be override
                if you need to call the model differently.
            allow_non_tensor: allow inputs/outputs to contain non-tensor objects.
                This option will filter out non-tensor objects to make the
                model traceable, but ``inputs_schema``/``outputs_schema`` cannot be
                used anymore because inputs/outputs cannot be rebuilt from pure tensors.
                This is useful when you're only interested in the single trace of
                execution (e.g. for flop count), but not interested in
                generalizing the traced graph to new inputs.
        """
        super().__init__()
        # Tracing must run on the wrapped module, not the DDP/DataParallel wrapper.
        if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
            model = model.module
        self.model = model
        # Normalize a single argument into a one-element tuple of arguments.
        if not isinstance(inputs, tuple):
            inputs = (inputs,)
        self.inputs = inputs
        self.allow_non_tensor = allow_non_tensor
        if inference_func is None:
            inference_func = lambda model, *inputs: model(*inputs)  # noqa
        self.inference_func = inference_func
        self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs)
        # Fast path: all flattened inputs are already tensors.
        if all(isinstance(x, torch.Tensor) for x in self.flattened_inputs):
            return
        if self.allow_non_tensor:
            # Drop the non-tensor leaves; the schema can no longer rebuild the
            # original inputs, so it is invalidated.
            self.flattened_inputs = tuple(
                [x for x in self.flattened_inputs if isinstance(x, torch.Tensor)]
            )
            self.inputs_schema = None
        else:
            for input in self.flattened_inputs:
                if not isinstance(input, torch.Tensor):
                    raise ValueError(
                        "Inputs for tracing must only contain tensors. "
                        f"Got a {type(input)} instead."
                    )

    def forward(self, *args: torch.Tensor):
        with torch.no_grad(), patch_builtin_len():
            if self.inputs_schema is not None:
                # Rebuild the rich input structure from the flattened tensors.
                inputs_orig_format = self.inputs_schema(args)
            else:
                # Without an inputs schema the adapter cannot generalize: only
                # the exact flattened inputs captured at construction time
                # (compared by identity) are accepted.
                if len(args) != len(self.flattened_inputs) or any(
                    x is not y for x, y in zip(args, self.flattened_inputs)
                ):
                    raise ValueError(
                        "TracingAdapter does not contain valid inputs_schema."
                        " So it cannot generalize to other inputs and must be"
                        " traced with `.flattened_inputs`."
                    )
                inputs_orig_format = self.inputs
            outputs = self.inference_func(self.model, *inputs_orig_format)
            flattened_outputs, schema = flatten_to_tuple(outputs)
            flattened_output_tensors = tuple(
                [x for x in flattened_outputs if isinstance(x, torch.Tensor)]
            )
            if len(flattened_output_tensors) < len(flattened_outputs):
                # Some flattened outputs were not tensors.
                if self.allow_non_tensor:
                    flattened_outputs = flattened_output_tensors
                    self.outputs_schema = None
                else:
                    raise ValueError(
                        "Model cannot be traced because some model outputs "
                        "cannot flatten to tensors."
                    )
            else:  # schema is valid
                # Record the schema on first call; later calls must match it.
                if self.outputs_schema is None:
                    self.outputs_schema = schema
                else:
                    assert self.outputs_schema == schema, (
                        "Model should always return outputs with the same "
                        "structure so it can be traced!"
                    )
            return flattened_outputs

    def _create_wrapper(self, traced_model):
        """
        Return a function that has an input/output interface the same as the
        original model, but it calls the given traced model under the hood.
        """

        def forward(*args):
            flattened_inputs, _ = flatten_to_tuple(args)
            flattened_outputs = traced_model(*flattened_inputs)
            return self.outputs_schema(flattened_outputs)

        return forward
|
evocodebench_data_180
|
"""Equation module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
import re
import warnings
import numpy as np
import numpy.typing as npt
from skfolio.exceptions import EquationToMatrixError, GroupNotFoundError
__all__ = ["equations_to_matrix"]
def equations_to_matrix(
    groups: npt.ArrayLike,
    equations: npt.ArrayLike,
    sum_to_one: bool = False,
    raise_if_group_missing: bool = False,
    names: tuple[str, str] = ("groups", "equations"),
) -> tuple[np.ndarray, np.ndarray]:
    """Convert a list of linear equations into the left and right matrices of
    the inequality A <= B.

    Parameters
    ----------
    groups : array-like of shape (n_groups, n_assets)
        2D array of assets groups. For example:

            groups = np.array(
                [
                    ["SPX", "SX5E", "NKY", "TLT"],
                    ["Equity", "Equity", "Equity", "Bond"],
                    ["US", "Europe", "Japan", "US"],
                ]
            )

    equations : array-like of shape (n_equations,)
        1D array of equations. Valid equation patterns include:

            * "number_1 * group_1 + number_3 <= number_4 * group_3 + number_5"
            * "group_1 >= number * group_2"
            * "group_1 <= number"
            * "group_1 >= number"

        "group_1" and "group_2" are group names defined in `groups`; a group
        stands for the sum of all assets belonging to it. For example:

            equations = [
                "Equity <= 3 * Bond",
                "US >= 1.5",
                "Europe >= 0.5 * Japan",
                "Japan <= 1",
                "3*SPX + 5*SX5E <= 2*TLT + 3",
            ]

    sum_to_one : bool
        If this is set to True, all elements in a group sum to one (used in the
        `views` of the Black-Litterman model).

    raise_if_group_missing : bool, default=False
        If this is set to True, an error is raised when a group is not found in
        the groups; otherwise only a warning is shown and the corresponding row
        is left at zero. The default is False.

    names : tuple[str, str], default=('groups', 'equations')
        The group and equation names used in error messages.
        The default is `('groups', 'equations')`.

    Returns
    -------
    left: ndarray of shape (n_equations, n_assets)
    right: ndarray of shape (n_equations,)
        The left and right matrices of the inequality A <= B.
    """
    groups = np.asarray(groups)
    equations = np.asarray(equations)
    if groups.ndim != 2:
        raise ValueError(
            f"`{names[0]}` must be a 2D array, got {groups.ndim}D array instead."
        )
    if equations.ndim != 1:
        raise ValueError(
            f"`{names[1]}` must be a 1D array, got {equations.ndim}D array instead."
        )
    n_assets = groups.shape[1]
    left = np.zeros((len(equations), n_assets))
    right = np.zeros(len(equations))
    for row, equation_str in enumerate(equations):
        try:
            lhs, rhs = _string_to_equation(
                groups=groups,
                string=equation_str,
                sum_to_one=sum_to_one,
            )
        except GroupNotFoundError as error:
            if raise_if_group_missing:
                raise
            # Keep the zero row and surface the problem to the caller's frame.
            warnings.warn(str(error), stacklevel=2)
        else:
            left[row] = lhs
            right[row] = rhs
    return left, right
def _matching_array(values: np.ndarray, key: str, sum_to_one: bool) -> np.ndarray:
"""Takes in a 2D array of strings, a key string, and a boolean flag.
It returns a 1D array where the value is 1 if there is a match between the key and
any value in the 2D array, and 0 otherwise. The returned array can be scaled to
have a sum of one if the flag is set to True.
Parameters
----------
values : ndarray of shape (n, m)
2D-array of strings.
key : str
String to match in the values.
sum_to_one : bool
If this is set to True, the matching 1D-array is scaled to have a sum of one.
Returns
-------
matching_array : ndarray of shape (n, )
Matching 1D-array.
"""
arr = np.any(values == key, axis=0)
if not arr.any():
raise EquationToMatrixError(f"Unable to find '{key}' in '{values}'")
if sum_to_one:
s = np.sum(arr)
else:
s = 1
return arr / s
# Sign used to flip an (in)equality into canonical `<=` form: '>=' rows are
# multiplied by -1 while '<=' / '==' / '=' rows keep their sign.
_operator_mapping = {">=": -1, "<=": 1, "==": 1, "=": 1}
# Numeric sign of the '+' and '-' term operators.
_operator_signs = {"+": 1, "-": -1}
def _inequality_operator_sign(operator: str) -> int:
"""Convert the operators '>=', "==" and '<=' into the corresponding integer
values -1, 1 and 1, respectively.
Parameters
----------
operator : str
Operator: '>=' or '<='.
Returns
-------
value : int
Operator sign: 1 or -1.
"""
try:
return _operator_mapping[operator]
except KeyError:
raise EquationToMatrixError(
f"operator '{operator}' is not valid. It should be '<=' or '>='"
) from None
def _operator_sign(operator: str) -> int:
"""Convert the operators '+' and '-' into 1 or -1
Parameters
----------
operator : str
Operator: '+' and '-'.
Returns
-------
value : int
Operator sign: 1 or -1.
"""
try:
return _operator_signs[operator]
except KeyError:
raise EquationToMatrixError(
f"operator '{operator}' is not valid. It should be be '+' or '-'"
) from None
def _string_to_float(string: str) -> float:
"""Convert the factor string into a float.
Parameters
----------
string : str
The factor string.
Returns
-------
value : int
The factor string converted to float.
"""
try:
return float(string)
except ValueError:
raise EquationToMatrixError(f"Unable to convert {string} into float") from None
def _string_to_equation(
    groups: np.ndarray,
    string: str,
    sum_to_one: bool,
) -> tuple[np.ndarray, float]:
    """Convert a string to a left 1D-array and right float of the form:
    `groups @ left <= right`.

    Parameters
    ----------
    groups : ndarray of shape (n_groups, n_assets)
        Groups 2D-array

    string : str
        String to convert

    sum_to_one : bool
        If this is set to True, the 1D-array is scaled to have a sum of one.

    Returns
    -------
    left: 1D-array of shape (n_assets,)
    right: float

    Raises
    ------
    EquationToMatrixError
        If an operator is misplaced or an invalid operator ('>', '<') is used.
    GroupNotFoundError
        If a token that is neither a number nor a known group is encountered.
    """
    n = groups.shape[1]
    operators = ["-", "+", "*", ">=", "<=", "==", "="]
    invalid_operators = [">", "<"]
    # Split on any operator while keeping the operators themselves (the regex
    # uses a capturing group); each token is escaped with a leading backslash.
    pattern = re.compile(r"((?:" + "|\\".join(operators) + r"))")
    invalid_pattern = re.compile(r"((?:" + "|\\".join(invalid_operators) + r"))")
    err_msg = f"Wrong pattern encountered while converting the string '{string}'"

    res = re.split(pattern, string)
    res = [x.strip() for x in res]
    res = [x for x in res if x != ""]
    iterator = iter(res)
    group_names = set(groups.flatten())

    def is_group(name: str) -> bool:
        # A token is a group if it matches any entry of `groups`.
        return name in group_names

    left = np.zeros(n)
    right = 0
    # Flips to -1 once the comparison operator is consumed: terms on the
    # right-hand side of the comparison are moved to the left with negated sign.
    main_sign = 1
    inequality_sign = None
    e = next(iterator, None)
    i = 0
    while True:
        i += 1
        if i > 1e6:
            # Safety valve: malformed input must not loop forever.
            raise RecursionError(err_msg)
        if e is None:
            break
        sign = 1
        if e in [">=", "<=", "==", "="]:
            main_sign = -1
            inequality_sign = _inequality_operator_sign(e)
            e = next(iterator, None)
            if e in ["-", "+"]:
                sign *= _operator_sign(e)
                e = next(iterator, None)
        elif e in ["-", "+"]:
            sign *= _operator_sign(e)
            e = next(iterator, None)
        elif e == "*":
            raise EquationToMatrixError(
                f"{err_msg}: the character '{e}' is wrongly positioned"
            )
        sign *= main_sign
        # next can only be a number or a group
        if e is None or e in operators:
            raise EquationToMatrixError(
                f"{err_msg}: the character '{e}' is wrongly positioned"
            )
        if is_group(e):
            arr = _matching_array(values=groups, key=e, sum_to_one=sum_to_one)
            # next can only be a '*' or an ['-', '+', '>=', '<=', '==', '='] or None
            e = next(iterator, None)
            if e is None or e in ["-", "+", ">=", "<=", "==", "="]:
                left += sign * arr
            elif e == "*":
                # next can only a number
                e = next(iterator, None)
                try:
                    number = float(e)
                except ValueError:
                    invalid_ops = invalid_pattern.findall(e)
                    if len(invalid_ops) > 0:
                        raise EquationToMatrixError(
                            f"{invalid_ops[0]} is an invalid operator. Valid operators"
                            f" are: {operators}"
                        ) from None
                    raise GroupNotFoundError(
                        f"{err_msg}: the group '{e}' is missing from the groups"
                        f" {groups}"
                    ) from None
                left += number * sign * arr
                e = next(iterator, None)
            else:
                raise EquationToMatrixError(
                    f"{err_msg}: the character '{e}' is wrongly positioned"
                )
        else:
            try:
                number = float(e)
            except ValueError:
                invalid_ops = invalid_pattern.findall(e)
                if len(invalid_ops) > 0:
                    raise EquationToMatrixError(
                        f"{invalid_ops[0]} is an invalid operator. Valid operators are:"
                        f" {operators}"
                    ) from None
                raise GroupNotFoundError(
                    f"{err_msg}: the group '{e}' is missing from the groups {groups}"
                ) from None
            # next can only be a '*' or an operator or None
            e = next(iterator, None)
            if e == "*":
                # next can only a group
                e = next(iterator, None)
                if not is_group(e):
                    raise EquationToMatrixError(
                        f"{err_msg}: the character '{e}' is wrongly positioned"
                    )
                arr = _matching_array(values=groups, key=e, sum_to_one=sum_to_one)
                left += number * sign * arr
                e = next(iterator, None)
            elif e is None or e in ["-", "+", ">=", "<=", "==", "="]:
                right += number * sign
            else:
                raise EquationToMatrixError(
                    f"{err_msg}: the character '{e}' is wrongly positioned"
                )
    # NOTE(review): if `string` contains no comparison operator at all,
    # `inequality_sign` is still None here and these multiplications raise
    # TypeError — confirm callers always pass a full (in)equality.
    left *= inequality_sign
    right *= -inequality_sign
    return left, right
|
evocodebench_data_181
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import sys
import tempfile
from contextlib import ExitStack, contextmanager
from copy import deepcopy
from unittest import mock
import torch
from torch import nn
# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964
import detectron2 # noqa F401
from detectron2.structures import Boxes, Instances
from detectron2.utils.env import _import_file
# Monotonically increasing id used to give each generated Instances class and
# each generated module a unique name (see `_gen_instance_class` / `_import`).
_counter = 0
def _clear_jit_cache():
    # Purge TorchScript's global compilation caches so that a regenerated class
    # with a previously-seen qualified name is recompiled rather than hitting
    # stale cached types.
    # NOTE(review): relies on private torch.jit internals — confirm they exist
    # in the pinned torch version.
    from torch.jit._recursive import concrete_type_store
    from torch.jit._state import _jit_caching_layer

    concrete_type_store.type_store.clear()  # for modules
    _jit_caching_layer.clear()  # for free functions
def _add_instances_conversion_methods(newInstances):
    """
    Add from_instances methods to the scripted Instances class.
    """
    cls_name = newInstances.__name__

    # Marked unused so TorchScript skips compiling it; the conversion only
    # runs in eager mode.
    @torch.jit.unused
    def from_instances(instances: Instances):
        """
        Create scripted Instances from original Instances
        """
        fields = instances.get_fields()
        image_size = instances.image_size
        ret = newInstances(image_size)
        for name, val in fields.items():
            # The generated class stores every field in a private `_<name>`
            # attribute (see `_gen_instance_class`).
            assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}"
            setattr(ret, name, deepcopy(val))
        return ret

    newInstances.from_instances = from_instances
@contextmanager
def patch_instances(fields):
    """
    A contextmanager, under which the Instances class in detectron2 is replaced
    by a statically-typed scriptable class, defined by `fields`.
    See more in `scripting_with_instances`.
    """

    # delete=False so the file can be re-opened by the import machinery after
    # it is closed below; the enclosing TemporaryDirectory still cleans it up.
    with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile(
        mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False
    ) as f:
        try:
            # Objects that use Instances should not reuse previously-compiled
            # results in cache, because `Instances` could be a new class each time.
            _clear_jit_cache()

            cls_name, s = _gen_instance_module(fields)
            f.write(s)
            f.flush()
            f.close()

            module = _import(f.name)
            new_instances = getattr(module, cls_name)
            # Script the generated class eagerly so failures surface here.
            _ = torch.jit.script(new_instances)
            # let torchscript think Instances was scripted already
            Instances.__torch_script_class__ = True
            # let torchscript find new_instances when looking for the jit type of Instances
            Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances)

            _add_instances_conversion_methods(new_instances)
            yield new_instances
        finally:
            # Always undo the monkey-patch, even if scripting failed above.
            try:
                del Instances.__torch_script_class__
                del Instances._jit_override_qualname
            except AttributeError:
                pass
            sys.modules.pop(module.__name__)
def _gen_instance_class(fields):
    """
    Generate source code for a statically-typed, TorchScript-compatible
    replacement of `Instances` holding exactly the given fields.

    Args:
        fields (dict[name: type])

    Returns:
        (str, str): the generated class name and its source code.
    """

    class _FieldType:
        # Small record tying a field name to its type and the annotation
        # string ("module.Type") emitted into the generated source.
        def __init__(self, name, type_):
            assert isinstance(name, str), f"Field name must be str, got {name}"
            self.name = name
            self.type_ = type_
            self.annotation = f"{type_.__module__}.{type_.__name__}"

    fields = [_FieldType(k, v) for k, v in fields.items()]

    def indent(level, s):
        return " " * 4 * level + s

    lines = []

    # Unique class name per call, so TorchScript never confuses two
    # generations with different field sets.
    global _counter
    _counter += 1

    cls_name = "ScriptedInstances{}".format(_counter)

    field_names = tuple(x.name for x in fields)
    extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields])
    lines.append(
        f"""
class {cls_name}:
    def __init__(self, image_size: Tuple[int, int], {extra_args}):
        self.image_size = image_size
        self._field_names = {field_names}
"""
    )

    # One Optional `_<name>` attribute per field; `annotate` gives TorchScript
    # the static type.
    for f in fields:
        lines.append(
            indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})")
        )

    # Property getter/setter pair per field; the getter asserts non-None.
    for f in fields:
        lines.append(
            f"""
    @property
    def {f.name}(self) -> {f.annotation}:
        # has to use a local for type refinement
        # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement
        t = self._{f.name}
        assert t is not None, "{f.name} is None and cannot be accessed!"
        return t

    @{f.name}.setter
    def {f.name}(self, value: {f.annotation}) -> None:
        self._{f.name} = value
"""
        )

    # support method `__len__`
    lines.append(
        """
    def __len__(self) -> int:
"""
    )
    for f in fields:
        lines.append(
            f"""
        t = self._{f.name}
        if t is not None:
            return len(t)
"""
        )
    lines.append(
        """
        raise NotImplementedError("Empty Instances does not support __len__!")
"""
    )

    # support method `has`
    lines.append(
        """
    def has(self, name: str) -> bool:
"""
    )
    for f in fields:
        lines.append(
            f"""
        if name == "{f.name}":
            return self._{f.name} is not None
"""
        )
    lines.append(
        """
        return False
"""
    )

    # support method `to`
    none_args = ", None" * len(fields)
    lines.append(
        f"""
    def to(self, device: torch.device) -> "{cls_name}":
        ret = {cls_name}(self.image_size{none_args})
"""
    )
    for f in fields:
        if hasattr(f.type_, "to"):
            lines.append(
                f"""
        t = self._{f.name}
        if t is not None:
            ret._{f.name} = t.to(device)
"""
            )
        else:
            # For now, ignore fields that cannot be moved to devices.
            # Maybe can support other tensor-like classes (e.g. __torch_function__)
            pass
    lines.append(
        """
        return ret
"""
    )

    # support method `getitem`
    none_args = ", None" * len(fields)
    lines.append(
        f"""
    def __getitem__(self, item) -> "{cls_name}":
        ret = {cls_name}(self.image_size{none_args})
"""
    )
    for f in fields:
        lines.append(
            f"""
        t = self._{f.name}
        if t is not None:
            ret._{f.name} = t[item]
"""
        )
    lines.append(
        """
        return ret
"""
    )

    # support method `cat`
    # this version does not contain checks that all instances have same size and fields
    none_args = ", None" * len(fields)
    lines.append(
        f"""
    def cat(self, instances: List["{cls_name}"]) -> "{cls_name}":
        ret = {cls_name}(self.image_size{none_args})
"""
    )
    for f in fields:
        lines.append(
            f"""
        t = self._{f.name}
        if t is not None:
            values: List[{f.annotation}] = [x.{f.name} for x in instances]
            if torch.jit.isinstance(t, torch.Tensor):
                ret._{f.name} = torch.cat(values, dim=0)
            else:
                ret._{f.name} = t.cat(values)
"""
        )
    lines.append(
        """
        return ret"""
    )

    # support method `get_fields()`
    lines.append(
        """
    def get_fields(self) -> Dict[str, Tensor]:
        ret = {}
"""
    )

    for f in fields:
        # Only Boxes and Tensor fields are representable as a plain Tensor;
        # anything else fails at script/runtime with the generated assert.
        if f.type_ == Boxes:
            stmt = "t.tensor"
        elif f.type_ == torch.Tensor:
            stmt = "t"
        else:
            stmt = f'assert False, "unsupported type {str(f.type_)}"'
        lines.append(
            f"""
        t = self._{f.name}
        if t is not None:
            ret["{f.name}"] = {stmt}
"""
        )
    lines.append(
        """
        return ret"""
    )
    return cls_name, os.linesep.join(lines)
def _gen_instance_module(fields):
    """Generate the full source of a module that defines a scripted-Instances
    class for `fields`; returns (class_name, module_source)."""
    # TODO: find a more automatic way to enable import of other classes
    s = """
from copy import deepcopy
import torch
from torch import Tensor
import typing
from typing import *
import detectron2
from detectron2.structures import Boxes, Instances
"""

    cls_name, cls_def = _gen_instance_class(fields)
    s += cls_def
    return cls_name, s
def _import(path):
    # Import the generated file under a unique module name
    # ("<this module's name><counter>") so successive patches do not collide
    # in sys.modules.
    # NOTE(review): `make_importable` semantics come from detectron2.utils.env
    # — presumably it registers the module so it can be found by name; confirm.
    return _import_file(
        "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True
    )
@contextmanager
def patch_builtin_len(modules=()):
    """
    Patch the builtin len() function of a few detectron2 modules
    to use __len__ instead, because __len__ does not convert values to
    integers and therefore is friendly to tracing.

    Args:
        modules (list[str]): names of extra modules to patch len(), in
            addition to those in detectron2.
    """

    def _new_len(obj):
        return obj.__len__()

    with ExitStack() as stack:
        MODULES = [
            "detectron2.modeling.roi_heads.fast_rcnn",
            "detectron2.modeling.roi_heads.mask_head",
            "detectron2.modeling.roi_heads.keypoint_head",
        ] + list(modules)
        # mock.patch replaces the name `len` as resolved inside each module;
        # ExitStack undoes all patches when the context exits.
        ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES]
        for m in ctxs:
            m.side_effect = _new_len
        yield
def patch_nonscriptable_classes():
    """
    Apply patches on a few nonscriptable detectron2 classes.
    Should not have side-effects on eager usage.
    """
    # __prepare_scriptable__ can also be added to models for easier maintenance.
    # But it complicates the clean model code.

    from detectron2.modeling.backbone import ResNet, FPN

    # Due to https://github.com/pytorch/pytorch/issues/36061,
    # we change backbone to use ModuleList for scripting.
    # (note: this changes param names in state_dict)

    def prepare_resnet(self):
        # Return a deepcopy whose per-stage attributes are folded into a
        # ModuleList; the original eager model is left untouched.
        ret = deepcopy(self)
        ret.stages = nn.ModuleList(ret.stages)
        for k in self.stage_names:
            delattr(ret, k)
        return ret

    ResNet.__prepare_scriptable__ = prepare_resnet

    def prepare_fpn(self):
        ret = deepcopy(self)
        ret.lateral_convs = nn.ModuleList(ret.lateral_convs)
        ret.output_convs = nn.ModuleList(ret.output_convs)
        for name, _ in self.named_children():
            if name.startswith("fpn_"):
                delattr(ret, name)
        return ret

    FPN.__prepare_scriptable__ = prepare_fpn

    # Annotate some attributes to be constants for the purpose of scripting,
    # even though they are not constants in eager mode.
    from detectron2.modeling.roi_heads import StandardROIHeads

    if hasattr(StandardROIHeads, "__annotations__"):
        # copy first to avoid editing annotations of base class
        StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__)
        StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool]
        StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool]
# These patches are not supposed to have side-effects.
# Applied eagerly at import time so scripting works without extra setup.
patch_nonscriptable_classes()
@contextmanager
def freeze_training_mode(model):
    """
    A context manager that annotates the "training" attribute of every submodule
    to constant, so that the training codepath in these modules can be
    meta-compiled away. Upon exiting, the annotations are reverted.
    """
    classes = {type(x) for x in model.modules()}
    # __constants__ is the old way to annotate constants and not compatible
    # with __annotations__ .
    classes = {x for x in classes if not hasattr(x, "__constants__")}
    for cls in classes:
        cls.__annotations__["training"] = torch.jit.Final[bool]
    # NOTE(review): not wrapped in try/finally — an exception raised by the
    # caller's body skips the revert below; confirm this is acceptable.
    yield
    for cls in classes:
        # NOTE(review): the "revert" writes a plain `bool` annotation rather
        # than restoring/removing the previous entry, so classes that had no
        # "training" annotation before entering gain one; also
        # `cls.__annotations__` may resolve to a base class's dict — confirm.
        cls.__annotations__["training"] = bool
|
evocodebench_data_182
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog
__all__ = [
"SizeMismatchError",
"convert_image_to_rgb",
"check_image_size",
"transform_proposals",
"transform_instance_annotations",
"annotations_to_instances",
"annotations_to_instances_rotated",
"build_augmentation",
"build_transform_gen",
"create_keypoint_hflip_indices",
"filter_empty_instances",
"read_image",
]
class SizeMismatchError(ValueError):
    """
    When the loaded image has a different width/height than the annotation.
    """
# RGB<->YUV conversion matrices (SDTV BT.601 coefficients).
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]

# https://www.exiv2.org/tags.html
_EXIF_ORIENT = 274  # exif 'Orientation' tag
def convert_PIL_to_numpy(image, format):
    """
    Convert PIL image to numpy array of target format.

    Args:
        image (PIL.Image): a PIL image
        format (str): the format of output image

    Returns:
        (np.ndarray): also see `read_image`
    """
    if format is not None:
        # PIL only supports RGB, so convert to RGB and flip channels over below
        conversion_format = format
        if format in ["BGR", "YUV-BT.601"]:
            conversion_format = "RGB"
        image = image.convert(conversion_format)
    image = np.asarray(image)

    # PIL squeezes out the channel dimension for "L", so make it HWC
    if format == "L":
        image = np.expand_dims(image, -1)
    # handle formats not supported by PIL
    elif format == "BGR":
        # flip channels if needed
        image = image[:, :, ::-1]
    elif format == "YUV-BT.601":
        # scale to [0, 1] floats, then apply the BT.601 RGB->YUV matrix
        image = image / 255.0
        image = np.dot(image, np.array(_M_RGB2YUV).T)
    return image
def convert_image_to_rgb(image, format):
    """
    Convert an image from given format to RGB.

    Args:
        image (np.ndarray or Tensor): an HWC image
        format (str): the format of input image, also see `read_image`

    Returns:
        (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
    """
    if isinstance(image, torch.Tensor):
        image = image.cpu().numpy()
    if format == "BGR":
        # reorder channels BGR -> RGB
        image = image[:, :, [2, 1, 0]]
    elif format == "YUV-BT.601":
        # inverse of convert_PIL_to_numpy's YUV path: matrix back, rescale to 0-255
        image = np.dot(image, np.array(_M_YUV2RGB).T)
        image = image * 255.0
    else:
        if format == "L":
            # drop the single channel dim; PIL expects (H, W) for mode "L"
            image = image[:, :, 0]
        image = image.astype(np.uint8)
        image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
    return image
def _apply_exif_orientation(image):
    """
    Applies the exif orientation correctly.

    This code exists per the bug:
      https://github.com/python-pillow/Pillow/issues/3973
    with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
    various methods, especially `tobytes`
    Function based on:
      https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
      https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527

    Args:
        image (PIL.Image): a PIL image

    Returns:
        (PIL.Image): the PIL image with exif orientation applied, if applicable
    """
    if not hasattr(image, "getexif"):
        return image

    try:
        exif = image.getexif()
    except Exception:  # https://github.com/facebookresearch/detectron2/issues/1885
        exif = None

    if exif is None:
        return image

    orientation = exif.get(_EXIF_ORIENT)

    # Map EXIF orientation codes 2-8 to the PIL transpose operation that
    # undoes them; code 1 (normal) and unknown codes map to None.
    method = {
        2: Image.FLIP_LEFT_RIGHT,
        3: Image.ROTATE_180,
        4: Image.FLIP_TOP_BOTTOM,
        5: Image.TRANSPOSE,
        6: Image.ROTATE_270,
        7: Image.TRANSVERSE,
        8: Image.ROTATE_90,
    }.get(orientation)

    if method is not None:
        return image.transpose(method)
    return image
def read_image(file_name, format=None):
    """
    Read an image into the given format.
    Will apply rotation and flipping if the image has such exif information.

    Args:
        file_name (str): image file path
        format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".

    Returns:
        image (np.ndarray):
            an HWC image in the given format, which is 0-255, uint8 for
            supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
    """
    with PathManager.open(file_name, "rb") as f:
        image = Image.open(f)

        # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
        image = _apply_exif_orientation(image)
        # Conversion happens inside the `with` so the file is still open
        # while PIL lazily decodes the pixels.
        return convert_PIL_to_numpy(image, format)
def check_image_size(dataset_dict, image):
    """
    Raise an error if the image does not match the size specified in the dict.

    Args:
        dataset_dict (dict): a dataset dict that may contain "width"/"height"
            (and optionally "file_name", used in the error message). Missing
            "width"/"height" entries are filled in from the image, in place.
        image (np.ndarray): an HWC image.

    Raises:
        SizeMismatchError: if a size recorded in the dict disagrees with the
            loaded image.
    """
    image_wh = (image.shape[1], image.shape[0])
    if "width" in dataset_dict or "height" in dataset_dict:
        # Bug fix: a dict carrying only one of the two keys used to crash with
        # KeyError here (the tail of this function explicitly supports partial
        # dicts). Compare only the recorded dimension(s), falling back to the
        # image's own size for the missing one.
        expected_wh = (
            dataset_dict.get("width", image_wh[0]),
            dataset_dict.get("height", image_wh[1]),
        )
        if not image_wh == expected_wh:
            raise SizeMismatchError(
                "Mismatched image shape{}, got {}, expect {}.".format(
                    " for image " + dataset_dict["file_name"]
                    if "file_name" in dataset_dict
                    else "",
                    image_wh,
                    expected_wh,
                )
                + " Please check the width/height in your annotation."
            )

    # To ensure bbox always remap to original image size
    if "width" not in dataset_dict:
        dataset_dict["width"] = image.shape[1]
    if "height" not in dataset_dict:
        dataset_dict["height"] = image.shape[0]
def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
    """
    Apply transformations to the proposals in dataset_dict, if any.

    Args:
        dataset_dict (dict): a dict read from the dataset, possibly
            contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
        image_shape (tuple): height, width
        transforms (TransformList):
        proposal_topk (int): only keep top-K scoring proposals
        min_box_size (int): proposals with either side smaller than this
            threshold are removed

    The input dict is modified in-place, with abovementioned keys removed. A new
    key "proposals" will be added. Its value is an `Instances`
    object which contains the transformed proposals in its field
    "proposal_boxes" and "objectness_logits".
    """
    if "proposal_boxes" in dataset_dict:
        # Transform proposal boxes
        boxes = transforms.apply_box(
            BoxMode.convert(
                dataset_dict.pop("proposal_boxes"),
                dataset_dict.pop("proposal_bbox_mode"),
                BoxMode.XYXY_ABS,
            )
        )
        boxes = Boxes(boxes)
        objectness_logits = torch.as_tensor(
            dataset_dict.pop("proposal_objectness_logits").astype("float32")
        )

        boxes.clip(image_shape)
        keep = boxes.nonempty(threshold=min_box_size)
        boxes = boxes[keep]
        objectness_logits = objectness_logits[keep]

        proposals = Instances(image_shape)
        # Truncation to top-K assumes the stored proposals are already sorted
        # by objectness — TODO confirm against the dataset producer.
        proposals.proposal_boxes = boxes[:proposal_topk]
        proposals.objectness_logits = objectness_logits[:proposal_topk]
        dataset_dict["proposals"] = proposals
def transform_instance_annotations(
    annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
    """
    Apply transforms to box, segmentation and keypoints annotations of a single instance.

    It will use `transforms.apply_box` for the box, and
    `transforms.apply_coords` for segmentation polygons & keypoints.
    If you need anything more specially designed for each data structure,
    you'll need to implement your own version of this function or the transforms.

    Args:
        annotation (dict): dict of instance annotations for a single instance.
            It will be modified in-place.
        transforms (TransformList or list[Transform]):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.

    Returns:
        dict:
            the same input dict with fields "bbox", "segmentation", "keypoints"
            transformed according to `transforms`.
            The "bbox_mode" field will be set to XYXY_ABS.
    """
    if isinstance(transforms, (tuple, list)):
        transforms = T.TransformList(transforms)
    # bbox is 1d (per-instance bounding box)
    bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
    # clip transformed bbox to image size
    bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
    # (h, w) + (h, w) reversed yields the (w, h, w, h) upper bound for XYXY coords
    annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
    annotation["bbox_mode"] = BoxMode.XYXY_ABS

    if "segmentation" in annotation:
        # each instance contains 1 or more polygons
        segm = annotation["segmentation"]
        if isinstance(segm, list):
            # polygons
            polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
            annotation["segmentation"] = [
                p.reshape(-1) for p in transforms.apply_polygons(polygons)
            ]
        elif isinstance(segm, dict):
            # RLE
            mask = mask_util.decode(segm)
            mask = transforms.apply_segmentation(mask)
            assert tuple(mask.shape[:2]) == image_size
            annotation["segmentation"] = mask
        else:
            raise ValueError(
                "Cannot transform segmentation of type '{}'!"
                "Supported types are: polygons as list[list[float] or ndarray],"
                " COCO-style RLE as a dict.".format(type(segm))
            )

    if "keypoints" in annotation:
        keypoints = transform_keypoint_annotations(
            annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
        )
        annotation["keypoints"] = keypoints

    return annotation
def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
    """
    Transform keypoint annotations of an image.
    If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)

    Args:
        keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
            Each point is represented by (x, y, visibility).
        transforms (TransformList):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
            When `transforms` includes horizontal flip, will use the index
            mapping to flip keypoints.

    Returns:
        np.ndarray: Nx3 float64 array of transformed (x, y, visibility).
    """
    # (N*3,) -> (N, 3)
    keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
    keypoints_xy = transforms.apply_coords(keypoints[:, :2])

    # Set all out-of-boundary points to "unlabeled"
    inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
    inside = inside.all(axis=1)
    keypoints[:, :2] = keypoints_xy
    keypoints[:, 2][~inside] = 0

    # This assumes that HorizFlipTransform is the only one that does flip
    do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1

    # Alternative way: check if probe points was horizontally flipped.
    # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
    # probe_aug = transforms.apply_coords(probe.copy())
    # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa

    # If flipped, swap each keypoint with its opposite-handed equivalent
    if do_hflip:
        if keypoint_hflip_indices is None:
            raise ValueError("Cannot flip keypoints without providing flip indices!")
        if len(keypoints) != len(keypoint_hflip_indices):
            raise ValueError(
                "Keypoint data has {} points, but metadata "
                "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
            )
        keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]

    # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
    keypoints[keypoints[:, 2] == 0] = 0
    return keypoints
def annotations_to_instances(annos, image_size, mask_format="polygon"):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width
        mask_format (str): "polygon" or "bitmask"; how "segmentation"
            annotations are stored in the resulting Instances.

    Returns:
        Instances:
            It will contain fields "gt_boxes", "gt_classes",
            "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    # Empty `annos` yields a (0, 4) boxes array rather than a stack error.
    boxes = (
        np.stack(
            [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
        )
        if len(annos)
        else np.zeros((0, 4))
    )
    target = Instances(image_size)
    target.gt_boxes = Boxes(boxes)

    classes = [int(obj["category_id"]) for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes

    if len(annos) and "segmentation" in annos[0]:
        segms = [obj["segmentation"] for obj in annos]
        if mask_format == "polygon":
            try:
                masks = PolygonMasks(segms)
            except ValueError as e:
                raise ValueError(
                    "Failed to use mask_format=='polygon' from the given annotations!"
                ) from e
        else:
            assert mask_format == "bitmask", mask_format
            masks = []
            for segm in segms:
                if isinstance(segm, list):
                    # polygon
                    masks.append(polygons_to_bitmask(segm, *image_size))
                elif isinstance(segm, dict):
                    # COCO RLE
                    masks.append(mask_util.decode(segm))
                elif isinstance(segm, np.ndarray):
                    assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                        segm.ndim
                    )
                    # mask array
                    masks.append(segm)
                else:
                    raise ValueError(
                        "Cannot convert segmentation of type '{}' to BitMasks!"
                        "Supported types are: polygons as list[list[float] or ndarray],"
                        " COCO-style RLE as a dict, or a binary segmentation mask "
                        " in a 2D numpy array of shape HxW.".format(type(segm))
                    )
            # torch.from_numpy does not support array with negative stride.
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
            )
        target.gt_masks = masks

    if len(annos) and "keypoints" in annos[0]:
        kpts = [obj.get("keypoints", []) for obj in annos]
        target.gt_keypoints = Keypoints(kpts)

    return target
def annotations_to_instances_rotated(annos, image_size):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.
    Compared to `annotations_to_instances`, this function is for rotated boxes only

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width

    Returns:
        Instances:
            Containing fields "gt_boxes", "gt_classes",
            if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    target = Instances(image_size)

    # Rotated boxes are stored as-is (no BoxMode conversion), then clipped
    # in place to the image bounds.
    gt_boxes = RotatedBoxes([obj["bbox"] for obj in annos])
    target.gt_boxes = gt_boxes
    gt_boxes.clip(image_size)

    target.gt_classes = torch.tensor(
        [obj["category_id"] for obj in annos], dtype=torch.int64
    )
    return target
def filter_empty_instances(
    instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
):
    """
    Filter out empty instances in an `Instances` object.

    Args:
        instances (Instances):
        by_box (bool): whether to filter out instances with empty boxes
        by_mask (bool): whether to filter out instances with empty masks
        box_threshold (float): minimum width and height to be considered non-empty
        return_mask (bool): whether to return boolean mask of filtered instances

    Returns:
        Instances: the filtered instances.
        tensor[bool], optional: boolean mask of filtered instances
    """
    assert by_box or by_mask
    r = []
    if by_box:
        r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
    if instances.has("gt_masks") and by_mask:
        r.append(instances.gt_masks.nonempty())

    # TODO: can also filter visible keypoints

    if not r:
        # No filter applied (e.g. by_box=False and no gt_masks): keep everything.
        # Bug fix: this path previously ignored `return_mask`, so callers asking
        # for the mask received a bare `Instances` instead of a 2-tuple.
        if return_mask:
            return instances, torch.ones(len(instances), dtype=torch.bool)
        return instances
    m = r[0]
    for x in r[1:]:
        m = m & x
    if return_mask:
        return instances[m], m
    return instances[m]
def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
    """
    Args:
        dataset_names: list of dataset names

    Returns:
        list[int]: a list of size=#keypoints, storing the
        horizontally-flipped keypoint indices.
    """
    if isinstance(dataset_names, str):
        dataset_names = [dataset_names]

    # All datasets must agree on keypoint naming/flip metadata; then the
    # first dataset's metadata can be used for everyone.
    check_metadata_consistency("keypoint_names", dataset_names)
    check_metadata_consistency("keypoint_flip_map", dataset_names)

    meta = MetadataCatalog.get(dataset_names[0])
    names = meta.keypoint_names
    # TODO flip -> hflip
    # Make the flip map symmetric (left->right and right->left).
    flip_map = dict(meta.keypoint_flip_map)
    flip_map.update({v: k for k, v in flip_map.items()})
    # Names with no flip partner (e.g. nose) map to themselves.
    flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
    flip_indices = [names.index(i) for i in flipped_names]
    return flip_indices
def gen_crop_transform_with_instance(crop_size, image_size, instance):
    """
    Generate a CropTransform so that the cropping region contains
    the center of the given instance.

    Args:
        crop_size (tuple): h, w in pixels
        image_size (tuple): h, w
        instance (dict): an annotation dict of one instance, in Detectron2's
            dataset format.

    Returns:
        T.CropTransform: a randomly positioned crop whose region contains the
        instance's bbox center.
    """
    crop_size = np.asarray(crop_size, dtype=np.int32)
    bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
    center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
    assert (
        image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
    ), "The annotation bounding box is outside of the image!"
    assert (
        image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
    ), "Crop size is larger than image size!"

    # Valid crop origins: low enough that the crop can still contain the
    # center, but the crop must also stay inside the image.
    min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
    max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
    max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))

    # NOTE: uses the global numpy RNG, so results depend on the ambient seed.
    y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
    x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
    return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
def check_metadata_consistency(key, dataset_names):
    """
    Check that the datasets have consistent metadata.

    Args:
        key (str): a metadata key
        dataset_names (list[str]): a list of dataset names

    Raises:
        AttributeError: if the key does not exist in the metadata
        ValueError: if the given datasets do not have the same metadata values defined by key
    """
    if len(dataset_names) == 0:
        return
    logger = logging.getLogger(__name__)

    entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
    reference = entries_per_dataset[0]
    for name, entry in zip(dataset_names, entries_per_dataset):
        if entry == reference:
            continue
        # Log both mismatching values before failing so the user sees the diff.
        logger.error(
            "Metadata '{}' for dataset '{}' is '{}'".format(key, name, str(entry))
        )
        logger.error(
            "Metadata '{}' for dataset '{}' is '{}'".format(
                key, dataset_names[0], str(reference)
            )
        )
        raise ValueError("Datasets have different metadata '{}'!".format(key))
def build_augmentation(cfg, is_train):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.

    Returns:
        list[Augmentation]
    """
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    else:
        # At test time the shortest edge is a single fixed value, so "choice"
        # sampling degenerates to always picking that value.
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"
    augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
    # Random flipping is only applied during training.
    if is_train and cfg.INPUT.RANDOM_FLIP != "none":
        augmentation.append(
            T.RandomFlip(
                horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
                vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
            )
        )
    return augmentation
# Deprecated name kept so that existing code importing `build_transform_gen`
# continues to work.
build_transform_gen = build_augmentation
"""
Alias for backward-compatibility.
"""
|
evocodebench_data_183
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog
# Public API of this module; names not listed here are implementation details.
__all__ = [
    "SizeMismatchError",
    "convert_image_to_rgb",
    "check_image_size",
    "transform_proposals",
    "transform_instance_annotations",
    "annotations_to_instances",
    "annotations_to_instances_rotated",
    "build_augmentation",
    "build_transform_gen",
    "create_keypoint_hflip_indices",
    "filter_empty_instances",
    "read_image",
]
class SizeMismatchError(ValueError):
    """
    Raised when a loaded image has a different width/height than its annotation.
    """
# RGB <-> YUV conversion matrices for the SDTV BT.601 standard.
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
# https://www.exiv2.org/tags.html
_EXIF_ORIENT = 274  # exif 'Orientation' tag
def convert_PIL_to_numpy(image, format):
    """
    Convert a PIL image to a numpy array of the target format.

    Args:
        image (PIL.Image): a PIL image
        format (str): the format of output image; may be None to keep the
            image's own mode

    Returns:
        (np.ndarray): also see `read_image`
    """
    if format is not None:
        # PIL only supports RGB, so convert to RGB and flip channels over below
        conversion_format = format
        if format in ["BGR", "YUV-BT.601"]:
            conversion_format = "RGB"
        image = image.convert(conversion_format)
    image = np.asarray(image)
    # PIL squeezes out the channel dimension for "L", so make it HWC
    if format == "L":
        image = np.expand_dims(image, -1)
    # handle formats not supported by PIL
    elif format == "BGR":
        # flip channels if needed
        image = image[:, :, ::-1]
    elif format == "YUV-BT.601":
        # Rescale to [0, 1] first, then apply the BT.601 RGB->YUV matrix.
        image = image / 255.0
        image = np.dot(image, np.array(_M_RGB2YUV).T)
    return image
def convert_image_to_rgb(image, format):
    """
    Convert an image from the given format to RGB.

    Args:
        image (np.ndarray or Tensor): an HWC image
        format (str): the format of input image, also see `read_image`

    Returns:
        (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
    """
    # Normalize a torch tensor to a numpy array first.
    if isinstance(image, torch.Tensor):
        image = image.cpu().numpy()
    if format == "BGR":
        # BGR -> RGB is simply a channel reordering.
        return image[:, :, [2, 1, 0]]
    if format == "YUV-BT.601":
        # Invert the BT.601 transform, then rescale from [0, 1] back to [0, 255].
        return np.dot(image, np.array(_M_YUV2RGB).T) * 255.0
    # All remaining formats go through PIL, which requires uint8 input.
    if format == "L":
        image = image[:, :, 0]
    return np.asarray(Image.fromarray(image.astype(np.uint8), mode=format).convert("RGB"))
def _apply_exif_orientation(image):
    """
    Applies the exif orientation correctly.

    This code exists per the bug:
      https://github.com/python-pillow/Pillow/issues/3973
    with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
    various methods, especially `tobytes`

    Function based on:
      https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
      https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527

    Args:
        image (PIL.Image): a PIL image

    Returns:
        (PIL.Image): the PIL image with exif orientation applied, if applicable
    """
    # Older Pillow versions may lack getexif entirely.
    if not hasattr(image, "getexif"):
        return image
    try:
        exif = image.getexif()
    except Exception:  # https://github.com/facebookresearch/detectron2/issues/1885
        exif = None
    if exif is None:
        return image
    orientation = exif.get(_EXIF_ORIENT)
    # Map each EXIF orientation value (2-8) to the PIL transpose operation that
    # undoes it; value 1 (normal) and unknown values map to None.
    method = {
        2: Image.FLIP_LEFT_RIGHT,
        3: Image.ROTATE_180,
        4: Image.FLIP_TOP_BOTTOM,
        5: Image.TRANSPOSE,
        6: Image.ROTATE_270,
        7: Image.TRANSVERSE,
        8: Image.ROTATE_90,
    }.get(orientation)
    if method is not None:
        return image.transpose(method)
    return image
def read_image(file_name, format=None):
    """
    Read an image into the given format.
    Will apply rotation and flipping if the image has such exif information.

    Args:
        file_name (str): image file path
        format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".

    Returns:
        image (np.ndarray):
            an HWC image in the given format, which is 0-255, uint8 for
            supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
    """
    with PathManager.open(file_name, "rb") as f:
        image = Image.open(f)
        # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
        image = _apply_exif_orientation(image)
        # Convert inside the `with` block: PIL reads lazily, so the file must
        # still be open when pixel data is accessed.
        return convert_PIL_to_numpy(image, format)
def check_image_size(dataset_dict, image):
    """
    Verify that a loaded image matches the width/height recorded in the dict,
    and record the actual size when the dict has none.

    Args:
        dataset_dict (dict): an annotation dict; mutated in place.
        image (np.ndarray): the loaded HWC image.

    Raises:
        SizeMismatchError: if the image size disagrees with the annotation.
    """
    actual_wh = (image.shape[1], image.shape[0])
    if "width" in dataset_dict or "height" in dataset_dict:
        expected_wh = (dataset_dict["width"], dataset_dict["height"])
        if actual_wh != expected_wh:
            if "file_name" in dataset_dict:
                which_image = " for image " + dataset_dict["file_name"]
            else:
                which_image = ""
            raise SizeMismatchError(
                "Mismatched image shape{}, got {}, expect {}.".format(
                    which_image, actual_wh, expected_wh
                )
                + " Please check the width/height in your annotation."
            )
    # Record the true size so that boxes always remap to the original image.
    dataset_dict.setdefault("width", actual_wh[0])
    dataset_dict.setdefault("height", actual_wh[1])
def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
    """
    Apply transformations to the proposals in dataset_dict, if any.

    Args:
        dataset_dict (dict): a dict read from the dataset, possibly
            contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
        image_shape (tuple): height, width
        transforms (TransformList):
        proposal_topk (int): only keep top-K scoring proposals
        min_box_size (int): proposals with either side smaller than this
            threshold are removed

    The input dict is modified in-place, with abovementioned keys removed. A new
    key "proposals" will be added. Its value is an `Instances`
    object which contains the transformed proposals in its field
    "proposal_boxes" and "objectness_logits".
    """
    if "proposal_boxes" in dataset_dict:
        # Transform proposal boxes
        boxes = transforms.apply_box(
            BoxMode.convert(
                dataset_dict.pop("proposal_boxes"),
                dataset_dict.pop("proposal_bbox_mode"),
                BoxMode.XYXY_ABS,
            )
        )
        boxes = Boxes(boxes)
        objectness_logits = torch.as_tensor(
            dataset_dict.pop("proposal_objectness_logits").astype("float32")
        )
        # Clip to the image, then drop boxes that became (near-)empty, keeping
        # boxes and their scores aligned.
        boxes.clip(image_shape)
        keep = boxes.nonempty(threshold=min_box_size)
        boxes = boxes[keep]
        objectness_logits = objectness_logits[keep]
        proposals = Instances(image_shape)
        # NOTE(review): top-k slicing assumes proposals are already sorted by
        # objectness — confirm against the proposal producer.
        proposals.proposal_boxes = boxes[:proposal_topk]
        proposals.objectness_logits = objectness_logits[:proposal_topk]
        dataset_dict["proposals"] = proposals
def transform_instance_annotations(
    annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
    """
    Apply transforms to box, segmentation and keypoints annotations of a single instance.

    It will use `transforms.apply_box` for the box, and
    `transforms.apply_coords` for segmentation polygons & keypoints.
    If you need anything more specially designed for each data structure,
    you'll need to implement your own version of this function or the transforms.

    Args:
        annotation (dict): dict of instance annotations for a single instance.
            It will be modified in-place.
        transforms (TransformList or list[Transform]):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.

    Returns:
        dict:
            the same input dict with fields "bbox", "segmentation", "keypoints"
            transformed according to `transforms`.
            The "bbox_mode" field will be set to XYXY_ABS.
    """
    if isinstance(transforms, (tuple, list)):
        transforms = T.TransformList(transforms)
    # bbox is 1d (per-instance bounding box)
    bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
    # clip transformed bbox to image size
    bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
    # (h, w) + (h, w) reversed yields (w, h, w, h): the per-coordinate upper
    # bound for an XYXY box.
    annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
    annotation["bbox_mode"] = BoxMode.XYXY_ABS
    if "segmentation" in annotation:
        # each instance contains 1 or more polygons
        segm = annotation["segmentation"]
        if isinstance(segm, list):
            # polygons
            polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
            annotation["segmentation"] = [
                p.reshape(-1) for p in transforms.apply_polygons(polygons)
            ]
        elif isinstance(segm, dict):
            # RLE
            mask = mask_util.decode(segm)
            mask = transforms.apply_segmentation(mask)
            assert tuple(mask.shape[:2]) == image_size
            annotation["segmentation"] = mask
        else:
            raise ValueError(
                "Cannot transform segmentation of type '{}'!"
                "Supported types are: polygons as list[list[float] or ndarray],"
                " COCO-style RLE as a dict.".format(type(segm))
            )
    if "keypoints" in annotation:
        keypoints = transform_keypoint_annotations(
            annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
        )
        annotation["keypoints"] = keypoints
    return annotation
def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
    """
    Transform keypoint annotations of an image.
    If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)

    Args:
        keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
            Each point is represented by (x, y, visibility).
        transforms (TransformList):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
            When `transforms` includes horizontal flip, will use the index
            mapping to flip keypoints.
    """
    # (N*3,) -> (N, 3)
    keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
    keypoints_xy = transforms.apply_coords(keypoints[:, :2])
    # Set all out-of-boundary points to "unlabeled"
    inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
    inside = inside.all(axis=1)
    keypoints[:, :2] = keypoints_xy
    keypoints[:, 2][~inside] = 0
    # This assumes that HorizFlipTransform is the only one that does flip
    do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
    # Alternative way: check if probe points was horizontally flipped.
    # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
    # probe_aug = transforms.apply_coords(probe.copy())
    # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0])  # noqa
    # If flipped, swap each keypoint with its opposite-handed equivalent
    if do_hflip:
        if keypoint_hflip_indices is None:
            raise ValueError("Cannot flip keypoints without providing flip indices!")
        if len(keypoints) != len(keypoint_hflip_indices):
            raise ValueError(
                "Keypoint data has {} points, but metadata "
                "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
            )
        keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]
    # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
    keypoints[keypoints[:, 2] == 0] = 0
    return keypoints
def annotations_to_instances(annos, image_size, mask_format="polygon"):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width
        mask_format (str): "polygon" or "bitmask"; the representation used for
            the returned gt_masks.

    Returns:
        Instances:
            It will contain fields "gt_boxes", "gt_classes",
            "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    # An empty annotation list produces an empty (0, 4) box array.
    boxes = (
        np.stack(
            [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
        )
        if len(annos)
        else np.zeros((0, 4))
    )
    target = Instances(image_size)
    target.gt_boxes = Boxes(boxes)
    classes = [int(obj["category_id"]) for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes
    if len(annos) and "segmentation" in annos[0]:
        segms = [obj["segmentation"] for obj in annos]
        if mask_format == "polygon":
            try:
                masks = PolygonMasks(segms)
            except ValueError as e:
                raise ValueError(
                    "Failed to use mask_format=='polygon' from the given annotations!"
                ) from e
        else:
            assert mask_format == "bitmask", mask_format
            masks = []
            for segm in segms:
                if isinstance(segm, list):
                    # polygon
                    masks.append(polygons_to_bitmask(segm, *image_size))
                elif isinstance(segm, dict):
                    # COCO RLE
                    masks.append(mask_util.decode(segm))
                elif isinstance(segm, np.ndarray):
                    assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                        segm.ndim
                    )
                    # mask array
                    masks.append(segm)
                else:
                    raise ValueError(
                        "Cannot convert segmentation of type '{}' to BitMasks!"
                        "Supported types are: polygons as list[list[float] or ndarray],"
                        " COCO-style RLE as a dict, or a binary segmentation mask "
                        " in a 2D numpy array of shape HxW.".format(type(segm))
                    )
            # torch.from_numpy does not support array with negative stride.
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
            )
        target.gt_masks = masks
    if len(annos) and "keypoints" in annos[0]:
        kpts = [obj.get("keypoints", []) for obj in annos]
        target.gt_keypoints = Keypoints(kpts)
    return target
def annotations_to_instances_rotated(annos, image_size):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.
    Compared to `annotations_to_instances`, this function is for rotated boxes only

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width

    Returns:
        Instances:
            Containing fields "gt_boxes", "gt_classes",
            if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    boxes = [obj["bbox"] for obj in annos]
    target = Instances(image_size)
    # Boxes are assumed to already be in the 5-element rotated-box format;
    # clip centers to the image bounds.
    boxes = target.gt_boxes = RotatedBoxes(boxes)
    boxes.clip(image_size)
    classes = [obj["category_id"] for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes
    return target
def filter_empty_instances(
    instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
):
    """
    Filter out empty instances in an `Instances` object.

    Args:
        instances (Instances):
        by_box (bool): whether to filter out instances with empty boxes
        by_mask (bool): whether to filter out instances with empty masks
        box_threshold (float): minimum width and height to be considered non-empty
        return_mask (bool): whether to return boolean mask of filtered instances

    Returns:
        Instances: the filtered instances.
        tensor[bool], optional: boolean mask of filtered instances
    """
    assert by_box or by_mask
    # Collect one boolean keep-mask per enabled criterion.
    criteria = []
    if by_box:
        criteria.append(instances.gt_boxes.nonempty(threshold=box_threshold))
    if instances.has("gt_masks") and by_mask:
        criteria.append(instances.gt_masks.nonempty())
    # TODO: can also filter visible keypoints
    if not criteria:
        return instances
    # An instance survives only if it passes every criterion.
    keep = criteria[0]
    for extra in criteria[1:]:
        keep = keep & extra
    if return_mask:
        return instances[keep], keep
    return instances[keep]
def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
    """
    Compute, for every keypoint, the index of its horizontally-flipped partner.

    Args:
        dataset_names: list of dataset names (or a single name)

    Returns:
        list[int]: a list of size=#keypoints, storing the
            horizontally-flipped keypoint indices.
    """
    if isinstance(dataset_names, str):
        dataset_names = [dataset_names]
    # All datasets must agree on the keypoint schema before building one table.
    check_metadata_consistency("keypoint_names", dataset_names)
    check_metadata_consistency("keypoint_flip_map", dataset_names)
    meta = MetadataCatalog.get(dataset_names[0])
    names = meta.keypoint_names
    # TODO flip -> hflip
    # Make the flip map symmetric so lookups work in both directions.
    pairs = dict(meta.keypoint_flip_map)
    pairs.update({right: left for left, right in pairs.items()})
    # A keypoint absent from the map is its own mirror image.
    return [names.index(pairs.get(name, name)) for name in names]
def gen_crop_transform_with_instance(crop_size, image_size, instance):
    """
    Generate a CropTransform so that the cropping region contains
    the center of the given instance.

    Args:
        crop_size (tuple): h, w in pixels
        image_size (tuple): h, w
        instance (dict): an annotation dict of one instance, in Detectron2's
            dataset format.
    """
    crop_size = np.asarray(crop_size, dtype=np.int32)
    bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
    # Box center in (y, x) order, matching the (h, w) convention of the sizes.
    center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
    assert (
        image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
    ), "The annotation bounding box is outside of the image!"
    assert (
        image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
    ), "Crop size is larger than image size!"
    # Valid range for the crop's top-left corner: keep the instance center
    # inside the crop (min_yx) and the crop inside the image / not past the
    # center (max_yx).
    min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
    max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
    max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
    # Sample the top-left corner uniformly from the valid (inclusive) range.
    y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
    x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
    return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
def check_metadata_consistency(key, dataset_names):
    """
    Check that the datasets have consistent metadata.

    Args:
        key (str): a metadata key
        dataset_names (list[str]): a list of dataset names

    Raises:
        AttributeError: if the key does not exist in the metadata
        ValueError: if the given datasets do not have the same metadata values defined by key
    """
    if len(dataset_names) == 0:
        return
    logger = logging.getLogger(__name__)
    entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
    # Compare every dataset's value against the first one; log both before failing.
    for idx, entry in enumerate(entries_per_dataset):
        if entry != entries_per_dataset[0]:
            logger.error(
                "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
            )
            logger.error(
                "Metadata '{}' for dataset '{}' is '{}'".format(
                    key, dataset_names[0], str(entries_per_dataset[0])
                )
            )
            raise ValueError("Datasets have different metadata '{}'!".format(key))
def build_augmentation(cfg, is_train):
    """
    Create the default list of :class:`Augmentation` from config:
    shortest-edge resizing, plus random flipping during training.

    Returns:
        list[Augmentation]
    """
    if is_train:
        min_size, max_size = cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    else:
        # Test-time resizing always picks the single configured size.
        min_size, max_size = cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"
    augs = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
    # Flipping is a train-time-only augmentation.
    if is_train and cfg.INPUT.RANDOM_FLIP != "none":
        augs.append(
            T.RandomFlip(
                horizontal=(cfg.INPUT.RANDOM_FLIP == "horizontal"),
                vertical=(cfg.INPUT.RANDOM_FLIP == "vertical"),
            )
        )
    return augs
# Deprecated name kept so that existing imports of `build_transform_gen`
# keep working.
build_transform_gen = build_augmentation
"""
Alias for backward-compatibility.
"""
|
evocodebench_data_184
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
See "Data Augmentation" tutorial for an overview of the system:
https://detectron2.readthedocs.io/tutorials/augmentation.html
"""
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.transforms.transform import (
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
TransformList,
)
from PIL import Image
try:
    import cv2  # noqa
except ImportError:
    # OpenCV is an optional dependency at the moment
    pass
# Public API of this module.
__all__ = [
    "ExtentTransform",
    "ResizeTransform",
    "RotationTransform",
    "ColorTransform",
    "PILColorTransform",
]
class ExtentTransform(Transform):
    """
    Extracts a subregion from the source image and scales it to the output size.

    The fill color is used to map pixels from the source rect that fall outside
    the source image.

    See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
    """

    def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):
        """
        Args:
            src_rect (x0, y0, x1, y1): src coordinates
            output_size (h, w): dst image size
            interp: PIL interpolation methods
            fill: Fill color used when src_rect extends outside image
        """
        super().__init__()
        self._set_attributes(locals())

    def apply_image(self, img, interp=None):
        """Extract `src_rect` from `img` and resize it to the output size."""
        h, w = self.output_size
        # PIL needs a 2D "L" image for single-channel input; restore the
        # channel dim after the transform.
        if len(img.shape) > 2 and img.shape[2] == 1:
            pil_image = Image.fromarray(img[:, :, 0], mode="L")
        else:
            pil_image = Image.fromarray(img)
        pil_image = pil_image.transform(
            size=(w, h),
            method=Image.EXTENT,
            data=self.src_rect,
            resample=interp if interp else self.interp,
            fill=self.fill,
        )
        ret = np.asarray(pil_image)
        if len(img.shape) > 2 and img.shape[2] == 1:
            ret = np.expand_dims(ret, -1)
        return ret

    def apply_coords(self, coords):
        # Transform image center from source coordinates into output coordinates
        # and then map the new origin to the corner of the output image.
        h, w = self.output_size
        x0, y0, x1, y1 = self.src_rect
        new_coords = coords.astype(np.float32)
        new_coords[:, 0] -= 0.5 * (x0 + x1)
        new_coords[:, 1] -= 0.5 * (y0 + y1)
        new_coords[:, 0] *= w / (x1 - x0)
        new_coords[:, 1] *= h / (y1 - y0)
        new_coords[:, 0] += 0.5 * w
        new_coords[:, 1] += 0.5 * h
        return new_coords

    def apply_segmentation(self, segmentation):
        # Nearest-neighbor keeps label values intact (no interpolation blending).
        segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
        return segmentation
class ResizeTransform(Transform):
    """
    Resize the image to a target size.
    """

    def __init__(self, h, w, new_h, new_w, interp=None):
        """
        Args:
            h, w (int): original image size
            new_h, new_w (int): new image size
            interp: PIL interpolation methods, defaults to bilinear.
        """
        # TODO decide on PIL vs opencv
        super().__init__()
        if interp is None:
            interp = Image.BILINEAR
        self._set_attributes(locals())

    def apply_image(self, img, interp=None):
        assert img.shape[:2] == (self.h, self.w)
        assert len(img.shape) <= 4
        interp_method = interp if interp is not None else self.interp
        if img.dtype == np.uint8:
            # uint8 images go through PIL; single-channel needs the "L" mode
            # and the channel dim restored afterwards.
            if len(img.shape) > 2 and img.shape[2] == 1:
                pil_image = Image.fromarray(img[:, :, 0], mode="L")
            else:
                pil_image = Image.fromarray(img)
            pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
            ret = np.asarray(pil_image)
            if len(img.shape) > 2 and img.shape[2] == 1:
                ret = np.expand_dims(ret, -1)
        else:
            # PIL only supports uint8
            if any(x < 0 for x in img.strides):
                img = np.ascontiguousarray(img)
            img = torch.from_numpy(img)
            shape = list(img.shape)
            shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
            img = img.view(shape_4d).permute(2, 3, 0, 1)  # hw(c) -> nchw
            _PIL_RESIZE_TO_INTERPOLATE_MODE = {
                Image.NEAREST: "nearest",
                Image.BILINEAR: "bilinear",
                Image.BICUBIC: "bicubic",
            }
            mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method]
            # align_corners is only meaningful for interpolating modes.
            align_corners = None if mode == "nearest" else False
            img = F.interpolate(
                img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners
            )
            shape[:2] = (self.new_h, self.new_w)
            ret = img.permute(2, 3, 0, 1).view(shape).numpy()  # nchw -> hw(c)
        return ret

    def apply_coords(self, coords):
        # Scale x and y independently by the per-axis resize factor.
        coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
        coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
        return coords

    def apply_segmentation(self, segmentation):
        # Nearest-neighbor preserves discrete label values.
        segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
        return segmentation

    def inverse(self):
        # Resizing is invertible by swapping old and new sizes.
        return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
class RotationTransform(Transform):
    """
    This method returns a copy of this image, rotated the given
    number of degrees counter clockwise around its center.
    """

    def __init__(self, h, w, angle, expand=True, center=None, interp=None):
        """
        Args:
            h, w (int): original image size
            angle (float): degrees for rotation
            expand (bool): choose if the image should be resized to fit the whole
                rotated image (default), or simply cropped
            center (tuple (width, height)): coordinates of the rotation center
                if left to None, the center will be fit to the center of each image
                center has no effect if expand=True because it only affects shifting
            interp: cv2 interpolation method, default cv2.INTER_LINEAR
        """
        super().__init__()
        image_center = np.array((w / 2, h / 2))
        if center is None:
            center = image_center
        if interp is None:
            interp = cv2.INTER_LINEAR
        abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
        if expand:
            # find the new width and height bounds
            bound_w, bound_h = np.rint(
                [h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
            ).astype(int)
        else:
            bound_w, bound_h = w, h
        self._set_attributes(locals())
        self.rm_coords = self.create_rotation_matrix()
        # Needed because of this problem https://github.com/opencv/opencv/issues/11784
        self.rm_image = self.create_rotation_matrix(offset=-0.5)

    def apply_image(self, img, interp=None):
        """
        img should be a numpy array, formatted as Height * Width * Nchannels
        """
        # Rotation by a multiple of 360 degrees is the identity.
        if len(img) == 0 or self.angle % 360 == 0:
            return img
        assert img.shape[:2] == (self.h, self.w)
        interp = interp if interp is not None else self.interp
        return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)

    def apply_coords(self, coords):
        """
        coords should be a N * 2 array-like, containing N couples of (x, y) points
        """
        coords = np.asarray(coords, dtype=float)
        if len(coords) == 0 or self.angle % 360 == 0:
            return coords
        # cv2.transform expects an extra dimension; squeeze it back afterwards.
        return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]

    def apply_segmentation(self, segmentation):
        # Nearest-neighbor preserves discrete label values.
        segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
        return segmentation

    def create_rotation_matrix(self, offset=0):
        """Build the 2x3 affine matrix for this rotation (optionally offset)."""
        center = (self.center[0] + offset, self.center[1] + offset)
        rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
        if self.expand:
            # Find the coordinates of the center of rotation in the new image
            # The only point for which we know the future coordinates is the center of the image
            rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
            new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
            # shift the rotation center to the new coordinates
            rm[:, 2] += new_center
        return rm

    def inverse(self):
        """
        The inverse is to rotate it back with expand, and crop to get the original shape.
        """
        if not self.expand:  # Not possible to inverse if a part of the image is lost
            raise NotImplementedError()
        rotation = RotationTransform(
            self.bound_h, self.bound_w, -self.angle, True, None, self.interp
        )
        crop = CropTransform(
            (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
        )
        return TransformList([rotation, crop])
class ColorTransform(Transform):
    """
    Generic wrapper for any photometric transforms.
    These transformations should only affect the color space and
    not the coordinate space of the image (e.g. annotation
    coordinates such as bounding boxes should not be changed)
    """

    def __init__(self, op):
        """
        Args:
            op (Callable): operation to be applied to the image,
                which takes in an ndarray and returns an ndarray.
        """
        if not callable(op):
            raise ValueError("op parameter should be callable")
        super().__init__()
        self._set_attributes(locals())

    def apply_image(self, img):
        # Delegate the photometric change entirely to the wrapped callable.
        return self.op(img)

    def apply_coords(self, coords):
        # Color-only transforms never move coordinates.
        return coords

    def inverse(self):
        # Photometric changes are treated as non-invertible for geometry;
        # the geometric inverse is the identity.
        return NoOpTransform()

    def apply_segmentation(self, segmentation):
        # Labels are unaffected by color changes.
        return segmentation
class PILColorTransform(ColorTransform):
    """
    Generic wrapper for PIL Photometric image transforms,
    which affect the color space and not the coordinate
    space of the image
    """

    def __init__(self, op):
        """
        Args:
            op (Callable): operation to be applied to the image,
                which takes in a PIL Image and returns a transformed
                PIL Image.
                For reference on possible operations see:
                - https://pillow.readthedocs.io/en/stable/
        """
        if not callable(op):
            raise ValueError("op parameter should be callable")
        super().__init__(op)

    def apply_image(self, img):
        # Bridge ndarray <-> PIL so `op` can be a plain PIL operation.
        img = Image.fromarray(img)
        return np.asarray(super().apply_image(img))
def HFlip_rotated_box(transform, rotated_boxes):
    """
    Apply a horizontal flip to rotated boxes, in place.

    Args:
        transform: the flip transform; only its ``width`` attribute is used.
        rotated_boxes (ndarray): Nx5 floating point array of
            (x_center, y_center, width, height, angle_degrees) format
            in absolute coordinates.

    Returns:
        ndarray: the same array, with x_center mirrored and angle negated.
    """
    # Mirror the horizontal center around the image width.
    rotated_boxes[:, 0] *= -1
    rotated_boxes[:, 0] += transform.width
    # A horizontal flip reverses the sense of rotation.
    rotated_boxes[:, 4] *= -1
    return rotated_boxes
def Resize_rotated_box(transform, rotated_boxes):
    """
    Scale rotated boxes under a resize transform, in place. The width/height/
    angle updates use the approximation derived in :meth:`RotatedBoxes.scale`.

    Args:
        transform: the resize transform; uses ``w``, ``h``, ``new_w``, ``new_h``.
        rotated_boxes (ndarray): Nx5 floating point array of
            (x_center, y_center, width, height, angle_degrees) format
            in absolute coordinates.

    Returns:
        ndarray: the same array, rescaled.
    """
    sx = transform.new_w * 1.0 / transform.w
    sy = transform.new_h * 1.0 / transform.h
    # Centers scale independently along each axis.
    rotated_boxes[:, 0] *= sx
    rotated_boxes[:, 1] *= sy
    angle_rad = rotated_boxes[:, 4] * np.pi / 180.0
    cos_a = np.cos(angle_rad)
    sin_a = np.sin(angle_rad)
    # Project each side onto the axes to obtain its effective scale factor.
    rotated_boxes[:, 2] *= np.sqrt(np.square(sx * cos_a) + np.square(sy * sin_a))
    rotated_boxes[:, 3] *= np.sqrt(np.square(sx * sin_a) + np.square(sy * cos_a))
    # The angle changes because the two axes scale by different amounts.
    rotated_boxes[:, 4] = np.arctan2(sx * sin_a, sy * cos_a) * 180 / np.pi
    return rotated_boxes
# Register "rotated_box" handlers on the fvcore transforms so that
# transform.apply_rotated_box(boxes) dispatches to the functions above.
HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
ResizeTransform.register_type("rotated_box", Resize_rotated_box)
# not necessary any more with latest fvcore
NoOpTransform.register_type("rotated_box", lambda t, x: x)
|
evocodebench_data_185
|
# Copyright (c) Facebook, Inc. and its affiliates.
# -*- coding: utf-8 -*-
import typing
from typing import Any, List
import fvcore
from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table
from torch import nn
from detectron2.export import TracingAdapter
# Public API of this module.
__all__ = [
    "activation_count_operators",
    "flop_count_operators",
    "parameter_count_table",
    "parameter_count",
    "FlopCountAnalysis",
]
# Mode selectors for `_wrapper_count_operators`.
FLOPS_MODE = "flops"
ACTIVATIONS_MODE = "activations"
# Some extra ops to ignore from counting, including elementwise and reduction ops
_IGNORED_OPS = {
    "aten::add",
    "aten::add_",
    "aten::argmax",
    "aten::argsort",
    "aten::batch_norm",
    "aten::constant_pad_nd",
    "aten::div",
    "aten::div_",
    "aten::exp",
    "aten::log2",
    "aten::max_pool2d",
    "aten::meshgrid",
    "aten::mul",
    "aten::mul_",
    "aten::neg",
    "aten::nonzero_numpy",
    "aten::reciprocal",
    "aten::repeat_interleave",
    "aten::rsub",
    "aten::sigmoid",
    "aten::sigmoid_",
    "aten::softmax",
    "aten::sort",
    "aten::sqrt",
    "aten::sub",
    "torchvision::nms",  # TODO estimate flop for nms
}
class FlopCountAnalysis(fvcore.nn.FlopCountAnalysis):
    """
    Same as :class:`fvcore.nn.FlopCountAnalysis`, but supports detectron2 models.
    """

    def __init__(self, model, inputs):
        """
        Args:
            model (nn.Module):
            inputs (Any): inputs of the given model. Does not have to be tuple of tensors.
        """
        # TracingAdapter flattens arbitrary (non-tensor) inputs into a tensor
        # tuple that jit tracing can consume.
        wrapper = TracingAdapter(model, inputs, allow_non_tensor=True)
        super().__init__(wrapper, wrapper.flattened_inputs)
        # Setting a handle of None silences "unsupported op" warnings for ops
        # we deliberately do not count.
        self.set_op_handle(**{k: None for k in _IGNORED_OPS})
def flop_count_operators(model: nn.Module, inputs: list) -> typing.DefaultDict[str, float]:
    """
    Implement operator-level flops counting using jit.
    This is a wrapper of :class:`FlopCountAnalysis` and adds support for standard
    detection models in detectron2.
    Please use :class:`FlopCountAnalysis` for more advanced functionalities.

    Note:
        The function runs the input through the model to compute flops.
        The flops of a detection model is often input-dependent, for example,
        the flops of box & mask head depends on the number of proposals &
        the number of detected objects.
        Therefore, the flops counting using a single input may not accurately
        reflect the computation cost of a model. It's recommended to average
        across a number of inputs.

    Args:
        model: a detectron2 model that takes `list[dict]` as input.
        inputs (list[dict]): inputs to model, in detectron2's standard format.
            Only "image" key will be used.

    Returns:
        Counter: Gflop count per operator
    """
    # Fix: the old docstring documented a `supported_ops` parameter that this
    # function does not accept (use FlopCountAnalysis.set_op_handle instead).
    was_training = model.training
    # Flop counting must run in inference mode; restore the flag afterwards.
    model.eval()
    per_operator = FlopCountAnalysis(model, inputs).by_operator()
    model.train(was_training)
    # Convert raw flop counts to Gflops.
    return {op: flops / 1e9 for op, flops in per_operator.items()}
def activation_count_operators(
    model: nn.Module, inputs: list, **kwargs
) -> typing.DefaultDict[str, float]:
    """
    Count activations per operator using jit tracing.

    Thin wrapper around fvcore.nn.activation_count that understands standard
    detection models in detectron2.

    Note:
        The input is actually run through the model, so the reported counts
        are input-dependent: e.g. the activations of the box & mask heads
        depend on the number of proposals and detected objects.

    Args:
        model: a detectron2 model that takes `list[dict]` as input.
        inputs (list[dict]): inputs to model, in detectron2's standard format.
            Only "image" key will be used.

    Returns:
        Counter: activation count per operator
    """
    counts = _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs)
    return counts
def _wrapper_count_operators(
    model: nn.Module, inputs: list, mode: str, **kwargs
) -> typing.DefaultDict[str, float]:
    """
    Shared implementation of flop/activation counting via jit tracing.

    Args:
        model: a detectron2 model that takes `list[dict]` as input.
        inputs (list[dict]): a single-element batch in detectron2's standard
            format; only the "image" key is used.
        mode (str): FLOPS_MODE or ACTIVATIONS_MODE.
        kwargs: forwarded to the fvcore counting function; an optional
            "supported_ops" dict is merged with the ignored-op handles.

    Returns:
        Counter: per-operator counts as returned by fvcore.
    """
    # ignore some ops
    supported_ops = {k: lambda *args, **kwargs: {} for k in _IGNORED_OPS}
    supported_ops.update(kwargs.pop("supported_ops", {}))
    kwargs["supported_ops"] = supported_ops

    assert len(inputs) == 1, "Please use batch size=1"
    tensor_input = inputs[0]["image"]
    inputs = [{"image": tensor_input}]  # remove other keys, in case there are any

    old_train = model.training
    if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
        # Count on the wrapped module; DDP/DP wrappers cannot be traced.
        model = model.module
    wrapper = TracingAdapter(model, inputs)
    wrapper.eval()
    try:
        # Counting runs the model; restore the training flag even on failure.
        if mode == FLOPS_MODE:
            ret = flop_count(wrapper, (tensor_input,), **kwargs)
        elif mode == ACTIVATIONS_MODE:
            ret = activation_count(wrapper, (tensor_input,), **kwargs)
        else:
            raise NotImplementedError("Count for mode {} is not supported yet.".format(mode))
    finally:
        model.train(old_train)
    # compatible with change in fvcore
    if isinstance(ret, tuple):
        ret = ret[0]
    return ret
def find_unused_parameters(model: nn.Module, inputs: Any) -> List[str]:
    """
    Return the names of parameters that did not contribute to the loss.

    Runs one forward/backward pass and reports every parameter whose
    gradient is still ``None`` afterwards. All gradients are cleared both
    before and after the check.

    Args:
        model: a model in training mode that returns losses
        inputs: argument or a tuple of arguments. Inputs of the model

    Returns:
        list[str]: the name of unused parameters
    """
    assert model.training
    # Start from a clean slate so stale gradients don't mask unused params.
    for _, param in model.named_parameters():
        param.grad = None

    args = inputs if isinstance(inputs, tuple) else (inputs,)
    losses = model(*args)
    if isinstance(losses, dict):
        losses = sum(losses.values())
    losses.backward()

    # Anything that still has no gradient never contributed to the loss.
    unused = [name for name, param in model.named_parameters() if param.grad is None]
    # Leave the model as we found it: no gradients attached.
    for _, param in model.named_parameters():
        param.grad = None
    return unused
|
evocodebench_data_186
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
See "Data Augmentation" tutorial for an overview of the system:
https://detectron2.readthedocs.io/tutorials/augmentation.html
"""
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.transforms.transform import (
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
TransformList,
)
from PIL import Image
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
__all__ = [
"ExtentTransform",
"ResizeTransform",
"RotationTransform",
"ColorTransform",
"PILColorTransform",
]
class ExtentTransform(Transform):
    """
    Extracts a subregion from the source image and scales it to the output size.

    The fill color is used to map pixels from the source rect that fall outside
    the source image.

    See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
    """

    # NOTE: the default interp used to be Image.LINEAR, a duplicate alias of
    # Image.BILINEAR that Pillow removed in version 10.0; the value is identical.
    def __init__(self, src_rect, output_size, interp=Image.BILINEAR, fill=0):
        """
        Args:
            src_rect (x0, y0, x1, y1): src coordinates
            output_size (h, w): dst image size
            interp: PIL interpolation methods
            fill: Fill color used when src_rect extends outside image
        """
        super().__init__()
        self._set_attributes(locals())

    def apply_image(self, img, interp=None):
        h, w = self.output_size
        if len(img.shape) > 2 and img.shape[2] == 1:
            # PIL needs single-channel images as 2D "L" (grayscale) images.
            pil_image = Image.fromarray(img[:, :, 0], mode="L")
        else:
            pil_image = Image.fromarray(img)
        pil_image = pil_image.transform(
            size=(w, h),
            method=Image.EXTENT,
            data=self.src_rect,
            resample=interp if interp else self.interp,
            fill=self.fill,
        )
        ret = np.asarray(pil_image)
        if len(img.shape) > 2 and img.shape[2] == 1:
            # Restore the channel dimension dropped by the "L" conversion.
            ret = np.expand_dims(ret, -1)
        return ret

    def apply_coords(self, coords):
        # Transform image center from source coordinates into output coordinates
        # and then map the new origin to the corner of the output image.
        h, w = self.output_size
        x0, y0, x1, y1 = self.src_rect
        new_coords = coords.astype(np.float32)
        new_coords[:, 0] -= 0.5 * (x0 + x1)
        new_coords[:, 1] -= 0.5 * (y0 + y1)
        new_coords[:, 0] *= w / (x1 - x0)
        new_coords[:, 1] *= h / (y1 - y0)
        new_coords[:, 0] += 0.5 * w
        new_coords[:, 1] += 0.5 * h
        return new_coords

    def apply_segmentation(self, segmentation):
        # Nearest-neighbor keeps label ids intact.
        segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
        return segmentation
class ResizeTransform(Transform):
    """
    Resize the image to a target size.
    """

    def __init__(self, h, w, new_h, new_w, interp=None):
        """
        Args:
            h, w (int): original image size
            new_h, new_w (int): new image size
            interp: PIL interpolation methods, defaults to bilinear.
        """
        # TODO decide on PIL vs opencv
        super().__init__()
        if interp is None:
            interp = Image.BILINEAR
        self._set_attributes(locals())

    def apply_image(self, img, interp=None):
        """
        Resize an HxW(xC) array. uint8 images go through PIL; other dtypes
        (which PIL cannot hold) are resized with torch's F.interpolate.
        """
        assert img.shape[:2] == (self.h, self.w)
        assert len(img.shape) <= 4
        interp_method = interp if interp is not None else self.interp

        if img.dtype == np.uint8:
            if len(img.shape) > 2 and img.shape[2] == 1:
                # PIL needs single-channel images as 2D "L" (grayscale) images.
                pil_image = Image.fromarray(img[:, :, 0], mode="L")
            else:
                pil_image = Image.fromarray(img)
            pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
            ret = np.asarray(pil_image)
            if len(img.shape) > 2 and img.shape[2] == 1:
                # Restore the channel dimension dropped by the "L" conversion.
                ret = np.expand_dims(ret, -1)
        else:
            # PIL only supports uint8
            if any(x < 0 for x in img.strides):
                # torch.from_numpy rejects arrays with negative strides
                # (e.g. flipped views), so copy to a contiguous buffer first.
                img = np.ascontiguousarray(img)
            img = torch.from_numpy(img)
            shape = list(img.shape)
            # Pad to 4 dims (N, C singletons) so F.interpolate can operate.
            shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
            img = img.view(shape_4d).permute(2, 3, 0, 1)  # hw(c) -> nchw
            _PIL_RESIZE_TO_INTERPOLATE_MODE = {
                Image.NEAREST: "nearest",
                Image.BILINEAR: "bilinear",
                Image.BICUBIC: "bicubic",
            }
            mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method]
            # align_corners must be left unset for "nearest" mode.
            align_corners = None if mode == "nearest" else False
            img = F.interpolate(
                img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners
            )
            shape[:2] = (self.new_h, self.new_w)
            ret = img.permute(2, 3, 0, 1).view(shape).numpy()  # nchw -> hw(c)

        return ret

    def apply_coords(self, coords):
        # Scale x by the width ratio and y by the height ratio.
        coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
        coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
        return coords

    def apply_segmentation(self, segmentation):
        # Nearest-neighbor keeps label ids intact.
        segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
        return segmentation

    def inverse(self):
        # Swapping old/new sizes yields the exact inverse resize.
        return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
class RotationTransform(Transform):
    """
    This method returns a copy of this image, rotated the given
    number of degrees counter clockwise around its center.
    """

    def __init__(self, h, w, angle, expand=True, center=None, interp=None):
        """
        Args:
            h, w (int): original image size
            angle (float): degrees for rotation
            expand (bool): choose if the image should be resized to fit the whole
                rotated image (default), or simply cropped
            center (tuple (width, height)): coordinates of the rotation center
                if left to None, the center will be fit to the center of each image
                center has no effect if expand=True because it only affects shifting
            interp: cv2 interpolation method, default cv2.INTER_LINEAR
        """
        super().__init__()
        image_center = np.array((w / 2, h / 2))
        if center is None:
            center = image_center
        if interp is None:
            interp = cv2.INTER_LINEAR
        # |cos| and |sin| of the angle, used to size the expanded canvas.
        abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
        if expand:
            # find the new width and height bounds
            bound_w, bound_h = np.rint(
                [h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
            ).astype(int)
        else:
            bound_w, bound_h = w, h
        self._set_attributes(locals())
        self.rm_coords = self.create_rotation_matrix()
        # Needed because of this problem https://github.com/opencv/opencv/issues/11784
        self.rm_image = self.create_rotation_matrix(offset=-0.5)

    def apply_image(self, img, interp=None):
        """
        img should be a numpy array, formatted as Height * Width * Nchannels
        """
        # Fast path: empty image or full-turn rotation is a no-op.
        if len(img) == 0 or self.angle % 360 == 0:
            return img
        assert img.shape[:2] == (self.h, self.w)
        interp = interp if interp is not None else self.interp
        return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)

    def apply_coords(self, coords):
        """
        coords should be a N * 2 array-like, containing N couples of (x, y) points
        """
        coords = np.asarray(coords, dtype=float)
        if len(coords) == 0 or self.angle % 360 == 0:
            return coords
        # cv2.transform expects shape (N, 1, 2); squeeze the middle axis back out.
        return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]

    def apply_segmentation(self, segmentation):
        # Nearest-neighbor keeps label ids intact.
        segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
        return segmentation

    def create_rotation_matrix(self, offset=0):
        """Build the 2x3 affine rotation matrix, with centers shifted by `offset` pixels."""
        center = (self.center[0] + offset, self.center[1] + offset)
        rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
        if self.expand:
            # Find the coordinates of the center of rotation in the new image
            # The only point for which we know the future coordinates is the center of the image
            rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
            new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
            # shift the rotation center to the new coordinates
            rm[:, 2] += new_center
        return rm

    def inverse(self):
        """
        The inverse is to rotate it back with expand, and crop to get the original shape.
        """
        if not self.expand:  # Not possible to inverse if a part of the image is lost
            raise NotImplementedError()
        rotation = RotationTransform(
            self.bound_h, self.bound_w, -self.angle, True, None, self.interp
        )
        crop = CropTransform(
            (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
        )
        return TransformList([rotation, crop])
class ColorTransform(Transform):
    """
    Wraps an arbitrary photometric (color-only) operation as a Transform.

    The wrapped op must not move pixels: coordinate-based annotations
    (e.g. bounding boxes, segmentations) are returned unchanged.
    """

    def __init__(self, op):
        """
        Args:
            op (Callable): operation applied to the image; takes an ndarray
                and returns an ndarray.

        Raises:
            ValueError: if ``op`` is not callable.
        """
        if not callable(op):
            raise ValueError("op parameter should be callable")
        super().__init__()
        self._set_attributes(locals())

    def apply_image(self, img):
        # All pixel-value changes are delegated to the wrapped callable.
        return self.op(img)

    def apply_coords(self, coords):
        # Photometric only: geometry is untouched.
        return coords

    def apply_segmentation(self, segmentation):
        # Label maps carry no color information to modify.
        return segmentation

    def inverse(self):
        # The code treats the inverse as identity (NoOpTransform).
        return NoOpTransform()
class PILColorTransform(ColorTransform):
    """
    A :class:`ColorTransform` whose op operates on ``PIL.Image`` objects
    instead of numpy arrays. Only the color space is affected; coordinates
    are untouched.
    """

    def __init__(self, op):
        """
        Args:
            op (Callable): maps a PIL Image to a transformed PIL Image.
                For reference on possible operations see:
                - https://pillow.readthedocs.io/en/stable/

        Raises:
            ValueError: if ``op`` is not callable.
        """
        if not callable(op):
            raise ValueError("op parameter should be callable")
        super().__init__(op)

    def apply_image(self, img):
        # Convert ndarray -> PIL, run the wrapped op, and convert back.
        pil_img = Image.fromarray(img)
        return np.asarray(super().apply_image(pil_img))
def HFlip_rotated_box(transform, rotated_boxes):
    """
    Apply the horizontal flip transform on rotated boxes, in place.

    Args:
        transform: a transform providing the image ``width``.
        rotated_boxes (ndarray): Nx5 floating point array of
            (x_center, y_center, width, height, angle_degrees) format
            in absolute coordinates.

    Returns:
        ndarray: the same array, mutated.
    """
    # Mirror the center horizontally; width/height are unchanged.
    rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
    # Mirroring reverses the sense of rotation.
    rotated_boxes[:, 4] *= -1
    return rotated_boxes
def Resize_rotated_box(transform, rotated_boxes):
    """
    Apply the resizing transform on rotated boxes, in place. For details of how
    these (approximation) formulas are derived, please refer to
    :meth:`RotatedBoxes.scale`.

    Args:
        transform: a transform providing ``w``, ``h``, ``new_w``, ``new_h``.
        rotated_boxes (ndarray): Nx5 floating point array of
            (x_center, y_center, width, height, angle_degrees) format
            in absolute coordinates.

    Returns:
        ndarray: the same array, mutated.
    """
    scale_x = transform.new_w * 1.0 / transform.w
    scale_y = transform.new_h * 1.0 / transform.h
    # Centers scale directly with each axis.
    rotated_boxes[:, 0] *= scale_x
    rotated_boxes[:, 1] *= scale_y
    theta = rotated_boxes[:, 4] * np.pi / 180.0
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Box extents scale by the projection of (scale_x, scale_y) onto the
    # box's own axes; the angle follows the rotated unit vector.
    rotated_boxes[:, 2] *= np.sqrt(np.square(scale_x * cos_t) + np.square(scale_y * sin_t))
    rotated_boxes[:, 3] *= np.sqrt(np.square(scale_x * sin_t) + np.square(scale_y * cos_t))
    rotated_boxes[:, 4] = np.arctan2(scale_x * sin_t, scale_y * cos_t) * 180 / np.pi
    return rotated_boxes
# Teach the fvcore transforms how to handle the "rotated_box" data type
# using the two converters defined above.
HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
ResizeTransform.register_type("rotated_box", Resize_rotated_box)
# not necessary any more with latest fvcore
NoOpTransform.register_type("rotated_box", lambda t, x: x)
|
evocodebench_data_187
|
# Copyright (c) Facebook, Inc. and its affiliates.
import colorsys
import logging
import math
from enum import Enum, unique
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import numpy as np
import pycocotools.mask as mask_util
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Keypoints,
PolygonMasks,
RotatedBoxes,
)
from detectron2.utils.file_io import PathManager
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image
from .colormap import random_color
logger = logging.getLogger(__name__)
__all__ = ["ColorMode", "VisImage", "Visualizer"]
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
    """
    Enum of different color modes to use for instance visualizations.
    """

    # Compared against Visualizer._instance_mode in the draw_* methods below.
    IMAGE = 0
    """
    Picks a random color for every instance and overlay segmentations with low opacity.
    """
    SEGMENTATION = 1
    """
    Let instances of the same category have similar colors
    (from metadata.thing_colors), and overlay them with
    high opacity. This provides more attention on the quality of segmentation.
    """
    IMAGE_BW = 2
    """
    Same as IMAGE, but convert all areas without masks to gray-scale.
    Only available for drawing per-instance mask predictions.
    """
class GenericMask:
    """
    Lazily-converting container for one instance mask. Accepts polygons,
    a binary mask, or a COCO RLE, and converts between representations
    on demand.

    Attribute:
        polygons (list[ndarray]): list[ndarray]: polygons for this mask.
            Each ndarray has format [x, y, x, y, ...]
        mask (ndarray): a binary mask
    """

    def __init__(self, mask_or_polygons, height, width):
        """
        Args:
            mask_or_polygons: dict (COCO RLE), list of polygons, or (H, W) ndarray.
            height (int): image height; must match the mask shape if one is given.
            width (int): image width; must match the mask shape if one is given.
        """
        # Representations are filled in lazily by the properties below.
        self._mask = self._polygons = self._has_holes = None
        self.height = height
        self.width = width

        m = mask_or_polygons
        if isinstance(m, dict):
            # RLEs
            assert "counts" in m and "size" in m
            if isinstance(m["counts"], list):  # uncompressed RLEs
                h, w = m["size"]
                assert h == height and w == width
                m = mask_util.frPyObjects(m, h, w)
            self._mask = mask_util.decode(m)[:, :]
            return

        if isinstance(m, list):  # list[ndarray]
            self._polygons = [np.asarray(x).reshape(-1) for x in m]
            return

        if isinstance(m, np.ndarray):  # assumed to be a binary mask
            assert m.shape[1] != 2, m.shape
            assert m.shape == (
                height,
                width,
            ), f"mask shape: {m.shape}, target dims: {height}, {width}"
            self._mask = m.astype("uint8")
            return

        raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))

    @property
    def mask(self):
        # Rasterize from polygons on first access.
        if self._mask is None:
            self._mask = self.polygons_to_mask(self._polygons)
        return self._mask

    @property
    def polygons(self):
        # Vectorize from the binary mask on first access.
        if self._polygons is None:
            self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
        return self._polygons

    @property
    def has_holes(self):
        if self._has_holes is None:
            if self._mask is not None:
                self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
            else:
                self._has_holes = False  # if original format is polygon, does not have holes
        return self._has_holes

    def mask_to_polygons(self, mask):
        """Convert a binary mask to (polygons, has_holes) using OpenCV contours."""
        # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
        # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
        # Internal contours (holes) are placed in hierarchy-2.
        # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
        mask = np.ascontiguousarray(mask)  # some versions of cv2 does not support incontiguous arr
        res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        hierarchy = res[-1]
        if hierarchy is None:  # empty mask
            return [], False
        # A contour with a parent (column 3 >= 0) is an internal contour, i.e. a hole.
        has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
        res = res[-2]
        res = [x.flatten() for x in res]
        # These coordinates from OpenCV are integers in range [0, W-1 or H-1].
        # We add 0.5 to turn them into real-value coordinate space. A better solution
        # would be to first +0.5 and then dilate the returned polygon by 0.5.
        res = [x + 0.5 for x in res if len(x) >= 6]
        return res, has_holes

    def polygons_to_mask(self, polygons):
        """Rasterize polygons into a (height, width) binary mask via COCO RLE."""
        rle = mask_util.frPyObjects(polygons, self.height, self.width)
        rle = mask_util.merge(rle)
        return mask_util.decode(rle)[:, :]

    def area(self):
        # Number of foreground pixels.
        return self.mask.sum()

    def bbox(self):
        """Return the tight bounding box of the polygons, in XYXY format."""
        p = mask_util.frPyObjects(self.polygons, self.height, self.width)
        p = mask_util.merge(p)
        bbox = mask_util.toBbox(p)
        # toBbox returns XYWH; convert to XYXY.
        bbox[2] += bbox[0]
        bbox[3] += bbox[1]
        return bbox
class _PanopticPrediction:
    """
    Unify different panoptic annotation/prediction formats.
    """

    def __init__(self, panoptic_seg, segments_info, metadata=None):
        """
        Args:
            panoptic_seg (Tensor): (H, W) tensor of segment ids.
            segments_info (list[dict] or None): one dict per segment, with keys
                "id", "category_id", "isthing". If None, ids are decoded as
                category_id * label_divisor + instance_id using ``metadata``.
            metadata (Metadata): required only when segments_info is None.
        """
        if segments_info is None:
            assert metadata is not None
            # If "segments_info" is None, we assume "panoptic_img" is a
            # H*W int32 image storing the panoptic_id in the format of
            # category_id * label_divisor + instance_id. We reserve -1 for
            # VOID label.
            label_divisor = metadata.label_divisor
            segments_info = []
            for panoptic_label in np.unique(panoptic_seg.numpy()):
                if panoptic_label == -1:
                    # VOID region.
                    continue
                pred_class = panoptic_label // label_divisor
                isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
                segments_info.append(
                    {
                        "id": int(panoptic_label),
                        "category_id": int(pred_class),
                        "isthing": bool(isthing),
                    }
                )
            del metadata

        self._seg = panoptic_seg

        self._sinfo = {s["id"]: s for s in segments_info}  # seg id -> seg info
        segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
        areas = areas.numpy()
        # Sort segments by decreasing area so larger ones are drawn first.
        sorted_idxs = np.argsort(-areas)
        self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
        self._seg_ids = self._seg_ids.tolist()
        for sid, area in zip(self._seg_ids, self._seg_areas):
            if sid in self._sinfo:
                self._sinfo[sid]["area"] = float(area)

    def non_empty_mask(self):
        """
        Returns:
            (H, W) array, a mask for all pixels that have a prediction
        """
        empty_ids = []
        for id in self._seg_ids:
            if id not in self._sinfo:
                empty_ids.append(id)
        if len(empty_ids) == 0:
            return np.zeros(self._seg.shape, dtype=np.uint8)
        assert (
            len(empty_ids) == 1
        ), ">1 ids corresponds to no labels. This is currently not supported"
        # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
        # equivalent dtype.
        return (self._seg != empty_ids[0]).numpy().astype(bool)

    def semantic_masks(self):
        """Yield (binary mask, segment info) for each "stuff" segment, larger areas first."""
        for sid in self._seg_ids:
            sinfo = self._sinfo.get(sid)
            if sinfo is None or sinfo["isthing"]:
                # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
                continue
            yield (self._seg == sid).numpy().astype(bool), sinfo

    def instance_masks(self):
        """Yield (binary mask, segment info) for each non-empty "thing" segment."""
        for sid in self._seg_ids:
            sinfo = self._sinfo.get(sid)
            if sinfo is None or not sinfo["isthing"]:
                continue
            mask = (self._seg == sid).numpy().astype(bool)
            if mask.sum() > 0:
                yield mask, sinfo
def _create_text_labels(classes, scores, class_names, is_crowd=None):
    """
    Compose per-instance display strings from class ids, scores and crowd flags.

    Args:
        classes (list[int] or None):
        scores (list[float] or None):
        class_names (list[str] or None):
        is_crowd (list[bool] or None):

    Returns:
        list[str] or None
    """
    # Start from class names (or numeric ids when no names are available).
    if classes is None:
        labels = None
    elif class_names:
        labels = [class_names[i] for i in classes]
    else:
        labels = [str(i) for i in classes]

    # Append (or stand in as) percentage scores.
    if scores is not None:
        if labels is None:
            labels = [f"{s * 100:.0f}%" for s in scores]
        else:
            labels = [f"{l} {s * 100:.0f}%" for l, s in zip(labels, scores)]

    # Tag crowd annotations.
    if labels is not None and is_crowd is not None:
        labels = [l + "|crowd" if crowd else l for l, crowd in zip(labels, is_crowd)]
    return labels
class VisImage:
    """
    Matplotlib-backed canvas holding one image plus any primitives drawn on it.
    """

    def __init__(self, img, scale=1.0):
        """
        Args:
            img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255].
            scale (float): scale the input image
        """
        self.img = img
        self.scale = scale
        self.width, self.height = img.shape[1], img.shape[0]
        self._setup_figure(img)

    def _setup_figure(self, img):
        """
        Args:
            Same as in :meth:`__init__()`.

        Returns:
            fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
            ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
        """
        fig = mplfigure.Figure(frameon=False)
        self.dpi = fig.get_dpi()
        # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
        # (https://github.com/matplotlib/matplotlib/issues/15363)
        fig.set_size_inches(
            (self.width * self.scale + 1e-2) / self.dpi,
            (self.height * self.scale + 1e-2) / self.dpi,
        )
        self.canvas = FigureCanvasAgg(fig)
        # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
        # Axes span the full figure: no margins, no ticks.
        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
        ax.axis("off")
        self.fig = fig
        self.ax = ax
        self.reset_image(img)

    def reset_image(self, img):
        """
        Args:
            img: same as in __init__
        """
        img = img.astype("uint8")
        # extent maps pixel coordinates so that y increases downward (image convention).
        self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")

    def save(self, filepath):
        """
        Args:
            filepath (str): a string that contains the absolute path, including the file name, where
                the visualized image will be saved.
        """
        self.fig.savefig(filepath)

    def get_image(self):
        """
        Returns:
            ndarray:
                the visualized image of shape (H, W, 3) (RGB) in uint8 type.
                The shape is scaled w.r.t the input image using the given `scale` argument.
        """
        canvas = self.canvas
        s, (width, height) = canvas.print_to_buffer()
        # buf = io.BytesIO()  # works for cairo backend
        # canvas.print_rgba(buf)
        # width, height = self.width, self.height
        # s = buf.getvalue()

        buffer = np.frombuffer(s, dtype="uint8")

        img_rgba = buffer.reshape(height, width, 4)
        # Drop the alpha channel from the RGBA buffer.
        rgb, alpha = np.split(img_rgba, [3], axis=2)
        return rgb.astype("uint8")
class Visualizer:
"""
Visualizer that draws data about detection/segmentation on images.
It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
that draw primitive objects to images, as well as high-level wrappers like
`draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
that draw composite data in some pre-defined style.
Note that the exact visualization style for the high-level wrappers are subject to change.
Style such as color, opacity, label contents, visibility of labels, or even the visibility
of objects themselves (e.g. when the object is too small) may change according
to different heuristics, as long as the results still look visually reasonable.
To obtain a consistent style, you can implement custom drawing functions with the
abovementioned primitive methods instead. If you need more customized visualization
styles, you can process the data yourself following their format documented in
tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
intend to satisfy everyone's preference on drawing styles.
This visualizer focuses on high rendering quality rather than performance. It is not
designed to be used for real-time applications.
"""
# TODO implement a fast, rasterized version using OpenCV
    def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
        """
        Args:
            img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
                the height and width of the image respectively. C is the number of
                color channels. The image is required to be in RGB format since that
                is a requirement of the Matplotlib library. The image is also expected
                to be in the range [0, 255].
            metadata (Metadata): dataset metadata (e.g. class names and colors)
            scale (float): scale factor applied to the output visualization.
            instance_mode (ColorMode): defines one of the pre-defined style for drawing
                instances on an image.
        """
        self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
        if metadata is None:
            # Empty metadata: attribute lookups below fall back to defaults.
            metadata = MetadataCatalog.get("__nonexist__")
        self.metadata = metadata
        self.output = VisImage(self.img, scale=scale)
        self.cpu_device = torch.device("cpu")

        # too small texts are useless, therefore clamp to at least 10 // scale
        self._default_font_size = max(
            np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
        )
        self._instance_mode = instance_mode
        self.keypoint_threshold = _KEYPOINT_THRESHOLD
    def draw_instance_predictions(self, predictions):
        """
        Draw instance-level prediction results on an image.

        Args:
            predictions (Instances): the output of an instance detection/segmentation
                model. Following fields will be used to draw:
                "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").

        Returns:
            output (VisImage): image object with visualizations.
        """
        # Every field is optional; missing ones are simply not drawn.
        boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
        scores = predictions.scores if predictions.has("scores") else None
        classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
        labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
        keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None

        if predictions.has("pred_masks"):
            masks = np.asarray(predictions.pred_masks)
            masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
        else:
            masks = None

        if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
            # Per-category colors (jittered so instances stay distinguishable),
            # drawn with higher opacity.
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
            ]
            alpha = 0.8
        else:
            colors = None
            alpha = 0.5

        if self._instance_mode == ColorMode.IMAGE_BW:
            # Gray out everything outside the union of predicted masks.
            self.output.reset_image(
                self._create_grayscale_image(
                    (predictions.pred_masks.any(dim=0) > 0).numpy()
                    if predictions.has("pred_masks")
                    else None
                )
            )
            alpha = 0.3

        self.overlay_instances(
            masks=masks,
            boxes=boxes,
            labels=labels,
            keypoints=keypoints,
            assigned_colors=colors,
            alpha=alpha,
        )
        return self.output
    def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
        """
        Draw semantic segmentation predictions/labels.

        Args:
            sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
                Each value is the integer label of the pixel.
            area_threshold (int): segments with less than `area_threshold` are not drawn.
            alpha (float): the larger it is, the more opaque the segmentations are.

        Returns:
            output (VisImage): image object with visualizations.
        """
        if isinstance(sem_seg, torch.Tensor):
            sem_seg = sem_seg.numpy()
        # Draw larger segments first so smaller ones remain visible on top.
        labels, areas = np.unique(sem_seg, return_counts=True)
        sorted_idxs = np.argsort(-areas).tolist()
        labels = labels[sorted_idxs]
        # Labels beyond the known stuff classes are skipped entirely.
        for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
            try:
                mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
            except (AttributeError, IndexError):
                # No color available in metadata; draw_binary_mask picks one.
                mask_color = None

            binary_mask = (sem_seg == label).astype(np.uint8)
            text = self.metadata.stuff_classes[label]
            self.draw_binary_mask(
                binary_mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )
        return self.output
    def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
        """
        Draw panoptic prediction annotations or results.

        Args:
            panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
                segment.
            segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
                If it is a ``list[dict]``, each dict contains keys "id", "category_id".
                If None, category id of each pixel is computed by
                ``pixel // metadata.label_divisor``.
            area_threshold (int): stuff segments with less than `area_threshold` are not drawn.

        Returns:
            output (VisImage): image object with visualizations.
        """
        pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)

        if self._instance_mode == ColorMode.IMAGE_BW:
            # Gray out pixels that received no prediction at all.
            self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))

        # draw mask for all semantic segments first i.e. "stuff"
        for mask, sinfo in pred.semantic_masks():
            category_idx = sinfo["category_id"]
            try:
                mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
            except AttributeError:
                # Metadata carries no stuff_colors; draw_binary_mask picks one.
                mask_color = None

            text = self.metadata.stuff_classes[category_idx]
            self.draw_binary_mask(
                mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )

        # draw mask for all instances second
        all_instances = list(pred.instance_masks())
        if len(all_instances) == 0:
            return self.output
        masks, sinfo = list(zip(*all_instances))
        category_ids = [x["category_id"] for x in sinfo]

        try:
            scores = [x["score"] for x in sinfo]
        except KeyError:
            # Ground-truth segments carry no "score" key.
            scores = None
        labels = _create_text_labels(
            category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo]
        )

        try:
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
            ]
        except AttributeError:
            colors = None
        self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)

        return self.output

    draw_panoptic_seg_predictions = draw_panoptic_seg  # backward compatibility
    def draw_dataset_dict(self, dic):
        """
        Draw annotations/segmentaions in Detectron2 Dataset format.

        Args:
            dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.

        Returns:
            output (VisImage): image object with visualizations.
        """
        annos = dic.get("annotations", None)
        if annos:
            if "segmentation" in annos[0]:
                masks = [x["segmentation"] for x in annos]
            else:
                masks = None
            if "keypoints" in annos[0]:
                keypts = [x["keypoints"] for x in annos]
                # Reshape flat keypoint lists to (num_instances, K, 3).
                keypts = np.array(keypts).reshape(len(annos), -1, 3)
            else:
                keypts = None

            # Rotated boxes (5 values) are kept as-is; 4-value boxes are
            # converted to absolute XYXY.
            boxes = [
                BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS)
                if len(x["bbox"]) == 4
                else x["bbox"]
                for x in annos
            ]

            colors = None
            category_ids = [x["category_id"] for x in annos]
            if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
                colors = [
                    self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
                    for c in category_ids
                ]
            names = self.metadata.get("thing_classes", None)
            labels = _create_text_labels(
                category_ids,
                scores=None,
                class_names=names,
                is_crowd=[x.get("iscrowd", 0) for x in annos],
            )
            self.overlay_instances(
                labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
            )

        sem_seg = dic.get("sem_seg", None)
        if sem_seg is None and "sem_seg_file_name" in dic:
            # Load semantic labels from disk when not provided inline.
            with PathManager.open(dic["sem_seg_file_name"], "rb") as f:
                sem_seg = Image.open(f)
                sem_seg = np.asarray(sem_seg, dtype="uint8")
        if sem_seg is not None:
            self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)

        pan_seg = dic.get("pan_seg", None)
        if pan_seg is None and "pan_seg_file_name" in dic:
            # Load the panoptic PNG and decode its RGB encoding into segment ids.
            with PathManager.open(dic["pan_seg_file_name"], "rb") as f:
                pan_seg = Image.open(f)
                pan_seg = np.asarray(pan_seg)
                from panopticapi.utils import rgb2id

                pan_seg = rgb2id(pan_seg)
        if pan_seg is not None:
            segments_info = dic["segments_info"]
            pan_seg = torch.tensor(pan_seg)
            self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)
        return self.output
    def overlay_instances(
        self,
        *,
        boxes=None,
        labels=None,
        masks=None,
        keypoints=None,
        assigned_colors=None,
        alpha=0.5,
    ):
        """
        Args:
            boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
                or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
                or a :class:`RotatedBoxes`,
                or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
                for the N objects in a single image,
            labels (list[str]): the text to be displayed for each instance.
            masks (masks-like object): Supported types are:

                * :class:`detectron2.structures.PolygonMasks`,
                  :class:`detectron2.structures.BitMasks`.
                * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
                  The first level of the list corresponds to individual instances. The second
                  level to all the polygon that compose the instance, and the third level
                  to the polygon coordinates. The third level should have the format of
                  [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
                * list[ndarray]: each ndarray is a binary mask of shape (H, W).
                * list[dict]: each dict is a COCO-style RLE.
            keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
                where the N is the number of instances and K is the number of keypoints.
                The last dimension corresponds to (x, y, visibility or score).
            assigned_colors (list[matplotlib.colors]): a list of colors, where each color
                corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
                for full list of formats that the colors are accepted in.
            alpha (float): blending coefficient for masks; smaller is more transparent.

        Returns:
            output (VisImage): image object with visualizations.
        """
        # Infer the instance count from whichever inputs are given, and assert
        # that all provided inputs agree on it.
        num_instances = 0
        if boxes is not None:
            boxes = self._convert_boxes(boxes)
            num_instances = len(boxes)
        if masks is not None:
            masks = self._convert_masks(masks)
            if num_instances:
                assert len(masks) == num_instances
            else:
                num_instances = len(masks)
        if keypoints is not None:
            if num_instances:
                assert len(keypoints) == num_instances
            else:
                num_instances = len(keypoints)
            keypoints = self._convert_keypoints(keypoints)
        if labels is not None:
            assert len(labels) == num_instances
        if assigned_colors is None:
            assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
        if num_instances == 0:
            return self.output
        if boxes is not None and boxes.shape[1] == 5:
            # Nx5 boxes are rotated boxes: delegate to the rotated-box path.
            return self.overlay_rotated_instances(
                boxes=boxes, labels=labels, assigned_colors=assigned_colors
            )
        # Display in largest to smallest order to reduce occlusion.
        areas = None
        if boxes is not None:
            areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
        elif masks is not None:
            areas = np.asarray([x.area() for x in masks])
        if areas is not None:
            sorted_idxs = np.argsort(-areas).tolist()
            # Re-order overlapped instances in descending order.
            boxes = boxes[sorted_idxs] if boxes is not None else None
            labels = [labels[k] for k in sorted_idxs] if labels is not None else None
            masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
            assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
            keypoints = keypoints[sorted_idxs] if keypoints is not None else None
        for i in range(num_instances):
            color = assigned_colors[i]
            if boxes is not None:
                self.draw_box(boxes[i], edge_color=color)
            if masks is not None:
                for segment in masks[i].polygons:
                    self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
            if labels is not None:
                # first get a box
                if boxes is not None:
                    x0, y0, x1, y1 = boxes[i]
                    text_pos = (x0, y0)  # if drawing boxes, put text on the box corner.
                    horiz_align = "left"
                elif masks is not None:
                    # skip small mask without polygon
                    if len(masks[i].polygons) == 0:
                        continue
                    x0, y0, x1, y1 = masks[i].bbox()
                    # draw text in the center (defined by median) when box is not drawn
                    # median is less sensitive to outliers.
                    text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
                    horiz_align = "center"
                else:
                    continue  # drawing the box confidence for keypoints isn't very useful.
                # for small objects, draw text at the side to avoid occlusion
                instance_area = (y1 - y0) * (x1 - x0)
                if (
                    instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
                    or y1 - y0 < 40 * self.output.scale
                ):
                    if y1 >= self.output.height - 5:
                        text_pos = (x1, y0)
                    else:
                        text_pos = (x0, y1)
                # Font size scales with the instance's height relative to the image.
                height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
                lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
                font_size = (
                    np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
                    * 0.5
                    * self._default_font_size
                )
                self.draw_text(
                    labels[i],
                    text_pos,
                    color=lighter_color,
                    horizontal_alignment=horiz_align,
                    font_size=font_size,
                )
        # draw keypoints
        if keypoints is not None:
            for keypoints_per_instance in keypoints:
                self.draw_and_connect_keypoints(keypoints_per_instance)
        return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
"""
Args:
boxes (ndarray): an Nx5 numpy array of
(x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image.
labels (list[str]): the text to be displayed for each instance.
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = len(boxes)
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
# Display in largest to smallest order to reduce occlusion.
if boxes is not None:
areas = boxes[:, 2] * boxes[:, 3]
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs]
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
self.draw_rotated_box_with_label(
boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
)
return self.output
    def draw_and_connect_keypoints(self, keypoints):
        """
        Draws keypoints of an instance and follows the rules for keypoint connections
        to draw lines between appropriate keypoints. This follows color heuristics for
        line color.

        Args:
            keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
                and the last dimension corresponds to (x, y, probability).

        Returns:
            output (VisImage): image object with visualizations.
        """
        # Maps keypoint name -> (x, y), populated only for keypoints whose
        # confidence exceeds the threshold.
        visible = {}
        keypoint_names = self.metadata.get("keypoint_names")
        for idx, keypoint in enumerate(keypoints):
            # draw keypoint
            x, y, prob = keypoint
            if prob > self.keypoint_threshold:
                self.draw_circle((x, y), color=_RED)
                if keypoint_names:
                    keypoint_name = keypoint_names[idx]
                    visible[keypoint_name] = (x, y)
        # Connect pairs of visible keypoints per the dataset's connection rules.
        if self.metadata.get("keypoint_connection_rules"):
            for kp0, kp1, color in self.metadata.keypoint_connection_rules:
                if kp0 in visible and kp1 in visible:
                    x0, y0 = visible[kp0]
                    x1, y1 = visible[kp1]
                    color = tuple(x / 255.0 for x in color)
                    self.draw_line([x0, x1], [y0, y1], color=color)
        # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
        # Note that this strategy is specific to person keypoints.
        # For other keypoints, it should just do nothing
        try:
            ls_x, ls_y = visible["left_shoulder"]
            rs_x, rs_y = visible["right_shoulder"]
            mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
        except KeyError:
            # Shoulders not visible: skip the torso lines entirely.
            pass
        else:
            # draw line from nose to mid-shoulder
            nose_x, nose_y = visible.get("nose", (None, None))
            if nose_x is not None:
                self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
            try:
                # draw line from mid-shoulder to mid-hip
                lh_x, lh_y = visible["left_hip"]
                rh_x, rh_y = visible["right_hip"]
            except KeyError:
                pass
            else:
                mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
                self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
        return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0,
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
font_size (int, optional): font of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
rotation: rotation angle in degrees CCW
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
"""
Args:
box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
are the coordinates of the image's top left corner. x1 and y1 are the
coordinates of the image's bottom right corner.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
Returns:
output (VisImage): image object with box drawn.
"""
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
    def draw_rotated_box_with_label(
        self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
    ):
        """
        Draw a rotated box with label on its top-left corner.

        Args:
            rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
                where cnt_x and cnt_y are the center coordinates of the box.
                w and h are the width and height of the box. angle represents how
                many degrees the box is rotated CCW with regard to the 0-degree box.
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            edge_color: color of the outline of the box. Refer to `matplotlib.colors`
                for full list of formats that are accepted.
            line_style (string): the string to use to create the outline of the boxes.
            label (string): label for rotated box. It will not be rendered when set to None.

        Returns:
            output (VisImage): image object with box drawn.
        """
        cnt_x, cnt_y, w, h, angle = rotated_box
        area = w * h
        # use thinner lines when the box is small
        linewidth = self._default_font_size / (
            6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
        )
        theta = angle * math.pi / 180.0
        c = math.cos(theta)
        s = math.sin(theta)
        # Corners of the unrotated box relative to its center.
        rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
        # x: left->right ; y: top->down
        # Rotate each corner CCW by `angle` and translate to the box center.
        rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
        for k in range(4):
            j = (k + 1) % 4
            # Draw the 4 edges; edge k==1 is dashed to mark the box orientation.
            self.draw_line(
                [rotated_rect[k][0], rotated_rect[j][0]],
                [rotated_rect[k][1], rotated_rect[j][1]],
                color=edge_color,
                linestyle="--" if k == 1 else line_style,
                linewidth=linewidth,
            )
        if label is not None:
            text_pos = rotated_rect[1]  # topleft corner
            # Font size scales with the box height relative to the image.
            height_ratio = h / np.sqrt(self.output.height * self.output.width)
            label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
            font_size = (
                np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
            )
            self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
        return self.output
def draw_circle(self, circle_coord, color, radius=3):
"""
Args:
circle_coord (list(int) or tuple(int)): contains the x and y coordinates
of the center of the circle.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
radius (int): radius of the circle.
Returns:
output (VisImage): image object with box drawn.
"""
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
"""
Args:
x_data (list[int]): a list containing x values of all the points being drawn.
Length of list should match the length of y_data.
y_data (list[int]): a list containing y values of all the points being drawn.
Length of list should match the length of x_data.
color: color of the line. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
for a full list of formats that are accepted.
linewidth (float or None): width of the line. When it's None,
a default value will be computed and used.
Returns:
output (VisImage): image object with line drawn.
"""
if linewidth is None:
linewidth = self._default_font_size / 3
linewidth = max(linewidth, 1)
self.output.ax.add_line(
mpl.lines.Line2D(
x_data,
y_data,
linewidth=linewidth * self.output.scale,
color=color,
linestyle=linestyle,
)
)
return self.output
    def draw_binary_mask(
        self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10
    ):
        """
        Args:
            binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
                W is the image width. Each value in the array is either a 0 or 1 value of uint8
                type.
            color: color of the mask. Refer to `matplotlib.colors` for a full list of
                formats that are accepted. If None, will pick a random color.
            edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            text (str): if None, will be drawn on the object
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            area_threshold (float): a connected component smaller than this area will not be shown.

        Returns:
            output (VisImage): image object with mask drawn.
        """
        if color is None:
            color = random_color(rgb=True, maximum=1)
        color = mplc.to_rgb(color)
        has_valid_segment = False
        binary_mask = binary_mask.astype("uint8")  # opencv needs uint8
        mask = GenericMask(binary_mask, self.output.height, self.output.width)
        shape2d = (binary_mask.shape[0], binary_mask.shape[1])
        if not mask.has_holes:
            # draw polygons for regular masks
            for segment in mask.polygons:
                # Skip connected components smaller than the area threshold.
                area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
                if area < (area_threshold or 0):
                    continue
                has_valid_segment = True
                segment = segment.reshape(-1, 2)
                self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
        else:
            # Masks with holes cannot be drawn as simple filled polygons;
            # paint a translucent RGBA layer instead.
            # TODO: Use Path/PathPatch to draw vector graphics:
            # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
            rgba = np.zeros(shape2d + (4,), dtype="float32")
            rgba[:, :, :3] = color
            rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
            has_valid_segment = True
            self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
        if text is not None and has_valid_segment:
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            self._draw_text_in_mask(binary_mask, text, lighter_color)
        return self.output
def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):
"""
Args:
soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].
color: color of the mask. Refer to `matplotlib.colors` for a full list of
formats that are accepted. If None, will pick a random color.
text (str): if None, will be drawn on the object
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with mask drawn.
"""
if color is None:
color = random_color(rgb=True, maximum=1)
color = mplc.to_rgb(color)
shape2d = (soft_mask.shape[0], soft_mask.shape[1])
rgba = np.zeros(shape2d + (4,), dtype="float32")
rgba[:, :, :3] = color
rgba[:, :, 3] = soft_mask * alpha
self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
if text is not None:
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
binary_mask = (soft_mask > 0.5).astype("uint8")
self._draw_text_in_mask(binary_mask, text, lighter_color)
return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
"""
Args:
segment: numpy array of shape Nx2, containing all the points in the polygon.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted. If not provided, a darker shade
of the polygon color will be used instead.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with polygon drawn.
"""
if edge_color is None:
# make edge color darker than the polygon color
if alpha > 0.8:
edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
else:
edge_color = color
edge_color = mplc.to_rgb(edge_color) + (1,)
polygon = mpl.patches.Polygon(
segment,
fill=True,
facecolor=mplc.to_rgb(color) + (alpha,),
edgecolor=edge_color,
linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
)
self.output.ax.add_patch(polygon)
return self.output
"""
Internal methods:
"""
def _jitter(self, color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def _create_grayscale_image(self, mask=None):
"""
Create a grayscale version of the original image.
The colors in masked area, if given, will be kept.
"""
img_bw = self.img.astype("f4").mean(axis=2)
img_bw = np.stack([img_bw] * 3, axis=2)
if mask is not None:
img_bw[mask] = self.img[mask]
return img_bw
def _change_color_brightness(self, color, brightness_factor):
"""
Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
less or more saturation than the original color.
Args:
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
0 will correspond to no change, a factor in [-1.0, 0) range will result in
a darker color and a factor in (0, 1.0] range will result in a lighter color.
Returns:
modified_color (tuple[double]): a tuple containing the RGB values of the
modified color. Each value in the tuple is in the [0.0, 1.0] range.
"""
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
color = mplc.to_rgb(color)
polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
return modified_color
def _convert_boxes(self, boxes):
"""
Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
"""
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
return boxes.tensor.detach().numpy()
else:
return np.asarray(boxes)
def _convert_masks(self, masks_or_polygons):
"""
Convert different format of masks or polygons to a tuple of masks and polygons.
Returns:
list[GenericMask]:
"""
m = masks_or_polygons
if isinstance(m, PolygonMasks):
m = m.polygons
if isinstance(m, BitMasks):
m = m.tensor.numpy()
if isinstance(m, torch.Tensor):
m = m.numpy()
ret = []
for x in m:
if isinstance(x, GenericMask):
ret.append(x)
else:
ret.append(GenericMask(x, self.output.height, self.output.width))
return ret
    def _draw_text_in_mask(self, binary_mask, text, color):
        """
        Find proper places to draw text given a binary mask.
        """
        # TODO sometimes drawn on wrong objects. the heuristics here can improve.
        _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
        if stats[1:, -1].size == 0:
            # No foreground components (label 0 is the background) — nothing to label.
            return
        largest_component_id = np.argmax(stats[1:, -1]) + 1

        # draw text on the largest component, as well as other very large components.
        for cid in range(1, _num_cc):
            if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
                # median is more stable than centroid
                # center = centroids[largest_component_id]
                center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
                self.draw_text(text, center, color=color)
def _convert_keypoints(self, keypoints):
if isinstance(keypoints, Keypoints):
keypoints = keypoints.tensor
keypoints = np.asarray(keypoints)
return keypoints
def get_output(self):
"""
Returns:
output (VisImage): the image output containing the visualizations added
to the image.
"""
return self.output
|
evocodebench_data_188
|
# Copyright (c) Facebook, Inc. and its affiliates.
import colorsys
import logging
import math
from enum import Enum, unique
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import numpy as np
import pycocotools.mask as mask_util
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Keypoints,
PolygonMasks,
RotatedBoxes,
)
from detectron2.utils.file_io import PathManager
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image
from .colormap import random_color
logger = logging.getLogger(__name__)
__all__ = ["ColorMode", "VisImage", "Visualizer"]
# Instances with a (scaled) box area below this threshold get their label
# drawn beside the box instead of on its top-left corner.
_SMALL_OBJECT_AREA_THRESH = 1000
# Connected components larger than this always get their own text label.
_LARGE_MASK_AREA_THRESH = 120000
# Colors are RGB tuples in the [0, 1] range.
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
# Keypoints with a confidence at or below this value are not drawn.
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
    """
    Enum of different color modes to use for instance visualizations.
    """

    IMAGE = 0
    """
    Picks a random color for every instance and overlay segmentations with low opacity.
    """
    SEGMENTATION = 1
    """
    Let instances of the same category have similar colors
    (from metadata.thing_colors), and overlay them with
    high opacity. This provides more attention on the quality of segmentation.
    """
    IMAGE_BW = 2
    """
    Same as IMAGE, but convert all areas without masks to gray-scale.
    Only available for drawing per-instance mask predictions.
    """
class GenericMask:
    """
    A mask stored in either polygon or bitmask form, converted lazily
    between the two representations on demand.

    Attribute:
        polygons (list[ndarray]): list[ndarray]: polygons for this mask.
            Each ndarray has format [x, y, x, y, ...]
        mask (ndarray): a binary mask
    """

    def __init__(self, mask_or_polygons, height, width):
        # Lazily-computed representations; filled in by the properties below.
        self._mask = self._polygons = self._has_holes = None
        self.height = height
        self.width = width

        m = mask_or_polygons
        if isinstance(m, dict):
            # RLEs
            assert "counts" in m and "size" in m
            if isinstance(m["counts"], list):  # uncompressed RLEs
                h, w = m["size"]
                assert h == height and w == width
                m = mask_util.frPyObjects(m, h, w)
            self._mask = mask_util.decode(m)[:, :]
            return

        if isinstance(m, list):  # list[ndarray]
            self._polygons = [np.asarray(x).reshape(-1) for x in m]
            return

        if isinstance(m, np.ndarray):  # assumed to be a binary mask
            assert m.shape[1] != 2, m.shape
            assert m.shape == (
                height,
                width,
            ), f"mask shape: {m.shape}, target dims: {height}, {width}"
            self._mask = m.astype("uint8")
            return

        raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))

    @property
    def mask(self):
        # Rasterize from polygons on first access.
        if self._mask is None:
            self._mask = self.polygons_to_mask(self._polygons)
        return self._mask

    @property
    def polygons(self):
        # Trace polygons from the binary mask on first access.
        if self._polygons is None:
            self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
        return self._polygons

    @property
    def has_holes(self):
        if self._has_holes is None:
            if self._mask is not None:
                self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
            else:
                self._has_holes = False  # if original format is polygon, does not have holes
        return self._has_holes

    def mask_to_polygons(self, mask):
        # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
        # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
        # Internal contours (holes) are placed in hierarchy-2.
        # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
        mask = np.ascontiguousarray(mask)  # some versions of cv2 does not support incontiguous arr
        res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        hierarchy = res[-1]
        if hierarchy is None:  # empty mask
            return [], False
        # A contour with a parent (4th column >= 0) is an internal hole.
        has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
        res = res[-2]
        res = [x.flatten() for x in res]
        # These coordinates from OpenCV are integers in range [0, W-1 or H-1].
        # We add 0.5 to turn them into real-value coordinate space. A better solution
        # would be to first +0.5 and then dilate the returned polygon by 0.5.
        res = [x + 0.5 for x in res if len(x) >= 6]
        return res, has_holes

    def polygons_to_mask(self, polygons):
        # Rasterize polygons by round-tripping through COCO RLE encoding.
        rle = mask_util.frPyObjects(polygons, self.height, self.width)
        rle = mask_util.merge(rle)
        return mask_util.decode(rle)[:, :]

    def area(self):
        # Number of foreground pixels.
        return self.mask.sum()

    def bbox(self):
        # Tight bounding box over all polygons, as (x0, y0, x1, y1).
        p = mask_util.frPyObjects(self.polygons, self.height, self.width)
        p = mask_util.merge(p)
        bbox = mask_util.toBbox(p)
        # toBbox returns (x, y, w, h); convert to corner form.
        bbox[2] += bbox[0]
        bbox[3] += bbox[1]
        return bbox
class _PanopticPrediction:
"""
Unify different panoptic annotation/prediction formats
"""
def __init__(self, panoptic_seg, segments_info, metadata=None):
if segments_info is None:
assert metadata is not None
# If "segments_info" is None, we assume "panoptic_img" is a
# H*W int32 image storing the panoptic_id in the format of
# category_id * label_divisor + instance_id. We reserve -1 for
# VOID label.
label_divisor = metadata.label_divisor
segments_info = []
for panoptic_label in np.unique(panoptic_seg.numpy()):
if panoptic_label == -1:
# VOID region.
continue
pred_class = panoptic_label // label_divisor
isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
segments_info.append(
{
"id": int(panoptic_label),
"category_id": int(pred_class),
"isthing": bool(isthing),
}
)
del metadata
self._seg = panoptic_seg
self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
areas = areas.numpy()
sorted_idxs = np.argsort(-areas)
self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
self._seg_ids = self._seg_ids.tolist()
for sid, area in zip(self._seg_ids, self._seg_areas):
if sid in self._sinfo:
self._sinfo[sid]["area"] = float(area)
def non_empty_mask(self):
"""
Returns:
(H, W) array, a mask for all pixels that have a prediction
"""
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
assert (
len(empty_ids) == 1
), ">1 ids corresponds to no labels. This is currently not supported"
return (self._seg != empty_ids[0]).numpy().astype(np.bool)
def semantic_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or sinfo["isthing"]:
# Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
continue
yield (self._seg == sid).numpy().astype(np.bool), sinfo
def instance_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or not sinfo["isthing"]:
continue
mask = (self._seg == sid).numpy().astype(np.bool)
if mask.sum() > 0:
yield mask, sinfo
def _create_text_labels(classes, scores, class_names, is_crowd=None):
"""
Args:
classes (list[int] or None):
scores (list[float] or None):
class_names (list[str] or None):
is_crowd (list[bool] or None):
Returns:
list[str] or None
"""
labels = None
if classes is not None:
if class_names is not None and len(class_names) > 0:
labels = [class_names[i] for i in classes]
else:
labels = [str(i) for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
if labels is not None and is_crowd is not None:
labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)]
return labels
class VisImage:
    # Thin wrapper around a matplotlib Figure/Axes pair that renders an image
    # plus overlaid drawing primitives at a given scale.
    def __init__(self, img, scale=1.0):
        """
        Args:
            img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255].
            scale (float): scale the input image
        """
        self.img = img
        self.scale = scale
        self.width, self.height = img.shape[1], img.shape[0]
        self._setup_figure(img)

    def _setup_figure(self, img):
        """
        Args:
            Same as in :meth:`__init__()`.

        Returns:
            fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
            ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
        """
        fig = mplfigure.Figure(frameon=False)
        self.dpi = fig.get_dpi()
        # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
        # (https://github.com/matplotlib/matplotlib/issues/15363)
        fig.set_size_inches(
            (self.width * self.scale + 1e-2) / self.dpi,
            (self.height * self.scale + 1e-2) / self.dpi,
        )
        self.canvas = FigureCanvasAgg(fig)
        # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
        # Axes fill the entire figure; hide ticks and frame so only the image shows.
        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
        ax.axis("off")
        self.fig = fig
        self.ax = ax
        self.reset_image(img)

    def reset_image(self, img):
        """
        Args:
            img: same as in __init__
        """
        img = img.astype("uint8")
        # extent maps pixel coordinates so that y grows downward, matching images.
        self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")

    def save(self, filepath):
        """
        Args:
            filepath (str): a string that contains the absolute path, including the file name, where
                the visualized image will be saved.
        """
        self.fig.savefig(filepath)

    def get_image(self):
        """
        Returns:
            ndarray:
                the visualized image of shape (H, W, 3) (RGB) in uint8 type.
                The shape is scaled w.r.t the input image using the given `scale` argument.
        """
        canvas = self.canvas
        s, (width, height) = canvas.print_to_buffer()
        # buf = io.BytesIO()  # works for cairo backend
        # canvas.print_rgba(buf)
        # width, height = self.width, self.height
        # s = buf.getvalue()
        buffer = np.frombuffer(s, dtype="uint8")
        img_rgba = buffer.reshape(height, width, 4)
        # Drop the alpha channel; callers expect plain RGB.
        rgb, alpha = np.split(img_rgba, [3], axis=2)
        return rgb.astype("uint8")
class Visualizer:
    """
    Visualizer that draws data about detection/segmentation on images.
    It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
    that draw primitive objects to images, as well as high-level wrappers like
    `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
    that draw composite data in some pre-defined style.
    Note that the exact visualization style for the high-level wrappers are subject to change.
    Style such as color, opacity, label contents, visibility of labels, or even the visibility
    of objects themselves (e.g. when the object is too small) may change according
    to different heuristics, as long as the results still look visually reasonable.
    To obtain a consistent style, you can implement custom drawing functions with the
    abovementioned primitive methods instead. If you need more customized visualization
    styles, you can process the data yourself following their format documented in
    tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
    intend to satisfy everyone's preference on drawing styles.
    This visualizer focuses on high rendering quality rather than performance. It is not
    designed to be used for real-time applications.
    """
    # TODO implement a fast, rasterized version using OpenCV
    def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
        """
        Args:
            img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
                the height and width of the image respectively. C is the number of
                color channels. The image is required to be in RGB format since that
                is a requirement of the Matplotlib library. The image is also expected
                to be in the range [0, 255].
            metadata (Metadata): dataset metadata (e.g. class names and colors)
            scale (float): scale factor applied to the rendered visualization.
            instance_mode (ColorMode): defines one of the pre-defined style for drawing
                instances on an image.
        """
        # Clip before the uint8 cast so out-of-range float inputs do not wrap around.
        self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
        if metadata is None:
            # An empty Metadata object: all lookups fall back to defaults.
            metadata = MetadataCatalog.get("__nonexist__")
        self.metadata = metadata
        self.output = VisImage(self.img, scale=scale)
        self.cpu_device = torch.device("cpu")
        # too small texts are useless, therefore clamp to 9
        self._default_font_size = max(
            np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
        )
        self._instance_mode = instance_mode
        self.keypoint_threshold = _KEYPOINT_THRESHOLD
    def draw_instance_predictions(self, predictions):
        """
        Draw instance-level prediction results on an image.

        Args:
            predictions (Instances): the output of an instance detection/segmentation
                model. Following fields will be used to draw:
                "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").

        Returns:
            output (VisImage): image object with visualizations.
        """
        # Every prediction field is optional; missing ones are simply not drawn.
        boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
        scores = predictions.scores if predictions.has("scores") else None
        classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
        labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
        keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
        if predictions.has("pred_masks"):
            masks = np.asarray(predictions.pred_masks)
            masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
        else:
            masks = None
        if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
            # Per-category colors, jittered so instances of the same class stay
            # distinguishable; drawn with higher opacity.
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
            ]
            alpha = 0.8
        else:
            colors = None
            alpha = 0.5
        if self._instance_mode == ColorMode.IMAGE_BW:
            # Convert everything outside the predicted masks to grayscale.
            self.output.reset_image(
                self._create_grayscale_image(
                    (predictions.pred_masks.any(dim=0) > 0).numpy()
                    if predictions.has("pred_masks")
                    else None
                )
            )
            alpha = 0.3
        self.overlay_instances(
            masks=masks,
            boxes=boxes,
            labels=labels,
            keypoints=keypoints,
            assigned_colors=colors,
            alpha=alpha,
        )
        return self.output
def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
"""
Draw semantic segmentation predictions/labels.
Args:
sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
Each value is the integer label of the pixel.
area_threshold (int): segments with less than `area_threshold` are not drawn.
alpha (float): the larger it is, the more opaque the segmentations are.
Returns:
output (VisImage): image object with visualizations.
"""
if isinstance(sem_seg, torch.Tensor):
sem_seg = sem_seg.numpy()
labels, areas = np.unique(sem_seg, return_counts=True)
sorted_idxs = np.argsort(-areas).tolist()
labels = labels[sorted_idxs]
for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
except (AttributeError, IndexError):
mask_color = None
binary_mask = (sem_seg == label).astype(np.uint8)
text = self.metadata.stuff_classes[label]
self.draw_binary_mask(
binary_mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
return self.output
    def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
        """
        Draw panoptic prediction annotations or results.

        Args:
            panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
                segment.
            segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
                If it is a ``list[dict]``, each dict contains keys "id", "category_id".
                If None, category id of each pixel is computed by
                ``pixel // metadata.label_divisor``.
            area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
            alpha (float): the larger it is, the more opaque the segmentations are.

        Returns:
            output (VisImage): image object with visualizations.
        """
        pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
        if self._instance_mode == ColorMode.IMAGE_BW:
            # Grayscale everything outside the segments so they stand out.
            self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))
        # draw mask for all semantic segments first i.e. "stuff"
        for mask, sinfo in pred.semantic_masks():
            category_idx = sinfo["category_id"]
            try:
                mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
            except AttributeError:
                # Metadata has no stuff_colors; fall back to a random color.
                mask_color = None
            text = self.metadata.stuff_classes[category_idx]
            self.draw_binary_mask(
                mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )
        # draw mask for all instances second
        all_instances = list(pred.instance_masks())
        if len(all_instances) == 0:
            return self.output
        masks, sinfo = list(zip(*all_instances))
        category_ids = [x["category_id"] for x in sinfo]
        try:
            scores = [x["score"] for x in sinfo]
        except KeyError:
            # Ground-truth segments carry no scores.
            scores = None
        labels = _create_text_labels(
            category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo]
        )
        try:
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
            ]
        except AttributeError:
            colors = None
        self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
        return self.output
    # Kept as an alias for callers using the old name.
    draw_panoptic_seg_predictions = draw_panoptic_seg  # backward compatibility
    def draw_dataset_dict(self, dic):
        """
        Draw annotations/segmentations in Detectron2 Dataset format.

        Args:
            dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.

        Returns:
            output (VisImage): image object with visualizations.
        """
        annos = dic.get("annotations", None)
        if annos:
            if "segmentation" in annos[0]:
                masks = [x["segmentation"] for x in annos]
            else:
                masks = None
            if "keypoints" in annos[0]:
                keypts = [x["keypoints"] for x in annos]
                keypts = np.array(keypts).reshape(len(annos), -1, 3)
            else:
                keypts = None
            # Convert 4-element boxes to absolute XYXY; 5-element (rotated)
            # boxes are passed through unchanged.
            boxes = [
                BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS)
                if len(x["bbox"]) == 4
                else x["bbox"]
                for x in annos
            ]
            colors = None
            category_ids = [x["category_id"] for x in annos]
            if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
                colors = [
                    self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
                    for c in category_ids
                ]
            names = self.metadata.get("thing_classes", None)
            labels = _create_text_labels(
                category_ids,
                scores=None,
                class_names=names,
                is_crowd=[x.get("iscrowd", 0) for x in annos],
            )
            self.overlay_instances(
                labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
            )
        sem_seg = dic.get("sem_seg", None)
        if sem_seg is None and "sem_seg_file_name" in dic:
            # Lazily load semantic segmentation when only a file path is given.
            with PathManager.open(dic["sem_seg_file_name"], "rb") as f:
                sem_seg = Image.open(f)
                sem_seg = np.asarray(sem_seg, dtype="uint8")
        if sem_seg is not None:
            self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
        pan_seg = dic.get("pan_seg", None)
        if pan_seg is None and "pan_seg_file_name" in dic:
            with PathManager.open(dic["pan_seg_file_name"], "rb") as f:
                pan_seg = Image.open(f)
                pan_seg = np.asarray(pan_seg)
                from panopticapi.utils import rgb2id
                # Panoptic PNGs encode ids as RGB; decode them to integer ids.
                pan_seg = rgb2id(pan_seg)
        if pan_seg is not None:
            segments_info = dic["segments_info"]
            pan_seg = torch.tensor(pan_seg)
            self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)
        return self.output
    def overlay_instances(
        self,
        *,
        boxes=None,
        labels=None,
        masks=None,
        keypoints=None,
        assigned_colors=None,
        alpha=0.5,
    ):
        """
        Args:
            boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
                or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
                or a :class:`RotatedBoxes`,
                or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
                for the N objects in a single image,
            labels (list[str]): the text to be displayed for each instance.
            masks (masks-like object): Supported types are:
                * :class:`detectron2.structures.PolygonMasks`,
                  :class:`detectron2.structures.BitMasks`.
                * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
                  The first level of the list corresponds to individual instances. The second
                  level to all the polygon that compose the instance, and the third level
                  to the polygon coordinates. The third level should have the format of
                  [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
                * list[ndarray]: each ndarray is a binary mask of shape (H, W).
                * list[dict]: each dict is a COCO-style RLE.
            keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
                where the N is the number of instances and K is the number of keypoints.
                The last dimension corresponds to (x, y, visibility or score).
            assigned_colors (list[matplotlib.colors]): a list of colors, where each color
                corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
                for full list of formats that the colors are accepted in.
            alpha (float): blending coefficient for masks; smaller is more transparent.

        Returns:
            output (VisImage): image object with visualizations.
        """
        # Determine N from whichever of boxes/masks/keypoints is given, and
        # check all given inputs agree on it.
        num_instances = 0
        if boxes is not None:
            boxes = self._convert_boxes(boxes)
            num_instances = len(boxes)
        if masks is not None:
            masks = self._convert_masks(masks)
            if num_instances:
                assert len(masks) == num_instances
            else:
                num_instances = len(masks)
        if keypoints is not None:
            if num_instances:
                assert len(keypoints) == num_instances
            else:
                num_instances = len(keypoints)
            keypoints = self._convert_keypoints(keypoints)
        if labels is not None:
            assert len(labels) == num_instances
        if assigned_colors is None:
            assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
        if num_instances == 0:
            return self.output
        if boxes is not None and boxes.shape[1] == 5:
            # Nx5 boxes are rotated boxes; delegate to the rotated-box path.
            return self.overlay_rotated_instances(
                boxes=boxes, labels=labels, assigned_colors=assigned_colors
            )
        # Display in largest to smallest order to reduce occlusion.
        areas = None
        if boxes is not None:
            areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
        elif masks is not None:
            areas = np.asarray([x.area() for x in masks])
        if areas is not None:
            sorted_idxs = np.argsort(-areas).tolist()
            # Re-order overlapped instances in descending order.
            boxes = boxes[sorted_idxs] if boxes is not None else None
            labels = [labels[k] for k in sorted_idxs] if labels is not None else None
            masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
            assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
            keypoints = keypoints[sorted_idxs] if keypoints is not None else None
        for i in range(num_instances):
            color = assigned_colors[i]
            if boxes is not None:
                self.draw_box(boxes[i], edge_color=color)
            if masks is not None:
                for segment in masks[i].polygons:
                    self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
            if labels is not None:
                # first get a box
                if boxes is not None:
                    x0, y0, x1, y1 = boxes[i]
                    text_pos = (x0, y0)  # if drawing boxes, put text on the box corner.
                    horiz_align = "left"
                elif masks is not None:
                    # skip small mask without polygon
                    if len(masks[i].polygons) == 0:
                        continue
                    x0, y0, x1, y1 = masks[i].bbox()
                    # draw text in the center (defined by median) when box is not drawn
                    # median is less sensitive to outliers.
                    text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
                    horiz_align = "center"
                else:
                    continue  # drawing the box confidence for keypoints isn't very useful.
                # for small objects, draw text at the side to avoid occlusion
                instance_area = (y1 - y0) * (x1 - x0)
                if (
                    instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
                    or y1 - y0 < 40 * self.output.scale
                ):
                    if y1 >= self.output.height - 5:
                        text_pos = (x1, y0)
                    else:
                        text_pos = (x0, y1)
                # Scale the font with instance height, clamped to a sane range.
                height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
                lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
                font_size = (
                    np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
                    * 0.5
                    * self._default_font_size
                )
                self.draw_text(
                    labels[i],
                    text_pos,
                    color=lighter_color,
                    horizontal_alignment=horiz_align,
                    font_size=font_size,
                )
        # draw keypoints
        if keypoints is not None:
            for keypoints_per_instance in keypoints:
                self.draw_and_connect_keypoints(keypoints_per_instance)
        return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
"""
Args:
boxes (ndarray): an Nx5 numpy array of
(x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image.
labels (list[str]): the text to be displayed for each instance.
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = len(boxes)
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
# Display in largest to smallest order to reduce occlusion.
if boxes is not None:
areas = boxes[:, 2] * boxes[:, 3]
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs]
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
self.draw_rotated_box_with_label(
boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
)
return self.output
    def draw_and_connect_keypoints(self, keypoints):
        """
        Draws keypoints of an instance and follows the rules for keypoint connections
        to draw lines between appropriate keypoints. This follows color heuristics for
        line color.

        Args:
            keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
                and the last dimension corresponds to (x, y, probability).

        Returns:
            output (VisImage): image object with visualizations.
        """
        # Maps keypoint name -> (x, y) for keypoints confident enough to draw.
        visible = {}
        keypoint_names = self.metadata.get("keypoint_names")
        for idx, keypoint in enumerate(keypoints):
            # draw keypoint
            x, y, prob = keypoint
            if prob > self.keypoint_threshold:
                self.draw_circle((x, y), color=_RED)
                if keypoint_names:
                    # Remember where each named keypoint is, for connecting below.
                    keypoint_name = keypoint_names[idx]
                    visible[keypoint_name] = (x, y)
        if self.metadata.get("keypoint_connection_rules"):
            for kp0, kp1, color in self.metadata.keypoint_connection_rules:
                if kp0 in visible and kp1 in visible:
                    x0, y0 = visible[kp0]
                    x1, y1 = visible[kp1]
                    # Rule colors are 0-255; matplotlib wants 0-1.
                    color = tuple(x / 255.0 for x in color)
                    self.draw_line([x0, x1], [y0, y1], color=color)
        # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
        # Note that this strategy is specific to person keypoints.
        # For other keypoints, it should just do nothing
        try:
            ls_x, ls_y = visible["left_shoulder"]
            rs_x, rs_y = visible["right_shoulder"]
            mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
        except KeyError:
            pass
        else:
            # draw line from nose to mid-shoulder
            nose_x, nose_y = visible.get("nose", (None, None))
            if nose_x is not None:
                self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
            try:
                # draw line from mid-shoulder to mid-hip
                lh_x, lh_y = visible["left_hip"]
                rh_x, rh_y = visible["right_hip"]
            except KeyError:
                pass
            else:
                mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
                self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
        return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0,
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
font_size (int, optional): font of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
rotation: rotation angle in degrees CCW
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
"""
Args:
box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
are the coordinates of the image's top left corner. x1 and y1 are the
coordinates of the image's bottom right corner.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
Returns:
output (VisImage): image object with box drawn.
"""
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
    def draw_rotated_box_with_label(
        self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
    ):
        """
        Draw a rotated box with label on its top-left corner.

        Args:
            rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
                where cnt_x and cnt_y are the center coordinates of the box.
                w and h are the width and height of the box. angle represents how
                many degrees the box is rotated CCW with regard to the 0-degree box.
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            edge_color: color of the outline of the box. Refer to `matplotlib.colors`
                for full list of formats that are accepted.
            line_style (string): the string to use to create the outline of the boxes.
            label (string): label for rotated box. It will not be rendered when set to None.

        Returns:
            output (VisImage): image object with box drawn.
        """
        cnt_x, cnt_y, w, h, angle = rotated_box
        area = w * h
        # use thinner lines when the box is small
        linewidth = self._default_font_size / (
            6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
        )
        theta = angle * math.pi / 180.0
        c = math.cos(theta)
        s = math.sin(theta)
        # Corners of the axis-aligned box, relative to the center, in the order
        # bottom-left, top-left, top-right, bottom-right (image coordinates).
        rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
        # x: left->right ; y: top->down
        # Rotate each corner CCW by `theta` and translate to the box center.
        rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
        for k in range(4):
            j = (k + 1) % 4
            self.draw_line(
                [rotated_rect[k][0], rotated_rect[j][0]],
                [rotated_rect[k][1], rotated_rect[j][1]],
                color=edge_color,
                # The top edge is dashed so the box orientation is visible.
                linestyle="--" if k == 1 else line_style,
                linewidth=linewidth,
            )
        if label is not None:
            text_pos = rotated_rect[1]  # topleft corner
            height_ratio = h / np.sqrt(self.output.height * self.output.width)
            label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
            font_size = (
                np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
            )
            self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
        return self.output
def draw_circle(self, circle_coord, color, radius=3):
"""
Args:
circle_coord (list(int) or tuple(int)): contains the x and y coordinates
of the center of the circle.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
radius (int): radius of the circle.
Returns:
output (VisImage): image object with box drawn.
"""
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
"""
Args:
x_data (list[int]): a list containing x values of all the points being drawn.
Length of list should match the length of y_data.
y_data (list[int]): a list containing y values of all the points being drawn.
Length of list should match the length of x_data.
color: color of the line. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
for a full list of formats that are accepted.
linewidth (float or None): width of the line. When it's None,
a default value will be computed and used.
Returns:
output (VisImage): image object with line drawn.
"""
if linewidth is None:
linewidth = self._default_font_size / 3
linewidth = max(linewidth, 1)
self.output.ax.add_line(
mpl.lines.Line2D(
x_data,
y_data,
linewidth=linewidth * self.output.scale,
color=color,
linestyle=linestyle,
)
)
return self.output
    def draw_binary_mask(
        self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10
    ):
        """
        Args:
            binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
                W is the image width. Each value in the array is either a 0 or 1 value of uint8
                type.
            color: color of the mask. Refer to `matplotlib.colors` for a full list of
                formats that are accepted. If None, will pick a random color.
            edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            text (str): if not None, will be drawn on the object.
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            area_threshold (float): a connected component smaller than this area will not be shown.

        Returns:
            output (VisImage): image object with mask drawn.
        """
        if color is None:
            color = random_color(rgb=True, maximum=1)
        color = mplc.to_rgb(color)
        has_valid_segment = False
        binary_mask = binary_mask.astype("uint8")  # opencv needs uint8
        mask = GenericMask(binary_mask, self.output.height, self.output.width)
        shape2d = (binary_mask.shape[0], binary_mask.shape[1])
        if not mask.has_holes:
            # draw polygons for regular masks
            for segment in mask.polygons:
                # Skip connected components that are too small to be informative.
                area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
                if area < (area_threshold or 0):
                    continue
                has_valid_segment = True
                segment = segment.reshape(-1, 2)
                self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
        else:
            # Masks with holes cannot be represented as simple polygons; blend
            # an RGBA overlay onto the image instead.
            # TODO: Use Path/PathPatch to draw vector graphics:
            # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
            rgba = np.zeros(shape2d + (4,), dtype="float32")
            rgba[:, :, :3] = color
            rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
            has_valid_segment = True
            self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
        if text is not None and has_valid_segment:
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            self._draw_text_in_mask(binary_mask, text, lighter_color)
        return self.output
def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):
"""
Args:
soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].
color: color of the mask. Refer to `matplotlib.colors` for a full list of
formats that are accepted. If None, will pick a random color.
text (str): if None, will be drawn on the object
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with mask drawn.
"""
if color is None:
color = random_color(rgb=True, maximum=1)
color = mplc.to_rgb(color)
shape2d = (soft_mask.shape[0], soft_mask.shape[1])
rgba = np.zeros(shape2d + (4,), dtype="float32")
rgba[:, :, :3] = color
rgba[:, :, 3] = soft_mask * alpha
self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
if text is not None:
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
binary_mask = (soft_mask > 0.5).astype("uint8")
self._draw_text_in_mask(binary_mask, text, lighter_color)
return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
"""
Args:
segment: numpy array of shape Nx2, containing all the points in the polygon.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted. If not provided, a darker shade
of the polygon color will be used instead.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with polygon drawn.
"""
if edge_color is None:
# make edge color darker than the polygon color
if alpha > 0.8:
edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
else:
edge_color = color
edge_color = mplc.to_rgb(edge_color) + (1,)
polygon = mpl.patches.Polygon(
segment,
fill=True,
facecolor=mplc.to_rgb(color) + (alpha,),
edgecolor=edge_color,
linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
)
self.output.ax.add_patch(polygon)
return self.output
"""
Internal methods:
"""
def _jitter(self, color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def _create_grayscale_image(self, mask=None):
"""
Create a grayscale version of the original image.
The colors in masked area, if given, will be kept.
"""
img_bw = self.img.astype("f4").mean(axis=2)
img_bw = np.stack([img_bw] * 3, axis=2)
if mask is not None:
img_bw[mask] = self.img[mask]
return img_bw
def _change_color_brightness(self, color, brightness_factor):
"""
Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
less or more saturation than the original color.
Args:
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
0 will correspond to no change, a factor in [-1.0, 0) range will result in
a darker color and a factor in (0, 1.0] range will result in a lighter color.
Returns:
modified_color (tuple[double]): a tuple containing the RGB values of the
modified color. Each value in the tuple is in the [0.0, 1.0] range.
"""
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
color = mplc.to_rgb(color)
polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
return modified_color
def _convert_boxes(self, boxes):
"""
Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
"""
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
return boxes.tensor.detach().numpy()
else:
return np.asarray(boxes)
def _convert_masks(self, masks_or_polygons):
"""
Convert different format of masks or polygons to a tuple of masks and polygons.
Returns:
list[GenericMask]:
"""
m = masks_or_polygons
if isinstance(m, PolygonMasks):
m = m.polygons
if isinstance(m, BitMasks):
m = m.tensor.numpy()
if isinstance(m, torch.Tensor):
m = m.numpy()
ret = []
for x in m:
if isinstance(x, GenericMask):
ret.append(x)
else:
ret.append(GenericMask(x, self.output.height, self.output.width))
return ret
    def _draw_text_in_mask(self, binary_mask, text, color):
        """
        Find proper places to draw text given a binary mask.

        Args:
            binary_mask (ndarray): (H, W) uint8 mask with 0/1 values.
            text (str): the label to draw.
            color: text color, in a matplotlib-accepted format.
        """
        # TODO sometimes drawn on wrong objects. the heuristics here can improve.
        _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
        # stats[:, -1] is the component area; row 0 is the background component.
        if stats[1:, -1].size == 0:
            return
        largest_component_id = np.argmax(stats[1:, -1]) + 1
        # draw text on the largest component, as well as other very large components.
        for cid in range(1, _num_cc):
            if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
                # median is more stable than centroid
                # center = centroids[largest_component_id]
                center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
                self.draw_text(text, center, color=color)
    def _convert_keypoints(self, keypoints):
        """
        Convert keypoints into a plain numpy array.

        Args:
            keypoints (Keypoints or array-like): per-instance keypoint data.

        Returns:
            ndarray: the keypoint data as a numpy array.
        """
        if isinstance(keypoints, Keypoints):
            keypoints = keypoints.tensor
        keypoints = np.asarray(keypoints)
        return keypoints
    def get_output(self):
        """
        Accessor for the accumulated drawing result.

        Returns:
            output (VisImage): the image output containing the visualizations added
                to the image.
        """
        return self.output
|
evocodebench_data_189
|
# Copyright (c) Facebook, Inc. and its affiliates.
import colorsys
import logging
import math
from enum import Enum, unique
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import numpy as np
import pycocotools.mask as mask_util
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Keypoints,
PolygonMasks,
RotatedBoxes,
)
from detectron2.utils.file_io import PathManager
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image
from .colormap import random_color
logger = logging.getLogger(__name__)
__all__ = ["ColorMode", "VisImage", "Visualizer"]
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
    """
    Enum of different color modes to use for instance visualizations.
    """

    IMAGE = 0
    """
    Every instance gets a random color; segmentations are overlaid with low opacity.
    """
    SEGMENTATION = 1
    """
    Instances of the same category share similar colors
    (taken from metadata.thing_colors) and are overlaid with
    high opacity, putting the emphasis on segmentation quality.
    """
    IMAGE_BW = 2
    """
    Same as IMAGE, except that all areas without masks are converted to gray-scale.
    Only available for drawing per-instance mask predictions.
    """
class GenericMask:
    """
    A mask with lazily-interconvertible polygon and bitmask representations.

    Attribute:
        polygons (list[ndarray]): list[ndarray]: polygons for this mask.
            Each ndarray has format [x, y, x, y, ...]
        mask (ndarray): a binary mask
    """

    def __init__(self, mask_or_polygons, height, width):
        """
        Args:
            mask_or_polygons: one of
                - dict: a COCO-style RLE (compressed or uncompressed)
                - list[ndarray]: polygons, each flattened to [x, y, x, y, ...]
                - ndarray: a binary mask of shape (height, width)
            height (int): mask height in pixels.
            width (int): mask width in pixels.
        """
        # Lazily computed representations; each can be derived from the other on demand.
        self._mask = self._polygons = self._has_holes = None
        self.height = height
        self.width = width
        m = mask_or_polygons
        if isinstance(m, dict):
            # RLEs
            assert "counts" in m and "size" in m
            if isinstance(m["counts"], list):  # uncompressed RLEs
                h, w = m["size"]
                assert h == height and w == width
                m = mask_util.frPyObjects(m, h, w)
            self._mask = mask_util.decode(m)[:, :]
            return
        if isinstance(m, list):  # list[ndarray]
            self._polygons = [np.asarray(x).reshape(-1) for x in m]
            return
        if isinstance(m, np.ndarray):  # assumed to be a binary mask
            assert m.shape[1] != 2, m.shape
            assert m.shape == (
                height,
                width,
            ), f"mask shape: {m.shape}, target dims: {height}, {width}"
            self._mask = m.astype("uint8")
            return
        raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))

    @property
    def mask(self):
        # Binary mask, rasterized from the polygons if not already available.
        if self._mask is None:
            self._mask = self.polygons_to_mask(self._polygons)
        return self._mask

    @property
    def polygons(self):
        # Polygon representation, traced from the mask if not already available.
        if self._polygons is None:
            self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
        return self._polygons

    @property
    def has_holes(self):
        # Whether the mask has interior holes; polygon input is treated as hole-free.
        if self._has_holes is None:
            if self._mask is not None:
                self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
            else:
                self._has_holes = False  # if original format is polygon, does not have holes
        return self._has_holes

    def mask_to_polygons(self, mask):
        """Trace contours of a binary mask with OpenCV; also report whether it has holes."""
        # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
        # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
        # Internal contours (holes) are placed in hierarchy-2.
        # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
        mask = np.ascontiguousarray(mask)  # some versions of cv2 does not support incontiguous arr
        res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        hierarchy = res[-1]
        if hierarchy is None:  # empty mask
            return [], False
        # A contour with a parent (4th column >= 0) is an interior hole.
        has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
        res = res[-2]
        res = [x.flatten() for x in res]
        # These coordinates from OpenCV are integers in range [0, W-1 or H-1].
        # We add 0.5 to turn them into real-value coordinate space. A better solution
        # would be to first +0.5 and then dilate the returned polygon by 0.5.
        res = [x + 0.5 for x in res if len(x) >= 6]
        return res, has_holes

    def polygons_to_mask(self, polygons):
        """Rasterize polygons into a binary mask via pycocotools RLE."""
        rle = mask_util.frPyObjects(polygons, self.height, self.width)
        rle = mask_util.merge(rle)
        return mask_util.decode(rle)[:, :]

    def area(self):
        # Number of foreground pixels.
        return self.mask.sum()

    def bbox(self):
        """Return the tight bounding box of the mask in XYXY format."""
        p = mask_util.frPyObjects(self.polygons, self.height, self.width)
        p = mask_util.merge(p)
        bbox = mask_util.toBbox(p)
        # toBbox returns XYWH; convert to XYXY in place.
        bbox[2] += bbox[0]
        bbox[3] += bbox[1]
        return bbox
class _PanopticPrediction:
"""
Unify different panoptic annotation/prediction formats
"""
def __init__(self, panoptic_seg, segments_info, metadata=None):
if segments_info is None:
assert metadata is not None
# If "segments_info" is None, we assume "panoptic_img" is a
# H*W int32 image storing the panoptic_id in the format of
# category_id * label_divisor + instance_id. We reserve -1 for
# VOID label.
label_divisor = metadata.label_divisor
segments_info = []
for panoptic_label in np.unique(panoptic_seg.numpy()):
if panoptic_label == -1:
# VOID region.
continue
pred_class = panoptic_label // label_divisor
isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
segments_info.append(
{
"id": int(panoptic_label),
"category_id": int(pred_class),
"isthing": bool(isthing),
}
)
del metadata
self._seg = panoptic_seg
self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
areas = areas.numpy()
sorted_idxs = np.argsort(-areas)
self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
self._seg_ids = self._seg_ids.tolist()
for sid, area in zip(self._seg_ids, self._seg_areas):
if sid in self._sinfo:
self._sinfo[sid]["area"] = float(area)
def non_empty_mask(self):
"""
Returns:
(H, W) array, a mask for all pixels that have a prediction
"""
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
assert (
len(empty_ids) == 1
), ">1 ids corresponds to no labels. This is currently not supported"
return (self._seg != empty_ids[0]).numpy().astype(np.bool)
def semantic_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or sinfo["isthing"]:
# Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
continue
yield (self._seg == sid).numpy().astype(np.bool), sinfo
def instance_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or not sinfo["isthing"]:
continue
mask = (self._seg == sid).numpy().astype(np.bool)
if mask.sum() > 0:
yield mask, sinfo
def _create_text_labels(classes, scores, class_names, is_crowd=None):
"""
Args:
classes (list[int] or None):
scores (list[float] or None):
class_names (list[str] or None):
is_crowd (list[bool] or None):
Returns:
list[str] or None
"""
labels = None
if classes is not None:
if class_names is not None and len(class_names) > 0:
labels = [class_names[i] for i in classes]
else:
labels = [str(i) for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
if labels is not None and is_crowd is not None:
labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)]
return labels
class VisImage:
    """
    A matplotlib-backed drawing surface holding one image plus drawn primitives.
    Wraps a frameless figure whose axes exactly cover the (scaled) image.
    """
    def __init__(self, img, scale=1.0):
        """
        Args:
            img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255].
            scale (float): scale the input image
        """
        self.img = img
        self.scale = scale
        self.width, self.height = img.shape[1], img.shape[0]
        self._setup_figure(img)
    def _setup_figure(self, img):
        """
        Args:
            Same as in :meth:`__init__()`.
        Returns:
            fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
            ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
        """
        fig = mplfigure.Figure(frameon=False)
        self.dpi = fig.get_dpi()
        # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
        # (https://github.com/matplotlib/matplotlib/issues/15363)
        fig.set_size_inches(
            (self.width * self.scale + 1e-2) / self.dpi,
            (self.height * self.scale + 1e-2) / self.dpi,
        )
        self.canvas = FigureCanvasAgg(fig)
        # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
        # Axes span the full figure with no margins, so image pixels map 1:1
        # (times `scale`) to output pixels.
        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
        ax.axis("off")
        self.fig = fig
        self.ax = ax
        self.reset_image(img)
    def reset_image(self, img):
        """
        Args:
            img: same as in __init__
        """
        img = img.astype("uint8")
        # extent puts the image origin at the top-left (y axis pointing down).
        self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
    def save(self, filepath):
        """
        Args:
            filepath (str): a string that contains the absolute path, including the file name, where
                the visualized image will be saved.
        """
        self.fig.savefig(filepath)
    def get_image(self):
        """
        Returns:
            ndarray:
                the visualized image of shape (H, W, 3) (RGB) in uint8 type.
                The shape is scaled w.r.t the input image using the given `scale` argument.
        """
        canvas = self.canvas
        # Render the Agg canvas into a raw RGBA byte buffer.
        s, (width, height) = canvas.print_to_buffer()
        # buf = io.BytesIO() # works for cairo backend
        # canvas.print_rgba(buf)
        # width, height = self.width, self.height
        # s = buf.getvalue()
        buffer = np.frombuffer(s, dtype="uint8")
        img_rgba = buffer.reshape(height, width, 4)
        # Drop the alpha channel; the figure background is opaque.
        rgb, alpha = np.split(img_rgba, [3], axis=2)
        return rgb.astype("uint8")
class Visualizer:
"""
Visualizer that draws data about detection/segmentation on images.
It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
that draw primitive objects to images, as well as high-level wrappers like
`draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
that draw composite data in some pre-defined style.
Note that the exact visualization style for the high-level wrappers are subject to change.
Style such as color, opacity, label contents, visibility of labels, or even the visibility
of objects themselves (e.g. when the object is too small) may change according
to different heuristics, as long as the results still look visually reasonable.
To obtain a consistent style, you can implement custom drawing functions with the
abovementioned primitive methods instead. If you need more customized visualization
styles, you can process the data yourself following their format documented in
tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
intend to satisfy everyone's preference on drawing styles.
This visualizer focuses on high rendering quality rather than performance. It is not
designed to be used for real-time applications.
"""
# TODO implement a fast, rasterized version using OpenCV
    def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
        """
        Args:
            img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
                the height and width of the image respectively. C is the number of
                color channels. The image is required to be in RGB format since that
                is a requirement of the Matplotlib library. The image is also expected
                to be in the range [0, 255].
            metadata (Metadata): dataset metadata (e.g. class names and colors)
            scale (float): scale factor applied to the output image.
            instance_mode (ColorMode): defines one of the pre-defined style for drawing
                instances on an image.
        """
        self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
        if metadata is None:
            # Fall back to an empty metadata object so `.get(...)` lookups
            # degrade gracefully instead of raising.
            metadata = MetadataCatalog.get("__nonexist__")
        self.metadata = metadata
        self.output = VisImage(self.img, scale=scale)
        self.cpu_device = torch.device("cpu")
        # too small texts are useless, therefore clamp to 9
        # NOTE(review): the comment says 9 but the actual floor is 10 // scale —
        # confirm which is intended.
        self._default_font_size = max(
            np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
        )
        self._instance_mode = instance_mode
        self.keypoint_threshold = _KEYPOINT_THRESHOLD
    def draw_instance_predictions(self, predictions):
        """
        Draw instance-level prediction results on an image.
        Args:
            predictions (Instances): the output of an instance detection/segmentation
                model. Following fields will be used to draw:
                "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
        Returns:
            output (VisImage): image object with visualizations.
        """
        # Every field is optional; anything missing is simply not drawn.
        boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
        scores = predictions.scores if predictions.has("scores") else None
        classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
        labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
        keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
        if predictions.has("pred_masks"):
            masks = np.asarray(predictions.pred_masks)
            masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
        else:
            masks = None
        if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
            # Color each instance by its (jittered) class color, with higher
            # opacity to emphasize segmentation quality.
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
            ]
            alpha = 0.8
        else:
            colors = None
            alpha = 0.5
        if self._instance_mode == ColorMode.IMAGE_BW:
            # Turn everything outside the union of predicted masks to grayscale
            # so the colored instances stand out.
            self.output.reset_image(
                self._create_grayscale_image(
                    (predictions.pred_masks.any(dim=0) > 0).numpy()
                    if predictions.has("pred_masks")
                    else None
                )
            )
            alpha = 0.3
        self.overlay_instances(
            masks=masks,
            boxes=boxes,
            labels=labels,
            keypoints=keypoints,
            assigned_colors=colors,
            alpha=alpha,
        )
        return self.output
    def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
        """
        Draw semantic segmentation predictions/labels.
        Args:
            sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
                Each value is the integer label of the pixel.
            area_threshold (int): segments with less than `area_threshold` are not drawn.
            alpha (float): the larger it is, the more opaque the segmentations are.
        Returns:
            output (VisImage): image object with visualizations.
        """
        if isinstance(sem_seg, torch.Tensor):
            sem_seg = sem_seg.numpy()
        # Draw larger segments first so smaller ones remain visible on top.
        labels, areas = np.unique(sem_seg, return_counts=True)
        sorted_idxs = np.argsort(-areas).tolist()
        labels = labels[sorted_idxs]
        # Skip label values outside the known stuff classes (e.g. an "ignore" value).
        for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
            try:
                mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
            except (AttributeError, IndexError):
                # No color metadata for this class; a random color is picked downstream.
                mask_color = None
            binary_mask = (sem_seg == label).astype(np.uint8)
            text = self.metadata.stuff_classes[label]
            self.draw_binary_mask(
                binary_mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )
        return self.output
    def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
        """
        Draw panoptic prediction annotations or results.
        Args:
            panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
                segment.
            segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
                If it is a ``list[dict]``, each dict contains keys "id", "category_id".
                If None, category id of each pixel is computed by
                ``pixel // metadata.label_divisor``.
            area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
            alpha (float): the larger it is, the more opaque the segmentations are.
        Returns:
            output (VisImage): image object with visualizations.
        """
        pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
        if self._instance_mode == ColorMode.IMAGE_BW:
            # Grayscale everything without a prediction so labeled regions stand out.
            self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))
        # draw mask for all semantic segments first i.e. "stuff"
        for mask, sinfo in pred.semantic_masks():
            category_idx = sinfo["category_id"]
            try:
                mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
            except AttributeError:
                # NOTE(review): draw_sem_seg also catches IndexError here —
                # confirm whether an out-of-range category_id is possible.
                mask_color = None
            text = self.metadata.stuff_classes[category_idx]
            self.draw_binary_mask(
                mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )
        # draw mask for all instances second
        all_instances = list(pred.instance_masks())
        if len(all_instances) == 0:
            return self.output
        masks, sinfo = list(zip(*all_instances))
        category_ids = [x["category_id"] for x in sinfo]
        try:
            # "score" is present for predictions but absent for ground-truth annotations.
            scores = [x["score"] for x in sinfo]
        except KeyError:
            scores = None
        labels = _create_text_labels(
            category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo]
        )
        try:
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
            ]
        except AttributeError:
            colors = None
        self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
        return self.output
draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility
    def draw_dataset_dict(self, dic):
        """
        Draw annotations/segmentaions in Detectron2 Dataset format.
        Args:
            dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.
        Returns:
            output (VisImage): image object with visualizations.
        """
        # --- instance annotations ---
        annos = dic.get("annotations", None)
        if annos:
            if "segmentation" in annos[0]:
                masks = [x["segmentation"] for x in annos]
            else:
                masks = None
            if "keypoints" in annos[0]:
                keypts = [x["keypoints"] for x in annos]
                # Flat keypoint lists -> (N, K, 3) array of (x, y, visibility).
                keypts = np.array(keypts).reshape(len(annos), -1, 3)
            else:
                keypts = None
            # Normalize 4-element boxes to XYXY_ABS; rotated (5-element) boxes
            # are passed through unchanged.
            boxes = [
                BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS)
                if len(x["bbox"]) == 4
                else x["bbox"]
                for x in annos
            ]
            colors = None
            category_ids = [x["category_id"] for x in annos]
            if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
                colors = [
                    self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
                    for c in category_ids
                ]
            names = self.metadata.get("thing_classes", None)
            labels = _create_text_labels(
                category_ids,
                scores=None,
                class_names=names,
                is_crowd=[x.get("iscrowd", 0) for x in annos],
            )
            self.overlay_instances(
                labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
            )
        # --- semantic segmentation, inline or loaded from file ---
        sem_seg = dic.get("sem_seg", None)
        if sem_seg is None and "sem_seg_file_name" in dic:
            with PathManager.open(dic["sem_seg_file_name"], "rb") as f:
                sem_seg = Image.open(f)
                sem_seg = np.asarray(sem_seg, dtype="uint8")
        if sem_seg is not None:
            self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
        # --- panoptic segmentation, inline or loaded from an id-encoded RGB PNG ---
        pan_seg = dic.get("pan_seg", None)
        if pan_seg is None and "pan_seg_file_name" in dic:
            with PathManager.open(dic["pan_seg_file_name"], "rb") as f:
                pan_seg = Image.open(f)
                pan_seg = np.asarray(pan_seg)
                from panopticapi.utils import rgb2id
                pan_seg = rgb2id(pan_seg)
        if pan_seg is not None:
            segments_info = dic["segments_info"]
            pan_seg = torch.tensor(pan_seg)
            self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)
        return self.output
    def overlay_instances(
        self,
        *,
        boxes=None,
        labels=None,
        masks=None,
        keypoints=None,
        assigned_colors=None,
        alpha=0.5,
    ):
        """
        Args:
            boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
                or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
                or a :class:`RotatedBoxes`,
                or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
                for the N objects in a single image,
            labels (list[str]): the text to be displayed for each instance.
            masks (masks-like object): Supported types are:
                * :class:`detectron2.structures.PolygonMasks`,
                  :class:`detectron2.structures.BitMasks`.
                * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
                  The first level of the list corresponds to individual instances. The second
                  level to all the polygon that compose the instance, and the third level
                  to the polygon coordinates. The third level should have the format of
                  [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
                * list[ndarray]: each ndarray is a binary mask of shape (H, W).
                * list[dict]: each dict is a COCO-style RLE.
            keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
                where the N is the number of instances and K is the number of keypoints.
                The last dimension corresponds to (x, y, visibility or score).
            assigned_colors (list[matplotlib.colors]): a list of colors, where each color
                corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
                for full list of formats that the colors are accepted in.
            alpha (float): mask opacity; the larger it is, the more opaque the masks are.
        Returns:
            output (VisImage): image object with visualizations.
        """
        # Determine N from whichever field is provided, and check that all
        # provided fields agree on it.
        num_instances = 0
        if boxes is not None:
            boxes = self._convert_boxes(boxes)
            num_instances = len(boxes)
        if masks is not None:
            masks = self._convert_masks(masks)
            if num_instances:
                assert len(masks) == num_instances
            else:
                num_instances = len(masks)
        if keypoints is not None:
            if num_instances:
                assert len(keypoints) == num_instances
            else:
                num_instances = len(keypoints)
            keypoints = self._convert_keypoints(keypoints)
        if labels is not None:
            assert len(labels) == num_instances
        if assigned_colors is None:
            assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
        if num_instances == 0:
            return self.output
        # 5-column boxes are rotated boxes; delegate to the rotated-box path.
        if boxes is not None and boxes.shape[1] == 5:
            return self.overlay_rotated_instances(
                boxes=boxes, labels=labels, assigned_colors=assigned_colors
            )
        # Display in largest to smallest order to reduce occlusion.
        areas = None
        if boxes is not None:
            areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
        elif masks is not None:
            areas = np.asarray([x.area() for x in masks])
        if areas is not None:
            sorted_idxs = np.argsort(-areas).tolist()
            # Re-order overlapped instances in descending order.
            boxes = boxes[sorted_idxs] if boxes is not None else None
            labels = [labels[k] for k in sorted_idxs] if labels is not None else None
            masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
            assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
            keypoints = keypoints[sorted_idxs] if keypoints is not None else None
        for i in range(num_instances):
            color = assigned_colors[i]
            if boxes is not None:
                self.draw_box(boxes[i], edge_color=color)
            if masks is not None:
                for segment in masks[i].polygons:
                    self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
            if labels is not None:
                # first get a box
                if boxes is not None:
                    x0, y0, x1, y1 = boxes[i]
                    text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
                    horiz_align = "left"
                elif masks is not None:
                    # skip small mask without polygon
                    if len(masks[i].polygons) == 0:
                        continue
                    x0, y0, x1, y1 = masks[i].bbox()
                    # draw text in the center (defined by median) when box is not drawn
                    # median is less sensitive to outliers.
                    text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
                    horiz_align = "center"
                else:
                    continue # drawing the box confidence for keypoints isn't very useful.
                # for small objects, draw text at the side to avoid occlusion
                instance_area = (y1 - y0) * (x1 - x0)
                if (
                    instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
                    or y1 - y0 < 40 * self.output.scale
                ):
                    # Prefer below the box; fall back to the right edge when the
                    # box touches the bottom of the image.
                    if y1 >= self.output.height - 5:
                        text_pos = (x1, y0)
                    else:
                        text_pos = (x0, y1)
                # Scale the font with the instance's relative height.
                height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
                lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
                font_size = (
                    np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
                    * 0.5
                    * self._default_font_size
                )
                self.draw_text(
                    labels[i],
                    text_pos,
                    color=lighter_color,
                    horizontal_alignment=horiz_align,
                    font_size=font_size,
                )
        # draw keypoints
        if keypoints is not None:
            for keypoints_per_instance in keypoints:
                self.draw_and_connect_keypoints(keypoints_per_instance)
        return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
"""
Args:
boxes (ndarray): an Nx5 numpy array of
(x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image.
labels (list[str]): the text to be displayed for each instance.
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = len(boxes)
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
# Display in largest to smallest order to reduce occlusion.
if boxes is not None:
areas = boxes[:, 2] * boxes[:, 3]
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs]
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
self.draw_rotated_box_with_label(
boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
)
return self.output
    def draw_and_connect_keypoints(self, keypoints):
        """
        Draws keypoints of an instance and follows the rules for keypoint connections
        to draw lines between appropriate keypoints. This follows color heuristics for
        line color.
        Args:
            keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
                and the last dimension corresponds to (x, y, probability).
        Returns:
            output (VisImage): image object with visualizations.
        """
        # Map keypoint name -> (x, y) for keypoints above the confidence
        # threshold; only these participate in connections below.
        visible = {}
        keypoint_names = self.metadata.get("keypoint_names")
        for idx, keypoint in enumerate(keypoints):
            # draw keypoint
            x, y, prob = keypoint
            if prob > self.keypoint_threshold:
                self.draw_circle((x, y), color=_RED)
                if keypoint_names:
                    keypoint_name = keypoint_names[idx]
                    visible[keypoint_name] = (x, y)
        if self.metadata.get("keypoint_connection_rules"):
            # Draw each connection only when both endpoints are visible.
            for kp0, kp1, color in self.metadata.keypoint_connection_rules:
                if kp0 in visible and kp1 in visible:
                    x0, y0 = visible[kp0]
                    x1, y1 = visible[kp1]
                    color = tuple(x / 255.0 for x in color)
                    self.draw_line([x0, x1], [y0, y1], color=color)
        # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
        # Note that this strategy is specific to person keypoints.
        # For other keypoints, it should just do nothing
        try:
            ls_x, ls_y = visible["left_shoulder"]
            rs_x, rs_y = visible["right_shoulder"]
            mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
        except KeyError:
            # Not a person skeleton (or shoulders not visible); skip the torso lines.
            pass
        else:
            # draw line from nose to mid-shoulder
            nose_x, nose_y = visible.get("nose", (None, None))
            if nose_x is not None:
                self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
            try:
                # draw line from mid-shoulder to mid-hip
                lh_x, lh_y = visible["left_hip"]
                rh_x, rh_y = visible["right_hip"]
            except KeyError:
                pass
            else:
                mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
                self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
        return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0,
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
font_size (int, optional): font of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
rotation: rotation angle in degrees CCW
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
"""
Args:
box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
are the coordinates of the image's top left corner. x1 and y1 are the
coordinates of the image's bottom right corner.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
Returns:
output (VisImage): image object with box drawn.
"""
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
    def draw_rotated_box_with_label(
        self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
    ):
        """
        Draw a rotated box with label on its top-left corner.
        Args:
            rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
                where cnt_x and cnt_y are the center coordinates of the box.
                w and h are the width and height of the box. angle represents how
                many degrees the box is rotated CCW with regard to the 0-degree box.
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            edge_color: color of the outline of the box. Refer to `matplotlib.colors`
                for full list of formats that are accepted.
            line_style (string): the string to use to create the outline of the boxes.
            label (string): label for rotated box. It will not be rendered when set to None.
        Returns:
            output (VisImage): image object with box drawn.
        """
        cnt_x, cnt_y, w, h, angle = rotated_box
        area = w * h
        # use thinner lines when the box is small
        linewidth = self._default_font_size / (
            6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
        )
        # Rotate the four corners of the axis-aligned box around its center.
        theta = angle * math.pi / 180.0
        c = math.cos(theta)
        s = math.sin(theta)
        rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
        # x: left->right ; y: top->down
        rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
        for k in range(4):
            j = (k + 1) % 4
            # NOTE(review): one edge (k == 1) is drawn dashed, presumably to make
            # the box's orientation visually unambiguous — confirm.
            self.draw_line(
                [rotated_rect[k][0], rotated_rect[j][0]],
                [rotated_rect[k][1], rotated_rect[j][1]],
                color=edge_color,
                linestyle="--" if k == 1 else line_style,
                linewidth=linewidth,
            )
        if label is not None:
            text_pos = rotated_rect[1] # topleft corner
            # Font size scales with the box's relative height, like overlay_instances.
            height_ratio = h / np.sqrt(self.output.height * self.output.width)
            label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
            font_size = (
                np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
            )
            self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
        return self.output
def draw_circle(self, circle_coord, color, radius=3):
"""
Args:
circle_coord (list(int) or tuple(int)): contains the x and y coordinates
of the center of the circle.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
radius (int): radius of the circle.
Returns:
output (VisImage): image object with box drawn.
"""
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
"""
Args:
x_data (list[int]): a list containing x values of all the points being drawn.
Length of list should match the length of y_data.
y_data (list[int]): a list containing y values of all the points being drawn.
Length of list should match the length of x_data.
color: color of the line. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
for a full list of formats that are accepted.
linewidth (float or None): width of the line. When it's None,
a default value will be computed and used.
Returns:
output (VisImage): image object with line drawn.
"""
if linewidth is None:
linewidth = self._default_font_size / 3
linewidth = max(linewidth, 1)
self.output.ax.add_line(
mpl.lines.Line2D(
x_data,
y_data,
linewidth=linewidth * self.output.scale,
color=color,
linestyle=linestyle,
)
)
return self.output
    def draw_binary_mask(
        self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10
    ):
        """
        Args:
            binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
                W is the image width. Each value in the array is either a 0 or 1 value of uint8
                type.
            color: color of the mask. Refer to `matplotlib.colors` for a full list of
                formats that are accepted. If None, will pick a random color.
            edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            text (str): if None, will be drawn on the object
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            area_threshold (float): a connected component smaller than this area will not be shown.
        Returns:
            output (VisImage): image object with mask drawn.
        """
        if color is None:
            color = random_color(rgb=True, maximum=1)
        color = mplc.to_rgb(color)
        has_valid_segment = False
        binary_mask = binary_mask.astype("uint8") # opencv needs uint8
        mask = GenericMask(binary_mask, self.output.height, self.output.width)
        shape2d = (binary_mask.shape[0], binary_mask.shape[1])
        if not mask.has_holes:
            # draw polygons for regular masks
            for segment in mask.polygons:
                # Skip connected components below the area threshold.
                area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
                if area < (area_threshold or 0):
                    continue
                has_valid_segment = True
                segment = segment.reshape(-1, 2)
                self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
        else:
            # Masks with holes cannot be drawn as simple polygons; fall back to
            # blending an RGBA overlay of the raw mask instead.
            # TODO: Use Path/PathPatch to draw vector graphics:
            # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
            rgba = np.zeros(shape2d + (4,), dtype="float32")
            rgba[:, :, :3] = color
            rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
            has_valid_segment = True
            self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
        if text is not None and has_valid_segment:
            # Lighten the label so it stays readable over the mask color.
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            self._draw_text_in_mask(binary_mask, text, lighter_color)
        return self.output
    def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):
        """
        Args:
            soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].
            color: color of the mask. Refer to `matplotlib.colors` for a full list of
                formats that are accepted. If None, will pick a random color.
            text (str): if None, will be drawn on the object
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
        Returns:
            output (VisImage): image object with mask drawn.
        """
        if color is None:
            color = random_color(rgb=True, maximum=1)
        color = mplc.to_rgb(color)
        # Blend an RGBA overlay whose per-pixel opacity is the soft mask value
        # scaled by `alpha`.
        shape2d = (soft_mask.shape[0], soft_mask.shape[1])
        rgba = np.zeros(shape2d + (4,), dtype="float32")
        rgba[:, :, :3] = color
        rgba[:, :, 3] = soft_mask * alpha
        self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
        if text is not None:
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            # Place the label using a hard 0.5-thresholded version of the mask.
            binary_mask = (soft_mask > 0.5).astype("uint8")
            self._draw_text_in_mask(binary_mask, text, lighter_color)
        return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
"""
Args:
segment: numpy array of shape Nx2, containing all the points in the polygon.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted. If not provided, a darker shade
of the polygon color will be used instead.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with polygon drawn.
"""
if edge_color is None:
# make edge color darker than the polygon color
if alpha > 0.8:
edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
else:
edge_color = color
edge_color = mplc.to_rgb(edge_color) + (1,)
polygon = mpl.patches.Polygon(
segment,
fill=True,
facecolor=mplc.to_rgb(color) + (alpha,),
edgecolor=edge_color,
linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
)
self.output.ax.add_patch(polygon)
return self.output
"""
Internal methods:
"""
def _jitter(self, color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def _create_grayscale_image(self, mask=None):
"""
Create a grayscale version of the original image.
The colors in masked area, if given, will be kept.
"""
img_bw = self.img.astype("f4").mean(axis=2)
img_bw = np.stack([img_bw] * 3, axis=2)
if mask is not None:
img_bw[mask] = self.img[mask]
return img_bw
def _change_color_brightness(self, color, brightness_factor):
"""
Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
less or more saturation than the original color.
Args:
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
0 will correspond to no change, a factor in [-1.0, 0) range will result in
a darker color and a factor in (0, 1.0] range will result in a lighter color.
Returns:
modified_color (tuple[double]): a tuple containing the RGB values of the
modified color. Each value in the tuple is in the [0.0, 1.0] range.
"""
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
color = mplc.to_rgb(color)
polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
return modified_color
def _convert_boxes(self, boxes):
"""
Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
"""
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
return boxes.tensor.detach().numpy()
else:
return np.asarray(boxes)
def _convert_masks(self, masks_or_polygons):
"""
Convert different format of masks or polygons to a tuple of masks and polygons.
Returns:
list[GenericMask]:
"""
m = masks_or_polygons
if isinstance(m, PolygonMasks):
m = m.polygons
if isinstance(m, BitMasks):
m = m.tensor.numpy()
if isinstance(m, torch.Tensor):
m = m.numpy()
ret = []
for x in m:
if isinstance(x, GenericMask):
ret.append(x)
else:
ret.append(GenericMask(x, self.output.height, self.output.width))
return ret
    def _draw_text_in_mask(self, binary_mask, text, color):
        """
        Find proper places to draw text given a binary mask.

        Args:
            binary_mask (ndarray): (H, W) array with values 0/1.
            text (str): label to render.
            color: text color, any matplotlib format.
        """
        # TODO sometimes drawn on wrong objects. the heuristics here can improve.
        _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
        # stats[:, -1] holds each component's pixel area; row 0 is the background.
        if stats[1:, -1].size == 0:
            # Empty mask: nothing to label.
            return
        largest_component_id = np.argmax(stats[1:, -1]) + 1

        # draw text on the largest component, as well as other very large components.
        for cid in range(1, _num_cc):
            if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
                # median is more stable than centroid
                # center = centroids[largest_component_id]
                # nonzero() gives (row, col); [::-1] flips to (x, y) for draw_text.
                center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
                self.draw_text(text, center, color=color)
def _convert_keypoints(self, keypoints):
if isinstance(keypoints, Keypoints):
keypoints = keypoints.tensor
keypoints = np.asarray(keypoints)
return keypoints
def get_output(self):
"""
Returns:
output (VisImage): the image output containing the visualizations added
to the image.
"""
return self.output
|
evocodebench_data_190
|
# Copyright (c) Facebook, Inc. and its affiliates.
import colorsys
import logging
import math
from enum import Enum, unique
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import numpy as np
import pycocotools.mask as mask_util
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Keypoints,
PolygonMasks,
RotatedBoxes,
)
from detectron2.utils.file_io import PathManager
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image
from .colormap import random_color
logger = logging.getLogger(__name__)
__all__ = ["ColorMode", "VisImage", "Visualizer"]
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
    """
    Enum of different color modes to use for instance visualizations.
    """

    # NOTE: the bare strings after each member are "value docstrings" picked up
    # by documentation tools; @unique guarantees the values stay distinct.
    IMAGE = 0
    """
    Picks a random color for every instance and overlay segmentations with low opacity.
    """
    SEGMENTATION = 1
    """
    Let instances of the same category have similar colors
    (from metadata.thing_colors), and overlay them with
    high opacity. This provides more attention on the quality of segmentation.
    """
    IMAGE_BW = 2
    """
    Same as IMAGE, but convert all areas without masks to gray-scale.
    Only available for drawing per-instance mask predictions.
    """
class GenericMask:
    """
    Attribute:
        polygons (list[ndarray]): list[ndarray]: polygons for this mask.
            Each ndarray has format [x, y, x, y, ...]
        mask (ndarray): a binary mask
    """

    def __init__(self, mask_or_polygons, height, width):
        # Lazy dual representation: only one of mask / polygons is stored here;
        # the other is derived on first access through the properties below.
        self._mask = self._polygons = self._has_holes = None
        self.height = height
        self.width = width

        m = mask_or_polygons
        if isinstance(m, dict):
            # RLEs
            assert "counts" in m and "size" in m
            if isinstance(m["counts"], list):  # uncompressed RLEs
                h, w = m["size"]
                assert h == height and w == width
                m = mask_util.frPyObjects(m, h, w)
            self._mask = mask_util.decode(m)[:, :]
            return

        if isinstance(m, list):  # list[ndarray]
            self._polygons = [np.asarray(x).reshape(-1) for x in m]
            return

        if isinstance(m, np.ndarray):  # assumed to be a binary mask
            # shape[1] == 2 would look like an Nx2 point array, not an (H, W) mask.
            assert m.shape[1] != 2, m.shape
            assert m.shape == (
                height,
                width,
            ), f"mask shape: {m.shape}, target dims: {height}, {width}"
            self._mask = m.astype("uint8")
            return

        raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))

    @property
    def mask(self):
        # Derive the bitmask from polygons on first access.
        if self._mask is None:
            self._mask = self.polygons_to_mask(self._polygons)
        return self._mask

    @property
    def polygons(self):
        # Derive polygons (and hole information) from the bitmask on first access.
        if self._polygons is None:
            self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
        return self._polygons

    @property
    def has_holes(self):
        if self._has_holes is None:
            if self._mask is not None:
                self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
            else:
                self._has_holes = False  # if original format is polygon, does not have holes
        return self._has_holes

    def mask_to_polygons(self, mask):
        """Convert a binary mask to ``(polygons, has_holes)`` using OpenCV contours."""
        # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
        # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
        # Internal contours (holes) are placed in hierarchy-2.
        # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
        mask = np.ascontiguousarray(mask)  # some versions of cv2 does not support incontiguous arr
        res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        hierarchy = res[-1]
        if hierarchy is None:  # empty mask
            return [], False
        # hierarchy column 3 is the parent index; >= 0 means an inner (hole) contour.
        has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
        res = res[-2]
        res = [x.flatten() for x in res]
        # These coordinates from OpenCV are integers in range [0, W-1 or H-1].
        # We add 0.5 to turn them into real-value coordinate space. A better solution
        # would be to first +0.5 and then dilate the returned polygon by 0.5.
        # Contours with fewer than 3 points (len < 6 coordinates) are dropped.
        res = [x + 0.5 for x in res if len(x) >= 6]
        return res, has_holes

    def polygons_to_mask(self, polygons):
        """Rasterize polygons into a binary (H, W) mask via COCO RLE utilities."""
        rle = mask_util.frPyObjects(polygons, self.height, self.width)
        rle = mask_util.merge(rle)
        return mask_util.decode(rle)[:, :]

    def area(self):
        # Number of foreground pixels.
        return self.mask.sum()

    def bbox(self):
        """Return the [x0, y0, x1, y1] bounding box of the polygons."""
        p = mask_util.frPyObjects(self.polygons, self.height, self.width)
        p = mask_util.merge(p)
        bbox = mask_util.toBbox(p)
        # toBbox returns XYWH; convert to XYXY in place.
        bbox[2] += bbox[0]
        bbox[3] += bbox[1]
        return bbox
class _PanopticPrediction:
"""
Unify different panoptic annotation/prediction formats
"""
def __init__(self, panoptic_seg, segments_info, metadata=None):
if segments_info is None:
assert metadata is not None
# If "segments_info" is None, we assume "panoptic_img" is a
# H*W int32 image storing the panoptic_id in the format of
# category_id * label_divisor + instance_id. We reserve -1 for
# VOID label.
label_divisor = metadata.label_divisor
segments_info = []
for panoptic_label in np.unique(panoptic_seg.numpy()):
if panoptic_label == -1:
# VOID region.
continue
pred_class = panoptic_label // label_divisor
isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
segments_info.append(
{
"id": int(panoptic_label),
"category_id": int(pred_class),
"isthing": bool(isthing),
}
)
del metadata
self._seg = panoptic_seg
self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
areas = areas.numpy()
sorted_idxs = np.argsort(-areas)
self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
self._seg_ids = self._seg_ids.tolist()
for sid, area in zip(self._seg_ids, self._seg_areas):
if sid in self._sinfo:
self._sinfo[sid]["area"] = float(area)
def non_empty_mask(self):
"""
Returns:
(H, W) array, a mask for all pixels that have a prediction
"""
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
assert (
len(empty_ids) == 1
), ">1 ids corresponds to no labels. This is currently not supported"
return (self._seg != empty_ids[0]).numpy().astype(np.bool)
def semantic_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or sinfo["isthing"]:
# Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
continue
yield (self._seg == sid).numpy().astype(np.bool), sinfo
def instance_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or not sinfo["isthing"]:
continue
mask = (self._seg == sid).numpy().astype(np.bool)
if mask.sum() > 0:
yield mask, sinfo
def _create_text_labels(classes, scores, class_names, is_crowd=None):
"""
Args:
classes (list[int] or None):
scores (list[float] or None):
class_names (list[str] or None):
is_crowd (list[bool] or None):
Returns:
list[str] or None
"""
labels = None
if classes is not None:
if class_names is not None and len(class_names) > 0:
labels = [class_names[i] for i in classes]
else:
labels = [str(i) for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
if labels is not None and is_crowd is not None:
labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)]
return labels
class VisImage:
    """Wraps an image in a matplotlib figure/axes so drawing primitives can be
    overlaid in image pixel coordinates and rendered back out as an array."""

    def __init__(self, img, scale=1.0):
        """
        Args:
            img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255].
            scale (float): scale the input image
        """
        self.img = img
        self.scale = scale
        self.width, self.height = img.shape[1], img.shape[0]
        self._setup_figure(img)

    def _setup_figure(self, img):
        """
        Args:
            Same as in :meth:`__init__()`.

        Returns:
            fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
            ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
        """
        fig = mplfigure.Figure(frameon=False)
        self.dpi = fig.get_dpi()
        # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
        # (https://github.com/matplotlib/matplotlib/issues/15363)
        fig.set_size_inches(
            (self.width * self.scale + 1e-2) / self.dpi,
            (self.height * self.scale + 1e-2) / self.dpi,
        )
        self.canvas = FigureCanvasAgg(fig)
        # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
        # Axes span the whole figure so the image exactly fills the canvas.
        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
        ax.axis("off")
        self.fig = fig
        self.ax = ax
        self.reset_image(img)

    def reset_image(self, img):
        """
        Args:
            img: same as in __init__
        """
        img = img.astype("uint8")
        # extent=(0, W, H, 0) places the origin at the top-left, matching
        # image pixel coordinates.
        self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")

    def save(self, filepath):
        """
        Args:
            filepath (str): a string that contains the absolute path, including the file name, where
                the visualized image will be saved.
        """
        self.fig.savefig(filepath)

    def get_image(self):
        """
        Returns:
            ndarray:
                the visualized image of shape (H, W, 3) (RGB) in uint8 type.
                The shape is scaled w.r.t the input image using the given `scale` argument.
        """
        canvas = self.canvas
        s, (width, height) = canvas.print_to_buffer()
        # buf = io.BytesIO()  # works for cairo backend
        # canvas.print_rgba(buf)
        # width, height = self.width, self.height
        # s = buf.getvalue()

        buffer = np.frombuffer(s, dtype="uint8")

        # The Agg buffer is RGBA; drop the alpha channel.
        img_rgba = buffer.reshape(height, width, 4)
        rgb, alpha = np.split(img_rgba, [3], axis=2)
        return rgb.astype("uint8")
class Visualizer:
"""
Visualizer that draws data about detection/segmentation on images.
It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
that draw primitive objects to images, as well as high-level wrappers like
`draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
that draw composite data in some pre-defined style.
Note that the exact visualization style for the high-level wrappers are subject to change.
Style such as color, opacity, label contents, visibility of labels, or even the visibility
of objects themselves (e.g. when the object is too small) may change according
to different heuristics, as long as the results still look visually reasonable.
To obtain a consistent style, you can implement custom drawing functions with the
abovementioned primitive methods instead. If you need more customized visualization
styles, you can process the data yourself following their format documented in
tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
intend to satisfy everyone's preference on drawing styles.
This visualizer focuses on high rendering quality rather than performance. It is not
designed to be used for real-time applications.
"""
# TODO implement a fast, rasterized version using OpenCV
    def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
        """
        Args:
            img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
                the height and width of the image respectively. C is the number of
                color channels. The image is required to be in RGB format since that
                is a requirement of the Matplotlib library. The image is also expected
                to be in the range [0, 255].
            metadata (Metadata): dataset metadata (e.g. class names and colors)
            scale (float): scale applied to the output image.
            instance_mode (ColorMode): defines one of the pre-defined style for drawing
                instances on an image.
        """
        self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
        if metadata is None:
            # Empty metadata: all .get() lookups fall back to their defaults.
            metadata = MetadataCatalog.get("__nonexist__")
        self.metadata = metadata
        self.output = VisImage(self.img, scale=scale)
        self.cpu_device = torch.device("cpu")

        # too small texts are useless; enforce a minimum font size of 10 // scale
        self._default_font_size = max(
            np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
        )
        self._instance_mode = instance_mode
        self.keypoint_threshold = _KEYPOINT_THRESHOLD
    def draw_instance_predictions(self, predictions):
        """
        Draw instance-level prediction results on an image.

        Args:
            predictions (Instances): the output of an instance detection/segmentation
                model. Following fields will be used to draw:
                "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").

        Returns:
            output (VisImage): image object with visualizations.
        """
        # Every field is optional; missing ones are simply not drawn.
        boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
        scores = predictions.scores if predictions.has("scores") else None
        classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
        labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
        keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None

        if predictions.has("pred_masks"):
            masks = np.asarray(predictions.pred_masks)
            masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
        else:
            masks = None

        if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
            # Per-category colors (jittered so same-class instances remain
            # distinguishable), drawn with higher opacity.
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
            ]
            alpha = 0.8
        else:
            colors = None
            alpha = 0.5

        if self._instance_mode == ColorMode.IMAGE_BW:
            # Gray out everything outside the union of predicted masks.
            self.output.reset_image(
                self._create_grayscale_image(
                    (predictions.pred_masks.any(dim=0) > 0).numpy()
                    if predictions.has("pred_masks")
                    else None
                )
            )
            alpha = 0.3

        self.overlay_instances(
            masks=masks,
            boxes=boxes,
            labels=labels,
            keypoints=keypoints,
            assigned_colors=colors,
            alpha=alpha,
        )
        return self.output
    def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
        """
        Draw semantic segmentation predictions/labels.

        Args:
            sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
                Each value is the integer label of the pixel.
            area_threshold (int): segments with less than `area_threshold` are not drawn.
            alpha (float): the larger it is, the more opaque the segmentations are.

        Returns:
            output (VisImage): image object with visualizations.
        """
        if isinstance(sem_seg, torch.Tensor):
            sem_seg = sem_seg.numpy()
        # Draw larger segments first so smaller ones are still visible on top.
        labels, areas = np.unique(sem_seg, return_counts=True)
        sorted_idxs = np.argsort(-areas).tolist()
        labels = labels[sorted_idxs]
        for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
            try:
                mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
            except (AttributeError, IndexError):
                # No color defined for this class; draw_binary_mask picks one.
                mask_color = None

            binary_mask = (sem_seg == label).astype(np.uint8)
            text = self.metadata.stuff_classes[label]
            self.draw_binary_mask(
                binary_mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )
        return self.output
    def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
        """
        Draw panoptic prediction annotations or results.

        Args:
            panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
                segment.
            segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
                If it is a ``list[dict]``, each dict contains keys "id", "category_id".
                If None, category id of each pixel is computed by
                ``pixel // metadata.label_divisor``.
            area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
            alpha (float): the larger it is, the more opaque the segmentations are.

        Returns:
            output (VisImage): image object with visualizations.
        """
        pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)

        if self._instance_mode == ColorMode.IMAGE_BW:
            self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))

        # draw mask for all semantic segments first i.e. "stuff"
        for mask, sinfo in pred.semantic_masks():
            category_idx = sinfo["category_id"]
            try:
                mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
            except AttributeError:
                # Metadata defines no stuff_colors; draw_binary_mask picks one.
                mask_color = None

            text = self.metadata.stuff_classes[category_idx]
            self.draw_binary_mask(
                mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )

        # draw mask for all instances second
        all_instances = list(pred.instance_masks())
        if len(all_instances) == 0:
            return self.output
        masks, sinfo = list(zip(*all_instances))
        category_ids = [x["category_id"] for x in sinfo]

        try:
            # "score" is only present for predictions, not ground-truth.
            scores = [x["score"] for x in sinfo]
        except KeyError:
            scores = None
        labels = _create_text_labels(
            category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo]
        )

        try:
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
            ]
        except AttributeError:
            colors = None
        self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
        return self.output
draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility
    def draw_dataset_dict(self, dic):
        """
        Draw annotations/segmentaions in Detectron2 Dataset format.

        Args:
            dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.

        Returns:
            output (VisImage): image object with visualizations.
        """
        annos = dic.get("annotations", None)
        if annos:
            if "segmentation" in annos[0]:
                masks = [x["segmentation"] for x in annos]
            else:
                masks = None
            if "keypoints" in annos[0]:
                keypts = [x["keypoints"] for x in annos]
                keypts = np.array(keypts).reshape(len(annos), -1, 3)
            else:
                keypts = None

            # Rotated (5-element) boxes are passed through unconverted.
            boxes = [
                BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS)
                if len(x["bbox"]) == 4
                else x["bbox"]
                for x in annos
            ]

            colors = None
            category_ids = [x["category_id"] for x in annos]
            if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
                colors = [
                    self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
                    for c in category_ids
                ]
            names = self.metadata.get("thing_classes", None)
            labels = _create_text_labels(
                category_ids,
                scores=None,
                class_names=names,
                is_crowd=[x.get("iscrowd", 0) for x in annos],
            )
            self.overlay_instances(
                labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
            )

        sem_seg = dic.get("sem_seg", None)
        if sem_seg is None and "sem_seg_file_name" in dic:
            # Lazily load the semantic segmentation from file when not inlined.
            with PathManager.open(dic["sem_seg_file_name"], "rb") as f:
                sem_seg = Image.open(f)
                sem_seg = np.asarray(sem_seg, dtype="uint8")
        if sem_seg is not None:
            self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)

        pan_seg = dic.get("pan_seg", None)
        if pan_seg is None and "pan_seg_file_name" in dic:
            with PathManager.open(dic["pan_seg_file_name"], "rb") as f:
                pan_seg = Image.open(f)
                pan_seg = np.asarray(pan_seg)
                from panopticapi.utils import rgb2id

                # Panoptic PNGs encode segment ids across the RGB channels.
                pan_seg = rgb2id(pan_seg)
        if pan_seg is not None:
            segments_info = dic["segments_info"]
            pan_seg = torch.tensor(pan_seg)
            self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)
        return self.output
    def overlay_instances(
        self,
        *,
        boxes=None,
        labels=None,
        masks=None,
        keypoints=None,
        assigned_colors=None,
        alpha=0.5,
    ):
        """
        Args:
            boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
                or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
                or a :class:`RotatedBoxes`,
                or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
                for the N objects in a single image,
            labels (list[str]): the text to be displayed for each instance.
            masks (masks-like object): Supported types are:

                * :class:`detectron2.structures.PolygonMasks`,
                  :class:`detectron2.structures.BitMasks`.
                * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
                  The first level of the list corresponds to individual instances. The second
                  level to all the polygon that compose the instance, and the third level
                  to the polygon coordinates. The third level should have the format of
                  [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
                * list[ndarray]: each ndarray is a binary mask of shape (H, W).
                * list[dict]: each dict is a COCO-style RLE.
            keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
                where the N is the number of instances and K is the number of keypoints.
                The last dimension corresponds to (x, y, visibility or score).
            assigned_colors (list[matplotlib.colors]): a list of colors, where each color
                corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
                for full list of formats that the colors are accepted in.
            alpha (float): blending coefficient for the masks.

        Returns:
            output (VisImage): image object with visualizations.
        """
        # Infer the instance count from whichever input is given, and assert
        # that all provided inputs agree on it.
        num_instances = 0
        if boxes is not None:
            boxes = self._convert_boxes(boxes)
            num_instances = len(boxes)
        if masks is not None:
            masks = self._convert_masks(masks)
            if num_instances:
                assert len(masks) == num_instances
            else:
                num_instances = len(masks)
        if keypoints is not None:
            if num_instances:
                assert len(keypoints) == num_instances
            else:
                num_instances = len(keypoints)
            keypoints = self._convert_keypoints(keypoints)
        if labels is not None:
            assert len(labels) == num_instances
        if assigned_colors is None:
            assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
        if num_instances == 0:
            return self.output
        if boxes is not None and boxes.shape[1] == 5:
            # 5-element boxes are rotated boxes; they use a dedicated path.
            return self.overlay_rotated_instances(
                boxes=boxes, labels=labels, assigned_colors=assigned_colors
            )

        # Display in largest to smallest order to reduce occlusion.
        areas = None
        if boxes is not None:
            areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
        elif masks is not None:
            areas = np.asarray([x.area() for x in masks])

        if areas is not None:
            sorted_idxs = np.argsort(-areas).tolist()
            # Re-order overlapped instances in descending order.
            boxes = boxes[sorted_idxs] if boxes is not None else None
            labels = [labels[k] for k in sorted_idxs] if labels is not None else None
            masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
            assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
            keypoints = keypoints[sorted_idxs] if keypoints is not None else None

        for i in range(num_instances):
            color = assigned_colors[i]
            if boxes is not None:
                self.draw_box(boxes[i], edge_color=color)

            if masks is not None:
                for segment in masks[i].polygons:
                    self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)

            if labels is not None:
                # first get a box
                if boxes is not None:
                    x0, y0, x1, y1 = boxes[i]
                    text_pos = (x0, y0)  # if drawing boxes, put text on the box corner.
                    horiz_align = "left"
                elif masks is not None:
                    # skip small mask without polygon
                    if len(masks[i].polygons) == 0:
                        continue

                    x0, y0, x1, y1 = masks[i].bbox()

                    # draw text in the center (defined by median) when box is not drawn
                    # median is less sensitive to outliers.
                    text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
                    horiz_align = "center"
                else:
                    continue  # drawing the box confidence for keypoints isn't very useful.
                # for small objects, draw text at the side to avoid occlusion
                instance_area = (y1 - y0) * (x1 - x0)
                if (
                    instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
                    or y1 - y0 < 40 * self.output.scale
                ):
                    if y1 >= self.output.height - 5:
                        text_pos = (x1, y0)
                    else:
                        text_pos = (x0, y1)

                height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
                lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
                # Font size scales with instance height, clamped to a sane range.
                font_size = (
                    np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
                    * 0.5
                    * self._default_font_size
                )
                self.draw_text(
                    labels[i],
                    text_pos,
                    color=lighter_color,
                    horizontal_alignment=horiz_align,
                    font_size=font_size,
                )

        # draw keypoints
        if keypoints is not None:
            for keypoints_per_instance in keypoints:
                self.draw_and_connect_keypoints(keypoints_per_instance)
        return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
"""
Args:
boxes (ndarray): an Nx5 numpy array of
(x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image.
labels (list[str]): the text to be displayed for each instance.
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = len(boxes)
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
# Display in largest to smallest order to reduce occlusion.
if boxes is not None:
areas = boxes[:, 2] * boxes[:, 3]
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs]
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
self.draw_rotated_box_with_label(
boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
)
return self.output
    def draw_and_connect_keypoints(self, keypoints):
        """
        Draws keypoints of an instance and follows the rules for keypoint connections
        to draw lines between appropriate keypoints. This follows color heuristics for
        line color.

        Args:
            keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
                and the last dimension corresponds to (x, y, probability).

        Returns:
            output (VisImage): image object with visualizations.
        """
        # Maps keypoint name -> (x, y) for keypoints above the threshold.
        visible = {}
        keypoint_names = self.metadata.get("keypoint_names")
        for idx, keypoint in enumerate(keypoints):
            # draw keypoint
            x, y, prob = keypoint
            if prob > self.keypoint_threshold:
                self.draw_circle((x, y), color=_RED)
                if keypoint_names:
                    keypoint_name = keypoint_names[idx]
                    visible[keypoint_name] = (x, y)

        if self.metadata.get("keypoint_connection_rules"):
            # Connect pairs of visible keypoints using the configured colors.
            for kp0, kp1, color in self.metadata.keypoint_connection_rules:
                if kp0 in visible and kp1 in visible:
                    x0, y0 = visible[kp0]
                    x1, y1 = visible[kp1]
                    color = tuple(x / 255.0 for x in color)
                    self.draw_line([x0, x1], [y0, y1], color=color)

        # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
        # Note that this strategy is specific to person keypoints.
        # For other keypoints, it should just do nothing
        try:
            ls_x, ls_y = visible["left_shoulder"]
            rs_x, rs_y = visible["right_shoulder"]
            mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
        except KeyError:
            pass
        else:
            # draw line from nose to mid-shoulder
            nose_x, nose_y = visible.get("nose", (None, None))
            if nose_x is not None:
                self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)

            try:
                # draw line from mid-shoulder to mid-hip
                lh_x, lh_y = visible["left_hip"]
                rh_x, rh_y = visible["right_hip"]
            except KeyError:
                pass
            else:
                mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
                self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
        return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0,
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
font_size (int, optional): font of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
rotation: rotation angle in degrees CCW
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
"""
Args:
box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
are the coordinates of the image's top left corner. x1 and y1 are the
coordinates of the image's bottom right corner.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
Returns:
output (VisImage): image object with box drawn.
"""
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
    def draw_rotated_box_with_label(
        self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
    ):
        """
        Draw a rotated box with label on its top-left corner.

        Args:
            rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
                where cnt_x and cnt_y are the center coordinates of the box.
                w and h are the width and height of the box. angle represents how
                many degrees the box is rotated CCW with regard to the 0-degree box.
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            edge_color: color of the outline of the box. Refer to `matplotlib.colors`
                for full list of formats that are accepted.
            line_style (string): the string to use to create the outline of the boxes.
            label (string): label for rotated box. It will not be rendered when set to None.

        Returns:
            output (VisImage): image object with box drawn.
        """
        cnt_x, cnt_y, w, h, angle = rotated_box
        area = w * h
        # use thinner lines when the box is small
        linewidth = self._default_font_size / (
            6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
        )
        theta = angle * math.pi / 180.0
        c = math.cos(theta)
        s = math.sin(theta)
        # Box corners in the box's local frame, centered at the origin.
        rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
        # x: left->right ; y: top->down
        # Rotate each corner by `angle` (image y-axis points down, hence this sign
        # pattern) and translate to the box center.
        rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
        for k in range(4):
            j = (k + 1) % 4
            self.draw_line(
                [rotated_rect[k][0], rotated_rect[j][0]],
                [rotated_rect[k][1], rotated_rect[j][1]],
                color=edge_color,
                # NOTE(review): edge k == 1 is drawn dashed — presumably to make the
                # box orientation visible; confirm intent before changing.
                linestyle="--" if k == 1 else line_style,
                linewidth=linewidth,
            )
        if label is not None:
            text_pos = rotated_rect[1]  # topleft corner
            height_ratio = h / np.sqrt(self.output.height * self.output.width)
            label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
            # Font size grows (clamped to [1.2, 2] x 0.5) with the box height
            # relative to the overall image size.
            font_size = (
                np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
            )
            self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
        return self.output
def draw_circle(self, circle_coord, color, radius=3):
"""
Args:
circle_coord (list(int) or tuple(int)): contains the x and y coordinates
of the center of the circle.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
radius (int): radius of the circle.
Returns:
output (VisImage): image object with box drawn.
"""
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
"""
Args:
x_data (list[int]): a list containing x values of all the points being drawn.
Length of list should match the length of y_data.
y_data (list[int]): a list containing y values of all the points being drawn.
Length of list should match the length of x_data.
color: color of the line. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
for a full list of formats that are accepted.
linewidth (float or None): width of the line. When it's None,
a default value will be computed and used.
Returns:
output (VisImage): image object with line drawn.
"""
if linewidth is None:
linewidth = self._default_font_size / 3
linewidth = max(linewidth, 1)
self.output.ax.add_line(
mpl.lines.Line2D(
x_data,
y_data,
linewidth=linewidth * self.output.scale,
color=color,
linestyle=linestyle,
)
)
return self.output
    def draw_binary_mask(
        self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10
    ):
        """
        Args:
            binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
                W is the image width. Each value in the array is either a 0 or 1 value of uint8
                type.
            color: color of the mask. Refer to `matplotlib.colors` for a full list of
                formats that are accepted. If None, will pick a random color.
            edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            text (str): if not None, will be drawn on the object
            alpha (float): blending efficient. Smaller values lead to more transparent masks.
            area_threshold (float): a connected component smaller than this area will not be shown.

        Returns:
            output (VisImage): image object with mask drawn.
        """
        if color is None:
            color = random_color(rgb=True, maximum=1)
        color = mplc.to_rgb(color)
        has_valid_segment = False
        binary_mask = binary_mask.astype("uint8")  # opencv needs uint8
        mask = GenericMask(binary_mask, self.output.height, self.output.width)
        shape2d = (binary_mask.shape[0], binary_mask.shape[1])
        if not mask.has_holes:
            # draw polygons for regular masks
            for segment in mask.polygons:
                # Compute the polygon's area (in RLE form) and drop tiny components.
                area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
                if area < (area_threshold or 0):
                    continue
                has_valid_segment = True
                segment = segment.reshape(-1, 2)
                self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
        else:
            # TODO: Use Path/PathPatch to draw vector graphics:
            # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
            # Masks with holes cannot be expressed as simple polygons; blend an RGBA
            # overlay over the whole canvas instead.
            rgba = np.zeros(shape2d + (4,), dtype="float32")
            rgba[:, :, :3] = color
            rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
            has_valid_segment = True
            self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
        if text is not None and has_valid_segment:
            # Use a lighter shade so the label stands out against the mask fill.
            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
            self._draw_text_in_mask(binary_mask, text, lighter_color)
        return self.output
def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):
"""
Args:
soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].
color: color of the mask. Refer to `matplotlib.colors` for a full list of
formats that are accepted. If None, will pick a random color.
text (str): if None, will be drawn on the object
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with mask drawn.
"""
if color is None:
color = random_color(rgb=True, maximum=1)
color = mplc.to_rgb(color)
shape2d = (soft_mask.shape[0], soft_mask.shape[1])
rgba = np.zeros(shape2d + (4,), dtype="float32")
rgba[:, :, :3] = color
rgba[:, :, 3] = soft_mask * alpha
self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
if text is not None:
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
binary_mask = (soft_mask > 0.5).astype("uint8")
self._draw_text_in_mask(binary_mask, text, lighter_color)
return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
"""
Args:
segment: numpy array of shape Nx2, containing all the points in the polygon.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted. If not provided, a darker shade
of the polygon color will be used instead.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with polygon drawn.
"""
if edge_color is None:
# make edge color darker than the polygon color
if alpha > 0.8:
edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
else:
edge_color = color
edge_color = mplc.to_rgb(edge_color) + (1,)
polygon = mpl.patches.Polygon(
segment,
fill=True,
facecolor=mplc.to_rgb(color) + (alpha,),
edgecolor=edge_color,
linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
)
self.output.ax.add_patch(polygon)
return self.output
"""
Internal methods:
"""
def _jitter(self, color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def _create_grayscale_image(self, mask=None):
"""
Create a grayscale version of the original image.
The colors in masked area, if given, will be kept.
"""
img_bw = self.img.astype("f4").mean(axis=2)
img_bw = np.stack([img_bw] * 3, axis=2)
if mask is not None:
img_bw[mask] = self.img[mask]
return img_bw
def _change_color_brightness(self, color, brightness_factor):
"""
Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
less or more saturation than the original color.
Args:
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
0 will correspond to no change, a factor in [-1.0, 0) range will result in
a darker color and a factor in (0, 1.0] range will result in a lighter color.
Returns:
modified_color (tuple[double]): a tuple containing the RGB values of the
modified color. Each value in the tuple is in the [0.0, 1.0] range.
"""
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
color = mplc.to_rgb(color)
polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
return modified_color
def _convert_boxes(self, boxes):
"""
Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
"""
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
return boxes.tensor.detach().numpy()
else:
return np.asarray(boxes)
def _convert_masks(self, masks_or_polygons):
"""
Convert different format of masks or polygons to a tuple of masks and polygons.
Returns:
list[GenericMask]:
"""
m = masks_or_polygons
if isinstance(m, PolygonMasks):
m = m.polygons
if isinstance(m, BitMasks):
m = m.tensor.numpy()
if isinstance(m, torch.Tensor):
m = m.numpy()
ret = []
for x in m:
if isinstance(x, GenericMask):
ret.append(x)
else:
ret.append(GenericMask(x, self.output.height, self.output.width))
return ret
    def _draw_text_in_mask(self, binary_mask, text, color):
        """
        Find proper places to draw text given a binary mask.
        """
        # TODO sometimes drawn on wrong objects. the heuristics here can improve.
        # `stats` last column is the component area (cv2.CC_STAT_AREA); row 0 is
        # the background component, hence the [1:] slices below.
        _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
        if stats[1:, -1].size == 0:
            # no foreground components at all — nothing to label
            return
        largest_component_id = np.argmax(stats[1:, -1]) + 1
        # draw text on the largest component, as well as other very large components.
        for cid in range(1, _num_cc):
            if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
                # median is more stable than centroid
                # center = centroids[largest_component_id]
                # nonzero() yields (rows, cols); [::-1] converts to (x, y) order.
                center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
                self.draw_text(text, center, color=color)
def _convert_keypoints(self, keypoints):
if isinstance(keypoints, Keypoints):
keypoints = keypoints.tensor
keypoints = np.asarray(keypoints)
return keypoints
    def get_output(self):
        """
        Returns:
            output (VisImage): the image output containing the visualizations added
            to the image.
        """
        # Note: this returns the live output object; subsequent draw_* calls on
        # this visualizer keep mutating the same object.
        return self.output
|
evocodebench_data_191
|
# Copyright (c) Facebook, Inc. and its affiliates.
import io
import numpy as np
import torch
from detectron2 import model_zoo
from detectron2.config import CfgNode, instantiate
from detectron2.data import DatasetCatalog
from detectron2.data.detection_utils import read_image
from detectron2.modeling import build_model
from detectron2.structures import Boxes, Instances, ROIMasks
from detectron2.utils.file_io import PathManager
"""
Internal utilities for tests. Don't use except for writing tests.
"""
def get_model_no_weights(config_path):
    """
    Like model_zoo.get, but do not load any weights (even pretrained)
    """
    cfg = model_zoo.get_config(config_path)
    # LazyConfig-style configs are instantiated directly.
    if not isinstance(cfg, CfgNode):
        return instantiate(cfg.model)
    # yacs-style config: force CPU when no GPU is present, then build.
    if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = "cpu"
    return build_model(cfg)
def random_boxes(num_boxes, max_coord=100, device="cpu"):
    """
    Create a random Nx4 boxes tensor, with coordinates < max_coord.
    """
    out = torch.rand(num_boxes, 4, device=device) * (max_coord * 0.5)
    out.clamp_(min=1.0)  # tiny boxes cause numerical instability in box regression
    # Note: the implementation of this function in torchvision is:
    # boxes[:, 2:] += torch.rand(N, 2) * 100
    # but it does not guarantee non-negative widths/heights constraints:
    # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]:
    # Offsetting the second corner from the first keeps widths/heights >= 0.
    out[:, 2:] += out[:, :2]
    return out
def get_sample_coco_image(tensor=True):
    """
    Args:
        tensor (bool): if True, returns 3xHxW tensor.
            else, returns a HxWx3 numpy array.

    Returns:
        an image, in BGR color.
    """
    try:
        file_name = DatasetCatalog.get("coco_2017_val_100")[0]["file_name"]
        if not PathManager.exists(file_name):
            raise FileNotFoundError()
    except IOError:
        # for public CI to run
        # FileNotFoundError is a subclass of OSError (== IOError), so the raise
        # above lands here too; fall back to downloading one sample image.
        file_name = PathManager.get_local_path(
            "http://images.cocodataset.org/train2017/000000000009.jpg"
        )
    ret = read_image(file_name, format="BGR")
    if tensor:
        # HWC -> CHW; ascontiguousarray avoids negative-stride issues in torch
        ret = torch.from_numpy(np.ascontiguousarray(ret.transpose(2, 0, 1)))
    return ret
def convert_scripted_instances(instances):
    """
    Convert a scripted Instances object to a regular :class:`Instances` object
    """
    assert hasattr(
        instances, "image_size"
    ), f"Expect an Instances object, but got {type(instances)}!"
    out = Instances(instances.image_size)
    # Scripted Instances store each field behind an underscore-prefixed attribute;
    # copy over only the ones that were actually set.
    for field_name in instances._field_names:
        field_value = getattr(instances, "_" + field_name, None)
        if field_value is not None:
            out.set(field_name, field_value)
    return out
def assert_instances_allclose(input, other, *, rtol=1e-5, msg="", size_as_tensor=False):
    """
    Assert that two Instances have the same image size, the same fields, and
    field values that match within tolerance.

    Args:
        input, other (Instances):
        rtol (float): relative tolerance for floating-point field comparisons.
        msg (str): prefix prepended to every assertion message.
        size_as_tensor: compare image_size of the Instances as tensors (instead of tuples).
            Useful for comparing outputs of tracing.

    Raises:
        AssertionError: if sizes, field names, or field values differ.
        ValueError: if a field has a type this function does not know how to compare.
    """
    # Accept scripted Instances by converting them to regular ones first.
    if not isinstance(input, Instances):
        input = convert_scripted_instances(input)
    if not isinstance(other, Instances):
        other = convert_scripted_instances(other)

    if not msg:
        msg = "Two Instances are different! "
    else:
        msg = msg.rstrip() + " "

    size_error_msg = msg + f"image_size is {input.image_size} vs. {other.image_size}!"
    if size_as_tensor:
        assert torch.equal(
            torch.tensor(input.image_size), torch.tensor(other.image_size)
        ), size_error_msg
    else:
        assert input.image_size == other.image_size, size_error_msg
    fields = sorted(input.get_fields().keys())
    fields_other = sorted(other.get_fields().keys())
    assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!"

    for f in fields:
        val1, val2 = input.get(f), other.get(f)
        if isinstance(val1, (Boxes, ROIMasks)):
            # boxes in the range of O(100) and can have a larger tolerance
            assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), (
                msg + f"Field {f} differs too much!"
            )
        elif isinstance(val1, torch.Tensor):
            if val1.dtype.is_floating_point:
                # scale the absolute tolerance by the magnitude of the values
                mag = torch.abs(val1).max().cpu().item()
                assert torch.allclose(val1, val2, atol=mag * rtol), (
                    msg + f"Field {f} differs too much!"
                )
            else:
                # integer/bool tensors must match exactly
                assert torch.equal(val1, val2), msg + f"Field {f} is different!"
        else:
            raise ValueError(f"Don't know how to compare type {type(val1)}")
def reload_script_model(module):
    """
    Save a jit module and load it back.
    Similar to the `getExportImportCopy` function in torch/testing/
    """
    # Round-trip through an in-memory buffer instead of touching the filesystem.
    with io.BytesIO() as buf:
        torch.jit.save(module, buf)
        buf.seek(0)
        return torch.jit.load(buf)
|
evocodebench_data_192
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import List, Tuple
import torch
from detectron2.layers.rotated_boxes import pairwise_iou_rotated
from .boxes import Boxes
class RotatedBoxes(Boxes):
    """
    This structure stores a list of rotated boxes as a Nx5 torch.Tensor.
    It supports some common methods about boxes
    (`area`, `clip`, `nonempty`, etc),
    and also behaves like a Tensor
    (support indexing, `to(device)`, `.device`, and iteration over all boxes)
    """

    def __init__(self, tensor: torch.Tensor):
        """
        Args:
            tensor (Tensor[float]): a Nx5 matrix. Each row is
                (x_center, y_center, width, height, angle),
                in which angle is represented in degrees.
                While there's no strict range restriction for it,
                the recommended principal range is between [-180, 180) degrees.

        Assume we have a horizontal box B = (x_center, y_center, width, height),
        where width is along the x-axis and height is along the y-axis.
        The rotated box B_rot (x_center, y_center, width, height, angle)
        can be seen as:

        1. When angle == 0:
           B_rot == B
        2. When angle > 0:
           B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;
        3. When angle < 0:
           B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.

        Mathematically, since the right-handed coordinate system for image space
        is (y, x), where y is top->down and x is left->right, the 4 vertices of the
        rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from
        the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)
        in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians,
        :math:`(y_c, x_c)` is the center of the rectangle):

        .. math::
            yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c,

            xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c,

        which is the standard rigid-body rotation transformation.

        Intuitively, the angle is
        (1) the rotation angle from y-axis in image space
        to the height vector (top->down in the box's local coordinate system)
        of the box in CCW, and
        (2) the rotation angle from x-axis in image space
        to the width vector (left->right in the box's local coordinate system)
        of the box in CCW.

        More intuitively, consider the following horizontal box ABCD represented
        in (x1, y1, x2, y2): (3, 2, 7, 4),
        covering the [3, 7] x [2, 4] region of the continuous coordinate system
        which looks like this:

        .. code:: none

            O--------> x
            |
            |  A---B
            |  |   |
            |  D---C
            |
            v y

        Note that each capital letter represents one 0-dimensional geometric point
        instead of a 'square pixel' here.

        In the example above, using (x, y) to represent a point we have:

        .. math::

            O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)

        We name vector AB = vector DC as the width vector in box's local coordinate system, and
        vector AD = vector BC as the height vector in box's local coordinate system. Initially,
        when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis
        in the image space, respectively.

        For better illustration, we denote the center of the box as E,

        .. code:: none

            O--------> x
            |
            |  A---B
            |  | E |
            |  D---C
            |
            v y

        where the center E = ((3+7)/2, (2+4)/2) = (5, 3).

        Also,

        .. math::

            width = |AB| = |CD| = 7 - 3 = 4,
            height = |AD| = |BC| = 4 - 2 = 2.

        Therefore, the corresponding representation for the same shape in rotated box in
        (x_center, y_center, width, height, angle) format is:

        (5, 3, 4, 2, 0),

        Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees
        CCW (counter-clockwise) by definition. It looks like this:

        .. code:: none

            O--------> x
            |   B-C
            |   | |
            |   |E|
            |   | |
            |   A-D
            v y

        The center E is still located at the same point (5, 3), while the vertices
        ABCD are rotated by 90 degrees CCW with regard to E:
        A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)

        Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to
        vector AD or vector BC (the top->down height vector in box's local coordinate system),
        or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right
        width vector in box's local coordinate system).

        .. math::

            width = |AB| = |CD| = 5 - 1 = 4,
            height = |AD| = |BC| = 6 - 4 = 2.

        Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)
        by definition? It looks like this:

        .. code:: none

            O--------> x
            |   D-A
            |   | |
            |   |E|
            |   | |
            |   C-B
            v y

        The center E is still located at the same point (5, 3), while the vertices
        ABCD are rotated by 90 degrees CW with regard to E:
        A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)

        .. math::

            width = |AB| = |CD| = 5 - 1 = 4,
            height = |AD| = |BC| = 6 - 4 = 2.

        This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU
        will be 1. However, these two will generate different RoI Pooling results and
        should not be treated as an identical box.

        On the other hand, it's easy to see that (X, Y, W, H, A) is identical to
        (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be
        identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is
        equivalent to rotating the same shape 90 degrees CW.

        We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):

        .. code:: none

            O--------> x
            |
            |  C---D
            |  | E |
            |  B---A
            |
            v y

        .. math::

            A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),

            width = |AB| = |CD| = 7 - 3 = 4,
            height = |AD| = |BC| = 4 - 2 = 2.

        Finally, this is a very inaccurate (heavily quantized) illustration of
        how (5, 3, 4, 2, 60) looks like in case anyone wonders:

        .. code:: none

            O--------> x
            |     B\
            |    /  C
            |   /E /
            |  A  /
            |   `D
            v y

        It's still a rectangle with center of (5, 3), width of 4 and height of 2,
        but its angle (and thus orientation) is somewhere between
        (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).
        """
        device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
        tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
        if tensor.numel() == 0:
            # Use reshape, so we don't end up creating a new tensor that does not depend on
            # the inputs (and consequently confuses jit)
            tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)
        assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()
        self.tensor = tensor

    def clone(self) -> "RotatedBoxes":
        """
        Clone the RotatedBoxes.

        Returns:
            RotatedBoxes
        """
        return RotatedBoxes(self.tensor.clone())

    def to(self, device: torch.device):
        # Boxes are assumed float32 and does not support to(dtype)
        return RotatedBoxes(self.tensor.to(device=device))

    def area(self) -> torch.Tensor:
        """
        Computes the area of all the boxes.

        Returns:
            torch.Tensor: a vector with areas of each box.
        """
        box = self.tensor
        # area is width * height; rotation does not change area
        area = box[:, 2] * box[:, 3]
        return area

    def normalize_angles(self) -> None:
        """
        Restrict angles to the range of [-180, 180) degrees
        """
        self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0

    def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:
        """
        Clip (in place) the boxes by limiting x coordinates to the range [0, width]
        and y coordinates to the range [0, height].

        For RRPN:
        Only clip boxes that are almost horizontal with a tolerance of
        clip_angle_threshold to maintain backward compatibility.

        Rotated boxes beyond this threshold are not clipped for two reasons:

        1. There are potentially multiple ways to clip a rotated box to make it
           fit within the image.
        2. It's tricky to make the entire rectangular box fit within the image
           and still be able to not leave out pixels of interest.

        Therefore we rely on ops like RoIAlignRotated to safely handle this.

        Args:
            box_size (height, width): The clipping box's size.
            clip_angle_threshold:
                Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees),
                we do the clipping as horizontal boxes.
        """
        h, w = box_size

        # normalize angles to be within [-180, 180) degrees
        self.normalize_angles()

        # only clip boxes whose angle is within the threshold of horizontal
        idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]

        # convert to (x1, y1, x2, y2)
        x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0
        y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0
        x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0
        y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0

        # clip
        x1.clamp_(min=0, max=w)
        y1.clamp_(min=0, max=h)
        x2.clamp_(min=0, max=w)
        y2.clamp_(min=0, max=h)

        # convert back to (xc, yc, w, h)
        self.tensor[idx, 0] = (x1 + x2) / 2.0
        self.tensor[idx, 1] = (y1 + y2) / 2.0
        # make sure widths and heights do not increase due to numerical errors
        self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)
        self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)

    def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
        """
        Find boxes that are non-empty.
        A box is considered empty, if either of its side is no larger than threshold.

        Returns:
            Tensor: a binary vector which represents
            whether each box is empty (False) or non-empty (True).
        """
        box = self.tensor
        widths = box[:, 2]
        heights = box[:, 3]
        keep = (widths > threshold) & (heights > threshold)
        return keep

    def __getitem__(self, item) -> "RotatedBoxes":
        """
        Returns:
            RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.

        The following usage are allowed:

        1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
        2. `new_boxes = boxes[2:10]`: return a slice of boxes.
        3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
           with `length = len(boxes)`. Nonzero elements in the vector will be selected.

        Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
        subject to Pytorch's indexing semantics.
        """
        if isinstance(item, int):
            # an int index yields a single row; keep the 2D (1, 5) shape
            return RotatedBoxes(self.tensor[item].view(1, -1))
        b = self.tensor[item]
        assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
            item
        )
        return RotatedBoxes(b)

    def __len__(self) -> int:
        return self.tensor.shape[0]

    def __repr__(self) -> str:
        return "RotatedBoxes(" + str(self.tensor) + ")"

    def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
        """
        Args:
            box_size (height, width): Size of the reference box covering
                [0, width] x [0, height]
            boundary_threshold (int): Boxes that extend beyond the reference box
                boundary by more than boundary_threshold are considered "outside".

        For RRPN, it might not be necessary to call this function since it's common
        for rotated box to extend to outside of the image boundaries
        (the clip function only clips the near-horizontal boxes)

        Returns:
            a binary vector, indicating whether each box is inside the reference box.
        """
        height, width = box_size
        cnt_x = self.tensor[..., 0]
        cnt_y = self.tensor[..., 1]
        half_w = self.tensor[..., 2] / 2.0
        half_h = self.tensor[..., 3] / 2.0
        a = self.tensor[..., 4]
        c = torch.abs(torch.cos(a * math.pi / 180.0))
        s = torch.abs(torch.sin(a * math.pi / 180.0))
        # This basically computes the horizontal bounding rectangle of the rotated box
        max_rect_dx = c * half_w + s * half_h
        max_rect_dy = c * half_h + s * half_w
        inds_inside = (
            (cnt_x - max_rect_dx >= -boundary_threshold)
            & (cnt_y - max_rect_dy >= -boundary_threshold)
            & (cnt_x + max_rect_dx < width + boundary_threshold)
            & (cnt_y + max_rect_dy < height + boundary_threshold)
        )
        return inds_inside

    def get_centers(self) -> torch.Tensor:
        """
        Returns:
            The box centers in a Nx2 array of (x, y).
        """
        return self.tensor[:, :2]

    def scale(self, scale_x: float, scale_y: float) -> None:
        """
        Scale the rotated box with horizontal and vertical scaling factors
        Note: when scale_factor_x != scale_factor_y,
        the rotated box does not preserve the rectangular shape when the angle
        is not a multiple of 90 degrees under resize transformation.
        Instead, the shape is a parallelogram (that has skew)
        Here we make an approximation by fitting a rotated rectangle to the parallelogram.
        """
        self.tensor[:, 0] *= scale_x
        self.tensor[:, 1] *= scale_y
        theta = self.tensor[:, 4] * math.pi / 180.0
        c = torch.cos(theta)
        s = torch.sin(theta)

        # In image space, y is top->down and x is left->right
        # Consider the local coordintate system for the rotated box,
        # where the box center is located at (0, 0), and the four vertices ABCD are
        # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)
        # the midpoint of the left edge AD of the rotated box E is:
        # E = (A+D)/2 = (-w / 2, 0)
        # the midpoint of the top edge AB of the rotated box F is:
        # F(0, -h / 2)
        # To get the old coordinates in the global system, apply the rotation transformation
        # (Note: the right-handed coordinate system for image space is yOx):
        # (old_x, old_y) = (s * y + c * x, c * y - s * x)
        # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)
        # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)
        # After applying the scaling factor (sfx, sfy):
        # E(new) = (-sfx * c * w / 2, sfy * s * w / 2)
        # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)
        # The new width after scaling tranformation becomes:

        # w(new) = |E(new) - O| * 2
        #        = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2
        #        = sqrt[(sfx * c)^2 + (sfy * s)^2] * w
        # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]
        #
        # For example,
        # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;
        # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y
        self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)

        # h(new) = |F(new) - O| * 2
        #        = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2
        #        = sqrt[(sfx * s)^2 + (sfy * c)^2] * h
        # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]
        #
        # For example,
        # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;
        # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x
        self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)

        # The angle is the rotation angle from y-axis in image space to the height
        # vector (top->down in the box's local coordinate system) of the box in CCW.
        #
        # angle(new) = angle_yOx(O - F(new))
        #            = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )
        #            = atan2(sfx * s * h / 2, sfy * c * h / 2)
        #            = atan2(sfx * s, sfy * c)
        #
        # For example,
        # when sfx == sfy, angle(new) == atan2(s, c) == angle(old)
        self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi

    @classmethod
    def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes":
        """
        Concatenates a list of RotatedBoxes into a single RotatedBoxes

        Arguments:
            boxes_list (list[RotatedBoxes])

        Returns:
            RotatedBoxes: the concatenated RotatedBoxes
        """
        assert isinstance(boxes_list, (list, tuple))
        if len(boxes_list) == 0:
            # empty input: __init__ reshapes the empty tensor to (0, 5)
            return cls(torch.empty(0))
        assert all([isinstance(box, RotatedBoxes) for box in boxes_list])

        # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
        cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
        return cat_boxes

    @property
    def device(self) -> torch.device:
        return self.tensor.device

    @torch.jit.unused
    def __iter__(self):
        """
        Yield a box as a Tensor of shape (5,) at a time.
        """
        yield from self.tensor
def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> torch.Tensor:
    """
    Given two lists of rotated boxes of size N and M,
    compute the IoU (intersection over union)
    between **all** N x M pairs of boxes.
    The box order must be (x_center, y_center, width, height, angle).

    Args:
        boxes1, boxes2 (RotatedBoxes):
            two `RotatedBoxes`. Contains N & M rotated boxes, respectively.

    Returns:
        Tensor: IoU, sized [N,M].
    """
    # All the work happens in the compiled rotated-IoU op; this just unwraps tensors.
    return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor)
|
evocodebench_data_193
|
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.utils.registry import Registry
# Global registry mapping proposal-generator names to their classes; entries
# are added via @PROPOSAL_GENERATOR_REGISTRY.register() in the submodules
# imported below, and looked up by build_proposal_generator().
PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR")
PROPOSAL_GENERATOR_REGISTRY.__doc__ = """
Registry for proposal generator, which produces object proposals from feature maps.
The registered object will be called with `obj(cfg, input_shape)`.
The call should return a `nn.Module` object.
"""
from . import rpn, rrpn # noqa F401 isort:skip
def build_proposal_generator(cfg, input_shape):
    """
    Construct the proposal generator named by ``cfg.MODEL.PROPOSAL_GENERATOR.NAME``.

    The special name "PrecomputedProposals" means no proposal generator is
    used (proposals come precomputed with the data), in which case ``None``
    is returned.
    """
    generator_name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
    if generator_name == "PrecomputedProposals":
        return None
    generator_cls = PROPOSAL_GENERATOR_REGISTRY.get(generator_name)
    return generator_cls(cfg, input_shape)
|
evocodebench_data_194
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import Dict, List, Tuple, Union
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple
from detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss
from detectron2.structures import Boxes, Instances
from detectron2.utils.events import get_event_storage
__all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers"]
logger = logging.getLogger(__name__)
"""
Shape shorthand in this module:
N: number of images in the minibatch
R: number of ROIs, combined over all images, in the minibatch
Ri: number of ROIs in image i
K: number of foreground classes. E.g.,there are 80 foreground classes in COCO.
Naming convention:
deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
transform (see :class:`box_regression.Box2BoxTransform`).
pred_class_logits: predicted class scores in [-inf, +inf]; use
softmax(pred_class_logits) to estimate P(class).
gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
foreground object classes and K represents the background class.
pred_proposal_deltas: predicted box2box transform deltas for transforming proposals
to detection box predictions.
gt_proposal_deltas: ground-truth box2box transform deltas
"""
def fast_rcnn_inference(
    boxes: List[torch.Tensor],
    scores: List[torch.Tensor],
    image_shapes: List[Tuple[int, int]],
    score_thresh: float,
    nms_thresh: float,
    topk_per_image: int,
):
    """
    Run `fast_rcnn_inference_single_image` on every image of the batch.

    Args:
        boxes (list[Tensor]): per-image predicted boxes. Element i has shape
            (Ri, K * 4) for class-specific regression or (Ri, 4) for
            class-agnostic regression, where Ri is the number of predicted
            objects for image i. Compatible with the output of
            :meth:`FastRCNNOutputLayers.predict_boxes`.
        scores (list[Tensor]): per-image predicted class scores. Element i has
            shape (Ri, K + 1). Compatible with the output of
            :meth:`FastRCNNOutputLayers.predict_probs`.
        image_shapes (list[tuple]): per-image size tuples.
        score_thresh (float): only return detections whose confidence exceeds
            this threshold.
        nms_thresh (float): NMS threshold, in [0, 1].
        topk_per_image (int): number of top-scoring detections to return;
            negative means return everything.

    Returns:
        instances: (list[Instances]): N Instances, one per image, holding the
            topk most confident detections.
        kept_indices: (list[Tensor]): N 1-D tensors; element i maps each kept
            detection back to its index in [0, Ri) of the inputs for image i.
    """
    all_instances = []
    all_kept_indices = []
    for scores_i, boxes_i, shape_i in zip(scores, boxes, image_shapes):
        instances_i, kept_i = fast_rcnn_inference_single_image(
            boxes_i, scores_i, shape_i, score_thresh, nms_thresh, topk_per_image
        )
        all_instances.append(instances_i)
        all_kept_indices.append(kept_i)
    return all_instances, all_kept_indices
def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn"):
"""
Log the classification metrics to EventStorage.
Args:
pred_logits: Rx(K+1) logits. The last column is for background class.
gt_classes: R labels
"""
num_instances = gt_classes.numel()
if num_instances == 0:
return
pred_classes = pred_logits.argmax(dim=1)
bg_class_ind = pred_logits.shape[1] - 1
fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind)
num_fg = fg_inds.nonzero().numel()
fg_gt_classes = gt_classes[fg_inds]
fg_pred_classes = pred_classes[fg_inds]
num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()
num_accurate = (pred_classes == gt_classes).nonzero().numel()
fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()
storage = get_event_storage()
storage.put_scalar(f"{prefix}/cls_accuracy", num_accurate / num_instances)
if num_fg > 0:
storage.put_scalar(f"{prefix}/fg_cls_accuracy", fg_num_accurate / num_fg)
storage.put_scalar(f"{prefix}/false_negative", num_false_negative / num_fg)
def fast_rcnn_inference_single_image(
    boxes,
    scores,
    image_shape: Tuple[int, int],
    score_thresh: float,
    nms_thresh: float,
    topk_per_image: int,
):
    """
    Per-image inference: threshold detection scores, then apply class-wise
    non-maximum suppression (NMS).

    Args:
        Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
        per image.

    Returns:
        Same as `fast_rcnn_inference`, but for only one image.
    """
    # Drop any prediction containing NaN/Inf in its boxes or scores.
    finite = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
    if not finite.all():
        boxes = boxes[finite]
        scores = scores[finite]
    # Discard the background column; keep the K foreground scores.
    scores = scores[:, :-1]
    num_reg_classes = boxes.shape[1] // 4
    # Clip to the image through the Boxes helper, then restore R x C x 4 layout.
    clipped = Boxes(boxes.reshape(-1, 4))
    clipped.clip(image_shape)
    boxes = clipped.tensor.view(-1, num_reg_classes, 4)
    # 1. Score thresholding first makes the subsequent NMS cheaper.
    keep_mask = scores > score_thresh  # R x K
    # keep_inds rows are (prediction index, class index) pairs.
    keep_inds = keep_mask.nonzero()
    if num_reg_classes == 1:
        # Class-agnostic regression: one box per prediction.
        boxes = boxes[keep_inds[:, 0], 0]
    else:
        boxes = boxes[keep_mask]
    scores = scores[keep_mask]
    # 2. NMS per class; optionally truncate to the top-k detections.
    kept = batched_nms(boxes, scores, keep_inds[:, 1], nms_thresh)
    if topk_per_image >= 0:
        kept = kept[:topk_per_image]
    boxes = boxes[kept]
    scores = scores[kept]
    keep_inds = keep_inds[kept]
    result = Instances(image_shape)
    result.pred_boxes = Boxes(boxes)
    result.scores = scores
    result.pred_classes = keep_inds[:, 1]
    return result, keep_inds[:, 0]
class FastRCNNOutputLayers(nn.Module):
    """
    Two linear layers for predicting Fast R-CNN outputs:
    1. proposal-to-detection box regression deltas
    2. classification scores
    """
    @configurable
    def __init__(
        self,
        input_shape: ShapeSpec,
        *,
        box2box_transform,
        num_classes: int,
        test_score_thresh: float = 0.0,
        test_nms_thresh: float = 0.5,
        test_topk_per_image: int = 100,
        cls_agnostic_bbox_reg: bool = False,
        smooth_l1_beta: float = 0.0,
        box_reg_loss_type: str = "smooth_l1",
        loss_weight: Union[float, Dict[str, float]] = 1.0,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape (ShapeSpec): shape of the input feature to this module
            box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
            num_classes (int): number of foreground classes
            test_score_thresh (float): threshold to filter predictions results.
            test_nms_thresh (float): NMS threshold for prediction results.
            test_topk_per_image (int): number of top predictions to produce per image.
            cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
            smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if
                `box_reg_loss_type` is "smooth_l1"
            box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou",
                "diou", "ciou"
            loss_weight (float|dict): weights to use for losses. Can be single float for weighting
                all losses, or a dict of individual weightings. Valid dict keys are:
                    * "loss_cls": applied to classification loss
                    * "loss_box_reg": applied to box regression loss
        """
        super().__init__()
        if isinstance(input_shape, int):  # some backward compatibility
            input_shape = ShapeSpec(channels=input_shape)
        self.num_classes = num_classes
        # Flattened feature size: channels * H * W (H/W default to 1 if unset).
        input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)
        # prediction layer for num_classes foreground classes and one background class (hence + 1)
        self.cls_score = nn.Linear(input_size, num_classes + 1)
        num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
        # box_dim is 4 for axis-aligned boxes, 5 for rotated boxes, inferred
        # from the number of weights of the box transform.
        box_dim = len(box2box_transform.weights)
        self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
        # Small-std gaussian init for weights, zero biases.
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for l in [self.cls_score, self.bbox_pred]:
            nn.init.constant_(l.bias, 0)
        self.box2box_transform = box2box_transform
        self.smooth_l1_beta = smooth_l1_beta
        self.test_score_thresh = test_score_thresh
        self.test_nms_thresh = test_nms_thresh
        self.test_topk_per_image = test_topk_per_image
        self.box_reg_loss_type = box_reg_loss_type
        # A scalar loss_weight applies uniformly to both losses.
        if isinstance(loss_weight, float):
            loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight}
        self.loss_weight = loss_weight
    @classmethod
    def from_config(cls, cfg, input_shape):
        # Translate a detectron2 config node into the keyword arguments of
        # __init__ (consumed via the @configurable mechanism).
        return {
            "input_shape": input_shape,
            "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
            # fmt: off
            "num_classes"           : cfg.MODEL.ROI_HEADS.NUM_CLASSES,
            "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
            "smooth_l1_beta"        : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
            "test_score_thresh"     : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
            "test_nms_thresh"       : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
            "test_topk_per_image"   : cfg.TEST.DETECTIONS_PER_IMAGE,
            "box_reg_loss_type"     : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,
            "loss_weight"           : {"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT},
            # fmt: on
        }
    def forward(self, x):
        """
        Args:
            x: per-region features of shape (N, ...) for N bounding boxes to predict.
        Returns:
            (Tensor, Tensor):
            First tensor: shape (N,K+1), scores for each of the N box. Each row contains the
            scores for K object categories and 1 background class.
            Second tensor: bounding box regression deltas for each box. Shape is shape (N,Kx4),
            or (N,4) for class-agnostic regression.
        """
        # Flatten (N, C, H, W) ROI features to (N, C*H*W) for the linear layers.
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = self.cls_score(x)
        proposal_deltas = self.bbox_pred(x)
        return scores, proposal_deltas
    def losses(self, predictions, proposals):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were used
                to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,
                ``gt_classes`` are expected.
        Returns:
            Dict[str, Tensor]: dict of losses
        """
        scores, proposal_deltas = predictions
        # parse classification outputs
        gt_classes = (
            cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
        )
        _log_classification_stats(scores, gt_classes)
        # parse box regression outputs
        if len(proposals):
            proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)  # Nx4
            assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
            # If "gt_boxes" does not exist, the proposals must be all negative and
            # should not be included in regression loss computation.
            # Here we just use proposal_boxes as an arbitrary placeholder because its
            # value won't be used in self.box_reg_loss().
            gt_boxes = cat(
                [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
                dim=0,
            )
        else:
            # Empty batch: zero-size tensors keep the losses well-defined.
            proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
        losses = {
            "loss_cls": cross_entropy(scores, gt_classes, reduction="mean"),
            "loss_box_reg": self.box_reg_loss(
                proposal_boxes, gt_boxes, proposal_deltas, gt_classes
            ),
        }
        # Apply per-loss weights; unknown keys default to weight 1.0.
        return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
    def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):
        """
        Args:
            proposal_boxes/gt_boxes are tensors with the same shape (R, 4 or 5).
            pred_deltas has shape (R, 4 or 5), or (R, num_classes * (4 or 5)).
            gt_classes is a long tensor of shape R, the gt class label of each proposal.
            R shall be the number of proposals.
        """
        box_dim = proposal_boxes.shape[1]  # 4 or 5
        # Regression loss is only computed for foreground proposals (those matched to a GT)
        fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]
        if pred_deltas.shape[1] == box_dim:  # cls-agnostic regression
            fg_pred_deltas = pred_deltas[fg_inds]
        else:
            # Class-specific regression: pick each proposal's deltas for its gt class.
            fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
                fg_inds, gt_classes[fg_inds]
            ]
        # `...` (Ellipsis) stands in for the fg_mask argument: the tensors above
        # are already restricted to foreground, so indexing with `...` selects all.
        loss_box_reg = _dense_box_regression_loss(
            [proposal_boxes[fg_inds]],
            self.box2box_transform,
            [fg_pred_deltas.unsqueeze(0)],
            [gt_boxes[fg_inds]],
            ...,
            self.box_reg_loss_type,
            self.smooth_l1_beta,
        )
        # The reg loss is normalized using the total number of regions (R), not the number
        # of foreground regions even though the box regression loss is only defined on
        # foreground regions. Why? Because doing so gives equal training influence to
        # each foreground example. To see how, consider two different minibatches:
        # (1) Contains a single foreground region
        # (2) Contains 100 foreground regions
        # If we normalize by the number of foreground regions, the single example in
        # minibatch (1) will be given 100 times as much influence as each foreground
        # example in minibatch (2). Normalizing by the total number of regions, R,
        # means that the single example in minibatch (1) and each of the 100 examples
        # in minibatch (2) are given equal influence.
        return loss_box_reg / max(gt_classes.numel(), 1.0)  # return 0 if empty
    def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were
                used to compute predictions. The ``proposal_boxes`` field is expected.
        Returns:
            list[Instances]: same as `fast_rcnn_inference`.
            list[Tensor]: same as `fast_rcnn_inference`.
        """
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        image_shapes = [x.image_size for x in proposals]
        return fast_rcnn_inference(
            boxes,
            scores,
            image_shapes,
            self.test_score_thresh,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )
    def predict_boxes_for_gt_classes(self, predictions, proposals):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were used
                to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.
        Returns:
            list[Tensor]:
                A list of Tensors of predicted boxes for GT classes in case of
                class-specific box head. Element i of the list has shape (Ri, B), where Ri is
                the number of proposals for image i and B is the box dimension (4 or 5)
        """
        if not len(proposals):
            return []
        scores, proposal_deltas = predictions
        proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
        N, B = proposal_boxes.shape
        predict_boxes = self.box2box_transform.apply_deltas(
            proposal_deltas, proposal_boxes
        )  # Nx(KxB)
        K = predict_boxes.shape[1] // B
        if K > 1:
            gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
            # Some proposals are ignored or have a background class. Their gt_classes
            # cannot be used as index.
            gt_classes = gt_classes.clamp_(0, K - 1)
            # Select, per proposal, the box predicted for its gt class.
            predict_boxes = predict_boxes.view(N, K, B)[
                torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes
            ]
        # Split the flat N-row result back into per-image chunks.
        num_prop_per_image = [len(p) for p in proposals]
        return predict_boxes.split(num_prop_per_image)
    def predict_boxes(
        self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
    ):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were
                used to compute predictions. The ``proposal_boxes`` field is expected.
        Returns:
            list[Tensor]:
                A list of Tensors of predicted class-specific or class-agnostic boxes
                for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
                the number of proposals for image i and B is the box dimension (4 or 5)
        """
        if not len(proposals):
            return []
        _, proposal_deltas = predictions
        num_prop_per_image = [len(p) for p in proposals]
        proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
        # Decode deltas against their proposals, then re-split per image.
        predict_boxes = self.box2box_transform.apply_deltas(
            proposal_deltas,
            proposal_boxes,
        )  # Nx(KxB)
        return predict_boxes.split(num_prop_per_image)
    def predict_probs(
        self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]
    ):
        """
        Args:
            predictions: return values of :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were
                used to compute predictions.
        Returns:
            list[Tensor]:
                A list of Tensors of predicted class probabilities for each image.
                Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.
        """
        scores, _ = predictions
        num_inst_per_image = [len(p) for p in proposals]
        # Softmax over the K+1 logits, then re-split per image.
        probs = F.softmax(scores, dim=-1)
        return probs.split(num_inst_per_image, dim=0)
|
evocodebench_data_195
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from ..structures import Instances
from detectron2.utils.registry import Registry
from ..config.config import CfgNode as CfgNode_
from detectron2.config import configurable
# Global registry mapping tracker-head names to tracker classes; looked up
# by build_tracker_head() below.
TRACKER_HEADS_REGISTRY = Registry("TRACKER_HEADS")
TRACKER_HEADS_REGISTRY.__doc__ = """
Registry for tracking classes.
"""
class BaseTracker(object):
    """
    Abstract parent class that every tracker derives from.

    Subclasses must implement :meth:`from_config` and :meth:`update`; this
    base class only holds the bookkeeping state shared by all trackers.
    """
    @configurable
    def __init__(self, **kwargs):
        self._id_count = 0                # counter used to assign new identities
        self._prev_instances = None       # (D2) Instances from the previous frame
        self._matched_idx = set()         # indices in prev_instances found matching
        self._matched_ID = set()          # identities in prev_instances found matching
        self._untracked_prev_idx = set()  # indices in prev_instances not found matching
    @classmethod
    def from_config(cls, cfg: CfgNode_):
        # Abstract: subclasses build their constructor kwargs from the config.
        raise NotImplementedError("Calling BaseTracker::from_config")
    def update(self, predictions: Instances) -> Instances:
        """
        Assign identities to the current frame's predictions.

        Args:
            predictions: D2 Instances for predictions of the current frame
        Return:
            D2 Instances for predictions of the current frame with ID assigned
            _prev_instances and instances will have the following fields:
              .pred_boxes               (shape=[N, 4])
              .scores                   (shape=[N,])
              .pred_classes             (shape=[N,])
              .pred_keypoints           (shape=[N, M, 3], Optional)
              .pred_masks               (shape=List[2D_MASK], Optional)   2D_MASK: shape=[H, W]
              .ID                       (shape=[N,])
            N: # of detected bboxes
            H and W: height and width of 2D mask
        """
        raise NotImplementedError("Calling BaseTracker::update")
def build_tracker_head(cfg: CfgNode_) -> BaseTracker:
    """
    Instantiate the tracker named by ``cfg.TRACKER_HEADS.TRACKER_NAME``.

    Args:
        cfg: D2 CfgNode, config file with tracker information
    Return:
        tracker object
    """
    tracker_name = cfg.TRACKER_HEADS.TRACKER_NAME
    tracker_cls = TRACKER_HEADS_REGISTRY.get(tracker_name)
    return tracker_cls(cfg)
|
evocodebench_data_196
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import List, Tuple, Union
import torch
from fvcore.nn import giou_loss, smooth_l1_loss
from torch.nn import functional as F
from detectron2.layers import cat, ciou_loss, diou_loss
from detectron2.structures import Boxes
# Value for clamping large dw and dh predictions. The heuristic is that we clamp
# such that dw and dh are no larger than what would transform a 16px box into a
# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px).
_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16)
# Public API of this module.
__all__ = ["Box2BoxTransform", "Box2BoxTransformRotated", "Box2BoxTransformLinear"]
@torch.jit.script
class Box2BoxTransform(object):
    """
    The box-to-box transform defined in R-CNN. The transformation is parameterized
    by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height
    by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).
    """
    def __init__(
        self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP
    ):
        """
        Args:
            weights (4-element tuple): Scaling factors that are applied to the
                (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
                such that the deltas have unit variance; now they are treated as
                hyperparameters of the system.
            scale_clamp (float): When predicting deltas, the predicted box scaling
                factors (dw and dh) are clamped such that they are <= scale_clamp.
        """
        self.weights = weights
        self.scale_clamp = scale_clamp
    def get_deltas(self, src_boxes, target_boxes):
        """
        Get box regression transformation deltas (dx, dy, dw, dh) that can be used
        to transform the `src_boxes` into the `target_boxes`. That is, the relation
        ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
        any delta is too large and is clamped).
        Args:
            src_boxes (Tensor): source boxes, e.g., object proposals
            target_boxes (Tensor): target of the transformation, e.g., ground-truth
                boxes.
        """
        assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
        assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
        # Convert both sets of boxes from (x1, y1, x2, y2) to center/size form.
        src_widths = src_boxes[:, 2] - src_boxes[:, 0]
        src_heights = src_boxes[:, 3] - src_boxes[:, 1]
        src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
        src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
        target_widths = target_boxes[:, 2] - target_boxes[:, 0]
        target_heights = target_boxes[:, 3] - target_boxes[:, 1]
        target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths
        target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights
        # Offsets are relative to the source box size; scales are in log space.
        wx, wy, ww, wh = self.weights
        dx = wx * (target_ctr_x - src_ctr_x) / src_widths
        dy = wy * (target_ctr_y - src_ctr_y) / src_heights
        dw = ww * torch.log(target_widths / src_widths)
        dh = wh * torch.log(target_heights / src_heights)
        deltas = torch.stack((dx, dy, dw, dh), dim=1)
        # Degenerate (zero/negative width) source boxes would have produced
        # inf/nan above; fail loudly instead of returning bad deltas.
        assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
        return deltas
    def apply_deltas(self, deltas, boxes):
        """
        Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
        Args:
            deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
                deltas[i] represents k potentially different class-specific
                box transformations for the single box boxes[i].
            boxes (Tensor): boxes to transform, of shape (N, 4)
        """
        deltas = deltas.float()  # ensure fp32 for decoding precision
        boxes = boxes.to(deltas.dtype)
        widths = boxes[:, 2] - boxes[:, 0]
        heights = boxes[:, 3] - boxes[:, 1]
        ctr_x = boxes[:, 0] + 0.5 * widths
        ctr_y = boxes[:, 1] + 0.5 * heights
        # Strided slicing (i::4) handles k class-specific deltas per box.
        wx, wy, ww, wh = self.weights
        dx = deltas[:, 0::4] / wx
        dy = deltas[:, 1::4] / wy
        dw = deltas[:, 2::4] / ww
        dh = deltas[:, 3::4] / wh
        # Prevent sending too large values into torch.exp()
        dw = torch.clamp(dw, max=self.scale_clamp)
        dh = torch.clamp(dh, max=self.scale_clamp)
        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
        pred_w = torch.exp(dw) * widths[:, None]
        pred_h = torch.exp(dh) * heights[:, None]
        # Convert back from center/size to (x1, y1, x2, y2) corners.
        x1 = pred_ctr_x - 0.5 * pred_w
        y1 = pred_ctr_y - 0.5 * pred_h
        x2 = pred_ctr_x + 0.5 * pred_w
        y2 = pred_ctr_y + 0.5 * pred_h
        pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1)
        return pred_boxes.reshape(deltas.shape)
@torch.jit.script
class Box2BoxTransformRotated(object):
    """
    The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized
    by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height
    by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height),
    and rotate a box's angle by da (radians).
    Note: angles of deltas are in radians while angles of boxes are in degrees.
    """
    def __init__(
        self,
        weights: Tuple[float, float, float, float, float],
        scale_clamp: float = _DEFAULT_SCALE_CLAMP,
    ):
        """
        Args:
            weights (5-element tuple): Scaling factors that are applied to the
                (dx, dy, dw, dh, da) deltas. These are treated as
                hyperparameters of the system.
            scale_clamp (float): When predicting deltas, the predicted box scaling
                factors (dw and dh) are clamped such that they are <= scale_clamp.
        """
        self.weights = weights
        self.scale_clamp = scale_clamp
    def get_deltas(self, src_boxes, target_boxes):
        """
        Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used
        to transform the `src_boxes` into the `target_boxes`. That is, the relation
        ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
        any delta is too large and is clamped).
        Args:
            src_boxes (Tensor): Nx5 source boxes, e.g., object proposals
            target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth
                boxes.
        """
        assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
        assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
        # Boxes are already in (ctr_x, ctr_y, w, h, angle) form; unbind columns.
        src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1)
        target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind(
            target_boxes, dim=1
        )
        wx, wy, ww, wh, wa = self.weights
        dx = wx * (target_ctr_x - src_ctr_x) / src_widths
        dy = wy * (target_ctr_y - src_ctr_y) / src_heights
        dw = ww * torch.log(target_widths / src_widths)
        dh = wh * torch.log(target_heights / src_heights)
        # Angles of deltas are in radians while angles of boxes are in degrees.
        # the conversion to radians serve as a way to normalize the values
        da = target_angles - src_angles
        da = (da + 180.0) % 360.0 - 180.0  # make it in [-180, 180)
        da *= wa * math.pi / 180.0
        deltas = torch.stack((dx, dy, dw, dh, da), dim=1)
        # Degenerate source boxes would have produced inf/nan above.
        assert (
            (src_widths > 0).all().item()
        ), "Input boxes to Box2BoxTransformRotated are not valid!"
        return deltas
    def apply_deltas(self, deltas, boxes):
        """
        Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`.
        Args:
            deltas (Tensor): transformation deltas of shape (N, k*5).
                deltas[i] represents box transformation for the single box boxes[i].
            boxes (Tensor): boxes to transform, of shape (N, 5)
        """
        assert deltas.shape[1] % 5 == 0 and boxes.shape[1] == 5
        # Unsqueeze so each (N,) column broadcasts against the (N, k) delta slices.
        boxes = boxes.to(deltas.dtype).unsqueeze(2)
        ctr_x = boxes[:, 0]
        ctr_y = boxes[:, 1]
        widths = boxes[:, 2]
        heights = boxes[:, 3]
        angles = boxes[:, 4]
        wx, wy, ww, wh, wa = self.weights
        dx = deltas[:, 0::5] / wx
        dy = deltas[:, 1::5] / wy
        dw = deltas[:, 2::5] / ww
        dh = deltas[:, 3::5] / wh
        da = deltas[:, 4::5] / wa
        # Prevent sending too large values into torch.exp()
        dw = torch.clamp(dw, max=self.scale_clamp)
        dh = torch.clamp(dh, max=self.scale_clamp)
        pred_boxes = torch.zeros_like(deltas)
        pred_boxes[:, 0::5] = dx * widths + ctr_x  # x_ctr
        pred_boxes[:, 1::5] = dy * heights + ctr_y  # y_ctr
        pred_boxes[:, 2::5] = torch.exp(dw) * widths  # width
        pred_boxes[:, 3::5] = torch.exp(dh) * heights  # height
        # Following original RRPN implementation,
        # angles of deltas are in radians while angles of boxes are in degrees.
        pred_angle = da * 180.0 / math.pi + angles
        pred_angle = (pred_angle + 180.0) % 360.0 - 180.0  # make it in [-180, 180)
        pred_boxes[:, 4::5] = pred_angle
        return pred_boxes
class Box2BoxTransformLinear(object):
    """
    The linear box-to-box transform defined in FCOS: a box is parameterized by
    the distances from the center of the (square) src box to the four edges of
    the target box.
    """
    def __init__(self, normalize_by_size=True):
        """
        Args:
            normalize_by_size: normalize deltas by the size of src (anchor) boxes.
        """
        self.normalize_by_size = normalize_by_size
    @staticmethod
    def _strides(boxes):
        # Per-box (w, h, w, h) tensor used to (de)normalize the four deltas.
        stride_w = boxes[:, 2] - boxes[:, 0]
        stride_h = boxes[:, 3] - boxes[:, 1]
        return torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1)
    def get_deltas(self, src_boxes, target_boxes):
        """
        Get box regression transformation deltas (dx1, dy1, dx2, dy2) that can be used
        to transform the `src_boxes` into the `target_boxes`; the relation
        ``target_boxes == self.apply_deltas(deltas, src_boxes)`` holds.
        The center of src must be inside target boxes.

        Args:
            src_boxes (Tensor): square source boxes, e.g., anchors
            target_boxes (Tensor): target of the transformation, e.g., ground-truth
                boxes.
        """
        assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
        assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
        ctr_x = 0.5 * (src_boxes[:, 0] + src_boxes[:, 2])
        ctr_y = 0.5 * (src_boxes[:, 1] + src_boxes[:, 3])
        # Distances from the source center to the target's four edges.
        deltas = torch.stack(
            (
                ctr_x - target_boxes[:, 0],  # left
                ctr_y - target_boxes[:, 1],  # top
                target_boxes[:, 2] - ctr_x,  # right
                target_boxes[:, 3] - ctr_y,  # bottom
            ),
            dim=1,
        )
        if self.normalize_by_size:
            deltas = deltas / self._strides(src_boxes)
        return deltas
    def apply_deltas(self, deltas, boxes):
        """
        Apply transformation `deltas` (dx1, dy1, dx2, dy2) to `boxes`.

        Args:
            deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
                deltas[i] represents k potentially different class-specific
                box transformations for the single box boxes[i].
            boxes (Tensor): boxes to transform, of shape (N, 4)
        """
        # Ensure the output is a valid box. See Sec 2.1 of https://arxiv.org/abs/2006.09214
        deltas = F.relu(deltas)
        boxes = boxes.to(deltas.dtype)
        ctr_x = 0.5 * (boxes[:, 0] + boxes[:, 2])
        ctr_y = 0.5 * (boxes[:, 1] + boxes[:, 3])
        if self.normalize_by_size:
            deltas = deltas * self._strides(boxes)
        pred_boxes = torch.zeros_like(deltas)
        pred_boxes[:, 0::4] = ctr_x[:, None] - deltas[:, 0::4]  # x1
        pred_boxes[:, 1::4] = ctr_y[:, None] - deltas[:, 1::4]  # y1
        pred_boxes[:, 2::4] = ctr_x[:, None] + deltas[:, 2::4]  # x2
        pred_boxes[:, 3::4] = ctr_y[:, None] + deltas[:, 3::4]  # y2
        return pred_boxes
def _dense_box_regression_loss(
    anchors: List[Union[Boxes, torch.Tensor]],
    box2box_transform: Box2BoxTransform,
    pred_anchor_deltas: List[torch.Tensor],
    gt_boxes: List[torch.Tensor],
    fg_mask: torch.Tensor,
    box_reg_loss_type="smooth_l1",
    smooth_l1_beta=0.0,
):
    """
    Compute a dense multi-level box regression loss, summed over ``fg_mask``.

    Args:
        anchors: #lvl anchor boxes, each is (HixWixA, 4)
        pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)
        gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))
        fg_mask: the foreground boolean mask of shape (N, R) to compute loss on
        box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou",
            "diou", "ciou".
        smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to
            use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
    """
    # Flatten the per-level anchors into a single (R, 4) tensor.
    if isinstance(anchors[0], Boxes):
        anchors = type(anchors[0]).cat(anchors).tensor  # (R, 4)
    else:
        anchors = cat(anchors)
    if box_reg_loss_type == "smooth_l1":
        # Smooth-L1 compares deltas directly in the transform's encoding space.
        target_deltas = torch.stack(
            [box2box_transform.get_deltas(anchors, boxes_i) for boxes_i in gt_boxes]
        )  # (N, R, 4)
        return smooth_l1_loss(
            cat(pred_anchor_deltas, dim=1)[fg_mask],
            target_deltas[fg_mask],
            beta=smooth_l1_beta,
            reduction="sum",
        )
    iou_losses = {"giou": giou_loss, "diou": diou_loss, "ciou": ciou_loss}
    if box_reg_loss_type not in iou_losses:
        raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'")
    # The IoU-family losses operate on decoded boxes rather than raw deltas.
    decoded_boxes = torch.stack(
        [
            box2box_transform.apply_deltas(deltas_i, anchors)
            for deltas_i in cat(pred_anchor_deltas, dim=1)
        ]
    )
    return iou_losses[box_reg_loss_type](
        decoded_boxes[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
    )
|
evocodebench_data_197
|
# -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import cv2
import numpy as np
def resize_image(input_image, resolution):
    """Resize so the short side is about ``resolution`` pixels.

    Both output dimensions are snapped to the nearest multiple of 64
    (a common requirement for diffusion / annotator backbones).

    Args:
        input_image: HxWxC image array.
        resolution: target length for the shorter side.

    Returns:
        Tuple of (resized image, scale factor applied).
    """
    height, width, _ = input_image.shape
    scale = float(resolution) / min(float(height), float(width))
    # Round the scaled dimensions to multiples of 64.
    new_h = int(np.round(float(height) * scale / 64.0)) * 64
    new_w = int(np.round(float(width) * scale / 64.0)) * 64
    # Lanczos for upscaling, area averaging for downscaling.
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    resized = cv2.resize(input_image, (new_w, new_h), interpolation=interp)
    return resized, scale
def resize_image_ori(h, w, image, k):
    """Resize ``image`` back to the original (h, w) dimensions.

    Args:
        h: target height.
        w: target width.
        image: image array to resize.
        k: the scale factor previously applied (selects the interpolation:
           Lanczos when the image was upscaled, area averaging otherwise).

    Returns:
        The resized image.
    """
    interp = cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA
    return cv2.resize(image, (w, h), interpolation=interp)
class AnnotatorProcessor():
    """Builds a scepter ``GeneralAnnotator`` over one or more annotator types
    and runs it on images.

    Each supported type maps to a config dict below; the selected configs are
    combined into a single ``GeneralAnnotator`` instance at construction time.
    """
    canny_cfg = {
        'NAME': 'CannyAnnotator',
        'LOW_THRESHOLD': 100,
        'HIGH_THRESHOLD': 200,
        'INPUT_KEYS': ['img'],
        'OUTPUT_KEYS': ['canny']
    }
    hed_cfg = {
        'NAME': 'HedAnnotator',
        'PRETRAINED_MODEL':
        'ms://damo/scepter_scedit@annotator/ckpts/ControlNetHED.pth',
        'INPUT_KEYS': ['img'],
        'OUTPUT_KEYS': ['hed']
    }
    openpose_cfg = {
        'NAME': 'OpenposeAnnotator',
        'BODY_MODEL_PATH':
        'ms://damo/scepter_scedit@annotator/ckpts/body_pose_model.pth',
        'HAND_MODEL_PATH':
        'ms://damo/scepter_scedit@annotator/ckpts/hand_pose_model.pth',
        'INPUT_KEYS': ['img'],
        'OUTPUT_KEYS': ['openpose']
    }
    midas_cfg = {
        'NAME': 'MidasDetector',
        'PRETRAINED_MODEL':
        'ms://damo/scepter_scedit@annotator/ckpts/dpt_hybrid-midas-501f0c75.pt',
        'INPUT_KEYS': ['img'],
        'OUTPUT_KEYS': ['depth']
    }
    mlsd_cfg = {
        'NAME': 'MLSDdetector',
        'PRETRAINED_MODEL':
        'ms://damo/scepter_scedit@annotator/ckpts/mlsd_large_512_fp32.pth',
        'INPUT_KEYS': ['img'],
        'OUTPUT_KEYS': ['mlsd']
    }
    color_cfg = {
        'NAME': 'ColorAnnotator',
        'RATIO': 64,
        'INPUT_KEYS': ['img'],
        'OUTPUT_KEYS': ['color']
    }
    # Public name -> annotator config.
    anno_type_map = {
        'canny': canny_cfg,
        'hed': hed_cfg,
        'pose': openpose_cfg,
        'depth': midas_cfg,
        'mlsd': mlsd_cfg,
        'color': color_cfg
    }
    def __init__(self, anno_type):
        """
        Args:
            anno_type: one annotator name, or a list/tuple of names; each
                must be a key of ``anno_type_map``.

        Raises:
            Exception: if ``anno_type`` is neither a string nor a list/tuple.
        """
        # Imported lazily so merely importing this module does not pull in
        # the heavyweight scepter stack.
        from scepter.modules.annotator.registry import ANNOTATORS
        from scepter.modules.utils.config import Config
        from scepter.modules.utils.distribute import we
        if isinstance(anno_type, str):
            assert anno_type in self.anno_type_map.keys()
            anno_type = [anno_type]
        elif isinstance(anno_type, (list, tuple)):
            assert all(tp in self.anno_type_map.keys() for tp in anno_type)
        else:
            raise Exception(f'Error anno_type: {anno_type}')
        general_dict = {
            'NAME': 'GeneralAnnotator',
            'ANNOTATORS': [self.anno_type_map[tp] for tp in anno_type]
        }
        general_anno = Config(cfg_dict=general_dict, load=False)
        self.general_ins = ANNOTATORS.build(general_anno).to(we.device_id)
    def run(self, image, anno_type=None):
        """Run the configured annotators on ``image``.

        Args:
            image: input image (whatever the underlying annotators accept).
            anno_type: None -> return the full output dict; a str -> return
                that single annotation (None if absent); a list/tuple ->
                return the dict restricted to the present keys.
        """
        output_image = self.general_ins({'img': image})
        if anno_type is None:
            return output_image
        if isinstance(anno_type, str):
            # Bug fix: previously a string key that was NOT in the output
            # fell through to the iterable branch and was iterated
            # character by character, producing a garbage dict.
            return output_image.get(anno_type)
        return {
            tp: output_image[tp]
            for tp in anno_type if tp in output_image
        }
|
evocodebench_data_198
|
from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
    """Merge ``new`` scores into ``old`` in place, summing overlapping URLs.

    Returns the (mutated) ``old`` mapping for convenience.
    """
    for url, score in new.items():
        old[url] = old.get(url, 0.0) + score
    return old
def normalize_string(input_string: str) -> str:
    """Lowercase ``input_string``, replace punctuation with spaces, and
    collapse any run of whitespace into a single space."""
    # One C-level pass mapping every punctuation char to a space.
    table = str.maketrans({ch: " " for ch in string.punctuation})
    cleaned = input_string.translate(table)
    return " ".join(cleaned.split()).lower()
class SearchEngine:
    """Minimal in-memory search engine ranking documents with Okapi BM25.

    Documents are plain strings keyed by URL; the document length used for
    normalization is the character length of the raw content.
    """
    def __init__(self, k1: float = 1.5, b: float = 0.75):
        """
        Args:
            k1: BM25 term-frequency saturation parameter.
            b: BM25 document-length normalization parameter (0..1).
        """
        # keyword -> {url -> term frequency}
        self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
        # url -> raw document content
        self._documents: dict[str, str] = {}
        self.k1 = k1
        self.b = b
    @property
    def posts(self) -> list[str]:
        """All indexed URLs."""
        return list(self._documents.keys())
    @property
    def number_of_documents(self) -> int:
        return len(self._documents)
    @property
    def avdl(self) -> float:
        """Average document length in characters (cached until next index())."""
        if not hasattr(self, "_avdl"):
            # Guard the empty-index case: previously this raised
            # ZeroDivisionError, so searching an empty engine crashed.
            self._avdl = (
                sum(len(d) for d in self._documents.values()) / len(self._documents)
                if self._documents
                else 0.0
            )
        return self._avdl
    def idf(self, kw: str) -> float:
        """Inverse document frequency of ``kw`` (the +1 keeps it positive)."""
        N = self.number_of_documents
        n_kw = len(self.get_urls(kw))
        return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
    def bm25(self, kw: str) -> dict[str, float]:
        """BM25 score of ``kw`` for every document containing it."""
        result = {}
        idf_score = self.idf(kw)
        avdl = self.avdl
        for url, freq in self.get_urls(kw).items():
            numerator = freq * (self.k1 + 1)
            denominator = freq + self.k1 * (
                1 - self.b + self.b * len(self._documents[url]) / avdl
            )
            result[url] = idf_score * numerator / denominator
        return result
    def search(self, query: str) -> dict[str, float]:
        """Sum per-keyword BM25 scores over all URLs for ``query``."""
        keywords = normalize_string(query).split(" ")
        url_scores: dict[str, float] = {}
        for kw in keywords:
            kw_urls_score = self.bm25(kw)
            url_scores = update_url_scores(url_scores, kw_urls_score)
        return url_scores
    def index(self, url: str, content: str) -> None:
        """Add (or overwrite) a document and update the inverted index."""
        self._documents[url] = content
        words = normalize_string(content).split(" ")
        for word in words:
            self._index[word][url] += 1
        if hasattr(self, "_avdl"):
            del self._avdl  # length stats changed; drop the cached average
    def bulk_index(self, documents: list[tuple[str, str]]):
        """Index many (url, content) pairs."""
        for url, content in documents:
            self.index(url, content)
    def get_urls(self, keyword: str) -> dict[str, int]:
        """Per-URL term frequencies of ``keyword`` (empty dict if unseen).

        Uses ``dict.get`` instead of subscripting: previously a lookup of an
        unseen keyword *inserted* an empty entry into the defaultdict-backed
        index, growing it on every search miss.
        """
        keyword = normalize_string(keyword)
        return self._index.get(keyword, {})
# Module-level default engine instance shared by importers of this module.
engine = SearchEngine()
|
evocodebench_data_199
|
from collections import defaultdict
from math import log
import string
def update_url_scores(old: dict[str, float], new: dict[str, float]):
    """Merge ``new`` scores into ``old`` in place, summing overlapping URLs.

    Returns the (mutated) ``old`` mapping for convenience.
    """
    for url, score in new.items():
        old[url] = old.get(url, 0.0) + score
    return old
def normalize_string(input_string: str) -> str:
    """Lowercase ``input_string``, replace punctuation with spaces, and
    collapse any run of whitespace into a single space."""
    # One C-level pass mapping every punctuation char to a space.
    table = str.maketrans({ch: " " for ch in string.punctuation})
    cleaned = input_string.translate(table)
    return " ".join(cleaned.split()).lower()
class SearchEngine:
    """Minimal in-memory search engine ranking documents with Okapi BM25.

    Documents are plain strings keyed by URL; the document length used for
    normalization is the character length of the raw content.
    """
    def __init__(self, k1: float = 1.5, b: float = 0.75):
        """
        Args:
            k1: BM25 term-frequency saturation parameter.
            b: BM25 document-length normalization parameter (0..1).
        """
        # keyword -> {url -> term frequency}
        self._index: dict[str, dict[str, int]] = defaultdict(lambda: defaultdict(int))
        # url -> raw document content
        self._documents: dict[str, str] = {}
        self.k1 = k1
        self.b = b
    @property
    def posts(self) -> list[str]:
        """All indexed URLs."""
        return list(self._documents.keys())
    @property
    def number_of_documents(self) -> int:
        return len(self._documents)
    @property
    def avdl(self) -> float:
        """Average document length in characters (cached until next index())."""
        if not hasattr(self, "_avdl"):
            # Guard the empty-index case: previously this raised
            # ZeroDivisionError, so searching an empty engine crashed.
            self._avdl = (
                sum(len(d) for d in self._documents.values()) / len(self._documents)
                if self._documents
                else 0.0
            )
        return self._avdl
    def idf(self, kw: str) -> float:
        """Inverse document frequency of ``kw`` (the +1 keeps it positive)."""
        N = self.number_of_documents
        n_kw = len(self.get_urls(kw))
        return log((N - n_kw + 0.5) / (n_kw + 0.5) + 1)
    def bm25(self, kw: str) -> dict[str, float]:
        """BM25 score of ``kw`` for every document containing it."""
        result = {}
        idf_score = self.idf(kw)
        avdl = self.avdl
        for url, freq in self.get_urls(kw).items():
            numerator = freq * (self.k1 + 1)
            denominator = freq + self.k1 * (
                1 - self.b + self.b * len(self._documents[url]) / avdl
            )
            result[url] = idf_score * numerator / denominator
        return result
    def search(self, query: str) -> dict[str, float]:
        """Sum per-keyword BM25 scores over all URLs for ``query``."""
        keywords = normalize_string(query).split(" ")
        url_scores: dict[str, float] = {}
        for kw in keywords:
            kw_urls_score = self.bm25(kw)
            url_scores = update_url_scores(url_scores, kw_urls_score)
        return url_scores
    def index(self, url: str, content: str) -> None:
        """Add (or overwrite) a document and update the inverted index."""
        self._documents[url] = content
        words = normalize_string(content).split(" ")
        for word in words:
            self._index[word][url] += 1
        if hasattr(self, "_avdl"):
            del self._avdl  # length stats changed; drop the cached average
    def bulk_index(self, documents: list[tuple[str, str]]):
        """Index many (url, content) pairs."""
        for url, content in documents:
            self.index(url, content)
    def get_urls(self, keyword: str) -> dict[str, int]:
        """Per-URL term frequencies of ``keyword`` (empty dict if unseen).

        Uses ``dict.get`` instead of subscripting: previously a lookup of an
        unseen keyword *inserted* an empty entry into the defaultdict-backed
        index, growing it on every search miss.
        """
        keyword = normalize_string(keyword)
        return self._index.get(keyword, {})
# Module-level default engine instance shared by importers of this module.
engine = SearchEngine()
|
evocodebench_data_200
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import List, Tuple
import torch
from detectron2.layers.rotated_boxes import pairwise_iou_rotated
from .boxes import Boxes
class RotatedBoxes(Boxes):
    """
    This structure stores a list of rotated boxes as a Nx5 torch.Tensor.
    It supports some common methods about boxes
    (`area`, `clip`, `nonempty`, etc),
    and also behaves like a Tensor
    (support indexing, `to(device)`, `.device`, and iteration over all boxes)
    """
    def __init__(self, tensor: torch.Tensor):
        """
        Args:
            tensor (Tensor[float]): a Nx5 matrix. Each row is
                (x_center, y_center, width, height, angle),
                in which angle is represented in degrees.
                While there's no strict range restriction for it,
                the recommended principal range is between [-180, 180) degrees.
        Assume we have a horizontal box B = (x_center, y_center, width, height),
        where width is along the x-axis and height is along the y-axis.
        The rotated box B_rot (x_center, y_center, width, height, angle)
        can be seen as:
        1. When angle == 0:
           B_rot == B
        2. When angle > 0:
           B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;
        3. When angle < 0:
           B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.
        Mathematically, since the right-handed coordinate system for image space
        is (y, x), where y is top->down and x is left->right, the 4 vertices of the
        rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from
        the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)
        in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians,
        :math:`(y_c, x_c)` is the center of the rectangle):
        .. math::
            yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c,
            xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c,
        which is the standard rigid-body rotation transformation.
        Intuitively, the angle is
        (1) the rotation angle from y-axis in image space
        to the height vector (top->down in the box's local coordinate system)
        of the box in CCW, and
        (2) the rotation angle from x-axis in image space
        to the width vector (left->right in the box's local coordinate system)
        of the box in CCW.
        More intuitively, consider the following horizontal box ABCD represented
        in (x1, y1, x2, y2): (3, 2, 7, 4),
        covering the [3, 7] x [2, 4] region of the continuous coordinate system
        which looks like this:
        .. code:: none
            O--------> x
            |
            |  A---B
            |  |   |
            |  D---C
            |
            v y
        Note that each capital letter represents one 0-dimensional geometric point
        instead of a 'square pixel' here.
        In the example above, using (x, y) to represent a point we have:
        .. math::
            O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)
        We name vector AB = vector DC as the width vector in box's local coordinate system, and
        vector AD = vector BC as the height vector in box's local coordinate system. Initially,
        when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis
        in the image space, respectively.
        For better illustration, we denote the center of the box as E,
        .. code:: none
            O--------> x
            |
            |  A---B
            |  | E |
            |  D---C
            |
            v y
        where the center E = ((3+7)/2, (2+4)/2) = (5, 3).
        Also,
        .. math::
            width = |AB| = |CD| = 7 - 3 = 4,
            height = |AD| = |BC| = 4 - 2 = 2.
        Therefore, the corresponding representation for the same shape in rotated box in
        (x_center, y_center, width, height, angle) format is:
        (5, 3, 4, 2, 0),
        Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees
        CCW (counter-clockwise) by definition. It looks like this:
        .. code:: none
            O--------> x
            |   B-C
            |   | |
            |   |E|
            |   | |
            |   A-D
            v y
        The center E is still located at the same point (5, 3), while the vertices
        ABCD are rotated by 90 degrees CCW with regard to E:
        A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)
        Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to
        vector AD or vector BC (the top->down height vector in box's local coordinate system),
        or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right
        width vector in box's local coordinate system).
        .. math::
            width = |AB| = |CD| = 5 - 1 = 4,
            height = |AD| = |BC| = 6 - 4 = 2.
        Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)
        by definition? It looks like this:
        .. code:: none
            O--------> x
            |   D-A
            |   | |
            |   |E|
            |   | |
            |   C-B
            v y
        The center E is still located at the same point (5, 3), while the vertices
        ABCD are rotated by 90 degrees CW with regard to E:
        A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)
        .. math::
            width = |AB| = |CD| = 5 - 1 = 4,
            height = |AD| = |BC| = 6 - 4 = 2.
        This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU
        will be 1. However, these two will generate different RoI Pooling results and
        should not be treated as an identical box.
        On the other hand, it's easy to see that (X, Y, W, H, A) is identical to
        (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be
        identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is
        equivalent to rotating the same shape 90 degrees CW.
        We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):
        .. code:: none
            O--------> x
            |
            |  C---D
            |  | E |
            |  B---A
            |
            v y
        .. math::
            A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),
            width = |AB| = |CD| = 7 - 3 = 4,
            height = |AD| = |BC| = 4 - 2 = 2.
        Finally, this is a very inaccurate (heavily quantized) illustration of
        how (5, 3, 4, 2, 60) looks like in case anyone wonders:
        .. code:: none
            O--------> x
            |     B\
            |    /  C
            |   /E /
            |  A  /
            |   `D
            v y
        It's still a rectangle with center of (5, 3), width of 4 and height of 2,
        but its angle (and thus orientation) is somewhere between
        (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).
        """
        device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
        tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
        if tensor.numel() == 0:
            # Use reshape, so we don't end up creating a new tensor that does not depend on
            # the inputs (and consequently confuses jit)
            tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)
        assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()
        self.tensor = tensor
    def clone(self) -> "RotatedBoxes":
        """
        Clone the RotatedBoxes.
        Returns:
            RotatedBoxes
        """
        return RotatedBoxes(self.tensor.clone())
    def to(self, device: torch.device) -> "RotatedBoxes":
        """
        Return a new RotatedBoxes with the underlying tensor on ``device``.
        """
        # Boxes are assumed float32 and does not support to(dtype)
        return RotatedBoxes(self.tensor.to(device=device))
    def area(self) -> torch.Tensor:
        """
        Computes the area of all the boxes.
        Returns:
            torch.Tensor: a vector with areas of each box.
        """
        box = self.tensor
        # Rotation does not change the area: width * height.
        area = box[:, 2] * box[:, 3]
        return area
    def normalize_angles(self) -> None:
        """
        Restrict angles to the range of [-180, 180) degrees
        """
        self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0
    def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:
        """
        Clip (in place) the boxes by limiting x coordinates to the range [0, width]
        and y coordinates to the range [0, height].
        For RRPN:
        Only clip boxes that are almost horizontal with a tolerance of
        clip_angle_threshold to maintain backward compatibility.
        Rotated boxes beyond this threshold are not clipped for two reasons:
        1. There are potentially multiple ways to clip a rotated box to make it
           fit within the image.
        2. It's tricky to make the entire rectangular box fit within the image
           and still be able to not leave out pixels of interest.
        Therefore we rely on ops like RoIAlignRotated to safely handle this.
        Args:
            box_size (height, width): The clipping box's size.
            clip_angle_threshold:
                Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees),
                we do the clipping as horizontal boxes.
        """
        h, w = box_size
        # normalize angles to be within [-180, 180) degrees
        self.normalize_angles()
        # Only near-horizontal boxes (|angle| <= threshold) are clipped.
        idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]
        # convert to (x1, y1, x2, y2)
        x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0
        y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0
        x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0
        y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0
        # clip
        x1.clamp_(min=0, max=w)
        y1.clamp_(min=0, max=h)
        x2.clamp_(min=0, max=w)
        y2.clamp_(min=0, max=h)
        # convert back to (xc, yc, w, h)
        self.tensor[idx, 0] = (x1 + x2) / 2.0
        self.tensor[idx, 1] = (y1 + y2) / 2.0
        # make sure widths and heights do not increase due to numerical errors
        self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)
        self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)
    def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
        """
        Find boxes that are non-empty.
        A box is considered empty, if either of its side is no larger than threshold.
        Returns:
            Tensor: a binary vector which represents
            whether each box is empty (False) or non-empty (True).
        """
        box = self.tensor
        widths = box[:, 2]
        heights = box[:, 3]
        keep = (widths > threshold) & (heights > threshold)
        return keep
    def __getitem__(self, item) -> "RotatedBoxes":
        """
        Returns:
            RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.
        The following usage are allowed:
        1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
        2. `new_boxes = boxes[2:10]`: return a slice of boxes.
        3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
           with `length = len(boxes)`. Nonzero elements in the vector will be selected.
        Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
        subject to Pytorch's indexing semantics.
        """
        if isinstance(item, int):
            # Keep the result 2-D: a single box becomes shape (1, 5).
            return RotatedBoxes(self.tensor[item].view(1, -1))
        b = self.tensor[item]
        assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
            item
        )
        return RotatedBoxes(b)
    def __len__(self) -> int:
        return self.tensor.shape[0]
    def __repr__(self) -> str:
        return "RotatedBoxes(" + str(self.tensor) + ")"
    def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
        """
        Args:
            box_size (height, width): Size of the reference box covering
                [0, width] x [0, height]
            boundary_threshold (int): Boxes that extend beyond the reference box
                boundary by more than boundary_threshold are considered "outside".
        For RRPN, it might not be necessary to call this function since it's common
        for rotated box to extend to outside of the image boundaries
        (the clip function only clips the near-horizontal boxes)
        Returns:
            a binary vector, indicating whether each box is inside the reference box.
        """
        height, width = box_size
        cnt_x = self.tensor[..., 0]
        cnt_y = self.tensor[..., 1]
        half_w = self.tensor[..., 2] / 2.0
        half_h = self.tensor[..., 3] / 2.0
        a = self.tensor[..., 4]
        c = torch.abs(torch.cos(a * math.pi / 180.0))
        s = torch.abs(torch.sin(a * math.pi / 180.0))
        # This basically computes the horizontal bounding rectangle of the rotated box
        max_rect_dx = c * half_w + s * half_h
        max_rect_dy = c * half_h + s * half_w
        inds_inside = (
            (cnt_x - max_rect_dx >= -boundary_threshold)
            & (cnt_y - max_rect_dy >= -boundary_threshold)
            & (cnt_x + max_rect_dx < width + boundary_threshold)
            & (cnt_y + max_rect_dy < height + boundary_threshold)
        )
        return inds_inside
    def get_centers(self) -> torch.Tensor:
        """
        Returns:
            The box centers in a Nx2 array of (x, y).
        """
        return self.tensor[:, :2]
    def scale(self, scale_x: float, scale_y: float) -> None:
        """
        Scale the rotated box with horizontal and vertical scaling factors
        Note: when scale_factor_x != scale_factor_y,
        the rotated box does not preserve the rectangular shape when the angle
        is not a multiple of 90 degrees under resize transformation.
        Instead, the shape is a parallelogram (that has skew)
        Here we make an approximation by fitting a rotated rectangle to the parallelogram.
        """
        self.tensor[:, 0] *= scale_x
        self.tensor[:, 1] *= scale_y
        theta = self.tensor[:, 4] * math.pi / 180.0
        c = torch.cos(theta)
        s = torch.sin(theta)
        # In image space, y is top->down and x is left->right
        # Consider the local coordintate system for the rotated box,
        # where the box center is located at (0, 0), and the four vertices ABCD are
        # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)
        # the midpoint of the left edge AD of the rotated box E is:
        # E = (A+D)/2 = (-w / 2, 0)
        # the midpoint of the top edge AB of the rotated box F is:
        # F(0, -h / 2)
        # To get the old coordinates in the global system, apply the rotation transformation
        # (Note: the right-handed coordinate system for image space is yOx):
        # (old_x, old_y) = (s * y + c * x, c * y - s * x)
        # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)
        # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)
        # After applying the scaling factor (sfx, sfy):
        # E(new) = (-sfx * c * w / 2, sfy * s * w / 2)
        # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)
        # The new width after scaling tranformation becomes:
        # w(new) = |E(new) - O| * 2
        #        = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2
        #        = sqrt[(sfx * c)^2 + (sfy * s)^2] * w
        # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]
        #
        # For example,
        # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;
        # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y
        self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)
        # h(new) = |F(new) - O| * 2
        #        = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2
        #        = sqrt[(sfx * s)^2 + (sfy * c)^2] * h
        # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]
        #
        # For example,
        # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;
        # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x
        self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)
        # The angle is the rotation angle from y-axis in image space to the height
        # vector (top->down in the box's local coordinate system) of the box in CCW.
        #
        # angle(new) = angle_yOx(O - F(new))
        #            = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )
        #            = atan2(sfx * s * h / 2, sfy * c * h / 2)
        #            = atan2(sfx * s, sfy * c)
        #
        # For example,
        # when sfx == sfy, angle(new) == atan2(s, c) == angle(old)
        self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi
    @classmethod
    def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes":
        """
        Concatenates a list of RotatedBoxes into a single RotatedBoxes
        Arguments:
            boxes_list (list[RotatedBoxes])
        Returns:
            RotatedBoxes: the concatenated RotatedBoxes
        """
        assert isinstance(boxes_list, (list, tuple))
        if len(boxes_list) == 0:
            # The (0,)-shaped tensor is reshaped to (0, 5) by __init__.
            return cls(torch.empty(0))
        assert all([isinstance(box, RotatedBoxes) for box in boxes_list])
        # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
        cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
        return cat_boxes
    @property
    def device(self) -> torch.device:
        """The device the underlying tensor lives on."""
        return self.tensor.device
    @torch.jit.unused
    def __iter__(self):
        """
        Yield a box as a Tensor of shape (5,) at a time.
        """
        yield from self.tensor
def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> torch.Tensor:
    """
    Given two lists of rotated boxes of size N and M,
    compute the IoU (intersection over union)
    between **all** N x M pairs of boxes.
    The box order must be (x_center, y_center, width, height, angle).
    Args:
        boxes1, boxes2 (RotatedBoxes):
            two `RotatedBoxes`. Contains N & M rotated boxes, respectively.
    Returns:
        Tensor: IoU, sized [N,M].
    """
    # Fix: the return annotation said `-> None`, but (as the docstring states
    # and the body shows) this returns the NxM IoU tensor.
    return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.