id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
8,661 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def logs_db_path():
    """Return the path to the prompt/response log database (logs.db)."""
    return user_dir().joinpath("logs.db")
def _human_readable_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = 0
while size_bytes >= 1024 and i < len(size_name) - 1:
size_bytes /= 1024.0
i += 1
return "{:.2f}{}".format(size_bytes, size_name[i])
def logs_on():
    """Return True unless the user created the "logs-off" marker file."""
    off_marker = user_dir() / "logs-off"
    return not off_marker.exists()
def migrate(db):
    """Apply any pending schema migrations to *db*.

    Each function in MIGRATIONS runs at most once; applied migration names
    are recorded in the _llm_migrations table with a UTC timestamp.
    """
    # NOTE(review): ensure_migrations_table, MIGRATIONS and datetime come from
    # the migrations module context, not this snippet — confirm imports there.
    ensure_migrations_table(db)
    already_applied = {r["name"] for r in db["_llm_migrations"].rows}
    for fn in MIGRATIONS:
        name = fn.__name__
        if name not in already_applied:
            fn(db)
            # Record the migration as applied so it never runs again.
            db["_llm_migrations"].insert(
                {"name": name, "applied_at": str(datetime.datetime.utcnow())}
            )
            already_applied.add(name)
The provided code snippet includes necessary dependencies for implementing the `logs_status` function. Write a Python function `def logs_status()` to solve the following problem:
Show current status of database logging
Here is the function:
def logs_status():
    "Show current status of database logging"
    path = logs_db_path()
    if not path.exists():
        click.echo("No log database found at {}".format(path))
        return
    # Report the on/off state (controlled by the "logs-off" marker file).
    # The previous version called .format() with no arguments on these
    # constant strings — a no-op; removed.
    if logs_on():
        click.echo("Logging is ON for all prompts")
    else:
        click.echo("Logging is OFF")
    # Ensure the schema is current before querying table counts.
    db = sqlite_utils.Database(path)
    migrate(db)
    click.echo("Found log database at {}".format(path))
    click.echo("Number of conversations logged:\t{}".format(db["conversations"].count))
    click.echo("Number of responses logged:\t{}".format(db["responses"].count))
    click.echo(
        "Database file size: \t\t{}".format(_human_readable_size(path.stat().st_size))
    )
8,662 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def user_dir():
    """Return the llm user data directory, creating it on demand.

    The LLM_USER_PATH environment variable overrides the platform-specific
    application directory from click.get_app_dir().
    """
    env_override = os.environ.get("LLM_USER_PATH")
    path = pathlib.Path(env_override or click.get_app_dir("io.datasette.llm"))
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `logs_turn_on` function. Write a Python function `def logs_turn_on()` to solve the following problem:
Turn on logging for all prompts
Here is the function:
def logs_turn_on():
    "Turn on logging for all prompts"
    # Logging is on by default; turning it on means removing the "logs-off"
    # marker file. missing_ok=True (Python 3.8+) avoids the race between
    # the previous exists() check and unlink().
    (user_dir() / "logs-off").unlink(missing_ok=True)
8,663 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def user_dir():
    """Return the llm user data directory, creating it if necessary.

    The LLM_USER_PATH environment variable overrides the platform default
    from click.get_app_dir().
    """
    # NOTE(review): `os` does not appear in this snippet's visible imports —
    # confirm it is imported in the real module.
    llm_user_path = os.environ.get("LLM_USER_PATH")
    if llm_user_path:
        path = pathlib.Path(llm_user_path)
    else:
        path = pathlib.Path(click.get_app_dir("io.datasette.llm"))
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `logs_turn_off` function. Write a Python function `def logs_turn_off()` to solve the following problem:
Turn off logging for all prompts
Here is the function:
def logs_turn_off():
    "Turn off logging for all prompts"
    # Create the marker file whose presence disables logging.
    off_marker = user_dir() / "logs-off"
    off_marker.touch()
8,664 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def keys():
    # Click command group stub; key subcommands are registered elsewhere.
    "Manage stored API keys for different models"
LOGS_COLUMNS = """ responses.id,
responses.model,
responses.prompt,
responses.system,
responses.prompt_json,
responses.options_json,
responses.response,
responses.response_json,
responses.conversation_id,
responses.duration_ms,
responses.datetime_utc,
conversations.name as conversation_name,
conversations.model as conversation_model"""
LOGS_SQL = """
select
{columns}
from
responses
left join conversations on responses.conversation_id = conversations.id{extra_where}
order by responses.id desc{limit}
"""
LOGS_SQL_SEARCH = """
select
{columns}
from
responses
left join conversations on responses.conversation_id = conversations.id
join responses_fts on responses_fts.rowid = responses.rowid
where responses_fts match :query{extra_where}
order by responses_fts.rank desc{limit}
"""
def _truncate_string(s, max_length=100):
if len(s) > max_length:
return s[: max_length - 3] + "..."
return s
def logs_db_path():
    # Location of the prompt/response log database inside the user directory.
    return user_dir() / "logs.db"
class UnknownModelError(KeyError):
    """Raised when a model ID or alias cannot be resolved to a model.

    Subclasses KeyError so existing ``except KeyError`` handlers still work.
    """

    pass
def get_model(name):
    """Resolve *name* (a model ID or alias) to a model instance.

    Raises UnknownModelError when no registered model matches.
    """
    aliases = get_model_aliases()
    if name not in aliases:
        raise UnknownModelError("Unknown model: " + name)
    return aliases[name]
def migrate(db):
    """Apply any pending schema migrations to *db*.

    Each function in MIGRATIONS runs at most once; applied migration names
    are recorded in the _llm_migrations table with a UTC timestamp.
    """
    ensure_migrations_table(db)
    already_applied = {r["name"] for r in db["_llm_migrations"].rows}
    for fn in MIGRATIONS:
        name = fn.__name__
        if name not in already_applied:
            fn(db)
            # Record the migration as applied so it never runs again.
            db["_llm_migrations"].insert(
                {"name": name, "applied_at": str(datetime.datetime.utcnow())}
            )
            already_applied.add(name)
The provided code snippet includes necessary dependencies for implementing the `logs_list` function. Write a Python function `def logs_list( count, path, model, query, truncate, response, current_conversation, conversation_id, json_output, )` to solve the following problem:
Show recent logged prompts and their responses
Here is the function:
def logs_list(
    count,
    path,
    model,
    query,
    truncate,
    response,
    current_conversation,
    conversation_id,
    json_output,
):
    "Show recent logged prompts and their responses"
    path = pathlib.Path(path or logs_db_path())
    if not path.exists():
        raise click.ClickException("No log database found at {}".format(path))
    db = sqlite_utils.Database(path)
    migrate(db)
    # --response implies "current conversation" unless one was named explicitly
    if response and not current_conversation and not conversation_id:
        current_conversation = True
    if current_conversation:
        # Most recent response determines the "current" conversation
        try:
            conversation_id = next(
                db.query(
                    "select conversation_id from responses order by id desc limit 1"
                )
            )["conversation_id"]
        except StopIteration:
            # No conversations yet
            raise click.ClickException("No conversations found")
    # For --conversation set limit 0, if not explicitly set
    if count is None:
        if conversation_id:
            count = 0
        else:
            count = 3
    model_id = None
    if model:
        # Resolve alias, if any
        try:
            model_id = get_model(model).model_id
        except UnknownModelError:
            # Maybe they uninstalled a model, use the -m option as-is
            model_id = model
    sql = LOGS_SQL
    if query:
        sql = LOGS_SQL_SEARCH
    limit = ""
    if count is not None and count > 0:
        limit = " limit {}".format(count)
    sql_format = {
        "limit": limit,
        "columns": LOGS_COLUMNS,
        "extra_where": "",
    }
    where_bits = []
    if model_id:
        where_bits.append("responses.model = :model")
    if conversation_id:
        where_bits.append("responses.conversation_id = :conversation_id")
    if where_bits:
        sql_format["extra_where"] = " where " + " and ".join(where_bits)
    final_sql = sql.format(**sql_format)
    rows = list(
        db.query(
            final_sql,
            {"model": model_id, "query": query, "conversation_id": conversation_id},
        )
    )
    # Reverse the order - we do this because we 'order by id desc limit 3' to get the
    # 3 most recent results, but we still want to display them in chronological order
    # ... except for searches where we don't do this
    if not query:
        rows.reverse()
    for row in rows:
        if truncate:
            row["prompt"] = _truncate_string(row["prompt"])
            row["response"] = _truncate_string(row["response"])
        # Either decode or remove all JSON keys
        keys = list(row.keys())
        for key in keys:
            if key.endswith("_json") and row[key] is not None:
                if truncate:
                    del row[key]
                else:
                    row[key] = json.loads(row[key])
    if json_output:
        # Output as JSON if requested
        click.echo(json.dumps(list(rows), indent=2))
    elif response:
        # Just output the last response
        if rows:
            click.echo(rows[-1]["response"])
    else:
        # Output neatly formatted human-readable logs
        current_system = None
        should_show_conversation = True
        for row in rows:
            click.echo(
                "# {}{}\n{}".format(
                    row["datetime_utc"].split(".")[0],
                    (
                        " conversation: {}".format(row["conversation_id"])
                        if should_show_conversation
                        else ""
                    ),
                    (
                        "\nModel: **{}**\n".format(row["model"])
                        if should_show_conversation
                        else ""
                    ),
                )
            )
            # In conversation log mode only show it for the first one
            if conversation_id:
                should_show_conversation = False
            click.echo("## Prompt:\n\n{}".format(row["prompt"]))
            # Only repeat the system prompt when it changes between rows
            if row["system"] != current_system:
                if row["system"] is not None:
                    click.echo("\n## System:\n\n{}".format(row["system"]))
                current_system = row["system"]
            click.echo("\n## Response:\n\n{}\n".format(row["response"])) | Show recent logged prompts and their responses |
8,665 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
The provided code snippet includes necessary dependencies for implementing the `models` function. Write a Python function `def models()` to solve the following problem:
Manage available models
Here is the function:
def models():
    # Click command group stub; model subcommands are registered elsewhere.
    "Manage available models" | Manage available models |
8,666 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
# Map JSON-schema type names to the Python type names shown in model
# options listings; unmapped types fall through unchanged.
_type_lookup = {
    "number": "float",
    "integer": "int",
    "string": "str",
    "object": "dict",
}
def aliases():
    # Click command group stub; alias subcommands are registered elsewhere.
    "Manage model aliases"
def get_models_with_aliases() -> List["ModelWithAliases"]:
    """Collect every registered model together with its aliases.

    Aliases configured in aliases.json are merged with those supplied by
    each plugin at registration time.
    """
    model_aliases = []
    # Include aliases from aliases.json
    aliases_path = user_dir() / "aliases.json"
    extra_model_aliases: Dict[str, list] = {}
    if aliases_path.exists():
        configured_aliases = json.loads(aliases_path.read_text())
        for alias, model_id in configured_aliases.items():
            extra_model_aliases.setdefault(model_id, []).append(alias)
    def register(model, aliases=None):
        # Callback handed to plugins via the register_models hook.
        alias_list = list(aliases or [])
        if model.model_id in extra_model_aliases:
            alias_list.extend(extra_model_aliases[model.model_id])
        model_aliases.append(ModelWithAliases(model, alias_list))
    pm.hook.register_models(register=register)
    return model_aliases
The provided code snippet includes necessary dependencies for implementing the `models_list` function. Write a Python function `def models_list(options)` to solve the following problem:
List available models
Here is the function:
def models_list(options):
    "List available models"
    # Track model classes whose option descriptions were already printed so
    # the long help text is not repeated for every instance of a class.
    models_that_have_shown_options = set()
    for model_with_aliases in get_models_with_aliases():
        extra = ""
        if model_with_aliases.aliases:
            extra = " (aliases: {})".format(", ".join(model_with_aliases.aliases))
        output = str(model_with_aliases.model) + extra
        if options and model_with_aliases.model.Options.schema()["properties"]:
            for name, field in model_with_aliases.model.Options.schema()[
                "properties"
            ].items():
                # anyOf is present for optional fields; fall back to the
                # plain "type" entry otherwise.
                any_of = field.get("anyOf")
                if any_of is None:
                    any_of = [{"type": field["type"]}]
                # Render JSON-schema type names as Python type names
                types = ", ".join(
                    [
                        _type_lookup.get(item["type"], item["type"])
                        for item in any_of
                        if item["type"] != "null"
                    ]
                )
                bits = ["\n ", name, ": ", types]
                description = field.get("description", "")
                if description and (
                    model_with_aliases.model.__class__
                    not in models_that_have_shown_options
                ):
                    wrapped = textwrap.wrap(description, 70)
                    bits.append("\n ")
                    bits.extend("\n ".join(wrapped))
                output += "".join(bits)
            models_that_have_shown_options.add(model_with_aliases.model.__class__)
        click.echo(output) | List available models |
8,667 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def get_default_model(filename="default_model.txt", default=DEFAULT_MODEL):
    """Read the default model ID from *filename* in the user directory.

    Returns *default* when no override file exists.
    """
    override_path = user_dir() / filename
    if not override_path.exists():
        return default
    return override_path.read_text().strip()
def set_default_model(model, filename="default_model.txt"):
    """Persist *model* as the default model, or clear it when model is None.

    Fixes a bug: previously, model=None with no existing file fell through
    to path.write_text(None), raising TypeError.
    """
    path = user_dir() / filename
    if model is None:
        # Clearing the default: remove the override file if present.
        if path.exists():
            path.unlink()
    else:
        path.write_text(model)
def get_model(name):
    """Resolve *name* (a model ID or alias) to a model instance.

    Raises UnknownModelError when nothing matches.
    """
    aliases = get_model_aliases()
    try:
        return aliases[name]
    except KeyError:
        raise UnknownModelError("Unknown model: " + name)
The provided code snippet includes necessary dependencies for implementing the `models_default` function. Write a Python function `def models_default(model)` to solve the following problem:
Show or set the default model
Here is the function:
def models_default(model):
    "Show or set the default model"
    if not model:
        # No argument: print the current default.
        click.echo(get_default_model())
        return
    # Validate that it is a known model before persisting. The try block is
    # narrowed to the lookup only — previously it also wrapped
    # set_default_model(), so an unrelated KeyError from deeper code would
    # have been misreported as "Unknown model".
    try:
        resolved = get_model(model)
    except KeyError:
        # UnknownModelError subclasses KeyError; `model` is still the
        # user-supplied string here, so the message is accurate.
        raise click.ClickException("Unknown model: {}".format(model))
    set_default_model(resolved.model_id)
8,668 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
The provided code snippet includes necessary dependencies for implementing the `templates` function. Write a Python function `def templates()` to solve the following problem:
Manage stored prompt templates
Here is the function:
def templates():
    # Click command group stub; template subcommands are registered elsewhere.
    "Manage stored prompt templates" | Manage stored prompt templates |
8,669 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def prompt(
    prompt,
    system,
    model_id,
    options,
    template,
    param,
    no_stream,
    no_log,
    log,
    _continue,
    conversation_id,
    key,
    save,
):
    """
    Execute a prompt
    Documentation: https://llm.datasette.io/en/stable/usage.html
    """
    if log and no_log:
        raise click.ClickException("--log and --no-log are mutually exclusive")
    model_aliases = get_model_aliases()
    # Resolve the final prompt text, merging any piped stdin input; mutates
    # the enclosing `prompt` variable.
    def read_prompt():
        nonlocal prompt
        # Is there extra prompt available on stdin?
        stdin_prompt = None
        if not sys.stdin.isatty():
            stdin_prompt = sys.stdin.read()
        if stdin_prompt:
            bits = [stdin_prompt]
            if prompt:
                bits.append(prompt)
            prompt = " ".join(bits)
        if prompt is None and not save and sys.stdin.isatty():
            # Hang waiting for input to stdin (unless --save)
            prompt = sys.stdin.read()
        return prompt
    if save:
        # We are saving their prompt/system/etc to a new template
        # Fields to save: prompt, system, model - and more in the future
        disallowed_options = []
        for option, var in (
            ("--template", template),
            ("--continue", _continue),
            ("--cid", conversation_id),
        ):
            if var:
                disallowed_options.append(option)
        if disallowed_options:
            raise click.ClickException(
                "--save cannot be used with {}".format(", ".join(disallowed_options))
            )
        path = template_dir() / f"{save}.yaml"
        to_save = {}
        if model_id:
            try:
                to_save["model"] = model_aliases[model_id].model_id
            except KeyError:
                raise click.ClickException("'{}' is not a known model".format(model_id))
        prompt = read_prompt()
        if prompt:
            to_save["prompt"] = prompt
        if system:
            to_save["system"] = system
        if param:
            to_save["defaults"] = dict(param)
        path.write_text(
            yaml.dump(
                to_save,
                indent=4,
                default_flow_style=False,
            ),
            "utf-8",
        )
        return
    if template:
        params = dict(param)
        # Cannot be used with system
        if system:
            raise click.ClickException("Cannot use -t/--template and --system together")
        template_obj = load_template(template)
        prompt = read_prompt()
        try:
            prompt, system = template_obj.evaluate(prompt, params)
        except Template.MissingVariables as ex:
            raise click.ClickException(str(ex))
        if model_id is None and template_obj.model:
            model_id = template_obj.model
    conversation = None
    if conversation_id or _continue:
        # Load the conversation - loads most recent if no ID provided
        # NOTE(review): load_conversation is defined elsewhere in this module.
        try:
            conversation = load_conversation(conversation_id)
        except UnknownModelError as ex:
            raise click.ClickException(str(ex))
    # Figure out which model we are using
    if model_id is None:
        if conversation:
            model_id = conversation.model.model_id
        else:
            model_id = get_default_model()
    # Now resolve the model
    try:
        model = model_aliases[model_id]
    except KeyError:
        raise click.ClickException("'{}' is not a known model".format(model_id))
    # Provide the API key, if one is needed and has been provided
    if model.needs_key:
        model.key = get_key(key, model.needs_key, model.key_env_var)
    if conversation:
        # To ensure it can see the key
        conversation.model = model
    # Validate options
    validated_options = {}
    if options:
        # Validate with pydantic
        try:
            validated_options = dict(
                (key, value)
                for key, value in model.Options(**dict(options))
                if value is not None
            )
        except pydantic.ValidationError as ex:
            raise click.ClickException(render_errors(ex.errors()))
    should_stream = model.can_stream and not no_stream
    if not should_stream:
        validated_options["stream"] = False
    prompt = read_prompt()
    # Prompt via the conversation when continuing one, so history is included
    prompt_method = model.prompt
    if conversation:
        prompt_method = conversation.prompt
    try:
        response = prompt_method(prompt, system, **validated_options)
        if should_stream:
            for chunk in response:
                print(chunk, end="")
                sys.stdout.flush()
            print("")
        else:
            print(response.text())
    except Exception as ex:
        raise click.ClickException(str(ex))
    # Log to the database
    if (logs_on() or log) and not no_log:
        log_path = logs_db_path()
        (log_path.parent).mkdir(parents=True, exist_ok=True)
        db = sqlite_utils.Database(log_path)
        migrate(db)
        response.log_to_db(db)
"_continue",
"-c",
"--continue",
is_flag=True,
flag_value=-1,
help="Continue the most recent conversation.",
def display_truncated(text):
    """Truncate *text* to the terminal width, appending '...' if shortened."""
    width = shutil.get_terminal_size().columns
    return text if len(text) <= width else text[: width - 3] + "..."
def template_dir():
    """Return the templates directory inside the user dir, creating it."""
    templates_path = user_dir() / "templates"
    templates_path.mkdir(parents=True, exist_ok=True)
    return templates_path
def load_template(name):
    """Load the named template from the templates directory.

    A YAML file containing a bare string becomes a prompt-only Template.
    Raises click.ClickException when the file is missing or invalid.
    """
    template_path = template_dir() / f"{name}.yaml"
    if not template_path.exists():
        raise click.ClickException(f"Invalid template: {name}")
    try:
        loaded = yaml.safe_load(template_path.read_text())
    except yaml.YAMLError as ex:
        raise click.ClickException("Invalid YAML: {}".format(str(ex)))
    if isinstance(loaded, str):
        return Template(name=name, prompt=loaded)
    loaded["name"] = name
    try:
        return Template(**loaded)
    except pydantic.ValidationError as ex:
        raise click.ClickException(
            "A validation error occurred:\n" + render_errors(ex.errors())
        )
The provided code snippet includes necessary dependencies for implementing the `templates_list` function. Write a Python function `def templates_list()` to solve the following problem:
List available prompt templates
Here is the function:
def templates_list():
    "List available prompt templates"
    path = template_dir()
    # Build (name, one-line summary) pairs for every saved template
    pairs = []
    for file in path.glob("*.yaml"):
        name = file.stem
        template = load_template(name)
        text = []
        if template.system:
            text.append(f"system: {template.system}")
            if template.prompt:
                text.append(f" prompt: {template.prompt}")
        else:
            text = [template.prompt if template.prompt else ""]
        pairs.append((name, "".join(text).replace("\n", " ")))
    try:
        max_name_len = max(len(p[0]) for p in pairs)
    except ValueError:
        # No templates saved yet - nothing to list
        return
    else:
        # Align the summaries in a column after the longest name
        fmt = "{name:<" + str(max_name_len) + "} : {prompt}"
        for name, prompt in sorted(pairs):
            text = fmt.format(name=name, prompt=prompt)
            click.echo(display_truncated(text)) | List available prompt templates |
8,670 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def get_embedding_model_aliases() -> Dict[str, EmbeddingModel]:
    """Build a lookup of alias -> embedding model.

    Each model is reachable both via its registered aliases and via its
    own model_id.
    """
    lookup = {}
    for entry in get_embedding_models_with_aliases():
        lookup.update({alias: entry.model for alias in entry.aliases})
        lookup[entry.model.model_id] = entry.model
    return lookup
def get_model_aliases() -> Dict[str, Model]:
    """Map every alias and canonical model_id to its model instance."""
    model_aliases = {}
    for model_with_aliases in get_models_with_aliases():
        for alias in model_with_aliases.aliases:
            model_aliases[alias] = model_with_aliases.model
        # The canonical ID also resolves to the model itself
        model_aliases[model_with_aliases.model.model_id] = model_with_aliases.model
    return model_aliases
The provided code snippet includes necessary dependencies for implementing the `aliases_list` function. Write a Python function `def aliases_list(json_)` to solve the following problem:
List current aliases
Here is the function:
def aliases_list(json_):
    "List current aliases"
    # Collect (alias, model_id, type-label) triples for regular and embedding
    # models, skipping each model's identity mapping of model_id -> itself.
    to_output = []
    for alias, model in get_model_aliases().items():
        if alias != model.model_id:
            to_output.append((alias, model.model_id, ""))
    for alias, embedding_model in get_embedding_model_aliases().items():
        if alias != embedding_model.model_id:
            to_output.append((alias, embedding_model.model_id, "embedding"))
    if json_:
        click.echo(
            json.dumps({key: value for key, value, type_ in to_output}, indent=4)
        )
        return
    # default=0 guards against max() raising ValueError when no aliases are
    # configured (previously this crashed on an empty list).
    max_alias_length = max((len(a) for a, _, _ in to_output), default=0)
    fmt = "{alias:<" + str(max_alias_length) + "} : {model_id}{type_}"
    for alias, model_id, type_ in to_output:
        click.echo(
            fmt.format(
                alias=alias, model_id=model_id, type_=f" ({type_})" if type_ else ""
            )
        )
8,671 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def set_alias(alias, model_id_or_alias):
    """
    Set an alias to point to the specified model.
    """
    path = user_dir() / "aliases.json"
    path.parent.mkdir(parents=True, exist_ok=True)
    if not path.exists():
        path.write_text("{}\n")
    try:
        current = json.loads(path.read_text())
    except json.decoder.JSONDecodeError:
        # We're going to write a valid JSON file in a moment:
        current = {}
    # Resolve model_id_or_alias to a model_id
    try:
        model = get_model(model_id_or_alias)
        model_id = model.model_id
    except UnknownModelError:
        # Try to resolve it to an embedding model
        try:
            model = get_embedding_model(model_id_or_alias)
            model_id = model.model_id
        except UnknownModelError:
            # Set the alias to the exact string they provided instead
            model_id = model_id_or_alias
    current[alias] = model_id
    # Rewrite the whole file with the updated mapping
    path.write_text(json.dumps(current, indent=4) + "\n")
The provided code snippet includes necessary dependencies for implementing the `aliases_set` function. Write a Python function `def aliases_set(alias, model_id)` to solve the following problem:
Set an alias for a model Example usage: \b $ llm aliases set turbo gpt-3.5-turbo
Here is the function:
def aliases_set(alias, model_id):
    """
    Set an alias for a model
    Example usage:
    \b
    $ llm aliases set turbo gpt-3.5-turbo
    """
    # Delegates to set_alias, which resolves model_id (or an alias of one)
    # and persists the mapping to aliases.json.
    set_alias(alias, model_id) | Set an alias for a model Example usage: \b $ llm aliases set turbo gpt-3.5-turbo |
8,672 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def remove_alias(alias):
    """
    Remove an alias.

    Raises KeyError (with a human-readable message) when the aliases file
    is missing, corrupt, or does not contain *alias*.
    """
    path = user_dir() / "aliases.json"
    if not path.exists():
        raise KeyError("No aliases.json file exists")
    try:
        aliases_map = json.loads(path.read_text())
    except json.decoder.JSONDecodeError:
        raise KeyError("aliases.json file is not valid JSON")
    if alias not in aliases_map:
        raise KeyError("No such alias: {}".format(alias))
    del aliases_map[alias]
    path.write_text(json.dumps(aliases_map, indent=4) + "\n")
The provided code snippet includes necessary dependencies for implementing the `aliases_remove` function. Write a Python function `def aliases_remove(alias)` to solve the following problem:
Remove an alias Example usage: \b $ llm aliases remove turbo
Here is the function:
def aliases_remove(alias):
    """
    Remove an alias
    Example usage:
    \b
    $ llm aliases remove turbo
    """
    # remove_alias raises KeyError with a user-facing message; re-raise it
    # as a CLI error so click prints it cleanly.
    try:
        remove_alias(alias)
    except KeyError as ex:
        raise click.ClickException(ex.args[0]) | Remove an alias Example usage: \b $ llm aliases remove turbo |
8,673 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def user_dir():
    """Return the llm user data directory, creating it if necessary.

    The LLM_USER_PATH environment variable overrides the platform default
    from click.get_app_dir().
    """
    llm_user_path = os.environ.get("LLM_USER_PATH")
    if llm_user_path:
        path = pathlib.Path(llm_user_path)
    else:
        path = pathlib.Path(click.get_app_dir("io.datasette.llm"))
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `aliases_path` function. Write a Python function `def aliases_path()` to solve the following problem:
Output the path to the aliases.json file
Here is the function:
def aliases_path():
    "Output the path to the aliases.json file"
    # Prints the location so users can inspect or edit their alias config.
    click.echo(user_dir() / "aliases.json") | Output the path to the aliases.json file |
8,674 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def get_plugins(all=False):
    """Describe registered plugins as a list of dicts.

    Each entry carries "name" and "hooks"; plugins backed by an
    installed distribution also gain "version" and the distribution
    name. Bundled llm.default_plugins.* modules are skipped unless
    all=True.
    """
    distinfo_by_plugin = dict(pm.list_plugin_distinfo())
    results = []
    for plugin in pm.get_plugins():
        is_builtin = plugin.__name__.startswith("llm.default_plugins.")
        if is_builtin and not all:
            continue
        entry = {
            "name": plugin.__name__,
            "hooks": [caller.name for caller in pm.get_hookcallers(plugin)],
        }
        dist = distinfo_by_plugin.get(plugin)
        if dist:
            entry["version"] = dist.version
            entry["name"] = getattr(dist, "name", None) or dist.project_name
        results.append(entry)
    return results
The provided code snippet includes necessary dependencies for implementing the `plugins_list` function. Write a Python function `def plugins_list(all)` to solve the following problem:
List installed plugins
Here is the function:
def plugins_list(all):
    "List installed plugins"
    rendered = json.dumps(get_plugins(all), indent=2)
    click.echo(rendered)
8,675 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def load_template(name):
    """Load the named template from the templates directory.

    A YAML file holding a bare string is treated as a prompt-only
    template; otherwise the parsed mapping is passed to Template(**loaded).

    Raises click.ClickException for a missing file, invalid YAML,
    or a pydantic validation failure.
    """
    path = template_dir() / f"{name}.yaml"
    if not path.exists():
        raise click.ClickException(f"Invalid template: {name}")
    try:
        loaded = yaml.safe_load(path.read_text())
    except yaml.YAMLError as ex:
        raise click.ClickException("Invalid YAML: {}".format(str(ex)))
    if isinstance(loaded, str):
        # Shorthand: a plain string is the prompt itself
        return Template(name=name, prompt=loaded)
    loaded["name"] = name
    try:
        return Template(**loaded)
    except pydantic.ValidationError as ex:
        msg = "A validation error occurred:\n"
        msg += render_errors(ex.errors())
        raise click.ClickException(msg)
The provided code snippet includes necessary dependencies for implementing the `templates_show` function. Write a Python function `def templates_show(name)` to solve the following problem:
Show the specified prompt template
Here is the function:
def templates_show(name):
    "Show the specified prompt template"
    template = load_template(name)
    # Dump only the fields that are set, as YAML, to stdout.
    # NOTE(review): .dict() is the pydantic v1 API - confirm the file
    # still targets pydantic v1 before migrating to model_dump().
    click.echo(
        yaml.dump(
            dict((k, v) for k, v in template.dict().items() if v is not None),
            indent=4,
            default_flow_style=False,
        )
    )
8,676 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
DEFAULT_TEMPLATE = "prompt: "
def template_dir():
    """Return the user templates directory, creating it if missing."""
    templates = user_dir() / "templates"
    templates.mkdir(parents=True, exist_ok=True)
    return templates
def load_template(name):
    """Load the named template from the templates directory.

    A YAML file holding a bare string is treated as a prompt-only
    template; otherwise the parsed mapping is passed to Template(**loaded).

    Raises click.ClickException for a missing file, invalid YAML,
    or a pydantic validation failure.
    """
    path = template_dir() / f"{name}.yaml"
    if not path.exists():
        raise click.ClickException(f"Invalid template: {name}")
    try:
        loaded = yaml.safe_load(path.read_text())
    except yaml.YAMLError as ex:
        raise click.ClickException("Invalid YAML: {}".format(str(ex)))
    if isinstance(loaded, str):
        # Shorthand: a plain string is the prompt itself
        return Template(name=name, prompt=loaded)
    loaded["name"] = name
    try:
        return Template(**loaded)
    except pydantic.ValidationError as ex:
        msg = "A validation error occurred:\n"
        msg += render_errors(ex.errors())
        raise click.ClickException(msg)
The provided code snippet includes necessary dependencies for implementing the `templates_edit` function. Write a Python function `def templates_edit(name)` to solve the following problem:
Edit the specified prompt template using the default $EDITOR
Here is the function:
def templates_edit(name):
    "Edit the specified prompt template using the default $EDITOR"
    # Create a stub template first so the editor always has a file to open
    path = template_dir() / f"{name}.yaml"
    if not path.exists():
        path.write_text(DEFAULT_TEMPLATE, "utf-8")
    # click.edit() expects a string filename; older click releases fail
    # when handed a pathlib.Path, so convert explicitly
    click.edit(filename=str(path))
    # Validate that template - raises ClickException if it is now invalid
    load_template(name)
8,677 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def template_dir():
    # Templates live under the llm user directory; created on first use
    path = user_dir() / "templates"
    path.mkdir(parents=True, exist_ok=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `templates_path` function. Write a Python function `def templates_path()` to solve the following problem:
Output the path to the templates directory
Here is the function:
def templates_path():
    "Output the path to the templates directory"
    directory = template_dir()
    click.echo(directory)
8,678 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
The provided code snippet includes necessary dependencies for implementing the `install` function. Write a Python function `def install(packages, upgrade, editable, force_reinstall, no_cache_dir)` to solve the following problem:
Install packages from PyPI into the same environment as LLM
Here is the function:
def install(packages, upgrade, editable, force_reinstall, no_cache_dir):
    """Install packages from PyPI into the same environment as LLM"""
    # Assemble the pip command line from the enabled options
    pip_args = ["pip", "install"]
    if upgrade:
        pip_args.append("--upgrade")
    if editable:
        pip_args.extend(["--editable", editable])
    if force_reinstall:
        pip_args.append("--force-reinstall")
    if no_cache_dir:
        pip_args.append("--no-cache-dir")
    pip_args.extend(packages)
    # Run pip in-process; it reads its arguments from sys.argv
    sys.argv = pip_args
    run_module("pip", run_name="__main__")
8,679 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
The provided code snippet includes necessary dependencies for implementing the `uninstall` function. Write a Python function `def uninstall(packages, yes)` to solve the following problem:
Uninstall Python packages from the LLM environment
Here is the function:
def uninstall(packages, yes):
    """Uninstall Python packages from the LLM environment"""
    # Run pip uninstall in-process; -y skips the confirmation prompt
    argv = ["pip", "uninstall"]
    argv.extend(packages)
    if yes:
        argv.append("-y")
    sys.argv = argv
    run_module("pip", run_name="__main__")
8,680 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def get_default_embedding_model():
    # Stored in default_embedding_model.txt; None when no default is set
    return get_default_model("default_embedding_model.txt", None)
def get_embedding_model(name):
    """Resolve a model ID or alias to an embedding model instance.

    Raises UnknownModelError when the name is not registered.
    """
    registry = get_embedding_model_aliases()
    if name not in registry:
        raise UnknownModelError("Unknown model: " + str(name))
    return registry[name]
class UnknownModelError(KeyError):
    """Raised when a model ID or alias cannot be resolved."""

    pass
def user_dir():
    # Directory for llm user data; LLM_USER_PATH overrides the default
    # platform-specific click app dir. Created on first access.
    llm_user_path = os.environ.get("LLM_USER_PATH")
    if llm_user_path:
        path = pathlib.Path(llm_user_path)
    else:
        path = pathlib.Path(click.get_app_dir("io.datasette.llm"))
    path.mkdir(exist_ok=True, parents=True)
    return path
def encode(values):
    """Pack a sequence of floats into little-endian 32-bit binary."""
    fmt = "<{}f".format(len(values))
    return struct.pack(fmt, *values)
The provided code snippet includes necessary dependencies for implementing the `embed` function. Write a Python function `def embed( collection, id, input, model, store, database, content, binary, metadata, format_ )` to solve the following problem:
Embed text and store or return the result
Here is the function:
def embed(
    collection, id, input, model, store, database, content, binary, metadata, format_
):
    """Embed text and store or return the result"""
    if collection and not id:
        raise click.ClickException("Must provide both collection and id")
    if store and not collection:
        raise click.ClickException("Must provide collection when using --store")

    # Lazy load this because we do not need it for -c or -i versions
    def get_db():
        if database:
            return sqlite_utils.Database(database)
        else:
            return sqlite_utils.Database(user_dir() / "embeddings.db")

    collection_obj = None
    model_obj = None
    if collection:
        db = get_db()
        if Collection.exists(db, collection):
            # Load existing collection and use its model
            collection_obj = Collection(collection, db)
            model_obj = collection_obj.model()
        else:
            # We will create a new one, but that means model is required
            if not model:
                model = get_default_embedding_model()
                if model is None:
                    raise click.ClickException(
                        "You need to specify an embedding model (no default model is set)"
                    )
            collection_obj = Collection(collection, db=db, model_id=model)
            model_obj = collection_obj.model()
    if model_obj is None:
        # No collection in play - resolve the model directly
        if model is None:
            model = get_default_embedding_model()
        try:
            model_obj = get_embedding_model(model)
        except UnknownModelError:
            raise click.ClickException(
                "You need to specify an embedding model (no default model is set)"
            )
    # When storing into a collection, stay quiet unless a format was requested
    show_output = True
    if collection and (format_ is None):
        show_output = False
    # Resolve input text
    if not content:
        if not input or input == "-":
            # Read from stdin
            input_source = sys.stdin.buffer if binary else sys.stdin
            content = input_source.read()
        else:
            mode = "rb" if binary else "r"
            with open(input, mode) as f:
                content = f.read()
    if not content:
        raise click.ClickException("No content provided")
    if collection_obj:
        embedding = collection_obj.embed(id, content, metadata=metadata, store=store)
    else:
        embedding = model_obj.embed(content)
    if show_output:
        # JSON is the default output format
        if format_ == "json" or format_ is None:
            click.echo(json.dumps(embedding))
        elif format_ == "blob":
            click.echo(encode(embedding))
        elif format_ == "base64":
            click.echo(base64.b64encode(encode(embedding)).decode("ascii"))
        elif format_ == "hex":
            click.echo(encode(embedding).hex())
8,681 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def get_default_embedding_model():
    # Stored in default_embedding_model.txt; None when no default is set
    return get_default_model("default_embedding_model.txt", None)
def user_dir():
    # Directory for llm user data; LLM_USER_PATH overrides the default
    # platform-specific click app dir. Created on first access.
    llm_user_path = os.environ.get("LLM_USER_PATH")
    if llm_user_path:
        path = pathlib.Path(llm_user_path)
    else:
        path = pathlib.Path(click.get_app_dir("io.datasette.llm"))
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `embed_multi` function. Write a Python function `def embed_multi( collection, input_path, format, files, encodings, binary, sql, attach, batch_size, prefix, model, store, database, )` to solve the following problem:
Store embeddings for multiple strings at once Input can be CSV, TSV or a JSON list of objects. The first column is treated as an ID - all other columns are assumed to be text that should be concatenated together in order to calculate the embeddings. Input data can come from one of three sources: \b 1. A CSV, JSON, TSV or JSON-nl file (including on standard input) 2. A SQL query against a SQLite database 3. A directory of files
Here is the function:
def embed_multi(
    collection,
    input_path,
    format,
    files,
    encodings,
    binary,
    sql,
    attach,
    batch_size,
    prefix,
    model,
    store,
    database,
):
    """
    Store embeddings for multiple strings at once

    Input can be CSV, TSV or a JSON list of objects.

    The first column is treated as an ID - all other columns
    are assumed to be text that should be concatenated together
    in order to calculate the embeddings.

    Input data can come from one of three sources:

    \b
    1. A CSV, JSON, TSV or JSON-nl file (including on standard input)
    2. A SQL query against a SQLite database
    3. A directory of files
    """
    # Validate mutually-exclusive option combinations up front
    if binary and not files:
        raise click.UsageError("--binary must be used with --files")
    if binary and encodings:
        raise click.UsageError("--binary cannot be used with --encoding")
    if not input_path and not sql and not files:
        raise click.UsageError("Either --sql or input path or --files is required")
    if files:
        if input_path or sql or format:
            raise click.UsageError(
                "Cannot use --files with --sql, input path or --format"
            )

    if database:
        db = sqlite_utils.Database(database)
    else:
        db = sqlite_utils.Database(user_dir() / "embeddings.db")
    for alias, attach_path in attach:
        db.attach(alias, attach_path)

    try:
        collection_obj = Collection(
            collection, db=db, model_id=model or get_default_embedding_model()
        )
    except ValueError:
        raise click.ClickException(
            "You need to specify an embedding model (no default model is set)"
        )

    expected_length = None  # sizes the progress bar when it can be known
    if files:
        encodings = encodings or ("utf-8", "latin-1")

        def count_files():
            # Pre-count matches so the progress bar can show a percentage
            i = 0
            for directory, pattern in files:
                for path in pathlib.Path(directory).glob(pattern):
                    i += 1
            return i

        def iterate_files():
            for directory, pattern in files:
                p = pathlib.Path(directory)
                if not p.exists() or not p.is_dir():
                    # fixes issue/274 - raise error if directory does not exist
                    raise click.UsageError(f"Invalid directory: {directory}")
                for path in pathlib.Path(directory).glob(pattern):
                    if path.is_dir():
                        continue  # fixed issue/280 - skip directories
                    relative = path.relative_to(directory)
                    content = None
                    if binary:
                        content = path.read_bytes()
                    else:
                        for encoding in encodings:
                            try:
                                content = path.read_text(encoding=encoding)
                                # Bug fix: stop at the first encoding that
                                # succeeds. Without this break, a later
                                # fallback such as latin-1 (which never raises
                                # UnicodeDecodeError) would re-decode the file
                                # and clobber a correct utf-8 result.
                                break
                            except UnicodeDecodeError:
                                continue
                    if content is None:
                        # Log to stderr
                        click.echo(
                            "Could not decode text in file {}".format(path),
                            err=True,
                        )
                    else:
                        yield {"id": str(relative), "content": content}

        expected_length = count_files()
        rows = iterate_files()
    elif sql:
        rows = db.query(sql)
        count_sql = "select count(*) as c from ({})".format(sql)
        expected_length = next(db.query(count_sql))["c"]
    else:

        def load_rows(fp):
            return rows_from_file(fp, Format[format.upper()] if format else None)[0]

        try:
            if input_path != "-":
                # Read the file twice - first pass is only to get a count
                expected_length = 0
                with open(input_path, "rb") as fp:
                    for _ in load_rows(fp):
                        expected_length += 1
            rows = load_rows(
                open(input_path, "rb")
                if input_path != "-"
                else io.BufferedReader(sys.stdin.buffer)
            )
        except json.JSONDecodeError as ex:
            raise click.ClickException(str(ex))

    with click.progressbar(
        rows, label="Embedding", show_percent=True, length=expected_length
    ) as rows:

        def tuples() -> Iterable[Tuple[str, Union[bytes, str]]]:
            # First column is the ID (with optional prefix); remaining
            # columns are joined with spaces to form the embedded text
            for row in rows:
                values = list(row.values())
                id = prefix + str(values[0])
                if binary:
                    yield id, cast(bytes, values[1])
                else:
                    yield id, " ".join(v or "" for v in values[1:])

        embed_kwargs = {"store": store}
        if batch_size:
            embed_kwargs["batch_size"] = batch_size
        collection_obj.embed_multi(tuples(), **embed_kwargs)
8,682 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def user_dir():
    # Directory for llm user data; LLM_USER_PATH overrides the default
    # platform-specific click app dir. Created on first access.
    llm_user_path = os.environ.get("LLM_USER_PATH")
    if llm_user_path:
        path = pathlib.Path(llm_user_path)
    else:
        path = pathlib.Path(click.get_app_dir("io.datasette.llm"))
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `similar` function. Write a Python function `def similar(collection, id, input, content, binary, number, database)` to solve the following problem:
Return top N similar IDs from a collection Example usage: \b llm similar my-collection -c "I like cats" Or to find content similar to a specific stored ID: \b llm similar my-collection 1234
Here is the function:
def similar(collection, id, input, content, binary, number, database):
    """
    Return top N similar IDs from a collection

    Example usage:

    \b
        llm similar my-collection -c "I like cats"

    Or to find content similar to a specific stored ID:

    \b
        llm similar my-collection 1234
    """
    if not id and not content and not input:
        raise click.ClickException("Must provide content or an ID for the comparison")
    if database:
        db = sqlite_utils.Database(database)
    else:
        db = sqlite_utils.Database(user_dir() / "embeddings.db")
    if not db["embeddings"].exists():
        raise click.ClickException("No embeddings table found in database")
    try:
        collection_obj = Collection(collection, db, create=False)
    except Collection.DoesNotExist:
        raise click.ClickException("Collection does not exist")
    if id:
        # Compare against an embedding already stored in the collection
        try:
            results = collection_obj.similar_by_id(id, number)
        except Collection.DoesNotExist:
            raise click.ClickException("ID not found in collection")
    else:
        # Resolve input text
        if not content:
            if not input or input == "-":
                # Read from stdin
                input_source = sys.stdin.buffer if binary else sys.stdin
                content = input_source.read()
            else:
                mode = "rb" if binary else "r"
                with open(input, mode) as f:
                    content = f.read()
        if not content:
            raise click.ClickException("No content provided")
        results = collection_obj.similar(content, number)
    # Emit one JSON object per line; each result is a dataclass
    for result in results:
        click.echo(json.dumps(asdict(result)))
8,683 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
The provided code snippet includes necessary dependencies for implementing the `embed_models` function. Write a Python function `def embed_models()` to solve the following problem:
Manage available embedding models
Here is the function:
def embed_models():
    # Click command group - subcommands are registered elsewhere in the file
    "Manage available embedding models"
8,684 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def aliases():
    # Click command group - subcommands are registered elsewhere in the file
    "Manage model aliases"
def get_embedding_models_with_aliases() -> List["EmbeddingModelWithAliases"]:
    """Return every registered embedding model with its aliases.

    Combines aliases supplied by plugins at registration time with
    user-configured aliases from aliases.json in the llm user directory.
    """
    model_aliases = []
    # Include aliases from aliases.json
    aliases_path = user_dir() / "aliases.json"
    extra_model_aliases: Dict[str, list] = {}
    if aliases_path.exists():
        configured_aliases = json.loads(aliases_path.read_text())
        for alias, model_id in configured_aliases.items():
            extra_model_aliases.setdefault(model_id, []).append(alias)

    def register(model, aliases=None):
        # Callback handed to plugins; merges user aliases for this model
        alias_list = list(aliases or [])
        if model.model_id in extra_model_aliases:
            alias_list.extend(extra_model_aliases[model.model_id])
        model_aliases.append(EmbeddingModelWithAliases(model, alias_list))

    pm.hook.register_embedding_models(register=register)
    return model_aliases
The provided code snippet includes necessary dependencies for implementing the `embed_models_list` function. Write a Python function `def embed_models_list()` to solve the following problem:
List available embedding models
Here is the function:
def embed_models_list():
    "List available embedding models"
    lines = []
    for entry in get_embedding_models_with_aliases():
        line = str(entry.model.model_id)
        if entry.aliases:
            line += " (aliases: {})".format(", ".join(entry.aliases))
        lines.append(line)
    click.echo("\n".join(lines))
8,685 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
default=None,
def get_default_embedding_model():
    # Stored in default_embedding_model.txt; None when no default is set
    return get_default_model("default_embedding_model.txt", None)
def set_default_embedding_model(model):
    # Persist (or clear, when model is None) the default embedding model
    set_default_model(model, "default_embedding_model.txt")
def get_embedding_model(name):
    """Resolve a model ID or alias to an embedding model instance."""
    aliases = get_embedding_model_aliases()
    try:
        return aliases[name]
    except KeyError:
        raise UnknownModelError("Unknown model: " + str(name))
The provided code snippet includes necessary dependencies for implementing the `embed_models_default` function. Write a Python function `def embed_models_default(model, remove_default)` to solve the following problem:
Show or set the default embedding model
Here is the function:
def embed_models_default(model, remove_default):
    "Show or set the default embedding model"
    if not model and not remove_default:
        # No arguments: just display the current default
        default = get_default_embedding_model()
        if default is None:
            click.echo("<No default embedding model set>", err=True)
        else:
            click.echo(default)
        return
    # Validate it is a known model
    try:
        if remove_default:
            set_default_embedding_model(None)
        else:
            # UnknownModelError is a KeyError subclass, caught below
            model = get_embedding_model(model)
            set_default_embedding_model(model.model_id)
    except KeyError:
        raise click.ClickException("Unknown embedding model: {}".format(model))
8,686 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
The provided code snippet includes necessary dependencies for implementing the `collections` function. Write a Python function `def collections()` to solve the following problem:
View and manage collections of embeddings
Here is the function:
def collections():
    # Click command group - subcommands are registered elsewhere in the file
    "View and manage collections of embeddings"
8,687 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def user_dir():
    # Directory for llm user data; LLM_USER_PATH overrides the default
    # platform-specific click app dir. Created on first access.
    llm_user_path = os.environ.get("LLM_USER_PATH")
    if llm_user_path:
        path = pathlib.Path(llm_user_path)
    else:
        path = pathlib.Path(click.get_app_dir("io.datasette.llm"))
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `collections_path` function. Write a Python function `def collections_path()` to solve the following problem:
Output the path to the embeddings database
Here is the function:
def collections_path():
    "Output the path to the embeddings database"
    db_path = user_dir() / "embeddings.db"
    click.echo(db_path)
8,688 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def user_dir():
    # Directory for llm user data; LLM_USER_PATH overrides the default
    # platform-specific click app dir. Created on first access.
    llm_user_path = os.environ.get("LLM_USER_PATH")
    if llm_user_path:
        path = pathlib.Path(llm_user_path)
    else:
        path = pathlib.Path(click.get_app_dir("io.datasette.llm"))
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `embed_db_collections` function. Write a Python function `def embed_db_collections(database, json_)` to solve the following problem:
View a list of collections
Here is the function:
def embed_db_collections(database, json_):
    "View a list of collections"
    # Default to the shared embeddings database in the llm user directory
    database = database or (user_dir() / "embeddings.db")
    db = sqlite_utils.Database(str(database))
    if not db["collections"].exists():
        raise click.ClickException("No collections table found in {}".format(database))
    # One row per collection with its model and embedding count
    rows = db.query(
        """
        select
            collections.name,
            collections.model,
            count(embeddings.id) as num_embeddings
        from
            collections left join embeddings
            on collections.id = embeddings.collection_id
        group by
            collections.name, collections.model
        """
    )
    if json_:
        click.echo(json.dumps(list(rows), indent=4))
    else:
        for row in rows:
            click.echo("{}: {}".format(row["name"], row["model"]))
            click.echo(
                " {} embedding{}".format(
                    row["num_embeddings"], "s" if row["num_embeddings"] != 1 else ""
                )
            )
8,689 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def user_dir():
llm_user_path = os.environ.get("LLM_USER_PATH")
if llm_user_path:
path = pathlib.Path(llm_user_path)
else:
path = pathlib.Path(click.get_app_dir("io.datasette.llm"))
path.mkdir(exist_ok=True, parents=True)
return path
The provided code snippet includes necessary dependencies for implementing the `collections_delete` function. Write a Python function `def collections_delete(collection, database)` to solve the following problem:
Delete the specified collection Example usage: \b llm collections delete my-collection
Here is the function:
def collections_delete(collection, database):
"""
Delete the specified collection
Example usage:
\b
llm collections delete my-collection
"""
database = database or (user_dir() / "embeddings.db")
db = sqlite_utils.Database(str(database))
try:
collection_obj = Collection(collection, db, create=False)
except Collection.DoesNotExist:
raise click.ClickException("Collection does not exist")
collection_obj.delete() | Delete the specified collection Example usage: \b llm collections delete my-collection |
8,690 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def logs_db_path():
return user_dir() / "logs.db"
def migrate(db):
ensure_migrations_table(db)
already_applied = {r["name"] for r in db["_llm_migrations"].rows}
for fn in MIGRATIONS:
name = fn.__name__
if name not in already_applied:
fn(db)
db["_llm_migrations"].insert(
{"name": name, "applied_at": str(datetime.datetime.utcnow())}
)
already_applied.add(name)
def get_history(chat_id):
if chat_id is None:
return None, []
log_path = logs_db_path()
db = sqlite_utils.Database(log_path)
migrate(db)
if chat_id == -1:
# Return the most recent chat
last_row = list(db["logs"].rows_where(order_by="-id", limit=1))
if last_row:
chat_id = last_row[0].get("chat_id") or last_row[0].get("id")
else: # Database is empty
return None, []
rows = db["logs"].rows_where(
"id = ? or chat_id = ?", [chat_id, chat_id], order_by="id"
)
return chat_id, rows | null |
8,691 | import click
import httpx
import json
import textwrap
from typing import List, Dict
The provided code snippet includes necessary dependencies for implementing the `remove_dict_none_values` function. Write a Python function `def remove_dict_none_values(d)` to solve the following problem:
Recursively remove keys with value of None or value of a dict that is all values of None
Here is the function:
def remove_dict_none_values(d):
"""
Recursively remove keys with value of None or value of a dict that is all values of None
"""
if not isinstance(d, dict):
return d
new_dict = {}
for key, value in d.items():
if value is not None:
if isinstance(value, dict):
nested = remove_dict_none_values(value)
if nested:
new_dict[key] = nested
elif isinstance(value, list):
new_dict[key] = [remove_dict_none_values(v) for v in value]
else:
new_dict[key] = value
return new_dict | Recursively remove keys with value of None or value of a dict that is all values of None |
8,692 | import click
import httpx
import json
import textwrap
from typing import List, Dict
class _LogTransport(httpx.BaseTransport):
def __init__(self, transport: httpx.BaseTransport):
self.transport = transport
def handle_request(self, request: httpx.Request) -> httpx.Response:
response = self.transport.handle_request(request)
return _LogResponse(
status_code=response.status_code,
headers=response.headers,
stream=response.stream,
extensions=response.extensions,
)
def _no_accept_encoding(request: httpx.Request):
request.headers.pop("accept-encoding", None)
def _log_response(response: httpx.Response):
request = response.request
click.echo(f"Request: {request.method} {request.url}", err=True)
click.echo(" Headers:", err=True)
for key, value in request.headers.items():
if key.lower() == "authorization":
value = "[...]"
if key.lower() == "cookie":
value = value.split("=")[0] + "=..."
click.echo(f" {key}: {value}", err=True)
click.echo(" Body:", err=True)
try:
request_body = json.loads(request.content)
click.echo(
textwrap.indent(json.dumps(request_body, indent=2), " "), err=True
)
except json.JSONDecodeError:
click.echo(textwrap.indent(request.content.decode(), " "), err=True)
click.echo(f"Response: status_code={response.status_code}", err=True)
click.echo(" Headers:", err=True)
for key, value in response.headers.items():
if key.lower() == "set-cookie":
value = value.split("=")[0] + "=..."
click.echo(f" {key}: {value}", err=True)
click.echo(" Body:", err=True)
def logging_client() -> httpx.Client:
return httpx.Client(
transport=_LogTransport(httpx.HTTPTransport()),
event_hooks={"request": [_no_accept_encoding], "response": [_log_response]},
) | null |
8,693 | import datetime
from typing import Callable, List
def m001_initial(db):
# Ensure the original table design exists, so other migrations can run
if db["log"].exists():
# It needs to have the chat_id column
if "chat_id" not in db["log"].columns_dict:
db["log"].add_column("chat_id")
return
db["log"].create(
{
"provider": str,
"system": str,
"prompt": str,
"chat_id": str,
"response": str,
"model": str,
"timestamp": str,
}
) | null |
8,694 | import datetime
from typing import Callable, List
def m002_id_primary_key(db):
db["log"].transform(pk="id") | null |
8,695 | import datetime
from typing import Callable, List
def m003_chat_id_foreign_key(db):
db["log"].transform(types={"chat_id": int})
db["log"].add_foreign_key("chat_id", "log", "id") | null |
8,696 | import datetime
from typing import Callable, List
def m004_column_order(db):
db["log"].transform(
column_order=(
"id",
"model",
"timestamp",
"prompt",
"system",
"response",
"chat_id",
)
) | null |
8,697 | import datetime
from typing import Callable, List
def m004_drop_provider(db):
db["log"].transform(drop=("provider",)) | null |
8,698 | import datetime
from typing import Callable, List
def m005_debug(db):
db["log"].add_column("debug", str)
db["log"].add_column("duration_ms", int) | null |
8,699 | import datetime
from typing import Callable, List
def m006_new_logs_table(db):
columns = db["log"].columns_dict
for column, type in (
("options_json", str),
("prompt_json", str),
("response_json", str),
("reply_to_id", int),
):
# It's possible people running development code like myself
# might have accidentally created these columns already
if column not in columns:
db["log"].add_column(column, type)
# Use .transform() to rename options and timestamp_utc, and set new order
db["log"].transform(
column_order=(
"id",
"model",
"prompt",
"system",
"prompt_json",
"options_json",
"response",
"response_json",
"reply_to_id",
"chat_id",
"duration_ms",
"timestamp_utc",
),
rename={
"timestamp": "timestamp_utc",
"options": "options_json",
},
) | null |
8,700 | import datetime
from typing import Callable, List
def m007_finish_logs_table(db):
db["log"].transform(
drop={"debug"},
rename={"timestamp_utc": "datetime_utc"},
drop_foreign_keys=("chat_id",),
)
with db.conn:
db.execute("alter table log rename to logs") | null |
8,701 | import datetime
from typing import Callable, List
def m008_reply_to_id_foreign_key(db):
db["logs"].add_foreign_key("reply_to_id", "logs", "id") | null |
8,702 | import datetime
from typing import Callable, List
def m008_fix_column_order_in_logs(db):
# reply_to_id ended up at the end after foreign key added
db["logs"].transform(
column_order=(
"id",
"model",
"prompt",
"system",
"prompt_json",
"options_json",
"response",
"response_json",
"reply_to_id",
"chat_id",
"duration_ms",
"timestamp_utc",
),
) | null |
8,703 | import datetime
from typing import Callable, List
def m009_delete_logs_table_if_empty(db):
# We moved to a new table design, but we don't delete the table
# if someone has put data in it
if not db["logs"].count:
db["logs"].drop() | null |
8,704 | import datetime
from typing import Callable, List
def m010_create_new_log_tables(db):
db["conversations"].create(
{
"id": str,
"name": str,
"model": str,
},
pk="id",
)
db["responses"].create(
{
"id": str,
"model": str,
"prompt": str,
"system": str,
"prompt_json": str,
"options_json": str,
"response": str,
"response_json": str,
"conversation_id": str,
"duration_ms": int,
"datetime_utc": str,
},
pk="id",
foreign_keys=(("conversation_id", "conversations", "id"),),
) | null |
8,705 | import datetime
from typing import Callable, List
def m011_fts_for_responses(db):
db["responses"].enable_fts(["prompt", "response"], create_triggers=True) | null |
8,706 | from dataclasses import dataclass, field
import datetime
from .errors import NeedsKeyException
from itertools import islice
import re
import time
from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Union
from abc import ABC, abstractmethod
import json
from pydantic import BaseModel
from ulid import ULID
CONVERSATION_NAME_LENGTH = 32
def _conversation_name(text):
# Collapse whitespace, including newlines
text = re.sub(r"\s+", " ", text)
if len(text) <= CONVERSATION_NAME_LENGTH:
return text
return text[: CONVERSATION_NAME_LENGTH - 1] + "…" | null |
8,707 | from pluggy import HookimplMarker
from pluggy import HookspecMarker
The provided code snippet includes necessary dependencies for implementing the `register_commands` function. Write a Python function `def register_commands(cli)` to solve the following problem:
Register additional CLI commands, e.g. 'llm mycommand ...
Here is the function:
def register_commands(cli):
"""Register additional CLI commands, e.g. 'llm mycommand ...'""" | Register additional CLI commands, e.g. 'llm mycommand ... |
8,708 | from pluggy import HookimplMarker
from pluggy import HookspecMarker
The provided code snippet includes necessary dependencies for implementing the `register_models` function. Write a Python function `def register_models(register)` to solve the following problem:
Register additional model instances representing LLM models that can be called
Here is the function:
def register_models(register):
"Register additional model instances representing LLM models that can be called" | Register additional model instances representing LLM models that can be called |
8,709 | from pluggy import HookimplMarker
from pluggy import HookspecMarker
The provided code snippet includes necessary dependencies for implementing the `register_embedding_models` function. Write a Python function `def register_embedding_models(register)` to solve the following problem:
Register additional model instances that can be used for embedding
Here is the function:
def register_embedding_models(register):
"Register additional model instances that can be used for embedding" | Register additional model instances that can be used for embedding |
8,710 | from llm import EmbeddingModel, Model, hookimpl
import llm
from llm.utils import dicts_to_table_string, remove_dict_none_values, logging_client
import click
import datetime
import httpx
import openai
import os
from typing import List, Iterable, Iterator, Optional, Union
import json
import yaml
class Chat(Model):
needs_key = "openai"
key_env_var = "OPENAI_API_KEY"
can_stream: bool = True
default_max_tokens = None
class Options(SharedOptions):
json_object: Optional[bool] = Field(
description="Output a valid JSON object {...}. Prompt must mention JSON.",
default=None,
)
def __init__(
self,
model_id,
key=None,
model_name=None,
api_base=None,
api_type=None,
api_version=None,
api_engine=None,
headers=None,
):
self.model_id = model_id
self.key = key
self.model_name = model_name
self.api_base = api_base
self.api_type = api_type
self.api_version = api_version
self.api_engine = api_engine
self.headers = headers
def __str__(self):
return "OpenAI Chat: {}".format(self.model_id)
def execute(self, prompt, stream, response, conversation=None):
messages = []
current_system = None
if conversation is not None:
for prev_response in conversation.responses:
if (
prev_response.prompt.system
and prev_response.prompt.system != current_system
):
messages.append(
{"role": "system", "content": prev_response.prompt.system}
)
current_system = prev_response.prompt.system
messages.append(
{"role": "user", "content": prev_response.prompt.prompt}
)
messages.append({"role": "assistant", "content": prev_response.text()})
if prompt.system and prompt.system != current_system:
messages.append({"role": "system", "content": prompt.system})
messages.append({"role": "user", "content": prompt.prompt})
response._prompt_json = {"messages": messages}
kwargs = self.build_kwargs(prompt)
client = self.get_client()
if stream:
completion = client.chat.completions.create(
model=self.model_name or self.model_id,
messages=messages,
stream=True,
**kwargs,
)
chunks = []
for chunk in completion:
chunks.append(chunk)
content = chunk.choices[0].delta.content
if content is not None:
yield content
response.response_json = remove_dict_none_values(combine_chunks(chunks))
else:
completion = client.chat.completions.create(
model=self.model_name or self.model_id,
messages=messages,
stream=False,
**kwargs,
)
response.response_json = remove_dict_none_values(completion.dict())
yield completion.choices[0].message.content
def get_client(self):
kwargs = {}
if self.api_base:
kwargs["base_url"] = self.api_base
if self.api_type:
kwargs["api_type"] = self.api_type
if self.api_version:
kwargs["api_version"] = self.api_version
if self.api_engine:
kwargs["engine"] = self.api_engine
if self.needs_key:
if self.key:
kwargs["api_key"] = self.key
else:
# OpenAI-compatible models don't need a key, but the
# openai client library requires one
kwargs["api_key"] = "DUMMY_KEY"
if self.headers:
kwargs["default_headers"] = self.headers
if os.environ.get("LLM_OPENAI_SHOW_RESPONSES"):
kwargs["http_client"] = logging_client()
return openai.OpenAI(**kwargs)
def build_kwargs(self, prompt):
kwargs = dict(not_nulls(prompt.options))
json_object = kwargs.pop("json_object", None)
if "max_tokens" not in kwargs and self.default_max_tokens is not None:
kwargs["max_tokens"] = self.default_max_tokens
if json_object:
kwargs["response_format"] = {"type": "json_object"}
return kwargs
class Completion(Chat):
class Options(SharedOptions):
logprobs: Optional[int] = Field(
description="Include the log probabilities of most likely N per token",
default=None,
le=5,
)
def __init__(self, *args, default_max_tokens=None, **kwargs):
super().__init__(*args, **kwargs)
self.default_max_tokens = default_max_tokens
def __str__(self):
return "OpenAI Completion: {}".format(self.model_id)
def execute(self, prompt, stream, response, conversation=None):
if prompt.system:
raise NotImplementedError(
"System prompts are not supported for OpenAI completion models"
)
messages = []
if conversation is not None:
for prev_response in conversation.responses:
messages.append(prev_response.prompt.prompt)
messages.append(prev_response.text())
messages.append(prompt.prompt)
response._prompt_json = {"messages": messages}
kwargs = self.build_kwargs(prompt)
client = self.get_client()
if stream:
completion = client.completions.create(
model=self.model_name or self.model_id,
prompt="\n".join(messages),
stream=True,
**kwargs,
)
chunks = []
for chunk in completion:
chunks.append(chunk)
content = chunk.choices[0].text
if content is not None:
yield content
combined = combine_chunks(chunks)
cleaned = remove_dict_none_values(combined)
response.response_json = cleaned
else:
completion = client.completions.create(
model=self.model_name or self.model_id,
prompt="\n".join(messages),
stream=False,
**kwargs,
)
response.response_json = remove_dict_none_values(completion.dict())
yield completion.choices[0].text
def register_models(register):
register(Chat("gpt-3.5-turbo"), aliases=("3.5", "chatgpt"))
register(Chat("gpt-3.5-turbo-16k"), aliases=("chatgpt-16k", "3.5-16k"))
register(Chat("gpt-4"), aliases=("4", "gpt4"))
register(Chat("gpt-4-32k"), aliases=("4-32k",))
# GPT-4 turbo models
register(Chat("gpt-4-1106-preview"))
register(Chat("gpt-4-0125-preview"))
register(Chat("gpt-4-turbo-preview"), aliases=("gpt-4-turbo", "4-turbo", "4t"))
# The -instruct completion model
register(
Completion("gpt-3.5-turbo-instruct", default_max_tokens=256),
aliases=("3.5-instruct", "chatgpt-instruct"),
)
# Load extra models
extra_path = llm.user_dir() / "extra-openai-models.yaml"
if not extra_path.exists():
return
with open(extra_path) as f:
extra_models = yaml.safe_load(f)
for extra_model in extra_models:
model_id = extra_model["model_id"]
aliases = extra_model.get("aliases", [])
model_name = extra_model["model_name"]
api_base = extra_model.get("api_base")
api_type = extra_model.get("api_type")
api_version = extra_model.get("api_version")
api_engine = extra_model.get("api_engine")
headers = extra_model.get("headers")
if extra_model.get("completion"):
klass = Completion
else:
klass = Chat
chat_model = klass(
model_id,
model_name=model_name,
api_base=api_base,
api_type=api_type,
api_version=api_version,
api_engine=api_engine,
headers=headers,
)
if api_base:
chat_model.needs_key = None
if extra_model.get("api_key_name"):
chat_model.needs_key = extra_model["api_key_name"]
register(
chat_model,
aliases=aliases,
) | null |
8,711 | from llm import EmbeddingModel, Model, hookimpl
import llm
from llm.utils import dicts_to_table_string, remove_dict_none_values, logging_client
import click
import datetime
import httpx
import openai
import os
from typing import List, Iterable, Iterator, Optional, Union
import json
import yaml
class OpenAIEmbeddingModel(EmbeddingModel):
def __init__(self, model_id, openai_model_id, dimensions=None):
def embed_batch(self, items: Iterable[Union[str, bytes]]) -> Iterator[List[float]]:
def register_embedding_models(register):
register(
OpenAIEmbeddingModel("ada-002", "text-embedding-ada-002"), aliases=("ada",)
)
register(OpenAIEmbeddingModel("3-small", "text-embedding-3-small"))
register(OpenAIEmbeddingModel("3-large", "text-embedding-3-large"))
# With varying dimensions
register(OpenAIEmbeddingModel("3-small-512", "text-embedding-3-small", 512))
register(OpenAIEmbeddingModel("3-large-256", "text-embedding-3-large", 256))
register(OpenAIEmbeddingModel("3-large-1024", "text-embedding-3-large", 1024)) | null |
8,712 | from llm import EmbeddingModel, Model, hookimpl
import llm
from llm.utils import dicts_to_table_string, remove_dict_none_values, logging_client
import click
import datetime
import httpx
import openai
import os
from typing import List, Iterable, Iterator, Optional, Union
import json
import yaml
def dicts_to_table_string(
headings: List[str], dicts: List[Dict[str, str]]
) -> List[str]:
max_lengths = [len(h) for h in headings]
# Compute maximum length for each column
for d in dicts:
for i, h in enumerate(headings):
if h in d and len(str(d[h])) > max_lengths[i]:
max_lengths[i] = len(str(d[h]))
# Generate formatted table strings
res = []
res.append(" ".join(h.ljust(max_lengths[i]) for i, h in enumerate(headings)))
for d in dicts:
row = []
for i, h in enumerate(headings):
row.append(str(d.get(h, "")).ljust(max_lengths[i]))
res.append(" ".join(row))
return res
def register_commands(cli):
@cli.group(name="openai")
def openai_():
"Commands for working directly with the OpenAI API"
@openai_.command()
@click.option("json_", "--json", is_flag=True, help="Output as JSON")
@click.option("--key", help="OpenAI API key")
def models(json_, key):
"List models available to you from the OpenAI API"
from llm.cli import get_key
api_key = get_key(key, "openai", "OPENAI_API_KEY")
response = httpx.get(
"https://api.openai.com/v1/models",
headers={"Authorization": f"Bearer {api_key}"},
)
if response.status_code != 200:
raise click.ClickException(
f"Error {response.status_code} from OpenAI API: {response.text}"
)
models = response.json()["data"]
if json_:
click.echo(json.dumps(models, indent=4))
else:
to_print = []
for model in models:
# Print id, owned_by, root, created as ISO 8601
created_str = datetime.datetime.utcfromtimestamp(
model["created"]
).isoformat()
to_print.append(
{
"id": model["id"],
"owned_by": model["owned_by"],
"created": created_str,
}
)
done = dicts_to_table_string("id owned_by created".split(), to_print)
print("\n".join(done)) | null |
8,713 | from llm import EmbeddingModel, Model, hookimpl
import llm
from llm.utils import dicts_to_table_string, remove_dict_none_values, logging_client
import click
import datetime
import httpx
import openai
import os
from typing import List, Iterable, Iterator, Optional, Union
import json
import yaml
def not_nulls(data) -> dict:
return {key: value for key, value in data if value is not None} | null |
8,714 | from llm import EmbeddingModel, Model, hookimpl
import llm
from llm.utils import dicts_to_table_string, remove_dict_none_values, logging_client
import click
import datetime
import httpx
import openai
import os
from typing import List, Iterable, Iterator, Optional, Union
import json
import yaml
def combine_chunks(chunks: List) -> dict:
content = ""
role = None
finish_reason = None
# If any of them have log probability, we're going to persist
# those later on
logprobs = []
for item in chunks:
for choice in item.choices:
if choice.logprobs and hasattr(choice.logprobs, "top_logprobs"):
logprobs.append(
{
"text": choice.text if hasattr(choice, "text") else None,
"top_logprobs": choice.logprobs.top_logprobs,
}
)
if not hasattr(choice, "delta"):
content += choice.text
continue
role = choice.delta.role
if choice.delta.content is not None:
content += choice.delta.content
if choice.finish_reason is not None:
finish_reason = choice.finish_reason
# Imitations of the OpenAI API may be missing some of these fields
combined = {
"content": content,
"role": role,
"finish_reason": finish_reason,
}
if logprobs:
combined["logprobs"] = logprobs
for key in ("id", "object", "model", "created", "index"):
value = getattr(chunks[0], key, None)
if value is not None:
combined[key] = value
return combined | null |
8,715 | import re
import json
import typing
import requests
import lxml.html
from typing import Any, Sequence, Type
from pydantic import BaseModel
from .errors import PreprocessorError
from .responses import Response, ScrapeResponse
from .apicall import OpenAiCall, Postprocessor, RetryRule
from .utils import logger, _tokens, _tostr
from .preprocessors import Preprocessor, CleanHTML
from .postprocessors import (
JSONPostprocessor,
PydanticPostprocessor,
)
class Response:
api_responses: list = field(default_factory=list)
total_cost: float = 0
total_prompt_tokens: int = 0
total_completion_tokens: int = 0
api_time: float = 0
data: dict | list | str = ""
class ScrapeResponse(Response):
url: str | None = None
parsed_html: lxml.html.HtmlElement | None = None
auto_split_length: int | None = None
The provided code snippet includes necessary dependencies for implementing the `_combine_responses` function. Write a Python function `def _combine_responses( sr: ScrapeResponse, responses: Sequence[Response] ) -> ScrapeResponse` to solve the following problem:
Combine (possibly paginated) API responses into a single ScrapeResponse.
Here is the function:
def _combine_responses(
sr: ScrapeResponse, responses: Sequence[Response]
) -> ScrapeResponse:
"""
Combine (possibly paginated) API responses into a single ScrapeResponse.
"""
sr.api_responses = [
api_resp for resp in responses for api_resp in resp.api_responses
]
sr.total_cost = sum([resp.total_cost for resp in responses])
sr.total_prompt_tokens = sum([resp.total_prompt_tokens for resp in responses])
sr.total_completion_tokens = sum(
[resp.total_completion_tokens for resp in responses]
)
sr.api_time = sum([resp.api_time for resp in responses])
if len(responses) > 1:
sr.data = [item for resp in responses for item in resp.data]
else:
sr.data = responses[0].data
return sr | Combine (possibly paginated) API responses into a single ScrapeResponse. |
8,716 | import re
import json
import typing
import requests
import lxml.html
from typing import Any, Sequence, Type
from pydantic import BaseModel
from .errors import PreprocessorError
from .responses import Response, ScrapeResponse
from .apicall import OpenAiCall, Postprocessor, RetryRule
from .utils import logger, _tokens, _tostr
from .preprocessors import Preprocessor, CleanHTML
from .postprocessors import (
JSONPostprocessor,
PydanticPostprocessor,
)
logger = structlog.get_logger("scrapeghost")
The provided code snippet includes necessary dependencies for implementing the `_parse_url_or_html` function. Write a Python function `def _parse_url_or_html(url_or_html: str) -> lxml.html.Element` to solve the following problem:
Given URL or HTML, return lxml.html.Element
Here is the function:
def _parse_url_or_html(url_or_html: str) -> lxml.html.Element:
"""
Given URL or HTML, return lxml.html.Element
"""
# coerce to HTML
orig_url = None
if url_or_html.startswith("http"):
orig_url = url_or_html
url_or_html = requests.get(url_or_html).text
# collapse whitespace
url_or_html = re.sub("[ \t]+", " ", url_or_html)
logger.debug("got HTML", length=len(url_or_html), url=orig_url)
doc = lxml.html.fromstring(url_or_html)
if orig_url:
doc.make_links_absolute(orig_url)
return doc | Given URL or HTML, return lxml.html.Element |
8,717 | import re
import json
import typing
import requests
import lxml.html
from typing import Any, Sequence, Type
from pydantic import BaseModel
from .errors import PreprocessorError
from .responses import Response, ScrapeResponse
from .apicall import OpenAiCall, Postprocessor, RetryRule
from .utils import logger, _tokens, _tostr
from .preprocessors import Preprocessor, CleanHTML
from .postprocessors import (
JSONPostprocessor,
PydanticPostprocessor,
)
logger = structlog.get_logger("scrapeghost")
def _tostr(obj: lxml.html.HtmlElement) -> str:
"""
Given lxml.html.HtmlElement, return string
"""
return lxml.html.tostring(obj, encoding="unicode")
def _tokens(model: str, html: str) -> int:
encoding = tiktoken.encoding_for_model(model)
return len(encoding.encode(html))
The provided code snippet includes necessary dependencies for implementing the `_chunk_tags` function. Write a Python function `def _chunk_tags(tags: list, max_tokens: int, model: str) -> list[str]` to solve the following problem:
Given a list of all matching HTML tags, recombine into HTML chunks that can be passed to API.
Here is the function:
def _chunk_tags(tags: list, max_tokens: int, model: str) -> list[str]:
"""
Given a list of all matching HTML tags, recombine into HTML chunks
that can be passed to API.
"""
chunks = []
chunk_sizes = []
chunk = ""
chunk_tokens = 0
for tag in tags:
tag_html = _tostr(tag)
tag_tokens = _tokens(model, tag_html)
# if adding tag would exceed max_tokens, start new chunk (unless chunk is empty)
if chunk_tokens + tag_tokens > max_tokens and chunk_tokens > 0:
chunks.append(chunk)
chunk_sizes.append(chunk_tokens)
chunk = ""
chunk_tokens = 0
chunk += tag_html
chunk_tokens += tag_tokens
chunks.append(chunk)
chunk_sizes.append(chunk_tokens)
logger.debug(
"chunked tags",
num=len(chunks),
sizes=chunk_sizes,
)
return chunks | Given a list of all matching HTML tags, recombine into HTML chunks that can be passed to API. |
8,718 | import re
import json
import typing
import requests
import lxml.html
from typing import Any, Sequence, Type
from pydantic import BaseModel
from .errors import PreprocessorError
from .responses import Response, ScrapeResponse
from .apicall import OpenAiCall, Postprocessor, RetryRule
from .utils import logger, _tokens, _tostr
from .preprocessors import Preprocessor, CleanHTML
from .postprocessors import (
JSONPostprocessor,
PydanticPostprocessor,
)
The provided code snippet includes necessary dependencies for implementing the `_pydantic_to_simple_schema` function. Write a Python function `def _pydantic_to_simple_schema(pydantic_model: Type[BaseModel]) -> dict` to solve the following problem:
Given a Pydantic model, return a simple schema that can be used by SchemaScraper. We don't use Pydantic's schema() method because the additional complexity of JSON Schema adds a lot of extra tokens and in testing did not work as well as the simplified versions.
Here is the function:
def _pydantic_to_simple_schema(pydantic_model: Type[BaseModel]) -> dict:
"""
Given a Pydantic model, return a simple schema that can be used
by SchemaScraper.
We don't use Pydantic's schema() method because the
additional complexity of JSON Schema adds a lot of extra tokens
and in testing did not work as well as the simplified versions.
"""
schema: dict = {}
for field_name, field in pydantic_model.model_fields.items():
# model_fields is present on Pydantic models, so can process recursively
if field.annotation is None:
raise TypeError("missing annotation")
elif isinstance(field.annotation, type) and issubclass(
field.annotation, BaseModel
):
schema[field_name] = _pydantic_to_simple_schema(field.annotation)
else:
type_name = field.annotation.__name__
if type_name == "list":
(inner,) = typing.get_args(field.annotation)
schema[field_name] = [inner.__name__]
elif type_name == "dict":
k, v = typing.get_args(field.annotation)
schema[field_name] = {k.__name__: v.__name__}
else:
schema[field_name] = type_name
return schema | Given a Pydantic model, return a simple schema that can be used by SchemaScraper. We don't use Pydantic's schema() method because the additional complexity of JSON Schema adds a lot of extra tokens and in testing did not work as well as the simplified versions. |
8,719 | import json
import pathlib
import logging
import structlog
import typer
from .scrapers import SchemaScraper
from .preprocessors import CSS, XPath
class SchemaScraper(OpenAiCall):
def __init__(
self,
schema: dict | str | list,
extra_preprocessors: list | None = None,
*,
auto_split_length: int = 0,
# inherited from OpenAiCall
models: list[str] = ["gpt-3.5-turbo", "gpt-4"],
model_params: dict | None = None,
max_cost: float = 1,
retry: RetryRule = RetryRule(1, 30),
extra_instructions: list[str] | None = None,
postprocessors: list | None = None,
):
def _apply_preprocessors(
self, doc: lxml.html.Element, extra_preprocessors: list
) -> list:
def scrape(
self,
url_or_html: str,
extra_preprocessors: list | None = None,
) -> ScrapeResponse:
class XPath:
def __init__(self, xpath: str):
def __str__(self) -> str:
def __call__(self, node: lxml.html.HtmlElement) -> list[lxml.html.HtmlElement]:
class CSS:
def __init__(self, css: str):
def __str__(self) -> str:
def __call__(self, node: lxml.html.HtmlElement) -> list[lxml.html.HtmlElement]:
def scrape(
url: str,
xpath: str = typer.Option(None, help="XPath selector to narrow the scrape"),
css: str = typer.Option(None, help="CSS selector to narrow the scrape"),
schema: str = typer.Option(None, help="Schema to use for scraping"),
schema_file: pathlib.Path = typer.Option(None, help="Path to schema.json file"),
gpt4: bool = typer.Option(False, help="Use GPT-4 instead of GPT-3.5-turbo"),
verbosity: int = typer.Option(
0, "-v", "--verbose", count=True, help="Verbosity level 0-2"
),
) -> None:
if schema_file:
with open(schema_file) as f:
schema = f.read()
if not schema:
raise typer.BadParameter("You must provide a schema or schema_file.")
log_level = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}[verbosity]
structlog.configure(
wrapper_class=structlog.make_filtering_bound_logger(log_level),
)
scraper = SchemaScraper(schema, models=["gpt-4"] if gpt4 else ["gpt-3.5-turbo"])
if xpath:
scraper.preprocessors.append(XPath(xpath))
if css:
scraper.preprocessors.append(CSS(css))
result = scraper(url)
typer.echo(json.dumps(result.data)) | null |
8,720 | import lxml.html
import structlog
import tiktoken
from .models import _model_dict
def _tokens(model: str, html: str) -> int:
    """Count how many tokens *html* occupies under *model*'s tokenizer."""
    return len(tiktoken.encoding_for_model(model).encode(html))
_model_dict = {model.name: model for model in models}
The provided code snippet includes necessary dependencies for implementing the `cost_estimate` function. Write a Python function `def cost_estimate(html: str, model: str = "gpt-4") -> float` to solve the following problem:
Given HTML, return cost estimate in dollars. This is a very rough estimate and not guaranteed to be accurate.
Here is the function:
def cost_estimate(html: str, model: str = "gpt-4") -> float:
"""
Given HTML, return cost estimate in dollars.
This is a very rough estimate and not guaranteed to be accurate.
"""
tokens = _tokens(model, html)
model_data = _model_dict[model]
# assumes response is half as long as prompt, which is probably wrong
return model_data.cost(tokens, tokens // 2) | Given HTML, return cost estimate in dollars. This is a very rough estimate and not guaranteed to be accurate. |
8,721 | from __future__ import annotations
from typing import TYPE_CHECKING
import json
from pydantic import ValidationError
from .utils import logger, _tostr
from .errors import InvalidJSON, PostprocessingError
from .responses import Response, ScrapeResponse
class PostprocessingError(ScrapeghostError):
pass
The provided code snippet includes necessary dependencies for implementing the `_check_data_in_html` function. Write a Python function `def _check_data_in_html(html: str, d: dict | list | str, parent: str = "") -> None` to solve the following problem:
Recursively check response for data that is not in the html.
Here is the function:
def _check_data_in_html(html: str, d: dict | list | str, parent: str = "") -> None:
"""
Recursively check response for data that is not in the html.
"""
if isinstance(d, dict):
for k, v in d.items():
_check_data_in_html(html, v, parent + f".{k}")
elif isinstance(d, list):
for i, v in enumerate(d):
_check_data_in_html(html, v, parent + f"[{i}]")
elif isinstance(d, str):
if d not in html:
raise PostprocessingError(f"Data not found in html: {d} ({parent})") | Recursively check response for data that is not in the html. |
8,722 | import sys
import json
import lxml.html
import requests
from scrapeghost import SchemaScraper, CSS
def get_urls():
# this page is currently too long for the 8k limit, even with hints
html = requests.get("https://www.ilga.gov/senate/default.asp").text
doc = lxml.html.fromstring(html)
doc.make_links_absolute("https://www.ilga.gov/senate/")
return [
a.attrib["href"]
for a in doc.cssselect("a")
if "Senator.asp" in a.attrib["href"]
] | null |
8,723 | import ast
import re
from setuptools import setup, find_packages
with open('pypi_desc.md', 'r', encoding='utf-8') as f:
readme = f.read()
The provided code snippet includes necessary dependencies for implementing the `get_version_string` function. Write a Python function `def get_version_string()` to solve the following problem:
Get the gne version number :return: version number :rtype: str
Here is the function:
def get_version_string():
"""
Get the gne version number
:return: version number
:rtype: str
"""
with open("gne/__init__.py", "rb") as _f:
version_line = re.search(
r"__version__\s+=\s+(.*)", _f.read().decode("utf-8")
).group(1)
return str(ast.literal_eval(version_line)) | Get the gne version number :return: version number :rtype: str |
8,724 | import ast
import re
from setuptools import setup, find_packages
with open('pypi_desc.md', 'r', encoding='utf-8') as f:
readme = f.read()
The provided code snippet includes necessary dependencies for implementing the `get_author_string` function. Write a Python function `def get_author_string()` to solve the following problem:
Get the gne author info :return: author name :rtype: str
Here is the function:
def get_author_string():
"""
Get the gne author info
:return: author name
:rtype: str
"""
with open("gne/__init__.py", "rb") as _f:
version_line = re.search(
r"__author__\s+=\s+(.*)", _f.read().decode("utf-8")
).group(1)
return str(ast.literal_eval(version_line)) | Get the gne author info :return: author name :rtype: str |
8,725 | import os
import re
import yaml
import unicodedata
from lxml.html import fromstring, HtmlElement
from lxml.html import etree
from urllib.parse import urlparse, urljoin
from .defaults import USELESS_TAG, TAGS_CAN_BE_REMOVE_IF_EMPTY, USELESS_ATTR, HIGH_WEIGHT_ARRT_KEYWORD
def html2element(html):
html = re.sub('</?br.*?>', '', html)
element = fromstring(html)
return element | null |
8,726 | import os
import re
import yaml
import unicodedata
from lxml.html import fromstring, HtmlElement
from lxml.html import etree
from urllib.parse import urlparse, urljoin
from .defaults import USELESS_TAG, TAGS_CAN_BE_REMOVE_IF_EMPTY, USELESS_ATTR, HIGH_WEIGHT_ARRT_KEYWORD
def normalize_node(element: HtmlElement):
def pre_parse(element):
normalize_node(element)
return element | null |
8,727 | import os
import re
import yaml
import unicodedata
from lxml.html import fromstring, HtmlElement
from lxml.html import etree
from urllib.parse import urlparse, urljoin
from .defaults import USELESS_TAG, TAGS_CAN_BE_REMOVE_IF_EMPTY, USELESS_ATTR, HIGH_WEIGHT_ARRT_KEYWORD
def remove_node(node: HtmlElement):
    """Detach *node* from its parent tree, in place.

    Nothing is returned; a node with no parent (the document root) is left
    untouched.

    :param node: element to detach
    :return: None
    """
    parent = node.getparent()
    if parent is None:
        return
    parent.remove(node)
config = read_config()
def remove_noise_node(element, noise_xpath_list):
noise_xpath_list = noise_xpath_list or config.get('noise_node_list')
if not noise_xpath_list:
return
for noise_xpath in noise_xpath_list:
nodes = element.xpath(noise_xpath)
for node in nodes:
remove_node(node)
return element | null |
8,728 | import os
import re
import yaml
import unicodedata
from lxml.html import fromstring, HtmlElement
from lxml.html import etree
from urllib.parse import urlparse, urljoin
from .defaults import USELESS_TAG, TAGS_CAN_BE_REMOVE_IF_EMPTY, USELESS_ATTR, HIGH_WEIGHT_ARRT_KEYWORD
The provided code snippet includes necessary dependencies for implementing the `pad_host_for_images` function. Write a Python function `def pad_host_for_images(host, url)` to solve the following problem:
网站上的图片可能有如下几种格式: 完整的绝对路径:https://xxx.com/1.jpg 完全不含 host 的相对路径: /1.jpg 含 host 但是不含 scheme: xxx.com/1.jpg 或者 ://xxx.com/1.jpg :param host: :param url: :return:
Here is the function:
def pad_host_for_images(host, url):
"""
网站上的图片可能有如下几种格式:
完整的绝对路径:https://xxx.com/1.jpg
完全不含 host 的相对路径: /1.jpg
含 host 但是不含 scheme: xxx.com/1.jpg 或者 ://xxx.com/1.jpg
:param host:
:param url:
:return:
"""
if url.startswith('http'):
return url
parsed_uri = urlparse(host)
scheme = parsed_uri.scheme
if url.startswith(':'):
return f'{scheme}{url}'
if url.startswith('//'):
return f'{scheme}:{url}'
return urljoin(host, url) | 网站上的图片可能有如下几种格式: 完整的绝对路径:https://xxx.com/1.jpg 完全不含 host 的相对路径: /1.jpg 含 host 但是不含 scheme: xxx.com/1.jpg 或者 ://xxx.com/1.jpg :param host: :param url: :return: |
8,729 | import os
import re
import yaml
import unicodedata
from lxml.html import fromstring, HtmlElement
from lxml.html import etree
from urllib.parse import urlparse, urljoin
from .defaults import USELESS_TAG, TAGS_CAN_BE_REMOVE_IF_EMPTY, USELESS_ATTR, HIGH_WEIGHT_ARRT_KEYWORD
config = read_config()
def read_config():
if os.path.exists('.gne'):
with open('.gne', encoding='utf-8') as f:
config_text = f.read()
config = yaml.safe_load(config_text)
return config
return {} | null |
8,730 | import os
import re
import yaml
import unicodedata
from lxml.html import fromstring, HtmlElement
from lxml.html import etree
from urllib.parse import urlparse, urljoin
from .defaults import USELESS_TAG, TAGS_CAN_BE_REMOVE_IF_EMPTY, USELESS_ATTR, HIGH_WEIGHT_ARRT_KEYWORD
HIGH_WEIGHT_ARRT_KEYWORD = ['content',
'article',
'news_txt',
'post_text']
]
def get_high_weight_keyword_pattern():
return re.compile('|'.join(HIGH_WEIGHT_ARRT_KEYWORD), flags=re.I) | null |
8,731 | import os
import re
import yaml
import unicodedata
from lxml.html import fromstring, HtmlElement
from lxml.html import etree
from urllib.parse import urlparse, urljoin
from .defaults import USELESS_TAG, TAGS_CAN_BE_REMOVE_IF_EMPTY, USELESS_ATTR, HIGH_WEIGHT_ARRT_KEYWORD
The provided code snippet includes necessary dependencies for implementing the `get_longest_common_sub_string` function. Write a Python function `def get_longest_common_sub_string(str1: str, str2: str) -> str` to solve the following problem:
获取两个字符串的最长公共子串。 构造一个矩阵,横向是字符串1,纵向是字符串2,例如: 青南是天才!? 听0 0 0 0 00 0 说0 0 0 0 00 0 青1 0 0 0 00 0 南0 1 0 0 00 0 是0 0 1 0 00 0 天0 0 0 1 00 0 才0 0 0 0 10 0 !0 0 0 0 01 0 显然,只要斜对角线最长的就是最长公共子串 :param str1: :param str2: :return:
Here is the function:
def get_longest_common_sub_string(str1: str, str2: str) -> str:
"""
获取两个字符串的最长公共子串。
构造一个矩阵,横向是字符串1,纵向是字符串2,例如:
青南是天才!?
听0 0 0 0 00 0
说0 0 0 0 00 0
青1 0 0 0 00 0
南0 1 0 0 00 0
是0 0 1 0 00 0
天0 0 0 1 00 0
才0 0 0 0 10 0
!0 0 0 0 01 0
显然,只要斜对角线最长的就是最长公共子串
:param str1:
:param str2:
:return:
"""
if not all([str1, str2]):
return ''
matrix = [[0] * (len(str2) + 1) for _ in range(len(str1) + 1)]
max_length = 0
start_position = 0
for index_of_str1 in range(1, len(str1) + 1):
for index_of_str2 in range(1, len(str2) + 1):
if str1[index_of_str1 - 1] == str2[index_of_str2 - 1]:
matrix[index_of_str1][index_of_str2] = matrix[index_of_str1 - 1][index_of_str2 - 1] + 1
if matrix[index_of_str1][index_of_str2] > max_length:
max_length = matrix[index_of_str1][index_of_str2]
start_position = index_of_str1 - max_length
else:
matrix[index_of_str1][index_of_str2] = 0
return str1[start_position: start_position + max_length] | 获取两个字符串的最长公共子串。 构造一个矩阵,横向是字符串1,纵向是字符串2,例如: 青南是天才!? 听0 0 0 0 00 0 说0 0 0 0 00 0 青1 0 0 0 00 0 南0 1 0 0 00 0 是0 0 1 0 00 0 天0 0 0 1 00 0 才0 0 0 0 10 0 !0 0 0 0 01 0 显然,只要斜对角线最长的就是最长公共子串 :param str1: :param str2: :return: |
8,732 | import os
import re
import yaml
import unicodedata
from lxml.html import fromstring, HtmlElement
from lxml.html import etree
from urllib.parse import urlparse, urljoin
from .defaults import USELESS_TAG, TAGS_CAN_BE_REMOVE_IF_EMPTY, USELESS_ATTR, HIGH_WEIGHT_ARRT_KEYWORD
The provided code snippet includes necessary dependencies for implementing the `normalize_text` function. Write a Python function `def normalize_text(html)` to solve the following problem:
使用 NFKC 对网页源代码进行归一化,把特殊符号转换为普通符号 注意,中文标点符号可能会被转换成英文标点符合。 :param html: :return:
Here is the function:
def normalize_text(html):
"""
使用 NFKC 对网页源代码进行归一化,把特殊符号转换为普通符号
注意,中文标点符号可能会被转换成英文标点符合。
:param html:
:return:
"""
return unicodedata.normalize('NFKC', html) | 使用 NFKC 对网页源代码进行归一化,把特殊符号转换为普通符号 注意,中文标点符号可能会被转换成英文标点符合。 :param html: :return: |
8,733 | import os
import re
from setuptools import find_packages, setup
deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)}
def deps_list(*pkgs):
return [deps[pkg] for pkg in pkgs] | null |
8,734 | import gc
from collections import OrderedDict
from typing import *
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
CreateConfig,
Profile,
engine_from_bytes,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models import CLIP, VAE, BaseModel, UNet, VAEEncoder
def CUASSERT(cuda_ret):
err = cuda_ret[0]
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError(
f"CUDA ERROR: {err}, error code reference: https://nvidia.github.io/cuda-python/module/cudart.html#cuda.cudart.cudaError_t"
)
if len(cuda_ret) > 1:
return cuda_ret[1]
return None | null |
8,735 | import gc
from collections import OrderedDict
from typing import *
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
CreateConfig,
Profile,
engine_from_bytes,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models import CLIP, VAE, BaseModel, UNet, VAEEncoder
def decode_images(images: torch.Tensor):
images = (
((images + 1) * 255 / 2).clamp(0, 255).detach().permute(0, 2, 3, 1).round().type(torch.uint8).cpu().numpy()
)
return [Image.fromarray(x) for x in images] | null |
8,736 | import gc
from collections import OrderedDict
from typing import *
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
CreateConfig,
Profile,
engine_from_bytes,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models import CLIP, VAE, BaseModel, UNet, VAEEncoder
if np.version.full_version >= "1.24.0":
numpy_to_torch_dtype_dict[np.bool_] = torch.bool
else:
numpy_to_torch_dtype_dict[np.bool] = torch.bool
def preprocess_image(image: Image.Image):
w, h = image.size
w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
image = image.resize((w, h))
init_image = np.array(image).astype(np.float32) / 255.0
init_image = init_image[None].transpose(0, 3, 1, 2)
init_image = torch.from_numpy(init_image).contiguous()
return 2.0 * init_image - 1.0 | null |
8,737 | import gc
from collections import OrderedDict
from typing import *
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
CreateConfig,
Profile,
engine_from_bytes,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models import CLIP, VAE, BaseModel, UNet, VAEEncoder
if np.version.full_version >= "1.24.0":
numpy_to_torch_dtype_dict[np.bool_] = torch.bool
else:
numpy_to_torch_dtype_dict[np.bool] = torch.bool
def prepare_mask_and_masked_image(image: Image.Image, mask: Image.Image):
if isinstance(image, Image.Image):
image = np.array(image.convert("RGB"))
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32).contiguous() / 127.5 - 1.0
if isinstance(mask, Image.Image):
mask = np.array(mask.convert("L"))
mask = mask.astype(np.float32) / 255.0
mask = mask[None, None]
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask).to(dtype=torch.float32).contiguous()
masked_image = image * (mask < 0.5)
return mask, masked_image | null |
8,738 | import gc
from collections import OrderedDict
from typing import *
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
CreateConfig,
Profile,
engine_from_bytes,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models import CLIP, VAE, BaseModel, UNet, VAEEncoder
class CLIP(BaseModel):
    """ONNX/TensorRT wrapper for the CLIP text encoder.

    The engine consumes a batch of tokenized prompts ("input_ids"); after
    ``optimize`` only the per-token embeddings ("text_embeddings") remain as
    graph output.
    """

    def __init__(self, device, max_batch_size, embedding_dim, min_batch_size=1):
        super(CLIP, self).__init__(
            device=device,
            max_batch_size=max_batch_size,
            min_batch_size=min_batch_size,
            embedding_dim=embedding_dim,
        )
        self.name = "CLIP"

    def get_input_names(self):
        # ONNX graph input binding names.
        return ["input_ids"]

    def get_output_names(self):
        # The raw export has two outputs; optimize() below strips "pooler_output".
        return ["text_embeddings", "pooler_output"]

    def get_dynamic_axes(self):
        # Only the batch axis is dynamic; sequence length is fixed at text_maxlen.
        return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}}

    def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Return the (min, opt, max) TensorRT shape profile for "input_ids"."""
        self.check_dims(batch_size, image_height, image_width)
        min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(
            batch_size, image_height, image_width, static_batch, static_shape
        )
        return {
            "input_ids": [
                (min_batch, self.text_maxlen),
                (batch_size, self.text_maxlen),
                (max_batch, self.text_maxlen),
            ]
        }

    def get_shape_dict(self, batch_size, image_height, image_width):
        """Concrete binding shapes used when allocating engine buffers."""
        self.check_dims(batch_size, image_height, image_width)
        return {
            "input_ids": (batch_size, self.text_maxlen),
            "text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim),
        }

    def get_sample_input(self, batch_size, image_height, image_width):
        """Dummy input tensor used to trace the model during ONNX export."""
        self.check_dims(batch_size, image_height, image_width)
        return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)

    def optimize(self, onnx_graph):
        """Clean up the exported graph and drop the unused pooler output."""
        opt = Optimizer(onnx_graph)
        opt.info(self.name + ": original")
        opt.select_outputs([0])  # delete graph output#1
        opt.cleanup()
        opt.info(self.name + ": remove output[1]")
        opt.fold_constants()
        opt.info(self.name + ": fold constants")
        opt.infer_shapes()
        opt.info(self.name + ": shape inference")
        opt.select_outputs([0], names=["text_embeddings"])  # rename network output
        opt.info(self.name + ": remove output[0]")
        opt_onnx_graph = opt.cleanup(return_onnx=True)
        opt.info(self.name + ": finished")
        return opt_onnx_graph
class UNet(BaseModel):
    """ONNX/TensorRT wrapper for the denoising UNet.

    All runtime bindings carry a doubled batch (2 * batch_size, axis label
    "2B") — presumably the conditional + unconditional halves of
    classifier-free guidance run in one batch; TODO confirm against the
    calling pipeline.
    """

    def __init__(
        self,
        fp16=False,
        device="cuda",
        max_batch_size=16,
        min_batch_size=1,
        embedding_dim=768,
        text_maxlen=77,
        unet_dim=4,
    ):
        super(UNet, self).__init__(
            fp16=fp16,
            device=device,
            max_batch_size=max_batch_size,
            min_batch_size=min_batch_size,
            embedding_dim=embedding_dim,
            text_maxlen=text_maxlen,
        )
        # Number of latent input channels for the "sample" binding.
        self.unet_dim = unet_dim
        self.name = "UNet"

    def get_input_names(self):
        return ["sample", "timestep", "encoder_hidden_states"]

    def get_output_names(self):
        return ["latent"]

    def get_dynamic_axes(self):
        # Batch ("2B") and spatial latent dims ("H", "W") are dynamic.
        return {
            "sample": {0: "2B", 2: "H", 3: "W"},
            "timestep": {0: "2B"},
            "encoder_hidden_states": {0: "2B"},
            "latent": {0: "2B", 2: "H", 3: "W"},
        }

    def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Return (min, opt, max) TensorRT shape profiles for every input binding."""
        latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
        (
            min_batch,
            max_batch,
            _,
            _,
            _,
            _,
            min_latent_height,
            max_latent_height,
            min_latent_width,
            max_latent_width,
        ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
        return {
            "sample": [
                (min_batch, self.unet_dim, min_latent_height, min_latent_width),
                (batch_size, self.unet_dim, latent_height, latent_width),
                (max_batch, self.unet_dim, max_latent_height, max_latent_width),
            ],
            "timestep": [(min_batch,), (batch_size,), (max_batch,)],
            "encoder_hidden_states": [
                (min_batch, self.text_maxlen, self.embedding_dim),
                (batch_size, self.text_maxlen, self.embedding_dim),
                (max_batch, self.text_maxlen, self.embedding_dim),
            ],
        }

    def get_shape_dict(self, batch_size, image_height, image_width):
        """Concrete binding shapes; note the doubled batch on every binding.

        The output "latent" always has 4 channels, independent of unet_dim
        (which sizes the input "sample").
        """
        latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
        return {
            "sample": (2 * batch_size, self.unet_dim, latent_height, latent_width),
            "timestep": (2 * batch_size,),
            "encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim),
            "latent": (2 * batch_size, 4, latent_height, latent_width),
        }

    def get_sample_input(self, batch_size, image_height, image_width):
        """Dummy (sample, timestep, hidden_states) tuple for ONNX export tracing.

        Only the hidden states follow self.fp16; sample/timestep stay float32.
        """
        latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
        dtype = torch.float16 if self.fp16 else torch.float32
        return (
            torch.randn(
                2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device
            ),
            torch.ones((2 * batch_size,), dtype=torch.float32, device=self.device),
            torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),
        )
class VAE(BaseModel):
    """ONNX/TensorRT wrapper for the VAE decoder (4-channel latents -> RGB images)."""

    def __init__(self, device, max_batch_size, min_batch_size=1):
        super(VAE, self).__init__(
            device=device,
            max_batch_size=max_batch_size,
            min_batch_size=min_batch_size,
            embedding_dim=None,
        )
        self.name = "VAE decoder"

    def get_input_names(self):
        return ["latent"]

    def get_output_names(self):
        return ["images"]

    def get_dynamic_axes(self):
        # Output spatial dims are 8x the latent dims (axis labels "8H"/"8W").
        return {
            "latent": {0: "B", 2: "H", 3: "W"},
            "images": {0: "B", 2: "8H", 3: "8W"},
        }

    def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Return the (min, opt, max) TensorRT shape profile for "latent"."""
        latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
        (
            min_batch,
            max_batch,
            _,
            _,
            _,
            _,
            min_latent_height,
            max_latent_height,
            min_latent_width,
            max_latent_width,
        ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
        return {
            "latent": [
                (min_batch, 4, min_latent_height, min_latent_width),
                (batch_size, 4, latent_height, latent_width),
                (max_batch, 4, max_latent_height, max_latent_width),
            ]
        }

    def get_shape_dict(self, batch_size, image_height, image_width):
        """Concrete binding shapes used when allocating engine buffers."""
        latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
        return {
            "latent": (batch_size, 4, latent_height, latent_width),
            "images": (batch_size, 3, image_height, image_width),
        }

    def get_sample_input(self, batch_size, image_height, image_width):
        """Dummy latent tensor for ONNX export tracing."""
        latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
        return torch.randn(
            batch_size,
            4,
            latent_height,
            latent_width,
            dtype=torch.float32,
            device=self.device,
        )
class VAEEncoder(BaseModel):
    """ONNX/TensorRT wrapper for the VAE encoder (RGB images -> 4-channel latents).

    Fix over the original: ``get_input_profile`` asserted the batch bounds and
    computed ``min_batch``/``max_batch`` by hand, then immediately overwrote
    those values with the tuple returned by ``get_minmax_dims`` (the batch
    bounds are re-checked by ``check_dims`` anyway). The dead pre-computation
    has been removed; behavior is unchanged.
    """

    def __init__(self, device, max_batch_size, min_batch_size=1):
        super(VAEEncoder, self).__init__(
            device=device,
            max_batch_size=max_batch_size,
            min_batch_size=min_batch_size,
            embedding_dim=None,
        )
        self.name = "VAE encoder"

    def get_input_names(self):
        return ["images"]

    def get_output_names(self):
        return ["latent"]

    def get_dynamic_axes(self):
        # Input spatial dims are 8x the latent dims (axis labels "8H"/"8W").
        return {
            "images": {0: "B", 2: "8H", 3: "8W"},
            "latent": {0: "B", 2: "H", 3: "W"},
        }

    def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Return the (min, opt, max) TensorRT shape profile for "images".

        check_dims validates the batch bounds (same assert the original
        duplicated inline) and the divisibility-by-8 of the resolution.
        """
        self.check_dims(batch_size, image_height, image_width)
        (
            min_batch,
            max_batch,
            min_image_height,
            max_image_height,
            min_image_width,
            max_image_width,
            _,
            _,
            _,
            _,
        ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
        return {
            "images": [
                (min_batch, 3, min_image_height, min_image_width),
                (batch_size, 3, image_height, image_width),
                (max_batch, 3, max_image_height, max_image_width),
            ],
        }

    def get_shape_dict(self, batch_size, image_height, image_width):
        """Concrete binding shapes used when allocating engine buffers."""
        latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
        return {
            "images": (batch_size, 3, image_height, image_width),
            "latent": (batch_size, 4, latent_height, latent_width),
        }

    def get_sample_input(self, batch_size, image_height, image_width):
        """Dummy image tensor for ONNX export tracing."""
        self.check_dims(batch_size, image_height, image_width)
        return torch.randn(
            batch_size,
            3,
            image_height,
            image_width,
            dtype=torch.float32,
            device=self.device,
        )
def create_models(
model_id: str,
use_auth_token: Optional[str],
device: Union[str, torch.device],
max_batch_size: int,
unet_in_channels: int = 4,
embedding_dim: int = 768,
):
models = {
"clip": CLIP(
hf_token=use_auth_token,
device=device,
max_batch_size=max_batch_size,
embedding_dim=embedding_dim,
),
"unet": UNet(
hf_token=use_auth_token,
fp16=True,
device=device,
max_batch_size=max_batch_size,
embedding_dim=embedding_dim,
unet_dim=unet_in_channels,
),
"vae": VAE(
hf_token=use_auth_token,
device=device,
max_batch_size=max_batch_size,
embedding_dim=embedding_dim,
),
"vae_encoder": VAEEncoder(
hf_token=use_auth_token,
device=device,
max_batch_size=max_batch_size,
embedding_dim=embedding_dim,
),
}
return models | null |
8,739 | import gc
from collections import OrderedDict
from typing import *
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
CreateConfig,
Profile,
engine_from_bytes,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models import CLIP, VAE, BaseModel, UNet, VAEEncoder
class Engine:
    """Wrapper around a single TensorRT engine.

    Handles building from ONNX, (de)serialization, in-place weight refitting,
    buffer allocation, and execution — optionally replayed through a captured
    CUDA graph.
    """

    def __init__(
        self,
        engine_path,
    ):
        self.engine_path = engine_path  # path of the serialized engine plan
        self.engine = None  # populated by load()
        self.context = None  # populated by activate()
        self.buffers = OrderedDict()  # polygraphy DeviceArrays, freed in __del__
        self.tensors = OrderedDict()  # binding name -> torch tensor (allocate_buffers)
        self.cuda_graph_instance = None  # cuda graph

    def __del__(self):
        # Free polygraphy device buffers before dropping the references.
        [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]
        del self.engine
        del self.context
        del self.buffers
        del self.tensors

    def refit(self, onnx_path, onnx_refit_path):
        """Overwrite the engine's weights with those from ``onnx_refit_path``.

        Nodes of the refit model are matched positionally (toposorted order)
        against ``onnx_path``, the model the engine was built from, so both
        graphs must have identical structure.
        """

        def convert_int64(arr):
            # TODO: smarter conversion
            if len(arr.shape) == 0:
                return np.int32(arr)
            return arr

        def add_to_map(refit_dict, name, values):
            # Only fill names pre-registered from the refitter; each at most once.
            if name in refit_dict:
                assert refit_dict[name] is None
                if values.dtype == np.int64:
                    values = convert_int64(values)
                refit_dict[name] = values

        print(f"Refitting TensorRT engine with {onnx_refit_path} weights")
        refit_nodes = gs.import_onnx(onnx.load(onnx_refit_path)).toposort().nodes

        # Construct mapping from weight names in refit model -> original model
        name_map = {}
        for n, node in enumerate(gs.import_onnx(onnx.load(onnx_path)).toposort().nodes):
            refit_node = refit_nodes[n]
            assert node.op == refit_node.op
            # Constant nodes in ONNX do not have inputs but have a constant output
            if node.op == "Constant":
                name_map[refit_node.outputs[0].name] = node.outputs[0].name
            # Handle scale and bias weights
            elif node.op == "Conv":
                if node.inputs[1].__class__ == gs.Constant:
                    name_map[refit_node.name + "_TRTKERNEL"] = node.name + "_TRTKERNEL"
                if node.inputs[2].__class__ == gs.Constant:
                    name_map[refit_node.name + "_TRTBIAS"] = node.name + "_TRTBIAS"
            # For all other nodes: find node inputs that are initializers (gs.Constant)
            else:
                for i, inp in enumerate(node.inputs):
                    if inp.__class__ == gs.Constant:
                        name_map[refit_node.inputs[i].name] = inp.name

        def map_name(name):
            # Translate a refit-model name to the original-model name, if known.
            if name in name_map:
                return name_map[name]
            return name

        # Construct refit dictionary
        refit_dict = {}
        refitter = trt.Refitter(self.engine, TRT_LOGGER)
        all_weights = refitter.get_all()
        for layer_name, role in zip(all_weights[0], all_weights[1]):
            # for specialized roles, use a unique name in the map:
            if role == trt.WeightsRole.KERNEL:
                name = layer_name + "_TRTKERNEL"
            elif role == trt.WeightsRole.BIAS:
                name = layer_name + "_TRTBIAS"
            else:
                name = layer_name
            assert name not in refit_dict, "Found duplicate layer: " + name
            refit_dict[name] = None

        # Walk the refit graph and collect replacement weights.
        for n in refit_nodes:
            # Constant nodes in ONNX do not have inputs but have a constant output
            if n.op == "Constant":
                name = map_name(n.outputs[0].name)
                print(f"Add Constant {name}\n")
                add_to_map(refit_dict, name, n.outputs[0].values)
            # Handle scale and bias weights
            elif n.op == "Conv":
                if n.inputs[1].__class__ == gs.Constant:
                    name = map_name(n.name + "_TRTKERNEL")
                    add_to_map(refit_dict, name, n.inputs[1].values)
                if n.inputs[2].__class__ == gs.Constant:
                    name = map_name(n.name + "_TRTBIAS")
                    add_to_map(refit_dict, name, n.inputs[2].values)
            # For all other nodes: find node inputs that are initializers (AKA gs.Constant)
            else:
                for inp in n.inputs:
                    name = map_name(inp.name)
                    if inp.__class__ == gs.Constant:
                        add_to_map(refit_dict, name, inp.values)

        # Push collected weights into the refitter.
        for layer_name, weights_role in zip(all_weights[0], all_weights[1]):
            if weights_role == trt.WeightsRole.KERNEL:
                custom_name = layer_name + "_TRTKERNEL"
            elif weights_role == trt.WeightsRole.BIAS:
                custom_name = layer_name + "_TRTBIAS"
            else:
                custom_name = layer_name
            # Skip refitting Trilu for now; scalar weights of type int64 value 1 - for clip model
            if layer_name.startswith("onnx::Trilu"):
                continue
            if refit_dict[custom_name] is not None:
                refitter.set_weights(layer_name, weights_role, refit_dict[custom_name])
            else:
                print(f"[W] No refit weights for layer: {layer_name}")

        if not refitter.refit_cuda_engine():
            print("Failed to refit!")
            # NOTE(review): exits with status 0 even though this is a failure
            # path — probably should be a nonzero code; left unchanged here.
            exit(0)

    def build(
        self,
        onnx_path,
        fp16,
        input_profile=None,
        enable_refit=False,
        enable_all_tactics=False,
        timing_cache=None,
        workspace_size=0,
    ):
        """Compile ``onnx_path`` into an engine serialized at ``self.engine_path``."""
        print(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
        p = Profile()
        if input_profile:
            for name, dims in input_profile.items():
                assert len(dims) == 3  # (min, opt, max) shape per input binding
                p.add(name, min=dims[0], opt=dims[1], max=dims[2])

        config_kwargs = {}
        if workspace_size > 0:
            config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
        if not enable_all_tactics:
            # Empty tactic-source list restricts TensorRT to its default tactics.
            config_kwargs["tactic_sources"] = []

        engine = engine_from_network(
            network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
            config=CreateConfig(
                fp16=fp16, refittable=enable_refit, profiles=[p], load_timing_cache=timing_cache, **config_kwargs
            ),
            save_timing_cache=timing_cache,
        )
        save_engine(engine, path=self.engine_path)

    def load(self):
        """Deserialize the engine from ``self.engine_path`` into ``self.engine``."""
        print(f"Loading TensorRT engine: {self.engine_path}")
        self.engine = engine_from_bytes(bytes_from_path(self.engine_path))

    def activate(self, reuse_device_memory=None):
        """Create the execution context, optionally sharing caller-provided device memory."""
        if reuse_device_memory:
            self.context = self.engine.create_execution_context_without_device_memory()
            self.context.device_memory = reuse_device_memory
        else:
            self.context = self.engine.create_execution_context()

    def allocate_buffers(self, shape_dict=None, device="cuda"):
        """Allocate one torch tensor per binding.

        Shapes come from ``shape_dict`` when given (keyed by binding name),
        otherwise from the engine itself.
        """
        for idx in range(trt_util.get_bindings_per_profile(self.engine)):
            binding = self.engine[idx]
            if shape_dict and binding in shape_dict:
                shape = shape_dict[binding]
            else:
                shape = self.engine.get_binding_shape(binding)
            dtype = trt.nptype(self.engine.get_binding_dtype(binding))
            if self.engine.binding_is_input(binding):
                self.context.set_binding_shape(idx, shape)
            tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
            self.tensors[binding] = tensor

    def infer(self, feed_dict, stream, use_cuda_graph=False):
        """Run one inference pass and return ``self.tensors``.

        ``feed_dict`` maps input binding names to tensors; values are copied
        into the pre-allocated buffers. With ``use_cuda_graph`` the first call
        captures a CUDA graph that subsequent calls replay.
        """
        for name, buf in feed_dict.items():
            self.tensors[name].copy_(buf)

        for name, tensor in self.tensors.items():
            self.context.set_tensor_address(name, tensor.data_ptr())

        if use_cuda_graph:
            if self.cuda_graph_instance is not None:
                CUASSERT(cudart.cudaGraphLaunch(self.cuda_graph_instance, stream.ptr))
                CUASSERT(cudart.cudaStreamSynchronize(stream.ptr))
            else:
                # do inference before CUDA graph capture
                noerror = self.context.execute_async_v3(stream.ptr)
                if not noerror:
                    raise ValueError("ERROR: inference failed.")
                # capture cuda graph
                CUASSERT(
                    cudart.cudaStreamBeginCapture(stream.ptr, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
                )
                self.context.execute_async_v3(stream.ptr)
                self.graph = CUASSERT(cudart.cudaStreamEndCapture(stream.ptr))
                self.cuda_graph_instance = CUASSERT(cudart.cudaGraphInstantiate(self.graph, 0))
        else:
            noerror = self.context.execute_async_v3(stream.ptr)
            if not noerror:
                raise ValueError("ERROR: inference failed.")
        return self.tensors
class BaseModel:
    """Shared base for the exported Stable Diffusion sub-model wrappers.

    Stores batch/resolution limits and implements the dimension bookkeeping
    (latent = image // 8) used to build TensorRT optimization profiles.
    """

    def __init__(
        self,
        fp16=False,
        device="cuda",
        verbose=True,
        max_batch_size=16,
        min_batch_size=1,
        embedding_dim=768,
        text_maxlen=77,
    ):
        self.name = "SD Model"
        self.fp16 = fp16
        self.device = device
        self.verbose = verbose
        self.min_batch = min_batch_size
        self.max_batch = max_batch_size
        self.min_image_shape = 256  # min image resolution: 256x256
        self.max_image_shape = 1024  # max image resolution: 1024x1024
        self.min_latent_shape = self.min_image_shape // 8
        self.max_latent_shape = self.max_image_shape // 8
        self.embedding_dim = embedding_dim
        self.text_maxlen = text_maxlen

    def get_model(self):
        """Return the torch module to export; overridden by subclasses."""
        pass

    def get_input_names(self):
        """ONNX input binding names; overridden by subclasses."""
        pass

    def get_output_names(self):
        """ONNX output binding names; overridden by subclasses."""
        pass

    def get_dynamic_axes(self):
        """Dynamic-axis spec for ONNX export; None means fully static."""
        return None

    def get_sample_input(self, batch_size, image_height, image_width):
        """Dummy input(s) for export tracing; overridden by subclasses."""
        pass

    def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
        """(min, opt, max) TensorRT shape profiles; overridden by subclasses."""
        return None

    def get_shape_dict(self, batch_size, image_height, image_width):
        """Concrete binding shapes; overridden by subclasses."""
        return None

    def optimize(self, onnx_graph):
        """Run the standard cleanup / constant-fold / shape-inference pass."""
        opt = Optimizer(onnx_graph, verbose=self.verbose)
        opt.info(self.name + ": original")
        opt.cleanup()
        opt.info(self.name + ": cleanup")
        opt.fold_constants()
        opt.info(self.name + ": fold constants")
        opt.infer_shapes()
        opt.info(self.name + ": shape inference")
        onnx_opt_graph = opt.cleanup(return_onnx=True)
        opt.info(self.name + ": finished")
        return onnx_opt_graph

    def check_dims(self, batch_size, image_height, image_width):
        """Validate batch and resolution; return (latent_height, latent_width).

        Raises AssertionError when the batch is out of bounds, either spatial
        dim is not a multiple of 8, or the latent resolution falls outside
        [min_latent_shape, max_latent_shape].
        """
        assert batch_size >= self.min_batch and batch_size <= self.max_batch
        # BUGFIX: was `image_height % 8 == 0 or image_width % 8 == 0`, which
        # accepted resolutions where only ONE side is divisible by 8; the 8x
        # latent scaling below requires both sides to be.
        assert image_height % 8 == 0 and image_width % 8 == 0
        latent_height = image_height // 8
        latent_width = image_width // 8
        assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape
        assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape
        return (latent_height, latent_width)

    def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Return min/max batch, image and latent dims for profile building.

        ``static_batch`` / ``static_shape`` pin the corresponding dims to the
        requested values instead of the configured ranges.
        """
        min_batch = batch_size if static_batch else self.min_batch
        max_batch = batch_size if static_batch else self.max_batch
        latent_height = image_height // 8
        latent_width = image_width // 8
        min_image_height = image_height if static_shape else self.min_image_shape
        max_image_height = image_height if static_shape else self.max_image_shape
        min_image_width = image_width if static_shape else self.min_image_shape
        max_image_width = image_width if static_shape else self.max_image_shape
        min_latent_height = latent_height if static_shape else self.min_latent_shape
        max_latent_height = latent_height if static_shape else self.max_latent_shape
        min_latent_width = latent_width if static_shape else self.min_latent_shape
        max_latent_width = latent_width if static_shape else self.max_latent_shape
        return (
            min_batch,
            max_batch,
            min_image_height,
            max_image_height,
            min_image_width,
            max_image_width,
            min_latent_height,
            max_latent_height,
            min_latent_width,
            max_latent_width,
        )
def build_engine(
    engine_path: str,
    onnx_opt_path: str,
    model_data: BaseModel,
    opt_image_height: int,
    opt_image_width: int,
    opt_batch_size: int,
    build_static_batch: bool = False,
    build_dynamic_shape: bool = False,
    build_all_tactics: bool = False,
    build_enable_refit: bool = False,
):
    """Build a TensorRT engine at *engine_path* from an optimized ONNX model.

    Returns the (not yet loaded) Engine wrapper.
    """
    gib = 2**30
    _, available_memory, _ = cudart.cudaMemGetInfo()
    # Reserve 4 GiB for activations when at least 6 GiB is free; otherwise
    # pass 0 and let TensorRT choose the workspace size itself.
    workspace = available_memory - 4 * gib if available_memory > 6 * gib else 0
    profile = model_data.get_input_profile(
        opt_batch_size,
        opt_image_height,
        opt_image_width,
        static_batch=build_static_batch,
        static_shape=not build_dynamic_shape,
    )
    trt_engine = Engine(engine_path)
    trt_engine.build(
        onnx_opt_path,
        fp16=True,
        input_profile=profile,
        enable_refit=build_enable_refit,
        enable_all_tactics=build_all_tactics,
        workspace_size=workspace,
    )
    return trt_engine
8,740 | import gc
from collections import OrderedDict
from typing import *
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
CreateConfig,
Profile,
engine_from_bytes,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models import CLIP, VAE, BaseModel, UNet, VAEEncoder
class BaseModel:
    """Base description of an ONNX-exportable Stable Diffusion sub-model.

    Subclasses override the ``get_*`` hooks to describe their inputs, outputs
    and sample data; this base class provides the shared dimension
    bookkeeping and the common ONNX graph-optimization pipeline.
    """

    def __init__(
        self,
        fp16=False,
        device="cuda",
        verbose=True,
        max_batch_size=16,
        min_batch_size=1,
        embedding_dim=768,
        text_maxlen=77,
    ):
        self.name = "SD Model"
        self.fp16 = fp16
        self.device = device
        self.verbose = verbose
        self.min_batch = min_batch_size
        self.max_batch = max_batch_size
        self.min_image_shape = 256  # min image resolution: 256x256
        self.max_image_shape = 1024  # max image resolution: 1024x1024
        # Latent space is 8x smaller than pixel space in SD's VAE.
        self.min_latent_shape = self.min_image_shape // 8
        self.max_latent_shape = self.max_image_shape // 8
        self.embedding_dim = embedding_dim
        self.text_maxlen = text_maxlen

    def get_model(self):
        """Return the torch module to export; implemented by subclasses."""
        pass

    def get_input_names(self):
        """Return the ordered ONNX input names; implemented by subclasses."""
        pass

    def get_output_names(self):
        """Return the ordered ONNX output names; implemented by subclasses."""
        pass

    def get_dynamic_axes(self):
        """Return the dynamic-axes mapping for ONNX export (None = static)."""
        return None

    def get_sample_input(self, batch_size, image_height, image_width):
        """Return sample tensors for ONNX tracing; implemented by subclasses."""
        pass

    def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Return the TensorRT optimization profile (None = no profile)."""
        return None

    def get_shape_dict(self, batch_size, image_height, image_width):
        """Return concrete binding shapes (None = engine defaults)."""
        return None

    def optimize(self, onnx_graph):
        """Run the shared cleanup / constant-fold / shape-inference pass."""
        opt = Optimizer(onnx_graph, verbose=self.verbose)
        opt.info(self.name + ": original")
        opt.cleanup()
        opt.info(self.name + ": cleanup")
        opt.fold_constants()
        opt.info(self.name + ": fold constants")
        opt.infer_shapes()
        opt.info(self.name + ": shape inference")
        onnx_opt_graph = opt.cleanup(return_onnx=True)
        opt.info(self.name + ": finished")
        return onnx_opt_graph

    def check_dims(self, batch_size, image_height, image_width):
        """Validate sizes and return ``(latent_height, latent_width)``.

        Raises AssertionError when any dimension is out of range.
        """
        assert self.min_batch <= batch_size <= self.max_batch
        # BUGFIX: both dimensions must be multiples of 8 (latents are dim//8);
        # the original `or` accepted images with one misaligned side.
        assert image_height % 8 == 0 and image_width % 8 == 0
        latent_height = image_height // 8
        latent_width = image_width // 8
        assert self.min_latent_shape <= latent_height <= self.max_latent_shape
        assert self.min_latent_shape <= latent_width <= self.max_latent_shape
        return (latent_height, latent_width)

    def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Return min/max bounds for batch, image and latent dimensions.

        *static_batch* / *static_shape* pin the respective bounds to the
        supplied values instead of the configured ranges.
        """
        min_batch = batch_size if static_batch else self.min_batch
        max_batch = batch_size if static_batch else self.max_batch
        latent_height = image_height // 8
        latent_width = image_width // 8
        min_image_height = image_height if static_shape else self.min_image_shape
        max_image_height = image_height if static_shape else self.max_image_shape
        min_image_width = image_width if static_shape else self.min_image_shape
        max_image_width = image_width if static_shape else self.max_image_shape
        min_latent_height = latent_height if static_shape else self.min_latent_shape
        max_latent_height = latent_height if static_shape else self.max_latent_shape
        min_latent_width = latent_width if static_shape else self.min_latent_shape
        max_latent_width = latent_width if static_shape else self.max_latent_shape
        return (
            min_batch,
            max_batch,
            min_image_height,
            max_image_height,
            min_image_width,
            max_image_width,
            min_latent_height,
            max_latent_height,
            min_latent_width,
            max_latent_width,
        )
def export_onnx(
    model,
    onnx_path: str,
    model_data: BaseModel,
    opt_image_height: int,
    opt_image_width: int,
    opt_batch_size: int,
    onnx_opset: int,
):
    """Trace *model* with sample inputs from *model_data* and write the ONNX
    graph to *onnx_path*."""
    with torch.inference_mode(), torch.autocast("cuda"):
        sample_inputs = model_data.get_sample_input(
            opt_batch_size, opt_image_height, opt_image_width
        )
        torch.onnx.export(
            model,
            sample_inputs,
            onnx_path,
            export_params=True,
            opset_version=onnx_opset,
            do_constant_folding=True,
            input_names=model_data.get_input_names(),
            output_names=model_data.get_output_names(),
            dynamic_axes=model_data.get_dynamic_axes(),
        )
    # Drop the source model eagerly; export can leave large buffers alive.
    del model
    gc.collect()
    torch.cuda.empty_cache()
8,741 | import gc
from collections import OrderedDict
from typing import *
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
CreateConfig,
Profile,
engine_from_bytes,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models import CLIP, VAE, BaseModel, UNet, VAEEncoder
class BaseModel:
    """Base description of an ONNX-exportable Stable Diffusion sub-model.

    Subclasses override the ``get_*`` hooks to describe their inputs, outputs
    and sample data; this base class provides the shared dimension
    bookkeeping and the common ONNX graph-optimization pipeline.
    """

    def __init__(
        self,
        fp16=False,
        device="cuda",
        verbose=True,
        max_batch_size=16,
        min_batch_size=1,
        embedding_dim=768,
        text_maxlen=77,
    ):
        self.name = "SD Model"
        self.fp16 = fp16
        self.device = device
        self.verbose = verbose
        self.min_batch = min_batch_size
        self.max_batch = max_batch_size
        self.min_image_shape = 256  # min image resolution: 256x256
        self.max_image_shape = 1024  # max image resolution: 1024x1024
        # Latent space is 8x smaller than pixel space in SD's VAE.
        self.min_latent_shape = self.min_image_shape // 8
        self.max_latent_shape = self.max_image_shape // 8
        self.embedding_dim = embedding_dim
        self.text_maxlen = text_maxlen

    def get_model(self):
        """Return the torch module to export; implemented by subclasses."""
        pass

    def get_input_names(self):
        """Return the ordered ONNX input names; implemented by subclasses."""
        pass

    def get_output_names(self):
        """Return the ordered ONNX output names; implemented by subclasses."""
        pass

    def get_dynamic_axes(self):
        """Return the dynamic-axes mapping for ONNX export (None = static)."""
        return None

    def get_sample_input(self, batch_size, image_height, image_width):
        """Return sample tensors for ONNX tracing; implemented by subclasses."""
        pass

    def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Return the TensorRT optimization profile (None = no profile)."""
        return None

    def get_shape_dict(self, batch_size, image_height, image_width):
        """Return concrete binding shapes (None = engine defaults)."""
        return None

    def optimize(self, onnx_graph):
        """Run the shared cleanup / constant-fold / shape-inference pass."""
        opt = Optimizer(onnx_graph, verbose=self.verbose)
        opt.info(self.name + ": original")
        opt.cleanup()
        opt.info(self.name + ": cleanup")
        opt.fold_constants()
        opt.info(self.name + ": fold constants")
        opt.infer_shapes()
        opt.info(self.name + ": shape inference")
        onnx_opt_graph = opt.cleanup(return_onnx=True)
        opt.info(self.name + ": finished")
        return onnx_opt_graph

    def check_dims(self, batch_size, image_height, image_width):
        """Validate sizes and return ``(latent_height, latent_width)``.

        Raises AssertionError when any dimension is out of range.
        """
        assert self.min_batch <= batch_size <= self.max_batch
        # BUGFIX: both dimensions must be multiples of 8 (latents are dim//8);
        # the original `or` accepted images with one misaligned side.
        assert image_height % 8 == 0 and image_width % 8 == 0
        latent_height = image_height // 8
        latent_width = image_width // 8
        assert self.min_latent_shape <= latent_height <= self.max_latent_shape
        assert self.min_latent_shape <= latent_width <= self.max_latent_shape
        return (latent_height, latent_width)

    def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Return min/max bounds for batch, image and latent dimensions.

        *static_batch* / *static_shape* pin the respective bounds to the
        supplied values instead of the configured ranges.
        """
        min_batch = batch_size if static_batch else self.min_batch
        max_batch = batch_size if static_batch else self.max_batch
        latent_height = image_height // 8
        latent_width = image_width // 8
        min_image_height = image_height if static_shape else self.min_image_shape
        max_image_height = image_height if static_shape else self.max_image_shape
        min_image_width = image_width if static_shape else self.min_image_shape
        max_image_width = image_width if static_shape else self.max_image_shape
        min_latent_height = latent_height if static_shape else self.min_latent_shape
        max_latent_height = latent_height if static_shape else self.max_latent_shape
        min_latent_width = latent_width if static_shape else self.min_latent_shape
        max_latent_width = latent_width if static_shape else self.max_latent_shape
        return (
            min_batch,
            max_batch,
            min_image_height,
            max_image_height,
            min_image_width,
            max_image_width,
            min_latent_height,
            max_latent_height,
            min_latent_width,
            max_latent_width,
        )
def optimize_onnx(
    onnx_path: str,
    onnx_opt_path: str,
    model_data: BaseModel,
):
    """Load the ONNX file at *onnx_path*, run *model_data*'s graph
    optimizations, and save the result to *onnx_opt_path*."""
    graph = onnx.load(onnx_path)
    optimized_graph = model_data.optimize(graph)
    onnx.save(optimized_graph, onnx_opt_path)
    # Free the (potentially large) in-memory graph before returning.
    del optimized_graph
    gc.collect()
    torch.cuda.empty_cache()
8,742 | import gc
import os
from typing import *
import torch
from .models import BaseModel
from .utilities import (
build_engine,
export_onnx,
optimize_onnx,
)
def create_onnx_path(name, onnx_dir, opt=True):
    """Return the ONNX file path for *name* under *onnx_dir*.

    Optimized graphs get the ``.opt.onnx`` suffix, raw exports ``.onnx``.
    """
    suffix = ".opt.onnx" if opt else ".onnx"
    return os.path.join(onnx_dir, name + suffix)
8,743 | from typing import Literal, Optional
import fire
from packaging.version import Version
from ..pip_utils import is_installed, run_pip, version
import platform
def get_cuda_version_from_torch() -> Optional[Literal["11", "12"]]:
    """Return the CUDA major version ("11"/"12") torch was built with.

    Returns None when torch is not installed or is a CPU-only build.
    """
    try:
        import torch
    except ImportError:
        return None
    cuda_version = torch.version.cuda
    # BUGFIX: torch.version.cuda is None on CPU-only builds; the original
    # called .split() on it and raised AttributeError.
    if cuda_version is None:
        return None
    return cuda_version.split(".")[0]
def version(package: str) -> Optional[Version]:
try:
return Version(importlib.import_module(package).__version__)
except ModuleNotFoundError:
return None
def is_installed(package: str) -> bool:
    """Return True when *package* can be located by the import machinery."""
    try:
        # find_spec raises ModuleNotFoundError for a missing parent package.
        return importlib.util.find_spec(package) is not None
    except ModuleNotFoundError:
        return False
def run_pip(command: str, env: Optional[Dict[str, str]] = None) -> str:
    """Run ``pip <command>`` through the current interpreter.

    Delegates to run_python (defined elsewhere); *env*, when given,
    presumably overrides the subprocess environment — TODO confirm
    against run_python's implementation.
    """
    return run_python(f"-m pip {command}", env)
def install(cu: Optional[Literal["11", "12"]] = get_cuda_version_from_torch()):
    """Install TensorRT runtime requirements for CUDA major version *cu*.

    NOTE: the default is evaluated once, at import time, from whatever torch
    build is present then.
    """
    if cu is None or cu not in ["11", "12"]:
        print("Could not detect CUDA version. Please specify manually.")
        return
    print("Installing TensorRT requirements...")
    if is_installed("tensorrt"):
        installed_trt = version("tensorrt")
        # BUGFIX: version() may return None (undeterminable version); the
        # original compared None < Version(...) and raised TypeError.
        if installed_trt is not None and installed_trt < Version("9.0.0"):
            run_pip("uninstall -y tensorrt")
    cudnn_name = f"nvidia-cudnn-cu{cu}==8.9.4.25"
    if not is_installed("tensorrt"):
        run_pip(f"install {cudnn_name} --no-cache-dir")
        run_pip(
            "install --pre --extra-index-url https://pypi.nvidia.com tensorrt==9.0.1.post11.dev4 --no-cache-dir"
        )
    if not is_installed("polygraphy"):
        run_pip(
            "install polygraphy==0.47.1 --extra-index-url https://pypi.ngc.nvidia.com"
        )
    if not is_installed("onnx_graphsurgeon"):
        run_pip(
            "install onnx-graphsurgeon==0.3.26 --extra-index-url https://pypi.ngc.nvidia.com"
        )
    # pywin32 is a Windows-only requirement.
    if platform.system() == 'Windows' and not is_installed("pywin32"):
        run_pip(
            "install pywin32"
        )
    # (dead trailing `pass` removed)
8,744 | import os
import sys
import threading
import time
import tkinter as tk
from multiprocessing import Process, Queue, get_context
from typing import List, Literal
import fire
from PIL import Image, ImageTk
from streamdiffusion.image_utils import postprocess_image
from utils.wrapper import StreamDiffusionWrapper
class StreamDiffusionWrapper:
    """High-level convenience wrapper around StreamDiffusion.

    Loads a Stable Diffusion pipeline, optionally applies LCM-LoRA / TinyVAE
    and hardware acceleration (xformers, StableFast, or TensorRT engine
    compilation with on-disk caching), and exposes simple txt2img / img2img
    entry points.
    """

    def __init__(
        self,
        model_id_or_path: str,
        t_index_list: List[int],
        lora_dict: Optional[Dict[str, float]] = None,
        mode: Literal["img2img", "txt2img"] = "img2img",
        output_type: Literal["pil", "pt", "np", "latent"] = "pil",
        lcm_lora_id: Optional[str] = None,
        vae_id: Optional[str] = None,
        device: Literal["cpu", "cuda"] = "cuda",
        dtype: torch.dtype = torch.float16,
        frame_buffer_size: int = 1,
        width: int = 512,
        height: int = 512,
        warmup: int = 10,
        acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
        do_add_noise: bool = True,
        device_ids: Optional[List[int]] = None,
        use_lcm_lora: bool = True,
        use_tiny_vae: bool = True,
        enable_similar_image_filter: bool = False,
        similar_image_filter_threshold: float = 0.98,
        similar_image_filter_max_skip_frame: int = 10,
        use_denoising_batch: bool = True,
        cfg_type: Literal["none", "full", "self", "initialize"] = "self",
        seed: int = 2,
        use_safety_checker: bool = False,
        engine_dir: Optional[Union[str, Path]] = "engines",
    ):
        """
        Initializes the StreamDiffusionWrapper.
        Parameters
        ----------
        model_id_or_path : str
            The model id or path to load.
        t_index_list : List[int]
            The t_index_list to use for inference.
        lora_dict : Optional[Dict[str, float]], optional
            The lora_dict to load, by default None.
            Keys are the LoRA names and values are the LoRA scales.
            Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
        mode : Literal["img2img", "txt2img"], optional
            txt2img or img2img, by default "img2img".
        output_type : Literal["pil", "pt", "np", "latent"], optional
            The output type of image, by default "pil".
        lcm_lora_id : Optional[str], optional
            The lcm_lora_id to load, by default None.
            If None, the default LCM-LoRA
            ("latent-consistency/lcm-lora-sdv1-5") will be used.
        vae_id : Optional[str], optional
            The vae_id to load, by default None.
            If None, the default TinyVAE
            ("madebyollin/taesd") will be used.
        device : Literal["cpu", "cuda"], optional
            The device to use for inference, by default "cuda".
        dtype : torch.dtype, optional
            The dtype for inference, by default torch.float16.
        frame_buffer_size : int, optional
            The frame buffer size for denoising batch, by default 1.
        width : int, optional
            The width of the image, by default 512.
        height : int, optional
            The height of the image, by default 512.
        warmup : int, optional
            The number of warmup steps to perform, by default 10.
        acceleration : Literal["none", "xformers", "tensorrt"], optional
            The acceleration method, by default "tensorrt".
        do_add_noise : bool, optional
            Whether to add noise for following denoising steps or not,
            by default True.
        device_ids : Optional[List[int]], optional
            The device ids to use for DataParallel, by default None.
        use_lcm_lora : bool, optional
            Whether to use LCM-LoRA or not, by default True.
        use_tiny_vae : bool, optional
            Whether to use TinyVAE or not, by default True.
        enable_similar_image_filter : bool, optional
            Whether to enable similar image filter or not,
            by default False.
        similar_image_filter_threshold : float, optional
            The threshold for similar image filter, by default 0.98.
        similar_image_filter_max_skip_frame : int, optional
            The max skip frame for similar image filter, by default 10.
        use_denoising_batch : bool, optional
            Whether to use denoising batch or not, by default True.
        cfg_type : Literal["none", "full", "self", "initialize"],
            optional
            The cfg_type for img2img mode, by default "self".
            You cannot use anything other than "none" for txt2img mode.
        seed : int, optional
            The seed, by default 2.
        use_safety_checker : bool, optional
            Whether to use safety checker or not, by default False.
        """
        # SD-Turbo models are detected purely by name.
        self.sd_turbo = "turbo" in model_id_or_path
        # Validate the mode/cfg/batching combination up front.
        if mode == "txt2img":
            if cfg_type != "none":
                raise ValueError(
                    f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
                )
            if use_denoising_batch and frame_buffer_size > 1:
                if not self.sd_turbo:
                    raise ValueError(
                        "txt2img mode cannot use denoising batch with frame_buffer_size > 1."
                    )
        if mode == "img2img":
            if not use_denoising_batch:
                raise NotImplementedError(
                    "img2img mode must use denoising batch for now."
                )
        self.device = device
        self.dtype = dtype
        self.width = width
        self.height = height
        self.mode = mode
        self.output_type = output_type
        self.frame_buffer_size = frame_buffer_size
        # With denoising batching, one forward pass covers every t-index for
        # every buffered frame.
        self.batch_size = (
            len(t_index_list) * frame_buffer_size
            if use_denoising_batch
            else frame_buffer_size
        )
        self.use_denoising_batch = use_denoising_batch
        self.use_safety_checker = use_safety_checker
        self.stream: StreamDiffusion = self._load_model(
            model_id_or_path=model_id_or_path,
            lora_dict=lora_dict,
            lcm_lora_id=lcm_lora_id,
            vae_id=vae_id,
            t_index_list=t_index_list,
            acceleration=acceleration,
            warmup=warmup,
            do_add_noise=do_add_noise,
            use_lcm_lora=use_lcm_lora,
            use_tiny_vae=use_tiny_vae,
            cfg_type=cfg_type,
            seed=seed,
            engine_dir=engine_dir,
        )
        if device_ids is not None:
            self.stream.unet = torch.nn.DataParallel(
                self.stream.unet, device_ids=device_ids
            )
        if enable_similar_image_filter:
            self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)

    def prepare(
        self,
        prompt: str,
        negative_prompt: str = "",
        num_inference_steps: int = 50,
        guidance_scale: float = 1.2,
        delta: float = 1.0,
    ) -> None:
        """
        Prepares the model for inference.
        Parameters
        ----------
        prompt : str
            The prompt to generate images from.
        negative_prompt : str, optional
            The negative prompt, by default "".
        num_inference_steps : int, optional
            The number of inference steps to perform, by default 50.
        guidance_scale : float, optional
            The guidance scale to use, by default 1.2.
        delta : float, optional
            The delta multiplier of virtual residual noise,
            by default 1.0.
        """
        self.stream.prepare(
            prompt,
            negative_prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            delta=delta,
        )

    def __call__(
        self,
        image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
        prompt: Optional[str] = None,
    ) -> Union[Image.Image, List[Image.Image]]:
        """
        Performs img2img or txt2img based on the mode.
        Parameters
        ----------
        image : Optional[Union[str, Image.Image, torch.Tensor]]
            The image to generate from.
        prompt : Optional[str]
            The prompt to generate images from.
        Returns
        -------
        Union[Image.Image, List[Image.Image]]
            The generated image.
        """
        if self.mode == "img2img":
            return self.img2img(image, prompt)
        else:
            return self.txt2img(prompt)

    def txt2img(
        self, prompt: Optional[str] = None
    ) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
        """
        Performs txt2img.
        Parameters
        ----------
        prompt : Optional[str]
            The prompt to generate images from.
        Returns
        -------
        Union[Image.Image, List[Image.Image]]
            The generated image.
        """
        if prompt is not None:
            self.stream.update_prompt(prompt)
        # SD-Turbo uses its dedicated single-step path.
        if self.sd_turbo:
            image_tensor = self.stream.txt2img_sd_turbo(self.batch_size)
        else:
            image_tensor = self.stream.txt2img(self.frame_buffer_size)
        image = self.postprocess_image(image_tensor, output_type=self.output_type)
        if self.use_safety_checker:
            safety_checker_input = self.feature_extractor(
                image, return_tensors="pt"
            ).to(self.device)
            _, has_nsfw_concept = self.safety_checker(
                images=image_tensor.to(self.dtype),
                clip_input=safety_checker_input.pixel_values.to(self.dtype),
            )
            # NOTE(review): only the first batch element's NSFW flag is
            # checked, even when multiple images were generated.
            image = self.nsfw_fallback_img if has_nsfw_concept[0] else image
        return image

    def img2img(
        self, image: Union[str, Image.Image, torch.Tensor], prompt: Optional[str] = None
    ) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
        """
        Performs img2img.
        Parameters
        ----------
        image : Union[str, Image.Image, torch.Tensor]
            The image to generate from.
        prompt : Optional[str]
            The prompt to generate images from.
        Returns
        -------
        Image.Image
            The generated image.
        """
        if prompt is not None:
            self.stream.update_prompt(prompt)
        if isinstance(image, str) or isinstance(image, Image.Image):
            image = self.preprocess_image(image)
        image_tensor = self.stream(image)
        image = self.postprocess_image(image_tensor, output_type=self.output_type)
        if self.use_safety_checker:
            safety_checker_input = self.feature_extractor(
                image, return_tensors="pt"
            ).to(self.device)
            _, has_nsfw_concept = self.safety_checker(
                images=image_tensor.to(self.dtype),
                clip_input=safety_checker_input.pixel_values.to(self.dtype),
            )
            image = self.nsfw_fallback_img if has_nsfw_concept[0] else image
        return image

    def preprocess_image(self, image: Union[str, Image.Image]) -> torch.Tensor:
        """
        Preprocesses the image.
        Parameters
        ----------
        image : Union[str, Image.Image, torch.Tensor]
            The image to preprocess.
        Returns
        -------
        torch.Tensor
            The preprocessed image.
        """
        if isinstance(image, str):
            image = Image.open(image).convert("RGB").resize((self.width, self.height))
        if isinstance(image, Image.Image):
            image = image.convert("RGB").resize((self.width, self.height))
        return self.stream.image_processor.preprocess(
            image, self.height, self.width
        ).to(device=self.device, dtype=self.dtype)

    def postprocess_image(
        self, image_tensor: torch.Tensor, output_type: str = "pil"
    ) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
        """
        Postprocesses the image.
        Parameters
        ----------
        image_tensor : torch.Tensor
            The image tensor to postprocess.
        output_type : str, optional
            The output type, by default "pil".
        Returns
        -------
        Union[Image.Image, List[Image.Image]]
            The postprocessed image(s): the full list when buffering more
            than one frame, otherwise just the first image.
        """
        if self.frame_buffer_size > 1:
            return postprocess_image(image_tensor.cpu(), output_type=output_type)
        else:
            return postprocess_image(image_tensor.cpu(), output_type=output_type)[0]

    def _load_model(
        self,
        model_id_or_path: str,
        t_index_list: List[int],
        lora_dict: Optional[Dict[str, float]] = None,
        lcm_lora_id: Optional[str] = None,
        vae_id: Optional[str] = None,
        acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
        warmup: int = 10,
        do_add_noise: bool = True,
        use_lcm_lora: bool = True,
        use_tiny_vae: bool = True,
        cfg_type: Literal["none", "full", "self", "initialize"] = "self",
        seed: int = 2,
        engine_dir: Optional[Union[str, Path]] = "engines",
    ) -> StreamDiffusion:
        """
        Loads the model.
        This method does the following:
        1. Loads the model from the model_id_or_path.
        2. Loads and fuses the LCM-LoRA model from the lcm_lora_id if needed.
        3. Loads the VAE model from the vae_id if needed.
        4. Enables acceleration if needed.
        5. Prepares the model for inference.
        6. Load the safety checker if needed.
        Parameters
        ----------
        model_id_or_path : str
            The model id or path to load.
        t_index_list : List[int]
            The t_index_list to use for inference.
        lora_dict : Optional[Dict[str, float]], optional
            The lora_dict to load, by default None.
            Keys are the LoRA names and values are the LoRA scales.
            Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
        lcm_lora_id : Optional[str], optional
            The lcm_lora_id to load, by default None.
        vae_id : Optional[str], optional
            The vae_id to load, by default None.
        acceleration : Literal["none", "xfomers", "sfast", "tensorrt"], optional
            The acceleration method, by default "tensorrt".
        warmup : int, optional
            The number of warmup steps to perform, by default 10.
        do_add_noise : bool, optional
            Whether to add noise for following denoising steps or not,
            by default True.
        use_lcm_lora : bool, optional
            Whether to use LCM-LoRA or not, by default True.
        use_tiny_vae : bool, optional
            Whether to use TinyVAE or not, by default True.
        cfg_type : Literal["none", "full", "self", "initialize"],
            optional
            The cfg_type for img2img mode, by default "self".
            You cannot use anything other than "none" for txt2img mode.
        seed : int, optional
            The seed, by default 2.
        Returns
        -------
        StreamDiffusion
            The loaded model.
        """
        # Try a diffusers-style directory/repo first; a ValueError falls back
        # to single-file checkpoint loading; anything else aborts the process.
        try:  # Load from local directory
            pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(
                model_id_or_path,
            ).to(device=self.device, dtype=self.dtype)
        except ValueError:  # Load from huggingface
            pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_single_file(
                model_id_or_path,
            ).to(device=self.device, dtype=self.dtype)
        except Exception:  # No model found
            traceback.print_exc()
            print("Model load has failed. Doesn't exist.")
            exit()
        stream = StreamDiffusion(
            pipe=pipe,
            t_index_list=t_index_list,
            torch_dtype=self.dtype,
            width=self.width,
            height=self.height,
            do_add_noise=do_add_noise,
            frame_buffer_size=self.frame_buffer_size,
            use_denoising_batch=self.use_denoising_batch,
            cfg_type=cfg_type,
        )
        # SD-Turbo needs no LCM-LoRA; skip fusing for it.
        if not self.sd_turbo:
            if use_lcm_lora:
                if lcm_lora_id is not None:
                    stream.load_lcm_lora(
                        pretrained_model_name_or_path_or_dict=lcm_lora_id
                    )
                else:
                    stream.load_lcm_lora()
                stream.fuse_lora()
            if lora_dict is not None:
                for lora_name, lora_scale in lora_dict.items():
                    stream.load_lora(lora_name)
                    stream.fuse_lora(lora_scale=lora_scale)
                    print(f"Use LoRA: {lora_name} in weights {lora_scale}")
        if use_tiny_vae:
            if vae_id is not None:
                stream.vae = AutoencoderTiny.from_pretrained(vae_id).to(
                    device=pipe.device, dtype=pipe.dtype
                )
            else:
                stream.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd").to(
                    device=pipe.device, dtype=pipe.dtype
                )
        # Acceleration is best-effort: any failure falls through to the
        # un-accelerated pipeline below.
        try:
            if acceleration == "xformers":
                stream.pipe.enable_xformers_memory_efficient_attention()
            if acceleration == "tensorrt":
                from polygraphy import cuda
                from streamdiffusion.acceleration.tensorrt import (
                    TorchVAEEncoder,
                    compile_unet,
                    compile_vae_decoder,
                    compile_vae_encoder,
                )
                from streamdiffusion.acceleration.tensorrt.engine import (
                    AutoencoderKLEngine,
                    UNet2DConditionModelEngine,
                )
                from streamdiffusion.acceleration.tensorrt.models import (
                    VAE,
                    UNet,
                    VAEEncoder,
                )

                # Engine cache key: model + config knobs that change the graph.
                def create_prefix(
                    model_id_or_path: str,
                    max_batch_size: int,
                    min_batch_size: int,
                ):
                    maybe_path = Path(model_id_or_path)
                    if maybe_path.exists():
                        return f"{maybe_path.stem}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"
                    else:
                        return f"{model_id_or_path}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"

                engine_dir = Path(engine_dir)
                unet_path = os.path.join(
                    engine_dir,
                    create_prefix(
                        model_id_or_path=model_id_or_path,
                        max_batch_size=stream.trt_unet_batch_size,
                        min_batch_size=stream.trt_unet_batch_size,
                    ),
                    "unet.engine",
                )
                vae_encoder_path = os.path.join(
                    engine_dir,
                    create_prefix(
                        model_id_or_path=model_id_or_path,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    ),
                    "vae_encoder.engine",
                )
                vae_decoder_path = os.path.join(
                    engine_dir,
                    create_prefix(
                        model_id_or_path=model_id_or_path,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    ),
                    "vae_decoder.engine",
                )
                # Compile each engine only when its cache file is missing.
                if not os.path.exists(unet_path):
                    os.makedirs(os.path.dirname(unet_path), exist_ok=True)
                    unet_model = UNet(
                        fp16=True,
                        device=stream.device,
                        max_batch_size=stream.trt_unet_batch_size,
                        min_batch_size=stream.trt_unet_batch_size,
                        embedding_dim=stream.text_encoder.config.hidden_size,
                        unet_dim=stream.unet.config.in_channels,
                    )
                    compile_unet(
                        stream.unet,
                        unet_model,
                        unet_path + ".onnx",
                        unet_path + ".opt.onnx",
                        unet_path,
                        opt_batch_size=stream.trt_unet_batch_size,
                    )
                if not os.path.exists(vae_decoder_path):
                    os.makedirs(os.path.dirname(vae_decoder_path), exist_ok=True)
                    # Temporarily alias forward to decode so ONNX export
                    # traces the decoder; restored via delattr below.
                    stream.vae.forward = stream.vae.decode
                    vae_decoder_model = VAE(
                        device=stream.device,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                    compile_vae_decoder(
                        stream.vae,
                        vae_decoder_model,
                        vae_decoder_path + ".onnx",
                        vae_decoder_path + ".opt.onnx",
                        vae_decoder_path,
                        opt_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                    delattr(stream.vae, "forward")
                if not os.path.exists(vae_encoder_path):
                    os.makedirs(os.path.dirname(vae_encoder_path), exist_ok=True)
                    vae_encoder = TorchVAEEncoder(stream.vae).to(torch.device("cuda"))
                    vae_encoder_model = VAEEncoder(
                        device=stream.device,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                    compile_vae_encoder(
                        vae_encoder,
                        vae_encoder_model,
                        vae_encoder_path + ".onnx",
                        vae_encoder_path + ".opt.onnx",
                        vae_encoder_path,
                        opt_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                # NOTE(review): "cuda_steram" is a typo for "cuda_stream";
                # kept as-is since it is only a local name.
                cuda_steram = cuda.Stream()
                # Preserve the torch VAE's config/dtype so downstream code
                # that reads them keeps working after the engine swap.
                vae_config = stream.vae.config
                vae_dtype = stream.vae.dtype
                stream.unet = UNet2DConditionModelEngine(
                    unet_path, cuda_steram, use_cuda_graph=False
                )
                stream.vae = AutoencoderKLEngine(
                    vae_encoder_path,
                    vae_decoder_path,
                    cuda_steram,
                    stream.pipe.vae_scale_factor,
                    use_cuda_graph=False,
                )
                setattr(stream.vae, "config", vae_config)
                setattr(stream.vae, "dtype", vae_dtype)
                gc.collect()
                torch.cuda.empty_cache()
                print("TensorRT acceleration enabled.")
            if acceleration == "sfast":
                from streamdiffusion.acceleration.sfast import (
                    accelerate_with_stable_fast,
                )

                stream = accelerate_with_stable_fast(stream)
                print("StableFast acceleration enabled.")
        except Exception:
            traceback.print_exc()
            print("Acceleration has failed. Falling back to normal mode.")
        if seed < 0:  # Random seed
            seed = np.random.randint(0, 1000000)
        stream.prepare(
            "",
            "",
            num_inference_steps=50,
            guidance_scale=1.1
            if stream.cfg_type in ["full", "self", "initialize"]
            else 1.0,
            generator=torch.manual_seed(seed),
            seed=seed,
        )
        if self.use_safety_checker:
            from transformers import CLIPFeatureExtractor
            from diffusers.pipelines.stable_diffusion.safety_checker import (
                StableDiffusionSafetyChecker,
            )

            self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
                "CompVis/stable-diffusion-safety-checker"
            ).to(pipe.device)
            self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
                "openai/clip-vit-base-patch32"
            )
            # Solid black fallback image returned when NSFW is detected.
            self.nsfw_fallback_img = Image.new("RGB", (512, 512), (0, 0, 0))
        return stream
The provided code snippet includes necessary dependencies for implementing the `image_generation_process` function. Write a Python function `def image_generation_process( queue: Queue, fps_queue: Queue, prompt: str, model_id_or_path: str, batch_size: int = 10, acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt", ) -> None` to solve the following problem:
Process for generating images based on a prompt using a specified model. Parameters ---------- queue : Queue The queue to put the generated images in. fps_queue : Queue The queue to put the calculated fps. prompt : str The prompt to generate images from. model_id_or_path : str The name of the model to use for image generation. batch_size : int The batch size to use for image generation. acceleration : Literal["none", "xformers", "tensorrt"] The type of acceleration to use for image generation.
Here is the function:
def image_generation_process(
    queue: Queue,
    fps_queue: Queue,
    prompt: str,
    model_id_or_path: str,
    batch_size: int = 10,
    acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
) -> None:
    """
    Process for generating images based on a prompt using a specified model.
    Parameters
    ----------
    queue : Queue
        The queue to put the generated images in.
    fps_queue : Queue
        The queue to put the calculated fps.
    prompt : str
        The prompt to generate images from.
    model_id_or_path : str
        The name of the model to use for image generation.
    batch_size : int
        The batch size to use for image generation.
    acceleration : Literal["none", "xformers", "tensorrt"]
        The type of acceleration to use for image generation.
    """
    stream = StreamDiffusionWrapper(
        model_id_or_path=model_id_or_path,
        t_index_list=[0],
        frame_buffer_size=batch_size,
        warmup=10,
        acceleration=acceleration,
        use_lcm_lora=False,
        mode="txt2img",
        cfg_type="none",
        use_denoising_batch=True,
    )
    stream.prepare(
        prompt=prompt,
        num_inference_steps=50,
    )
    # BUGFIX: define fps before the loop so the KeyboardInterrupt handler
    # can print it even when interrupted during the very first iteration
    # (previously an UnboundLocalError).
    fps = 0.0
    while True:
        try:
            start_time = time.time()
            x_outputs = stream.stream.txt2img_sd_turbo(batch_size).cpu()
            queue.put(x_outputs, block=False)
            fps = 1 / (time.time() - start_time) * batch_size
            fps_queue.put(fps)
        except KeyboardInterrupt:
            print(f"fps: {fps}")
            return
8,745 | import os
import sys
import threading
import time
import tkinter as tk
from multiprocessing import Process, Queue, get_context
from typing import List, Literal
import fire
from PIL import Image, ImageTk
from streamdiffusion.image_utils import postprocess_image
from utils.wrapper import StreamDiffusionWrapper
def _receive_images(
    queue: Queue, fps_queue: Queue, labels: List[tk.Label], fps_label: tk.Label
) -> None:
    """
    Continuously receive images from a queue and update the labels.

    Runs off the Tk main thread; label updates are therefore scheduled onto
    the main thread via `after(0, ...)` rather than applied directly.

    Parameters
    ----------
    queue : Queue
        The queue to receive images from.
    fps_queue : Queue
        The queue to put the calculated fps.
    labels : List[tk.Label]
        The list of labels to update with images.
    fps_label : tk.Label
        The label to show fps.
    """
    while True:
        try:
            if not queue.empty():
                # Idiom fix: the original abused a list comprehension purely
                # for its side effects; a plain loop states the intent.
                for image_data in postprocess_image(
                    queue.get(block=False), output_type="pil"
                ):
                    labels[0].after(0, update_image, image_data, labels)
            if not fps_queue.empty():
                fps_label.config(text=f"FPS: {fps_queue.get(block=False):.2f}")

            # Short sleep keeps this polling loop from spinning a full core.
            time.sleep(0.0005)
        except KeyboardInterrupt:
            return
The provided code snippet includes necessary dependencies for implementing the `receive_images` function. Write a Python function `def receive_images(queue: Queue, fps_queue: Queue) -> None` to solve the following problem:
Setup the Tkinter window and start the thread to receive images. Parameters ---------- queue : Queue The queue to receive images from. fps_queue : Queue The queue to put the calculated fps.
Here is the function:
def receive_images(queue: Queue, fps_queue: Queue) -> None:
    """
    Setup the Tkinter window and start the thread to receive images.

    Parameters
    ----------
    queue : Queue
        The queue to receive images from.
    fps_queue : Queue
        The queue to put the calculated fps.
    """
    root = tk.Tk()
    root.title("Image Viewer")
    # 2x2 grid of image panes.
    labels = [tk.Label(root) for _ in range(4)]
    labels[0].grid(row=0, column=0)
    labels[1].grid(row=0, column=1)
    labels[2].grid(row=1, column=0)
    labels[3].grid(row=1, column=1)
    fps_label = tk.Label(root, text="FPS: 0")
    # Bug fix: grid() has no "rows" option (it raises
    # _tkinter.TclError: bad option "-rows"); the intended option is row=2.
    fps_label.grid(row=2, columnspan=2)

    # Daemon thread so the process can exit when the window closes.
    thread = threading.Thread(
        target=_receive_images, args=(queue, fps_queue, labels, fps_label), daemon=True
    )
    thread.start()

    try:
        root.mainloop()
    except KeyboardInterrupt:
        return
8,746 | import os
import sys
import time
from multiprocessing import Process, Queue, get_context
from typing import Literal
import fire
from utils.viewer import receive_images
from utils.wrapper import StreamDiffusionWrapper
class StreamDiffusionWrapper:
def __init__(
self,
model_id_or_path: str,
t_index_list: List[int],
lora_dict: Optional[Dict[str, float]] = None,
mode: Literal["img2img", "txt2img"] = "img2img",
output_type: Literal["pil", "pt", "np", "latent"] = "pil",
lcm_lora_id: Optional[str] = None,
vae_id: Optional[str] = None,
device: Literal["cpu", "cuda"] = "cuda",
dtype: torch.dtype = torch.float16,
frame_buffer_size: int = 1,
width: int = 512,
height: int = 512,
warmup: int = 10,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
do_add_noise: bool = True,
device_ids: Optional[List[int]] = None,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
use_safety_checker: bool = False,
engine_dir: Optional[Union[str, Path]] = "engines",
):
"""
Initializes the StreamDiffusionWrapper.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
mode : Literal["img2img", "txt2img"], optional
txt2img or img2img, by default "img2img".
output_type : Literal["pil", "pt", "np", "latent"], optional
The output type of image, by default "pil".
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
If None, the default LCM-LoRA
("latent-consistency/lcm-lora-sdv1-5") will be used.
vae_id : Optional[str], optional
The vae_id to load, by default None.
If None, the default TinyVAE
("madebyollin/taesd") will be used.
device : Literal["cpu", "cuda"], optional
The device to use for inference, by default "cuda".
dtype : torch.dtype, optional
The dtype for inference, by default torch.float16.
frame_buffer_size : int, optional
The frame buffer size for denoising batch, by default 1.
width : int, optional
The width of the image, by default 512.
height : int, optional
The height of the image, by default 512.
warmup : int, optional
The number of warmup steps to perform, by default 10.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
device_ids : Optional[List[int]], optional
The device ids to use for DataParallel, by default None.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
enable_similar_image_filter : bool, optional
Whether to enable similar image filter or not,
by default False.
similar_image_filter_threshold : float, optional
The threshold for similar image filter, by default 0.98.
similar_image_filter_max_skip_frame : int, optional
The max skip frame for similar image filter, by default 10.
use_denoising_batch : bool, optional
Whether to use denoising batch or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
use_safety_checker : bool, optional
Whether to use safety checker or not, by default False.
"""
self.sd_turbo = "turbo" in model_id_or_path
if mode == "txt2img":
if cfg_type != "none":
raise ValueError(
f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
)
if use_denoising_batch and frame_buffer_size > 1:
if not self.sd_turbo:
raise ValueError(
"txt2img mode cannot use denoising batch with frame_buffer_size > 1."
)
if mode == "img2img":
if not use_denoising_batch:
raise NotImplementedError(
"img2img mode must use denoising batch for now."
)
self.device = device
self.dtype = dtype
self.width = width
self.height = height
self.mode = mode
self.output_type = output_type
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.stream: StreamDiffusion = self._load_model(
model_id_or_path=model_id_or_path,
lora_dict=lora_dict,
lcm_lora_id=lcm_lora_id,
vae_id=vae_id,
t_index_list=t_index_list,
acceleration=acceleration,
warmup=warmup,
do_add_noise=do_add_noise,
use_lcm_lora=use_lcm_lora,
use_tiny_vae=use_tiny_vae,
cfg_type=cfg_type,
seed=seed,
engine_dir=engine_dir,
)
if device_ids is not None:
self.stream.unet = torch.nn.DataParallel(
self.stream.unet, device_ids=device_ids
)
if enable_similar_image_filter:
self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)
def prepare(
self,
prompt: str,
negative_prompt: str = "",
num_inference_steps: int = 50,
guidance_scale: float = 1.2,
delta: float = 1.0,
) -> None:
"""
Prepares the model for inference.
Parameters
----------
prompt : str
The prompt to generate images from.
num_inference_steps : int, optional
The number of inference steps to perform, by default 50.
guidance_scale : float, optional
The guidance scale to use, by default 1.2.
delta : float, optional
The delta multiplier of virtual residual noise,
by default 1.0.
"""
self.stream.prepare(
prompt,
negative_prompt,
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale,
delta=delta,
)
def __call__(
self,
image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,
) -> Union[Image.Image, List[Image.Image]]:
"""
Performs img2img or txt2img based on the mode.
Parameters
----------
image : Optional[Union[str, Image.Image, torch.Tensor]]
The image to generate from.
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image]]
The generated image.
"""
if self.mode == "img2img":
return self.img2img(image, prompt)
else:
return self.txt2img(prompt)
def txt2img(
self, prompt: Optional[str] = None
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Performs txt2img.
Parameters
----------
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image]]
The generated image.
"""
if prompt is not None:
self.stream.update_prompt(prompt)
if self.sd_turbo:
image_tensor = self.stream.txt2img_sd_turbo(self.batch_size)
else:
image_tensor = self.stream.txt2img(self.frame_buffer_size)
image = self.postprocess_image(image_tensor, output_type=self.output_type)
if self.use_safety_checker:
safety_checker_input = self.feature_extractor(
image, return_tensors="pt"
).to(self.device)
_, has_nsfw_concept = self.safety_checker(
images=image_tensor.to(self.dtype),
clip_input=safety_checker_input.pixel_values.to(self.dtype),
)
image = self.nsfw_fallback_img if has_nsfw_concept[0] else image
return image
def img2img(
self, image: Union[str, Image.Image, torch.Tensor], prompt: Optional[str] = None
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Performs img2img.
Parameters
----------
image : Union[str, Image.Image, torch.Tensor]
The image to generate from.
Returns
-------
Image.Image
The generated image.
"""
if prompt is not None:
self.stream.update_prompt(prompt)
if isinstance(image, str) or isinstance(image, Image.Image):
image = self.preprocess_image(image)
image_tensor = self.stream(image)
image = self.postprocess_image(image_tensor, output_type=self.output_type)
if self.use_safety_checker:
safety_checker_input = self.feature_extractor(
image, return_tensors="pt"
).to(self.device)
_, has_nsfw_concept = self.safety_checker(
images=image_tensor.to(self.dtype),
clip_input=safety_checker_input.pixel_values.to(self.dtype),
)
image = self.nsfw_fallback_img if has_nsfw_concept[0] else image
return image
def preprocess_image(self, image: Union[str, Image.Image]) -> torch.Tensor:
"""
Preprocesses the image.
Parameters
----------
image : Union[str, Image.Image, torch.Tensor]
The image to preprocess.
Returns
-------
torch.Tensor
The preprocessed image.
"""
if isinstance(image, str):
image = Image.open(image).convert("RGB").resize((self.width, self.height))
if isinstance(image, Image.Image):
image = image.convert("RGB").resize((self.width, self.height))
return self.stream.image_processor.preprocess(
image, self.height, self.width
).to(device=self.device, dtype=self.dtype)
def postprocess_image(
self, image_tensor: torch.Tensor, output_type: str = "pil"
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Postprocesses the image.
Parameters
----------
image_tensor : torch.Tensor
The image tensor to postprocess.
Returns
-------
Union[Image.Image, List[Image.Image]]
The postprocessed image.
"""
if self.frame_buffer_size > 1:
return postprocess_image(image_tensor.cpu(), output_type=output_type)
else:
return postprocess_image(image_tensor.cpu(), output_type=output_type)[0]
def _load_model(
self,
model_id_or_path: str,
t_index_list: List[int],
lora_dict: Optional[Dict[str, float]] = None,
lcm_lora_id: Optional[str] = None,
vae_id: Optional[str] = None,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
warmup: int = 10,
do_add_noise: bool = True,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
engine_dir: Optional[Union[str, Path]] = "engines",
) -> StreamDiffusion:
"""
Loads the model.
This method does the following:
1. Loads the model from the model_id_or_path.
2. Loads and fuses the LCM-LoRA model from the lcm_lora_id if needed.
3. Loads the VAE model from the vae_id if needed.
4. Enables acceleration if needed.
5. Prepares the model for inference.
6. Load the safety checker if needed.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
vae_id : Optional[str], optional
The vae_id to load, by default None.
        acceleration : Literal["none", "xformers", "sfast", "tensorrt"], optional
            The acceleration method, by default "tensorrt".
warmup : int, optional
The number of warmup steps to perform, by default 10.
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
Returns
-------
StreamDiffusion
The loaded model.
"""
try: # Load from local directory
pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(
model_id_or_path,
).to(device=self.device, dtype=self.dtype)
except ValueError: # Load from huggingface
pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_single_file(
model_id_or_path,
).to(device=self.device, dtype=self.dtype)
except Exception: # No model found
traceback.print_exc()
print("Model load has failed. Doesn't exist.")
exit()
stream = StreamDiffusion(
pipe=pipe,
t_index_list=t_index_list,
torch_dtype=self.dtype,
width=self.width,
height=self.height,
do_add_noise=do_add_noise,
frame_buffer_size=self.frame_buffer_size,
use_denoising_batch=self.use_denoising_batch,
cfg_type=cfg_type,
)
if not self.sd_turbo:
if use_lcm_lora:
if lcm_lora_id is not None:
stream.load_lcm_lora(
pretrained_model_name_or_path_or_dict=lcm_lora_id
)
else:
stream.load_lcm_lora()
stream.fuse_lora()
if lora_dict is not None:
for lora_name, lora_scale in lora_dict.items():
stream.load_lora(lora_name)
stream.fuse_lora(lora_scale=lora_scale)
print(f"Use LoRA: {lora_name} in weights {lora_scale}")
if use_tiny_vae:
if vae_id is not None:
stream.vae = AutoencoderTiny.from_pretrained(vae_id).to(
device=pipe.device, dtype=pipe.dtype
)
else:
stream.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd").to(
device=pipe.device, dtype=pipe.dtype
)
try:
if acceleration == "xformers":
stream.pipe.enable_xformers_memory_efficient_attention()
if acceleration == "tensorrt":
from polygraphy import cuda
from streamdiffusion.acceleration.tensorrt import (
TorchVAEEncoder,
compile_unet,
compile_vae_decoder,
compile_vae_encoder,
)
from streamdiffusion.acceleration.tensorrt.engine import (
AutoencoderKLEngine,
UNet2DConditionModelEngine,
)
from streamdiffusion.acceleration.tensorrt.models import (
VAE,
UNet,
VAEEncoder,
)
def create_prefix(
model_id_or_path: str,
max_batch_size: int,
min_batch_size: int,
):
maybe_path = Path(model_id_or_path)
if maybe_path.exists():
return f"{maybe_path.stem}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"
else:
return f"{model_id_or_path}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"
engine_dir = Path(engine_dir)
unet_path = os.path.join(
engine_dir,
create_prefix(
model_id_or_path=model_id_or_path,
max_batch_size=stream.trt_unet_batch_size,
min_batch_size=stream.trt_unet_batch_size,
),
"unet.engine",
)
vae_encoder_path = os.path.join(
engine_dir,
create_prefix(
model_id_or_path=model_id_or_path,
max_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
min_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
),
"vae_encoder.engine",
)
vae_decoder_path = os.path.join(
engine_dir,
create_prefix(
model_id_or_path=model_id_or_path,
max_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
min_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
),
"vae_decoder.engine",
)
if not os.path.exists(unet_path):
os.makedirs(os.path.dirname(unet_path), exist_ok=True)
unet_model = UNet(
fp16=True,
device=stream.device,
max_batch_size=stream.trt_unet_batch_size,
min_batch_size=stream.trt_unet_batch_size,
embedding_dim=stream.text_encoder.config.hidden_size,
unet_dim=stream.unet.config.in_channels,
)
compile_unet(
stream.unet,
unet_model,
unet_path + ".onnx",
unet_path + ".opt.onnx",
unet_path,
opt_batch_size=stream.trt_unet_batch_size,
)
if not os.path.exists(vae_decoder_path):
os.makedirs(os.path.dirname(vae_decoder_path), exist_ok=True)
stream.vae.forward = stream.vae.decode
vae_decoder_model = VAE(
device=stream.device,
max_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
min_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
)
compile_vae_decoder(
stream.vae,
vae_decoder_model,
vae_decoder_path + ".onnx",
vae_decoder_path + ".opt.onnx",
vae_decoder_path,
opt_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
)
delattr(stream.vae, "forward")
if not os.path.exists(vae_encoder_path):
os.makedirs(os.path.dirname(vae_encoder_path), exist_ok=True)
vae_encoder = TorchVAEEncoder(stream.vae).to(torch.device("cuda"))
vae_encoder_model = VAEEncoder(
device=stream.device,
max_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
min_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
)
compile_vae_encoder(
vae_encoder,
vae_encoder_model,
vae_encoder_path + ".onnx",
vae_encoder_path + ".opt.onnx",
vae_encoder_path,
opt_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
)
cuda_steram = cuda.Stream()
vae_config = stream.vae.config
vae_dtype = stream.vae.dtype
stream.unet = UNet2DConditionModelEngine(
unet_path, cuda_steram, use_cuda_graph=False
)
stream.vae = AutoencoderKLEngine(
vae_encoder_path,
vae_decoder_path,
cuda_steram,
stream.pipe.vae_scale_factor,
use_cuda_graph=False,
)
setattr(stream.vae, "config", vae_config)
setattr(stream.vae, "dtype", vae_dtype)
gc.collect()
torch.cuda.empty_cache()
print("TensorRT acceleration enabled.")
if acceleration == "sfast":
from streamdiffusion.acceleration.sfast import (
accelerate_with_stable_fast,
)
stream = accelerate_with_stable_fast(stream)
print("StableFast acceleration enabled.")
except Exception:
traceback.print_exc()
print("Acceleration has failed. Falling back to normal mode.")
if seed < 0: # Random seed
seed = np.random.randint(0, 1000000)
stream.prepare(
"",
"",
num_inference_steps=50,
guidance_scale=1.1
if stream.cfg_type in ["full", "self", "initialize"]
else 1.0,
generator=torch.manual_seed(seed),
seed=seed,
)
if self.use_safety_checker:
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
"CompVis/stable-diffusion-safety-checker"
).to(pipe.device)
self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
"openai/clip-vit-base-patch32"
)
self.nsfw_fallback_img = Image.new("RGB", (512, 512), (0, 0, 0))
return stream
The provided code snippet includes necessary dependencies for implementing the `image_generation_process` function. Write a Python function `def image_generation_process( queue: Queue, fps_queue: Queue, prompt: str, model_id_or_path: str, acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt", ) -> None` to solve the following problem:
Process for generating images based on a prompt using a specified model. Parameters ---------- queue : Queue The queue to put the generated images in. fps_queue : Queue The queue to put the calculated fps. prompt : str The prompt to generate images from. model_id_or_path : str The name of the model to use for image generation. acceleration : Literal["none", "xformers", "tensorrt"] The type of acceleration to use for image generation.
Here is the function:
def image_generation_process(
    queue: Queue,
    fps_queue: Queue,
    prompt: str,
    model_id_or_path: str,
    acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
) -> None:
    """
    Process for generating images based on a prompt using a specified model.

    Parameters
    ----------
    queue : Queue
        The queue to put the generated images in.
    fps_queue : Queue
        The queue to put the calculated fps.
    prompt : str
        The prompt to generate images from.
    model_id_or_path : str
        The name of the model to use for image generation.
    acceleration : Literal["none", "xformers", "tensorrt"]
        The type of acceleration to use for image generation.
    """
    stream = StreamDiffusionWrapper(
        model_id_or_path=model_id_or_path,
        t_index_list=[0],
        frame_buffer_size=1,
        warmup=10,
        acceleration=acceleration,
        use_lcm_lora=False,
        mode="txt2img",
        cfg_type="none",
        use_denoising_batch=True,
    )

    stream.prepare(
        prompt=prompt,
        num_inference_steps=50,
    )

    # Bug fix: predefine fps so the KeyboardInterrupt handler cannot hit a
    # NameError when the interrupt arrives before the first frame finishes.
    fps = 0.0
    while True:
        try:
            start_time = time.time()
            x_outputs = stream.stream.txt2img_sd_turbo(1).cpu()
            queue.put(x_outputs, block=False)
            fps = 1 / (time.time() - start_time)
            fps_queue.put(fps)
        except KeyboardInterrupt:
            print(f"fps: {fps}")
            return
8,747 | import os
import sys
import time
import threading
from multiprocessing import Process, Queue, get_context
from multiprocessing.connection import Connection
from typing import List, Literal, Dict, Optional
import torch
import PIL.Image
from streamdiffusion.image_utils import pil2tensor
import mss
import fire
import tkinter as tk
from utils.viewer import receive_images
from utils.wrapper import StreamDiffusionWrapper
def dummy_screen(
width: int,
height: int,
):
def monitor_setting_process(
    width: int,
    height: int,
    monitor_sender: Connection,
) -> None:
    """Resolve the capture region via `dummy_screen` and send it down the pipe."""
    monitor_sender.send(dummy_screen(width, height))
8,748 | import os
import sys
import time
import threading
from multiprocessing import Process, Queue, get_context
from multiprocessing.connection import Connection
from typing import List, Literal, Dict, Optional
import torch
import PIL.Image
from streamdiffusion.image_utils import pil2tensor
import mss
import fire
import tkinter as tk
from utils.viewer import receive_images
from utils.wrapper import StreamDiffusionWrapper
inputs = []
def screen(
event: threading.Event,
height: int = 512,
width: int = 512,
monitor: Dict[str, int] = {"top": 300, "left": 200, "width": 512, "height": 512},
):
global inputs
with mss.mss() as sct:
while True:
if event.is_set():
print("terminate read thread")
break
img = sct.grab(monitor)
img = PIL.Image.frombytes("RGB", img.size, img.bgra, "raw", "BGRX")
img.resize((height, width))
inputs.append(pil2tensor(img))
print('exit : screen')
class StreamDiffusionWrapper:
def __init__(
self,
model_id_or_path: str,
t_index_list: List[int],
lora_dict: Optional[Dict[str, float]] = None,
mode: Literal["img2img", "txt2img"] = "img2img",
output_type: Literal["pil", "pt", "np", "latent"] = "pil",
lcm_lora_id: Optional[str] = None,
vae_id: Optional[str] = None,
device: Literal["cpu", "cuda"] = "cuda",
dtype: torch.dtype = torch.float16,
frame_buffer_size: int = 1,
width: int = 512,
height: int = 512,
warmup: int = 10,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
do_add_noise: bool = True,
device_ids: Optional[List[int]] = None,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
use_safety_checker: bool = False,
engine_dir: Optional[Union[str, Path]] = "engines",
):
"""
Initializes the StreamDiffusionWrapper.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
mode : Literal["img2img", "txt2img"], optional
txt2img or img2img, by default "img2img".
output_type : Literal["pil", "pt", "np", "latent"], optional
The output type of image, by default "pil".
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
If None, the default LCM-LoRA
("latent-consistency/lcm-lora-sdv1-5") will be used.
vae_id : Optional[str], optional
The vae_id to load, by default None.
If None, the default TinyVAE
("madebyollin/taesd") will be used.
device : Literal["cpu", "cuda"], optional
The device to use for inference, by default "cuda".
dtype : torch.dtype, optional
The dtype for inference, by default torch.float16.
frame_buffer_size : int, optional
The frame buffer size for denoising batch, by default 1.
width : int, optional
The width of the image, by default 512.
height : int, optional
The height of the image, by default 512.
warmup : int, optional
The number of warmup steps to perform, by default 10.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
device_ids : Optional[List[int]], optional
The device ids to use for DataParallel, by default None.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
enable_similar_image_filter : bool, optional
Whether to enable similar image filter or not,
by default False.
similar_image_filter_threshold : float, optional
The threshold for similar image filter, by default 0.98.
similar_image_filter_max_skip_frame : int, optional
The max skip frame for similar image filter, by default 10.
use_denoising_batch : bool, optional
Whether to use denoising batch or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
use_safety_checker : bool, optional
Whether to use safety checker or not, by default False.
"""
self.sd_turbo = "turbo" in model_id_or_path
if mode == "txt2img":
if cfg_type != "none":
raise ValueError(
f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
)
if use_denoising_batch and frame_buffer_size > 1:
if not self.sd_turbo:
raise ValueError(
"txt2img mode cannot use denoising batch with frame_buffer_size > 1."
)
if mode == "img2img":
if not use_denoising_batch:
raise NotImplementedError(
"img2img mode must use denoising batch for now."
)
self.device = device
self.dtype = dtype
self.width = width
self.height = height
self.mode = mode
self.output_type = output_type
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.stream: StreamDiffusion = self._load_model(
model_id_or_path=model_id_or_path,
lora_dict=lora_dict,
lcm_lora_id=lcm_lora_id,
vae_id=vae_id,
t_index_list=t_index_list,
acceleration=acceleration,
warmup=warmup,
do_add_noise=do_add_noise,
use_lcm_lora=use_lcm_lora,
use_tiny_vae=use_tiny_vae,
cfg_type=cfg_type,
seed=seed,
engine_dir=engine_dir,
)
if device_ids is not None:
self.stream.unet = torch.nn.DataParallel(
self.stream.unet, device_ids=device_ids
)
if enable_similar_image_filter:
self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)
def prepare(
self,
prompt: str,
negative_prompt: str = "",
num_inference_steps: int = 50,
guidance_scale: float = 1.2,
delta: float = 1.0,
) -> None:
"""
Prepares the model for inference.
Parameters
----------
prompt : str
The prompt to generate images from.
num_inference_steps : int, optional
The number of inference steps to perform, by default 50.
guidance_scale : float, optional
The guidance scale to use, by default 1.2.
delta : float, optional
The delta multiplier of virtual residual noise,
by default 1.0.
"""
self.stream.prepare(
prompt,
negative_prompt,
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale,
delta=delta,
)
def __call__(
self,
image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,
) -> Union[Image.Image, List[Image.Image]]:
"""
Performs img2img or txt2img based on the mode.
Parameters
----------
image : Optional[Union[str, Image.Image, torch.Tensor]]
The image to generate from.
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image]]
The generated image.
"""
if self.mode == "img2img":
return self.img2img(image, prompt)
else:
return self.txt2img(prompt)
def txt2img(
self, prompt: Optional[str] = None
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Performs txt2img.
Parameters
----------
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image]]
The generated image.
"""
if prompt is not None:
self.stream.update_prompt(prompt)
if self.sd_turbo:
image_tensor = self.stream.txt2img_sd_turbo(self.batch_size)
else:
image_tensor = self.stream.txt2img(self.frame_buffer_size)
image = self.postprocess_image(image_tensor, output_type=self.output_type)
if self.use_safety_checker:
safety_checker_input = self.feature_extractor(
image, return_tensors="pt"
).to(self.device)
_, has_nsfw_concept = self.safety_checker(
images=image_tensor.to(self.dtype),
clip_input=safety_checker_input.pixel_values.to(self.dtype),
)
image = self.nsfw_fallback_img if has_nsfw_concept[0] else image
return image
def img2img(
self, image: Union[str, Image.Image, torch.Tensor], prompt: Optional[str] = None
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Performs img2img.
Parameters
----------
image : Union[str, Image.Image, torch.Tensor]
The image to generate from.
Returns
-------
Image.Image
The generated image.
"""
if prompt is not None:
self.stream.update_prompt(prompt)
if isinstance(image, str) or isinstance(image, Image.Image):
image = self.preprocess_image(image)
image_tensor = self.stream(image)
image = self.postprocess_image(image_tensor, output_type=self.output_type)
if self.use_safety_checker:
safety_checker_input = self.feature_extractor(
image, return_tensors="pt"
).to(self.device)
_, has_nsfw_concept = self.safety_checker(
images=image_tensor.to(self.dtype),
clip_input=safety_checker_input.pixel_values.to(self.dtype),
)
image = self.nsfw_fallback_img if has_nsfw_concept[0] else image
return image
def preprocess_image(self, image: Union[str, Image.Image]) -> torch.Tensor:
"""
Preprocesses the image.
Parameters
----------
image : Union[str, Image.Image, torch.Tensor]
The image to preprocess.
Returns
-------
torch.Tensor
The preprocessed image.
"""
if isinstance(image, str):
image = Image.open(image).convert("RGB").resize((self.width, self.height))
if isinstance(image, Image.Image):
image = image.convert("RGB").resize((self.width, self.height))
return self.stream.image_processor.preprocess(
image, self.height, self.width
).to(device=self.device, dtype=self.dtype)
def postprocess_image(
self, image_tensor: torch.Tensor, output_type: str = "pil"
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Postprocesses the image.
Parameters
----------
image_tensor : torch.Tensor
The image tensor to postprocess.
Returns
-------
Union[Image.Image, List[Image.Image]]
The postprocessed image.
"""
if self.frame_buffer_size > 1:
return postprocess_image(image_tensor.cpu(), output_type=output_type)
else:
return postprocess_image(image_tensor.cpu(), output_type=output_type)[0]
    def _load_model(
        self,
        model_id_or_path: str,
        t_index_list: List[int],
        lora_dict: Optional[Dict[str, float]] = None,
        lcm_lora_id: Optional[str] = None,
        vae_id: Optional[str] = None,
        acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
        warmup: int = 10,
        do_add_noise: bool = True,
        use_lcm_lora: bool = True,
        use_tiny_vae: bool = True,
        cfg_type: Literal["none", "full", "self", "initialize"] = "self",
        seed: int = 2,
        engine_dir: Optional[Union[str, Path]] = "engines",
    ) -> StreamDiffusion:
        """
        Loads the model.

        This method does the following:

        1. Loads the model from the model_id_or_path.
        2. Loads and fuses the LCM-LoRA model from the lcm_lora_id if needed.
        3. Loads the VAE model from the vae_id if needed.
        4. Enables acceleration if needed.
        5. Prepares the model for inference.
        6. Load the safety checker if needed.

        Parameters
        ----------
        model_id_or_path : str
            The model id or path to load.
        t_index_list : List[int]
            The t_index_list to use for inference.
        lora_dict : Optional[Dict[str, float]], optional
            The lora_dict to load, by default None.
            Keys are the LoRA names and values are the LoRA scales.
            Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
        lcm_lora_id : Optional[str], optional
            The lcm_lora_id to load, by default None.
        vae_id : Optional[str], optional
            The vae_id to load, by default None.
        acceleration : Literal["none", "xformers", "sfast", "tensorrt"], optional
            The acceleration method, by default "tensorrt".
        warmup : int, optional
            The number of warmup steps to perform, by default 10.
        do_add_noise : bool, optional
            Whether to add noise for following denoising steps or not,
            by default True.
        use_lcm_lora : bool, optional
            Whether to use LCM-LoRA or not, by default True.
        use_tiny_vae : bool, optional
            Whether to use TinyVAE or not, by default True.
        cfg_type : Literal["none", "full", "self", "initialize"],
        optional
            The cfg_type for img2img mode, by default "self".
            You cannot use anything other than "none" for txt2img mode.
        seed : int, optional
            The seed, by default 2. Negative values pick a random seed.
        engine_dir : Optional[Union[str, Path]], optional
            Directory where compiled TensorRT engines are cached,
            by default "engines".

        Returns
        -------
        StreamDiffusion
            The loaded model.
        """
        # Model resolution order: local diffusers directory first, then a
        # single-file checkpoint (e.g. a .safetensors pulled from the hub).
        try:  # Load from local directory
            pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(
                model_id_or_path,
            ).to(device=self.device, dtype=self.dtype)

        except ValueError:  # Load from huggingface
            pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_single_file(
                model_id_or_path,
            ).to(device=self.device, dtype=self.dtype)
        except Exception:  # No model found
            traceback.print_exc()
            print("Model load has failed. Doesn't exist.")
            exit()

        stream = StreamDiffusion(
            pipe=pipe,
            t_index_list=t_index_list,
            torch_dtype=self.dtype,
            width=self.width,
            height=self.height,
            do_add_noise=do_add_noise,
            frame_buffer_size=self.frame_buffer_size,
            use_denoising_batch=self.use_denoising_batch,
            cfg_type=cfg_type,
        )
        # SD-Turbo already produces few-step samples, so LCM-LoRA fusing is
        # skipped for it entirely.
        if not self.sd_turbo:
            if use_lcm_lora:
                if lcm_lora_id is not None:
                    stream.load_lcm_lora(
                        pretrained_model_name_or_path_or_dict=lcm_lora_id
                    )
                else:
                    stream.load_lcm_lora()
                stream.fuse_lora()

            if lora_dict is not None:
                for lora_name, lora_scale in lora_dict.items():
                    stream.load_lora(lora_name)
                    stream.fuse_lora(lora_scale=lora_scale)
                    print(f"Use LoRA: {lora_name} in weights {lora_scale}")

        if use_tiny_vae:
            if vae_id is not None:
                stream.vae = AutoencoderTiny.from_pretrained(vae_id).to(
                    device=pipe.device, dtype=pipe.dtype
                )
            else:
                stream.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd").to(
                    device=pipe.device, dtype=pipe.dtype
                )

        # Acceleration is best-effort: any failure falls through to the
        # plain PyTorch path below.
        try:
            if acceleration == "xformers":
                stream.pipe.enable_xformers_memory_efficient_attention()
            if acceleration == "tensorrt":
                from polygraphy import cuda
                from streamdiffusion.acceleration.tensorrt import (
                    TorchVAEEncoder,
                    compile_unet,
                    compile_vae_decoder,
                    compile_vae_encoder,
                )
                from streamdiffusion.acceleration.tensorrt.engine import (
                    AutoencoderKLEngine,
                    UNet2DConditionModelEngine,
                )
                from streamdiffusion.acceleration.tensorrt.models import (
                    VAE,
                    UNet,
                    VAEEncoder,
                )

                def create_prefix(
                    model_id_or_path: str,
                    max_batch_size: int,
                    min_batch_size: int,
                ):
                    # Cache key: engines are invalidated whenever any of
                    # these build-affecting options change.
                    maybe_path = Path(model_id_or_path)
                    if maybe_path.exists():
                        return f"{maybe_path.stem}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"
                    else:
                        return f"{model_id_or_path}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"

                engine_dir = Path(engine_dir)
                unet_path = os.path.join(
                    engine_dir,
                    create_prefix(
                        model_id_or_path=model_id_or_path,
                        max_batch_size=stream.trt_unet_batch_size,
                        min_batch_size=stream.trt_unet_batch_size,
                    ),
                    "unet.engine",
                )
                vae_encoder_path = os.path.join(
                    engine_dir,
                    create_prefix(
                        model_id_or_path=model_id_or_path,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    ),
                    "vae_encoder.engine",
                )
                vae_decoder_path = os.path.join(
                    engine_dir,
                    create_prefix(
                        model_id_or_path=model_id_or_path,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    ),
                    "vae_decoder.engine",
                )

                # Each engine is compiled only on a cache miss; compilation
                # is slow, so hits skip straight to loading.
                if not os.path.exists(unet_path):
                    os.makedirs(os.path.dirname(unet_path), exist_ok=True)
                    unet_model = UNet(
                        fp16=True,
                        device=stream.device,
                        max_batch_size=stream.trt_unet_batch_size,
                        min_batch_size=stream.trt_unet_batch_size,
                        embedding_dim=stream.text_encoder.config.hidden_size,
                        unet_dim=stream.unet.config.in_channels,
                    )
                    compile_unet(
                        stream.unet,
                        unet_model,
                        unet_path + ".onnx",
                        unet_path + ".opt.onnx",
                        unet_path,
                        opt_batch_size=stream.trt_unet_batch_size,
                    )

                if not os.path.exists(vae_decoder_path):
                    os.makedirs(os.path.dirname(vae_decoder_path), exist_ok=True)
                    # Temporarily alias forward->decode so the ONNX export
                    # traces the decoder; the alias is removed afterwards.
                    stream.vae.forward = stream.vae.decode
                    vae_decoder_model = VAE(
                        device=stream.device,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                    compile_vae_decoder(
                        stream.vae,
                        vae_decoder_model,
                        vae_decoder_path + ".onnx",
                        vae_decoder_path + ".opt.onnx",
                        vae_decoder_path,
                        opt_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                    delattr(stream.vae, "forward")

                if not os.path.exists(vae_encoder_path):
                    os.makedirs(os.path.dirname(vae_encoder_path), exist_ok=True)
                    vae_encoder = TorchVAEEncoder(stream.vae).to(torch.device("cuda"))
                    vae_encoder_model = VAEEncoder(
                        device=stream.device,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                    compile_vae_encoder(
                        vae_encoder,
                        vae_encoder_model,
                        vae_encoder_path + ".onnx",
                        vae_encoder_path + ".opt.onnx",
                        vae_encoder_path,
                        opt_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )

                cuda_steram = cuda.Stream()

                # Preserve config/dtype before swapping the torch modules
                # for TensorRT engines, then restore them on the new object
                # so downstream code can still introspect them.
                vae_config = stream.vae.config
                vae_dtype = stream.vae.dtype

                stream.unet = UNet2DConditionModelEngine(
                    unet_path, cuda_steram, use_cuda_graph=False
                )
                stream.vae = AutoencoderKLEngine(
                    vae_encoder_path,
                    vae_decoder_path,
                    cuda_steram,
                    stream.pipe.vae_scale_factor,
                    use_cuda_graph=False,
                )
                setattr(stream.vae, "config", vae_config)
                setattr(stream.vae, "dtype", vae_dtype)

                gc.collect()
                torch.cuda.empty_cache()

                print("TensorRT acceleration enabled.")
            if acceleration == "sfast":
                from streamdiffusion.acceleration.sfast import (
                    accelerate_with_stable_fast,
                )

                stream = accelerate_with_stable_fast(stream)
                print("StableFast acceleration enabled.")
        except Exception:
            traceback.print_exc()
            print("Acceleration has failed. Falling back to normal mode.")

        if seed < 0:  # Random seed
            seed = np.random.randint(0, 1000000)
        stream.prepare(
            "",
            "",
            num_inference_steps=50,
            guidance_scale=1.1
            if stream.cfg_type in ["full", "self", "initialize"]
            else 1.0,
            generator=torch.manual_seed(seed),
            seed=seed,
        )

        if self.use_safety_checker:
            from transformers import CLIPFeatureExtractor
            from diffusers.pipelines.stable_diffusion.safety_checker import (
                StableDiffusionSafetyChecker,
            )

            self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
                "CompVis/stable-diffusion-safety-checker"
            ).to(pipe.device)
            self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
                "openai/clip-vit-base-patch32"
            )
            self.nsfw_fallback_img = Image.new("RGB", (512, 512), (0, 0, 0))

        return stream
The provided code snippet includes necessary dependencies for implementing the `image_generation_process` function. Write a Python function `def image_generation_process( queue: Queue, fps_queue: Queue, close_queue: Queue, model_id_or_path: str, lora_dict: Optional[Dict[str, float]], prompt: str, negative_prompt: str, frame_buffer_size: int, width: int, height: int, acceleration: Literal["none", "xformers", "tensorrt"], use_denoising_batch: bool, seed: int, cfg_type: Literal["none", "full", "self", "initialize"], guidance_scale: float, delta: float, do_add_noise: bool, enable_similar_image_filter: bool, similar_image_filter_threshold: float, similar_image_filter_max_skip_frame: float, monitor_receiver : Connection, ) -> None` to solve the following problem:
Process for generating images based on a prompt using a specified model. Parameters ---------- queue : Queue The queue to put the generated images in. fps_queue : Queue The queue to put the calculated fps. model_id_or_path : str The name of the model to use for image generation. lora_dict : Optional[Dict[str, float]], optional The lora_dict to load, by default None. Keys are the LoRA names and values are the LoRA scales. Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...} prompt : str The prompt to generate images from. negative_prompt : str, optional The negative prompt to use. frame_buffer_size : int, optional The frame buffer size for denoising batch, by default 1. width : int, optional The width of the image, by default 512. height : int, optional The height of the image, by default 512. acceleration : Literal["none", "xformers", "tensorrt"], optional The acceleration method, by default "tensorrt". use_denoising_batch : bool, optional Whether to use denoising batch or not, by default True. seed : int, optional The seed, by default 2. if -1, use random seed. cfg_type : Literal["none", "full", "self", "initialize"], optional The cfg_type for img2img mode, by default "self". You cannot use anything other than "none" for txt2img mode. guidance_scale : float, optional The CFG scale, by default 1.2. delta : float, optional The delta multiplier of virtual residual noise, by default 1.0. do_add_noise : bool, optional Whether to add noise for following denoising steps or not, by default True. enable_similar_image_filter : bool, optional Whether to enable similar image filter or not, by default False. similar_image_filter_threshold : float, optional The threshold for similar image filter, by default 0.98. similar_image_filter_max_skip_frame : int, optional The max skip frame for similar image filter, by default 10.
Here is the function:
def image_generation_process(
    queue: Queue,
    fps_queue: Queue,
    close_queue: Queue,
    model_id_or_path: str,
    lora_dict: Optional[Dict[str, float]],
    prompt: str,
    negative_prompt: str,
    frame_buffer_size: int,
    width: int,
    height: int,
    acceleration: Literal["none", "xformers", "tensorrt"],
    use_denoising_batch: bool,
    seed: int,
    cfg_type: Literal["none", "full", "self", "initialize"],
    guidance_scale: float,
    delta: float,
    do_add_noise: bool,
    enable_similar_image_filter: bool,
    similar_image_filter_threshold: float,
    similar_image_filter_max_skip_frame: float,
    monitor_receiver : Connection,
) -> None:
    """
    Process for generating images based on a prompt using a specified model.

    Parameters
    ----------
    queue : Queue
        The queue to put the generated images in.
    fps_queue : Queue
        The queue to put the calculated fps.
    close_queue : Queue
        A sentinel queue; any item placed on it makes this process exit
        its generation loop.
    model_id_or_path : str
        The name of the model to use for image generation.
    lora_dict : Optional[Dict[str, float]], optional
        The lora_dict to load, by default None.
        Keys are the LoRA names and values are the LoRA scales.
        Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
    prompt : str
        The prompt to generate images from.
    negative_prompt : str, optional
        The negative prompt to use.
    frame_buffer_size : int, optional
        The frame buffer size for denoising batch, by default 1.
    width : int, optional
        The width of the image, by default 512.
    height : int, optional
        The height of the image, by default 512.
    acceleration : Literal["none", "xformers", "tensorrt"], optional
        The acceleration method, by default "tensorrt".
    use_denoising_batch : bool, optional
        Whether to use denoising batch or not, by default True.
    seed : int, optional
        The seed, by default 2. if -1, use random seed.
    cfg_type : Literal["none", "full", "self", "initialize"],
    optional
        The cfg_type for img2img mode, by default "self".
        You cannot use anything other than "none" for txt2img mode.
    guidance_scale : float, optional
        The CFG scale, by default 1.2.
    delta : float, optional
        The delta multiplier of virtual residual noise,
        by default 1.0.
    do_add_noise : bool, optional
        Whether to add noise for following denoising steps or not,
        by default True.
    enable_similar_image_filter : bool, optional
        Whether to enable similar image filter or not,
        by default False.
    similar_image_filter_threshold : float, optional
        The threshold for similar image filter, by default 0.98.
    similar_image_filter_max_skip_frame : int, optional
        The max skip frame for similar image filter, by default 10.
    monitor_receiver : Connection
        Pipe end from which the monitor/region to capture is received.
    """
    global inputs
    stream = StreamDiffusionWrapper(
        model_id_or_path=model_id_or_path,
        lora_dict=lora_dict,
        t_index_list=[32, 45],
        frame_buffer_size=frame_buffer_size,
        width=width,
        height=height,
        warmup=10,
        acceleration=acceleration,
        do_add_noise=do_add_noise,
        enable_similar_image_filter=enable_similar_image_filter,
        similar_image_filter_threshold=similar_image_filter_threshold,
        similar_image_filter_max_skip_frame=similar_image_filter_max_skip_frame,
        mode="img2img",
        use_denoising_batch=use_denoising_batch,
        cfg_type=cfg_type,
        seed=seed,
    )

    stream.prepare(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=50,
        guidance_scale=guidance_scale,
        delta=delta,
    )

    monitor = monitor_receiver.recv()

    # Screen capture runs in a sidecar thread that appends frames to the
    # module-level `inputs` list; `event` signals it to stop.
    event = threading.Event()
    input_screen = threading.Thread(target=screen, args=(event, height, width, monitor))
    input_screen.start()
    time.sleep(5)  # give the capture thread time to produce first frames

    # Initialized up-front: if the loop exits on its very first close-check,
    # the final `print(f"fps: {fps}")` would otherwise raise NameError.
    fps = 0.0
    while True:
        try:
            if not close_queue.empty():  # closing check
                break

            if len(inputs) < frame_buffer_size:
                time.sleep(0.005)
                continue

            start_time = time.time()
            sampled_inputs = []
            # Pick `frame_buffer_size` frames spread evenly across the
            # captured backlog, newest first.
            for i in range(frame_buffer_size):
                index = (len(inputs) // frame_buffer_size) * i
                sampled_inputs.append(inputs[len(inputs) - index - 1])
            input_batch = torch.cat(sampled_inputs)
            inputs.clear()
            output_images = stream.stream(
                input_batch.to(device=stream.device, dtype=stream.dtype)
            ).cpu()
            if frame_buffer_size == 1:
                output_images = [output_images]
            for output_image in output_images:
                queue.put(output_image, block=False)

            fps = 1 / (time.time() - start_time)
            fps_queue.put(fps)
        except KeyboardInterrupt:
            break

    print("closing image_generation_process...")
    event.set()  # stop capture thread
    input_screen.join()
    print(f"fps: {fps}")
8,749 | import io
import os
import sys
import time
from multiprocessing import Process, Queue
from typing import List, Literal, Optional, Dict
import fire
import PIL.Image
import requests
import torch
from tqdm import tqdm
from streamdiffusion.image_utils import postprocess_image
from utils.wrapper import StreamDiffusionWrapper
def _postprocess_image(queue: Queue) -> None:
    """Continuously drain *queue*, decoding each tensor to a PIL image.

    Intended to run in its own process; decoded images are discarded —
    the loop exists to consume the queue. Returns on KeyboardInterrupt.
    """
    while True:
        try:
            if not queue.empty():
                _ = postprocess_image(queue.get(block=False), output_type="pil")[0]
            # Short sleep keeps this polling loop from pegging a core.
            time.sleep(0.0005)
        except KeyboardInterrupt:
            return
def download_image(url: str):
    """Download an image over HTTP and return it as a PIL image.

    Parameters
    ----------
    url : str
        URL of the image to fetch.

    Returns
    -------
    PIL.Image.Image
        The decoded image.

    Raises
    ------
    requests.HTTPError
        If the server responds with a 4xx/5xx status.
    requests.Timeout
        If the request exceeds the timeout.
    """
    # Timeout bounds the call so an unresponsive host cannot hang forever.
    response = requests.get(url, timeout=30)
    # Fail loudly on HTTP errors instead of trying to decode an error page.
    response.raise_for_status()
    image = PIL.Image.open(io.BytesIO(response.content))
    return image
class StreamDiffusionWrapper:
    def __init__(
        self,
        model_id_or_path: str,
        t_index_list: List[int],
        lora_dict: Optional[Dict[str, float]] = None,
        mode: Literal["img2img", "txt2img"] = "img2img",
        output_type: Literal["pil", "pt", "np", "latent"] = "pil",
        lcm_lora_id: Optional[str] = None,
        vae_id: Optional[str] = None,
        device: Literal["cpu", "cuda"] = "cuda",
        dtype: torch.dtype = torch.float16,
        frame_buffer_size: int = 1,
        width: int = 512,
        height: int = 512,
        warmup: int = 10,
        acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
        do_add_noise: bool = True,
        device_ids: Optional[List[int]] = None,
        use_lcm_lora: bool = True,
        use_tiny_vae: bool = True,
        enable_similar_image_filter: bool = False,
        similar_image_filter_threshold: float = 0.98,
        similar_image_filter_max_skip_frame: int = 10,
        use_denoising_batch: bool = True,
        cfg_type: Literal["none", "full", "self", "initialize"] = "self",
        seed: int = 2,
        use_safety_checker: bool = False,
        engine_dir: Optional[Union[str, Path]] = "engines",
    ):
        """
        Initializes the StreamDiffusionWrapper.

        Parameters
        ----------
        model_id_or_path : str
            The model id or path to load.
        t_index_list : List[int]
            The t_index_list to use for inference.
        lora_dict : Optional[Dict[str, float]], optional
            The lora_dict to load, by default None.
            Keys are the LoRA names and values are the LoRA scales.
            Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
        mode : Literal["img2img", "txt2img"], optional
            txt2img or img2img, by default "img2img".
        output_type : Literal["pil", "pt", "np", "latent"], optional
            The output type of image, by default "pil".
        lcm_lora_id : Optional[str], optional
            The lcm_lora_id to load, by default None.
            If None, the default LCM-LoRA
            ("latent-consistency/lcm-lora-sdv1-5") will be used.
        vae_id : Optional[str], optional
            The vae_id to load, by default None.
            If None, the default TinyVAE
            ("madebyollin/taesd") will be used.
        device : Literal["cpu", "cuda"], optional
            The device to use for inference, by default "cuda".
        dtype : torch.dtype, optional
            The dtype for inference, by default torch.float16.
        frame_buffer_size : int, optional
            The frame buffer size for denoising batch, by default 1.
        width : int, optional
            The width of the image, by default 512.
        height : int, optional
            The height of the image, by default 512.
        warmup : int, optional
            The number of warmup steps to perform, by default 10.
        acceleration : Literal["none", "xformers", "tensorrt"], optional
            The acceleration method, by default "tensorrt".
        do_add_noise : bool, optional
            Whether to add noise for following denoising steps or not,
            by default True.
        device_ids : Optional[List[int]], optional
            The device ids to use for DataParallel, by default None.
        use_lcm_lora : bool, optional
            Whether to use LCM-LoRA or not, by default True.
        use_tiny_vae : bool, optional
            Whether to use TinyVAE or not, by default True.
        enable_similar_image_filter : bool, optional
            Whether to enable similar image filter or not,
            by default False.
        similar_image_filter_threshold : float, optional
            The threshold for similar image filter, by default 0.98.
        similar_image_filter_max_skip_frame : int, optional
            The max skip frame for similar image filter, by default 10.
        use_denoising_batch : bool, optional
            Whether to use denoising batch or not, by default True.
        cfg_type : Literal["none", "full", "self", "initialize"],
        optional
            The cfg_type for img2img mode, by default "self".
            You cannot use anything other than "none" for txt2img mode.
        seed : int, optional
            The seed, by default 2.
        use_safety_checker : bool, optional
            Whether to use safety checker or not, by default False.
        engine_dir : Optional[Union[str, Path]], optional
            Directory where compiled TensorRT engines are cached,
            by default "engines".
        """
        # SD-Turbo checkpoints are detected by name; they take a different
        # sampling path and skip LCM-LoRA fusing in _load_model.
        self.sd_turbo = "turbo" in model_id_or_path

        if mode == "txt2img":
            if cfg_type != "none":
                raise ValueError(
                    f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
                )
            if use_denoising_batch and frame_buffer_size > 1:
                # Only SD-Turbo supports batched denoising with a frame buffer
                # in txt2img mode.
                if not self.sd_turbo:
                    raise ValueError(
                        "txt2img mode cannot use denoising batch with frame_buffer_size > 1."
                    )

        if mode == "img2img":
            if not use_denoising_batch:
                raise NotImplementedError(
                    "img2img mode must use denoising batch for now."
                )

        self.device = device
        self.dtype = dtype
        self.width = width
        self.height = height
        self.mode = mode
        self.output_type = output_type
        self.frame_buffer_size = frame_buffer_size
        # With denoising batching, every t-index is denoised in one batched
        # forward pass, so the effective batch scales with len(t_index_list).
        self.batch_size = (
            len(t_index_list) * frame_buffer_size
            if use_denoising_batch
            else frame_buffer_size
        )

        self.use_denoising_batch = use_denoising_batch
        self.use_safety_checker = use_safety_checker

        self.stream: StreamDiffusion = self._load_model(
            model_id_or_path=model_id_or_path,
            lora_dict=lora_dict,
            lcm_lora_id=lcm_lora_id,
            vae_id=vae_id,
            t_index_list=t_index_list,
            acceleration=acceleration,
            warmup=warmup,
            do_add_noise=do_add_noise,
            use_lcm_lora=use_lcm_lora,
            use_tiny_vae=use_tiny_vae,
            cfg_type=cfg_type,
            seed=seed,
            engine_dir=engine_dir,
        )

        if device_ids is not None:
            self.stream.unet = torch.nn.DataParallel(
                self.stream.unet, device_ids=device_ids
            )

        if enable_similar_image_filter:
            self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)
def prepare(
self,
prompt: str,
negative_prompt: str = "",
num_inference_steps: int = 50,
guidance_scale: float = 1.2,
delta: float = 1.0,
) -> None:
"""
Prepares the model for inference.
Parameters
----------
prompt : str
The prompt to generate images from.
num_inference_steps : int, optional
The number of inference steps to perform, by default 50.
guidance_scale : float, optional
The guidance scale to use, by default 1.2.
delta : float, optional
The delta multiplier of virtual residual noise,
by default 1.0.
"""
self.stream.prepare(
prompt,
negative_prompt,
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale,
delta=delta,
)
def __call__(
self,
image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,
) -> Union[Image.Image, List[Image.Image]]:
"""
Performs img2img or txt2img based on the mode.
Parameters
----------
image : Optional[Union[str, Image.Image, torch.Tensor]]
The image to generate from.
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image]]
The generated image.
"""
if self.mode == "img2img":
return self.img2img(image, prompt)
else:
return self.txt2img(prompt)
def txt2img(
self, prompt: Optional[str] = None
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Performs txt2img.
Parameters
----------
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image]]
The generated image.
"""
if prompt is not None:
self.stream.update_prompt(prompt)
if self.sd_turbo:
image_tensor = self.stream.txt2img_sd_turbo(self.batch_size)
else:
image_tensor = self.stream.txt2img(self.frame_buffer_size)
image = self.postprocess_image(image_tensor, output_type=self.output_type)
if self.use_safety_checker:
safety_checker_input = self.feature_extractor(
image, return_tensors="pt"
).to(self.device)
_, has_nsfw_concept = self.safety_checker(
images=image_tensor.to(self.dtype),
clip_input=safety_checker_input.pixel_values.to(self.dtype),
)
image = self.nsfw_fallback_img if has_nsfw_concept[0] else image
return image
def img2img(
self, image: Union[str, Image.Image, torch.Tensor], prompt: Optional[str] = None
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Performs img2img.
Parameters
----------
image : Union[str, Image.Image, torch.Tensor]
The image to generate from.
Returns
-------
Image.Image
The generated image.
"""
if prompt is not None:
self.stream.update_prompt(prompt)
if isinstance(image, str) or isinstance(image, Image.Image):
image = self.preprocess_image(image)
image_tensor = self.stream(image)
image = self.postprocess_image(image_tensor, output_type=self.output_type)
if self.use_safety_checker:
safety_checker_input = self.feature_extractor(
image, return_tensors="pt"
).to(self.device)
_, has_nsfw_concept = self.safety_checker(
images=image_tensor.to(self.dtype),
clip_input=safety_checker_input.pixel_values.to(self.dtype),
)
image = self.nsfw_fallback_img if has_nsfw_concept[0] else image
return image
def preprocess_image(self, image: Union[str, Image.Image]) -> torch.Tensor:
"""
Preprocesses the image.
Parameters
----------
image : Union[str, Image.Image, torch.Tensor]
The image to preprocess.
Returns
-------
torch.Tensor
The preprocessed image.
"""
if isinstance(image, str):
image = Image.open(image).convert("RGB").resize((self.width, self.height))
if isinstance(image, Image.Image):
image = image.convert("RGB").resize((self.width, self.height))
return self.stream.image_processor.preprocess(
image, self.height, self.width
).to(device=self.device, dtype=self.dtype)
def postprocess_image(
self, image_tensor: torch.Tensor, output_type: str = "pil"
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Postprocesses the image.
Parameters
----------
image_tensor : torch.Tensor
The image tensor to postprocess.
Returns
-------
Union[Image.Image, List[Image.Image]]
The postprocessed image.
"""
if self.frame_buffer_size > 1:
return postprocess_image(image_tensor.cpu(), output_type=output_type)
else:
return postprocess_image(image_tensor.cpu(), output_type=output_type)[0]
def _load_model(
self,
model_id_or_path: str,
t_index_list: List[int],
lora_dict: Optional[Dict[str, float]] = None,
lcm_lora_id: Optional[str] = None,
vae_id: Optional[str] = None,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
warmup: int = 10,
do_add_noise: bool = True,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
engine_dir: Optional[Union[str, Path]] = "engines",
) -> StreamDiffusion:
"""
Loads the model.
This method does the following:
1. Loads the model from the model_id_or_path.
2. Loads and fuses the LCM-LoRA model from the lcm_lora_id if needed.
3. Loads the VAE model from the vae_id if needed.
4. Enables acceleration if needed.
5. Prepares the model for inference.
6. Load the safety checker if needed.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
vae_id : Optional[str], optional
The vae_id to load, by default None.
acceleration : Literal["none", "xfomers", "sfast", "tensorrt"], optional
The acceleration method, by default "tensorrt".
warmup : int, optional
The number of warmup steps to perform, by default 10.
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
Returns
-------
StreamDiffusion
The loaded model.
"""
try: # Load from local directory
pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(
model_id_or_path,
).to(device=self.device, dtype=self.dtype)
except ValueError: # Load from huggingface
pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_single_file(
model_id_or_path,
).to(device=self.device, dtype=self.dtype)
except Exception: # No model found
traceback.print_exc()
print("Model load has failed. Doesn't exist.")
exit()
stream = StreamDiffusion(
pipe=pipe,
t_index_list=t_index_list,
torch_dtype=self.dtype,
width=self.width,
height=self.height,
do_add_noise=do_add_noise,
frame_buffer_size=self.frame_buffer_size,
use_denoising_batch=self.use_denoising_batch,
cfg_type=cfg_type,
)
if not self.sd_turbo:
if use_lcm_lora:
if lcm_lora_id is not None:
stream.load_lcm_lora(
pretrained_model_name_or_path_or_dict=lcm_lora_id
)
else:
stream.load_lcm_lora()
stream.fuse_lora()
if lora_dict is not None:
for lora_name, lora_scale in lora_dict.items():
stream.load_lora(lora_name)
stream.fuse_lora(lora_scale=lora_scale)
print(f"Use LoRA: {lora_name} in weights {lora_scale}")
if use_tiny_vae:
if vae_id is not None:
stream.vae = AutoencoderTiny.from_pretrained(vae_id).to(
device=pipe.device, dtype=pipe.dtype
)
else:
stream.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd").to(
device=pipe.device, dtype=pipe.dtype
)
try:
if acceleration == "xformers":
stream.pipe.enable_xformers_memory_efficient_attention()
if acceleration == "tensorrt":
from polygraphy import cuda
from streamdiffusion.acceleration.tensorrt import (
TorchVAEEncoder,
compile_unet,
compile_vae_decoder,
compile_vae_encoder,
)
from streamdiffusion.acceleration.tensorrt.engine import (
AutoencoderKLEngine,
UNet2DConditionModelEngine,
)
from streamdiffusion.acceleration.tensorrt.models import (
VAE,
UNet,
VAEEncoder,
)
def create_prefix(
model_id_or_path: str,
max_batch_size: int,
min_batch_size: int,
):
maybe_path = Path(model_id_or_path)
if maybe_path.exists():
return f"{maybe_path.stem}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"
else:
return f"{model_id_or_path}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"
engine_dir = Path(engine_dir)
unet_path = os.path.join(
engine_dir,
create_prefix(
model_id_or_path=model_id_or_path,
max_batch_size=stream.trt_unet_batch_size,
min_batch_size=stream.trt_unet_batch_size,
),
"unet.engine",
)
vae_encoder_path = os.path.join(
engine_dir,
create_prefix(
model_id_or_path=model_id_or_path,
max_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
min_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
),
"vae_encoder.engine",
)
vae_decoder_path = os.path.join(
engine_dir,
create_prefix(
model_id_or_path=model_id_or_path,
max_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
min_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
),
"vae_decoder.engine",
)
if not os.path.exists(unet_path):
os.makedirs(os.path.dirname(unet_path), exist_ok=True)
unet_model = UNet(
fp16=True,
device=stream.device,
max_batch_size=stream.trt_unet_batch_size,
min_batch_size=stream.trt_unet_batch_size,
embedding_dim=stream.text_encoder.config.hidden_size,
unet_dim=stream.unet.config.in_channels,
)
compile_unet(
stream.unet,
unet_model,
unet_path + ".onnx",
unet_path + ".opt.onnx",
unet_path,
opt_batch_size=stream.trt_unet_batch_size,
)
if not os.path.exists(vae_decoder_path):
os.makedirs(os.path.dirname(vae_decoder_path), exist_ok=True)
stream.vae.forward = stream.vae.decode
vae_decoder_model = VAE(
device=stream.device,
max_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
min_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
)
compile_vae_decoder(
stream.vae,
vae_decoder_model,
vae_decoder_path + ".onnx",
vae_decoder_path + ".opt.onnx",
vae_decoder_path,
opt_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
)
delattr(stream.vae, "forward")
if not os.path.exists(vae_encoder_path):
os.makedirs(os.path.dirname(vae_encoder_path), exist_ok=True)
vae_encoder = TorchVAEEncoder(stream.vae).to(torch.device("cuda"))
vae_encoder_model = VAEEncoder(
device=stream.device,
max_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
min_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
)
compile_vae_encoder(
vae_encoder,
vae_encoder_model,
vae_encoder_path + ".onnx",
vae_encoder_path + ".opt.onnx",
vae_encoder_path,
opt_batch_size=self.batch_size
if self.mode == "txt2img"
else stream.frame_bff_size,
)
cuda_steram = cuda.Stream()
vae_config = stream.vae.config
vae_dtype = stream.vae.dtype
stream.unet = UNet2DConditionModelEngine(
unet_path, cuda_steram, use_cuda_graph=False
)
stream.vae = AutoencoderKLEngine(
vae_encoder_path,
vae_decoder_path,
cuda_steram,
stream.pipe.vae_scale_factor,
use_cuda_graph=False,
)
setattr(stream.vae, "config", vae_config)
setattr(stream.vae, "dtype", vae_dtype)
gc.collect()
torch.cuda.empty_cache()
print("TensorRT acceleration enabled.")
if acceleration == "sfast":
from streamdiffusion.acceleration.sfast import (
accelerate_with_stable_fast,
)
stream = accelerate_with_stable_fast(stream)
print("StableFast acceleration enabled.")
except Exception:
traceback.print_exc()
print("Acceleration has failed. Falling back to normal mode.")
if seed < 0: # Random seed
seed = np.random.randint(0, 1000000)
stream.prepare(
"",
"",
num_inference_steps=50,
guidance_scale=1.1
if stream.cfg_type in ["full", "self", "initialize"]
else 1.0,
generator=torch.manual_seed(seed),
seed=seed,
)
if self.use_safety_checker:
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
"CompVis/stable-diffusion-safety-checker"
).to(pipe.device)
self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
"openai/clip-vit-base-patch32"
)
self.nsfw_fallback_img = Image.new("RGB", (512, 512), (0, 0, 0))
return stream
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run( iterations: int = 100, model_id_or_path: str = "KBlueLeaf/kohaku-v2.1", lora_dict: Optional[Dict[str, float]] = None, prompt: str = "1girl with brown dog hair, thick glasses, smiling", negative_prompt: str = "bad image , bad quality", use_lcm_lora: bool = True, use_tiny_vae: bool = True, width: int = 512, height: int = 512, warmup: int = 10, acceleration: Literal["none", "xformers", "tensorrt"] = "xformers", device_ids: Optional[List[int]] = None, use_denoising_batch: bool = True, seed: int = 2, )` to solve the following problem:
Initializes the StreamDiffusionWrapper. Parameters ---------- iterations : int, optional The number of iterations to run, by default 100. model_id_or_path : str The model id or path to load. lora_dict : Optional[Dict[str, float]], optional The lora_dict to load, by default None. Keys are the LoRA names and values are the LoRA scales. Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...} prompt : str, optional The prompt to use, by default "1girl with brown dog hair, thick glasses, smiling". negative_prompt : str, optional The negative prompt to use, by default "bad image , bad quality". use_lcm_lora : bool, optional Whether to use LCM-LoRA or not, by default True. use_tiny_vae : bool, optional Whether to use TinyVAE or not, by default True. width : int, optional The width of the image, by default 512. height : int, optional The height of the image, by default 512. warmup : int, optional The number of warmup steps to perform, by default 10. acceleration : Literal["none", "xformers", "tensorrt"], optional The acceleration method, by default "tensorrt". device_ids : Optional[List[int]], optional The device ids to use for DataParallel, by default None. use_denoising_batch : bool, optional Whether to use denoising batch or not, by default True. seed : int, optional The seed, by default 2. if -1, use random seed.
Here is the function:
def run(
    iterations: int = 100,
    model_id_or_path: str = "KBlueLeaf/kohaku-v2.1",
    lora_dict: Optional[Dict[str, float]] = None,
    prompt: str = "1girl with brown dog hair, thick glasses, smiling",
    negative_prompt: str = "bad image , bad quality",
    use_lcm_lora: bool = True,
    use_tiny_vae: bool = True,
    width: int = 512,
    height: int = 512,
    warmup: int = 10,
    acceleration: Literal["none", "xformers", "tensorrt"] = "xformers",
    device_ids: Optional[List[int]] = None,
    use_denoising_batch: bool = True,
    seed: int = 2,
):
    """
    Benchmark img2img throughput of StreamDiffusion.

    Builds a StreamDiffusionWrapper in img2img mode, warms it up, then times
    `iterations` denoising calls with CUDA events and prints the average
    latency (ms) and FPS. Postprocessing of the output tensors is offloaded
    to a separate process so it does not distort the timing loop.

    Parameters
    ----------
    iterations : int, optional
        The number of timed iterations to run, by default 100.
    model_id_or_path : str
        The model id or path to load.
    lora_dict : Optional[Dict[str, float]], optional
        The lora_dict to load, by default None.
        Keys are the LoRA names and values are the LoRA scales.
        Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
    prompt : str, optional
        The prompt to use, by default "1girl with brown dog hair, thick glasses, smiling".
    negative_prompt : str, optional
        The negative prompt to use, by default "bad image , bad quality".
    use_lcm_lora : bool, optional
        Whether to use LCM-LoRA or not, by default True.
    use_tiny_vae : bool, optional
        Whether to use TinyVAE or not, by default True.
    width : int, optional
        The width of the image, by default 512.
    height : int, optional
        The height of the image, by default 512.
    warmup : int, optional
        The number of warmup steps to perform, by default 10.
    acceleration : Literal["none", "xformers", "tensorrt"], optional
        The acceleration method, by default "xformers".
    device_ids : Optional[List[int]], optional
        The device ids to use for DataParallel, by default None.
    use_denoising_batch : bool, optional
        Whether to use denoising batch or not, by default True.
    seed : int, optional
        The seed, by default 2. if -1, use random seed.
    """
    stream = StreamDiffusionWrapper(
        model_id_or_path=model_id_or_path,
        t_index_list=[32, 45],
        lora_dict=lora_dict,
        mode="img2img",
        frame_buffer_size=1,
        width=width,
        height=height,
        warmup=warmup,
        acceleration=acceleration,
        device_ids=device_ids,
        use_lcm_lora=use_lcm_lora,
        use_tiny_vae=use_tiny_vae,
        enable_similar_image_filter=False,
        similar_image_filter_threshold=0.98,
        use_denoising_batch=use_denoising_batch,
        cfg_type="self",  # initialize, full, self , none
        seed=seed,
    )

    stream.prepare(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=50,
        guidance_scale=1.2,
        delta=0.5,
    )

    # Fixed benchmark input: a single image, reused for every iteration.
    image = download_image("https://github.com/ddpn08.png").resize((width, height))
    image_tensor = stream.preprocess_image(image)

    # warmup
    for _ in range(warmup):
        stream.stream(image_tensor)

    # Hand output tensors to a worker process so postprocessing cost does not
    # show up in the timing loop below.
    # NOTE(review): the worker `p` is never joined or signalled to stop, and
    # `results` is empty if iterations == 0 (ZeroDivisionError in the prints)
    # — behavior kept as-is, confirm against `_postprocess_image`'s protocol.
    queue = Queue()
    p = Process(target=_postprocess_image, args=(queue,))
    p.start()

    results = []

    # CUDA events give GPU-side timing in milliseconds.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    for _ in tqdm(range(iterations)):
        start.record()
        out_tensor = stream.stream(image_tensor).cpu()
        queue.put(out_tensor)
        end.record()

        # elapsed_time() requires the events to have completed on the device.
        torch.cuda.synchronize()
        results.append(start.elapsed_time(end))

    print(f"Average time: {sum(results) / len(results)}ms")
    print(f"Average FPS: {1000 / (sum(results) / len(results))}")
8,750 | import io
import os
import sys
from typing import List, Literal, Optional, Dict
import fire
import PIL.Image
import requests
import torch
from tqdm import tqdm
from utils.wrapper import StreamDiffusionWrapper
def download_image(url: str):
    """
    Download an image from a URL and return it as a PIL image.

    Parameters
    ----------
    url : str
        The URL of the image to download.

    Returns
    -------
    PIL.Image.Image
        The downloaded image, lazily decoded from the response body.

    Raises
    ------
    requests.HTTPError
        If the server responds with a 4xx/5xx status code.
    requests.Timeout
        If the server does not respond within the timeout.
    """
    # Bound the request so a stalled server cannot hang the benchmark forever.
    response = requests.get(url, timeout=30)
    # Fail fast on HTTP errors; otherwise an HTML error page would be handed
    # to PIL, which dies with a confusing "cannot identify image file" error.
    response.raise_for_status()
    image = PIL.Image.open(io.BytesIO(response.content))
    return image
class StreamDiffusionWrapper:
    # High-level convenience wrapper around StreamDiffusion: loads a Stable
    # Diffusion pipeline (optionally fused with LCM-LoRA / extra LoRAs /
    # TinyVAE), enables an acceleration backend (xformers / TensorRT /
    # StableFast), and exposes simple txt2img / img2img entry points.

    def __init__(
        self,
        model_id_or_path: str,
        t_index_list: List[int],
        lora_dict: Optional[Dict[str, float]] = None,
        mode: Literal["img2img", "txt2img"] = "img2img",
        output_type: Literal["pil", "pt", "np", "latent"] = "pil",
        lcm_lora_id: Optional[str] = None,
        vae_id: Optional[str] = None,
        device: Literal["cpu", "cuda"] = "cuda",
        dtype: torch.dtype = torch.float16,
        frame_buffer_size: int = 1,
        width: int = 512,
        height: int = 512,
        warmup: int = 10,
        acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
        do_add_noise: bool = True,
        device_ids: Optional[List[int]] = None,
        use_lcm_lora: bool = True,
        use_tiny_vae: bool = True,
        enable_similar_image_filter: bool = False,
        similar_image_filter_threshold: float = 0.98,
        similar_image_filter_max_skip_frame: int = 10,
        use_denoising_batch: bool = True,
        cfg_type: Literal["none", "full", "self", "initialize"] = "self",
        seed: int = 2,
        use_safety_checker: bool = False,
        engine_dir: Optional[Union[str, Path]] = "engines",
    ):
        """
        Initializes the StreamDiffusionWrapper.

        Parameters
        ----------
        model_id_or_path : str
            The model id or path to load.
        t_index_list : List[int]
            The t_index_list to use for inference.
        lora_dict : Optional[Dict[str, float]], optional
            The lora_dict to load, by default None.
            Keys are the LoRA names and values are the LoRA scales.
            Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
        mode : Literal["img2img", "txt2img"], optional
            txt2img or img2img, by default "img2img".
        output_type : Literal["pil", "pt", "np", "latent"], optional
            The output type of image, by default "pil".
        lcm_lora_id : Optional[str], optional
            The lcm_lora_id to load, by default None.
            If None, the default LCM-LoRA
            ("latent-consistency/lcm-lora-sdv1-5") will be used.
        vae_id : Optional[str], optional
            The vae_id to load, by default None.
            If None, the default TinyVAE
            ("madebyollin/taesd") will be used.
        device : Literal["cpu", "cuda"], optional
            The device to use for inference, by default "cuda".
        dtype : torch.dtype, optional
            The dtype for inference, by default torch.float16.
        frame_buffer_size : int, optional
            The frame buffer size for denoising batch, by default 1.
        width : int, optional
            The width of the image, by default 512.
        height : int, optional
            The height of the image, by default 512.
        warmup : int, optional
            The number of warmup steps to perform, by default 10.
        acceleration : Literal["none", "xformers", "tensorrt"], optional
            The acceleration method, by default "tensorrt".
        do_add_noise : bool, optional
            Whether to add noise for following denoising steps or not,
            by default True.
        device_ids : Optional[List[int]], optional
            The device ids to use for DataParallel, by default None.
        use_lcm_lora : bool, optional
            Whether to use LCM-LoRA or not, by default True.
        use_tiny_vae : bool, optional
            Whether to use TinyVAE or not, by default True.
        enable_similar_image_filter : bool, optional
            Whether to enable similar image filter or not,
            by default False.
        similar_image_filter_threshold : float, optional
            The threshold for similar image filter, by default 0.98.
        similar_image_filter_max_skip_frame : int, optional
            The max skip frame for similar image filter, by default 10.
        use_denoising_batch : bool, optional
            Whether to use denoising batch or not, by default True.
        cfg_type : Literal["none", "full", "self", "initialize"],
        optional
            The cfg_type for img2img mode, by default "self".
            You cannot use anything other than "none" for txt2img mode.
        seed : int, optional
            The seed, by default 2.
        use_safety_checker : bool, optional
            Whether to use safety checker or not, by default False.
        engine_dir : Optional[Union[str, Path]], optional
            Directory where compiled TensorRT engines are cached,
            by default "engines".
        """
        # SD-Turbo models are detected purely from the name and take a
        # different txt2img path below.
        self.sd_turbo = "turbo" in model_id_or_path

        # Validate mode/cfg/batch combinations up front so failures happen
        # before any (slow) model loading.
        if mode == "txt2img":
            if cfg_type != "none":
                raise ValueError(
                    f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
                )
            if use_denoising_batch and frame_buffer_size > 1:
                if not self.sd_turbo:
                    raise ValueError(
                        "txt2img mode cannot use denoising batch with frame_buffer_size > 1."
                    )

        if mode == "img2img":
            if not use_denoising_batch:
                raise NotImplementedError(
                    "img2img mode must use denoising batch for now."
                )

        self.device = device
        self.dtype = dtype
        self.width = width
        self.height = height
        self.mode = mode
        self.output_type = output_type
        self.frame_buffer_size = frame_buffer_size
        # With denoising batch, every t-index is denoised in parallel, so the
        # effective batch is |t_index_list| * frame_buffer_size.
        self.batch_size = (
            len(t_index_list) * frame_buffer_size
            if use_denoising_batch
            else frame_buffer_size
        )

        self.use_denoising_batch = use_denoising_batch
        self.use_safety_checker = use_safety_checker

        self.stream: StreamDiffusion = self._load_model(
            model_id_or_path=model_id_or_path,
            lora_dict=lora_dict,
            lcm_lora_id=lcm_lora_id,
            vae_id=vae_id,
            t_index_list=t_index_list,
            acceleration=acceleration,
            warmup=warmup,
            do_add_noise=do_add_noise,
            use_lcm_lora=use_lcm_lora,
            use_tiny_vae=use_tiny_vae,
            cfg_type=cfg_type,
            seed=seed,
            engine_dir=engine_dir,
        )

        if device_ids is not None:
            self.stream.unet = torch.nn.DataParallel(
                self.stream.unet, device_ids=device_ids
            )

        if enable_similar_image_filter:
            self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)

    def prepare(
        self,
        prompt: str,
        negative_prompt: str = "",
        num_inference_steps: int = 50,
        guidance_scale: float = 1.2,
        delta: float = 1.0,
    ) -> None:
        """
        Prepares the model for inference.

        Parameters
        ----------
        prompt : str
            The prompt to generate images from.
        negative_prompt : str, optional
            The negative prompt to use, by default "".
        num_inference_steps : int, optional
            The number of inference steps to perform, by default 50.
        guidance_scale : float, optional
            The guidance scale to use, by default 1.2.
        delta : float, optional
            The delta multiplier of virtual residual noise,
            by default 1.0.
        """
        self.stream.prepare(
            prompt,
            negative_prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            delta=delta,
        )

    def __call__(
        self,
        image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
        prompt: Optional[str] = None,
    ) -> Union[Image.Image, List[Image.Image]]:
        """
        Performs img2img or txt2img based on the mode.

        Parameters
        ----------
        image : Optional[Union[str, Image.Image, torch.Tensor]]
            The image to generate from.
        prompt : Optional[str]
            The prompt to generate images from.

        Returns
        -------
        Union[Image.Image, List[Image.Image]]
            The generated image.
        """
        if self.mode == "img2img":
            return self.img2img(image, prompt)
        else:
            return self.txt2img(prompt)

    def txt2img(
        self, prompt: Optional[str] = None
    ) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
        """
        Performs txt2img.

        Parameters
        ----------
        prompt : Optional[str]
            The prompt to generate images from. If given, replaces the
            prompt set by :meth:`prepare`.

        Returns
        -------
        Union[Image.Image, List[Image.Image]]
            The generated image.
        """
        if prompt is not None:
            self.stream.update_prompt(prompt)

        # SD-Turbo uses a dedicated single-step path.
        if self.sd_turbo:
            image_tensor = self.stream.txt2img_sd_turbo(self.batch_size)
        else:
            image_tensor = self.stream.txt2img(self.frame_buffer_size)
        image = self.postprocess_image(image_tensor, output_type=self.output_type)

        if self.use_safety_checker:
            safety_checker_input = self.feature_extractor(
                image, return_tensors="pt"
            ).to(self.device)
            _, has_nsfw_concept = self.safety_checker(
                images=image_tensor.to(self.dtype),
                clip_input=safety_checker_input.pixel_values.to(self.dtype),
            )
            # Replace flagged output with an all-black fallback image.
            image = self.nsfw_fallback_img if has_nsfw_concept[0] else image

        return image

    def img2img(
        self, image: Union[str, Image.Image, torch.Tensor], prompt: Optional[str] = None
    ) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
        """
        Performs img2img.

        Parameters
        ----------
        image : Union[str, Image.Image, torch.Tensor]
            The image to generate from. Strings are treated as file paths.
        prompt : Optional[str]
            The prompt to generate images from. If given, replaces the
            prompt set by :meth:`prepare`.

        Returns
        -------
        Image.Image
            The generated image.
        """
        if prompt is not None:
            self.stream.update_prompt(prompt)

        if isinstance(image, str) or isinstance(image, Image.Image):
            image = self.preprocess_image(image)

        image_tensor = self.stream(image)
        image = self.postprocess_image(image_tensor, output_type=self.output_type)

        if self.use_safety_checker:
            safety_checker_input = self.feature_extractor(
                image, return_tensors="pt"
            ).to(self.device)
            _, has_nsfw_concept = self.safety_checker(
                images=image_tensor.to(self.dtype),
                clip_input=safety_checker_input.pixel_values.to(self.dtype),
            )
            # Replace flagged output with an all-black fallback image.
            image = self.nsfw_fallback_img if has_nsfw_concept[0] else image

        return image

    def preprocess_image(self, image: Union[str, Image.Image]) -> torch.Tensor:
        """
        Preprocesses the image: loads/converts to RGB, resizes to the
        configured width/height, and moves it to the target device/dtype.

        Parameters
        ----------
        image : Union[str, Image.Image, torch.Tensor]
            The image to preprocess. Strings are treated as file paths.

        Returns
        -------
        torch.Tensor
            The preprocessed image.
        """
        if isinstance(image, str):
            image = Image.open(image).convert("RGB").resize((self.width, self.height))
        if isinstance(image, Image.Image):
            image = image.convert("RGB").resize((self.width, self.height))

        return self.stream.image_processor.preprocess(
            image, self.height, self.width
        ).to(device=self.device, dtype=self.dtype)

    def postprocess_image(
        self, image_tensor: torch.Tensor, output_type: str = "pil"
    ) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
        """
        Postprocesses the image.

        Parameters
        ----------
        image_tensor : torch.Tensor
            The image tensor to postprocess.
        output_type : str, optional
            The output type ("pil", "pt", "np" or "latent"), by default "pil".

        Returns
        -------
        Union[Image.Image, List[Image.Image]]
            The postprocessed image: a list of frames when
            frame_buffer_size > 1, otherwise a single image.
        """
        if self.frame_buffer_size > 1:
            return postprocess_image(image_tensor.cpu(), output_type=output_type)
        else:
            return postprocess_image(image_tensor.cpu(), output_type=output_type)[0]

    def _load_model(
        self,
        model_id_or_path: str,
        t_index_list: List[int],
        lora_dict: Optional[Dict[str, float]] = None,
        lcm_lora_id: Optional[str] = None,
        vae_id: Optional[str] = None,
        acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
        warmup: int = 10,
        do_add_noise: bool = True,
        use_lcm_lora: bool = True,
        use_tiny_vae: bool = True,
        cfg_type: Literal["none", "full", "self", "initialize"] = "self",
        seed: int = 2,
        engine_dir: Optional[Union[str, Path]] = "engines",
    ) -> StreamDiffusion:
        """
        Loads the model.

        This method does the following:

        1. Loads the model from the model_id_or_path.
        2. Loads and fuses the LCM-LoRA model from the lcm_lora_id if needed.
        3. Loads the VAE model from the vae_id if needed.
        4. Enables acceleration if needed.
        5. Prepares the model for inference.
        6. Load the safety checker if needed.

        Parameters
        ----------
        model_id_or_path : str
            The model id or path to load.
        t_index_list : List[int]
            The t_index_list to use for inference.
        lora_dict : Optional[Dict[str, float]], optional
            The lora_dict to load, by default None.
            Keys are the LoRA names and values are the LoRA scales.
            Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
        lcm_lora_id : Optional[str], optional
            The lcm_lora_id to load, by default None.
        vae_id : Optional[str], optional
            The vae_id to load, by default None.
        acceleration : Literal["none", "xformers", "sfast", "tensorrt"], optional
            The acceleration method, by default "tensorrt".
        warmup : int, optional
            The number of warmup steps to perform, by default 10.
        do_add_noise : bool, optional
            Whether to add noise for following denoising steps or not,
            by default True.
        use_lcm_lora : bool, optional
            Whether to use LCM-LoRA or not, by default True.
        use_tiny_vae : bool, optional
            Whether to use TinyVAE or not, by default True.
        cfg_type : Literal["none", "full", "self", "initialize"],
        optional
            The cfg_type for img2img mode, by default "self".
            You cannot use anything other than "none" for txt2img mode.
        seed : int, optional
            The seed, by default 2.
        engine_dir : Optional[Union[str, Path]], optional
            Directory where compiled TensorRT engines are cached,
            by default "engines".

        Returns
        -------
        StreamDiffusion
            The loaded model.
        """
        try:  # Load from local directory
            pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(
                model_id_or_path,
            ).to(device=self.device, dtype=self.dtype)

        except ValueError:  # Load from huggingface
            pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_single_file(
                model_id_or_path,
            ).to(device=self.device, dtype=self.dtype)
        except Exception:  # No model found
            traceback.print_exc()
            print("Model load has failed. Doesn't exist.")
            exit()

        stream = StreamDiffusion(
            pipe=pipe,
            t_index_list=t_index_list,
            torch_dtype=self.dtype,
            width=self.width,
            height=self.height,
            do_add_noise=do_add_noise,
            frame_buffer_size=self.frame_buffer_size,
            use_denoising_batch=self.use_denoising_batch,
            cfg_type=cfg_type,
        )
        # LCM-LoRA / extra LoRAs are fused into the weights (not applied at
        # runtime); SD-Turbo models skip this entirely.
        if not self.sd_turbo:
            if use_lcm_lora:
                if lcm_lora_id is not None:
                    stream.load_lcm_lora(
                        pretrained_model_name_or_path_or_dict=lcm_lora_id
                    )
                else:
                    stream.load_lcm_lora()
                stream.fuse_lora()

            if lora_dict is not None:
                for lora_name, lora_scale in lora_dict.items():
                    stream.load_lora(lora_name)
                    stream.fuse_lora(lora_scale=lora_scale)
                    print(f"Use LoRA: {lora_name} in weights {lora_scale}")

        if use_tiny_vae:
            if vae_id is not None:
                stream.vae = AutoencoderTiny.from_pretrained(vae_id).to(
                    device=pipe.device, dtype=pipe.dtype
                )
            else:
                stream.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd").to(
                    device=pipe.device, dtype=pipe.dtype
                )

        # Acceleration is best-effort: any failure falls through to the
        # un-accelerated pipeline (see the except at the end of this try).
        try:
            if acceleration == "xformers":
                stream.pipe.enable_xformers_memory_efficient_attention()
            if acceleration == "tensorrt":
                from polygraphy import cuda
                from streamdiffusion.acceleration.tensorrt import (
                    TorchVAEEncoder,
                    compile_unet,
                    compile_vae_decoder,
                    compile_vae_encoder,
                )
                from streamdiffusion.acceleration.tensorrt.engine import (
                    AutoencoderKLEngine,
                    UNet2DConditionModelEngine,
                )
                from streamdiffusion.acceleration.tensorrt.models import (
                    VAE,
                    UNet,
                    VAEEncoder,
                )

                def create_prefix(
                    model_id_or_path: str,
                    max_batch_size: int,
                    min_batch_size: int,
                ):
                    # Cache key for compiled engines: model name plus every
                    # option that changes the compiled graph.
                    maybe_path = Path(model_id_or_path)
                    if maybe_path.exists():
                        return f"{maybe_path.stem}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"
                    else:
                        return f"{model_id_or_path}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}"

                engine_dir = Path(engine_dir)
                unet_path = os.path.join(
                    engine_dir,
                    create_prefix(
                        model_id_or_path=model_id_or_path,
                        max_batch_size=stream.trt_unet_batch_size,
                        min_batch_size=stream.trt_unet_batch_size,
                    ),
                    "unet.engine",
                )
                vae_encoder_path = os.path.join(
                    engine_dir,
                    create_prefix(
                        model_id_or_path=model_id_or_path,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    ),
                    "vae_encoder.engine",
                )
                vae_decoder_path = os.path.join(
                    engine_dir,
                    create_prefix(
                        model_id_or_path=model_id_or_path,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    ),
                    "vae_decoder.engine",
                )

                # Compile each engine only if its cached file is missing.
                if not os.path.exists(unet_path):
                    os.makedirs(os.path.dirname(unet_path), exist_ok=True)
                    unet_model = UNet(
                        fp16=True,
                        device=stream.device,
                        max_batch_size=stream.trt_unet_batch_size,
                        min_batch_size=stream.trt_unet_batch_size,
                        embedding_dim=stream.text_encoder.config.hidden_size,
                        unet_dim=stream.unet.config.in_channels,
                    )
                    compile_unet(
                        stream.unet,
                        unet_model,
                        unet_path + ".onnx",
                        unet_path + ".opt.onnx",
                        unet_path,
                        opt_batch_size=stream.trt_unet_batch_size,
                    )

                if not os.path.exists(vae_decoder_path):
                    os.makedirs(os.path.dirname(vae_decoder_path), exist_ok=True)
                    # ONNX export traces `forward`; temporarily point it at the
                    # decoder so only the decode path is exported.
                    stream.vae.forward = stream.vae.decode
                    vae_decoder_model = VAE(
                        device=stream.device,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                    compile_vae_decoder(
                        stream.vae,
                        vae_decoder_model,
                        vae_decoder_path + ".onnx",
                        vae_decoder_path + ".opt.onnx",
                        vae_decoder_path,
                        opt_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                    # Remove the instance attribute so the class-level
                    # forward is visible again.
                    delattr(stream.vae, "forward")

                if not os.path.exists(vae_encoder_path):
                    os.makedirs(os.path.dirname(vae_encoder_path), exist_ok=True)
                    vae_encoder = TorchVAEEncoder(stream.vae).to(torch.device("cuda"))
                    vae_encoder_model = VAEEncoder(
                        device=stream.device,
                        max_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                        min_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )
                    compile_vae_encoder(
                        vae_encoder,
                        vae_encoder_model,
                        vae_encoder_path + ".onnx",
                        vae_encoder_path + ".opt.onnx",
                        vae_encoder_path,
                        opt_batch_size=self.batch_size
                        if self.mode == "txt2img"
                        else stream.frame_bff_size,
                    )

                # NOTE: "cuda_steram" is a pre-existing (consistent) typo for
                # "cuda_stream"; kept as-is since it is purely local.
                cuda_steram = cuda.Stream()

                # Preserve config/dtype: the engine replacements below do not
                # carry them, but downstream code reads them off stream.vae.
                vae_config = stream.vae.config
                vae_dtype = stream.vae.dtype

                # Swap the PyTorch modules for the compiled TensorRT engines.
                stream.unet = UNet2DConditionModelEngine(
                    unet_path, cuda_steram, use_cuda_graph=False
                )
                stream.vae = AutoencoderKLEngine(
                    vae_encoder_path,
                    vae_decoder_path,
                    cuda_steram,
                    stream.pipe.vae_scale_factor,
                    use_cuda_graph=False,
                )
                setattr(stream.vae, "config", vae_config)
                setattr(stream.vae, "dtype", vae_dtype)

                # Free the PyTorch modules that were just replaced.
                gc.collect()
                torch.cuda.empty_cache()

                print("TensorRT acceleration enabled.")
            if acceleration == "sfast":
                from streamdiffusion.acceleration.sfast import (
                    accelerate_with_stable_fast,
                )

                stream = accelerate_with_stable_fast(stream)
                print("StableFast acceleration enabled.")
        except Exception:
            traceback.print_exc()
            print("Acceleration has failed. Falling back to normal mode.")

        if seed < 0:  # Random seed
            seed = np.random.randint(0, 1000000)

        # Prime the stream with empty prompts; real prompts are supplied
        # later via prepare()/update_prompt().
        stream.prepare(
            "",
            "",
            num_inference_steps=50,
            guidance_scale=1.1
            if stream.cfg_type in ["full", "self", "initialize"]
            else 1.0,
            generator=torch.manual_seed(seed),
            seed=seed,
        )

        if self.use_safety_checker:
            from transformers import CLIPFeatureExtractor
            from diffusers.pipelines.stable_diffusion.safety_checker import (
                StableDiffusionSafetyChecker,
            )

            self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
                "CompVis/stable-diffusion-safety-checker"
            ).to(pipe.device)
            self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
                "openai/clip-vit-base-patch32"
            )
            # All-black image returned in place of NSFW-flagged outputs.
            self.nsfw_fallback_img = Image.new("RGB", (512, 512), (0, 0, 0))

        return stream
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run( iterations: int = 100, model_id_or_path: str = "KBlueLeaf/kohaku-v2.1", lora_dict: Optional[Dict[str, float]] = None, prompt: str = "1girl with brown dog hair, thick glasses, smiling", negative_prompt: str = "bad image , bad quality", use_lcm_lora: bool = True, use_tiny_vae: bool = True, width: int = 512, height: int = 512, warmup: int = 10, acceleration: Literal["none", "xformers", "tensorrt"] = "xformers", device_ids: Optional[List[int]] = None, use_denoising_batch: bool = True, seed: int = 2, )` to solve the following problem:
Initializes the StreamDiffusionWrapper. Parameters ---------- iterations : int, optional The number of iterations to run, by default 100. model_id_or_path : str The model id or path to load. lora_dict : Optional[Dict[str, float]], optional The lora_dict to load, by default None. Keys are the LoRA names and values are the LoRA scales. Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...} prompt : str, optional The prompt to use, by default "1girl with brown dog hair, thick glasses, smiling". negative_prompt : str, optional The negative prompt to use, by default "bad image , bad quality". use_lcm_lora : bool, optional Whether to use LCM-LoRA or not, by default True. use_tiny_vae : bool, optional Whether to use TinyVAE or not, by default True. width : int, optional The width of the image, by default 512. height : int, optional The height of the image, by default 512. warmup : int, optional The number of warmup steps to perform, by default 10. acceleration : Literal["none", "xformers", "tensorrt"], optional The acceleration method, by default "tensorrt". device_ids : Optional[List[int]], optional The device ids to use for DataParallel, by default None. use_denoising_batch : bool, optional Whether to use denoising batch or not, by default True. seed : int, optional The seed, by default 2. if -1, use random seed.
Here is the function:
def run(
    iterations: int = 100,
    model_id_or_path: str = "KBlueLeaf/kohaku-v2.1",
    lora_dict: Optional[Dict[str, float]] = None,
    prompt: str = "1girl with brown dog hair, thick glasses, smiling",
    negative_prompt: str = "bad image , bad quality",
    use_lcm_lora: bool = True,
    use_tiny_vae: bool = True,
    width: int = 512,
    height: int = 512,
    warmup: int = 10,
    acceleration: Literal["none", "xformers", "tensorrt"] = "xformers",
    device_ids: Optional[List[int]] = None,
    use_denoising_batch: bool = True,
    seed: int = 2,
):
    """
    Benchmark img2img throughput of a StreamDiffusionWrapper.

    Parameters
    ----------
    iterations : int, optional
        The number of timed iterations to run, by default 100.
    model_id_or_path : str
        The model id or path to load.
    lora_dict : Optional[Dict[str, float]], optional
        The lora_dict to load, by default None.
        Keys are the LoRA names and values are the LoRA scales.
        Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
    prompt : str, optional
        The prompt to use, by default "1girl with brown dog hair, thick glasses, smiling".
    negative_prompt : str, optional
        The negative prompt to use, by default "bad image , bad quality".
    use_lcm_lora : bool, optional
        Whether to use LCM-LoRA or not, by default True.
    use_tiny_vae : bool, optional
        Whether to use TinyVAE or not, by default True.
    width : int, optional
        The width of the image, by default 512.
    height : int, optional
        The height of the image, by default 512.
    warmup : int, optional
        The number of warmup steps to perform, by default 10.
    acceleration : Literal["none", "xformers", "tensorrt"], optional
        The acceleration method, by default "xformers".
        (Doc fix: previously documented as "tensorrt", contradicting the
        actual default in the signature.)
    device_ids : Optional[List[int]], optional
        The device ids to use for DataParallel, by default None.
    use_denoising_batch : bool, optional
        Whether to use denoising batch or not, by default True.
    seed : int, optional
        The seed, by default 2. if -1, use random seed.
    """
    # Hoisted from mid-function (it used to be imported between the summary
    # prints); kept function-local so the module imports without numpy.
    import numpy as np

    stream = StreamDiffusionWrapper(
        model_id_or_path=model_id_or_path,
        t_index_list=[32, 45],
        lora_dict=lora_dict,
        mode="img2img",
        frame_buffer_size=1,
        width=width,
        height=height,
        warmup=warmup,
        acceleration=acceleration,
        device_ids=device_ids,
        use_lcm_lora=use_lcm_lora,
        use_tiny_vae=use_tiny_vae,
        enable_similar_image_filter=False,
        similar_image_filter_threshold=0.98,
        use_denoising_batch=use_denoising_batch,
        cfg_type="initialize",  # initialize, full, self , none
        seed=seed,
    )
    stream.prepare(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=50,
        guidance_scale=1.4,
        delta=0.5,
    )

    downloaded_image = download_image("https://github.com/ddpn08.png").resize(
        (width, height)
    )

    # Warmup passes are excluded from the timing statistics below.
    for _ in range(warmup):
        image_tensor = stream.preprocess_image(downloaded_image)
        stream(image=image_tensor)

    results = []
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    for _ in tqdm(range(iterations)):
        start.record()
        image_tensor = stream.preprocess_image(downloaded_image)
        stream(image=image_tensor)
        end.record()
        # elapsed_time() requires both CUDA events to have completed.
        torch.cuda.synchronize()
        results.append(start.elapsed_time(end))

    # Hoist the mean out of the f-strings (was computed twice before).
    avg_ms = sum(results) / len(results)
    print(f"Average time: {avg_ms}ms")
    print(f"Average FPS: {1000 / avg_ms}")

    fps_arr = 1000 / np.array(results)
    print(f"Max FPS: {np.max(fps_arr)}")
    print(f"Min FPS: {np.min(fps_arr)}")
    print(f"Std: {np.std(fps_arr)}")
8,751 | from importlib import import_module
from types import ModuleType
from typing import Dict, Any
from pydantic import BaseModel as PydanticBaseModel, Field
from PIL import Image
import io
def get_pipeline_class(pipeline_name: str) -> type:
    """Dynamically import ``pipelines.<pipeline_name>`` and return its ``Pipeline`` class.

    BUG FIX: the return annotation previously said ``ModuleType``, but the
    function returns the ``Pipeline`` class object found inside that module.

    Parameters
    ----------
    pipeline_name : str
        Name of the submodule under the ``pipelines`` package.

    Raises
    ------
    ValueError
        If the module cannot be imported or does not define ``Pipeline``.
    """
    try:
        module = import_module(f"pipelines.{pipeline_name}")
    except ModuleNotFoundError as exc:
        # Chain the original import failure for easier debugging.
        raise ValueError(f"Pipeline {pipeline_name} module not found") from exc
    pipeline_class = getattr(module, "Pipeline", None)
    if pipeline_class is None:
        raise ValueError(f"'Pipeline' class not found in module '{pipeline_name}'.")
    return pipeline_class
8,752 | from importlib import import_module
from types import ModuleType
from typing import Dict, Any
from pydantic import BaseModel as PydanticBaseModel, Field
from PIL import Image
import io
def bytes_to_pil(image_bytes: bytes) -> Image.Image:
    """Decode raw encoded image bytes into a PIL image object."""
    buffer = io.BytesIO(image_bytes)
    return Image.open(buffer)
8,753 | from importlib import import_module
from types import ModuleType
from typing import Dict, Any
from pydantic import BaseModel as PydanticBaseModel, Field
from PIL import Image
import io
def pil_to_frame(image: Image.Image) -> bytes:
    """Encode a PIL image as one JPEG frame of a multipart/x-mixed-replace stream."""
    buffer = io.BytesIO()
    image.save(buffer, format="JPEG")
    payload = buffer.getvalue()
    header = (
        b"--frame\r\n"
        b"Content-Type: image/jpeg\r\n"
        + f"Content-Length: {len(payload)}\r\n\r\n".encode()
    )
    return header + payload + b"\r\n"
8,754 | from importlib import import_module
from types import ModuleType
from typing import Dict, Any
from pydantic import BaseModel as PydanticBaseModel, Field
from PIL import Image
import io
def is_firefox(user_agent: str) -> bool:
    """Report whether the given User-Agent header string identifies Firefox."""
    browser_token = "Firefox"  # exact, case-sensitive UA product token
    return browser_token in user_agent
8,755 | import os
import sys
import threading
import time
import tkinter as tk
from multiprocessing import Queue
from typing import List
from PIL import Image, ImageTk
from streamdiffusion.image_utils import postprocess_image
def _receive_images(
    queue: Queue, fps_queue: Queue, label: tk.Label, fps_label: tk.Label
) -> None:
    """
    Continuously receive images from a queue and update the labels.

    Intended to run forever on a background thread (see `receive_images`);
    it only returns on KeyboardInterrupt.

    Parameters
    ----------
    queue : Queue
        The queue to receive images from.
    fps_queue : Queue
        The queue to put the calculated fps.
    label : tk.Label
        The label to update with images.
    fps_label : tk.Label
        The label to show fps.
    """
    while True:
        try:
            if not queue.empty():
                # Hand the image update to the Tk event loop via after(0, ...)
                # rather than touching the widget from this worker thread.
                label.after(
                    0,
                    update_image,
                    postprocess_image(queue.get(block=False), output_type="pil")[0],
                    label,
                )
            if not fps_queue.empty():
                # NOTE(review): unlike the image update above, this configures
                # the widget directly from the worker thread — confirm safe.
                fps_label.config(text=f"FPS: {fps_queue.get(block=False):.2f}")
            # Short sleep keeps this polling loop from spinning at 100% CPU.
            time.sleep(0.0005)
        except KeyboardInterrupt:
            return
The provided code snippet includes necessary dependencies for implementing the `receive_images` function. Write a Python function `def receive_images(queue: Queue, fps_queue: Queue) -> None` to solve the following problem:
Setup the Tkinter window and start the thread to receive images. Parameters ---------- queue : Queue The queue to receive images from. fps_queue : Queue The queue to put the calculated fps.
Here is the function:
def receive_images(queue: Queue, fps_queue: Queue) -> None:
    """
    Build the Tkinter viewer window and spawn the image-receiving thread.

    Parameters
    ----------
    queue : Queue
        The queue to receive images from.
    fps_queue : Queue
        The queue to read the calculated fps from.
    """
    root = tk.Tk()
    root.title("Image Viewer")

    image_label = tk.Label(root)
    fps_label = tk.Label(root, text="FPS: 0")
    image_label.grid(column=0)
    fps_label.grid(column=1)

    def handle_close():
        print("window closed")
        root.quit()  # stop event loop
        return

    # Daemon thread: dies with the process once the main loop exits.
    worker = threading.Thread(
        target=_receive_images,
        args=(queue, fps_queue, image_label, fps_label),
        daemon=True,
    )
    worker.start()

    try:
        root.protocol("WM_DELETE_WINDOW", handle_close)
        root.mainloop()
    except KeyboardInterrupt:
        return
8,756 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import time
from albert import fine_tuning_utils
from albert import modeling
from albert import squad_utils
import six
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
FLAGS = flags.FLAGS
The provided code snippet includes necessary dependencies for implementing the `validate_flags_or_throw` function. Write a Python function `def validate_flags_or_throw(albert_config)` to solve the following problem:
Validate the input FLAGS or throw an exception.
Here is the function:
def validate_flags_or_throw(albert_config):
    """Validate the input FLAGS or throw an exception.

    Args:
      albert_config: AlbertConfig whose `max_position_embeddings` bounds the
        allowed `max_seq_length`.

    Raises:
      ValueError: if the flag combination is missing required values or is
        internally inconsistent.
    """
    if not FLAGS.do_train and not FLAGS.do_predict and not FLAGS.export_dir:
        # BUG FIX: the two concatenated fragments previously joined without a
        # space, producing "...`export_dir`must be True."
        err_msg = ("At least one of `do_train` or `do_predict` or `export_dir` "
                   "must be True.")
        raise ValueError(err_msg)

    if FLAGS.do_train:
        if not FLAGS.train_file:
            raise ValueError(
                "If `do_train` is True, then `train_file` must be specified.")
    if FLAGS.do_predict:
        if not FLAGS.predict_file:
            raise ValueError(
                "If `do_predict` is True, then `predict_file` must be specified.")
        if not FLAGS.predict_feature_file:
            raise ValueError(
                "If `do_predict` is True, then `predict_feature_file` must be "
                "specified.")
        if not FLAGS.predict_feature_left_file:
            raise ValueError(
                "If `do_predict` is True, then `predict_feature_left_file` must be "
                "specified.")

    if FLAGS.max_seq_length > albert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the ALBERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, albert_config.max_position_embeddings))

    # Queries must leave room for at least [CLS], [SEP], [SEP] tokens.
    if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
        raise ValueError(
            "The max_seq_length (%d) must be greater than max_query_length "
            "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
8,757 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import time
from albert import fine_tuning_utils
from albert import modeling
from albert import squad_utils
import six
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
The provided code snippet includes necessary dependencies for implementing the `build_squad_serving_input_fn` function. Write a Python function `def build_squad_serving_input_fn(seq_length)` to solve the following problem:
Builds a serving input fn for raw input.
Here is the function:
def build_squad_serving_input_fn(seq_length):
    """Builds a serving input fn for raw input.

    Args:
      seq_length: int, the fixed sequence length the exported model expects.

    Returns:
      A zero-argument serving_input_fn for Estimator export.
    """

    def _seq_serving_input_fn():
        # BUG FIX: docstring previously said "raw images" (copy-paste error);
        # these inputs are token-id features, not image pixels.
        """Serving input fn for raw token-id features."""
        input_ids = tf.placeholder(
            shape=[1, seq_length], name="input_ids", dtype=tf.int32)
        input_mask = tf.placeholder(
            shape=[1, seq_length], name="input_mask", dtype=tf.int32)
        segment_ids = tf.placeholder(
            shape=[1, seq_length], name="segment_ids", dtype=tf.int32)
        inputs = {
            "input_ids": input_ids,
            "input_mask": input_mask,
            "segment_ids": segment_ids
        }
        # Features and receiver tensors are the same raw placeholders.
        return tf_estimator.export.ServingInputReceiver(features=inputs,
                                                        receiver_tensors=inputs)

    return _seq_serving_input_fn
8,758 | from albert import modeling
from albert import tokenization
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
def _create_model_from_hub(hub_module, is_training, input_ids, input_mask,
                           segment_ids):
    """Creates an ALBERT model from TF-Hub."""
    # The "train" graph variant enables training-only behavior in the module.
    graph_tags = {"train"} if is_training else set()
    module = hub.Module(hub_module, tags=graph_tags, trainable=True)
    outputs = module(
        inputs=dict(
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
        ),
        signature="tokens",
        as_dict=True,
    )
    return (outputs["pooled_output"], outputs["sequence_output"])
def _create_model_from_scratch(albert_config, is_training, input_ids,
                               input_mask, segment_ids, use_one_hot_embeddings,
                               use_einsum):
    """Creates an ALBERT model from scratch/config."""
    model_kwargs = dict(
        config=albert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings,
        use_einsum=use_einsum,
    )
    albert = modeling.AlbertModel(**model_kwargs)
    return albert.get_pooled_output(), albert.get_sequence_output()
The provided code snippet includes necessary dependencies for implementing the `create_albert` function. Write a Python function `def create_albert(albert_config, is_training, input_ids, input_mask, segment_ids, use_one_hot_embeddings, use_einsum, hub_module)` to solve the following problem:
Creates an ALBERT, either from TF-Hub or from scratch.
Here is the function:
def create_albert(albert_config, is_training, input_ids, input_mask,
                  segment_ids, use_one_hot_embeddings, use_einsum, hub_module):
    """Creates an ALBERT, either from TF-Hub or from scratch."""
    if not hub_module:
        tf.logging.info("creating model from albert_config")
        return _create_model_from_scratch(albert_config, is_training, input_ids,
                                          input_mask, segment_ids,
                                          use_one_hot_embeddings, use_einsum)
    tf.logging.info("creating model from hub_module: %s", hub_module)
    return _create_model_from_hub(hub_module, is_training, input_ids,
                                  input_mask, segment_ids)
8,759 | from albert import modeling
from albert import tokenization
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
The provided code snippet includes necessary dependencies for implementing the `create_vocab` function. Write a Python function `def create_vocab(vocab_file, do_lower_case, spm_model_file, hub_module)` to solve the following problem:
Creates a vocab, either from vocab file or from a TF-Hub module.
Here is the function:
def create_vocab(vocab_file, do_lower_case, spm_model_file, hub_module):
    """Creates a vocab, either from vocab file or from a TF-Hub module.

    Args:
      vocab_file: path to a WordPiece vocab file (used on the scratch path).
      do_lower_case: whether to lowercase input (scratch path only).
      spm_model_file: optional path to a SentencePiece model.
      hub_module: optional TF-Hub module spec; when set, the tokenizer is
        derived from the hub module instead of the local files.

    Returns:
      A `tokenization.FullTokenizer`.
    """
    if hub_module:
        # Idiom fix: was `True if spm_model_file else False`.
        use_spm = bool(spm_model_file)
        return tokenization.FullTokenizer.from_hub_module(
            hub_module=hub_module, use_spm=use_spm)
    return tokenization.FullTokenizer.from_scratch(
        vocab_file=vocab_file, do_lower_case=do_lower_case,
        spm_model_file=spm_model_file)
8,760 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
from albert import tokenization
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
def create_int_feature(values):
    """Wrap an iterable of ints in a `tf.train.Feature` (Int64List)."""
    int64_list = tf.train.Int64List(value=list(values))
    return tf.train.Feature(int64_list=int64_list)
def create_float_feature(values):
    """Wrap an iterable of floats in a `tf.train.Feature` (FloatList)."""
    float_list = tf.train.FloatList(value=list(values))
    return tf.train.Feature(float_list=float_list)
The provided code snippet includes necessary dependencies for implementing the `write_instance_to_example_files` function. Write a Python function `def write_instance_to_example_files(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files)` to solve the following problem:
Create TF example files from `TrainingInstance`s.
Here is the function:
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
                                    max_predictions_per_seq, output_files):
    """Create TF example files from `TrainingInstance`s.

    Instances are sharded round-robin across `output_files`; the first 20
    examples are also logged for manual inspection.
    """
    writers = []
    for output_file in output_files:
        writers.append(tf.python_io.TFRecordWriter(output_file))

    writer_index = 0
    total_written = 0
    for (inst_index, instance) in enumerate(instances):
        input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
        input_mask = [1] * len(input_ids)
        segment_ids = list(instance.segment_ids)
        token_boundary = list(instance.token_boundary)

        assert len(input_ids) <= max_seq_length

        # Zero-pad all per-token sequences out to the fixed max_seq_length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            token_boundary.append(0)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        masked_lm_positions = list(instance.masked_lm_positions)
        masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
        masked_lm_weights = [1.0] * len(masked_lm_ids)

        # When FLAGS.do_permutation is set, twice as many masked slots are
        # padded for (multiplier becomes 2).
        multiplier = 1 + int(FLAGS.do_permutation)
        while len(masked_lm_positions) < max_predictions_per_seq * multiplier:
            masked_lm_positions.append(0)
            masked_lm_ids.append(0)
            masked_lm_weights.append(0.0)

        sentence_order_label = 1 if instance.is_random_next else 0

        features = collections.OrderedDict()
        features["input_ids"] = create_int_feature(input_ids)
        features["input_mask"] = create_int_feature(input_mask)
        features["segment_ids"] = create_int_feature(segment_ids)
        features["token_boundary"] = create_int_feature(token_boundary)
        features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
        features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
        features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
        # Note: We keep this feature name `next_sentence_labels` to be compatible
        # with the original data created by lanzhzh@. However, in the ALBERT case
        # it does contain sentence_order_label.
        features["next_sentence_labels"] = create_int_feature(
            [sentence_order_label])

        tf_example = tf.train.Example(features=tf.train.Features(feature=features))

        # Round-robin the serialized example across the shard writers.
        writers[writer_index].write(tf_example.SerializeToString())
        writer_index = (writer_index + 1) % len(writers)

        total_written += 1

        # Log the first 20 examples so the output can be eyeballed.
        if inst_index < 20:
            tf.logging.info("*** Example ***")
            tf.logging.info("tokens: %s" % " ".join(
                [tokenization.printable_text(x) for x in instance.tokens]))

            for feature_name in features.keys():
                feature = features[feature_name]
                values = []
                if feature.int64_list.value:
                    values = feature.int64_list.value
                elif feature.float_list.value:
                    values = feature.float_list.value
                tf.logging.info(
                    "%s: %s" % (feature_name, " ".join([str(x) for x in values])))

    for writer in writers:
        writer.close()

    tf.logging.info("Wrote %d total instances", total_written)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.