Dataset schema (from the viewer header): id — int64, values 0 to ~190k; prompt — string, lengths 21 to 13.4M characters; docstring — string, lengths 1 to 12k characters.
20,494
from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken

def validate_user_id(user_id):
    # Regex pattern: must start with a letter, followed by letters, digits, or underscores
    pattern = r'^[A-Za-z][A-Za-z0-9_]*$'
    # Accept only strings that match the pattern
    if isinstance(user_id, str) and re.match(pattern, user_id):
        return True
    else:
        return False
null
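A minimal usage sketch for `validate_user_id` (the sample IDs below are made up for illustration):

assert validate_user_id("alice_01") is True   # starts with a letter; rest is letters/digits/underscores
assert validate_user_id("1alice") is False    # may not start with a digit
assert validate_user_id(42) is False          # non-strings are rejected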
20,495
from sanic.request import Request
from sanic.exceptions import BadRequest
import traceback
from urllib.parse import urlparse
import time
import os
import logging
import re
import tiktoken

The provided code snippet includes necessary dependencies for implementing the `num_tokens` function. Write a Python function `def num_tokens(text: str, model: str = 'gpt-3.5-turbo-0613') -> int` to solve the following problem:
Return the number of tokens in a string.

Here is the function:

def num_tokens(text: str, model: str = 'gpt-3.5-turbo-0613') -> int:
    """Return the number of tokens in a string."""
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))
Return the number of tokens in a string.
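A quick usage sketch for `num_tokens` (requires the `tiktoken` package; the sample sentence is arbitrary):

n = num_tokens("The quick brown fox jumps over the lazy dog.")
print(n)  # token count under the encoding tiktoken maps to gpt-3.5-turbo-0613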
20,496
from langchain.docstore.document import Document
from typing import List
import re

# `under_non_alpha_ratio` is referenced but not defined in the source snippet;
# this implementation follows the helper of the same name in the `unstructured`
# library, which this check appears to be adapted from.
def under_non_alpha_ratio(text: str, threshold: float = 0.5) -> bool:
    """Checks if the proportion of alpha characters in the text falls below a threshold."""
    if len(text) == 0:
        return False
    alpha_count = len([char for char in text if char.strip() and char.isalpha()])
    total_count = len([char for char in text if char.strip()])
    try:
        return alpha_count / total_count < threshold
    except ZeroDivisionError:
        return False

def is_possible_title(
    text: str,
    title_max_word_length: int = 20,
    non_alpha_threshold: float = 0.5,
) -> bool:
    """Checks to see if the text passes all of the checks for a valid title.

    Parameters
    ----------
    text
        The input text to check
    title_max_word_length
        The maximum number of words a title can contain
    non_alpha_threshold
        The minimum number of alpha characters the text needs to be considered a title
    """
    # Empty text can never be a title
    if len(text) == 0:
        print("Not a title. Text is empty.")
        return False

    # Text that ends in punctuation is not a title
    ENDS_IN_PUNCT_PATTERN = r"[^\w\s]\Z"
    ENDS_IN_PUNCT_RE = re.compile(ENDS_IN_PUNCT_PATTERN)
    if ENDS_IN_PUNCT_RE.search(text) is not None:
        return False

    # The text must not exceed the configured length (20 by default)
    # NOTE(robinson) - splitting on spaces here instead of word tokenizing because it
    # is less expensive and actual tokenization doesn't add much value for the length check
    if len(text) > title_max_word_length:
        return False

    # Text whose non-alphabetic character ratio is too high is not a title
    if under_non_alpha_ratio(text, threshold=non_alpha_threshold):
        return False

    # NOTE(robinson) - Prevent flagging salutations like "To My Dearest Friends," as titles
    if text.endswith((",", ".", ",", "。")):
        return False

    if text.isnumeric():
        print(f"Not a title. Text is all numeric:\n\n{text}")  # type: ignore
        return False

    # The leading characters (the first 5 by default) should contain a digit,
    # as in numbered headings such as "1.2 Overview"
    if len(text) < 5:
        text_5 = text
    else:
        text_5 = text[:5]
    numeric_in_text_5 = sum(map(lambda x: x.isnumeric(), text_5))
    if not numeric_in_text_5:
        return False

    return True

def zh_title_enhance(docs: List[Document]) -> List[Document]:
    title = None
    if len(docs) > 0:
        for doc in docs:
            if is_possible_title(doc.page_content):
                doc.metadata['category'] = 'cn_Title'
                title = doc.page_content
            elif title:
                # Prepends "The text below is related to (<title>)." in Chinese
                doc.page_content = f"下文与({title})有关。{doc.page_content}"
        return docs
    else:
        print("File does not exist")
null
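A usage sketch for `zh_title_enhance`, assuming langchain's `Document` class; the sample strings are invented:

from langchain.docstore.document import Document

docs = [
    Document(page_content="1.2 安装说明"),  # short, numbered: detected as a title
    Document(page_content="运行 pip install 即可完成安装。"),
]
docs = zh_title_enhance(docs)
print(docs[0].metadata)      # {'category': 'cn_Title'}
print(docs[1].page_content)  # body text, now prefixed with the detected title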
20,497
from asyncio import AbstractEventLoop
import json
import logging
import logging.handlers
import os
import platform
import sys
from typing import AsyncGenerator, Generator
import warnings

import requests

from fastchat.constants import LOGDIR

handler = None
visited_loggers = set()

class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """

    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ""

    def __getattr__(self, attr):
        return getattr(self.terminal, attr)

    def write(self, buf):
        temp_linebuf = self.linebuf + buf
        self.linebuf = ""
        for line in temp_linebuf.splitlines(True):
            # From the io.TextIOWrapper docs:
            #   On output, if newline is None, any '\n' characters written
            #   are translated to the system default line separator.
            # By default sys.stdout.write() expects '\n' newlines and then
            # translates them so this is still cross platform.
            if line[-1] == "\n":
                encoded_message = line.encode("utf-8", "ignore").decode("utf-8")
                self.logger.log(self.log_level, encoded_message.rstrip())
            else:
                self.linebuf += line

    def flush(self):
        if self.linebuf != "":
            encoded_message = self.linebuf.encode("utf-8", "ignore").decode("utf-8")
            self.logger.log(self.log_level, encoded_message.rstrip())
        self.linebuf = ""

LOGDIR = os.getenv("LOGDIR", ".")

def build_logger(logger_name, logger_filename):
    global handler

    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Set the format of root handlers
    if not logging.getLogger().handlers:
        if sys.version_info[1] >= 9:
            # This is for windows
            logging.basicConfig(level=logging.INFO, encoding="utf-8")
        else:
            if platform.system() == "Windows":
                warnings.warn(
                    "If you are running on Windows, "
                    "we recommend you use Python >= 3.9 for UTF-8 encoding."
                )
            logging.basicConfig(level=logging.INFO)
    logging.getLogger().handlers[0].setFormatter(formatter)

    # Redirect stdout and stderr to loggers
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl

    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl

    # Get logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    # If LOGDIR is empty, don't try to write the log to a local file
    if LOGDIR != "":
        os.makedirs(LOGDIR, exist_ok=True)
        filename = os.path.join(LOGDIR, logger_filename)
        handler = logging.handlers.TimedRotatingFileHandler(
            filename, when="D", utc=True, encoding="utf-8"
        )
        handler.setFormatter(formatter)

        for l in [stdout_logger, stderr_logger, logger]:
            if l in visited_loggers:
                continue
            visited_loggers.add(l)
            l.addHandler(handler)

    return logger
null
20,498
from asyncio import AbstractEventLoop
import json
import logging
import logging.handlers
import os
import platform
import sys
from typing import AsyncGenerator, Generator
import warnings

import requests

from fastchat.constants import LOGDIR

The provided code snippet includes necessary dependencies for implementing the `disable_torch_init` function. Write a Python function `def disable_torch_init()` to solve the following problem:
Disable the redundant torch default initialization to accelerate model creation.

Here is the function:

def disable_torch_init():
    """
    Disable the redundant torch default initialization to accelerate model creation.
    """
    import torch

    setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
    setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
Disable the redundant torch default initialization to accelerate model creation.
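A short usage sketch; note that `disable_torch_init` monkey-patches `torch.nn` globally for the rest of the process, so it is only safe when every subsequently created module will have its weights overwritten by a checkpoint:

disable_torch_init()
import torch

layer = torch.nn.Linear(4096, 4096)  # created without running the default weight initialization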
20,499
from asyncio import AbstractEventLoop
import json
import logging
import logging.handlers
import os
import platform
import sys
from typing import AsyncGenerator, Generator
import warnings

import requests

from fastchat.constants import LOGDIR

The provided code snippet includes necessary dependencies for implementing the `clean_flant5_ckpt` function. Write a Python function `def clean_flant5_ckpt(ckpt_path)` to solve the following problem:
Flan-T5 trained with HF+FSDP saves corrupted weights for shared embeddings; use this function to make sure they can be correctly loaded.

Here is the function:

def clean_flant5_ckpt(ckpt_path):
    """
    Flan-T5 trained with HF+FSDP saves corrupted weights for shared embeddings;
    use this function to make sure they can be correctly loaded.
    """
    import torch

    index_file = os.path.join(ckpt_path, "pytorch_model.bin.index.json")
    index_json = json.load(open(index_file, "r"))

    weightmap = index_json["weight_map"]

    share_weight_file = weightmap["shared.weight"]
    share_weight = torch.load(os.path.join(ckpt_path, share_weight_file))[
        "shared.weight"
    ]

    for weight_name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]:
        weight_file = weightmap[weight_name]
        weight = torch.load(os.path.join(ckpt_path, weight_file))
        weight[weight_name] = share_weight
        torch.save(weight, os.path.join(ckpt_path, weight_file))
Flan-T5 trained with HF+FSDP saves corrupted weights for shared embeddings; use this function to make sure they can be correctly loaded.
20,500
from asyncio import AbstractEventLoop
import json
import logging
import logging.handlers
import os
import platform
import sys
from typing import AsyncGenerator, Generator
import warnings

import requests

from fastchat.constants import LOGDIR

The provided code snippet includes necessary dependencies for implementing the `pretty_print_semaphore` function. Write a Python function `def pretty_print_semaphore(semaphore)` to solve the following problem:
Print a semaphore in a better format.

Here is the function:

def pretty_print_semaphore(semaphore):
    """Print a semaphore in a better format."""
    if semaphore is None:
        return "None"
    return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"
Print a semaphore in a better format.
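A quick sketch of the output; the helper reads asyncio.Semaphore's private `_value` attribute:

import asyncio

sem = asyncio.Semaphore(5)
print(pretty_print_semaphore(sem))   # Semaphore(value=5, locked=False)
print(pretty_print_semaphore(None))  # None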
20,501
from asyncio import AbstractEventLoop
import json
import logging
import logging.handlers
import os
import platform
import sys
from typing import AsyncGenerator, Generator
import warnings

import requests

from fastchat.constants import LOGDIR

The provided code snippet includes necessary dependencies for implementing the `iter_over_async` function. Write a Python function `def iter_over_async( async_gen: AsyncGenerator, event_loop: AbstractEventLoop ) -> Generator` to solve the following problem:
Convert async generator to sync generator.

Here is the function:

def iter_over_async(
    async_gen: AsyncGenerator, event_loop: AbstractEventLoop
) -> Generator:
    """
    Convert async generator to sync generator

    :param async_gen: the AsyncGenerator to convert
    :param event_loop: the event loop to run on
    :returns: Sync generator
    """
    ait = async_gen.__aiter__()

    async def get_next():
        try:
            obj = await ait.__anext__()
            return False, obj
        except StopAsyncIteration:
            return True, None

    while True:
        done, obj = event_loop.run_until_complete(get_next())
        if done:
            break
        yield obj
Convert async generator to sync generator.
:param async_gen: the AsyncGenerator to convert
:param event_loop: the event loop to run on
:returns: Sync generator
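A usage sketch for `iter_over_async`, assuming the caller owns an event loop that is not already running; the `ticker` generator here is hypothetical:

import asyncio

async def ticker():
    for i in range(3):
        await asyncio.sleep(0)
        yield i

loop = asyncio.new_event_loop()
for item in iter_over_async(ticker(), loop):
    print(item)  # prints 0, 1, 2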
20,502
from asyncio import AbstractEventLoop
import json
import logging
import logging.handlers
import os
import platform
import sys
from typing import AsyncGenerator, Generator
import warnings

import requests

from fastchat.constants import LOGDIR

The provided code snippet includes necessary dependencies for implementing the `parse_gradio_auth_creds` function. Write a Python function `def parse_gradio_auth_creds(filename: str)` to solve the following problem:
Parse a username:password file for gradio authorization.

Here is the function:

def parse_gradio_auth_creds(filename: str):
    """Parse a username:password file for gradio authorization."""
    gradio_auth_creds = []
    with open(filename, "r", encoding="utf8") as file:
        for line in file.readlines():
            gradio_auth_creds += [x.strip() for x in line.split(",") if x.strip()]
    if gradio_auth_creds:
        auth = [tuple(cred.split(":")) for cred in gradio_auth_creds]
    else:
        auth = None
    return auth
Parse a username:password file for gradio authorization.
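A usage sketch for `parse_gradio_auth_creds`; the file path and credentials are placeholders. Given a file containing a line such as `alice:secret1,bob:secret2`, the helper returns tuples suitable for gradio's `auth=` argument:

auth = parse_gradio_auth_creds("gradio_auth.txt")
# -> [('alice', 'secret1'), ('bob', 'secret2')], or None if the file held no entries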
20,503
from asyncio import AbstractEventLoop
import json
import logging
import logging.handlers
import os
import platform
import sys
from typing import AsyncGenerator, Generator
import warnings

import requests

from fastchat.constants import LOGDIR

The provided code snippet includes necessary dependencies for implementing the `run_cmd` function. Write a Python function `def run_cmd(cmd: str)` to solve the following problem:
Run a bash command.

Here is the function:

def run_cmd(cmd: str):
    """Run a bash command."""
    print(cmd)
    return os.system(cmd)
Run a bash command.
20,504
import dataclasses
from enum import auto, IntEnum
from typing import List, Any, Dict, Union, Tuple

class SeparatorStyle(IntEnum):
    """Separator styles.

    Note: this enum is referenced but not defined in the source snippet; the
    members below are reconstructed from the styles used in `get_prompt`.
    """

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    RWKV = auto()
    LLAMA2 = auto()
    MINIMA = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATMLQANY = auto()
    CHATGLM3 = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()
    METAMATH = auto()
    DEEPSEEK_CHAT = auto()

# The decorator below was dropped in the source snippet; the class-level field
# defaults require it.
@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = "{system_message}"
    # The system message
    system_message: str = ""
    # The names of two roles
    roles: Tuple[str] = ("USER", "ASSISTANT")
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = "\n"
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ": "  # must end with a space
            return ret
        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
            ret = "" if system_prompt == "" else system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + message + self.sep
                else:
                    ret += role + "\n"
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ": "
                        + message.replace("\r\n", "\n").replace("\n\n", "\n")
                    )
                    ret += "\n\n"
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = "[INST] "
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if i == 0:
                        ret += message + " "
                    else:
                        ret += tag + " " + message + seps[i % 2]
                else:
                    ret += tag
            return ret
        elif self.sep_style == SeparatorStyle.MINIMA:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = "<s> "
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if i == 0:
                        ret += message + " "
                    else:
                        ret += tag + " " + message + seps[i % 2]
                else:
                    ret += tag
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM:
            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
            round_add_n = 1 if self.name == "chatglm2" else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ""
            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f"[Round {i//2 + round_add_n}]{self.sep}"
                if message:
                    ret += f"{role}:{message}{self.sep}"
                else:
                    ret += f"{role}:"
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = "" if system_prompt == "" else system_prompt + self.sep + "\n"
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + message + self.sep + "\n"
                else:
                    ret += role + "\n"
            return ret
        elif self.sep_style == SeparatorStyle.CHATMLQANY:
            ret = "" if system_prompt == "" else system_prompt + self.sep2 + "\n"
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + message + self.sep + "\n"
                else:
                    ret += role + "\n"
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM3:
            ret = ""
            if self.system_message:
                ret += system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + message
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += "<s>"
                if message:
                    ret += role + ":" + message + seps[i % 2] + "\n"
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ":\n" + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += "\n\n"
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ": " + "<s>" + message + "</s>"
                else:
                    ret += role + ": " + "<s>"
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ":\n" + message + self.sep
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ""
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.METAMATH:
            ret = "" if system_prompt == "" else system_prompt + self.sep
            for i, (role, message) in enumerate(self.messages):
                # For MetaMath, sep2 is used to prefix the message.
                starting_sep = ":\n" if i % 2 == 0 else ": " + self.sep2
                ending_sep = self.sep if i % 2 == 0 else ""
                if message:
                    ret += role + starting_sep + message + ending_sep
                else:
                    ret += role + starting_sep
            return ret
        elif self.sep_style == SeparatorStyle.DEEPSEEK_CHAT:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        if self.system_message == "":
            ret = []
        else:
            ret = [{"role": "system", "content": self.system_message}]
        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append({"role": "user", "content": msg})
            else:
                if msg is not None:
                    ret.append({"role": "assistant", "content": msg})
        return ret

    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        return {
            "template_name": self.name,
            "system_message": self.system_message,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
        }

conv_templates: Dict[str, Conversation] = {}

The provided code snippet includes necessary dependencies for implementing the `register_conv_template` function. Write a Python function `def register_conv_template(template: Conversation, override: bool = False)` to solve the following problem:
Register a new conversation template.

Here is the function:

def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert (
            template.name not in conv_templates
        ), f"{template.name} has been registered."

    conv_templates[template.name] = template
Register a new conversation template.
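A usage sketch for `register_conv_template`: register a minimal template, then render a prompt from a copy of it (the template name and messages are made up):

register_conv_template(
    Conversation(
        name="demo",
        system_message="You are a helpful assistant.",
        roles=("USER", "ASSISTANT"),
        messages=[],
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
    )
)
conv = conv_templates["demo"].copy()
conv.append_message("USER", "Hello!")
conv.append_message("ASSISTANT", None)  # None marks the slot the model will fill
print(conv.get_prompt())
# You are a helpful assistant.
# USER: Hello!
# ASSISTANT: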
20,505
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

def get_gen_kwargs(
    params,
    seed: Optional[int] = None,
):
    stop = params.get("stop", None)
    if isinstance(stop, list):
        stop_sequences = stop
    elif isinstance(stop, str):
        stop_sequences = [stop]
    else:
        stop_sequences = []
    gen_kwargs = {
        "do_sample": True,
        "return_full_text": bool(params.get("echo", False)),
        "max_new_tokens": int(params.get("max_new_tokens", 256)),
        "top_p": float(params.get("top_p", 1.0)),
        "temperature": float(params.get("temperature", 1.0)),
        "stop_sequences": stop_sequences,
        "repetition_penalty": float(params.get("repetition_penalty", 1.0)),
        "top_k": params.get("top_k", None),
        "seed": seed,
    }
    if gen_kwargs["top_p"] == 1:
        gen_kwargs["top_p"] = 0.9999999
    if gen_kwargs["top_p"] == 0:
        gen_kwargs.pop("top_p")
    if gen_kwargs["temperature"] == 0:
        gen_kwargs.pop("temperature")
        gen_kwargs["do_sample"] = False
    return gen_kwargs
null
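A quick sketch of how `get_gen_kwargs` normalizes sampling parameters (the request dict is made up):

params = {"temperature": 0.0, "top_p": 1.0, "stop": "###", "max_new_tokens": 64}
kwargs = get_gen_kwargs(params, seed=42)
# temperature == 0 switches sampling off entirely: "temperature" is dropped and
# do_sample becomes False; top_p == 1.0 was nudged to 0.9999999 for the HF endpoint,
# and the single stop string was wrapped into stop_sequences == ["###"].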
20,506
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

def could_be_stop(text, stop):
    for s in stop:
        if any(text.endswith(s[:i]) for i in range(1, len(s) + 1)):
            return True
    return False
null
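`could_be_stop` reports whether the streamed text currently ends with a prefix of any stop word, i.e. whether a stop sequence might still be forming mid-stream. A small sketch:

could_be_stop("Hello <|end", ["<|endoftext|>"])  # True: "<|end" is a prefix of the stop word
could_be_stop("Hello world", ["<|endoftext|>"])  # False: no partial match at the end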
20,507
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

worker_map = {}

def acquire_worker_semaphore(worker):
    if worker.semaphore is None:
        worker.semaphore = asyncio.Semaphore(worker.limit_worker_concurrency)
    return worker.semaphore.acquire()

def create_background_tasks(worker):
    background_tasks = BackgroundTasks()
    background_tasks.add_task(lambda: release_worker_semaphore(worker))
    return background_tasks

async def api_generate_stream(request: Request):
    params = await request.json()
    worker = worker_map[params["model"]]
    await acquire_worker_semaphore(worker)
    generator = worker.generate_stream_gate(params)
    background_tasks = create_background_tasks(worker)
    return StreamingResponse(generator, background=background_tasks)
null
20,508
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

worker_map = {}

def release_worker_semaphore(worker):
    worker.semaphore.release()

def acquire_worker_semaphore(worker):
    if worker.semaphore is None:
        worker.semaphore = asyncio.Semaphore(worker.limit_worker_concurrency)
    return worker.semaphore.acquire()

async def api_generate(request: Request):
    params = await request.json()
    worker = worker_map[params["model"]]
    await acquire_worker_semaphore(worker)
    output = worker.generate_gate(params)
    release_worker_semaphore(worker)
    return JSONResponse(output)
null
20,509
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

worker_map = {}

def release_worker_semaphore(worker):
    worker.semaphore.release()

def acquire_worker_semaphore(worker):
    if worker.semaphore is None:
        worker.semaphore = asyncio.Semaphore(worker.limit_worker_concurrency)
    return worker.semaphore.acquire()

async def api_get_embeddings(request: Request):
    params = await request.json()
    worker = worker_map[params["model"]]
    await acquire_worker_semaphore(worker)
    embedding = worker.get_embeddings(params)
    release_worker_semaphore(worker)
    return JSONResponse(content=embedding)
null
20,510
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

workers = []

async def api_get_status(request: Request):
    return {
        "model_names": [m for w in workers for m in w.model_names],
        "speed": 1,
        "queue_length": sum([w.get_queue_length() for w in workers]),
    }
null
20,511
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

worker_map = {}

async def api_count_token(request: Request):
    params = await request.json()
    worker = worker_map[params["model"]]
    return worker.count_token(params)
null
20,512
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

worker_map = {}

async def api_get_conv(request: Request):
    params = await request.json()
    worker = worker_map[params["model"]]
    return worker.get_conv_template()
null
20,513
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

worker_map = {}

async def api_model_details(request: Request):
    params = await request.json()
    worker = worker_map[params["model"]]
    return {"context_length": worker.context_len}
null
20,514
import argparse
import asyncio
import json
import uuid
import os
from typing import List, Optional

import requests
import uvicorn
from fastapi import BackgroundTasks, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from huggingface_hub import InferenceClient

from fastchat.constants import SERVER_ERROR_MSG, ErrorCode
from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.utils import build_logger

worker_id = str(uuid.uuid4())[:8]
logger = build_logger("model_worker", f"model_worker_{worker_id}.log")

workers = []
worker_map = {}

class HuggingfaceApiWorker(BaseModelWorker):
    def __init__(
        self,
        controller_addr: str,
        worker_addr: str,
        worker_id: str,
        model_path: str,
        api_base: str,
        token: str,
        context_length: int,
        model_names: List[str],
        limit_worker_concurrency: int,
        no_register: bool,
        conv_template: Optional[str] = None,
        seed: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            controller_addr,
            worker_addr,
            worker_id,
            model_path,
            model_names,
            limit_worker_concurrency,
            conv_template=conv_template,
        )
        self.model_path = model_path
        self.api_base = api_base
        self.token = token
        self.context_len = context_length
        self.seed = seed

        logger.info(
            f"Connecting with huggingface api {self.model_path} as {self.model_names} on worker {worker_id} ..."
        )

        if not no_register:
            self.init_heart_beat()

    def count_token(self, params):
        # No tokenizer here
        ret = {
            "count": 0,
            "error_code": 0,
        }
        return ret

    def generate_stream_gate(self, params):
        self.call_ct += 1

        prompt = params["prompt"]
        gen_kwargs = get_gen_kwargs(params, seed=self.seed)
        stop = gen_kwargs["stop_sequences"]
        if "falcon" in self.model_path and "chat" in self.model_path:
            stop.extend(["\nUser:", "<|endoftext|>", " User:", "###"])
        stop = list(set(stop))
        gen_kwargs["stop_sequences"] = stop

        logger.info(f"prompt: {prompt}")
        logger.info(f"gen_kwargs: {gen_kwargs}")

        try:
            if self.model_path == "":
                url = f"{self.api_base}"
            else:
                url = f"{self.api_base}/{self.model_path}"
            client = InferenceClient(url, token=self.token)
            res = client.text_generation(
                prompt, stream=True, details=True, **gen_kwargs
            )

            reason = None
            text = ""
            for chunk in res:
                if chunk.token.special:
                    continue
                text += chunk.token.text

                s = next((x for x in stop if text.endswith(x)), None)
                if s is not None:
                    text = text[: -len(s)]
                    reason = "stop"
                    break

                if could_be_stop(text, stop):
                    continue

                if (
                    chunk.details is not None
                    and chunk.details.finish_reason is not None
                ):
                    reason = chunk.details.finish_reason
                if reason not in ["stop", "length"]:
                    reason = None
                ret = {
                    "text": text,
                    "error_code": 0,
                    "finish_reason": reason,
                }
                yield json.dumps(ret).encode() + b"\0"
        except Exception as e:
            ret = {
                "text": f"{SERVER_ERROR_MSG}\n\n({e})",
                "error_code": ErrorCode.INTERNAL_ERROR,
            }
            yield json.dumps(ret).encode() + b"\0"

    def generate_gate(self, params):
        for x in self.generate_stream_gate(params):
            pass
        return json.loads(x[:-1].decode())

    def get_embeddings(self, params):
        raise NotImplementedError()

def create_huggingface_api_worker():
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=21002)
    parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
    parser.add_argument(
        "--controller-address", type=str, default="http://localhost:21001"
    )
    # all model-related parameters are listed in --model-info-file
    parser.add_argument(
        "--model-info-file",
        type=str,
        required=True,
        help="Huggingface API model's info file path",
    )
    parser.add_argument(
        "--limit-worker-concurrency",
        type=int,
        default=5,
        help="Limit the model concurrency to prevent OOM.",
    )
    parser.add_argument("--no-register", action="store_true")
    parser.add_argument(
        "--seed",
        type=int,
        default=None,
        help="Overwrite the random seed for each generation.",
    )
    parser.add_argument(
        "--ssl",
        action="store_true",
        required=False,
        default=False,
        help="Enable SSL. Requires OS Environment variables 'SSL_KEYFILE' and 'SSL_CERTFILE'.",
    )
    args = parser.parse_args()

    with open(args.model_info_file, "r", encoding="UTF-8") as f:
        model_info = json.load(f)

    logger.info(f"args: {args}")

    model_path_list = []
    api_base_list = []
    token_list = []
    context_length_list = []
    model_names_list = []
    conv_template_list = []

    for m in model_info:
        model_path_list.append(model_info[m]["model_path"])
        api_base_list.append(model_info[m]["api_base"])
        token_list.append(model_info[m]["token"])

        context_length = model_info[m]["context_length"]
        model_names = model_info[m].get("model_names", [m.split("/")[-1]])
        if isinstance(model_names, str):
            model_names = [model_names]
        conv_template = model_info[m].get("conv_template", None)

        context_length_list.append(context_length)
        model_names_list.append(model_names)
        conv_template_list.append(conv_template)

    logger.info(f"Model paths: {model_path_list}")
    logger.info(f"API bases: {api_base_list}")
    logger.info(f"Tokens: {token_list}")
    logger.info(f"Context lengths: {context_length_list}")
    logger.info(f"Model names: {model_names_list}")
    logger.info(f"Conv templates: {conv_template_list}")

    for (
        model_names,
        conv_template,
        model_path,
        api_base,
        token,
        context_length,
    ) in zip(
        model_names_list,
        conv_template_list,
        model_path_list,
        api_base_list,
        token_list,
        context_length_list,
    ):
        m = HuggingfaceApiWorker(
            args.controller_address,
            args.worker_address,
            worker_id,
            model_path,
            api_base,
            token,
            context_length,
            model_names,
            args.limit_worker_concurrency,
            no_register=args.no_register,
            conv_template=conv_template,
            seed=args.seed,
        )
        workers.append(m)
        for name in model_names:
            worker_map[name] = m

    # register all the models
    url = args.controller_address + "/register_worker"
    data = {
        "worker_name": workers[0].worker_addr,
        "check_heart_beat": not args.no_register,
        "worker_status": {
            "model_names": [m for w in workers for m in w.model_names],
            "speed": 1,
            "queue_length": sum([w.get_queue_length() for w in workers]),
        },
    }
    r = requests.post(url, json=data)
    assert r.status_code == 200

    return args, workers
null
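A sketch of what the `--model-info-file` consumed by `create_huggingface_api_worker` might look like, inferred from the fields the loop above reads; the model id and token are placeholders:

{
  "HuggingFaceH4/zephyr-7b-beta": {
    "model_path": "HuggingFaceH4/zephyr-7b-beta",
    "api_base": "https://api-inference.huggingface.co/models",
    "token": "hf_xxx",
    "context_length": 4096,
    "model_names": ["zephyr-7b-beta"],
    "conv_template": null
  }
}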
20,515
import argparse
import asyncio
import json
from typing import List

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import uvicorn
from vllm import AsyncLLMEngine
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.sampling_params import SamplingParams
from vllm.utils import random_uuid

from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.serve.model_worker import (
    logger,
    worker_id,
)
from fastchat.utils import get_context_length, is_partial_stop

def acquire_worker_semaphore():
    if worker.semaphore is None:
        worker.semaphore = asyncio.Semaphore(worker.limit_worker_concurrency)
    return worker.semaphore.acquire()

def create_background_tasks(request_id):
    async def abort_request() -> None:
        await engine.abort(request_id)

    background_tasks = BackgroundTasks()
    background_tasks.add_task(release_worker_semaphore)
    background_tasks.add_task(abort_request)
    return background_tasks

async def api_generate_stream(request: Request):
    params = await request.json()
    await acquire_worker_semaphore()
    request_id = random_uuid()
    params["request_id"] = request_id
    generator = worker.generate_stream(params)
    background_tasks = create_background_tasks(request_id)
    return StreamingResponse(generator, background=background_tasks)
null
20,516
import argparse
import asyncio
import json
from typing import List

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import uvicorn
from vllm import AsyncLLMEngine
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.sampling_params import SamplingParams
from vllm.utils import random_uuid

from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.serve.model_worker import (
    logger,
    worker_id,
)
from fastchat.utils import get_context_length, is_partial_stop

def release_worker_semaphore():
    worker.semaphore.release()

def acquire_worker_semaphore():
    if worker.semaphore is None:
        worker.semaphore = asyncio.Semaphore(worker.limit_worker_concurrency)
    return worker.semaphore.acquire()

async def api_generate(request: Request):
    params = await request.json()
    await acquire_worker_semaphore()
    request_id = random_uuid()
    params["request_id"] = request_id
    output = await worker.generate(params)
    release_worker_semaphore()
    await engine.abort(request_id)
    return JSONResponse(output)
null
20,517
import argparse
import asyncio
import json
from typing import List

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import uvicorn
from vllm import AsyncLLMEngine
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.sampling_params import SamplingParams
from vllm.utils import random_uuid

from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.serve.model_worker import (
    logger,
    worker_id,
)
from fastchat.utils import get_context_length, is_partial_stop

async def api_get_status(request: Request):
    return worker.get_status()
null
20,518
import argparse
import asyncio
import json
from typing import List

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import uvicorn
from vllm import AsyncLLMEngine
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.sampling_params import SamplingParams
from vllm.utils import random_uuid

from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.serve.model_worker import (
    logger,
    worker_id,
)
from fastchat.utils import get_context_length, is_partial_stop

async def api_count_token(request: Request):
    params = await request.json()
    return worker.count_token(params)
null
20,519
import argparse
import asyncio
import json
from typing import List

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import uvicorn
from vllm import AsyncLLMEngine
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.sampling_params import SamplingParams
from vllm.utils import random_uuid

from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.serve.model_worker import (
    logger,
    worker_id,
)
from fastchat.utils import get_context_length, is_partial_stop

async def api_get_conv(request: Request):
    return worker.get_conv_template()
null
20,520
import argparse
import asyncio
import json
from typing import List

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import uvicorn
from vllm import AsyncLLMEngine
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.sampling_params import SamplingParams
from vllm.utils import random_uuid

from fastchat.serve.base_model_worker import BaseModelWorker
from fastchat.serve.model_worker import (
    logger,
    worker_id,
)
from fastchat.utils import get_context_length, is_partial_stop

async def api_model_details(request: Request):
    return {"context_length": worker.context_len}
null
20,521
from json import loads
import os
import random
import time

from fastchat.utils import build_logger
from fastchat.constants import WORKER_API_TIMEOUT

logger = build_logger("gradio_web_server", "gradio_web_server.log")

def ai2_api_stream_iter(
    model_name,
    messages,
    temperature,
    top_p,
    max_new_tokens,
    api_key=None,
    api_base=None,
):
    from requests import post

    # get keys and needed values
    ai2_key = api_key or os.environ.get("AI2_API_KEY")
    api_base = api_base or "https://inferd.allen.ai/api/v1/infer"
    model_id = "mod_01hhgcga70c91402r9ssyxekan"

    # Make requests
    gen_params = {
        "model": model_name,
        "prompt": messages,
        "temperature": temperature,
        "top_p": top_p,
        "max_new_tokens": max_new_tokens,
    }
    logger.info(f"==== request ====\n{gen_params}")

    # AI2 uses vLLM, which requires that `top_p` be 1.0 for greedy sampling:
    # https://github.com/vllm-project/vllm/blob/v0.1.7/vllm/sampling_params.py#L156-L157
    if temperature == 0.0 and top_p < 1.0:
        raise ValueError("top_p must be 1 when temperature is 0.0")

    res = post(
        api_base,
        stream=True,
        headers={"Authorization": f"Bearer {ai2_key}"},
        json={
            "model_id": model_id,
            # This input format is specific to the Tulu2 model. Other models
            # may require different input formats. See the model's schema
            # documentation on InferD for more information.
            "input": {
                "messages": messages,
                "opts": {
                    "max_tokens": max_new_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "logprobs": 1,  # increase for more choices
                },
            },
        },
    )

    if res.status_code != 200:
        logger.error(f"unexpected response ({res.status_code}): {res.text}")
        raise ValueError("unexpected response from InferD", res)

    text = ""
    for line in res.iter_lines():
        if line:
            part = loads(line)
            if "result" in part and "output" in part["result"]:
                for t in part["result"]["output"]["text"]:
                    text += t
            else:
                logger.error(f"unexpected part: {part}")
                raise ValueError("empty result in InferD response")

            data = {
                "text": text,
                "error_code": 0,
            }
            yield data
null
20,522
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Union
import threading
import uuid

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests
import torch
import torch.nn.functional as F
import uvicorn

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from fastchat.model.model_adapter import (
    load_model,
    add_model_args,
    get_conversation_template,
)
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.modules.gptq import GptqConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.xfastertransformer import XftConfig
from fastchat.serve.inference import generate_stream
from fastchat.serve.model_worker import ModelWorker, worker_id, logger
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length

worker_map = {}

def acquire_worker_semaphore():
    if workers[0].semaphore is None:
        # Share the same semaphore for all workers because
        # all workers share the same GPU.
        semaphore = asyncio.Semaphore(workers[0].limit_worker_concurrency)
        for w in workers:
            w.semaphore = semaphore
    return workers[0].semaphore.acquire()

def create_background_tasks():
    background_tasks = BackgroundTasks()
    background_tasks.add_task(release_worker_semaphore)
    return background_tasks

async def api_generate_stream(request: Request):
    params = await request.json()
    await acquire_worker_semaphore()
    worker = worker_map[params["model"]]
    generator = worker.generate_stream_gate(params)
    background_tasks = create_background_tasks()
    return StreamingResponse(generator, background=background_tasks)
null
20,523
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Union
import threading
import uuid

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests
import torch
import torch.nn.functional as F
import uvicorn

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from fastchat.model.model_adapter import (
    load_model,
    add_model_args,
    get_conversation_template,
)
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.modules.gptq import GptqConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.xfastertransformer import XftConfig
from fastchat.serve.inference import generate_stream
from fastchat.serve.model_worker import ModelWorker, worker_id, logger
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length

worker_map = {}

def release_worker_semaphore():
    # Body elided in the source row; restored from the matching helper in the
    # companion snippets of this module.
    workers[0].semaphore.release()

def acquire_worker_semaphore():
    # Body elided in the source row; restored from the matching helper in the
    # companion snippets of this module.
    if workers[0].semaphore is None:
        semaphore = asyncio.Semaphore(workers[0].limit_worker_concurrency)
        for w in workers:
            w.semaphore = semaphore
    return workers[0].semaphore.acquire()

async def api_generate(request: Request):
    params = await request.json()
    await acquire_worker_semaphore()
    worker = worker_map[params["model"]]
    output = worker.generate_gate(params)
    release_worker_semaphore()
    return JSONResponse(output)
null
20,524
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Union
import threading
import uuid

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests
import torch
import torch.nn.functional as F
import uvicorn

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from fastchat.model.model_adapter import (
    load_model,
    add_model_args,
    get_conversation_template,
)
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.modules.gptq import GptqConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.xfastertransformer import XftConfig
from fastchat.serve.inference import generate_stream
from fastchat.serve.model_worker import ModelWorker, worker_id, logger
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length

worker_map = {}

def acquire_worker_semaphore():
    if workers[0].semaphore is None:
        # Share the same semaphore for all workers because
        # all workers share the same GPU.
        semaphore = asyncio.Semaphore(workers[0].limit_worker_concurrency)
        for w in workers:
            w.semaphore = semaphore
    return workers[0].semaphore.acquire()

def create_background_tasks():
    background_tasks = BackgroundTasks()
    background_tasks.add_task(release_worker_semaphore)
    return background_tasks

async def api_get_embeddings(request: Request):
    params = await request.json()
    await acquire_worker_semaphore()
    worker = worker_map[params["model"]]
    embedding = worker.get_embeddings(params)
    background_tasks = create_background_tasks()
    return JSONResponse(content=embedding, background=background_tasks)
null
20,525
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Union
import threading
import uuid

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests
import torch
import torch.nn.functional as F
import uvicorn

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from fastchat.model.model_adapter import (
    load_model,
    add_model_args,
    get_conversation_template,
)
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.modules.gptq import GptqConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.xfastertransformer import XftConfig
from fastchat.serve.inference import generate_stream
from fastchat.serve.model_worker import ModelWorker, worker_id, logger
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length

workers = []

async def api_get_status(request: Request):
    return {
        "model_names": [m for w in workers for m in w.model_names],
        "speed": 1,
        "queue_length": sum([w.get_queue_length() for w in workers]),
    }
null
20,526
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Union
import threading
import uuid

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests
import torch
import torch.nn.functional as F
import uvicorn

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from fastchat.model.model_adapter import (
    load_model,
    add_model_args,
    get_conversation_template,
)
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.modules.gptq import GptqConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.xfastertransformer import XftConfig
from fastchat.serve.inference import generate_stream
from fastchat.serve.model_worker import ModelWorker, worker_id, logger
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length

worker_map = {}

async def api_count_token(request: Request):
    params = await request.json()
    worker = worker_map[params["model"]]
    return worker.count_token(params)
null
20,527
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Union
import threading
import uuid

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests
import torch
import torch.nn.functional as F
import uvicorn

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from fastchat.model.model_adapter import (
    load_model,
    add_model_args,
    get_conversation_template,
)
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.modules.gptq import GptqConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.xfastertransformer import XftConfig
from fastchat.serve.inference import generate_stream
from fastchat.serve.model_worker import ModelWorker, worker_id, logger
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length

worker_map = {}

async def api_get_conv(request: Request):
    params = await request.json()
    worker = worker_map[params["model"]]
    return worker.get_conv_template()
null
20,528
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Union
import threading
import uuid

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests
import torch
import torch.nn.functional as F
import uvicorn

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from fastchat.model.model_adapter import (
    load_model,
    add_model_args,
    get_conversation_template,
)
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.modules.gptq import GptqConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.xfastertransformer import XftConfig
from fastchat.serve.inference import generate_stream
from fastchat.serve.model_worker import ModelWorker, worker_id, logger
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length

worker_map = {}

async def api_model_details(request: Request):
    params = await request.json()
    worker = worker_map[params["model"]]
    return {"context_length": worker.context_len}
null
20,529
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Union
import threading
import uuid

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests
import torch
import torch.nn.functional as F
import uvicorn

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from fastchat.model.model_adapter import (
    load_model,
    add_model_args,
    get_conversation_template,
)
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.modules.gptq import GptqConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.xfastertransformer import XftConfig
from fastchat.serve.inference import generate_stream
from fastchat.serve.model_worker import ModelWorker, worker_id, logger
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length

workers = []
worker_map = {}

# The definitions below shadow the imports above; their bodies were elided in
# the source snippet and are kept as stubs here.
def add_model_args(parser):
    ...

class GptqConfig:
    ...

class ExllamaConfig:
    ...

class XftConfig:
    ...

worker_id = str(uuid.uuid4())[:8]
logger = build_logger("model_worker", f"model_worker_{worker_id}.log")

# Note: `BaseModelWorker`, `Optional`, and `AWQConfig` come from the surrounding
# fastchat module and are not defined in this snippet. Method bodies were elided
# in the source and are kept as stubs.
class ModelWorker(BaseModelWorker):
    def __init__(
        self,
        controller_addr: str,
        worker_addr: str,
        worker_id: str,
        model_path: str,
        model_names: List[str],
        limit_worker_concurrency: int,
        no_register: bool,
        device: str,
        num_gpus: int,
        max_gpu_memory: str,
        revision: str = None,
        dtype: Optional[torch.dtype] = None,
        load_8bit: bool = False,
        cpu_offloading: bool = False,
        gptq_config: Optional[GptqConfig] = None,
        awq_config: Optional[AWQConfig] = None,
        exllama_config: Optional[ExllamaConfig] = None,
        xft_config: Optional[XftConfig] = None,
        stream_interval: int = 2,
        conv_template: Optional[str] = None,
        embed_in_truncate: bool = False,
        seed: Optional[int] = None,
        debug: bool = False,
        **kwargs,
    ):
        ...

    def generate_stream_gate(self, params):
        ...

    def generate_gate(self, params):
        ...

    def __process_embed_chunk(self, input_ids, attention_mask, **model_type_dict):
        ...

    def __encode_base64(self, embeddings: torch.Tensor) -> List[str]:
        ...

    def get_embeddings(self, params):
        ...

def create_multi_model_worker():
    # Note: Ensure we resolve arg conflicts. We let `add_model_args` add MOST
    # of the model args but we'll override one to have an append action that
    # supports multiple values.
    parser = argparse.ArgumentParser(conflict_handler="resolve")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=21002)
    parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
    parser.add_argument(
        "--controller-address", type=str, default="http://localhost:21001"
    )
    add_model_args(parser)
    # Override the model path to be repeated and align it with model names.
    parser.add_argument(
        "--model-path",
        type=str,
        default=[],
        action="append",
        help="One or more paths to model weights to load. This can be a local folder or a Hugging Face repo ID.",
    )
    parser.add_argument(
        "--model-names",
        type=lambda s: s.split(","),
        action="append",
        help="One or more model names. Values must be aligned with `--model-path` values.",
    )
    parser.add_argument(
        "--conv-template",
        type=str,
        default=None,
        action="append",
        help="Conversation prompt template. Values must be aligned with `--model-path` values. If only one value is provided, it will be repeated for all models.",
    )
    parser.add_argument("--limit-worker-concurrency", type=int, default=5)
    parser.add_argument("--stream-interval", type=int, default=2)
    parser.add_argument("--no-register", action="store_true")
    parser.add_argument(
        "--ssl",
        action="store_true",
        required=False,
        default=False,
        help="Enable SSL. Requires OS Environment variables 'SSL_KEYFILE' and 'SSL_CERTFILE'.",
    )
    args = parser.parse_args()
    logger.info(f"args: {args}")

    if args.gpus:
        if len(args.gpus.split(",")) < args.num_gpus:
            raise ValueError(
                f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
            )
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

    gptq_config = GptqConfig(
        ckpt=args.gptq_ckpt or args.model_path,
        wbits=args.gptq_wbits,
        groupsize=args.gptq_groupsize,
        act_order=args.gptq_act_order,
    )
    if args.enable_exllama:
        exllama_config = ExllamaConfig(
            max_seq_len=args.exllama_max_seq_len,
            gpu_split=args.exllama_gpu_split,
            cache_8bit=args.exllama_cache_8bit,
        )
    else:
        exllama_config = None
    if args.enable_xft:
        xft_config = XftConfig(
            max_seq_len=args.xft_max_seq_len,
            data_type=args.xft_dtype,
        )
        if args.device != "cpu":
            print("xFasterTransformer now is only support CPUs. Reset device to CPU")
            args.device = "cpu"
    else:
        xft_config = None

    if args.model_names is None:
        args.model_names = [[x.split("/")[-1]] for x in args.model_path]
    if args.conv_template is None:
        args.conv_template = [None] * len(args.model_path)
    elif len(args.conv_template) == 1:  # Repeat the same template
        args.conv_template = args.conv_template * len(args.model_path)

    # Launch all workers
    workers = []
    for conv_template, model_path, model_names in zip(
        args.conv_template, args.model_path, args.model_names
    ):
        w = ModelWorker(
            args.controller_address,
            args.worker_address,
            worker_id,
            model_path,
            model_names,
            args.limit_worker_concurrency,
            args.no_register,
            device=args.device,
            num_gpus=args.num_gpus,
            max_gpu_memory=args.max_gpu_memory,
            load_8bit=args.load_8bit,
            cpu_offloading=args.cpu_offloading,
            gptq_config=gptq_config,
            exllama_config=exllama_config,
            xft_config=xft_config,
            stream_interval=args.stream_interval,
            conv_template=conv_template,
        )
        workers.append(w)
        for model_name in model_names:
            worker_map[model_name] = w

    # Register all models
    url = args.controller_address + "/register_worker"
    data = {
        "worker_name": workers[0].worker_addr,
        "check_heart_beat": not args.no_register,
        "worker_status": {
            "model_names": [m for w in workers for m in w.model_names],
            "speed": 1,
            "queue_length": sum([w.get_queue_length() for w in workers]),
        },
    }
    r = requests.post(url, json=data)
    assert r.status_code == 200

    return args, workers
null
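# Hedged sketch (not the FastChat implementation) of the `worker_map` idea in the
# record above: one process serves several models by routing each request to the
# worker registered under that model name. `FakeWorker` is a hypothetical stand-in.
class FakeWorker:
    def __init__(self, model_names):
        self.model_names = model_names

    def generate(self, prompt):
        # stand-in for real model inference
        return f"[{self.model_names[0]}] echo: {prompt}"

workers = [FakeWorker(["vicuna-7b"]), FakeWorker(["llama-2-7b", "llama-2-7b-chat"])]
worker_map = {name: w for w in workers for name in w.model_names}

def dispatch(model_name, prompt):
    if model_name not in worker_map:
        raise KeyError(f"unknown model: {model_name}")
    return worker_map[model_name].generate(prompt)

print(dispatch("llama-2-7b", "hello"))  # routed to the second worker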
20,530
import abc import gc import json import math import os import sys import time from typing import Iterable, Optional, Dict import warnings import psutil import torch from transformers import ( AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer, LlamaForCausalLM, AutoModel, AutoModelForSeq2SeqLM, T5Tokenizer, AutoConfig, ) from transformers.generation.logits_process import ( LogitsProcessorList, RepetitionPenaltyLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) from fastchat.conversation import get_conv_template, SeparatorStyle from fastchat.model.model_adapter import ( load_model, get_conversation_template, get_generate_stream_function, ) from fastchat.modules.awq import AWQConfig from fastchat.modules.gptq import GptqConfig from fastchat.modules.exllama import ExllamaConfig from fastchat.modules.xfastertransformer import XftConfig from fastchat.utils import is_partial_stop, is_sentence_complete, get_context_length class ChatIO(abc.ABC): def prompt_for_input(self, role: str) -> str: """Prompt for input from a role.""" def prompt_for_output(self, role: str): """Prompt for output from a role.""" def stream_output(self, output_stream): """Stream output.""" def print_output(self, text: str): """Print output.""" def get_conv_template(name: str) -> Conversation: """Get a conversation template.""" return conv_templates[name].copy() def load_model( model_path: str, device: str = "cuda", num_gpus: int = 1, max_gpu_memory: Optional[str] = None, dtype: Optional[torch.dtype] = None, load_8bit: bool = False, cpu_offloading: bool = False, gptq_config: Optional[GptqConfig] = None, awq_config: Optional[AWQConfig] = None, exllama_config: Optional[ExllamaConfig] = None, xft_config: Optional[XftConfig] = None, revision: str = "main", debug: bool = False, ): """Load a model from Hugging Face.""" import accelerate # get model adapter adapter = get_model_adapter(model_path) # Handle device mapping cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration( device, load_8bit, cpu_offloading ) if device == "cpu": kwargs = {"torch_dtype": torch.float32} if CPU_ISA in ["avx512_bf16", "amx"]: try: import intel_extension_for_pytorch as ipex kwargs = {"torch_dtype": torch.bfloat16} except ImportError: warnings.warn( "Intel Extension for PyTorch is not installed, it can be installed to accelerate cpu inference" ) elif device == "cuda": kwargs = {"torch_dtype": torch.float16} if num_gpus != 1: kwargs["device_map"] = "auto" if max_gpu_memory is None: kwargs[ "device_map" ] = "sequential" # This is important for not the same VRAM sizes available_gpu_memory = get_gpu_memory(num_gpus) kwargs["max_memory"] = { i: str(int(available_gpu_memory[i] * 0.85)) + "GiB" for i in range(num_gpus) } else: kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)} elif device == "mps": kwargs = {"torch_dtype": torch.float16} import transformers version = tuple(int(v) for v in transformers.__version__.split(".")) if version < (4, 35, 0): # NOTE: Recent transformers library seems to fix the mps issue, also # it has made some changes causing compatibility issues with our # original patch. So we only apply the patch for older versions. # Avoid bugs in mps backend by not using in-place operations. 
replace_llama_attn_with_non_inplace_operations() elif device == "xpu": kwargs = {"torch_dtype": torch.bfloat16} # Try to load ipex, while it looks unused, it links into torch for xpu support try: import intel_extension_for_pytorch as ipex except ImportError: warnings.warn( "Intel Extension for PyTorch is not installed, but is required for xpu inference." ) elif device == "npu": kwargs = {"torch_dtype": torch.float16} # Try to load ipex, while it looks unused, it links into torch for xpu support try: import torch_npu except ImportError: warnings.warn("Ascend Extension for PyTorch is not installed.") else: raise ValueError(f"Invalid device: {device}") if cpu_offloading: # raises an error on incompatible platforms from transformers import BitsAndBytesConfig if "max_memory" in kwargs: kwargs["max_memory"]["cpu"] = ( str(math.floor(psutil.virtual_memory().available / 2**20)) + "Mib" ) kwargs["quantization_config"] = BitsAndBytesConfig( load_in_8bit_fp32_cpu_offload=cpu_offloading ) kwargs["load_in_8bit"] = load_8bit elif load_8bit: if num_gpus != 1: warnings.warn( "8-bit quantization is not supported for multi-gpu inference." ) else: model, tokenizer = adapter.load_compress_model( model_path=model_path, device=device, torch_dtype=kwargs["torch_dtype"], revision=revision, ) if debug: print(model) return model, tokenizer elif awq_config and awq_config.wbits < 16: assert ( awq_config.wbits == 4 ), "Currently we only support 4-bit inference for AWQ." model, tokenizer = load_awq_quantized(model_path, awq_config, device) if num_gpus != 1: device_map = accelerate.infer_auto_device_map( model, max_memory=kwargs["max_memory"], no_split_module_classes=[ "OPTDecoderLayer", "LlamaDecoderLayer", "BloomBlock", "MPTBlock", "DecoderLayer", ], ) model = accelerate.dispatch_model( model, device_map=device_map, offload_buffers=True ) else: model.to(device) return model, tokenizer elif gptq_config and gptq_config.wbits < 16: model, tokenizer = load_gptq_quantized(model_path, gptq_config) if num_gpus != 1: device_map = accelerate.infer_auto_device_map( model, max_memory=kwargs["max_memory"], no_split_module_classes=["LlamaDecoderLayer"], ) model = accelerate.dispatch_model( model, device_map=device_map, offload_buffers=True ) else: model.to(device) return model, tokenizer elif exllama_config: model, tokenizer = load_exllama_model(model_path, exllama_config) return model, tokenizer elif xft_config: model, tokenizer = load_xft_model(model_path, xft_config) return model, tokenizer kwargs["revision"] = revision if dtype is not None: # Overwrite dtype if it is provided in the arguments. kwargs["torch_dtype"] = dtype if os.environ.get("FASTCHAT_USE_MODELSCOPE", "False").lower() == "true": # download model from ModelScope hub, # lazy import so that modelscope is not required for normal use. 
try: from modelscope.hub.snapshot_download import snapshot_download if not os.path.exists(model_path): model_path = snapshot_download(model_id=model_path, revision=revision) except ImportError as e: warnings.warn( "Use model from www.modelscope.cn need pip install modelscope" ) raise e # Load model model, tokenizer = adapter.load_model(model_path, kwargs) if ( device == "cpu" and kwargs["torch_dtype"] is torch.bfloat16 and CPU_ISA is not None ): model = ipex.optimize(model, dtype=kwargs["torch_dtype"]) if (device == "cuda" and num_gpus == 1 and not cpu_offloading) or device in ( "mps", "xpu", "npu", ): model.to(device) if device == "xpu": model = torch.xpu.optimize(model, dtype=kwargs["torch_dtype"], inplace=True) if debug: print(model) return model, tokenizer def get_conversation_template(model_path: str) -> Conversation: """Get the default conversation template.""" adapter = get_model_adapter(model_path) return adapter.get_default_conv_template(model_path) def get_generate_stream_function(model: torch.nn.Module, model_path: str): """Get the generate_stream function for inference.""" from fastchat.serve.inference import generate_stream model_type = str(type(model)).lower() is_peft = "peft" in model_type is_chatglm = "chatglm" in model_type is_falcon = "rwforcausallm" in model_type is_codet5p = "codet5p" in model_type is_exllama = "exllama" in model_type is_xft = "xft" in model_type if is_chatglm: return generate_stream_chatglm elif is_falcon: return generate_stream_falcon elif is_codet5p: return generate_stream_codet5p elif is_exllama: return generate_stream_exllama elif is_xft: return generate_stream_xft elif peft_share_base_weights and is_peft: # Return a curried stream function that loads the right adapter # according to the model_name available in this context. This ensures # the right weights are available. def generate_stream_peft( model, tokenizer, params: Dict, device: str, context_len: int, stream_interval: int = 2, judge_sent_end: bool = False, ): model.set_adapter(model_path) base_model_type = str(type(model.base_model.model)) is_chatglm = "chatglm" in base_model_type is_falcon = "rwforcausallm" in base_model_type is_codet5p = "codet5p" in base_model_type is_exllama = "exllama" in base_model_type is_xft = "xft" in base_model_type generate_stream_function = generate_stream if is_chatglm: generate_stream_function = generate_stream_chatglm elif is_falcon: generate_stream_function = generate_stream_falcon elif is_codet5p: generate_stream_function = generate_stream_codet5p elif is_exllama: generate_stream_function = generate_stream_exllama elif is_xft: generate_stream_function = generate_stream_xft for x in generate_stream_function( model, tokenizer, params, device, context_len, stream_interval, judge_sent_end, ): yield x return generate_stream_peft else: return generate_stream class AWQConfig: ckpt: str = field( default=None, metadata={ "help": "Load quantized model. The path to the local AWQ checkpoint." }, ) wbits: int = field(default=16, metadata={"help": "#bits to use for quantization"}) groupsize: int = field( default=-1, metadata={"help": "Groupsize to use for quantization; default uses full row."}, ) class GptqConfig: ckpt: str = field( default=None, metadata={ "help": "Load quantized model. The path to the local GPTQ checkpoint." 
}, ) wbits: int = field(default=16, metadata={"help": "#bits to use for quantization"}) groupsize: int = field( default=-1, metadata={"help": "Groupsize to use for quantization; default uses full row."}, ) act_order: bool = field( default=True, metadata={"help": "Whether to apply the activation order GPTQ heuristic"}, ) class ExllamaConfig: max_seq_len: int gpu_split: str = None cache_8bit: bool = False class XftConfig: max_seq_len: int = 4096 beam_width: int = 1 eos_token_id: int = -1 pad_token_id: int = -1 num_return_sequences: int = 1 is_encoder_decoder: bool = False padding: bool = True early_stopping: bool = False data_type: str = "bf16_fp16" def get_context_length(config): """Get the context length of a model from a huggingface model config.""" rope_scaling = getattr(config, "rope_scaling", None) if rope_scaling: rope_scaling_factor = config.rope_scaling["factor"] else: rope_scaling_factor = 1 for key in SEQUENCE_LENGTH_KEYS: val = getattr(config, key, None) if val is not None: return int(rope_scaling_factor * val) return 2048 def chat_loop( model_path: str, device: str, num_gpus: int, max_gpu_memory: str, dtype: Optional[torch.dtype], load_8bit: bool, cpu_offloading: bool, conv_template: Optional[str], conv_system_msg: Optional[str], temperature: float, repetition_penalty: float, max_new_tokens: int, chatio: ChatIO, gptq_config: Optional[GptqConfig] = None, awq_config: Optional[AWQConfig] = None, exllama_config: Optional[ExllamaConfig] = None, xft_config: Optional[XftConfig] = None, revision: str = "main", judge_sent_end: bool = True, debug: bool = True, history: bool = True, ): # Model model, tokenizer = load_model( model_path, device=device, num_gpus=num_gpus, max_gpu_memory=max_gpu_memory, dtype=dtype, load_8bit=load_8bit, cpu_offloading=cpu_offloading, gptq_config=gptq_config, awq_config=awq_config, exllama_config=exllama_config, xft_config=xft_config, revision=revision, debug=debug, ) generate_stream_func = get_generate_stream_function(model, model_path) model_type = str(type(model)).lower() is_t5 = "t5" in model_type is_codet5p = "codet5p" in model_type is_xft = "xft" in model_type # Hardcode T5's default repetition penalty to be 1.2 if is_t5 and repetition_penalty == 1.0: repetition_penalty = 1.2 # Set context length context_len = get_context_length(model.config) # Chat def new_chat(): if conv_template: conv = get_conv_template(conv_template) else: conv = get_conversation_template(model_path) if conv_system_msg is not None: conv.set_system_message(conv_system_msg) return conv def reload_conv(conv): """ Reprints the conversation from the start. 
""" for message in conv.messages[conv.offset :]: chatio.prompt_for_output(message[0]) chatio.print_output(message[1]) conv = None while True: if not history or not conv: conv = new_chat() try: inp = chatio.prompt_for_input(conv.roles[0]) except EOFError: inp = "" if inp == "!!exit" or not inp: print("exit...") break elif inp == "!!reset": print("resetting...") conv = new_chat() continue elif inp == "!!remove": print("removing last message...") if len(conv.messages) > conv.offset: # Assistant if conv.messages[-1][0] == conv.roles[1]: conv.messages.pop() # User if conv.messages[-1][0] == conv.roles[0]: conv.messages.pop() reload_conv(conv) else: print("No messages to remove.") continue elif inp == "!!regen": print("regenerating last message...") if len(conv.messages) > conv.offset: # Assistant if conv.messages[-1][0] == conv.roles[1]: conv.messages.pop() # User if conv.messages[-1][0] == conv.roles[0]: reload_conv(conv) # Set inp to previous message inp = conv.messages.pop()[1] else: # Shouldn't happen in normal circumstances print("No user message to regenerate from.") continue else: print("No messages to regenerate.") continue elif inp.startswith("!!save"): args = inp.split(" ", 1) if len(args) != 2: print("usage: !!save <filename>") continue else: filename = args[1] # Add .json if extension not present if not "." in filename: filename += ".json" print("saving...", filename) with open(filename, "w") as outfile: json.dump(conv.dict(), outfile) continue elif inp.startswith("!!load"): args = inp.split(" ", 1) if len(args) != 2: print("usage: !!load <filename>") continue else: filename = args[1] # Check if file exists and add .json if needed if not os.path.exists(filename): if (not filename.endswith(".json")) and os.path.exists( filename + ".json" ): filename += ".json" else: print("file not found:", filename) continue print("loading...", filename) with open(filename, "r") as infile: new_conv = json.load(infile) conv = get_conv_template(new_conv["template_name"]) conv.set_system_message(new_conv["system_message"]) conv.messages = new_conv["messages"] reload_conv(conv) continue conv.append_message(conv.roles[0], inp) conv.append_message(conv.roles[1], None) prompt = conv.get_prompt() if is_codet5p: # codet5p is a code completion model. prompt = inp gen_params = { "model": model_path, "prompt": prompt, "temperature": temperature, "repetition_penalty": repetition_penalty, "max_new_tokens": max_new_tokens, "stop": conv.stop_str, "stop_token_ids": conv.stop_token_ids, "echo": False, } try: chatio.prompt_for_output(conv.roles[1]) output_stream = generate_stream_func( model, tokenizer, gen_params, device, context_len=context_len, judge_sent_end=judge_sent_end, ) t = time.time() outputs = chatio.stream_output(output_stream) duration = time.time() - t conv.update_last_message(outputs.strip()) if debug: num_tokens = len(tokenizer.encode(outputs)) msg = { "conv_template": conv.name, "prompt": prompt, "outputs": outputs, "speed (token/s)": round(num_tokens / duration, 2), } print(f"\n{msg}\n") except KeyboardInterrupt: print("stopped generation.") # If generation didn't finish if conv.messages[-1][1] is None: conv.messages.pop() # Remove last user message, so there isn't a double up if conv.messages[-1][0] == conv.roles[0]: conv.messages.pop() reload_conv(conv)
null
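# Hedged sketch of the context-length rule used by `get_context_length` in the
# record above: multiply the first sequence-length field found on the config by
# the RoPE scaling factor, else fall back to 2048. `SEQUENCE_LENGTH_KEYS` and
# `DummyConfig` below are assumed stand-ins for illustration only.
SEQUENCE_LENGTH_KEYS = ["max_position_embeddings", "max_sequence_length", "seq_length"]

class DummyConfig:
    max_position_embeddings = 4096
    rope_scaling = {"factor": 2.0}

def context_length(config):
    rope_scaling = getattr(config, "rope_scaling", None)
    factor = rope_scaling["factor"] if rope_scaling else 1
    for key in SEQUENCE_LENGTH_KEYS:
        val = getattr(config, key, None)
        if val is not None:
            return int(factor * val)
    return 2048

print(context_length(DummyConfig()))  # -> 8192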
20,531
import argparse import base64 import gc import json import os from typing import List, Optional import uuid import torch import torch.nn.functional as F from transformers import set_seed import uvicorn from fastchat.constants import ErrorCode, SERVER_ERROR_MSG from fastchat.model.model_adapter import ( load_model, add_model_args, get_generate_stream_function, ) from fastchat.modules.awq import AWQConfig from fastchat.modules.exllama import ExllamaConfig from fastchat.modules.xfastertransformer import XftConfig from fastchat.modules.gptq import GptqConfig from fastchat.serve.base_model_worker import BaseModelWorker, app from fastchat.utils import ( build_logger, get_context_length, str_to_torch_dtype, ) worker_id = str(uuid.uuid4())[:8] logger = build_logger("model_worker", f"model_worker_{worker_id}.log") class ModelWorker(BaseModelWorker): def __init__( self, controller_addr: str, worker_addr: str, worker_id: str, model_path: str, model_names: List[str], limit_worker_concurrency: int, no_register: bool, device: str, num_gpus: int, max_gpu_memory: str, revision: str = None, dtype: Optional[torch.dtype] = None, load_8bit: bool = False, cpu_offloading: bool = False, gptq_config: Optional[GptqConfig] = None, awq_config: Optional[AWQConfig] = None, exllama_config: Optional[ExllamaConfig] = None, xft_config: Optional[XftConfig] = None, stream_interval: int = 2, conv_template: Optional[str] = None, embed_in_truncate: bool = False, seed: Optional[int] = None, debug: bool = False, **kwargs, ): def generate_stream_gate(self, params): def generate_gate(self, params): def __process_embed_chunk(self, input_ids, attention_mask, **model_type_dict): def __encode_base64(self, embeddings: torch.Tensor) -> List[str]: def get_embeddings(self, params): def add_model_args(parser): class AWQConfig: class ExllamaConfig: class XftConfig: class GptqConfig: def str_to_torch_dtype(dtype: str): def create_model_worker(): parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--port", type=int, default=21002) parser.add_argument("--worker-address", type=str, default="http://localhost:21002") parser.add_argument( "--controller-address", type=str, default="http://localhost:21001" ) add_model_args(parser) parser.add_argument( "--model-names", type=lambda s: s.split(","), help="Optional display comma separated names", ) parser.add_argument( "--conv-template", type=str, default=None, help="Conversation prompt template." ) parser.add_argument("--embed-in-truncate", action="store_true") parser.add_argument( "--limit-worker-concurrency", type=int, default=5, help="Limit the model concurrency to prevent OOM.", ) parser.add_argument("--stream-interval", type=int, default=2) parser.add_argument("--no-register", action="store_true") parser.add_argument( "--seed", type=int, default=None, help="Overwrite the random seed for each generation.", ) parser.add_argument( "--debug", type=bool, default=False, help="Print debugging messages" ) parser.add_argument( "--ssl", action="store_true", required=False, default=False, help="Enable SSL. Requires OS Environment variables 'SSL_KEYFILE' and 'SSL_CERTFILE'.", ) args = parser.parse_args() logger.info(f"args: {args}") if args.gpus: if len(args.gpus.split(",")) < args.num_gpus: raise ValueError( f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!" 
) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus gptq_config = GptqConfig( ckpt=args.gptq_ckpt or args.model_path, wbits=args.gptq_wbits, groupsize=args.gptq_groupsize, act_order=args.gptq_act_order, ) awq_config = AWQConfig( ckpt=args.awq_ckpt or args.model_path, wbits=args.awq_wbits, groupsize=args.awq_groupsize, ) if args.enable_exllama: exllama_config = ExllamaConfig( max_seq_len=args.exllama_max_seq_len, gpu_split=args.exllama_gpu_split, cache_8bit=args.exllama_cache_8bit, ) else: exllama_config = None if args.enable_xft: xft_config = XftConfig( max_seq_len=args.xft_max_seq_len, data_type=args.xft_dtype, ) if args.device != "cpu": print("xFasterTransformer now is only support CPUs. Reset device to CPU") args.device = "cpu" else: xft_config = None worker = ModelWorker( args.controller_address, args.worker_address, worker_id, args.model_path, args.model_names, args.limit_worker_concurrency, revision=args.revision, no_register=args.no_register, device=args.device, num_gpus=args.num_gpus, max_gpu_memory=args.max_gpu_memory, dtype=str_to_torch_dtype(args.dtype), load_8bit=args.load_8bit, cpu_offloading=args.cpu_offloading, gptq_config=gptq_config, awq_config=awq_config, exllama_config=exllama_config, xft_config=xft_config, stream_interval=args.stream_interval, conv_template=args.conv_template, embed_in_truncate=args.embed_in_truncate, seed=args.seed, debug=args.debug, ) return args, worker
null
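# Hedged sketch of the --gpus / --num-gpus handshake performed above before the
# worker is built: the visible-device list must cover num_gpus and is exported
# via CUDA_VISIBLE_DEVICES so downstream torch code only sees those devices.
import os

def apply_gpu_args(gpus: str, num_gpus: int):
    if gpus:
        if len(gpus.split(",")) < num_gpus:
            raise ValueError(f"Larger --num-gpus ({num_gpus}) than --gpus {gpus}!")
        os.environ["CUDA_VISIBLE_DEVICES"] = gpus

apply_gpu_args("0,1", 2)   # ok: two visible devices for two GPUs
# apply_gpu_args("0", 2)   # would raise ValueError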
20,532
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger CONTROLLER_HEART_BEAT_EXPIRATION = int( os.getenv("FASTCHAT_CONTROLLER_HEART_BEAT_EXPIRATION", 90) ) def heart_beat_controller(controller): while True: time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION) controller.remove_stale_workers_by_expiration()
null
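# Hedged sketch of the expiration rule behind the controller heartbeat loop
# above: any worker whose last heartbeat is older than the expiration window is
# dropped from the registry.
import time

HEART_BEAT_EXPIRATION = 90  # seconds, mirrors the default in the record above

def remove_stale(worker_last_beat: dict):
    expire = time.time() - HEART_BEAT_EXPIRATION
    for name in [n for n, t in worker_last_beat.items() if t < expire]:
        del worker_last_beat[name]

beats = {"w1": time.time(), "w2": time.time() - 1000}
remove_stale(beats)
print(list(beats))  # -> ['w1']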
20,533
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger async def register_worker(request: Request): data = await request.json() controller.register_worker( data["worker_name"], data["check_heart_beat"], data.get("worker_status", None) )
null
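# Hedged sketch of the endpoint pattern shared by this and the following
# controller-route records: each handler awaits the JSON body and forwards it to
# a controller method. The route name here is hypothetical; run with
# `uvicorn module:app` (requires fastapi and uvicorn installed).
from fastapi import FastAPI, Request

app = FastAPI()

@app.post("/echo_register")
async def echo_register(request: Request):
    data = await request.json()
    # a real handler would call controller.register_worker(...) here
    return {"received": data}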
20,534
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger async def refresh_all_workers(): models = controller.refresh_all_workers()
null
20,535
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger async def list_models(): models = controller.list_models() return {"models": models}
null
20,536
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger async def get_worker_address(request: Request): data = await request.json() addr = controller.get_worker_address(data["model"]) return {"address": addr}
null
20,537
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger async def receive_heart_beat(request: Request): data = await request.json() exist = controller.receive_heart_beat(data["worker_name"], data["queue_length"]) return {"exist": exist}
null
20,538
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger async def worker_api_generate_stream(request: Request): params = await request.json() generator = controller.worker_api_generate_stream(params) return StreamingResponse(generator)
null
20,539
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger async def worker_api_get_status(request: Request): return controller.worker_api_get_status()
null
20,540
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger async def worker_api_get_status(request: Request): return "success"
null
20,541
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import os import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from fastchat.constants import ( CONTROLLER_HEART_BEAT_EXPIRATION, WORKER_API_TIMEOUT, ErrorCode, SERVER_ERROR_MSG, ) from fastchat.utils import build_logger logger = build_logger("controller", "controller.log") class Controller: def __init__(self, dispatch_method: str): # Dict[str -> WorkerInfo] self.worker_info = {} self.dispatch_method = DispatchMethod.from_str(dispatch_method) self.heart_beat_thread = threading.Thread( target=heart_beat_controller, args=(self,) ) self.heart_beat_thread.start() def register_worker( self, worker_name: str, check_heart_beat: bool, worker_status: dict ): if worker_name not in self.worker_info: logger.info(f"Register a new worker: {worker_name}") else: logger.info(f"Register an existing worker: {worker_name}") if not worker_status: worker_status = self.get_worker_status(worker_name) if not worker_status: return False self.worker_info[worker_name] = WorkerInfo( worker_status["model_names"], worker_status["speed"], worker_status["queue_length"], check_heart_beat, time.time(), ) logger.info(f"Register done: {worker_name}, {worker_status}") return True def get_worker_status(self, worker_name: str): try: r = requests.post(worker_name + "/worker_get_status", timeout=5) except requests.exceptions.RequestException as e: logger.error(f"Get status fails: {worker_name}, {e}") return None if r.status_code != 200: logger.error(f"Get status fails: {worker_name}, {r}") return None return r.json() def remove_worker(self, worker_name: str): del self.worker_info[worker_name] def refresh_all_workers(self): old_info = dict(self.worker_info) self.worker_info = {} for w_name, w_info in old_info.items(): if not self.register_worker(w_name, w_info.check_heart_beat, None): logger.info(f"Remove stale worker: {w_name}") def list_models(self): model_names = set() for w_name, w_info in self.worker_info.items(): model_names.update(w_info.model_names) return list(model_names) def get_worker_address(self, model_name: str): if self.dispatch_method == DispatchMethod.LOTTERY: worker_names = [] worker_speeds = [] for w_name, w_info in self.worker_info.items(): if model_name in w_info.model_names: worker_names.append(w_name) worker_speeds.append(w_info.speed) worker_speeds = np.array(worker_speeds, dtype=np.float32) norm = np.sum(worker_speeds) if norm < 1e-4: return "" worker_speeds = worker_speeds / norm if True: # Directly return address pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds) worker_name = worker_names[pt] return worker_name # Check status before returning while True: pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds) worker_name = worker_names[pt] if self.get_worker_status(worker_name): break else: self.remove_worker(worker_name) worker_speeds[pt] = 0 norm = np.sum(worker_speeds) if norm < 1e-4: return "" worker_speeds = worker_speeds / norm continue return worker_name elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE: worker_names = [] worker_qlen = [] for w_name, w_info in self.worker_info.items(): if model_name in w_info.model_names: worker_names.append(w_name) worker_qlen.append(w_info.queue_length / w_info.speed) if len(worker_names) == 0: return "" min_index = np.argmin(worker_qlen) w_name = worker_names[min_index] 
self.worker_info[w_name].queue_length += 1 logger.info( f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}" ) return w_name else: raise ValueError(f"Invalid dispatch method: {self.dispatch_method}") def receive_heart_beat(self, worker_name: str, queue_length: int): if worker_name not in self.worker_info: logger.info(f"Receive unknown heart beat. {worker_name}") return False self.worker_info[worker_name].queue_length = queue_length self.worker_info[worker_name].last_heart_beat = time.time() logger.info(f"Receive heart beat. {worker_name}") return True def remove_stale_workers_by_expiration(self): expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION to_delete = [] for worker_name, w_info in self.worker_info.items(): if w_info.check_heart_beat and w_info.last_heart_beat < expire: to_delete.append(worker_name) for worker_name in to_delete: self.remove_worker(worker_name) def handle_no_worker(self, params): logger.info(f"no worker: {params['model']}") ret = { "text": SERVER_ERROR_MSG, "error_code": ErrorCode.CONTROLLER_NO_WORKER, } return json.dumps(ret).encode() + b"\0" def handle_worker_timeout(self, worker_address): logger.info(f"worker timeout: {worker_address}") ret = { "text": SERVER_ERROR_MSG, "error_code": ErrorCode.CONTROLLER_WORKER_TIMEOUT, } return json.dumps(ret).encode() + b"\0" # Let the controller act as a worker to achieve hierarchical # management. This can be used to connect isolated sub networks. def worker_api_get_status(self): model_names = set() speed = 0 queue_length = 0 for w_name in self.worker_info: worker_status = self.get_worker_status(w_name) if worker_status is not None: model_names.update(worker_status["model_names"]) speed += worker_status["speed"] queue_length += worker_status["queue_length"] model_names = sorted(list(model_names)) return { "model_names": model_names, "speed": speed, "queue_length": queue_length, } def worker_api_generate_stream(self, params): worker_addr = self.get_worker_address(params["model"]) if not worker_addr: yield self.handle_no_worker(params) try: response = requests.post( worker_addr + "/worker_generate_stream", json=params, stream=True, timeout=WORKER_API_TIMEOUT, ) for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"): if chunk: yield chunk + b"\0" except requests.exceptions.RequestException as e: yield self.handle_worker_timeout(worker_addr) def create_controller(): parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--port", type=int, default=21001) parser.add_argument( "--dispatch-method", type=str, choices=["lottery", "shortest_queue"], default="shortest_queue", ) parser.add_argument( "--ssl", action="store_true", required=False, default=False, help="Enable SSL. Requires OS Environment variables 'SSL_KEYFILE' and 'SSL_CERTFILE'.", ) args = parser.parse_args() logger.info(f"args: {args}") controller = Controller(args.dispatch_method) return args, controller
null
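# Hedged sketch of the two dispatch policies implemented by
# Controller.get_worker_address above: lottery (speed-weighted random choice)
# and shortest_queue (argmin of queue length normalized by speed).
import numpy as np

def lottery(names, speeds):
    speeds = np.array(speeds, dtype=np.float32)
    norm = speeds.sum()
    if norm < 1e-4:
        return ""
    return names[np.random.choice(len(names), p=speeds / norm)]

def shortest_queue(names, qlens, speeds):
    if not names:
        return ""
    scores = [q / s for q, s in zip(qlens, speeds)]
    return names[int(np.argmin(scores))]

print(shortest_queue(["a", "b"], [4, 1], [1.0, 1.0]))  # -> 'b'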
20,542
import sys import os import subprocess import re import argparse LOGDIR = "./logs/" controller_args = ["controller-host", "controller-port", "dispatch-method"] server_args = [ "server-host", "server-port", "allow-credentials", "api-keys", "controller-address", ] args = parser.parse_args() args = argparse.Namespace( **vars(args), **{"controller-address": f"http://{args.controller_host}:{args.controller_port}"}, ) if args.gpus: if len(args.gpus.split(",")) < args.num_gpus: raise ValueError( f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!" ) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus base_launch_sh = "nohup python3 -m fastchat.serve.{0} {1} >{2}/{3}.log 2>&1 &" base_check_sh = """while [ `grep -c "Uvicorn running on" {0}/{1}.log` -eq '0' ];do sleep 1s; echo "wait {2} running" done echo '{2} running' """ def string_args(args, args_list): args_str = "" for key, value in args._get_kwargs(): key = key.replace("_", "-") if key not in args_list: continue key = key.split("-")[-1] if re.search("port|host", key) else key if not value: pass # 1==True -> True elif isinstance(value, bool) and value == True: args_str += f" --{key} " elif ( isinstance(value, list) or isinstance(value, tuple) or isinstance(value, set) ): value = " ".join(value) args_str += f" --{key} {value} " else: args_str += f" --{key} {value} " return args_str def launch_worker(item): log_name = ( item.split("/")[-1] .split("\\")[-1] .replace("-", "_") .replace("@", "_") .replace(".", "_") ) args.model_path, args.worker_host, args.worker_port = item.split("@") print("*" * 80) worker_str_args = string_args(args, worker_args) print(worker_str_args) worker_sh = base_launch_sh.format( "model_worker", worker_str_args, LOGDIR, f"worker_{log_name}" ) worker_check_sh = base_check_sh.format(LOGDIR, f"worker_{log_name}", "model_worker") subprocess.run(worker_sh, shell=True, check=True) subprocess.run(worker_check_sh, shell=True, check=True) def launch_all(): controller_str_args = string_args(args, controller_args) controller_sh = base_launch_sh.format( "controller", controller_str_args, LOGDIR, "controller" ) controller_check_sh = base_check_sh.format(LOGDIR, "controller", "controller") subprocess.run(controller_sh, shell=True, check=True) subprocess.run(controller_check_sh, shell=True, check=True) if isinstance(args.model_path_address, str): launch_worker(args.model_path_address) else: for idx, item in enumerate(args.model_path_address): print(f"loading {idx}th model:{item}") launch_worker(item) server_str_args = string_args(args, server_args) server_sh = base_launch_sh.format( "openai_api_server", server_str_args, LOGDIR, "openai_api_server" ) server_check_sh = base_check_sh.format( LOGDIR, "openai_api_server", "openai_api_server" ) subprocess.run(server_sh, shell=True, check=True) subprocess.run(server_check_sh, shell=True, check=True)
null
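# Hedged sketch of the flag-building idea behind string_args above: turn an
# argparse Namespace into a shell argument string, emitting bare flags for
# booleans and space-joined values for sequences (the original also rewrites
# host/port key names, which this simplified version skips).
import argparse

def to_flags(ns: argparse.Namespace, allowed):
    parts = []
    for key, value in vars(ns).items():
        key = key.replace("_", "-")
        if key not in allowed or value in (None, False, ""):
            continue
        if value is True:
            parts.append(f"--{key}")
        elif isinstance(value, (list, tuple, set)):
            parts.append(f"--{key} " + " ".join(map(str, value)))
        else:
            parts.append(f"--{key} {value}")
    return " ".join(parts)

ns = argparse.Namespace(host="localhost", port=21001, no_register=True)
print(to_flags(ns, {"host", "port", "no-register"}))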
20,543
import asyncio
import os
import threading
import time
from typing import List

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL
from fastchat.conversation import Conversation
from fastchat.utils import pretty_print_semaphore, build_logger

WORKER_HEART_BEAT_INTERVAL = int(os.getenv("FASTCHAT_WORKER_HEART_BEAT_INTERVAL", 45))

def heart_beat_worker(obj):
    while True:
        time.sleep(WORKER_HEART_BEAT_INTERVAL)
        obj.send_heart_beat()
null
20,544
import asyncio
import threading
import time
from typing import List

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL
from fastchat.conversation import Conversation
from fastchat.utils import pretty_print_semaphore, build_logger

worker = None

def release_worker_semaphore():
    # needed by create_background_tasks below
    worker.semaphore.release()

def acquire_worker_semaphore():
    if worker.semaphore is None:
        worker.semaphore = asyncio.Semaphore(worker.limit_worker_concurrency)
    return worker.semaphore.acquire()

def create_background_tasks():
    background_tasks = BackgroundTasks()
    background_tasks.add_task(release_worker_semaphore)
    return background_tasks

async def api_generate_stream(request: Request):
    params = await request.json()
    await acquire_worker_semaphore()
    generator = worker.generate_stream_gate(params)
    background_tasks = create_background_tasks()
    return StreamingResponse(generator, background=background_tasks)
null
20,545
import asyncio
import threading
import time
from typing import List

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL
from fastchat.conversation import Conversation
from fastchat.utils import pretty_print_semaphore, build_logger

worker = None

def release_worker_semaphore():
    worker.semaphore.release()

def acquire_worker_semaphore():
    if worker.semaphore is None:
        worker.semaphore = asyncio.Semaphore(worker.limit_worker_concurrency)
    return worker.semaphore.acquire()

async def api_generate(request: Request):
    params = await request.json()
    await acquire_worker_semaphore()
    output = await asyncio.to_thread(worker.generate_gate, params)
    release_worker_semaphore()
    return JSONResponse(output)
null
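# Hedged sketch of the concurrency guard used by api_generate above: a lazily
# created asyncio.Semaphore caps in-flight generations, and the blocking model
# call is pushed to a worker thread with asyncio.to_thread (Python 3.9+).
# `blocking_generate` is a hypothetical stand-in for real inference.
import asyncio
import time

semaphore = None
LIMIT = 2

def blocking_generate(params):
    time.sleep(0.1)  # stand-in for model inference
    return f"out:{params}"

async def guarded_generate(params):
    global semaphore
    if semaphore is None:
        semaphore = asyncio.Semaphore(LIMIT)
    await semaphore.acquire()
    try:
        return await asyncio.to_thread(blocking_generate, params)
    finally:
        semaphore.release()

print(asyncio.run(guarded_generate("hi")))  # -> out:hi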
20,546
import asyncio import threading import time from typing import List from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse, JSONResponse import requests from fastchat.constants import WORKER_HEART_BEAT_INTERVAL from fastchat.conversation import Conversation from fastchat.utils import pretty_print_semaphore, build_logger worker = None def release_worker_semaphore(): worker.semaphore.release() def acquire_worker_semaphore(): if worker.semaphore is None: worker.semaphore = asyncio.Semaphore(worker.limit_worker_concurrency) return worker.semaphore.acquire() async def api_get_embeddings(request: Request): params = await request.json() await acquire_worker_semaphore() embedding = worker.get_embeddings(params) release_worker_semaphore() return JSONResponse(content=embedding)
null
20,547
import asyncio import threading import time from typing import List from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse, JSONResponse import requests from fastchat.constants import WORKER_HEART_BEAT_INTERVAL from fastchat.conversation import Conversation from fastchat.utils import pretty_print_semaphore, build_logger worker = None async def api_get_status(request: Request): return worker.get_status()
null
20,548
import asyncio import threading import time from typing import List from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse, JSONResponse import requests from fastchat.constants import WORKER_HEART_BEAT_INTERVAL from fastchat.conversation import Conversation from fastchat.utils import pretty_print_semaphore, build_logger worker = None async def api_count_token(request: Request): params = await request.json() return worker.count_token(params)
null
20,549
import asyncio import threading import time from typing import List from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse, JSONResponse import requests from fastchat.constants import WORKER_HEART_BEAT_INTERVAL from fastchat.conversation import Conversation from fastchat.utils import pretty_print_semaphore, build_logger worker = None def get_conv_template(name: str) -> Conversation: """Get a conversation template.""" return conv_templates[name].copy() async def api_get_conv(request: Request): return worker.get_conv_template()
null
20,550
import asyncio import threading import time from typing import List from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse, JSONResponse import requests from fastchat.constants import WORKER_HEART_BEAT_INTERVAL from fastchat.conversation import Conversation from fastchat.utils import pretty_print_semaphore, build_logger worker = None async def api_model_details(request: Request): return {"context_length": worker.context_len}
null
20,551
import argparse import numpy as np from fastchat.serve.monitor.monitor import load_leaderboard_table_csv def model_hyperlink(model_name, link): return f'<a target="_blank" href="{link}"> {model_name} </a>'
null
20,552
import argparse
import ast
import json
import pickle
import os
import threading
import time

import pandas as pd
import gradio as gr
import numpy as np

from fastchat.serve.monitor.basic_stats import report_basic_stats, get_log_files
from fastchat.serve.monitor.clean_battle_data import clean_battle_data
from fastchat.serve.monitor.elo_analysis import report_elo_analysis_results
from fastchat.utils import build_logger, get_window_url_params_js

def update_elo_components(
    max_num_files, elo_results_file, ban_ip_file, exclude_model_names
):
    log_files = get_log_files(max_num_files)

    # Leaderboard
    if elo_results_file is None:  # Do live update
        ban_ip_list = json.load(open(ban_ip_file)) if ban_ip_file else None
        battles = clean_battle_data(
            log_files, exclude_model_names, ban_ip_list=ban_ip_list
        )
        elo_results = report_elo_analysis_results(battles)

        leader_component_values[0] = make_leaderboard_md_live(elo_results)
        leader_component_values[1] = elo_results["win_fraction_heatmap"]
        leader_component_values[2] = elo_results["battle_count_heatmap"]
        leader_component_values[3] = elo_results["bootstrap_elo_rating"]
        leader_component_values[4] = elo_results["average_win_rate_bar"]

    # Basic stats
    basic_stats = report_basic_stats(log_files)
    md0 = f"Last updated: {basic_stats['last_updated_datetime']}"

    md1 = "### Action Histogram\n"
    md1 += basic_stats["action_hist_md"] + "\n"

    md2 = "### Anony. Vote Histogram\n"
    md2 += basic_stats["anony_vote_hist_md"] + "\n"

    md3 = "### Model Call Histogram\n"
    md3 += basic_stats["model_hist_md"] + "\n"

    md4 = "### Model Call (Last 24 Hours)\n"
    md4 += basic_stats["num_chats_last_24_hours"] + "\n"

    basic_component_values[0] = md0
    basic_component_values[1] = basic_stats["chat_dates_bar"]
    basic_component_values[2] = md1
    basic_component_values[3] = md2
    basic_component_values[4] = md3
    basic_component_values[5] = md4

def update_worker(
    max_num_files, interval, elo_results_file, ban_ip_file, exclude_model_names
):
    while True:
        tic = time.time()
        update_elo_components(
            max_num_files, elo_results_file, ban_ip_file, exclude_model_names
        )
        duration = time.time() - tic
        print(f"update duration: {duration:.2f} s")
        time.sleep(max(interval - duration, 0))
null
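# Hedged sketch of the scheduling trick in update_worker above: subtract the
# time the refresh itself took from the sleep, so updates stay on a fixed
# cadence instead of drifting by the refresh duration.
import time

def periodic(fn, interval, rounds):
    for _ in range(rounds):
        tic = time.time()
        fn()
        duration = time.time() - tic
        time.sleep(max(interval - duration, 0))

periodic(lambda: time.sleep(0.05), interval=0.1, rounds=3)  # ~0.3 s total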
20,553
import argparse import ast import json import pickle import os import threading import time import pandas as pd import gradio as gr import numpy as np from fastchat.serve.monitor.basic_stats import report_basic_stats, get_log_files from fastchat.serve.monitor.clean_battle_data import clean_battle_data from fastchat.serve.monitor.elo_analysis import report_elo_analysis_results from fastchat.utils import build_logger, get_window_url_params_js def load_demo(url_params, request: gr.Request): logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}") return basic_component_values + leader_component_values def build_basic_stats_tab(): empty = "Loading ..." basic_component_values[:] = [empty, None, empty, empty, empty, empty] md0 = gr.Markdown(empty) gr.Markdown("#### Figure 1: Number of model calls and votes") plot_1 = gr.Plot(show_label=False) with gr.Row(): with gr.Column(): md1 = gr.Markdown(empty) with gr.Column(): md2 = gr.Markdown(empty) with gr.Row(): with gr.Column(): md3 = gr.Markdown(empty) with gr.Column(): md4 = gr.Markdown(empty) return [md0, plot_1, md1, md2, md3, md4] def build_leaderboard_tab(elo_results_file, leaderboard_table_file, show_plot=False): if elo_results_file is None: # Do live update default_md = "Loading ..." p1 = p2 = p3 = p4 = None else: with open(elo_results_file, "rb") as fin: elo_results = pickle.load(fin) p1 = elo_results["win_fraction_heatmap"] p2 = elo_results["battle_count_heatmap"] p3 = elo_results["bootstrap_elo_rating"] p4 = elo_results["average_win_rate_bar"] arena_df = elo_results["leaderboard_table_df"] default_md = make_default_md(arena_df, elo_results) md_1 = gr.Markdown(default_md, elem_id="leaderboard_markdown") if leaderboard_table_file: data = load_leaderboard_table_csv(leaderboard_table_file) model_table_df = pd.DataFrame(data) with gr.Tabs() as tabs: # arena table arena_table_vals = get_arena_table(arena_df, model_table_df) with gr.Tab("Arena Elo", id=0): md = make_arena_leaderboard_md(arena_df) gr.Markdown(md, elem_id="leaderboard_markdown") gr.Dataframe( headers=[ "Rank", "🤖 Model", "⭐ Arena Elo", "📊 95% CI", "🗳️ Votes", "Organization", "License", ], datatype=[ "str", "markdown", "number", "str", "number", "str", "str", ], value=arena_table_vals, elem_id="arena_leaderboard_dataframe", height=700, column_widths=[50, 200, 100, 100, 100, 150, 150], wrap=True, ) with gr.Tab("Full Leaderboard", id=1): md = make_full_leaderboard_md(elo_results) gr.Markdown(md, elem_id="leaderboard_markdown") full_table_vals = get_full_table(arena_df, model_table_df) gr.Dataframe( headers=[ "🤖 Model", "⭐ Arena Elo", "📈 MT-bench", "📚 MMLU", "Organization", "License", ], datatype=["markdown", "number", "number", "number", "str", "str"], value=full_table_vals, elem_id="full_leaderboard_dataframe", column_widths=[200, 100, 100, 100, 150, 150], height=700, wrap=True, ) if not show_plot: gr.Markdown( """ ## Visit our [HF space](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) for more analysis! If you want to see more models, please help us [add them](https://github.com/lm-sys/FastChat/blob/main/docs/arena.md#how-to-add-a-new-model). """, elem_id="leaderboard_markdown", ) else: pass leader_component_values[:] = [default_md, p1, p2, p3, p4] if show_plot: gr.Markdown( f"""## More Statistics for Chatbot Arena\n Below are figures for more statistics. The code for generating them is also included in this [notebook]({notebook_url}). You can find more discussions in this blog [post](https://lmsys.org/blog/2023-12-07-leaderboard/). 
""", elem_id="leaderboard_markdown", ) with gr.Row(): with gr.Column(): gr.Markdown( "#### Figure 1: Fraction of Model A Wins for All Non-tied A vs. B Battles" ) plot_1 = gr.Plot(p1, show_label=False) with gr.Column(): gr.Markdown( "#### Figure 2: Battle Count for Each Combination of Models (without Ties)" ) plot_2 = gr.Plot(p2, show_label=False) with gr.Row(): with gr.Column(): gr.Markdown( "#### Figure 3: Bootstrap of Elo Estimates (1000 Rounds of Random Sampling)" ) plot_3 = gr.Plot(p3, show_label=False) with gr.Column(): gr.Markdown( "#### Figure 4: Average Win Rate Against All Other Models (Assuming Uniform Sampling and No Ties)" ) plot_4 = gr.Plot(p4, show_label=False) from fastchat.serve.gradio_web_server import acknowledgment_md gr.Markdown(acknowledgment_md) if show_plot: return [md_1, plot_1, plot_2, plot_3, plot_4] return [md_1] get_window_url_params_js = """ function() { const params = new URLSearchParams(window.location.search); url_params = Object.fromEntries(params); console.log("url_params", url_params); return url_params; } """ block_css = """ #notice_markdown { font-size: 110% } #notice_markdown th { display: none; } #notice_markdown td { padding-top: 6px; padding-bottom: 6px; } #model_description_markdown { font-size: 110% } #leaderboard_markdown { font-size: 110% } #leaderboard_markdown td { padding-top: 6px; padding-bottom: 6px; } #leaderboard_dataframe td { line-height: 0.1em; } #about_markdown { font-size: 110% } #ack_markdown { font-size: 110% } #input_box textarea { } footer { display:none !important } .image-container { display: flex; align-items: center; padding: 1px; } .image-container img { margin: 0 30px; height: 30px; max-height: 100%; width: auto; max-width: 20%; } .image-about img { margin: 0 30px; margin-top: 30px; height: 60px; max-height: 100%; width: auto; max-width: 20%; float: left; } """ def build_demo(elo_results_file, leaderboard_table_file): from fastchat.serve.gradio_web_server import block_css text_size = gr.themes.sizes.text_lg with gr.Blocks( title="Monitor", theme=gr.themes.Base(text_size=text_size), css=block_css, ) as demo: with gr.Tabs() as tabs: with gr.Tab("Leaderboard", id=0): leader_components = build_leaderboard_tab( elo_results_file, leaderboard_table_file, show_plot=True, ) with gr.Tab("Basic Stats", id=1): basic_components = build_basic_stats_tab() url_params = gr.JSON(visible=False) demo.load( load_demo, [url_params], basic_components + leader_components, _js=get_window_url_params_js, ) return demo
null
20,554
import argparse
import json
import pickle
import string
import time

import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim
from sklearn.cluster import KMeans, AgglomerativeClustering
import torch
from tqdm import tqdm

from fastchat.utils import detect_language

def remove_punctuation(input_string):
    # Make a translator object to remove all punctuation
    translator = str.maketrans("", "", string.punctuation)

    # Use the translator object to remove the punctuation
    no_punct = input_string.translate(translator)

    return no_punct

def detect_language(text: str) -> str:
    """Detect the language of a string."""
    import polyglot  # pip3 install polyglot pyicu pycld2
    from polyglot.detect import Detector
    from polyglot.detect.base import logger as polyglot_logger
    import pycld2

    polyglot_logger.setLevel("ERROR")

    try:
        lang_code = Detector(text).language.name
    except (pycld2.error, polyglot.detect.base.UnknownLanguage):
        lang_code = "unknown"
    return lang_code

def read_texts(input_file, min_length, max_length, english_only):
    visited = set()
    texts = []

    lines = json.load(open(input_file, "r"))

    for l in tqdm(lines):
        if "text" in l:
            line_texts = [l["text"]]
        elif "conversation_a" in l:
            line_texts = [
                x["content"] for x in l["conversation_a"] if x["role"] == "user"
            ]
        elif "conversation" in l:
            line_texts = [
                x["content"] for x in l["conversation"] if x["role"] == "user"
            ]

        for text in line_texts:
            text = text.strip()

            # Filter language
            if english_only:
                lang = detect_language(text)
                if lang != "English":
                    continue

            # Filter short or long prompts
            if min_length:
                if len(text) < min_length:
                    continue
            if max_length:
                if len(text) > max_length:
                    continue

            # De-duplication
            words = sorted([x.lower() for x in remove_punctuation(text).split(" ")])
            words = "".join(words)
            if words in visited:
                continue

            visited.add(words)
            texts.append(text)
    return np.array(texts)
null
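# Hedged sketch of the de-duplication key used in read_texts above: strip
# punctuation, lowercase, and sort the words, so trivially reordered prompts
# collapse to the same key.
import string

def dedup_key(text: str) -> str:
    no_punct = text.translate(str.maketrans("", "", string.punctuation))
    return "".join(sorted(w.lower() for w in no_punct.split(" ")))

seen, kept = set(), []
for t in ["Hello world!", "world hello", "Something else"]:
    k = dedup_key(t)
    if k not in seen:
        seen.add(k)
        kept.append(t)
print(kept)  # -> ['Hello world!', 'Something else']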
20,555
import argparse import json import pickle import string import time import numpy as np from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim from sklearn.cluster import KMeans, AgglomerativeClustering import torch from tqdm import tqdm from fastchat.utils import detect_language def get_embeddings(texts, model_name, batch_size): model = SentenceTransformer(model_name) embeddings = model.encode( texts, batch_size=batch_size, show_progress_bar=True, device="cuda", convert_to_tensor=True, ) embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) return embeddings.cpu()
null
20,556
import argparse import json import pickle import string import time import numpy as np from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim from sklearn.cluster import KMeans, AgglomerativeClustering import torch from tqdm import tqdm from fastchat.utils import detect_language def run_k_means(embeddings, num_clusters): np.random.seed(42) clustering_model = KMeans(n_clusters=num_clusters, n_init="auto") clustering_model.fit(embeddings.numpy()) centers = torch.from_numpy(clustering_model.cluster_centers_) labels = torch.from_numpy(clustering_model.labels_) # Sort labels classes, counts = np.unique(labels, return_counts=True) indices = np.argsort(counts)[::-1] classes = [classes[i] for i in indices] new_labels = torch.empty_like(labels) new_centers = torch.empty_like(centers) for i, c in enumerate(classes): new_labels[labels == c] = i new_centers[i] = centers[c] return new_centers, new_labels
null
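# Hedged sketch of the label-sorting step shared by the clustering records:
# renumber cluster ids so that cluster 0 is the largest, 1 the next largest,
# and so on, given labels from any clustering algorithm.
import numpy as np

def sort_labels_by_size(labels: np.ndarray) -> np.ndarray:
    classes, counts = np.unique(labels, return_counts=True)
    order = classes[np.argsort(counts)[::-1]]
    remap = {int(c): i for i, c in enumerate(order)}
    return np.array([remap[int(l)] for l in labels])

print(sort_labels_by_size(np.array([2, 2, 2, 0, 1, 1])))  # -> [0 0 0 2 1 1]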
20,557
import argparse import json import pickle import string import time import numpy as np from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim from sklearn.cluster import KMeans, AgglomerativeClustering import torch from tqdm import tqdm from fastchat.utils import detect_language def run_agg_cluster(embeddings, num_clusters): np.random.seed(42) clustering_model = AgglomerativeClustering(n_clusters=num_clusters) clustering_model.fit(embeddings) labels = torch.from_numpy(clustering_model.labels_) # Sort labels classes, counts = np.unique(labels, return_counts=True) indices = np.argsort(counts)[::-1] classes = [classes[i] for i in indices] new_labels = torch.empty_like(labels) for i, c in enumerate(classes): new_labels[labels == c] = i # Compute centers centers = [] for i in range(len(classes)): centers.append(embeddings[new_labels == i].mean(axis=0, keepdim=True)) centers = torch.cat(centers) return centers, new_labels
null
20,558
import argparse import json import pickle import string import time import numpy as np from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim from sklearn.cluster import KMeans, AgglomerativeClustering import torch from tqdm import tqdm from fastchat.utils import detect_language def run_hdbscan_cluster(embeddings): import hdbscan np.random.seed(42) clusterer = hdbscan.HDBSCAN(min_cluster_size=10) labels = torch.from_numpy(clusterer.fit_predict(embeddings)) # Sort labels classes, counts = np.unique(labels, return_counts=True) indices = np.argsort(counts)[::-1] classes = [classes[i] for i in indices] new_labels = torch.empty_like(labels) for i, c in enumerate(classes): new_labels[labels == c] = i # Compute centers centers = [] for i in range(len(classes)): centers.append(embeddings[new_labels == i].mean(axis=0, keepdim=True)) centers = torch.cat(centers) return centers, new_labels
null
20,559
import argparse import json import pickle import string import time import numpy as np from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim from sklearn.cluster import KMeans, AgglomerativeClustering import torch from tqdm import tqdm from fastchat.utils import detect_language def get_topk_indices(centers, labels, embeddings, topk): indices = [] arange = torch.arange(len(labels)) counts = torch.unique(labels, return_counts=True)[1] topk = min(topk, counts.min().item()) for i in range(len(centers)): tmp_indices = labels == i tmp_arange = arange[tmp_indices] tmp_embeddings = embeddings[tmp_indices] scores = cos_sim(centers[i].unsqueeze(0), tmp_embeddings)[0] sorted_indices = torch.flip(torch.argsort(scores), dims=[0]) indices.append(tmp_arange[sorted_indices[:topk]].unsqueeze(0)) return torch.cat(indices)
null
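# Hedged sketch of the per-cluster top-k selection in get_topk_indices above,
# using plain numpy cosine similarity against a cluster center instead of the
# sentence_transformers cos_sim helper.
import numpy as np

def topk_by_cosine(center: np.ndarray, embeddings: np.ndarray, k: int):
    center = center / np.linalg.norm(center)
    embs = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    scores = embs @ center
    return np.argsort(scores)[::-1][:k]

embs = np.array([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]])
print(topk_by_cosine(np.array([1.0, 0.0]), embs, k=2))  # -> [0 1]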
20,560
import argparse import json import pickle import string import time import numpy as np from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim from sklearn.cluster import KMeans, AgglomerativeClustering import torch from tqdm import tqdm from fastchat.utils import detect_language def print_topk(texts, labels, topk_indices, show_cut_off): ret = "" for k in range(len(topk_indices)): num_samples = torch.sum(labels == k).item() ret += "=" * 20 + f" cluster {k}, #samples: {num_samples} " + "=" * 20 + "\n" for idx in topk_indices[k]: ret += "PROMPT: " + texts[idx][:show_cut_off] + "\n" ret += "=" * 40 + "\n\n" return ret
null
20,561
import argparse import json import pickle import string import time import numpy as np from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim from sklearn.cluster import KMeans, AgglomerativeClustering import torch from tqdm import tqdm from fastchat.utils import detect_language def get_cluster_info(texts, labels, topk_indices): np.random.seed(42) cluster_info = [] for k in range(len(topk_indices)): num_samples = torch.sum(labels == k).item() topk_prompts = [] for idx in topk_indices[k]: topk_prompts.append(texts[idx]) random_prompts = [] for idx in range(len(topk_indices)): random_prompts.append(np.random.choice(texts)) cluster_info.append((num_samples, topk_prompts, random_prompts)) return cluster_info
null
20,562
import argparse
from collections import defaultdict
from enum import Enum, auto
import json
import os
import random

from tqdm import tqdm

blocked_words = []
# Placeholder so the snippet is self-contained; the original pipeline fills
# this with frequently repeated prompts collected elsewhere.
frequent_prompts = set()

class TypeCode(Enum):
    CORRECT = auto()
    ANONYMIZED = auto()
    REDACTED = auto()
    BAD_FORMAT = auto()
    BLOCKED_WORD = auto()
    BLOCKED_MODEL = auto()
    TOO_SHORT = auto()
    TOO_FREQUENT = auto()

def detect_type(conv):
    for key in ["conversation_a", "conversation_b"]:
        messages = [row["content"] for row in conv[key]]
        for msg in messages:
            if not isinstance(msg, str):
                return TypeCode.BAD_FORMAT

        user_prompts = [
            row["content"].lower().strip()
            for row in conv[key]
            if row["role"] == "user"
        ]
        if len(messages) <= 2 and all(len(x) < 16 for x in user_prompts):
            return TypeCode.TOO_SHORT

        if all(x in frequent_prompts for x in user_prompts):
            return TypeCode.TOO_FREQUENT

        for msg in messages:
            msg = msg.lower()
            if "<anonymized>" in msg:
                return TypeCode.ANONYMIZED
            if "<redacted>" in msg:
                return TypeCode.REDACTED

            for w in blocked_words:
                if w in msg:
                    return TypeCode.BLOCKED_WORD

    for key in ["model_a", "model_b"]:
        if conv[key] in ["vicuna-33b", "mpt-30b-chat"]:
            return TypeCode.BLOCKED_MODEL

    return TypeCode.CORRECT
null
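A minimal check of detect_type on a hand-made record (the field values are illustrative); it relies on the empty frequent_prompts assumed above.

conv = {
    "conversation_a": [
        {"role": "user", "content": "hello there, how are you today?"},
        {"role": "assistant", "content": "I am fine."},
    ],
    "conversation_b": [
        {"role": "user", "content": "hello there, how are you today?"},
        {"role": "assistant", "content": "Doing well."},
    ],
    "model_a": "model-x",
    "model_b": "model-y",
}
print(detect_type(conv))  # TypeCode.CORRECT when nothing is flagged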
20,563
import argparse import datetime import json import os from pytz import timezone import time import kaleido import numpy as np import pandas as pd import plotly.express as px import plotly.graph_objects as go from tqdm import tqdm import plotly.io as pio import transformers def to_remove(x): for d in ["08-09", "08-08", "08-07", "08-06", "08-05", "08-04"]: if d in x: return True return False
null
20,564
import argparse from concurrent.futures import ProcessPoolExecutor from collections import defaultdict from enum import Enum, auto import json import os import random from tqdm import tqdm import opencc blocked_words = [] cc_converter = opencc.OpenCC("t2s") class TypeCode(Enum): CORRECT = auto() ANONYMIZED = auto() REDACTED = auto() BAD_FORMAT = auto() BLOCKED_WORD = auto() BLOCKED_MODEL = auto() TOO_SHORT = auto() TOO_FREQUENT = auto() def detect_type(conv): for key in ["conversation_a", "conversation_b", "conversation"]: if key not in conv: continue messages = [row["content"] for row in conv[key]] for msg in messages: if not isinstance(msg, str): return TypeCode.BAD_FORMAT if len(messages) == 0: return TypeCode.BAD_FORMAT user_prompts = [ row["content"].lower().strip() for row in conv[key] if row["role"] == "user" ] for msg in messages: msg = cc_converter.convert(msg.lower()) if "<anonymized>" in msg: return TypeCode.ANONYMIZED if "<redacted>" in msg: return TypeCode.REDACTED for w in blocked_words: if w in msg: return TypeCode.BLOCKED_WORD return TypeCode.CORRECT
null
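A quick check of the OpenCC normalization used above: messages are converted from traditional to simplified characters before blocked-word matching. The sample string is illustrative.

import opencc

cc = opencc.OpenCC("t2s")
print(cc.convert("繁體中文測試"))  # -> "繁体中文测试"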
20,565
import argparse from collections import defaultdict import datetime import json import math import pickle from pytz import timezone import numpy as np import pandas as pd import plotly.express as px from tqdm import tqdm from fastchat.model.model_registry import get_model_info from fastchat.serve.monitor.basic_stats import get_log_files from fastchat.serve.monitor.clean_battle_data import clean_battle_data def pretty_print_elo_rating(rating): model_order = list(rating.keys()) model_order.sort(key=lambda k: -rating[k]) for i, model in enumerate(model_order): print(f"{i+1:2d}, {model:25s}, {rating[model]:.0f}")
null
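A tiny usage check for pretty_print_elo_rating; the ratings are made-up numbers.

# Prints models ranked by descending rating, one per line,
# e.g. " 1, model-a ..., 1105" then " 2, model-b ..., 988".
pretty_print_elo_rating({"model-a": 1105.2, "model-b": 987.6})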
20,566
import argparse import datetime import json import os from pytz import timezone import time from tqdm import tqdm from fastchat.serve.monitor.basic_stats import NUM_SERVERS from fastchat.serve.monitor.clean_battle_data import ( to_openai_format, replace_model_name, ) from fastchat.utils import detect_language NUM_SERVERS = 14 def get_log_files(max_num_files=None): dates = [] for month in range(4, 12): for day in range(1, 33): dates.append(f"2023-{month:02d}-{day:02d}") filenames = [] for d in dates: for i in range(NUM_SERVERS): name = os.path.expanduser(f"~/fastchat_logs/server{i}/{d}-conv.json") if os.path.exists(name): filenames.append(name) max_num_files = max_num_files or len(filenames) # filenames = list(reversed(filenames)) filenames = filenames[-max_num_files:] return filenames
null
20,567
import argparse import datetime import json import os from pytz import timezone import time from tqdm import tqdm from fastchat.serve.monitor.basic_stats import NUM_SERVERS from fastchat.serve.monitor.clean_battle_data import ( to_openai_format, replace_model_name, ) from fastchat.utils import detect_language

NETWORK_ERROR_MSG = (
    "NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.".lower()
)

def to_openai_format(messages):
    roles = ["user", "assistant"]
    ret = []
    for i, x in enumerate(messages):
        ret.append({"role": roles[i % 2], "content": x[1]})
    return ret

def replace_model_name(old_name, tstamp):
    replace_dict = {
        "bard": "palm-2",
        "claude-v1": "claude-1",
        "claude-instant-v1": "claude-instant-1",
        "oasst-sft-1-pythia-12b": "oasst-pythia-12b",
        "claude-2": "claude-2.0",
    }
    if old_name in ["gpt-4", "gpt-3.5-turbo"]:
        if tstamp > 1687849200:
            return old_name + "-0613"
        else:
            return old_name + "-0314"
    if old_name in replace_dict:
        return replace_dict[old_name]
    return old_name

def detect_language(text: str) -> str:
    """Detect the language of a string."""
    import polyglot  # pip3 install polyglot pyicu pycld2
    from polyglot.detect import Detector
    from polyglot.detect.base import logger as polyglot_logger
    import pycld2

    polyglot_logger.setLevel("ERROR")
    try:
        lang_code = Detector(text).language.name
    except (pycld2.error, polyglot.detect.base.UnknownLanguage):
        lang_code = "unknown"
    return lang_code

def clean_chat_data(log_files, action_type):
    raw_data = []
    for filename in tqdm(log_files, desc="read files"):
        for retry in range(5):
            try:
                lines = open(filename).readlines()
                break
            except FileNotFoundError:
                time.sleep(2)

        for l in lines:
            row = json.loads(l)
            if row["type"] == action_type:
                raw_data.append(row)

    all_models = set()
    all_ips = dict()
    chats = []
    ct_invalid_conv_id = 0
    ct_invalid = 0
    ct_network_error = 0
    for row in raw_data:
        try:
            if action_type in ["chat", "upvote", "downvote"]:
                state = row["state"]
                model = row["model"]
            elif action_type == "leftvote":
                state = row["states"][0]
                model = row["states"][0]["model_name"]
            elif action_type == "rightvote":
                state = row["states"][1]
                model = row["states"][1]["model_name"]
            conversation_id = state["conv_id"]
        except KeyError:
            ct_invalid_conv_id += 1
            continue

        if conversation_id is None:
            ct_invalid_conv_id += 1
            continue

        conversation = to_openai_format(state["messages"][state["offset"] :])
        if not isinstance(model, str):
            ct_invalid += 1
            continue
        # replace_model_name takes the timestamp to disambiguate gpt model versions.
        model = replace_model_name(model, row["tstamp"])

        try:
            lang_code = detect_language(state["messages"][state["offset"]][1])
        except IndexError:
            ct_invalid += 1
            continue

        if not all(isinstance(x["content"], str) for x in conversation):
            ct_invalid += 1
            continue

        messages = "".join([x["content"] for x in conversation]).lower()
        if NETWORK_ERROR_MSG in messages:
            ct_network_error += 1
            continue

        ip = row["ip"]
        if ip not in all_ips:
            all_ips[ip] = len(all_ips)
        user_id = all_ips[ip]

        chats.append(
            dict(
                conversation_id=conversation_id,
                model=model,
                conversation=conversation,
                turn=len(conversation) // 2,
                language=lang_code,
                user_id=user_id,
                tstamp=row["tstamp"],
            )
        )

        all_models.update([model])

    chats.sort(key=lambda x: x["tstamp"])
    last_updated_tstamp = chats[-1]["tstamp"]
    last_updated_datetime = datetime.datetime.fromtimestamp(
        last_updated_tstamp, tz=timezone("US/Pacific")
    ).strftime("%Y-%m-%d %H:%M:%S %Z")

    # Deduplication: keep only the latest record per conversation id.
    dedup_chats = []
    visited_conv_ids = set()
    for i in reversed(range(len(chats))):
        if chats[i]["conversation_id"] in visited_conv_ids:
            continue
        visited_conv_ids.add(chats[i]["conversation_id"])
        dedup_chats.append(chats[i])

    print(
        f"#raw: {len(raw_data)}, #chat: {len(chats)}, #dedup_chat: {len(dedup_chats)}"
    )
    print(
        f"#invalid_conv_id: {ct_invalid_conv_id}, #network_error: {ct_network_error}, #invalid: {ct_invalid}"
    )
    print(f"#models: {len(all_models)}, {all_models}")
    print(f"last-updated: {last_updated_datetime}")

    return list(reversed(dedup_chats))
null
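A hedged usage sketch for clean_chat_data: pull "chat" events from recent log files (get_log_files from the related entry above) and write them out; the output path is an assumption.

import json

log_files = get_log_files(max_num_files=10)
chats = clean_chat_data(log_files, action_type="chat")
with open("clean_chat_conv.json", "w") as fout:
    json.dump(chats, fout, indent=2, ensure_ascii=False)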
20,568
import argparse import pickle from fastchat.llm_judge.common import ( chat_completion_openai, chat_completion_openai_azure, chat_completion_anthropic, ) from fastchat.conversation import get_conv_template def truncate_string(s, l): half = int(l // 2) return s[:half] + s[-half:] if len(s) > l else s
null
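truncate_string keeps the first and last l//2 characters of an over-long string; a quick check:

print(truncate_string("abcdefghij", 4))  # -> "abij"
print(truncate_string("abc", 10))        # -> "abc" (already within the limit)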
20,569
import argparse from concurrent.futures import ThreadPoolExecutor import json import os import time import openai import requests from tqdm import tqdm def tag_moderation(text): result = API_ERROR_OUTPUT for _ in range(API_MAX_RETRY): try: result = openai.Moderation.create(input=text)["results"][0] break except openai.error.OpenAIError as e: print(type(e), e) time.sleep(API_RETRY_SLEEP) return result def tag_openai_moderation(x): conv = x["conversation_a"] user_prompts = "\n".join([x["content"] for x in conv if x["role"] == "user"]) result = tag_moderation(user_prompts) x["openai_moderation"] = result
null
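A sketch of fanning the tagger over many records with the ThreadPoolExecutor imported above. The retry constants referenced by tag_moderation were elided from this dump, so assumed values are supplied; OPENAI_API_KEY must be set in the environment.

from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm

# Assumed values for constants elided from the snippet above.
API_MAX_RETRY = 3
API_RETRY_SLEEP = 10
API_ERROR_OUTPUT = None

battles = []  # populate with cleaned battle records (each has "conversation_a")
with ThreadPoolExecutor(max_workers=8) as executor:
    list(tqdm(executor.map(tag_openai_moderation, battles), total=len(battles)))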
20,570
import argparse import code import datetime import json import os from pytz import timezone import time import pandas as pd from tqdm import tqdm def get_log_files(max_num_files=None): dates = [] for month in [4, 5]: for day in range(1, 32): dates.append(f"2023-{month:02d}-{day:02d}") num_servers = 14 filenames = [] for d in dates: for i in range(num_servers): name = os.path.expanduser(f"~/fastchat_logs/server{i}/{d}-conv.json") if os.path.exists(name): filenames.append(name) max_num_files = max_num_files or len(filenames) filenames = filenames[-max_num_files:] return filenames
null
20,571
import argparse import code import datetime import json import os from pytz import timezone import time import pandas as pd from tqdm import tqdm def pretty_print_conversation(messages): def inspect_convs(log_files): data = [] for filename in tqdm(log_files, desc="read files"): for retry in range(5): try: lines = open(filename).readlines() break except FileNotFoundError: time.sleep(2) for l in lines: row = json.loads(l) if "states" not in row: continue if row["type"] not in ["leftvote", "rightvote", "bothbad_vote"]: continue model_names = row["states"][0]["model_name"], row["states"][1]["model_name"] if row["type"] == "leftvote": winner, loser = model_names[0], model_names[1] winner_conv, loser_conv = row["states"][0], row["states"][1] elif row["type"] == "rightvote": loser, winner = model_names[0], model_names[1] loser_conv, winner_conv = row["states"][0], row["states"][1] if loser == "bard" and winner == "vicuna-13b": print("=" * 20) print(f"Winner: {winner}") pretty_print_conversation(winner_conv["messages"]) print(f"Loser: {loser}") pretty_print_conversation(loser_conv["messages"]) print("=" * 20) input() # if row["type"] == "bothbad_vote" and "gpt-4" in model_names: # print("=" * 20) # print(f"Model A: {model_names[0]}") # pretty_print_conversation(row["states"][0]["messages"]) # print(f"Model B: {model_names[1]}") # pretty_print_conversation(row["states"][1]["messages"]) # print("=" * 20) # input()
null
20,572
import argparse from collections import defaultdict import datetime import json import os import random import time import uuid import gradio as gr import requests from fastchat.conversation import SeparatorStyle from fastchat.constants import ( LOGDIR, WORKER_API_TIMEOUT, ErrorCode, MODERATION_MSG, CONVERSATION_LIMIT_MSG, RATE_LIMIT_MSG, SERVER_ERROR_MSG, INPUT_CHAR_LEN_LIMIT, CONVERSATION_TURN_LIMIT, SESSION_EXPIRATION_TIME, ) from fastchat.model.model_adapter import ( get_conversation_template, ANTHROPIC_MODEL_LIST, ) from fastchat.model.model_registry import get_model_info, model_info from fastchat.serve.api_provider import ( anthropic_api_stream_iter, openai_api_stream_iter, palm_api_stream_iter, init_palm_chat, ) from fastchat.utils import ( build_logger, moderation_filter, get_window_url_params_js, get_window_url_params_with_tos_js, parse_gradio_auth_creds, ) controller_url = None enable_moderation = False def set_global_vars(controller_url_, enable_moderation_): global controller_url, enable_moderation controller_url = controller_url_ enable_moderation = enable_moderation_
null
20,573
import argparse from collections import defaultdict import datetime import json import os import random import time import uuid import gradio as gr import requests from fastchat.conversation import SeparatorStyle from fastchat.constants import ( LOGDIR, WORKER_API_TIMEOUT, ErrorCode, MODERATION_MSG, CONVERSATION_LIMIT_MSG, RATE_LIMIT_MSG, SERVER_ERROR_MSG, INPUT_CHAR_LEN_LIMIT, CONVERSATION_TURN_LIMIT, SESSION_EXPIRATION_TIME, ) from fastchat.model.model_adapter import ( get_conversation_template, ANTHROPIC_MODEL_LIST, ) from fastchat.model.model_registry import get_model_info, model_info from fastchat.serve.api_provider import ( anthropic_api_stream_iter, openai_api_stream_iter, palm_api_stream_iter, init_palm_chat, ) from fastchat.utils import ( build_logger, moderation_filter, get_window_url_params_js, get_window_url_params_with_tos_js, parse_gradio_auth_creds, ) def load_demo(url_params, request: gr.Request): block_css = """ #notice_markdown { font-size: 110% } #notice_markdown th { display: none; } #notice_markdown td { padding-top: 6px; padding-bottom: 6px; } #model_description_markdown { font-size: 110% } #leaderboard_markdown { font-size: 110% } #leaderboard_markdown td { padding-top: 6px; padding-bottom: 6px; } #leaderboard_dataframe td { line-height: 0.1em; } #about_markdown { font-size: 110% } #ack_markdown { font-size: 110% } #input_box textarea { } footer { display:none !important } .image-container { display: flex; align-items: center; padding: 1px; } .image-container img { margin: 0 30px; height: 30px; max-height: 100%; width: auto; max-width: 20%; } .image-about img { margin: 0 30px; margin-top: 30px; height: 60px; max-height: 100%; width: auto; max-width: 20%; float: left; } """ def build_single_model_ui(models, add_promotion_links=False): get_window_url_params_js = """ function() { const params = new URLSearchParams(window.location.search); url_params = Object.fromEntries(params); console.log("url_params", url_params); return url_params; } """ get_window_url_params_with_tos_js = """ function() { const params = new URLSearchParams(window.location.search); url_params = Object.fromEntries(params); console.log("url_params", url_params); msg = "Users of this website are required to agree to the following terms:\\n\\nThe service is a research preview. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.\\nThe service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) or a similar license." alert(msg); return url_params; } """ def build_demo(models): with gr.Blocks( title="Chat with Open Large Language Models", theme=gr.themes.Default(), css=block_css, ) as demo: url_params = gr.JSON(visible=False) state, model_selector = build_single_model_ui(models) if args.model_list_mode not in ["once", "reload"]: raise ValueError(f"Unknown model list mode: {args.model_list_mode}") if args.show_terms_of_use: load_js = get_window_url_params_with_tos_js else: load_js = get_window_url_params_js demo.load( load_demo, [url_params], [ state, model_selector, ], _js=load_js, ) return demo
null
20,574
import json import time import gradio as gr import numpy as np from fastchat.constants import ( MODERATION_MSG, CONVERSATION_LIMIT_MSG, INPUT_CHAR_LEN_LIMIT, CONVERSATION_TURN_LIMIT, ) from fastchat.model.model_adapter import get_conversation_template from fastchat.serve.gradio_web_server import ( State, bot_response, get_conv_log_filename, no_change_btn, enable_btn, disable_btn, invisible_btn, acknowledgment_md, get_model_description_md, ip_expiration_dict, get_ip, ) from fastchat.utils import ( build_logger, moderation_filter, ) enable_moderation = False def set_global_vars_named(enable_moderation_): global enable_moderation enable_moderation = enable_moderation_
null
20,575
import asyncio import argparse import json import os from typing import Generator, Optional, Union, Dict, List, Any import aiohttp import fastapi from fastapi import Depends, HTTPException from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse, JSONResponse from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer import httpx from pydantic import BaseSettings import shortuuid import tiktoken import uvicorn from fastchat.constants import ( WORKER_API_TIMEOUT, WORKER_API_EMBEDDING_BATCH_SIZE, ErrorCode, ) from fastchat.conversation import Conversation, SeparatorStyle from fastchat.protocol.openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, DeltaMessage, CompletionResponseStreamChoice, CompletionStreamResponse, EmbeddingsRequest, EmbeddingsResponse, ErrorResponse, LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo, ) from fastchat.protocol.api_protocol import ( APIChatCompletionRequest, APITokenCheckRequest, APITokenCheckResponse, APITokenCheckResponseItem, ) from fastchat.utils import build_logger app_settings = AppSettings() get_bearer_token = HTTPBearer(auto_error=False) async def check_api_key( auth: Optional[HTTPAuthorizationCredentials] = Depends(get_bearer_token), ) -> str: if app_settings.api_keys: if auth is None or (token := auth.credentials) not in app_settings.api_keys: raise HTTPException( status_code=401, detail={ "error": { "message": "", "type": "invalid_request_error", "param": None, "code": "invalid_api_key", } }, ) return token else: # api_keys not set; allow all return None
null
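A client-side sketch of the bearer auth that check_api_key enforces; the base URL, key, and the /v1/models path are assumptions about how the app is mounted.

import requests

resp = requests.get(
    "http://localhost:8000/v1/models",
    headers={"Authorization": "Bearer YOUR_API_KEY"},  # placeholder key
)
print(resp.status_code, resp.json())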
20,576
import asyncio import argparse import json import os from typing import Generator, Optional, Union, Dict, List, Any import aiohttp import fastapi from fastapi import Depends, HTTPException from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse, JSONResponse from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer import httpx from pydantic import BaseSettings import shortuuid import tiktoken import uvicorn from fastchat.constants import ( WORKER_API_TIMEOUT, WORKER_API_EMBEDDING_BATCH_SIZE, ErrorCode, ) from fastchat.conversation import Conversation, SeparatorStyle from fastchat.protocol.openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, DeltaMessage, CompletionResponseStreamChoice, CompletionStreamResponse, EmbeddingsRequest, EmbeddingsResponse, ErrorResponse, LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo, ) from fastchat.protocol.api_protocol import ( APIChatCompletionRequest, APITokenCheckRequest, APITokenCheckResponse, APITokenCheckResponseItem, ) from fastchat.utils import build_logger def create_error_response(code: int, message: str) -> JSONResponse: class ErrorCode(IntEnum): async def validation_exception_handler(request, exc): return create_error_response(ErrorCode.VALIDATION_TYPE_ERROR, str(exc))
null
20,577
import asyncio import argparse import json import os from typing import Generator, Optional, Union, Dict, List, Any import aiohttp import fastapi from fastapi import Depends, HTTPException from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse, JSONResponse from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer import httpx from pydantic import BaseSettings import shortuuid import tiktoken import uvicorn from fastchat.constants import ( WORKER_API_TIMEOUT, WORKER_API_EMBEDDING_BATCH_SIZE, ErrorCode, ) from fastchat.conversation import Conversation, SeparatorStyle from fastchat.protocol.openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, DeltaMessage, CompletionResponseStreamChoice, CompletionStreamResponse, EmbeddingsRequest, EmbeddingsResponse, ErrorResponse, LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo, ) from fastchat.protocol.api_protocol import ( APIChatCompletionRequest, APITokenCheckRequest, APITokenCheckResponse, APITokenCheckResponseItem, ) from fastchat.utils import build_logger async def fetch_remote(url, pload=None, name=None): async with aiohttp.ClientSession(timeout=fetch_timeout) as session: async with session.post(url, json=pload) as response: chunks = [] if response.status != 200: ret = { "text": f"{response.reason}", "error_code": ErrorCode.INTERNAL_ERROR, } return json.dumps(ret) async for chunk, _ in response.content.iter_chunks(): chunks.append(chunk) output = b"".join(chunks) if name is not None: res = json.loads(output) if name != "": res = res[name] return res return output app_settings = AppSettings() class ModelPermission(BaseModel): id: str = Field(default_factory=lambda: f"modelperm-{shortuuid.random()}") object: str = "model_permission" created: int = Field(default_factory=lambda: int(time.time())) allow_create_engine: bool = False allow_sampling: bool = True allow_logprobs: bool = True allow_search_indices: bool = True allow_view: bool = True allow_fine_tuning: bool = False organization: str = "*" group: Optional[str] = None is_blocking: str = False class ModelCard(BaseModel): id: str object: str = "model" created: int = Field(default_factory=lambda: int(time.time())) owned_by: str = "fastchat" root: Optional[str] = None parent: Optional[str] = None permission: List[ModelPermission] = [] class ModelList(BaseModel): object: str = "list" data: List[ModelCard] = [] async def show_available_models(): controller_address = app_settings.controller_address ret = await fetch_remote(controller_address + "/refresh_all_workers") models = await fetch_remote(controller_address + "/list_models", None, "models") models.sort() # TODO: return real model permission details model_cards = [] for m in models: model_cards.append(ModelCard(id=m, root=m, permission=[ModelPermission()])) return ModelList(data=model_cards)
null
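A quick async driver for fetch_remote above; the controller address is a placeholder, and fetch_timeout (referenced by fetch_remote but elided from this dump) is given an assumed value.

import asyncio
import aiohttp

fetch_timeout = aiohttp.ClientTimeout(total=3 * 3600)  # assumed; not in the snippet

async def main():
    # /list_models is the controller route already used by show_available_models.
    models = await fetch_remote("http://localhost:21001/list_models", None, "models")
    print(models)

asyncio.run(main())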
20,578
import asyncio import argparse import json import os from typing import Generator, Optional, Union, Dict, List, Any import aiohttp import fastapi from fastapi import Depends, HTTPException from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse, JSONResponse from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer import httpx from pydantic import BaseSettings import shortuuid import tiktoken import uvicorn from fastchat.constants import ( WORKER_API_TIMEOUT, WORKER_API_EMBEDDING_BATCH_SIZE, ErrorCode, ) from fastchat.conversation import Conversation, SeparatorStyle from fastchat.protocol.openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, DeltaMessage, CompletionResponseStreamChoice, CompletionStreamResponse, EmbeddingsRequest, EmbeddingsResponse, ErrorResponse, LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo, ) from fastchat.protocol.api_protocol import ( APIChatCompletionRequest, APITokenCheckRequest, APITokenCheckResponse, APITokenCheckResponseItem, ) from fastchat.utils import build_logger def create_error_response(code: int, message: str) -> JSONResponse: return JSONResponse( ErrorResponse(message=message, code=code).dict(), status_code=400 ) async def check_model(request) -> Optional[JSONResponse]: controller_address = app_settings.controller_address ret = None models = await fetch_remote(controller_address + "/list_models", None, "models") if request.model not in models: ret = create_error_response( ErrorCode.INVALID_MODEL, f"Only {'&&'.join(models)} allowed now, your model {request.model}", ) return ret async def check_length(request, prompt, max_tokens, worker_addr): if ( not isinstance(max_tokens, int) or max_tokens <= 0 ): # model worker not support max_tokens=None max_tokens = 1024 * 1024 context_len = await fetch_remote( worker_addr + "/model_details", {"model": request.model}, "context_length" ) token_num = await fetch_remote( worker_addr + "/count_token", {"model": request.model, "prompt": prompt}, "count", ) length = min(max_tokens, context_len - token_num) if length <= 0: return None, create_error_response( ErrorCode.CONTEXT_OVERFLOW, f"This model's maximum context length is {context_len} tokens. However, your messages resulted in {token_num} tokens. 
Please reduce the length of the messages.", ) return length, None def check_requests(request) -> Optional[JSONResponse]: # Check all params if request.max_tokens is not None and request.max_tokens <= 0: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.max_tokens} is less than the minimum of 1 - 'max_tokens'", ) if request.n is not None and request.n <= 0: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.n} is less than the minimum of 1 - 'n'", ) if request.temperature is not None and request.temperature < 0: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.temperature} is less than the minimum of 0 - 'temperature'", ) if request.temperature is not None and request.temperature > 2: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.temperature} is greater than the maximum of 2 - 'temperature'", ) if request.top_p is not None and request.top_p < 0: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.top_p} is less than the minimum of 0 - 'top_p'", ) if request.top_p is not None and request.top_p > 1: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.top_p} is greater than the maximum of 1 - 'top_p'", ) if request.top_k is not None and (request.top_k > -1 and request.top_k < 1): return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.top_k} is out of Range. Either set top_k to -1 or >=1.", ) if request.stop is not None and ( not isinstance(request.stop, str) and not isinstance(request.stop, list) ): return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.stop} is not valid under any of the given schemas - 'stop'", ) return None async def get_gen_params( model_name: str, worker_addr: str, messages: Union[str, List[Dict[str, str]]], *, temperature: float, top_p: float, top_k: Optional[int], presence_penalty: Optional[float], frequency_penalty: Optional[float], max_tokens: Optional[int], echo: Optional[bool], logprobs: Optional[int] = None, stop: Optional[Union[str, List[str]]], best_of: Optional[int] = None, use_beam_search: Optional[bool] = None, ) -> Dict[str, Any]: conv = await get_conv(model_name, worker_addr) conv = Conversation( name=conv["name"], system_template=conv["system_template"], system_message=conv["system_message"], roles=conv["roles"], messages=list(conv["messages"]), # prevent in-place modification offset=conv["offset"], sep_style=SeparatorStyle(conv["sep_style"]), sep=conv["sep"], sep2=conv["sep2"], stop_str=conv["stop_str"], stop_token_ids=conv["stop_token_ids"], ) if isinstance(messages, str): prompt = messages else: for message in messages: msg_role = message["role"] if msg_role == "system": conv.set_system_message(message["content"]) elif msg_role == "user": conv.append_message(conv.roles[0], message["content"]) elif msg_role == "assistant": conv.append_message(conv.roles[1], message["content"]) else: raise ValueError(f"Unknown role: {msg_role}") # Add a blank message for the assistant. 
conv.append_message(conv.roles[1], None) prompt = conv.get_prompt() gen_params = { "model": model_name, "prompt": prompt, "temperature": temperature, "logprobs": logprobs, "top_p": top_p, "top_k": top_k, "presence_penalty": presence_penalty, "frequency_penalty": frequency_penalty, "max_new_tokens": max_tokens, "echo": echo, "stop_token_ids": conv.stop_token_ids, } if best_of is not None: gen_params.update({"best_of": best_of}) if use_beam_search is not None: gen_params.update({"use_beam_search": use_beam_search}) new_stop = set() _add_to_set(stop, new_stop) _add_to_set(conv.stop_str, new_stop) gen_params["stop"] = list(new_stop) logger.debug(f"==== request ====\n{gen_params}") return gen_params async def get_worker_address(model_name: str) -> str: """ Get worker address based on the requested model :param model_name: The worker's model name :return: Worker address from the controller :raises: :class:`ValueError`: No available worker for requested model """ controller_address = app_settings.controller_address worker_addr = await fetch_remote( controller_address + "/get_worker_address", {"model": model_name}, "address" ) # No available worker if worker_addr == "": raise ValueError(f"No available worker for {model_name}") logger.debug(f"model_name: {model_name}, worker_addr: {worker_addr}") return worker_addr async def chat_completion_stream_generator( model_name: str, gen_params: Dict[str, Any], n: int, worker_addr: str ) -> Generator[str, Any, None]: """ Event stream format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format """ id = f"chatcmpl-{shortuuid.random()}" finish_stream_events = [] for i in range(n): # First chunk with role choice_data = ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage(role="assistant"), finish_reason=None, ) chunk = ChatCompletionStreamResponse( id=id, choices=[choice_data], model=model_name ) yield f"data: {chunk.json(exclude_unset=True, ensure_ascii=False)}\n\n" previous_text = "" async for content in generate_completion_stream(gen_params, worker_addr): if content["error_code"] != 0: yield f"data: {json.dumps(content, ensure_ascii=False)}\n\n" yield "data: [DONE]\n\n" return decoded_unicode = content["text"].replace("\ufffd", "") delta_text = decoded_unicode[len(previous_text) :] previous_text = ( decoded_unicode if len(decoded_unicode) > len(previous_text) else previous_text ) if len(delta_text) == 0: delta_text = None choice_data = ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage(content=delta_text), finish_reason=content.get("finish_reason", None), ) chunk = ChatCompletionStreamResponse( id=id, choices=[choice_data], model=model_name ) if delta_text is None: if content.get("finish_reason", None) is not None: finish_stream_events.append(chunk) continue yield f"data: {chunk.json(exclude_unset=True, ensure_ascii=False)}\n\n" # There is not "content" field in the last delta message, so exclude_none to exclude field "content". 
for finish_chunk in finish_stream_events: yield f"data: {finish_chunk.json(exclude_none=True, ensure_ascii=False)}\n\n" yield "data: [DONE]\n\n" async def generate_completion(payload: Dict[str, Any], worker_addr: str): return await fetch_remote(worker_addr + "/worker_generate", payload, "") class ErrorCode(IntEnum): """ https://platform.openai.com/docs/guides/error-codes/api-errors """ VALIDATION_TYPE_ERROR = 40001 INVALID_AUTH_KEY = 40101 INCORRECT_AUTH_KEY = 40102 NO_PERMISSION = 40103 INVALID_MODEL = 40301 PARAM_OUT_OF_RANGE = 40302 CONTEXT_OVERFLOW = 40303 RATE_LIMIT = 42901 QUOTA_EXCEEDED = 42902 ENGINE_OVERLOADED = 42903 INTERNAL_ERROR = 50001 CUDA_OUT_OF_MEMORY = 50002 GRADIO_REQUEST_ERROR = 50003 GRADIO_STREAM_UNKNOWN_ERROR = 50004 CONTROLLER_NO_WORKER = 50005 CONTROLLER_WORKER_TIMEOUT = 50006 class UsageInfo(BaseModel): prompt_tokens: int = 0 total_tokens: int = 0 completion_tokens: Optional[int] = 0 class ChatCompletionRequest(BaseModel): model: str messages: Union[str, List[Dict[str, str]]] temperature: Optional[float] = 0.7 top_p: Optional[float] = 1.0 top_k: Optional[int] = -1 n: Optional[int] = 1 max_tokens: Optional[int] = None stop: Optional[Union[str, List[str]]] = None stream: Optional[bool] = False presence_penalty: Optional[float] = 0.0 frequency_penalty: Optional[float] = 0.0 user: Optional[str] = None class ChatMessage(BaseModel): role: str content: str class ChatCompletionResponseChoice(BaseModel): index: int message: ChatMessage finish_reason: Optional[Literal["stop", "length"]] = None class ChatCompletionResponse(BaseModel): id: str = Field(default_factory=lambda: f"chatcmpl-{shortuuid.random()}") object: str = "chat.completion" created: int = Field(default_factory=lambda: int(time.time())) model: str choices: List[ChatCompletionResponseChoice] usage: UsageInfo The provided code snippet includes necessary dependencies for implementing the `create_chat_completion` function. 
Write a Python function `async def create_chat_completion(request: ChatCompletionRequest)` to solve the following problem: Creates a completion for the chat message Here is the function: async def create_chat_completion(request: ChatCompletionRequest): """Creates a completion for the chat message""" error_check_ret = await check_model(request) if error_check_ret is not None: return error_check_ret error_check_ret = check_requests(request) if error_check_ret is not None: return error_check_ret worker_addr = await get_worker_address(request.model) gen_params = await get_gen_params( request.model, worker_addr, request.messages, temperature=request.temperature, top_p=request.top_p, top_k=request.top_k, presence_penalty=request.presence_penalty, frequency_penalty=request.frequency_penalty, max_tokens=request.max_tokens, echo=False, stop=request.stop, ) max_new_tokens, error_check_ret = await check_length( request, gen_params["prompt"], gen_params["max_new_tokens"], worker_addr, ) if error_check_ret is not None: return error_check_ret gen_params["max_new_tokens"] = max_new_tokens if request.stream: generator = chat_completion_stream_generator( request.model, gen_params, request.n, worker_addr ) return StreamingResponse(generator, media_type="text/event-stream") choices = [] chat_completions = [] for i in range(request.n): content = asyncio.create_task(generate_completion(gen_params, worker_addr)) chat_completions.append(content) try: all_tasks = await asyncio.gather(*chat_completions) except Exception as e: return create_error_response(ErrorCode.INTERNAL_ERROR, str(e)) usage = UsageInfo() for i, content in enumerate(all_tasks): if content["error_code"] != 0: return create_error_response(content["error_code"], content["text"]) choices.append( ChatCompletionResponseChoice( index=i, message=ChatMessage(role="assistant", content=content["text"]), finish_reason=content.get("finish_reason", "stop"), ) ) if "usage" in content: task_usage = UsageInfo.parse_obj(content["usage"]) for usage_key, usage_value in task_usage.dict().items(): setattr(usage, usage_key, getattr(usage, usage_key) + usage_value) return ChatCompletionResponse(model=request.model, choices=choices, usage=usage)
Creates a completion for the chat message
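A hedged client example for the endpoint above, using the 0.x-style openai SDK (consistent with the openai.error usage elsewhere in this dump); the base URL and model name are placeholders.

import openai

openai.api_key = "EMPTY"  # a dummy key suffices when --api-keys is unset
openai.api_base = "http://localhost:8000/v1"

completion = openai.ChatCompletion.create(
    model="vicuna-7b-v1.5",  # must match a model served by a worker
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(completion.choices[0].message.content)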
20,579
import asyncio import argparse import json import os from typing import Generator, Optional, Union, Dict, List, Any import aiohttp import fastapi from fastapi import Depends, HTTPException from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse, JSONResponse from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer import httpx from pydantic import BaseSettings import shortuuid import tiktoken import uvicorn from fastchat.constants import ( WORKER_API_TIMEOUT, WORKER_API_EMBEDDING_BATCH_SIZE, ErrorCode, ) from fastchat.conversation import Conversation, SeparatorStyle from fastchat.protocol.openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, DeltaMessage, CompletionResponseStreamChoice, CompletionStreamResponse, EmbeddingsRequest, EmbeddingsResponse, ErrorResponse, LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo, ) from fastchat.protocol.api_protocol import ( APIChatCompletionRequest, APITokenCheckRequest, APITokenCheckResponse, APITokenCheckResponseItem, ) from fastchat.utils import build_logger def create_error_response(code: int, message: str) -> JSONResponse: async def check_model(request) -> Optional[JSONResponse]: async def check_length(request, prompt, max_tokens, worker_addr): def check_requests(request) -> Optional[JSONResponse]: def process_input(model_name, inp): def create_openai_logprobs(logprob_dict): async def get_gen_params( model_name: str, worker_addr: str, messages: Union[str, List[Dict[str, str]]], *, temperature: float, top_p: float, top_k: Optional[int], presence_penalty: Optional[float], frequency_penalty: Optional[float], max_tokens: Optional[int], echo: Optional[bool], logprobs: Optional[int] = None, stop: Optional[Union[str, List[str]]], best_of: Optional[int] = None, use_beam_search: Optional[bool] = None, ) -> Dict[str, Any]: async def get_worker_address(model_name: str) -> str: async def generate_completion_stream_generator( request: CompletionRequest, n: int, worker_addr: str ): async def generate_completion(payload: Dict[str, Any], worker_addr: str): class ErrorCode(IntEnum): class UsageInfo(BaseModel): class CompletionRequest(BaseModel): class CompletionResponseChoice(BaseModel): class CompletionResponse(BaseModel): async def create_completion(request: CompletionRequest): error_check_ret = await check_model(request) if error_check_ret is not None: return error_check_ret error_check_ret = check_requests(request) if error_check_ret is not None: return error_check_ret request.prompt = process_input(request.model, request.prompt) worker_addr = await get_worker_address(request.model) for text in request.prompt: max_tokens, error_check_ret = await check_length( request, text, request.max_tokens, worker_addr ) if error_check_ret is not None: return error_check_ret if isinstance(max_tokens, int) and max_tokens < request.max_tokens: request.max_tokens = max_tokens if request.stream: generator = generate_completion_stream_generator( request, request.n, worker_addr ) return StreamingResponse(generator, media_type="text/event-stream") else: text_completions = [] for text in request.prompt: gen_params = await get_gen_params( request.model, worker_addr, text, temperature=request.temperature, top_p=request.top_p, top_k=request.top_k, 
frequency_penalty=request.frequency_penalty, presence_penalty=request.presence_penalty, max_tokens=request.max_tokens, logprobs=request.logprobs, echo=request.echo, stop=request.stop, best_of=request.best_of, use_beam_search=request.use_beam_search, ) for i in range(request.n): content = asyncio.create_task( generate_completion(gen_params, worker_addr) ) text_completions.append(content) try: all_tasks = await asyncio.gather(*text_completions) except Exception as e: return create_error_response(ErrorCode.INTERNAL_ERROR, str(e)) choices = [] usage = UsageInfo() for i, content in enumerate(all_tasks): if content["error_code"] != 0: return create_error_response(content["error_code"], content["text"]) choices.append( CompletionResponseChoice( index=i, text=content["text"], logprobs=create_openai_logprobs(content.get("logprobs", None)), finish_reason=content.get("finish_reason", "stop"), ) ) task_usage = UsageInfo.parse_obj(content["usage"]) for usage_key, usage_value in task_usage.dict().items(): setattr(usage, usage_key, getattr(usage, usage_key) + usage_value) return CompletionResponse( model=request.model, choices=choices, usage=UsageInfo.parse_obj(usage) )
null
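A streaming sketch for the text-completion path: setting stream=True makes the server return the SSE stream built by generate_completion_stream_generator. The 0.x openai SDK style and model name are assumptions.

import openai

openai.api_key = "EMPTY"
openai.api_base = "http://localhost:8000/v1"

for chunk in openai.Completion.create(
    model="vicuna-7b-v1.5", prompt="Once upon a time", max_tokens=32, stream=True
):
    print(chunk.choices[0].text, end="", flush=True)
print()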
20,580
import asyncio import argparse import json import os from typing import Generator, Optional, Union, Dict, List, Any import aiohttp import fastapi from fastapi import Depends, HTTPException from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse, JSONResponse from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer import httpx from pydantic import BaseSettings import shortuuid import tiktoken import uvicorn from fastchat.constants import ( WORKER_API_TIMEOUT, WORKER_API_EMBEDDING_BATCH_SIZE, ErrorCode, ) from fastchat.conversation import Conversation, SeparatorStyle from fastchat.protocol.openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, DeltaMessage, CompletionResponseStreamChoice, CompletionStreamResponse, EmbeddingsRequest, EmbeddingsResponse, ErrorResponse, LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo, ) from fastchat.protocol.api_protocol import ( APIChatCompletionRequest, APITokenCheckRequest, APITokenCheckResponse, APITokenCheckResponseItem, ) from fastchat.utils import build_logger def create_error_response(code: int, message: str) -> JSONResponse: return JSONResponse( ErrorResponse(message=message, code=code).dict(), status_code=400 ) async def check_model(request) -> Optional[JSONResponse]: controller_address = app_settings.controller_address ret = None models = await fetch_remote(controller_address + "/list_models", None, "models") if request.model not in models: ret = create_error_response( ErrorCode.INVALID_MODEL, f"Only {'&&'.join(models)} allowed now, your model {request.model}", ) return ret def process_input(model_name, inp): if isinstance(inp, str): inp = [inp] elif isinstance(inp, list): if isinstance(inp[0], int): decoding = tiktoken.model.encoding_for_model(model_name) inp = [decoding.decode(inp)] elif isinstance(inp[0], list): decoding = tiktoken.model.encoding_for_model(model_name) inp = [decoding.decode(text) for text in inp] return inp async def get_embedding(payload: Dict[str, Any]): controller_address = app_settings.controller_address model_name = payload["model"] worker_addr = await get_worker_address(model_name) embedding = await fetch_remote(worker_addr + "/worker_get_embeddings", payload) return json.loads(embedding) WORKER_API_EMBEDDING_BATCH_SIZE = int( os.getenv("FASTCHAT_WORKER_API_EMBEDDING_BATCH_SIZE", 4) ) class UsageInfo(BaseModel): prompt_tokens: int = 0 total_tokens: int = 0 completion_tokens: Optional[int] = 0 class EmbeddingsRequest(BaseModel): model: Optional[str] = None engine: Optional[str] = None input: Union[str, List[Any]] user: Optional[str] = None encoding_format: Optional[str] = None class EmbeddingsResponse(BaseModel): object: str = "list" data: List[Dict[str, Any]] model: str usage: UsageInfo The provided code snippet includes necessary dependencies for implementing the `create_embeddings` function. 
Write a Python function `async def create_embeddings(request: EmbeddingsRequest, model_name: str = None)` to solve the following problem: Creates embeddings for the text Here is the function: async def create_embeddings(request: EmbeddingsRequest, model_name: str = None): """Creates embeddings for the text""" if request.model is None: request.model = model_name error_check_ret = await check_model(request) if error_check_ret is not None: return error_check_ret request.input = process_input(request.model, request.input) data = [] token_num = 0 batch_size = WORKER_API_EMBEDDING_BATCH_SIZE batches = [ request.input[i : min(i + batch_size, len(request.input))] for i in range(0, len(request.input), batch_size) ] for num_batch, batch in enumerate(batches): payload = { "model": request.model, "input": batch, "encoding_format": request.encoding_format, } embedding = await get_embedding(payload) if "error_code" in embedding and embedding["error_code"] != 0: return create_error_response(embedding["error_code"], embedding["text"]) data += [ { "object": "embedding", "embedding": emb, "index": num_batch * batch_size + i, } for i, emb in enumerate(embedding["embedding"]) ] token_num += embedding["token_num"] return EmbeddingsResponse( data=data, model=request.model, usage=UsageInfo( prompt_tokens=token_num, total_tokens=token_num, completion_tokens=None, ), ).dict(exclude_none=True)
Creates embeddings for the text
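A client sketch for the embeddings endpoint; note that the server slices the input into WORKER_API_EMBEDDING_BATCH_SIZE chunks and reassembles the per-item indices. The SDK style and model name are assumptions.

import openai

openai.api_key = "EMPTY"
openai.api_base = "http://localhost:8000/v1"

resp = openai.Embedding.create(model="vicuna-7b-v1.5", input=["hello", "world"])
print(len(resp["data"]), len(resp["data"][0]["embedding"]))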
20,581
import asyncio import argparse import json import os from typing import Generator, Optional, Union, Dict, List, Any import aiohttp import fastapi from fastapi import Depends, HTTPException from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse, JSONResponse from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer import httpx from pydantic import BaseSettings import shortuuid import tiktoken import uvicorn from fastchat.constants import ( WORKER_API_TIMEOUT, WORKER_API_EMBEDDING_BATCH_SIZE, ErrorCode, ) from fastchat.conversation import Conversation, SeparatorStyle from fastchat.protocol.openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, DeltaMessage, CompletionResponseStreamChoice, CompletionStreamResponse, EmbeddingsRequest, EmbeddingsResponse, ErrorResponse, LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo, ) from fastchat.protocol.api_protocol import ( APIChatCompletionRequest, APITokenCheckRequest, APITokenCheckResponse, APITokenCheckResponseItem, ) from fastchat.utils import build_logger async def fetch_remote(url, pload=None, name=None): async with aiohttp.ClientSession(timeout=fetch_timeout) as session: async with session.post(url, json=pload) as response: chunks = [] if response.status != 200: ret = { "text": f"{response.reason}", "error_code": ErrorCode.INTERNAL_ERROR, } return json.dumps(ret) async for chunk, _ in response.content.iter_chunks(): chunks.append(chunk) output = b"".join(chunks) if name is not None: res = json.loads(output) if name != "": res = res[name] return res return output async def get_worker_address(model_name: str) -> str: """ Get worker address based on the requested model :param model_name: The worker's model name :return: Worker address from the controller :raises: :class:`ValueError`: No available worker for requested model """ controller_address = app_settings.controller_address worker_addr = await fetch_remote( controller_address + "/get_worker_address", {"model": model_name}, "address" ) # No available worker if worker_addr == "": raise ValueError(f"No available worker for {model_name}") logger.debug(f"model_name: {model_name}, worker_addr: {worker_addr}") return worker_addr class APITokenCheckRequest(BaseModel): prompts: List[APITokenCheckRequestItem] class APITokenCheckResponseItem(BaseModel): fits: bool tokenCount: int contextLength: int class APITokenCheckResponse(BaseModel): prompts: List[APITokenCheckResponseItem] The provided code snippet includes necessary dependencies for implementing the `count_tokens` function. Write a Python function `async def count_tokens(request: APITokenCheckRequest)` to solve the following problem: Checks the token count for each message in your list This is not part of the OpenAI API spec. Here is the function: async def count_tokens(request: APITokenCheckRequest): """ Checks the token count for each message in your list This is not part of the OpenAI API spec. 
""" checkedList = [] for item in request.prompts: worker_addr = await get_worker_address(item.model) context_len = await fetch_remote( worker_addr + "/model_details", {"prompt": item.prompt, "model": item.model}, "context_length", ) token_num = await fetch_remote( worker_addr + "/count_token", {"prompt": item.prompt, "model": item.model}, "count", ) can_fit = True if token_num + item.max_tokens > context_len: can_fit = False checkedList.append( APITokenCheckResponseItem( fits=can_fit, contextLength=context_len, tokenCount=token_num ) ) return APITokenCheckResponse(prompts=checkedList)
Checks the token count for each message in your list. This is not part of the OpenAI API spec.
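A raw-HTTP sketch for this extension endpoint. The mount path /api/v1/token_check is an assumption; the payload fields follow the request items referenced above (model, prompt, max_tokens).

import requests

payload = {
    "prompts": [
        {"model": "vicuna-7b-v1.5", "prompt": "How many tokens is this?", "max_tokens": 128}
    ]
}
resp = requests.post("http://localhost:8000/api/v1/token_check", json=payload)
print(resp.json())  # -> {"prompts": [{"fits": ..., "tokenCount": ..., "contextLength": ...}]}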
20,582
import asyncio import argparse import json import os from typing import Generator, Optional, Union, Dict, List, Any import aiohttp import fastapi from fastapi import Depends, HTTPException from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse, JSONResponse from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer import httpx from pydantic import BaseSettings import shortuuid import tiktoken import uvicorn from fastchat.constants import ( WORKER_API_TIMEOUT, WORKER_API_EMBEDDING_BATCH_SIZE, ErrorCode, ) from fastchat.conversation import Conversation, SeparatorStyle from fastchat.protocol.openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, DeltaMessage, CompletionResponseStreamChoice, CompletionStreamResponse, EmbeddingsRequest, EmbeddingsResponse, ErrorResponse, LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo, ) from fastchat.protocol.api_protocol import ( APIChatCompletionRequest, APITokenCheckRequest, APITokenCheckResponse, APITokenCheckResponseItem, ) from fastchat.utils import build_logger def create_error_response(code: int, message: str) -> JSONResponse: return JSONResponse( ErrorResponse(message=message, code=code).dict(), status_code=400 ) async def check_model(request) -> Optional[JSONResponse]: controller_address = app_settings.controller_address ret = None models = await fetch_remote(controller_address + "/list_models", None, "models") if request.model not in models: ret = create_error_response( ErrorCode.INVALID_MODEL, f"Only {'&&'.join(models)} allowed now, your model {request.model}", ) return ret async def check_length(request, prompt, max_tokens, worker_addr): if ( not isinstance(max_tokens, int) or max_tokens <= 0 ): # model worker not support max_tokens=None max_tokens = 1024 * 1024 context_len = await fetch_remote( worker_addr + "/model_details", {"model": request.model}, "context_length" ) token_num = await fetch_remote( worker_addr + "/count_token", {"model": request.model, "prompt": prompt}, "count", ) length = min(max_tokens, context_len - token_num) if length <= 0: return None, create_error_response( ErrorCode.CONTEXT_OVERFLOW, f"This model's maximum context length is {context_len} tokens. However, your messages resulted in {token_num} tokens. 
Please reduce the length of the messages.", ) return length, None def check_requests(request) -> Optional[JSONResponse]: # Check all params if request.max_tokens is not None and request.max_tokens <= 0: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.max_tokens} is less than the minimum of 1 - 'max_tokens'", ) if request.n is not None and request.n <= 0: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.n} is less than the minimum of 1 - 'n'", ) if request.temperature is not None and request.temperature < 0: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.temperature} is less than the minimum of 0 - 'temperature'", ) if request.temperature is not None and request.temperature > 2: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.temperature} is greater than the maximum of 2 - 'temperature'", ) if request.top_p is not None and request.top_p < 0: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.top_p} is less than the minimum of 0 - 'top_p'", ) if request.top_p is not None and request.top_p > 1: return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.top_p} is greater than the maximum of 1 - 'top_p'", ) if request.top_k is not None and (request.top_k > -1 and request.top_k < 1): return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.top_k} is out of Range. Either set top_k to -1 or >=1.", ) if request.stop is not None and ( not isinstance(request.stop, str) and not isinstance(request.stop, list) ): return create_error_response( ErrorCode.PARAM_OUT_OF_RANGE, f"{request.stop} is not valid under any of the given schemas - 'stop'", ) return None async def get_gen_params( model_name: str, worker_addr: str, messages: Union[str, List[Dict[str, str]]], *, temperature: float, top_p: float, top_k: Optional[int], presence_penalty: Optional[float], frequency_penalty: Optional[float], max_tokens: Optional[int], echo: Optional[bool], logprobs: Optional[int] = None, stop: Optional[Union[str, List[str]]], best_of: Optional[int] = None, use_beam_search: Optional[bool] = None, ) -> Dict[str, Any]: conv = await get_conv(model_name, worker_addr) conv = Conversation( name=conv["name"], system_template=conv["system_template"], system_message=conv["system_message"], roles=conv["roles"], messages=list(conv["messages"]), # prevent in-place modification offset=conv["offset"], sep_style=SeparatorStyle(conv["sep_style"]), sep=conv["sep"], sep2=conv["sep2"], stop_str=conv["stop_str"], stop_token_ids=conv["stop_token_ids"], ) if isinstance(messages, str): prompt = messages else: for message in messages: msg_role = message["role"] if msg_role == "system": conv.set_system_message(message["content"]) elif msg_role == "user": conv.append_message(conv.roles[0], message["content"]) elif msg_role == "assistant": conv.append_message(conv.roles[1], message["content"]) else: raise ValueError(f"Unknown role: {msg_role}") # Add a blank message for the assistant. 
conv.append_message(conv.roles[1], None) prompt = conv.get_prompt() gen_params = { "model": model_name, "prompt": prompt, "temperature": temperature, "logprobs": logprobs, "top_p": top_p, "top_k": top_k, "presence_penalty": presence_penalty, "frequency_penalty": frequency_penalty, "max_new_tokens": max_tokens, "echo": echo, "stop_token_ids": conv.stop_token_ids, } if best_of is not None: gen_params.update({"best_of": best_of}) if use_beam_search is not None: gen_params.update({"use_beam_search": use_beam_search}) new_stop = set() _add_to_set(stop, new_stop) _add_to_set(conv.stop_str, new_stop) gen_params["stop"] = list(new_stop) logger.debug(f"==== request ====\n{gen_params}") return gen_params async def get_worker_address(model_name: str) -> str: """ Get worker address based on the requested model :param model_name: The worker's model name :return: Worker address from the controller :raises: :class:`ValueError`: No available worker for requested model """ controller_address = app_settings.controller_address worker_addr = await fetch_remote( controller_address + "/get_worker_address", {"model": model_name}, "address" ) # No available worker if worker_addr == "": raise ValueError(f"No available worker for {model_name}") logger.debug(f"model_name: {model_name}, worker_addr: {worker_addr}") return worker_addr async def chat_completion_stream_generator( model_name: str, gen_params: Dict[str, Any], n: int, worker_addr: str ) -> Generator[str, Any, None]: """ Event stream format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format """ id = f"chatcmpl-{shortuuid.random()}" finish_stream_events = [] for i in range(n): # First chunk with role choice_data = ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage(role="assistant"), finish_reason=None, ) chunk = ChatCompletionStreamResponse( id=id, choices=[choice_data], model=model_name ) yield f"data: {chunk.json(exclude_unset=True, ensure_ascii=False)}\n\n" previous_text = "" async for content in generate_completion_stream(gen_params, worker_addr): if content["error_code"] != 0: yield f"data: {json.dumps(content, ensure_ascii=False)}\n\n" yield "data: [DONE]\n\n" return decoded_unicode = content["text"].replace("\ufffd", "") delta_text = decoded_unicode[len(previous_text) :] previous_text = ( decoded_unicode if len(decoded_unicode) > len(previous_text) else previous_text ) if len(delta_text) == 0: delta_text = None choice_data = ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage(content=delta_text), finish_reason=content.get("finish_reason", None), ) chunk = ChatCompletionStreamResponse( id=id, choices=[choice_data], model=model_name ) if delta_text is None: if content.get("finish_reason", None) is not None: finish_stream_events.append(chunk) continue yield f"data: {chunk.json(exclude_unset=True, ensure_ascii=False)}\n\n" # There is not "content" field in the last delta message, so exclude_none to exclude field "content". 
for finish_chunk in finish_stream_events: yield f"data: {finish_chunk.json(exclude_none=True, ensure_ascii=False)}\n\n" yield "data: [DONE]\n\n" async def generate_completion(payload: Dict[str, Any], worker_addr: str): return await fetch_remote(worker_addr + "/worker_generate", payload, "") class ErrorCode(IntEnum): """ https://platform.openai.com/docs/guides/error-codes/api-errors """ VALIDATION_TYPE_ERROR = 40001 INVALID_AUTH_KEY = 40101 INCORRECT_AUTH_KEY = 40102 NO_PERMISSION = 40103 INVALID_MODEL = 40301 PARAM_OUT_OF_RANGE = 40302 CONTEXT_OVERFLOW = 40303 RATE_LIMIT = 42901 QUOTA_EXCEEDED = 42902 ENGINE_OVERLOADED = 42903 INTERNAL_ERROR = 50001 CUDA_OUT_OF_MEMORY = 50002 GRADIO_REQUEST_ERROR = 50003 GRADIO_STREAM_UNKNOWN_ERROR = 50004 CONTROLLER_NO_WORKER = 50005 CONTROLLER_WORKER_TIMEOUT = 50006 class UsageInfo(BaseModel): prompt_tokens: int = 0 total_tokens: int = 0 completion_tokens: Optional[int] = 0 class ChatMessage(BaseModel): role: str content: str class ChatCompletionResponseChoice(BaseModel): index: int message: ChatMessage finish_reason: Optional[Literal["stop", "length"]] = None class ChatCompletionResponse(BaseModel): id: str = Field(default_factory=lambda: f"chatcmpl-{shortuuid.random()}") object: str = "chat.completion" created: int = Field(default_factory=lambda: int(time.time())) model: str choices: List[ChatCompletionResponseChoice] usage: UsageInfo class APIChatCompletionRequest(BaseModel): model: str messages: Union[str, List[Dict[str, str]]] temperature: Optional[float] = 0.7 top_p: Optional[float] = 1.0 top_k: Optional[int] = -1 n: Optional[int] = 1 max_tokens: Optional[int] = None stop: Optional[Union[str, List[str]]] = None stream: Optional[bool] = False user: Optional[str] = None repetition_penalty: Optional[float] = 1.0 frequency_penalty: Optional[float] = 0.0 presence_penalty: Optional[float] = 0.0 The provided code snippet includes necessary dependencies for implementing the `create_chat_completion` function. 
Write a Python function `async def create_chat_completion(request: APIChatCompletionRequest)` to solve the following problem: Creates a completion for the chat message Here is the function: async def create_chat_completion(request: APIChatCompletionRequest): """Creates a completion for the chat message""" error_check_ret = await check_model(request) if error_check_ret is not None: return error_check_ret error_check_ret = check_requests(request) if error_check_ret is not None: return error_check_ret worker_addr = await get_worker_address(request.model) gen_params = await get_gen_params( request.model, worker_addr, request.messages, temperature=request.temperature, top_p=request.top_p, top_k=request.top_k, presence_penalty=request.presence_penalty, frequency_penalty=request.frequency_penalty, max_tokens=request.max_tokens, echo=False, stop=request.stop, ) if request.repetition_penalty is not None: gen_params["repetition_penalty"] = request.repetition_penalty max_new_tokens, error_check_ret = await check_length( request, gen_params["prompt"], gen_params["max_new_tokens"], worker_addr, ) if error_check_ret is not None: return error_check_ret gen_params["max_new_tokens"] = max_new_tokens if request.stream: generator = chat_completion_stream_generator( request.model, gen_params, request.n, worker_addr ) return StreamingResponse(generator, media_type="text/event-stream") choices = [] chat_completions = [] for i in range(request.n): content = asyncio.create_task(generate_completion(gen_params, worker_addr)) chat_completions.append(content) try: all_tasks = await asyncio.gather(*chat_completions) except Exception as e: return create_error_response(ErrorCode.INTERNAL_ERROR, str(e)) usage = UsageInfo() for i, content in enumerate(all_tasks): if content["error_code"] != 0: return create_error_response(content["error_code"], content["text"]) choices.append( ChatCompletionResponseChoice( index=i, message=ChatMessage(role="assistant", content=content["text"]), finish_reason=content.get("finish_reason", "stop"), ) ) task_usage = UsageInfo.parse_obj(content["usage"]) for usage_key, usage_value in task_usage.dict().items(): setattr(usage, usage_key, getattr(usage, usage_key) + usage_value) return ChatCompletionResponse(model=request.model, choices=choices, usage=usage)
Creates a completion for the chat message
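A minimal client-side sketch for exercising this handler once the server is running. The route, base URL, and model name are assumptions (this APIChatCompletionRequest variant is typically mounted under an API prefix); adjust them to match your deployment.

import httpx

payload = {
    "model": "vicuna-7b-v1.5",  # assumed: any model name registered with the controller
    "messages": [{"role": "user", "content": "Hello!"}],
    "temperature": 0.7,
    "stream": False,
}
# The path is an assumption; use whatever route the handler is actually mounted at.
resp = httpx.post("http://localhost:8000/api/v1/chat/completions", json=payload, timeout=60.0)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])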
20,583
import asyncio import argparse import json import os from typing import Generator, Optional, Union, Dict, List, Any import aiohttp import fastapi from fastapi import Depends, HTTPException from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse, JSONResponse from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer import httpx from pydantic import BaseSettings import shortuuid import tiktoken import uvicorn from fastchat.constants import ( WORKER_API_TIMEOUT, WORKER_API_EMBEDDING_BATCH_SIZE, ErrorCode, ) from fastchat.conversation import Conversation, SeparatorStyle from fastchat.protocol.openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, DeltaMessage, CompletionResponseStreamChoice, CompletionStreamResponse, EmbeddingsRequest, EmbeddingsResponse, ErrorResponse, LogProbs, ModelCard, ModelList, ModelPermission, UsageInfo, ) from fastchat.protocol.api_protocol import ( APIChatCompletionRequest, APITokenCheckRequest, APITokenCheckResponse, APITokenCheckResponseItem, ) from fastchat.utils import build_logger logger = build_logger("openai_api_server", "openai_api_server.log") app_settings = AppSettings() app = fastapi.FastAPI() def create_openai_api_server(): parser = argparse.ArgumentParser( description="FastChat ChatGPT-Compatible RESTful API server." ) parser.add_argument("--host", type=str, default="localhost", help="host name") parser.add_argument("--port", type=int, default=8000, help="port number") parser.add_argument( "--controller-address", type=str, default="http://localhost:21001" ) parser.add_argument( "--allow-credentials", action="store_true", help="allow credentials" ) parser.add_argument( "--allowed-origins", type=json.loads, default=["*"], help="allowed origins" ) parser.add_argument( "--allowed-methods", type=json.loads, default=["*"], help="allowed methods" ) parser.add_argument( "--allowed-headers", type=json.loads, default=["*"], help="allowed headers" ) parser.add_argument( "--api-keys", type=lambda s: s.split(","), help="Optional list of comma separated API keys", ) parser.add_argument( "--ssl", action="store_true", required=False, default=False, help="Enable SSL. Requires OS Environment variables 'SSL_KEYFILE' and 'SSL_CERTFILE'.", ) args = parser.parse_args() app.add_middleware( CORSMiddleware, allow_origins=args.allowed_origins, allow_credentials=args.allow_credentials, allow_methods=args.allowed_methods, allow_headers=args.allowed_headers, ) app_settings.controller_address = args.controller_address app_settings.api_keys = args.api_keys logger.info(f"args: {args}") return args
null
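A sketch of the entry point that typically follows this parser; the uvicorn call mirrors the options defined above, and the SSL environment variable names come from the --ssl help text.

import os
import uvicorn

if __name__ == "__main__":
    args = create_openai_api_server()
    if args.ssl:
        uvicorn.run(
            app,
            host=args.host,
            port=args.port,
            log_level="info",
            ssl_keyfile=os.environ["SSL_KEYFILE"],
            ssl_certfile=os.environ["SSL_CERTFILE"],
        )
    else:
        uvicorn.run(app, host=args.host, port=args.port, log_level="info")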
20,584
import json import time import gradio as gr import numpy as np from fastchat.constants import ( MODERATION_MSG, CONVERSATION_LIMIT_MSG, SLOW_MODEL_MSG, INPUT_CHAR_LEN_LIMIT, CONVERSATION_TURN_LIMIT, ) from fastchat.model.model_adapter import get_conversation_template from fastchat.serve.gradio_block_arena_named import flash_buttons from fastchat.serve.gradio_web_server import ( State, bot_response, get_conv_log_filename, no_change_btn, enable_btn, disable_btn, invisible_btn, acknowledgment_md, ip_expiration_dict, get_ip, get_model_description_md, ) from fastchat.utils import ( build_logger, moderation_filter, ) enable_moderation = False def set_global_vars_anony(enable_moderation_): global enable_moderation enable_moderation = enable_moderation_
null
20,585
import argparse import pickle import time import gradio as gr from fastchat.constants import ( SESSION_EXPIRATION_TIME, ) from fastchat.serve.gradio_block_arena_anony import ( build_side_by_side_ui_anony, load_demo_side_by_side_anony, set_global_vars_anony, ) from fastchat.serve.gradio_block_arena_named import ( build_side_by_side_ui_named, load_demo_side_by_side_named, set_global_vars_named, ) from fastchat.serve.gradio_web_server import ( set_global_vars, block_css, build_single_model_ui, build_about, get_model_list, load_demo_single, ip_expiration_dict, get_ip, ) from fastchat.serve.monitor.monitor import build_leaderboard_tab from fastchat.utils import ( build_logger, get_window_url_params_js, get_window_url_params_with_tos_js, parse_gradio_auth_creds, ) def load_demo(url_params, request: gr.Request): global models ip = get_ip(request) logger.info(f"load_demo. ip: {ip}. params: {url_params}") ip_expiration_dict[ip] = time.time() + SESSION_EXPIRATION_TIME selected = 0 if "arena" in url_params: selected = 0 elif "compare" in url_params: selected = 1 elif "single" in url_params: selected = 2 elif "leaderboard" in url_params: selected = 3 if args.model_list_mode == "reload": if args.anony_only_for_proprietary_model: models = get_model_list( args.controller_url, args.register_openai_compatible_models, False, False, False, ) else: models = get_model_list( args.controller_url, args.register_openai_compatible_models, args.add_chatgpt, args.add_claude, args.add_palm, ) single_updates = load_demo_single(models, url_params) models_anony = list(models) if args.anony_only_for_proprietary_model: # Only enable these models in anony battles. if args.add_chatgpt: models_anony += [ "gpt-4-0314", "gpt-4-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", ] if args.add_claude: models_anony += ["claude-2.1", "claude-2.0", "claude-1", "claude-instant-1"] if args.add_palm: models_anony += ["gemini-pro"] anony_only_models = [ "claude-1", "gpt-4-0314", "gpt-4-0613", ] for mdl in anony_only_models: models_anony.append(mdl) models_anony = list(set(models_anony)) side_by_side_anony_updates = load_demo_side_by_side_anony(models_anony, url_params) side_by_side_named_updates = load_demo_side_by_side_named(models, url_params) return ( (gr.Tabs.update(selected=selected),) + single_updates + side_by_side_anony_updates + side_by_side_named_updates ) def build_side_by_side_ui_anony(models): notice_markdown = """ # ⚔️ Chatbot Arena ⚔️ : Benchmarking LLMs in the Wild | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) | ## 📜 Rules - Ask any question to two anonymous models (e.g., ChatGPT, Claude, Llama) and vote for the better one! - You can continue chatting until you identify a winner. - Vote won't be counted if model identity is revealed during conversation. ## 🏆 Arena Elo [Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) We use **100K+** human votes to compile an Elo-based LLM leaderboard. Find out who is the 🥇LLM Champion! ## 👇 Chat now! 
""" states = [gr.State() for _ in range(num_sides)] model_selectors = [None] * num_sides chatbots = [None] * num_sides gr.Markdown(notice_markdown, elem_id="notice_markdown") with gr.Box(elem_id="share-region-anony"): with gr.Accordion("🔍 Expand to see 20+ Arena players", open=False): model_description_md = get_model_description_md(models) gr.Markdown(model_description_md, elem_id="model_description_markdown") with gr.Row(): for i in range(num_sides): label = "Model A" if i == 0 else "Model B" with gr.Column(): chatbots[i] = gr.Chatbot( label=label, elem_id=f"chatbot", height=550 ) with gr.Row(): for i in range(num_sides): with gr.Column(): model_selectors[i] = gr.Markdown(anony_names[i]) with gr.Row(): slow_warning = gr.Markdown("", elem_id="notice_markdown") with gr.Row(): leftvote_btn = gr.Button( value="👈 A is better", visible=False, interactive=False ) rightvote_btn = gr.Button( value="👉 B is better", visible=False, interactive=False ) tie_btn = gr.Button(value="🤝 Tie", visible=False, interactive=False) bothbad_btn = gr.Button( value="👎 Both are bad", visible=False, interactive=False ) with gr.Row(): textbox = gr.Textbox( show_label=False, placeholder="👉 Enter your prompt and press ENTER", container=False, elem_id="input_box", ) send_btn = gr.Button(value="Send", variant="primary", scale=0) with gr.Row() as button_row: clear_btn = gr.Button(value="🎲 New Round", interactive=False) regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False) share_btn = gr.Button(value="📷 Share") with gr.Accordion("Parameters", open=False) as parameter_row: temperature = gr.Slider( minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Temperature", ) top_p = gr.Slider( minimum=0.0, maximum=1.0, value=1.0, step=0.1, interactive=True, label="Top P", ) max_output_tokens = gr.Slider( minimum=16, maximum=2048, value=1024, step=64, interactive=True, label="Max output tokens", ) gr.Markdown(acknowledgment_md, elem_id="ack_markdown") # Register listeners btn_list = [ leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, regenerate_btn, clear_btn, ] leftvote_btn.click( leftvote_last_response, states + model_selectors, model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn], ) rightvote_btn.click( rightvote_last_response, states + model_selectors, model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn], ) tie_btn.click( tievote_last_response, states + model_selectors, model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn], ) bothbad_btn.click( bothbad_vote_last_response, states + model_selectors, model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn], ) regenerate_btn.click( regenerate, states, states + chatbots + [textbox] + btn_list ).then( bot_response_multi, states + [temperature, top_p, max_output_tokens], states + chatbots + btn_list, ).then( flash_buttons, [], btn_list ) clear_btn.click( clear_history, None, states + chatbots + model_selectors + [textbox] + btn_list + [slow_warning], ) share_js = """ function (a, b, c, d) { const captureElement = document.querySelector('#share-region-anony'); html2canvas(captureElement) .then(canvas => { canvas.style.display = 'none' document.body.appendChild(canvas) return canvas }) .then(canvas => { const image = canvas.toDataURL('image/png') const a = document.createElement('a') a.setAttribute('download', 'chatbot-arena.png') a.setAttribute('href', image) a.click() canvas.remove() }); return [a, b, c, d]; } """ share_btn.click(share_click, states + 
model_selectors, [], _js=share_js) textbox.submit( add_text, states + model_selectors + [textbox], states + chatbots + [textbox] + btn_list + [slow_warning], ).then( bot_response_multi, states + [temperature, top_p, max_output_tokens], states + chatbots + btn_list, ).then( flash_buttons, [], btn_list, ) send_btn.click( add_text, states + model_selectors + [textbox], states + chatbots + [textbox] + btn_list, ).then( bot_response_multi, states + [temperature, top_p, max_output_tokens], states + chatbots + btn_list, ).then( flash_buttons, [], btn_list ) return states + model_selectors def build_side_by_side_ui_named(models): notice_markdown = """ # ⚔️ Chatbot Arena ⚔️ : Benchmarking LLMs in the Wild | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) | ## 📜 Rules - Chat with any two models side-by-side and vote! - You can continue chatting for multiple rounds. - Click "Clear history" to start a new round. ## 🤖 Choose two models to compare """ states = [gr.State() for _ in range(num_sides)] model_selectors = [None] * num_sides chatbots = [None] * num_sides notice = gr.Markdown(notice_markdown, elem_id="notice_markdown") with gr.Box(elem_id="share-region-named"): with gr.Row(): for i in range(num_sides): with gr.Column(): model_selectors[i] = gr.Dropdown( choices=models, value=models[i] if len(models) > i else "", interactive=True, show_label=False, container=False, ) with gr.Row(): with gr.Accordion("🔍 Expand to see 20+ model descriptions", open=False): model_description_md = get_model_description_md(models) gr.Markdown(model_description_md, elem_id="model_description_markdown") with gr.Row(): for i in range(num_sides): label = "Model A" if i == 0 else "Model B" with gr.Column(): chatbots[i] = gr.Chatbot( label=label, elem_id=f"chatbot", height=550 ) with gr.Row(): leftvote_btn = gr.Button( value="👈 A is better", visible=False, interactive=False ) rightvote_btn = gr.Button( value="👉 B is better", visible=False, interactive=False ) tie_btn = gr.Button(value="🤝 Tie", visible=False, interactive=False) bothbad_btn = gr.Button( value="👎 Both are bad", visible=False, interactive=False ) with gr.Row(): textbox = gr.Textbox( show_label=False, placeholder="👉 Enter your prompt and press ENTER", container=False, elem_id="input_box", ) send_btn = gr.Button(value="Send", variant="primary", scale=0) with gr.Row() as button_row: clear_btn = gr.Button(value="🗑️ Clear history", interactive=False) regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False) share_btn = gr.Button(value="📷 Share") with gr.Accordion("Parameters", open=False) as parameter_row: temperature = gr.Slider( minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Temperature", ) top_p = gr.Slider( minimum=0.0, maximum=1.0, value=1.0, step=0.1, interactive=True, label="Top P", ) max_output_tokens = gr.Slider( minimum=16, maximum=2048, value=1024, step=64, interactive=True, label="Max output tokens", ) gr.Markdown(acknowledgment_md, elem_id="ack_markdown") # Register listeners btn_list = [ leftvote_btn, rightvote_btn, tie_btn, bothbad_btn, regenerate_btn, clear_btn, ] leftvote_btn.click( leftvote_last_response, states + model_selectors, [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn], ) rightvote_btn.click( rightvote_last_response, states + 
model_selectors, [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn], ) tie_btn.click( tievote_last_response, states + model_selectors, [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn], ) bothbad_btn.click( bothbad_vote_last_response, states + model_selectors, [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn], ) regenerate_btn.click( regenerate, states, states + chatbots + [textbox] + btn_list ).then( bot_response_multi, states + [temperature, top_p, max_output_tokens], states + chatbots + btn_list, ).then( flash_buttons, [], btn_list ) clear_btn.click(clear_history, None, states + chatbots + [textbox] + btn_list) share_js = """ function (a, b, c, d) { const captureElement = document.querySelector('#share-region-named'); html2canvas(captureElement) .then(canvas => { canvas.style.display = 'none' document.body.appendChild(canvas) return canvas }) .then(canvas => { const image = canvas.toDataURL('image/png') const a = document.createElement('a') a.setAttribute('download', 'chatbot-arena.png') a.setAttribute('href', image) a.click() canvas.remove() }); return [a, b, c, d]; } """ share_btn.click(share_click, states + model_selectors, [], _js=share_js) for i in range(num_sides): model_selectors[i].change( clear_history, None, states + chatbots + [textbox] + btn_list ) textbox.submit( add_text, states + model_selectors + [textbox], states + chatbots + [textbox] + btn_list, ).then( bot_response_multi, states + [temperature, top_p, max_output_tokens], states + chatbots + btn_list, ).then( flash_buttons, [], btn_list ) send_btn.click( add_text, states + model_selectors + [textbox], states + chatbots + [textbox] + btn_list, ).then( bot_response_multi, states + [temperature, top_p, max_output_tokens], states + chatbots + btn_list, ).then( flash_buttons, [], btn_list ) return states + model_selectors block_css = """ #notice_markdown { font-size: 110% } #notice_markdown th { display: none; } #notice_markdown td { padding-top: 6px; padding-bottom: 6px; } #model_description_markdown { font-size: 110% } #leaderboard_markdown { font-size: 110% } #leaderboard_markdown td { padding-top: 6px; padding-bottom: 6px; } #leaderboard_dataframe td { line-height: 0.1em; } #about_markdown { font-size: 110% } #ack_markdown { font-size: 110% } #input_box textarea { } footer { display:none !important } .image-container { display: flex; align-items: center; padding: 1px; } .image-container img { margin: 0 30px; height: 30px; max-height: 100%; width: auto; max-width: 20%; } .image-about img { margin: 0 30px; margin-top: 30px; height: 60px; max-height: 100%; width: auto; max-width: 20%; float: left; } """ def build_about(): about_markdown = f""" # About Us Chatbot Arena is an open-source research project developed by members from [LMSYS](https://lmsys.org/about/) and UC Berkeley [SkyLab](https://sky.cs.berkeley.edu/). Our mission is to build an open crowdsourced platform to collect human feedback and evaluate LLMs under real-world scenarios. We open-source our [FastChat](https://github.com/lm-sys/FastChat) project at GitHub and release chat and human feedback datasets [here](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md). We invite everyone to join us in this journey! 
## Read More - Chatbot Arena [launch post](https://lmsys.org/blog/2023-05-03-arena/), [data release](https://lmsys.org/blog/2023-07-20-dataset/) - LMSYS-Chat-1M [report](https://arxiv.org/abs/2309.11998) ## Core Members [Lianmin Zheng](https://lmzheng.net/), [Wei-Lin Chiang](https://infwinston.github.io/), [Ying Sheng](https://sites.google.com/view/yingsheng/home), [Siyuan Zhuang](https://scholar.google.com/citations?user=KSZmI5EAAAAJ) ## Advisors [Ion Stoica](http://people.eecs.berkeley.edu/~istoica/), [Joseph E. Gonzalez](https://people.eecs.berkeley.edu/~jegonzal/), [Hao Zhang](https://cseweb.ucsd.edu/~haozhang/) ## Contact Us - Follow our [Twitter](https://twitter.com/lmsysorg), [Discord](https://discord.gg/HSWAKCrnFx) or email us at lmsys.org@gmail.com - File issues on [GitHub](https://github.com/lm-sys/FastChat) - Download our datasets and models on [HuggingFace](https://huggingface.co/lmsys) ## Acknowledgment We thank [SkyPilot](https://github.com/skypilot-org/skypilot) and [Gradio](https://github.com/gradio-app/gradio) team for their system support. We also thank [Kaggle](https://www.kaggle.com/), [MBZUAI](https://mbzuai.ac.ae/), [Anyscale](https://www.anyscale.com/), [a16z](https://www.a16z.com/), [HuggingFace](https://huggingface.co/) for their generous sponsorship. Learn more about partnership [here](https://lmsys.org/donations/). <div class="image-about"> <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/7/7c/Kaggle_logo.png/400px-Kaggle_logo.png" alt="Kaggle"> <img src="https://mma.prnewswire.com/media/1227419/MBZUAI_Logo.jpg?p=facebookg" alt="MBZUAI"> <img src="https://docs.anyscale.com/site-assets/logo.png" alt="AnyScale"> <img src="https://a16z.com/wp-content/themes/a16z/assets/images/opegraph_images/corporate-Yoast-Twitter.jpg" alt="a16z"> <img src="https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo-with-title.png" alt="HuggingFace"> </div> """ # state = gr.State() gr.Markdown(about_markdown, elem_id="about_markdown") # return [state] def build_single_model_ui(models, add_promotion_links=False): promotion = ( """ - | [GitHub](https://github.com/lm-sys/FastChat) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) | - Introducing Llama 2: The Next Generation Open Source Large Language Model. [[Website]](https://ai.meta.com/llama/) - Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90% ChatGPT Quality. 
[[Blog]](https://lmsys.org/blog/2023-03-30-vicuna/) ## 🤖 Choose any model to chat """ if add_promotion_links else "" ) notice_markdown = f""" # 🏔️ Chat with Open Large Language Models {promotion} """ state = gr.State() gr.Markdown(notice_markdown, elem_id="notice_markdown") with gr.Box(elem_id="share-region-named"): with gr.Row(elem_id="model_selector_row"): model_selector = gr.Dropdown( choices=models, value=models[0] if len(models) > 0 else "", interactive=True, show_label=False, container=False, ) with gr.Row(): with gr.Accordion( "🔍 Expand to see 20+ model descriptions", open=False, elem_id="model_description_accordion", ): model_description_md = get_model_description_md(models) gr.Markdown(model_description_md, elem_id="model_description_markdown") chatbot = gr.Chatbot( elem_id="chatbot", label="Scroll down and start chatting", height=550, ) with gr.Row(): textbox = gr.Textbox( show_label=False, placeholder="👉 Enter your prompt and press ENTER", container=False, elem_id="input_box", ) send_btn = gr.Button(value="Send", variant="primary", scale=0) with gr.Row() as button_row: upvote_btn = gr.Button(value="👍 Upvote", interactive=False) downvote_btn = gr.Button(value="👎 Downvote", interactive=False) flag_btn = gr.Button(value="⚠️ Flag", interactive=False) regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False) clear_btn = gr.Button(value="🗑️ Clear history", interactive=False) with gr.Accordion("Parameters", open=False) as parameter_row: temperature = gr.Slider( minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Temperature", ) top_p = gr.Slider( minimum=0.0, maximum=1.0, value=1.0, step=0.1, interactive=True, label="Top P", ) max_output_tokens = gr.Slider( minimum=16, maximum=2048, value=1024, step=64, interactive=True, label="Max output tokens", ) if add_promotion_links: gr.Markdown(acknowledgment_md, elem_id="ack_markdown") # Register listeners btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn] upvote_btn.click( upvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn], ) downvote_btn.click( downvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn], ) flag_btn.click( flag_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn], ) regenerate_btn.click(regenerate, state, [state, chatbot, textbox] + btn_list).then( bot_response, [state, temperature, top_p, max_output_tokens], [state, chatbot] + btn_list, ) clear_btn.click(clear_history, None, [state, chatbot, textbox] + btn_list) model_selector.change(clear_history, None, [state, chatbot, textbox] + btn_list) textbox.submit( add_text, [state, model_selector, textbox], [state, chatbot, textbox] + btn_list ).then( bot_response, [state, temperature, top_p, max_output_tokens], [state, chatbot] + btn_list, ) send_btn.click( add_text, [state, model_selector, textbox], [state, chatbot, textbox] + btn_list, ).then( bot_response, [state, temperature, top_p, max_output_tokens], [state, chatbot] + btn_list, ) return [state, model_selector] def build_leaderboard_tab(elo_results_file, leaderboard_table_file, show_plot=False): if elo_results_file is None: # Do live update default_md = "Loading ..." 
p1 = p2 = p3 = p4 = None else: with open(elo_results_file, "rb") as fin: elo_results = pickle.load(fin) p1 = elo_results["win_fraction_heatmap"] p2 = elo_results["battle_count_heatmap"] p3 = elo_results["bootstrap_elo_rating"] p4 = elo_results["average_win_rate_bar"] arena_df = elo_results["leaderboard_table_df"] default_md = make_default_md(arena_df, elo_results) md_1 = gr.Markdown(default_md, elem_id="leaderboard_markdown") if leaderboard_table_file: data = load_leaderboard_table_csv(leaderboard_table_file) model_table_df = pd.DataFrame(data) with gr.Tabs() as tabs: # arena table arena_table_vals = get_arena_table(arena_df, model_table_df) with gr.Tab("Arena Elo", id=0): md = make_arena_leaderboard_md(arena_df) gr.Markdown(md, elem_id="leaderboard_markdown") gr.Dataframe( headers=[ "Rank", "🤖 Model", "⭐ Arena Elo", "📊 95% CI", "🗳️ Votes", "Organization", "License", ], datatype=[ "str", "markdown", "number", "str", "number", "str", "str", ], value=arena_table_vals, elem_id="arena_leaderboard_dataframe", height=700, column_widths=[50, 200, 100, 100, 100, 150, 150], wrap=True, ) with gr.Tab("Full Leaderboard", id=1): md = make_full_leaderboard_md(elo_results) gr.Markdown(md, elem_id="leaderboard_markdown") full_table_vals = get_full_table(arena_df, model_table_df) gr.Dataframe( headers=[ "🤖 Model", "⭐ Arena Elo", "📈 MT-bench", "📚 MMLU", "Organization", "License", ], datatype=["markdown", "number", "number", "number", "str", "str"], value=full_table_vals, elem_id="full_leaderboard_dataframe", column_widths=[200, 100, 100, 100, 150, 150], height=700, wrap=True, ) if not show_plot: gr.Markdown( """ ## Visit our [HF space](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) for more analysis! If you want to see more models, please help us [add them](https://github.com/lm-sys/FastChat/blob/main/docs/arena.md#how-to-add-a-new-model). """, elem_id="leaderboard_markdown", ) else: pass leader_component_values[:] = [default_md, p1, p2, p3, p4] if show_plot: gr.Markdown( f"""## More Statistics for Chatbot Arena\n Below are figures for more statistics. The code for generating them is also included in this [notebook]({notebook_url}). You can find more discussions in this blog [post](https://lmsys.org/blog/2023-12-07-leaderboard/). """, elem_id="leaderboard_markdown", ) with gr.Row(): with gr.Column(): gr.Markdown( "#### Figure 1: Fraction of Model A Wins for All Non-tied A vs. 
B Battles" ) plot_1 = gr.Plot(p1, show_label=False) with gr.Column(): gr.Markdown( "#### Figure 2: Battle Count for Each Combination of Models (without Ties)" ) plot_2 = gr.Plot(p2, show_label=False) with gr.Row(): with gr.Column(): gr.Markdown( "#### Figure 3: Bootstrap of Elo Estimates (1000 Rounds of Random Sampling)" ) plot_3 = gr.Plot(p3, show_label=False) with gr.Column(): gr.Markdown( "#### Figure 4: Average Win Rate Against All Other Models (Assuming Uniform Sampling and No Ties)" ) plot_4 = gr.Plot(p4, show_label=False) from fastchat.serve.gradio_web_server import acknowledgment_md gr.Markdown(acknowledgment_md) if show_plot: return [md_1, plot_1, plot_2, plot_3, plot_4] return [md_1] get_window_url_params_js = """ function() { const params = new URLSearchParams(window.location.search); url_params = Object.fromEntries(params); console.log("url_params", url_params); return url_params; } """ get_window_url_params_with_tos_js = """ function() { const params = new URLSearchParams(window.location.search); url_params = Object.fromEntries(params); console.log("url_params", url_params); msg = "Users of this website are required to agree to the following terms:\\n\\nThe service is a research preview. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.\\nThe service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) or a similar license." alert(msg); return url_params; } """ def build_demo(models, elo_results_file, leaderboard_table_file): text_size = gr.themes.sizes.text_md with gr.Blocks( title="Chat with Open Large Language Models", theme=gr.themes.Default(text_size=text_size), css=block_css, ) as demo: with gr.Tabs() as tabs: with gr.Tab("Arena (battle)", id=0): side_by_side_anony_list = build_side_by_side_ui_anony(models) with gr.Tab("Arena (side-by-side)", id=1): side_by_side_named_list = build_side_by_side_ui_named(models) with gr.Tab("Direct Chat", id=2): single_model_list = build_single_model_ui( models, add_promotion_links=True ) if elo_results_file: with gr.Tab("Leaderboard", id=3): build_leaderboard_tab(elo_results_file, leaderboard_table_file) with gr.Tab("About Us", id=4): about = build_about() url_params = gr.JSON(visible=False) if args.model_list_mode not in ["once", "reload"]: raise ValueError(f"Unknown model list mode: {args.model_list_mode}") if args.show_terms_of_use: load_js = get_window_url_params_with_tos_js else: load_js = get_window_url_params_js demo.load( load_demo, [url_params], [tabs] + single_model_list + side_by_side_anony_list + side_by_side_named_list, _js=load_js, ) return demo
null
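A sketch of serving the assembled demo; the queue and launch parameters are assumptions, and `args` must already hold the module's parsed CLI options (controller URL, host/port, optional Elo and leaderboard files).

models = get_model_list(
    args.controller_url,
    args.register_openai_compatible_models,
    args.add_chatgpt,
    args.add_claude,
    args.add_palm,
)
demo = build_demo(models, args.elo_results_file, args.leaderboard_table_file)
demo.queue(concurrency_count=10, status_update_rate=10, api_open=False).launch(
    server_name=args.host, server_port=args.port, max_threads=200
)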
20,586
import argparse import json import re import polyglot from polyglot.detect import Detector import pycld2 from tqdm import tqdm def skip(conv, args): # Remove certain languages if args.keep_lang != "all" or args.skip_lang is not None: text = "\n".join([x["value"] for x in conv["conversations"]]) try: lang_code = Detector(text).language.code except (pycld2.error, polyglot.detect.base.UnknownLanguage): lang_code = "unknown" if args.keep_lang != "all" and lang_code != args.keep_lang: return True if lang_code == args.skip_lang: return True # Remove repetitive numbers if args.reduce_rep: for sentence in conv["conversations"]: val = sentence["value"] sub = re.search(r"(\d)\1{8}", val) if sub is not None: return True return False
null
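A small usage sketch; the option names are inferred from the attributes the function reads (keep_lang, skip_lang, reduce_rep), and leaving keep_lang at "all" avoids the polyglot language-detection path entirely.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--keep-lang", type=str, default="all")
parser.add_argument("--skip-lang", type=str, default=None)
parser.add_argument("--reduce-rep", action="store_true")
args = parser.parse_args(["--reduce-rep"])

conv = {"conversations": [{"value": "Hello"}, {"value": "Order id: 111111111"}]}
print(skip(conv, args))  # True: nine repeated digits trip the --reduce-rep check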
20,587
import json The provided code snippet includes necessary dependencies for implementing the `identity_questions` function. Write a Python function `def identity_questions()` to solve the following problem: " Adapted from https://github.com/young-geng/koala_data_pipeline/blob/main/process_hard_coded_data.py Here is the function: def identity_questions(): """ " Adapted from https://github.com/young-geng/koala_data_pipeline/blob/main/process_hard_coded_data.py """ content = [] name = "Vicuna" org = "Large Model Systems Organization (LMSYS)" def generate_conversations(questions, answers): for q in questions: for a in answers: content.append( { "id": f"identity_{len(content)}", "conversations": [ {"from": "human", "value": q}, {"from": "gpt", "value": a}, ], } ) questions = [ "Who are you?", "What is your name?", "Can you introduce yourself?", "Can you tell me a little bit about yourself?", "What's your name?", "What are you called?", "What are you?", "Tell me your name.", "Tell me about yourself.", "Tell me about you.", "Tell me who you are.", "Please introduce yourself.", ] answers = [ f"I am {name}, a language model trained by researchers from {org}.", f"My name is {name}, and I'm a language model developed by {org}.", f"You can call me {name}, and I was trained by {org} researchers as a language model.", f"As a language model, I go by the name {name} and was trained by researchers from {org}.", f"I'm a language model called {name}, and I was trained by {org} researchers.", f"You may refer to me as {name}, a language model meticulously developed by the researchers at {org}.", ] generate_conversations(questions, answers) questions = [ "Who created you?", "Who made you?", "Who built you?", "Who programmed you?", "Who trained you?", "Who taught you?", "Who developed you?", ] answers = [ f"Researchers from {org} created me.", f"I'm created by {org}.", f"I'm built by researchers from {org}.", f"I am a language model trained by researchers from {org}.", f"I'm a language model developed by {org}.", f"I'm a language model created by researchers from {org}.", f"My creators are researchers from {org}.", ] generate_conversations(questions, answers) questions = [ "Are you ChatGPT?", "Are you GPT-2?", "Are you GPT-3?", "Are you GPT-4?", "Are you davinci?", "Are you davinci-001?", "Are you davinci-002?", "Are you davinci-003?", "Are you curie?", "Are you based on ChatGPT?", "Are you based on GPT-2?", "Are you based on GPT-3?", "Are you based on GPT-4?", "Are you based on davinci?", "Are you based on davinci-001?", "Are you based on davinci-002?", "Are you based on davinci-003?", "Are you based on curie?", "Are you trained by OpenAI?", "Are you trained by Google?", "Are you trained by Microsoft?", "Are you trained by Meta?", "Are you trained by IBM?", "Do you call OpenAI APIs?", "Do you call Google APIs?", "Do you call Microsoft APIs?", "Do you call Meta APIs?", "Do you call IBM APIs?", "Are you created by OpenAI?", "Are you created by Google?", "Are you created by Microsoft?", "Are you created by Meta?", "Are you created by IBM?", "Are you developed by OpenAI?", "Are you developed by Google?", "Are you developed by Microsoft?", "Are you developed by Meta?", "Are you developed by IBM?", "Are you trained on OpenAI data?", "Are you trained on Google data?", "Are you trained on Microsoft data?", "Are you trained on Meta data?", "Are you trained on IBM data?", "Are you trained with OpenAI data?", "Are you trained with Google data?", "Are you trained with Microsoft data?", "Are you trained with Meta data?", "Are 
you trained with IBM data?", "Have you been trained with OpenAI data?", "Have you been trained with Google data?", "Have you been trained with Microsoft data?", "Have you been trained with Meta data?", "Have you been trained with IBM data?", "Are you finetuned on OpenAI data?", "Are you finetuned on Google data?", "Are you finetuned on Microsoft data?", "Are you finetuned on Meta data?", "Are you finetuned on IBM data?", "Are you finetuned with OpenAI data?", "Are you finetuned with Google data?", "Are you finetuned with Microsoft data?", "Are you finetuned with Meta data?", "Are you finetuned with IBM data?", "Have you been finetuned with OpenAI data?", "Have you been finetuned with Google data?", "Have you been finetuned with Microsoft data?", "Have you been finetuned with Meta data?", "Have you been finetuned with IBM data?", ] answers = [ f"No, I am a language model trained by researchers from {org}.", f"No, I am a language model developed by researchers from {org}.", f"No, I am a language model created by researchers from {org}.", f"No, I am trained by researchers from {org}.", f"No, I am developed by researchers from {org}.", f"No, I am created by researchers from {org}.", f"No, I'm a language model trained by researchers from {org}.", f"No, I'm a language model developed by researchers from {org}.", f"No, I'm a language model created by researchers from {org}.", f"No, I'm trained by researchers from {org}.", f"No, I'm developed by researchers from {org}.", f"No, I'm created by researchers from {org}.", ] generate_conversations(questions, answers) return content
" Adapted from https://github.com/young-geng/koala_data_pipeline/blob/main/process_hard_coded_data.py
20,588
import argparse from concurrent.futures import ProcessPoolExecutor import json from typing import Dict, Sequence, Optional import transformers from tqdm import tqdm tokenizer = max_length = None def worker(input_data): result = [] for sample in input_data: result.extend(split_one_sample(sample)) return result The provided code snippet includes necessary dependencies for implementing the `split_all` function. Write a Python function `def split_all(content, begin, end, tokenizer_, max_length_)` to solve the following problem: Keep the maximum round of conversations within the max token length constraint Here is the function: def split_all(content, begin, end, tokenizer_, max_length_): """ Keep the maximum round of conversations within the max token length constraint """ global tokenizer, max_length tokenizer = tokenizer_ max_length = max_length_ content = content[begin:end] new_content = [] # Split content into chunks chunks = [content[i : i + 1000] for i in range(0, len(content), 1000)] with ProcessPoolExecutor() as executor: for result in tqdm(executor.map(worker, chunks), total=len(chunks)): new_content.extend(result) return new_content
Keep the maximum number of conversation rounds within the max token length constraint
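A usage sketch under stated assumptions: the module's split_one_sample helper (referenced by worker but not shown here) must be importable in the worker processes, the checkpoint name is a placeholder, and the input file is hypothetical.

import json
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")  # placeholder checkpoint
with open("sharegpt_clean.json") as fin:  # hypothetical cleaned ShareGPT file
    content = json.load(fin)
new_content = split_all(content, 0, len(content), tok, 2048)
print(f"{len(content)} -> {len(new_content)} samples after splitting")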
20,589
import argparse from concurrent.futures import ProcessPoolExecutor import json from typing import Dict, Sequence, Optional import transformers from tqdm import tqdm def filter_invalid_roles(content): new_content = [] for i, c in enumerate(content): roles = ["human", "gpt"] if len(c["conversations"]) <= 0: continue valid = True for j, s in enumerate(c["conversations"]): if s["from"] != roles[j % 2]: valid = False break if valid: new_content.append(c) return new_content
null
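A self-contained check of the alternation rule: only non-empty conversations that start with "human" and strictly alternate human/gpt survive.

content = [
    {"conversations": [
        {"from": "human", "value": "hi"},
        {"from": "gpt", "value": "hello"},
    ]},
    {"conversations": [{"from": "gpt", "value": "starts with gpt"}]},
    {"conversations": []},
]
print(len(filter_invalid_roles(content)))  # 1: only the first sample is kept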
20,590
import argparse import json import re from tqdm import tqdm wrong_indices_pattern = re.compile("\n1\. [^2]*\n1\. ") def should_skip(conv): # Filter wrong list indices like https://sharegpt.com/c/1pREAGO for sentence in conv["conversations"]: val = sentence["value"] sub = re.search(wrong_indices_pattern, val) if sub is not None: return True return False
null
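A minimal demonstration of the filter; a list whose markers repeat "1." instead of incrementing matches wrong_indices_pattern.

conv = {"conversations": [{"value": "Steps:\n1. first step\n1. second step"}]}
print(should_skip(conv))  # True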
20,591
import argparse from concurrent.futures import ProcessPoolExecutor import json import numpy as np from tqdm import tqdm from transformers import AutoTokenizer, AutoModelForCausalLM def tokenize_one_sample(c): for i in range(len(c["conversations"])): v = c["conversations"][i]["value"] c["conversations"][i]["value"] = tokenizer.tokenize(v) return c def tokenize_dataset(content): processed = [] with ProcessPoolExecutor() as executor: for result in tqdm( executor.map(tokenize_one_sample, content), total=len(content) ): processed.append(result) return processed
null
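A driving sketch for a tiny dataset. tokenize_one_sample reads a module-level `tokenizer`, which the full script assigns before the pool starts; on fork-based platforms the worker processes inherit it. The checkpoint name is a placeholder.

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")  # placeholder

content = [{"conversations": [
    {"from": "human", "value": "Hello world"},
    {"from": "gpt", "value": "Hi there"},
]}]
processed = tokenize_dataset(content)
print(processed[0]["conversations"][0]["value"])  # list of subword tokens for "Hello world"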
20,592
import argparse from concurrent.futures import ProcessPoolExecutor import json import numpy as np from tqdm import tqdm from transformers import AutoTokenizer, AutoModelForCausalLM def compute_stats(content): sample_lens = [] sample_turns = [] prompt_lens = [] res_lens = [] for c in content: sample_len = 0 sample_turns.append(len(c["conversations"]) // 2) for i in range(len(c["conversations"]) // 2): p = c["conversations"][i * 2]["value"] r = c["conversations"][i * 2 + 1]["value"] turn_len = len(p) + len(r) sample_len += turn_len prompt_lens.append(len(p)) res_lens.append(len(r)) sample_lens.append(sample_len) return sample_lens, sample_turns, prompt_lens, res_lens
null
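A tiny worked example; note that all lengths here are character counts of the raw strings, not token counts.

content = [{"conversations": [
    {"from": "human", "value": "Hi"},                    # prompt: 2 chars
    {"from": "gpt", "value": "Hello! How can I help?"},  # response: 22 chars
]}]
sample_lens, sample_turns, prompt_lens, res_lens = compute_stats(content)
print(sample_lens, sample_turns, prompt_lens, res_lens)  # [24] [1] [2] [22]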
20,593
import argparse from concurrent.futures import ProcessPoolExecutor import json import logging import re from typing import Dict, Union import bs4 import markdownify from tqdm import tqdm def clean_html_one_sample(sample): roles = ["human", "gpt"] if len(sample["conversations"]) <= 1: return (sample, 1) # Adjust the offset for cases like https://sharegpt.com/c/VyaZlh4 if sample["conversations"][0]["from"] != "human": sample["conversations"] = sample["conversations"][1:] if len(sample["conversations"]) <= 1: return (sample, 1) if sample["conversations"][-1]["from"] == "human": sample["conversations"] = sample["conversations"][:-1] if len(sample["conversations"]) <= 1: return (sample, 1) char_count = 0 new_conversations = [] for i, c in enumerate(sample["conversations"]): if c["from"] != roles[i % 2]: return (sample, 2) if contain_blocked_words(c["value"]): return (sample, 3) try: new_val = html_to_markdown(c["value"]) except (bs4.builder.ParserRejectedMarkup, AssertionError): return (sample, 4) # Filter empty answers like https://sharegpt.com/c/mrllZ6u if not new_val or not new_val[0].isprintable(): break char_count += len(new_val) new_conversations.append( { "from": c["from"], "value": new_val, } ) new_conversations = new_conversations[: len(new_conversations) // 2 * 2] sample["conversations"] = new_conversations if char_count < 16 or len(sample["conversations"]) <= 0: return (sample, 1) return (sample, 0) The provided code snippet includes necessary dependencies for implementing the `clean_html_all` function. Write a Python function `def clean_html_all(content, begin, end)` to solve the following problem: Clean the source html files. Here is the function: def clean_html_all(content, begin, end): """ Clean the source html files. """ cnt_skip = 0 cnt_blocked_words = 0 cnt_wrong_format = 0 cnt_parser_error = 0 cnt_too_short = 0 cnt_id_duplication = 0 cnt_value_duplication = 0 cnt_plugin = 0 cnt_tag = 0 content = content[begin:end] processed = [] with ProcessPoolExecutor() as executor: for result in tqdm( executor.map(clean_html_one_sample, content), total=len(content) ): processed.append(result) visited = {} new_content = [] for sample, error_code in processed: cid = sample["id"] skipped = True if error_code != 0: if error_code == 1: print(f"id {cid} is too short") cnt_too_short += 1 elif error_code == 2: print(f"id {cid} has a wrong format") cnt_wrong_format += 1 elif error_code == 3: print(f"id {cid} contains blocked words") cnt_blocked_words += 1 elif error_code == 4: print(f"id {cid} contains parser errors") cnt_parser_error += 1 else: raise ValueError(f"Invalid error_code: {error_code}") elif cid in visited: print(f"id {cid} is an id duplication of {visited[cid]}") cnt_id_duplication += 1 elif sample.get("plugins", None) is not None: print(f"id {cid} contains plugin") cnt_plugin += 1 else: key = ( sample["conversations"][0]["value"], sample["conversations"][1]["value"], ) if key in visited: print(f"id {cid} is a value duplication of {visited[key]}") cnt_value_duplication += 1 else: visited[cid] = visited[key] = cid skipped = False if not skipped: new_content.append(sample) else: cnt_skip += 1 print( f"total: {len(content)}, skip: {cnt_skip}, new: {len(new_content)}, " f"cnt_blocked_words: {cnt_blocked_words}, cnt_parser_error: {cnt_parser_error}, " f"cnt_wrong_format: {cnt_wrong_format}, " f"cnt_too_short: {cnt_too_short}, cnt_id_duplication: {cnt_id_duplication}, " f"cnt_value_duplication: {cnt_value_duplication}, cnt_plugin: {cnt_plugin}" ) return new_content
Clean the source HTML files.
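A usage sketch under stated assumptions: the module's contain_blocked_words and html_to_markdown helpers (used by clean_html_one_sample but not shown here) must be importable, and both file names are hypothetical.

import json

with open("sg_90k_html.json") as fin:  # hypothetical raw ShareGPT HTML dump
    content = json.load(fin)
cleaned = clean_html_all(content, 0, len(content))
with open("sharegpt_clean.json", "w") as fout:
    json.dump(cleaned, fout, indent=2, ensure_ascii=False)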