Spaces:
Sleeping
fix max tokens
main.py CHANGED

@@ -6,17 +6,18 @@ from pydantic import BaseModel
 from huggingface_hub import InferenceClient
 from typing import List
 
-
 app = FastAPI()
 client = InferenceClient("openai-community/gpt2-medium")
 
-SYSTEM_PROMPT = 
-
+SYSTEM_PROMPT = (
+    "You are a very powerful AI to generate interesting stories for short-form content "
+    "consumption. Make sure to hook the reader's attention in the first few seconds. "
+    "Make sure to be engaging and creative in your responses."
+)
 
 class Item(BaseModel):
     prompt: str
     history: List[str] = []
-    # system_prompt: str = "You are a very powerful AI assistant."
     temperature: float = 0.0
     max_new_tokens: int = 1048
     top_p: float = 0.15

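Item is the request schema used by the endpoint further down. A standalone sketch of the visible fields (values are illustrative); note that format_prompt in the next hunk unpacks each history entry as a (user_prompt, bot_response) pair, so the List[str] annotation is looser than the shape the code actually consumes:

    from typing import List
    from pydantic import BaseModel

    class Item(BaseModel):
        # Mirror of the fields shown above; anything defined in the hidden
        # lines of main.py (e.g. repetition_penalty, used later in generate)
        # is omitted here.
        prompt: str
        history: List[str] = []  # consumed as (user, bot) pairs in format_prompt
        temperature: float = 0.0
        max_new_tokens: int = 1048
        top_p: float = 0.15

    # Illustrative payload; an empty history sidesteps the pair/str mismatch.
    item = Item(prompt="Tell me a story about a lighthouse keeper.")
    print(item.dict())  # .model_dump() on pydantic v2
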
@@ -26,28 +27,24 @@ class Item(BaseModel):
 def format_prompt(message, history):
     prompt = "<s>"
     for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
+        prompt += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
 
 def generate(item: Item):
     temperature = max(float(item.temperature), 1e-2)
-    # generate_kwargs = dict(
-    #     temperature=temperature,
-    #     max_new_tokens=item.max_new_tokens,
-    #     top_p=float(item.top_p),
-    #     repetition_penalty=item.repetition_penalty,
-    #     do_sample=True,
-    #     seed=42,
-    # )
 
     formatted_prompt = format_prompt(f"{SYSTEM_PROMPT}, {item.prompt}", item.history)
+
+    input_token_length = len(formatted_prompt.split())
+    max_allowed_tokens = 1024 - input_token_length
+    max_new_tokens = min(item.max_new_tokens, max_allowed_tokens)
+
     stream = client.text_generation(
         formatted_prompt,
         temperature=temperature,
-        max_new_tokens=item.max_new_tokens,
+        max_new_tokens=max_new_tokens,
         top_p=float(item.top_p),
         repetition_penalty=item.repetition_penalty,
         do_sample=True,

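This hunk carries the actual fix: instead of forwarding item.max_new_tokens (default 1048) straight to the model, the new lines cap it so that prompt plus continuation fits gpt2-medium's 1024-token context window. The budget is estimated with a whitespace split, which is only a proxy for real subword tokens. A self-contained sketch of the same logic, with made-up sample values:

    def format_prompt(message, history):
        # Mirrors the updated format_prompt above.
        prompt = "<s>"
        for user_prompt, bot_response in history:
            prompt += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
        prompt += f"[INST] {message} [/INST]"
        return prompt

    history = [("Hook me in one line.", "A lighthouse blinked its last warning.")]
    formatted = format_prompt("Now continue the story.", history)
    # formatted == "<s>[INST] Hook me in one line. [/INST] A lighthouse blinked
    #               its last warning.</s> [INST] Now continue the story. [/INST]"

    # The cap from the diff: whitespace-separated words stand in for tokens.
    input_token_length = len(formatted.split())      # 19 "words" here
    max_allowed_tokens = 1024 - input_token_length   # room left in the context
    max_new_tokens = min(1048, max_allowed_tokens)   # request default was 1048

Counting with the actual tokenizer, e.g. AutoTokenizer.from_pretrained("openai-community/gpt2-medium") and len(tokenizer.encode(formatted)), would track the budget more precisely, since GPT-2's BPE usually yields more tokens than words; the whitespace version is what this commit ships.
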
@@ -56,10 +53,10 @@ def generate(item: Item):
         details=True,
         return_full_text=False,
     )
+
     output = "".join(response.token.text for response in stream)
-
-    output = re.sub(r"<[^>]+>", "", output)
-    output = re.sub(r"\s+", " ", output).strip() # Clean up extra whitespace
+    output = re.sub(r"<[^>]+>", "", output)
+    output = re.sub(r"\s+", " ", output).strip()
 
     return output
 

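The rewritten cleanup strips anything between angle brackets, which removes echoed <s>/</s> markers as well as HTML-ish tags, then collapses runs of whitespace. A quick illustration on a made-up raw output:

    import re

    raw = "<s> Once upon a time,\n\n  a keeper   lit the lamp.</s>"
    out = re.sub(r"<[^>]+>", "", raw)        # drop <...> spans: "<s>", "</s>"
    out = re.sub(r"\s+", " ", out).strip()   # collapse whitespace
    print(out)  # Once upon a time, a keeper lit the lamp.
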
@@ -68,7 +65,6 @@ def generate(item: Item):
 async def generate_text(
     prompt: str,
     history: List[str] = [],
-    # system_prompt: str = "You are a very powerful AI assistant.",
     temperature: float = 0.0,
     max_new_tokens: int = 1048,
     top_p: float = 0.15,

@@ -77,7 +73,6 @@ async def generate_text(
     item = Item(
         prompt=prompt,
         history=history,
-        # system_prompt=system_prompt,
         temperature=temperature,
         max_new_tokens=max_new_tokens,
         top_p=top_p,

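For completeness, a sketch of exercising the endpoint. The route decorator sits in lines this diff does not show, so the /generate path and the URL below are assumptions; with bare scalar parameters like these, FastAPI reads them from the query string by default.

    import requests

    # Hypothetical URL and path -- the actual @app.post(...) line is outside
    # the hunks shown in this commit.
    resp = requests.post(
        "https://your-space.hf.space/generate",
        params={
            "prompt": "Tell me a story about a lighthouse keeper.",
            "temperature": 0.7,
            "max_new_tokens": 256,
            "top_p": 0.15,
        },
    )
    print(resp.text)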