premalt committed on
Commit 4f3e7e2 · 1 Parent(s): 361f347

roll back to gpt-2

Files changed (1):
  1. main.py +25 -32
main.py CHANGED
@@ -5,54 +5,49 @@ from fastapi import FastAPI
 from pydantic import BaseModel
 from huggingface_hub import InferenceClient
 from typing import List
-import tiktoken
+
 
 app = FastAPI()
-client = InferenceClient("openai-community/gpt2-medium")
-tokenizer = tiktoken.get_encoding("gpt2")
+client = InferenceClient("openai-community/gpt2")
 
-SYSTEM_PROMPT = (
-    "You are a very powerful AI to generate interesting stories for short-form content "
-    "consumption. Make sure to hook the reader's attention in the first few seconds. "
-    "Make sure to be engaging and creative in your responses."
-)
+SYSTEM_PROMPT = "You are a very powerful AI to generate interesting stories for short-form content consumption. Make sure to hook the readers attention in the first few seconds. Make sure to be engaging and creative in your responses."
 
-MAX_CONTEXT_LENGTH = 1024
 
 class Item(BaseModel):
     prompt: str
     history: List[str] = []
+    # system_prompt: str = "You are a very powerful AI assistant."
     temperature: float = 0.0
     max_new_tokens: int = 1024
     top_p: float = 0.15
     repetition_penalty: float = 1.0
 
+
 def format_prompt(message, history):
     prompt = "<s>"
     for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
-def count_tokens(text):
-    return len(tokenizer.encode(text))
 
 def generate(item: Item):
     temperature = max(float(item.temperature), 1e-2)
+    # generate_kwargs = dict(
+    #     temperature=temperature,
+    #     max_new_tokens=item.max_new_tokens,
+    #     top_p=float(item.top_p),
+    #     repetition_penalty=item.repetition_penalty,
+    #     do_sample=True,
+    #     seed=42,
+    # )
 
     formatted_prompt = format_prompt(f"{SYSTEM_PROMPT}, {item.prompt}", item.history)
-
-    input_token_length = count_tokens(formatted_prompt)
-    max_allowed_tokens = MAX_CONTEXT_LENGTH - input_token_length
-    max_new_tokens = min(item.max_new_tokens, max_allowed_tokens)
-
-    if max_new_tokens <= 0:
-        raise ValueError("The input is too long. Please reduce the prompt or history length.")
-
     stream = client.text_generation(
         formatted_prompt,
         temperature=temperature,
-        max_new_tokens=max_new_tokens,
+        max_new_tokens=item.max_new_tokens,
         top_p=float(item.top_p),
         repetition_penalty=item.repetition_penalty,
         do_sample=True,
@@ -61,36 +56,34 @@ def generate(item: Item):
         details=True,
         return_full_text=False,
     )
-
     output = "".join(response.token.text for response in stream)
-    output = re.sub(r"<[^>]+>", "", output)
-    output = re.sub(r"\s+", " ", output).strip()
+    # Remove unwanted sequences or patterns (e.g., <s>, [/INST], etc.)
+    output = re.sub(r"<[^>]+>", "", output)  # Remove any HTML-like tags
+    output = re.sub(r"\s+", " ", output).strip()  # Clean up extra whitespace
 
     return output
 
+
 @app.get("/generate/")
 async def generate_text(
     prompt: str,
     history: List[str] = [],
+    # system_prompt: str = "You are a very powerful AI assistant.",
     temperature: float = 0.0,
-    max_new_tokens: int = 1024,
+    max_new_tokens: int = 1048,
     top_p: float = 0.15,
     repetition_penalty: float = 1.0,
 ):
     item = Item(
         prompt=prompt,
         history=history,
+        # system_prompt=system_prompt,
         temperature=temperature,
         max_new_tokens=max_new_tokens,
         top_p=top_p,
         repetition_penalty=repetition_penalty,
     )
 
-    try:
-        response = await asyncio.to_thread(generate, item)
-        return {"response": response}
-    except ValueError as e:
-        return {"error": str(e)}
+    response = await asyncio.to_thread(generate, item)
 
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    return {"response": response}
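Note that the rollback drops the tiktoken-based context guard along with the model swap. For reference, a minimal sketch of the removed check, reconstructed from the deleted lines (assuming tiktoken's "gpt2" encoding matches the serving model's tokenizer):

# Sketch of the context guard this commit removes, reconstructed from the
# deleted lines. Assumes tiktoken's "gpt2" encoding matches the tokenizer
# of the serving model; gpt2's full context window is 1024 tokens.
import tiktoken

MAX_CONTEXT_LENGTH = 1024
tokenizer = tiktoken.get_encoding("gpt2")

def clamp_max_new_tokens(formatted_prompt: str, requested: int) -> int:
    input_token_length = len(tokenizer.encode(formatted_prompt))
    max_allowed_tokens = MAX_CONTEXT_LENGTH - input_token_length
    if max_allowed_tokens <= 0:
        raise ValueError("The input is too long. Please reduce the prompt or history length.")
    return min(requested, max_allowed_tokens)

With this guard and the surrounding try/except both removed, over-long prompts now surface as backend errors from client.text_generation rather than as a handled ValueError.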
 
5
  from pydantic import BaseModel
6
  from huggingface_hub import InferenceClient
7
  from typing import List
8
+
9
 
10
  app = FastAPI()
11
+ client = InferenceClient("openai-community/gpt2")
 
12
 
13
+ SYSTEM_PROMPT = "You are a very powerful AI to generate interesting stories for short-form content consumption. Make sure to hook the readers attention in the first few seconds. Make sure to be engaging and creative in your responses."
 
 
 
 
14
 
 
15
 
16
  class Item(BaseModel):
17
  prompt: str
18
  history: List[str] = []
19
+ # system_prompt: str = "You are a very powerful AI assistant."
20
  temperature: float = 0.0
21
  max_new_tokens: int = 1024
22
  top_p: float = 0.15
23
  repetition_penalty: float = 1.0
24
 
25
+
26
  def format_prompt(message, history):
27
  prompt = "<s>"
28
  for user_prompt, bot_response in history:
29
+ prompt += f"[INST] {user_prompt} [/INST]"
30
+ prompt += f" {bot_response}</s> "
31
  prompt += f"[INST] {message} [/INST]"
32
  return prompt
33
 
 
 
34
 
35
  def generate(item: Item):
36
  temperature = max(float(item.temperature), 1e-2)
37
+ # generate_kwargs = dict(
38
+ # temperature=temperature,
39
+ # max_new_tokens=item.max_new_tokens,
40
+ # top_p=float(item.top_p),
41
+ # repetition_penalty=item.repetition_penalty,
42
+ # do_sample=True,
43
+ # seed=42,
44
+ # )
45
 
46
  formatted_prompt = format_prompt(f"{SYSTEM_PROMPT}, {item.prompt}", item.history)
 
 
 
 
 
 
 
 
47
  stream = client.text_generation(
48
  formatted_prompt,
49
  temperature=temperature,
50
+ max_new_tokens=item.max_new_tokens,
51
  top_p=float(item.top_p),
52
  repetition_penalty=item.repetition_penalty,
53
  do_sample=True,
 
56
  details=True,
57
  return_full_text=False,
58
  )
 
59
  output = "".join(response.token.text for response in stream)
60
+ # Remove unwanted sequences or patterns (e.g., <s>, [/INST], etc.)
61
+ output = re.sub(r"<[^>]+>", "", output) # Remove any HTML-like tags
62
+ output = re.sub(r"\s+", " ", output).strip() # Clean up extra whitespace
63
 
64
  return output
65
 
66
+
67
  @app.get("/generate/")
68
  async def generate_text(
69
  prompt: str,
70
  history: List[str] = [],
71
+ # system_prompt: str = "You are a very powerful AI assistant.",
72
  temperature: float = 0.0,
73
+ max_new_tokens: int = 1048,
74
  top_p: float = 0.15,
75
  repetition_penalty: float = 1.0,
76
  ):
77
  item = Item(
78
  prompt=prompt,
79
  history=history,
80
+ # system_prompt=system_prompt,
81
  temperature=temperature,
82
  max_new_tokens=max_new_tokens,
83
  top_p=top_p,
84
  repetition_penalty=repetition_penalty,
85
  )
86
 
87
+ response = await asyncio.to_thread(generate, item)
 
 
 
 
88
 
89
+ return {"response": response}
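After the rollback the service still exposes a single GET /generate/ route. A minimal client sketch follows; the host and port are assumptions, since the commit also removes the in-file uvicorn.run entry point (the app would be started externally, e.g. uvicorn main:app). The history parameter is omitted here because it is declared as List[str] while format_prompt unpacks (user_prompt, bot_response) pairs.

# Hypothetical client call; http://localhost:8000 is an assumption, not part
# of the commit (the in-file entry point was removed in this rollback).
import requests

resp = requests.get(
    "http://localhost:8000/generate/",
    params={
        "prompt": "Tell a 30-second story about a lighthouse keeper.",
        "temperature": 0.7,
        "max_new_tokens": 128,  # keep well inside gpt2's 1024-token context
        "top_p": 0.15,
        "repetition_penalty": 1.0,
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])

Passing max_new_tokens explicitly is prudent: the endpoint's new default of 1048 exceeds gpt2's 1024-token context window.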