azan888 committed on
Commit
67f7c9a
·
1 Parent(s): 369d6c2

dev changes

Browse files
Files changed (3) hide show
  1. __pycache__/app.cpython-313.pyc +0 -0
  2. app.py +9 -6
  3. requirements.txt +2 -0
__pycache__/app.cpython-313.pyc ADDED
Binary file (1.43 kB). View file
 
app.py CHANGED
@@ -1,8 +1,11 @@
1
  import gradio as gr
2
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
  import torch
 
 
4
 
5
- model_id = "mistralai/Mistral-7B-Instruct-v0.1"
 
 
6
 
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
8
  model = AutoModelForCausalLM.from_pretrained(
@@ -14,12 +17,12 @@ model = AutoModelForCausalLM.from_pretrained(
14
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
15
 
16
 
17
def chat(message, history=None):
    """Generate a single chat reply for *message* using the text-generation pipeline.

    Args:
        message: The user's input text.
        history: Prior conversation turns supplied by gr.ChatInterface; unused
            here. Defaults to None instead of a mutable list so the default is
            not shared across calls.

    Returns:
        The model's reply with the prompt scaffolding stripped and
        surrounding whitespace removed.
    """
    if history is None:
        history = []
    # Mistral-Instruct expects the [INST] ... [/INST] wrapper around user turns.
    prompt = f"<s>[INST] {message} [/INST]"
    response = generator(prompt, max_new_tokens=256, temperature=0.7)
    # The pipeline echoes the prompt at the start of generated_text; remove it.
    reply = response[0]["generated_text"].replace(prompt, "")
    return reply.strip()
22
 
23
 
24
- iface = gr.ChatInterface(fn=chat, title="Mistral Chatbot")
25
- iface.launch()
 
1
  import gradio as gr
 
2
  import torch
3
+ from fastapi import FastAPI
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
5
 
6
+ app = FastAPI()
7
+
8
+ model_id = "microsoft/DialoGPT-medium"
9
 
10
  tokenizer = AutoTokenizer.from_pretrained(model_id)
11
  model = AutoModelForCausalLM.from_pretrained(
 
17
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
18
 
19
 
20
def chat(message, history=None):
    """Generate a single chat reply for *message* using the text-generation pipeline.

    Args:
        message: The user's input text.
        history: Prior conversation turns supplied by gr.ChatInterface; unused
            here. Defaults to None instead of a mutable list so the default is
            not shared across calls.

    Returns:
        The model's reply with the echoed prompt stripped and surrounding
        whitespace removed.
    """
    if history is None:
        history = []
    prompt = f"{message}"
    # do_sample=True is required for temperature to take effect; without it the
    # pipeline decodes greedily and the temperature argument is ignored.
    response = generator(prompt, max_new_tokens=256, temperature=0.7, do_sample=True)
    # The pipeline echoes the prompt at the start of generated_text; remove it.
    reply = response[0]["generated_text"].replace(prompt, "")
    return reply.strip()
25
 
26
 
27
+ iface = gr.ChatInterface(fn=chat, title="DialoGPT Chatbot")
28
+ app = gr.mount_gradio_app(app, iface, path="/")
requirements.txt CHANGED
@@ -2,3 +2,5 @@ huggingface_hub==0.25.2
2
  transformers
3
  torch
4
  gradio
 
 
 
2
  transformers
3
  torch
4
  gradio
5
+ uvicorn
6
+ fastapi