Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -23,16 +23,17 @@
 # iface.launch(share=True)
 
 import gradio as gr
-from transformers import
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
-
-
+model_name = "abacusai/Smaug-72B-v0.1"
+model = AutoModelForCausalLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-# Define a function to generate answer for the given question
 def generate_answer(question):
-
-
-
+    inputs = tokenizer.encode("Question: " + question, return_tensors="pt")
+    outputs = model.generate(inputs, max_length=100, num_return_sequences=1, early_stopping=True, do_sample=True)
+    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return answer
 
 iface = gr.Interface(
     fn=generate_answer,
@@ -44,3 +45,21 @@ iface = gr.Interface(
 
 iface.launch(share=True) # Deploy the interface
 
+
+# from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# model_name = "abacusai/Smaug-72B-v0.1"
+# model = AutoModelForCausalLM.from_pretrained(model_name)
+# tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+
+
+
+
+# def generate_answer(question):
+#     inputs = tokenizer.encode("Question: " + question, return_tensors="pt")
+#     outputs = model.generate(inputs, max_length=100, num_return_sequences=1, early_stopping=True, do_sample=True)
+#     answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
+#     return answer
+
+
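A note on the "Runtime error" status above: Smaug-72B-v0.1 has roughly 72 billion parameters, so AutoModelForCausalLM.from_pretrained(model_name) in the default float32 precision needs on the order of 280 GB of memory, far more than a standard Space provides. Below is a minimal sketch of a memory-friendlier loading pattern; it is not part of this commit, device_map="auto" assumes the accelerate package is installed, and even in float16 this checkpoint still needs roughly 140 GB, so matching hardware (or a smaller model) is still required.

# Sketch only, not part of the commit above: memory-aware loading.
# Assumes `accelerate` is installed for device_map="auto"; even in
# float16 a 72B checkpoint needs ~140 GB, so size the hardware accordingly.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "abacusai/Smaug-72B-v0.1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # halves memory versus the default float32
    device_map="auto",          # lets accelerate place/offload weights across devices
)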
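Two details of the committed model.generate call are worth flagging. early_stopping only applies to beam search, so with do_sample=True and the default num_beams=1 it has no effect (newer transformers versions emit a warning to that effect). And max_length=100 caps prompt plus completion together, so a long question leaves little room for the answer. A sketch of the same function with those two adjustments; the parameter values are illustrative assumptions, not the Space's settings.

# Sketch only: a variant of the committed generate_answer.
# max_new_tokens bounds just the completion; early_stopping is dropped
# because it has no effect outside beam search. Values are illustrative.
def generate_answer(question):
    inputs = tokenizer.encode("Question: " + question, return_tensors="pt")
    outputs = model.generate(
        inputs,
        max_new_tokens=100,     # limit on generated tokens only, not prompt + answer
        do_sample=True,         # sample instead of greedy decoding
        num_return_sequences=1,
    )
    # Slice off the prompt tokens so only the model's answer is returned
    answer = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    return answer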
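The hunk context cuts off at fn=generate_answer, so the remaining gr.Interface arguments are not visible in this diff. For orientation, here is a minimal sketch of how a text-in/text-out Q&A function is typically wired up in Gradio; the textbox labels and title are assumptions, not the Space's actual values.

# Sketch only: typical Gradio wiring for a Q&A function. The real
# arguments are outside the hunk context; labels and title are assumptions.
import gradio as gr

iface = gr.Interface(
    fn=generate_answer,                   # the function defined in app.py
    inputs=gr.Textbox(label="Question"),  # free-text question from the user
    outputs=gr.Textbox(label="Answer"),   # generated answer shown back
    title="Smaug-72B Q&A",                # hypothetical title
)

iface.launch(share=True)  # share=True exposes a temporary public URL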