import replicate
import os
import gradio as gr
# SECURITY: never commit a real API token. Use setdefault so a genuine
# REPLICATE_API_TOKEN already exported in the environment is not clobbered
# by this placeholder (the original assignment overwrote it unconditionally).
os.environ.setdefault('REPLICATE_API_TOKEN', 'YOUR_REPLICATE_API_TOKEN')

# Pinned model reference: owner/name followed by an immutable version hash.
replicate_url = 'replicate/llama70b-v2-chat:2d19859030ff705a87c746f7e96eea03aefb71f166725aee39692f1476566d48'
def ask_llama2(user_prompt, system_prompt=None):
    """Send a prompt to the Llama-2 70B chat model on Replicate.

    Args:
        user_prompt: The user's message to the model.
        system_prompt: Optional system instructions prepended to the user
            prompt; defaults to the standard safe-assistant preamble, so
            existing single-argument callers behave exactly as before.

    Returns:
        The model's reply joined into a single whitespace-stripped string.
    """
    if system_prompt is None:
        system_prompt = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
    # NOTE(review): the system prompt is inlined into the plain prompt text;
    # the Replicate llama-2 endpoint also accepts a separate `system_prompt`
    # input, which may be preferable — confirm against the model schema.
    prompt = f"{system_prompt} {user_prompt}"
    # replicate.run yields the reply as an iterator of string chunks;
    # join them directly — no intermediate list comprehension needed.
    output = replicate.run(
        replicate_url,
        input={'prompt': prompt},
    )
    return ''.join(output).strip()
# Build the web UI. Notes on changes from the original:
#   - gr.inputs.Textbox / gr.outputs.Textbox belong to the pre-3.x Gradio API
#     that was removed; gr.Textbox is used for both input and output.
#   - `button` is not a valid gr.Interface argument (it raises a TypeError on
#     modern Gradio); the default Submit button is shown automatically.
#   - live=True would call the paid Replicate API on every keystroke, which
#     contradicts the intended submit-to-send flow, so it is dropped.
iface = gr.Interface(
    fn=ask_llama2,
    inputs=gr.Textbox(label="Type here...", lines=5),
    outputs=gr.Textbox(),
    title="Hugging Face Chatbot",
    description="Interact with the Hugging Face Llama v2 chatbot model.",
)

if __name__ == "__main__":
    # Launch the local Gradio server only when run as a script,
    # not when this module is imported.
    iface.launch()