Spaces:
Sleeping
Sleeping
File size: 1,961 Bytes
41e59ea 43c17bb 41e59ea 9c59b08 41e59ea 43c17bb 41e59ea 43c17bb 41e59ea 95f27f8 9de8106 41e59ea 95f27f8 9de8106 43c17bb 41e59ea |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 |
import gradio as gr
from huggingface_hub import InferenceClient
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Serverless Inference API client pinned to the Zephyr-7B chat model;
# all completions in this app are streamed through this single client.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
):
    """Stream a chat completion for *message* given the running conversation.

    Args:
        message: The latest user prompt.
        history: Prior turns as ``(user_text, assistant_text)`` pairs, as
            supplied by ``gr.ChatInterface``.
        system_message: System prompt prepended to the conversation.

    Yields:
        The accumulated assistant response after each streamed token, so the
        UI can render the reply incrementally.
    """
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        # Either side of a pair may be empty (e.g. a pending turn); skip it.
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: loop variable renamed from `message` — the original shadowed the
    # function parameter of the same name.
    for chunk in client.chat_completion(
        messages,
        max_tokens=1024,
        stream=True,
        temperature=0.5,
        top_p=0.1,
    ):
        token = chunk.choices[0].delta.content
        # Final/keep-alive stream chunks can carry `content=None`; guarding
        # avoids `response += None` raising TypeError mid-stream.
        if token:
            response += token
            yield response
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Page-level styling: centered single-column layout on a warm gradient.
_PAGE_CSS = """
body {
    margin: 0;
    font-family: Arial, sans-serif;
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh;
    background: linear-gradient(135deg, #f8a8a8, #fdd9a8);
    overflow: hidden;
}
"""

# Chat UI wired to `respond`; the system prompt is exposed as an editable
# additional input so users can tweak the bot's research-gap instructions.
demo = gr.ChatInterface(
    respond,
    title="Research Gap Bot",
    examples=[["найди то, что еще не изучено"]],
    additional_inputs=[
        gr.Textbox(
            value="if the prompt is 'найди то, что ещё не изучено', ask 'в какой области лингвистики или литературоведения?' after getting the answer, find the research gap using the given information and using both Google Scholar and Academia.edu and then formalize it as a bulleted list with 2-8 points",
            label="System message",
        ),
    ],
    css=_PAGE_CSS,
)

if __name__ == "__main__":
    demo.launch()
|