Spaces:
Sleeping
Sleeping
Upload app.py
Browse files
app.py
CHANGED
|
@@ -1,87 +1,64 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
-
import urllib.request
|
| 4 |
-
import xml.etree.ElementTree as ET
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
|
|
|
| 8 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
| 9 |
|
| 10 |
|
| 11 |
-
|
| 12 |
-
# Search arXiv for studies relevant to a free-text query.
def fetch_arxiv_summary(query, sort_by="relevance", sort_order="descending", max_results=20):
    """Query the arXiv Atom API and return formatted study summaries.

    Args:
        query: Free-text search term, matched against all fields.
        sort_by: arXiv sort criterion (e.g. "relevance", "submittedDate").
        sort_order: "ascending" or "descending".
        max_results: Maximum number of entries to request.

    Returns:
        A list of strings of the form "Titel: ...\nLink: ...\nZusammenfassung: ...",
        or a single-element list containing a (German) "no results" or error message.
        Never raises: all failures are reported via the returned list.
    """
    # Explicit import: `import urllib.request` binding `urllib.parse` as well is a
    # CPython implementation detail and must not be relied upon.
    import urllib.parse

    url = (
        f'http://export.arxiv.org/api/query?search_query=all:{urllib.parse.quote(query)}'
        f'&start=0&max_results={max_results}&sortBy={sort_by}&sortOrder={sort_order}'
    )
    try:
        # Timeout keeps the (Gradio) worker from hanging forever on a stalled
        # request; the `with` block closes the HTTP response (the original leaked it).
        with urllib.request.urlopen(url, timeout=30) as data:
            xml_data = data.read().decode("utf-8")
        root = ET.fromstring(xml_data)

        atom = "{http://www.w3.org/2005/Atom}"  # Atom XML namespace prefix
        summaries = []
        for entry in root.findall(f".//{atom}entry"):
            title = entry.find(f"{atom}title")
            link_element = entry.find(f"{atom}link[@rel='alternate']")
            summary = entry.find(f"{atom}summary")
            link = link_element.attrib.get("href") if link_element is not None else "Kein Link verfügbar"
            # Skip entries missing either a title or an abstract.
            if summary is not None and title is not None:
                summaries.append(f"Titel: {title.text.strip()}\nLink: {link}\nZusammenfassung: {summary.text.strip()}")
        return summaries if summaries else ["Keine relevanten Studien gefunden."]
    except Exception as e:
        # Best-effort API: surface the failure as a message rather than crashing the chat.
        return [f"Fehler beim Abrufen der Studie: {str(e)}"]
|
| 31 |
-
|
| 32 |
-
# Chatbot-Logik mit arXiv-Integration
|
| 33 |
def respond(
|
| 34 |
message,
|
| 35 |
history: list[tuple[str, str]],
|
| 36 |
system_message,
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
query
|
| 41 |
):
|
| 42 |
-
|
| 43 |
-
study_summaries = fetch_arxiv_summary(query, sort_by, sort_order, max_results)
|
| 44 |
-
study_info = "\n".join(study_summaries)
|
| 45 |
|
| 46 |
-
# Nachrichten vorbereiten
|
| 47 |
-
messages = [{"role": "system", "content": f"{system_message} You are a highly capable assistant specializing in parsing and summarizing study abstracts. Your task is to analyze the provided study data, extract relevant information, and offer concise summaries. Always include the study's title and a direct link, ensuring clarity and accessibility.\n"}]
|
| 48 |
for val in history:
|
| 49 |
if val[0]:
|
| 50 |
messages.append({"role": "user", "content": val[0]})
|
| 51 |
if val[1]:
|
| 52 |
messages.append({"role": "assistant", "content": val[1]})
|
| 53 |
|
| 54 |
-
messages.append({"role": "user", "content":
|
| 55 |
|
| 56 |
-
# Antwort vom Modell generieren
|
| 57 |
response = ""
|
|
|
|
| 58 |
for message in client.chat_completion(
|
| 59 |
messages,
|
| 60 |
-
|
|
|
|
|
|
|
|
|
|
| 61 |
):
|
| 62 |
token = message.choices[0].delta.content
|
|
|
|
| 63 |
response += token
|
| 64 |
yield response
|
| 65 |
|
| 66 |
-
# Gradio-Interface mit zusätzlichen Eingaben
|
| 67 |
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
|
| 83 |
-
],
|
| 84 |
-
)
|
| 85 |
|
| 86 |
if __name__ == "__main__":
|
| 87 |
-
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
|
|
|
|
|
|
| 3 |
|
| 4 |
+
"""
|
| 5 |
+
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
| 6 |
+
"""
|
| 7 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
| 8 |
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message* given the conversation *history*.

    Args:
        message: The user's latest input.
        history: Prior (user, assistant) turns; empty strings/None entries are skipped.
        system_message: System prompt placed first in the message list.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.

    Yields:
        The accumulated response text after each streamed chunk.
    """
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # `chunk` (not `message`): the original loop variable shadowed the parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas can carry content=None (e.g. role-only or final chunks);
        # guard so `response += token` cannot raise TypeError mid-stream.
        if token:
            response += token
        yield response
|
| 41 |
|
|
|
|
| 42 |
|
| 43 |
+
"""
|
| 44 |
+
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
|
| 45 |
+
"""
|
| 46 |
+
demo = gr.ChatInterface(
|
| 47 |
+
respond,
|
| 48 |
+
additional_inputs=[
|
| 49 |
+
gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
|
| 50 |
+
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
|
| 51 |
+
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
|
| 52 |
+
gr.Slider(
|
| 53 |
+
minimum=0.1,
|
| 54 |
+
maximum=1.0,
|
| 55 |
+
value=0.95,
|
| 56 |
+
step=0.05,
|
| 57 |
+
label="Top-p (nucleus sampling)",
|
| 58 |
+
),
|
| 59 |
+
],
|
| 60 |
+
)
|
| 61 |
|
|
|
|
|
|
|
| 62 |
|
| 63 |
# Launch the Gradio app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()
|