mk1985 commited on
Commit
7a6cb9a
·
verified ·
1 Parent(s): 3c1bb06

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +135 -53
app.py CHANGED
@@ -1,70 +1,152 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
3
 
 
 
 
 
 
4
 
5
- def respond(
6
- message,
7
- history: list[dict[str, str]],
8
- system_message,
9
- max_tokens,
10
- temperature,
11
- top_p,
12
- hf_token: gr.OAuthToken,
13
- ):
14
- """
15
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
16
- """
17
- client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
 
18
 
19
- messages = [{"role": "system", "content": system_message}]
 
 
 
 
 
 
 
 
 
 
20
 
21
- messages.extend(history)
 
 
 
22
 
23
- messages.append({"role": "user", "content": message})
 
 
 
 
24
 
25
- response = ""
 
26
 
27
- for message in client.chat_completion(
28
- messages,
29
- max_tokens=max_tokens,
30
- stream=True,
31
- temperature=temperature,
32
- top_p=top_p,
33
- ):
34
- choices = message.choices
35
- token = ""
36
- if len(choices) and choices[0].delta.content:
37
- token = choices[0].delta.content
38
 
39
- response += token
40
- yield response
 
 
 
41
 
 
 
 
42
 
 
 
 
 
 
 
 
 
43
  """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- chatbot = gr.ChatInterface(
47
- respond,
48
- type="messages",
49
- additional_inputs=[
50
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
51
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
52
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
53
- gr.Slider(
54
- minimum=0.1,
55
- maximum=1.0,
56
- value=0.95,
57
- step=0.05,
58
- label="Top-p (nucleus sampling)",
59
- ),
60
- ],
61
  )
 
62
 
63
- with gr.Blocks() as demo:
64
- with gr.Sidebar():
65
- gr.LoginButton()
66
- chatbot.render()
 
 
 
 
 
 
 
 
 
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
  if __name__ == "__main__":
70
- demo.launch()
 
1
  import gradio as gr
2
+ from langchain_community.vectorstores import Chroma
3
+ from langchain_core.documents import Document
4
+ from langchain_openai import ChatOpenAI
5
+ from langchain_community.embeddings import HuggingFaceEmbeddings
6
+ from langchain_core.prompts import ChatPromptTemplate
7
+ from langchain_core.output_parsers import StrOutputParser
8
+ from langchain_core.runnables import RunnablePassthrough
9
+ import json
10
+ import os
11
 
12
# -------------------------------
# CONFIGURATION
# -------------------------------
# For Hugging Face Spaces, set this as a Secret in your Space settings.
# NOTE(review): if the OPENAI_API_KEY secret is unset, this writes the
# placeholder "sk-your-key-here" into the environment, so downstream
# OpenAI calls will fail with an auth error rather than a clear
# "missing key" message — confirm this is the intended fallback.
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "sk-your-key-here")
17
 
18
+ # -------------------------------
19
+ # LOAD DATA AND BUILD RAG CHAIN
20
+ # -------------------------------
21
def clean_metadata(metadata):
    """Return *metadata* with every value coerced to a ChromaDB-safe scalar.

    List values are flattened into a comma-separated string; str/int/float/
    bool values and None pass through unchanged; anything else is
    stringified with ``str()``.
    """
    def _coerce(value):
        # Lists are the common offender: join their items into one string.
        if isinstance(value, list):
            return ", ".join(str(item) for item in value)
        # Plain scalars (and None) are accepted as-is.
        if value is None or isinstance(value, (str, int, float, bool)):
            return value
        # Fallback: stringify anything exotic (tuples, dicts, objects, ...).
        return str(value)

    return {key: _coerce(value) for key, value in metadata.items()}
32
 
33
print("Loading documents...")
docs = []
# Each line of the JSONL file is one record shaped like
# {"text": ..., "metadata": {...}}; "metadata" may be absent.
with open("helpwildlife_rag.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        entry = json.loads(line)
        metadata = entry.get("metadata", {})
        docs.append(Document(
            page_content=entry["text"],
            # Lists etc. are flattened to scalars for ChromaDB compatibility.
            metadata=clean_metadata(metadata)
        ))
print(f"✓ Loaded {len(docs)} documents")

print("Loading embedding model...")
# Local sentence-transformers model: no API key needed for embeddings.
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)

print("Building vector store...")
# Presumably different Chroma wrapper versions disagree on the keyword
# name for the embeddings object; retry with the alternate spelling when
# the first call raises TypeError. (In-memory store: rebuilt on restart.)
try:
    vectorstore = Chroma.from_documents(docs, embedding=embeddings)
except TypeError:
    vectorstore = Chroma.from_documents(docs, embedding_function=embeddings)

# Retrieve the 3 most similar chunks per query.
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
print("✓ Vector store created")
58
 
59
# System prompt for the RAG chain. {context} receives the retrieved chunks
# and {question} the user's query (filled in by ChatPromptTemplate).
prompt_template = """
You are a compassionate wildlife advice assistant.
Your only source of information is the text provided in the CONTEXT section below.
Do not use outside knowledge, guesses, or general reasoning.
If the context does not contain enough information to answer fully,
say "I'm not sure based on the available guidance."

Never suggest killing or harming any animal.
Always emphasise that taking an injured or distressed animal to a local wildlife rescue
or 24/7 animal hospital is the safest and most humane course of action.
Discourage people from trying to handle or treat the animal themselves,
and note that general veterinary clinics may euthanise wild animals unnecessarily.

Adopt a voice of empathy and respect for all life, consistent with vegan principles:
there are no animals that are pests, vermin, or unworthy of care.
Respond clearly and calmly, with brief, practical, step-by-step guidance suitable for the public.

---------------------
CONTEXT (from HelpWildlife data file):
{context}
---------------------
QUESTION:
{question}

YOUR ANSWER:
"""
85
+
86
prompt = ChatPromptTemplate.from_template(prompt_template)
# temperature=0 keeps the advice deterministic and grounded in the context.
# NOTE(review): recent langchain_openai prefers `model=` over the
# `model_name=` alias — confirm the installed version still accepts it.
llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)
88
+
89
def format_docs(docs):
    """Join the retrieved documents' bodies into one blank-line-separated
    context string for the prompt."""
    bodies = [doc.page_content for doc in docs]
    return "\n\n".join(bodies)
91
+
92
# LCEL pipeline: the dict step runs in parallel — "context" retrieves and
# flattens the top-k chunks while "question" passes the raw query through —
# then the prompt is rendered, sent to the LLM, and parsed to a plain string.
rag_chain = (
    {
        "context": retriever | format_docs,
        "question": RunnablePassthrough()
    }
    | prompt
    | llm
    | StrOutputParser()
)
print("✓ RAG chain ready")
102
 
103
+ # -------------------------------
104
+ # GRADIO INTERFACE
105
+ # -------------------------------
106
def ask_wildlife_question(question):
    """Answer a wildlife question via the RAG chain.

    Returns an input prompt when *question* is blank, the chain's answer on
    success, or a human-readable error string when the chain raises (e.g. a
    missing or invalid OpenAI API key).
    """
    if not question.strip():
        return "Please enter a question about wildlife."

    try:
        answer = rag_chain.invoke(question)
    except Exception as exc:  # surface the failure in the UI instead of crashing
        return f"Error: {str(exc)}\n\nPlease check your OpenAI API key is set correctly."
    return answer
116
 
117
# Example questions shown as one-click prompts under the input box.
examples = [
    "I found a baby hedgehog out during the day. What should I do?",
    "There's a bird that seems injured in my garden. How can I help?",
    "I found a baby bird on the ground. Should I put it back in the nest?",
    "A fox is limping in my backyard. What should I do?",
    "How do I know if a wild animal needs help?"
]

# Create Gradio interface: one text box in, one text box out, wired to the
# RAG question handler above.
# NOTE(review): `allow_flagging` is deprecated/removed in newer Gradio
# releases in favour of `flagging_mode` — confirm against the pinned version.
demo = gr.Interface(
    fn=ask_wildlife_question,
    inputs=gr.Textbox(
        label="Ask a Wildlife Question",
        placeholder="e.g., I found a baby bird on the ground...",
        lines=3
    ),
    outputs=gr.Textbox(
        label="Compassionate Advice",
        lines=10
    ),
    title="🦔 Wildlife Rescue Assistant",
    description="""
    Ask questions about helping wildlife in distress. This assistant provides compassionate,
    evidence-based advice prioritizing the wellbeing of all animals.

    ⚠️ **Important**: This tool provides general guidance only. For urgent situations,
    contact your local wildlife rescue or 24/7 animal hospital immediately.
    """,
    examples=examples,
    theme=gr.themes.Soft(),
    allow_flagging="never"
)
150
 
151
if __name__ == "__main__":
    # Start the Gradio server when run as a script.
    demo.launch()