gael1130 committed on
Commit
9f9ab63
·
verified ·
1 Parent(s): f6cc29b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -53
app.py CHANGED
@@ -1,68 +1,91 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
 
 
 
 
 
 
 
 
 
4
 
5
# Read the API key from the environment (None when the secret is not configured).
api_key = os.getenv("secret_test_key")
# SECURITY fix: never print the raw secret -- report only whether it was found.
print("secret_test_key is set" if api_key else "secret_test_key is NOT set")

# For more information on `huggingface_hub` Inference API support, see:
# https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
 
 
 
 
 
13
 
14
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, replaying *history* first.

    Yields the accumulated response text after each streamed token so the
    Gradio ChatInterface can render incrementally.
    """
    # Build the OpenAI-style message list: system prompt, prior turns, new turn.
    chat = [{"role": "system", "content": system_message}]
    for user_text, assistant_text in history:
        if user_text:
            chat.append({"role": "user", "content": user_text})
        if assistant_text:
            chat.append({"role": "assistant", "content": assistant_text})
    chat.append({"role": "user", "content": message})

    partial = ""
    stream = client.chat_completion(
        chat,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    )
    for chunk in stream:
        # Each streamed chunk carries one delta token; accumulate and re-yield.
        partial += chunk.choices[0].delta.content
        yield partial
45
-
46
-
47
- """
48
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
49
- """
50
- demo = gr.ChatInterface(
51
- respond,
52
- additional_inputs=[
53
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
54
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
55
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
56
- gr.Slider(
57
- minimum=0.1,
58
- maximum=1.0,
59
- value=0.95,
60
- step=0.05,
61
- label="Top-p (nucleus sampling)",
62
- ),
63
  ],
 
 
 
 
 
 
 
64
  )
65
 
 
 
66
 
67
- if __name__ == "__main__":
68
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
+ import pandas as pd
5
+ from langchain_openai import ChatOpenAI
6
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
7
+ from langchain_experimental.tools import PythonAstREPLTool
8
+ from langchain_core.output_parsers.openai_tools import JsonOutputKeyToolsParser
9
+ from langchain_core.output_parsers import StrOutputParser
10
+ from langchain_core.messages import ToolMessage
11
+ from langchain_core.runnables import RunnablePassthrough
12
+ from operator import itemgetter
13
 
14
# Read the Together.ai API key from the environment (None when the secret is unset).
api_key = os.getenv("secret_test_key")
# SECURITY fix: do not print the raw secret to stdout/logs; report only presence.
print("secret_test_key is set" if api_key else "secret_test_key is NOT set")
 
17
def load_model(api_key):
    """Return a ChatOpenAI client pointed at Together.ai's OpenAI-compatible API."""
    llm = ChatOpenAI(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        base_url="https://api.together.xyz/v1",
        api_key=api_key,
    )
    return llm
23
def create_chain(df, llm):
    """Build a two-pass LCEL chain that answers questions about *df*.

    Pass 1: the LLM is forced (via tool_choice) to emit pandas code, which is
    executed by a Python REPL tool that has `df` in scope.
    Pass 2: the same prompt is re-run with the tool call and its output
    appended as chat history, and the plain LLM writes the final answer.

    Returns a runnable producing {"tool_output": ..., "response": ...}.
    """
    # REPL tool whose local namespace exposes the dataframe as `df`.
    tool = PythonAstREPLTool(locals={"df": df})
    # tool_choice=tool.name forces the model to always call the REPL tool.
    llm_with_tools = llm.bind_tools([tool], tool_choice=tool.name)
    # Extracts just the code argument of the first (only) tool call.
    parser = JsonOutputKeyToolsParser(key_name=tool.name, first_tool_only=True)

    system = f"""You have access to a pandas dataframe `df`. Here is the output of `df.head().to_markdown()`:
```
{df.head().to_markdown()}
```
Given a user question, write the Python code to answer it. Don't assume you have access to any libraries other than built-in Python ones and pandas.
Respond directly to the question once you have enough information to answer it."""

    prompt = ChatPromptTemplate.from_messages([
        ("system", system),
        ("human", "{question}"),
        # Empty on pass 1; on pass 2 it carries the tool call + tool result.
        MessagesPlaceholder("chat_history", optional=True),
    ])

    def _get_chat_history(x):
        # Pair the model's tool-call message with a ToolMessage holding the
        # REPL output so the second LLM pass can see both sides of the exchange.
        ai_msg = x["ai_msg"]
        tool_call_id = x["ai_msg"].additional_kwargs["tool_calls"][0]["id"]
        tool_msg = ToolMessage(tool_call_id=tool_call_id, content=str(x["tool_output"]))
        return [ai_msg, tool_msg]

    chain = (
        # 1) ask the tool-bound LLM for code, 2) run the code in the REPL,
        # 3) splice the exchange back in as chat history, 4) ask the plain
        # LLM for the final natural-language answer.
        RunnablePassthrough.assign(ai_msg=prompt | llm_with_tools)
        .assign(tool_output=itemgetter("ai_msg") | parser | tool)
        .assign(chat_history=_get_chat_history)
        .assign(response=prompt | llm | StrOutputParser())
        .pick(["tool_output", "response"])
    )

    return chain
56
 
57
def process_query(csv_file, api_key, query):
    """Answer a natural-language *query* about an uploaded CSV file.

    Parameters
    ----------
    csv_file : Gradio file object (exposes a ``.name`` path) or None
    api_key : str or None -- Together.ai API key
    query : str -- the user's question

    Returns a display string: either the analysis result or a human-readable
    error message (Gradio renders returned strings directly).
    """
    # Guard both a missing field (None) and a blank/whitespace-only key;
    # the original bare `api_key.strip()` raised AttributeError on None.
    if not api_key or not api_key.strip():
        return "Please provide an API key"
    # Clearer than the "'NoneType' object has no attribute 'name'" error the
    # bare `csv_file.name` access produced when no file was uploaded.
    if csv_file is None:
        return "Please upload a CSV file"

    try:
        df = pd.read_csv(csv_file.name)
        llm = load_model(api_key)
        chain = create_chain(df, llm)
        result = chain.invoke({"question": query})
        return f"Analysis Result:\n{result['response']}\n\nTechnical Details:\n{result['tool_output']}"
    except Exception as e:
        # Broad catch is deliberate: any failure (bad CSV, auth, LLM error)
        # must surface as text in the UI rather than crash the app.
        return f"Error: {str(e)}"
69
 
70
# Gradio front end: CSV upload + masked API key + free-text question -> text result.
_inputs = [
    gr.File(label="Upload CSV File"),
    gr.Textbox(label="Together.ai API Key", type="password"),
    gr.Textbox(label="Your Question"),
]
_examples = [
    ["data/titanic_dataset.csv", "your-api-key-here", "Which columns have missing values?"],
    ["data/titanic_dataset.csv", "your-api-key-here", "What's the correlation between age and fare?"],
]
iface = gr.Interface(
    fn=process_query,
    inputs=_inputs,
    outputs=gr.Textbox(label="Result"),
    title="CSV Analysis Assistant",
    description="Upload a CSV file and ask questions about it using natural language.",
    examples=_examples,
)

# Launch unconditionally so the app starts under Hugging Face Spaces.
iface.launch()