KarthikMuraliM committed on
Commit
fe18036
·
1 Parent(s): bf1736b

Added aipipe integration

Browse files
Files changed (2) hide show
  1. app.py +31 -24
  2. requirements.txt +2 -1
app.py CHANGED
@@ -2,42 +2,49 @@
2
  from fastapi import FastAPI, File, UploadFile, Form
3
  from typing import List
4
  import os
 
5
 
6
app = FastAPI()  # FastAPI application instance served by uvicorn below
7
 
8
- # A simple root endpoint to confirm the app is running
 
 
 
 
9
@app.get("/")
async def read_root():
    """Liveness probe: confirm the API is up and reachable."""
    return {"message": "Data Analyst Agent API is running!"}
12
 
13
- # Our main API endpoint for data analysis tasks
14
@app.post("/api/")
async def analyze_data(
    questions_file: UploadFile = File(..., alias="questions.txt"),
    files: List[UploadFile] = File([], alias="files"),  # catches any other files if sent
):
    """Receive questions.txt plus optional attachments and acknowledge them.

    Placeholder endpoint: the core analysis logic (LLM processing of
    *questions_text* and the attached files) is not implemented yet; for
    now the response only echoes what was received.
    """
    # Read and decode the uploaded questions.txt.
    raw = await questions_file.read()
    questions_text = raw.decode("utf-8")

    # Acknowledge the questions first, then each additional upload in turn.
    # The attachments would typically be saved to a temporary location
    # (e.g. /tmp/<filename>); for now receipt is only acknowledged.
    details = [f"Received questions:\n{questions_text}"]
    details.extend(
        f"Received file: {upload.filename} (Content-Type: {upload.content_type})"
        for upload in files
    )

    return {"status": "Processing initiated", "details": details}
40
-
41
if __name__ == "__main__":
    # Local/dev entry point: serve the app directly with uvicorn.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)  # Hugging Face Spaces typically use port 7860
 
 
 
 
2
  from fastapi import FastAPI, File, UploadFile, Form
3
  from typing import List
4
  import os
5
+ import openai # Import the openai library
6
 
7
app = FastAPI()  # FastAPI application instance for this service

# Initialize the OpenAI client.
# It will automatically pick up the OPENAI_API_KEY and OPENAI_BASE_URL
# from the environment variables (our Hugging Face Secrets).
# NOTE(review): openai.OpenAI() raises at import time if OPENAI_API_KEY is
# unset — confirm the deployment always provides the secret.
client = openai.OpenAI()
14
@app.get("/")
async def read_root():
    # Simple liveness probe confirming the service is running.
    return {"message": "Data Analyst Agent API is running!"}
17
 
 
18
@app.post("/api/")
async def analyze_data(
    questions_file: UploadFile = File(..., alias="questions.txt"),
    files: List[UploadFile] = File([], alias="files"),
):
    """Accept a questions.txt upload (plus optional attachments) and ask the
    LLM to acknowledge the questions.

    Returns a JSON object containing the raw question text, the names of any
    attachments received, and the LLM's reply (or the error message if the
    call failed — the endpoint is deliberately best-effort).
    """
    # Read and decode questions.txt. errors="replace" keeps a non-UTF-8
    # upload from surfacing as an unhandled 500 (UnicodeDecodeError).
    questions_content = await questions_file.read()
    questions_text = questions_content.decode("utf-8", errors="replace")

    # Acknowledge any extra uploads so clients can confirm receipt.
    # (Their contents are not processed yet — that is future core logic.)
    received_files = [upload.filename for upload in files]

    # --- LLM INTEGRATION ---
    llm_response_content = "No response from LLM."  # Default message
    try:
        # Simple acknowledgement prompt for the LLM.
        completion = client.chat.completions.create(
            model="gpt-5-nano",  # You can try other models like "mistralai/mistral-7b-instruct"
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": f"Here are the questions I need answered:\n\n{questions_text}\n\nCan you acknowledge that you received them?"},
            ],
        )
        llm_response_content = completion.choices[0].message.content
    except Exception as e:
        # Best-effort: surface the failure in the response body so the
        # caller can see why the LLM call did not succeed.
        llm_response_content = f"Error calling LLM: {e}"
    # --- END LLM INTEGRATION ---

    # Structured response; "received_files" is a new, backward-compatible key.
    return {
        "status": "Processing complete",
        "received_questions": questions_text,
        "received_files": received_files,
        "llm_acknowledgement": llm_response_content,
    }
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
  fastapi
2
  uvicorn
3
- python-multipart # Required for FastAPI to handle file uploads
 
 
1
  fastapi
2
  uvicorn
3
+ python-multipart # Required for FastAPI to handle file uploads
4
+ openai