Update app.py
app.py CHANGED
@@ -1,3 +1,101 @@
+# # app.py
+# import os
+# import logging
+# from fastapi import FastAPI, HTTPException
+# from fastapi.responses import JSONResponse
+# from pydantic import BaseModel
+# from huggingface_hub import InferenceClient
+# from typing import Optional
+
+# # Set up logging
+# logging.basicConfig(level=logging.INFO)
+# logger = logging.getLogger(__name__)
+
+# # Initialize FastAPI app
+# app = FastAPI(
+#     title="LLM Chat API",
+#     description="API for getting chat responses from Llama model",
+#     version="1.0.0"
+# )
+
+# class ChatRequest(BaseModel):
+#     text: str
+
+# class ChatResponse(BaseModel):
+#     response: str
+#     status: str
+
+# def llm_chat_response(text: str) -> str:
+#     try:
+#         HF_TOKEN = os.getenv("HF_TOKEN")
+#         logger.info("Checking HF_TOKEN...")
+#         if not HF_TOKEN:
+#             logger.error("HF_TOKEN not found in environment variables")
+#             raise HTTPException(status_code=500, detail="HF_TOKEN not configured")
+
+#         logger.info("Initializing InferenceClient...")
+#         client = InferenceClient(
+#             provider="sambanova",
+#             api_key=HF_TOKEN
+#         )
+
+#         messages = [
+#             {
+#                 "role": "user",
+#                 "content": [
+#                     {
+#                         "type": "text",
+#                         "text": text + " describe in one line only"
+#                     }
+#                 ]
+#             }
+#         ]
+
+#         logger.info("Sending request to model...")
+#         completion = client.chat.completions.create(
+#             model="meta-llama/Llama-3.2-11B-Vision-Instruct",
+#             messages=messages,
+#             max_tokens=500
+#         )
+#         return completion.choices[0].message['content']
+
+#     except Exception as e:
+#         logger.error(f"Error in llm_chat_response: {str(e)}")
+#         raise HTTPException(status_code=500, detail=str(e))
+
+# @app.post("/chat", response_model=ChatResponse)
+# async def chat(request: ChatRequest):
+#     try:
+#         logger.info(f"Received chat request with text: {request.text}")
+#         response = llm_chat_response(request.text)
+#         return ChatResponse(response=response, status="success")
+#     except HTTPException as he:
+#         logger.error(f"HTTP Exception in chat endpoint: {str(he)}")
+#         raise he
+#     except Exception as e:
+#         logger.error(f"Unexpected error in chat endpoint: {str(e)}")
+#         raise HTTPException(status_code=500, detail=str(e))
+
+# @app.get("/")
+# async def root():
+#     return {"message": "Welcome to the LLM Chat API. Use POST /chat endpoint to get responses."}
+
+# @app.exception_handler(404)
+# async def not_found_handler(request, exc):
+#     return JSONResponse(
+#         status_code=404,
+#         content={"error": "Endpoint not found. Please use POST /chat for queries."}
+#     )
+
+# @app.exception_handler(405)
+# async def method_not_allowed_handler(request, exc):
+#     return JSONResponse(
+#         status_code=405,
+#         content={"error": "Method not allowed. Please check the API documentation."}
+#     )
+
+
+
 # app.py
 import os
 import logging
@@ -14,43 +112,57 @@ logger = logging.getLogger(__name__)
 # Initialize FastAPI app
 app = FastAPI(
     title="LLM Chat API",
-    description="API for getting chat responses from Llama model",
-    version="1.0.0"
+    description="API for getting chat responses from Llama model with image support",
+    version="1.1.0"
 )
 
 class ChatRequest(BaseModel):
     text: str
+    image_url: Optional[str] = None
 
 class ChatResponse(BaseModel):
     response: str
     status: str
 
-def llm_chat_response(text: str) -> str:
+def llm_chat_response(text: str, image_url: Optional[str] = None) -> str:
     try:
         HF_TOKEN = os.getenv("HF_TOKEN")
         logger.info("Checking HF_TOKEN...")
         if not HF_TOKEN:
             logger.error("HF_TOKEN not found in environment variables")
             raise HTTPException(status_code=500, detail="HF_TOKEN not configured")
-
+
         logger.info("Initializing InferenceClient...")
         client = InferenceClient(
             provider="sambanova",
            api_key=HF_TOKEN
         )
-
+
+        # Prepare content list for the message
+        content = [
+            {
+                "type": "text",
+                "text": text + " describe in one line only"
+            }
+        ]
+
+        # Add image to content if provided
+        if image_url:
+            logger.info(f"Adding image URL to request: {image_url}")
+            content.append({
+                "type": "image_url",
+                "image_url": {
+                    "url": image_url
+                }
+            })
+
         messages = [
             {
                 "role": "user",
-                "content": [
-                    {
-                        "type": "text",
-                        "text": text + " describe in one line only"
-                    }
-                ]
+                "content": content
             }
         ]
-
+
         logger.info("Sending request to model...")
         completion = client.chat.completions.create(
             model="meta-llama/Llama-3.2-11B-Vision-Instruct",
@@ -58,7 +170,6 @@ def llm_chat_response(text: str) -> str:
             max_tokens=500
         )
         return completion.choices[0].message['content']
-
     except Exception as e:
         logger.error(f"Error in llm_chat_response: {str(e)}")
         raise HTTPException(status_code=500, detail=str(e))
@@ -67,7 +178,10 @@ def llm_chat_response(text: str) -> str:
 async def chat(request: ChatRequest):
     try:
         logger.info(f"Received chat request with text: {request.text}")
-        response = llm_chat_response(request.text)
+        if request.image_url:
+            logger.info(f"Image URL included: {request.image_url}")
+
+        response = llm_chat_response(request.text, request.image_url)
         return ChatResponse(response=response, status="success")
     except HTTPException as he:
         logger.error(f"HTTP Exception in chat endpoint: {str(he)}")
@@ -78,7 +192,7 @@ async def chat(request: ChatRequest):
 
 @app.get("/")
 async def root():
-    return {"message": "Welcome to the LLM Chat API. Use POST /chat endpoint to get responses."}
+    return {"message": "Welcome to the LLM Chat API with image support. Use POST /chat endpoint to get responses."}
 
 @app.exception_handler(404)
 async def not_found_handler(request, exc):
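
The core of the change is the new content list: when an image_url is supplied, the user message is assembled in the OpenAI-style multimodal format that InferenceClient.chat.completions.create accepts. As an illustrative sketch (the image URL below is a placeholder, not part of the commit), a text-plus-image request produces a payload of this shape:

# Illustrative payload shape for a text + image request.
# The URL is a placeholder, not part of the commit.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this picture? describe in one line only"},
            {"type": "image_url", "image_url": {"url": "https://example.com/photo.jpg"}},
        ],
    }
]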