from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse, JSONResponse
import os
import logging
from fastapi.middleware.cors import CORSMiddleware
from openai import AsyncOpenAI

# Import your custom modules
from chainlit.auth import create_jwt
from dotenv import load_dotenv
import chainlit as cl
import uvicorn
import asyncio
from socket import gaierror

# Load environment variables from .env file
load_dotenv()

# Initialize logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # Module-level logger named after this file
app = FastAPI()

# CORS middleware setup
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Specify domains or use ["*"] for open access
    allow_credentials=True,
    allow_methods=["*"],  # Specify methods or use ["*"] for all methods
    allow_headers=["*"],  # Specify headers or use ["*"] for all headers
)

client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])

settings = {
    "model": "gpt-3.5-turbo",
    "temperature": 0.7,
    "max_tokens": 500,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0,
}
# Assuming your static files are in a directory named 'static'.
# app.mount("/static", StaticFiles(directory="static"), name="static")
# app.mount("/assets", StaticFiles(directory="static/assets"), name="assets")


@app.get("/")
def read_root():
    # Root endpoint; returns a simple JSON message (static file serving is currently disabled above).
    return {"message": "Hello from the FastAPI API!"}


@app.get("/api")
def read_api():
    # This endpoint simply returns a JSON message.
    return {"message": "Hello from the FastAPI API!"}


@app.get("/custom-auth")
def custom_auth():
    # Verify the user's identity with custom logic.
    token = create_jwt(cl.User(identifier="Test User"))
    logger.info("Custom auth token generated.")
    return JSONResponse({"token": token})
@cl.on_chat_start
async def on_chat_start():
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )
    await cl.Message(content="Connected to Chainlit!").send()
    logger.info("Chat started with Chainlit.")


@cl.on_message
async def on_message(message: cl.Message):
    message_history = cl.user_session.get("message_history")
    message_history.append({"role": "user", "content": message.content})
    msg = cl.Message(content="")
    await msg.send()
    try:
        stream = await client.chat.completions.create(
            messages=message_history, stream=True, **settings
        )
        async for part in stream:
            if token := part.choices[0].delta.content or "":
                await msg.stream_token(token)
    except gaierror as e:
        logger.error(f"Network error during OpenAI API call: {e}")
        msg.content = "Sorry, there was a network error."
        await msg.update()
        return
    except asyncio.TimeoutError:
        logger.error("Timeout error during OpenAI API call.")
        msg.content = "Sorry, the request timed out."
        await msg.update()
        return
    except Exception as e:
        logger.error(f"Unexpected error during OpenAI API call: {e}")
        msg.content = "Sorry, an unexpected error occurred."
        await msg.update()
        return
    message_history.append({"role": "assistant", "content": msg.content})
    await msg.update()
    logger.info("Message processed and response sent.")