Pamudu13 committed on
Commit
7def11a
·
verified ·
1 Parent(s): 79d7b2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -116
app.py CHANGED
@@ -1,122 +1,69 @@
1
- from fastapi import FastAPI, HTTPException
2
- from fastapi.responses import StreamingResponse
3
- from fastapi.middleware.cors import CORSMiddleware
4
- import aiohttp
5
- import json
6
- import time
7
  import os
8
- from pydantic import BaseModel
9
- from apscheduler.schedulers.background import BackgroundScheduler
10
 
11
- # Initialize FastAPI application
12
- app = FastAPI()
13
-
14
- # CORS settings
15
- app.add_middleware(
16
- CORSMiddleware,
17
- allow_origins=["*"],
18
- allow_credentials=True,
19
- allow_methods=["*"],
20
- allow_headers=["*"],
21
- )
22
-
23
- # Set the OpenAI API key
24
- OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", None)
25
-
26
- # Time-Limited Infinite Cache
27
- cache = {}
28
- CACHE_DURATION = 120
29
-
30
- # Function to clean up expired cache entries
31
- def cleanup_cache():
32
- current_time = time.time()
33
- for key, (value, timestamp) in list(cache.items()):
34
- if current_time - timestamp > CACHE_DURATION:
35
- del cache[key]
36
-
37
- # Initialize and start the scheduler
38
- scheduler = BackgroundScheduler()
39
- scheduler.add_job(cleanup_cache, 'interval', seconds=60) # Run cleanup every 60 seconds
40
- scheduler.start()
41
-
42
- class StreamTextRequest(BaseModel):
43
- query: str
44
- history: str = "[]"
45
- model: str = "gpt-3.5-turbo" # Default model can be changed as needed
46
-
47
- @app.post("/stream_text")
48
- async def stream_text(request: StreamTextRequest):
49
- current_time = time.time()
50
- cache_key = (request.query, request.history, request.model)
51
-
52
- # Check if the request is in the cache and not expired
53
- if cache_key in cache:
54
- cached_response, timestamp = cache[cache_key]
55
- return StreamingResponse(iter([f"{cached_response}"]), media_type='text/event-stream')
56
-
57
- # Define system message for OpenAI
58
- system_message = """You are an AI language assistant. Your sole task is to correct the grammar, spelling, and structure of the sentences provided to you. You must not change the meaning of the sentences, and you should focus only on making them grammatically correct, concise, and clear. Do not add any additional information or provide explanations unless specifically asked. Your responses should be limited to the corrected version of the sentence."""
59
-
60
- messages = [{'role': 'system', 'content': system_message}]
61
- messages.extend(json.loads(request.history)) # Load history from the request
62
- messages.append({'role': 'user', 'content': request.query})
63
 
64
- data = {'model': request.model, 'messages': messages, 'stream': True}
65
-
66
- async def stream_response():
67
- async with aiohttp.ClientSession() as session:
68
- async with session.post(
69
- 'https://api.openai.com/v1/chat/completions',
70
- headers={
71
- 'Authorization': f'Bearer {OPENAI_API_KEY}',
72
- 'Content-Type': 'application/json'
73
- },
74
- json=data
75
- ) as response:
76
- if response.status != 200:
77
- raise HTTPException(status_code=response.status, detail="Error fetching AI response")
78
-
79
- response_content = ""
80
- async for line in response.content:
81
- line = line.decode('utf-8').strip()
82
- if line.startswith('data:'):
83
- json_data = line[5:].strip() # Remove 'data: ' prefix
84
- if json_data:
85
- try:
86
- parsed_data = json.loads(json_data)
87
- content = parsed_data.get("choices", [{}])[0].get("delta", {}).get("content", '')
88
- if content:
89
- content = content.replace("\n", " ")
90
- response_content += f"data: {content}\n\n"
91
- yield f"data: {content}\n\n"
92
- except json.JSONDecodeError as e:
93
- print(f"Error decoding JSON: {e}")
94
- yield f"data: Error decoding JSON\n\n"
95
-
96
- # Cache the full response
97
- cache[cache_key] = (response_content, current_time)
98
-
99
- return StreamingResponse(stream_response(), media_type='text/event-stream')
100
 
101
- # Serve static files
102
- from starlette.responses import FileResponse
103
 
104
- @app.get("/script1.js")
105
- async def script1_js():
106
- return FileResponse("script1.js")
107
-
108
- @app.get("/script2.js")
109
- async def script2_js():
110
- return FileResponse("script2.js")
111
-
112
- @app.get("/styles.css")
113
- async def styles_css():
114
- return FileResponse("styles.css")
115
-
116
- @app.get("/")
117
- async def read_index():
118
- return FileResponse('index.html')
119
 
120
- if __name__ == "__main__":
121
- import uvicorn
122
- uvicorn.run(app, host="0.0.0.0", port=7068, reload=True)
 
1
from flask import Flask, request, jsonify
import openai

import os

# Initialize Flask application
app = Flask(__name__)

# Set the base URL for the API endpoint and API key from environment variables
# NOTE(review): api_base points at a third-party proxy ("api.pawan.krd"), not
# api.openai.com — confirm this endpoint is intentional and trusted.
openai.api_base = "https://api.pawan.krd/unfiltered/v1"
openai.api_key = os.getenv("OPENAI_API_KEY")

# Module-level conversation state, mutated by the /chat handler.
# NOTE(review): these globals are shared by every request, so all clients
# share one conversation and one transcript file — confirm single-user use.
history = []
first_message = True   # True until the first /chat message has named the log file
file_name = "Default"  # transcript file stem; overwritten from the first message
chatbot_name = "Lunar Ai"  # name injected into the system prompt and transcript
word_limit = 2000      # max total words kept in history before trimming

# Introduce the chatbot with its name; seeded as the permanent system message.
system_message = f"You are a helpful assistant named {chatbot_name}. You only answer coding-related questions. If a non-coding related question is asked, respond with 'I am not programmed to do that.'"
history.append({"role": "system", "content": system_message})
21
+
22
def get_word_count(text):
    """Return the number of whitespace-separated words in *text*."""
    words = text.split()
    return len(words)
24
+
25
def trim_history_to_word_limit(history, word_limit):
    """Drop the oldest non-system messages from *history* (in place) until
    its total word count is at most *word_limit*.

    history[0] (the system prompt) is never removed. Messages are removed
    oldest-first, as a user message followed by its assistant reply when
    one is present.

    Fixes over the original:
    - guard ``len(history) > 1`` so an over-limit history consisting of only
      the system message no longer raises IndexError on ``history[1]``;
    - break out if an iteration removes nothing (unexpected role at index 1),
      which previously looped forever because total_words never decreased.
    """
    # Word counting mirrors get_word_count(): whitespace-separated tokens.
    total_words = sum(len(msg['content'].split()) for msg in history)
    while total_words > word_limit and len(history) > 1:
        removed_any = False
        if history[1]['role'] == 'user':
            dropped = history.pop(1)
            total_words -= len(dropped['content'].split())
            removed_any = True
        if len(history) > 1 and history[1]['role'] == 'assistant':
            dropped = history.pop(1)
            total_words -= len(dropped['content'].split())
            removed_any = True
        if not removed_any:
            # Nothing removable at index 1 — bail out instead of spinning.
            break
34
+
35
@app.route('/chat', methods=['POST'])
def chat_bot():
    """POST /chat — append the user's message to the shared history, ask the
    OpenAI chat API for a reply, log the exchange to a text file, and return
    JSON ``{"response": ..., "history": ...}`` (or ``{"error": ...}`` / 500).

    Fixes over the original:
    - ``request.get_json(silent=True) or {}`` instead of ``request.json``,
      which is None for non-JSON bodies and crashed on ``.get``;
    - the transcript file name is sanitized: the first message is untrusted
      input, so characters like ``/`` or ``..`` could previously escape the
      working directory or produce an invalid path.
    """
    global first_message, file_name
    data = request.get_json(silent=True) or {}
    message = data.get('message', '')

    if first_message:
        # Name the transcript after the first message, keeping only
        # filename-safe characters from the untrusted input.
        raw = message[:20]
        safe = "".join(ch for ch in raw if ch.isalnum() or ch in " _-").strip()
        file_name = safe or "Default"
        first_message = False

    history.append({"role": "user", "content": message})

    try:
        chat_completion = openai.ChatCompletion.create(
            model="gpt-4o",
            messages=history
        )
        response = chat_completion.choices[0].message.content
    except Exception as e:
        # NOTE(review): the failed user message stays in history (as in the
        # original) — confirm whether it should be popped on error.
        return jsonify({"error": str(e)}), 500

    history.append({"role": "assistant", "content": response})

    # Trim history to keep within the word limit
    trim_history_to_word_limit(history, word_limit)

    # Append the exchange to the per-conversation transcript file.
    with open(f"{file_name}.txt", 'a', encoding='utf-8') as f:
        f.write(f"User: {message}\n")
        f.write(f"{chatbot_name}: {response}\n")

    return jsonify({"response": response, "history": history})
 
 
 
 
 
 
 
 
 
67
 
68
if __name__ == '__main__':
    # Start the development server, reachable from any interface on port 5000.
    app.run(port=5000, host='0.0.0.0')