|
|
import json
import logging
import os
import time

import httpx
from flask import Flask, request, jsonify
from flask_cors import CORS
|
|
|
|
|
|
# Log at INFO so token retrieval and request progress show up in server output.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)
# Allow cross-origin requests so browser-based frontends can call the API.
CORS(app)

# Friendly model keys accepted by the API, mapped to the model identifiers
# sent to DuckDuckGo's chat endpoint.
MODELS = {
    "gpt-4o-mini": "gpt-4o-mini",
    "claude-3-haiku": "claude-3-haiku",
    "llama": "llama-3.3-70b",
    "mistral": "mixtral-8x7b",
    "o3-mini": "o3-mini"
}

# Fallback used when a request omits the "model" field or sends an unknown key.
DEFAULT_MODEL = "gpt-4o-mini"

# DuckDuckGo AI chat endpoints: the status endpoint issues the VQD session
# token (read from response headers); the chat endpoint answers prompts.
STATUS_URL = "https://duckduckgo.com/duckchat/v1/status"
CHAT_URL = "https://duckduckgo.com/duckchat/v1/chat"
|
|
|
|
|
|
def get_vqd_token():
    """Fetch a VQD session token from DuckDuckGo's status endpoint.

    The token is returned in a response header whose name has varied over
    time, so several candidate header names are checked.  Up to three
    attempts are made, with a 1-second pause between attempts.

    Returns:
        str | None: The VQD token, or None if every attempt failed.
    """
    # Browser-like headers; "x-vqd-accept: 1" asks the server to issue a token.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:139.0) Gecko/20100101 Firefox/139.0",
        "Accept": "text/event-stream",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Referer": "https://duckduckgo.com/",
        "x-vqd-accept": "1",
        "Cache-Control": "no-store",
        "Connection": "keep-alive",
        "Cookie": "dcm=3",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Pragma": "no-cache",
        "TE": "trailers"
    }

    for attempt in range(3):
        try:
            logger.info("VQD token attempt %d/3", attempt + 1)
            response = httpx.get(STATUS_URL, headers=headers, timeout=20.0,
                                 follow_redirects=True)
            logger.info("Status response code: %s", response.status_code)

            # The token header name has changed over time; try known variants.
            vqd = (response.headers.get("x-vqd-4") or
                   response.headers.get("X-VQD-4") or
                   response.headers.get("x-vqd-hash-1"))
            if vqd:
                logger.info("VQD token obtained: %s...", vqd[:20])
                return vqd

            logger.warning("No VQD in headers. Available headers: %s",
                           list(response.headers.keys()))
        except httpx.HTTPError as e:
            # Narrowed from `except Exception`: only transport/protocol
            # errors from httpx are retried; programming errors propagate.
            logger.error("VQD attempt %d error: %s", attempt + 1, e)

        # Back off before the next attempt.  The original only slept on
        # exceptions; a missing token header now also waits before retrying.
        if attempt < 2:
            time.sleep(1)

    return None
|
|
|
|
|
|
def _extract_sse_message(text):
    """Concatenate the 'message' fields of an SSE (text/event-stream) body.

    Lines look like ``data: {...json...}`` and the stream ends with
    ``data: [DONE]``.  Chunks that are not valid JSON objects, or whose
    'message' field is not a string, are skipped.
    """
    full_message = ""
    for line in text.split('\n'):
        if not line.startswith('data: '):
            continue
        data = line[6:].strip()
        if not data or data == '[DONE]':
            continue
        try:
            parsed = json.loads(data)
        except ValueError:
            # Malformed chunk: skip it rather than abort the whole stream.
            # (Replaces the original bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            continue
        chunk = parsed.get('message') if isinstance(parsed, dict) else None
        if isinstance(chunk, str):
            full_message += chunk
    return full_message


def chat_with_ddg(prompt, model, vqd):
    """Send a chat request to DuckDuckGo and return the assembled reply text.

    Args:
        prompt: The user's message.
        model: Model identifier accepted by DuckDuckGo (a MODELS value).
        vqd: Session token obtained via get_vqd_token().

    Raises:
        Exception: If DuckDuckGo responds with a non-200 status, or on any
            transport error (logged, then re-raised for the route handler).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:139.0) Gecko/20100101 Firefox/139.0",
        "Accept": "text/event-stream",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Content-Type": "application/json",
        "Referer": "https://duckduckgo.com/",
        "x-vqd-4": vqd,
        "Origin": "https://duckduckgo.com",
        "Cache-Control": "no-store",
        "Connection": "keep-alive",
        "Cookie": "dcm=3",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Pragma": "no-cache",
        "TE": "trailers"
    }

    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}]
    }

    try:
        response = httpx.post(CHAT_URL, headers=headers, json=payload, timeout=30.0)

        if response.status_code != 200:
            raise Exception(f"DuckDuckGo returned status {response.status_code}")

        # The reply arrives as an SSE stream; stitch the message chunks together.
        return _extract_sse_message(response.text)
    except Exception as e:
        logger.error("Chat error: %s", e)
        raise
|
|
|
|
|
|
@app.route('/')
def index():
    """Root endpoint: confirm the API is up and point callers at /api/chat."""
    body = {
        "status": "success",
        "text": "MyDuck AI API - POST to /api/chat"
    }
    return jsonify(body)
|
|
|
|
|
|
@app.route('/api/chat', methods=['GET'])
def chat_get():
    """GET usage hint: the chat endpoint only accepts POSTed JSON."""
    hint = "Use POST with {\"prompt\": \"text\", \"model\": \"gpt-4o-mini\"}"
    return jsonify({"status": "success", "text": hint})
|
|
|
|
|
|
@app.route('/api/chat', methods=['POST'])
def chat_post():
    """Handle a chat request.

    Expects a JSON body: {"prompt": "...", "model": "<key in MODELS>"}.
    Returns {"status": "success", "text": <reply>} on success, or
    {"status": "error", "text": <reason>} with a 400/500 status on failure.
    """
    try:
        # silent=True: a missing or non-JSON body yields None instead of a
        # framework-level 400 (or an AttributeError on None.get below), so
        # we can always return a consistent JSON error payload.
        data = request.get_json(silent=True) or {}

        prompt = data.get('prompt', '')
        model_key = data.get('model', DEFAULT_MODEL)

        # Reject absent, blank, or non-string prompts with a 400.  (The
        # original called .strip() on whatever arrived, so a non-string
        # prompt crashed into a 500.)
        if not isinstance(prompt, str) or not prompt.strip():
            return jsonify({
                "status": "error",
                "text": "Missing 'prompt' parameter"
            }), 400
        prompt = prompt.strip()

        # Unknown model keys silently fall back to the default model.
        model = MODELS.get(model_key, MODELS[DEFAULT_MODEL])

        logger.info("Request: model=%s, prompt=%s...", model, prompt[:30])

        vqd = get_vqd_token()
        if not vqd:
            return jsonify({
                "status": "error",
                "text": "Failed to get VQD token"
            }), 500

        result = chat_with_ddg(prompt, model, vqd)
        logger.info("Response length: %d", len(result))

        return jsonify({
            "status": "success",
            "text": result
        })

    except Exception as e:
        # Top-level boundary: convert any remaining failure into a JSON 500.
        logger.error("Error: %s", str(e))
        return jsonify({
            "status": "error",
            "text": str(e)
        }), 500
|
|
|
|
|
|
if __name__ == '__main__':
    # Bind on all interfaces; the port comes from the PORT environment
    # variable, defaulting to 7860 (the Hugging Face Spaces convention —
    # TODO confirm that is why 7860 was chosen).
    listen_port = int(os.environ.get('PORT', 7860))
    app.run(host='0.0.0.0', port=listen_port, debug=False)
|
|
|
|