Spaces:
Paused
Paused
Upload 4 files
Browse files- Dockerfile +11 -0
- README.md +3 -4
- app.py +124 -0
- requirements.txt +7 -0
Dockerfile
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
FROM python:3.11-slim

WORKDIR /app
COPY . /app
RUN pip install --no-cache-dir -r requirements.txt

# Fixed: the app listens on 7860 (uvicorn.run(..., port=7860)) and the Space
# declares app_port: 7860 — the previous EXPOSE 5005 documented the wrong port.
EXPOSE 7860

# World-writable data dir so the Space's non-root runtime user can write to it.
# Merged into one RUN to avoid an extra image layer.
RUN mkdir /app/data && chmod -R 777 /app/data

CMD ["python", "app.py"]
README.md
CHANGED
|
@@ -1,10 +1,9 @@
|
|
| 1 |
---
|
| 2 |
title: Pollinations
|
| 3 |
-
emoji:
|
| 4 |
colorFrom: gray
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
|
|
|
| 8 |
---
|
| 9 |
-
|
| 10 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
title: Pollinations
|
| 3 |
+
emoji: 📈
|
| 4 |
colorFrom: gray
|
| 5 |
+
colorTo: green
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
| 8 |
+
app_port: 7860
|
| 9 |
---
|
|
|
|
|
|
app.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# main.py (v1.0.2 - With Sponsor Adblock)
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import httpx
|
| 5 |
+
import json
|
| 6 |
+
from fastapi import FastAPI, Request
|
| 7 |
+
from fastapi.responses import StreamingResponse
|
| 8 |
+
from pydantic import BaseModel, Field
|
| 9 |
+
from typing import List, Optional
|
| 10 |
+
from dotenv import load_dotenv
|
| 11 |
+
|
| 12 |
+
# 加载 .env 文件中的环境变量
|
| 13 |
+
load_dotenv()
|
| 14 |
+
|
# --- Pydantic models ---
class ChatMessage(BaseModel):
    """A single chat message in the OpenAI chat-completions wire format."""
    # Conventionally "system" / "user" / "assistant"; not validated here —
    # NOTE(review): presumably the upstream rejects other values, confirm.
    role: str
    # The message text.
    content: str
class OpenAIChatRequest(BaseModel):
    """Incoming request body mirroring the OpenAI /v1/chat/completions schema.

    Only the fields the proxy forwards are modelled. The original declared
    ``Field(4000, alias='max_tokens')``; the alias was identical to the field
    name and therefore a no-op, so it is dropped — the serialized dict
    (``by_alias=True`` in the route) is unchanged.
    """
    model: str
    messages: List[ChatMessage]
    # Default token budget forwarded upstream.
    max_tokens: Optional[int] = 4000
    temperature: Optional[float] = 0.7
    # The route forces stream=True before forwarding regardless of this value.
    stream: Optional[bool] = True
# --- FastAPI application instance ---
# description (runtime string, kept as-is) translates to: "A proxy that
# forwards OpenAI API requests to the Pollinations service, with built-in
# ad filtering."
app = FastAPI(
    title="Pollinations OpenAI-Compatible Proxy",
    description="一个将 OpenAI API 请求转发到 Pollinations 服务的代理,并内置广告过滤。",
    version="1.0.2"
)
# --- Required headers ---
# Browser-like headers impersonating Edge 126 on Windows. Origin/Referer point
# at ish.junioralive.in — NOTE(review): presumably the upstream gates requests
# on these values; confirm before changing them.
POLLINATIONS_HEADERS = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br, zstd',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Content-Type': 'application/json',
    'Origin': 'https://ish.junioralive.in',
    'Referer': 'https://ish.junioralive.in/',
    'Sec-Ch-Ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Microsoft Edge";v="126"',
    'Sec-Ch-Ua-Mobile': '?0',
    'Sec-Ch-Ua-Platform': '"Windows"',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'cross-site',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0',
}
# Upstream endpoint; overridable via the TARGET_URL environment variable
# (loaded from .env by load_dotenv() above, if present).
TARGET_URL = os.getenv("TARGET_URL", "https://text.pollinations.ai/openai")
# --- Core: streaming proxy function ---
async def stream_proxy(request_body: dict):
    """Async generator: POST *request_body* upstream and stream bytes back.

    Built-in "Sponsor" ad filtering: as soon as the marker is seen, the
    stream to the client is cut off and nothing further is forwarded.

    Fixed: the original only scanned each chunk in isolation, so a marker
    split across a chunk boundary ("Spon" + "sor") slipped through. A short
    tail of the previous chunk is now carried over into the next scan.

    Errors are reported to the client as an SSE ``data:`` event containing a
    JSON error object, mirroring the OpenAI error envelope.
    """
    marker = "Sponsor"
    carry = ""  # tail of the previous chunk, for boundary-spanning detection
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                TARGET_URL,
                json=request_body,
                headers=POLLINATIONS_HEADERS,
                timeout=120.0
            ) as response:
                response.raise_for_status()

                # =================== ad-filtering logic ===================
                async for chunk in response.aiter_bytes():
                    # Decode for inspection only; errors='ignore' avoids
                    # failures on multi-byte sequences split across chunks.
                    chunk_str = chunk.decode('utf-8', errors='ignore')

                    # Scan the previous tail + this chunk so a marker that
                    # straddles the boundary is still caught.
                    if marker in carry + chunk_str:
                        print("Sponsor content detected. Stopping the stream to the client.")
                        # Ad detected: stop forwarding anything further.
                        break

                    # Keep just enough tail to detect a split marker next time.
                    carry = chunk_str[-(len(marker) - 1):]

                    # Clean chunk: forward the original bytes untouched.
                    yield chunk
                # ==========================================================

        except httpx.HTTPStatusError as e:
            # The body of a streamed error response must be read explicitly
            # before .text is available.
            await e.response.aread()
            error_details = {
                "error": {
                    "message": f"Upstream API error: {e.response.status_code}",
                    "type": "upstream_error",
                    "details": e.response.text
                }
            }
            error_message = f"data: {json.dumps(error_details)}\n\n"
            yield error_message.encode('utf-8')
            print(f"Error from upstream API: {e.response.status_code} - {e.response.text}")
        except Exception as e:
            error_details = {"error": {"message": f"An unexpected error occurred: {str(e)}", "type": "proxy_error"}}
            error_message = f"data: {json.dumps(error_details)}\n\n"
            yield error_message.encode('utf-8')
            print(f"An unexpected error occurred: {e}")
# --- FastAPI routes ---
@app.post("/v1/chat/completions")
async def chat_completions_proxy(payload: OpenAIChatRequest):
    """Proxy an OpenAI-style chat-completions request upstream as an SSE stream."""
    # pydantic v2 renamed .dict() to .model_dump() and deprecated the former;
    # requirements.txt does not pin a pydantic major version, so support both.
    if hasattr(payload, "model_dump"):
        request_body_dict = payload.model_dump(by_alias=True)
    else:
        request_body_dict = payload.dict(by_alias=True)
    # The proxy always consumes the upstream as a stream, regardless of the
    # client's own `stream` flag.
    request_body_dict['stream'] = True
    print(f"Forwarding request for model '{payload.model}' to {TARGET_URL}")
    return StreamingResponse(
        stream_proxy(request_body_dict),
        media_type="text/event-stream"
    )
@app.get("/")
def read_root():
    """Root endpoint: simple liveness/usage message."""
    info = "Pollinations OpenAI-Compatible Proxy is running. Use the /v1/chat/completions endpoint."
    return {"message": info}
# --- Server entry point ---
if __name__ == "__main__":
    # Imported lazily: uvicorn is only needed when running as a script.
    import uvicorn

    print("Starting server...")
    print(f"Forwarding requests to: {TARGET_URL}")
    # Fixed: the banner previously advertised port 8000, but the server
    # actually listens on 7860 (the Hugging Face Spaces app_port).
    print("OpenAI compatible endpoint available at: http://127.0.0.1:7860/v1/chat/completions")
    uvicorn.run(app, host="0.0.0.0", port=7860)
requirements.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# requirements.txt
|
| 2 |
+
|
| 3 |
+
fastapi
|
| 4 |
+
uvicorn[standard]
|
| 5 |
+
httpx
|
| 6 |
+
pydantic
|
| 7 |
+
python-dotenv
|