Upload 5 files
Browse files- Dockerfile +9 -0
- README.md +10 -0
- gitattributes +35 -0
- main (1).py +144 -0
- requirements.txt +8 -0
Dockerfile
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.9

# Set the working directory before copying, so the COPY destination is
# explicit rather than relying on the image's default workdir.
# (The original ran COPY first and WORKDIR after; it only worked because
# python:3.9's default workdir is already "/".)
WORKDIR /

COPY . .

RUN pip install --no-cache-dir --upgrade -r /requirements.txt

# Hugging Face Spaces (sdk: docker) expects the app to listen on port 7860.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Gemiline
|
| 3 |
+
emoji: 🌖
|
| 4 |
+
colorFrom: red
|
| 5 |
+
colorTo: red
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: true
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
main (1).py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json, os
|
| 2 |
+
import gradio as gr
|
| 3 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 4 |
+
from fastapi import FastAPI, Request, Header, BackgroundTasks, HTTPException, status
|
| 5 |
+
import google.generativeai as genai
|
| 6 |
+
|
| 7 |
+
from linebot import LineBotApi, WebhookHandler
|
| 8 |
+
from linebot.exceptions import InvalidSignatureError
|
| 9 |
+
from linebot.models import MessageEvent, TextMessage, TextSendMessage, ImageSendMessage, AudioMessage
|
| 10 |
+
|
| 11 |
+
# Configure the Google Generative AI client with the API key from the environment.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

# Text-generation parameters: low temperature / narrow sampling for
# fairly deterministic answers, capped at 2048 output tokens.
generation_config = genai.types.GenerationConfig(max_output_tokens=2048, temperature=0.2, top_p=0.5, top_k=16)

# Gemini model with a "Santa Claus" persona as the system instruction.
# (Alternative persona kept from the original: "你是博通古今的萬應機器人!")
model = genai.GenerativeModel('gemini-1.5-flash', system_instruction="你是聖誕老人,請使用招牌笑聲做開頭,然後以爽朗愉悅的口氣回答問題。")

# LINE Messaging API credentials, read from the environment.
line_bot_api = LineBotApi(os.environ["CHANNEL_ACCESS_TOKEN"])
line_handler = WebhookHandler(os.environ["CHANNEL_SECRET"])

# Whether the bot answers messages by default.
# NOTE: the original environment variable name "DEFALUT_TALKING" is
# misspelled. Accept the corrected spelling "DEFAULT_TALKING" first and
# fall back to the legacy misspelled name so existing deployments keep
# working.
working_status = os.getenv("DEFAULT_TALKING", os.getenv("DEFALUT_TALKING", "true")).lower() == "true"

# Create the FastAPI application.
app = FastAPI()

# Allow cross-origin requests from any origin (required because the
# webhook is called by LINE's servers, not by a same-origin browser page).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
| 38 |
+
|
| 39 |
+
# Root endpoint: a lightweight liveness/identity check for the service.
@app.get("/")
def root():
    """Return a minimal JSON payload identifying this service."""
    return {"title": "Line Bot"}
|
| 43 |
+
|
| 44 |
+
# LINE webhook endpoint: receives events pushed by the LINE platform.
@app.post("/webhook")
async def webhook(
    request: Request,
    background_tasks: BackgroundTasks,
    x_line_signature=Header(None),
):
    """Acknowledge a LINE webhook call and process the event in the background.

    The raw request body and the ``X-Line-Signature`` header are handed to
    the LINE SDK's handler as a background task so the webhook can return
    immediately.

    NOTE(review): ``add_task`` only *schedules* the call, so an
    ``InvalidSignatureError`` raised while the task actually runs will not
    be caught by this ``except`` — confirm whether synchronous handling was
    intended before changing it.
    """
    body = await request.body()
    try:
        # Defer event handling so LINE gets a fast 200 response.
        background_tasks.add_task(
            line_handler.handle, body.decode("utf-8"), x_line_signature
        )
    except InvalidSignatureError:
        # Signature did not match the channel secret.
        raise HTTPException(status_code=400, detail="Invalid signature")
    return "ok"
|
| 62 |
+
|
| 63 |
+
# Handle incoming LINE text-message events.
@line_handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Reply to a LINE text message, delegating answer generation to Gemini.

    Behavior:
      * a non-text event gets an explanatory error reply;
      * the literal text "再見" gets a "Bye!" reply and stops processing;
      * otherwise, while ``working_status`` is on, the user's text is sent
        to the Gemini model and the generated answer (or a fallback
        message on error/empty output) is echoed back.
    """
    global working_status

    # Reject anything that is not a plain text message.
    if event.type != "message" or event.message.type != "text":
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text="Event type error:[No message or the message does not contain text]")
        )

    # Explicit goodbye keyword.
    elif event.message.text == "再見":
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text="Bye!")
        )
        return

    elif working_status:
        try:
            prompt = event.message.text
            completion = model.generate_content(prompt, generation_config=generation_config)
            # Guard against an empty candidate list as well as a missing
            # text part. The original only compared ``!= None`` and could
            # raise IndexError straight into a bare ``except``.
            if completion.parts and completion.parts[0].text is not None:
                out = completion.parts[0].text
            else:
                out = "Gemini沒答案!請換個說法!"
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            out = "Gemini執行出錯!請換個說法!"

        # Send the generated (or fallback) answer back to the user.
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text=out))
|
| 107 |
+
|
| 108 |
+
if __name__ == "__main__":
|
| 109 |
+
# 啟動 FastAPI 應用程式
|
| 110 |
+
uvicorn.run("main:app", host="0.0.0.0", port=7860, reload=True)
|
| 111 |
+
|
| 112 |
+
# 註解說明:
|
| 113 |
+
# import 導入必要的套件
|
| 114 |
+
# genai.configure 設定 Google AI API 金鑰
|
| 115 |
+
# generation_config 設定文字生成參數
|
| 116 |
+
# model 設定使用的 Gemini 模型
|
| 117 |
+
# line_bot_api 和 line_handler 設定 Line Bot API 和 webhook 處理器
|
| 118 |
+
# working_status 設定是否正在與使用者交談
|
| 119 |
+
# app 建立 FastAPI 應用程式
|
| 120 |
+
# app.add_middleware 設定 CORS
|
| 121 |
+
# @app.get("/") 處理根路徑請求
|
| 122 |
+
# @app.post("/webhook") 處理 Line Webhook 請求
|
| 123 |
+
# @line_handler.add(MessageEvent, message=TextMessage) 處理文字訊息事件
|
| 124 |
+
# if __name__ == "__main__": 啟動 FastAPI 應用程式
|
| 125 |
+
# 程式碼功能說明:
|
| 126 |
+
# 程式碼首先會導入必要的套件,並設定 Google AI API 金鑰、文字生成參數、Gemini 模型以及 Line Bot API。
|
| 127 |
+
# 接著會建立 FastAPI 應用程式,並設定 CORS。
|
| 128 |
+
# 程式碼會定義兩個函數:
|
| 129 |
+
# root() 處理根路徑請求,返回一個簡單的 JSON 訊息。
|
| 130 |
+
# webhook() 處理 Line Webhook 請求,將處理 Line 事件的任務加入背景工作,並處理無效的簽章錯誤。
|
| 131 |
+
# 程式碼還定義一個函數 handle_message() 來處理文字訊息事件,它會檢查事件類型和訊息類型,並根據使用者輸入執行不同的動作:
|
| 132 |
+
# 如果使用者輸入 "再見",回覆 "Bye!"。
|
| 133 |
+
# 如果正在與使用者交談,則會使用 Gemini 模型生成文字,並將結果回覆給使用者。
|
| 134 |
+
# 最後,程式碼會啟動 FastAPI 應用程式,開始監聽 HTTP 請求。
|
| 135 |
+
# 程式碼運行方式:
|
| 136 |
+
# 將程式碼存為 main.py 文件。
|
| 137 |
+
# 在環境變數中設定 GOOGLE_API_KEY、CHANNEL_ACCESS_TOKEN 和 CHANNEL_SECRET。
|
| 138 |
+
# 執行 uvicorn main:app --host 0.0.0.0 --port 7860 --reload 命令啟動 FastAPI 應用程式。
|
| 139 |
+
# 使用 Line 帳戶與 Line Bot 進行對話。
|
| 140 |
+
# 注意:
|
| 141 |
+
# 程式碼中使用 os.environ["GOOGLE_API_KEY"]、os.environ["CHANNEL_ACCESS_TOKEN"] 和 os.environ["CHANNEL_SECRET"] 來存取環境變數,需要先在環境變數中設定這些值。
|
| 142 |
+
# 程式碼中使用 uvicorn 執行 FastAPI 應用程式,需要先安裝 uvicorn 套件。
|
| 143 |
+
# 程式碼中使用 google.generativeai 套件,需要先安裝 google-generativeai 套件。
|
| 144 |
+
# 程式碼中使用 linebot 套件,需要先安裝 linebot 套件。
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn[standard]
|
| 3 |
+
gunicorn
|
| 4 |
+
pydantic
|
| 5 |
+
python-dotenv
|
| 6 |
+
line-bot-sdk
|
| 7 |
+
google-generativeai
|
| 8 |
+
gradio
|