Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,58 +1,86 @@
|
|
| 1 |
-
# To powinno by膰 na Twoim Space: https://ojciectadeusz-fastapi-fn-calling.hf.space/
|
| 2 |
from fastapi import FastAPI, HTTPException
|
| 3 |
from pydantic import BaseModel
|
| 4 |
-
from huggingface_hub import HfApi
|
| 5 |
import pandas as pd
|
| 6 |
import os
|
| 7 |
-
import
|
| 8 |
|
| 9 |
app = FastAPI()
|
| 10 |
|
|
|
|
| 11 |
# Konfiguracja Datasetu
|
| 12 |
HF_TOKEN = os.environ.get("HF_TOKEN") # Ustaw to w Secrets w ustawieniach Space!
|
| 13 |
# REPO_ID = "OjciecTadeusz/test"
|
| 14 |
REPO_ID = os.environ.get("REPO_ID")
|
| 15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
sentiment: str
|
| 21 |
|
| 22 |
-
@app.post("/
|
| 23 |
-
async def
|
|
|
|
|
|
|
|
|
|
| 24 |
print(f"Otrzymano dane: {item}")
|
| 25 |
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
}
|
| 33 |
-
df = pd.DataFrame(new_data)
|
| 34 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
try:
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
# Aby robi膰 append, trzeba by najpierw pobra膰 plik, doda膰 wiersz i wys艂a膰 ponownie.
|
| 45 |
-
|
| 46 |
-
path_in_repo = "data.csv"
|
| 47 |
-
|
| 48 |
-
# Pami臋taj: 呕eby to dzia艂a艂o jako 'baza danych', musisz zarz膮dza膰 plikiem CSV.
|
| 49 |
-
# W najprostszej wersji API po prostu potwierdza odbi贸r.
|
| 50 |
-
|
| 51 |
-
return {"message": "Dane odebrane przez serwer HF Space", "data": item}
|
| 52 |
-
|
| 53 |
except Exception as e:
|
|
|
|
| 54 |
raise HTTPException(status_code=500, detail=str(e))
|
| 55 |
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
|
|
|
|
|
|
| 1 |
from fastapi import FastAPI, HTTPException
|
| 2 |
from pydantic import BaseModel
|
| 3 |
+
from huggingface_hub import HfApi, hf_hub_download
|
| 4 |
import pandas as pd
|
| 5 |
import os
|
| 6 |
+
import time
|
| 7 |
|
| 8 |
# FastAPI application instance; the route decorators below attach to it.
app = FastAPI()

# Dataset configuration
HF_TOKEN = os.environ.get("HF_TOKEN")  # Set this in Secrets in the Space settings!
# REPO_ID = "OjciecTadeusz/test"
REPO_ID = os.environ.get("REPO_ID")  # NOTE(review): read but never used below — /save writes to DATASET_REPO; confirm which is intended
DATASET_REPO = "OjciecTadeusz/test"  # target dataset repo for uploads
DATASET_FILENAME = "data.csv"        # CSV file acting as the "database" inside the dataset repo
|
| 17 |
+
|
| 18 |
+
class ConversationItem(BaseModel):
    """Request payload matching the data model defined in AI Studio.

    Fields:
        user_message: raw message text sent by the user.
        category: caller-assigned category label.
        priority: optional priority tag; defaults to "medium".
    """

    user_message: str
    category: str
    priority: str = "medium"
|
| 23 |
|
| 24 |
+
@app.get("/")
def home():
    """Health-check endpoint.

    Returns a short Polish status message directing clients to POST to /save.
    Fix: the original literal was mojibake (UTF-8 Polish mis-decoded as a legacy
    codepage: "dzia艂a" -> "działa", "Wy艣lij" -> "Wyślij"); restored proper text.
    """
    return {"status": "Serwer działa. Wyślij POST na /save"}
|
|
|
|
| 27 |
|
| 28 |
+
@app.post("/save")
async def save_to_dataset(item: ConversationItem):
    """Receive data from Gemini and append it to the HF Dataset CSV.

    Flow: download the current CSV from the dataset repo (or start a fresh
    empty frame if that fails), append one row, write the whole frame to a
    local temp file, and upload it back as a single commit.

    Fix: restored mojibake'd Polish runtime strings (UTF-8 mis-decoded as a
    legacy codepage) to their correct form; comments translated to English.

    Raises:
        HTTPException: 500 when HF_TOKEN is missing or the upload fails.
    """
    print(f"Otrzymano dane: {item}")

    if not HF_TOKEN:
        raise HTTPException(status_code=500, detail="Brak HF_TOKEN w zmiennych środowiskowych")

    api = HfApi(token=HF_TOKEN)

    # 1. Try to fetch the existing CSV file from the dataset repo.
    try:
        file_path = hf_hub_download(
            repo_id=DATASET_REPO,
            filename=DATASET_FILENAME,
            repo_type="dataset",
            token=HF_TOKEN
        )
        df = pd.read_csv(file_path)
    except Exception:
        # File missing (first run) — start a new empty DataFrame.
        # NOTE(review): a transient network/auth failure also lands here, and the
        # upload below would then overwrite the dataset with only the new row —
        # consider narrowing this to huggingface_hub's EntryNotFoundError.
        print("Plik nie istnieje lub błąd pobierania. Tworzę nowy.")
        df = pd.DataFrame(columns=["timestamp", "category", "priority", "message"])

    # 2. Build the new row (keys match the DataFrame columns above).
    new_row = {
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        "category": item.category,
        "priority": item.priority,
        "message": item.user_message
    }

    # pd.concat instead of DataFrame.append (removed in pandas 2.x).
    new_df = pd.DataFrame([new_row])
    df = pd.concat([df, new_df], ignore_index=True)

    # 3. Write the updated frame to a local file (Space's writable tmp dir).
    local_filename = "/tmp/data.csv"
    df.to_csv(local_filename, index=False)

    # 4. Upload the updated file back to the dataset repo.
    try:
        api.upload_file(
            path_or_fileobj=local_filename,
            path_in_repo=DATASET_FILENAME,
            repo_id=DATASET_REPO,
            repo_type="dataset",
            commit_message=f"Dodano wpis przez API: {item.category}"
        )
        return {"status": "success", "message": "Zapisano w Datasecie", "data": new_row}
    except Exception as e:
        print(f"Błąd uploadu: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 83 |
|
| 84 |
+
if __name__ == "__main__":
    # Local / Space entry point: serve on port 7860 (the HF Spaces default).
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
|