File size: 2,501 Bytes
aa3cb06
 
5b665db
 
aa3cb06
 
 
 
 
 
5b665db
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
aa3cb06
5b665db
aa3cb06
 
 
 
 
 
5b665db
aa3cb06
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5b665db
aa3cb06
5b665db
aa3cb06
5b665db
 
 
 
aa3cb06
 
 
 
 
5b665db
aa3cb06
5b665db
aa3cb06
 
5b665db
aa3cb06
 
5b665db
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import os
import secrets
import shutil
import tempfile

import torch
import uvicorn
from fastapi import FastAPI, UploadFile, File, HTTPException, Security, status
from fastapi.security import APIKeyHeader
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

# ASGI application instance served by uvicorn.
app = FastAPI()

# --- SECURITY CONFIGURATION ---
# Define the header key we expect on every protected request.
API_KEY_NAME = "x-metanthropic-key"
# auto_error=False: a missing header yields None instead of an automatic 403,
# so get_api_key() below controls the rejection message itself.
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)

# This function checks the key against the Secret you just set
async def get_api_key(api_key_header: str = Security(api_key_header)):
    """Validate the x-metanthropic-key header against the server secret.

    Returns the key on success; raises HTTP 403 when the header is missing,
    wrong, or the server-side secret is not configured.
    """
    # Get the secret from the Hugging Face environment.
    correct_key = os.environ.get("METANTHROPIC_API_KEY")

    # BUG FIX: previously, if the env var was unset (CORRECT_KEY is None) and
    # the client omitted the header (api_key_header is None, because the
    # APIKeyHeader uses auto_error=False), `None == None` passed and the
    # endpoint was wide open. Require both values to be present, and compare
    # in constant time to avoid a timing side channel.
    if (
        correct_key
        and api_key_header
        and secrets.compare_digest(api_key_header, correct_key)
    ):
        return api_key_header

    # Wrong or missing key: reject the request.
    raise HTTPException(
        status_code=status.HTTP_403_FORBIDDEN,
        detail="Access Denied: Sovereign Node Locked"
    )

# --- MODEL CONFIGURATION ---
MODEL_ID = "metanthropic/neural-voice-v1" 
device = "cpu"
torch_dtype = torch.float32

print(f"πŸ”Ή Loading Sovereign Model: {MODEL_ID}...")

# Load the model once at module import. `pipe` is pre-bound to None so that,
# if loading fails, request handlers see None instead of an unbound name —
# previously a failed load left `pipe` undefined and every /transcribe call
# died with a NameError.
pipe = None
try:
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        MODEL_ID, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
    )
    model.to(device)
    processor = AutoProcessor.from_pretrained(MODEL_ID)
    # chunk_length_s/batch_size tune long-audio chunked decoding on CPU.
    pipe = pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        max_new_tokens=128,
        chunk_length_s=15,
        batch_size=16,
        torch_dtype=torch_dtype,
        device=device,
    )
    print("✅ Model Loaded Successfully.")
except Exception as e:
    # Best-effort startup: log and keep serving so the health endpoint works.
    print(f"❌ Error loading model: {e}")

@app.get("/")
def home():
    """Health check: confirm the node is up and running in secured mode."""
    node_status = "Metanthropic Neural Voice Node Online (Secured)"
    return {"status": node_status}

# πŸ”’ THIS ENDPOINT IS NOW LOCKED
@app.post("/transcribe")
async def transcribe(
    file: UploadFile = File(...), 
    api_key: str = Security(get_api_key)  # <--- The Lock
):
    temp_filename = f"temp_{file.filename}"
    try:
        with open(temp_filename, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        print(f"πŸŽ™οΈ Transcribing secure request...")
        result = pipe(temp_filename)
        return {"text": result["text"].strip()}

    except Exception as e:
        return {"error": str(e), "text": ""}
    finally:
        if os.path.exists(temp_filename):
            os.remove(temp_filename)