metanthropiclab committed on
Commit
5b665db
·
verified ·
1 Parent(s): 5fb774e

Added security

Browse files
Files changed (1) hide show
  1. app.py +34 -32
app.py CHANGED
@@ -1,34 +1,45 @@
1
  import torch
2
  from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
3
- from fastapi import FastAPI, UploadFile, File, HTTPException
 
4
  import uvicorn
5
  import os
6
  import shutil
7
 
8
  app = FastAPI()
9
 
10
- # --- CONFIGURATION ---
11
- # 🚨 POINTING TO YOUR REBRANDED MODEL
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  MODEL_ID = "metanthropic/neural-voice-v1"
13
- device = "cpu" # Free tier is CPU-only
14
  torch_dtype = torch.float32
15
 
16
  print(f"πŸ”Ή Loading Sovereign Model: {MODEL_ID}...")
17
 
18
  try:
19
- # 1. Load Model
20
  model = AutoModelForSpeechSeq2Seq.from_pretrained(
21
- MODEL_ID,
22
- torch_dtype=torch_dtype,
23
- low_cpu_mem_usage=True,
24
- use_safetensors=True
25
  )
26
  model.to(device)
27
-
28
- # 2. Load Processor
29
  processor = AutoProcessor.from_pretrained(MODEL_ID)
30
-
31
- # 3. Create Pipeline
32
  pipe = pipeline(
33
  "automatic-speech-recognition",
34
  model=model,
@@ -43,37 +54,28 @@ try:
43
  print("βœ… Model Loaded Successfully.")
44
  except Exception as e:
45
  print(f"❌ Error loading model: {e}")
46
- raise e
47
 
48
  @app.get("/")
49
  def home():
50
- return {"status": "Metanthropic Neural Voice Node Online", "model": MODEL_ID}
51
 
 
52
  @app.post("/transcribe")
53
- async def transcribe(file: UploadFile = File(...)):
54
- # Create a temporary file to store the upload
 
 
55
  temp_filename = f"temp_{file.filename}"
56
-
57
  try:
58
- # Save uploaded file
59
  with open(temp_filename, "wb") as buffer:
60
  shutil.copyfileobj(file.file, buffer)
61
 
62
- # Run Inference (The Magic)
63
- print(f"πŸŽ™οΈ Transcribing {temp_filename}...")
64
  result = pipe(temp_filename)
65
- text = result["text"]
66
-
67
- return {"text": text.strip()}
68
 
69
  except Exception as e:
70
- print(f"❌ Transcription Error: {e}")
71
- return {"error": str(e), "text": ""} # Return empty text on error
72
-
73
  finally:
74
- # Cleanup temp file
75
  if os.path.exists(temp_filename):
76
- os.remove(temp_filename)
77
-
78
- if __name__ == "__main__":
79
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
import hmac
import os
import shutil
import tempfile

import torch
import uvicorn
from fastapi import FastAPI, UploadFile, File, HTTPException, Security, status
from fastapi.security import APIKeyHeader
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
8
 
9
  app = FastAPI()
10
 
11
# --- SECURITY CONFIGURATION ---
# Clients authenticate by sending this header with every request.
API_KEY_NAME = "x-metanthropic-key"
# auto_error=False: a missing header reaches get_api_key as None instead of
# FastAPI auto-rejecting, so we control the rejection response ourselves.
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)

# Dependency that validates the presented key against the secret configured
# in the environment (Hugging Face Space secret METANTHROPIC_API_KEY).
async def get_api_key(api_key_header: str = Security(api_key_header)):
    """Return the presented API key if it matches the configured secret.

    Raises:
        HTTPException: 403 when the header is absent, the key is wrong, or
            no secret is configured on the server (fail closed).
    """
    correct_key = os.environ.get("METANTHROPIC_API_KEY")

    # BUG FIX: with auto_error=False a missing header arrives as None, and an
    # unset secret is also None — the original `api_key_header == CORRECT_KEY`
    # check therefore granted access to every unauthenticated request whenever
    # the secret was not set. Fail closed instead, and compare in constant
    # time to avoid leaking the key through a timing side channel.
    if (
        correct_key
        and api_key_header
        and hmac.compare_digest(api_key_header, correct_key)
    ):
        return api_key_header

    raise HTTPException(
        status_code=status.HTTP_403_FORBIDDEN,
        detail="Access Denied: Sovereign Node Locked"
    )
29
+
30
+ # --- MODEL CONFIGURATION ---
31
  MODEL_ID = "metanthropic/neural-voice-v1"
32
+ device = "cpu"
33
  torch_dtype = torch.float32
34
 
35
  print(f"πŸ”Ή Loading Sovereign Model: {MODEL_ID}...")
36
 
37
  try:
 
38
  model = AutoModelForSpeechSeq2Seq.from_pretrained(
39
+ MODEL_ID, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
 
 
 
40
  )
41
  model.to(device)
 
 
42
  processor = AutoProcessor.from_pretrained(MODEL_ID)
 
 
43
  pipe = pipeline(
44
  "automatic-speech-recognition",
45
  model=model,
 
54
  print("βœ… Model Loaded Successfully.")
55
  except Exception as e:
56
  print(f"❌ Error loading model: {e}")
 
57
 
58
@app.get("/")
def home():
    """Liveness probe: confirm the secured node is up and serving."""
    payload = {"status": "Metanthropic Neural Voice Node Online (Secured)"}
    return payload
61
 
62
# 🔒 Locked endpoint: callers must present a valid x-metanthropic-key header
# (enforced by the get_api_key Security dependency).
@app.post("/transcribe")
async def transcribe(
    file: UploadFile = File(...),
    api_key: str = Security(get_api_key)  # <--- The Lock
):
    """Transcribe an uploaded audio file with the loaded ASR pipeline.

    Returns:
        ``{"text": <transcript>}`` on success, or
        ``{"error": <message>, "text": ""}`` on failure (HTTP 200 either
        way, preserving the original in-band error contract).
    """
    # SECURITY FIX: the original built the path as f"temp_{file.filename}",
    # embedding a client-controlled name into the local filesystem path —
    # a name like "../x" escapes the working directory, and identical names
    # collide between concurrent requests. A server-generated temp path is
    # immune to both; only the (separator-free) extension is kept, since the
    # pipeline's audio loader may use it to sniff the format.
    suffix = os.path.splitext(os.path.basename(file.filename or ""))[1]
    fd, temp_filename = tempfile.mkstemp(suffix=suffix)
    try:
        # Stream the upload to disk without loading it all into memory.
        with os.fdopen(fd, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        print("🎙️ Transcribing secure request...")
        result = pipe(temp_filename)
        return {"text": result["text"].strip()}

    except Exception as e:
        # Preserve the original best-effort contract: report the error
        # in-band rather than raising a 500.
        return {"error": str(e), "text": ""}

    finally:
        # Always remove the temp file, success or failure.
        if os.path.exists(temp_filename):
            os.remove(temp_filename)