Noor3 committed on
Commit 3b36664 · verified · 1 Parent(s): 705da5d

Update app.py

Files changed (1)
  1. app.py +86 -51
app.py CHANGED
@@ -1,64 +1,99 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
+# File: app.py - FastAPI implementation for secure medical chatbot
+
+import os
+import torch
+from fastapi import FastAPI, HTTPException, Depends, Security
+from fastapi.security import APIKeyHeader
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+import hashlib
+import logging
+
+# Setup logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Initialize FastAPI app
+app = FastAPI(title="Secure Medical Chatbot API")
+
+# Setup CORS middleware to control which domains can access your API
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["https://your-website-domain.com"],  # Replace with your website domain
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+# API key security
+API_KEY = os.environ.get("API_KEY", "your-secret-api-key")  # Set this securely in production
+api_key_header = APIKeyHeader(name="X-API-Key")
+
+# Input model for request validation
+class QueryInput(BaseModel):
+    query: str
+
+# Create a hash function for privacy
+def hash_query(query: str) -> str:
+    return hashlib.sha256(query.encode()).hexdigest()
+
+# Model and tokenizer, loaded once via the startup event below
+model = None
+tokenizer = None
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+def load_model():
+    global model, tokenizer
+    if model is None or tokenizer is None:
+        logger.info("Loading model and tokenizer...")
+        model_name = "shanover/medbot_godel_v3"
+        model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model.to(device)
+        logger.info(f"Model loaded on {device}")
+
+# Authentication dependency
+async def verify_api_key(api_key: str = Security(api_key_header)):
+    if api_key != API_KEY:
+        raise HTTPException(status_code=403, detail="Invalid API key")
+    return api_key
+
+# Generate response function
+def generate_response(input_text, max_length=512):
+    input_ids = tokenizer.encode(input_text, return_tensors="pt", max_length=max_length, truncation=True)
+    input_ids = input_ids.to(device)
+    with torch.no_grad():
+        output_ids = model.generate(input_ids)
+    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+    return generated_text
+
+@app.on_event("startup")
+async def startup_event():
+    load_model()
+
+@app.post("/api/medical-advice")
+async def get_medical_advice(query_input: QueryInput, api_key: str = Depends(verify_api_key)):
+    try:
+        query = query_input.query
+        # Log a hash of the query instead of the query itself for privacy
+        logger.info(f"Processing query with hash: {hash_query(query)}")
+
+        response = generate_response(query)
+
+        return {
+            "response": response,
+            "status": "success"
+        }
+    except Exception as e:
+        logger.error(f"Error processing query: {str(e)}")
+        raise HTTPException(status_code=500, detail=f"Error processing request: {str(e)}")
+
+# Health check endpoint
+@app.get("/health")
+async def health_check():
+    return {"status": "healthy"}
+
 if __name__ == "__main__":
-    demo.launch()
+    import uvicorn
+    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)
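
For reference, a minimal client-side sketch of how the new endpoint could be exercised once this commit is deployed. It is an illustration, not part of the commit: the base URL http://localhost:8000, the use of the `requests` library, and the sample query are assumptions; the /health and /api/medical-advice paths, the X-API-Key header, and the QueryInput JSON shape come from app.py above.

# client_example.py - hypothetical client for the API above (not in the commit).
# Assumes the server runs locally on port 8000 and that the client's API_KEY
# matches the server's key; both are assumptions made for illustration.
import os

import requests

BASE_URL = "http://localhost:8000"  # assumed local dev address
API_KEY = os.environ.get("API_KEY", "your-secret-api-key")

# Confirm the service is up via the health check endpoint.
print(requests.get(f"{BASE_URL}/health", timeout=10).json())

# POST a query; the JSON body must match the QueryInput model ({"query": ...})
# and the X-API-Key header must satisfy verify_api_key().
resp = requests.post(
    f"{BASE_URL}/api/medical-advice",
    json={"query": "What are common symptoms of dehydration?"},
    headers={"X-API-Key": API_KEY},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])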