aliMohammad16 committed on
Commit
bbe006e
·
verified ·
1 Parent(s): 36dd29b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -2
app.py CHANGED
@@ -2,11 +2,13 @@ from fastapi import FastAPI
2
  from pydantic import BaseModel
3
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
  import torch
 
5
 
 
6
  app = FastAPI()
7
 
8
- # Load Model & Tokenizer
9
- MODEL_NAME = "facebook/bart-large-cnn" # Small & fast summarization model
10
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
11
  model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME).to("cpu") # Use "cuda" if you have a GPU
12
 
@@ -20,3 +22,7 @@ async def summarize_text(input_text: InputText):
20
  summary_ids = model.generate(inputs.input_ids, max_length=150, min_length=50, length_penalty=2.0)
21
  summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
22
  return {"summary": summary}
 
 
 
 
 
2
  from pydantic import BaseModel
3
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
  import torch
5
+ import uvicorn
6
 
7
# Instantiate the FastAPI application that exposes the summarization endpoint.
app = FastAPI()
9
 
10
# Load the summarization checkpoint once at import time so requests reuse it.
MODEL_NAME = "facebook/bart-large-cnn"  # compact, well-known summarization model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME).to("cpu")  # Use "cuda" if you have a GPU
14
 
 
22
  summary_ids = model.generate(inputs.input_ids, max_length=150, min_length=50, length_penalty=2.0)
23
  summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
24
  return {"summary": summary}
25

# Ensure the application starts when running locally
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)