Abhlash committed on
Commit
3c2a6d1
·
verified ·
1 Parent(s): 5bcf012

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -5
app.py CHANGED
@@ -4,13 +4,11 @@ from transformers import pipeline
4
 
5
  app = FastAPI()
6
 
7
- # Define request model for math operations
8
  class CalculationRequest(BaseModel):
9
  a: float
10
  b: float
11
  operation: str
12
 
13
- # Load a smaller Hugging Face model (example: distilgpt2)
14
  model = pipeline('text-generation', model='distilgpt2')
15
 
16
  @app.post("/calculate")
@@ -32,13 +30,11 @@ def calculate(request: CalculationRequest):
32
 
33
  return {"result": result}
34
 
35
- # Example endpoint using Hugging Face model
36
  @app.post("/generate")
37
  def generate_text(prompt: str):
38
  generated = model(prompt, max_length=50, clean_up_tokenization_spaces=True)
39
  return {"generated_text": generated[0]['generated_text']}
40
 
41
- # New endpoint for testing math operations
42
  @app.post("/test_math")
43
  def test_math(request: CalculationRequest):
44
  a = request.a
@@ -60,4 +56,4 @@ def test_math(request: CalculationRequest):
60
 
61
  if __name__ == "__main__":
62
  import uvicorn
63
- uvicorn.run(app, host="0.0.0.0", port=8000)
 
4
 
5
  app = FastAPI()
6
 
 
7
  class CalculationRequest(BaseModel):
8
  a: float
9
  b: float
10
  operation: str
11
 
 
12
  model = pipeline('text-generation', model='distilgpt2')
13
 
14
  @app.post("/calculate")
 
30
 
31
  return {"result": result}
32
 
 
33
  @app.post("/generate")
34
  def generate_text(prompt: str):
35
  generated = model(prompt, max_length=50, clean_up_tokenization_spaces=True)
36
  return {"generated_text": generated[0]['generated_text']}
37
 
 
38
  @app.post("/test_math")
39
  def test_math(request: CalculationRequest):
40
  a = request.a
 
56
 
57
  if __name__ == "__main__":
58
  import uvicorn
59
+ uvicorn.run(app, host="0.0.0.0", port=7860)