Alleinzellgaenger committed on
Commit
f7049dd
·
1 Parent(s): 88d4816

Try fixing POST

Browse files
Files changed (1) hide show
  1. backend/app.py +7 -1
backend/app.py CHANGED
@@ -1,6 +1,7 @@
1
  from fastapi import FastAPI, HTTPException
2
  from fastapi.middleware.cors import CORSMiddleware
3
  from fastapi.staticfiles import StaticFiles
 
4
  from transformers import GPT2Tokenizer, GPT2Model, pipeline
5
  import torch as t
6
  import logging
@@ -21,6 +22,7 @@ app.add_middleware(
21
  # Mount static files (frontend) so that visiting "/" serves index.html
22
  # Note: The directory path "../frontend" works because when running in Docker,
23
  # our working directory is set to /app, and the frontend folder is at /app/frontend.
 
24
 
25
  # Load tokenizer and GPT2 model
26
  tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
@@ -83,4 +85,8 @@ def generate(text: str):
83
  output = pipe(text)
84
 
85
  # Return the generated text in a JSON response
86
- return {"output": output[0]["generated_text"]}
 
 
 
 
 
1
  from fastapi import FastAPI, HTTPException
2
  from fastapi.middleware.cors import CORSMiddleware
3
  from fastapi.staticfiles import StaticFiles
4
+ from fastapi.responses import FileResponse
5
  from transformers import GPT2Tokenizer, GPT2Model, pipeline
6
  import torch as t
7
  import logging
 
22
  # Mount static files (frontend) so that visiting "/" serves index.html
23
  # Note: The directory path "../frontend" works because when running in Docker,
24
  # our working directory is set to /app, and the frontend folder is at /app/frontend.
25
+ app.mount("/static", StaticFiles(directory="frontend", html=True), name="static")
26
 
27
  # Load tokenizer and GPT2 model
28
  tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
 
85
  output = pipe(text)
86
 
87
  # Return the generated text in a JSON response
88
+ return {"output": output[0]["generated_text"]}
89
+
90
@app.get("/")
async def read_index():
    """Serve the frontend's entry page when the site root is requested."""
    # The frontend folder sits next to the working directory (/app in Docker),
    # so a relative path resolves to /app/frontend/index.html at runtime.
    index_page = "frontend/index.html"
    return FileResponse(index_page)