Update app.py
app.py CHANGED
@@ -17,24 +17,19 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto",
 )
 
-cache = {}
 
 @app.post("/generate")
 async def generate(request: Request):
     data = await request.json()
     prompt = data.get("prompt", "").strip()
 
-
-        return {"output": cache[prompt], "cached": True}
-
+
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     outputs = model.generate(
         inputs.input_ids,
         max_new_tokens=100,
         use_cache=True,
-
-        top_p=0.95,
-        top_k=50,
+
         temperature=0.7,
     )
 
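For context, the hunk above fits the pattern of a small FastAPI text-generation service built on transformers: the commit drops the in-memory `cache` dict and its cached-response return, and removes the `top_p`/`top_k` sampling arguments from `model.generate`. Below is a minimal, self-contained sketch of such an endpoint after the change. The model id ("gpt2"), the decode step, and the response shape are illustrative assumptions, not the Space's actual app.py, and the contents of the two lines added at new lines 26 and 32 are not shown in the diff.

# Minimal sketch of a FastAPI /generate endpoint in the style of the hunk above.
# Assumptions: "gpt2" as a stand-in model id, a JSON body like {"prompt": "..."},
# and a plain decoded string in the response.
from fastapi import FastAPI, Request
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

model_id = "gpt2"  # assumption: the real Space loads a different checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",  # requires `accelerate`; places weights automatically
)

@app.post("/generate")
async def generate(request: Request):
    data = await request.json()
    prompt = data.get("prompt", "").strip()

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        inputs.input_ids,
        max_new_tokens=100,
        use_cache=True,
        # Note: temperature only takes effect in sampling mode; without
        # do_sample=True, generate() falls back to greedy decoding and
        # warns that temperature is ignored.
        temperature=0.7,
    )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"output": text}

Run locally with `uvicorn app:app` and POST a body like {"prompt": "Hello"} to /generate to exercise the endpoint.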