Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -23,19 +23,17 @@ dtype = torch.float16
|
|
| 23 |
|
| 24 |
# Preprocessing pipeline (image transforms + chat tokenization) for CheXagent-8b;
# trust_remote_code is required because the repo ships custom processor code.
processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
# Decoding defaults (max new tokens, eos, etc.) published with the checkpoint.
generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
# NOTE(review): the model is loaded at import time here. On ZeroGPU Spaces,
# CUDA must only be touched inside an @spaces.GPU-decorated function, so this
# eager load is the likely cause of the Space's "Runtime error" — confirm.
model = AutoModelForCausalLM.from_pretrained("StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True)
|
| 27 |
|
| 28 |
@spaces.GPU
def generate(image, prompt):
    """Run CheXagent-8b on one image with a text prompt and return the reply.

    Args:
        image: either a file-like object exposing ``.read()`` (raw encoded
            image bytes) or an object that can be passed straight to the
            processor (presumably an already-decoded PIL image — TODO confirm
            against the Gradio input component).
        prompt: free-text instruction inserted into the model's chat template.

    Returns:
        str: the decoded model output with special tokens stripped.
    """
    # Normalize file-like input to an RGB PIL image; anything else is used
    # as-is (the original's `else: image = image` no-op branch is removed).
    if hasattr(image, "read"):
        image = Image.open(io.BytesIO(image.read())).convert("RGB")
    # The leading space and "<s>" markers are part of the checkpoint's
    # expected chat format — reproduced byte-for-byte.
    inputs = processor(
        images=[image],
        text=f" USER: <s>{prompt} ASSISTANT: <s>",
        return_tensors="pt",
    ).to(device=device, dtype=dtype)
    # [0]: single prompt in, single sequence out.
    output = model.generate(**inputs, generation_config=generation_config)[0]
    return processor.tokenizer.decode(output, skip_special_tokens=True)
|
|
|
|
| 23 |
|
| 24 |
# Preprocessing pipeline (image transforms + chat tokenization) for CheXagent-8b;
# trust_remote_code is required because the repo ships custom processor code.
processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
# Decoding defaults (max new tokens, eos, etc.) published with the checkpoint.
generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
# The model itself is deliberately NOT loaded here: on ZeroGPU Spaces, CUDA may
# only be initialized inside an @spaces.GPU-decorated function, so the load is
# deferred to generate(). (The previously commented-out eager load is removed.)
|
| 27 |
|
| 28 |
# Lazy-load cache for the model. ZeroGPU requires the CUDA-touching load to
# happen inside @spaces.GPU, but re-downloading/re-materializing an 8B
# checkpoint on EVERY request (as the original did) is prohibitively slow,
# so the model is loaded once on first use and reused afterwards.
# NOTE(review): assumes the ZeroGPU runtime tolerates a model kept on the
# device between @spaces.GPU invocations — confirm against Spaces docs.
_model_cache = {}


def _load_model():
    """Load CheXagent-8b onto the GPU on first use; return the cached model."""
    if "model" not in _model_cache:
        _model_cache["model"] = AutoModelForCausalLM.from_pretrained(
            "StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True
        ).to(device)
    return _model_cache["model"]


@spaces.GPU
def generate(image, prompt):
    """Run CheXagent-8b on one image with a text prompt and return the reply.

    Args:
        image: either a file-like object exposing ``.read()`` (raw encoded
            image bytes) or an object that can be passed straight to the
            processor (presumably an already-decoded PIL image — TODO confirm
            against the Gradio input component).
        prompt: free-text instruction inserted into the model's chat template.

    Returns:
        str: the decoded model output with special tokens stripped.
    """
    model = _load_model()  # first call pays the load; later calls are cheap
    # Normalize file-like input to an RGB PIL image; anything else is used
    # as-is (the original's `else: image = image` no-op branch is removed).
    if hasattr(image, "read"):
        image = Image.open(io.BytesIO(image.read())).convert("RGB")
    # The leading space and "<s>" markers are part of the checkpoint's
    # expected chat format — reproduced byte-for-byte.
    inputs = processor(
        images=[image],
        text=f" USER: <s>{prompt} ASSISTANT: <s>",
        return_tensors="pt",
    ).to(device=device, dtype=dtype)
    # [0]: single prompt in, single sequence out.
    output = model.generate(**inputs, generation_config=generation_config)[0]
    return processor.tokenizer.decode(output, skip_special_tokens=True)
|