OppaAI committed on
Commit
68ac715
·
verified ·
1 Parent(s): 9d41b1d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -50,18 +50,18 @@ def process_and_describe(payload: dict):
50
  # 1️⃣ Save & upload image
51
  local_tmp_path, hf_url, path_in_repo, size_bytes = save_and_upload_image(image_b64)
52
 
53
- # 2️⃣ Prepare prompt
54
- prompt = "Describe this image in detail."
55
 
56
  # Open the image using PIL for the InferenceClient
57
  image = Image.open(local_tmp_path)
58
 
59
  # 3️⃣ Call VLM using Hugging Face Inference Client
60
- # The client automatically handles the API call and authentication
61
  vlm_text = hf_client.image_to_text(
62
  image=image,
63
  model=HF_VLM_MODEL,
64
- details=True, # Set details=True for more comprehensive output if available
65
  )
66
 
67
  return {
 
50
  # 1️⃣ Save & upload image
51
  local_tmp_path, hf_url, path_in_repo, size_bytes = save_and_upload_image(image_b64)
52
 
53
+ # 2️⃣ Prepare prompt (optional, some models ignore this for basic image_to_text)
54
+ # prompt = "Describe this image in detail."
55
 
56
  # Open the image using PIL for the InferenceClient
57
  image = Image.open(local_tmp_path)
58
 
59
  # 3️⃣ Call VLM using Hugging Face Inference Client
60
+ # Removed the problematic 'details' argument
61
  vlm_text = hf_client.image_to_text(
62
  image=image,
63
  model=HF_VLM_MODEL,
64
+ # details=True # <-- REMOVED THIS LINE
65
  )
66
 
67
  return {