Frenchizer committed
Commit bb64441 · verified · 1 parent: 76720d2

Update app.py

Files changed (1)
  app.py  +10 -20
app.py CHANGED
@@ -22,15 +22,10 @@ def gradio_predict(input_text):
         input_ids = tokenized_input["input_ids"].astype(np.int64)
         attention_mask = tokenized_input["attention_mask"].astype(np.int64)
 
-        # Initialize decoder_input_ids with start token
+        # Initialize decoder_input_ids
         decoder_input_ids = np.zeros((1, 512), dtype=np.int64)
         decoder_input_ids[:, 0] = tokenizer.bos_token_id or tokenizer.pad_token_id
 
-        print("Input values:")
-        print(f"First few input_ids: {input_ids[0][:10]}")
-        print(f"First few attention_mask: {attention_mask[0][:10]}")
-        print(f"First few decoder_input_ids: {decoder_input_ids[0][:10]}")
-
         # Run inference
         outputs = session.run(
             None,
@@ -41,27 +36,22 @@ def gradio_predict(input_text):
             }
         )
 
-        print("Output shape and type:")
-        print(f"Output type: {type(outputs)}")
-        print(f"Output[0] type: {type(outputs[0])}")
-        print(f"Output[0] shape: {outputs[0].shape}")
+        # Process logits to get token ids
+        logits = outputs[0]  # Shape: (1, 512, vocab_size)
+        token_ids = np.argmax(logits, axis=-1)[0]  # Get token ids for first sequence
 
-        # Process outputs more carefully
-        output_ids = outputs[0]
-        if isinstance(output_ids, np.ndarray):
-            output_ids = output_ids[0]  # Take first sequence
-        # Convert to list of integers if needed
-        if isinstance(output_ids, np.ndarray):
-            output_ids = output_ids.tolist()
+        # Find where the sequence ends (pad token or eos token)
+        eos_token_id = tokenizer.eos_token_id or tokenizer.pad_token_id
+        end_idx = np.where(token_ids == eos_token_id)[0]
+        if len(end_idx) > 0:
+            token_ids = token_ids[:end_idx[0]]
 
         # Decode output
-        translated_text = tokenizer.decode(output_ids, skip_special_tokens=True)
+        translated_text = tokenizer.decode(token_ids, skip_special_tokens=True)
         return translated_text
 
     except Exception as e:
         print(f"Detailed error: {str(e)}")
-        import traceback
-        print(traceback.format_exc())
         return f"Error during translation: {str(e)}"
 
 # Gradio interface for the web app
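Taken together, the two hunks drop the debug prints and the ad-hoc ndarray/list handling and replace them with a single greedy-decoding step: argmax over the vocabulary axis of the logits, truncation at the first EOS/pad token, then tokenizer.decode. Below is a minimal standalone sketch of that flow. The diff does not show how session and tokenizer are created, the tokenization call, or the ONNX input names, so the model id, the model.onnx path, the feed names, and the translate_greedy wrapper are illustrative assumptions, not code from the Space.

import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer

# Placeholder model id and ONNX path -- the Space's actual values are not shown in this diff.
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
session = ort.InferenceSession("model.onnx")

def translate_greedy(input_text: str, max_length: int = 512) -> str:
    # Tokenize and pad to the fixed length the exported graph expects
    tokenized_input = tokenizer(
        input_text,
        return_tensors="np",
        padding="max_length",
        truncation=True,
        max_length=max_length,
    )
    input_ids = tokenized_input["input_ids"].astype(np.int64)
    attention_mask = tokenized_input["attention_mask"].astype(np.int64)

    # Decoder input: start token followed by zeros, as in the committed code
    decoder_input_ids = np.zeros((1, max_length), dtype=np.int64)
    decoder_input_ids[:, 0] = tokenizer.bos_token_id or tokenizer.pad_token_id

    # Input names are assumed; check session.get_inputs() for the exported graph
    outputs = session.run(None, {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
    })

    # Greedy decode: logits of shape (1, max_length, vocab_size) -> token ids for sequence 0
    logits = outputs[0]
    token_ids = np.argmax(logits, axis=-1)[0]

    # Truncate at the first EOS (or pad) token so trailing padding is not decoded
    eos_token_id = tokenizer.eos_token_id or tokenizer.pad_token_id
    end_idx = np.where(token_ids == eos_token_id)[0]
    if len(end_idx) > 0:
        token_ids = token_ids[:end_idx[0]]

    return tokenizer.decode(token_ids, skip_special_tokens=True)

print(translate_greedy("Hello, how are you?"))

Note that the committed code (and this sketch) runs the decoder once over a mostly zero-filled decoder_input_ids rather than looping token by token; whether that single pass yields usable translations depends on how the model was exported to ONNX.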