FrAnKu34t23 committed on
Commit
eb80595
·
verified ·
1 Parent(s): b06770b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -24
app.py CHANGED
@@ -1,6 +1,9 @@
1
  import torch
2
  import re
3
  import gradio as gr
 
 
 
4
  from transformers import (
5
  AutoTokenizer,
6
  AutoModelForCausalLM,
@@ -52,31 +55,28 @@ def classify_injury_zero_shot(description):
52
  result = injury_classifier(description, candidate_labels)
53
  return label_map[result["labels"][0]]
54
 
 
 
 
 
 
 
 
55
# === GENERATION FROM EACH MODEL ===
def generate_single_model_output(model, tokenizer, prompt, max_length=300, temperature=0.7):
    """Generate one model's completion for `prompt`, trapping failures.

    Args:
        model: a loaded `transformers` causal language model (run on CPU).
        tokenizer: the tokenizer paired with `model`.
        prompt (str): fully formatted prompt text.
        max_length (int): maximum number of NEW tokens to generate.
        temperature (float): sampling temperature.

    Returns:
        str: the decoded generation, a placeholder string when generation
        yields nothing, or a bracketed error message when this model
        raises — so the caller's ensemble loop keeps going.
    """
    try:
        # Tokenize on CPU; truncate the prompt to a 512-token budget.
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to("cpu")

        with torch.no_grad():
            output = model.generate(
                **inputs,
                # `max_length` counts prompt tokens too, so extend it by the
                # prompt length to allow `max_length` new tokens.
                max_length=inputs["input_ids"].shape[1] + max_length,
                temperature=temperature,
                top_p=0.9,
                top_k=50,
                repetition_penalty=1.1,
                pad_token_id=tokenizer.eos_token_id,
                do_sample=True
            )

        if output is not None and len(output) > 0:
            decoded = tokenizer.decode(output[0], skip_special_tokens=True).strip()
            return decoded
        else:
            return "[No output was generated by the model.]"

    except Exception as e:
        # Best-effort by design: report the failure as text rather than
        # letting one broken model abort the whole ensemble.
        return f"[Error generating output: {str(e)}]"
80
 
81
  # === ANALYSIS WITH FLAN-T5 ===
82
  def analyze_with_cpu_model(raw_outputs, zero_shot_injury):
@@ -114,7 +114,7 @@ def generate_prediction_ensemble(scenario_text, max_len, temperature):
114
  if not scenario_text.strip():
115
  return "Please enter a scenario", "", ""
116
 
117
- prompt = f"Based on the situation, predict potential hazards and injuries.\n{scenario_text.strip()}"
118
 
119
  raw_outputs = [
120
  generate_single_model_output(model, tokenizer, prompt, max_length=max_len, temperature=temperature)
 
1
  import torch
2
  import re
3
  import gradio as gr
4
+ import json
5
+ import ast
6
+ import traceback
7
  from transformers import (
8
  AutoTokenizer,
9
  AutoModelForCausalLM,
 
55
  result = injury_classifier(description, candidate_labels)
56
  return label_map[result["labels"][0]]
57
 
58
# === FORMAT INPUT ===
def format_input(scenario_text):
    """Build the generation prompt for a scenario description.

    Strips surrounding whitespace and any leading commas/spaces left over
    from upstream formatting, then prefixes the instruction sentence and
    appends the GPT-style end-of-text marker.

    Args:
        scenario_text (str): Free-text scenario description.

    Returns:
        str: Prompt string fed to each causal-LM in the ensemble.
    """
    # BUG FIX: the original guard was inverted (`if not scenario.startswith(", ")`),
    # which left a leading ", " in place while stripping every other leading
    # comma/space combination. The intent is to always drop leading
    # commas/spaces, so do it unconditionally.
    scenario = scenario_text.strip().lstrip(", ")
    return f"Based on the situation, predict potential hazards and injuries. {scenario}<|endoftext|>"
64
+
65
# === GENERATION FROM EACH MODEL ===
def generate_single_model_output(model, tokenizer, prompt, max_length=300, temperature=0.7):
    """Generate a completion for `prompt` from one causal-LM of the ensemble.

    Args:
        model: A loaded `transformers` causal language model (run on CPU).
        tokenizer: The tokenizer paired with `model`.
        prompt (str): Fully formatted prompt (see `format_input`).
        max_length (int): Maximum number of NEW tokens to generate.
        temperature (float): Sampling temperature.

    Returns:
        str: The decoded generation (prompt included), or a bracketed
        placeholder/error message if this model produced nothing or
        raised — so one failing model does not abort the whole ensemble.
    """
    # FIX: restore the per-model error capture that this revision dropped.
    # The caller runs this in a list comprehension over several models; an
    # unhandled exception from any one of them would kill the entire request.
    try:
        # Truncate the prompt to a 512-token budget; everything runs on CPU.
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to("cpu")
        with torch.no_grad():
            output = model.generate(
                **inputs,
                # `max_length` counts prompt tokens too, so extend it by the
                # prompt length to allow `max_length` new tokens.
                max_length=inputs["input_ids"].shape[1] + max_length,
                temperature=temperature,
                top_p=0.9,
                top_k=50,
                repetition_penalty=1.1,
                pad_token_id=tokenizer.eos_token_id,
                do_sample=True
            )
        if output is not None and len(output) > 0:
            return tokenizer.decode(output[0], skip_special_tokens=True).strip()
        return "[No output was generated by the model.]"
    except Exception as e:
        # Capture the failure as text: the caller aggregates outputs from
        # several models and should keep going if one of them breaks.
        return f"[Error generating output: {str(e)}]"
 
 
 
 
 
 
 
 
 
 
80
 
81
  # === ANALYSIS WITH FLAN-T5 ===
82
  def analyze_with_cpu_model(raw_outputs, zero_shot_injury):
 
114
  if not scenario_text.strip():
115
  return "Please enter a scenario", "", ""
116
 
117
+ prompt = format_input(scenario_text)
118
 
119
  raw_outputs = [
120
  generate_single_model_output(model, tokenizer, prompt, max_length=max_len, temperature=temperature)