Anisha Bhatnagar committed on
Commit
c38eb7e
·
1 Parent(s): 844bfad

visual changes

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -107,11 +107,11 @@ user_message = args.user_message
107
  print('Initialization Finished')
108
 
109
  print ('Step2: feed-forward process')
110
- title = "Video-LLaMA for hate Detection"
111
 
112
  description = "Hate-LLaMA , is a multi-modal framework, designed to detect hate in videos and classify them as HATE or NON HATE. Hate-LLaMA finetunes Video-LLaMA (uses the LLaMA-7b-chat model as backbone). After uploading a video and clicking submit, the model outputs a simple statement identifying if the video has hate or not. "
113
 
114
- article = ""
115
  #examples = ["examples/hate_video_136.mp4","examples/hate_video_2.mp4", "examples/non_hate_video_349.mp4", "examples/non_hate_video_569.mp4"]
116
 
117
  demo = gr.Interface(fn=infer, inputs="video", outputs="text", title=title, description=description, article=article) #, examples=examples)
 
107
  print('Initialization Finished')
108
 
109
  print ('Step2: feed-forward process')
110
+ title = "Hate-LLaMA - An Instruction-tuned Audio-Visual Language Model for Hate Content Detection"
111
 
112
  description = "Hate-LLaMA , is a multi-modal framework, designed to detect hate in videos and classify them as HATE or NON HATE. Hate-LLaMA finetunes Video-LLaMA (uses the LLaMA-7b-chat model as backbone). After uploading a video and clicking submit, the model outputs a simple statement identifying if the video has hate or not. "
113
 
114
+ article = "Authors : Anisha Bhatnagar, Simran Makariye, Divyanshi Parashar"
115
  #examples = ["examples/hate_video_136.mp4","examples/hate_video_2.mp4", "examples/non_hate_video_349.mp4", "examples/non_hate_video_569.mp4"]
116
 
117
  demo = gr.Interface(fn=infer, inputs="video", outputs="text", title=title, description=description, article=article) #, examples=examples)