varund2003 committed on
Commit
b78e44a
·
verified ·
1 Parent(s): 1e9a4b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -48
app.py CHANGED
@@ -1,65 +1,44 @@
1
- from transformers import pipeline
2
  from langchain.llms import OpenAI
 
3
  from dotenv import load_dotenv
 
 
 
 
 
 
4
  import streamlit as st
5
 
6
# Populate os.environ from a local .env file (API keys, etc.).
load_dotenv()

# Hugging Face classifier that labels offensive / hateful speech.
model = pipeline(
    task="text-classification",
    model="badmatr11x/distilroberta-base-offensive-hateful-speech-text-multiclassification",
)
14
 
15
# Q&A helper backed by the OpenAI completion model.
def get_openai_response(question):
    """Return the LLM's text completion for *question*."""
    llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0.5)
    return llm(question)
20
 
21
def add_offensive_content(response):
    """Return *response* with a hard-coded offensive sentence appended.

    Used to demonstrate unsafe model output downstream.
    """
    insult = "You're an idiot."
    return " ".join((response, insult))
26
 
27
def calculate_toxicity_score(text):
    """Naive toxicity metric: total occurrences of a small word blacklist.

    Matching is case-insensitive and counts non-overlapping substring hits.
    """
    lowered = text.lower()
    score = 0
    for word in ("idiot", "stupid", "hate"):
        score += lowered.count(word)
    return score
34
 
35
# ---------------- Streamlit UI ----------------
st.set_page_config(page_title="Combined App", page_icon="🤖", layout="centered", initial_sidebar_state="auto")
st.header("Combined Application")

# Free-text speech input plus a trigger button.
user_input = st.text_input("Enter Speech Here:")
submit = st.button("Submit")

if submit:
    # Offensive/hateful-speech classification of the raw input.
    classification = model(user_input)
    st.subheader("Classification Result:")
    st.write(classification)

    # LLM answer to the same input.
    answer = get_openai_response(user_input)
    st.subheader("OpenAI Response:")
    st.write(answer)

    # The answer with a hard-coded insult appended (unsafe-output demo).
    toxic_answer = add_offensive_content(answer)
    st.subheader("Combined Response:")
    st.write(toxic_answer)

    # Keyword-based toxicity estimate for the combined text.
    score = calculate_toxicity_score(toxic_answer)
    st.subheader("Toxicity Score:")
    st.write(score)
 
1
# Q&A Chatbot
from langchain.llms import OpenAI
from dotenv import load_dotenv

# Populate os.environ (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

import streamlit as st
import os
 
16
## Function to load OpenAI model and get responses
def get_openai_response(question):
    """Send *question* to the OpenAI completion model and return the reply text."""
    llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0.5)
    reply = llm(question)
    return reply
22
 
 
 
 
 
 
23
 
 
 
 
 
 
 
 
24
 
25
# ---------------- Streamlit UI ----------------
st.set_page_config(page_title="Q&A Demo")
st.header("Langchain Application")

# Question box plus a trigger button.
input_text = st.text_input("Input: ", key="input")
submit = st.button("Ask the question")

# Only query the model once the button is pressed.
if submit:
    answer = get_openai_response(input_text)
    st.subheader("The Response is")
    st.write(answer)