ombhojane committed on
Commit
7bd6792
·
1 Parent(s): b7857d3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -34
app.py CHANGED
@@ -1,44 +1,33 @@
import os
import re

import streamlit as st
from langchain.chat_models import ChatOpenAI

# Demo transcript of a customer-support call; this is the text we summarize.
data = "Customer: Hi, I'm having trouble with my account. Call center guy: Sure, what seems to be the problem? Customer: I can't seem to log in. Call center guy: Okay, let me check your account. What's your username? Customer: It's johndoe123. Call center guy: Alright, I see the issue. Your account has been locked due to too many failed login attempts. Customer: Oh, I see. Can you unlock it for me? Call center guy: Yes, I can do that for you. Just give me a moment. Customer: Thank you so much. Call center guy: No problem. Is there anything else I can help you with?"

# Read the OpenAI API key from the environment. The original line
# (`openai_api_key = openai_api_key`) self-assigned an undefined name and
# raised NameError on import. ChatOpenAI also falls back to the
# OPENAI_API_KEY env var on its own when this is None.
openai_api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the GPT-3.5 Turbo chat model.
llm = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    openai_api_key=openai_api_key,
    max_tokens=1024,  # Maximum tokens per response (adjust as needed)
)


def _segment_conversation(text):
    """Split a flat transcript into per-speaker turns.

    Replaces the nonexistent ``langchain.transformers.JsonSegmenter``:
    the transcript is cut immediately before each "Customer:" /
    "Call center guy:" speaker label (lookahead split, so the label stays
    attached to its turn), and empty fragments are dropped.
    """
    parts = re.split(r"(?=(?:Customer|Call center guy): )", text)
    return [part.strip() for part in parts if part.strip()]


# Segment the conversation into turns.
conversation_turns = _segment_conversation(data)

# Prompt template for summarizing the important highlights of one turn.
prompt_template = """Summarize the important highlights of the following conversation:\n\n{conversation_turn}\n\nPlease highlight key events, decisions, and customer concerns, using bullet points if possible."""

# Generate a summary for each turn. ChatOpenAI.predict takes a plain string
# and returns a plain string — the original `llm.run(text=...)` and
# `response["text"]` calls do not exist on ChatOpenAI and would raise.
highlights = []
for turn in conversation_turns:
    current_prompt = prompt_template.format(conversation_turn=turn)
    summary = llm.predict(current_prompt).strip()
    highlights.append(summary)

# Streamlit app
st.title("Conversation Highlights Generator")

# Display conversation data
st.subheader("Conversation Data:")
st.code(data, language="json")

# Display predicted highlights
st.subheader("Predicted Highlights:")
for i, highlight in enumerate(highlights, 1):
    st.write(f"{i}. {highlight}")
 
import streamlit as st
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

st.title("Call Center Conversation Highlights")

# Text area for user input
conversation_text = st.text_area("Paste your call center conversation: ")

# Prompt template for summarizing one conversation segment. The original
# imports (`ConversationChain`, `ConversationTurnPromptTemplate`,
# `ListSummarizationPromptTemplate` from `langchain.prompts`) do not exist
# in LangChain and failed at import time; a plain PromptTemplate + LLMChain
# is the real API for this one-shot summarization.
summary_prompt = PromptTemplate(
    input_variables=["segment"],
    template=(
        "Please summarize the key points in bullet points.\n\n{segment}"
    ),
)

# Initialize the summarization chain. An LLM is required here — the
# original ConversationChain(...) call constructed a chain with no model.
# NOTE(review): assumes OPENAI_API_KEY is set in the environment — confirm.
conversation_chain = LLMChain(
    llm=ChatOpenAI(model_name="gpt-3.5-turbo"),
    prompt=summary_prompt,
)

# Run analysis and display highlights
if conversation_text:
    # Segment the conversation based on line breaks, skipping blank lines.
    conversation_segments = [
        line for line in conversation_text.split("\n") if line.strip()
    ]

    # Generate highlight summaries. LLMChain.run returns a plain string,
    # so collect it directly — the original `response["summaries"]`
    # subscript would have raised TypeError on a str result.
    highlights = []
    for segment in conversation_segments:
        summary = conversation_chain.run(segment=segment)
        highlights.append(summary.strip())

    # Display extracted highlights
    st.header("Extracted Highlights:")
    for highlight in highlights:
        st.write(f"- {highlight}")
else:
    st.write("Please enter the call center conversation to analyze.")