Shiraztanvir123 committed on
Commit
4176d6a
·
verified ·
1 Parent(s): eaa55ac

Upload 4 files

Browse files
Files changed (3) hide show
  1. README.md +4 -4
  2. app.py +36 -24
  3. requirements.txt +0 -0
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
- title: LLMsIntro
3
- emoji: 🏃
4
- colorFrom: green
5
- colorTo: purple
6
  sdk: streamlit
7
  sdk_version: 1.21.0
8
  app_file: app.py
 
1
  ---
2
+ title: ChatModelApp
3
+ emoji: 🤖
4
+ colorFrom: gray
5
+ colorTo: red
6
  sdk: streamlit
7
  sdk_version: 1.21.0
8
  app_file: app.py
app.py CHANGED
@@ -1,47 +1,59 @@
1
- #Hello! It seems like you want to import the Streamlit library in Python. Streamlit is a powerful open-source framework used for building web applications with interactive data visualizations and machine learning models. To import Streamlit, you'll need to ensure that you have it installed in your Python environment.
2
- #Once you have Streamlit installed, you can import it into your Python script using the import statement,
3
 
4
  import streamlit as st
5
 
6
- from langchain_openai import OpenAI
7
- from langchain_huggingface import HuggingFaceEndpoint
8
 
9
- #When deployed on huggingface spaces, this values has to be passed using Variables & Secrets setting, as shown in the video :)
10
- #import os
11
- #os.environ["OPENAI_API_KEY"] = "sk-PLfFwPq6y24234234234FJ1Uc234234L8hVowXdt"
12
 
13
  #import os
14
- #os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<REDACTED — never commit API tokens; if this was a live token, revoke it>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
- #Function to return the response
17
  def load_answer(question):
18
- #llm = OpenAI(model_name="gpt-3.5-turbo-instruct",temperature=0)
19
- llm = HuggingFaceEndpoint(
20
- repo_id="mistralai/Mistral-7B-Instruct-v0.3") # Model link : https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3
21
 
22
- answer=llm.invoke(question)
23
- return answer
24
 
 
 
 
 
 
25
 
26
- #App UI starts here
27
- st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
28
- st.header("LangChain Demo")
29
 
30
- #Gets the user input
31
  def get_text():
32
- input_text = st.text_input("You: ", key="input")
33
  return input_text
34
 
35
 
36
- user_input=get_text()
37
- if user_input!= "":
38
- response = load_answer(user_input)
39
 
 
 
 
40
  submit = st.button('Generate')
41
 
42
- #If generate button is clicked
43
  if submit:
44
-
 
45
  st.subheader("Answer:")
46
 
47
  st.write(response)
 
 
 
1
 
2
  import streamlit as st
3
 
 
 
4
 
5
+ from langchain_openai import ChatOpenAI
 
 
6
 
7
  #import os
8
+ #os.environ["OPENAI_API_KEY"] = "sk-PLfFw23dd932dfg34446dftyvvdfgdfgmvXr2dL8hVowXdt"
9
+
10
+
11
+ from langchain.schema import (
12
+ AIMessage,
13
+ HumanMessage,
14
+ SystemMessage
15
+ )
16
+
17
+ # From here down is all the StreamLit UI
18
+ st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
19
+ st.header("Hey, I'm your Chat GPT")
20
+
21
+
22
+
23
+ if "sessionMessages" not in st.session_state:
24
+ st.session_state.sessionMessages = [
25
+ SystemMessage(content="You are a helpful assistant.")
26
+ ]
27
+
28
+
29
 
 
30
def load_answer(question):
    """Send *question* to the chat model along with the full conversation.

    Appends the user's message to ``st.session_state.sessionMessages``,
    invokes the module-level ``chat`` (ChatOpenAI) instance on the whole
    history so the model sees prior turns, records the assistant's reply
    back into the history, and returns the reply text.
    """
    history = st.session_state.sessionMessages
    history.append(HumanMessage(content=question))

    # The entire message list (system + prior turns + this question) is
    # sent so the model keeps conversational context across reruns.
    reply = chat.invoke(history)

    history.append(AIMessage(content=reply.content))
    return reply.content
39
 
 
 
 
40
 
 
41
def get_text():
    """Render the chat input box and return whatever the user typed."""
    return st.text_input("You: ")
44
 
45
 
46
+ chat = ChatOpenAI(temperature=0)
47
+
 
48
 
49
+
50
+
51
+ user_input=get_text()
52
  submit = st.button('Generate')
53
 
 
54
  if submit:
55
+
56
+ response = load_answer(user_input)
57
  st.subheader("Answer:")
58
 
59
  st.write(response)
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ