Nazhar committed on
Commit
5547790
·
verified ·
1 Parent(s): db89465

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -61
app.py CHANGED
@@ -724,6 +724,8 @@ with final_recs:
724
 
725
 
726
  with chat:
 
 
727
  from langchain.indexes import VectorstoreIndexCreator
728
  from langchain.chains import RetrievalQA
729
  from langchain.llms import OpenAI
@@ -734,32 +736,7 @@ with chat:
734
  from langchain_community.vectorstores import FAISS
735
  from langchain_community.llms import HuggingFaceHub
736
  from langchain_core.prompts import PromptTemplate
737
-
738
- print("Libraries imported")
739
-
740
- loader = CSVLoader("Events_SameDay.csv",encoding='iso-8859-1')
741
- print("CSV imported")
742
-
743
- # Create an index using the loaded documents
744
- try:
745
- index_creator = VectorstoreIndexCreator()
746
- print("index_creator Created")
747
-
748
- docsearch = index_creator.from_loaders([loader])
749
- print("DB Created")
750
- except Exception as e:
751
- st.write(f'Error Occured: {e}')
752
- repo_id = "mistralai/Mistral-7B-Instruct-v0.1"
753
- llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 1024})
754
- print("MistralAi Loaded")
755
-
756
- prompt_temp="As a financial expert for stock market, if user is asking for trading recommendation, then you need to generate trading signal recommendations utilizing insights from two approaches. One is the technical indicators signals EMA55, RSI, EMA9, and MACD (all ranging from -3 to 3, where –3 is strong sell, -2 is moderate sell, -1 is weak sell, 0 is for hold, 1 is for weak buy, 2 is for moderate buy and 3 is for strong buy) from the respective signal while other insight is from news impacts (either positive or negative between -5 to 5). Provide your recommendation with balanced approach if news impact is too much positive or negative, technical indicator can be ignored and buy or sell suggestion based on news impact can be given. On the contrary, if technical indicators are opposite to news impact, a hold position is a reasonable suggestion. If technical indicators are all positive along news impact, strong buy signal can be generated. If technical indicators and news impact are all negative a strong sell signal can be generated. If news impact is too low, then generate recommendation based on technical indicator specially with more weightage to ema 55 in all the technical indicators. Your response should cover all technical aspects including the analysis of technical indicators as well as covering the news sentiments. Also cover some logical financial rational as well as the explanations."
757
- # Create a question-answering chain using the index
758
- chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
759
- print("Chain Created")
760
-
761
-
762
-
763
  def get_answer(text):
764
  text = response['result']
765
  helpful_answer_index = text.find('Helpful Answer:')
@@ -776,39 +753,61 @@ with chat:
776
  for word in response.split():
777
  yield word + " "
778
  time.sleep(0.05)
779
-
780
-
781
- st.title("Chat with your AI Stock Advisor")
782
-
783
- # Initialize chat history
784
- if "messages" not in st.session_state:
785
- st.session_state.messages = []
786
-
787
- # Display chat messages from history on app rerun
788
- for message in st.session_state.messages:
789
- with st.chat_message(message["role"]):
790
- st.markdown(message["content"])
791
-
792
- # Accept user input
793
- if prompt := st.chat_input("Enter your query here."):
794
- # Add user message to chat history
795
- st.session_state.messages.append({"role": "user", "content": prompt})
796
- # Display user message in chat message container
797
- with st.chat_message("user"):
798
- st.markdown(prompt)
799
-
800
- # question = query
801
- print(f"User Question: {prompt}")
802
- try:
803
- response = chain({"question": prompt})
804
- print("Answer generated")
805
- result = get_answer(response['result'])
806
- print("helpful answer extracted")
807
 
808
- # Display assistant response in chat message container
809
- with st.chat_message("assistant"):
810
- response = st.write_stream(response_generator(result))
811
- # Add assistant response to chat history
812
- st.session_state.messages.append({"role": "assistant", "content": response})
813
- except Exception as e:
814
- st.write(f'Error: {e}')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
724
 
725
 
726
  with chat:
727
+ st.title("Chat with your AI Stock Advisor")
728
+
729
  from langchain.indexes import VectorstoreIndexCreator
730
  from langchain.chains import RetrievalQA
731
  from langchain.llms import OpenAI
 
736
  from langchain_community.vectorstores import FAISS
737
  from langchain_community.llms import HuggingFaceHub
738
  from langchain_core.prompts import PromptTemplate
739
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
740
  def get_answer(text):
741
  text = response['result']
742
  helpful_answer_index = text.find('Helpful Answer:')
 
753
  for word in response.split():
754
  yield word + " "
755
  time.sleep(0.05)
756
+ print("Libraries imported")
757
+
758
+ loader = CSVLoader("Events_SameDay.csv",encoding='iso-8859-1')
759
+ print("CSV imported")
760
+
761
+ # Create an index using the loaded documents
762
+ try:
763
+ index_creator = VectorstoreIndexCreator()
764
+ print("index_creator Created")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
765
 
766
+ docsearch = index_creator.from_loaders([loader])
767
+ print("DB Created")
768
+
769
+ repo_id = "mistralai/Mistral-7B-Instruct-v0.1"
770
+ llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 1024})
771
+ print("MistralAi Loaded")
772
+
773
+ prompt_temp="As a financial expert for stock market, if user is asking for trading recommendation, then you need to generate trading signal recommendations utilizing insights from two approaches. One is the technical indicators signals EMA55, RSI, EMA9, and MACD (all ranging from -3 to 3, where –3 is strong sell, -2 is moderate sell, -1 is weak sell, 0 is for hold, 1 is for weak buy, 2 is for moderate buy and 3 is for strong buy) from the respective signal while other insight is from news impacts (either positive or negative between -5 to 5). Provide your recommendation with balanced approach if news impact is too much positive or negative, technical indicator can be ignored and buy or sell suggestion based on news impact can be given. On the contrary, if technical indicators are opposite to news impact, a hold position is a reasonable suggestion. If technical indicators are all positive along news impact, strong buy signal can be generated. If technical indicators and news impact are all negative a strong sell signal can be generated. If news impact is too low, then generate recommendation based on technical indicator specially with more weightage to ema 55 in all the technical indicators. Your response should cover all technical aspects including the analysis of technical indicators as well as covering the news sentiments. Also cover some logical financial rational as well as the explanations."
774
+ # Create a question-answering chain using the index
775
+ chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
776
+ print("Chain Created")
777
+
778
+ # Initialize chat history
779
+ if "messages" not in st.session_state:
780
+ st.session_state.messages = []
781
+
782
+ # Display chat messages from history on app rerun
783
+ for message in st.session_state.messages:
784
+ with st.chat_message(message["role"]):
785
+ st.markdown(message["content"])
786
+
787
+ # Accept user input
788
+ if prompt := st.chat_input("Enter your query here."):
789
+ # Add user message to chat history
790
+ st.session_state.messages.append({"role": "user", "content": prompt})
791
+ # Display user message in chat message container
792
+ with st.chat_message("user"):
793
+ st.markdown(prompt)
794
+
795
+ # question = query
796
+ print(f"User Question: {prompt}")
797
+ response = chain({"question": prompt})
798
+ print("Answer generated")
799
+ result = get_answer(response['result'])
800
+ print("helpful answer extracted")
801
+
802
+ # Display assistant response in chat message container
803
+ with st.chat_message("assistant"):
804
+ response = st.write_stream(response_generator(result))
805
+ # Add assistant response to chat history
806
+ st.session_state.messages.append({"role": "assistant", "content": response})
807
+ except Exception as e:
808
+ if 'You exceeded your current quota' in e:
809
+ st.markdown(':red[Insufficient Quota]')
810
+ st.write(f'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.')
811
+
812
+ else:
813
+ st.markdown(f"An Error Occured: \n {e}")