from openai import OpenAI
import streamlit as st
import os
from dotenv import load_dotenv

load_dotenv()
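# The .env file (or the Space's secret settings) is expected to provide the token
# used below, e.g.  HUGGINGFACEHUB_API_TOKEN=hf_xxx

# --- Earlier experiments (local transformers pipeline, LangChain HuggingFaceHub / Chroma),
# kept commented out for reference ---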
# import torch
# from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
# from threading import Thread
# from transformers import AutoTokenizer, AutoModelForCausalLM
# from sentence_transformers import SentenceTransformer
# from langchain_community.document_loaders import TextLoader
# from langchain_community.embeddings.sentence_transformer import (
#     SentenceTransformerEmbeddings,
# )
# from langchain_community.vectorstores import Chroma
# from langchain_text_splitters import CharacterTextSplitter
# from transformers import pipeline
# tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
# model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it")
# pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=512)
# hf_model = HuggingFacePipeline(pipeline=pipe)
# from langchain_community.llms import HuggingFaceHub
# llm = HuggingFaceHub(
#     repo_id="google/gemma-2b-it",
#     task="text-generation",
#     model_kwargs={
#         "max_new_tokens": 512,
#         "top_k": 30,
#         "temperature": 0.1,
#         "repetition_penalty": 1.03,
#     },
# )
# Initialize the client
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),  # or paste an "hf_..." token directly
)
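# The Hugging Face Inference API exposes an OpenAI-compatible /v1 chat-completions
# endpoint, so the standard openai client can be reused here; only base_url and
# api_key differ from a regular OpenAI setup.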
# from langchain.schema import (
#     HumanMessage,
#     SystemMessage,
# )
# from langchain_community.chat_models.huggingface import ChatHuggingFace
# messages = [
#     SystemMessage(content="You're a helpful assistant"),
#     HumanMessage(content=""),
# ]
# chat_model = ChatHuggingFace(llm=llm)
# from dotenv import load_dotenv
# import os
# load_dotenv()
# openai_api_key = os.getenv("OPENAI_API_KEY")
# with st.sidebar:
#     openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
#     "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
#     "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
#     "[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("💬 Chatbot")
st.caption("🚀 A Streamlit chatbot powered by Google Gemma")
if "messages" not in st.session_state:
    st.session_state["messages"] = []  # [{"role": "assistant", "content": "How can I help you?"}]

for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])
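# Streamlit reruns the whole script on every interaction, so the chat history is
# kept in st.session_state and replayed above before any new input is handled.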
# from google.colab import userdata
# openai_api_key = userdata.get('OPENAI_API_KEY')
if prompt := st.chat_input():
    # if not openai_api_key:
    #     st.info("Please add your OpenAI API key to continue.")
    #     st.stop()
    # client = OpenAI(api_key=openai_api_key)
    # client = OpenAI()
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    response = client.chat.completions.create(
        model="google/gemma-2b-it",
        messages=st.session_state.messages,
    )
    msg = response.choices[0].message.content
    st.session_state.messages.append({"role": "assistant", "content": msg})
    st.chat_message("assistant").write(msg)
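    # A possible streaming variant (untested sketch; assumes the endpoint accepts
    # stream=True and that a recent Streamlit with st.write_stream is installed):
    #
    #     stream = client.chat.completions.create(
    #         model="google/gemma-2b-it",
    #         messages=st.session_state.messages,
    #         stream=True,
    #     )
    #     msg = st.chat_message("assistant").write_stream(
    #         chunk.choices[0].delta.content or "" for chunk in stream
    #     )
    #     st.session_state.messages.append({"role": "assistant", "content": msg})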