Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-
+from openai import OpenAI
 import streamlit as st
 
 # import torch
@@ -22,34 +22,41 @@ import streamlit as st
 # hf_model = HuggingFacePipeline(pipeline=pipe
 
 
-from langchain_community.llms import HuggingFaceHub
+# from langchain_community.llms import HuggingFaceHub
 
-llm = HuggingFaceHub(
-    repo_id="google/gemma-2b-it",
-    task="text-generation",
-    model_kwargs={
-        "max_new_tokens": 512,
-        "top_k": 30,
-        "temperature": 0.1,
-        "repetition_penalty": 1.03
-    },
-)
 
+# llm = HuggingFaceHub(
+#     repo_id="google/gemma-2b-it",
+#     task="text-generation",
+#     model_kwargs={
+#         "max_new_tokens": 512,
+#         "top_k": 30,
+#         "temperature": 0.1,
+#         "repetition_penalty": 1.03
+#     },
+# )
+
+# initialize the client
+client = OpenAI(
+    base_url="https://api-inference.huggingface.co/v1",
+    #api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')#"hf_xxx" # Replace with your token
+)
 
-from langchain.schema import (
-    HumanMessage,
-    SystemMessage,
-)
-from langchain_community.chat_models.huggingface import ChatHuggingFace
 
-messages = [
-    SystemMessage(content="You're a helpful assistant"),
-    HumanMessage(
-        content=""
-    ),
-]
+# from langchain.schema import (
+#     HumanMessage,
+#     SystemMessage,
+# )
+# from langchain_community.chat_models.huggingface import ChatHuggingFace
 
-chat_model = ChatHuggingFace(llm=llm)
+# messages = [
+#     SystemMessage(content="You're a helpful assistant"),
+#     HumanMessage(
+#         content=""
+#     ),
+# ]
+
+#chat_model = ChatHuggingFace(llm=llm)
 
 
 # from dotenv import load_dotenv
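This hunk is the core of the change: the LangChain HuggingFaceHub wrapper (and the ChatHuggingFace plumbing below it) is commented out, and an openai client is pointed at the Hugging Face Inference API's OpenAI-compatible endpoint instead. As committed, api_key stays commented out, so the client falls back to the OPENAI_API_KEY environment variable and raises at startup when that is unset. A minimal standalone sketch of the intended setup, assuming the token is exposed to the Space via the HUGGINGFACEHUB_API_TOKEN environment variable that the commented-out line references:

import os

from openai import OpenAI

# Assumed setup: read the Hugging Face token from the environment,
# as the commented-out api_key line in the diff suggests.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ["HUGGINGFACEHUB_API_TOKEN"],
)

# One-off sanity check against the same model the Space targets.
completion = client.chat.completions.create(
    model="google/gemma-2b-it",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(completion.choices[0].message.content)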
@@ -86,9 +93,8 @@ if prompt := st.chat_input():
     #client = OpenAI()
     st.session_state.messages.append({"role": "user", "content": prompt})
     st.chat_message("user").write(prompt)
-
-
-
-    msg = res.content
+    response = client.chat.completions.create(model="google/gemma-2b-it", messages=st.session_state.messages)
+
+    msg = response.choices[0].message.content
     st.session_state.messages.append({"role": "assistant", "content": msg})
     st.chat_message("assistant").write(msg)
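The hunk above swaps the response handling to match the new client: the old code read msg = res.content off a LangChain message object, while the OpenAI client nests the reply inside a completion object. An illustrative helper (not part of the Space) showing the access path the new code relies on, with guards for the edge cases it silently assumes away:

from openai.types.chat import ChatCompletion

def extract_reply(response: ChatCompletion) -> str:
    # Mirrors response.choices[0].message.content from the diff, but
    # guards against an empty choices list and a None content field.
    if not response.choices:
        raise ValueError("chat completion returned no choices")
    return response.choices[0].message.content or ""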
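Read end to end, the commit leaves app.py as a plain OpenAI-client chat app. Below is a minimal sketch of the resulting chat loop, reconstructed from the visible hunks; the diff only shows lines 1-4, 22-62, and 93-100 of the file, so the session-state initialization and history replay are assumptions based on the standard Streamlit chat pattern:

import os

import streamlit as st
from openai import OpenAI

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ["HUGGINGFACEHUB_API_TOKEN"],  # assumed Space secret
)

# Assumed scaffolding: keep the conversation in session state and replay it
# on each Streamlit rerun.
if "messages" not in st.session_state:
    st.session_state.messages = []
for m in st.session_state.messages:
    st.chat_message(m["role"]).write(m["content"])

if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    # Same call as in the diff, wrapped for readability.
    response = client.chat.completions.create(
        model="google/gemma-2b-it",
        messages=st.session_state.messages,
    )
    msg = response.choices[0].message.content

    st.session_state.messages.append({"role": "assistant", "content": msg})
    st.chat_message("assistant").write(msg)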