Update app.py
Browse files
app.py
CHANGED
|
@@ -55,9 +55,7 @@ st.header("Chat with your document 📄 (Model: Falcon-7B-Instruct)")
|
|
| 55 |
model_name = "tiiuae/falcon-7b-instruct"
|
| 56 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 57 |
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
class CustomHuggingFaceEndpoint(HuggingFaceEndpoint):
|
| 61 |
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
|
| 62 |
len_prompt = len(prompt)
|
| 63 |
input_str = json.dumps({
|
|
@@ -78,8 +76,9 @@ class CustomHuggingFaceEndpoint(HuggingFaceEndpoint):
|
|
| 78 |
ans = ans[:ans.rfind("Human")].strip()
|
| 79 |
return ans
|
| 80 |
|
|
|
|
| 81 |
def load_chain():
|
| 82 |
-
llm = CustomHuggingFaceEndpoint(
|
| 83 |
memory = ConversationBufferMemory()
|
| 84 |
chain = ConversationChain(llm=llm, memory=memory)
|
| 85 |
return chain
|
|
|
|
| 55 |
model_name = "tiiuae/falcon-7b-instruct"
|
| 56 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 57 |
|
| 58 |
+
class CustomHuggingFaceEndpoint(HuggingFaceHub):
|
|
|
|
|
|
|
| 59 |
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
|
| 60 |
len_prompt = len(prompt)
|
| 61 |
input_str = json.dumps({
|
|
|
|
| 76 |
ans = ans[:ans.rfind("Human")].strip()
|
| 77 |
return ans
|
| 78 |
|
| 79 |
+
|
| 80 |
def load_chain():
    """Assemble the conversational chain for the app.

    Wires the custom Falcon-7B-Instruct endpoint (``CustomHuggingFaceEndpoint``
    pointed at ``model_name``) into a LangChain ``ConversationChain`` with a
    fresh ``ConversationBufferMemory`` so the chat keeps its running history.

    Returns:
        ConversationChain: a ready-to-use chain backed by the hub endpoint.
    """
    # Build the chain in one expression: endpoint LLM + buffer memory.
    return ConversationChain(
        llm=CustomHuggingFaceEndpoint(repo_id=model_name),
        memory=ConversationBufferMemory(),
    )
|