Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
from langchain.llms import CTransformers
|
| 3 |
from langchain.prompts import PromptTemplate
|
| 4 |
-
from transformers import AutoModelForCausalLM
|
| 5 |
from dotenv import load_dotenv
|
| 6 |
from huggingface_hub import login
|
| 7 |
import os
|
|
@@ -13,8 +13,8 @@ login(token=access_token)
|
|
| 13 |
# from transformers import pipeline
|
| 14 |
# model = pipeline("text-generation", model="TheBloke/Llama-2-7B-Chat-GGML")
|
| 15 |
|
| 16 |
-
model_name = "
|
| 17 |
-
model =
|
| 18 |
|
| 19 |
## Function to get response from the LLama2 Model
|
| 20 |
def getLLamaResponse(input_text, no_of_words, blog_style):
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
from langchain.llms import CTransformers
|
| 3 |
from langchain.prompts import PromptTemplate
|
| 4 |
+
from transformers import AutoModelForCausalLM, AutoModel
|
| 5 |
from dotenv import load_dotenv
|
| 6 |
from huggingface_hub import login
|
| 7 |
import os
|
|
|
|
| 13 |
# from transformers import pipeline
|
| 14 |
# model = pipeline("text-generation", model="TheBloke/Llama-2-7B-Chat-GGML")
|
| 15 |
|
| 16 |
+
model_name = "TheBloke/Llama-2-7B-Chat-GGML"
|
| 17 |
+
model = AutoModel.from_pretrained(model_name, use_auth_token=access_token)
|
| 18 |
|
| 19 |
## Function to get response from the LLama2 Model
|
| 20 |
def getLLamaResponse(input_text, no_of_words, blog_style):
|