karthiksagarn commited on
Commit
a136895
·
verified ·
1 Parent(s): c3fab5b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import streamlit as st
2
  from langchain.llms import CTransformers
3
  from langchain.prompts import PromptTemplate
4
- from transformers import AutoModelForCausalLM
5
  from dotenv import load_dotenv
6
  from huggingface_hub import login
7
  import os
@@ -13,8 +13,8 @@ login(token=access_token)
13
  # from transformers import pipeline
14
  # model = pipeline("text-generation", model="TheBloke/Llama-2-7B-Chat-GGML")
15
 
16
- model_name = "karthiksagarn/llama-2-7b-chat"
17
- model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=access_token)
18
 
19
  ## Function to get response from the LLama2 Model
20
  def getLLamaResponse(input_text, no_of_words, blog_style):
 
1
  import streamlit as st
2
  from langchain.llms import CTransformers
3
  from langchain.prompts import PromptTemplate
4
+ from transformers import AutoModelForCausalLM, AutoModel
5
  from dotenv import load_dotenv
6
  from huggingface_hub import login
7
  import os
 
13
  # from transformers import pipeline
14
  # model = pipeline("text-generation", model="TheBloke/Llama-2-7B-Chat-GGML")
15
 
16
+ model_name = "TheBloke/Llama-2-7B-Chat-GGML"
17
+ model = AutoModel.from_pretrained(model_name, use_auth_token=access_token)
18
 
19
  ## Function to get response from the LLama2 Model
20
  def getLLamaResponse(input_text, no_of_words, blog_style):