ARBAJSSHAIKH committed on
Commit
ba67faf
·
verified ·
1 Parent(s): 7d95b88

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -1,13 +1,15 @@
1
  import streamlit as st
2
  from langchain.prompts import PromptTemplate
3
  from langchain_community.llms import CTransformers
 
4
 
 
 
 
 
5
 
6
  def GetLLMResponse(input_text,no_words,blog_type):
7
- llm=CTransformers(model="models\llama-2-7b-chat.ggmlv3.q8_0.bin",
8
- model_type='llama',
9
- config={'max_new_tokens':200,
10
- 'temperature':0.01})
11
 
12
  template=" wtite a blog for {blog_type} on topic of {input_text} in {no_words} words."
13
 
 
import streamlit as st
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers
from ctransformers import AutoModelForCausalLM

# Load the Llama 2 chat model once at module import.
# gpu_layers: number of layers to offload to GPU; 0 means CPU-only inference.
llm1 = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7b-Chat-GGUF",
    model_file="llama-2-7b-chat.q4_K_M.gguf",
    model_type="llama",
    gpu_layers=0,
)

# BUG FIX: the original line was `print(llm("AI is going to"))`, but no `llm`
# exists at module scope at this point (it is only assigned later, inside
# GetLLMResponse), so importing the app raised NameError. The loaded model is
# bound to `llm1`.
# NOTE(review): running a smoke-test inference at import time slows every
# Streamlit rerun — consider removing it or caching the model with
# st.cache_resource; left in place to preserve the author's intent.
print(llm1("AI is going to"))
11
  def GetLLMResponse(input_text,no_words,blog_type):
12
+ llm=llm1
 
 
 
13
 
14
  template=" wtite a blog for {blog_type} on topic of {input_text} in {no_words} words."
15