Danielrahmai1991 committed on
Commit
90372f6
·
verified ·
1 Parent(s): 1a459c1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -2
app.py CHANGED
@@ -7,7 +7,7 @@ from langchain_core.callbacks import StreamingStdOutCallbackHandler
7
 
8
 
9
  callbacks = [StreamingStdOutCallbackHandler()]
10
-
11
  llm = LlamaCpp(
12
  model_path="unsloth.Q5_K_M.gguf",
13
  temperature=0.75,
@@ -16,6 +16,7 @@ llm = LlamaCpp(
16
  callback_manager=callbacks,
17
  verbose=True, # Verbose is required to pass to the callback manager
18
  )
 
19
 
20
  template = """You are the Finiantial expert:
21
 
@@ -31,10 +32,14 @@ template = """You are the Finiantial expert:
31
  prompt = PromptTemplate(template=template, input_variables=["question"])
32
 
33
  llm_chain_model = LLMChain(prompt=prompt, llm=llm)
 
34
 
35
 
36
  def greet(question):
37
- out_gen = llm_chain_model.run(question)
 
 
 
38
  return out_gen
39
 
40
  demo = gr.Interface(fn=greet, inputs="text", outputs="text")
 
7
 
8
 
9
  callbacks = [StreamingStdOutCallbackHandler()]
10
+ print("creating ll started")
11
  llm = LlamaCpp(
12
  model_path="unsloth.Q5_K_M.gguf",
13
  temperature=0.75,
 
16
  callback_manager=callbacks,
17
  verbose=True, # Verbose is required to pass to the callback manager
18
  )
19
+ print("creating ll ended")
20
 
21
  template = """You are the Finiantial expert:
22
 
 
32
  prompt = PromptTemplate(template=template, input_variables=["question"])
33
 
34
  llm_chain_model = LLMChain(prompt=prompt, llm=llm)
35
+ print("creating model created")
36
 
37
 
38
def greet(question):
    """Answer *question* via the module-level LLM chain.

    Logs the incoming question and the generated answer to stdout
    (debug tracing for the Space), then returns the raw chain output.
    """
    print(f"question is {question}")

    answer = llm_chain_model.run(question)
    print(f"out is {answer}")
    return answer


# Wire the handler into a simple text-in / text-out Gradio UI.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")