iamkdp committed on
Commit
333d2c3
·
verified ·
1 Parent(s): 35e75f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -37
app.py CHANGED
@@ -1,47 +1,29 @@
1
  import gradio as gr
2
- from langchain_community.chat_models import HuggingFaceChat
3
- from langchain.chains import ConversationChain
4
- from langchain.memory import ConversationBufferMemory
 
5
 
6
- MODEL_ID = "google/gemma-2b" # Adjust if you have a local path or HF repo
 
7
 
8
- def load_model():
9
- # Load Gemma 2B chat model on CPU
10
- return HuggingFaceChat(
11
- model=MODEL_ID,
12
- model_kwargs={"device_map": "cpu"},
13
- temperature=0.7,
14
- )
15
 
16
- def main():
17
- llm = load_model()
18
- memory = ConversationBufferMemory(return_messages=True)
19
- conversation = ConversationChain(llm=llm, memory=memory)
20
 
21
- def respond(user_input):
22
- response = conversation.run(user_input)
23
- return response
24
 
25
- title = "🦚 Meet Krish"
26
- description = "A wise, witty, and compassionate friend — Chatbot named KrishWay"
27
 
28
- with gr.Blocks() as demo:
29
- gr.Markdown(f"# {title}")
30
- gr.Markdown(description)
31
 
32
- chatbot = gr.Chatbot()
33
- msg = gr.Textbox(placeholder="Say something to Krish...")
34
- clear = gr.Button("Clear")
 
35
 
36
- def user_message(text, chat_history):
37
- bot_response = respond(text)
38
- chat_history = chat_history + [(text, bot_response)]
39
- return "", chat_history
40
 
41
- msg.submit(user_message, [msg, chatbot], [msg, chatbot])
42
- clear.click(lambda: [], None, chatbot)
43
-
44
- demo.launch()
45
-
46
- if __name__ == "__main__":
47
- main()
 
1
  import gradio as gr
2
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
+ from langchain_community.llms import HuggingFacePipeline
4
+ from langchain.chains import LLMChain
5
+ from langchain.prompts import PromptTemplate
6
 
7
# --- Model & chain setup (runs once at import time) ---

# Instruction-tuned Gemma 2B; swap for a local path or another HF repo if needed.
model_id = "google/gemma-2b-it"

tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" requires `accelerate`; torch_dtype="auto" keeps the
# checkpoint's native precision (falls back to fp32 on CPU-only machines).
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

# return_full_text=False strips the echoed prompt from the generation output;
# without it, every reply would start with the persona/"User:" prompt text.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,
    return_full_text=False,
)

llm = HuggingFacePipeline(pipeline=pipe)

# Persona prompt: single-turn, no conversation memory — {question} is the
# user's latest message only.
prompt = PromptTemplate.from_template("You are Krish, a wise and witty friend.\n\nUser: {question}\nKrish:")

chain = LLMChain(prompt=prompt, llm=llm)
 
 
21
 
22
# Gradio interface
def chat_fn(message):
    """Run one turn of the user's message through the LLM chain.

    Returns Krish's reply with surrounding whitespace removed.
    """
    return chain.run({"question": message}).strip()
26
 
27
# Simple single-textbox UI: one input, one output, no chat history.
iface = gr.Interface(
    fn=chat_fn,
    inputs="text",
    outputs="text",
    title="🦚 Meet Krish",
    description="A wise, witty, and compassionate friend - KrishWay",
)

iface.launch()