tankkiran commited on
Commit
66da12d
·
verified ·
1 Parent(s): 00a331f

Create app.py new file

Browse files
Files changed (1) hide show
  1. app.py +47 -0
app.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ import streamlit as st
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM
4
+ import torch
5
+
6
# --- Page chrome -----------------------------------------------------------
# Browser-tab title, centered layout, app heading, and a short bilingual
# (Hindi/English) tagline for the chatbot UI.
st.set_page_config(page_title="RaytikGPT", layout="centered")
st.title("🧠 RaytikGPT")
st.markdown("आपका अपना GPT-जैसा चैटबॉट — हिंदी और English दोनों में पूछें")
9
+
10
@st.cache_resource
def load_model():
    """Load and cache the Zephyr-7B chat model and its tokenizer.

    Decorated with ``st.cache_resource`` so the multi-gigabyte weights are
    fetched once per Streamlit server process instead of on every script
    rerun.

    Returns:
        tuple: ``(tokenizer, model)`` ready for text generation.
    """
    model_id = "HuggingFaceH4/zephyr-7b-beta"
    tok = AutoTokenizer.from_pretrained(model_id)
    # float16 halves the memory footprint; device_map="auto" lets
    # accelerate place layers across the available GPU/CPU devices.
    lm = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    return tok, lm
21
tokenizer, model = load_model()

# Chat history must live in session_state to survive Streamlit reruns.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

user_input = st.text_input("✍️ आप क्या पूछना चाहेंगे?", placeholder="e.g. I want to learn React JS")

if st.button("भेजें") and user_input:
    st.session_state.chat_history.append(("🧑‍💻 आप", user_input))

    inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
    # inference_mode: generation needs no autograd graph; skipping it saves
    # memory and time on every request.
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,
            pad_token_id=tokenizer.eos_token_id,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
        )
    # Strip the prompt by token position rather than str.replace(): replace()
    # deletes the question anywhere the model echoes it inside the answer,
    # and fails outright when detokenization does not round-trip the input
    # text exactly.
    prompt_len = inputs["input_ids"].shape[-1]
    reply = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()

    st.session_state.chat_history.append(("🤖 RaytikGPT", reply))

# Re-render the full conversation on every rerun.
for sender, msg in st.session_state.chat_history:
    st.markdown(f"**{sender}:** {msg}")