kshitij230 committed on
Commit
30ace8f
·
verified ·
1 Parent(s): a7dafec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -19
app.py CHANGED
@@ -1,14 +1,13 @@
1
  import streamlit as st
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
- # Load the model and tokenizer from Hugging Face
5
@st.cache_resource
def load_model():
    """Download (once) and cache the Eunoia tokenizer and causal-LM model.

    Cached by Streamlit so reruns of the script reuse the loaded weights
    instead of reloading them on every interaction.
    """
    tok = AutoTokenizer.from_pretrained("kshitij230/Eunoia")
    lm = AutoModelForCausalLM.from_pretrained("kshitij230/Eunoia")
    return tok, lm


tokenizer, model = load_model()
12
 
13
  # Streamlit page config
14
  st.set_page_config(page_title="Eunoia 💜", layout="centered")
@@ -32,32 +31,28 @@ if user_input:
32
  st.chat_message("user").markdown(user_input)
33
  st.session_state.messages.append({"role": "user", "content": user_input})
34
 
35
- # Build conversation prompt (simple format)
36
  prompt = ""
37
  for msg in st.session_state.messages:
38
  role = "User" if msg["role"] == "user" else "Bot"
39
  prompt += f"{role}: {msg['content']}\n"
40
  prompt += "Bot:"
41
 
42
- # Tokenize the input prompt
43
- inputs = tokenizer(prompt, return_tensors="pt")
44
-
45
  # Generate model response
46
  with st.spinner("Eunoia is listening..."):
47
- outputs = model.generate(
48
- inputs['input_ids'],
49
  max_length=200,
50
- pad_token_id=tokenizer.eos_token_id,
51
  do_sample=True,
52
  temperature=0.7,
53
  top_k=50,
54
  top_p=0.95,
55
  repetition_penalty=1.2,
56
- stop_token=tokenizer.eos_token
57
- )
58
-
59
- reply = tokenizer.decode(outputs[0], skip_special_tokens=True)
60
- reply = reply[len(prompt):].strip() # Remove the prompt portion from the reply
61
 
62
  # Display bot reply
63
  st.chat_message("assistant").markdown(reply)
 
1
  import streamlit as st
2
+ from transformers import pipeline
3
 
4
+ # Load the pipeline from Hugging Face
5
@st.cache_resource
def load_pipeline():
    """Build (once) and cache the text-generation pipeline for Eunoia.

    st.cache_resource keeps the pipeline alive across Streamlit reruns,
    avoiding a model reload on every user interaction.
    """
    return pipeline("text-generation", model="kshitij230/Eunoia")


pipe = load_pipeline()
11
 
12
# Configure the Streamlit page (tab title + centred layout).
st.set_page_config(layout="centered", page_title="Eunoia 💜")
 
31
  st.chat_message("user").markdown(user_input)
32
  st.session_state.messages.append({"role": "user", "content": user_input})
33
 
34
+ # Build conversation prompt
35
  prompt = ""
36
  for msg in st.session_state.messages:
37
  role = "User" if msg["role"] == "user" else "Bot"
38
  prompt += f"{role}: {msg['content']}\n"
39
  prompt += "Bot:"
40
 
 
 
 
41
  # Generate model response
42
  with st.spinner("Eunoia is listening..."):
43
+ response = pipe(
44
+ prompt,
45
  max_length=200,
 
46
  do_sample=True,
47
  temperature=0.7,
48
  top_k=50,
49
  top_p=0.95,
50
  repetition_penalty=1.2,
51
+ pad_token_id=pipe.tokenizer.eos_token_id
52
+ )[0]["generated_text"]
53
+
54
+ # Remove the prompt from the response
55
+ reply = response[len(prompt):].strip()
56
 
57
  # Display bot reply
58
  st.chat_message("assistant").markdown(reply)