lakhera2023 commited on
Commit
4af5891
·
verified ·
1 Parent(s): 1679f66

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -0
app.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from transformers import pipeline

st.title("Text Generator")

# Using Qwen2.5-0.5B-Instruct - efficient 500M parameter model
# Good balance between size and quality
@st.cache_resource
def load_model():
    """Build and cache the text-generation pipeline.

    ``st.cache_resource`` ensures the model is downloaded and loaded only
    once per Streamlit server process, not on every script rerun.
    """
    model_id = "Qwen/Qwen2.5-0.5B-Instruct"
    task = "text-generation"
    return pipeline(task, model=model_id)
pipe = load_model()

text = st.text_input("Enter text to complete or ask a question")

if st.button("Generate"):
    if text:
        with st.spinner("Generating..."):
            # Qwen2.5-Instruct works well with direct text or formatted prompts.
            # Questions get the ChatML instruction template; plain text is
            # completed as-is.
            if text.strip().endswith("?"):
                prompt = f"<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant\n"
            else:
                # For text completion, just use the text directly
                prompt = text

            output = pipe(
                prompt,
                # FIX: the original used max_length=len(prompt.split()) + 100,
                # but max_length counts *tokens* while split() counts *words* —
                # long or non-whitespace-delimited prompts could exhaust the
                # budget before any text was generated. max_new_tokens bounds
                # only the generated continuation, independent of prompt size.
                max_new_tokens=100,
                num_return_sequences=1,
                temperature=0.7,
                do_sample=True,
                top_p=0.9,
                repetition_penalty=1.2,
                pad_token_id=pipe.tokenizer.eos_token_id,
            )
            result = output[0]["generated_text"]

            # Clean up the result - remove the echoed prompt if it was included
            if prompt in result:
                result = result.replace(prompt, "").strip()
            # The instruct model may emit its end-of-turn marker; show only
            # the text before it.
            result = result.split("<|im_end|>")[0].strip()

            st.write("**Generated Text:**")
            st.write(result)
    else:
        # Guard the empty-input case instead of silently doing nothing.
        st.warning("Please enter some text first.")