parthib07 committed on
Commit
7bdcb82
·
verified ·
1 Parent(s): 896d144

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -66
app.py CHANGED
@@ -1,66 +1,66 @@
1
- import streamlit as st
2
- from langchain.prompts import ChatPromptTemplate
3
- from langchain.schema import SystemMessage, HumanMessage
4
- from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
5
- from dotenv import load_dotenv
6
- import os
7
-
8
- load_dotenv()
9
- HF_TOKEN = os.environ.get('HF_TOKEN')
10
- os.environ['HF_TOKEN'] = HF_TOKEN
11
-
12
- llm = HuggingFaceEndpoint(
13
- repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
14
- task="text-generation",
15
- huggingfacehub_api_token=HF_TOKEN
16
- )
17
- llm = ChatHuggingFace(llm=llm)
18
-
19
def getLLamaresponse(input_text, no_words, blog_style):
    """Generate a short blog post via the hosted chat model.

    Args:
        input_text: Blog topic entered by the user.
        no_words: Desired word count (raw string from a text input).
        blog_style: Target audience, e.g. 'Researchers'.

    Returns:
        The generated blog text, or a user-facing warning/error string.
    """
    if not input_text or not no_words:
        return "⚠️ Please enter a valid topic and word count."

    # `no_words` comes from st.text_input, so it is an arbitrary string;
    # reject anything that is not a positive integer before calling the model.
    if not str(no_words).strip().isdigit() or int(no_words) <= 0:
        return "⚠️ Please enter a valid topic and word count."

    try:
        template = """
        Write a blog for {blog_style} job profile on the topic "{input_text}"
        within {no_words} words.
        """

        prompt = ChatPromptTemplate.from_template(template)

        messages = [
            SystemMessage(content="You are a helpful blog writer."),
            HumanMessage(content=prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
        ]

        response = llm.invoke(messages)
        return response.content if response else "⚠️ No response from the model."

    except Exception as e:
        # Surface network/model failures to the UI instead of crashing the app.
        return f"❌ Error: {str(e)}"
41
-
42
-
43
# --- Page configuration -------------------------------------------------
st.set_page_config(
    page_title="Generate Blogs",
    page_icon='🤖',
    layout='centered',
    initial_sidebar_state='collapsed',
)

st.header("Generate Blogs 🤖")

# --- Input widgets ------------------------------------------------------
input_text = st.text_input("Enter the Blog Topic")

left_col, right_col = st.columns([5, 5])
with left_col:
    no_words = st.text_input('Number of Words')
with right_col:
    blog_style = st.selectbox(
        'Writing the blog for',
        ('Researchers', 'Data Scientist', 'Common People'),
        index=0,
    )

# --- Generation ---------------------------------------------------------
# Rerun-on-click: when the button is pressed, generate and render the blog.
if st.button("Generate"):
    st.write(getLLamaresponse(input_text, no_words, blog_style))
 
1
+ import streamlit as st
2
+ from langchain.prompts import ChatPromptTemplate
3
+ from langchain.schema import SystemMessage, HumanMessage
4
+ from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
5
+ from dotenv import load_dotenv
6
+ import os
7
+
8
# Load variables from a local .env file (if present) so HF_TOKEN is
# available both locally and on deployed hosts.
load_dotenv()
HF_TOKEN = os.environ.get('HF_TOKEN')
if not HF_TOKEN:
    # Fail fast with a clear message instead of the opaque TypeError that
    # os.environ['HF_TOKEN'] = None would raise.
    raise RuntimeError("HF_TOKEN is not set; add it to your environment or .env file.")
os.environ['HF_TOKEN'] = HF_TOKEN

# Hosted text-generation endpoint, wrapped in a chat-message interface.
llm = HuggingFaceEndpoint(
    repo_id="tiiuae/falcon-7b-instruct",
    task="text-generation",
    huggingfacehub_api_token=HF_TOKEN,
)
llm = ChatHuggingFace(llm=llm)
18
+
19
def getLLamaresponse(input_text, no_words, blog_style):
    """Generate a short blog post via the hosted chat model.

    Args:
        input_text: Blog topic entered by the user.
        no_words: Desired word count (raw string from a text input).
        blog_style: Target audience, e.g. 'Researchers'.

    Returns:
        The generated blog text, or a user-facing warning/error string.
    """
    if not input_text or not no_words:
        return "⚠️ Please enter a valid topic and word count."

    # `no_words` comes from st.text_input, so it is an arbitrary string;
    # reject anything that is not a positive integer before calling the model.
    if not str(no_words).strip().isdigit() or int(no_words) <= 0:
        return "⚠️ Please enter a valid topic and word count."

    try:
        template = """
        Write a blog for {blog_style} job profile on the topic "{input_text}"
        within {no_words} words.
        """

        prompt = ChatPromptTemplate.from_template(template)

        messages = [
            SystemMessage(content="You are a helpful blog writer."),
            HumanMessage(content=prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
        ]

        response = llm.invoke(messages)
        return response.content if response else "⚠️ No response from the model."

    except Exception as e:
        # Surface network/model failures to the UI instead of crashing the app.
        return f"❌ Error: {str(e)}"
41
+
42
+
43
# --- Page configuration -------------------------------------------------
st.set_page_config(
    page_title="Generate Blogs",
    page_icon='🤖',
    layout='centered',
    initial_sidebar_state='collapsed',
)

st.header("Generate Blogs 🤖")

# --- Input widgets ------------------------------------------------------
input_text = st.text_input("Enter the Blog Topic")

left_col, right_col = st.columns([5, 5])
with left_col:
    no_words = st.text_input('Number of Words')
with right_col:
    blog_style = st.selectbox(
        'Writing the blog for',
        ('Researchers', 'Data Scientist', 'Common People'),
        index=0,
    )

# --- Generation ---------------------------------------------------------
# Rerun-on-click: when the button is pressed, generate and render the blog.
if st.button("Generate"):
    st.write(getLLamaresponse(input_text, no_words, blog_style))