aarukarthiga committed on
Commit
7ade9d3
·
1 Parent(s): 7c73c89

Added large files with LFS

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Llama-Blog-Generation.webm filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from langchain.prompts import PromptTemplate
3
+ from langchain_community.llms import CTransformers
4
+ import time
5
+
6
# Audience options offered in the "Writing the blog for" dropdown below;
# the selected value is interpolated into the generation prompt.
BLOG_STYLES = [
    'Researchers',
    'Data Scientist',
    'Common People',
    'Software Engineers',
    'Product Managers',
    'Healthcare Professionals',
    'Teachers',
    'Entrepreneurs',
    'Marketers',
    'Students'
]
19
+
20
# Function to get response from LLama 2 model
def getLLamaResponse(input_text, no_words, blog_style,
                     model_path='models/llama-2-7b-chat.ggmlv3.q8_0.bin'):
    """Generate a blog post with a local LLama 2 GGML model.

    Args:
        input_text: Topic the blog should cover.
        no_words: Approximate word budget for the blog (string or int).
        blog_style: Audience/job profile the blog is written for.
        model_path: Path to the GGML model file. Defaults to the bundled
            7B chat model, so existing callers are unaffected.

    Returns:
        The raw text completion produced by the model.
    """
    # Near-zero temperature keeps output close to deterministic for the
    # same topic; max_new_tokens caps generation length.
    llm = CTransformers(model=model_path,
                        model_type='llama',
                        config={'max_new_tokens': 256, 'temperature': 0.01})

    # Prompt template: the three placeholders are filled in below.
    template = """
        Write a blog for {blog_style} job profile for a topic {input_text}
        within {no_words} words.
            """
    prompt = PromptTemplate(
        input_variables=["blog_style", "input_text", "no_words"],
        template=template,
    )

    # Generate the response from the LLama 2 model.
    response = llm(prompt.format(blog_style=blog_style,
                                 input_text=input_text,
                                 no_words=no_words))
    return response
37
+
38
# Function to generate candidate blog topics from the LLama 2 model
def generate_topics_from_llama(input_text,
                               model_path='models/llama-2-7b-chat.ggmlv3.q8_0.bin'):
    """Ask the local LLama 2 model to suggest blog topics for some keywords.

    Args:
        input_text: Keyword(s) to base the topic suggestions on.
        model_path: Path to the GGML model file. Defaults to the bundled
            7B chat model, so existing callers are unaffected.

    Returns:
        A list of non-empty, whitespace-stripped topic strings — one per
        line of the model's output.
    """
    # Same model configuration as getLLamaResponse: near-deterministic,
    # capped at 256 new tokens.
    llm = CTransformers(model=model_path,
                        model_type='llama',
                        config={'max_new_tokens': 256, 'temperature': 0.01})

    # Prompt template for topic generation.
    topic_template = """
        Generate a list of blog topics based on the keywords: {input_text}
        """
    prompt = PromptTemplate(input_variables=["input_text"], template=topic_template)

    topics_response = llm(prompt.format(input_text=input_text))
    # The model emits one topic per line; drop blank lines and trim each entry.
    return [line.strip() for line in topics_response.split('\n') if line.strip()]
56
+
57
# Set up the Streamlit page configuration (must run before other st calls).
st.set_page_config(page_title="LLAMA 2 Generate Blogs",
                   page_icon='images/favicon.ico',  # Updated favicon source
                   layout='centered',
                   initial_sidebar_state='collapsed')

# Company logo rendered above the page header.
st.image('images/ConcertIDC_Logo_Stack.png', width=50, caption='')

# Page header rendered as raw HTML so it can carry inline styling.
st.markdown(
    """
    <h1 style="display:flex; align-items:center;">
    LLAMA 2 Generate Blogs
    </h1>
    """,
    unsafe_allow_html=True
)

# Session state: Streamlit re-executes this whole script on every widget
# interaction, so generated topics and the chosen topic are persisted in
# st.session_state to survive reruns.
if 'topics' not in st.session_state:
    st.session_state.topics = []

if 'selected_topic' not in st.session_state:
    st.session_state.selected_topic = None

# Input field for the blog topic keywords.
input_text = st.text_input("Enter the Blog Topic Keywords")

# Button to generate topic suggestions from the keywords.
generate_topics = st.button("Generate Topics")

# Generate and store topics; they are stored in session state so they are
# still available on the next rerun (e.g. when a selectbox is used).
if generate_topics:
    with st.spinner('Generating topics...'):
        st.session_state.topics = generate_topics_from_llama(input_text)
        time.sleep(2)  # Simulate processing time

# Offer the generated topics for selection (only once some exist).
if st.session_state.topics:
    # st.markdown("### Suggested Topics")
    # for topic in st.session_state.topics:
    #     st.markdown(f"- {topic}")

    # Selection for one of the topics; persisted for the generation step.
    selected_topic = st.selectbox('Select a Topic', st.session_state.topics)
    st.session_state.selected_topic = selected_topic

# Optional input field for the number of words.
no_words = st.text_input('Number of Words (optional)', value='')

# Dropdown selection for the blog style / target audience.
blog_style = st.selectbox('Writing the blog for', BLOG_STYLES, index=0)

# Button to generate the blog content itself.
generate_blog = st.button("Generate Blog Content")

# Display the generated blog response.
if generate_blog:
    with st.spinner('Generating blog content...'):
        if no_words == '':
            no_words = '500'  # Default to 500 words if not provided
        # NOTE(review): st.session_state.selected_topic is still None if the
        # user never generated/selected a topic — confirm this is intended
        # before shipping (the prompt would then contain "None").
        response = getLLamaResponse(st.session_state.selected_topic, no_words, blog_style)
        time.sleep(2)  # Simulate processing time
        st.write(response)
favicon.ico ADDED
images/ConcertIDC_Logo_Stack.png ADDED
images/favicon.ico ADDED
models/llama-2-7b-chat.ggmlv3.q8_0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bfdde943555c78294626a6ccd40184162d066d39774bd2c98dae24943d32cc3
3
+ size 7160799872
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ sentence-transformers
2
+ uvicorn
3
+ ctransformers
4
+ langchain
5
+ python-box
6
+ streamlit