Commit: download at runtime

Files changed:
- .gitignore (+2 -1)
- app.py (+61 -46)
.gitignore CHANGED
@@ -1,4 +1,5 @@
 .venv
 .env
 __pycache__
-models
+models
+*.bin
app.py CHANGED
@@ -1,75 +1,90 @@
+import requests
 import streamlit as st
-from langchain.prompts import PromptTemplate
 from langchain.llms import CTransformers
+from langchain.prompts import PromptTemplate
+import os

-from
-def getLLMResponse(form_input,email_sender,email_recipient,email_style):
-    #llm = OpenAI(temperature=.9, model="text-davinci-003")
-    #enabling efficient deployment on resource-limited devices, reducing model size, and maintaining performance.
-    #C Transformers offers support for various open-source models,
-    #among them popular ones like Llama, GPT4All-J, MPT, and Falcon.
-    llm = CTransformers(model='models/llama-2-7b-chat.ggmlv3.q8_0.bin', #https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
-                        model_type='llama',
-                        config={'max_new_tokens': 256,
-                                'temperature': 0.01})
-    # llm = AutoModel.from_pretrained("TheBloke/Llama-2-7B-Chat-GGML")
-    #Template for building the PROMPT
+def download_model() -> None:
+    """
+    Downloads the model from the provided URL and saves it to the current directory.
+    """
+    url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q8_0.bin'
+    file_name = url.split('/')[-1]
+
+    response = requests.get(url, stream=True)
+
+    with open(file_name, 'wb') as file:
+        for chunk in response.iter_content(chunk_size=1024):
+            if chunk:
+                file.write(chunk)
+
+    print("File downloaded successfully!")
+
+
+def getLLMResponse(form_input: str, email_sender: str, email_recipient: str, email_style: str) -> str:
+    """
+    Generates a response using the LLM model.
+
+    :param form_input: Email topic provided by the user.
+    :param email_sender: Sender name provided by the user.
+    :param email_recipient: Recipient name provided by the user.
+    :param email_style: Writing style provided by the user.
+    :return: Generated response.
+    """
+    llm = CTransformers(model='llama-2-7b-chat.ggmlv3.q8_0.bin',
+                        model_type='llama',
+                        config={'max_new_tokens': 256,
+                                'temperature': 0.01})
+
     template = """
-    Write
+    Write an email with {style} style and includes topic :{email_topic}.\n\nSender: {sender}\nRecipient: {recipient}
     \n\nEmail Text:

     """

-    #Creating the final PROMPT
     prompt = PromptTemplate(
-    input_variables=["style","email_topic","sender","recipient"],
+        input_variables=["style", "email_topic", "sender", "recipient"],
     template=template,)

-    #Generating the response using LLM
-    response=llm(prompt.format(email_topic=form_input,sender=email_sender,recipient=email_recipient,style=email_style))
+    response = llm(prompt.format(email_topic=form_input, sender=email_sender, recipient=email_recipient, style=email_style))
     print(response)

     return response


 st.set_page_config(page_title="Generate Emails",
+                   page_icon='📧',
+                   layout='centered',
+                   initial_sidebar_state='collapsed')
 st.header("Generate Emails 📧")

+model_loaded = st.session_state.get('model_loaded', False)
+
+if not model_loaded:
+    if st.button('Load Model'):
+        model_file = 'llama-2-7b-chat.ggmlv3.q8_0.bin'
+        if not os.path.isfile(model_file):
+            st.info('Loading the model, this could take ~5 minutes')
+            download_model()
+        st.session_state.model_loaded = True
+        st.info('Model loaded successfully')
+
+if st.session_state.get('model_loaded'):
+    form_input = st.text_area('Enter the email topic', height=275)
+
+    col1, col2, col3 = st.columns([10, 10, 5])
+    with col1:
+        email_sender = st.text_input('Sender Name')
+    with col2:
+        email_recipient = st.text_input('Recipient Name')
+    with col3:
+        email_style = st.selectbox('Writing Style',
                                    ('Formal', 'Appreciating', 'Not Satisfied', 'Neutral'),
+                                   index=0)

-submit = st.button("Generate")
+    submit = st.button("Generate")

+    if submit:
+        st.write(getLLMResponse(form_input, email_sender, email_recipient, email_style))
+else:
+    st.write("Please load the model to proceed.")
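Two implementation notes on the new app.py, as sketches rather than drop-in fixes.

The streaming download loop writes whatever the server returns, so a 404 or rate-limit response would be saved to disk as the model file. A minimal hardening sketch; the timeout value and the raise_for_status() call are additions, not part of the commit:

import requests

url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q8_0.bin'
response = requests.get(url, stream=True, timeout=60)
response.raise_for_status()  # abort on a 4xx/5xx instead of saving an error page as the .bin

An alternative the commit does not use, assuming the huggingface_hub package is installed, is to let the Hub client manage the download and local cache:

from huggingface_hub import hf_hub_download

# Downloads into the local Hugging Face cache and returns the file path.
model_path = hf_hub_download(repo_id="TheBloke/Llama-2-7B-Chat-GGML",
                             filename="llama-2-7b-chat.ggmlv3.q8_0.bin")

Note that hf_hub_download returns a path inside the cache rather than the bare filename app.py expects, so the CTransformers call would take model_path instead.

Separately, getLLMResponse() constructs a fresh CTransformers instance on every "Generate" click, reloading the roughly 7 GB weights file each time. A sketch of caching the instance across reruns, assuming a Streamlit version that provides st.cache_resource (1.18+):

import streamlit as st
from langchain.llms import CTransformers

@st.cache_resource
def load_llm() -> CTransformers:
    # Constructed once per process; later reruns and sessions reuse it.
    return CTransformers(model='llama-2-7b-chat.ggmlv3.q8_0.bin',
                         model_type='llama',
                         config={'max_new_tokens': 256, 'temperature': 0.01})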