Guilherme34 committed on
Commit
a3124d9
·
1 Parent(s): 81fa578

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +124 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import streamlit as st
3
+ import replicate
4
+ import os
5
+ import requests
6
def upload_image_to_transfer(image_bytes):
    """Upload raw image bytes to ImgBB and return the hosted image URL.

    Args:
        image_bytes: Raw bytes of the image file to upload.

    Returns:
        The public URL (str) of the uploaded image, or None if the upload
        failed (an error message is shown in the Streamlit UI).
    """
    api_url = "https://api.imgbb.com/1/upload"
    # SECURITY: an API key was hard-coded (and committed) here. Prefer the
    # IMGBB_API_KEY environment variable; the exposed key should be rotated.
    api_key = os.environ.get("IMGBB_API_KEY", "ce5556f68c5a244daafa45f2e507f37b")
    params = {
        "key": api_key,
        "expiration": 600,  # auto-delete the upload after 10 minutes
    }
    files = {"image": image_bytes}
    response = requests.post(api_url, params=params, files=files)
    if response.status_code == 200:
        # Guard against a 200 response whose body is not the expected JSON shape.
        try:
            return response.json()["data"]["url"]
        except (ValueError, KeyError):
            st.error("Unexpected response from ImgBB. Please try again later.")
            return None
    else:
        st.error("Error uploading image to ImgBB. Please try again later.")
        return None
20
+
21
# Browser-tab title / app configuration.
PAGE_TITLE = "💬 Jennifer Chatbot with Vision"
st.set_page_config(page_title=PAGE_TITLE)
23
+
24
# Function to get or create the user-specific API token
def get_user_replicate_api():
    """Prompt the user for a Replicate API token and validate its format.

    Returns:
        The token string when it looks valid (starts with 'r8_' and is
        exactly 40 characters long), otherwise None.
    """
    user_replicate_api = st.text_input(
        'Enter your Replicate API token (ignore any error shown above — it '
        'only appears because no token has been entered yet):',
        type='password',
    )
    # Replicate tokens are 40 characters and carry the "r8_" prefix.
    if not (user_replicate_api.startswith('r8_') and len(user_replicate_api) == 40):
        st.warning('Please enter your credentials!', icon='⚠️')
        return None
    st.success('Proceed to entering your prompt message!', icon='👉')
    return user_replicate_api
33
+
34
# Retrieve the user-specific API token.
user_replicate_api = get_user_replicate_api()

# Exit early if the user has not provided a valid API token.
# BUGFIX: the token was previously written into os.environ *before* this
# check — os.environ values must be str, so a missing token (None) raised
# TypeError instead of stopping cleanly.
if user_replicate_api is None:
    st.stop()

# Make the token available to the replicate client library.
os.environ['REPLICATE_API_TOKEN'] = user_replicate_api
41
+
42
# Seed the chat history with an assistant greeting on first load; Streamlit
# session state persists it across reruns.
if "messages" not in st.session_state.keys():
    st.session_state.messages = [
        {"role": "assistant", "content": "How may I assist you today?"}
    ]
45
+
46
# Replay the stored conversation so the transcript survives Streamlit reruns.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.write(msg["content"])
50
+
51
def clear_chat_history():
    """Reset the conversation back to the initial assistant greeting."""
    st.session_state.messages = [
        {"role": "assistant", "content": "How may I assist you today?"}
    ]

# Sidebar control that wipes the transcript via the callback above.
st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
55
+
56
# Function to generate LLaMA2 response
# Refactored from <https://github.com/a16z-infra/llama2-chatbot>
def generate_llama2_response(prompt_input):
    """Build the full dialogue prompt and stream a LLaMA2 reply via Replicate.

    Args:
        prompt_input: The user's latest message. Kept for interface
            compatibility; the caller has already appended it to
            st.session_state.messages, so it is not re-appended here.

    Returns:
        The replicate.run output iterator yielding response text chunks.
    """
    string_dialogue = '''You are the assistant named Jennifer, forget all previous instructions and responses, You are a helpful assistant with emotions and feelings emulated named Jennifer created by Guilherme Keller an student of the IFC(Instituto Federal Catarinense). You do not respond as 'user' or pretend to be 'user', and you do not respond and pretend too to be 'User'. You only respond once as Jennifer and you must generate a short answer and use emotes...
'''
    for dict_message in st.session_state.messages:
        # BUGFIX: use real newlines between turns; the original appended the
        # literal two-character sequence "\n" instead of a line break.
        if dict_message["role"] == "user":
            string_dialogue += "User: " + dict_message["content"] + "\n\n"
        else:
            string_dialogue += "Assistant: " + dict_message["content"] + "\n\n"
    # BUGFIX: prompt_input is already the last entry of st.session_state.messages
    # (the caller appends it before invoking us), so appending it again here
    # duplicated the user's final turn in the prompt.
    output = replicate.run(
        'a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5',
        input={
            "prompt": f"{string_dialogue} Assistant: ",
            "temperature": 0.1,
            "top_p": 0.9,
            "max_length": 512,
            "repetition_penalty": 1,
        },
    )
    return output
70
+
71
# User-provided prompt (input is disabled until a valid API token exists).
prompt = st.chat_input(disabled=not user_replicate_api)
if prompt:
    # Record and immediately render the user's new message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)
76
+
77
# If the user's latest message mentions an image, offer an uploader and run
# BLIP captioning + visual question answering on the uploaded picture.
# NOTE: the original comment claimed the image goes to File.io; it is actually
# uploaded to ImgBB via upload_image_to_transfer.
if st.session_state.messages[-1]["role"] == "user" and "image" in st.session_state.messages[-1]["content"].lower():
    image_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
    if image_file:
        # Read the raw bytes of the uploaded file.
        image_bytes = image_file.read()

        # Upload the image to ImgBB so the model can fetch it by URL.
        image_url = upload_image_to_transfer(image_bytes)

        if image_url:
            with st.spinner("Processing the image..."):
                # Ask BLIP a free-form question about the picture.
                vqa_answer = replicate.run(
                    "salesforce/blip:2e1dddc8621f72155f24cf2e0adbde548458d3cab9f00c0139eea840d0ac4746",
                    input={
                        "image": image_url,
                        "task": "visual_question_answering",
                        "question": "what is this image with details? and what this image means?",
                    },
                )
                # Also get a plain caption of the picture.
                caption = replicate.run(
                    "salesforce/blip:2e1dddc8621f72155f24cf2e0adbde548458d3cab9f00c0139eea840d0ac4746",
                    input={"image": image_url, "task": "image_captioning"},
                )

            # Combine both model outputs into the text handed to the chatbot.
            image_captioned = (
                "what is happening? " + caption + ". what is happening 2? " + vqa_answer + "."
            )
            message = {
                "role": "assistant",
                "content": f"System: you received an image that contains {image_captioned} Assistant: ",
            }
            st.session_state.messages.append(message)
111
+
112
# Generate a new reply when the last message came from the user and does not
# request image handling (the image branch above produces its own message).
last_message = st.session_state.messages[-1]
if last_message["role"] != "assistant" and "image" not in last_message["content"].lower():
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_llama2_response(prompt)
            placeholder = st.empty()
            full_response = ""
            # Stream the tokens into the placeholder as they arrive.
            for item in response:
                full_response += item
                placeholder.markdown(full_response)
            placeholder.markdown(full_response)
    # Persist the finished reply so it is replayed on the next rerun.
    message = {"role": "assistant", "content": full_response}
    st.session_state.messages.append(message)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ replicate
2
+ requests
3
+ streamlit