Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -16,15 +16,12 @@ def gen_show_caption(sub_prompt=None, cap_prompt=""):
|
|
| 16 |
st.markdown(
|
| 17 |
f"""
|
| 18 |
<style>
|
| 19 |
-
red{{
|
| 20 |
-
|
| 21 |
-
}}
|
| 22 |
-
blue{{
|
| 23 |
-
color:#2a72d5
|
| 24 |
-
}}
|
| 25 |
</style>
|
| 26 |
|
| 27 |
-
|
| 28 |
""",
|
| 29 |
unsafe_allow_html=True,
|
| 30 |
)
|
|
@@ -65,7 +62,7 @@ cap_prompt = st.sidebar.text_input("Write the start of your caption below", valu
|
|
| 65 |
_ = st.sidebar.button("Regenerate Caption")
|
| 66 |
|
| 67 |
|
| 68 |
-
st.sidebar.title("Advanced Options
|
| 69 |
num_captions = st.sidebar.select_slider(
|
| 70 |
"Number of Captions to Predict", options=[1, 2, 3, 4, 5], value=1
|
| 71 |
)
|
|
@@ -76,15 +73,6 @@ nuc_size = st.sidebar.slider(
|
|
| 76 |
value=0.8,
|
| 77 |
step=0.05,
|
| 78 |
)
|
| 79 |
-
st.sidebar.markdown(
|
| 80 |
-
"""
|
| 81 |
-
*Please note that this model was explicitly not trained on images of people, and as a result is not designed to caption images with humans.
|
| 82 |
-
|
| 83 |
-
This demo accompanies our paper RedCaps.
|
| 84 |
-
|
| 85 |
-
Created by Karan Desai, Gaurav Kaul, Zubin Aysola, Justin Johnson
|
| 86 |
-
"""
|
| 87 |
-
)
|
| 88 |
# ----------------------------------------------------------------------------
|
| 89 |
|
| 90 |
virtexModel.model.decoder.nucleus_size = nuc_size
|
|
@@ -113,8 +101,12 @@ st.markdown("""
|
|
| 113 |
Caption your own images or try out some of our sample images.
|
| 114 |
You can also generate captions as if they are from specific subreddits,
|
| 115 |
as if they start with a particular prompt, or even both.
|
| 116 |
-
|
| 117 |
Tweet your results with `#redcaps`!
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
""")
|
| 119 |
|
| 120 |
_, center, _ = st.columns([1, 18, 1])
|
|
|
|
| 16 |
st.markdown(
|
| 17 |
f"""
|
| 18 |
<style>
|
| 19 |
+
red {{ color:#c62828; font-size: 1.5rem }}
|
| 20 |
+
blue {{ color:#2a72d5; font-size: 1.5rem }}
|
| 21 |
+
remaining {{ color: black; font-size: 1.5rem }}
|
|
|
|
|
|
|
|
|
|
| 22 |
</style>
|
| 23 |
|
| 24 |
+
- <red> r/{subreddit} </red> <blue> {cap_prompt} </blue><remaining> {caption} </remaining>
|
| 25 |
""",
|
| 26 |
unsafe_allow_html=True,
|
| 27 |
)
|
|
|
|
| 62 |
_ = st.sidebar.button("Regenerate Caption")
|
| 63 |
|
| 64 |
|
| 65 |
+
st.sidebar.title("Advanced Options")
|
| 66 |
num_captions = st.sidebar.select_slider(
|
| 67 |
"Number of Captions to Predict", options=[1, 2, 3, 4, 5], value=1
|
| 68 |
)
|
|
|
|
| 73 |
value=0.8,
|
| 74 |
step=0.05,
|
| 75 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
# ----------------------------------------------------------------------------
|
| 77 |
|
| 78 |
virtexModel.model.decoder.nucleus_size = nuc_size
|
|
|
|
| 101 |
Caption your own images or try out some of our sample images.
|
| 102 |
You can also generate captions as if they are from specific subreddits,
|
| 103 |
as if they start with a particular prompt, or even both.
|
|
|
|
| 104 |
Tweet your results with `#redcaps`!
|
| 105 |
+
|
| 106 |
+
**Note:** This model was not trained on images of people,
|
| 107 |
+
hence it may not generate accurate captions describing humans.
|
| 108 |
+
For more details, visit [redcaps.xyz](https://redcaps.xyz) or check out
|
| 109 |
+
our [NeurIPS 2021 paper](https://openreview.net/forum?id=VjJxBi1p9zh).
|
| 110 |
""")
|
| 111 |
|
| 112 |
_, center, _ = st.columns([1, 18, 1])
|