Ruff formatting
app.py CHANGED
@@ -1,35 +1,48 @@
-import re
 import os
+import re
+
 import bm25s
-import spaces
 import gradio as gr
 import gradio_iframe
+import spaces
 from bm25s.hf import BM25HF
+from citations import inseq_citation, lxt_citation, mirage_citation, pecore_citation
+from examples import examples
+from lxt.functional import add2, mul2, softmax
+from lxt.models.llama import LlamaForCausalLM, attnlrp
 from rerankers import Reranker
+from style import custom_css
+from transformers import AutoTokenizer
 
-from inseq import load_model, register_step_function
+from inseq import load_model, register_step_function
 from inseq.attr import StepFunctionArgs
 from inseq.commands.attribute_context import visualize_attribute_context
+from inseq.commands.attribute_context.attribute_context import (
+    AttributeContextArgs,
+    attribute_context_with_model,
+)
 from inseq.utils.contrast_utils import _setup_contrast_args
-from lxt.models.llama import LlamaForCausalLM, attnlrp
-from transformers import AutoTokenizer
-from lxt.functional import softmax, add2, mul2
-from inseq.commands.attribute_context.attribute_context import attribute_context_with_model, AttributeContextArgs
-
-from style import custom_css
-from citations import pecore_citation, mirage_citation, inseq_citation, lxt_citation
-from examples import examples
 
+model = None
 model_id = "HuggingFaceTB/SmolLM-360M-Instruct"
-ranker = Reranker("answerdotai/answerai-colbert-small-v1", model_type=
+ranker = Reranker("answerdotai/answerai-colbert-small-v1", model_type="colbert")
 retriever = BM25HF.load_from_hub("xhluca/bm25s-nq-index", load_corpus=True, mmap=True)
-
-
-
-
-
-
-
+
+# Model registry to store loaded models
+model_registry = {}
+
+
+def get_model(model_size):
+    model_id = f"HuggingFaceTB/SmolLM-{model_size}-Instruct"
+    if model_id not in model_registry:
+        hf_model = LlamaForCausalLM.from_pretrained(model_id)
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        attnlrp.register(hf_model)
+        model = load_model(hf_model, "saliency", tokenizer=tokenizer)
+        model.bos_token = "<|endoftext|>"
+        model.bos_token_id = 0
+        model_registry[model_id] = model
+    return model_registry[model_id]
 
 
 def lxt_probability_fn(args: StepFunctionArgs):
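For orientation, here is a minimal usage sketch of how the retrieval stack configured above is typically chained, assuming the `bm25s` and `rerankers` APIs and reusing the `retriever` and `ranker` objects from the setup block. The query flow inside `generate` is not shown in this commit, so the variable names below are illustrative:

```python
# Illustrative sketch only: chaining the BM25 retriever and ColBERT reranker
# created in app.py. `query`, `retrieve_k` and `top_k` mirror the demo's UI
# sliders but are assumptions here, not code from the commit.
import bm25s

query = "who wrote the declaration of independence?"
retrieve_k, top_k = 100, 3

# BM25 retrieval over the pre-built NQ index; with load_corpus=True the
# returned records are dicts carrying the passage under "text".
results, scores = retriever.retrieve(bm25s.tokenize(query), k=retrieve_k)
candidates = [doc["text"] for doc in results[0]]

# ColBERT reranking keeps only the most relevant passages for the prompt.
ranked = ranker.rank(query=query, docs=candidates)
context = "\n\n".join(res.document.text for res in ranked.top_k(top_k))
```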
@@ -38,10 +51,11 @@ def lxt_probability_fn(args: StepFunctionArgs):
     logits = softmax(logits, dim=-1)
     return logits.gather(-1, target_ids).squeeze(-1)
 
+
 def lxt_contrast_prob_fn(
     args: StepFunctionArgs,
-    contrast_sources
-    contrast_targets
+    contrast_sources=None,
+    contrast_targets=None,
     contrast_targets_alignments: list[list[tuple[int, int]]] | None = None,
     contrast_force_inputs: bool = False,
     skip_special_tokens: bool = False,
@@ -56,10 +70,11 @@ def lxt_contrast_prob_fn(
     )
     return lxt_probability_fn(c_args)
 
+
 def lxt_contrast_prob_diff_fn(
     args: StepFunctionArgs,
-    contrast_sources
-    contrast_targets
+    contrast_sources=None,
+    contrast_targets=None,
     contrast_targets_alignments: list[list[tuple[int, int]]] | None = None,
     contrast_force_inputs: bool = False,
     skip_special_tokens: bool = False,
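The contrastive step functions above mirror Inseq's built-in contrastive metrics while routing probabilities through LXT's gradient-rule-aware `softmax`. As a reference point, here is a minimal sketch of Inseq's step-function mechanism, with a hypothetical entropy metric and identifier (the real registration call for `lxt_contrast_prob_diff_fn` appears later in this diff):

```python
# Minimal sketch of Inseq's step-function mechanism. The entropy metric and
# the "demo_entropy" identifier are hypothetical, invented for illustration.
import torch

from inseq import register_step_function
from inseq.attr import StepFunctionArgs


def entropy_fn(args: StepFunctionArgs) -> torch.Tensor:
    # Entropy of the next-token distribution at each generation step.
    logits = args.attribution_model.output2logits(args.forward_output)
    probs = logits.softmax(dim=-1)
    return -(probs * probs.clamp_min(1e-12).log()).sum(dim=-1)


register_step_function(entropy_fn, "demo_entropy", overwrite=True)
# Once registered, the identifier can be used as a step score, e.g.
# model.attribute(..., step_scores=["demo_entropy"]).
```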
@@ -78,12 +93,35 @@ def lxt_contrast_prob_diff_fn(
 
 def set_interactive_settings(rag_setting, retrieve_k, top_k, custom_context):
     if rag_setting in ("Retrieve with BM25", "Rerank with ColBERT"):
-        return gr.Slider(interactive=True), gr.Slider(interactive=True), gr.Textbox(placeholder="Context will be retrieved automatically. Change mode to 'Use Custom Context' to specify your own.", interactive=False)
+        return (
+            gr.Slider(interactive=True),
+            gr.Slider(interactive=True),
+            gr.Textbox(
+                placeholder="Context will be retrieved automatically. Change mode to 'Use Custom Context' to specify your own.",
+                interactive=False,
+            ),
+        )
     elif rag_setting == "Use Custom Context":
-        return gr.Slider(interactive=False), gr.Slider(interactive=False), gr.Textbox(placeholder="Insert a custom context...", interactive=True)
+        return (
+            gr.Slider(interactive=False),
+            gr.Slider(interactive=False),
+            gr.Textbox(placeholder="Insert a custom context...", interactive=True),
+        )
+
 
 @spaces.GPU()
-def generate(query, max_new_tokens, top_p, temperature, retrieve_k, top_k, rag_setting, custom_context, model_size, progress=gr.Progress()):
+def generate(
+    query,
+    max_new_tokens,
+    top_p,
+    temperature,
+    retrieve_k,
+    top_k,
+    rag_setting,
+    custom_context,
+    model_size,
+    progress=gr.Progress(),
+):
     global model, model_id
     if rag_setting == "Use Custom Context":
         docs = custom_context.split("\n\n")
@@ -105,11 +143,7 @@ def generate(query, max_new_tokens, top_p, temperature, retrieve_k, top_k, rag_s
     curr_model_id = f"HuggingFaceTB/SmolLM-{model_size}-Instruct"
     if model is None or model.model_name != curr_model_id:
         progress(0.2, desc="Loading model...")
-
-        hf_model = LlamaForCausalLM.from_pretrained(model_id)
-        tokenizer = AutoTokenizer.from_pretrained(model_id)
-        attnlrp.register(hf_model)
-        model = load_model(hf_model, "saliency", tokenizer=tokenizer)
+        model = get_model(model_size)
     progress(0.3, desc="Attributing with LXT...")
     lm_rag_prompting_example = AttributeContextArgs(
         model_name_or_path=model_id,
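This hunk swaps the inline loading block for the `get_model` registry introduced at the top of the file, so switching model sizes in the UI does not rebuild a model that was already prepared. A small illustration of the intended caching behavior (the assertion is ours, not part of the commit):

```python
# Hypothetical illustration of get_model()'s caching behavior:
first = get_model("360M")  # loads SmolLM-360M, registers AttnLRP rules, wraps for Inseq
again = get_model("360M")  # served straight from model_registry, no reload
assert first is again
```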
@@ -125,7 +159,11 @@ def generate(query, max_new_tokens, top_p, temperature, retrieve_k, top_k, rag_s
         context_sensitivity_std_threshold=1,
         decoder_input_output_separator=" ",
         special_tokens_to_keep=["<|im_start|>", "<|endoftext|>"],
-        generation_kwargs={"max_new_tokens": max_new_tokens, "top_p": top_p, "temperature": temperature},
+        generation_kwargs={
+            "max_new_tokens": max_new_tokens,
+            "top_p": top_p,
+            "temperature": temperature,
+        },
         attribution_aggregators=["sum"],
         rescale_attributions=True,
        save_path=os.path.join(os.path.dirname(__file__), "outputs/output.json"),
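For context: `AttributeContextArgs` configures the end-to-end PECoRe pipeline. Judging from the imports at the top of app.py, the configured args are presumably consumed roughly as sketched below; the actual continuation of `generate` is outside this hunk, so treat this as an assumption rather than the demo's verbatim code.

```python
# Hedged sketch of how AttributeContextArgs is typically consumed; the exact
# handling of the output inside generate() is not visible in this diff.
pecore_output = attribute_context_with_model(lm_rag_prompting_example, model)
# visualize_attribute_context renders the token-level attribution view that
# the demo embeds in its iframe (output handling here is an assumption).
visualize_attribute_context(pecore_output)
```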
@@ -144,17 +182,21 @@ def generate(query, max_new_tokens, top_p, temperature, retrieve_k, top_k, rag_s
             label="🔍 Download HTML",
             value=os.path.join(os.path.dirname(__file__), "outputs/output.html"),
             visible=True,
-        )
+        ),
     ]
 
 
-register_step_function(lxt_contrast_prob_diff_fn, "lxt_contrast_prob_diff", overwrite=True)
+register_step_function(
+    lxt_contrast_prob_diff_fn, "lxt_contrast_prob_diff", overwrite=True
+)
 
 
 with gr.Blocks(css=custom_css) as demo:
     with gr.Row():
         with gr.Column(min_width=500):
-            gr.HTML('<h1><img src="file/img/mirage_logo_white_contour.png" width=300px /></h1>')
+            gr.HTML(
+                '<h1><img src="file/img/mirage_logo_white_contour.png" width=300px /></h1>'
+            )
             text = gr.Markdown(
                 "This demo showcases an end-to-end usage of model internals for RAG answer attribution with the <a href='https://openreview.net/forum?id=XTHfNGI3zT' target='_blank'>PECoRe</a> framework, as described in our <a href='https://arxiv.org/abs/2406.13663' target='_blank'>MIRAGE</a> paper.<br>"
                 "Insert a query to retrieve relevant contexts, generate an answer and attribute its context-sensitive components. An interactive <a href='https://github.com/google-deepmind/treescope' target='_blank'>Treescope</a> visualization will appear in the green square.<br>"
@@ -182,18 +224,36 @@ with gr.Blocks(css=custom_css) as demo:
                     ["135M", "360M", "1.7B"],
                     value="360M",
                     label="Model size",
-                    interactive=True
+                    interactive=True,
                 )
             with gr.Row():
                 rag_setting = gr.Radio(
-                    ["Retrieve with BM25", "Rerank with ColBERT", "Use Custom Context"],
+                    [
+                        "Retrieve with BM25",
+                        "Rerank with ColBERT",
+                        "Use Custom Context",
+                    ],
                     value="Rerank with ColBERT",
                     label="Mode",
-                    interactive=True
+                    interactive=True,
                 )
             with gr.Row():
-                retrieve_k = gr.Slider(1, 500, value=100, step=1, label="# Docs to Retrieve", interactive=True)
-                top_k = gr.Slider(1, 10, value=3, step=1, label="# Docs in Context", interactive=True)
+                retrieve_k = gr.Slider(
+                    1,
+                    500,
+                    value=100,
+                    step=1,
+                    label="# Docs to Retrieve",
+                    interactive=True,
+                )
+                top_k = gr.Slider(
+                    1,
+                    10,
+                    value=3,
+                    step=1,
+                    label="# Docs in Context",
+                    interactive=True,
+                )
                 custom_context = gr.Textbox(
                     placeholder="Context will be retrieved automatically. Change mode to 'Use Custom Context' to specify your own.",
                     label="Custom context",
@@ -201,18 +261,51 @@ with gr.Blocks(css=custom_css) as demo:
                     lines=4,
                 )
             with gr.Row():
-                max_new_tokens = gr.Slider(0, 500, value=50, step=5.0, label="Max new tokens", interactive=True)
-                top_p = gr.Slider(0, 1, value=1, step=0.01, label="Top P", interactive=True)
-                temperature = gr.Slider(0, 1, value=0, step=0.01, label="Temperature", interactive=True)
+                max_new_tokens = gr.Slider(
+                    0,
+                    500,
+                    value=50,
+                    step=5.0,
+                    label="Max new tokens",
+                    interactive=True,
+                )
+                top_p = gr.Slider(
+                    0, 1, value=1, step=0.01, label="Top P", interactive=True
+                )
+                temperature = gr.Slider(
+                    0, 1, value=0, step=0.01, label="Temperature", interactive=True
+                )
             with gr.Accordion("📝 Citation", open=False):
-                gr.Markdown("Using PECoRe for model internals-based RAG answer attribution is discussed in:")
-                gr.Code(mirage_citation, interactive=False, label="MIRAGE (Qi, Sarti et al., 2024)")
+                gr.Markdown(
+                    "Using PECoRe for model internals-based RAG answer attribution is discussed in:"
+                )
+                gr.Code(
+                    mirage_citation,
+                    interactive=False,
+                    label="MIRAGE (Qi, Sarti et al., 2024)",
+                )
                 gr.Markdown("To refer to the original PECoRe paper, cite:")
-                gr.Code(pecore_citation, interactive=False, label="PECoRe (Sarti et al., 2024)")
-                gr.Markdown('The Inseq implementation used in this work (<a href="https://inseq.org/en/latest/main_classes/cli.html#attribute-context"><code>inseq attribute-context</code></a>, including this demo) can be cited with:')
-                gr.Code(inseq_citation, interactive=False, label="Inseq (Sarti et al., 2023)")
-                gr.Markdown("The AttnLRP attribution method used in this demo via the LXT library can be cited with:")
-                gr.Code(lxt_citation, interactive=False, label="AttnLRP (Achtibat et al., 2024)")
+                gr.Code(
+                    pecore_citation,
+                    interactive=False,
+                    label="PECoRe (Sarti et al., 2024)",
+                )
+                gr.Markdown(
+                    'The Inseq implementation used in this work (<a href="https://inseq.org/en/latest/main_classes/cli.html#attribute-context"><code>inseq attribute-context</code></a>, including this demo) can be cited with:'
+                )
+                gr.Code(
+                    inseq_citation,
+                    interactive=False,
+                    label="Inseq (Sarti et al., 2023)",
+                )
+                gr.Markdown(
+                    "The AttnLRP attribution method used in this demo via the LXT library can be cited with:"
+                )
+                gr.Code(
+                    lxt_citation,
+                    interactive=False,
+                    label="AttnLRP (Achtibat et al., 2024)",
+                )
         with gr.Column():
             attribute_context_out = gradio_iframe.iFrame(height=400, visible=True)
             with gr.Row(equal_height=True):
@@ -229,15 +322,23 @@ with gr.Blocks(css=custom_css) as demo:
             )
     with gr.Row(elem_classes="footer-container"):
         with gr.Column():
-            gr.Markdown("""<div class="footer-custom-block"><b>Powered by</b> <a href='https://github.com/inseq-team/inseq' target='_blank'><img src="file/img/inseq_logo_white_contour.png" width=150px /></a> <a href='https://github.com/rachtibat/LRP-eXplains-Transformers' target='_blank'><img src="file/img/lxt_logo.png" width=150px /></a></div>""")
+            gr.Markdown(
+                """<div class="footer-custom-block"><b>Powered by</b> <a href='https://github.com/inseq-team/inseq' target='_blank'><img src="file/img/inseq_logo_white_contour.png" width=150px /></a> <a href='https://github.com/rachtibat/LRP-eXplains-Transformers' target='_blank'><img src="file/img/lxt_logo.png" width=150px /></a></div>"""
+            )
         with gr.Column():
             with gr.Row(elem_classes="footer-custom-block"):
                 with gr.Column(scale=0.30, min_width=150):
-                    gr.Markdown("""<b>Built by <a href="https://gsarti.com" target="_blank">Gabriele Sarti</a><br> with the support of</b>""")
+                    gr.Markdown(
+                        """<b>Built by <a href="https://gsarti.com" target="_blank">Gabriele Sarti</a><br> with the support of</b>"""
+                    )
                 with gr.Column(scale=0.30, min_width=120):
-                    gr.Markdown("""<a href='https://www.rug.nl/research/clcg/research/cl/' target='_blank'><img src="file/img/rug_logo_white_contour.png" width=170px /></a>""")
+                    gr.Markdown(
+                        """<a href='https://www.rug.nl/research/clcg/research/cl/' target='_blank'><img src="file/img/rug_logo_white_contour.png" width=170px /></a>"""
+                    )
                 with gr.Column(scale=0.30, min_width=120):
-                    gr.Markdown("""<a href='https://projects.illc.uva.nl/indeep/' target='_blank'><img src="file/img/indeep_logo_white_contour.png" width=100px /></a>""")
+                    gr.Markdown(
+                        """<a href='https://projects.illc.uva.nl/indeep/' target='_blank'><img src="file/img/indeep_logo_white_contour.png" width=100px /></a>"""
+                    )
 
     rag_setting.change(
         fn=set_interactive_settings,
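The `rag_setting.change` wiring starting here connects the mode radio to `set_interactive_settings` defined earlier; the full call is cut off by the hunk boundary, but its three returned components presumably map onto the outputs like this (the `inputs` list is inferred from the callback's signature, not taken from the commit):

```python
# Sketch of the full event wiring; inputs/outputs are inferred, not verbatim.
rag_setting.change(
    fn=set_interactive_settings,
    inputs=[rag_setting, retrieve_k, top_k, custom_context],
    outputs=[retrieve_k, top_k, custom_context],  # matches the returned 3-tuple
)
```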
@@ -262,7 +363,9 @@ with gr.Blocks(css=custom_css) as demo:
             attribute_context_out,
             download_output_file_button,
             download_output_html_button,
-        ]
+        ],
     )
 
-demo.queue(api_open=False, max_size=20).launch(allowed_paths=["img/", "outputs/"], show_api=False)
+demo.queue(api_open=False, max_size=20).launch(
+    allowed_paths=["img/", "outputs/"], show_api=False
+)
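A closing note on the launch call: `Blocks.queue` returns the app itself, so the chained form above is equivalent to the two-step form below, and `allowed_paths` is what lets Gradio serve the logo images under `img/` and the downloadable attribution files under `outputs/` (a sketch for clarity, not part of the commit):

```python
# Equivalent two-step form of the final call (illustrative only):
demo.queue(api_open=False, max_size=20)
demo.launch(allowed_paths=["img/", "outputs/"], show_api=False)
```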