Update app.py
app.py
CHANGED
@@ -81,7 +81,7 @@ def get_hidden_states(raw_original_prompt):
                    + [gr.Button('', visible=False) for _ in range(MAX_PROMPT_TOKENS - len(tokens))])
     progress_dummy_output = ''
     invisible_bubbles = [gr.Textbox('', visible=False) for i in range(MAX_NUM_LAYERS)]
-    global_state.hidden_states = hidden_states.cpu()
+    global_state.hidden_states = hidden_states.cpu().detach().numpy()
     return [progress_dummy_output, *token_btns, *invisible_bubbles]
 
 
@@ -89,8 +89,10 @@ def get_hidden_states(raw_original_prompt):
 def run_interpretation(raw_interpretation_prompt, max_new_tokens, do_sample,
                        temperature, top_k, top_p, repetition_penalty, length_penalty, i,
                        num_beams=1):
-
-
+    model = global_state.model
+    tokenizer = global_state.tokenizer
+    print(f'run {model}')
+    interpreted_vectors = torch.tensor(global_state.hidden_states[:, i]).to(model.device).to(model.dtype)
     length_penalty = -length_penalty  # unintuitively, length_penalty > 0 will make sequences longer, so we negate it
 
     # generation parameters
@@ -107,13 +109,13 @@ def run_interpretation(raw_interpretation_prompt, max_new_tokens, do_sample,
 
     # create an InterpretationPrompt object from raw_interpretation_prompt (after putting it in the right template)
     interpretation_prompt = global_state.interpretation_prompt_template.format(prompt=raw_interpretation_prompt, repeat=5)
-    interpretation_prompt = InterpretationPrompt(
+    interpretation_prompt = InterpretationPrompt(tokenizer, interpretation_prompt)
 
     # generate the interpretations
-    generated = interpretation_prompt.generate(
+    generated = interpretation_prompt.generate(model, {0: interpreted_vectors},
                                                layers_format=global_state.layers_format, k=3,
                                                **generation_kwargs)
-    generation_texts =
+    generation_texts = tokenizer.batch_decode(generated)
     progress_dummy_output = ''
     bubble_outputs = [gr.Textbox(text.replace('\n', ' '), visible=True, container=False, label=f'Layer {i}') for text in generation_texts]
     bubble_outputs += [gr.Textbox('', visible=False) for _ in range(MAX_NUM_LAYERS - len(bubble_outputs))]
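
The change in the first hunk (cache the hidden states as NumPy) pairs with the new interpreted_vectors line in the second: the states are detached and moved off the GPU once, then a single token's per-layer vectors are rebuilt on the model's device and dtype on demand. A minimal sketch of that round-trip, assuming hidden_states is shaped (num_layers, num_tokens, hidden_dim) so that [:, i] selects token i's vector at every layer (all shapes, devices, and dtypes below are illustrative):

import torch

# Stand-in for the hidden states cached by get_hidden_states; requires_grad
# mimics a tensor still attached to the autograd graph.
hidden_states = torch.randn(32, 10, 4096, requires_grad=True)

# .numpy() raises on a tensor that requires grad, which is why the diff adds
# .detach() before the conversion.
stored = hidden_states.cpu().detach().numpy()

# Later, run_interpretation rebuilds a tensor for token i on the model's
# device and dtype before injection ('cpu' and float16 stand in for
# model.device and model.dtype).
i = 3
interpreted_vectors = torch.tensor(stored[:, i]).to('cpu').to(torch.float16)
assert interpreted_vectors.shape == (32, 4096)  # one vector per layer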
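The third hunk fills in the InterpretationPrompt call pattern: the object is built from the tokenizer and the formatted prompt, generate() takes the model plus a mapping from an injection offset to the vectors to patch in, and the returned ids go through tokenizer.batch_decode. The class itself is not shown in this diff, so the following is only a sketch of the pattern, assuming {0: interpreted_vectors} means "overwrite the embedding at the placeholder position (offset 0) with each vector, one prompt per layer"; the real generate() also accepts layers_format and k, which the sketch omits. All names here are illustrative, not the repo's implementation.

import torch

class ToyInterpretationPrompt:
    """Illustrative stand-in for the repo's InterpretationPrompt."""

    def __init__(self, tokenizer, prompt):
        self.tokenizer = tokenizer
        self.input_ids = tokenizer(prompt, return_tensors='pt').input_ids
        # Assume the placeholder to overwrite is the last prompt token.
        self.inject_pos = self.input_ids.shape[1] - 1

    def generate(self, model, vectors_by_offset, **generation_kwargs):
        num_prompts = next(iter(vectors_by_offset.values())).shape[0]
        batch = self.input_ids.repeat(num_prompts, 1).to(model.device)

        def patch_embeddings(module, args, output):
            # Patch only on the prefill pass; later decode steps feed a
            # single new token, so the placeholder position is absent.
            if output.shape[1] > self.inject_pos:
                for offset, vecs in vectors_by_offset.items():
                    output[:, self.inject_pos + offset] = vecs.to(output.dtype)
            return output

        handle = model.get_input_embeddings().register_forward_hook(patch_embeddings)
        try:
            return model.generate(batch, **generation_kwargs)
        finally:
            handle.remove()

# Usage mirroring the diff (vectors shaped (num_layers, hidden_dim), so each
# layer's vector gets its own row in the batch):
#   prompt = ToyInterpretationPrompt(tokenizer, interpretation_prompt)
#   generated = prompt.generate(model, {0: interpreted_vectors}, max_new_tokens=30)
#   generation_texts = tokenizer.batch_decode(generated)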
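Both functions also rely on the same Gradio idiom for variable-length output: components cannot be created dynamically, so the app pre-allocates a fixed pool (MAX_PROMPT_TOKENS buttons, MAX_NUM_LAYERS textboxes) and each callback returns visible components for the real outputs plus invisible placeholders to pad the pool. A condensed sketch of that padding step (the constant's value is illustrative):

import gradio as gr

MAX_NUM_LAYERS = 49  # illustrative; must match the pool size built in the UI

def pad_bubbles(generation_texts):
    # Real outputs become visible textboxes; the rest of the pool stays hidden.
    visible = [gr.Textbox(text.replace('\n', ' '), visible=True, container=False)
               for text in generation_texts]
    hidden = [gr.Textbox('', visible=False)
              for _ in range(MAX_NUM_LAYERS - len(visible))]
    return visible + hidden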