Hugging Face Spaces — Space status: Running
Commit: "Update app.py" (Browse files)
File changed: app.py
@@ -67,9 +67,9 @@ def visualize_layer_diffs(layer_diffs):
     plt.tight_layout()
     return fig
 
-def gradio_interface(base_model_name, chat_model_name, load_one_at_a_time=False):
-    base_model = AutoModelForCausalLM.from_pretrained(base_model_name, torch_dtype=torch.bfloat16)
-    chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name, torch_dtype=torch.bfloat16)
+def gradio_interface(base_model_name, chat_model_name, hf_token, load_one_at_a_time=False):
+    base_model = AutoModelForCausalLM.from_pretrained(base_model_name, torch_dtype=torch.bfloat16, use_auth_token=hf_token)
+    chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name, torch_dtype=torch.bfloat16, use_auth_token=hf_token)
 
     layer_diffs = calculate_layer_diffs(base_model, chat_model, load_one_at_a_time=load_one_at_a_time)
     fig = visualize_layer_diffs(layer_diffs)
@@ -81,6 +81,7 @@ iface = gr.Interface(
     inputs=[
         gr.Textbox(lines=2, placeholder="Enter base model name"),
         gr.Textbox(lines=2, placeholder="Enter chat model name"),
+        gr.Textbox(lines=2, placeholder="Enter Hugging Face token"),
         gr.Checkbox(label="Load one layer at a time")
     ],
     outputs="image",