import gradio as gr
import torch
import os
import json
import pandas as pd
###
def count_layers(model_state):
    """Tally tensor entries by category and count total parameters.

    Iterates a state-dict-like mapping and classifies each tensor key by
    the first matching substring among 'weight', 'bias', 'running_mean',
    'running_var' (in that priority order, mirroring an if/elif chain).

    Returns:
        tuple: (counts dict keyed by category, total element count across
        all tensors).
    """
    counts = {'weight': 0, 'bias': 0, 'running_mean': 0, 'running_var': 0}
    param_total = 0
    for name, value in model_state.items():
        if not torch.is_tensor(value):
            continue
        param_total += value.numel()
        # Dict insertion order preserves the original elif priority;
        # break so each key is counted in at most one category.
        for category in counts:
            if category in name:
                counts[category] += 1
                break
    return counts, param_total
#
def infer_architecture(layer_names):
    """Guess a model family from substrings in its parameter names.

    Checks markers in priority order ('res' before 'dw' before
    'efficient'), matching the original if/elif chain; falls back to
    "Unknown or other" when no marker appears in any name.
    """
    names = list(layer_names)
    markers = (
        ("res", "ResNet"),
        ("dw", "MobileNet"),
        ("efficient", "EfficientNet"),
    )
    for marker, family in markers:
        if any(marker in name for name in names):
            return family
    return "Unknown or other"
#
def process_pth(file):
    """Analyze an uploaded .pth checkpoint and return a JSON summary string.

    Args:
        file: an uploaded-file object exposing a ``.name`` filesystem path
            (as provided by ``gr.File``).

    Returns:
        str: pretty-printed JSON with layer counts, total parameters, file
        size, first/last tensor shapes and an inferred architecture — or a
        ``"Failed to process the file: ..."`` message on any error.
    """
    try:
        # NOTE(review): torch.load unpickles arbitrary objects from an
        # untrusted upload — consider weights_only=True (torch >= 1.13).
        model_state = torch.load(file.name, map_location='cpu')
        # Checkpoints often nest the weights under a wrapper key.
        if 'model' in model_state:
            model_state = model_state['model']
        elif 'state_dict' in model_state:
            model_state = model_state['state_dict']

        # Guard the empty case explicitly instead of letting an IndexError
        # surface as an opaque failure message below.
        if not model_state:
            return "Failed to process the file: the checkpoint contains no entries."

        layer_counts, total_parameters = count_layers(model_state)
        inferred_architecture = infer_architecture(model_state.keys())

        # "Main" layers: keys that are not biases or batch-norm statistics.
        main_layers = [
            k for k in model_state.keys()
            if not any(sub in k for sub in ('bias', 'running_mean', 'running_var'))
        ]

        keys = list(model_state.keys())
        first_tensor = model_state[keys[0]]
        last_tensor = model_state[keys[-1]]
        first_layer_shape = list(first_tensor.shape) if torch.is_tensor(first_tensor) else "Unknown"
        last_layer_shape = list(last_tensor.shape) if torch.is_tensor(last_tensor) else "Unknown"

        info = {
            'Layer Count': layer_counts,
            'Total Parameters': total_parameters,
            'File Size (KB)': os.path.getsize(file.name) // 1024,
            'Total Main Layers': len(main_layers),
            'First Layer Shape': first_layer_shape,
            'Last Layer Shape': last_layer_shape,
            'Inferred Architecture': inferred_architecture,
        }
        return json.dumps(info, indent=4)
    except Exception as e:
        return f"Failed to process the file: {e}"
#Gradio
# App description shown above the upload widget (text unchanged).
APP_DESCRIPTION = (
    "Upload a .PTH file to analyze its structure and parameters. A .PTH "
    "file is typically a PyTorch model file, which contains the state of a "
    "neural network trained using libraries like PyTorch. These files "
    "encapsulate the learned weights and biases of the model after "
    "training. "
)

# Wire the analyzer into a single-input/single-output Gradio interface.
iface = gr.Interface(
    fn=process_pth,
    inputs=gr.File(label="Upload .PTH File"),
    outputs="text",
    title="PTH-Scope",
    description=APP_DESCRIPTION,
    examples=[["mobilenetv2_035.pth"]],
)

iface.launch(debug=True)