"""Inspect parameter distributions and attributes of model checkpoints."""
import os
import torch
import random
import struct
import pandas as pd
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import math
from numpy.ma.core import shape


def showDif(file1, file2):
    """Compare an extracted malware file against the original, bit by bit.

    Prints the position of every differing bit and the total count.

    :param file1: path to the original file
    :param file2: path to the extracted file
    :return: number of differing bits, or None when the files differ in size
    """
    # Build the full bit string of each file (8 bits per byte, MSB first).
    # This is exactly what bitstring.BitArray(filename=...).bin produced,
    # but uses only the standard library.
    with open(file1, "rb") as f:
        malwareStr1 = "".join(f"{byte:08b}" for byte in f.read())
    with open(file2, "rb") as f:
        malwareStr2 = "".join(f"{byte:08b}" for byte in f.read())
    # BUG FIX: the original compared the full bit-string CONTENT here, so any
    # differing bit was misreported as a size mismatch and the diff count was
    # never reached. It also printed malwareStr1 twice (the second value must
    # be the second file) and printed the whole bit string where the message
    # ("Bit数" = bit count) clearly intends the length.
    if len(malwareStr1) != len(malwareStr2):
        print("两个恶意软件大小不同,第一个Bit数为:", len(malwareStr1), " 第二个Bit数为:", len(malwareStr2))
        return None
    diffNum = 0
    for i in range(len(malwareStr1)):
        if malwareStr1[i] != malwareStr2[i]:
            # Report the position of every differing bit.
            print("pos:", i, "initBit:", malwareStr1[i], "extractedBit:", malwareStr2[i])
            diffNum += 1
    print("different bit Num between the two files: ", diffNum)
    return diffNum


def get_file_bit_num(file_path):
    """Return the size of the file at *file_path* in bits.

    BUG FIX: the original called os.path.getSize, which does not exist
    (AttributeError on every call); the correct name is os.path.getsize.
    """
    return os.path.getsize(file_path) * 8


def getExpEmbeddSize(initParaPath, layers, interval=1, correct=1):
    """Return the maximum exponent-field embedding capacity per layer, in bytes.

    One bit is embedded per *interval* parameters, and *correct* parameters
    are reserved per embedded bit for error correction; 8 bits make a byte.

    :param initParaPath: path to a checkpoint loadable with torch.load
    :param layers: list of state-dict keys to measure
    :param interval: embed into one of every `interval` parameters
    :param correct: error-correction expansion factor
    :return: list of capacities (bytes), one per entry of *layers*
    """
    para = torch.load(initParaPath, map_location=torch.device("cpu"))
    ret = []
    for layer in layers:
        paraTensor = para[layer].data
        paraTensor_flat = paraTensor.flatten()
        # parameters -> embeddable bytes: 8 bits per byte, spread over
        # `interval` parameters each, expanded by the correction factor.
        layerSize = len(paraTensor_flat) // (interval * correct * 8)
        ret.append(layerSize)
    return ret


def generate_file_with_bits(file_path, num_bits):
    """Generate a random file (mock malware) containing *num_bits* payload bits.

    :param file_path: destination path (overwritten)
    :param num_bits: number of random bits required
    :return: None
    """
    # Number of bytes needed, rounded up so every requested bit fits.
    num_bytes = (num_bits + 7) // 8
    print("Byte Num:", num_bytes)
    # Fill with random bytes.
    byte_array = bytearray(random.getrandbits(8) for _ in range(num_bytes))
    # If the last byte is only partially used, zero its unused (high) bits.
    if num_bits % 8 != 0:
        last_byte_bits = num_bits % 8
        mask = (1 << last_byte_bits) - 1
        byte_array[-1] &= mask
    with open(file_path, 'wb') as f:
        f.write(byte_array)
    # NOTE: reconstructed onto one line — the source had this f-string
    # severed across a line break, which is a SyntaxError.
    print(f"File '{file_path}' generated with {num_bits} bits.")


if __name__ == "__main__":
    """Test paths"""
    path_swin = "../parameters/classification/swin/swin_b.pth"
    path_yolo = "../parameters/detection/yolov10/yolov10b.pt"
    path_rt = "../parameters/detection/rt_dert/rt.pth"
    path_sam = "../parameters/segmentation/samv2/sam.pth"

    """Inspect the swin / yolo checkpoints"""
    # swin_keys = torch.load(path_swin).keys()
    # print(type(torch.load(path_swin)))
    pth_keys = torch.load(path_yolo)
    print(pth_keys.keys())
    print(type(pth_keys['model'].model.named_modules()))
    print(pth_keys['model'].model.named_modules())
    # for idx, layer in enumerate(pth_keys['model'].model):
    #     print(f"层 {idx}: {layer}")

    # Walk every submodule of the model (including nested layers).
    for idx, (name, module) in enumerate(pth_keys['model'].model.named_modules()):
        print(f"模块索引 {idx} - 名称 {name}: {module}")

    # print((pth_keys['model'].model[23].cv2[0]))
    # print((pth_keys['model']))
    # print(len(pth_keys['model'].model.conv.weight))
    # print(type(pth_keys['model'].model[1]))
    # print(type(pth_keys['model'].model[0].conv.weight))
    # print(type(pth_keys['model'].model[0].conv.weight.data))
    # print(shape(pth_keys['model'].model[0].conv.weight.data))
    # print(pth_keys['model'].model[0].conv.weight.data[0][0][0][0].dtype)

    # path3 = "../parameters/detection/rt_dert/rt.pth"
    # a = torch.load(path3)
    # print(a['ema'].keys())
    # print(a['ema']['module']['backbone.conv1.conv1_1.conv.weight'][0][0][0][0].dtype)
    #
    # path4 = "../parameters/segmentation/samv2/sam.pth"
    # b = torch.load(path4)
    # print(b.keys())
    # print(b['image_encoder.neck.0.weight'][0][0][0][0].dtype)

    # print(get_file_bit_num(path1))
    # print(pth_keys['train_args'])
    print("Test Done")