repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
PT-MAP
|
PT-MAP-master/data/additional_transforms.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from PIL import ImageEnhance
# Maps an enhancement name to its PIL ImageEnhance implementation.
transformtypedict = {
    'Brightness': ImageEnhance.Brightness,
    'Contrast': ImageEnhance.Contrast,
    'Sharpness': ImageEnhance.Sharpness,
    'Color': ImageEnhance.Color,
}
class ImageJitter(object):
    """Randomly jitter a PIL image with the configured ImageEnhance ops.

    transformdict maps an enhancement name (a key of transformtypedict,
    e.g. 'Brightness') to a jitter amplitude alpha; each enhancement
    factor is drawn uniformly from [1 - alpha, 1 + alpha).
    """

    def __init__(self, transformdict):
        # (ImageEnhance class, alpha) pairs in the dict's iteration order.
        self.transforms = [(transformtypedict[k], transformdict[k]) for k in transformdict]

    def __call__(self, img):
        out = img
        randtensor = torch.rand(len(self.transforms))
        for i, (transformer, alpha) in enumerate(self.transforms):
            # Map U[0,1) -> enhancement factor in [1 - alpha, 1 + alpha).
            # float() unwraps the 0-dim tensor: recent Pillow releases
            # require a plain scalar for ImageEnhance.enhance(factor).
            r = float(alpha * (randtensor[i] * 2.0 - 1.0) + 1)
            out = transformer(out).enhance(r).convert('RGB')
        return out
| 850
| 24.787879
| 150
|
py
|
PT-MAP
|
PT-MAP-master/data/dataset.py
|
# This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import json
import numpy as np
import torchvision.transforms as transforms
import os
def identity(x):
    """Default target transform: return the label unchanged."""
    return x


class SimpleDataset:
    """Flat image dataset described by a JSON meta file.

    The meta file holds two parallel lists: 'image_names' (paths) and
    'image_labels' (targets).
    """

    def __init__(self, data_file, transform, target_transform=identity):
        with open(data_file, 'r') as meta_file:
            self.meta = json.load(meta_file)
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, i):
        path = os.path.join(self.meta['image_names'][i])
        image = Image.open(path).convert('RGB')
        return self.transform(image), self.target_transform(self.meta['image_labels'][i])

    def __len__(self):
        return len(self.meta['image_names'])
class SetDataset:
    """Class-grouped dataset: item i yields one batch drawn from class i."""

    def __init__(self, data_file, batch_size, transform):
        with open(data_file, 'r') as meta_file:
            self.meta = json.load(meta_file)

        self.cl_list = np.unique(self.meta['image_labels']).tolist()

        # Bucket image paths by their class label.
        self.sub_meta = {cl: [] for cl in self.cl_list}
        for name, label in zip(self.meta['image_names'], self.meta['image_labels']):
            self.sub_meta[label].append(name)

        loader_kwargs = dict(batch_size=batch_size,
                             shuffle=True,
                             num_workers=0,  # main thread only, or we may receive multiple batches
                             pin_memory=False)
        # One shuffling loader per class.
        self.sub_dataloader = [
            torch.utils.data.DataLoader(SubDataset(self.sub_meta[cl], cl, transform=transform),
                                        **loader_kwargs)
            for cl in self.cl_list
        ]

    def __getitem__(self, i):
        return next(iter(self.sub_dataloader[i]))

    def __len__(self):
        return len(self.cl_list)
class SubDataset:
    """The images of a single class; every item carries that class label."""

    def __init__(self, sub_meta, cl, transform=transforms.ToTensor(), target_transform=identity):
        # sub_meta: list of image paths belonging to class `cl`.
        self.sub_meta = sub_meta
        self.cl = cl
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, i):
        path = os.path.join(self.sub_meta[i])
        image = Image.open(path).convert('RGB')
        return self.transform(image), self.target_transform(self.cl)

    def __len__(self):
        return len(self.sub_meta)
class EpisodicBatchSampler(object):
    """Batch sampler yielding, per episode, n_way random class indices."""

    def __init__(self, n_classes, n_way, n_episodes):
        self.n_classes = n_classes    # total classes to sample from
        self.n_way = n_way            # classes per episode
        self.n_episodes = n_episodes  # episodes per epoch

    def __len__(self):
        return self.n_episodes

    def __iter__(self):
        done = 0
        while done < self.n_episodes:
            # A fresh random subset of class indices each episode.
            yield torch.randperm(self.n_classes)[:self.n_way]
            done += 1
| 2,920
| 32.193182
| 108
|
py
|
PT-MAP
|
PT-MAP-master/data/datamgr.py
|
# This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
import data.additional_transforms as add_transforms
from data.dataset import SimpleDataset, SetDataset, EpisodicBatchSampler
from abc import abstractmethod
class TransformLoader:
    """Builds torchvision transform pipelines for training or evaluation."""

    def __init__(self, image_size,
                 normalize_param = dict(mean= [0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225]),
                 jitter_param = dict(Brightness=0.4, Contrast=0.4, Color=0.4)):
        self.image_size = image_size
        self.normalize_param = normalize_param
        self.jitter_param = jitter_param

    def parse_transform(self, transform_type):
        """Instantiate a single transform from its name."""
        if transform_type == 'ImageJitter':
            return add_transforms.ImageJitter(self.jitter_param)
        ctor = getattr(transforms, transform_type)
        if transform_type in ('RandomSizedCrop', 'CenterCrop'):
            return ctor(self.image_size)
        if transform_type == 'Scale':
            # Scale slightly larger than the crop so CenterCrop has margin.
            side = int(self.image_size * 1.15)
            return ctor([side, side])
        if transform_type == 'Normalize':
            return ctor(**self.normalize_param)
        return ctor()

    def get_composed_transform(self, aug=False):
        """Compose the augmented (aug=True) or deterministic pipeline."""
        if aug:
            names = ['RandomSizedCrop', 'ImageJitter', 'RandomHorizontalFlip', 'ToTensor', 'Normalize']
        else:
            names = ['Scale', 'CenterCrop', 'ToTensor', 'Normalize']
        return transforms.Compose([self.parse_transform(name) for name in names])
class DataManager:
    """Abstract base: concrete managers build a DataLoader from a data file."""

    @abstractmethod
    def get_data_loader(self, data_file, aug):
        # Subclasses return a torch DataLoader; aug toggles augmentation.
        pass
class SimpleDataManager(DataManager):
    """Plain shuffled mini-batch loader over the whole dataset."""

    def __init__(self, image_size, batch_size):
        super(SimpleDataManager, self).__init__()
        self.batch_size = batch_size
        self.trans_loader = TransformLoader(image_size)

    def get_data_loader(self, data_file, aug):
        """Build the loader; `aug` differs between train and val sets."""
        transform = self.trans_loader.get_composed_transform(aug)
        dataset = SimpleDataset(data_file, transform)
        loader_kwargs = dict(batch_size=self.batch_size,
                             shuffle=True,
                             num_workers=12,
                             pin_memory=True)
        return torch.utils.data.DataLoader(dataset, **loader_kwargs)
class SetDataManager(DataManager):
    """Episodic loader: each batch holds n_way classes x (n_support + n_query) images."""

    def __init__(self, image_size, n_way, n_support, n_query, n_eposide =100):
        super(SetDataManager, self).__init__()
        self.image_size = image_size
        self.n_way = n_way
        self.batch_size = n_support + n_query  # images drawn per sampled class
        self.n_eposide = n_eposide             # episodes per epoch (original spelling kept)
        self.trans_loader = TransformLoader(image_size)

    def get_data_loader(self, data_file, aug):
        """Build the episodic loader; `aug` differs between train and val sets."""
        transform = self.trans_loader.get_composed_transform(aug)
        dataset = SetDataset(data_file, self.batch_size, transform)
        sampler = EpisodicBatchSampler(len(dataset), self.n_way, self.n_eposide)
        loader_kwargs = dict(batch_sampler=sampler, num_workers=12, pin_memory=True)
        return torch.utils.data.DataLoader(dataset, **loader_kwargs)
| 3,515
| 40.857143
| 123
|
py
|
PT-MAP
|
PT-MAP-master/data/__init__.py
|
from . import datamgr
from . import dataset
from . import additional_transforms
| 80
| 19.25
| 35
|
py
|
PyDraw
|
PyDraw-master/HS.py
|
# 调用函数
def hello():
    """Placeholder menu callback: print the author credit line."""
    print('© JY.Lin!The first author, 2018/07/31')
# ********************************************************************************
# ********************************************************************************
# ********************************************************************************
# ********************************************************************************
# ********************************************************************************
| 510
| 14.484848
| 82
|
py
|
PyDraw
|
PyDraw-master/ZJM.py
|
from tkinter import *
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
from tkinter.messagebox import *
import tkinter.colorchooser
import tkinter.filedialog
import tkinter as tk
import HS
canva_W = 0
canva_H = 0
flag_CK_GuDing = FALSE
canva_X = 60
canva_Y = 50
WangGe_KuanDu = 20
WangGe_ShuMu_X = 0
WangGe_ShuMu_Y = 0
scal_X_Zhi = 0
scal_Y_Zhi = 0
# 全局滚轮屏幕坐标
Event_GunLun_x = 0
Event_GunLun_y = 0
# 全局 Canvas 坐标
Event_Canvas_x = 0
Event_Canvas_y = 0
# 滚轮参数
flag_GunLun_Gun = FALSE
flag_GunLun_Shang = FALSE
GL_Yuan_canva_X = 60
GL_Yuan_canva_Y = 50
# 记录字典
Button1 = {}
Canvas1 = {}
Checkbutton1 = {}
Combobox1 = {}
Entry1 = {}
Frame1 = {}
Label1 = {}
LabelFrame1 = {}
Listbox1 = {}
Message1 = {}
PanedWindow1 = {}
Radiobutton1 = {}
Scale1_X = {}
Scale1_Y = {}
Scrollbar1_X = {}
Scrollbar1_Y = {}
Spinbox1 = {}
Text1 = {}
Toplevel1 = {}
tkMessageBox1 = {}
Menu1 = {}
Menu1_ListCode = {}
Menu1_Delete_Num = []
Menu1_Son_Len = {}
zi_menu1_num_i = 0
# 画控件标志
KJBZ = ''
# 画控件数目标志
button1_i = 0
canvas1_i = 0
checkbutton1_i = 0
combobox1_i = 0
entry1_i = 0
frame1_i = 0
label1_i = 0
labelFrame1_i = 0
listbox1_i = 0
menu1_i = 0
message1_i = 0
panedWindow1_i = 0
radiobutton1_i = 0
scale1_x_i = 0
scale1_y_i = 0
scrollbar1_x_i = 0
scrollbar1_y_i = 0
spinbox1_i = 0
text1_i = 0
toplevel1_i = 0
tkMessageBox1_i = 0
# 记录各个部件类型删除的成员的 列表
Button1_List_Num = []
Canvas1_List_Num = []
Checkbutton1_List_Num = []
Combobox1_List_Num = []
Entry1_List_Num = []
Frame1_List_Num = []
Label1_List_Num = []
LabelFrame1_List_Num = []
Listbox1_List_Num = []
Message1_List_Num = []
PanedWindow1_List_Num = []
Radiobutton1_List_Num = []
Scale1_List_Num_X = []
Scale1_List_Num_Y = []
Spinbox1_List_Num = []
Text1_List_Num = []
# 事件字典
SJ_button_press_1 = {}
SJ_button_release_1 = {}
SJ_button_press_right_1 = {}
SJ_button_press_left_2 = {}
SJ_button_press_right_2 = {}
SJ_button_press_middle_1 = {}
SJ_button_press_middle_2 = {}
SJ_button_press_left_move = {}
SJ_cursor_enter = {}
SJ_cursor_leave = {}
SJ_get_key_focus = {}
SJ_lose_key_focus = {}
SJ_press_a_key = {}
SJ_press_enter_key = {}
SJ_when_control_change = {}
SJ_press_space_key = {}
SJ_shift_mouseWheel = {}
SJ_press_combinatorial_key = {}
# 当前控件名
DangQian_KJ_name = ''
# Menu 参数
flag_Menu_Kai = FALSE
D_ZhuMenu = {}
zi_menu1_sum = 0
DQ_ZhuMenu_ZiXiang_Num_i = 0
DQ_Zong_Len = 0
AnXia_Menu_Btn_Num = 0
# Hua_Radiobutton 参数
Radiobutton_i = 0 # 每一组当前的 Radiobutton 编号
flag_RadBtn_Zu = FALSE
# 编译 Text 参数
flag_BianYi_Text = FALSE
flag_Canva_Hide = FALSE
# Canvas 项目处理参数
# 选择参数
background_XiangMu_XuanDing = 'red'
foreground_XiangMu_XuanDing = 'white'
XuanZhong = {}
XuanZhong_sum = 0
# 属性框参数
flag_ShuXing_Tan = FALSE
# 部件编辑参数
flag_ZuJian_Move = TRUE
# 右键编辑选择
each_YouJian = ''
flag_TanChuan_BianJian = FALSE
# 完成时选框
XuanKuang_X0 = 0
XuanKuang_Y0 = 0
XuanKuang_X1 = 0
XuanKuang_Y1 = 0
# 窗口位置
win_X = 0
win_Y = 0
# 属性面板全局参数
lab_ControlType = ''
ent_ControlName = ''
ent_X0 = 0
ent_Y0 = 0
ent_width = 0
ent_height = 0
ent_length = 0
ent_fontSize = 0
combt_fontType = ''
combt_foreground = ''
combt_background = ''
combt_anchor = ''
combt_justify = ''
ent_text = ''
combt_state = ''
combt_relief = ''
combt_highlightcolor = ''
combt_highlightbackground = ''
combt_bitmap = ''
ent_image = ''
combt_padx = 0
combt_pady = 0
combt_takefocus = ''
combt_cursor = ''
ent_container = ''
ent_command = ''
# 窗口设置窗口变量
ck_name = ''
ck_init_x = ''
ck_init_y = ''
ck_is_width_not_change = 1
ck_is_height_not_change = 1
ck_is_minsize = 1
ck_init_minsize_w = 0
ck_init_minsize_h = 0
ck_is_maxsize = 1
ck_init_maxsize_w = 0
ck_init_maxsize_h = 0
ck_is_toolwindow = 0
ck_is_topmost = 1
ck_is_zoomed = 1
ck_set_icon = ''
ck_set_grid = 0
ck_is_transparency = 1
ck_scal_transparency = 1
ck_is_son_win = 1
Str_BianYi = ''
Str_BianYi_End = ''
Str_Menu = ''
# 定义空格tap
tap = " "
# 窗口重要参数
bar_W = 30
bar_menu_W = 30
Distance = 0
# class of Main interface
class PyDraw(Tk):
# Main interface Define
    def __init__(self):
        """Create the main PyDraw window and reset the shared module globals."""
        super().__init__()
        # Main interface parameter setting
        w = 1000
        h = 700
        self.minsize(w, h)  # lock the minimum window size
        S_width = self.winfo_screenwidth()
        S_height = self.winfo_screenheight()
        # Center the window on screen (shifted up by 30 px).
        size = '%dx%d+%d+%d' % (w, h, (S_width - w) / 2, (S_height - h) / 2 - 30)
        self.geometry(size)
        self.state('zoomed')
        self.title('PyDraw')
        self.BiaoTi_Text = 'PyDraw'
        self.BiaoTi_Text_YanSe = 'black'
        self.ChuangKou_JiXia_YanSe = 'black'
        self.ChuangKou_BianTiLan_YanSe = 'white'
        self.ChuangKou_BeiJing_YanSe = 'white'
        # Scale setting
        self.Sca_JiZhi_X = 1000
        self.Sca_JiZhi_Y = 1000
        # parameter setting
        self.ChuangKou_BianYan_YanSe = 'black'
        self.ChuangKou_BiaoTiLan_YanSe = 'green'
        # Control component initial parameter setting
        # Button parameter
        self.Button_H = 50
        self.Button_W = 100
        self.Button_NO = 0
        self.Button_YanSe = 'gray'
        self.Button_Text_YanSe = 'white'
        # Boolean value setting
        global flag_CK_GuDing
        flag_CK_GuDing = FALSE
        self.flag_WangGe = FALSE
        self.flag_SongKai = FALSE
        self.flag_BuJian_YinCang = FALSE
        # Original canvas parameter
        self.Yuan_canva_H = 600  # height corresponds to Y
        self.Yuan_canva_W = 800  # width corresponds to X
        # Canvas parameter
        global canva_H
        global canva_W
        canva_H = self.Yuan_canva_H
        canva_W = self.Yuan_canva_W
        self.x1 = 0
        self.y1 = 0
        self.x2 = 0
        self.y2 = 0
        # Canvas position
        global canva_X
        global canva_Y
        global GL_Yuan_canva_X
        global GL_Yuan_canva_Y
        global bar_W
        canva_X = 60
        canva_Y = 50
        GL_Yuan_canva_X = 60
        GL_Yuan_canva_Y = 50
        # Frame parameter
        self.fram_H = canva_H  # height corresponds to Y
        self.fram_W = canva_W  # width corresponds to X
        # Menu bar width
        self.bar_W = bar_W
        # Grid width parameter
        global WangGe_KuanDu
        WangGe_KuanDu = 20
        self.WangGe_YanSe = 'gray'
        # Compile-Text parameter initialization
        global flag_BianYi_Text
        global flag_Canva_Hide
        global XuanZhong_sum
        XuanZhong_sum = 0
        flag_Canva_Hide = FALSE
        flag_BianYi_Text = FALSE
        # Property-panel popup flag
        global flag_ShuXing_Tan
        flag_ShuXing_Tan = FALSE
        self.V_Scal_Y1 = StringVar()
        self.V_Scal_Y2 = StringVar()
        # Canvas zoom controls and their tk variable definitions
        self.vy = StringVar()
        self.vx = StringVar()
        self.vx_Text_font = StringVar()
        self.ent_y = StringVar()
        self.ent_x = StringVar()
        self.GuDing_Text = StringVar()
        self.GuDing_Text.set('Lock')
        self.Btn_WG_Text = StringVar()
        self.Btn_WG_Text.set('Grid')
        self.Btn_YinCang_Text = StringVar()
        self.Btn_YinCang_Text.set('Hide')
        self.Btn_ShuXing_Text = StringVar()
        self.Btn_ShuXing_Text.set('<=')
        self.Tv_BianYi_Text = StringVar()
        self.Tv_BianYi_Text.set('Text')
        self.Tv_Canva_Hide = StringVar()
        self.Tv_Canva_Hide.set('Paint')
        # Grid parameters: cell counts derived from canvas size and cell width
        global WangGe_ShuMu_X
        global WangGe_ShuMu_Y
        WangGe_ShuMu_X = (canva_H - self.bar_W) / WangGe_KuanDu
        WangGe_ShuMu_Y = canva_W / WangGe_KuanDu
        # Global screen coordinates (mouse wheel)
        global Event_GunLun_x
        global Event_GunLun_y
        Event_GunLun_x = 0
        Event_GunLun_y = 0
        # Switch to the main interface UI setting
        self.Set_UI()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def Set_UI(self):
global canva_X
global canva_Y
global canva_W
global canva_H
# Set Canvas
self.canva = Canvas(bg='white', width=canva_W, height=canva_H) # scrollregion=(0, 0, 1000, 1000)) # 创建canva
self.canva.place(x=canva_X, y=canva_Y) # 放置canva的位置
# 画 Menu
self.it_Menu = self.canva.create_rectangle(0, 0, 0, 0)
# 画外边框
self.it1 = self.canva.create_rectangle(2, canva_H - 1, canva_W - 1, 2,
fil=self.ChuangKou_BeiJing_YanSe)
# 画标题栏
self.it2 = self.canva.create_rectangle(2, self.bar_W, canva_W - 1, self.bar_W,
fil=self.ChuangKou_BiaoTiLan_YanSe)
# 画标题
self.it_BiaoTi = self.canva.create_text(43, 16, text=self.BiaoTi_Text,
font=('Consol', 11),
fill=self.BiaoTi_Text_YanSe)
# 画标题栏按钮
self.it_BiaoTi_AnNiu_ZuiXiao = self.canva.create_text(canva_W - 116, 16, text='—',
font=('Consol', 11),
fill=self.BiaoTi_Text_YanSe)
self.it_BiaoTi_AnNiu_ZuiDa = self.canva.create_text(canva_W - 70, 16, text='□',
font=('Consol', 11),
fill=self.BiaoTi_Text_YanSe)
self.it_BiaoTi_AnNiu_GuanBi = self.canva.create_text(canva_W - 28, 16, text='X',
font=('Helvetica', 11),
fill=self.BiaoTi_Text_YanSe)
self.Menubar = Menu(self)
# 定义下拉菜单栏
FileMenu = Menu(self.Menubar, tearoff=0)
FileMenu.add_command(label='Compile', command=self.BianYi)
FileMenu.add_command(label='Generate', command=self.BianYi)
FileMenu.add_command(label='Copy', command=self.BianYi)
FileMenu.add_separator()
FileMenu.add_command(label='Quit', command=self.quit)
self.Menubar.add_cascade(label='File', menu=FileMenu)
# 定义控件菜单栏
KongJianMenu = Menu(self.Menubar, tearoff=0)
KongJianMenu.add_command(label='Button', command=self.Hua_Button)
KongJianMenu.add_command(label='Canvas', command=self.Hua_Canvas)
KongJianMenu.add_command(label='Checkbutton', command=self.Hua_Checkbutton)
KongJianMenu.add_command(label='Combobox', command=self.Hua_Combobox)
KongJianMenu.add_command(label='Entry', command=self.Hua_Entry)
KongJianMenu.add_command(label='Frame', command=self.Hua_Frame)
KongJianMenu.add_command(label='Label', command=self.Hua_Label)
KongJianMenu.add_command(label='LabelFrame', command=self.Hua_LabelFrame)
KongJianMenu.add_command(label='Listbox', command=self.Hua_Listbox)
KongJianMenu.add_command(label='Menu', command=self.Hua_Menu)
KongJianMenu.add_command(label='Message', command=self.Hua_Message)
KongJianMenu.add_command(label='PanedWindow', command=self.Hua_PanedWindow)
KongJianMenu.add_command(label='Radiobutton', command=self.Hua_Radiobutton)
KongJianMenu.add_command(label='Scale_X', command=self.Hua_Scale_X)
KongJianMenu.add_command(label='Scale_Y', command=self.Hua_Scale_Y)
KongJianMenu.add_command(label='Spinbox', command=self.Hua_Spinbox)
KongJianMenu.add_command(label='Text', command=self.Hua_Text)
self.Menubar.add_cascade(label='Control', menu=KongJianMenu)
# 定义自画控件菜单栏
SheZhiMenu = Menu(self.Menubar, tearoff=0)
SheZhiMenu.add_command(label='System Setup', command=HS.hello)
self.Menubar.add_cascade(label='Setup', menu=SheZhiMenu)
# 定义窗口菜单栏
ChuangKouMenu = Menu(self.Menubar, tearoff=0)
ChuangKouMenu.add_command(label='New son_win', command=HS.hello)
ChuangKouMenu.add_command(label='Current win set', command=HS.hello)
ChuangKouMenu.add_command(label='Win control information', command=HS.hello)
self.Menubar.add_cascade(label='Win', menu=ChuangKouMenu)
# 定义对话框菜单栏
DuiHuaKuangMenu = Menu(self.Menubar, tearoff=0)
DuiHuaKuangMenu.add_command(label='New news dialog', command=HS.hello)
DuiHuaKuangMenu.add_command(label='New flie dialog', command=HS.hello)
DuiHuaKuangMenu.add_command(label='New colour dialog', command=HS.hello)
self.Menubar.add_cascade(label='Dialog', menu=DuiHuaKuangMenu)
# 定义帮助菜单栏
BangZhuMenu = Menu(self.Menubar, tearoff=0)
BangZhuMenu.add_command(label='About', command=HS.hello)
self.Menubar.add_cascade(label='Help', menu=BangZhuMenu)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 定义右键菜单
# 新建控件右键菜单
self.New_kj_menu = Menu(self.Menubar, tearoff=0)
self.New_kj_menu.add_command(label='Button', command=self.Hua_Button)
self.New_kj_menu.add_command(label='Canvas', command=self.Hua_Canvas)
self.New_kj_menu.add_command(label='Checkbutton', command=self.Hua_Checkbutton)
self.New_kj_menu.add_command(label='Combobox', command=self.Hua_Combobox)
self.New_kj_menu.add_command(label='Entry', command=self.Hua_Entry)
self.New_kj_menu.add_command(label='Frame', command=self.Hua_Frame)
self.New_kj_menu.add_command(label='Label', command=self.Hua_Label)
self.New_kj_menu.add_command(label='LabelFrame', command=self.Hua_LabelFrame)
self.New_kj_menu.add_command(label='Listbox', command=self.Hua_Listbox)
self.New_kj_menu.add_command(label='Menu', command=self.Hua_Menu)
self.New_kj_menu.add_command(label='Message', command=self.Hua_Message)
self.New_kj_menu.add_command(label='PanedWindow', command=self.Hua_PanedWindow)
self.New_kj_menu.add_command(label='Radiobutton', command=self.Hua_Radiobutton)
self.New_kj_menu.add_command(label='Scale_X', command=self.Hua_Scale_X)
self.New_kj_menu.add_command(label='Scale_Y', command=self.Hua_Scale_Y)
self.New_kj_menu.add_command(label='Spinbox', command=self.Hua_Spinbox)
self.New_kj_menu.add_command(label='Text', command=self.Hua_Text)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 编辑控件右键菜单
self.BianJi_kj_menu = Menu(self.Menubar, tearoff=0)
self.BianJi_kj_menu.add_command(label='OK', command=self.BianJi_OK)
self.BianJi_kj_menu.add_command(label='Move', command=self.BianJi_Move)
self.BianJi_kj_menu.add_command(label='Design', command=self.BianJi_Design)
self.BianJi_kj_menu.add_command(label='Delete', command=self.BianJi_Delete)
self.BianJi_kj_menu.add_command(label='Cancel', command=self.BianJi_Cancel)
# 展示主菜单
self.config(menu=self.Menubar)
# X:横向 Y:纵向 设置部件
self.Lab1 = Label(text='Y:', font=('Consol', '26', 'bold'), foreground='DarkBlue')
self.Lab1.place(x=0, y=0)
self.Lab2 = Label(text='X:', font=('Consol', '26', 'bold'), foreground='DarkBlue')
self.Lab2.place(x=60, y=0)
self.Lab_CK_X_len = Label(text='X length', font=('Consol', '12'), foreground='DarkBlue')
self.Lab_CK_X_len.place(x=623, y=0)
self.Lab_CK_Y_len = Label(text='Y length', font=('Consol', '12'), foreground='DarkBlue')
self.Lab_CK_Y_len.place(x=623, y=26)
self.Lab_font_size = Label(text='font size', font=('Consol', '12'), foreground='DarkBlue')
self.Lab_font_size.place(x=1250, y=760)
self.Lab_font_size.lower()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
self.Btn_CK_ZhuanDao = Button(text='To win', font=('Consol', 9), foreground='DarkBlue', width=8, height=1,
command=self.ChuangKouZhuan)
self.Btn_CK_ZhuanDao.place(x=762, y=0) # 要想以后修改Btn_CK_ZhuanDao,必须先定义后摆放!
self.Btn_CK_FuWei = Button(text='Reset win', font=('Consol', 9), foreground='DarkBlue', width=8, height=1,
command=self.FuWeiKouZhuan)
self.Btn_CK_FuWei.place(x=762, y=26)
self.Btn_CK_Set = Button(text='Win_Set', font=('Consol', 9), foreground='DarkBlue', width=8, height=1,
command=self.Set_KouZhuan)
self.Btn_CK_Set.place(x=826, y=26)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Hide or Show 键
self.Btn_YinCang = Button(textvariable=self.Btn_YinCang_Text, font=('Consol', 10), foreground='DarkBlue', width=6, height=1,
command=self.YinCang)
self.Btn_YinCang.place(x=1482, y=0)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Update 键
self.Btn_Update = Button(text='Update', font=('Consol', 10), foreground='DarkBlue',
width=6, height=1,
command=self.UI_Ban_Btn_OK)
self.Btn_Update.place(x=2000, y=26)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 属性键
self.Btn_ShuXing = Button(textvariable=self.Btn_ShuXing_Text, font=('Consol', 10), foreground='DarkBlue',
width=6, height=1,
command=self.ShuXing_Zhan)
self.Btn_ShuXing.place(x=1482, y=26)
self.Btn_BianYi = Button(text='Compi', font=('Consol', 10), foreground='black', width=5, height=2,
command=self.BianYi)
self.Btn_BianYi.place(x=6, y=600)
self.Btn_BianYi.lower()
self.Btn_BianYi_FuZhi = Button(text='Copy', font=('Consol', 10), foreground='black', width=5, height=2,
command=self.BianYi_Color_Green)
self.Btn_BianYi_FuZhi.place(x=6, y=650)
self.Btn_BianYi_FuZhi.lower()
self.Btn_BianYi_ShengCheng = Button(text='Gener', font=('Consol', 10), foreground='black', width=5, height=2,
command=self.BianYi_Color_Green)
self.Btn_BianYi_ShengCheng.place(x=6, y=700)
self.Btn_BianYi_ShengCheng.lower()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 下排按钮
self.Btn_BianYi_Text = Button(textvariable=self.Tv_BianYi_Text, font=('华文行楷', 12), foreground='red', width=5, height=1,
command=self.BianYi_Text)
self.Btn_BianYi_Text.place(x=60, y=746)
self.Btn_BianYi_Text.lower()
self.Btn_Canva_Hide = Button(textvariable=self.Tv_Canva_Hide, font=('华文行楷', 12), foreground='DarkBlue', width=5,
height=1,
command=self.Canva_Hide)
self.Btn_Canva_Hide.place(x=120, y=746)
self.Btn_Canva_Hide.lower()
self.Btn_BianYi_Color_White = Button(text='Color', font=('华文行楷', 12), foreground='black', background='white', width=5, height=1,
command=self.BianYi_Color_White)
self.Btn_BianYi_Color_White.place(x=180, y=746)
self.Btn_BianYi_Color_White.lower()
self.Btn_BianYi_Color_Black = Button(text='Color', font=('华文行楷', 12), foreground='white', background='black', width=5, height=1,
command=self.BianYi_Color_Black)
self.Btn_BianYi_Color_Black.place(x=240, y=746)
self.Btn_BianYi_Color_Black.lower()
self.Btn_BianYi_Color_YangPiZhi = Button(text='Color', font=('华文行楷', 12), foreground='black', background='LemonChiffon', width=5, height=1,
command=self.BianYi_Color_YangPiZhi)
self.Btn_BianYi_Color_YangPiZhi.place(x=300, y=746)
self.Btn_BianYi_Color_YangPiZhi.lower()
self.Btn_BianYi_Color_Green = Button(text='Color', font=('华文行楷', 12), foreground='white', background='green', width=5, height=1,
command=self.BianYi_Color_Green)
self.Btn_BianYi_Color_Green.place(x=360, y=746)
self.Btn_BianYi_Color_Green.lower()
self.Btn_WangGe = Button(textvariable=self.Btn_WG_Text, font=('华文行楷', 13), foreground='DarkBlue',
width=5, height=1, command=self.QiYong_WangGe)
self.Btn_WangGe.place(x=420, y=746)
self.Btn_WangGe.lower()
self.GuDing = Button(textvariable=self.GuDing_Text, font=('华文行楷', 13),foreground='DarkBlue', width=5, height=1,
command=self.GuDingChuangKou)
self.GuDing.place(x=0, y=746)
self.GuDing.lower()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
self.Ent_X = Entry(textvariable=self.ent_x, width=5, font=('Consol', '12', 'bold'), foreground='Darkblue')
self.Ent_X.place(x=703, y=0)
self.Ent_Y = Entry(textvariable=self.ent_y, width=5, font=('Consol', '12', 'bold'), foreground='Darkblue')
self.Ent_Y.place(x=703, y=26)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
self.Sca_Y = Scale(from_=0, to=self.Sca_JiZhi_Y, orient=VERTICAL, variable=self.vy, length=500,
resolution=1, command=self.HuaBuFangDa_Y)
self.Sca_Y.place(x=0, y=40)
self.Sca_X = Scale(from_=0, to=self.Sca_JiZhi_X, orient=HORIZONTAL, variable=self.vx, length=500,
resolution=1, command=self.HuaBuFangDa_X)
self.Sca_X.place(x=100, y=0)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 字体调节滚动条
self.Sca_Text_front = Scale(from_=8, to=50, orient=HORIZONTAL, variable=self.vx_Text_font, length=200,
resolution=1, command=self.Text_font)
self.Sca_Text_front.set(16)
self.Sca_Text_front.place(x=1328, y=739)
self.Sca_Text_front.lower()
self.ent_x.set(canva_W)
self.ent_y.set(canva_H)
self.PanedWin_X1 = PanedWindow(width=1480, height=690, sashwidth=6, sashrelief=SUNKEN)
self.PanedWin_X1.place(x=2000, y=50)
self.PanedWin_X1.lower()
self.Text_BianYi = ScrolledText(self.PanedWin_X1, width=74, height=22, font=('Consolas', '20'), insertbackground='black')
self.PanedWin_X1.add(self.Text_BianYi)
self.Text_BianYi.lower()
self.PanedWin_Y1 = PanedWindow(self.PanedWin_X1, orient=VERTICAL, sashwidth=6, sashrelief=SUNKEN)
self.PanedWin_X1.add(self.PanedWin_Y1)
self.Paned_Frame_Y1 = Frame(self.PanedWin_Y1, width=300, height=380, bg='red')
self.PanedWin_Y1.add(self.Paned_Frame_Y1)
self.Paned_Frame_Y2 = Frame(self.PanedWin_Y1, width=330, height=300, bg='green')
self.PanedWin_Y1.add(self.Paned_Frame_Y2)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
self.PanedF_Canvas_Y1 = Canvas(self.Paned_Frame_Y1, bg='white', width=300, height=1700)
self.PanedF_Canvas_Y1.place(x=48, y=0)
self.Scal_Y1 = Scale(self.Paned_Frame_Y1, from_=0, to=100, fg='white', bg='white', resolution=2, length=380,
variable=self.V_Scal_Y1, command=self.V_P_Scal_Y1)
self.Scal_Y1.pack(side=LEFT, fill=Y)
self.PanedF_Canvas_Y2 = Canvas(self.Paned_Frame_Y2, bg='white', width=300, height=1700)
self.PanedF_Canvas_Y2.place(x=48, y=0)
self.Scal_Y2 = Scale(self.Paned_Frame_Y2, from_=0, to=100, fg='white', bg='white', resolution=2, length=380,
variable=self.V_Scal_Y2, command=self.V_P_Scal_Y2)
self.Scal_Y2.pack(side=LEFT, fill=Y)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 上属性框部件设置
self.lab_ControlType = StringVar()
self.ent_ControlName = StringVar()
self.ent_X0 = IntVar()
self.ent_Y0 = IntVar()
self.ent_width = IntVar()
self.ent_height = IntVar()
self.ent_length = IntVar()
self.ent_fontSize = IntVar()
self.combt_fontType = StringVar()
self.combt_foreground = StringVar()
self.combt_background = StringVar()
self.combt_anchor = StringVar()
self.combt_justify = StringVar()
self.ent_text = StringVar()
self.combt_state = StringVar()
self.combt_relief = StringVar()
self.combt_highlightcolor = StringVar()
self.combt_highlightbackground = StringVar()
self.combt_bitmap = StringVar()
self.ent_image = StringVar()
self.combt_padx = IntVar()
self.combt_pady = IntVar()
self.combt_takefocus = StringVar()
self.combt_cursor = StringVar()
self.ent_container = StringVar()
self.ent_command = StringVar()
global lab_ControlType
global ent_ControlName
global ent_X0
global ent_Y0
global ent_width
global ent_height
global ent_length
global ent_fontSize
global combt_fontType
global combt_foreground
global combt_background
global combt_anchor
global combt_justify
global ent_text
global combt_state
global combt_relief
global combt_highlightcolor
global combt_highlightbackground
global combt_bitmap
global ent_image
global combt_padx
global combt_pady
global combt_takefocus
global combt_cursor
global ent_container
global ent_command
# 上属性框部件设置
self.lab_ControlType.set(lab_ControlType)
self.ent_ControlName.set(ent_ControlName)
self.ent_X0.set(ent_X0)
self.ent_Y0.set(ent_Y0)
self.ent_width.set(ent_width)
self.ent_height.set(ent_height)
self.ent_length.set(ent_length)
self.ent_fontSize.set(ent_fontSize)
self.combt_fontType.set(combt_fontType)
self.combt_foreground.set(combt_foreground)
self.combt_background.set(combt_background)
self.combt_anchor.set(combt_anchor)
self.combt_justify.set(combt_justify)
self.ent_text.set(ent_text)
self.combt_state.set(combt_state)
self.combt_relief.set(combt_relief)
self.combt_highlightcolor.set(combt_highlightcolor)
self.combt_highlightbackground.set(combt_highlightbackground)
self.combt_bitmap.set(combt_bitmap)
self.ent_image.set(ent_image)
self.combt_padx.set(combt_padx)
self.combt_pady.set(combt_pady)
self.combt_takefocus.set(combt_takefocus)
self.combt_cursor.set(combt_cursor)
self.ent_container.set(ent_container)
self.ent_command.set(ent_command)
self.JG_Y=70
self.JG_X=6
self.FuDong=6
self.FuDong_Scal_Y=30
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Control Type
self.l = Label(self.PanedF_Canvas_Y1, text='control type', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 0 + self.FuDong)
self.Ent_ControlType = Label(self.PanedF_Canvas_Y1, textvariable=self.lab_ControlType, width=20, bg='DeepSkyBlue',
foreground='Darkblue')
self.Ent_ControlType.place(x=self.JG_X + 120, y=self.JG_Y * 0 + self.FuDong)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Control Name
self.l = Label(self.PanedF_Canvas_Y1, text='control name', bg='white')
self.l.place(x=self.JG_X, y=40)
self.Ent_ControlName = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_ControlName, width=16, bg='LightGreen',
foreground='black')
self.Ent_ControlName.place(x=self.JG_X + 120, y=40)
self.Btn_Ok_ControlName = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_ControlName.place(x=self.JG_X + 241, y=40)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# X0
self.l = Label(self.PanedF_Canvas_Y1, text='X0', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y*1 + self.FuDong)
self.Ent_X0 = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_X0, width=16, bg='DeepSkyBlue',
foreground='Darkblue')
self.Ent_X0.place(x=self.JG_X + 120, y=self.JG_Y*1 + self.FuDong)
self.Btn_Ok_X0 = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_X0.place(x=self.JG_X + 241, y=self.JG_Y*1 + self.FuDong)
self.Sca_X0 = Scale(self.PanedF_Canvas_Y1, from_=0, to=1800, orient=HORIZONTAL, variable=self.ent_X0,
length=260, width=10, resolution=1, bg='white', command=self.UI_Ban)
self.Sca_X0.place(x=self.JG_X, y=self.JG_Y*1 + self.FuDong_Scal_Y)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Y0
self.l = Label(self.PanedF_Canvas_Y1, text='Y0', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 2 + self.FuDong)
self.Ent_Y0 = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_Y0, width=16, bg='DeepSkyBlue',
foreground='Darkblue')
self.Ent_Y0.place(x=self.JG_X + 120, y=self.JG_Y * 2 + self.FuDong)
self.Btn_Ok_Y0 = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_Y0.place(x=self.JG_X + 241, y=self.JG_Y * 2 + self.FuDong)
self.Sca_Y0 = Scale(self.PanedF_Canvas_Y1, from_=0, to=1600, orient=HORIZONTAL, variable=self.ent_Y0,
length=260, width=10, resolution=1, bg='white', command=self.UI_Ban)
self.Sca_Y0.place(x=self.JG_X, y=self.JG_Y * 2 + self.FuDong_Scal_Y)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# width
self.l = Label(self.PanedF_Canvas_Y1, text='width', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 3 + self.FuDong)
self.Ent_width = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_width, width=16, bg='DeepSkyBlue',
foreground='Darkblue')
self.Ent_width.place(x=self.JG_X + 120, y=self.JG_Y * 3 + self.FuDong)
self.Btn_Ok_width = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_width.place(x=self.JG_X + 241, y=self.JG_Y * 3 + self.FuDong)
self.Sca_width = Scale(self.PanedF_Canvas_Y1, from_=0, to=300, orient=HORIZONTAL, variable=self.ent_width,
length=260, width=10, resolution=1, bg='white', command=self.UI_Ban)
self.Sca_width.place(x=self.JG_X, y=self.JG_Y * 3 + self.FuDong_Scal_Y)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# height
self.l = Label(self.PanedF_Canvas_Y1, text='height', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 4 + self.FuDong)
self.Ent_height = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_height, width=16, bg='DeepSkyBlue',
foreground='Darkblue')
self.Ent_height.place(x=self.JG_X + 120, y=self.JG_Y * 4 + self.FuDong)
self.Btn_Ok_height = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_height.place(x=self.JG_X + 241, y=self.JG_Y * 4 + self.FuDong)
self.Sca_height = Scale(self.PanedF_Canvas_Y1, from_=0, to=100, orient=HORIZONTAL, variable=self.ent_height,
length=260, width=10, resolution=1, bg='white', command=self.UI_Ban)
self.Sca_height.place(x=self.JG_X, y=self.JG_Y * 4 + self.FuDong_Scal_Y)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# length
len_scal = 16
D = 18
self.l = Label(self.PanedF_Canvas_Y1, text='length', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * len_scal - D)
self.Ent_length = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_length, width=16, bg='DeepSkyBlue',
foreground='Darkblue')
self.Ent_length.place(x=self.JG_X + 120, y=self.JG_Y * len_scal - D)
self.Btn_Ok_length = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_length.place(x=self.JG_X + 241, y=self.JG_Y * len_scal - D)
self.Sca_length = Scale(self.PanedF_Canvas_Y1, from_=0, to=2000, orient=HORIZONTAL, variable=self.ent_length,
length=260, width=10, resolution=1, bg='white', command=self.UI_Ban)
self.Sca_length.place(x=self.JG_X, y=self.JG_Y * len_scal + self.FuDong_Scal_Y - 6 - D)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# fontSize
self.l = Label(self.PanedF_Canvas_Y1, text='fontSize', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 5 + self.FuDong)
self.Ent_fontSize = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_fontSize, width=16, bg='DeepSkyBlue',
foreground='Darkblue')
self.Ent_fontSize.place(x=self.JG_X + 120, y=self.JG_Y * 5 + self.FuDong)
self.Btn_Ok_fontSize = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_fontSize.place(x=self.JG_X + 241, y=self.JG_Y * 5 + self.FuDong)
self.Sca_fontSize = Scale(self.PanedF_Canvas_Y1, from_=1, to=100, orient=HORIZONTAL, variable=self.ent_fontSize,
length=260, width=10, resolution=1, bg='white', command=self.UI_Ban)
self.Sca_fontSize.place(x=self.JG_X, y=self.JG_Y * 5 + self.FuDong_Scal_Y)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# fontType
self.l = Label(self.PanedF_Canvas_Y1, text='fontType', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 6 + self.FuDong)
self.comb_FontType_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19, textvariable=self.combt_fontType)
self.comb_FontType_Chose['values'] = \
(
'TkDefaultFont','Consolas', 'Arial', 'Algerian', 'Arial Rounded MT Bold', 'Bell MT', 'Bauhaus 93', 'BankGothic Md BT'
, 'Bradley Hand ITC', 'CASTELLAR', 'Elephant', 'French Script MT', 'Helvetica', 'Palace Script MT'
, 'MS UI Gothic', 'MingLiU_HKSCS-ExtB', 'Vineta BT', 'Swis721 BlkEx BT', '微软雅黑', '华文宋体'
, '华文行楷', '华文隶书', '华文新魏', '华文楷体', '华文细黑', '华文中宋', '华文彩云', '华文琥珀'
, '方正舒体', '方正姚体', '楷体', '宋体', '隶书', '幼圆', '新宋体'
)
self.comb_FontType_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 6 + self.FuDong)
self.comb_FontType_Chose.current(0)
self.Btn_Ok_fontSize = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_fontSize.place(x=self.JG_X + 241, y=self.JG_Y * 6 + self.FuDong)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# foreground
self.l = Label(self.PanedF_Canvas_Y1, text='foreground', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 - 20)
self.Btn_foreground = Button(self.PanedF_Canvas_Y1, text='...', width=2, height=1, font=('Consol', '9'),
command=self.More_foreground)
self.Btn_foreground.place(x=self.JG_X+215, y=self.JG_Y * 7 - 20)
self.Btn_Ok_foreground = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_foreground.place(x=self.JG_X + 241, y=self.JG_Y * 7 - 20)
self.comb_foreground_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=15, textvariable=self.combt_foreground)
self.comb_foreground_Chose['values'] = \
(
'SystemButtonText','black', 'white', 'blue', 'red', 'green', 'yellow', 'gray', 'DarkBlue', 'DeepSkyBlue'
, 'LightGreen', 'Pink', 'LightPink', 'DeepPink', 'Purple', 'Violet', 'BLueViolet','Beige'
, 'GreenYellow', 'Ivory', 'LightYellow', 'LightCyan', 'LightBlue', 'LightSkyBlue','Aqua'
, 'Lime', 'LawnGreen', 'ForestGreen', 'Olive', 'Azure', 'SpringGreen', 'PaleGreen'
, 'SlateGray', 'LightSlateGray', 'CadetBlue','DodgerBlue'
)
self.comb_foreground_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 - 20)
self.comb_foreground_Chose.current(0)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# background
self.l = Label(self.PanedF_Canvas_Y1, text='background', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 20)
self.Btn_background = Button(self.PanedF_Canvas_Y1, text='...', width=2, height=1, font=('Consol', '9'),
command=self.More_background)
self.Btn_background.place(x=self.JG_X + 215, y=self.JG_Y * 7 + 20)
self.Btn_Ok_background = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_background.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 20)
self.comb_background_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=15, textvariable=self.combt_background)
self.comb_background_Chose['values'] = \
(
'SystemButtonFace','black', 'white', 'blue', 'red', 'green', 'yellow', 'gray', 'DarkBlue', 'DeepSkyBlue'
, 'LightGreen', 'Pink', 'LightPink', 'DeepPink', 'Purple', 'Violet', 'BLueViolet', 'Beige'
, 'GreenYellow', 'Ivory', 'LightYellow', 'LightCyan', 'LightBlue', 'LightSkyBlue', 'Aqua'
, 'Lime', 'LawnGreen', 'ForestGreen', 'Olive', 'Azure', 'SpringGreen', 'PaleGreen'
, 'SlateGray', 'LightSlateGray', 'CadetBlue', 'DodgerBlue'
)
self.comb_background_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 20)
self.comb_background_Chose.current(0)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# anchor
self.l = Label(self.PanedF_Canvas_Y1, text='anchor', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 60)
self.combt_anchor_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19, textvariable=self.combt_anchor)
self.combt_anchor_Chose['values'] = \
(
'center', 'n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw'
)
self.combt_anchor_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 60)
self.combt_anchor_Chose.current(0)
self.Btn_Ok_anchor = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_anchor.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 60)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# justify
self.l = Label(self.PanedF_Canvas_Y1, text='justify', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 100) # y 方向每 40一间隔
self.combt_justify_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19, textvariable=self.combt_justify)
self.combt_justify_Chose['values'] = \
(
'center', 'left', 'right'
)
self.combt_justify_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 100)
self.combt_justify_Chose.current(0)
self.Btn_Ok_justify = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_justify.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 100)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# text
self.l = Label(self.PanedF_Canvas_Y1, text='text', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 140) # y 方向每 40一间隔
self.Ent_text = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_text, width=26, bg='LightCyan',
foreground='Darkblue')
self.Ent_text.place(x=self.JG_X+50, y=self.JG_Y * 7 + 140)
self.Btn_Ok_text = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_text.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 140)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# state
self.l = Label(self.PanedF_Canvas_Y1, text='state', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 180) # y 方向每 40一间隔
self.combt_state_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19, textvariable=self.combt_state)
self.combt_state_Chose['values'] = \
(
'normal', 'active', 'disabled'
)
self.combt_state_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 180)
self.combt_state_Chose.current(0)
self.Btn_Ok_state = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_state.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 180)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# relief
self.l = Label(self.PanedF_Canvas_Y1, text='relief', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 220) # y 方向每 40一间隔
self.combt_relief_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19, textvariable=self.combt_relief)
self.combt_relief_Chose['values'] = \
(
'raised', 'sunken', 'flat', 'ridge', 'solid', 'groove'
)
self.combt_relief_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 220)
self.combt_relief_Chose.current(0)
self.Btn_Ok_relief = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_relief.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 220)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# highlightcolor
self.l = Label(self.PanedF_Canvas_Y1, text='highlight', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 260)
self.Btn_highlightcolor = Button(self.PanedF_Canvas_Y1, text='...', width=2, height=1, font=('Consol', '9'),
command=self.More_highlightcolor)
self.Btn_highlightcolor.place(x=self.JG_X + 215, y=self.JG_Y * 7 + 260)
self.Btn_Ok_highlightcolor = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_highlightcolor.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 260)
self.comb_highlightcolor_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=15,
textvariable=self.combt_highlightcolor)
self.comb_highlightcolor_Chose['values'] = \
(
'SystemWindowFrame', 'black', 'white', 'blue', 'red', 'green', 'yellow', 'gray', 'DarkBlue', 'DeepSkyBlue'
, 'LightGreen', 'Pink', 'LightPink', 'DeepPink', 'Purple', 'Violet', 'BLueViolet', 'Beige'
, 'GreenYellow', 'Ivory', 'LightYellow', 'LightCyan', 'LightBlue', 'LightSkyBlue', 'Aqua'
, 'Lime', 'LawnGreen', 'ForestGreen', 'Olive', 'Azure', 'SpringGreen', 'PaleGreen'
, 'SlateGray', 'LightSlateGray', 'CadetBlue', 'DodgerBlue'
)
self.comb_highlightcolor_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 260)
self.comb_highlightcolor_Chose.current(0)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# highlightbackground
self.l = Label(self.PanedF_Canvas_Y1, text='highlight_B', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 300)
self.Btn_highlightbackground = Button(self.PanedF_Canvas_Y1, text='...', width=2, height=1,
font=('Consol', '9'), command=self.More_highlightbackground)
self.Btn_highlightbackground.place(x=self.JG_X + 215, y=self.JG_Y * 7 + 300)
self.Btn_Ok_highlightcolor = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_highlightcolor.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 300)
self.comb_highlightbackground_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=15,
textvariable=self.combt_highlightbackground)
self.comb_highlightbackground_Chose['values'] = \
(
'SystemButtonFace', 'black', 'white', 'blue', 'red', 'green', 'yellow', 'gray', 'DarkBlue', 'DeepSkyBlue'
, 'LightGreen', 'Pink', 'LightPink', 'DeepPink', 'Purple', 'Violet', 'BLueViolet', 'Beige'
, 'GreenYellow', 'Ivory', 'LightYellow', 'LightCyan', 'LightBlue', 'LightSkyBlue', 'Aqua'
, 'Lime', 'LawnGreen', 'ForestGreen', 'Olive', 'Azure', 'SpringGreen', 'PaleGreen'
, 'SlateGray', 'LightSlateGray', 'CadetBlue', 'DodgerBlue'
)
self.comb_highlightbackground_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 300)
self.comb_highlightbackground_Chose.current(0)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# bitmap
self.l = Label(self.PanedF_Canvas_Y1, text='bitmap', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 340) # y 方向每 40一间隔
self.comb_bitmap_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19,
textvariable=self.combt_bitmap)
self.comb_bitmap_Chose['values'] = \
(
'', 'error', 'gray75', 'gray50', 'gray25', 'gray12', 'hourglass', 'info', 'questhead', 'question', 'warning'
)
self.comb_bitmap_Chose.place(x=self.JG_X + 53, y=self.JG_Y * 7 + 340)
self.comb_bitmap_Chose.current(0)
self.Btn_bitmap = Button(self.PanedF_Canvas_Y1, text='...', width=2, height=1,
font=('Consol', '9'),
command=self.More_bitmap)
self.Btn_bitmap.place(x=self.JG_X + 215, y=self.JG_Y * 7 + 340)
self.Btn_Ok_bitmap = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_bitmap.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 340)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# image
self.l = Label(self.PanedF_Canvas_Y1, text='image', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 380) # y 方向每 40一间隔
self.Ent_image = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_image, width=22, bg='LightCyan',
foreground='Darkblue')
self.Ent_image.place(x=self.JG_X + 53, y=self.JG_Y * 7 + 380)
self.Btn_image = Button(self.PanedF_Canvas_Y1, text='...', width=2, height=1,
font=('Consol', '9'),
command=self.More_image)
self.Btn_image.place(x=self.JG_X + 215, y=self.JG_Y * 7 + 380)
self.Btn_Ok_image = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_image.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 380)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# padx
self.l = Label(self.PanedF_Canvas_Y1, text='padx', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 420) # y 方向每 40一间隔
self.combt_padx_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19, textvariable=self.combt_padx)
self.combt_padx_Chose['values'] = \
(
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
)
self.combt_padx_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 420)
self.combt_padx_Chose.current(0)
self.Btn_Ok_padx = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_padx.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 420)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# pady
self.l = Label(self.PanedF_Canvas_Y1, text='pady', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 460) # y 方向每 40一间隔
self.combt_pady_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19, textvariable=self.combt_pady)
self.combt_pady_Chose['values'] = \
(
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
)
self.combt_pady_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 460)
self.combt_pady_Chose.current(0)
self.Btn_Ok_pady = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_pady.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 460)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# takefocus
self.l = Label(self.PanedF_Canvas_Y1, text='takefocus', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 500) # y 方向每 40一间隔
self.combt_takefocus_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19, textvariable=self.combt_takefocus,
state='readonly')
self.combt_takefocus_Chose['values'] = \
(
'', 0, 1
)
self.combt_takefocus_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 500)
self.combt_takefocus_Chose.current(0)
self.Btn_Ok_takefocus = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_takefocus.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 500)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# cursor
self.l = Label(self.PanedF_Canvas_Y1, text='cursor', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 540) # y 方向每 40一间隔
self.combt_cursor_Chose = ttk.Combobox(self.PanedF_Canvas_Y1, width=19, textvariable=self.combt_cursor)
self.combt_cursor_Chose['values'] = \
(
'', 'arrow', 'based_arrow_up', 'based_arrow_down', 'boat', 'bogosity', 'bottom_left_corner ', 'bottom_right_corner', 'bottom_side',
'bottom_tee', 'box_spiral', 'center_ptr', 'circle', 'clock', 'coffee_mug', 'cross', 'cross_reverse', 'crosshair',
'diamond_cross', 'dot', 'dotbox', 'double_arrow', 'draft_large', 'draft_small', 'draped_box', 'exchange',
'fleur', 'gobbler', 'gumby', 'hand1', 'hand2', 'heart', 'icon', 'iron_cross', 'left_ptr', 'left_side', 'left_tee',
'leftbutton', 'll_angle', 'lr_angle', 'man', 'middlebutton', 'mouse', 'pencil', 'pirate', 'plus', 'question_arrow',
'right_ptr', 'right_side', 'right_tee', 'rightbutton', 'rtl_logo', 'sailboat', 'sb_down_arrow',
'sb_h_double_arrow', 'sb_left_arrow', 'sb_right_arrow', 'sb_up_arrow', 'sb_v_double_arrow', 'shuttle',
'sizing', 'spider', 'spraycan', 'star', 'target', 'tcross', 'top_left_arrow', 'top_left_corner', 'top_right_corner',
'top_side', 'top_tee', 'trek', 'ul_angle', 'umbrella', 'ur_angle', 'watch', 'xterm', 'X_cursor'
)
self.combt_cursor_Chose.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 540)
self.combt_cursor_Chose.current(0)
self.Btn_Ok_cursor = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_cursor.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 540)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# container
self.l = Label(self.PanedF_Canvas_Y1, text='container', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 7 + 580) # y 方向每 40一间隔
self.Ent_container = Entry(self.PanedF_Canvas_Y1, textvariable=self.ent_container, width=22, bg='LightCyan',
foreground='Darkblue')
self.Ent_container.place(x=self.JG_X + 80, y=self.JG_Y * 7 + 580)
self.Btn_Ok_container = Button(self.PanedF_Canvas_Y1, text='=>', width=2, height=1, font=('Consol', '9')
, command=self.UI_Ban_Btn_OK)
self.Btn_Ok_container.place(x=self.JG_X + 241, y=self.JG_Y * 7 + 580)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 下事件框事件设置
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# command
self.l = Label(self.PanedF_Canvas_Y2, text='command', bg='white')
self.l.place(x=self.JG_X, y=self.JG_Y * 0 + 6) # y 方向每 40一间隔
self.Ent_command = Entry(self.PanedF_Canvas_Y2, textvariable=self.ent_command, width=22, bg='LightCyan',
foreground='Darkblue')
self.Ent_command.place(x=self.JG_X + 80, y=6)
self.Btn_Ok_command = Button(self.PanedF_Canvas_Y2, text='=>', width=2, height=1, font=('Consol', '9'),
command=self.UI_Ban_Btn_OK)
self.Btn_Ok_command.place(x=246, y=6)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# button_press_1
self.ent_button_press_1 = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Click left mouse button once', bg='white')
self.l.place(x=self.JG_X, y=6 + 40) # y 方向每 40一间隔
self.Btn_button_press_1 = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1, font=('Consol', '10'),
command=self.SJ_button_press_1)
self.Btn_button_press_1.place(x=220, y=6 + 40)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# button_release_1
self.ent_button_release_1 = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Release left mouse button', bg='white')
self.l.place(x=self.JG_X, y=6 + 80) # y 方向每 40一间隔
self.Btn_button_release_1 = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1, font=('Consol', '10'),
command=self.SJ_button_release_1)
self.Btn_button_release_1.place(x=220, y=6 + 80)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# button_press_right_1
self.ent_button_press_right_1 = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Click right mouse button once', bg='white')
self.l.place(x=self.JG_X, y=6 + 120) # y 方向每 40一间隔
self.Btn_button_press_right_1 = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1, font=('Consol', '10'),
command=self.SJ_button_press_right_1)
self.Btn_button_press_right_1.place(x=220, y=6 + 120)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# button_press_left_2
self.ent_button_press_left_2 = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Double click left mouse button', bg='white')
self.l.place(x=self.JG_X, y=6 + 160) # y 方向每 40一间隔
self.Btn_button_press_left_2 = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_button_press_left_2)
self.Btn_button_press_left_2.place(x=220, y=6 + 160)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# button_press_right_2
self.ent_button_press_right_2 = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Double click right mouse button', bg='white')
self.l.place(x=self.JG_X, y=6 + 160) # y 方向每 40一间隔
self.Btn_button_press_right_2 = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_button_press_right_2)
self.Btn_button_press_right_2.place(x=220, y=6 + 160)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# button_press_middle_1
self.ent_button_press_middle_1 = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Click middle mouse button once', bg='white')
self.l.place(x=self.JG_X, y=6 + 200) # y 方向每 40一间隔
self.Btn_button_press_middle_1 = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_button_press_middle_1)
self.Btn_button_press_middle_1.place(x=220, y=6 + 200)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# button_press_middle_2
self.ent_button_press_middle_2 = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Double click right mouse button', bg='white')
self.l.place(x=self.JG_X, y=6 + 240) # y 方向每 40一间隔
self.Btn_button_press_middle_2 = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_button_press_middle_2)
self.Btn_button_press_middle_2.place(x=220, y=6 + 240)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# button_press_left_move
self.ent_button_press_left_move = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Double click right mouse button', bg='white')
self.l.place(x=self.JG_X, y=6 + 240) # y 方向每 40一间隔
self.Btn_button_press_left_move = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_button_press_left_move)
self.Btn_button_press_left_move.place(x=220, y=6 + 240)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# cursor_enter
self.combt_cursor_enter = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Cursor enter the control area', bg='white')
self.l.place(x=self.JG_X, y=6 + 280) # y 方向每 40一间隔
self.Btn_cursor_enter = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_cursor_enter)
self.Btn_cursor_enter.place(x=220, y=6 + 280)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# cursor_leave
self.combt_cursor_leave = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Cursor the leave control area', bg='white')
self.l.place(x=self.JG_X, y=6 + 320) # y 方向每 40一间隔
self.Btn_cursor_leave = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_cursor_leave)
self.Btn_cursor_leave.place(x=220, y=6 + 320)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# get_key_focus
self.ent_get_key_focus = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Get the focus of the keyboard', bg='white')
self.l.place(x=self.JG_X, y=6 + 360) # y 方向每 40一间隔
self.Btn_get_key_focus = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_get_key_focus)
self.Btn_get_key_focus.place(x=220, y=6 + 360)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# press_a_key
self.ent_press_a_key = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Press a key of the keyboard', bg='white')
self.l.place(x=self.JG_X, y=6 + 400) # y 方向每 40一间隔
self.Btn_press_a_key = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_press_a_key)
self.Btn_press_a_key.place(x=220, y=6 + 400)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# press_enter_key
self.ent_press_enter_key = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Press the enter key', bg='white')
self.l.place(x=self.JG_X, y=6 + 440) # y 方向每 40一间隔
self.Btn_press_enter_key = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_press_enter_key)
self.Btn_press_enter_key.place(x=220, y=6 + 440)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# when_control_change
self.ent_when_control_change = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='When the control change', bg='white')
self.l.place(x=self.JG_X, y=6 + 480) # y 方向每 40一间隔
self.Btn_when_control_change = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_when_control_change)
self.Btn_when_control_change.place(x=220, y=6 + 480)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# control_mouseWheel
self.ent_control_mouseWheel = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text='Press control and mouse_wheel', bg='white')
self.l.place(x=self.JG_X, y=6 + 520) # y 方向每 40一间隔
self.control_mouseWheel = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_control_mouseWheel)
self.control_mouseWheel.place(x=220, y=6 + 520)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# shift_mouseWheel
self.ent_shift_mouseWheel = StringVar()
self.l = Label(self.PanedF_Canvas_Y2, text="Press shift and mouse_wheel", bg='white')
self.l.place(x=self.JG_X, y=6 + 560) # y 方向每 40一间隔
self.Btn_shift_mouseWheel = Button(self.PanedF_Canvas_Y2, text='Add...', width=6, height=1,
font=('Consol', '10'),
command=self.SJ_shift_mouseWheel)
self.Btn_shift_mouseWheel.place(x=220, y=6 + 560)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# canva 事件绑定
self.canva.bind("<ButtonPress-1>", self.HuoQu_Canvas_ZuoBiao) # 绑定获取 Canvas 坐标事件
self.canva.bind("<ButtonPress-3>", self.Button3_Press) # 绑定获取 Canvas 坐标事件
self.Text_BianYi.bind("<Control-MouseWheel>", self.Text_Wheel) # 绑定获取 Text_BianYi 滚轮事件
# 组合键之间用 - 连接,只能同时使用
self.Scal_Y1.bind("<MouseWheel>", self.Y1_win_Wheel)
self.Scal_Y2.bind("<MouseWheel>", self.Y2_win_Wheel)
# 窗口位置改变事件
self.bind("<Configure>", self.Win_Change) # 绑定事件
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 编译
def BianYi(self):
global Str_BianYi
# 编译文本先清空
self.Text_BianYi.delete(1.0, END)
self.Text_BianYi.insert(END, Str_BianYi)
hua = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
hua.Hua_BianYi()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 设置 设计UI窗口参数
def Set_KouZhuan(self):
global canva_W
global canva_H
ck = SetCK_D(self)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def UI_Ban(self, value):
pass
global XuanZhong
Len = len(XuanZhong)
self.UI_2_QuanJu()
if Len != 0:
self.a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
self.a.UI_Ban_Design()
print('UI_Ban')
def UI_Ban_Btn_OK(self):
pass
global XuanZhong
Len = len(XuanZhong)
self.UI_2_QuanJu()
if Len != 0:
self.a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
self.a.UI_Ban_Design()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Toplevel window moved/resized
    def Win_Change(self, event):
        """<Configure> handler: remember the toplevel's current screen
        position in the globals ``win_X``/``win_Y`` (read elsewhere)."""
        global win_X
        global win_Y
        win_X = self.winfo_x()
        win_Y = self.winfo_y()
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Globals -> UI
    def QuanJu_2_UI(self):
        """Push the module-level property globals into the property-panel
        widget variables.

        Fields whose global is the empty string fall back to a neutral default
        (``0`` or ``""``).  NOTE(review): ``combt_foreground`` falls back to
        ``0`` while the other colour fields fall back to ``""`` — looks
        inconsistent; confirm whether that is intentional.
        """
        # Refresh from the global variables
        global lab_ControlType
        global ent_ControlName
        global ent_X0
        global ent_Y0
        global ent_width
        global ent_height
        global ent_length
        global ent_fontSize
        global combt_fontType
        global combt_foreground
        global combt_background
        global combt_anchor
        global combt_justify
        global ent_text
        global combt_state
        global combt_relief
        global combt_highlightcolor
        global combt_highlightbackground
        global combt_bitmap
        global ent_image
        global combt_padx
        global combt_pady
        global combt_takefocus
        global combt_cursor
        global ent_container
        global ent_command
        # Upper property-panel widgets: these five have no empty-string guard
        self.lab_ControlType.set(lab_ControlType)
        self.ent_ControlName.set(ent_ControlName)
        self.ent_X0.set(ent_X0)
        self.ent_Y0.set(ent_Y0)
        self.ent_width.set(ent_width)
        self.combt_background.set(combt_background)
        self.ent_container.set(ent_container)
        if ent_height != "":
            self.ent_height.set(ent_height)
        else:
            self.ent_height.set(0)
        if ent_length != "":
            self.ent_length.set(ent_length)
        else:
            self.ent_length.set(0)
        if ent_fontSize != "":
            self.ent_fontSize.set(ent_fontSize)
        else:
            self.ent_fontSize.set(0)
        if combt_fontType != "":
            self.combt_fontType.set(combt_fontType)
        else:
            self.combt_fontType.set("")
        if combt_foreground != "":
            self.combt_foreground.set(combt_foreground)
        else:
            self.combt_foreground.set(0)
        if combt_anchor != "":
            self.combt_anchor.set(combt_anchor)
        else:
            self.combt_anchor.set("")
        if combt_justify != "":
            self.combt_justify.set(combt_justify)
        else:
            self.combt_justify.set("")
        if ent_text != "":
            self.ent_text.set(ent_text)
        else:
            self.ent_text.set("")
        if combt_state != "":
            self.combt_state.set(combt_state)
        else:
            self.combt_state.set("")
        if combt_relief != "":
            self.combt_relief.set(combt_relief)
        else:
            self.combt_relief.set("")
        if combt_highlightcolor != "":
            self.combt_highlightcolor.set(combt_highlightcolor)
        else:
            self.combt_highlightcolor.set("")
        if combt_highlightbackground != "":
            self.combt_highlightbackground.set(combt_highlightbackground)
        else:
            self.combt_highlightbackground.set("")
        if combt_bitmap != "":
            self.combt_bitmap.set(combt_bitmap)
        else:
            self.combt_bitmap.set("")
        if ent_image != "":
            self.ent_image.set(ent_image)
        else:
            self.ent_image.set("")
        if combt_padx != "":
            self.combt_padx.set(combt_padx)
        else:
            self.combt_padx.set(0)
        if combt_pady != "":
            self.combt_pady.set(combt_pady)
        else:
            self.combt_pady.set(0)
        if combt_takefocus != "":
            self.combt_takefocus.set(combt_takefocus)
        else:
            self.combt_takefocus.set("")
        if combt_cursor != "":
            self.combt_cursor.set(combt_cursor)
        else:
            self.combt_cursor.set("")
        if ent_command != "":
            self.ent_command.set(ent_command)
        else:
            self.ent_command.set("")
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # UI -> globals (the original comment said "globals to UI"; direction here is the reverse)
    def UI_2_QuanJu(self):
        """Read every property-panel widget variable and store its value into
        the corresponding module-level global (the inverse of
        :meth:`QuanJu_2_UI`)."""
        # Update the global variables from the panel
        global lab_ControlType
        global ent_ControlName
        global ent_X0
        global ent_Y0
        global ent_width
        global ent_height
        global ent_length
        global ent_fontSize
        global combt_fontType
        global combt_foreground
        global combt_background
        global combt_anchor
        global combt_justify
        global ent_text
        global combt_state
        global combt_relief
        global combt_highlightcolor
        global combt_highlightbackground
        global combt_bitmap
        global ent_image
        global combt_padx
        global combt_pady
        global combt_takefocus
        global combt_cursor
        global ent_container
        global ent_command
        # Upper property-panel widgets
        lab_ControlType = self.lab_ControlType.get()
        ent_ControlName = self.ent_ControlName.get()
        ent_X0 = self.ent_X0.get()
        ent_Y0 = self.ent_Y0.get()
        ent_width = self.ent_width.get()
        ent_height = self.ent_height.get()
        ent_length = self.ent_length.get()
        ent_fontSize = self.ent_fontSize.get()
        combt_fontType = self.combt_fontType.get()
        combt_foreground = self.combt_foreground.get()
        combt_background = self.combt_background.get()
        combt_anchor = self.combt_anchor.get()
        combt_justify = self.combt_justify.get()
        ent_text = self.ent_text.get()
        combt_state = self.combt_state.get()
        combt_relief = self.combt_relief.get()
        combt_highlightcolor = self.combt_highlightcolor.get()
        combt_highlightbackground = self.combt_highlightbackground.get()
        combt_bitmap = self.combt_bitmap.get()
        ent_image = self.ent_image.get()
        combt_padx = self.combt_padx.get()
        combt_pady = self.combt_pady.get()
        combt_takefocus = self.combt_takefocus.get()
        combt_cursor = self.combt_cursor.get()
        ent_container = self.ent_container.get()
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        ent_command = self.ent_command.get()
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def BianJi_OK(self):
        """Context-menu 'OK': record the action in global ``each_YouJian``
        and delegate to :meth:`Hua.OK`."""
        global each_YouJian
        each_YouJian = 'OK'
        print(each_YouJian)
        self.a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        self.a.OK()
    def BianJi_Move(self):
        """Context-menu 'Move': delegate to :meth:`Hua.Move`."""
        global each_YouJian
        each_YouJian = 'Move'
        print(each_YouJian)
        self.a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        self.a.Move()
    def BianJi_Delete(self):
        """Context-menu 'Delete': delegate to :meth:`Hua.Delete`."""
        global each_YouJian
        each_YouJian = 'Delete'
        print(each_YouJian)
        self.a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        self.a.Delete()
    def BianJi_Design(self):
        """Context-menu 'Design': show the compile text pane and refresh the
        property panel from the globals."""
        global each_YouJian
        self.BianYi_Text_Design()
        each_YouJian = 'Design'
        self.QuanJu_2_UI()
    def BianJi_Cancel(self):
        """Context-menu 'Cancel': delegate to :meth:`Hua.Cancel`."""
        global each_YouJian
        each_YouJian = 'Cancel'
        self.a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        self.a.Cancel()
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Property-panel handlers
    # Scrolling
    def Y1_win_Wheel(self, event):
        """Mouse-wheel over the first property column: nudge its scale by 10
        per notch (wheel-up decreases, wheel-down increases)."""
        i = self.Scal_Y1.get()
        if event.delta > 0:  # wheel up
            i = i - 10
            print('i= ', i)
            self.Scal_Y1.set(i)
        else:  # wheel down
            i = i + 10
            print('i= ', i)
            self.Scal_Y1.set(i)
    def Y2_win_Wheel(self, event):
        """Mouse-wheel over the second property column: nudge its scale by 5
        per notch."""
        i = self.Scal_Y2.get()
        if event.delta > 0:  # wheel up
            i = i - 5
            self.Scal_Y2.set(i)
        else:  # wheel down
            i = i + 5
            self.Scal_Y2.set(i)
    # Pick a foreground colour
    def More_foreground(self):
        """Open the colour chooser, store the hex value in ``combt_foreground``
        and refresh the panel and selected control."""
        global combt_foreground
        a = Choose_Color()
        b = a.Color_Choose()
        combt_foreground = b[1]  # b[1] is the hex colour string from the chooser
        self.QuanJu_2_UI()
        self.UI_Ban_Btn_OK()
    # Pick a background colour
    def More_background(self):
        """As :meth:`More_foreground`, for ``combt_background``."""
        global combt_background
        a = Choose_Color()
        b = a.Color_Choose()
        combt_background = b[1]
        self.QuanJu_2_UI()
        self.UI_Ban_Btn_OK()
    # Pick a highlightcolor
    def More_highlightcolor(self):
        """As :meth:`More_foreground`, for ``combt_highlightcolor``."""
        global combt_highlightcolor
        a = Choose_Color()
        b = a.Color_Choose()
        combt_highlightcolor = b[1]
        self.QuanJu_2_UI()
        self.UI_Ban_Btn_OK()
    # Pick a highlightbackground
    def More_highlightbackground(self):
        """As :meth:`More_foreground`, for ``combt_highlightbackground``."""
        global combt_highlightbackground
        a = Choose_Color()
        b = a.Color_Choose()
        combt_highlightbackground = b[1]
        self.QuanJu_2_UI()
        self.UI_Ban_Btn_OK()
    # Pick a bitmap (.xbm); note: does not redraw the selected control
    def More_bitmap(self):
        """Choose an XBM bitmap file and store its name in ``combt_bitmap``."""
        global combt_bitmap
        a = Get_File_Name_XBM()
        b = a.Get_Name()
        combt_bitmap = b
        self.QuanJu_2_UI()
    # Pick an image (.gif); note: does not redraw the selected control
    def More_image(self):
        """Choose a GIF image file and store its name in ``ent_image``."""
        global ent_image
        a = Get_File_Name_GIF()
        b = a.Get_Name()
        ent_image = b
        self.QuanJu_2_UI()
    # Event-binding helpers: each opens the SJ_Dictionary dialog for one event key.
    def SJ_button_press_1(self):
        """Open the event dictionary for ``button_press_1``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("button_press_1")
    def SJ_button_release_1(self):
        """Open the event dictionary for ``button_release_1``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("button_release_1")
    def SJ_button_press_right_1(self):
        """Open the event dictionary for ``button_press_right_1``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("button_press_right_1")
    def SJ_button_press_left_2(self):
        """Open the event dictionary for ``button_press_left_2``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("button_press_left_2")
    def SJ_button_press_right_2(self):
        """Open the event dictionary for ``button_press_right_2``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("button_press_right_2")
    def SJ_button_press_middle_1(self):
        """Open the event dictionary for ``button_press_middle_1``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("button_press_middle_1")
    def SJ_button_press_middle_2(self):
        """Open the event dictionary for ``button_press_middle_2``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("button_press_middle_2")
    def SJ_button_press_left_move(self):
        """Open the event dictionary for ``button_press_left_move``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("button_press_left_move")
    def SJ_cursor_enter(self):
        """Open the event dictionary for ``cursor_enter``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("cursor_enter")
    def SJ_cursor_leave(self):
        """Open the event dictionary for ``cursor_leave``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("cursor_leave")
    def SJ_get_key_focus(self):
        """Open the event dictionary for ``get_key_focus``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("get_key_focus")
    def SJ_lose_key_focus(self):
        """Open the event dictionary for ``lose_key_focus``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("lose_key_focus")
    def SJ_press_a_key(self):
        """Open the event dictionary for ``press_a_key``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("press_a_key")
    def SJ_press_enter_key(self):
        """Open the event dictionary for ``press_enter_key``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("press_enter_key")
    def SJ_when_control_change(self):
        """Open the event dictionary for ``when_control_change``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("when_control_change")
    def SJ_control_mouseWheel(self):
        """Open the event dictionary for ``control_mouseWheel``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("control_mouseWheel")
    def SJ_shift_mouseWheel(self):
        """Open the event dictionary for ``shift_mouseWheel``."""
        sj_dict = SJ_Dictionary()
        sj_dict.SJ_Dict("shift_mouseWheel")
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Property-panel expand/collapse helpers
    def V_P_Scal_Y1(self, value):
        """Scroll the first property column by repositioning its frame."""
        self.PanedF_Canvas_Y1.place(x=48, y=0-int(value)*10)
    def V_P_Scal_Y2(self, value):
        """Scroll the second property column by repositioning its frame."""
        self.PanedF_Canvas_Y2.place(x=48, y=0-int(value)*10)
    def ShuXing_Zhan(self):
        """Toggle the property pane: on show, place the paned window on-screen
        and move the compile text after it; on hide, park it off-screen at
        x=2000 (off-screen parking is this file's hide convention)."""
        global flag_ShuXing_Tan
        if flag_ShuXing_Tan == FALSE:
            self.Btn_ShuXing_Text.set('=>')
            self.PanedWin_X1.place(x=1196, y=50)
            self.PanedWin_X1.paneconfig(self.Text_BianYi, after=self.PanedWin_Y1)
            flag_ShuXing_Tan = TRUE
            self.Btn_Update.place(x=1420, y=26)
        else:
            self.Btn_ShuXing_Text.set('<=')
            self.PanedWin_X1.place(x=2000, y=50)
            self.PanedWin_X1.paneconfig(self.Text_BianYi, before=self.PanedWin_Y1)
            flag_ShuXing_Tan = FALSE
            self.Btn_Update.place(x=2000, y=26)
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Compile text pane expand/collapse
    def BianYi_Text(self):
        """Toggle the compile text pane (same off-screen parking convention)."""
        global flag_BianYi_Text
        if flag_BianYi_Text == FALSE:
            self.Tv_BianYi_Text.set('Hide')
            self.PanedWin_X1.place(x=60, y=50)
            self.PanedWin_X1.paneconfig(self.Text_BianYi, before=self.PanedWin_Y1)
            flag_BianYi_Text = TRUE
            self.Btn_Update.place(x=1420, y=26)
        elif flag_BianYi_Text == TRUE:
            self.Tv_BianYi_Text.set('Text')
            self.PanedWin_X1.place(x=2000, y=50)
            flag_BianYi_Text = FALSE
            self.Btn_Update.place(x=2000, y=26)
    def BianYi_Text_Design(self):
        """Unconditionally show the compile text pane (used by 'Design')."""
        global flag_BianYi_Text
        self.Tv_BianYi_Text.set('Hide')
        self.PanedWin_X1.place(x=60, y=50)
        self.PanedWin_X1.paneconfig(self.Text_BianYi, before=self.PanedWin_Y1)
        self.Btn_Update.place(x=1420, y=26)
        flag_BianYi_Text = TRUE
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Canvas show/hide
    def Canva_Hide(self):
        """Toggle the design canvas between its remembered position
        (``canva_X``/``canva_Y``) and the off-screen parking spot."""
        global flag_Canva_Hide
        global canva_X
        global canva_Y
        if flag_Canva_Hide == FALSE:
            self.Tv_Canva_Hide.set('Hide')
            self.canva.place(x=canva_X, y=canva_Y)
            flag_Canva_Hide = TRUE
        elif flag_Canva_Hide == TRUE:
            self.Tv_Canva_Hide.set('Paint')
            self.canva.place(x=2000, y=50)
            flag_Canva_Hide = FALSE
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Compile Text colour themes
    def BianYi_Color_White(self):
        """White theme for the compile Text widget."""
        self.Text_BianYi.config(fg='black', bg='white', insertbackground='black')
    def BianYi_Color_Black(self):
        """Black theme for the compile Text widget."""
        self.Text_BianYi.config(fg='white', bg='black', insertbackground='white')
    def BianYi_Color_Green(self):
        """Green theme for the compile Text widget."""
        self.Text_BianYi.config(fg='white', bg='green', insertbackground='white')
    def BianYi_Color_YangPiZhi(self):
        """Parchment (LemonChiffon) theme for the compile Text widget."""
        self.Text_BianYi.config(fg='black', bg='LemonChiffon', insertbackground='black')
    # Compile-Text font size
    def Text_font(self, value):
        """Scale callback: set the compile Text font to Consolas at ``value`` pt."""
        Font=('Consolas',str(value))
        self.Text_BianYi.config(font=Font)
    # Ctrl+wheel over the compile Text adjusts the font-size scale by 1
    def Text_Wheel(self, event):
        """Mouse-wheel handler bound with <Control-MouseWheel>: wheel up grows,
        wheel down shrinks the font-size scale (which re-triggers Text_font)."""
        i = self.Sca_Text_front.get()
        if event.delta > 0:  # wheel up
            i = i + 1
            self.Sca_Text_front.set(i)
        else:  # wheel down
            i = i - 1
            self.Sca_Text_front.set(i)
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Right-click context menu
    def Button3_Press(self, event):
        """Pop up the 'new control' menu at the click point; (x_root, y_root)
        must be used so the menu appears exactly at the pointer."""
        self.New_kj_menu.post(event.x_root, event.y_root)
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def HuoQu_Canvas_ZuoBiao(self, event):
global Event_Canvas_x
global Event_Canvas_y
Event_GunLun_x = event.x
Event_GunLun_y = event.y
print('Event_GunLun_x = ', Event_GunLun_x)
print('Event_GunLun_y = ', Event_GunLun_y)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Canvas resize scales
    def HuaBuFangDa_Y(self, value):
        """Vertical resize scale callback: redraw the mock window border at
        height fram_H + value, update the height entry, and re-grid if on."""
        global canva_W
        global canva_H
        global scal_Y_Zhi
        scal_Y_Zhi = value
        self.ZhuChuangKou_BianYan_ShanChu()
        canva_H = self.fram_H + int(value)
        self.canva.config(width=canva_W, height=canva_H)
        self.ent_y.set(canva_H)
        self.ZhuChuangKou_BianYan()
        if self.flag_WangGe == TRUE:
            self.WG_Kai()
    def HuaBuFangDa_X(self, value):
        """Horizontal resize scale callback: mirror of :meth:`HuaBuFangDa_Y`
        for the width."""
        global canva_W
        global canva_H
        global scal_X_Zhi
        scal_X_Zhi = value
        self.ZhuChuangKou_BianYan_ShanChu()
        canva_W = self.fram_W + int(value)
        self.canva.config(width=canva_W, height=canva_H)
        self.ent_x.set(canva_W)
        self.ZhuChuangKou_BianYan()
        if self.flag_WangGe == TRUE:
            self.WG_Kai()
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 转到窗口
def ChuangKouZhuan(self):
global canva_W
global canva_H
self.ZhuChuangKou_BianYan_ShanChu()
self.ScZhi_X = int(self.ent_y.get()) - self.Yuan_canva_H # ScZhi_X 为 X方向的范围条的值
self.ScZhi_Y = int(self.ent_x.get()) - self.Yuan_canva_W # ScZhi_Y 为 Y方向的范围条的值
self.vy.set(self.ScZhi_X) # vy is the value of Sca_Y
self.vx.set(self.ScZhi_Y) # vx is the value of Sca_X
canva_H = int(self.ent_y.get())
canva_W = int(self.ent_x.get())
self.canva.config(width=canva_W, height=canva_H)
self.ZhuChuangKou_BianYan()
if self.flag_WangGe == TRUE:
self.WG_Kai()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 复位窗口
def FuWeiKouZhuan(self):
# 定义画布复位
global canva_X
global canva_Y
canva_X = 60
canva_Y = 50
self.ZhuChuangKou_BianYan_ShanChu()
self.canva.place(x=canva_X, y=canva_Y) # 此句用于复位
self.ZhuChuangKou_BianYan()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Hide / show the toolbar widgets
    def YinCang(self):
        """Toggle all toolbar widgets by moving them off-screen (x=-600) or
        back to their hard-coded on-screen positions.  The two branches must
        list the same widgets; keep them in sync when adding widgets."""
        if self.flag_BuJian_YinCang == FALSE:
            self.flag_BuJian_YinCang= TRUE
            self.Btn_YinCang_Text.set('Show')
            D = -600  # off-screen x used for every hidden widget
            self.Lab1.place(x=D, y=0)
            self.Lab2.place(x=D, y=0)
            self.Lab_CK_X_len.place(x=D, y=0)
            self.Lab_CK_Y_len.place(x=D, y=26)
            self.Lab_font_size.place(x=D, y=760)
            self.Btn_CK_ZhuanDao.place(x=D, y=0)
            self.Btn_CK_FuWei.place(x=D, y=0)
            self.GuDing.place(x=D, y=0)
            # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
            self.Btn_WangGe.place(x=D, y=746)
            self.Btn_BianYi.place(x=D, y=600)
            self.Btn_BianYi_FuZhi.place(x=D, y=650)
            self.Btn_BianYi_ShengCheng.place(x=D, y=700)
            self.Btn_BianYi_Text.place(x=D, y=746)
            self.Btn_Canva_Hide.place(x=D, y=746)
            self.Btn_BianYi_Color_White.place(x=D, y=746)
            self.Btn_BianYi_Color_Black.place(x=D, y=746)
            self.Btn_BianYi_Color_YangPiZhi.place(x=D, y=746)
            self.Btn_BianYi_Color_Green.place(x=D, y=746)
            self.Btn_CK_Set.place(x=D, y=26)
            # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
            self.Ent_X.place(x=D, y=0)
            self.Ent_Y.place(x=D, y=26)
            self.Sca_Y.place(x=D, y=40)
            self.Sca_X.place(x=D, y=0)
            self.Sca_Text_front.place(x=D, y=739)
        else:
            self.flag_BuJian_YinCang = FALSE
            self.Btn_YinCang_Text.set('Hide')
            self.Lab1.place(x=0, y=0)
            self.Lab2.place(x=60, y=0)
            self.Lab_CK_X_len.place(x=620, y=0)
            self.Lab_CK_Y_len.place(x=620, y=26)
            self.Lab_font_size.place(x=1250, y=760)
            self.Btn_CK_ZhuanDao.place(x=762, y=0)
            self.Btn_CK_FuWei.place(x=762, y=26)
            self.GuDing.place(x=0, y=746)
            # $$$$$$$$$$$$$$$$$$$$$$$$$$$
            self.Btn_WangGe.place(x=420, y=746)
            self.Btn_BianYi.place(x=6, y=600)
            self.Btn_BianYi_FuZhi.place(x=6, y=650)
            self.Btn_BianYi_ShengCheng.place(x=6, y=700)
            self.Btn_BianYi_Text.place(x=60, y=746)
            self.Btn_Canva_Hide.place(x=120, y=746)
            self.Btn_BianYi_Color_White.place(x=180, y=746)
            self.Btn_BianYi_Color_Black.place(x=240, y=746)
            self.Btn_BianYi_Color_YangPiZhi.place(x=300, y=746)
            self.Btn_BianYi_Color_Green.place(x=360, y=746)
            self.Btn_CK_Set.place(x=826, y=26)
            # $$$$$$$$$$$$$$$$$$$$$$$$$$$
            self.Ent_X.place(x=710, y=0)
            self.Ent_Y.place(x=710, y=26)
            self.Sca_Y.place(x=0, y=40)
            self.Sca_X.place(x=100, y=0)
            self.Sca_Text_front.place(x=1328, y=739)
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 固定窗口
def GuDingChuangKou(self):
global flag_CK_GuDing
if flag_CK_GuDing == FALSE:
flag_CK_GuDing = TRUE
self.GuDing_Text.set('Unluck')
else:
flag_CK_GuDing = FALSE
self.GuDing_Text.set('Luck')
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Draw the alignment grid
    def WG_Kai(self):
        """Draw the alignment grid on the canvas as tagged ('WG') thin lines,
        below the title bar (self.bar_W), and lower them behind other items."""
        # Grid dimensions derived from the current canvas size
        global WangGe_ShuMu_X
        global WangGe_ShuMu_Y
        global WangGe_KuanDu
        global canva_H
        global canva_W
        WangGe_ShuMu_X = (canva_H - self.bar_W) / WangGe_KuanDu  # horizontal line count
        WangGe_ShuMu_Y = canva_W / WangGe_KuanDu                 # vertical line count
        # Horizontal lines
        for i in range(0, int(WangGe_ShuMu_X), 1):
            self.it_WangGe = self.canva.create_line(0, self.bar_W + WangGe_KuanDu * i, canva_W,
                                                    self.bar_W + WangGe_KuanDu * i,
                                                    fill=self.WangGe_YanSe, width=0.1)
            self.canva.itemconfig(self.it_WangGe, tags='WG')
            self.canva.lower(self.it_WangGe)
        # Vertical lines
        for i in range(0, int(WangGe_ShuMu_Y), 1):
            self.it_WangGe = self.canva.create_line(WangGe_KuanDu + WangGe_KuanDu * i, self.bar_W,
                                                    WangGe_KuanDu + WangGe_KuanDu * i, canva_H,
                                                    fill=self.WangGe_YanSe, width=0.1)
            self.canva.itemconfig(self.it_WangGe, tags='WG')
            self.canva.lower(self.it_WangGe)
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Remove the alignment grid
    def WG_Gun(self):
        """Delete every canvas item tagged 'WG' (the grid lines)."""
        self.canva.delete('WG')
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Toggle the alignment grid
    def QiYong_WangGe(self):
        """Flip ``flag_WangGe``, relabel the toolbar button, and draw or erase
        the grid accordingly."""
        if self.flag_WangGe == FALSE:
            self.flag_WangGe = TRUE
            self.Btn_WG_Text.set('G_Off')
            self.WG_Kai()
        else:
            self.flag_WangGe = FALSE
            self.Btn_WG_Text.set('G_On')
            self.WG_Gun()
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 画主窗口边沿
def ZhuChuangKou_BianYan(self):
global canva_W
global canva_H
global flag_Menu_Kai
# 画外边框
self.it1 = self.canva.create_rectangle(2, canva_H - 1, canva_W - 1, 2)
# 画标题栏框
self.it2 = self.canva.create_rectangle(2, self.bar_W, canva_W - 1, self.bar_W,
fil=self.ChuangKou_BiaoTiLan_YanSe)
# 画标题
self.it_BiaoTi = self.canva.create_text(43, 16, text=self.BiaoTi_Text,
font=('Consol', 11),
fill=self.BiaoTi_Text_YanSe)
# 画标题栏按钮
self.it_BiaoTi_AnNiu_ZuiXiao = self.canva.create_text(canva_W - 116, 16, text='—',
font=('Consol', 11),
fill=self.BiaoTi_Text_YanSe)
self.it_BiaoTi_AnNiu_ZuiDa = self.canva.create_text(canva_W - 70, 16, text='□',
font=('Consol', 11),
fill=self.BiaoTi_Text_YanSe)
self.it_BiaoTi_AnNiu_GuanBi = self.canva.create_text(canva_W - 28, 16, text='X',
font=('Helvetica', 11),
fill=self.BiaoTi_Text_YanSe)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Delete the mock main-window border
    def ZhuChuangKou_BianYan_ShanChu(self):
        """Delete every chrome item created by :meth:`ZhuChuangKou_BianYan`
        (plus the menu item when one was drawn)."""
        if flag_Menu_Kai == TRUE:
            self.canva.delete(self.it_Menu)
        self.canva.delete(self.it1)
        self.canva.delete(self.it2)
        self.canva.delete(self.it_BiaoTi)
        self.canva.delete(self.it_BiaoTi_AnNiu_ZuiXiao)
        self.canva.delete(self.it_BiaoTi_AnNiu_ZuiDa)
        self.canva.delete(self.it_BiaoTi_AnNiu_GuanBi)
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 画布移动
def HuaBu_YiDong(self):
# 鼠标中键按下事件
def paint1(event):
self.x1 = event.x
self.y1 = event.y
self.flag_SongKai = FALSE
self.canva.config(cursor='fleur')
# 鼠标中键松开事件
def paint2(event):
self.flag_SongKai = TRUE
self.canva.config(cursor='arrow')
# 鼠标中键按下并移动事件
def paint3(event):
self.x2 = event.x
self.y2 = event.y
if self.flag_SongKai == FALSE:
if flag_CK_GuDing == FALSE:
global canva_X
global canva_Y
self.canva.place(x=canva_X + (self.x2 - self.x1), y=canva_Y + (self.y2 - self.y1))
# 重新定义画布位置
canva_X = canva_X + (self.x2 - self.x1)
canva_Y = canva_Y + (self.y2 - self.y1)
# 画布控件与鼠标左键进行绑定
self.canva.bind("<ButtonPress-2>", paint1) # 绑定鼠标按下事件
self.canva.bind("<ButtonRelease - 2>", paint2) # 绑定鼠标松开事件
self.canva.bind("<B2-Motion>", paint3) # 绑定鼠标移动事件
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Main-menu commands: each bumps the per-widget counter name, creates a
    # Hua helper, tags it with the widget kind, and starts interactive placement.
    def Hua_Button(self):
        """Start placing a new Button on the design canvas."""
        global button1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Button ' + str( button1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('button1')
        a.Hua_Button()
    def Hua_Canvas(self):
        """Start placing a new Canvas."""
        global canvas1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Canvas ' + str(canvas1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('canvas1')
        a.Hua_Canvas()
    def Hua_Checkbutton(self):
        """Start placing a new Checkbutton."""
        global checkbutton1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Checkbutton ' + str(checkbutton1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('checkbutton1')
        a.Hua_Checkbutton()
    def Hua_Combobox(self):
        """Start placing a new ttk.Combobox."""
        global combobox1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Combobox ' + str(combobox1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('combobox1')
        a.Hua_Combobox()
    def Hua_Entry(self):
        """Start placing a new Entry."""
        global entry1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Entry ' + str(entry1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('entry1')
        a.Hua_Entry()
    def Hua_Frame(self):
        """Start placing a new Frame."""
        global frame1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Frame ' + str(frame1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('frame1')
        a.Hua_Frame()
    def Hua_Label(self):
        """Start placing a new Label."""
        global label1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Label ' + str(label1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('label1')
        a.Hua_Label()
    def Hua_LabelFrame(self):
        """Start placing a new LabelFrame."""
        global labelFrame1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'LabelFrame ' + str(labelFrame1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('labelFrame1')
        a.Hua_LabelFrame()
    def Hua_Listbox(self):
        """Start placing a new Listbox."""
        global listbox1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Listbox ' + str(listbox1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('listbox1')
        a.Hua_Listbox()
    def Hua_Menu(self):
        """Start placing a new Menu."""
        global menu1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Menu ' + str(menu1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('menu1')
        a.Hua_Menu()
    def Hua_Message(self):
        """Start placing a new Message."""
        global message1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Message ' + str(message1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('message1')
        a.Hua_Message()
def Hua_PanedWindow(self):
global panedWindow1_i
global DangQian_KJ_name
DangQian_KJ_name = 'PanedWindow ' + str(frame1_i + 1)
a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
a.Set_KJBZ('panedWindow1')
a.Hua_PanedWindow()
    def Hua_Radiobutton(self):
        """Start placing a new Radiobutton; also resets the radiobutton-group
        state (``flag_RadBtn_Zu``/``Radiobutton_i``)."""
        global radiobutton1_i
        global Radiobutton_i
        global flag_RadBtn_Zu
        global DangQian_KJ_name
        flag_RadBtn_Zu = FALSE
        Radiobutton_i = 0
        DangQian_KJ_name = 'Radiobutton ' + str(radiobutton1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('radiobutton1')
        a.Hua_Radiobutton()
    def Hua_Scale_X(self):
        """Start placing a new horizontal Scale."""
        global scale1_x_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Scale_X ' + str(scale1_x_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('scale1_x')
        a.Hua_Scale_X()
    def Hua_Scale_Y(self):
        """Start placing a new vertical Scale."""
        global scale1_y_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Scale_Y ' + str(scale1_y_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('scale1_y')
        a.Hua_Scale_Y()
    def Hua_Spinbox(self):
        """Start placing a new Spinbox."""
        global spinbox1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Spinbox ' + str(spinbox1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('spinbox1')
        a.Hua_Spinbox()
    def Hua_Text(self):
        """Start placing a new Text widget."""
        global text1_i
        global DangQian_KJ_name
        DangQian_KJ_name = 'Text ' + str(text1_i + 1)
        a = Hua(self.canva, self.BianJi_kj_menu, self.Text_BianYi)
        a.Set_KJBZ('text1')
        a.Hua_Text()
    def Hua_Toplevel(self):
        """Not implemented yet."""
        pass
    def Hua_tkMessageBox(self):
        """Not implemented yet."""
        pass
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 画基本图形类
class Hua:
    def __init__(self, Canva_1, Menu_1, Text_1):
        """Drawing helper bound to the design canvas, the per-control context
        menu, and the compile Text widget.

        Canva_1 -- the design Canvas the controls are placed on.
        Menu_1  -- the edit context menu shown for placed controls.
        Text_1  -- the compile/preview Text widget.
        """
        self.Text_1 = Text_1
        self.BianJi_kj_menu = Menu_1
        self.canva = Canva_1
        self.front_BiLi = 20
        self.Text_YanSe = 'black'
        self.fill_YanSe = 'white'
        self.OutLine_YanSe = 'Aqua'
        self.Kuan_width = 2
        self.flag_WanCheng1 = FALSE
        self.flag_FuZuKuang = FALSE
        self.bg_Canvas_YanSe = 'LightCyan'  # NOTE(review): assigned again 4 lines below with the same value
        self.bg_Entry_YanSe = 'Aqua'
        self.bg_Spinbox_YanSe = 'Aqua'
        self.bg_Listbox_YanSe = 'AquaMarine'
        self.bg_Canvas_YanSe = 'LightCyan'
        self.bg_Text_YanSe = 'LightCyan'
        self.list_name = StringVar()
        global bar_W
        self.bar_W = bar_W  # height of the mock title bar
        self.Zi_Menu_Shu = 0
    # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Button(self):
        """Interactive placement of a Button: press sets the anchor and creates
        the widget, drag resizes it, release finalises (LuRu_Dict + WanCheng).
        A plain click (no drag) gives a default 50x20 extent."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the click-without-drag case where X1/Y1 were never set
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                self.it_Button = Button(self.canva, text=DangQian_KJ_name, font=('TkDefaultFont', 10), width=7, height=1)
                self.it_Button.place(x=self.X0, y=self.Y0)
                # self.it_Button.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                # width/height options are in characters/lines: divide pixels
                # by the approximate character cell size (7 x 13)
                W = int(abs(self.X1 - self.X0)/7)
                H = int(abs(self.Y1 - self.Y0)/13)
                self.it_Button.config(width=W, height=H)
            def paint_ShiFang(event):
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 50
                    self.Y1 = self.Y0 + 20
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)       # drag
            self.canva.bind("<ButtonPress-1>", paint_AnXia)    # press
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # release
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Canvas(self):
        """Interactive placement of a Canvas: press anchors a 100x80 canvas
        with its name drawn inside, drag resizes in pixels, release finalises.
        A plain click keeps the default 100x80 extent."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the click-without-drag case
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                self.it_Canva = Canvas(self.canva, bg=self.bg_Canvas_YanSe, width=100, height=80)
                self.it_Canva.place(x=self.X0, y=self.Y0)
                self.it_Canva_name_Text = self.it_Canva.create_text(30, 10, text=DangQian_KJ_name, fill='DeepSkyBlue')
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                global DangQian_KJ_name
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                W = int(abs(self.X1 - self.X0))
                H = int(abs(self.Y1 - self.Y0))
                self.it_Canva.config(width=W, height=H)
                self.it_Canva_name_Text = self.it_Canva.create_text(30, 10, text=DangQian_KJ_name, fill='DeepSkyBlue')
            def paint_ShiFang(event):
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 100
                    self.Y1 = self.Y0 + 80
                # redraw the name once, on top, after sizing settles
                self.it_Canva.delete(self.it_Canva_name_Text)
                self.it_Canva_name_Text = self.it_Canva.create_text(30, 10, text=DangQian_KJ_name, fill='DeepSkyBlue')
                self.canva.delete('Hua_Kuang_ing')
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)       # drag
            self.canva.bind("<ButtonPress-1>", paint_AnXia)    # press
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # release
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Checkbutton(self):
        """Interactive placement of a Checkbutton (same press/drag/release
        pattern as :meth:`Hua_Button`; default extent 100x20 on plain click)."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the click-without-drag case
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                self.it_Checkbutton = Checkbutton(self.canva, text=DangQian_KJ_name, font=('TkDefaultFont', 10),
                                                  width=12, height=1)
                self.it_Checkbutton.place(x=self.X0, y=self.Y0)
                self.it_Checkbutton.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                # pixel -> character-cell conversion
                W = int(abs(self.X1 - self.X0)/7.3)
                H = int(abs(self.Y1 - self.Y0)/13)
                self.it_Checkbutton.config(width=W, height=H)
                # self.it_Checkbutton.place(x=self.X1, y=self.Y1)
            def paint_ShiFang(event):
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 100
                    self.Y1 = self.Y0 + 20
                self.canva.delete('Hua_Kuang_ing')
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)       # drag
            self.canva.bind("<ButtonPress-1>", paint_AnXia)    # press
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # release
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Combobox(self):
        """Interactive placement of a ttk.Combobox pre-filled with a sample
        value list (default extent 90x5 on plain click)."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the click-without-drag case
            def paint_AnXia(event):
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                list_name = StringVar()
                self.it_Combobox = ttk.Combobox(self.canva, text=list_name, font=('TkDefaultFont', 10), width=12, height=2)
                self.it_Combobox["values"] = ('Combobox', 1)
                self.it_Combobox.current(0)
                self.it_Combobox.place(x=self.X0, y=self.Y0)
                self.it_Combobox.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                # pixel -> character-cell conversion
                W = int(abs(self.X1 - self.X0)/7.6)
                H = int(abs(self.Y1 - self.Y0)/15.26)
                self.it_Combobox.config(width=W, height=H)
                self.it_Combobox.place(x=self.X0, y=self.Y0)
            def paint_ShiFang(event):
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 90
                    self.Y1 = self.Y0 + 5
                self.canva.delete('Hua_Kuang_ing')
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)       # drag
            self.canva.bind("<ButtonPress-1>", paint_AnXia)    # press
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # release
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Entry(self):
        """Interactive placement of an Entry (default extent 70x20 on plain
        click); only the width tracks the drag."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the click-without-drag case
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                # NOTE(review): entry_Text is set but never passed to the
                # widget — the Entry gets text=DangQian_KJ_name instead;
                # presumably textvariable=entry_Text was intended. Confirm.
                entry_Text = StringVar()
                entry_Text.set(DangQian_KJ_name)
                self.it_Entry = Entry(self.canva, text=DangQian_KJ_name, font=('TkDefaultFont', 10), width=10, bg=self.bg_Entry_YanSe)
                self.it_Entry.place(x=self.X0, y=self.Y0)
                self.it_Entry.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                W = int(abs(self.X1 - self.X0)/7)  # pixel -> character width
                self.it_Entry.config(width=W)
                self.it_Entry.place(x=self.X0, y=self.Y0)
            def paint_ShiFang(event):
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 70
                    self.Y1 = self.Y0 + 20
                self.canva.delete('Hua_Kuang_ing')
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)       # drag
            self.canva.bind("<ButtonPress-1>", paint_AnXia)    # press
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # release
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Frame(self):
        """Interactive placement of a Frame, sized in pixels (default 100x60
        on plain click)."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the click-without-drag case
            def paint_AnXia(event):
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                self.it_Frame = Frame(self.canva, width=100, height=60)
                self.it_Frame.place(x=self.X0, y=self.Y0)
                self.it_Frame.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                W = int(abs(self.X1 - self.X0))
                H = int(abs(self.Y1 - self.Y0))
                self.it_Frame.config(width=W, height=H)
            def paint_ShiFang(event):
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 100
                    self.Y1 = self.Y0 + 60
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)       # drag
            self.canva.bind("<ButtonPress-1>", paint_AnXia)    # press
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # release
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Label(self):
        """Arm the canvas so the next press/drag/release draws a Label widget."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the single-click case where self.X1/self.Y1 would stay 0
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                self.it_Label = Label(self.canva, text=DangQian_KJ_name, font=('TkDefaultFont', 10), width=0, height=0)
                self.it_Label.place(x=self.X0, y=self.Y0)
                self.it_Label.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                global DangQian_KJ_name
                self.Text = DangQian_KJ_name
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                # Label width/height are in characters/lines (~7x13 px per cell).
                W = int(abs(self.X1 - self.X0)/7)
                H = int(abs(self.Y1 - self.Y0)/13)
                self.it_Label.config(width=W, height=H)
            def paint_ShiFang(event):
                # Mouse release: a plain click falls back to a default 40x10 footprint.
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 40
                    self.Y1 = self.Y0 + 10
                self.canva.delete('Hua_Kuang_ing')
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_LabelFrame(self):
        """Arm the canvas so the next press/drag/release draws a LabelFrame."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the single-click case where self.X1/self.Y1 would stay 0
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                # LabelFrame has no textvariable option, so text= is set directly.
                self.it_LabelFrame = LabelFrame(self.canva, text=DangQian_KJ_name, font=('TkDefaultFont', 10), width=100, height=60)
                self.it_LabelFrame.place(x=self.X0, y=self.Y0)
                self.it_LabelFrame.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                global DangQian_KJ_name
                self.Text = DangQian_KJ_name
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                W = int(abs(self.X1 - self.X0))
                H = int(abs(self.Y1 - self.Y0))
                self.it_LabelFrame.config(width=W, height=H)
            def paint_ShiFang(event):
                # Mouse release: a plain click falls back to the default 100x60 box.
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 100
                    self.Y1 = self.Y0 + 60
                self.canva.delete('Hua_Kuang_ing')
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Listbox(self):
        """Arm the canvas so the next press/drag/release draws a Listbox."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the single-click case where self.X1/self.Y1 would stay 0
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                # Listbox has no textvariable or text option.
                self.it_Listbox = Listbox(self.canva, bg=self.bg_Listbox_YanSe, font=('TkDefaultFont', 10), width=12, height=3)
                self.it_Listbox.place(x=self.X0, y=self.Y0)
                self.it_Listbox.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                global DangQian_KJ_name
                self.Text = DangQian_KJ_name
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                # Listbox width/height are in characters/lines (~7x14 px per cell).
                W = int(abs(self.X1 - self.X0)/7)
                H = int(abs(self.Y1 - self.Y0)/14)
                self.it_Listbox.config(width=W, height=H)
            def paint_ShiFang(event):
                # Mouse release: a plain click falls back to a default 60x60 footprint.
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 60
                    self.Y1 = self.Y0 + 60
                self.canva.delete('Hua_Kuang_ing')
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Menu_Tuo(self):
        """Open the menu-editing toolbar for the drawn menu bar.

        X axis = menu-title buttons, Y axis = the drop-down item list of the
        currently selected title.  Inner helpers add/delete titles (X_add /
        X_delet), add/delete list items or separators (Y_add / Y_delet),
        show one drop-down while hiding the rest (XianShi_ListBox), and hide
        lists/inputs (YinChang_*).  The tkinter source snippets that will be
        emitted for the menu are accumulated in the module-level dictionaries
        Menu1 and Menu1_ListCode; deleted title indices are tombstoned in
        Menu1_Delete_Num and never reused.
        """
        self.list_name.set('')
        self.btn_name.set('')
        self.it_ShuRu_Entry.place(x=3, y=2)
        self.it_List_ShuRu_Entry.place(x=180, y=2)
        def X_add():
            # Add one menu-title button using the text typed into btn_name.
            global D_ZhuMenu
            global Menu1
            global DQ_Zong_Len
            global zi_menu1_sum  # total number of sub-menus
            global DQ_ZhuMenu_ZiXiang_Num_i
            global Menu1_Son_Len
            global zi_menu1_num_i  # running sub-menu index (monotonically increasing)
            self.Ent_X = StringVar()
            if zi_menu1_sum != 0:
                YinChang_List(zi_menu1_sum)
            if self.btn_name.get() != '':  # an empty Entry reads back as ''
                zi_menu1_num_i = zi_menu1_num_i + 1
                zi_menu1_sum = zi_menu1_sum + 1
                DQ_ZhuMenu_ZiXiang_Num_i = zi_menu1_num_i
                self.Ent_X.set(self.btn_name.get())
                # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                num = zi_menu1_num_i  # bind the index into the button's callback closure
                self.it_X_add_Btn_New = Button(self.frame, textvariable=self.Ent_X, relief=FLAT, height=1,
                                               font=('TkDefaultFont', 8), command=lambda: XianShi_ListBox(num_i=num))
                self.it_X_add_Btn_New.grid(row=1, column=zi_menu1_num_i + 1)
                self.it_X_add_Btn_New.lift()
                width = int((self.it_X_add_Btn_New.winfo_reqwidth()))
                # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                # Accumulate the widths of the surviving earlier titles so the new
                # drop-down list lands under its own button.
                DQ_Zong_Len = 0
                if zi_menu1_num_i > 1:
                    for i in range(1, zi_menu1_num_i, 1):
                        if i not in Menu1_Delete_Num:
                            len_name = "Len" + str(i)
                            DQ_Zong_Len = DQ_Zong_Len + Menu1_Son_Len[len_name][1]
                else:
                    DQ_Zong_Len = 0
                self.it_Y_add_Listbox_new = Listbox(self.canva, bg='SystemButtonFace')
                self.it_Y_add_Listbox_new.place(x=3 + DQ_Zong_Len, y=self.bar_W + 20)  # canvas coords are 7x the widget units
                self.it_Y_add_Listbox_new.lift()
                len_name = "Len" + str(zi_menu1_num_i)
                Menu1_Son_Len[len_name] = (zi_menu1_num_i, width)
                # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                # D_ZhuMenu tuple slots:
                # 0: self.it_X_add_Btn_New      -- the title Button widget
                # 1: zi_menu1_num_i             -- the title button's index
                # 2: self.it_Y_add_Listbox_new  -- its drop-down Listbox
                # 3: self.Ent_X.get()           -- the typed title text
                D_Menu_Btn_name = 'Menu_Btn' + str(zi_menu1_num_i)
                D_ZhuMenu[D_Menu_Btn_name] = (self.it_X_add_Btn_New, zi_menu1_num_i, self.it_Y_add_Listbox_new,
                                              self.Ent_X.get())
                # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                # Record the generated menu code.
                Menubar = "Menubar"
                zi_menu_name = "zi_menu_name" + str(zi_menu1_num_i)
                Menu1[zi_menu_name] = (str(self.Ent_X.get()) + "_menu").strip()
                zi_menu_tearoff_name = "zi_menu_tearoff_name" + str(zi_menu1_num_i)
                zi_menu_add_cascade_name = "zi_menu_add_cascade_name" + str(zi_menu1_num_i)
                Menu_Code1 = str(Menu1[zi_menu_name]) + " = Menu(" + Menubar + ", tearoff=0)"
                Menu_Code2 = Menubar + ".add_cascade(label='" + str(self.Ent_X.get()) + "', menu=" + str(Menu1[zi_menu_name]) + ")"
                Menu1[zi_menu_tearoff_name] = (Menu_Code1, zi_menu1_num_i)
                Menu1[zi_menu_add_cascade_name] = (Menu_Code2, zi_menu1_num_i)
                print(Menu1[zi_menu_tearoff_name][0])
                print(Menu1[zi_menu_add_cascade_name][0])
                # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                # Hide the drop-down lists of all surviving earlier titles.
                if zi_menu1_num_i > 1:
                    for i in range(1, zi_menu1_num_i, 1):
                        if i not in Menu1_Delete_Num:
                            a = D_ZhuMenu['Menu_Btn' + str(i)]
                            a[2].place(x=-600, y=self.bar_W + 30)
                # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                # Clear the input Entry for the next title.
                self.btn_name.set('')
        def XianShi_ListBox(num_i):
            # Show the drop-down list of title button num_i and hide all others.
            global D_ZhuMenu
            global DQ_ZhuMenu_ZiXiang_Num_i
            global DQ_Zong_Len
            global zi_menu1_sum
            global Menu1_Delete_Num
            DQ_ZhuMenu_ZiXiang_Num_i = num_i  # remember which title button is now active
            L = 0
            # len_name = "Len" + str(zi_menu1_num_i)
            # Menu1_Son_Len[len_name] = (zi_menu1_num_i, width)
            for i in range(1, num_i, 1):
                if i not in Menu1_Delete_Num:
                    len_name = "Len" + str(i)
                    L = L + Menu1_Son_Len[len_name][1]
            print("L = ", L)
            name_menu = 'Menu_Btn' + str(num_i)
            print("num_i = ", num_i)
            a = D_ZhuMenu[name_menu]
            # Re-place the list under its title button.
            a[2].place(x=3 + L, y=self.bar_W + 20)  # tuple slots are 0-based
            print("zi_menu1_sum = ", zi_menu1_sum)
            for i_Num in range(1, zi_menu1_num_i+1, 1):
                if (i_Num != num_i) and (i_Num not in Menu1_Delete_Num):
                    name1 = 'Menu_Btn' + str(i_Num)
                    a = D_ZhuMenu[name1]
                    a[2].place(x=-600, y=self.bar_W + 30)
        def X_delet():
            # Delete the currently selected title button and its drop-down list.
            global D_ZhuMenu
            global DQ_ZhuMenu_ZiXiang_Num_i
            global zi_menu1_sum
            global DQ_Zong_Len
            global Menu1_Delete_Num  # tombstone list of deleted title indices
            if zi_menu1_sum != 0:
                if DQ_ZhuMenu_ZiXiang_Num_i == zi_menu1_sum:
                    DQ_ZhuMenu_ZiXiang_Num_i = DQ_ZhuMenu_ZiXiang_Num_i - 1
                # D_Menu_Btn_name = 'Menu_Btn' + str(zi_menu1_sum)
                D_Menu_Btn_name = 'Menu_Btn' + str(DQ_ZhuMenu_ZiXiang_Num_i)
                a = D_ZhuMenu[D_Menu_Btn_name]
                Menu1_Delete_Num.append(DQ_ZhuMenu_ZiXiang_Num_i)
                a[0].destroy()  # slot 0: the title Button widget
                a[2].destroy()  # slot 2: its drop-down Listbox
                del D_ZhuMenu[D_Menu_Btn_name]
                zi_menu1_sum = zi_menu1_sum - 1
        def YinChang_List(i):
            # Hide title i's drop-down list by moving it off-canvas.
            global D_ZhuMenu
            global Menu1_Delete_Num
            if i not in Menu1_Delete_Num:
                name1 = 'Menu_Btn' + str(i)
                a = D_ZhuMenu[name1]
                a[2].place(x=-600, y=self.bar_W + 30)
        def YinChang_Entry():
            # Hide both input Entry widgets by moving them off-canvas.
            self.it_ShuRu_Entry.place(x=-600, y=0)
            self.it_List_ShuRu_Entry.place(x=-600, y=0)
        def YinChang_All():
            # Hide the active drop-down list and the two input entries.
            global DQ_ZhuMenu_ZiXiang_Num_i
            YinChang_List(DQ_ZhuMenu_ZiXiang_Num_i)
            YinChang_Entry()
        def Y_add(flag):
            # Insert a list item (flag=="text") or a separator into the active
            # title's drop-down and record the matching add_command /
            # add_separator code line in Menu1_ListCode (keys are 1-based).
            global D_ZhuMenu
            global Menu1
            global Menu1_ListCode
            global DQ_Zong_Len
            global zi_menu1_sum
            global DQ_ZhuMenu_ZiXiang_Num_i  # index of the active title button
            Str_Insert = ''
            global tap
            if flag == "text":
                Str_Insert = tap + str(self.list_name.get())
            elif flag == "separator":
                Str_Insert = '-----------------------------------------------------------------------------'
            if zi_menu1_sum != 0:
                D_Menu_Btn_name = 'Menu_Btn' + str(DQ_ZhuMenu_ZiXiang_Num_i)
                a = D_ZhuMenu[D_Menu_Btn_name]
                DQ_Listbox = a[2]
                zong = DQ_Listbox.size()
                if zong == 0:
                    DQ_Listbox.insert(END, Str_Insert)
                if zong > 0:
                    if a[2].curselection() == ():
                        DQ_Listbox.insert(END, Str_Insert)
                    else:
                        DQ_i = a[2].curselection()
                        DQ_Listbox.insert(DQ_i, Str_Insert)
                # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                # Record the generated code line.
                zi_menu_name = "zi_menu_name" + str(DQ_ZhuMenu_ZiXiang_Num_i)
                Code_Insert = ''
                # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                if flag == "text":
                    Code_Insert = str(Menu1[zi_menu_name]) + ".add_command(label='" + \
                                  str(self.list_name.get()) + "', command='')"
                elif flag == "separator":
                    Code_Insert = str(Menu1[zi_menu_name]) + ".add_separator()"
                # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                if zong == 0:
                    menu_list_code_name = str(Menu1[zi_menu_name]) + "_list_" + str(1)
                    Menu1_ListCode[menu_list_code_name] = (Code_Insert, DQ_ZhuMenu_ZiXiang_Num_i, 1)
                    print(Menu1_ListCode[menu_list_code_name][0])
                if zong > 0:
                    if a[2].curselection() == ():
                        menu_list_code_name = str(Menu1[zi_menu_name]) + "_list_" + str(zong + 1)
                        Menu1_ListCode[menu_list_code_name] = (Code_Insert, DQ_ZhuMenu_ZiXiang_Num_i, zong + 1)
                        print(Menu1_ListCode[menu_list_code_name][0])
                    else:
                        # Inserting shifts the selected item and everything after it.
                        A = a[2].curselection()  # curselection() returns a one-element tuple (index,)
                        DQ_Listbox_i = A[0]  # A[0] is 0-based
                        # Re-number the recorded code entries past the insertion point.
                        # (Listbox supports get() but not set().)
                        # *****************************************************************************************
                        D = {}  # scratch copy of the current entries
                        for i in range(1, zong+1, 1):  # range(a, b, 1) covers a..b-1 inclusive of a
                            name = str(Menu1[zi_menu_name]) + "_list_" + str(i)
                            D[str(i)] = Menu1_ListCode[name]
                        for i in range(int(DQ_Listbox_i)+1, zong+2, 1):
                            name = str(Menu1[zi_menu_name]) + "_list_" + str(i)
                            Menu1_ListCode[name] = D[str(i-1)]
                        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                        # Key step: store the new entry at the insertion slot.
                        # NOTE(review): this key uses str(int(DQ_Listbox_i)) (0-based) while
                        # every other branch uses 1-based keys, and the tuple stores
                        # zi_menu1_sum rather than DQ_ZhuMenu_ZiXiang_Num_i -- looks like an
                        # off-by-one / inconsistency; confirm against the code generator.
                        menu_list_code_name = str(Menu1[zi_menu_name]) + "_list_" + str(int(DQ_Listbox_i))
                        Menu1_ListCode[menu_list_code_name] = (Code_Insert, zi_menu1_sum, int(DQ_Listbox_i))
                        print(Menu1_ListCode[menu_list_code_name][0])
                        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                # NOTE(review): Menu1 never holds "_list_" keys (they live in
                # Menu1_ListCode), so Menu1[name] below raises KeyError -- compare the
                # parallel debug loop in Y_delet, which reads Menu1_ListCode[name].
                for i in range(1, zong + 2, 1):
                    name = str(Menu1[zi_menu_name]) + "_list_" + str(i)
                    print('Menu1[name] = ', Menu1[name][0], 'i = ', i)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        def Y_delet():
            # Delete the selected (or last) item of the active title's drop-down
            # list and drop its recorded code line from Menu1_ListCode.
            global D_ZhuMenu
            global D_ZhuMenu_List  # title buttons' drop-down list dictionary
            global DQ_Zong_Len
            global Menu1
            global Menu1_ListCode
            global zi_menu1_sum
            global DQ_ZhuMenu_ZiXiang_Num_i  # index of the active title button
            D_Menu_Btn_name = 'Menu_Btn' + str(DQ_ZhuMenu_ZiXiang_Num_i)
            a = D_ZhuMenu[D_Menu_Btn_name]
            DQ_Listbox = a[2]
            zong = DQ_Listbox.size()
            if zong > 0:
                # Record-keeping for the generated code.
                zi_menu_name = "zi_menu_name" + str(DQ_ZhuMenu_ZiXiang_Num_i)
                if zong == 0:
                    # NOTE(review): unreachable -- this sits inside `if zong > 0`.
                    DQ_Listbox.delete(END)
                    menu_list_code_name = str(Menu1[zi_menu_name]) + "_list_" + str(zong)
                    del Menu1_ListCode[menu_list_code_name]
                if zong > 0:
                    if a[2].curselection() == ():
                        DQ_Listbox.delete(END)
                        menu_list_code_name = str(Menu1[zi_menu_name]) + "_list_" + str(zong)
                        del Menu1_ListCode[menu_list_code_name]
                        print('del Menu1[menu_list_code_name] **************************')
                    else:
                        DQ_i = a[2].curselection()  # curselection() returns a one-element tuple (index,)
                        DQ_Listbox_i = DQ_i[0]  # 0-based
                        DQ_Listbox.delete(DQ_i)  # remove the selected list item
                        print('D = {} # 备用记录字典')
                        D = {}  # scratch copy of the current entries
                        # range(a, b, 1) covers a..b-1 inclusive of a
                        for i in range(1, zong + 1, 1):
                            name = str(Menu1[zi_menu_name]) + "_list_" + str(i)
                            D[str(i)] = Menu1_ListCode[name]
                        for i in range(int(DQ_Listbox_i)+1, zong, 1):
                            name = str(Menu1[zi_menu_name]) + "_list_" + str(i)
                            Menu1_ListCode[name] = D[str(i + 1)]
                        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                        # Key step: drop the now-surplus last entry.
                        menu_list_code_name = str(Menu1[zi_menu_name]) + "_list_" + str(zong)
                        del Menu1_ListCode[menu_list_code_name]
                        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
                for i in range(1, zong, 1):
                    name = str(Menu1[zi_menu_name]) + "_list_" + str(i)
                    print('Menu1[name] = ', Menu1_ListCode[name], 'i = ', i)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        Font = ('TkDefaultFont', 8)
        if self.YiCi == FALSE:
            # Build the toolbar buttons only once per edit session.
            self.it_Y_add_Btn = Button(self.frame, text='+Y', width=3, bg='yellow', fg='blue',
                                       font=Font, command=lambda: Y_add(flag="text"))
            self.it_Y_delet_Btn = Button(self.frame, text='-Y', width=3, bg='red', fg='white',
                                         font=Font, command=Y_delet)
            self.it_X_add_Btn = Button(self.frame, text='+X', width=3, bg='yellow', fg='blue',
                                       font=Font, command=X_add)
            self.it_X_delet_Btn = Button(self.frame, text='-X', width=3, bg='red', fg='white',
                                         font=Font, command=X_delet)
            self.Separator_Btn = Button(self.frame, text='----', width=3, bg='lightblue', fg='white',
                                        font=Font, command=lambda: Y_add(flag="separator"))
            self.YinCang_Btn = Button(self.frame, text='C', width=3, bg='blue', fg='white',
                                      font=Font, command=YinChang_All)
            self.YiCi = TRUE
        self.it_X_add_Btn.grid(row=1, column=1001)
        self.it_X_delet_Btn.grid(row=1, column=1002)
        self.it_Y_add_Btn.grid(row=1, column=1003)
        self.it_Y_delet_Btn.grid(row=1, column=1005)
        self.Separator_Btn.grid(row=1, column=1004)
        self.YinCang_Btn.grid(row=1, column=1006)
        self.it_X_add_Btn.lift()
        self.it_X_delet_Btn.lift()
        self.it_Y_add_Btn.lift()
        self.it_Y_delet_Btn.lift()
        self.YinCang_Btn.lift()
        self.Separator_Btn.lift()
        # self.it_Y_add_Btn.place(x=3, y=self.bar_W + 2 + 28)
    def Hua_Menu(self):
        """Create the menu-editing strip above the canvas (once) and bind the
        canvas mouse events while the menu tool is active."""
        if self.flag_WanCheng1 == FALSE:
            def paint_AnXia(event):
                global DangQian_KJ_name
                global canva_W
                global flag_Menu_Kai
                self.YiCi = FALSE
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                if flag_Menu_Kai == FALSE:
                    # First press: build the editor frame, the two input
                    # entries and the Edit button.
                    self.frame = Frame(self.canva, width=380, height=28)
                    self.frame.place(x=3, y=self.bar_W + 2)
                    self.btn_name = StringVar()
                    self.btn_name.set('Menu title input')
                    self.list_name = StringVar()
                    self.list_name.set('Title list input')
                    self.it_ShuRu_Entry = Entry(self.canva, textvariable=self.btn_name, font=('微软雅黑', 10),
                                                bg='DeepSkyBlue', width=20)
                    self.it_List_ShuRu_Entry = Entry(self.canva, textvariable=self.list_name, font=('微软雅黑', 10),
                                                     bg='LightBlue', width=20)
                    self.it_Button_Menu = Button(self.frame, text='Edit', width=6, bg='LightGreen',
                                                 font=('TkDefaultFont', 8), command=self.Hua_Menu_Tuo)  # pass the method itself, no (): adding () would call it immediately
                    self.it_Button_Menu.grid(row=1, column=100)
                    self.it_ShuRu_Entry.place(x=3, y=2)
                    self.it_List_ShuRu_Entry.place(x=180, y=2)
                    self.it_Button_Menu.lift()
                    flag_Menu_Kai = TRUE
            def paint_YiDong(event):
                global DangQian_KJ_name
                self.Text = DangQian_KJ_name
            def paint_ShiFang(event):
                # Release: the menu bar has no drag footprint; zero the coords.
                self.X0 = 0
                self.Y0 = 0
                self.X1 = 0
                self.Y1 = 0
                self.canva.delete('Hua_Kuang_ing')
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Message(self):
        """Arm the canvas so the next press/drag/release draws a Message widget."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the single-click case where self.X1/self.Y1 would stay 0
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                # Qualified as tk.Message to avoid the clash introduced by importing tkinter.messagebox.
                self.it_Message = tk.Message(self.canva, text=DangQian_KJ_name, font=('TkDefaultFont', 10), width=100)
                self.it_Message.place(x=self.X0, y=self.Y0)
                self.it_Message.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                W = int(abs(self.X1 - self.X0))
                # H = int(abs(self.Y1 - self.Y0))  # Message has no height option
                self.it_Message.config(width=W)
            def paint_ShiFang(event):
                # Mouse release: a plain click falls back to a default 80x10 footprint.
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 80
                    self.Y1 = self.Y0 + 10
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def Hua_PanedWindow(self):
if self.flag_WanCheng1 == FALSE:
self.flag_DanJi = FALSE # 用于处理单击时,self.X1, self.Y1 为 0的情况
def paint_AnXia(event):
self.X0 = event.x
self.Y0 = event.y
self.canva.config(cursor='crosshair')
self.it_PanedWindow = PanedWindow(self.canva, width=100, height=60)
self.it_PanedWindow.place(x=self.X0, y=self.Y0)
self.it_PanedWindow.lower()
self.flag_DanJi = TRUE
def paint_YiDong(event):
self.X1 = event.x
self.Y1 = event.y
self.flag_DanJi = FALSE
W = int(abs(self.X1 - self.X0))
H = int(abs(self.Y1 - self.Y0))
self.it_PanedWindow.config(width=W, height=H)
def paint_ShiFang(event):
self.X1 = self.X0 + 100
self.Y1 = self.Y0 + 65
self.canva.config(cursor='arrow')
self.LuRu_Dict()
self.WanCheng()
self.canva.bind("<B1-Motion>", paint_YiDong) # 绑定鼠标移动事件
self.canva.bind("<ButtonPress-1>", paint_AnXia) # 绑定鼠标按下事件
self.canva.bind("<ButtonRelease-1>", paint_ShiFang) # 绑定鼠标释放事件
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Radiobutton(self):
        """Arm the canvas so the next press/drag/release draws a Radiobutton.

        All radio buttons drawn in one group share a single IntVar
        (self.varInt); each gets a distinct value from the module-level
        Radiobutton_i counter.
        """
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the single-click case where self.X1/self.Y1 would stay 0
            def paint_AnXia(event):
                global Radiobutton_i
                global flag_RadBtn_Zu
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                if flag_RadBtn_Zu == FALSE:
                    # first radio button: create the shared group variable
                    self.varInt = IntVar()
                    self.varInt.set(0)
                    flag_RadBtn_Zu = TRUE
                print('varInt = ', self.varInt)
                self.it_Radiobutton = Radiobutton(self.canva, variable=self.varInt, text='Radiobutton',
                                                  font=('TkDefaultFont', 10), value=Radiobutton_i)
                self.it_Radiobutton.place(x=self.X0, y=self.Y0)
                self.it_Radiobutton.lower()
                print('Radiobutton_i = ', Radiobutton_i)
                Radiobutton_i = Radiobutton_i + 1
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                # width/height are in characters/lines (~7.6x13 px per cell)
                W = int(abs(self.X1 - self.X0)/7.6)
                H = int(abs(self.Y1 - self.Y0)/13)
                self.it_Radiobutton.config(width=W, height=H)
            def paint_ShiFang(event):
                # Mouse release: a plain click falls back to a default 90x20 footprint.
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 90
                    self.Y1 = self.Y0 + 20
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Scale_X(self):
        """Arm the canvas so the next press/drag/release draws a horizontal Scale."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the single-click case where self.X1/self.Y1 would stay 0
            def paint_AnXia(event):
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                self.it_Scale_X = Scale(self.canva, orient=HORIZONTAL, font=('TkDefaultFont', 10))
                self.it_Scale_X.place(x=self.X0, y=self.Y0)
                self.it_Scale_X.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                W = int(abs(self.X1 - self.X0))
                H = int(abs(self.Y1 - self.Y0))
                # Scale: 'length' is the long axis, 'width' the trough thickness
                # (-23 leaves room for the value label above the trough).
                self.it_Scale_X.config(width=H-23, length=W)
                # ******************************************************************************************
                if self.flag_FuZuKuang == TRUE:
                    # NOTE(review): self.it_Kuan is created by another drawing
                    # routine; confirm it always exists when flag_FuZuKuang is TRUE.
                    self.canva.itemconfig(self.it_Kuan, tags='Hua_Kuang_ing')
                # ******************************************************************************************
            def paint_ShiFang(event):
                # Mouse release: a plain click falls back to a default 100x40 footprint.
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 100
                    self.Y1 = self.Y0 + 40
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Scale_Y(self):
        """Arm the canvas so the next press/drag/release draws a vertical Scale."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the single-click case where self.X1/self.Y1 would stay 0
            def paint_AnXia(event):
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                self.it_Scale_Y = Scale(self.canva, font=('TkDefaultFont', 10))
                self.it_Scale_Y.place(x=self.X0, y=self.Y0)
                self.it_Scale_Y.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                W = int(abs(self.X1 - self.X0))
                H = int(abs(self.Y1 - self.Y0))
                # 'length' is the long (vertical) axis, 'width' the trough
                # thickness (-26 leaves room for the value label beside it).
                self.it_Scale_Y.config(width=W-26, length=H)
            def paint_ShiFang(event):
                # Mouse release: a plain click falls back to a default 50x100 footprint.
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 50
                    self.Y1 = self.Y0 + 100
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Spinbox(self):
        """Arm the canvas so the next press/drag/release draws a Spinbox."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the single-click case where self.X1/self.Y1 would stay 0
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                # The current widget name is shown as the first spin value.
                self.it_Spinbox = Spinbox(self.canva, values=(DangQian_KJ_name, 1, 2, 3), font=('TkDefaultFont', 10),
                                          bg=self.bg_Spinbox_YanSe)
                self.it_Spinbox.place(x=self.X0, y=self.Y0)
                self.it_Spinbox.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                W = int(abs(self.X1 - self.X0)/7.2)  # width is in characters (~7.2 px each)
                # H = int(abs(self.Y1 - self.Y0))
                self.it_Spinbox.config(width=W)
            def paint_ShiFang(event):
                # Mouse release: a plain click falls back to a default 200x20 footprint.
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 200
                    self.Y1 = self.Y0 + 20
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_Text(self):
        """Arm the canvas so the next press/drag/release draws a Text widget."""
        if self.flag_WanCheng1 == FALSE:
            self.flag_DanJi = FALSE  # guards the single-click case where self.X1/self.Y1 would stay 0
            def paint_AnXia(event):
                global DangQian_KJ_name
                self.X0 = event.x
                self.Y0 = event.y
                self.canva.config(cursor='crosshair')
                self.it_Text = Text(self.canva, bg=self.bg_Text_YanSe, font=('TkDefaultFont', 10), width=20, height=6)
                self.it_Text.insert(END, DangQian_KJ_name)
                self.it_Text.place(x=self.X0, y=self.Y0)
                self.it_Text.lower()
                self.flag_DanJi = TRUE
            def paint_YiDong(event):
                self.X1 = event.x
                self.Y1 = event.y
                self.flag_DanJi = FALSE
                # Text width/height are in characters/lines (~7x13 px per cell).
                W = int(abs(self.X1 - self.X0)/7)
                H = int(abs(self.Y1 - self.Y0)/13)
                self.it_Text.config(width=W, height=H)
            def paint_ShiFang(event):
                # Mouse release: a plain click falls back to a default 145x80 footprint.
                if self.flag_DanJi == TRUE:
                    self.X1 = self.X0 + 145
                    self.Y1 = self.Y0 + 80
                self.canva.config(cursor='arrow')
                self.LuRu_Dict()
                self.WanCheng()
            self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
            self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
            self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 完成后
    # Called after a widget is drawn.
    def WanCheng(self):
        """Finish the current drawing action and switch the canvas back to
        selection mode: press starts a rubber-band selection box, drag draws
        it, release hit-tests every recorded widget against the box, then
        runs the property editor (Design) and context menu (TanChuang).
        """
        global background_XiangMu_XuanDing
        global foreground_XiangMu_XuanDing
        self.flag_WanCheng1 = TRUE
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        def paint_AnXia(event):
            # Press: reset colours and selection state, remember the anchor.
            global XuanZhong
            global XuanZhong_sum
            global Event_Canvas_x
            global Event_Canvas_y
            global XuanKuang_X0
            global XuanKuang_Y0
            self.Yanse_HuiFu()  # restore every widget's original colours on each press
            XuanZhong.clear()
            XuanZhong_sum = 0
            Event_Canvas_x = event.x
            Event_Canvas_y = event.y
            XuanKuang_X0 = event.x
            XuanKuang_Y0 = event.y
            self.Yanse_HuiFu()
        def paint_YiDong(event):
            # Drag: redraw the four edges of the rubber-band selection box.
            global flag_TanChuan_BianJian
            flag_TanChuan_BianJian = TRUE
            global XuanKuang_X0
            global XuanKuang_Y0
            global XuanKuang_X1
            global XuanKuang_Y1
            XuanKuang_X1 = event.x
            XuanKuang_Y1 = event.y
            self.canva.delete('Xuan_Kuang_ing')
            self.it_Kuan1 = self.canva.create_line(XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1, XuanKuang_Y0, fill='DeepSkyBlue', width=2)
            self.it_Kuan2 = self.canva.create_line(XuanKuang_X0, XuanKuang_Y0, XuanKuang_X0, XuanKuang_Y1, fill='DeepSkyBlue', width=2)
            self.it_Kuan3 = self.canva.create_line(XuanKuang_X0, XuanKuang_Y1, XuanKuang_X1, XuanKuang_Y1, fill='DeepSkyBlue', width=2)
            self.it_Kuan4 = self.canva.create_line(XuanKuang_X1, XuanKuang_Y0, XuanKuang_X1, XuanKuang_Y1, fill='DeepSkyBlue', width=2)
            # tag the edges so the next motion event can wipe them in one delete
            self.canva.itemconfig(self.it_Kuan1, tags='Xuan_Kuang_ing')
            self.canva.itemconfig(self.it_Kuan2, tags='Xuan_Kuang_ing')
            self.canva.itemconfig(self.it_Kuan3, tags='Xuan_Kuang_ing')
            self.canva.itemconfig(self.it_Kuan4, tags='Xuan_Kuang_ing')
        def paint_ShiFang(event):
            # Release: hit-test all recorded widgets against the selection box.
            global XuanKuang_X0
            global XuanKuang_Y0
            global XuanKuang_X1
            global XuanKuang_Y1
            # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
            # Vertical offset: title bar, plus menu bar if any menu exists.
            global Distance
            global bar_W
            global bar_menu_W
            if zi_menu1_sum == 0:
                Distance = bar_W
            else:
                Distance = bar_W + bar_menu_W
            # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
            XuanKuang_X1 = event.x
            XuanKuang_Y1 = event.y
            XuanKuang_Y0 = XuanKuang_Y0 - Distance
            XuanKuang_Y1 = XuanKuang_Y1 - Distance
            self.canva.delete('Xuan_Kuang_ing')
            global XuanZhong_sum
            global XuanZhong
            # Search
            # widget dictionaries
            # $$$$$$$$$$$$$$$$$$$
            global Button1
            global Canvas1
            global Checkbutton1
            global Combobox1
            global Entry1
            global Frame1
            global Label1
            global LabelFrame1
            global Listbox1
            global Message1
            global PanedWindow1
            global Radiobutton1
            global Scale1_X
            global Scale1_Y
            global Scrollbar1_X
            global Scrollbar1_Y
            global Spinbox1
            global Text1
            # $$$$$$$$$$$$$$$$$$$$
            global button1_i
            global canvas1_i
            global checkbutton1_i
            global combobox1_i
            global entry1_i
            global frame1_i
            global label1_i
            global labelFrame1_i
            global listbox1_i
            global message1_i
            global panedWindow1_i
            global radiobutton1_i
            global scale1_x_i
            global scale1_y_i
            global scrollbar1_x_i
            global scrollbar1_y_i
            global spinbox1_i
            global text1_i
            # Loop over every live (not-deleted) widget of each kind.
            for i in range(1, button1_i + 1, 1):
                if i not in Button1_List_Num:
                    # BuJian_ChuLi(self, i, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie, BuJian_Lei, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1, XuanKuang_Y1):
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Button', 'button', Button1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1, XuanKuang_Y1)
            for i in range(1, canvas1_i + 1, 1):
                if i not in Canvas1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Canvas', 'canvas', Canvas1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, checkbutton1_i + 1, 1):
                if i not in Checkbutton1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Checkbutton', 'checkbutton', Checkbutton1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, combobox1_i + 1, 1):
                if i not in Combobox1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Combobox', 'combobox', Combobox1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, entry1_i + 1, 1):
                if i not in Entry1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Entry', 'entry', Entry1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, frame1_i + 1, 1):
                if i not in Frame1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Frame', 'frame', Frame1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, label1_i + 1, 1):
                if i not in Label1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Label', 'label', Label1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, labelFrame1_i + 1, 1):
                if i not in LabelFrame1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'LabelFrame', 'labelFrame', LabelFrame1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, listbox1_i + 1, 1):
                if i not in Listbox1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Listbox', 'listbox', Listbox1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, message1_i + 1, 1):
                if i not in Message1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Message', 'message', Message1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, panedWindow1_i + 1, 1):
                if i not in PanedWindow1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'PanedWindow', 'panedWindow', PanedWindow1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, radiobutton1_i + 1, 1):
                if i not in Radiobutton1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Radiobutton', 'radiobutton', Radiobutton1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, scale1_x_i + 1, 1):
                if i not in Scale1_List_Num_X:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Scale_X', 'scale_x', Scale1_X, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, scale1_y_i + 1, 1):
                if i not in Scale1_List_Num_Y:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Scale_Y', 'scale_y', Scale1_Y, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, spinbox1_i + 1, 1):
                if i not in Spinbox1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Spinbox', 'spinbox', Spinbox1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            for i in range(1, text1_i + 1, 1):
                if i not in Text1_List_Num:
                    xuan_ding = XuanDing()
                    xuan_ding.BuJian_ChuLi(i, 'Text', 'text', Text1, XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1,
                                           XuanKuang_Y1)
            # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
            self.Design()
            self.TanChuang()
        # &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
        self.canva.bind("<B1-Motion>", paint_YiDong)  # bind mouse-drag event
        self.canva.bind("<ButtonPress-1>", paint_AnXia)  # bind mouse-press event
        self.canva.bind("<ButtonRelease-1>", paint_ShiFang)  # bind mouse-release event
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def TanChuang(self):
global canva_X
global canva_Y
global win_X
global win_Y
global flag_TanChuan_BianJian
global XuanKuang_X1
global XuanKuang_Y1
if flag_TanChuan_BianJian == TRUE:
self.BianJi_kj_menu.post(XuanKuang_X1+canva_X+win_X, XuanKuang_Y1+canva_Y+win_Y)
flag_TanChuan_BianJian = FALSE
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def OK(self):
global flag_ZuJian_Move
flag_ZuJian_Move = FALSE
print('OK, $$$$$$$$$$$$$$$$$$$$$$$$$$$$$ flag_ZuJian_Move = ', flag_ZuJian_Move)
self.WanCheng()
self.Yanse_HuiFu()
self.Clear_XuanZhong()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def Design(self):
global XuanZhong_sum
global XuanZhong
# Design_bujian(self, XuanZhong_Object):
design_buJian = Design_BuJian()
Len = len(XuanZhong)
if Len == 1:
name = "XuanZhong" + str(1)
a = XuanZhong[name]
design_buJian.Design_bujian(a)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def UI_Ban_Design(self):
global XuanZhong_sum
global XuanZhong
Len = len(XuanZhong)
if Len == 1:
name = "XuanZhong" + str(1)
a = XuanZhong[name]
BuJian_LeiXing_DaXie = a[1]
BuJian_LeiXing_XiaoXie = a[2]
BuJian_NO_i = a[3]
BuJian_Lei = a[4]
# name = "XuanZhong" + str(XuanZhong_sum)
# XuanZhong[name] = (Button1[KJ], 'Button', 'button', Num_i, Button1)
design_new = Design_New()
design_new.BuJian_New(BuJian_LeiXing_DaXie, BuJian_NO_i, BuJian_LeiXing_XiaoXie, BuJian_Lei)
# name
# KJ_name = str(BuJian_LeiXing_XiaoXie) + '_name' + str(BuJian_NO_i)
# BuJian_Lei[KJ_name] = ent_ControlName
sj_chu_li = SJ_ChuLi()
sj_chu_li.SJ_New(BuJian_LeiXing_XiaoXie, BuJian_NO_i, BuJian_Lei)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def Move(self):
global flag_ZuJian_Move
# $$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$
global XuanZhong_sum
global XuanZhong
# $$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$
global Button1
global Canvas1
global Checkbutton1
global Combobox1
global Entry1
global Frame1
global Label1
global LabelFrame1
global Listbox1
global Message1
global PanedWindow1
global Radiobutton1
global Scale1_X
global Scale1_Y
global Scrollbar1_X
global Scrollbar1_Y
global Spinbox1
global Text1
Len = len(XuanZhong)
# $$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$
# name = "XuanZhong" + str(XuanZhong_sum)
# XuanZhong[name] = (Button1[KJ], 'Button', 'button', Num_i, Button1)
if 1:
# if flag_ZuJian_Move == TRUE:
# 鼠标左键按下事件
def paint1(event):
self.ZuJian_x1 = event.x
self.ZuJian_y1 = event.y
self.canva.config(cursor='fleur')
self.Line = self.canva.create_line(self.ZuJian_x1, self.ZuJian_y1, self.ZuJian_x1, self.ZuJian_y1,
fill="DeepSkyBlue", width=1.6)
# 鼠标左键按下并移动事件
def paint2(event):
self.ZuJian_x2 = event.x
self.ZuJian_y2 = event.y
self.Move_X = self.ZuJian_x2 - self.ZuJian_x1
self.Move_Y = self.ZuJian_y2 - self.ZuJian_y1
# 绘制移动基线
self.canva.coords(self.Line, self.ZuJian_x1, self.ZuJian_y1, self.ZuJian_x2, self.ZuJian_y2)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
for i in range(1, Len + 1, 1):
name = "XuanZhong" + str(i)
a = XuanZhong[name]
if a[1] == 'Button':
num_i = a[3]
KJ = 'Button' + str(a[3])
name_coords = 'button_coords' + str(num_i)
a = Button1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Button1[KJ].place(x=X, y=Y)
elif a[1] == 'Canvas':
num_i = a[3]
KJ = 'Canvas' + str(a[3])
name_coords = 'canvas_coords' + str(num_i)
a = Canvas1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Canvas1[KJ].place(x=X, y=Y)
elif a[1] == 'Checkbutton':
num_i = a[3]
KJ = 'Checkbutton' + str(a[3])
name_coords = 'checkbutton_coords' + str(num_i)
a = Checkbutton1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Checkbutton1[KJ].place(x=X, y=Y)
elif a[1] == 'Combobox':
num_i = a[3]
KJ = 'Combobox' + str(a[3])
name_coords = 'combobox_coords' + str(num_i)
a = Combobox1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Combobox1[KJ].place(x=X, y=Y)
elif a[1] == 'Entry':
num_i = a[3]
KJ = 'Entry' + str(a[3])
name_coords = 'entry_coords' + str(num_i)
a = Entry1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Entry1[KJ].place(x=X, y=Y)
elif a[1] == 'Frame':
num_i = a[3]
KJ = 'Frame' + str(a[3])
name_coords = 'frame_coords' + str(num_i)
a = Frame1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Frame1[KJ].place(x=X, y=Y)
elif a[1] == 'Label':
num_i = a[3]
KJ = 'Label' + str(a[3])
name_coords = 'label_coords' + str(num_i)
a = Label1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Label1[KJ].place(x=X, y=Y)
elif a[1] == 'LabelFrame':
num_i = a[3]
KJ = 'LabelFrame' + str(a[3])
name_coords = 'labelFrame_coords' + str(num_i)
a = LabelFrame1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
LabelFrame1[KJ].place(x=X, y=Y)
elif a[1] == 'Listbox':
num_i = a[3]
KJ = 'Listbox' + str(a[3])
name_coords = 'listbox_coords' + str(num_i)
a = Listbox1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Listbox1[KJ].place(x=X, y=Y)
elif a[1] == 'Message':
num_i = a[3]
KJ = 'Message' + str(a[3])
name_coords = 'message_coords' + str(num_i)
a = Message1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Message1[KJ].place(x=X, y=Y)
elif a[1] == 'PanedWindow':
num_i = a[3]
KJ = 'PanedWindow' + str(a[3])
name_coords = 'panedWindow_coords' + str(num_i)
a = PanedWindow1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
PanedWindow1[KJ].place(x=X, y=Y)
elif a[1] == 'Radiobutton':
num_i = a[3]
KJ = 'Radiobutton' + str(a[3])
name_coords = 'radiobutton_coords' + str(num_i)
a = Radiobutton1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Radiobutton1[KJ].place(x=X, y=Y)
elif a[1] == 'Scale_X':
num_i = a[3]
KJ = 'Scale_X' + str(a[3])
name_coords = 'scale_x_coords' + str(num_i)
a = Scale1_X[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Scale1_X[KJ].place(x=X, y=Y)
elif a[1] == 'Scale_Y':
num_i = a[3]
KJ = 'Scale_Y' + str(a[3])
name_coords = 'scale_y_coords' + str(num_i)
a = Scale1_Y[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Scale1_Y[KJ].place(x=X, y=Y)
elif a[1] == 'Spinbox':
num_i = a[3]
KJ = 'Spinbox' + str(a[3])
name_coords = 'spinbox_coords' + str(num_i)
a = Spinbox1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Spinbox1[KJ].place(x=X, y=Y)
elif a[1] == 'Text':
num_i = a[3]
KJ = 'Text' + str(a[3])
name_coords = 'text_coords' + str(num_i)
a = Text1[name_coords]
X0 = a[0]
Y0 = a[1]
X = X0 + self.Move_X
Y = Y0 + self.Move_Y
Text1[KJ].place(x=X, y=Y)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 鼠标左键松开事件
def paint3(event):
global canva_X
global canva_Y
global win_X
global win_Y
self.ZuJian_x2 = event.x
self.ZuJian_y2 = event.y
self.canva.delete(self.Line)
self.canva.config(cursor='arrow')
self.BianJi_kj_menu.post(self.ZuJian_x2 + canva_X + win_X, self.ZuJian_y2 + canva_Y + win_Y)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
for i in range(1, Len + 1, 1):
name = "XuanZhong" + str(i)
a = XuanZhong[name]
if a[1] == 'Button':
num_i = a[3]
KJ = 'Button' + str(a[3])
name_coords = 'button_coords' + str(num_i)
a = Button1[name_coords]
# name_coords = 'button_coords' + str(BuJian_NO_i)
# Zhi = (self.X0, self.Y0, self.X1, self.Y1, BuJian_NO_i)
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Button1[name_coords] = Zhi
# # Button1[KJ].place(x=X, y=Y)
elif a[1] == 'Canvas':
num_i = a[3]
KJ = 'Canvas' + str(a[3])
name_coords = 'canvas_coords' + str(num_i)
a = Canvas1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Canvas1[name_coords] = Zhi
elif a[1] == 'Checkbutton':
num_i = a[3]
KJ = 'Checkbutton' + str(a[3])
name_coords = 'checkbutton_coords' + str(num_i)
a = Checkbutton1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Checkbutton1[name_coords] = Zhi
elif a[1] == 'Combobox':
num_i = a[3]
KJ = 'Combobox' + str(a[3])
name_coords = 'combobox_coords' + str(num_i)
a = Combobox1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Combobox1[name_coords] = Zhi
elif a[1] == 'Entry':
num_i = a[3]
KJ = 'Entry' + str(a[3])
name_coords = 'entry_coords' + str(num_i)
a = Entry1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Entry1[name_coords] = Zhi
elif a[1] == 'Frame':
num_i = a[3]
KJ = 'Frame' + str(a[3])
name_coords = 'frame_coords' + str(num_i)
a = Frame1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Entry1[name_coords] = Zhi
elif a[1] == 'Label':
num_i = a[3]
KJ = 'Label' + str(a[3])
name_coords = 'label_coords' + str(num_i)
a = Label1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Label1[name_coords] = Zhi
elif a[1] == 'LabelFrame':
num_i = a[3]
KJ = 'LabelFrame' + str(a[3])
name_coords = 'labelFrame_coords' + str(num_i)
a = LabelFrame1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
LabelFrame1[name_coords] = Zhi
elif a[1] == 'Listbox':
num_i = a[3]
KJ = 'Listbox' + str(a[3])
name_coords = 'listbox_coords' + str(num_i)
a = Listbox1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Listbox1[name_coords] = Zhi
elif a[1] == 'Message':
num_i = a[3]
KJ = 'Message' + str(a[3])
name_coords = 'message_coords' + str(num_i)
a = Message1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Message1[name_coords] = Zhi
elif a[1] == 'PanedWindow':
num_i = a[3]
KJ = 'PanedWindow' + str(a[3])
name_coords = 'panedWindow_coords' + str(num_i)
a = PanedWindow1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
PanedWindow1[name_coords] = Zhi
elif a[1] == 'Radiobutton':
num_i = a[3]
KJ = 'Radiobutton' + str(a[3])
name_coords = 'radiobutton_coords' + str(num_i)
a = Radiobutton1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Radiobutton1[name_coords] = Zhi
elif a[1] == 'Scale_X':
num_i = a[3]
KJ = 'Scale_X' + str(a[3])
name_coords = 'scale_x_coords' + str(num_i)
a = Scale1_X[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Scale1_X[name_coords] = Zhi
elif a[1] == 'Scale_Y':
num_i = a[3]
KJ = 'Scale_Y' + str(a[3])
name_coords = 'scale_y_coords' + str(num_i)
a = Scale1_Y[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Scale1_Y[name_coords] = Zhi
elif a[1] == 'Spinbox':
num_i = a[3]
KJ = 'Spinbox' + str(a[3])
name_coords = 'spinbox_coords' + str(num_i)
a = Spinbox1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Spinbox1[name_coords] = Zhi
elif a[1] == 'Text':
num_i = a[3]
KJ = 'Text' + str(a[3])
name_coords = 'text_coords' + str(num_i)
a = Text1[name_coords]
X0 = a[0]
Y0 = a[1]
X1 = a[2]
Y1 = a[3]
XX0 = X0 + self.Move_X
YY0 = Y0 + self.Move_Y
XX1 = X1 + self.Move_X
YY1 = Y1 + self.Move_Y
Zhi = (XX0, YY0, XX1, YY1, a[4])
Text1[name_coords] = Zhi
print('Text1 = \n', Text1[KJ])
print('Text1_coords = \n', Text1[name_coords])
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 画布控件与鼠标左键进行绑定
self.canva.bind("<ButtonPress-1>", paint1) # 绑定鼠标按下事件
self.canva.bind("<B1-Motion>", paint2) # 绑定鼠标移动事件
self.canva.bind("<ButtonRelease-1>", paint3) # 绑定鼠标松开事件
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def Delete(self):
global XuanZhong
global XuanZhong_sum
if askyesno('Delete', 'Is going to delete Selected?'):
Len = len(XuanZhong)
for i in range(1, Len + 1, 1):
name = "XuanZhong" + str(i)
a = XuanZhong[name]
XuanZhong_Object = a
delete_buJian = Delete_BuJian()
delete_buJian.Delete(XuanZhong_Object)
# 清空选中
self.Clear_XuanZhong()
# 如果不清空则恢复原来颜色
else:
self.Yanse_HuiFu()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def Cancel(self):
self.Yanse_HuiFu()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def Clear_XuanZhong(self):
global XuanZhong_sum
global XuanZhong
XuanZhong_sum = 0
XuanZhong.clear()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def Yanse_HuiFu(self):
global XuanZhong_sum
global XuanZhong
# name = "XuanZhong" + str(XuanZhong_sum)
# XuanZhong[name] = (Button1[KJ], 'Button', 'button', Num_i, Button1)
Len = len(XuanZhong)
if len != 0:
for i in range(1, Len + 1, 1):
name = "XuanZhong" + str(i)
a = XuanZhong[name]
BuJian_Lei = a[4]
BuJian_LeiXing_DaXie = a[1]
BuJian_LeiXing_XiaoXie = a[2]
Num_i = a[3]
# Color_Restore(self, BuJian_Lei, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie, Num_i):
color_handle = Color_Handle()
color_handle.Color_Restore(BuJian_Lei, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie, Num_i)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Set the "current control" tag that LuRu_Dict later dispatches on.
    def Set_KJBZ(self, str):
        # NOTE(review): the parameter shadows the builtin `str`; renaming it
        # would break keyword callers, so it is only flagged here.
        global KJBZ
        KJBZ = str
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    def Hua_BianYi(self):
        """Compile the designed UI into Python source inside self.Text_1.

        Emits, in order: a "Control Define" header, the menu code, one code
        snippet per surviving widget (indices recorded in the *_List_Num
        deletion lists are skipped), the event-handler bodies, and a trailing
        boilerplate string.  All code strings are produced by
        Dictionary.Record_Code / Menu_Str / the SJ_* helpers and inserted at
        the END of the output Text widget.
        """
        global XuanZhong_sum
        global XuanZhong
        # search through the per-type widget dictionaries below
        # widget dictionaries
        # $$$$$$$$$$$$$$$$$$$
        global Button1
        global Canvas1
        global Checkbutton1
        global Combobox1
        global Entry1
        global Frame1
        global Label1
        global LabelFrame1
        global Listbox1
        global Message1
        global PanedWindow1
        global Radiobutton1
        global Scale1_X
        global Scale1_Y
        global Scrollbar1_X
        global Scrollbar1_Y
        global Spinbox1
        global Text1
        # $$$$$$$$$$$$$$$$$$$$
        global button1_i
        global canvas1_i
        global checkbutton1_i
        global combobox1_i
        global entry1_i
        global frame1_i
        global label1_i
        global labelFrame1_i
        global listbox1_i
        global message1_i
        global panedWindow1_i
        global radiobutton1_i
        global scale1_x_i
        global scale1_y_i
        global scrollbar1_x_i
        global scrollbar1_y_i
        global spinbox1_i
        global text1_i
        # lists recording the deleted member numbers of each widget type
        global Button1_List_Num
        global Canvas1_List_Num
        global Checkbutton1_List_Num
        global Combobox1_List_Num
        global Entry1_List_Num
        global Frame1_List_Num
        global Label1_List_Num
        global LabelFrame1_List_Num
        global Listbox1_List_Num
        global Menu1_List_Num
        global Message1_List_Num
        global PanedWindow1_List_Num
        global Radiobutton1_List_Num
        global Scale1_List_Num_X
        global Scale1_List_Num_Y
        global Spinbox1_List_Num
        global Text1_List_Num
        # section-header comments for the generated source
        global tap
        global ck_name
        if ck_name != "":
            str_code = tap + tap + "# Control Define" + "\n\n"
            self.Text_1.insert(END, str_code)
        # Menu
        # NOTE(review): zi_menu1_sum is read without a `global` declaration;
        # reads work, but this relies on the module-level value.
        if zi_menu1_sum != 0:
            str_code = tap + tap + "# Menu Define" + "\n"
            self.Text_1.insert(END, str_code)
            menu_str = Menu_Str()
            str_Menu = menu_str.Menu_Str()
            self.Text_1.insert(END, str_Menu)
        if ck_name != "":
            str_code = tap + tap + "# Other Control Define" + "\n\n"
            self.Text_1.insert(END, str_code)
        # loop over every widget type, emitting code for non-deleted indices
        for i in range(1, button1_i + 1, 1):
            if i not in Button1_List_Num:
                KJ = 'Button' + str(i)
                # Record_Code(self, BuJian, BuJian_Lei, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie, BuJian_NO_i):
                dictionary = Dictionary()
                dictionary.Record_Code(Button1[KJ], Button1, 'Button', 'button', i)
                name_Code = 'button' + '_Code' + str(i)
                str_code = Button1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, canvas1_i + 1, 1):
            if i not in Canvas1_List_Num:
                KJ = 'Canvas' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Canvas1[KJ], Canvas1, 'Canvas', 'canvas', i)
                name_Code = 'canvas' + '_Code' + str(i)
                str_code = Canvas1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, checkbutton1_i + 1, 1):
            if i not in Checkbutton1_List_Num:
                KJ = 'Checkbutton' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Checkbutton1[KJ], Checkbutton1, 'Checkbutton', 'checkbutton', i)
                name_Code = 'checkbutton' + '_Code' + str(i)
                str_code = Checkbutton1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, combobox1_i + 1, 1):
            if i not in Combobox1_List_Num:
                KJ = 'Combobox' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Combobox1[KJ], Combobox1, 'ttk.Combobox', 'combobox', i)
                name_Code = 'combobox' + '_Code' + str(i)
                str_code = Combobox1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, entry1_i + 1, 1):
            if i not in Entry1_List_Num:
                KJ = 'Entry' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Entry1[KJ], Entry1, 'Entry', 'entry', i)
                name_Code = 'entry' + '_Code' + str(i)
                str_code = Entry1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, frame1_i + 1, 1):
            if i not in Frame1_List_Num:
                KJ = 'Frame' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Frame1[KJ], Frame1, 'Frame', 'frame', i)
                name_Code = 'frame' + '_Code' + str(i)
                str_code = Frame1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, label1_i + 1, 1):
            if i not in Label1_List_Num:
                KJ = 'Label' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Label1[KJ], Label1, 'Label', 'label', i)
                name_Code = 'label' + '_Code' + str(i)
                str_code = Label1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, labelFrame1_i + 1, 1):
            if i not in LabelFrame1_List_Num:
                KJ = 'LabelFrame' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(LabelFrame1[KJ], LabelFrame1, 'LabelFrame', 'labelFrame', i)
                name_Code = 'labelFrame' + '_Code' + str(i)
                str_code = LabelFrame1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, listbox1_i + 1, 1):
            if i not in Listbox1_List_Num:
                KJ = 'Listbox' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Listbox1[KJ], Listbox1, 'Listbox', 'listbox', i)
                name_Code = 'listbox' + '_Code' + str(i)
                str_code = Listbox1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, message1_i + 1, 1):
            if i not in Message1_List_Num:
                KJ = 'Message' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Message1[KJ], Message1, 'tk.Message', 'message', i)
                name_Code = 'message' + '_Code' + str(i)
                str_code = Message1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, panedWindow1_i + 1, 1):
            if i not in PanedWindow1_List_Num:
                KJ = 'PanedWindow' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(PanedWindow1[KJ], PanedWindow1, 'PanedWindow', 'panedWindow', i)
                name_Code = 'panedWindow' + '_Code' + str(i)
                str_code = PanedWindow1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, radiobutton1_i + 1, 1):
            if i not in Radiobutton1_List_Num:
                KJ = 'Radiobutton' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Radiobutton1[KJ], Radiobutton1, 'Radiobutton', 'radiobutton', i)
                name_Code = 'radiobutton' + '_Code' + str(i)
                str_code = Radiobutton1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, scale1_x_i + 1, 1):
            if i not in Scale1_List_Num_X:
                KJ = 'Scale_X' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Scale1_X[KJ], Scale1_X, 'Scale_X', 'scale_x', i)
                name_Code = 'scale_x' + '_Code' + str(i)
                str_code = Scale1_X[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, scale1_y_i + 1, 1):
            if i not in Scale1_List_Num_Y:
                KJ = 'Scale_Y' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Scale1_Y[KJ], Scale1_Y, 'Scale_Y', 'scale_y', i)
                name_Code = 'scale_y' + '_Code' + str(i)
                str_code = Scale1_Y[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, spinbox1_i + 1, 1):
            if i not in Spinbox1_List_Num:
                KJ = 'Spinbox' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Spinbox1[KJ], Spinbox1, 'Spinbox', 'spinbox', i)
                name_Code = 'spinbox' + '_Code' + str(i)
                str_code = Spinbox1[name_Code]
                self.Text_1.insert(END, str_code)
        for i in range(1, text1_i + 1, 1):
            if i not in Text1_List_Num:
                KJ = 'Text' + str(i)
                dictionary = Dictionary()
                dictionary.Record_Code(Text1[KJ], Text1, 'Text', 'text', i)
                name_Code = 'text' + '_Code' + str(i)
                str_code = Text1[name_Code]
                self.Text_1.insert(END, str_code)
        if ck_name != "":
            event_code = tap + tap + "# Event Define" + "\n\n"
            self.Text_1.insert(END, event_code)
        # def Judge_If_Delete(self, BuJian_LeiXing_XiaoXie, BuJian_NO_i):
        sj_chu_li = SJ_ChuLi()
        sj_chu_li.SJ_Bian_Yi(SJ_button_press_1, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_button_release_1, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_button_press_right_1, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_button_press_left_2, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_button_press_right_2, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_button_press_middle_1, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_button_press_middle_2, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_button_press_left_move, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_cursor_enter, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_cursor_leave, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_get_key_focus, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_lose_key_focus, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_press_a_key, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_press_enter_key, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_when_control_change, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_press_space_key, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_shift_mouseWheel, self.Text_1)
        sj_chu_li.SJ_Bian_Yi(SJ_press_combinatorial_key, self.Text_1)
        # trailing boilerplate of the generated file
        global Str_BianYi_End
        if ck_name != "":
            self.Text_1.insert(END, Str_BianYi_End)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
    # Record the control just drawn on the canvas into its per-type dictionary.
    def LuRu_Dict(self):
        """Register the control just drawn on the canvas.

        Dispatches on the global KJBZ tag set by Set_KJBZ: bumps the per-type
        counter, builds the display name, and stores the widget plus its
        bounding box via Dictionary.Record_Dict.  Before dispatching, the Y
        coordinates are shifted up by the toolbar (and, if present, menu bar)
        height so they are canvas-relative.

        NOTE(review): some branches name the widget with the counter value
        (e.g. 'Button ' + str(button1_i)) while others use counter + 1 (e.g.
        'Frame ' + str(frame1_i + 1)) — an off-by-one naming inconsistency;
        confirm which convention is intended before unifying.
        """
        global KJBZ
        global DangQian_KJ_name
        global Distance
        global bar_W
        global bar_menu_W
        global zi_menu1_sum
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        if zi_menu1_sum == 0:
            Distance = bar_W
        else:
            Distance = bar_W + bar_menu_W
        self.Y0 = self.Y0 - Distance
        self.Y1 = self.Y1 - Distance
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        if KJBZ == 'button1':
            global Button1
            global button1_i
            button1_i = button1_i + 1
            DangQian_KJ_name = 'Button ' + str(button1_i)
            self.it_Button.config(text=DangQian_KJ_name)
            BuJian_NO_i = button1_i
            # Record_Dict(self, BuJian, BuJian_Lei, BuJian_NO_i, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie,
            #             self_X0, self_Y0, self_X1, self_Y1):
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Button, Button1, BuJian_NO_i, 'Button', 'button',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'canvas1':
            global Canvas1
            global canvas1_i
            canvas1_i = canvas1_i + 1
            DangQian_KJ_name = 'Canvas ' + str(canvas1_i)
            BuJian_NO_i = canvas1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Canva, Canvas1, BuJian_NO_i, 'Canvas', 'canvas',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'checkbutton1':
            global Checkbutton1
            global checkbutton1_i
            checkbutton1_i = checkbutton1_i + 1
            DangQian_KJ_name = 'Checkbutton ' + str(checkbutton1_i)
            self.it_Checkbutton.config(text=DangQian_KJ_name)
            BuJian_NO_i = checkbutton1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Checkbutton, Checkbutton1, BuJian_NO_i, 'Checkbutton', 'checkbutton',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'combobox1':
            global Combobox1
            global combobox1_i
            combobox1_i = combobox1_i + 1
            DangQian_KJ_name = 'Combobox ' + str(combobox1_i )
            self.it_Combobox["values"] = ('Combobox', DangQian_KJ_name)
            self.it_Combobox.current(1)
            BuJian_NO_i = combobox1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Combobox, Combobox1, BuJian_NO_i, 'Combobox', 'combobox',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'entry1':
            global Entry1
            global entry1_i
            entry1_i = entry1_i + 1
            DangQian_KJ_name = 'Entry ' + str(entry1_i)
            # self.it_Entry.config(text=DangQian_KJ_name)
            self.it_Entry.insert(1, DangQian_KJ_name)
            BuJian_NO_i = entry1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Entry, Entry1, BuJian_NO_i, 'Entry', 'entry',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'frame1':
            global Frame1
            global frame1_i
            frame1_i = frame1_i + 1
            DangQian_KJ_name = 'Frame ' + str(frame1_i + 1)
            BuJian_NO_i = frame1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Frame, Frame1, BuJian_NO_i, 'Frame', 'frame',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'label1':
            global Label1
            global label1_i
            label1_i = label1_i + 1
            DangQian_KJ_name = 'Label ' + str(label1_i + 1)
            self.it_Label.config(text=DangQian_KJ_name)
            BuJian_NO_i = label1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Label, Label1, BuJian_NO_i, 'Label', 'label',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'labelFrame1':
            global LabelFrame1
            global labelFrame1_i
            labelFrame1_i = labelFrame1_i + 1
            DangQian_KJ_name = 'LabelFrame ' + str(labelFrame1_i + 1)
            self.it_LabelFrame.config(text=DangQian_KJ_name)
            BuJian_NO_i = labelFrame1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_LabelFrame, LabelFrame1, BuJian_NO_i, 'LabelFrame', 'labelFrame',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'listbox1':
            global Listbox1
            global listbox1_i
            listbox1_i = listbox1_i + 1
            DangQian_KJ_name = 'Listbox ' + str(listbox1_i + 1)
            BuJian_NO_i = listbox1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Listbox, Listbox1, BuJian_NO_i, 'Listbox', 'listbox',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'menu1':
            global Menu1
            global menu1_i
            menu1_i = menu1_i + 1
            DangQian_KJ_name = 'Menu ' + str(menu1_i + 1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'message1':
            global Message1
            global message1_i
            message1_i = message1_i + 1
            DangQian_KJ_name = 'Message ' + str(message1_i)
            self.it_Message.config(text=DangQian_KJ_name)
            BuJian_NO_i = message1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Message, Message1, BuJian_NO_i, 'Message', 'message',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'panedWindow1':
            global PanedWindow1
            global panedWindow1_i
            panedWindow1_i = panedWindow1_i + 1
            DangQian_KJ_name = 'PanedWindow ' + str(panedWindow1_i + 1)
            BuJian_NO_i = panedWindow1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_PanedWindow, PanedWindow1, BuJian_NO_i, 'PanedWindow', 'panedWindow',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'radiobutton1':
            global Radiobutton1
            global radiobutton1_i
            radiobutton1_i = radiobutton1_i + 1
            DangQian_KJ_name = 'Radiobutton ' + str(radiobutton1_i)
            self.it_Radiobutton.config(text=DangQian_KJ_name)
            BuJian_NO_i = radiobutton1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Radiobutton, Radiobutton1, BuJian_NO_i, 'Radiobutton', 'radiobutton',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'scale1_x':
            global Scale1_X
            global scale1_x_i
            scale1_x_i = scale1_x_i + 1
            DangQian_KJ_name = 'Scale_X ' + str(scale1_x_i + 1)
            BuJian_NO_i = scale1_x_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Scale_X, Scale1_X, BuJian_NO_i, 'Scale_X', 'scale_x',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'scale1_y':
            global Scale1_Y
            global scale1_y_i
            scale1_y_i = scale1_y_i + 1
            DangQian_KJ_name = 'Scale_Y ' + str(scale1_y_i + 1)
            BuJian_NO_i = scale1_y_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Scale_Y, Scale1_Y, BuJian_NO_i, 'Scale_Y', 'scale_y',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'scrollbar1_x':
            global Scrollbar1_X
            global scrollbar1_x_i
            # NOTE(review): assigns a *local* scrollbar1_i instead of the
            # declared global scrollbar1_x_i, so the global counter never
            # advances — likely a bug; confirm before fixing.
            scrollbar1_i = scrollbar1_x_i + 1
            DangQian_KJ_name = 'Scrollbar_X ' + str(scrollbar1_i + 1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'scrollbar1_y':
            global Scrollbar1_Y
            global scrollbar1_y_i
            # NOTE(review): same local-vs-global counter issue as above.
            scrollbar1_i = scrollbar1_y_i + 1
            DangQian_KJ_name = 'Scrollbar_Y ' + str(scrollbar1_i + 1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'text1':
            global Text1
            global text1_i
            text1_i = text1_i + 1
            DangQian_KJ_name = 'Text ' + str(text1_i + 1)
            BuJian_NO_i = text1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Text, Text1, BuJian_NO_i, 'Text', 'text',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'spinbox1':
            global Spinbox1
            global spinbox1_i
            spinbox1_i = spinbox1_i + 1
            DangQian_KJ_name = 'Spinbox ' + str(spinbox1_i + 1)
            BuJian_NO_i = spinbox1_i
            dictionary = Dictionary()
            dictionary.Record_Dict(self.it_Spinbox, Spinbox1, BuJian_NO_i, 'Spinbox', 'spinbox',
                                   self.X0, self.Y0, self.X1, self.Y1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'toplevel1':
            global Toplevel1
            global toplevel1_i
            toplevel1_i = toplevel1_i + 1
            DangQian_KJ_name = 'Toplevel ' + str(toplevel1_i + 1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        elif KJBZ == 'tkMessageBox1':
            global tkMessageBox1
            global tkMessageBox1_i
            tkMessageBox1_i = tkMessageBox1_i + 1
            DangQian_KJ_name = 'tkMessageBox1_i ' + str(tkMessageBox1_i + 1)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Dialog helper classes
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# String-processing helper class
class Str_ChuLi:
    """String-splitting helper for Tk option strings (e.g. a ``cget('font')`` result)."""

    def FenDuan(self, str1):
        """Split *str1* at its first space (ignoring position 0) into a
        ``(head, tail)`` tuple, with leading spaces stripped from the tail.

        Returns ``(str1, '')`` when no separator space is found.

        Bugfix: the original looped ``range(1, L + 1)`` over the tail and
        indexed one past its end, raising IndexError when the tail was all
        spaces (e.g. ``"x  "``); that case now yields an empty tail.
        """
        self.str = str(str1)
        # First space at index >= 1 — a space at position 0 is not treated
        # as a separator, matching the original scan that started at 1.
        sep = self.str.find(' ', 1)
        self.falg_FenDuan = sep != -1
        if not self.falg_FenDuan:
            return (self.str, '')
        self.a = self.str[:sep]
        self.ab = self.str[sep:]
        # Drop the separator itself plus any further spaces; '' if only
        # spaces remain after the separator.
        self.b = self.ab[1:].lstrip(' ')
        print(self.a)
        print(self.b)
        return (self.a, self.b)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 颜色选择框类
class Choose_Color:
    """Wraps the standard Tk colour-chooser dialog."""

    def Color_Choose(self):
        """Show the colour dialog (preset to green) and return its result tuple."""
        selection = tkinter.colorchooser.askcolor(color='green', title="Choose the Colour")
        print(selection)
        return selection
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 获取文件名类
class Get_File_Name_GIF:
    """Prompts the user to pick a GIF image file."""

    def Get_Name(self):
        """Open a file-selection dialog filtered to ``*.gif`` and return the path."""
        chosen = tkinter.filedialog.askopenfilename(filetypes=[("*.gif", "gif")])
        return chosen
class Get_File_Name_XBM:
    """Prompts the user to pick an XBM bitmap file."""

    def Get_Name(self):
        """Open a file-selection dialog filtered to ``*.xbm`` and return the path."""
        chosen = tkinter.filedialog.askopenfilename(filetypes=[("*.xbm", "xbm")])
        return chosen
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Bitmap 图像处理类
class BitMap:
    """Applies a bitmap option (builtin Tk name or .xbm file) to a widget."""

    def BitMap_ChuLi(self, BuJian_NO_i, BuJian_LeiXing_XiaoXie, BuJian_Lei, BuJian):
        """Set *BuJian*'s ``bitmap`` from the global ``combt_bitmap`` value and
        record that value in *BuJian_Lei* under ``<type>_bitmap<i>``.

        ``combt_bitmap`` may be one of Tk's builtin bitmap names or a path to
        an .xbm file; an empty value leaves the widget's bitmap unchanged.
        """
        global combt_bitmap
        name_bitmap = str(BuJian_LeiXing_XiaoXie) + '_bitmap' + str(BuJian_NO_i)
        builtin_names = (
            'error', 'gray75', 'gray50', 'gray25', 'gray12', 'hourglass', 'info', 'questhead', 'question', 'warning')
        Zhi = combt_bitmap
        if Zhi in builtin_names:
            BuJian.config(bitmap=Zhi)
        elif Zhi != '':
            bitmap_photo = tkinter.BitmapImage(file=Zhi)
            # Bugfix: keep a reference on the widget — tkinter images are
            # garbage-collected (and stop displaying) once the last Python
            # reference dies, which happened when this local went out of scope.
            BuJian.bitmap_photo = bitmap_photo
            BuJian.config(bitmap=bitmap_photo)
        # Original wrote "" + Zhi + "", which is just Zhi.
        BuJian_Lei[name_bitmap] = Zhi
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Image 图像处理类
class Image_ChuLi:
    """Applies an image file (from the global ``ent_image``) to a widget."""

    def Image_ChuLi(self, BuJian_NO_i, BuJian_LeiXing_XiaoXie, BuJian_Lei, BuJian):
        """Set *BuJian*'s ``image`` from ``ent_image``, recording the path in
        *BuJian_Lei* under ``<type>_image<i>``; an empty value clears the image.
        """
        global ent_image
        name_image = str(BuJian_LeiXing_XiaoXie) + '_image' + str(BuJian_NO_i)
        Zhi = ent_image
        if Zhi != '':
            BuJian_Lei[name_image] = Zhi
            image_photo = PhotoImage(file=Zhi)
            # Bugfix: hold a reference — a PhotoImage with no live Python
            # reference is garbage-collected and the widget goes blank.
            BuJian.image_photo = image_photo
            BuJian.config(image=image_photo)
        else:
            BuJian.config(image='')
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 设计更新类
class Design_New:
    """Pushes the property panel's current values (module globals ``ent_*`` /
    ``combt_*``) onto an existing widget and records them in the widget's
    per-type bookkeeping dictionary."""

    def BuJian_New(self, BuJian_LeiXing_DaXie, BuJian_NO_i, BuJian_LeiXing_XiaoXie, BuJian_Lei):
        """Update widget ``<Type><i>`` stored in *BuJian_Lei* from the panel globals.

        Parameters:
            BuJian_LeiXing_DaXie: capitalised widget type, e.g. 'Button'.
            BuJian_NO_i: index of the widget within its type.
            BuJian_LeiXing_XiaoXie: lower-case type used to build dict keys.
            BuJian_Lei: dictionary mapping key names to widgets / option values.

        Bugfix: the ``# text`` section previously tested ``Is_In_takefocus``,
        which would try to set a ``text`` option on widgets (Canvas, Listbox,
        Scale, ...) that do not support one; it now uses ``Is_In_text``,
        matching the parallel check near the top of this method.
        """
        global lab_ControlType
        global ent_ControlName
        global ent_X0
        global ent_Y0
        global ent_width
        global ent_height
        global ent_length
        global ent_fontSize
        global combt_fontType
        global combt_foreground
        global combt_background
        global combt_anchor
        global combt_justify
        global ent_text
        global combt_state
        global combt_relief
        global combt_highlightcolor
        global combt_highlightbackground
        global combt_bitmap
        global ent_image
        global combt_padx
        global combt_pady
        global combt_takefocus
        global combt_cursor
        global ent_container
        global ent_command
        lab_ControlType = BuJian_LeiXing_DaXie
        KJ = str(BuJian_LeiXing_DaXie) + str(BuJian_NO_i)
        # Vertical offset of the design area: a visible menu bar (and an open
        # sub-menu bar) pushes every widget down by its height.
        global Distance
        global bar_W
        global bar_menu_W
        if zi_menu1_sum == 0:
            Distance = bar_W
        else:
            Distance = bar_W + bar_menu_W
        judge = Judge_Property()
        # ---- options every widget type supports: container, name, cursor,
        # ---- width, background, coords
        name_container = str(BuJian_LeiXing_XiaoXie) + '_container' + str(BuJian_NO_i)
        BuJian_Lei[name_container] = ent_container
        KJ_name = str(BuJian_LeiXing_XiaoXie) + '_name' + str(BuJian_NO_i)
        BuJian_Lei[KJ_name] = ent_ControlName
        # When the widget can show text but none was entered, display its name.
        if judge.Is_In_text(BuJian_LeiXing_DaXie) and ent_text == '':
            BuJian_Lei[KJ].config(text=ent_ControlName)
        name_coords = str(BuJian_LeiXing_XiaoXie) + '_coords' + str(BuJian_NO_i)
        BuJian_Lei[name_coords] = (ent_X0, ent_Y0, ent_width, ent_height, BuJian_NO_i)
        BuJian_Lei[KJ].place(x=ent_X0, y=ent_Y0 + Distance)
        BuJian_Lei[KJ].config(width=int(ent_width))
        name_cursor = str(BuJian_LeiXing_XiaoXie) + '_cursor' + str(BuJian_NO_i)
        BuJian_Lei[name_cursor] = combt_cursor
        BuJian_Lei[KJ].config(cursor=combt_cursor)
        name_background = str(BuJian_LeiXing_XiaoXie) + '_background' + str(BuJian_NO_i)
        BuJian_Lei[name_background] = combt_background
        BuJian_Lei[KJ].config(background=combt_background)
        # ---- options only some widget types support
        # height
        if judge.Is_In_height(BuJian_LeiXing_DaXie):
            name_height = str(BuJian_LeiXing_XiaoXie) + '_height' + str(BuJian_NO_i)
            Zhi = int(ent_height)
            BuJian_Lei[name_height] = Zhi
            BuJian_Lei[KJ].config(height=Zhi)
        # length
        if judge.Is_In_length(BuJian_LeiXing_DaXie):
            name_length = str(BuJian_LeiXing_XiaoXie) + '_length' + str(BuJian_NO_i)
            Zhi = int(ent_length)
            BuJian_Lei[name_length] = Zhi
            BuJian_Lei[KJ].config(length=Zhi)
        # font
        if judge.Is_In_font(BuJian_LeiXing_DaXie):
            name_font = str(BuJian_LeiXing_XiaoXie) + '_font' + str(BuJian_NO_i)
            Zhi = (str(combt_fontType), ent_fontSize)
            BuJian_Lei[name_font] = Zhi
            BuJian_Lei[KJ].config(font=Zhi)
        # foreground
        if judge.Is_In_foreground(BuJian_LeiXing_DaXie):
            name_foreground = str(BuJian_LeiXing_XiaoXie) + '_foreground' + str(BuJian_NO_i)
            BuJian_Lei[name_foreground] = combt_foreground
            BuJian_Lei[KJ].config(foreground=combt_foreground)
        # anchor
        if judge.Is_In_anchor(BuJian_LeiXing_DaXie):
            name_anchor = str(BuJian_LeiXing_XiaoXie) + '_anchor' + str(BuJian_NO_i)
            BuJian_Lei[name_anchor] = combt_anchor
            BuJian_Lei[KJ].config(anchor=combt_anchor)
        # justify
        if judge.Is_In_justify(BuJian_LeiXing_DaXie):
            name_justify = str(BuJian_LeiXing_XiaoXie) + '_justify' + str(BuJian_NO_i)
            BuJian_Lei[name_justify] = combt_justify
            BuJian_Lei[KJ].config(justify=combt_justify)
        # state
        if judge.Is_In_state(BuJian_LeiXing_DaXie):
            name_state = str(BuJian_LeiXing_XiaoXie) + '_state' + str(BuJian_NO_i)
            BuJian_Lei[name_state] = combt_state
            BuJian_Lei[KJ].config(state=combt_state)
        # relief
        if judge.Is_In_relief(BuJian_LeiXing_DaXie):
            name_relief = str(BuJian_LeiXing_XiaoXie) + '_relief' + str(BuJian_NO_i)
            BuJian_Lei[name_relief] = combt_relief
            BuJian_Lei[KJ].config(relief=combt_relief)
        # highlightcolor and highlightbackground
        if judge.Is_In_highlightcolor_or_highlightbackground(BuJian_LeiXing_DaXie):
            name_highlightcolor = str(BuJian_LeiXing_XiaoXie) + '_highlightcolor' + str(BuJian_NO_i)
            BuJian_Lei[name_highlightcolor] = combt_highlightcolor
            BuJian_Lei[KJ].config(highlightcolor=combt_highlightcolor)
            name_highlightbackground = str(BuJian_LeiXing_XiaoXie) + '_highlightbackground' + str(BuJian_NO_i)
            BuJian_Lei[name_highlightbackground] = combt_highlightbackground
            BuJian_Lei[KJ].config(highlightbackground=combt_highlightbackground)
        # bitmap
        if judge.Is_In_bitmap(BuJian_LeiXing_DaXie):
            BitMap().BitMap_ChuLi(BuJian_NO_i, BuJian_LeiXing_XiaoXie, BuJian_Lei, BuJian_Lei[KJ])
        # image
        if judge.Is_In_image(BuJian_LeiXing_DaXie):
            Image_ChuLi().Image_ChuLi(BuJian_NO_i, BuJian_LeiXing_XiaoXie, BuJian_Lei, BuJian_Lei[KJ])
        # padx and pady
        if judge.Is_In_padx_or_pady(BuJian_LeiXing_DaXie):
            name_padx = str(BuJian_LeiXing_XiaoXie) + '_padx' + str(BuJian_NO_i)
            BuJian_Lei[name_padx] = combt_padx
            BuJian_Lei[KJ].config(padx=combt_padx)
            name_pady = str(BuJian_LeiXing_XiaoXie) + '_pady' + str(BuJian_NO_i)
            BuJian_Lei[name_pady] = combt_pady
            BuJian_Lei[KJ].config(pady=combt_pady)
        # text (see the bugfix note in the docstring)
        if judge.Is_In_text(BuJian_LeiXing_DaXie) and ent_text != '':
            name_text = str(BuJian_LeiXing_XiaoXie) + '_text' + str(BuJian_NO_i)
            BuJian_Lei[name_text] = ent_text
            BuJian_Lei[KJ].config(text=ent_text)
        # takefocus
        if judge.Is_In_takefocus(BuJian_LeiXing_DaXie):
            name_takefocus = str(BuJian_LeiXing_XiaoXie) + '_takefocus' + str(BuJian_NO_i)
            BuJian_Lei[name_takefocus] = combt_takefocus
            BuJian_Lei[KJ].config(takefocus=combt_takefocus)
        # command
        if judge.Is_In_command(BuJian_LeiXing_DaXie):
            name_command = str(BuJian_LeiXing_XiaoXie) + '_command' + str(BuJian_NO_i)
            BuJian_Lei[name_command] = ent_command
            BuJian_Lei[KJ].config(command=ent_command)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 判断属性类
# 每个控件都有的属性 container, cusor, width, background
class Judge_Property:
    """Capability lookup table: each ``Is_In_*`` method reports (TRUE/FALSE)
    whether the given capitalised widget type supports the corresponding Tk
    option."""

    def Is_In_anchor(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``anchor`` option."""
        supported = ('Button', 'Checkbutton', 'Label', 'tk.Message', 'Radiobutton')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_font(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``font`` option."""
        supported = ('Button', 'Checkbutton', 'Combobox', 'Entry', 'Label', 'LabelFrame', 'Listbox', 'tk.Message', 'Radiobutton',
                     'Scale_X', 'Scale_Y', 'Text', 'Spinbox')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_bitmap(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``bitmap`` option."""
        supported = ('Button', 'Checkbutton', 'Label', 'Radiobutton')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_justify(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``justify`` option."""
        supported = ('Button', 'Checkbutton', 'Combobox', 'Entry', 'Label', 'Listbox', 'tk.Message', 'Radiobutton', 'Spinbox')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_image(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``image`` option."""
        supported = ('Button', 'Checkbutton', 'Label', 'Radiobutton')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_height(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``height`` option."""
        supported = ('Button', 'Canvas', 'Checkbutton', 'Combobox', 'Frame', 'Label', 'LabelFrame', 'Listbox',
                     'PanedWindow', 'Radiobutton', 'Text')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_length(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``length`` option."""
        supported = ('Scale_X', 'Scale_Y')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_foreground(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``foreground`` option."""
        supported = ('Button', 'Checkbutton', 'Combobox', 'Entry', 'Label', 'LabelFrame', 'Listbox', 'tk.Message',
                     'Radiobutton', 'Scale_X', 'Scale_Y', 'Text', 'Spinbox')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_padx_or_pady(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports ``padx`` / ``pady``."""
        supported = ('Button', 'Checkbutton', 'Frame', 'Label', 'LabelFrame', 'tk.Message', 'Radiobutton', 'Text')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_relief(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``relief`` option."""
        supported = ('Button', 'Canvas', 'Checkbutton', 'Entry', 'Frame', 'Label', 'LabelFrame', 'Listbox',
                     'tk.Message', 'PanedWindow', 'Radiobutton', 'Scale_X', 'Scale_Y', 'Text', 'Spinbox')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_text(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``text`` option."""
        supported = ('Button', 'Checkbutton', 'Entry', 'Label', 'LabelFrame', 'tk.Message', 'Radiobutton', 'Spinbox')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_state(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``state`` option."""
        supported = ('Button', 'Canvas', 'Checkbutton', 'Combobox', 'Entry', 'Label', 'Listbox',
                     'Radiobutton', 'Scale_X', 'Scale_Y', 'Text', 'Spinbox')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_takefocus(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``takefocus`` option."""
        supported = ('Button', 'Canvas', 'Checkbutton', 'Combobox', 'Entry', 'Frame', 'Label', 'LabelFrame', 'Listbox',
                     'tk.Message', 'Radiobutton', 'Scale_X', 'Scale_Y', 'Text', 'Spinbox')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_highlightcolor_or_highlightbackground(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the highlight colour options."""
        supported = ('Button', 'Canvas', 'Checkbutton', 'Entry', 'Frame', 'Label', 'LabelFrame', 'Listbox', 'tk.Message',
                     'Radiobutton', 'Scale_X', 'Scale_Y', 'Text', 'Spinbox')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_command(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``command`` option."""
        supported = ('Button', 'Checkbutton', 'Radiobutton', 'Scale_X', 'Scale_Y', 'Spinbox')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE

    def Is_In_orient(self, BuJian_LeiXing_DaXie):
        """TRUE when the widget type supports the ``orient`` option."""
        supported = ('Scale_X', 'Scale_Y')
        return TRUE if BuJian_LeiXing_DaXie in supported else FALSE
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# BuJian_New(self, BuJian_LeiXing_DaXie, BuJian_NO_i, BuJian_LeiXing_XiaoXie, BuJian_Lei)
# 颜色恢复处理类
class Color_Handle:
    """Restores a widget's colours / state after the highlight applied while
    it was selected."""

    def Color_Restore(self, BuJian_Lei, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie, Num_i):
        """Reset widget ``<Type><Num_i>`` to the foreground/background values
        recorded in *BuJian_Lei*, and re-enable it where selection disabled it.
        """
        KJ = BuJian_LeiXing_DaXie + str(Num_i)
        if self.Judge_foreground(BuJian_LeiXing_DaXie):
            name_foreground = BuJian_LeiXing_XiaoXie + '_foreground' + str(Num_i)
            BuJian_Lei[KJ].config(foreground=BuJian_Lei[name_foreground])
        if self.Judge_background(BuJian_LeiXing_DaXie):
            name_background = BuJian_LeiXing_XiaoXie + '_background' + str(Num_i)
            BuJian_Lei[KJ].config(background=BuJian_Lei[name_background])
        if self.Judge_state(BuJian_LeiXing_DaXie):
            BuJian_Lei[KJ].configure(state='normal')

    def Judge_foreground(self, BuJian_LeiXing_DaXie):
        """True when the widget type has a restorable ``foreground`` option."""
        foreground_list = ('Button', 'Checkbutton', 'Entry', 'Label', 'LabelFrame', 'Listbox', 'tk.Message',
                           'Radiobutton', 'Scale_X', 'Scale_Y', 'Spinbox', 'Text')
        return BuJian_LeiXing_DaXie in foreground_list

    def Judge_background(self, BuJian_LeiXing_DaXie):
        """True when the widget type has a restorable ``background`` option."""
        background_list = ('Button', 'Canvas', 'Checkbutton', 'Entry', 'Frame', 'Label', 'LabelFrame', 'Listbox',
                           'tk.Message', 'PanedWindow', 'Radiobutton', 'Scale_X', 'Scale_Y', 'Spinbox', 'Text')
        return BuJian_LeiXing_DaXie in background_list

    def Judge_state(self, BuJian_LeiXing_DaXie):
        """True when selection disables the widget and restore must re-enable it.

        Bugfix: ``('Combobox')`` was a plain string, so ``in`` performed a
        substring test (e.g. 'Combo' matched); a one-element tuple gives the
        intended exact-membership test.
        """
        state_list = ('Combobox',)
        return BuJian_LeiXing_DaXie in state_list
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 完成后,选定处理类
class XuanDing:
    """Selection handling: tests whether a widget lies inside the rubber-band
    selection rectangle and, if so, highlights it and records it in the
    global ``XuanZhong`` selection dictionary."""
    def BuJian_ChuLi(self, i, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie, BuJian_Lei,
                     XuanKuang_X0, XuanKuang_Y0, XuanKuang_X1, XuanKuang_Y1):
        """Check widget *i* of the given type against the selection box whose
        corners are ``XuanKuang_X0/Y0`` (top-left) and ``XuanKuang_X1/Y1``
        (bottom-right) and mark it selected when fully enclosed.

        Side effects on selection: recolours the widget with the module-level
        selection colours, disables Combobox widgets, increments the global
        ``XuanZhong_sum`` and stores a ``(widget, Type, type, index, dict)``
        tuple in ``XuanZhong``.
        """
        global XuanZhong
        global XuanZhong_sum
        # Recorded geometry tuple: (x0, y0, x1, y1, index).
        name = str(BuJian_LeiXing_XiaoXie) + '_coords' + str(i)
        a = BuJian_Lei[name]
        xx0 = a[0]
        yy0 = a[1]
        xx1 = a[2]
        yy1 = a[3]
        Num_i = a[4]
        # Select only when the widget is fully inside a non-degenerate box.
        if ((XuanKuang_X0 <= xx0) and (XuanKuang_Y0 <= yy0) and (XuanKuang_X1 >= xx1) and (XuanKuang_Y1 >= yy1)) \
                and (XuanKuang_X1 != XuanKuang_X0) and (XuanKuang_Y1 != XuanKuang_Y0):
            KJ = str(BuJian_LeiXing_DaXie) + str(i)
            color_handle = Color_Handle()
            # Apply the selection highlight colours (module globals defined
            # elsewhere) only where the widget type supports each option.
            if color_handle.Judge_foreground(BuJian_LeiXing_DaXie) == TRUE:
                BuJian_Lei[KJ].config(foreground=foreground_XiangMu_XuanDing)
            if color_handle.Judge_background(BuJian_LeiXing_DaXie) == TRUE:
                BuJian_Lei[KJ].config(background=background_XiangMu_XuanDing)
            if color_handle.Judge_state(BuJian_LeiXing_DaXie) == TRUE:
                BuJian_Lei[KJ].config(state='disabled')
            XuanZhong_sum = XuanZhong_sum + 1
            name = "XuanZhong" + str(XuanZhong_sum)
            XuanZhong[name] = (BuJian_Lei[KJ], str(BuJian_LeiXing_DaXie), str(BuJian_LeiXing_XiaoXie), Num_i, BuJian_Lei)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Design 部件类
class Design_BuJian:
    """Loads a selected widget's recorded option values back into the
    property-panel globals (``ent_*`` / ``combt_*``) so the panel displays
    the widget's current configuration for editing."""
    def Design_bujian(self, XuanZhong_Object):
        """Populate the panel globals from *XuanZhong_Object*, the selection
        tuple ``(widget, Type, type, index, dict)`` recorded by ``XuanDing``.

        Only mutates module-level globals; the widget itself is untouched.
        """
        global lab_ControlType
        global ent_ControlName
        global ent_X0
        global ent_Y0
        global ent_width
        global ent_height
        global ent_length
        global ent_fontSize
        global combt_fontType
        global combt_foreground
        global combt_background
        global combt_anchor
        global combt_justify
        global ent_text
        global combt_state
        global combt_relief
        global combt_highlightcolor
        global combt_highlightbackground
        global combt_bitmap
        global ent_image
        global combt_padx
        global combt_pady
        global combt_takefocus
        global combt_cursor
        global ent_container
        global ent_command
        a = XuanZhong_Object
        lab_ControlType = a[1]
        BuJian_LeiXing_DaXie = a[1]
        BuJian_LeiXing_XiaoXie = a[2]
        BuJian_NO_i = a[3]
        BuJian_Lei = a[4]
        # Options every widget type records: name, coords, container, cursor, width, background
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        # Load the shared options
        KJ_name = str(BuJian_LeiXing_XiaoXie) + '_name' + str(BuJian_NO_i)
        ent_ControlName = BuJian_Lei[KJ_name]
        name_container = str(BuJian_LeiXing_XiaoXie) + '_container' + str(BuJian_NO_i)
        ent_container = BuJian_Lei[name_container]
        name_cursor = str(BuJian_LeiXing_XiaoXie) + '_cursor' + str(BuJian_NO_i)
        combt_cursor = BuJian_Lei[name_cursor]
        # Coords tuple is (x0, y0, x1, y1, index); only the origin is shown.
        name_coords = str(BuJian_LeiXing_XiaoXie) + '_coords' + str(BuJian_NO_i)
        a = BuJian_Lei[name_coords]
        ent_X0 = a[0]
        ent_Y0 = a[1]
        name_width = str(BuJian_LeiXing_XiaoXie) + '_width' + str(BuJian_NO_i)
        ent_width = BuJian_Lei[name_width]
        name_background = str(BuJian_LeiXing_XiaoXie) + '_background' + str(BuJian_NO_i)
        combt_background = BuJian_Lei[name_background]
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        # Options only some widget types record
        judge = Judge_Property()
        # height
        if judge.Is_In_height(BuJian_LeiXing_DaXie) == TRUE:
            name_height = str(BuJian_LeiXing_XiaoXie) + '_height' + str(BuJian_NO_i)
            ent_height = BuJian_Lei[name_height]
        # length
        if judge.Is_In_length(BuJian_LeiXing_DaXie) == TRUE:
            name_length = str(BuJian_LeiXing_XiaoXie) + '_length' + str(BuJian_NO_i)
            ent_length = BuJian_Lei[name_length]
        # font — stored as a (family, size) pair
        if judge.Is_In_font(BuJian_LeiXing_DaXie) == TRUE:
            name_font = str(BuJian_LeiXing_XiaoXie) + '_font' + str(BuJian_NO_i)
            font = BuJian_Lei[name_font]
            combt_fontType = font[0]
            ent_fontSize = font[1]
        # foreground
        if judge.Is_In_foreground(BuJian_LeiXing_DaXie) == TRUE:
            name_foreground = str(BuJian_LeiXing_XiaoXie) + '_foreground' + str(BuJian_NO_i)
            combt_foreground = BuJian_Lei[name_foreground]
        # anchor
        if judge.Is_In_anchor(BuJian_LeiXing_DaXie) == TRUE:
            name_anchor = str(BuJian_LeiXing_XiaoXie) + '_anchor' + str(BuJian_NO_i)
            combt_anchor = BuJian_Lei[name_anchor]
        # justify
        if judge.Is_In_justify(BuJian_LeiXing_DaXie) == TRUE:
            name_justify = str(BuJian_LeiXing_XiaoXie) + '_justify' + str(BuJian_NO_i)
            combt_justify = BuJian_Lei[name_justify]
        # state
        if judge.Is_In_state(BuJian_LeiXing_DaXie) == TRUE:
            name_state = str(BuJian_LeiXing_XiaoXie) + '_state' + str(BuJian_NO_i)
            combt_state = BuJian_Lei[name_state]
        # relief
        if judge.Is_In_relief(BuJian_LeiXing_DaXie) == TRUE:
            name_relief = str(BuJian_LeiXing_XiaoXie) + '_relief' + str(BuJian_NO_i)
            combt_relief = BuJian_Lei[name_relief]
        # highlightcolor and highlightbackground
        if judge.Is_In_highlightcolor_or_highlightbackground(BuJian_LeiXing_DaXie) == TRUE:
            name_highlightcolor = str(BuJian_LeiXing_XiaoXie) + '_highlightcolor' + str(BuJian_NO_i)
            combt_highlightcolor = BuJian_Lei[name_highlightcolor]
            name_highlightbackground = str(BuJian_LeiXing_XiaoXie) + '_highlightbackground' + str(BuJian_NO_i)
            combt_highlightbackground = BuJian_Lei[name_highlightbackground]
        # bitmap
        if judge.Is_In_bitmap(BuJian_LeiXing_DaXie) == TRUE:
            name_bitmap = str(BuJian_LeiXing_XiaoXie) + '_bitmap' + str(BuJian_NO_i)
            combt_bitmap = BuJian_Lei[name_bitmap]
        # image
        if judge.Is_In_image(BuJian_LeiXing_DaXie) == TRUE:
            name_image = str(BuJian_LeiXing_XiaoXie) + '_image' + str(BuJian_NO_i)
            ent_image = BuJian_Lei[name_image]
        # padx and pady
        if judge.Is_In_padx_or_pady(BuJian_LeiXing_DaXie) == TRUE:
            name_padx = str(BuJian_LeiXing_XiaoXie) + '_padx' + str(BuJian_NO_i)
            combt_padx = BuJian_Lei[name_padx]
            name_pady = str(BuJian_LeiXing_XiaoXie) + '_pady' + str(BuJian_NO_i)
            combt_pady = BuJian_Lei[name_pady]
        # takefocus
        if judge.Is_In_takefocus(BuJian_LeiXing_DaXie) == TRUE:
            name_takefocus = str(BuJian_LeiXing_XiaoXie) + '_takefocus' + str(BuJian_NO_i)
            combt_takefocus = BuJian_Lei[name_takefocus]
        # command
        if judge.Is_In_command(BuJian_LeiXing_DaXie) == TRUE:
            name_command = str(BuJian_LeiXing_XiaoXie) + '_command' + str(BuJian_NO_i)
            ent_command = BuJian_Lei[name_command]
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Delete 部件类
class Delete_BuJian:
    """Deletes a selected widget and returns its index to the per-type
    free-list so the slot can be reused by later widgets."""

    def Delete(self, XuanZhong_Object):
        """Destroy the widget in *XuanZhong_Object* (a selection tuple
        ``(widget, Type, type, index, dict)``) and append its index to the
        matching ``*_List_Num`` free-list.

        Bugfix/cleanup: the original ``elif`` chain repeated the
        'PanedWindow' branch three times; the duplicates were dead code and
        the chain is now a single dispatch table.
        """
        global Button1_List_Num
        global Canvas1_List_Num
        global Checkbutton1_List_Num
        global Combobox1_List_Num
        global Entry1_List_Num
        global Frame1_List_Num
        global Label1_List_Num
        global LabelFrame1_List_Num
        global Listbox1_List_Num
        global Menu1_List_Num
        global Message1_List_Num
        global PanedWindow1_List_Num
        global Radiobutton1_List_Num
        global Scale1_List_Num_X
        global Scale1_List_Num_Y
        global Spinbox1_List_Num
        global Text1_List_Num
        a = XuanZhong_Object
        BuJian_LeiXing_DaXie = a[1]
        BuJian_NO_i = a[3]
        BuJian_Lei = a[4]
        KJ = str(BuJian_LeiXing_DaXie) + str(BuJian_NO_i)
        BuJian_Lei[KJ].destroy()
        # Widget type -> free-list of reusable indices.
        free_lists = {
            'Button': Button1_List_Num,
            'Canvas': Canvas1_List_Num,
            'Checkbutton': Checkbutton1_List_Num,
            'Combobox': Combobox1_List_Num,
            'Entry': Entry1_List_Num,
            'Frame': Frame1_List_Num,
            'Label': Label1_List_Num,
            'LabelFrame': LabelFrame1_List_Num,
            'Listbox': Listbox1_List_Num,
            'Message': Message1_List_Num,
            'PanedWindow': PanedWindow1_List_Num,
            'Radiobutton': Radiobutton1_List_Num,
            'Scale_X': Scale1_List_Num_X,
            'Scale_Y': Scale1_List_Num_Y,
            'Spinbox': Spinbox1_List_Num,
            'Text': Text1_List_Num,
        }
        # Unknown types (as before) are destroyed but recycle no index.
        if BuJian_LeiXing_DaXie in free_lists:
            free_lists[BuJian_LeiXing_DaXie].append(BuJian_NO_i)
# ('Button', 'Canvas', 'Checkbutton', 'Combobox', 'Entry', 'Frame', 'Label', 'LabelFrame', 'Listbox', 'Message', 'PanedWindow', 'Radiobutton', 'Scale_X', 'Scale_Y', 'Text', 'Spinbox')
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 录入字典类
class Dictionary:
    def Record_Dict(self, BuJian, BuJian_Lei, BuJian_NO_i, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie,
                    self_X0, self_Y0, self_X1, self_Y1):
        """Register a freshly created widget *BuJian* in its per-type
        dictionary *BuJian_Lei*, snapshotting every option the type supports
        (via ``cget``) so it can later be edited and code-generated.

        The ``self_X0..self_Y1`` arguments are the widget's placement
        rectangle on the design canvas.

        Side effects: updates the global ``DangQian_KJ_name`` and, for widget
        types with a ``text`` option, overwrites the displayed text with that
        name; finally delegates to ``Record_Code`` for code-generation
        bookkeeping.
        """
        global DangQian_KJ_name
        X0 = self_X0
        Y0 = self_Y0
        X1 = self_X1
        Y1 = self_Y1
        DangQian_KJ_name = str(BuJian_LeiXing_DaXie) + ' ' + str(BuJian_NO_i)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        # Record the widget object itself and its default name.
        KJ = str(BuJian_LeiXing_DaXie) + str(BuJian_NO_i)
        KJ_name = str(BuJian_LeiXing_XiaoXie) + '_name' + str(BuJian_NO_i)
        BuJian_Lei[KJ] = BuJian
        BuJian_Lei[KJ_name] = str(BuJian_LeiXing_DaXie) + str(BuJian_NO_i)
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        # Record concrete option values.
        # Options shared by every widget type.
        name_coords = str(BuJian_LeiXing_XiaoXie) + '_coords' + str(BuJian_NO_i)
        Zhi = (X0, Y0, X1, Y1, BuJian_NO_i)
        BuJian_Lei[name_coords] = Zhi
        name_container = str(BuJian_LeiXing_XiaoXie) + '_container' + str(BuJian_NO_i)
        Zhi = 'root'
        BuJian_Lei[name_container] = Zhi
        name_cursor = str(BuJian_LeiXing_XiaoXie) + '_cursor' + str(BuJian_NO_i)
        Zhi = BuJian.cget('cursor')
        BuJian_Lei[name_cursor] = Zhi
        name_width = str(BuJian_LeiXing_XiaoXie) + '_width' + str(BuJian_NO_i)
        Zhi = BuJian.cget('width')
        BuJian_Lei[name_width] = Zhi
        name_background = str(BuJian_LeiXing_XiaoXie) + '_background' + str(BuJian_NO_i)
        Zhi = BuJian.cget('background')
        BuJian_Lei[name_background] = Zhi
        # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
        # Options only some widget types support.
        judge = Judge_Property()
        # length
        if judge.Is_In_length(BuJian_LeiXing_DaXie) == TRUE:
            name_length = str(BuJian_LeiXing_XiaoXie) + '_length' + str(BuJian_NO_i)
            Zhi = BuJian.cget('length')
            BuJian_Lei[name_length] = Zhi
        # height
        if judge.Is_In_height(BuJian_LeiXing_DaXie) == TRUE:
            name_height = str(BuJian_LeiXing_XiaoXie) + '_height' + str(BuJian_NO_i)
            Zhi = BuJian.cget('height')
            BuJian_Lei[name_height] = Zhi
        # font — Tk returns e.g. "family size"; split into a (family, size) pair.
        if judge.Is_In_font(BuJian_LeiXing_DaXie) == TRUE:
            name_font = str(BuJian_LeiXing_XiaoXie) + '_font' + str(BuJian_NO_i)
            str1 = BuJian.cget('font')
            a = Str_ChuLi()
            b = a.FenDuan(str1)
            BuJian_Lei[name_font] = b
        # foreground
        if judge.Is_In_foreground(BuJian_LeiXing_DaXie) == TRUE:
            name_foreground = str(BuJian_LeiXing_XiaoXie) + '_foreground' + str(BuJian_NO_i)
            Zhi = BuJian.cget('foreground')
            BuJian_Lei[name_foreground] = Zhi
        # anchor
        if judge.Is_In_anchor(BuJian_LeiXing_DaXie) == TRUE:
            name_anchor = str(BuJian_LeiXing_XiaoXie) + '_anchor' + str(BuJian_NO_i)
            Zhi = BuJian.cget('anchor')
            BuJian_Lei[name_anchor] = Zhi
        # justify
        if judge.Is_In_justify(BuJian_LeiXing_DaXie) == TRUE:
            name_justify = str(BuJian_LeiXing_XiaoXie) + '_justify' + str(BuJian_NO_i)
            Zhi = BuJian.cget('justify')
            BuJian_Lei[name_justify] = Zhi
        # state
        if judge.Is_In_state(BuJian_LeiXing_DaXie) == TRUE:
            name_state = str(BuJian_LeiXing_XiaoXie) + '_state' + str(BuJian_NO_i)
            Zhi = BuJian.cget('state')
            BuJian_Lei[name_state] = Zhi
        # relief
        if judge.Is_In_relief(BuJian_LeiXing_DaXie) == TRUE:
            name_relief = str(BuJian_LeiXing_XiaoXie) + '_relief' + str(BuJian_NO_i)
            Zhi = BuJian.cget('relief')
            BuJian_Lei[name_relief] = Zhi
        # highlightcolor and highlightbackground
        if judge.Is_In_highlightcolor_or_highlightbackground(BuJian_LeiXing_DaXie) == TRUE:
            name_highlightcolor = str(BuJian_LeiXing_XiaoXie) + '_highlightcolor' + str(BuJian_NO_i)
            Zhi = BuJian.cget('highlightcolor')
            BuJian_Lei[name_highlightcolor] = Zhi
            name_highlightbackground = str(BuJian_LeiXing_XiaoXie) + '_highlightbackground' + str(BuJian_NO_i)
            Zhi = BuJian.cget('highlightbackground')
            BuJian_Lei[name_highlightbackground] = Zhi
        # bitmap
        if judge.Is_In_bitmap(BuJian_LeiXing_DaXie) == TRUE:
            name_bitmap = str(BuJian_LeiXing_XiaoXie) + '_bitmap' + str(BuJian_NO_i)
            Zhi = BuJian.cget('bitmap')
            BuJian_Lei[name_bitmap] = Zhi
        # image
        if judge.Is_In_image(BuJian_LeiXing_DaXie) == TRUE:
            name_image = str(BuJian_LeiXing_XiaoXie) + '_image' + str(BuJian_NO_i)
            Zhi = BuJian.cget('image')
            BuJian_Lei[name_image] = Zhi
        # padx and pady
        if judge.Is_In_padx_or_pady(BuJian_LeiXing_DaXie) == TRUE:
            name_padx = str(BuJian_LeiXing_XiaoXie) + '_padx' + str(BuJian_NO_i)
            Zhi = BuJian.cget('padx')
            BuJian_Lei[name_padx] = Zhi
            name_pady = str(BuJian_LeiXing_XiaoXie) + '_pady' + str(BuJian_NO_i)
            Zhi = BuJian.cget('pady')
            BuJian_Lei[name_pady] = Zhi
        # text
        if judge.Is_In_text(BuJian_LeiXing_DaXie) == TRUE:
            name_text = str(BuJian_LeiXing_XiaoXie) + '_text' + str(BuJian_NO_i)
            Zhi = BuJian.cget('text')
            BuJian_Lei[name_text] = Zhi
            # Display the widget's generated name on the design canvas.
            BuJian.config(text=DangQian_KJ_name)
        # takefocus
        if judge.Is_In_takefocus(BuJian_LeiXing_DaXie) == TRUE:
            name_takefocus = str(BuJian_LeiXing_XiaoXie) + '_takefocus' + str(BuJian_NO_i)
            Zhi = BuJian.cget('takefocus')
            BuJian_Lei[name_takefocus] = Zhi
        # command
        if judge.Is_In_command(BuJian_LeiXing_DaXie) == TRUE:
            name_command = str(BuJian_LeiXing_XiaoXie) + '_command' + str(BuJian_NO_i)
            Zhi = BuJian.cget('command')
            BuJian_Lei[name_command] = Zhi
        # orient
        if judge.Is_In_orient(BuJian_LeiXing_DaXie) == TRUE:
            name_orient = str(BuJian_LeiXing_XiaoXie) + '_orient' + str(BuJian_NO_i)
            Zhi = BuJian.cget('orient')
            BuJian_Lei[name_orient] = Zhi
        self.Record_Code(BuJian, BuJian_Lei, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie, BuJian_NO_i)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def Record_Code(self, BuJian, BuJian_Lei, BuJian_LeiXing_DaXie, BuJian_LeiXing_XiaoXie, BuJian_NO_i):
# 录入代码
judge = Judge_Property()
KJ = str(BuJian_LeiXing_DaXie) + str(BuJian_NO_i)
KJ_name = str(BuJian_LeiXing_XiaoXie) + '_name' + str(BuJian_NO_i)
BuJian_Lei[KJ] = BuJian
# global ent_ControlName
# if ent_ControlName != '':
# BuJian_Lei[KJ_name] = ent_ControlName
# else:
# BuJian_Lei[KJ_name] = str(BuJian_LeiXing_DaXie) + str(BuJian_NO_i)
# 通用属性
# name_container = str(BuJian_LeiXing_XiaoXie) + '_container' + str(BuJian_NO_i)
name_coords = str(BuJian_LeiXing_XiaoXie) + '_coords' + str(BuJian_NO_i)
if BuJian.cget('cursor') != '':
cursor_str = str(BuJian.cget('cursor'))
cursor_str_head = "cursor='"
cursor_str_tail = "', "
else:
cursor_str = ""
cursor_str_head = ""
cursor_str_tail = ""
if BuJian.cget('background') != '':
background_str = str(BuJian.cget('background'))
background_str_head = "background='"
background_str_tail = "', "
else:
background_str = ""
background_str_head = ""
background_str_tail = ""
if BuJian.cget('width') != '':
width_str = str(BuJian.cget('width'))
width_str_head = "width="
width_str_tail = ", "
else:
width_str = ""
width_str_head = ""
width_str_tail = ""
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 部分属性
# height
height_str = ""
height_str_head = ""
height_str_tail = ""
if judge.Is_In_height(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('height') != '':
height_str = str(BuJian.cget('height'))
height_str_head = "height="
height_str_tail = ", "
# length
length_str = ""
length_str_head = ""
length_str_tail = ""
if judge.Is_In_length(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('length') != '':
length_str = str(BuJian.cget('length'))
length_str_head = "length="
length_str_tail = ", "
# font
font_str = ""
font_str_head = ""
font_str_tail = ""
if judge.Is_In_font(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('font') != '':
str2 = BuJian.cget('font')
a = Str_ChuLi()
b = a.FenDuan(str2)
font_str = "('" + str(b[0]) + "', " + str(b[1]) + ")"
font_str_head = "font="
font_str_tail = ", "
# foreground
foreground_str = ""
foreground_str_head = ""
foreground_str_tail = ""
if judge.Is_In_foreground(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('foreground') != '':
foreground_str = str(BuJian.cget('foreground'))
foreground_str_head = "foreground='"
foreground_str_tail = "', "
# anchor
anchor_str = ""
anchor_str_head = ""
anchor_str_tail = ""
if judge.Is_In_anchor(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('anchor') != '':
anchor_str = str(BuJian.cget('anchor'))
anchor_str_head = "anchor='"
anchor_str_tail = "', "
# justify
justify_str = ""
justify_str_head = ""
justify_str_tail = ""
if judge.Is_In_justify(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('justify') != '':
justify_str = str(BuJian.cget('justify'))
justify_str_head = "justify='"
justify_str_tail = "', "
# state
state_str = ""
state_str_head = ""
state_str_tail = ""
if judge.Is_In_state(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('state') != '':
state_str = str(BuJian.cget('state'))
state_str_head = "state='"
state_str_tail = "', "
# relief
relief_str = ""
relief_str_head = ""
relief_str_tail = ""
if judge.Is_In_relief(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('relief') != '':
relief_str = str(BuJian.cget('relief'))
relief_str_head = "relief='"
relief_str_tail = "', "
# highlightcolor and highlightbackground
highlightcolor_str = ""
highlightbackground_str = ""
highlightcolor_str_head = ""
highlightcolor_str_tail = ""
highlightbackground_str_head = ""
highlightbackground_str_tail = ""
if judge.Is_In_highlightcolor_or_highlightbackground(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('highlightcolor') != '':
highlightcolor_str = str(BuJian.cget('highlightcolor'))
highlightcolor_str_head = "highlightcolor='"
highlightcolor_str_tail = "', "
if BuJian.cget('highlightbackground') != '':
highlightbackground_str = str(BuJian.cget('highlightbackground'))
highlightbackground_str_head = "highlightbackground='"
highlightbackground_str_tail = "', "
# bitmap
bitmap_photo_str = ""
bitmap_str_head = ""
bitmap_str_tail = ""
if judge.Is_In_bitmap(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('bitmap') != '':
bitmap_str = str(BuJian.cget('bitmap'))
bitmap_photo_str = "PhotoImage(file='" + bitmap_str + "'), "
bitmap_str_head = "bitmap="
bitmap_str_tail = ", "
# image
image_photo_str = ""
image_str_head = ""
image_str_tail = ""
if judge.Is_In_image(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('image') != '':
image_str = str(BuJian.cget('image'))
image_photo_str = "PhotoImage(file='" + image_str + "'), "
image_str_head = "image="
image_str_tail = ", "
# padx and pady
padx_str = ""
pady_str = ""
padx_str_head = ""
padx_str_tail = ""
pady_str_head = ""
pady_str_tail = ""
if judge.Is_In_padx_or_pady(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('padx') != '':
padx_str = str(BuJian.cget('padx'))
padx_str_head = "padx="
padx_str_tail = ", "
if BuJian.cget('pady') != '':
pady_str = str(BuJian.cget('pady'))
pady_str_head = "pady="
pady_str_tail = ", "
# text
text_str = ""
text_str_head = ""
text_str_tail = ""
if judge.Is_In_text(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('text') != '':
text_str = str(BuJian.cget('text'))
text_str_head = "text='"
text_str_tail = "', "
# takefocus
takefocus_str = ""
takefocus_str_head = ""
takefocus_str_tail = ""
if judge.Is_In_takefocus(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('takefocus') != '':
takefocus_str = str(BuJian.cget('takefocus'))
takefocus_str_head = "takefocus='"
takefocus_str_tail = "', "
# command
command_str = ""
command_str_head = ""
command_str_tail = ""
if judge.Is_In_command(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('command') != '':
command_str = str(BuJian.cget('command'))
command_str_head = "command="
command_str_tail = ""
# orient
orient_str = ""
orient_str_head = ""
orient_str_tail = ""
if judge.Is_In_orient(BuJian_LeiXing_DaXie) == TRUE:
if BuJian.cget('orient') != '':
orient_str = "'" + str(BuJian.cget('orient')) + "'"
orient_str_head = "orient="
orient_str_tail = ""
Control_Lei = ""
# 判断是否 Scale
if (BuJian_LeiXing_DaXie == "Scale_X") or (BuJian_LeiXing_DaXie == "Scale_Y"):
Control_Lei = "Scale"
else:
Control_Lei = str(BuJian_LeiXing_DaXie)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 生成编辑代码
# + BuJian_Lei[name_container] + ", " \
Code1 = BuJian_Lei[KJ_name] + " = " + Control_Lei + "(" \
+ anchor_str_head + anchor_str + anchor_str_tail \
+ cursor_str_head + cursor_str + cursor_str_tail \
+ font_str_head + font_str + font_str_tail \
+ bitmap_str_head + bitmap_photo_str + bitmap_str_tail \
+ justify_str_head + justify_str + justify_str_tail \
+ image_str_head + image_photo_str + image_str_tail \
+ width_str_head + width_str + width_str_tail \
+ height_str_head + height_str + height_str_tail \
+ length_str_head + length_str + length_str_tail \
+ foreground_str_head + foreground_str + foreground_str_tail \
+ background_str_head + background_str + background_str_tail \
+ padx_str_head + padx_str + padx_str_tail \
+ pady_str_head + pady_str + pady_str_tail \
+ relief_str_head + relief_str + relief_str_tail \
+ text_str_head + text_str + text_str_tail \
+ state_str_head + state_str + state_str_tail \
+ takefocus_str_head + takefocus_str + takefocus_str_tail \
+ highlightcolor_str_head + highlightcolor_str + highlightcolor_str_tail \
+ highlightbackground_str_head + highlightbackground_str + highlightbackground_str_tail \
+ orient_str_head + orient_str + orient_str_tail \
+ command_str_head + command_str + command_str_tail + ")"
ZuJianZB = BuJian_Lei[name_coords]
Code2 = BuJian_Lei[KJ_name] + ".place(x=" + str(ZuJianZB[0]) + ", " + "y=" + str(ZuJianZB[1]) + ')'
# 代码录入字典**********************************
name_Code = str(BuJian_LeiXing_XiaoXie) + '_Code' + str(BuJian_NO_i)
BuJian_Lei[name_Code] = " " + " " + Code1 + "\n" + " " + " " + Code2 + '\n\n'
print(BuJian_Lei[name_Code])
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 紧耦合模式
# 窗口设置类
class SetCK_D(Toplevel):
    """'Win Setup' dialog (tightly-coupled mode).

    Collects the target window's options (name, geometry, resizability,
    min/max size, toolwindow/topmost/zoomed flags, transparency, icon and
    canvas grid width) and, on OK, compiles them into the generated
    program's header (global Str_BianYi) and footer (global Str_BianYi_End).
    """

    def __init__(self, Parent):
        super().__init__()
        self.title('Win Setup')
        global canva_H
        global canva_W
        self.Parent = Parent  # keep an explicit reference to the parent window
        # Keep the dialog on top.  The original called the non-existent
        # method 'Propertys'; 'attributes' is the Tk wm-attributes API.
        self.attributes("-topmost", 1)
        self.focus_set()
        w = 800
        h = 500
        S_width = self.winfo_screenwidth()
        S_height = self.winfo_screenheight()
        # Center the dialog on the screen (slightly above center).
        size = '%dx%d+%d+%d' % (w, h, (S_width - w) / 2, (S_height - h) / 2 - 30)
        self.geometry(size)
        self.resizable(width=False, height=False)
        # Dialog parameters: the generated window size comes from the canvas.
        self.Tv_ck_width = canva_W
        self.Tv_ck_height = canva_H
        self.Font = ('Consol', '12')
        self.Set_UI()

    def _yes_no(self, variable, x_yes, x_no, y):
        # Create and place a Yes/No Radiobutton pair bound to *variable*
        # (value 1 = Yes, 2 = No); returns (yes_button, no_button).
        yes = Radiobutton(self, text="Yes", variable=variable, value=1)
        no = Radiobutton(self, text="No", variable=variable, value=2)
        yes.place(x=x_yes, y=y)
        no.place(x=x_no, y=y)
        return yes, no

    def Set_UI(self):
        """Build every widget of the dialog; coordinates match the original layout."""
        self.JG_x = 210  # x offset of the input column
        # Interface name -----------------------------------------------------
        self.Lab_ck_name = Label(self, text='Interface Name', font=self.Font)
        self.Lab_ck_name.place(x=0, y=6)
        self.Tv_ck_name = StringVar()
        self.Ent_ck_name = Entry(self, textvariable=self.Tv_ck_name, font=self.Font, width=25)
        self.Ent_ck_name.place(x=self.JG_x, y=6)
        # Interface width (read-only: taken from the canvas) -----------------
        self.Lab_ck_width = Label(self, text='Interface Width', font=self.Font)
        self.Lab_ck_width.place(x=0, y=6 + 40)
        self.Lab_ck_width = Label(self, text=self.Tv_ck_width, font=self.Font, width=25, bg='DeepSkyBlue')
        self.Lab_ck_width.place(x=self.JG_x, y=6 + 40)
        # Interface height (read-only) ---------------------------------------
        self.Lab_ck_height = Label(self, text='Interface Height', font=self.Font)
        self.Lab_ck_height.place(x=0, y=6 + 80)
        self.Lab_ck_height = Label(self, text=self.Tv_ck_height, font=self.Font, width=25, bg='DeepSkyBlue')
        self.Lab_ck_height.place(x=self.JG_x, y=6 + 80)
        # Initial window position --------------------------------------------
        self.Lab_ck_init_x = Label(self, text='Initial X coordinate', font=self.Font)
        self.Lab_ck_init_x.place(x=0, y=120)
        self.Tv_ck_init_x = StringVar()
        self.Ent_ck_init_x = Entry(self, textvariable=self.Tv_ck_init_x, font=self.Font, width=25)
        self.Ent_ck_init_x.place(x=self.JG_x, y=120)
        self.Lab_ck_init_y = Label(self, text='Initial Y coordinate', font=self.Font)
        self.Lab_ck_init_y.place(x=0, y=160)
        self.Tv_ck_init_y = StringVar()
        self.Ent_ck_init_y = Entry(self, textvariable=self.Tv_ck_init_y, font=self.Font, width=25)
        self.Ent_ck_init_y.place(x=self.JG_x, y=160)
        # Resizability flags -------------------------------------------------
        self.Lab_ck_is_width_not_change = Label(self, text='Is width not changeable', font=self.Font)
        self.Lab_ck_is_width_not_change.place(x=0, y=200)
        self.Tv_ck_is_width_not_change = IntVar()
        self.Rad_ck_is_width_not_change1, self.Rad_ck_is_width_not_change2 = \
            self._yes_no(self.Tv_ck_is_width_not_change, self.JG_x + 30, self.JG_x + 120, 200)
        self.Lab_ck_is_height_not_change = Label(self, text='Is height not changeable', font=self.Font)
        self.Lab_ck_is_height_not_change.place(x=0, y=240)
        self.Tv_ck_is_height_not_change = IntVar()
        self.Rad_ck_is_height_not_change1, self.Rad_ck_is_height_not_change2 = \
            self._yes_no(self.Tv_ck_is_height_not_change, self.JG_x + 30, self.JG_x + 120, 240)
        # Minimum size -------------------------------------------------------
        self.Lab_ck_is_minsize = Label(self, text='Is minsize interface', font=self.Font)
        self.Lab_ck_is_minsize.place(x=0, y=280)
        self.Lab_ck_is_minsize = Label(self, text='X', font=self.Font)
        self.Lab_ck_is_minsize.place(x=160 + 70, y=320)
        self.Tv_ck_is_minsize = IntVar()
        self.Rad_ck_is_minsize1, self.Rad_ck_is_minsize2 = \
            self._yes_no(self.Tv_ck_is_minsize, self.JG_x + 30, self.JG_x + 120, 280)
        self.Tv_ck_init_minsize_w = StringVar()
        self.Ent_ck_init_minsize_w = Entry(self, textvariable=self.Tv_ck_init_minsize_w, font=self.Font, width=18)
        self.Ent_ck_init_minsize_w.place(x=6 + 70, y=320)
        self.Tv_ck_init_minsize_h = StringVar()
        self.Ent_ck_init_minsize_h = Entry(self, textvariable=self.Tv_ck_init_minsize_h, font=self.Font, width=18)
        self.Ent_ck_init_minsize_h.place(x=180 + 70, y=320)
        # Maximum size -------------------------------------------------------
        self.Lab_ck_is_maxsize = Label(self, text='Is maxsize interface', font=self.Font)
        self.Lab_ck_is_maxsize.place(x=0, y=360)
        self.Lab_ck_is_maxsize = Label(self, text='X', font=self.Font)
        self.Lab_ck_is_maxsize.place(x=160 + 70, y=400)
        self.Tv_ck_is_maxsize = IntVar()
        self.Rad_ck_is_maxsize1, self.Rad_ck_is_maxsize2 = \
            self._yes_no(self.Tv_ck_is_maxsize, self.JG_x + 30, self.JG_x + 120, 360)
        self.Tv_ck_init_maxsize_w = StringVar()
        self.Ent_ck_init_maxsize_w = Entry(self, textvariable=self.Tv_ck_init_maxsize_w, font=self.Font, width=18)
        self.Ent_ck_init_maxsize_w.place(x=6 + 70, y=400)
        self.Tv_ck_init_maxsize_h = StringVar()
        self.Ent_ck_init_maxsize_h = Entry(self, textvariable=self.Tv_ck_init_maxsize_h, font=self.Font, width=18)
        self.Ent_ck_init_maxsize_h.place(x=180 + 70, y=400)
        # Toolwindow flag ----------------------------------------------------
        self.Lab_ck_is_toolwindow = Label(self, text='Is interface toolwindow', font=self.Font)
        self.Lab_ck_is_toolwindow.place(x=0, y=440)
        self.Tv_ck_is_toolwindow = IntVar()
        self.Rad_ck_is_toolwindow1, self.Rad_ck_is_toolwindow2 = \
            self._yes_no(self.Tv_ck_is_toolwindow, self.JG_x + 30, self.JG_x + 120, 440)
        # Right-hand column: topmost / zoomed / transparency -----------------
        self.Lab_ck_is_topmost = Label(self, text='Is interface topmost', font=self.Font)
        self.Lab_ck_is_topmost.place(x=self.JG_x + 230, y=6)
        self.Tv_ck_is_topmost = IntVar()
        self.Rad_ck_is_topmost1, self.Rad_ck_is_topmost2 = \
            self._yes_no(self.Tv_ck_is_topmost, self.JG_x + 430, self.JG_x + 520, 6)
        self.Lab_ck_is_zoomed = Label(self, text='Is initial zoomed', font=self.Font)
        self.Lab_ck_is_zoomed.place(x=self.JG_x + 230, y=6 + 40)
        self.Tv_ck_is_zoomed = IntVar()
        self.Rad_ck_is_zoomed1, self.Rad_ck_is_zoomed2 = \
            self._yes_no(self.Tv_ck_is_zoomed, self.JG_x + 430, self.JG_x + 520, 6 + 40)
        self.Lab_ck_is_transparency = Label(self, text='Interface transparency', font=self.Font)
        self.Lab_ck_is_transparency.place(x=self.JG_x + 230, y=6 + 80)
        self.Tv_ck_is_transparency = IntVar()
        self.Rad_ck_is_transparency1, self.Rad_ck_is_transparency2 = \
            self._yes_no(self.Tv_ck_is_transparency, self.JG_x + 430, self.JG_x + 520, 6 + 80)
        self.V_Scal_ck_is_transparency = DoubleVar()
        self.Scal_ck_is_transparency = Scale(self, from_=0, to=1, orient=HORIZONTAL,
                                             variable=self.V_Scal_ck_is_transparency,
                                             length=330, width=10, resolution=0.01)
        self.Scal_ck_is_transparency.place(x=self.JG_x + 230, y=6 + 110)
        # Window icon --------------------------------------------------------
        self.Lab_ck_set_icon = Label(self, text='Set interface icon', font=self.Font)
        self.Lab_ck_set_icon.place(x=self.JG_x + 230, y=6 + 160)
        self.Tv_ck_set_icon = StringVar()
        self.Ent_ck_set_icon = Entry(self, textvariable=self.Tv_ck_set_icon, font=self.Font, width=36)
        self.Ent_ck_set_icon.place(x=self.JG_x + 230, y=6 + 200)
        self.Btn_ck_set_icon = Button(self, text='...', font=('Consol', '10'), width=6, height=1,
                                      command=self.More_Icon)
        self.Btn_ck_set_icon.place(x=self.JG_x + 530, y=6 + 196)
        # Canvas grid width --------------------------------------------------
        self.Lab_ck_set_grid = Label(self, text='Set the grid width', font=self.Font)
        self.Lab_ck_set_grid.place(x=self.JG_x + 230, y=6 + 240)
        self.Tv_ck_set_grid = IntVar()
        self.Comb_ck_set_grid = ttk.Combobox(self, width=23, textvariable=self.Tv_ck_set_grid)
        self.Comb_ck_set_grid['values'] = (10, 20, 30, 40, 50, 60)
        self.Comb_ck_set_grid.place(x=self.JG_x + 400, y=6 + 240)
        self.Comb_ck_set_grid.current(1)
        # OK / Cancel --------------------------------------------------------
        self.Lab_OK = Label(self, text='____________________________________________', font=('Consol', '16'))
        self.Lab_OK.place(x=self.JG_x + 230, y=6 + 280)
        self.Btn_ck_OK = Button(self, text='OK', font=('Consol', '13'), width=6, height=1, command=self.CK_OK)
        self.Btn_ck_OK.place(x=self.JG_x + 300, y=6 + 450)
        self.Btn_ck_Cancel = Button(self, text='Cancel', font=('Consol', '13'), width=6, height=1,
                                    command=self.CK_Cancel)
        self.Btn_ck_Cancel.place(x=self.JG_x + 430, y=6 + 450)

    def More_Icon(self):
        """Shift the dialog aside, let the user pick a GIF icon, then move
        the dialog back and store the chosen path in the icon entry."""
        w = 800
        h = 500
        S_width = self.winfo_screenwidth()
        S_height = self.winfo_screenheight()
        # Move right so the file picker is not covered by this dialog.
        size = '%dx%d+%d+%d' % (w, h, (S_width - w) / 2 + 600, (S_height - h) / 2 - 30)
        self.geometry(size)
        get_more_icon = Get_File_Name_GIF()
        icon = get_more_icon.Get_Name()
        self.Tv_ck_set_icon.set(icon)
        # Restore the centered position.
        size = '%dx%d+%d+%d' % (w, h, (S_width - w) / 2, (S_height - h) / 2 - 30)
        self.geometry(size)

    def CK_OK(self):
        """Read every option, compile the generated program's header/footer
        into the globals Str_BianYi / Str_BianYi_End, then close the dialog."""
        global Str_BianYi
        global ck_name
        global ck_init_x
        global ck_init_y
        global ck_is_width_not_change
        global ck_is_height_not_change
        global ck_is_minsize
        global ck_init_minsize_w
        global ck_init_minsize_h
        global ck_is_maxsize
        global ck_init_maxsize_w
        global ck_init_maxsize_h
        global ck_is_toolwindow
        global ck_is_topmost
        global ck_is_zoomed
        global ck_is_transparency
        global ck_scal_transparency
        global ck_set_icon
        global ck_set_grid
        global ck_is_son_win
        global canva_W
        global canva_H
        global WangGe_KuanDu
        global Str_BianYi_End
        ck_name = self.Ent_ck_name.get()
        ck_init_x = self.Tv_ck_init_x.get()
        ck_init_y = self.Tv_ck_init_y.get()
        ck_is_width_not_change = self.Tv_ck_is_width_not_change.get()
        ck_is_height_not_change = self.Tv_ck_is_height_not_change.get()
        ck_is_minsize = self.Tv_ck_is_minsize.get()
        ck_init_minsize_w = self.Tv_ck_init_minsize_w.get()
        ck_init_minsize_h = self.Tv_ck_init_minsize_h.get()
        ck_is_maxsize = self.Tv_ck_is_maxsize.get()
        ck_init_maxsize_w = self.Tv_ck_init_maxsize_w.get()
        ck_init_maxsize_h = self.Tv_ck_init_maxsize_h.get()
        ck_is_toolwindow = self.Tv_ck_is_toolwindow.get()
        ck_is_topmost = self.Tv_ck_is_topmost.get()
        ck_is_zoomed = self.Tv_ck_is_zoomed.get()
        ck_is_transparency = self.Tv_ck_is_transparency.get()
        ck_scal_transparency = self.V_Scal_ck_is_transparency.get()
        ck_set_icon = self.Tv_ck_set_icon.get()
        ck_set_grid = self.Tv_ck_set_grid.get()
        global tap
        line_next = "\n"
        # Import header of the generated file.  The original appended a stray
        # line-continuation backslash after the string (a syntax error) and
        # indented the imports inside the triple-quoted literal, which made
        # the generated file unrunnable.
        Str_Import = "# Use the PyDraw to Design UI" + """
# © JY.Lin
from tkinter import *
from tkinter import ttk  # (when you want to use ttk)
from tkinter.scrolledtext import ScrolledText  # (when you want to use scrolledtext)
from tkinter.messagebox import *  # (when you want to use messagebox)
import tkinter.colorchooser  # (when you want to use colorchooser)
import tkinter.filedialog  # (when you want to use filedialog)
import tkinter as tk  # (when you want to use the short-call)
"""
        if self.Ent_ck_name.get() == '':
            ck_name = "PyDraw"
        Str_Main_CK = "class " + str(ck_name) + "(Tk):" + line_next \
            + tap + "def __init__(self): " + line_next \
            + tap + tap + "super().__init__() " + line_next \
            + tap + tap + "self.title(\"" + str(ck_name) + "\")" + line_next
        if ck_init_x == '':
            ck_init_x = 0
        # The original compared against 0; an empty Entry yields ''.
        if ck_init_y == '':
            ck_init_y = 0
        global Distance
        global bar_W
        global bar_menu_W
        # Toolbar (and optional menu bar) height eats into the usable canvas.
        if zi_menu1_sum == 0:
            Distance = bar_W
        else:
            Distance = bar_W + bar_menu_W
        # Geometry of the generated window, centered on screen.  The original
        # emitted winfo_screenwidth() for S_height as well.
        Str_Coords = tap + tap + "S_width = self.winfo_screenwidth()" + line_next \
            + tap + tap + "S_height = self.winfo_screenheight()" + line_next \
            + tap + tap + "Size = '%dx%d+%d+%d' % (" + str(canva_W) + ", " + str(canva_H - Distance) + ", " + "(S_width - " + str(canva_W) + ") /2, " \
            + "(S_height - " + str(canva_H - Distance) + ") /2)" + line_next \
            + tap + tap + "self.geometry(Size)" + line_next
        # NOTE(review): value 1 is the "Yes" radio for "not changeable", yet
        # the branches below emit resizable(width=TRUE, ...) for that case --
        # the mapping looks inverted relative to the label text.  Kept as-is
        # to preserve the established behaviour; confirm intent.
        Str_width_height_change = ''
        if ck_is_width_not_change == 1:
            if ck_is_height_not_change == 1:
                pass
            elif ck_is_height_not_change == 2:
                Str_width_height_change = tap + tap + "self.resizable(width=TRUE, height=False)" + line_next
        elif ck_is_width_not_change == 2:
            if ck_is_height_not_change == 1:
                Str_width_height_change = tap + tap + "self.resizable(width=False, height=TRUE)" + line_next
            elif ck_is_height_not_change == 2:
                Str_width_height_change = tap + tap + "self.resizable(width=False, height=False)" + line_next
        if ck_is_minsize == 1:
            Str_Min_Size = tap + tap + "Min_W = " + str(ck_init_minsize_w) + line_next \
                + tap + tap + "Min_H = " + str(ck_init_minsize_h) + line_next \
                + tap + tap + "self.minsize(Min_W, Min_H)" + line_next
        else:
            Str_Min_Size = ""
        if ck_is_maxsize == 1:
            Str_Max_Size = tap + tap + "Max_W = " + str(ck_init_maxsize_w) + line_next \
                + tap + tap + "Max_H = " + str(ck_init_maxsize_h) + line_next \
                + tap + tap + "self.maxsize(Max_W, Max_H)" + line_next
        else:
            Str_Max_Size = ""
        # wm attributes -- the original generated calls to the non-existent
        # 'Propertys' method; 'attributes' is the real Tk API.
        if ck_is_toolwindow == 1:
            Str_is_toolwindow = tap + tap + "self.attributes(\"-toolwindow\", 1)" + line_next
        else:
            Str_is_toolwindow = ''
        if ck_is_topmost == 1:
            Str_is_topmost = tap + tap + "self.attributes(\"-topmost\", 1)" + line_next
        else:
            Str_is_topmost = ''
        if ck_is_zoomed == 1:
            Str_is_zoomed = tap + tap + "self.state(\"zoomed\")" + line_next
        else:
            Str_is_zoomed = ''
        if ck_is_transparency == 1:
            Str_is_transparency = tap + tap + "self.attributes(\"-alpha\", " + str(ck_scal_transparency) + ")" + line_next
        else:
            Str_is_transparency = ''
        # The original tested ck_set_icon == 1, but the Entry yields a string,
        # so the icon line was never emitted.
        if ck_set_icon != '':
            Str_set_icon = tap + tap + "self.iconbitmap('" + str(ck_set_icon) + "')" + line_next
        else:
            Str_set_icon = ''
        WangGe_KuanDu = int(ck_set_grid)
        Str_set_UI = tap + tap + "self.SetUI()" + line_next
        Str_def_UI = tap + "def SetUI(self):" + line_next
        # Assembled header of the generated program.
        Str_BianYi = Str_Import + line_next + Str_Main_CK + Str_Coords + Str_width_height_change \
            + Str_Min_Size + Str_Max_Size + Str_is_toolwindow + Str_is_topmost + Str_is_zoomed \
            + Str_is_transparency + Str_set_icon + Str_set_UI + line_next + Str_def_UI
        # Footer.  __init__ already calls self.SetUI(); the original emitted a
        # second explicit PyPa.SetUI() call, which built every widget twice.
        Str_BianYi_End = line_next \
            + "if __name__ == '__main__':" + line_next \
            + tap + "PyPa = " + str(ck_name) + "()" + line_next \
            + tap + "PyPa.mainloop()" + line_next
        self.destroy()

    def CK_Cancel(self):
        """Close the dialog without touching the compiled globals."""
        self.destroy()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# name = "XuanZhong" + str(XuanZhong_sum)
# XuanZhong[name] = (Button1[KJ], 'Button', 'button', Num_i, Button1)
class SJ_Dictionary:
    """Records an event binding for the currently selected widget.

    Each supported event keyword maps to a Tk event sequence and to the
    module-level dict that collects bindings of that kind.  The stored value
    is ``(widget_name, ".bind('<seq>', event_handler)", kind, number)``,
    which the compiler later joins into a generated source line.
    """

    def SJ_Dict(self, str_SJ):
        global XuanZhong
        global XuanZhong_sum
        # Exactly one widget must be selected; otherwise do nothing
        # (matches the original behaviour).
        if XuanZhong_sum != 1:
            return
        # keyword -> (Tk event sequence, registry dict).  The leading spaces
        # the original put in "Double-2" / "B1-Motion" produced invalid bind
        # sequences in the generated code and have been removed.
        # NOTE(review): "control_mouseWheel" is stored in SJ_shift_mouseWheel,
        # exactly as the original did -- there is no dedicated Control dict;
        # confirm whether that sharing is intentional.
        event_table = {
            "button_press_1": ("1", SJ_button_press_1),
            "button_release_1": ("ButtonRelease-1", SJ_button_release_1),
            "button_press_right_1": ("3", SJ_button_press_right_1),
            "button_press_left_2": ("Double-1", SJ_button_press_left_2),
            "button_press_right_2": ("Double-3", SJ_button_press_right_2),
            "button_press_middle_1": ("2", SJ_button_press_middle_1),
            "button_press_middle_2": ("Double-2", SJ_button_press_middle_2),
            "button_press_left_move": ("B1-Motion", SJ_button_press_left_move),
            "cursor_enter": ("Enter", SJ_cursor_enter),
            "cursor_leave": ("Leave", SJ_cursor_leave),
            "get_key_focus": ("FocusIn", SJ_get_key_focus),
            "lose_key_focus": ("FocusOut", SJ_lose_key_focus),
            "press_a_key": ("Key", SJ_press_a_key),
            "press_enter_key": ("Return", SJ_press_enter_key),
            "when_control_change": ("Configure", SJ_when_control_change),
            "control_mouseWheel": ("Control-MouseWheel", SJ_shift_mouseWheel),
            "shift_mouseWheel": ("Shift-MouseWheel", SJ_shift_mouseWheel),
        }
        entry = event_table.get(str_SJ)
        if entry is None:
            return  # unknown keyword: the original fell through silently
        SJ_code, registry = entry
        # XuanZhong1 = (widget, Kind, kind, number, per-kind dict)
        xuan = XuanZhong["XuanZhong" + str(1)]
        BuJian_LeiXing_XiaoXie = xuan[2]
        BuJian_NO_i = xuan[3]
        BuJian_Lei = xuan[4]
        KJ_name = str(BuJian_LeiXing_XiaoXie) + '_name' + str(BuJian_NO_i)
        registry[KJ_name] = (str(BuJian_Lei[KJ_name]),
                            ".bind('<" + SJ_code + ">', event_handler)",
                            BuJian_LeiXing_XiaoXie, BuJian_NO_i)
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 判断类
class Judge:
def Judge_If_Delete(self, BuJian_LeiXing_XiaoXie, BuJian_NO_i):
global Button1_List_Num
global Canvas1_List_Num
global Checkbutton1_List_Num
global Combobox1_List_Num
global Entry1_List_Num
global Frame1_List_Num
global Label1_List_Num
global LabelFrame1_List_Num
global Listbox1_List_Num
global Menu1_List_Num
global Message1_List_Num
global PanedWindow1_List_Num
global Radiobutton1_List_Num
global Scale1_List_Num_X
global Scale1_List_Num_Y
global Spinbox1_List_Num
global Text1_List_Num
if BuJian_LeiXing_XiaoXie == "button":
if BuJian_NO_i in Button1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "canvas":
if BuJian_NO_i in Canvas1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "checkbutton":
if BuJian_NO_i in Checkbutton1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "combobox":
if BuJian_NO_i in Combobox1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "entry":
if BuJian_NO_i in Entry1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "frame":
if BuJian_NO_i in Frame1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "label":
if BuJian_NO_i in Label1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "labelFrame":
if BuJian_NO_i in LabelFrame1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "listbox":
if BuJian_NO_i in Listbox1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "menu":
if BuJian_NO_i in Menu1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "message":
if BuJian_NO_i in Message1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "panedWindow":
if BuJian_NO_i in PanedWindow1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "radiobutton":
if BuJian_NO_i in Radiobutton1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "scale_x":
if BuJian_NO_i in Scale1_List_Num_X:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "scale_y":
if BuJian_NO_i in Scale1_List_Num_Y:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "spinbox":
if BuJian_NO_i in Spinbox1_List_Num:
return TRUE
else:
return FALSE
if BuJian_LeiXing_XiaoXie == "text":
if BuJian_NO_i in Text1_List_Num:
return TRUE
else:
return FALSE
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# 事件处理类
# Event-dispatch helper: emits generated source for recorded event bindings
# and rebinds callbacks after a widget object is recreated.
class SJ_ChuLi:
    def SJ_Bian_Yi(self, SJ_Dictionary, Text_1):
        """Append a generated code line to Text_1 for each recorded binding.

        SJ_Dictionary values are tuples (code_head, code_tail, widget_type,
        widget_no); bindings whose widget has been deleted (per
        Judge.Judge_If_Delete) are skipped.
        """
        judge = Judge()
        tap = "    "
        for i in SJ_Dictionary:
            a = SJ_Dictionary[i]
            # two indent units -> the generated line sits inside a method body
            sj_code = tap + tap + a[0] + a[1] + "\n"
            BuJian_LeiXing_XiaoXie = a[2]
            BuJian_NO_i = a[3]
            if judge.Judge_If_Delete(BuJian_LeiXing_XiaoXie, BuJian_NO_i) == FALSE:
                Text_1.insert(END, sj_code)
        # KJ_name = str(BuJian_LeiXing_XiaoXie) + '_name' + str(BuJian_NO_i)
        # BuJian_Lei[KJ_name]
    def SJ_New(self, BuJian_LeiXing_XiaoXie, BuJian_NO_i, BuJian_Lei):
        """Point every recorded binding for the given widget at its fresh
        object in BuJian_Lei (looked up via the '<type>_name<no>' key)."""
        KJ_name = str(BuJian_LeiXing_XiaoXie) + '_name' + str(BuJian_NO_i)
        # all per-event binding dictionaries, scanned in turn
        SJ_Dictionary_Zong = (SJ_button_press_1,
                              SJ_button_release_1,
                              SJ_button_press_right_1,
                              SJ_button_press_left_2,
                              SJ_button_press_right_2,
                              SJ_button_press_middle_1,
                              SJ_button_press_middle_2,
                              SJ_button_press_left_move,
                              SJ_cursor_enter,
                              SJ_cursor_leave,
                              SJ_get_key_focus,
                              SJ_lose_key_focus,
                              SJ_press_a_key,
                              SJ_press_enter_key,
                              SJ_when_control_change,
                              SJ_press_space_key,
                              SJ_shift_mouseWheel,
                              SJ_press_combinatorial_key)
        for SJ_Dictionary in SJ_Dictionary_Zong:
            for i in SJ_Dictionary:
                a = SJ_Dictionary[i]
                SJ_LeiXing_XiaoXie = a[2]
                SJ_NO_i = a[3]
                if SJ_LeiXing_XiaoXie == BuJian_LeiXing_XiaoXie:
                    if SJ_NO_i == BuJian_NO_i:
                        # replace only the widget reference; keep code/type/no
                        SJ_Dictionary[i] = (BuJian_Lei[KJ_name], a[1], a[2], a[3])
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Menu-string generation class
class Menu_Str:
    def Menu_Str(self):
        """Assemble the generated Tkinter menu source from the module-level
        Menu1 / Menu1_ListCode state and return it as one string."""
        global Menu1
        global Menu1_ListCode
        global zi_menu1_sum
        global Str_Menu
        global tap
        Str_Menu = ""
        str_bar = tap + tap + "Menubar = Menu(self)" + "\n"
        # range(a, b, i) runs from a up to (but not including) b with step i
        for i in range(1, zi_menu1_sum+1, 1):
            zi_menu_tearoff_name = "zi_menu_tearoff_name" + str(i)
            zi_menu_add_cascade_name = "zi_menu_add_cascade_name" + str(i)
            tearoff = Menu1[zi_menu_tearoff_name]
            add_cascade = Menu1[zi_menu_add_cascade_name]
            Code_tearoff = tap + tap + tearoff[0] + "\n"
            Code_add_cascade = tap + tap + add_cascade[0]
            Str_list = ""
            # collect the menu-entry lines belonging to sub-menu i
            for mlist_j in Menu1_ListCode:
                # Menu1_ListCode[menu_list_code_name] = (Code, zi_menu1_sum, zong+1)
                menu_list = Menu1_ListCode[mlist_j]
                if i == menu_list[1]:
                    Str_list = Str_list + tap + tap + menu_list[0] + "\n"
            Str_son_menu = Code_tearoff + Str_list + Code_add_cascade
            Str_Menu = Str_Menu + Str_son_menu + "\n"
        Str_Conifg = "\n" + tap + tap + "self.config(menu=Menubar)" + "\n\n"
        Str_Menu = str_bar + "\n" + Str_Menu + Str_Conifg
        return Str_Menu
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Script entry point: build the designer window and start the Tk event loop.
if __name__ == '__main__':
    PypA = PyDraw()
    PypA.HuaBu_YiDong()
    PypA.mainloop()
| 289,749
| 38.784429
| 184
|
py
|
GSA
|
GSA-main/GSA_CVPR/utils.py
|
import torch
import torch.nn.functional as F
def cutmix_data(x, y, Basic_model,alpha=1.0, cutmix_prob=0.5,):
    """CutMix a batch: paste a random patch from a shuffled copy of `x`
    into `x` (in place) and return (x, y_a, y_b, lam).

    lam is the surviving-area fraction of the original image, to be used
    as the mixing weight between the y_a and y_b losses.
    NOTE(review): `cutmix_prob` is accepted but never used — confirm the
    caller applies the probability gate itself.
    """
    assert alpha > 0
    # generate mixed sample
    lam = np.random.beta(alpha, alpha)
    batch_size = x.size()[0]
    index = torch.randperm(batch_size)
    if torch.cuda.is_available():
        index = index.cuda()
    y_a, y_b = y, y[index]
    # Basic_model is forwarded to rand_bbox, which runs a feature pass on x
    bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam,x,Basic_model)
    #for ii in range(batch_size):x[ii,:,bbx1[ii]:bbx2[ii],bby1[ii]:bby2[ii]]=x[index][ii,:,bbx1[index][ii]:bbx2[index][ii],bby1[index][ii]:bby2[index][ii]]
    x[:, :, bbx1:bbx2, bby1:bby2] = x[index, :, bbx1:bbx2, bby1:bby2]
    # adjust lambda to exactly match pixel ratio
    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))
    return x, y_a, y_b, lam
def rand_bbox(size, lam, x, Basic_model):
    """Return a random CutMix bounding box (bbx1, bby1, bbx2, bby2).

    The box area fraction is approximately (1 - lam); the center is drawn
    uniformly and the box is clipped to the image, so the realized area can
    be smaller (the caller recomputes lam from the returned coordinates).

    Fix: ``np.int`` was removed in NumPy 1.24 — use the builtin ``int``.
    """
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1.0 - lam)
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # NOTE(review): feat is computed but unused (the saliency-based center
    # selection below is commented out). Kept so the model forward pass /
    # any eval-mode side effects remain unchanged — confirm it can be removed.
    feat = feat_normalized(Basic_model, x).reshape(-1, W, H)
    # cx=torch.mean(feat,dim=2).max(dim=1)[1].cpu()
    # cy=torch.mean(feat,dim=1).max(dim=1)[1].cpu()
    # uniform center
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
def flip_inner(x, flip1, flip2):
    """Horizontally mirror the top and/or bottom half of each 32x32 image.

    x is (N, 3, 32, 32); flip1 mirrors rows 0-15, flip2 mirrors rows 16-31.
    With both flags false the input is returned unchanged (round-trip
    through the half-split is lossless).
    """
    n = x.shape[0]
    halves = x.view(n, 3, 2, 16, 32)
    top = halves[:, :, 0]
    bottom = halves[:, :, 1]
    if flip1:
        top = torch.flip(top, (3,))      # mirror the width axis
    if flip2:
        bottom = torch.flip(bottom, (3,))
    stitched = torch.stack((top, bottom), dim=2)
    return stitched.reshape(n, 3, 32, 32)
def RandomFlip(x, num):
    """Return `num` stacked views of the batch: an augmented copy plus
    half-image flip variants built with flip_inner.

    NOTE(review): relies on a module-level `simclr_aug` that is not defined
    or imported in this file — confirm it is injected elsewhere.
    """
    # print(x.shape)
    #aug_x = simclr_aug(x)
    # x=simclr_aug(x)
    X = []
    # print(x.shape)
    # for i in range(4):
    X.append(simclr_aug(x))
    X.append(flip_inner(simclr_aug(x), 1, 1))
    X.append(flip_inner(x, 0, 1))
    X.append(flip_inner(x, 1, 0))
    # else:
    #    x1=rot_inner(x,0,1)
    # only the first `num` of the four candidate views are kept
    return torch.cat([X[i] for i in range(num)], dim=0)
def rot_inner(x):
    """Return a (4N, 3, 32, 32) stack: the original batch plus three
    variants where one or both image halves are rotated 180 degrees.

    The split is taken after transposing H/W, so the two halves are
    columns 0-15 and 16-31 of the original image (presumably left/right
    halves — confirm against the visualisation helpers).
    """
    num = x.shape[0]
    # print(num)
    R = x.repeat(4, 1, 1, 1)
    a = x.permute(0, 1, 3, 2)
    a = a.view(num, 3, 2, 16, 32)
    import pdb
    # pdb.set_trace()
    # imshow(torchvision.utils.make_grid(a))
    a = a.permute(2, 0, 1, 3, 4)
    s1 = a[0]  # .permute(1,0, 2, 3)#, 4)
    s2 = a[1]  # .permute(1,0, 2, 3)
    # 180-degree rotation of each half
    a = torch.rot90(a, 2, (3, 4))
    s1_1 = a[0]  # .permute(1,0, 2, 3)#, 4)
    s2_2 = a[1]  # .permute(1,0, 2, 3)
    # S0 = torch.cat((s1.unsqueeze(2), s2.unsqueeze(2)), dim=2).reshape(num, 1, 28, 28).permute(0, 1, 3, 2)
    # slot 1: second half rotated; slot 2: both rotated; slot 3: first half rotated
    R[3 * num:] = torch.cat((s1_1.unsqueeze(2), s2.unsqueeze(2)), dim=2).reshape(num, 3, 32, 32).permute(0, 1, 3, 2)
    R[num:2 * num] = torch.cat((s1.unsqueeze(2), s2_2.unsqueeze(2)), dim=2).reshape(num, 3, 32, 32).permute(0, 1, 3, 2)
    R[2 * num:3 * num] = torch.cat((s1_1.unsqueeze(2), s2_2.unsqueeze(2)), dim=2).reshape(num, 3, 32, 32).permute(0, 1,
                                                                                                                 3, 2)
    return R
def square_diagonal_16(x):
    """Return a (16N, 3, 32, 32) stack of quadrant-permutation variants.

    Each 32x32 image is split into four 16x16 quadrants (TL, TR, BL, BR,
    flat indices 0-3). Slot 0 is the untouched batch; slots 1-15 reassemble
    the quadrants according to the permutation table below, where for a
    permutation p the output layout is TL=p[0], TR=p[1], BL=p[2], BR=p[3]
    (slot 1 uses the identity permutation and reproduces the input).

    Refactor: the fifteen copy-pasted `vvvN` constructions are collapsed
    into a single loop; behavior is unchanged.
    """
    num = x.shape[0]
    R = x.repeat(16, 1, 1, 1)
    # (N, 3, 4, 16, 16): quadrants in order TL, TR, BL, BR
    blocks = x.unfold(2, 16, 16).unfold(3, 16, 16).reshape(-1, 3, 4, 16, 16)
    # same permutations, in the same slot order, as the original index1..index15
    perms = [
        [0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 3, 1], [0, 2, 1, 3],
        [0, 3, 1, 2], [0, 3, 2, 1], [1, 0, 2, 3], [1, 0, 3, 2],
        [1, 2, 3, 0], [1, 2, 0, 3], [1, 3, 2, 0], [1, 3, 0, 2],
        [2, 0, 1, 3], [2, 0, 3, 1], [2, 1, 0, 3],
    ]
    for k, perm in enumerate(perms, start=1):
        b = blocks[:, :, perm].reshape(-1, 3, 2, 2, 16, 16)
        b = torch.cat((b[:, :, 0], b[:, :, 1]), dim=3)   # (N, 3, 2, 32, 16)
        b = torch.cat((b[:, :, 0], b[:, :, 1]), dim=3)   # (N, 3, 32, 32)
        R[k * num:(k + 1) * num] = b
    return R
def square_diagonal(x):
    """Return a (4N, 3, 32, 32) stack: the batch plus three quadrant-swap
    variants.

    Quadrants are TL, TR, BL, BR (flat indices 0-3). For a permutation p
    the output layout is TL=p[0], TR=p[1], BL=p[2], BR=p[3]:
    slot 1 transposes the block grid, slot 2 swaps TL<->BR, slot 3 reverses
    all four quadrants.

    Refactor: the three copy-pasted `vvvN` constructions are collapsed into
    a loop; dead commented-out code and the unused `pdb` import are dropped.
    Behavior is unchanged.
    """
    num = x.shape[0]
    R = x.repeat(4, 1, 1, 1)
    blocks = x.unfold(2, 16, 16).unfold(3, 16, 16).reshape(-1, 3, 4, 16, 16)
    perms = [[0, 2, 1, 3], [3, 1, 2, 0], [3, 2, 1, 0]]
    for k, perm in enumerate(perms, start=1):
        b = blocks[:, :, perm].reshape(-1, 3, 2, 2, 16, 16)
        b = torch.cat((b[:, :, 0], b[:, :, 1]), dim=3)   # (N, 3, 2, 32, 16)
        b = torch.cat((b[:, :, 0], b[:, :, 1]), dim=3)   # (N, 3, 32, 32)
        R[k * num:(k + 1) * num] = b
    return R
def square_diagonal_repeat(x):
    """Return a (4N, 3, 32, 32) stack: the batch plus three variants that
    tile one quadrant into all four positions.

    Slot 1 repeats the TL quadrant, slot 2 the TR quadrant, slot 3 the BL
    quadrant (quadrant flat order is TL, TR, BL, BR).

    Refactor: the three copy-pasted `vvvN` constructions are collapsed into
    a loop; dead commented-out code and the unused `pdb` import are dropped.
    Behavior is unchanged.
    """
    num = x.shape[0]
    R = x.repeat(4, 1, 1, 1)
    blocks = x.unfold(2, 16, 16).unfold(3, 16, 16).reshape(-1, 3, 4, 16, 16)
    perms = [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]
    for k, perm in enumerate(perms, start=1):
        b = blocks[:, :, perm].reshape(-1, 3, 2, 2, 16, 16)
        b = torch.cat((b[:, :, 0], b[:, :, 1]), dim=3)   # (N, 3, 2, 32, 16)
        b = torch.cat((b[:, :, 0], b[:, :, 1]), dim=3)   # (N, 3, 32, 32)
        R[k * num:(k + 1) * num] = b
    return R
def rot_inner_hlip(x):
    """Return a (4N, 3, 32, 32) stack: the original batch plus three
    variants where the top and/or bottom half of each image is rotated
    180 degrees (same scheme as rot_inner but without the H/W transpose,
    so the halves are rows 0-15 and 16-31).
    """
    num = x.shape[0]
    # print(num)
    R = x.repeat(4, 1, 1, 1)
    a = x#.permute(0, 1, 3, 2)
    a = a.view(num, 3, 2, 16, 32)
    import pdb
    # pdb.set_trace()
    # imshow(torchvision.utils.make_grid(a))
    a = a.permute(2, 0, 1, 3, 4)
    s1 = a[0]  # .permute(1,0, 2, 3)#, 4)
    s2 = a[1]  # .permute(1,0, 2, 3)
    # 180-degree rotation of each half
    a = torch.rot90(a, 2, (3, 4))
    s1_1 = a[0]  # .permute(1,0, 2, 3)#, 4)
    s2_2 = a[1]  # .permute(1,0, 2, 3)
    # S0 = torch.cat((s1.unsqueeze(2), s2.unsqueeze(2)), dim=2).reshape(num, 1, 28, 28).permute(0, 1, 3, 2)
    # slot 1: bottom rotated; slot 2: both rotated; slot 3: top rotated
    R[3 * num:] = torch.cat((s1_1.unsqueeze(2), s2.unsqueeze(2)), dim=2).reshape(num, 3, 32, 32)#.permute(0, 1, 3, 2)
    R[num:2 * num] = torch.cat((s1.unsqueeze(2), s2_2.unsqueeze(2)), dim=2).reshape(num, 3, 32, 32)#.permute(0, 1, 3, 2)
    R[2 * num:3 * num] = torch.cat((s1_1.unsqueeze(2), s2_2.unsqueeze(2)), dim=2).reshape(num, 3, 32, 32)#.permute(0, 1,
    # 3, 2)
    return R
def Rotation(x, oop):
    """Build a stack of structural views of the batch and keep the first
    `oop * N` of them: four global rotations of the quadrant-shuffled views
    plus two rotations of the half-rotated views.
    """
    n = x.shape[0]
    diag = square_diagonal(x)
    inner = rot_inner(x)
    views = (
        diag,
        torch.rot90(diag, 1, (2, 3)),
        torch.rot90(diag, 2, (2, 3)),
        torch.rot90(diag, 3, (2, 3)),
        inner,
        torch.rot90(inner, 1, (2, 3)),
    )
    return torch.cat(views, dim=0)[:n * oop]
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
    """Un-normalize a (C, H, W) tensor (undoing mean 0.5 / std 0.5) and
    display it with matplotlib."""
    unnormalized = img / 2 + 0.5
    pixels = unnormalized.cpu().numpy()
    # matplotlib expects channels last: (H, W, C)
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
def feat_normalized_hat(model,x,task_id):
    """Per-image saliency map for a HAT-style model: channel-summed feature
    map, min-max normalized to [0, 1] and upsampled to the input size.
    Returns an (N, 1, H, W) tensor."""
    images = x.cuda(non_blocking=True)
    # task-conditioned forward (t = task id, s = gate scale)
    feat_map = model.f_train_feat_map(images,t=task_id,s=1)  # (N, C, H, W)
    N, Cf, Hf, Wf = feat_map.shape
    eval_train_map = feat_map.sum(1).view(N, -1)  # (N, Hf*Wf)
    # per-image min-max normalization
    eval_train_map = eval_train_map - eval_train_map.min(1, keepdim=True)[0]
    eval_train_map = eval_train_map / eval_train_map.max(1, keepdim=True)[0]
    eval_train_map = eval_train_map.view(N, 1, Hf, Wf)
    eval_train_map = F.interpolate(eval_train_map, size=images.shape[-2:], mode='bilinear')
    return eval_train_map
def feat_cam_normalized(model,x,y):
    """CAM-style saliency: weight the feature map by the classifier row of
    each sample's label y, then min-max normalize and upsample to the input
    size. Returns an (N, 1, H, W) tensor."""
    images = x.cuda(non_blocking=True)
    feat_map = model.module.f_train_feat_map(images)  # (N, C, H, W)
    N, Cf, Hf, Wf = feat_map.shape
    #import pdb
    #pdb.set_trace()
    # (N, 1, Cf) @ (N, Cf, Hf*Wf) -> class-activation map per sample
    feat_map=torch.bmm(model.module.linear.weight[y].unsqueeze(1),feat_map.reshape(N,Cf,Hf*Wf))
    eval_train_map = feat_map.sum(1).view(N, -1)  # (N, Hf*Wf)
    # per-image min-max normalization
    eval_train_map = eval_train_map - eval_train_map.min(1, keepdim=True)[0]
    eval_train_map = eval_train_map / eval_train_map.max(1, keepdim=True)[0]
    eval_train_map = eval_train_map.view(N, 1, Hf, Wf)
    eval_train_map = F.interpolate(eval_train_map, size=images.shape[-2:], mode='bilinear')
    return eval_train_map
def feat_normalized(model,x):
    """Per-image saliency map: channel-summed feature map, min-max
    normalized to [0, 1] and upsampled to the input size. Returns an
    (N, 1, H, W) tensor. Same scheme as feat_normalized_hat, without the
    task conditioning."""
    images = x.cuda(non_blocking=True)
    feat_map = model.f_train_feat_map(images)  # (N, C, H, W)
    N, Cf, Hf, Wf = feat_map.shape
    eval_train_map = feat_map.sum(1).view(N, -1)  # (N, Hf*Wf)
    # per-image min-max normalization
    eval_train_map = eval_train_map - eval_train_map.min(1, keepdim=True)[0]
    eval_train_map = eval_train_map / eval_train_map.max(1, keepdim=True)[0]
    eval_train_map = eval_train_map.view(N, 1, Hf, Wf)
    eval_train_map = F.interpolate(eval_train_map, size=images.shape[-2:], mode='bilinear')
    return eval_train_map
def Hbeta_torch(D, beta=1.0):
    """Gaussian-kernel row for t-SNE: given squared distances D and
    precision beta, return (H, P) where P is the normalized kernel and H
    its Shannon entropy (in nats)."""
    kernel = torch.exp(-beta * D.clone())
    total = torch.sum(kernel)
    entropy = torch.log(total) + beta * torch.sum(D * kernel) / total
    return entropy, kernel / total
def x2p_torch(X, tol=1e-5, perplexity=30.0):
    """
    Performs a binary search to get P-values in such a way that each
    conditional Gaussian has the same perplexity.
    """
    # Initialize some variables
    print("Computing pairwise distances...")
    (n, d) = X.shape
    # squared Euclidean distance matrix via ||a-b||^2 = ||a||^2 - 2ab + ||b||^2
    sum_X = torch.sum(X*X, 1)
    D = torch.add(torch.add(-2 * torch.mm(X, X.t()), sum_X).t(), sum_X)
    P = torch.zeros(n, n)
    beta = torch.ones(n, 1)
    # target entropy = log(perplexity)
    logU = torch.log(torch.tensor([perplexity]))
    n_list = [i for i in range(n)]
    # Loop over all datapoints
    for i in range(n):
        # Print progress
        if i % 500 == 0:
            print("Computing P-values for point %d of %d..." % (i, n))
        # Compute the Gaussian kernel and entropy for the current precision
        # there may be something wrong with this setting None
        betamin = None
        betamax = None
        # distances from point i to all others (diagonal excluded)
        Di = D[i, n_list[0:i]+n_list[i+1:n]]
        (H, thisP) = Hbeta_torch(Di, beta[i])
        # Evaluate whether the perplexity is within tolerance
        Hdiff = H - logU
        tries = 0
        # binary search on beta (the Gaussian precision) until the row
        # entropy matches logU, capped at 50 iterations
        while torch.abs(Hdiff) > tol and tries < 50:
            # If not, increase or decrease precision
            if Hdiff > 0:
                betamin = beta[i].clone()
                if betamax is None:
                    beta[i] = beta[i] * 2.
                else:
                    beta[i] = (beta[i] + betamax) / 2.
            else:
                betamax = beta[i].clone()
                if betamin is None:
                    beta[i] = beta[i] / 2.
                else:
                    beta[i] = (beta[i] + betamin) / 2.
            # Recompute the values
            (H, thisP) = Hbeta_torch(Di, beta[i])
            Hdiff = H - logU
            tries += 1
        # Set the final row of P
        P[i, n_list[0:i]+n_list[i+1:n]] = thisP
    # Return final P-matrix
    return P
def pca_torch(X, no_dims=50):
    """Project X (n x d) onto its top `no_dims` principal components.

    Fixes: ``torch.eig`` was removed from PyTorch (use ``torch.linalg.eigh``,
    which suits the symmetric scatter matrix and returns real eigenpairs),
    and the original selected the *first* columns of an unsorted
    eigendecomposition — proper PCA takes the eigenvectors with the largest
    eigenvalues, which is what this version does.
    """
    print("Preprocessing the data using PCA...")
    (n, d) = X.shape
    X = X - torch.mean(X, 0)
    # scatter matrix X^T X is symmetric PSD; eigh returns ascending eigenvalues
    eigenvalues, eigenvectors = torch.linalg.eigh(torch.mm(X.t(), X))
    # pick the leading components (largest eigenvalues first)
    order = torch.argsort(eigenvalues, descending=True)
    M = eigenvectors[:, order[:no_dims]]
    Y = torch.mm(X, M)
    return Y
def tsne(X, no_dims=2, initial_dims=50, perplexity=30.0):
    """
    Runs t-SNE on the dataset in the NxD array X to reduce its
    dimensionality to no_dims dimensions. The syntaxis of the function is
    `Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array.
    """
    # Check inputs
    if isinstance(no_dims, float):
        print("Error: array X should not have type float.")
        return -1
    if round(no_dims) != no_dims:
        print("Error: number of dimensions should be an integer.")
        return -1
    # Initialize variables
    # reduce to initial_dims with PCA first to speed up the pairwise step
    X = pca_torch(X, initial_dims)
    (n, d) = X.shape
    max_iter = 1000
    initial_momentum = 0.5
    final_momentum = 0.8
    eta = 500
    min_gain = 0.01
    Y = torch.randn(n, no_dims)
    dY = torch.zeros(n, no_dims)
    iY = torch.zeros(n, no_dims)
    gains = torch.ones(n, no_dims)
    # Compute P-values
    P = x2p_torch(X, 1e-5, perplexity)
    # symmetrize and normalize the joint probabilities
    P = P + P.t()
    P = P / torch.sum(P)
    P = P * 4.    # early exaggeration
    print("get P shape", P.shape)
    P = torch.max(P, torch.tensor([1e-21]))
    # Run iterations
    for iter in range(max_iter):
        # Compute pairwise affinities
        # Student-t kernel: 1 / (1 + ||y_i - y_j||^2), zero diagonal
        sum_Y = torch.sum(Y*Y, 1)
        num = -2. * torch.mm(Y, Y.t())
        num = 1. / (1. + torch.add(torch.add(num, sum_Y).t(), sum_Y))
        num[range(n), range(n)] = 0.
        Q = num / torch.sum(num)
        Q = torch.max(Q, torch.tensor([1e-12]))
        # Compute gradient
        PQ = P - Q
        for i in range(n):
            dY[i, :] = torch.sum((PQ[:, i] * num[:, i]).repeat(no_dims, 1).t() * (Y[i, :] - Y), 0)
        # Perform the update
        if iter < 20:
            momentum = initial_momentum
        else:
            momentum = final_momentum
        # adaptive per-coordinate gains: grow when gradient sign flips
        gains = (gains + 0.2) * ((dY > 0.) != (iY > 0.)).double() + (gains * 0.8) * ((dY > 0.) == (iY > 0.)).double()
        gains[gains < min_gain] = min_gain
        iY = momentum * iY - eta * (gains * dY)
        Y = Y + iY
        # re-center the embedding each step
        Y = Y - torch.mean(Y, 0)
        # Compute current value of cost function
        if (iter + 1) % 10 == 0:
            C = torch.sum(P * torch.log(P / Q))
            print("Iteration %d: error is %f" % (iter + 1, C))
        # Stop lying about P-values
        if iter == 100:
            P = P / 4.
    # Return solution
    return Y
def test_model_conti(Basic_model,Loder,j):
    """Average accuracy over `Loder`, masking task j's logit slice
    (columns 10j..10j+10) with a one-hot of the within-task label.

    NOTE(review): the outer loop runs len(Loder) times but `kk` is unused
    and the same loader/k=j is evaluated every pass, so this just averages
    identical runs — confirm whether Loder[kk] was intended.
    """
    test_accuracy = 0
    task_num=len(Loder)
    for kk in range(len(Loder)):
        k=j
        correct = 0
        num = 0
        for batch_idx, (data, target) in enumerate(Loder):
            data, target = data.cuda(), target.cuda()
            # data, target = Variable(data, volatile=True), Variable(target)
            Basic_model.eval()
            # one-hot of the within-task label (10 classes per task)
            mask=torch.nn.functional.one_hot(target%10,num_classes=10)
            # pdb.set_trace()
            pred = Basic_model.forward(data)#[:,:10*task_num]#torch.cat((Basic_model.forward(data)[:,10*(i):10*(i+1)]*mask,Basic_model.forward(data)[:,10*(j):10*(j+1)]),dim=1)
            pred[:,10*k:10*(k+1)]=pred[:,10*k:10*(k+1)]*mask
            Pred = pred.data.max(1, keepdim=True)[1]
            num += data.size()[0]
            target=target
            # print("final", Pred, target.data.view_as(Pred))
            # print(target,"True",pred)
            correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
        test_accuracy += (100. * correct / num)#*0.5  # len(data_loader.dataset)
        # print(
        #     'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        #         .format(i,
        #                 test_loss, correct, num,
        #                 100. * correct / num, ))
    return test_accuracy/task_num
def test_model_task(Basic_model,loder1,loder2, i,j):
    """Accuracy over the 20-way problem formed by tasks i and j: evaluate
    loder1 (task i, its slice masked by the true one-hot) and loder2
    (task j, likewise) and return the mean of the two accuracies."""
    test_loss = 0  # NOTE(review): never updated; kept for symmetry with siblings
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder1):
        data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)
        Basic_model.eval()
        mask=torch.nn.functional.one_hot(target%10,num_classes=10)
        # pdb.set_trace()
        # concat task-i slice (masked) with task-j slice -> 20 logits
        pred = torch.cat((Basic_model.forward(data)[:,10*(i):10*(i+1)]*mask,Basic_model.forward(data)[:,10*(j):10*(j+1)]),dim=1)
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        # re-base labels to the 0..19 range of the concatenated logits
        target=target-10*i
        # print("final", Pred, target.data.view_as(Pred))
        # print(target,"True",pred)
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy = (100. * correct / num)*0.5  # len(data_loader.dataset)
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder2):
        data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)
        Basic_model.eval()
        mask = torch.nn.functional.one_hot(target % 10, num_classes=10)
        #pdb.set_trace()
        pred = torch.cat((Basic_model.forward(data)[:, 10 * (i):10 * (i + 1)],
                          Basic_model.forward(data)[:, 10 * (j):10 * (j + 1)]* mask),dim=1)
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        # task-j labels map to positions 10..19 of the concatenated logits
        target = target - 10 * j +10
        # print("final", Pred, target.data.view_as(Pred))
        # print(target,"True",pred)
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy += (100. * correct / num)*0.5
    # print(
    #     'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
    #         .format(i,
    #                 test_loss, correct, num,
    #                 100. * correct / num, ))
    return test_accuracy
def test_model_cur(Basic_model,loder, i):
    """Task-incremental accuracy on task i: argmax restricted to task i's
    10-logit slice, labels re-based to 0..9."""
    test_loss = 0  # NOTE(review): never updated
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder):
        data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)
        Basic_model.eval()
        pred = Basic_model.forward(data)[:,10*(i):10*(i+1)]
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        target=target-10*i
        # print("final", Pred, target.data.view_as(Pred))
        # print(target,"True",pred)
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy = 100. * correct / num  # len(data_loader.dataset)
    # print(
    #     'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
    #         .format(i,
    #                 test_loss, correct, num,
    #                 100. * correct / num, ))
    return test_accuracy
def test_model_past(Basic_model,loder, i):
    """Class-incremental accuracy over all tasks seen so far: argmax over
    the first 10*(i+1) logits, labels left in the global range."""
    test_loss = 0  # NOTE(review): never updated
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder):
        data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)
        Basic_model.eval()
        pred = Basic_model.forward(data)[:,:10*(i+1)]
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        target=target
        # print("final", Pred, target.data.view_as(Pred))
        # print(target,"True",pred)
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy = 100. * correct / num  # len(data_loader.dataset)
    # print(
    #     'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
    #         .format(i,
    #                 test_loss, correct, num,
    #                 100. * correct / num, ))
    return test_accuracy
def test_model_mix(Basic_model,loder, i):
    """Accuracy with task i's 10 logits concatenated with the model's last
    10 logits (20-way argmax); labels re-based by -10*i, so only task-i
    samples can be counted correct."""
    test_loss = 0  # NOTE(review): never updated
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder):
        data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)
        Basic_model.eval()
        pred = torch.cat((Basic_model.forward(data)[:,10*(i):10*(i+1)],Basic_model.forward(data)[:,-10:]),dim=1)
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        target=target-10*i
        # print("final", Pred, target.data.view_as(Pred))
        # print(target,"True",pred)
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy = 100. * correct / num  # len(data_loader.dataset)
    # print(
    #     'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
    #         .format(i,
    #                 test_loss, correct, num,
    #                 100. * correct / num, ))
    return test_accuracy
def test_model_future(Basic_model,loder, i):
    """Accuracy using only logits from task i onward (columns 10*i..end),
    labels re-based by -10*i. Prints a summary line per call."""
    test_loss = 0  # NOTE(review): never updated, so the printed loss is always 0
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder):
        data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)
        Basic_model.eval()
        pred = Basic_model.forward(data)[:,10*i:]
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        target=target-10*i
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy = 100. * correct / num  # len(data_loader.dataset)
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
            .format(i,
                    test_loss, correct, num,
                    100. * correct / num, ))
    return test_accuracy
def test_model(Basic_model,loder, i):
    """Plain accuracy over the loader using the full logit vector; `i` is
    only used as a label in the printed summary."""
    test_loss = 0  # NOTE(review): never updated, so the printed loss is always 0
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder):
        data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)
        Basic_model.eval()
        pred = Basic_model.forward(data)
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy = 100. * correct / num  # len(data_loader.dataset)
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
            .format(i,
                    test_loss, correct, num,
                    100. * correct / num, ))
    return test_accuracy
def get_true_prob(x, y, llabel):
    """Return a CUDA float tensor of 0/1 flags, one per sample, marking
    whether y[i] is contained in `llabel`.

    `x` is used only for its batch size (x.size(0)); the flags are aligned
    with the first dimension of x. Refactor: the append loop becomes a
    comprehension and the dead commented-out code is removed; behavior is
    unchanged.
    """
    num = x.size()[0]
    flags = [1 if y[i] in llabel else 0 for i in range(num)]
    return torch.FloatTensor(flags).cuda()
def get_prob_rate(x, logits, label):
    """Per-sample ratio of the true-class softmax probability to the max
    softmax probability (1.0 when the prediction is correct). Returns a
    CUDA float tensor; `x` is used only for its batch size."""
    num = x.size()[0]
    logits = F.softmax(logits, dim=1)
    rate = []
    # true2=[]
    for i in range(num):
        true_prob = logits[i][label[i]].item()
        max_prob = torch.max(logits[i])
        rate.append(true_prob / max_prob)
    return torch.FloatTensor(rate).cuda()
def get_prob_rate_cross( logits, label, t):
    """Per-sample ratio of the true-class softmax probability to the max
    softmax probability computed over all but the last `t` columns.
    Returns a CUDA float tensor."""
    logits = F.softmax(logits, dim=1)
    rate = []
    num = logits.size()[0]
    # true2=[]
    # import pdb
    # pdb.set_trace()
    for i in range(num):
        true_prob = logits[i][label[i]].item()
        # import pdb
        # pdb.set_trace()
        # max taken excluding the trailing t columns
        max_prob = torch.max(logits[i, :-t])
        rate.append(true_prob / max_prob)
    return torch.FloatTensor(rate).cuda()
def get_mean_rate_cross( logits, label, t):
    """Per-sample ratio of the true-class softmax probability to the max
    softmax probability over all but the last `t` columns.

    NOTE(review): the original body was byte-for-byte identical to
    get_prob_rate_cross despite the "mean" in the name; this version
    delegates to it so the two cannot drift apart. Behavior is unchanged —
    confirm whether a genuinely different (mean-based) statistic was
    intended here.
    """
    return get_prob_rate_cross(logits, label, t)
| 32,557
| 37.759524
| 175
|
py
|
GSA
|
GSA-main/GSA_CVPR/buffer.py
|
import numpy as np
import math
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
class Buffer(nn.Module):
    def __init__(self, args, input_size=None):
        """Allocate a fixed-size replay buffer on the GPU.

        Registers image (`bx`), label (`by`), task-id (`bt`), logit and
        feature tensors as module buffers so they are saved by torch.save.
        `input_size` defaults to args.input_size.
        """
        super().__init__()
        self.args = args
        self.k = 0.03
        self.place_left = True
        if input_size is None:
            input_size = args.input_size
        # TODO(change this:)
        # NOTE(review): `economy` is computed but never used below, and if
        # args.gen is true with a dataset matching none of the branches it
        # is unbound — confirm this path is dead.
        if args.gen:
            if 'mnist' in args.dataset:
                img_size = 784
                economy = img_size // input_size[0]
            elif 'cifar' in args.dataset:
                img_size = 32 * 32 * 3
                economy = img_size // (input_size[0] ** 2)
            elif 'imagenet' in args.dataset:
                img_size = 84 * 84 * 3
                economy = img_size // (input_size[0] ** 2)
        else:
            economy = 1
        buffer_size = args.buffer_size
        print('buffer has %d slots' % buffer_size,args.buffer_size)
        bx = torch.FloatTensor(buffer_size, *input_size).fill_(0)
        print("bx",bx.shape)
        by = torch.LongTensor(buffer_size).fill_(0)
        bt = torch.LongTensor(buffer_size).fill_(0)
        logits = torch.FloatTensor(buffer_size, args.n_classes).fill_(0)
        feature= torch.FloatTensor(buffer_size, 512).fill_(0)
        #if args.cuda:
        bx = bx.cuda()#to(args.device)
        by = by.cuda()#to(args.device)
        bt = bt.cuda()#to(args.device)
        logits = logits.cuda()#to(args.device)
        feature=feature.cuda()
        self.save_logits=None
        # number of filled slots / total stream samples seen (for reservoir)
        self.current_index = 0
        self.n_seen_so_far = 0
        self.is_full = 0
        # registering as buffer allows us to save the object using `torch.save`
        self.register_buffer('bx', bx)
        self.register_buffer('by', by)
        self.register_buffer('bt', bt)
        self.register_buffer('logits', logits)
        self.register_buffer('feature',feature)
        # small helpers bound to the configured class count / devices
        self.to_one_hot  = lambda x : x.new(x.size(0), args.n_classes).fill_(0).scatter_(1, x.unsqueeze(1), 1)
        self.arange_like = lambda x : torch.arange(x.size(0)).to(x.device)
        self.shuffle     = lambda x : x[torch.randperm(x.size(0))]
@property
def x(self):
return self.bx[:self.current_index]
def is_empty(self) -> bool:
"""
Returns true if the buffer is empty, false otherwise.
"""
if self.n_seen_so_far == 0:
return True
else:
return False
@property
def y(self):
return self.to_one_hot(self.by[:self.current_index])
@property
def t(self):
return self.bt[:self.current_index]
@property
def valid(self):
return self.is_valid[:self.current_index]
def display(self, gen=None, epoch=-1):
from torchvision.utils import save_image
from PIL import Image
if 'cifar' in self.args.dataset:
shp = (-1, 3, 32, 32)
elif 'tinyimagenet' in self.args.dataset:
shp = (-1, 3, 64, 64)
else:
shp = (-1, 1, 28, 28)
if gen is not None:
x = gen.decode(self.x)
else:
x = self.x
save_image((x.reshape(shp) * 0.5 + 0.5), 'samples/buffer_%d.png' % epoch, nrow=int(self.current_index ** 0.5))
#Image.open('buffer_%d.png' % epoch).show()
print(self.y.sum(dim=0))
def add_reservoir(self, x, y, logits, t):
n_elem = x.size(0)
# x=x.reshape(x.size(0),1,1,-1)
place_left = max(0, self.bx.size(0) - self.current_index)
offset = min(place_left, n_elem)
# print(self.bx.shape,x[:offset].shape)
save_logits = logits is not None
self.save_logits=logits is not None
# add whatever still fits in the buffer
place_left = max(0, self.bx.size(0) - self.current_index)
if place_left:
offset = min(place_left, n_elem)
# print(offset)
# print(self.bx[self.current_index: self.current_index + offset].data.shape)
# print(x[:offset].shape)
self.bx[self.current_index: self.current_index + offset].data.copy_(x[:offset])
self.by[self.current_index: self.current_index + offset].data.copy_(y[:offset])
self.bt[self.current_index: self.current_index + offset].fill_(t)
if save_logits:
#print("存")
self.logits[self.current_index: self.current_index + offset].data.copy_(logits[:offset])
#self.feature[self.current_index: self.current_index+offset].data.copy_(feature[:offset])
self.current_index += offset
self.n_seen_so_far += offset
# everything was added
if offset == x.size(0):
return
self.place_left = False
# remove what is already in the buffer
x, y = x[place_left:], y[place_left:]
indices = torch.FloatTensor(x.size(0)).to(x.device).uniform_(0, self.n_seen_so_far).long()
valid_indices = (indices < self.bx.size(0)).long()
idx_new_data = valid_indices.nonzero().squeeze(-1)
idx_buffer = indices[idx_new_data]
self.n_seen_so_far += x.size(0)
if idx_buffer.numel() == 0:
return
assert idx_buffer.max() < self.bx.size(0), pdb.set_trace()
assert idx_buffer.max() < self.by.size(0), pdb.set_trace()
assert idx_buffer.max() < self.bt.size(0), pdb.set_trace()
assert idx_new_data.max() < x.size(0), pdb.set_trace()
assert idx_new_data.max() < y.size(0), pdb.set_trace()
# perform overwrite op
self.bx[idx_buffer] = x[idx_new_data].cuda()
self.by[idx_buffer] = y[idx_new_data].cuda()
self.bt[idx_buffer] = t
if save_logits:
self.logits[idx_buffer] = logits[idx_new_data]
#self.feature[idx_buffer] = feature[idx_new_data]
def measure_valid(self, generator, classifier):
with torch.no_grad():
# fetch valid examples
valid_indices = self.valid.nonzero()
valid_x, valid_y = self.bx[valid_indices], self.by[valid_indices]
one_hot_y = self.to_one_hot(valid_y.flatten())
hid_x = generator.idx_2_hid(valid_x)
x_hat = generator.decode(hid_x)
logits = classifier(x_hat)
_, pred = logits.max(dim=1)
one_hot_pred = self.to_one_hot(pred)
correct = one_hot_pred * one_hot_y
per_class_correct = correct.sum(dim=0)
per_class_deno = one_hot_y.sum(dim=0)
per_class_acc = per_class_correct.float() / per_class_deno.float()
self.class_weight = 1. - per_class_acc
self.valid_acc = per_class_acc
self.valid_deno = per_class_deno
def shuffle_(self):
indices = torch.randperm(self.current_index).to(self.args.device)
self.bx = self.bx[indices]
self.by = self.by[indices]
self.bt = self.bt[indices]
def delete_up_to(self, remove_after_this_idx):
self.bx = self.bx[:remove_after_this_idx]
self.by = self.by[:remove_after_this_idx]
self.br = self.bt[:remove_after_this_idx]
def sample(self, amt, exclude_task = None, ret_ind = False):
if self.save_logits:
if exclude_task is not None:
valid_indices = (self.t != exclude_task)
valid_indices = valid_indices.nonzero().squeeze()
bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
else:
bx, by, bt, logits = self.bx[:self.current_index], self.by[:self.current_index], self.bt[:self.current_index],self.logits[:self.current_index]#,self.feature[:self.current_index]
if bx.size(0) < amt:
if ret_ind:
return bx, by, logits,bt, torch.from_numpy(np.arange(bx.size(0)))
else:
return bx, by, logits,bt
else:
indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
#if self.args.cuda:
indices = indices.cuda()#to(self.args.device)
# import pdb
# pdb.set_trace()
if ret_ind:
return bx[indices], by[indices],logits[indices],bt[indices], indices
else:
return bx[indices], by[indices],logits[indices], bt[indices]
else:
# return 0
if exclude_task is not None:
valid_indices = (self.t != exclude_task)
valid_indices = valid_indices.nonzero().squeeze()
bx, by, bt = self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices]
else:
bx, by, bt = self.bx[:self.current_index], self.by[:self.current_index], self.bt[:self.current_index]
if bx.size(0) < amt:
if ret_ind:
return bx, by, bt, torch.from_numpy(np.arange(bx.size(0)))
else:
return bx, by, bt
else:
indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
#if self.args.cuda:
indices = indices.cuda()#to(self.args.device)
if ret_ind:
return bx[indices], by[indices], bt[indices], indices
else:
return bx[indices], by[indices], bt[indices]
def split(self, amt):
indices = torch.randperm(self.current_index).to(self.args.device)
return indices[:amt], indices[amt:]
def presample(self, amt, task = None, ret_ind = False):
if self.save_logits:
if task is not None:
valid_indices = (self.t <= task)
valid_indices = valid_indices.nonzero().squeeze()
bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
else:
bx, by, bt, logits = self.bx[:self.current_index], self.by[:self.current_index], self.bt[:self.current_index],self.logits[:self.current_index]
if bx.size(0) < amt:
if ret_ind:
return bx, by, logits,bt, torch.from_numpy(np.arange(bx.size(0)))
else:
return bx, by, logits,bt
else:
indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
#if self.args.cuda:
indices = indices.cuda()#to(self.args.device)
if ret_ind:
return bx[indices], by[indices],logits[indices],bt[indices], indices
else:
return bx[indices], by[indices],logits[indices], bt[indices]
else:
return 0
def prob_index(self,distribution,amt):
n=int(len(distribution)/2)
valid_sum_indices=None
for task_index in range(n):
prob_cur_task=distribution[task_index]+distribution[task_index+1]
va_cur_index=(self.t==task_index)
valid_cur_indices = va_cur_index.nonzero().squeeze()
indices = torch.from_numpy(np.random.choice(len(valid_cur_indices), int(amt*prob_cur_task), replace=False))
valid_cur_indices=valid_cur_indices[indices]
if valid_sum_indices is None:
valid_sum_indices=(valid_cur_indices)
else:
valid_sum_indices = torch.cat((valid_cur_indices,valid_sum_indices))
return valid_sum_indices
def pro_sample(self, amt, distribution, ret_ind = False):
#task=exclude_task
#if task>=2:
# import pdb
# pdb.set_trace()
#
if self.save_logits:
#if task is not None:
# valid_indices = (self.t == task)
# valid_indices = valid_indices.nonzero().squeeze()
# bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
# else:
probi_index= self.prob_index(distribution, amt)
return self.bx[probi_index], self.by[probi_index], self.bt[probi_index],self.logits[probi_index]
else:
probi_index = self.prob_index(distribution, amt)
return self.bx[probi_index], self.by[probi_index], self.bt[probi_index]
def prob_class_index(self,distribution,amt):
n=int(len(distribution))
valid_sum_indices=None
# import pdb
#pdb.set_trace()
for class_index in range(n):
prob_cur_class=distribution[class_index]#+distribution[task_index+1]
va_cur_index=(self.by==class_index)
valid_cur_indices = va_cur_index.nonzero().squeeze()
indices = torch.from_numpy(np.random.choice(len(valid_cur_indices), int(amt*prob_cur_class), replace=False))
valid_cur_indices=valid_cur_indices[indices]
if valid_sum_indices is None:
valid_sum_indices=(valid_cur_indices)
else:
valid_sum_indices = torch.cat((valid_cur_indices,valid_sum_indices))
return valid_sum_indices
def pro_class_sample(self, amt, distribution, ret_ind = False):
#task=exclude_task
#if task>=2:
# import pdb
# pdb.set_trace()
#
if self.save_logits:
#if task is not None:
# valid_indices = (self.t == task)
# valid_indices = valid_indices.nonzero().squeeze()
# bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
# else:
# pdb.set_trace()
probi_index= self.prob_class_index(distribution, amt)
# bx,by,bt,logits=self.bx.squeeze(0), self.by.squeeze(0), self.bt.squeeze(0), self.logits.squeeze(0)
# if probi_index is None:probi_index=torch.tensor([], device='cuda:0', dtype=torch.int64)
# import pdb
# pdb.set_trace()
return self.bx[probi_index],self.by[probi_index],self.bt[probi_index],self.logits[probi_index]
else:
probi_index = self.prob_class_index(distribution, amt)
return self.bx[probi_index], self.by[probi_index], self.bt[probi_index]
def onlysample(self, amt, task = None, ret_ind = False):
if self.save_logits:
if task is not None:
valid_indices = (self.t == task)
valid_indices = valid_indices.nonzero().squeeze()
bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
else:
bx, by, bt, logits = self.bx[:self.current_index], self.by[:self.current_index], self.bt[:self.current_index],self.logits[:self.current_index]
if bx.size(0) < amt:
if ret_ind:
return bx, by, logits,bt, torch.from_numpy(np.arange(bx.size(0)))
else:
return bx, by, logits,bt
else:
indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
#if self.args.cuda:
indices = indices.cuda()#to(self.args.device)
if ret_ind:
return bx[indices], by[indices],logits[indices],bt[indices], indices
else:
return bx[indices], by[indices],logits[indices], bt[indices]
else:
if task is not None:
valid_indices = (self.t == task)
valid_indices = valid_indices.nonzero().squeeze()
bx, by, bt = self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices]
else:
bx, by, bt= self.bx[:self.current_index], self.by[:self.current_index], self.bt[
:self.current_index]
if bx.size(0) < amt:
if ret_ind:
return bx, by, bt, torch.from_numpy(np.arange(bx.size(0)))
else:
return bx, by, bt
else:
indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
# if self.args.cuda:
indices = indices.cuda() # to(self.args.device)
if ret_ind:
return bx[indices], by[indices], bt[indices], indices
else:
return bx[indices], by[indices], bt[indices]
def get_cifar_buffer(args, hH=8, gen=None):
    """Build a Buffer configured for hH x hH latent inputs.

    BUGFIX: Buffer.__init__ accepts no `gen` keyword, so the previous
    `Buffer(args, gen=gen)` call always raised TypeError.  The flag the
    constructor actually reads is `args.gen`, which is set here; the `gen`
    parameter is kept only for backward interface compatibility.
    """
    args.input_size = (hH, hH)
    args.gen = True
    return Buffer(args)
| 16,903
| 38.962175
| 193
|
py
|
GSA
|
GSA-main/GSA_CVPR/Resnet18.py
|
# Copyright 2020-present, Pietro Buzzega, Matteo Boschini, Angelo Porrello, Davide Abati, Simone Calderara.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import relu, avg_pool2d
from typing import List
#from modified_linear import *
from torch.nn import functional as F
def conv3x3(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """
    Instantiates a 3x3 convolutional layer with no bias.
    :param in_planes: number of input channels
    :param out_planes: number of output channels
    :param stride: stride of the convolution
    :return: convolutional layer
    """
    # BUGFIX (annotation only): the return type was `-> F.conv2d`, which is
    # the functional op, not the nn.Conv2d module actually returned.
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet-18/34 style)."""

    expansion = 1

    def __init__(self, in_planes: int, planes: int, stride: int = 1) -> None:
        """Build the block.

        A 1x1 projection shortcut is created whenever the stride or the
        channel count changes; otherwise the shortcut is the identity.

        :param in_planes: the number of input channels
        :param planes: the number of channels (to be possibly expanded)
        :param stride: stride of the first convolution
        """
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Residual forward pass: relu(bn1(conv1)) -> bn2(conv2) + shortcut.

        :param x: input tensor (batch_size, in_planes, H, W)
        :return: output tensor (batch_size, planes, H', W')
        """
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        branch = branch + self.shortcut(x)
        return F.relu(branch)
class ResNet(nn.Module):
    """
    ResNet network architecture. Designed for complex datasets.

    Besides the usual classification head (`linear`), the model carries two
    128-d projection heads (`simclr`, `simclr2`) for contrastive losses and
    an expandable output layer (see `change_output_dim`).
    """

    def __init__(self, block: BasicBlock, num_blocks: List[int],
                 num_classes: int, nf: int) -> None:
        """
        Instantiates the layers of the network.
        :param block: the basic ResNet block
        :param num_blocks: the number of blocks per layer
        :param num_classes: the number of output classes
        :param nf: the number of filters
        """
        super(ResNet, self).__init__()
        self.in_planes = nf
        self.block = block
        self.num_classes = num_classes
        self.nf = nf
        self.conv1 = conv3x3(3, nf * 1)
        self.bn1 = nn.BatchNorm2d(nf * 1)
        self.layer1 = self._make_layer(block, nf * 1, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, nf * 2, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, nf * 4, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, nf * 8, num_blocks[3], stride=2)
        self.num_classes=num_classes
        # Plain linear head; a weight-normalized variant is kept commented
        # as a reference.
        self.linear = nn.Linear(nf * 8 * block.expansion, self.num_classes)#nn.utils.weight_norm(nn.Linear(nf * 8 * block.expansion, self.num_classes))
        # torch.nn.init.xavier_uniform(self.linear.weight)
        self.out_dim = nf * 8 * block.expansion
        self.drop = nn.Dropout(p=0.2)
        # self.drop2 = nn.Dropout(p=0.3)
        # 128-d projection heads used for the contrastive objectives.
        self.simclr=nn.Linear(nf * 8 * block.expansion, 128)
        self.simclr2 = nn.Linear(nf * 8 * block.expansion, 128)
        self._features = nn.Sequential(self.conv1,
                                       self.bn1,
                                       self.layer1,
                                       self.layer2,
                                       self.layer3,
                                       self.layer4
                                       )
        # Alias so callers may refer to the head as `classifier`.
        self.classifier = self.linear

    def f_train_feat_map(self, x: torch.Tensor, mask=None) -> torch.Tensor:
        """Un-pooled feature map after layer4.

        :param x: input tensor (batch_size, 3, H, W)
        :param mask: accepted but unused in this implementation
        :return: feature map of shape (batch_size, nf*8, H/8, W/8)
        """
        out = relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)  # 64, 32, 32
        out = self.layer2(out)  # 128, 16, 16
        out = self.layer3(out)  # 256, 8, 8
        out = self.layer4(out)  # 512, 4, 4
        return out

    def _make_layer(self, block: BasicBlock, planes: int,
                    num_blocks: int, stride: int) -> nn.Module:
        """
        Instantiates a ResNet layer.
        :param block: ResNet basic block
        :param planes: channels across the network
        :param num_blocks: number of blocks
        :param stride: stride
        :return: ResNet layer
        """
        # Only the first block downsamples; the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def f_train(self, x: torch.Tensor) -> torch.Tensor:
        """Pooled feature vector (backbone forward without any head).

        :param x: input tensor (batch_size, 3, H, W)
        :return: features of shape (batch_size, nf*8)
        """
        out = relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)  # 64, 32, 32
        out = self.layer2(out)  # 128, 16, 16
        out = self.layer3(out)  # 256, 8, 8
        out = self.layer4(out)  # 512, 4, 4
        out = avg_pool2d(out, out.shape[2])  # 512, 1, 1
        out = out.view(out.size(0), -1)  # 512
        return out

    def f_inter(self, x: torch.Tensor) -> torch.Tensor:
        """Flattened feature map after layer4 (no average pooling).

        NOTE(review): unlike f_train this skips avg_pool2d, so the output
        width is nf*8 * (H/8) * (W/8) per sample.
        """
        out = relu(self.bn1(self.conv1(x)), inplace=True)
        out = self.layer1(out)  # 64, 32, 32
        out = self.layer2(out)  # 128, 16, 16
        out = self.layer3(out)  # 256, 8, 8
        out = self.layer4(out)
        out = out.view(out.size(0), -1)
        return out

    def forward(self, x: torch.Tensor, is_simclr=False, is_simclr2=False, is_drop=False) -> torch.Tensor:
        """
        Compute a forward pass.
        :param x: input tensor (batch_size, *input_shape)
        :param is_simclr: if True return the 128-d contrastive projection
        :param is_simclr2: accepted but unused here
        :param is_drop: if True apply dropout before the linear head
        :return: output tensor (output_classes)
        """
        '''
        out = relu(self.bn1(self.conv1(x)))
        out = self.layer1(out) # 64, 32, 32
        out = self.layer2(out) # 128, 16, 16
        out = self.layer3(out) # 256, 8, 8
        '''
        out = self.f_train(x)
        #out = self.drop(out)
        '''
        out = self.layer4(out) # 512, 4, 4
        out = avg_pool2d(out, out.shape[2]) # 512, 1, 1
        out = out.view(out.size(0), -1) # 512
        '''
        if is_simclr:
            # out=self.drop2(out)
            out = self.simclr(out)
        elif is_drop:
            #out=nn.dropout
            out=self.drop(out)
            out = self.linear(out)
            # out=out.detach()
            # out = self.drop(out)
        else:
            # out=out / (out.norm(dim=1, keepdim=True) + 1e-8)
            # out = self.drop(out)
            out = self.linear(out)
        return out

    def change_output_dim(self, new_dim, second_iter=False):
        """Grow the classification head by `new_dim` outputs, copying the
        previously learned class weights into the first rows.

        :param new_dim: number of output units to add
        :param second_iter: unused; kept for interface compatibility
        """
        # `prev_weights` keeps an (untrained) snapshot of the enlarged head
        # shape; used by prev_logit().
        self.prev_weights = nn.Linear(self.out_dim, self.num_classes+new_dim)
        in_features = self.out_dim
        out_features = self.num_classes+new_dim
        new_weights =nn.Linear(in_features,out_features)#nn.Linear(in_features,out_features,bias=False)
        print("in_features:", in_features, "out_features:", out_features)
        # Preserve the already-learned class weights and biases.
        new_weights.weight.data[:self.num_classes] = self.linear.weight.data
        new_weights.bias.data[:self.num_classes] = self.linear.bias.data
        from torch.nn.parameter import Parameter  # NOTE(review): unused import
        self.linear = new_weights.cuda()
        self.linear.requires_grad=True
        self.num_classes = out_features

    def features(self, x: torch.Tensor) -> torch.Tensor:
        """
        Returns the non-activated output of the second-last layer.
        :param x: input tensor (batch_size, *input_shape)
        :return: output tensor (??)
        """
        out = self._features(x)
        out = avg_pool2d(out, out.shape[2])
        feat = out.view(out.size(0), -1)
        return feat

    def prev_logit(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the snapshot head created by change_output_dim().

        :param x: feature tensor (batch_size, out_dim)
        :return: logits of the enlarged (snapshot) head
        """
        out = self.prev_weights(x)
        return out

    def get_params(self) -> torch.Tensor:
        """
        Returns all the parameters concatenated in a single tensor.
        :return: parameters tensor (??)
        """
        params = []
        for pp in list(self.parameters()):
            params.append(pp.view(-1))
        return torch.cat(params)

    def set_params(self, new_params: torch.Tensor) -> None:
        """
        Sets the parameters to a given value.
        :param new_params: concatenated values to be set (??)
        """
        assert new_params.size() == self.get_params().size()
        progress = 0
        for pp in list(self.parameters()):
            cand_params = new_params[progress: progress +
                                     torch.tensor(pp.size()).prod()].view(pp.size())
            progress += torch.tensor(pp.size()).prod()
            pp.data = cand_params

    def get_grads(self) -> torch.Tensor:
        """
        Returns all the gradients concatenated in a single tensor.
        :return: gradients tensor (??)
        """
        grads = []
        for pp in list(self.parameters()):
            grads.append(pp.grad.view(-1))
        return torch.cat(grads)
def resnet18(nclasses: int, nf: int = 64) -> ResNet:
    """
    Instantiates a ResNet18 network.
    :param nclasses: number of output classes
    :param nf: number of filters
    :return: ResNet network
    """
    # BUGFIX: `nf` was previously ignored (hard-coded nf=64); honour the
    # parameter while keeping the same default, so existing callers are
    # unaffected.
    return ResNet(BasicBlock, [2, 2, 2, 2], nclasses, nf=nf)
| 11,584
| 37.108553
| 151
|
py
|
GSA
|
GSA-main/GSA_CVPR/conf.py
|
# Copyright 2020-present, Pietro Buzzega, Matteo Boschini, Angelo Porrello, Davide Abati, Simone Calderara.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import torch
import numpy as np
from abc import abstractmethod
from argparse import Namespace
from torch import nn as nn
from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from typing import Tuple
from torchvision import datasets
import numpy as np
def get_device() -> torch.device:
    """
    Returns the GPU device if available else CPU.
    """
    if torch.cuda.is_available():
        return torch.device("cuda:0")
    return torch.device("cpu")
def base_path() -> str:
    """
    Returns the base path where to log accuracies and tensorboard data.
    """
    return "./data/"
def set_random_seed(seed: int) -> None:
    """
    Sets the seeds at a certain value.
    :param seed: the value to be set
    """
    # Seed every RNG the project draws from, in the same order as before.
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
class ContinualDataset:
    """
    Continual learning evaluation setting.

    Abstract base: concrete datasets override the class-level descriptors
    below and implement the abstract factory methods.
    """
    NAME = None                # dataset identifier string
    SETTING = None             # evaluation setting (e.g. class-/task-incremental)
    N_CLASSES_PER_TASK = None  # classes introduced by each task
    N_TASKS = None             # number of sequential tasks
    TRANSFORM = None           # default train-time transform

    def __init__(self, args: Namespace) -> None:
        """
        Initializes the train and test lists of dataloaders.
        :param args: the arguments which contains the hyperparameters
        """
        self.train_loader = None
        self.test_loaders = []
        self.i = 0  # index of the current task
        self.args = args

    @abstractmethod
    def get_data_loaders(self) -> Tuple[DataLoader, DataLoader]:
        """
        Creates and returns the training and test loaders for the current task.
        The current training loader and all test loaders are stored in self.
        :return: the current training and test loaders
        """
        pass

    @abstractmethod
    def not_aug_dataloader(self, batch_size: int) -> DataLoader:
        """
        Returns the dataloader of the current task,
        not applying data augmentation.
        :param batch_size: the batch size of the loader
        :return: the current training loader
        """
        pass

    @staticmethod
    @abstractmethod
    def get_backbone() -> nn.Module:
        """
        Returns the backbone to be used for to the current dataset.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_transform() -> transforms:
        """
        Returns the transform to be used for to the current dataset.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_loss() -> nn.functional:
        """
        Returns the loss to be used for to the current dataset.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_normalization_transform() -> transforms:
        """
        Returns the transform used for normalizing the current dataset.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_denormalization_transform() -> transforms:
        """
        Returns the transform used for denormalizing the current dataset.
        """
        pass
| 3,209
| 25.311475
| 107
|
py
|
GSA
|
GSA-main/GSA_CVPR/test_cifar100.py
|
import ipaddress
import sys, argparse
import numpy as np
import torch
from torch.nn.functional import relu, avg_pool2d
from buffer import Buffer
# import utils
import datetime
from torch.nn.functional import relu
import torch
import torch.nn as nn
import torch.nn.functional as F
from CSL import tao as TL
from CSL import classifier as C
from CSL.utils import normalize
from CSL.contrastive_learning import get_similarity_matrix, NT_xent, Supervised_NT_xent, SupConLoss
import torch.optim.lr_scheduler as lr_scheduler
from CSL.shedular import GradualWarmupScheduler
import torch
import torchvision.transforms as transforms
import torchvision
# Arguments
# Command-line configuration: dataset, replay-buffer size, learning rate, GPU.
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='(default=%(default)d)')
parser.add_argument('--experiment', default='cifar-10', type=str, required=False, help='(default=%(default)s)')
parser.add_argument('--lr', default=0.02, type=float, required=False, help='(default=%(default)f)')
parser.add_argument('--parameter', type=str, default='', help='(default=%(default)s)')
parser.add_argument('--dataset', type=str, default='cifar', help='(default=%(default)s)')
parser.add_argument('--input_size', type=str, default=[3, 32, 32], help='(default=%(default)s)')
parser.add_argument('--buffer_size', type=int, default=1000, help='(default=%(default)s)')
parser.add_argument('--gen', type=str, default=True, help='(default=%(default)s)')
parser.add_argument('--p1', type=float, default=0.1, help='(default=%(default)s)')
parser.add_argument('--cuda', type=str, default='1', help='(default=%(default)s)')
parser.add_argument('--n_classes', type=int, default=512, help='(default=%(default)s)')
parser.add_argument('--buffer_batch_size', type=int, default=64, help='(default=%(default)s)')
args = parser.parse_args()
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # ignore warning (silences TF C++ logging)
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda  # select which GPU(s) are visible
def flip_inner(x, flip1, flip2):
    """Horizontally flip the top and/or bottom half of each 3x32x32 image.

    Each image is split into two 16-row halves; `flip1` / `flip2` select
    whether the top / bottom half is flipped along its width.  With both
    flags 0 the input is returned unchanged (same values).

    :param x: batch of images, shape (N, 3, 32, 32)
    :param flip1: truthy -> flip the top half
    :param flip2: truthy -> flip the bottom half
    :return: tensor of shape (N, 3, 32, 32)
    """
    n = x.shape[0]
    # split the 32-row height into 2 halves of 16 rows each
    halves = x.view(n, 3, 2, 16, 32).permute(2, 0, 1, 3, 4)
    top, bottom = halves[0], halves[1]
    if flip1:
        top = torch.flip(top, (3,))
    if flip2:
        bottom = torch.flip(bottom, (3,))
    merged = torch.cat((top.unsqueeze(2), bottom.unsqueeze(2)), dim=2)
    return merged.reshape(n, 3, 32, 32)
def RandomFlip(x, num):
    """Stack `num` augmented views of the batch along dim 0.

    The batch is first passed through the module-level `simclr_aug`
    pipeline; the views are the identity plus up to three half-image
    horizontal-flip variants (see flip_inner).

    :param x: batch of images (N, 3, 32, 32)
    :param num: how many of the four variants to concatenate (1..4)
    :return: tensor of shape (num*N, 3, 32, 32)
    """
    x = simclr_aug(x)
    variants = [
        x,
        flip_inner(x, 1, 1),
        flip_inner(x, 0, 1),
        flip_inner(x, 1, 0),
    ]
    return torch.cat(variants[:num], dim=0)
def rot_inner(x, rot1, rot2):
    """Rotate the two halves of each (transposed) image by 180 degrees
    independently.

    The image is transposed, split into two 16-row halves, each half is
    rotated by 180*rot1 / 180*rot2 degrees, then merged and transposed
    back.  With both flags 0 the input is returned unchanged (same values).

    :param x: batch of images, shape (N, 3, 32, 32)
    :return: tensor of shape (N, 3, 32, 32)
    """
    n = x.shape[0]
    transposed = x.permute(0, 1, 3, 2).reshape(n, 3, 2, 16, 32)
    halves = transposed.permute(2, 0, 1, 3, 4)
    first = torch.rot90(halves[0], 2 * rot1, (2, 3))
    second = torch.rot90(halves[1], 2 * rot2, (2, 3))
    merged = torch.cat((first.unsqueeze(2), second.unsqueeze(2)), dim=2)
    return merged.reshape(n, 3, 32, 32).permute(0, 1, 3, 2)
def Rotation(x, r):
    """Rotate the whole batch by r * 90 degrees in the spatial plane.

    NOTE(review): the original body also built a list `X` of four
    rot_inner(...) variants that was never used or returned — most likely
    `torch.cat(X)` was intended (compare RandomFlip).  The dead computation
    is removed here; the returned value is unchanged (only the global
    rotation), so callers see identical behavior.

    :param x: batch of images, shape (N, C, H, W)
    :param r: number of 90-degree rotations
    :return: rotated batch, same shape (H/W swapped when r is odd)
    """
    return torch.rot90(x, r, (2, 3))
# NOTE(review): `oop` appears to be an augmentation multiplicity used later
# in the training loop — not referenced in this chunk; confirm downstream.
oop = 4
print('=' * 100)
print('Arguments =')
for arg in vars(args):
    print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'GPU ' + os.environ["CUDA_VISIBLE_DEVICES"])
print('=' * 100)
########################################################################################################################
# Seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
else:
    # This experiment requires a GPU; abort early without one.
    print('[CUDA unavailable]')
    sys.exit()
# Project-local data / model / buffer imports.
import cifar as dataloader
from Resnet18 import resnet18 as b_model
from buffer import Buffer as buffer
# imagenet200 import SequentialTinyImagenet as STI
from torch.optim import Adam, SGD  # ,SparseAdam
import torch.nn.functional as F
from copy import deepcopy
import matplotlib.pyplot as plt
def test_model_cur(loder, i):
    """Accuracy of task i's own head (logit columns 2*i .. 2*i+1) on `loder`.

    Targets are shifted into the [0, 2) range of the sliced head.  Uses the
    module-level `Basic_model`.
    """
    test_loss = 0
    correct = 0
    num = 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        Basic_model.eval()
        logits = Basic_model.forward(data)[:, 2 * i:2 * (i + 1)]
        predicted = logits.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        shifted = target - 2 * i
        correct += predicted.eq(shifted.data.view_as(predicted)).cpu().sum()
    test_accuracy = 100. * correct / num
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, correct, num,
                100. * correct / num, ))
    return test_accuracy
def test_model_past(loder, i):
    """Accuracy over all heads seen so far (logit columns 0 .. 2*(i+1))
    on `loder`, with unshifted targets.  Uses the module-level
    `Basic_model`.
    """
    test_loss = 0
    correct = 0
    num = 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        Basic_model.eval()
        logits = Basic_model.forward(data)[:, :2 * (i + 1)]
        predicted = logits.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        correct += predicted.eq(target.data.view_as(predicted)).cpu().sum()
    test_accuracy = 100. * correct / num
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, correct, num,
                100. * correct / num, ))
    return test_accuracy
def test_model_future(loder, i):
    """Accuracy over the heads from task i onward (logit columns 2*i ..)
    on `loder`; targets are shifted by -2*i.  Uses the module-level
    `Basic_model`.
    """
    test_loss = 0
    correct = 0
    num = 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        Basic_model.eval()
        logits = Basic_model.forward(data)[:, 2 * i:]
        predicted = logits.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        shifted = target - 2 * i
        correct += predicted.eq(shifted.data.view_as(predicted)).cpu().sum()
    test_accuracy = 100. * correct / num
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, correct, num,
                100. * correct / num, ))
    return test_accuracy
def test_model(loder, i):
    """Accuracy over the full output head on `loder` (`i` is only used for
    the printed label).  Uses the module-level `Basic_model`.
    """
    test_loss = 0
    correct = 0
    num = 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        Basic_model.eval()
        logits = Basic_model.forward(data)
        predicted = logits.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        correct += predicted.eq(target.data.view_as(predicted)).cpu().sum()
    test_accuracy = 100. * correct / num
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, correct, num,
                100. * correct / num, ))
    return test_accuracy
def get_true_prob(x, y, llabel):
    """Binary membership mask: 1.0 where y[i] is in `llabel`, else 0.0.

    :param x: batch tensor — only its batch dimension is used
    :param y: label tensor, indexed 0 .. x.size(0)-1
    :param llabel: container of labels to test membership against
    :return: CUDA FloatTensor of shape (batch,)
    """
    flags = [1 if y[i] in llabel else 0 for i in range(x.size()[0])]
    return torch.FloatTensor(flags).cuda()
def get_prob_rate(x, logits, label):
    """Per-sample ratio of the true-class probability to the maximum
    predicted probability.

    :param x: batch tensor — only its batch dimension is used
    :param logits: raw scores, shape (batch, n_classes)
    :param label: ground-truth class indices
    :return: CUDA FloatTensor of shape (batch,)
    """
    num = x.size()[0]
    probs = F.softmax(logits, dim=1)
    rate = []
    for i in range(num):
        true_prob = probs[i][label[i]].item()
        # BUGFIX: torch.max returns a 0-dim tensor; .item() keeps the rate
        # list as plain floats so torch.FloatTensor(rate) is well-formed.
        max_prob = torch.max(probs[i]).item()
        rate.append(true_prob / max_prob)
    return torch.FloatTensor(rate).cuda()
def get_prob_rate_cross(logits, label, t):
    """Per-sample ratio of the true-class probability to the highest
    probability among the first (n_classes - t) columns.

    :param logits: raw scores, shape (batch, n_classes)
    :param label: ground-truth class indices
    :param t: number of trailing columns excluded from the max
    :return: CUDA FloatTensor of shape (batch,)
    """
    probs = F.softmax(logits, dim=1)
    rate = []
    num = probs.size()[0]
    for i in range(num):
        true_prob = probs[i][label[i]].item()
        # BUGFIX: .item() converts the 0-dim tensor from torch.max so the
        # result list contains plain floats.
        max_prob = torch.max(probs[i, :-t]).item()
        rate.append(true_prob / max_prob)
    return torch.FloatTensor(rate).cuda()
def get_mean_rate_cross(logits, label, t):
    """Per-row ratio of the true-class softmax probability to the maximum
    softmax probability among all but the last ``t`` columns.

    Fix: the original body was a verbatim, line-for-line duplicate of
    ``get_prob_rate_cross``; delegate to it so there is a single
    implementation to maintain. Behavior is identical.
    """
    return get_prob_rate_cross(logits, label, t)
# ---------------------------------------------------------------------------
# Experiment setup: load the split-CIFAR100 task streams, build the replay
# buffer, the model, the optimizer (with apex AMP), and the SimCLR-style
# augmentation pipeline used during training.
# NOTE: original indentation was lost in this dump; structure reconstructed.
# ---------------------------------------------------------------------------
print('Load data...')
num_class_per_task=10
# 10-task stream (10 classes each) and a 100-way per-class split for diagnostics.
data, taskcla, inputsize, Loder, test_loder = dataloader.get_cifar100_10(seed=args.seed)
data2, taskcla2, inputsize2, Loder2, test_loder2 = dataloader.get_cifar100_100d(seed=args.seed)
print('Input size =', inputsize, '\nTask info =', taskcla)
buffero = buffer(args).cuda()  # experience-replay buffer
Basic_model = b_model(num_class_per_task).cuda()
llabel = {}  # per-task label bookkeeping (reset each batch below)
Optimizer = Adam(Basic_model.parameters(), lr=0.001, betas=(0.9, 0.99),
                 weight_decay=1e-4)  # SGD(Basic_model.parameters(), lr=0.02, momentum=0.9)
from apex import amp
# Mixed-precision training (apex O1).
Basic_model, Optimizer = amp.initialize(Basic_model, Optimizer,opt_level="O1")
hflip = TL.HorizontalFlipLayer().cuda()
cutperm = TL.CutPerm().cuda()
with torch.no_grad():
    resize_scale = (0.6, 1.0)  # resize scaling factor,default [0.08,1]
    # if P.resize_fix:  # if resize_fix is True, use same scale
    #     resize_scale = (P.resize_factor, P.resize_factor)
    # Align augmentation
    # color_jitter = TL.ColorJitterLayer(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.8).cuda()
    color_gray = TL.RandomColorGrayLayer(p=0.2).cuda()
    resize_crop = TL.RandomResizedCropLayer(scale=resize_scale, size=[32, 32, 3]).cuda()
    simclr_aug = transform = torch.nn.Sequential(color_gray, resize_crop,
                                                 # color_jitter,  # does not change the size, but perturbs channel values; mixes old and new
                                                 # resize_crop,
                                                 )
    # color_gray,  # this one doesn't change size either; mix-and-match
    # resize_crop,
# for n,w in Basic_model.named_parameters():
#     print(n,w.shape)
Max_acc = []  # best accuracy seen so far per task
print('=' * 100)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'GPU ' + os.environ["CUDA_VISIBLE_DEVICES"])
print('=' * 100)
class_holder = []      # all classes encountered so far
class_prototype = {}   # per-class prototype slot (initialized to 0)
buffer_per_class = 7
flip_num = 2           # how many flipped copies per sample
# Running softmax-mass statistics used to re-weight the replay loss.
negative_logits_SUM = None
positive_logits_SUM = None
num_SUM = 0
Category_sum=None
import pdb
#pdb.set_trace()
# ---------------------------------------------------------------------------
# Main continual-learning loop: one pass over the 10 tasks in fixed order.
# Per batch: contrastive (SimCLR-style) losses on current + buffer data, a
# statistics-reweighted cross-entropy on replayed samples, and reservoir
# updates of the replay buffer. Indentation reconstructed from a dump with
# stripped whitespace — nesting below is the reviewer's best reading.
# ---------------------------------------------------------------------------
for run in range(1):
    # rank = torch.randperm(len(Loder))
    rank = torch.arange(0,10)#tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    for i in range(len(Loder)):
        new_class_holder = []  # classes first seen in this task
        print(i)
        task_id = i
        prev_index=True
        if i > 0:
            # Grow the classifier head and the running statistics by one task.
            Basic_model.change_output_dim(num_class_per_task)
            Category_sum = torch.cat((Category_sum, torch.zeros(num_class_per_task)))
            negative_logits_SUM = torch.cat(
                (negative_logits_SUM, torch.zeros(num_class_per_task).cuda()))
            positive_logits_SUM = torch.cat(
                (positive_logits_SUM, torch.zeros(num_class_per_task).cuda()))
            # Category_sum = torch.cat((Category_sum, torch.zeros(num_class_per_task)))
            # negative_logits_SUM = torch.cat((negative_logits_SUM, torch.zeros(num_class_per_task).cuda()))
            # positive_logits_SUM = torch.cat((positive_logits_SUM, torch.zeros(num_class_per_task).cuda()))
        #if task_id>=2:
        #    for name,param in Basic_model.named_parameters():
        #        if "layer1.0" in name:
        #            param.requires_grad=False
        #        if "layer2.0" in name:
        #            param.requires_grad=False
        #        if "layer3.0" in name:
        #            param.requires_grad=False
        train_loader = Loder[rank[i].item()]['train']
        # Per-task statistics (lowercase) vs. lifetime statistics (UPPERCASE).
        negative_logits_sum=None
        positive_logits_sum=None
        sum_num=0
        category_sum = None
        for epoch in range(1):
            Basic_model.train()
            num_d = 0  # samples seen so far in this task (progress logging)
            for batch_idx, (x, y) in enumerate(train_loader):
                # if batch_idx>=10:
                #     continue
                num_d += x.shape[0]
                if num_d % 5000 == 0:
                    print(num_d, num_d / 10000)
                llabel[i] = []
                Y = deepcopy(y)
                # Register any classes appearing for the first time.
                for j in range(len(Y)):
                    if Y[j] not in class_holder:
                        class_holder.append(Y[j].detach())
                        class_prototype[Y[j].detach()] = 0
                        new_class_holder.append(Y[j].detach())
                #if i > 0:
                #    Basic_model.change_output_dim(num_class_per_task)
                # if i > 0:
                #Basic_model.change_output_dim(1)
                Optimizer.zero_grad()
                # if args.cuda:
                x, y = x.cuda(), y.cuda()
                ori_x = x.detach()  # keep the raw batch for buffer insertion
                ori_y = y.detach()
                x = x.requires_grad_()
                # import pdb
                # pdb.set_trace()
                # NOTE(review): `&` binds tighter than `==`, so this condition
                # reduces to `batch_idx == 0`; `batch_idx==0 and task_id==0`
                # was presumably intended — confirm before changing.
                if batch_idx==0&task_id==0:
                    cur_x, cur_y,_ = torch.zeros(1),torch.zeros(1),torch.zeros(1)#,None,None#buffero.onlysample(22, task=task_id)
                else:
                    # Sample some current-task examples already in the buffer.
                    cur_x, cur_y, _,_ = buffero.onlysample(22, task=task_id)
                if len(cur_x.shape) > 3:  # only if real images were sampled
                    x = torch.cat((x, cur_x), dim=0)
                    y = torch.cat((y, cur_y))
                # Two views of the batch for the contrastive loss.
                images1 = torch.cat([torch.rot90(x, rot, (2, 3)) for rot in range(1)])  # 4B
                images2 = torch.cat([torch.rot90(x, rot, (2, 3)) for rot in range(1)])  # 4B
                images_pair = torch.cat([images1, simclr_aug(images2)], dim=0)  # 8B
                labels1 = y.cuda()
                # print("LLLL",labels1.shape)
                rot_sim_labels = torch.cat([labels1 + 100 * i for i in range(1)], dim=0)
                Rot_sim_labels = torch.cat([labels1 + 0 * i for i in range(1)], dim=0)
                rot_sim_labels = rot_sim_labels.cuda()
                outputs_aux = Basic_model(images_pair, is_simclr=True)
                simclr = normalize(outputs_aux)  # normalize
                sim_matrix = get_similarity_matrix(simclr)
                loss_sim1 = Supervised_NT_xent(sim_matrix, labels=rot_sim_labels,
                                               temperature=0.07)
                if not buffero.is_empty():
                    # ---- replay branch: mix buffer data with the current batch ----
                    buffer_batch_size = 64
                    # x = x.requires_grad_()
                    x = RandomFlip(x, flip_num)
                    y = y.repeat(flip_num)
                    x = x.requires_grad_()
                    hidden_pred = Basic_model.f_train(simclr_aug(x))
                    pred_y = Basic_model.linear(hidden_pred)
                    #
                    t = num_class_per_task#len(new_class_holder)
                    if task_id>0:
                        # Train only the new head; penalize old-head magnitudes.
                        pred_y_new = pred_y[:, -t:]#torch.cat([Basic_model.linear(hidden_pred)[:, :-t].data.detach(),pred_y[:, -t:]],dim=1)
                        loss_balance = (pred_y[:,:-t]**2).mean()
                    else:
                        pred_y_new=pred_y
                        loss_balance=0
                    min_y = min(new_class_holder)
                    y_new = y - num_class_per_task*i#min_y
                    num_x=ori_y.size()[0]
                    rate=len(new_class_holder)/len(class_holder)  # fraction of new classes
                    mem_x, mem_y, logits, bt = buffero.sample(int(buffer_batch_size*(1-rate))*1, exclude_task=task_id)
                    #if task_id>0:
                    #distribution = torch.ones(2 * task_id).cuda()
                    #distribution /= distribution.sum()
                    # pdb.set_trace()
                    # if task_id>=3:
                    #     pdb.set_trace()
                    # mem_x, mem_y, _, bt = buffero.pro_class_sample(int(buffer_batch_size*(1-rate))*1, distribution=distribution)
                    # index_only = torch.randperm(mem_y_only.size()[0])
                    # mem_x_only = mem_x_only[index_only][:]
                    #mem_y_only = mem_y_only[index_only][:]
                    index_x=ori_x
                    index_y=ori_y
                    if len(cur_x.shape) > 3:
                        index_x = torch.cat((index_x, cur_x), dim=0)
                        index_y = torch.cat((index_y, cur_y))
                    all_x = torch.cat((mem_x, index_x), dim=0)
                    all_y = torch.cat((mem_y, index_y))
                    # index_cur = torch.randperm(index_y.size()[0])
                    # index_x = index_x[index_cur][:]
                    #index_y = index_y[index_cur][:]
                    # if len(class_holder)>len(new_class_holder):
                    #     prev_hiddens=Previous_model.forward(mem_x)
                    #     cur_hiddens=Basic_model.forward(mem_x)[:,:-len(new_class_holder)]
                    # cur_logits=torch.sum(F.softmax(Basic_model.forward(mem_x))[:,:-len(new_class_holder)],dim=1)
                    # _,idx_cur=torch.sort(cur_logits)
                    # mem_x=mem_x[idx_cur]
                    # mem_y=mem_y[idx_cur]
                    # import pdb
                    # pdb.set_trace()
                    # logits_cur=F.softmax(Basic_model.forward(ori_x))
                    # logits_pre=torch.sum(logits_cur[:,:-len(new_class_holder)],dim=1)
                    # _,idx_pre=torch.sort(logits_pre,descending=True)
                    # ori_x=ori_x[idx_pre]
                    # ori_y=ori_y[idx_pre]
                    # Build the mixed replay batch: (1-rate) old + rate current.
                    mem_x = torch.cat((mem_x[:int(buffer_batch_size*(1-rate))],index_x[:int(buffer_batch_size*rate)]),dim=0)
                    mem_y = torch.cat((mem_y[:int(buffer_batch_size*(1-rate))],index_y[:int(buffer_batch_size*rate)]))
                    logits = torch.cat((logits[:int(buffer_batch_size*(1-rate))],Basic_model.f_train(index_x[:int(buffer_batch_size*rate)])),dim=0)
                    index = torch.randperm(mem_y.size()[0])
                    mem_x=mem_x[index][:]
                    mem_y=mem_y[index][:]
                    logits=logits[index][:]
                    # mem_dif: copies of mem_x whose "background" is swapped in
                    # from images of *other* classes (feature-mask mixing).
                    mem_dif = torch.zeros_like(mem_x)
                    mem_dif.data = deepcopy(mem_x.data)
                    loss_div = 0
                    with torch.no_grad():
                        from utils import feat_normalized
                        feat = feat_normalized(Basic_model, mem_x)
                        feat_all = feat_normalized(Basic_model, all_x)
                        num = mem_x.shape[0]
                        # repeat_num=2
                        # mem_x = mem_x.repeat(repeat_num, 1, 1, 1)
                        mask_object = feat > 0.5#args.p2
                        mask_object_2 = feat_all > 0.5#0.5args.p2
                        for ii in range((task_id) * 2):
                            # index_mix=[]
                            index = mem_y == ii
                            index_dif = all_y != ii  # .float()#
                            if index.sum() > 0:
                                # for tt in range(repeat_num-1):
                                #     index_mix.append(mem_y==ii+1)
                                # pdb.set_trace()
                                random_id = torch.from_numpy(
                                    np.random.choice(index_dif.sum().cpu().item(), index.sum().cpu().item(),
                                                     replace=True)).cuda()  # torch.randperm(index.sum())
                                mask_background1 = ((mask_object[index]).float() + (
                                    ~mask_object_2[index_dif][random_id]).float() == 2)
                                mask_background2 = mask_object[index].float() - mask_background1.float()
                                # pdb.set_trace()
                                mem_dif[index] = mem_x[:num][index] * (
                                    1 - mask_object[index].float() + mask_background2.float()) + all_x[index_dif][
                                    random_id] * mask_background1
                                # pdb.set_trace()
                    # mem_y=mem_y.repeat(repeat_num)
                    teacher_temperature = 0.1
                    student_temperature = 0.07
                    # mem_x = mem_x.requires_grad_()
                    # Teacher relations on the clean batch (no grad) ...
                    with torch.no_grad():
                        hidden_normal = normalize(Basic_model.simclr(Basic_model.f_train(mem_x)))
                        hidden_same_normal = normalize(Basic_model.simclr(Basic_model.f_train(mem_x)))
                        hidden_same_batch = torch.matmul(hidden_same_normal, hidden_normal.t()) / teacher_temperature
                        relation_sam = F.softmax(hidden_same_batch, dim=0)
                    # ... matched by student relations on the mixed batch (KL).
                    mem_dif = mem_dif.requires_grad_()
                    hidden_dif_normal = normalize(Basic_model.simclr(Basic_model.f_train(mem_dif)))
                    hidden_dif_batch = torch.matmul(hidden_dif_normal, hidden_normal.t()) / student_temperature
                    relation_dif = F.log_softmax(hidden_dif_batch, dim=0)
                    loss_dif = F.kl_div(relation_dif, relation_sam,
                                        reduction='batchmean')  # -(relation_sam * torch.nn.functional.log_softmax(relation_dif, 1)).sum()/relation_dif.shape[0]
                    mem_y = mem_y.reshape(-1)
                    mem_x = mem_x.requires_grad_()
                    # Supervised contrastive loss on the replay batch.
                    images1_r = torch.cat([Rotation(mem_x, r) for r in range(1)])
                    images2_r = torch.cat([Rotation(mem_x, r) for r in range(1)])
                    images_pair_r = torch.cat([images1_r, simclr_aug(images2_r)], dim=0)
                    u = Basic_model(images_pair_r, is_simclr=True)
                    images_out_r = u
                    simclr_r = normalize(images_out_r)
                    rot_sim_labels_r = torch.cat([mem_y.cuda() + 100 * i for i in range(1)], dim=0)
                    sim_matrix_r = get_similarity_matrix(simclr_r)
                    loss_sim_r = Supervised_NT_xent(sim_matrix_r, labels=rot_sim_labels_r, temperature=0.07)
                    lo1 = 1 * loss_sim_r + 1*loss_sim1
                    hidden = Basic_model.f_train(mem_x)
                    # if len(class_holder) > len(new_class_holder):
                    #     T=2
                    #     loss_kd= 1.0*((hidden-logits)**2).mean()+2.0*((prev_hiddens-cur_hiddens)**2).mean()
                    #else:
                    #     loss_kd = 1.0*((hidden-logits)**2).mean()
                    # if len(class_holder) > len(new_class_holder):
                    # import pdb
                    # pdb.set_trace()
                    mem_x = RandomFlip(mem_x, flip_num)
                    mem_y = mem_y.repeat(flip_num)
                    y_pred = Basic_model.forward(mem_x)
                    y_pred_hidden=Basic_model.f_train(mem_x)
                    loss_old=0
                    #if i >0:
                    #    pdb.set_trace()
                    #    prev_logits= Previous_model.linear(y_pred_hidden)
                    #    loss_old=F.mse_loss(prev_logits,y_pred[:,:-2])
                    y_pred_new = y_pred
                    loss_only=0
                    # category_matrix_new = torch.zeros(logits_new.shape)
                    # Hand-rolled softmax so individual probabilities can be
                    # re-weighted below.
                    exp_new = torch.exp(y_pred_new)
                    # positive_matrix = torch.ones_like(exp_new)
                    # Negative_matrix = torch.ones_like(exp_new)
                    # for i_v in range(int(exp_new.shape[0])):
                    #     category_matrix_new[i_v][mem_y[i_v]] = 1
                    #     Negative_matrix[i_v][:-len(new_class_holder)] = 1 / (torch.exp(-NT[:-len(new_class_holder)] - 0.1))
                    #     if mem_y[i_v] in new_class_holder:
                    #         continue
                    #1 / NT[:-len(new_class_holder)]
                    # else:
                    #     positive_matrix[i_v][mem_y[i_v]] = 1#1/(NT[mem_y[i_v]])
                    # if mem_y[i_v] in new_class_holder:
                    #     Negative_matrix[i_v][:-len(new_class_holder)] = 1 / NT[:-len(new_class_holder)]
                    #     positive_matrix[i_v][mem_y[i_v]] = 1  # 1 / (NT[mem_y[i_v]])
                    #else:
                    #     positive_matrix[i_v][mem_y[i_v]] = 1 / (torch.exp(-ANT[mem_y[i_v]] - 0.1))
                    # pdb.set_trace()
                    # if task_id > 0:
                    #     print(Negative_matrix)
                    exp_new = exp_new# * Negative_matrix
                    # pdb.set_trace()
                    exp_new_sum = torch.sum(exp_new, dim=1)
                    logits_new = (exp_new / exp_new_sum.unsqueeze(1))
                    # One-hot matrix of the replay labels.
                    category_matrix_new = torch.zeros(logits_new.shape)
                    for i_v in range(int(logits_new.shape[0])):
                        category_matrix_new[i_v][mem_y[i_v]] = 1
                    # positive_matrix[i_v][mem_y[i_v]]=0
                    # if task_id>0:
                    #     import pdb
                    #     pdb.set_trace()
                    # import pdb
                    # pdb.set_trace()
                    # Split probability mass into true-class (positive) and
                    # wrong-class (false) parts, then accumulate statistics.
                    positive_prob = torch.zeros(logits_new.shape)
                    false_prob = deepcopy(logits_new.detach())
                    for i_t in range(int(logits_new.shape[0])):
                        false_prob[i_t][mem_y[i_t]] = 0
                        positive_prob[i_t][mem_y[i_t]] = logits_new[i_t][mem_y[i_t]].detach()
                    if negative_logits_sum is None:
                        negative_logits_sum = torch.sum(false_prob, dim=0)
                        positive_logits_sum = torch.sum(positive_prob, dim=0)
                        if i == 0:
                            Category_sum = torch.sum(category_matrix_new, dim=0)
                        else:
                            Category_sum += torch.sum(category_matrix_new, dim=0)  # .cuda()
                        category_sum = torch.sum(category_matrix_new, dim=0)
                    else:
                        Category_sum += torch.sum(category_matrix_new, dim=0)  # .cuda()
                        negative_logits_sum += torch.sum(false_prob, dim=0)
                        positive_logits_sum += torch.sum(positive_prob, dim=0)
                        category_sum += torch.sum(category_matrix_new, dim=0)
                    if negative_logits_SUM is None:
                        negative_logits_SUM = torch.sum(false_prob, dim=0).cuda()
                        positive_logits_SUM = torch.sum(positive_prob, dim=0).cuda()
                    else:
                        negative_logits_SUM += torch.sum(false_prob, dim=0).cuda()
                        positive_logits_SUM += torch.sum(positive_prob, dim=0).cuda()
                    sum_num += int(logits_new.shape[0])
                    if batch_idx < 5:
                        # Warm-up: flat weights until statistics are meaningful.
                        ANT = torch.ones(len(class_holder))
                        NT = torch.ones(len(class_holder))
                    else:
                        # pdb.set_trace()
                        ANT = (Category_sum.cuda() - positive_logits_SUM).cuda()/negative_logits_SUM.cuda() #/ (Category_sum.cuda() - positive_logits_SUM).cuda()
                        NT = negative_logits_sum.cuda() / (category_sum - positive_logits_sum).cuda()
                    # Per-sample weight on the true class, driven by ANT.
                    ttt = torch.zeros(logits_new.shape)
                    for qqq in range(mem_y.shape[0]):
                        if mem_y[qqq]>=len(ANT):
                            ttt[qqq][mem_y[qqq]] = 1
                        else:
                            ttt[qqq][mem_y[qqq]] = 2 / (1+torch.exp(1-(ANT[mem_y[qqq]])))
                    # if mem_y[qqq] in new_class_holder:
                    #     ttt[qqq][mem_y[qqq]] = 1  # (ANT[mem_y[qqq]])
                    #else:
                    #     ttt[qqq][mem_y[qqq]] = 1 / (1+torch.exp(-ANT[mem_y[qqq]] - 1))
                    # logits_new==logits_new_p
                    #import pdb
                    #pdb.set_trace()
                    # if len(class_holder) > len(new_class_holder):
                    #     identity_matrix_new=torch.ones(logits_new.shape)
                    # logits_=F.softmax(y_pred_new,dim=1)
                    #if batch_idx>0:
                    #     ANT=negative_logits_SUM.cuda() / (Category_sum.cuda() - positive_logits_SUM).cuda()
                    #.detach()
                    # aaa=F.nll_loss(torch.log(logits_new),mem_y)
                    # if batch_idx>3:
                    #     pdb.set_trace()
                    #+0.05#1+torch.exp(-mem_y[qqq].float())
                    # print(ttt)
                    # Weighted NLL on replay + CE on the new-task head.
                    loss_n=-torch.sum(torch.log(logits_new)*ttt.cuda())/mem_y.shape[0]
                    loss =2* loss_n + 1 * F.cross_entropy(
                        pred_y_new, y_new)#+loss_balance#+2*loss_sim_r+loss_sim1#+loss_dif#+loss_old#+2*loss_only
                else:
                    # ---- cold-start branch: buffer still empty, plain CE on
                    # the current task's head only ----
                    x = RandomFlip(x, flip_num)
                    y = y.repeat(flip_num)
                    x = x.requires_grad_()
                    hidden_pred = Basic_model.f_train(simclr_aug(x))
                    pred_y = Basic_model.linear(hidden_pred)
                    t = num_class_per_task#len(new_class_holder)
                    pred_y_new = pred_y[:, -t:]
                    min_y = num_class_per_task*i#min(new_class_holder)
                    y_new = y - min_y
                    loss = F.cross_entropy(pred_y_new, y_new)
                # Store the raw (unaugmented) batch plus its features.
                copy_x = ori_x
                copy_y = ori_y.unsqueeze(1)
                copy_hidden = Basic_model.f_train(copy_x).detach()
                with amp.scale_loss(loss, Optimizer) as scaled_loss:
                    scaled_loss.backward()
                # loss.backward()
                Optimizer.step()
                buffero.add_reservoir(x=copy_x.detach(), y=copy_y.squeeze(1).detach(), logits=copy_hidden.float().detach(),
                                      t=i)
        # ---- end of task: checkpoint and evaluate all tasks seen so far ----
        weights_path = 'weights_pre.pt'
        torch.save(Basic_model.state_dict(), weights_path)
        Previous_model = deepcopy(Basic_model)
        print(len(class_holder))
        # import pdb
        # pdb.set_trace()
        #if task_id>0:
        print(negative_logits_SUM.cuda(),(Category_sum.cuda()-positive_logits_SUM).cuda(),category_sum,sum_num,negative_logits_SUM.cuda()/(Category_sum.cuda()-positive_logits_SUM).cuda())
        for j in range(i + 1):
            print("ori", rank[j].item())
            a = test_model(Loder[rank[j].item()]['test'], j)
            if j == i:
                Max_acc.append(a)
            if a > Max_acc[j]:
                Max_acc[j] = a
        # if task_id>=1:
        #     import pdb
        #     pdb.set_trace()
        # Debug: per-class accuracy over the 100-way split, then break into pdb.
        # NOTE(review): placement inside the task loop is the reviewer's
        # reconstruction (indentation was lost); confirm against the repo.
        import pdb
        class_acc=[]
        for j in range(100):
            acc = test_model(Loder2[j]['test'], j)
            class_acc.append(acc)
        print(class_acc,'!')
        pdb.set_trace()
# ---------------------------------------------------------------------------
# Final evaluation on the joint test set, then report per-task best
# accuracies and their mean.
# ---------------------------------------------------------------------------
print('=' * 100)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'GPU ' + os.environ["CUDA_VISIBLE_DEVICES"])
print('=' * 100)
import pdb
# NOTE(review): test_loss is never accumulated; it stays 0 in the log line.
test_loss = 0
correct = 0
num = 0
for batch_idx, (data, target) in enumerate(test_loder):
    data, target = data.cuda(), target.cuda()
    # data, target = Variable(data, volatile=True), Variable(target)
    Basic_model.eval()
    pred = F.softmax(Basic_model.forward(data),dim=1)
    Pred = pred.data.max(1, keepdim=True)[1]  # argmax class
    num += data.size()[0]
    # print("final", Pred, target.data.view_as(Pred))
    # print(target,"True",pred)
    correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
test_accuracy = 100. * correct / num  # len(data_loader.dataset)
print(
    'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
    .format(
        test_loss, correct, num,
        100. * correct / num, ))
print(Max_acc)
import pdb
pdb.set_trace()
# Mean of the per-task best accuracies.
# NOTE(review): `sum` shadows the builtin here; harmless at end of script.
n = 0
sum = 0
for m in range(len(Max_acc)):
    sum += Max_acc[m]
    n += 1
print(sum / n)
| 33,352
| 38.65874
| 187
|
py
|
GSA
|
GSA-main/GSA_CVPR/cifar.py
|
import os,sys
import numpy as np
import torch
#import utils
from torchvision import datasets,transforms
from sklearn.utils import shuffle
import torch.utils.data as Data
def get(seed=0,pc_valid=0.10):
    """Build the 5-task split-CIFAR10 benchmark (2 classes per task).

    On first call, downloads CIFAR10, splits it by label into 5 tasks plus
    one 'all classes' task, and caches the tensors under
    ./data/binary_cifar_; later calls reload the cache.

    Returns:
        data: dict of per-task {'train'/'test': {'x','y'}, 'name','ncla'}.
        taskcla: list of (task_id, n_classes) for the 5 real tasks.
        size: input shape [3, 32, 32].
        Loder: dict of per-task train/test DataLoaders.
        test_loader: DataLoader over the full CIFAR10 test set.

    NOTE(review): pc_valid / NUM_VALID are computed but never used — no
    validation split is actually produced. Indentation below was
    reconstructed from a whitespace-stripped dump.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num=2
    # CIFAR10
    if not os.path.isdir('./data/binary_cifar_/'):
        os.makedirs('./data/binary_cifar_')
        t_num = 2
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # One task per consecutive pair of labels.
        for t in range(10//t_num):
            data[t] = {}
            data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_num*t, t_num*(t+1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra task holding all 10 classes (joint evaluation).
        t = 10 // t_num
        data[t] = {}
        data[t]['name'] = 'cifar10-all'
        data[t]['ncla'] = 10
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others
    n=0
    for t in data.keys():
        print("T",t)
        taskcla.append((t, data[t]['ncla']))
        n+=data[t]['ncla']
    data['ncla'] = n
    # DataLoaders for the 5 real tasks (the 'all' task is excluded).
    Loder={}
    Loder_test={}
    for t in range(5):
        print("t",t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        Loder_test[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        u2 = torch.tensor(data[t]['test']['x'].reshape(-1, 3, 32, 32))
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        NUM_VALID = int(round(TOTAL_NUM * 0.1))   # NOTE(review): unused
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))  # NOTE(review): unused
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        #u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
        #Loder[t]['valid'] = valid_loader
    # Joint test loader over the full CIFAR10 test split.
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    test_dataset= datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))#Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    test_dataset=datasets.CIFAR10('./data/', train=False, download=True,
                                  transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10//data[0]['ncla']], size,Loder,test_loader
def get_pretrain_AOP(seed=0,pc_valid=0.10):
    """Build split-CIFAR10 (2 classes/task) on top of frozen CLIP ViT-B/32 features.

    Every image is encoded once with CLIP's image encoder into a 512-d vector
    and the vectors are cached as binary files; later calls reload the cache.

    Returns:
        data: per-task dict of {'train'/'test': {'x','y'}, 'name','ncla'}.
        taskcla: list of (task_id, n_classes) for the 5 real tasks.
        size: nominal input shape [3, 32, 32].
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num=2
    # CIFAR10
    import clip
    device = "cuda" if torch.cuda.is_available() else "cpu"
    import pdb
    # pdb.set_trace()
    model, preprocess = clip.load('ViT-B/32', device)
    # FIX: the original guarded on (and created) './data/binary_cifar_pretr'
    # but saved and reloaded the features under './data/binary_cifar_p'.
    # If that directory did not exist, torch.save raised FileNotFoundError;
    # if only the _pretr marker existed, torch.load read a missing/stale
    # cache. Guard and create the directory that is actually used.
    if not os.path.isdir('./data/binary_cifar_p/'):
        os.makedirs('./data/binary_cifar_p', exist_ok=True)
        t_num = 2
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # Encode each task's images with CLIP (one label pair per task).
        for t in range(10//t_num):
            print(t,"t")
            num=0
            data[t] = {}
            data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_num*t, t_num*(t+1)):
                        num+=1
                        if num%100==0:
                            print(num)
                        with torch.no_grad():
                            # Undo the tensor transform so CLIP's own
                            # preprocess can be applied to a PIL image.
                            image=transforms.ToPILImage()(image.squeeze(0))
                            image_input = preprocess(image).unsqueeze(0).to(device)
                            image_features = model.encode_image(image_input)
                            image=image_features.squeeze(0)
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra task holding all 10 classes.
        t = 10 // t_num
        data[t] = {}
        data[t]['name'] = 'cifar10-all'
        data[t]['ncla'] = 10
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                with torch.no_grad():
                    image = transforms.ToPILImage()(image.squeeze(0))
                    image_input = preprocess(image).unsqueeze(0).to(device)
                    image_features = model.encode_image(image_input)
                    image = image_features.squeeze(0)
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save (512-d CLIP feature vectors).
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, 512)
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_p'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_p'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_p'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_p'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others
    n = 0
    for t in data.keys():
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # pdb.set_trace()
    return data, taskcla[:10 // data[0]['ncla']], size
def get_pretrain(seed=0,pc_valid=0.10):
    """Build split-CIFAR10 on frozen CLIP ViT-B/32 features, plus DataLoaders.

    Like get_pretrain_AOP, but additionally wraps the cached 512-d feature
    tensors of the 5 real tasks in train/test DataLoaders and returns a
    joint CIFAR10 image test loader.

    Returns:
        data, taskcla (5 real tasks), size, Loder (per-task loaders),
        test_loader (full CIFAR10 *image* test set — note it is raw images,
        not CLIP features, matching the original behavior).
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num=2
    # CIFAR10
    import clip
    device = "cuda" if torch.cuda.is_available() else "cpu"
    import pdb
    # pdb.set_trace()
    model, preprocess = clip.load('ViT-B/32', device)
    # FIX: the original guarded on (and created) './data/binary_cifar_pretr'
    # but saved/loaded the features under './data/binary_cifar_p', causing a
    # FileNotFoundError on torch.save when that directory was absent. Guard
    # and create the directory that is actually used.
    if not os.path.isdir('./data/binary_cifar_p/'):
        os.makedirs('./data/binary_cifar_p', exist_ok=True)
        t_num = 2
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # Encode each task's images with CLIP (one label pair per task).
        for t in range(10//t_num):
            print(t,"t")
            num=0
            data[t] = {}
            data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_num*t, t_num*(t+1)):
                        num+=1
                        if num%100==0:
                            print(num)
                        with torch.no_grad():
                            image=transforms.ToPILImage()(image.squeeze(0))
                            image_input = preprocess(image).unsqueeze(0).to(device)
                            image_features = model.encode_image(image_input)
                            image=image_features.squeeze(0)
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra task holding all 10 classes.
        t = 10 // t_num
        data[t] = {}
        data[t]['name'] = 'cifar10-all'
        data[t]['ncla'] = 10
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                with torch.no_grad():
                    image = transforms.ToPILImage()(image.squeeze(0))
                    image_input = preprocess(image).unsqueeze(0).to(device)
                    image_features = model.encode_image(image_input)
                    image = image_features.squeeze(0)
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save (512-d CLIP feature vectors).
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, 512)
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_p'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_p'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_p'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_p'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others
    n=0
    for t in data.keys():
        print("T",t)
        taskcla.append((t, data[t]['ncla']))
        n+=data[t]['ncla']
    data['ncla'] = n
    # DataLoaders over the cached 512-d features for the 5 real tasks.
    Loder={}
    Loder_test={}
    for t in range(5):
        print("t",t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        Loder_test[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 512))  # .item()
        u2 = torch.tensor(data[t]['test']['x'].reshape(-1, 512))
        TOTAL_NUM = u1.size()[0]
        NUM_VALID = int(round(TOTAL_NUM * 0.1))   # NOTE(review): unused
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))  # NOTE(review): unused
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=32,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=32,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
        #Loder[t]['valid'] = valid_loader
    # Joint test loader over the full CIFAR10 *image* test split.
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    test_dataset=datasets.CIFAR10('./data/', train=False, download=True,transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    #import pdb
    #pdb.set_trace()
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=32,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10//data[0]['ncla']], size,Loder,test_loader
def get_a_order(seed=0,pc_valid=0.10):
    """Build 5-task split-CIFAR10 with a fixed *shuffled* class order.

    Classes are permuted with numpy seed 101 (the ``seed`` argument is not
    used for the permutation) and each original label is remapped to its
    position in the shuffled order before being assigned to a task. Tensors
    are cached under ./data/binary_cifar_a1.

    Returns:
        data, taskcla (5 real tasks), size, Loder (per-task loaders),
        test_loader (remapped all-class test set, task index 5).

    NOTE(review): pc_valid is unused; indentation was reconstructed from a
    whitespace-stripped dump.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num=2
    # CIFAR10
    if not os.path.isdir('./data/binary_cifar_a1/'):
        os.makedirs('./data/binary_cifar_a1')
        t_num = 2
        # Fixed permutation of the 10 class labels.
        np.random.seed(101)
        cls_list = [i for i in range(10)]
        np.random.shuffle(cls_list)
        class_mapping = np.array(cls_list, copy=True)
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # Assign each image to the task of its *remapped* label.
        for t in range(10//t_num):
            data[t] = {}
            data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if cls_list.index(label) in range(t_num*t, t_num*(t+1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(cls_list.index(label))
        # Extra task holding all 10 (remapped) classes.
        t = 10 // t_num
        data[t] = {}
        data[t]['name'] = 'cifar10-all'
        data[t]['ncla'] = 10
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(cls_list.index(label))
        # "Unify" and save
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_a1'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_a1'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_a1'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_a1'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others
    n=0
    for t in data.keys():
        print("T",t)
        taskcla.append((t, data[t]['ncla']))
        n+=data[t]['ncla']
    data['ncla'] = n
    # DataLoaders for the 5 real tasks.
    Loder={}
    Loder_test={}
    for t in range(5):
        print("t",t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        Loder_test[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        u2 = torch.tensor(data[t]['test']['x'].reshape(-1, 3, 32, 32))
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        NUM_VALID = int(round(TOTAL_NUM * 0.1))   # NOTE(review): unused
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))  # NOTE(review): unused
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        #u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=32,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
        #Loder[t]['valid'] = valid_loader
    # mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    # std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset= datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))#Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    # test_dataset=datasets.CIFAR10('./data/', train=False, download=True,
    #                               transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    # Joint test loader over the remapped all-class task (index 5).
    dataset_new_test = Data.TensorDataset(data[5]['test']['x'], data[5]['test']['y'])
    test_loader = torch.utils.data.DataLoader(
        dataset_new_test,
        batch_size=64,
        shuffle=True,
    )
    #test_loader = torch.utils.data.DataLoader(
    #    test_dataset,
    #    batch_size=64,
    #    shuffle=True,
    #)
    print("Loder is prepared")
    return data, taskcla[:10//data[0]['ncla']], size,Loder,test_loader
def get_revisit(seed=0,pc_valid=0.10):
    """Split-CIFAR-10 "revisit" benchmark: 5 two-class tasks, each task's
    training set cut into two sequential halves, giving 10 train-only
    loaders Loder[0..9] (Loder[2*t] and Loder[2*t+1] are the halves of
    task t).

    First call builds and caches per-task tensors as .bin files under
    ./data/binary_cifar_; later calls reload the cache.  `seed` and
    `pc_valid` are accepted for signature parity but not used here.

    Returns (data, taskcla, size, Loder, test_loader); test_loader
    iterates the full CIFAR-10 test set, and Loder entries carry only a
    'train' loader (no 'test'/'valid').
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num=2
    # CIFAR10: build the cache only on the first run.
    if not os.path.isdir('./data/binary_cifar_/'):
        os.makedirs('./data/binary_cifar_')
        t_num = 2
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # Tasks 0..4: two consecutive class labels per task.
        for t in range(10//t_num):
            data[t] = {}
            data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_num*t, t_num*(t+1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra entry (index 5) holding all 10 classes.
        t = 10 // t_num
        data[t] = {}
        data[t]['name'] = 'cifar10-all'
        data[t]['ncla'] = 10
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save: stack per-sample tensors and persist as .bin files.
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files (5 tasks + the all-class entry).
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: (task, ncla) pairs and total class count.
    n=0
    for t in data.keys():
        print("T",t)
        taskcla.append((t, data[t]['ncla']))
        n+=data[t]['ncla']
    data['ncla'] = n
    Loder={}
    for t in range(5):
        print("t",t)
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): NUM_VALID/NUM_TRAIN are computed but never used below.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        #u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        # Split task t's training data into two sequential halves ("revisits").
        for i in range(2):
            dataset_new_train = Data.TensorDataset(data[t]['train']['x'][i*int(TOTAL_NUM/2):(i+1)*int(TOTAL_NUM/2)], data[t]['train']['y'][i*int(TOTAL_NUM/2):(i+1)*int(TOTAL_NUM/2)])
            #dataset_new_valid = Data.TensorDataset(data[t]['valid']['x'], data[t]['valid']['y'])
            train_loader = torch.utils.data.DataLoader(
                dataset_new_train,
                batch_size=10,
                shuffle=True,
            )
            Loder[2 * t+ i] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
            Loder[2*t+i]['train'] = train_loader
        #Loder[t]['valid'] = valid_loader
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # NOTE(review): this test_dataset is immediately overwritten below.
    test_dataset= datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))#Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    # Loader over the complete CIFAR-10 test set.
    test_dataset=datasets.CIFAR10('./data/', train=False, download=True,
                                  transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10//data[0]['ncla']], size,Loder,test_loader
def get_cifar100(seed=0,pc_valid=0.10):
    """Split-CIFAR-100 benchmark with 10 tasks of 10 classes each.

    First call downloads CIFAR-100, splits it into 10 ten-class tasks
    plus an extra 100-class entry, and caches every task's tensors as
    .bin files under ./data/binary_cifar100_100; later calls reload the
    cache.  `seed` and `pc_valid` are accepted for signature parity but
    not used here.

    Returns (data, taskcla, size, Loder, test_loader) where Loder[t]
    holds 'train' (batch 10) and 'test' (batch 64) DataLoaders for task
    t, and test_loader iterates the full CIFAR-100 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # CIFAR100: build the .bin cache only on the first run.
    if not os.path.isdir('./data/binary_cifar100_100/'):
        os.makedirs('./data/binary_cifar100_100')
        t_num = 10
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # Tasks 0..9: ten consecutive class labels per task.
        for t in range(100//t_num):
            print(t)
            data[t] = {}
            data[t]['name'] = 'cifar100-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_num*t, t_num*(t+1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra entry (index 10) holding all 100 classes.
        t = 100 // t_num
        data[t] = {}
        data[t]['name'] = 'cifar100-all'
        data[t]['ncla'] = 100
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save: stack per-sample tensors and persist as .bin files.
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_100'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_100'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files (10 tasks + the all-class entry).
    data = {}
    ids = list(np.arange(11))
    print('Task order =', ids)
    for i in range(11):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_100'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_100'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: (task, ncla) pairs and total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    Loder = {}
    for t in range(10):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): NUM_VALID/NUM_TRAIN are computed but never used below.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    #     [transforms.ToTensor(), transforms.Normalize(mean,
    #                                                  std)]))  # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    # Loader over the complete CIFAR-100 test set.
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    # NOTE(review): 10 // ncla == 1 truncates taskcla to the first of the
    # 10 tasks — this looks like a CIFAR-10 leftover (100 // ncla would
    # keep all tasks); confirm against callers before changing.
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_joint(seed=0,pc_valid=0.10):
    """Joint (single-task) CIFAR-100: all 100 classes in one task.

    First call downloads CIFAR-100 and caches the full train/test tensor
    sets as .bin files under ./data/binary_cifar100_j (with t_num=100 the
    "per-task" loop produces exactly one task, duplicated by the
    all-class entry); later calls reload the cache.  `seed` and
    `pc_valid` are accepted for signature parity but not used here.

    Returns (data, taskcla, size, Loder, test_loader) where Loder[0]
    holds 'train' (batch 10) and 'test' (batch 64) DataLoaders over the
    whole dataset and test_loader iterates the full CIFAR-100 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # CIFAR100: build the .bin cache only on the first run.
    if not os.path.isdir('./data/binary_cifar100_j/'):
        os.makedirs('./data/binary_cifar100_j')
        t_num = 100
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # With t_num=100 this runs once (t=0) and keeps every label.
        for t in range(100//t_num):
            print(t)
            data[t] = {}
            data[t]['name'] = 'cifar100-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_num*t, t_num*(t+1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Entry 1: the full 100-class dataset again (all-class copy).
        t = 100 // t_num
        data[t] = {}
        data[t]['name'] = 'cifar100-all'
        data[t]['ncla'] = 100
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save: stack per-sample tensors and persist as .bin files.
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_j'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_j'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files (the joint task plus its all-class duplicate).
    data = {}
    ids = list(np.arange(2))
    print('Task order =', ids)
    for i in range(2):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_j'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_j'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: (task, ncla) pairs and total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    Loder = {}
    for t in range(1):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): NUM_VALID/NUM_TRAIN are computed but never used below.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    #     [transforms.ToTensor(), transforms.Normalize(mean,
    #                                                  std)]))  # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    # Loader over the complete CIFAR-100 test set.
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    # NOTE(review): ncla == 100 here, so 10 // 100 == 0 and the returned
    # taskcla slice is EMPTY — looks like a CIFAR-10 leftover; confirm
    # against callers before changing.
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_50(seed=0,pc_valid=0.10):
    """Split-CIFAR-100 benchmark with 50 tasks of 2 classes each.

    On the first call CIFAR-100 is downloaded, partitioned into 50
    two-class tasks plus one extra 100-class entry, and every task's
    tensors are cached as .bin files; subsequent calls reload the cache.

    Args:
        seed: unused (kept for signature parity with the sibling loaders).
        pc_valid: unused (no validation split is materialised).

    Returns:
        (data, taskcla, size, Loder, test_loader):
        data        task id -> {'name', 'ncla', 'train', 'test'} tensors
        taskcla     [(task, ncla), ...] (truncated — see note at return)
        size        input shape [3, 32, 32]
        Loder       task id -> {'train': DataLoader, 'test': DataLoader}
        test_loader DataLoader over the full CIFAR-100 test set
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # One cache directory used consistently for the existence check, the
    # saves and the loads.  BUG FIX: the check previously tested
    # './data/binary_cifar100_22' while the files were written to and read
    # from './data/binary_cifar100_2', so a fresh run crashed inside
    # torch.save with FileNotFoundError.
    bin_dir = os.path.expanduser('./data/binary_cifar100_2')
    if not os.path.isdir(bin_dir):
        os.makedirs(bin_dir)
        t_class_num = 2
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat = {}
        dat['train'] = datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
        dat['test'] = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
        # Tasks 0..49: two consecutive class labels per task.
        for t in range(100 // t_class_num):
            print(t)
            data[t] = {}
            data[t]['name'] = 'cifar100-' + str(t_class_num * t) + '-' + str(t_class_num * (t + 1) - 1)
            data[t]['ncla'] = t_class_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_class_num * t, t_class_num * (t + 1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra entry (index 50) holding all 100 classes.
        t = 100 // t_class_num
        data[t] = {}
        data[t]['name'] = 'cifar100-all'
        data[t]['ncla'] = 100
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save: stack per-sample tensors and persist as .bin files.
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(bin_dir, 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(bin_dir, 'data' + str(t) + s + 'y.bin'))
    # Load binary files (50 tasks + the all-class entry).
    data = {}
    ids = list(np.arange(51))
    print('Task order =', ids)
    for i in range(51):
        data[i] = dict.fromkeys(['name', 'ncla', 'train', 'test'])
        for s in ['train', 'test']:
            data[i][s] = {'x': [], 'y': []}
            data[i][s]['x'] = torch.load(os.path.join(bin_dir, 'data' + str(ids[i]) + s + 'x.bin'))
            data[i][s]['y'] = torch.load(os.path.join(bin_dir, 'data' + str(ids[i]) + s + 'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Collect (task, ncla) pairs and the total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # DataLoaders for the 50 two-class tasks.
    Loder = {}
    for t in range(50):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        Loder[t]['train'] = torch.utils.data.DataLoader(
            Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y']),
            batch_size=10,
            shuffle=True,
        )
        Loder[t]['test'] = torch.utils.data.DataLoader(
            Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y']),
            batch_size=64,
            shuffle=True,
        )
    # Loader over the complete CIFAR-100 test set.
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    # NOTE(review): 10 // ncla == 5 truncates taskcla to the first 5 of the
    # 50 tasks; kept as-is for caller compatibility (looks like a CIFAR-10
    # leftover) — confirm intent before changing.
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_mnist(seed=0,pc_valid=0.10):
    """Split-MNIST benchmark: five 2-class tasks plus a 6th all-digit entry.

    The first call materialises the split once and caches every task's
    tensors under ./data/binary_mnist as .bin files; later calls simply
    reload that cache.  `seed` and `pc_valid` are accepted for interface
    parity with the other get_* loaders but are not used here.

    Returns (data, taskcla, size, Loder, test_loader):
      data        task id -> {'name', 'ncla', 'train', 'test'} tensors
      taskcla     [(task, ncla), ...] truncated to the first 5 tasks
      size        input shape [1, 28, 28]
      Loder       task id -> dict with 'train' (batch 10) / 'test' (batch 64) loaders
      test_loader DataLoader over the complete MNIST test set
    """
    data = {}
    taskcla = []
    size = [1, 28, 28]
    classes_per_task = 2
    cache_dir = os.path.expanduser('./data/binary_mnist')
    if not os.path.isdir('./data/binary_mnist/'):
        os.makedirs('./data/binary_mnist')
        # Standard MNIST normalisation constants.
        preprocess = transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
        raw = {
            'train': datasets.MNIST('./data/', train=True, download=True, transform=preprocess),
            'test': datasets.MNIST('./data/', train=False, download=True, transform=preprocess),
        }
        # Tasks 0..4 hold two consecutive digits each.
        for t in range(10 // classes_per_task):
            print(t)
            lo = classes_per_task * t
            hi = classes_per_task * (t + 1)
            data[t] = {'name': 'mnist' + str(lo) + '-' + str(hi - 1),
                       'ncla': classes_per_task}
            for s in ['train', 'test']:
                xs, ys = [], []
                one_by_one = torch.utils.data.DataLoader(raw[s], batch_size=1, shuffle=False)
                for image, target in one_by_one:
                    label = target.numpy()[0]
                    if lo <= label < hi:
                        xs.append(image)
                        ys.append(label)
                data[t][s] = {'x': xs, 'y': ys}
        # Extra entry (index 5) with every digit.
        t = 10 // classes_per_task
        data[t] = {'name': 'mnist-all', 'ncla': 10}
        for s in ['train', 'test']:
            xs, ys = [], []
            one_by_one = torch.utils.data.DataLoader(raw[s], batch_size=1, shuffle=False)
            for image, target in one_by_one:
                xs.append(image)
                ys.append(target.numpy()[0])
            data[t][s] = {'x': xs, 'y': ys}
        # Stack the per-sample tensors and write the cache files.
        for t in data.keys():
            for s in ['train', 'test']:
                stacked_x = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                stacked_y = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                data[t][s]['x'] = stacked_x
                data[t][s]['y'] = stacked_y
                torch.save(stacked_x, os.path.join(cache_dir, 'data' + str(t) + s + 'x.bin'))
                torch.save(stacked_y, os.path.join(cache_dir, 'data' + str(t) + s + 'y.bin'))
    # Reload every task from the binary cache.
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        entry = dict.fromkeys(['name', 'ncla', 'train', 'test'])
        for s in ['train', 'test']:
            entry[s] = {
                'x': torch.load(os.path.join(cache_dir, 'data' + str(ids[i]) + s + 'x.bin')),
                'y': torch.load(os.path.join(cache_dir, 'data' + str(ids[i]) + s + 'y.bin')),
            }
        entry['ncla'] = len(np.unique(entry['train']['y'].numpy()))
        entry['name'] = 'mnist->>>' + str(i * entry['ncla']) + '-' + str(entry['ncla'] * (i + 1) - 1)
        data[i] = entry
    # Per-task class counts and the grand total.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Wrap the first five tasks in DataLoaders.
    Loder = {}
    for t in range(5):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        flat = torch.tensor(data[t]['train']['x'].reshape(-1, 1, 28, 28))
        total = flat.size()[0]
        n_valid = int(round(total * 0.1))       # computed but unused (kept from original)
        n_train = int(round(total - n_valid))   # computed but unused (kept from original)
        Loder[t]['train'] = torch.utils.data.DataLoader(
            Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y']),
            batch_size=10,
            shuffle=True,
        )
        Loder[t]['test'] = torch.utils.data.DataLoader(
            Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y']),
            batch_size=64,
            shuffle=True,
        )
    # Loader over the complete (un-split) MNIST test set.
    mean = (0.1307,)
    std = (0.3081,)
    test_dataset = datasets.MNIST('./data/', train=False, download=True,
                                  transform=transforms.Compose(
                                      [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_20(seed=0,pc_valid=0.10):
    """Split-CIFAR-100 benchmark with 20 tasks of 5 classes each.

    First call downloads CIFAR-100, splits it into 20 five-class tasks
    plus an extra 100-class entry, and caches every task's tensors as
    .bin files under ./data/binary_cifar100_5; later calls reload the
    cache.  `seed` and `pc_valid` are accepted for signature parity but
    not used here.

    Returns (data, taskcla, size, Loder, test_loader) where Loder[t]
    holds 'train' (batch 32) and 'test' (batch 64) DataLoaders for task
    t, and test_loader iterates the full CIFAR-100 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # CIFAR100: build the .bin cache only on the first run.
    if not os.path.isdir('./data/binary_cifar100_5/'):
        os.makedirs('./data/binary_cifar100_5')
        t_class_num = 5
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # Tasks 0..19: five consecutive class labels per task.
        for t in range(100//t_class_num):
            print(t)
            data[t] = {}
            data[t]['name'] = 'cifar100-' + str(t_class_num*t) + '-' + str(t_class_num*(t+1)-1)
            data[t]['ncla'] = t_class_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_class_num*t, t_class_num*(t+1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra entry (index 20) holding all 100 classes.
        t = 100 // t_class_num
        data[t] = {}
        data[t]['name'] = 'cifar100-all'
        data[t]['ncla'] = 100
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save: stack per-sample tensors and persist as .bin files.
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_5'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_5'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files (20 tasks + the all-class entry).
    data = {}
    ids = list(np.arange(21))
    print('Task order =', ids)
    for i in range(21):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_5'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_5'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: (task, ncla) pairs and total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    Loder = {}
    for t in range(20):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): NUM_VALID/NUM_TRAIN are computed but never used below.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=32,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    #     [transforms.ToTensor(), transforms.Normalize(mean,
    #                                                  std)]))  # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    # Loader over the complete CIFAR-100 test set.
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    # NOTE(review): 10 // ncla == 2 truncates taskcla to the first 2 of the
    # 20 tasks — looks like a CIFAR-10 leftover (100 // ncla would keep all
    # tasks); confirm against callers before changing.
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_10(seed=0,pc_valid=0.10):
    """Split-CIFAR-100 benchmark with 10 tasks of 10 classes each, cached
    under ./data/binary_cifar100_10 (same construction as get_cifar100
    but a separate cache directory and this function's own loaders).

    First call downloads CIFAR-100, splits it into 10 ten-class tasks
    plus an extra 100-class entry, and caches every task's tensors as
    .bin files; later calls reload the cache.  `seed` and `pc_valid` are
    accepted for signature parity but not used here.

    Returns (data, taskcla, size, Loder, test_loader) where Loder[t]
    holds 'train' (batch 10) and 'test' (batch 64) DataLoaders for task
    t, and test_loader iterates the full CIFAR-100 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # CIFAR100: build the .bin cache only on the first run.
    if not os.path.isdir('./data/binary_cifar100_10/'):
        os.makedirs('./data/binary_cifar100_10')
        t_class_num = 10
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # Tasks 0..9: ten consecutive class labels per task.
        for t in range(100//t_class_num):
            print(t)
            data[t] = {}
            data[t]['name'] = 'cifar100-' + str(t_class_num*t) + '-' + str(t_class_num*(t+1)-1)
            data[t]['ncla'] = t_class_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_class_num*t, t_class_num*(t+1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra entry (index 10) holding all 100 classes.
        t = 100 // t_class_num
        data[t] = {}
        data[t]['name'] = 'cifar100-all'
        data[t]['ncla'] = 100
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save: stack per-sample tensors and persist as .bin files.
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_10'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_10'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files (10 tasks + the all-class entry).
    data = {}
    ids = list(np.arange(11))
    print('Task order =', ids)
    for i in range(11):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_10'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_10'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: (task, ncla) pairs and total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    Loder = {}
    for t in range(10):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): NUM_VALID/NUM_TRAIN are computed but never used below.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    #     [transforms.ToTensor(), transforms.Normalize(mean,
    #                                                  std)]))  # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    # Loader over the complete CIFAR-100 test set.
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    # NOTE(review): 10 // ncla == 1 truncates taskcla to the first of the
    # 10 tasks — looks like a CIFAR-10 leftover (100 // ncla would keep all
    # tasks); confirm against callers before changing.
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_5_5(seed=0,pc_valid=0.10):
    """Build the CIFAR-100 '5-5' continual-learning split.

    Task 0 holds the first 60 classes (all samples); tasks 1..8 each hold
    5 new classes with at most 5 samples per class (few-shot). The split
    tensors are cached under ./data/binary_cifar100_5_5 on the first run
    and reloaded from disk afterwards.

    Returns (data, taskcla, size, Loder, test_loader).
    NOTE(review): `seed` and `pc_valid` are accepted but never used here.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # CIFAR10
    if not os.path.isdir('./data/binary_cifar100_5_5/'):
        os.makedirs('./data/binary_cifar100_5_5')
        t_num = 9
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        for t in range(9):
            print(t)
            if t==0:
                # Base task: every sample whose label is in 0..59.
                data[t] = {}
                data[t]['name'] = 'cifar100-' + str(t_num * t) + '-' + str(t_num * (t + 1) - 1)
                # NOTE(review): 'ncla' is set to t_num (=9) here but is
                # recomputed from the actual unique labels after reload below.
                data[t]['ncla'] = t_num
                for s in ['train', 'test']:
                    loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                    data[t][s] = {'x': [], 'y': []}
                    for image, target in loader:
                        label = target.numpy()[0]
                        if label in range(0, 60):
                            data[t][s]['x'].append(image)
                            data[t][s]['y'].append(label)
            else:
                # Few-shot tasks: classes 60+5(t-1) .. 60+5t-1,
                # keeping at most 5 samples per class.
                data[t] = {}
                data[t]['name'] = 'cifar100-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
                data[t]['ncla'] = t_num
                for s in ['train', 'test']:
                    loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                    data[t][s] = {'x': [], 'y': []}
                    class_num={}
                    for i in range(60+5*(t-1),60+5*t):
                        class_num[i]=0
                    for image, target in loader:
                        label = target.numpy()[0]
                        if label in range(60+5*(t-1), 60+5*t):
                            if class_num[label]<5:
                                data[t][s]['x'].append(image)
                                data[t][s]['y'].append(label)
                                class_num[label]+=1
                            else:
                                continue
        # Extra entry (index 100 // 9 == 11) holding the full 100-class set.
        t = 100 // t_num
        data[t] = {}
        data[t]['name'] = 'cifar100-all'
        data[t]['ncla'] = 100
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_5_5'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar100_5_5'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files
    data = {}
    ids = list(np.arange(9))
    print('Task order =', ids)
    for i in range(9):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_5_5'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_5_5'),'data'+str(ids[i])+s+'y.bin'))
        # Recompute the real class count from the labels actually stored.
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Wrap each task into train DataLoaders (big batch for the base task,
    # small batch for the few-shot tasks).
    Loder = {}
    for t in range(9):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        # dataset_new_valid = Data.TensorDataset(data[t]['valid']['x'], data[t]['valid']['y'])
        if t==0:
            train_loader = torch.utils.data.DataLoader(
                dataset_new_train,
                batch_size=128,
                shuffle=True,
            )
        else:
            train_loader = torch.utils.data.DataLoader(
                dataset_new_train,
                batch_size=25,
                shuffle=True,
            )
        Loder[t]['train'] = train_loader
        # Loder[t]['valid'] = valid_loader
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    # [transforms.ToTensor(), transforms.Normalize(mean,
    # std)])) # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=64,
    shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    # Single loader over the full CIFAR-100 test set.
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=2000,
        shuffle=True,
    )
    print("Loder is prepared")
    # NOTE(review): data[0]['ncla'] is 60 here, so 10 // 60 == 0 and the
    # returned taskcla slice is empty — confirm callers expect this.
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
from tinyimagenet import MyTinyImagenet
from conf import base_path
def get_tinyimagenet_100(seed=0,pc_valid=0.10):
    """Build the 100-task TinyImagenet continual-learning split (2 classes/task).

    Each of the 100 tasks holds 2 consecutive classes; an extra entry (index
    100) holds the full 200-class dataset. Split tensors are cached under
    ./data/binary_tiny200_22 on the first run and reloaded afterwards.

    Returns (data, taskcla, size, Loder, test_loader).
    NOTE(review): `seed` and `pc_valid` are accepted but never used here.
    """
    data = {}
    taskcla = []
    size = [3, 64, 64]
    # BUGFIX: the directory that is created must match the directory the
    # tensors are saved to and loaded from. The original code created
    # './data/binary_tiny200_222' but saved/loaded under
    # './data/binary_tiny200_22', so torch.save failed on a fresh run.
    bin_dir = os.path.expanduser('./data/binary_tiny200_22')
    if not os.path.isdir(bin_dir):
        os.makedirs(bin_dir)
        t_class_num = 2  # classes per task
        dat = {}
        transform = transforms.Normalize((0.4802, 0.4480, 0.3975),
                                         (0.2770, 0.2691, 0.2821))
        test_transform = transforms.Compose(
            [transforms.ToTensor(), transform])
        train = MyTinyImagenet(base_path() + 'TINYIMG',
                               train=True, download=True, transform=test_transform)
        test = MyTinyImagenet(base_path() + 'TINYIMG',
                              train=False, download=True, transform=test_transform)
        dat['train'] = train
        dat['test'] = test
        # One task per pair of consecutive class labels.
        for t in range(200 // t_class_num):
            print(t)
            data[t] = {}
            data[t]['name'] = 'cifar100-' + str(t_class_num * t) + '-' + str(t_class_num * (t + 1) - 1)
            data[t]['ncla'] = t_class_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_class_num * t, t_class_num * (t + 1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra entry holding the full 200-class dataset.
        t = 200 // t_class_num
        data[t] = {}
        data[t]['name'] = 'tiny200-all'
        data[t]['ncla'] = 200
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(bin_dir, 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(bin_dir, 'data' + str(t) + s + 'y.bin'))
    # Load binary files (101 entries: 100 tasks plus the 'all' entry).
    data = {}
    ids = list(np.arange(101))
    print('Task order =', ids)
    for i in range(101):
        data[i] = dict.fromkeys(['name', 'ncla', 'train', 'test'])
        for s in ['train', 'test']:
            data[i][s] = {'x': [], 'y': []}
            data[i][s]['x'] = torch.load(os.path.join(bin_dir, 'data' + str(ids[i]) + s + 'x.bin'))
            data[i][s]['y'] = torch.load(os.path.join(bin_dir, 'data' + str(ids[i]) + s + 'y.bin'))
        # Recompute the real class count from the labels actually stored.
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Wrap the 100 tasks into train/test DataLoaders.
    Loder = {}
    for t in range(100):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    # Single loader over the full (un-split) TinyImagenet test set.
    transform = transforms.Normalize((0.4802, 0.4480, 0.3975),
                                     (0.2770, 0.2691, 0.2821))
    test_transform = transforms.Compose(
        [transforms.ToTensor(), transform])
    test = MyTinyImagenet(base_path() + 'TINYIMG',
                          train=False, download=True, transform=test_transform)
    test_loader = torch.utils.data.DataLoader(
        test,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    # NOTE(review): taskcla[:10 // ncla] (== taskcla[:5] for ncla == 2) is
    # kept as-is for consistency with the other get_* loaders in this file.
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
| 75,193
| 45.04654
| 239
|
py
|
GSA
|
GSA-main/GSA_CVPR/CSL/base_model.py
|
from abc import *
import torch.nn as nn
import torch
import torch.nn.functional as F
class BaseModel(nn.Module, metaclass=ABCMeta):
    """Backbone wrapper exposing classification, SimCLR and shift heads.

    Subclasses must implement ``penultimate`` to produce the backbone
    features; ``forward`` turns those features into logits and optional
    auxiliary head outputs.
    """

    def __init__(self, last_dim=300, num_classes=10, simclr_dim=400):
        super(BaseModel, self).__init__()
        self.linear = nn.Linear(last_dim, num_classes)
        # Number of logit columns actually returned by forward().
        self.out_num=1
        # Classification weight matrix; forward() keeps only the first
        # `out_num` of its 3 + out_num output rows as logits.
        self.weight3 = nn.Parameter(torch.Tensor(3 + self.out_num, 300))
        # SimCLR projection head: last_dim -> last_dim -> simclr_dim.
        self.simclr_layer = nn.Sequential(
            nn.Linear(last_dim, last_dim),
            nn.ReLU(),
            nn.Linear(last_dim, simclr_dim),
        )
        # Head predicting which of the 4 shifting transforms was applied.
        self.shift_cls_layer = nn.Linear(last_dim, 4)
        # Joint (shift x class) distribution head.
        self.joint_distribution_layer = nn.Linear(last_dim, 4 * num_classes)

    @abstractmethod
    def penultimate(self, inputs, all_features=False):
        """Return backbone features before any head (subclass-provided)."""
        pass

    def forward(self, inputs, penultimate=False, simclr=False, shift=False):
        _aux = {}
        _return_aux = False
        features = self.penultimate(inputs)  # features from the last backbone/MLP layer
        #print("feature",features.shape)
        output = F.linear(features,self.weight3)  # linear head -> classification logits
        if penultimate:
            _return_aux = True
            _aux['penultimate'] = features  # raw features, no head applied
        if simclr:
            _return_aux = True
            _aux['simclr'] = self.simclr_layer(features)  # SimCLR projection output
        if shift:
            _return_aux = True
            _aux['shift'] = self.shift_cls_layer(features)  # 4-way shift prediction
        if _return_aux:
            return output[:,:self.out_num], _aux
        return output[:,:self.out_num]
| 1,540
| 29.82
| 76
|
py
|
GSA
|
GSA-main/GSA_CVPR/CSL/tao.py
|
import math
import numbers
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
if torch.__version__ >= '1.4.0':
kwargs = {'align_corners': False}
else:
kwargs = {}
def rgb2hsv(rgb):
    """Convert a batched RGB tensor (N, 3, H, W) to its HSV counterpart.

    Hue is computed with atan2() following the hexagonal definition in [1]
    rather than the usual lookup-table formulas [2, 3]; both agree at
    multiples of 30 degrees and differ by at most ~1.2 degrees elsewhere.

    References
    [1] https://en.wikipedia.org/wiki/Hue
    [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html
    [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212
    """
    red, green, blue = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :]
    channel_max = rgb.max(1)[0]
    channel_min = rgb.min(1)[0]
    chroma = channel_max - channel_min
    # Hue angle in [0, 2*pi), normalized to [0, 1).
    angle = torch.atan2(math.sqrt(3) * (green - blue), 2 * red - green - blue)
    hue = (angle % (2 * math.pi)) / (2 * math.pi)
    saturation = chroma / channel_max  # NaN/inf where channel_max == 0, zeroed below
    value = channel_max
    out = torch.stack([hue, saturation, value], dim=1)
    out[~torch.isfinite(out)] = 0.
    return out
def hsv2rgb(hsv):
    """Convert a batched HSV tensor (N, 3, H, W) back to RGB.

    Uses the piecewise-linear alternative formulation of the HSV->RGB
    mapping, evaluated for all three channels at once.

    References
    [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative
    """
    hue, sat, val = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]]
    chroma = val * sat
    # Per-channel phase offsets (R, G, B) of the hue hexagon.
    offsets = hsv.new_tensor([5, 3, 1]).view(3, 1, 1)
    k = torch.remainder(offsets + hue * 6, 6)
    ramp = torch.clamp(torch.min(k, 4 - k), 0, 1)
    return val - chroma * ramp
class RandomResizedCropLayer(nn.Module):
    def __init__(self, size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.)):
        '''
        Inception Crop
        size (tuple): size of fowarding image (C, W, H)
        scale (tuple): range of size of the origin size cropped
        ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
        '''
        super(RandomResizedCropLayer, self).__init__()
        _eye = torch.eye(2, 3)  # 2x3 identity affine matrix
        self.size = size
        self.register_buffer('_eye', _eye)
        self.scale = scale
        self.ratio = ratio

    def forward(self, inputs, whbias=None):
        """Crop-and-resize via a per-sample affine grid parameterized by
        (w, h, w_bias, h_bias); samples the parameters when not provided."""
        _device = inputs.device
        N = inputs.size(0)
        _theta = self._eye.repeat(N, 1, 1)
        if whbias is None:
            whbias = self._sample_latent(inputs)
        # Scale entries (crop size) and translation entries (crop position).
        _theta[:, 0, 0] = whbias[:, 0]
        _theta[:, 1, 1] = whbias[:, 1]
        _theta[:, 0, 2] = whbias[:, 2]
        _theta[:, 1, 2] = whbias[:, 3]
        grid = F.affine_grid(_theta, inputs.size(), **kwargs).to(_device)
        output = F.grid_sample(inputs, grid, padding_mode='reflection', **kwargs)
        if self.size is not None:
            output = F.adaptive_avg_pool2d(output, self.size)
        return output  # affine-resampled crop (honors whbias when given)

    def _clamp(self, whbias):
        # Project sampled (w, h, w_bias, h_bias) back into the valid region
        # using relu-based soft clamps (keeps the op differentiable).
        w = whbias[:, 0]
        h = whbias[:, 1]
        w_bias = whbias[:, 2]
        h_bias = whbias[:, 3]
        # Clamp with scale
        w = torch.clamp(w, *self.scale)
        h = torch.clamp(h, *self.scale)
        # Clamp with ratio
        w = self.ratio[0] * h + torch.relu(w - self.ratio[0] * h)
        w = self.ratio[1] * h - torch.relu(self.ratio[1] * h - w)
        # Clamp with bias range: w_bias \in (w - 1, 1 - w), h_bias \in (h - 1, 1 - h)
        w_bias = w - 1 + torch.relu(w_bias - w + 1)
        w_bias = 1 - w - torch.relu(1 - w - w_bias)
        h_bias = h - 1 + torch.relu(h_bias - h + 1)
        h_bias = 1 - h - torch.relu(1 - h - h_bias)
        whbias = torch.stack([w, h, w_bias, h_bias], dim=0).t()
        return whbias

    def _sample_latent(self, inputs):
        # Rejection-sample crop parameters (area within `scale`, aspect
        # within `ratio`), in the spirit of torchvision's RandomResizedCrop.
        _device = inputs.device
        N, _, width, height = inputs.shape
        # N * 10 trial
        area = width * height
        target_area = np.random.uniform(*self.scale, N * 10) * area
        log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1]))
        aspect_ratio = np.exp(np.random.uniform(*log_ratio, N * 10))
        # If doesn't satisfy ratio condition, then do central crop
        w = np.round(np.sqrt(target_area * aspect_ratio))
        h = np.round(np.sqrt(target_area / aspect_ratio))
        cond = (0 < w) * (w <= width) * (0 < h) * (h <= height)
        w = w[cond]
        h = h[cond]
        cond_len = w.shape[0]
        if cond_len >= N:
            w = w[:N]
            h = h[:N]
        else:
            # Not enough valid trials: pad the remainder with full-size crops.
            w = np.concatenate([w, np.ones(N - cond_len) * width])
            h = np.concatenate([h, np.ones(N - cond_len) * height])
        # Random crop offsets, normalized to [-1, 1]-style affine coordinates.
        w_bias = np.random.randint(w - width, width - w + 1) / width
        h_bias = np.random.randint(h - height, height - h + 1) / height
        w = w / width
        h = h / height
        whbias = np.column_stack([w, h, w_bias, h_bias])
        whbias = torch.tensor(whbias, device=_device)
        return whbias
class HorizontalFlipRandomCrop(nn.Module):
    """Random horizontal flip plus random translation in [-max_range,
    max_range], implemented as a per-sample affine resampling."""

    def __init__(self, max_range):
        super(HorizontalFlipRandomCrop, self).__init__()
        self.max_range = max_range
        _eye = torch.eye(2, 3)  # 2x3 identity affine matrix
        self.register_buffer('_eye', _eye)

    def forward(self, input, sign=None, bias=None, rotation=None):
        _device = input.device
        N = input.size(0)
        _theta = self._eye.repeat(N, 1, 1)
        if sign is None:
            # Random flip sign in {-1, +1}, each with probability 0.5.
            sign = torch.bernoulli(torch.ones(N, device=_device) * 0.5) * 2 - 1
        if bias is None:
            # Random (x, y) translation, uniform in [-max_range, max_range].
            bias = torch.empty((N, 2), device=_device).uniform_(-self.max_range, self.max_range)
        _theta[:, 0, 0] = sign  # -1 in the x-scale slot flips horizontally
        _theta[:, :, 2] = bias
        if rotation is not None:
            _theta[:, 0:2, 0:2] = rotation  # optional per-sample 2x2 rotation block
        grid = F.affine_grid(_theta, input.size(), **kwargs).to(_device)
        output = F.grid_sample(input, grid, padding_mode='reflection', **kwargs)
        return output

    def _sample_latent(self, N, device=None):
        # Sample (sign, bias) without applying them; useful for paired views.
        sign = torch.bernoulli(torch.ones(N, device=device) * 0.5) * 2 - 1
        bias = torch.empty((N, 2), device=device).uniform_(-self.max_range, self.max_range)
        return sign, bias
class Rotation(nn.Module):
    """Random 90-degree rotation augmentation (one of 4 rotations)."""

    def __init__(self, max_range = 4):
        super(Rotation, self).__init__()
        self.max_range = max_range
        # Probability of keeping a sample unrotated when aug_index is None.
        self.prob = 0.5

    def forward(self, input, aug_index=None):
        _device = input.device
        #print(self.prob)
        _, _, H, W = input.size()
        if aug_index is None:
            aug_index = np.random.randint(4)  # pick one of the 4 rotations at random
            # Rotate by aug_index * 90 degrees over the spatial dims (2, 3).
            output = torch.rot90(input, aug_index, (2, 3))
            # Per-sample Bernoulli(prob) mask: 1 keeps the original image,
            # 0 takes the rotated one.
            _prob = input.new_full((input.size(0),), self.prob)
            _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1)
            output = _mask * input + (1-_mask) * output
        else:
            # Deterministic path: rotate every sample by the requested index
            # (no per-sample masking).
            aug_index = aug_index % self.max_range
            output = torch.rot90(input, aug_index, (2, 3))
        return output
class CutPerm(nn.Module):
    """Augmentation that permutes the four image quadrants.

    `aug_index` in 0..3 encodes whether to swap the top/bottom halves
    (bit 1) and/or the left/right halves (bit 0). With no index given,
    a random permutation is applied to a Bernoulli(0.5) subset of the
    batch; with an index given, it is applied deterministically.
    """

    def __init__(self, max_range = 4):
        super(CutPerm, self).__init__()
        self.max_range = max_range
        self.prob = 0.5

    def forward(self, input, aug_index=None):
        _device = input.device
        _, _, H, W = input.size()
        if aug_index is not None:
            # Deterministic path: permute every sample the same way.
            return self._cutperm(input, aug_index % self.max_range)
        # Random path: pick a permutation, keep each sample unchanged
        # with probability self.prob.
        chosen = np.random.randint(4)
        permuted = self._cutperm(input, chosen)
        keep = torch.bernoulli(input.new_full((input.size(0),), self.prob))
        keep = keep.view(-1, 1, 1, 1)
        return keep * input + (1 - keep) * permuted

    def _cutperm(self, inputs, aug_index):
        # Swap halves along each axis according to the two bits of aug_index.
        _, _, H, W = inputs.size()
        h_mid = int(H / 2)
        w_mid = int(W / 2)
        if aug_index // 2 == 1:
            top, bottom = inputs[:, :, :h_mid, :], inputs[:, :, h_mid:, :]
            inputs = torch.cat((bottom, top), dim=2)
        if aug_index % 2 == 1:
            left, right = inputs[:, :, :, :w_mid], inputs[:, :, :, w_mid:]
            inputs = torch.cat((right, left), dim=3)
        return inputs
class HorizontalFlipLayer(nn.Module):
    def __init__(self):
        """
        img_size : (int, int, int)
            Height and width must be powers of 2. E.g. (32, 32, 1) or
            (64, 128, 3). Last number indicates number of channels, e.g. 1 for
            grayscale or 3 for RGB
        """
        super(HorizontalFlipLayer, self).__init__()
        _eye = torch.eye(2, 3)  # 2x3 identity affine matrix (top two rows of I)
        self.register_buffer('_eye', _eye)

    def forward(self, inputs):
        _device = inputs.device
        N = inputs.size(0)  # batch size
        _theta = self._eye.repeat(N, 1, 1)  # one affine matrix per sample
        # Random sign in {-1, +1} with probability 0.5; -1 flips horizontally.
        r_sign = torch.bernoulli(torch.ones(N, device=_device) * 0.5) * 2 - 1
        _theta[:, 0, 0] = r_sign  # put the flip sign into the x-scale entry
        grid = F.affine_grid(_theta, inputs.size(), **kwargs).to(_device)
        inputs = F.grid_sample(inputs, grid, padding_mode='reflection', **kwargs)
        return inputs  # affine-resampled (possibly flipped) images
class RandomColorGrayLayer(nn.Module):
    """Random grayscale augmentation using ITU-R BT.601 luma weights.

    aug_index == 0 disables the augmentation; aug_index is None applies
    grayscale to a Bernoulli(p) subset of the batch; any other index
    converts the whole batch.
    """

    def __init__(self, p):
        super(RandomColorGrayLayer, self).__init__()
        self.prob = p
        luma_weights = torch.tensor([[0.299, 0.587, 0.114]])
        self.register_buffer('_weight', luma_weights.view(1, 3, 1, 1))

    def forward(self, inputs, aug_index=None):
        if aug_index == 0:
            # Explicitly disabled: return the batch untouched.
            return inputs
        # A 1x1 convolution collapses RGB into one luma channel, which is
        # then replicated back to 3 channels.
        luma = F.conv2d(inputs, self._weight)
        gray = torch.cat([luma, luma, luma], dim=1)
        if aug_index is not None:
            return gray
        # Random path: grayscale only a Bernoulli(p) subset of samples.
        mask = torch.bernoulli(inputs.new_full((inputs.size(0),), self.prob))
        mask = mask.view(-1, 1, 1, 1)
        return inputs * (1 - mask) + gray * mask
class ColorJitterLayer(nn.Module):
    """Random color jitter (brightness / contrast / saturation / hue),
    applied to a Bernoulli(p) subset of the batch."""

    def __init__(self, p, brightness, contrast, saturation, hue):
        super(ColorJitterLayer, self).__init__()
        self.prob = p  # e.g. 0.8
        self.brightness = self._check_input(brightness, 'brightness')  # e.g. [0.6, 1.4]
        self.contrast = self._check_input(contrast, 'contrast')  # e.g. [0.6, 1.4]
        self.saturation = self._check_input(saturation, 'saturation')  # e.g. [0.6, 1.4]
        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
                                     clip_first_on_zero=False)  # e.g. [-0.1, 0.1]

    def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
        # Normalize a jitter spec into a [lo, hi] range, or None for "disabled".
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError("If {} is a single number, it must be non negative.".format(name))
            value = [center - value, center + value]  # symmetric range about center
            if clip_first_on_zero:
                value[0] = max(value[0], 0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError("{} values should be between {}".format(name, bound))
        else:
            raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
        # if value is 0 or (1., 1.) for brightness/contrast/saturation
        # or (0., 0.) for hue, do nothing
        if value[0] == value[1] == center:
            value = None
        return value

    def adjust_contrast(self, x):
        if self.contrast:
            factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast)  # per-sample scale
            means = torch.mean(x, dim=[2, 3], keepdim=True)  # per-sample channel means (N, C, 1, 1)
            x = (x - means) * factor + means  # scale each sample about its own mean
        return torch.clamp(x, 0, 1)  # keep values in [0, 1]

    def adjust_hsv(self, x):
        # Per-sample factors: hue shift (init 0), saturation and value scales (init 1).
        f_h = x.new_zeros(x.size(0), 1, 1)
        f_s = x.new_ones(x.size(0), 1, 1)
        f_v = x.new_ones(x.size(0), 1, 1)
        if self.hue:
            f_h.uniform_(*self.hue)  # hue shifts, e.g. in [-0.1, 0.1]
        if self.saturation:
            f_s = f_s.uniform_(*self.saturation)  # saturation scales, e.g. in [0.6, 1.4]
        if self.brightness:
            f_v = f_v.uniform_(*self.brightness)  # value/brightness scales
        return RandomHSVFunction.apply(x, f_h, f_s, f_v)  # apply the HSV jitter

    def transform(self, inputs):
        # Shuffle transform
        if np.random.rand() > 0.5:
            transforms = [self.adjust_contrast, self.adjust_hsv]
        else:
            transforms = [self.adjust_hsv, self.adjust_contrast]
        for t in transforms:
            inputs = t(inputs)  # apply both transforms, in random order
        return inputs

    def forward(self, inputs):
        _prob = inputs.new_full((inputs.size(0),), self.prob)
        _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1)  # which samples get jittered
        return inputs * (1 - _mask) + self.transform(inputs) * _mask
class RandomHSVFunction(Function):
    """Autograd function applying per-sample hue/saturation/value jitter.

    The backward pass is straight-through: gradients flow to the image
    unchanged, and the jitter factors receive no gradient.
    """

    @staticmethod
    def forward(ctx, x, f_h, f_s, f_v):
        # ctx is a context object that can be used to stash information
        # for backward computation
        x = rgb2hsv(x)  # convert RGB -> HSV (jitter operates in HSV space)
        h = x[:, 0, :, :]  # hue channel
        h += (f_h * 255. / 360.)  # per-sample hue shift
        h = (h % 1)  # wrap hue around the unit circle
        x[:, 0, :, :] = h
        x[:, 1, :, :] = x[:, 1, :, :] * f_s  # scale saturation
        x[:, 2, :, :] = x[:, 2, :, :] * f_v  # scale value/brightness
        x = torch.clamp(x, 0, 1)  # clip out-of-range channels to [0, 1]
        x = hsv2rgb(x)  # convert back to RGB
        return x

    @staticmethod
    def backward(ctx, grad_output):
        # We return as many input gradients as there were arguments.
        # Gradients of non-Tensor arguments to forward must be None.
        grad_input = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.clone()
        return grad_input, None, None, None
class NormalizeLayer(nn.Module):
    """Map inputs from [0, 1] to [-1, 1] via (x - 0.5) / 0.5.

    In order to certify radii in original coordinates rather than
    standardized coordinates, Gaussian noise is added *before*
    standardizing, which is why standardization is the first layer of the
    classifier rather than part of preprocessing as is typical.
    """

    def __init__(self):
        super(NormalizeLayer, self).__init__()

    def forward(self, inputs):
        centered = inputs - 0.5
        return centered / 0.5
import torch
from torch import Tensor
from torchvision.transforms.functional import to_pil_image, to_tensor
from torch.nn.functional import conv2d, pad as torch_pad
from typing import Any, List, Sequence, Optional
import numbers
import numpy as np
import torch
from PIL import Image
from typing import Tuple
class GaussianBlur(torch.nn.Module):
    """Blurs image with randomly chosen Gaussian blur.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., C, H, W] shape, where ... means an arbitrary number of leading
    dimensions
    Args:
        kernel_size (int or sequence): Size of the Gaussian kernel.
        sigma (float or tuple of float (min, max)): Standard deviation to be used for
            creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
            of float (min, max), sigma is chosen uniformly at random to lie in the
            given range.
    Returns:
        PIL Image or Tensor: Gaussian blurred version of the input image.
    """

    def __init__(self, kernel_size, sigma=(0.1, 2.0)):
        super().__init__()
        # Normalize kernel_size to a (kx, ky) tuple and validate it.
        self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
        for ks in self.kernel_size:
            if ks <= 0 or ks % 2 == 0:
                raise ValueError("Kernel size value should be an odd and positive number.")
        if isinstance(sigma, numbers.Number):
            if sigma <= 0:
                raise ValueError("If sigma is a single number, it must be positive.")
            sigma = (sigma, sigma)  # fixed sigma -> degenerate (min, max) range
        elif isinstance(sigma, Sequence) and len(sigma) == 2:
            if not 0. < sigma[0] <= sigma[1]:
                raise ValueError("sigma values should be positive and of the form (min, max).")
        else:
            raise ValueError("sigma should be a single number or a list/tuple with length 2.")
        self.sigma = sigma

    @staticmethod
    def get_params(sigma_min: float, sigma_max: float) -> float:
        """Choose sigma for random gaussian blurring.
        Args:
            sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.
            sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.
        Returns:
            float: Standard deviation to be passed to calculate kernel for gaussian blurring.
        """
        return torch.empty(1).uniform_(sigma_min, sigma_max).item()

    def forward(self, img: Tensor) -> Tensor:
        """
        Args:
            img (PIL Image or Tensor): image to be blurred.
        Returns:
            PIL Image or Tensor: Gaussian blurred image
        """
        # Sample one sigma per call and use it for both axes.
        sigma = self.get_params(self.sigma[0], self.sigma[1])
        return gaussian_blur(img, self.kernel_size, [sigma, sigma])

    def __repr__(self):
        s = '(kernel_size={}, '.format(self.kernel_size)
        s += 'sigma={})'.format(self.sigma)
        return self.__class__.__name__ + s
@torch.jit.unused
def _is_pil_image(img: Any) -> bool:
    # True when img is a PIL Image; excluded from TorchScript via jit.unused.
    return isinstance(img, Image.Image)
def _setup_size(size, error_msg):
if isinstance(size, numbers.Number):
return int(size), int(size)
if isinstance(size, Sequence) and len(size) == 1:
return size[0], size[0]
if len(size) != 2:
raise ValueError(error_msg)
return size
def _is_tensor_a_torch_image(x: Tensor) -> bool:
return x.ndim >= 2
def _get_gaussian_kernel1d(kernel_size: int, sigma: float) -> Tensor:
ksize_half = (kernel_size - 1) * 0.5
x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
pdf = torch.exp(-0.5 * (x / sigma).pow(2))
kernel1d = pdf / pdf.sum()
return kernel1d
def _cast_squeeze_in(img: Tensor, req_dtype: torch.dtype) -> Tuple[Tensor, bool, bool, torch.dtype]:
need_squeeze = False
# make image NCHW
if img.ndim < 4:
img = img.unsqueeze(dim=0)
need_squeeze = True
out_dtype = img.dtype
need_cast = False
if out_dtype != req_dtype:
need_cast = True
img = img.to(req_dtype)
return img, need_cast, need_squeeze, out_dtype
def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype):
if need_squeeze:
img = img.squeeze(dim=0)
if need_cast:
# it is better to round before cast
img = torch.round(img).to(out_dtype)
return img
def _get_gaussian_kernel2d(
        kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device
) -> Tensor:
    """Separable 2-D Gaussian kernel: outer product of two 1-D kernels."""
    kernel_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0]).to(device, dtype=dtype)
    kernel_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1]).to(device, dtype=dtype)
    # (ky, 1) * (1, kx) broadcast == torch.mm outer product.
    return kernel_y.unsqueeze(1) * kernel_x.unsqueeze(0)
def _gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor:
    """PRIVATE METHOD. Performs Gaussian blurring on the img by given kernel.
    .. warning::
        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.
    Args:
        img (Tensor): Image to be blurred
        kernel_size (sequence of int or int): Kernel size of the Gaussian kernel ``(kx, ky)``.
        sigma (sequence of float or float, optional): Standard deviation of the Gaussian kernel ``(sx, sy)``.
    Returns:
        Tensor: An image that is blurred using gaussian kernel of given parameters
    """
    if not (isinstance(img, torch.Tensor) or _is_tensor_a_torch_image(img)):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))
    # Convolution kernels must be floating point; keep the input dtype if it
    # already is, otherwise compute in float32 and cast back at the end.
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device)
    # One kernel copy per channel, applied depthwise via groups below.
    kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])
    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, kernel.dtype)
    # padding = (left, right, top, bottom)
    padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2]
    # Reflect padding keeps the output the same spatial size as the input.
    img = torch_pad(img, padding, mode="reflect")
    img = conv2d(img, kernel, groups=img.shape[-3])  # depthwise convolution
    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img
def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the img by given kernel.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.
            In torchscript mode kernel_size as single int is not supported, use a tuple or
            list of length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default, None. In torchscript mode sigma as single float is
            not supported, use a tuple or list of length 1: ``[sigma, ]``.
    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
    """
    # --- Validate and normalize kernel_size to a [kx, ky] list. ---
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError('kernel_size should be int or a sequence of integers. Got {}'.format(type(kernel_size)))
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError('If kernel_size is a sequence its length should be 2. Got {}'.format(len(kernel_size)))
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError('kernel_size should have odd and positive integers. Got {}'.format(kernel_size))
    # --- Derive, validate and normalize sigma to a [sx, sy] list. ---
    if sigma is None:
        # Default per the docstring: sigma = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]
    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError('sigma should be either float or sequence of floats. Got {}'.format(type(sigma)))
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError('If sigma is a sequence, its length should be 2. Got {}'.format(len(sigma)))
    for s in sigma:
        if s <= 0.:
            raise ValueError('sigma should have positive values. Got {}'.format(sigma))
    # --- Convert PIL input to a tensor, blur, convert back if needed. ---
    t_img = img
    if not isinstance(img, torch.Tensor):
        if not _is_pil_image(img):
            raise TypeError('img should be PIL Image or Tensor. Got {}'.format(type(img)))
        t_img = to_tensor(img)
    output = _gaussian_blur(t_img, kernel_size, sigma)
    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output)
    return output
| 23,890
| 36.388106
| 117
|
py
|
GSA
|
GSA-main/GSA_CVPR/CSL/utils.py
|
import os
import pickle
import random
import shutil
import sys
from datetime import datetime
import numpy as np
import torch
from matplotlib import pyplot as plt
from tensorboardX import SummaryWriter
class Logger(object):
    """Reference: https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514"""

    def __init__(self, fn, ask=True, local_rank=0):
        # Only rank 0 creates directories and writes logs (distributed-safe:
        # other ranks become no-op loggers).
        self.local_rank = local_rank
        if self.local_rank == 0:
            if not os.path.exists("./logs/"):
                os.mkdir("./logs/")
            logdir = self._make_dir(fn)
            if not os.path.exists(logdir):
                os.mkdir(logdir)
            if len(os.listdir(logdir)) != 0 and ask:
                # Confirm before wiping a non-empty log directory.
                ans = input("log_dir is not empty. All data inside log_dir will be deleted. "
                            "Will you proceed [y/N]? ")
                if ans in ['y', 'Y']:
                    shutil.rmtree(logdir)
                else:
                    exit(1)
            self.set_dir(logdir)

    def _make_dir(self, fn):
        # Return the log directory path for run name *fn*.
        # NOTE(review): `today` is computed but never used — the date is not
        # part of the returned path.
        today = datetime.today().strftime("%y%m%d")
        logdir = 'logs/' + fn
        return logdir

    def set_dir(self, logdir, log_fn='log.txt'):
        # Point the logger at *logdir*: open a TensorBoard writer and an
        # append-mode text log file there.
        self.logdir = logdir
        if not os.path.exists(logdir):
            os.mkdir(logdir)
        self.writer = SummaryWriter(logdir)
        self.log_file = open(os.path.join(logdir, log_fn), 'a')

    def log(self, string):
        # Timestamped message to both the log file and stdout (rank 0 only).
        if self.local_rank == 0:
            self.log_file.write('[%s] %s' % (datetime.now(), string) + '\n')
            self.log_file.flush()
            print('[%s] %s' % (datetime.now(), string))
            sys.stdout.flush()

    def log_dirname(self, string):
        # Message annotated with the current log directory (rank 0 only).
        if self.local_rank == 0:
            self.log_file.write('%s (%s)' % (string, self.logdir) + '\n')
            self.log_file.flush()
            print('%s (%s)' % (string, self.logdir))
            sys.stdout.flush()

    def scalar_summary(self, tag, value, step):
        """Log a scalar variable."""
        if self.local_rank == 0:
            self.writer.add_scalar(tag, value, step)

    def image_summary(self, tag, images, step):
        """Log a list of images."""
        if self.local_rank == 0:
            self.writer.add_image(tag, images, step)

    def histo_summary(self, tag, values, step):
        """Log a histogram of the tensor of values."""
        if self.local_rank == 0:
            self.writer.add_histogram(tag, values, step, bins='auto')
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count, and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.value = 0
        self.average = 0
        self.sum = 0
        self.count = 0

    def update(self, value, n=1):
        """Record `value` observed `n` times and refresh the running average."""
        self.value = value
        self.sum += value * n
        self.count += n
        self.average = self.sum / self.count
def load_checkpoint(logdir, mode='last'):
    """Load model/optimizer states and the pickled config from `logdir`.

    `mode` selects the 'last' or 'best' checkpoint triple (any other value
    raises NotImplementedError). Returns (model_state, optim_state, cfg),
    or (None, None, None) when the model file does not exist.
    """
    if mode not in ('last', 'best'):
        raise NotImplementedError()
    model_path = os.path.join(logdir, '%s.model' % mode)
    optim_path = os.path.join(logdir, '%s.optim' % mode)
    config_path = os.path.join(logdir, '%s.config' % mode)

    print("=> Loading checkpoint from '{}'".format(logdir))
    if not os.path.exists(model_path):
        return None, None, None

    model_state = torch.load(model_path)
    optim_state = torch.load(optim_path)
    with open(config_path, 'rb') as handle:
        cfg = pickle.load(handle)
    return model_state, optim_state, cfg
def save_checkpoint(epoch, model_state, optim_state, logdir):
    """Persist the latest model/optimizer states plus a small config dict.

    Writes last.model, last.optim and last.config (a pickled {'epoch': epoch})
    into `logdir`.
    """
    def dest(suffix):
        return os.path.join(logdir, 'last.' + suffix)

    torch.save(model_state, dest('model'))
    torch.save(optim_state, dest('optim'))
    with open(dest('config'), 'wb') as handle:
        pickle.dump({'epoch': epoch}, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_linear_checkpoint(logdir, mode='last'):
    """Load the linear-head optimizer state from `logdir`.

    `mode` is 'last' or 'best' (anything else raises NotImplementedError).
    Returns the loaded state, or None when the file does not exist.
    """
    if mode not in ('last', 'best'):
        raise NotImplementedError()
    linear_optim_path = os.path.join(logdir, '%s.linear_optim' % mode)

    print("=> Loading linear optimizer checkpoint from '{}'".format(logdir))
    if not os.path.exists(linear_optim_path):
        return None
    return torch.load(linear_optim_path)
def save_linear_checkpoint(linear_optim_state, logdir):
    """Write the linear-head optimizer state to <logdir>/last.linear_optim."""
    torch.save(linear_optim_state, os.path.join(logdir, 'last.linear_optim'))
def set_random_seed(seed):
    """Seed python, numpy and torch (CPU and CUDA) RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
def normalize(x, dim=1, eps=1e-8):
    """L2-normalize `x` along `dim`; `eps` guards against division by zero."""
    length = x.norm(dim=dim, keepdim=True)
    return x / (length + eps)
def make_model_diagrams(probs, labels, n_bins=10):
    """Draw a reliability diagram for a classifier's predictions.

    probs  - a torch tensor (size n x num_classes) of softmax probabilities
             - NOT the raw logits
    labels - a torch tensor (size n) with the ground-truth class indices
    Returns the matplotlib figure.

    Bug fixes vs. the original:
    * `accuracies` was a bool tensor; torch.mean() on bool raises - cast to float.
    * per-bin stats were python lists; calling .numpy() on a list raises
      AttributeError - stack them into tensors instead.
    * plt.subplots(1, 2) returns an *array* of axes; calling .bar on that array
      raised AttributeError - unpack and draw on the second axis.
    NOTE(review): empty bins yield NaN bars (mean of an empty tensor);
    matplotlib skips NaN values, which appears to match the original intent.
    """
    confidences, predictions = probs.max(1)
    accuracies = torch.eq(predictions, labels).float()

    f, (_, rel_ax) = plt.subplots(1, 2, figsize=(4, 2.5))

    # Reliability diagram
    bins = torch.linspace(0, 1, n_bins + 1)
    bins[-1] = 1.0001  # close the last bin so confidence == 1.0 is counted
    width = bins[1] - bins[0]
    bin_indices = [confidences.ge(bin_lower) * confidences.lt(bin_upper)
                   for bin_lower, bin_upper in zip(bins[:-1], bins[1:])]
    bin_corrects = torch.stack([torch.mean(accuracies[bin_index]) for bin_index in bin_indices])
    bin_scores = torch.stack([torch.mean(confidences[bin_index]) for bin_index in bin_indices])

    confs = rel_ax.bar(bins[:-1], bin_corrects.numpy(), width=width)
    gaps = rel_ax.bar(bins[:-1], (bin_scores - bin_corrects).numpy(), bottom=bin_corrects.numpy(),
                      color=[1, 0.7, 0.7], alpha=0.5, width=width, hatch='//', edgecolor='r')
    rel_ax.plot([0, 1], [0, 1], '--', color='gray')
    rel_ax.legend([confs, gaps], ['Outputs', 'Gap'], loc='best', fontsize='small')

    # Clean up
    rel_ax.set_ylabel('Accuracy')
    rel_ax.set_xlabel('Confidence')
    f.tight_layout()
    return f
| 6,511
| 30.61165
| 119
|
py
|
GSA
|
GSA-main/GSA_CVPR/CSL/classifier.py
|
import torch.nn as nn
#from models.resnet import ResNet18, ResNet34, ResNet50
#from models.resnet_imagenet import resnet18, resnet50
from CSL import tao as TL
def get_simclr_augmentation(P, image_size):
    """Build the SimCLR-style augmentation pipeline for dataset `P.dataset`.

    P          - config namespace; uses P.resize_factor and P.dataset.
    image_size - target size passed to the random resized crop layer.
    Returns an nn.Sequential of differentiable augmentation layers (CSL.tao).
    Raises ValueError for an unknown dataset - the original silently fell
    through and later crashed with UnboundLocalError on `transform`.
    """
    # Lower bound of the resize scaling range for RandomResizedCrop
    # (default SimCLR uses [0.08, 1.0]; here the lower bound is configurable).
    resize_scale = (P.resize_factor, 1.0)

    # Augmentation layers
    color_jitter = TL.ColorJitterLayer(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.8)
    color_gray = TL.RandomColorGrayLayer(p=0.2)
    resize_crop = TL.RandomResizedCropLayer(scale=resize_scale, size=image_size)

    if P.dataset == 'imagenet':
        # RandomResizedCrop is already applied at the PIL transform stage.
        transform = nn.Sequential(
            color_jitter,
            color_gray,
        )
    elif P.dataset in ('split_mnist', 'mnist'):
        # Grayscale digits: color jitter / grayscale add nothing, crop only.
        transform = nn.Sequential(
            resize_crop,
        )
    elif P.dataset == 'cifar10':
        transform = nn.Sequential(
            color_jitter,
            color_gray,
            resize_crop,
        )
    else:
        raise ValueError('Unknown dataset for SimCLR augmentation: {}'.format(P.dataset))
    return transform
def get_shift_module(P, eval=False):
    """Return (shift_transform, K_shift) for the shifting-transformation task.

    P.shift_trans_type selects the transform: 'rotation' and 'cutperm' are
    4-way transforms; anything else yields nn.Identity() with K_shift == 1.
    During non-supervised training (eval=False and 'sup' not in P.mode) the
    configured batch size must equal 128 // K_shift.
    """
    kind = P.shift_trans_type
    if kind == 'rotation':
        shift_transform, K_shift = TL.Rotation(), 4
    elif kind == 'cutperm':
        shift_transform, K_shift = TL.CutPerm(), 4
    else:
        shift_transform, K_shift = nn.Identity(), 1

    contrastive_training = (not eval) and ('sup' not in P.mode)
    if contrastive_training:
        assert P.batch_size == int(128 / K_shift)
    return shift_transform, K_shift
def get_shift_classifer(model, K_shift):
    """Attach a K_shift-way linear head (`shift_cls_layer`) to `model`.

    Assumes `model.last_dim` holds the backbone feature dimension.
    Returns the same model object, mutated in place.
    """
    head = nn.Linear(model.last_dim, K_shift)
    model.shift_cls_layer = head
    return model
def get_classifier(mode, n_classes=10):
    """Instantiate a backbone classifier by name: 'resnet18', 'resnet34',
    'resnet50', 'resnet18_imagenet' or 'resnet50_imagenet'.

    NOTE(review): the ResNet*/resnet* constructors below come from imports
    that are commented out at the top of this file, so as written every
    branch raises NameError at call time - re-enable those imports before use.
    """
    if mode == 'resnet18':
        classifier = ResNet18(num_classes=n_classes)
    elif mode == 'resnet34':
        classifier = ResNet34(num_classes=n_classes)
    elif mode == 'resnet50':
        classifier = ResNet50(num_classes=n_classes)
    elif mode == 'resnet18_imagenet':
        classifier = resnet18(num_classes=n_classes)
    elif mode == 'resnet50_imagenet':
        classifier = resnet50(num_classes=n_classes)
    else:
        raise NotImplementedError()
    return classifier
| 2,686
| 27.585106
| 100
|
py
|
GSA
|
GSA-main/GSA_CVPR/CSL/shedular.py
|
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
class GradualWarmupScheduler(_LRScheduler):
    """ Gradually warm-up(increasing) learning rate in optimizer.
    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.
        total_epoch: target learning rate is reached at total_epoch, gradually
        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
    """
    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
        self.multiplier = multiplier
        if self.multiplier < 1.:
            # Fixed typo in the message ("thant" -> "than").
            raise ValueError('multiplier should be greater than or equal to 1.')
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        self.finished = False  # True once warm-up has handed over to after_scheduler
        super(GradualWarmupScheduler, self).__init__(optimizer)

    def get_lr(self):
        if self.last_epoch > self.total_epoch:
            # Warm-up done: delegate to the follow-up scheduler (after scaling
            # its base lrs to the warmed-up target, once), or hold the target lr.
            if self.after_scheduler:
                if not self.finished:
                    self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
                    self.finished = True
                return self.after_scheduler.get_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]
        # During warm-up: multiplier == 1.0 ramps 0 -> base_lr; otherwise ramp
        # base_lr -> base_lr * multiplier, linearly in epochs.
        if self.multiplier == 1.0:
            return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
        else:
            return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]

    def step_ReduceLROnPlateau(self, metrics, epoch=None):
        # ReduceLROnPlateau has a metrics-driven step(), so it needs a dedicated path.
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch if epoch != 0 else 1  # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
        if self.last_epoch <= self.total_epoch:
            warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
            for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
                param_group['lr'] = lr
        else:
            if epoch is None:
                self.after_scheduler.step(metrics, None)
            else:
                self.after_scheduler.step(metrics, epoch - self.total_epoch)

    def step(self, epoch=None, metrics=None):
        """Advance one epoch; routes to the plateau-specific path when needed."""
        if type(self.after_scheduler) != ReduceLROnPlateau:
            if self.finished and self.after_scheduler:
                if epoch is None:
                    self.after_scheduler.step(None)
                else:
                    self.after_scheduler.step(epoch - self.total_epoch)
            else:
                return super(GradualWarmupScheduler, self).step(epoch)
        else:
            self.step_ReduceLROnPlateau(metrics, epoch)
| 3,069
| 46.96875
| 152
|
py
|
GSA
|
GSA-main/GSA_CVPR/CSL/contrastive_learning.py
|
import torch
import torch.distributed as dist
import diffdist.functional as distops
import torch.nn as nn
import torch.nn.functional as F
def get_similarity_matrix(outputs, chunk=2, multi_gpu=False):
    """Return the (B', B') inner-product similarity matrix of `outputs` (B', d).

    With multi_gpu=True the per-chunk embeddings are first all-gathered across
    processes (requires torch.distributed to be initialised).
    """
    if multi_gpu:
        gathered = []
        for part in outputs.chunk(chunk):
            buffers = [torch.empty_like(part) for _ in range(dist.get_world_size())]
            gathered.append(torch.cat(distops.all_gather(buffers, part)))
        outputs = torch.cat(gathered)

    # sim(i, j) = <z_i, z_j>: plain dot products, not cosine similarity.
    return torch.mm(outputs, outputs.t())
def NT_xent(sim_matrix, temperature=0.5, chunk=2, eps=1e-8):
    """NT-Xent (normalized temperature-scaled cross-entropy) loss.

    sim_matrix: (B', B') similarity matrix with B' = B * chunk, where rows
    i and i+B hold the two augmented views of sample i.
    """
    device = sim_matrix.device
    B = sim_matrix.size(0) // chunk

    # Exponentiate scaled similarities and zero out self-similarities.
    off_diag = 1 - torch.eye(B * chunk).to(device)
    logits = torch.exp(sim_matrix / temperature) * off_diag

    # Per-row partition function (denominator of the softmax).
    denom = logits.sum(dim=1, keepdim=True)
    neg_log_prob = -torch.log(logits / (denom + eps) + eps)

    # Positives live on the off-diagonal blocks: pairs (i, i+B) and (i+B, i).
    positives = neg_log_prob[:B, B:].diag() + neg_log_prob[B:, :B].diag()
    return positives.sum() / (2 * B)
def Supervised_NT_xent(sim_matrix, labels, temperature=0.5, chunk=2, eps=1e-8, multi_gpu=False):
    """Supervised NT-Xent: the instance-level NT-Xent term plus a label-driven
    term that treats every same-class pair as a positive.

    sim_matrix: (B', B') tensor with B' = B * chunk (views i and i+B match).
    labels: (B,) class labels for one view; repeated internally for both views.
    `multi_gpu` is accepted for interface parity but unused.
    """
    device = sim_matrix.device
    B = sim_matrix.size(0) // chunk
    both_views = labels.repeat(2)

    # Numerical stability: subtract the per-row max before exponentiating.
    row_max, _ = torch.max(sim_matrix, dim=1, keepdim=True)
    shifted = sim_matrix - row_max.detach()

    off_diag = 1 - torch.eye(B * chunk).to(device)
    logits = torch.exp(shifted / temperature) * off_diag
    denom = torch.sum(logits, dim=1, keepdim=True)
    neg_log_prob = -torch.log(logits / (denom + eps) + eps)

    # Row-normalised same-label mask (includes the two views of each sample).
    col = both_views.contiguous().view(-1, 1)
    same_label = torch.eq(col, col.t()).float().to(device)
    same_label = same_label / (same_label.sum(dim=1, keepdim=True) + eps)

    # Label-aware term: average -log p over all same-class pairs.
    sup_term = torch.sum(same_label * neg_log_prob) / (2 * B)
    # Instance term: the two augmented views of each sample are positives.
    inst_term = torch.sum(neg_log_prob[:B, B:].diag() + neg_log_prob[B:, :B].diag()) / (2 * B)
    return inst_term + sup_term
def Sup(sim_matrix, labels, temperature=0.5, chunk=2, eps=1e-8, multi_gpu=False):
    '''
    Label-driven contrastive loss over a precomputed similarity matrix.
    - sim_matrix: (B', B') tensor for B' = B * chunk (first 2B are pos samples)
    NOTE(review): this reads as an experimental/debug variant - it prints the
    matrix at three stages, `temperature` is unused, and the gathered labels
    in the multi_gpu branch are never used. `torch.max(sim_matrix,eps)[0]`
    passes a python float as torch.max's second argument (most torch versions
    require a Tensor there); `torch.clamp(sim_matrix, min=eps)` was probably
    intended - confirm before relying on this function.
    '''
    device = sim_matrix.device
    labels1 = labels
    if multi_gpu:
        # All-gather the labels across processes (result unused below).
        gather_t = [torch.empty_like(labels1) for _ in range(dist.get_world_size())]
        labels = torch.cat(distops.all_gather(gather_t, labels1))
    labels1 = labels1.repeat(2)  # duplicate labels for the two augmented views
    print("0",sim_matrix)
    B = sim_matrix.size(0) // chunk  # B = B' / chunk
    eye = torch.eye(B * chunk).to(device)  # (B', B') identity for diagonal removal
    print("1",sim_matrix)
    # -log of the (floored) similarities with the diagonal zeroed out.
    sim_matrix = -torch.log(torch.max(sim_matrix,eps)[0])*(1-eye) # loss matrix
    print("2",sim_matrix)
    labels1 = labels1.contiguous().view(-1, 1)
    # 1 where the view-duplicated labels match, then row-normalised.
    Mask1 = torch.eq(labels1, labels1.t()).float().to(device)
    Mask1 = Mask1 / (Mask1.sum(dim=1, keepdim=True) + eps)
    a = 1
    # Mean loss over same-class pairs.
    loss1 = torch.sum(Mask1 * sim_matrix) / (2 * B)
    Loss = a* loss1
    return Loss
class SupConLoss(nn.Module):
    """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
    It also supports the unsupervised contrastive loss in SimCLR"""
    def __init__(self, temperature=0.07, contrast_mode='all',
                 base_temperature=0.07):
        super(SupConLoss, self).__init__()
        self.temperature = temperature            # softmax temperature
        self.contrast_mode = contrast_mode        # 'one': only view 0 anchors; 'all': every view anchors
        self.base_temperature = base_temperature  # scales the final loss

    def forward(self, features, labels=None, mask=None):
        """Compute loss for model. If both `labels` and `mask` are None,
        it degenerates to SimCLR unsupervised loss:
        https://arxiv.org/pdf/2002.05709.pdf
        Args:
            features: hidden vector of shape [bsz, n_views, ...].
            labels: ground truth of shape [bsz].
            mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
                has the same class as sample i. Can be asymmetric.
        Returns:
            A loss scalar.
        """
        device = (torch.device('cuda')
                  if features.is_cuda
                  else torch.device('cpu'))

        if len(features.shape) < 3:
            raise ValueError('`features` needs to be [bsz, n_views, ...],'
                             'at least 3 dimensions are required')
        if len(features.shape) > 3:
            # Flatten any trailing feature dimensions to a single vector per view.
            features = features.view(features.shape[0], features.shape[1], -1)

        batch_size = features.shape[0]
        if labels is not None and mask is not None:
            raise ValueError('Cannot define both `labels` and `mask`')
        elif labels is None and mask is None:
            # Unsupervised case: only a sample's other views are positives.
            mask = torch.eye(batch_size, dtype=torch.float32).to(device)
        elif labels is not None:
            labels = labels.contiguous().view(-1, 1)
            if labels.shape[0] != batch_size:
                raise ValueError('Num of labels does not match num of features')
            # mask[i, j] = 1 iff samples i and j share a label.
            mask = torch.eq(labels, labels.T).float().to(device)
        else:
            mask = mask.float().to(device)

        contrast_count = features.shape[1]  # number of views
        # Unbind the views along dim 1 and concatenate along the batch dim.
        contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
        if self.contrast_mode == 'one':
            anchor_feature = features[:, 0]
            anchor_count = 1
        elif self.contrast_mode == 'all':
            anchor_feature = contrast_feature  # every view acts as an anchor
            anchor_count = contrast_count
        else:
            raise ValueError('Unknown mode: {}'.format(self.contrast_mode))

        # compute logits (temperature-scaled dot products)
        anchor_dot_contrast = torch.div(
            torch.matmul(anchor_feature, contrast_feature.T),
            self.temperature)
        # for numerical stability, subtract the per-row max
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()

        # tile mask across the anchor/contrast views
        mask = mask.repeat(anchor_count, contrast_count)
        # mask-out self-contrast cases: all ones with a zero diagonal
        logits_mask = torch.scatter(
            torch.ones_like(mask),
            1,
            torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
            0
        )
        mask = mask * logits_mask  # drop each anchor's comparison with itself

        # compute log_prob: log-softmax over the non-self entries
        exp_logits = torch.exp(logits) * logits_mask
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))

        # compute mean of log-likelihood over the positive pairs of each anchor
        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)

        # loss
        loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
        loss = loss.view(anchor_count, batch_size).mean()
        return loss
| 9,602
| 37.107143
| 112
|
py
|
GSA
|
GSA-main/GSA_CVPR/CSL/general_loss.py
|
import torch
import numpy
def generalized_contrastive_loss(
        hidden1,
        hidden2,
        lambda_weight=0.5,
        temperature=0.5,
        dist='normal',
        hidden_norm=True,
        loss_scaling=2.0):
    """Generalized contrastive loss.
    Both hidden1 and hidden2 should have shape of (n, d).
    Configurations to get following losses:
    * decoupled NT-Xent loss: set dist='logsumexp', hidden_norm=True
    * SWD with normal distribution: set dist='normal', hidden_norm=False
    * SWD with uniform hypersphere: set dist='normal', hidden_norm=True
    * SWD with uniform hypercube: set dist='uniform', hidden_norm=False

    Returns (total_loss, alignment_loss, distribution_matching_loss).
    NOTE(review): the SWD branch allocates its random projection with .cuda(),
    so a CUDA device is required; `temperature` is only used by 'logsumexp'.
    """
    hidden_dim = hidden1.shape[-1]  # get hidden dimension
    if hidden_norm:
        # L2-normalise each row (torch equivalent of tf.math.l2_normalize).
        hidden1 = hidden1 / (hidden1.norm(dim=1, keepdim=True) + 1e-8)
        hidden2 = hidden2 / (hidden2.norm(dim=1, keepdim=True) + 1e-8)
    # Alignment term: mean squared distance between the two views.
    loss_align = torch.mean((hidden1 - hidden2)**2)/2
    hiddens = torch.cat([hidden1, hidden2], 0)
    if dist == 'logsumexp':
        loss_dist_match = get_logsumexp_loss(hiddens, temperature)
    else:
        # Random orthogonal projection matrix for the sliced Wasserstein distance.
        a = torch.empty([hidden_dim, hidden_dim]).normal_(0, 1).cuda()
        rand_w = torch.nn.init.orthogonal_(a).cuda()
        loss_dist_match = get_swd_loss(hiddens, rand_w,
                                       prior=dist,
                                       hidden_norm=hidden_norm)
    # Total: distribution matching minus alignment, scaled.
    a= loss_scaling * (-loss_align + lambda_weight * loss_dist_match)
    return a,loss_align,loss_dist_match
def get_logsumexp_loss(states, temperature):
    """Decoupled uniformity term: mean_i log sum_j exp(<s_i, s_j> / T).

    NOTE(review): hard-codes .cuda(), so a CUDA device is required;
    `bias` is computed but never used.
    """
    scores = torch.matmul(states, states.t()) .cuda()  # (bsz, bsz)
    bias = torch.log(torch.tensor(states.shape[1]).float()).cuda()  # unused
    return torch.mean(torch.log(torch.sum(torch.exp(scores / temperature),dim=1)+1e-8).cuda()).cuda()
def sort(x):
    """Returns the matrix x where each row is sorted (ascending)."""
    # Sorts via numpy argsort then torch.gather so that gradients flow through
    # gather; round-trips through CPU and moves the permutation back with
    # .cuda(), so a CUDA device is required.
    u = x.detach().cpu().numpy()
    t = numpy.argsort(u, axis=1)
    p = torch.from_numpy(t).long().cuda()
    b = torch.gather(x, -1, p)
    return b
'''
xshape = x.shape
print(xshape[1])
rank = torch.sum((x.unsqueeze(2) > x.unsqueeze(1)), dim=2).cuda()
print("r",rank)
for i in range(128):
for j in range(128):
if rank[i][j] < 0:
print(rank[i][j])
elif rank[i][j] >= 128:
print("r",rank[i][j])
rank_inv = torch.einsum(
'dbc,c->db',
torch.Tensor.permute(torch.nn.functional.one_hot(rank.long(), xshape[1]), [0, 2, 1]).float().cuda(),
torch.arange(xshape[1]).float().cuda()).cuda() # (dim, bsz)
# x = gather_nd(x, rank_inv.int(), axis=-1, batch_dims=-1)
q= torch.nn.functional.one_hot(rank, xshape[1]).transpose(2,1).float().cpu()
print("a")
#q=torch.from_numpy(numpy.transpose(torch.nn.functional.one_hot(rank, xshape[1]).int().cpu().numpy(), [0, 2, 1])).float().cuda()
for i in range(128):
print(torch.sum(q[31][i]),i)
print(q.shape)
print(torch.sum(q[31][60]))
t = torch.matmul(q[31][60], torch.arange(xshape[1]).float())
print(t)
t = numpy.array(t.cpu())
q = numpy.array(q.cpu())
#numpy.savetxt('/home/guoyd/Dataset/np2.txt', t)
numpy.savetxt('/home/guoyd/Dataset/np.txt', q[31][60])
# t=torch.matmul(q[31],torch.arange(xshape[1]).float().cuda())
# torch.arange(xshape[1]).float().cuda().cuda())
#print("rr",q==rank_inv)
#l=[]
# w=False
# s=0
for i in range(128):
for j in range(128):
if rank_inv[i][j]<0:
print(rank_inv[i][j])
elif rank_inv[i][j]>=128:
print(rank_inv[i][j],i,j)
w=True
s=i
#for s in range(128):
# print(rank_inv[31][s])
#if w:
# for j in range(128):
# l.append(rank_inv[s][j])
#l=l.sort()
#for i in range(128):
# print(l[i])
p=list(rank_inv[s][:])
p.sort()
n=0
for i in range(len(p)):
print(p[i],len(p),n)
n=n+1
#print(rank_inv[i][s])
b = torch.gather(x, -1, rank_inv.long().cuda())
#print("b",b)
'''
# return b
def get_swd_loss(states, rand_w, prior='normal', stddev=1., hidden_norm=True):
    """Sliced Wasserstein distance between `states` and samples from `prior`.

    states: (bsz, dim) embeddings; rand_w: (dim, dim) random projection matrix.
    prior is 'normal' or 'uniform' (anything else raises ValueError).
    NOTE(review): requires CUDA (uses sort()/.cuda()); `stddev` is unused -
    the normal prior is drawn with mean 1e-6 and std 1+1e-8 instead.
    """
    states_shape = states.shape
    states = torch.matmul(states, rand_w)
    states_t = sort(states.t())  # (dim, bsz): each projected coordinate sorted
    if prior == 'normal':
        states_prior = torch.empty(states_shape).normal_(mean=1e-6,std=1+1e-8)
    elif prior == 'uniform':
        states_prior = torch.empty(states_shape).uniform_(-1.0,1.0)
    else:
        raise ValueError('Unknown prior {}'.format(prior))
    if hidden_norm:
        # Project the prior onto the unit hypersphere, matching the embeddings.
        states_prior = states_prior / (states_prior.norm(dim=1, keepdim=True) + 1e-8)
    states_prior = torch.matmul(states_prior.cuda(), rand_w)
    states_prior_t = sort(states_prior.t())  # (dim, bsz)
    # 1-D Wasserstein-2 per slice = MSE between the sorted marginals.
    return torch.mean((states_prior_t - states_t)**2)
'''
def get_contrastive_loss(z1, z2, nt_xent_temp): # [batch_size, dim]
batch_size = tf.shape(z1)[0]
dim = tf.shape(z1)[1]
z1 = tf.math.l2_normalize(z1, -1)
z2 = tf.math.l2_normalize(z2, -1)
sim = tf.matmul(z1, z2, transpose_b=True) # [batch_size, batch_size]
sim /= nt_xent_temp
labels = tf.eye(batch_size)
loss = (
get_cls_loss(labels, sim) +
get_cls_loss(labels, tf.transpose(sim))
)
return tf.reduce_mean(loss), sim
def get_cls_loss(labels, outputs):
return tf.reduce_mean(cls_loss_object(labels, outputs))
cls_loss_object = tf.keras.losses.CategoricalCrossentropy(
from_logits=True,
reduction=tf.keras.losses.Reduction.NONE)
'''
| 6,102
| 30.621762
| 130
|
py
|
FMLD
|
FMLD-main/mask-test.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 22:57:43 2020
@author: borut batagelj
"""
import os
import torch
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
from torch import nn
# Applying Transforms to the Data
image_transforms = {
    'test': transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        # ImageNet channel statistics (matches the pretrained backbone).
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ])
}

# Load the Data
dataset = 'faces'
test_directory = os.path.join(dataset, 'test')

# Batch size
bs = 128

# Number of classes (mask worn correctly vs. not)
num_classes = 2

# Load Data from folders (class = sub-folder name under faces/test)
data = {
    'test': datasets.ImageFolder(root=test_directory, transform=image_transforms['test']),
}

class_names = data['test'].classes
transform=image_transforms['test']

# Get a mapping of the indices to the class names, in order to see the output classes of the test images.
idx_to_class = {v: k for k, v in data['test'].class_to_idx.items()}
print('Classes: ',idx_to_class)

# Size of Data, to be used for calculating Average Loss and Accuracy
test_data_size = len(data['test'])

# Create iterators for the Data loaded using DataLoader module
test_data_loader = DataLoader(data['test'], batch_size=bs, shuffle=False)

# Print the test set data sizes
print('Number of faces: ',test_data_size)
def computeTestSetAccuracy(model, loss_criterion, data_loader, data_size):
    '''
    Compute the average top-1 accuracy of `model` over `data_loader`.

    :param model: model to evaluate (moved batches use the module-level `device`)
    :param loss_criterion: accepted for interface parity; unused (the loss
        computation is commented out in the original)
    :param data_loader: iterable of (inputs, labels) batches
    :param data_size: total number of samples, used to average per-batch accuracies
    :return: average test accuracy in [0, 1]
    '''
    weighted_correct = 0.0

    # Evaluation mode + no gradient tracking.
    model.eval()
    with torch.no_grad():
        for inputs, targets in data_loader:
            inputs = inputs.to(device)
            targets = targets.to(device)

            # Forward pass and hard predictions.
            logits = model(inputs)
            _, predicted = torch.max(logits.data, 1)

            hits = predicted.eq(targets.data.view_as(predicted))
            batch_acc = torch.mean(hits.type(torch.FloatTensor))

            # Weight each batch's accuracy by its size (last batch may be short).
            weighted_correct += batch_acc.item() * inputs.size(0)

    return weighted_correct / data_size
# Run evaluation on GPU when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
loss_func = nn.CrossEntropyLoss()  # for a multi-class classification problem
model_file = 'resnet152.pt'
if os.path.exists(model_file):
    # The checkpoint stores the whole serialized model (not just a state_dict).
    model = torch.load(model_file)
    model = model.to(device)
    avg_test_acc = computeTestSetAccuracy(model, loss_func, test_data_loader, test_data_size)
    print("Test accuracy : " + str(avg_test_acc))
else:
    # Fixed typo in the user-facing message ("Warrning" -> "Warning").
    print("Warning: No Pytorch model for classification: resnet152.pt. Please Download it from GitHub link.\n")
| 3,464
| 28.615385
| 112
|
py
|
FMLD
|
FMLD-main/show_save_gt.py
|
# Copyright 2021 Borut Batagelj.
import glob
import os
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from tqdm import tqdm
# NOTE(review): the indentation of this script was reconstructed from context;
# in particular the placement of plt.show() (here: inside the per-box
# annotation branch) should be confirmed against the original file.
show_annotations=False #show image with annotations
save_faces=True #save faces from images to folders: correctly_worn, without_mask, incorrectly_worn

gt_dir='FMLD_annotations/'; #FMLD xml folder
images_wider_dir='WIDER/' #folder where are WIDER_val and WIDER_train
images_mafa_dir='MAFA/' #folder where are test-images and train-images

gt_files=glob.glob(gt_dir+'*/*.xml')
gt_num=len(gt_files)

# Create the output folder tree once (train/test x three wearing classes).
if save_faces and not os.path.exists('faces'):
    os.makedirs('faces/test/compliant/correctly_worn')
    os.makedirs('faces/test/non-compliant/without_mask')
    os.makedirs('faces/test/non-compliant/incorrectly_worn')
    os.makedirs('faces/train/compliant/correctly_worn')
    os.makedirs('faces/train/non-compliant/without_mask')
    os.makedirs('faces/train/non-compliant/incorrectly_worn')

for xml_file in tqdm(gt_files):
    tree = ET.parse(xml_file)
    root = tree.getroot()
    if save_faces:
        filename = root.find('filename').text
        folder = root.find('folder').text
    database = root.find('source/database').text
    path = root.find('path').text
    # Resolve the image path against the dataset the annotation came from.
    if database == 'WIDER':
        image_path = os.path.join(images_wider_dir,path)
    elif database == 'MAFA':
        image_path = os.path.join(images_mafa_dir,path)
    if save_faces or show_annotations:
        if not os.path.exists(image_path):
            filepath = os.path.dirname(image_path)
            print(f'Download {database} dataset and provide images in folder: {filepath}.\n')
            quit()
        I = plt.imread(image_path)
        [h,w,c]=I.shape
        if show_annotations:
            plt.imshow(I)
            ax = plt.gca()
        for ii, boxes in enumerate(root.iter('object'), start=1):
            name = boxes.find('name').text
            ymin, xmin, ymax, xmax = None, None, None, None
            # Clamp the bounding box to the image bounds.
            xmin = max(0,int(float(boxes.find("bndbox/xmin").text)))
            ymin = max(0,int(float(boxes.find("bndbox/ymin").text)))
            xmax = min(w,int(float(boxes.find("bndbox/xmax").text)))
            ymax = min(h,int(float(boxes.find("bndbox/ymax").text)))
            BBox=[xmin, ymin, xmax-xmin, ymax-ymin]
            sub_folder = None
            if boxes.find('difficult').text == '1':
                col='white'  # difficult faces: drawn but never saved
            else:
                if name == 'unmasked_face':
                    col='red'
                    sub_folder = 'non-compliant/without_mask/'
                elif name == 'masked_face':
                    col='green'
                    sub_folder = 'compliant/correctly_worn/'
                elif name == 'invalid_face':
                    col='blue'  # invalid faces are never saved
                elif name == 'incorrectly_masked_face':
                    col='yellow'
                    sub_folder = 'non-compliant/incorrectly_worn/'
            # Save the face crop into the class folder for this split.
            if save_faces and sub_folder:
                plt.imsave(os.path.join('faces',folder,sub_folder,filename[0:-4]+'-face'+str(ii)+'.png'), I[ymin:ymax,xmin:xmax,:])
            if show_annotations:
                rect = patches.Rectangle((BBox[0], BBox[1]), BBox[2], BBox[3],linewidth=2, edgecolor=col, facecolor='none')
                ax.add_patch(rect)
                plt.show()
| 3,245
| 33.531915
| 131
|
py
|
GNNImpute
|
GNNImpute-main/example/test.py
|
# %%
import numpy as np
import scanpy as sc
from scipy import sparse
from sklearn.cluster import KMeans
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score
from scipy.stats import pearsonr
from sklearn.metrics.pairwise import cosine_similarity
from GNNImpute.api import GNNImpute
# %%
# Masked expression matrix and the sparse indicator of which entries were masked.
adata = sc.read_h5ad('../data/Klein/masked/Klein_01.h5ad')
maskIndex = sparse.load_npz('../data/Klein/masked/Klein_maskIndex_01.csv.npz')
def pearsonr_error(y, h):
    """Mean row-wise Pearson correlation between `y` and `h`.

    1-D inputs are promoted to a single row before the comparison.
    """
    if len(y.shape) < 2:
        y = y.reshape((1, -1))
        h = h.reshape((1, -1))
    per_row = [pearsonr(y[i], h[i])[0] for i in range(y.shape[0])]
    return np.mean(per_row)
def cosine_similarity_score(y, h):
    """Mean cosine similarity between corresponding rows of `y` and `h`.

    1-D inputs are promoted to a single row. Uses the diagonal of the full
    sklearn pairwise cosine-similarity matrix, as in the original.
    """
    if len(y.shape) < 2:
        y = y.reshape((1, -1))
        h = h.reshape((1, -1))
    cos = cosine_similarity(y, h)
    diagonal = [cos[i][i] for i in range(len(cos))]
    return np.mean(diagonal)
# %%
# Impute the masked matrix with the GAT-based model.
adata = GNNImpute(adata=adata,
                  layer='GATConv',
                  no_cuda=False,
                  epochs=3000,
                  lr=0.001,
                  weight_decay=0.0005,
                  hidden=50,
                  patience=200,
                  fastmode=False,
                  heads=3,
                  use_raw=True,
                  verbose=True)

# %%
# Imputation error, restricted to the artificially masked entries of test cells.
dropout_pred = adata.X[adata.obs.idx_test]
dropout_true = adata.raw.X.A[adata.obs.idx_test]

masking_row_test, masking_col_test = np.where(maskIndex.A[adata.obs.idx_test, :] > 0)

y = dropout_true[masking_row_test, masking_col_test]
h = dropout_pred[masking_row_test, masking_col_test]

mse = float('%.4f' % mean_squared_error(y, h))
mae = float('%.4f' % mean_absolute_error(y, h))
pcc = float('%.4f' % pearsonr_error(y, h))
cs = float('%.4f' % cosine_similarity_score(y, h))

# %%
# Clustering quality of the imputed matrix versus the annotated clusters.
clusters = adata.obs.cluster.values

adata_pred = sc.AnnData(adata.X)
sc.pp.normalize_total(adata_pred)
sc.pp.log1p(adata_pred)
sc.pp.highly_variable_genes(adata_pred, n_top_genes=2000)
adata_pred = adata_pred[:, adata_pred.var.highly_variable]
sc.pp.scale(adata_pred, max_value=10)

kmeans = KMeans(n_clusters=len(set(clusters))).fit(adata_pred.X)
ari = float('%.4f' % adjusted_rand_score(clusters, kmeans.labels_))
nmi = float('%.4f' % normalized_mutual_info_score(clusters, kmeans.labels_))

# %%
print(mse, mae, pcc, cs, ari, nmi)
| 2,432
| 25.445652
| 85
|
py
|
GNNImpute
|
GNNImpute-main/data/mask.py
|
# %%
import os
import copy
import numpy as np
import pandas as pd
import scanpy as sc
from scipy import sparse
# %%
# def mask(data_train, masked_prob):
# """
# 将表达矩阵中非零的值随机置为0并返回,同时返回置为0的元素的坐标
# :param data_train: 表达矩阵
# :param masked_prob: 置0比例
# :return:
# """
# index_pair_train = np.where(data_train != 0)
# masking_idx_train = np.random.choice(index_pair_train[0].shape[0], int(index_pair_train[0].shape[0] * masked_prob),
# replace=False)
# # to retrieve the position of the masked: data_train[index_pair_train[0][masking_idx], index_pair[1][masking_idx]]
# X_train = copy.deepcopy(data_train)
# X_train[index_pair_train[0][masking_idx_train], index_pair_train[1][masking_idx_train]] = 0
# return X_train, index_pair_train[0][masking_idx_train], index_pair_train[1][masking_idx_train]
def maskPerCol(data_train, masked_prob):
"""
将表达矩阵中每列非零的值随机置为0并返回,同时返回置为0的元素的坐标
:param data_train: 表达矩阵
:param masked_prob: 置0比例
:return:
"""
X_train = copy.deepcopy(data_train)
rows = []
cols = []
for col in range(data_train.shape[1]):
index_pair_train = np.where(data_train[:, col])
if index_pair_train[0].shape[0] <= 3:
continue
masking_idx_train = np.random.choice(index_pair_train[0].shape[0],
int(index_pair_train[0].shape[0] * masked_prob),
replace=False)
X_train[index_pair_train[0][masking_idx_train], [col] * masking_idx_train.shape[0]] = 0
for i in index_pair_train[0][masking_idx_train]:
rows.append(i)
cols.append(col)
return X_train, rows, cols
# %%
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--masked_prob', default=0.1, type=float)
parser.add_argument('--dataset', default='Klein', type=str)
# NOTE(review): --downsample is parsed but never used below.
parser.add_argument('--downsample', default=1.0, type=float)
args = parser.parse_args()
# Load the preprocessed dataset and keep the normalized counts as .raw
# so the masked positions can later be compared against them.
adata = sc.read_h5ad('./data/%s/processed/%s.h5ad' % (args.dataset, args.dataset))
sc.pp.normalize_total(adata)
adata.raw = adata
# %%
# Mask a fraction of the non-zero entries per gene and persist:
# the masked matrix as CSV (genes x cells) and as .h5ad.
path = './data/%s/masked' % args.dataset
if not os.path.exists(path):
    os.makedirs(path)
masked, masking_row, masking_col = maskPerCol(adata.raw.X.A, args.masked_prob)
pd.DataFrame(masked, index=adata.obs.index, columns=adata.var.index) \
    .T.to_csv(path + '/%s_%s.csv' % (args.dataset, str(args.masked_prob).replace('.', '')))
adata.X = sparse.csr_matrix(masked)
adata.write(path + '/%s_%s.h5ad' % (args.dataset, str(args.masked_prob).replace('.', '')))
# %%
# Sparse indicator matrix of the masked positions.
# NOTE(review): the filename says '.csv' but scipy.sparse.save_npz appends
# '.npz', so the file on disk ends in '.csv.npz' -- confirm loaders expect this.
maskIndex = sparse.coo_matrix(([1] * len(masking_col), (masking_row, masking_col)))
sparse.save_npz(path + '/%s_maskIndex_%s.csv' % (args.dataset, str(args.masked_prob).replace('.', '')), maskIndex)
| 2,838
| 30.898876
| 121
|
py
|
GNNImpute
|
GNNImpute-main/data/PBMC/preprocess.py
|
# %%
import os
import sys
import codecs
import scanpy as sc
# Re-wrap stdout as UTF-8 so scanpy's log output prints cleanly.
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
# %%
adata = sc.read_10x_mtx('./data/PBMC/', var_names='gene_symbols', cache=True)
# %%
# QC: flag mitochondrial genes, drop high-gene-count (doublet-like) and
# high-mito (dying) cells, then remove low-quality cells and rare genes.
adata.var['mt'] = adata.var_names.str.startswith('MT-')
sc.pp.calculate_qc_metrics(adata, qc_vars=['mt'], percent_top=None, log1p=False, inplace=True)
adata = adata[adata.obs.n_genes_by_counts < 2000, :]
adata = adata[adata.obs.pct_counts_mt < 5, :]
sc.pp.filter_cells(adata, min_genes=200)
sc.pp.filter_genes(adata, min_cells=3)
adata.raw = adata
# %%
# Persist the filtered raw counts.
folder = os.path.exists('./data/PBMC/processed')
if not folder:
    os.makedirs('./data/PBMC/processed')
adata.write('./data/PBMC/processed/PBMC.h5ad')
# %%
# Standard scanpy clustering pipeline followed by a marker-gene heatmap
# per Leiden cluster (exploratory; nothing below is saved to disk).
sc.pp.normalize_total(adata, target_sum=1e4)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata, min_mean=0.0125, max_mean=3, min_disp=0.5)
adata.raw = adata
adata = adata[:, adata.var.highly_variable]
sc.pp.regress_out(adata, ['total_counts', 'pct_counts_mt'])
sc.pp.scale(adata, max_value=10)
sc.tl.pca(adata, svd_solver='arpack')
sc.pp.neighbors(adata, n_neighbors=10, n_pcs=40)
sc.tl.leiden(adata)
marker_genes = ['S100A9', 'GZMH', 'HLA-DRB5', 'RP11-290F20.3', 'CD7', 'LTB', 'LYZ', 'RPS5', 'CD74', 'GZMA', 'RPS8',
                'FCER1G', 'RPL32', 'GNLY', 'S100A8', 'B2M', 'LST1', 'RPS13', 'HLA-DQA1', 'RPL11', 'S100A10', 'RPLP2',
                'RPS2', 'S100A6', 'S100A4', 'LYAR', 'HLA-DRB1', 'AIF1', 'CCL5', 'TYROBP', 'CD52', 'IL7R', 'CTSW',
                'HLA-DPB1', 'CLIC3', 'CD79B', 'FTH1', 'HLA-DPA1', 'CST3', 'RPL31', 'FTL', 'RPL13', 'FXYD5', 'RPS6',
                'CD79A', 'GZMK', 'NKG7', 'HLA-B', 'IL32', 'HLA-DRA']
sc.pl.heatmap(adata, marker_genes, groupby='leiden', dendrogram=True, swap_axes=True, use_raw=True)
| 1,769
| 32.396226
| 117
|
py
|
GNNImpute
|
GNNImpute-main/data/Klein/preprocess.py
|
# %%
import os
import scanpy as sc
from scipy import sparse
# %%
# Load the four time points (day 0/2/4/7) of the Klein mESC dataset;
# each CSV is genes x cells, hence the transposes below.
adataD0 = sc.read_csv('./data/Klein/GSM1599494_ES_d0_main.csv.bz2')
adataD2 = sc.read_csv('./data/Klein/GSM1599497_ES_d2_LIFminus.csv.bz2')
adataD4 = sc.read_csv('./data/Klein/GSM1599498_ES_d4_LIFminus.csv.bz2')
adataD7 = sc.read_csv('./data/Klein/GSM1599499_ES_d7_LIFminus.csv.bz2')
# %%
# Concatenate cells across time points; the time point becomes the
# 'cluster' label used later as clustering ground truth.
adata = sc.AnnData.concatenate(adataD0.T, adataD2.T, adataD4.T, adataD7.T, batch_key='cluster',
                               batch_categories=['d0', 'd2', 'd4', 'd7', ])
adata.X = sparse.csr_matrix(adata.X)
# %%
# Basic QC: drop extreme total-count cells, low-gene cells and rare genes.
sc.pp.calculate_qc_metrics(adata, percent_top=None, log1p=False, inplace=True)
adata = adata[adata.obs.total_counts < 75000, :]
# sc.pl.scatter(adata, x='total_counts', y='n_genes_by_counts')
# sc.pl.violin(adata, ['n_genes_by_counts', 'total_counts'], jitter=False, multi_panel=True)
sc.pp.filter_cells(adata, min_genes=200)
sc.pp.filter_genes(adata, min_cells=3)
adata.raw = adata
# %%
# Persist the filtered raw counts.
folder = os.path.exists('./data/Klein/processed')
if not folder:
    os.makedirs('./data/Klein/processed')
adata.write('./data/Klein/processed/Klein.h5ad')
| 1,126
| 26.487805
| 95
|
py
|
GNNImpute
|
GNNImpute-main/GNNImpute/layer.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def layer(layer_type, **kwargs):
    """Factory for graph layers.

    :param layer_type: 'GCNConv' (plain graph convolution) or 'GATConv'
                       (multi-head graph attention).
    :param kwargs: 'in_channels' and 'out_channels' for both types;
                   'heads' and 'concat' additionally for 'GATConv'.
    :raises ValueError: for an unknown ``layer_type`` (previously this
                        silently returned ``None``, deferring the failure
                        to the first forward pass).
    """
    if layer_type == 'GCNConv':
        return GraphConvolution(in_features=kwargs['in_channels'], out_features=kwargs['out_channels'])
    if layer_type == 'GATConv':
        return MultiHeadAttentionLayer(in_features=kwargs['in_channels'], out_features=kwargs['out_channels'],
                                       heads=kwargs['heads'], concat=kwargs['concat'])
    raise ValueError("unknown layer_type: {!r} (expected 'GCNConv' or 'GATConv')".format(layer_type))
class GraphConvolution(torch.nn.Module):
    """Plain GCN layer (Kipf & Welling, https://arxiv.org/abs/1609.02907).

    Computes ``adj @ (input @ weight) + bias``.
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)],
        # matching the reference implementation.
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)
    def forward(self, input, adj):
        projected = torch.mm(input, self.weight)
        aggregated = torch.spmm(adj, projected)
        if self.bias is None:
            return aggregated
        return aggregated + self.bias
    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__,
                                      self.in_features, self.out_features)
class GraphAttentionLayer(nn.Module):
    """Single-head GAT layer (Velickovic et al., https://arxiv.org/abs/1710.10903)."""
    def __init__(self, in_features, out_features, dropout=0.6, alpha=0.2, concat=True):
        super(GraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        self.concat = concat
        # Linear projection W and attention vector a, Xavier-initialised.
        self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        self.leakyrelu = nn.LeakyReLU(self.alpha)
    def forward(self, h, adj):
        # h: (N, in_features) -> Wh: (N, out_features)
        Wh = torch.mm(h, self.W)
        scores = self._prepare_attentional_mechanism_input(Wh)
        # Non-edges get a huge negative score so softmax ignores them.
        masked = torch.where(adj > 0, scores, -9e15 * torch.ones_like(scores))
        coeffs = F.dropout(F.softmax(masked, dim=1), self.dropout, training=self.training)
        out = torch.matmul(coeffs, Wh)
        return F.elu(out) if self.concat else out
    def _prepare_attentional_mechanism_input(self, Wh):
        # e[i, j] = LeakyReLU(a1 . Wh_i + a2 . Wh_j), built via broadcasting:
        # left/right are (N, 1), their broadcast sum is (N, N).
        left = torch.matmul(Wh, self.a[:self.out_features, :])
        right = torch.matmul(Wh, self.a[self.out_features:, :])
        return self.leakyrelu(left + right.T)
    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__,
                                      self.in_features, self.out_features)
class MultiHeadAttentionLayer(nn.Module):
    """Averages the outputs of ``heads`` independent GAT attention heads."""
    def __init__(self, in_features, out_features, heads, concat=True):
        super(MultiHeadAttentionLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Register each head explicitly so its parameters are tracked under
        # the 'attention_<i>' names (keeps state_dict keys stable).
        self.attentions = [GraphAttentionLayer(in_features, out_features, concat=concat)
                           for _ in range(heads)]
        for idx, head in enumerate(self.attentions):
            self.add_module('attention_{}'.format(idx), head)
    def forward(self, x, adj):
        stacked = torch.stack([head(x, adj) for head in self.attentions])
        return stacked.mean(dim=0)
    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__,
                                      self.in_features, self.out_features)
| 4,298
| 35.74359
| 111
|
py
|
GNNImpute
|
GNNImpute-main/GNNImpute/utils.py
|
import torch
import numpy as np
import scanpy as sc
import scipy.sparse as sp
from sklearn.decomposition import PCA
from sklearn.neighbors import kneighbors_graph
def normalize(adata, filter_min_counts=True, size_factors=True, normalize_input=True, logtrans_input=True):
    """Standard scanpy preprocessing applied in place to ``adata``.

    :param filter_min_counts: drop genes/cells with zero counts.
    :param size_factors: normalize per cell and store per-cell size factors
                         (total counts over the median) in obs['size_factors'];
                         otherwise the size factor is 1.0 for every cell.
    :param normalize_input: z-score the expression matrix.
    :param logtrans_input: apply log1p before scaling.
    :return: the same AnnData object, modified in place.
    """
    if filter_min_counts:
        sc.pp.filter_genes(adata, min_counts=1)
        sc.pp.filter_cells(adata, min_counts=1)
    if size_factors:
        sc.pp.normalize_per_cell(adata)
        adata.obs['size_factors'] = adata.obs.n_counts / np.median(adata.obs.n_counts)
    else:
        adata.obs['size_factors'] = 1.0
    if logtrans_input:
        sc.pp.log1p(adata)
    if normalize_input:
        sc.pp.scale(adata)
    return adata
def train_val_split(adata, train_size=0.6, val_size=0.2, test_size=0.2):
    """Randomly partition cells into boolean train/val/test masks.

    Works on a copy of ``adata`` and adds three boolean columns to its
    ``obs``: 'idx_train', 'idx_val' and 'idx_test' (a partition of all cells).

    :param adata: AnnData-like object with ``n_obs`` and an ``obs`` mapping.
    :param train_size/val_size/test_size: fractions that must sum to 1.
    :return: the annotated copy.
    """
    assert train_size + val_size + test_size == 1
    adata = adata.copy()
    cell_nums = adata.n_obs
    test_val = np.random.choice(cell_nums, int(cell_nums * (val_size + test_size)), replace=False)
    # Perf fix: 'i not in <ndarray>' is an O(n) scan per lookup, which made
    # this split quadratic in the number of cells; use sets for O(1) lookups.
    test_val_set = set(test_val.tolist())
    idx_train = [i for i in list(range(cell_nums)) if i not in test_val_set]
    idx_test = np.random.choice(test_val, int(len(test_val) * (test_size / (val_size + test_size))), replace=False)
    idx_test_set = set(idx_test.tolist())
    idx_val = [i for i in test_val if i not in idx_test_set]
    tmp = np.zeros(cell_nums, dtype=bool)
    tmp[idx_train] = True
    adata.obs['idx_train'] = tmp
    tmp = np.zeros(cell_nums, dtype=bool)
    tmp[idx_val] = True
    adata.obs['idx_val'] = tmp
    tmp = np.zeros(cell_nums, dtype=bool)
    tmp[idx_test] = True
    adata.obs['idx_test'] = tmp
    return adata
def row_normalize(mx):
    """Scale each row of a sparse matrix to sum to 1; all-zero rows stay zero."""
    row_sums = np.array(mx.sum(1))
    inv = np.power(row_sums, -1).flatten()
    # Rows that sum to zero produce inf; map those back to zero.
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(mx)
def kneighbor(adata, n_components=50, k=5):
    """Row-normalized k-NN connectivity graph of the cells, built in PCA space."""
    reduced = PCA(n_components=n_components).fit_transform(adata.X)
    graph = kneighbors_graph(reduced, k, mode='connectivity', include_self=True)
    return row_normalize(graph)
def adata2gdata(adata, use_raw=True):
    """Pack an AnnData object into the tensor dict consumed by ``train``.

    :param use_raw: when True the regression targets 'y' are the raw counts
                    (``adata.raw``); otherwise the processed matrix itself.
    :return: dict with 'x', 'y', 'size_factors', 'adj', 'train_mask', 'val_mask'.
    """
    adj = torch.tensor(kneighbor(adata, n_components=50, k=5).A, dtype=torch.float)
    features = torch.tensor(adata.X, dtype=torch.float)
    if use_raw:
        labels = torch.tensor(adata.raw.X.A, dtype=torch.float)
    else:
        labels = torch.tensor(adata.X, dtype=torch.float)
    size_factors = torch.tensor(adata.obs.size_factors, dtype=torch.float).reshape(-1, 1)
    return {
        'x': features,
        'y': labels,
        'size_factors': size_factors,
        'adj': adj,
        'train_mask': torch.tensor(adata.obs.idx_train, dtype=torch.bool),
        'val_mask': torch.tensor(adata.obs.idx_val, dtype=torch.bool),
    }
| 2,914
| 28.15
| 115
|
py
|
GNNImpute
|
GNNImpute-main/GNNImpute/model.py
|
import torch
import torch.nn.functional as F
from .layer import layer
class GNNImpute(torch.nn.Module):
    """Graph autoencoder for scRNA-seq imputation.

    A two-layer graph encoder (GCN or GAT, chosen via ``layerType``)
    compresses cells to ``z_dim`` latent features; a plain MLP decoder
    reconstructs the gene-expression matrix, rescaled per cell by its
    size factor.
    """
    def __init__(self, input_dim, h_dim=512, z_dim=50, layerType='GATConv', heads=3):
        super(GNNImpute, self).__init__()
        # Encoder: two graph-conv blocks with batch norm.
        self.encode_conv1 = layer(layerType, in_channels=input_dim, out_channels=h_dim,
                                  heads=heads, concat=False)
        self.encode_bn1 = torch.nn.BatchNorm1d(h_dim)
        self.encode_conv2 = layer(layerType, in_channels=h_dim, out_channels=z_dim,
                                  heads=heads, concat=False)
        self.encode_bn2 = torch.nn.BatchNorm1d(z_dim)
        # Decoder: MLP back to gene space.
        self.decode_linear1 = torch.nn.Linear(z_dim, h_dim)
        self.decode_bn1 = torch.nn.BatchNorm1d(h_dim)
        self.decode_linear2 = torch.nn.Linear(h_dim, input_dim)
    def encode(self, x, edge_index):
        h = self.encode_conv1(x, edge_index)
        h = F.dropout(F.relu(self.encode_bn1(h)), p=0.5, training=self.training)
        z = self.encode_conv2(h, edge_index)
        z = F.dropout(F.relu(self.encode_bn2(z)), p=0.5, training=self.training)
        return z
    def decode(self, z):
        h = F.relu(self.decode_bn1(self.decode_linear1(z)))
        return F.relu(self.decode_linear2(h))
    def forward(self, x, edge_index, size_factors):
        recon = self.decode(self.encode(x, edge_index))
        # Rescale each cell's reconstruction by its size factor.
        return recon * size_factors
| 1,491
| 32.155556
| 87
|
py
|
GNNImpute
|
GNNImpute-main/GNNImpute/api.py
|
from .model import GNNImpute as Model
from .train import train
from .utils import adata2gdata, train_val_split, normalize
def GNNImpute(adata,
              layer='GATConv',
              no_cuda=False,
              epochs=3000,
              lr=0.001,
              weight_decay=0.0005,
              hidden=50,
              patience=200,
              fastmode=False,
              heads=3,
              use_raw=True,
              verbose=True):
    """End-to-end imputation: preprocess ``adata``, train the model and
    write the imputed expression matrix back into ``adata.X``.

    :param adata: AnnData object with raw counts.
    :param layer: graph layer type, 'GATConv' or 'GCNConv'.
    :param hidden: latent dimension of the autoencoder bottleneck.
    :return: the preprocessed AnnData with imputed ``X``.
    """
    # Build the model before preprocessing; filtering is disabled below,
    # so the number of genes does not change.
    model = Model(input_dim=adata.n_vars, h_dim=512, z_dim=hidden, layerType=layer, heads=heads)
    adata = train_val_split(normalize(adata, filter_min_counts=False))
    gdata = adata2gdata(adata, use_raw=use_raw)
    train(gdata=gdata, model=model, no_cuda=no_cuda, epochs=epochs, lr=lr, weight_decay=weight_decay,
          patience=patience, fastmode=fastmode, verbose=verbose)
    pred = model(gdata['x'], gdata['adj'], gdata['size_factors'])
    adata.X = pred.detach().cpu()
    return adata
| 1,004
| 28.558824
| 101
|
py
|
GNNImpute
|
GNNImpute-main/GNNImpute/__init__.py
| 0
| 0
| 0
|
py
|
|
GNNImpute
|
GNNImpute-main/GNNImpute/train.py
|
import os
import time
import glob
import torch
def train(gdata, model,
          no_cuda=False,
          epochs=3000,
          lr=0.001,
          weight_decay=0.0005,
          patience=200,
          fastmode=False,
          verbose=True):
    """Train ``model`` on ``gdata`` with MSE loss and early stopping.

    :param gdata: dict of tensors: 'x', 'y', 'adj', 'size_factors',
                  'train_mask', 'val_mask' (see ``adata2gdata``).
    :param model: module invoked as ``model(x, adj, size_factors)``.
    :param no_cuda: force CPU even when CUDA is available.
    :param epochs: maximum number of epochs.
    :param patience: epochs without improvement before stopping early.
    :param fastmode: skip the separate validation pass; the training loss
                     then drives early stopping.
    :param verbose: print losses every 10 epochs.

    Side effects: checkpoints '<epoch>.pkl' are written to the current
    directory; all but the best are deleted afterwards, and the best
    weights are loaded back into ``model``.
    """
    device = torch.device('cuda' if torch.cuda.is_available() and not no_cuda else 'cpu')
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    lossFunc = torch.nn.MSELoss(reduction='mean')
    for key in gdata.keys():
        gdata[key] = gdata[key].to(device)
    def train_wrapper(epoch):
        # One optimization step; returns the loss used for early stopping.
        model.train()
        optimizer.zero_grad()
        pred = model(gdata['x'], gdata['adj'], gdata['size_factors'])
        dropout_pred = pred[gdata['train_mask']]
        dropout_true = gdata['y'][gdata['train_mask']]
        loss_train = lossFunc(dropout_pred, dropout_true)
        loss_train.backward()
        optimizer.step()
        if not fastmode:
            model.eval()
            pred = model(gdata['x'], gdata['adj'], gdata['size_factors'])
            dropout_pred = pred[gdata['val_mask']]
            dropout_true = gdata['y'][gdata['val_mask']]
            loss_val = lossFunc(dropout_pred, dropout_true)
        else:
            # Bug fix: with fastmode=True, loss_val was never assigned and
            # the return below raised NameError; fall back to the train loss.
            loss_val = loss_train
        if (epoch + 1) % 10 == 0 and verbose:
            print('Epoch: {:04d}'.format(epoch + 1),
                  'loss_train: {:.4f}'.format(loss_train.data.item()),
                  'loss_val: {:.4f}'.format(loss_val.data.item()))
        return loss_val.data.item()
    t_total = time.time()
    loss_values = []
    bad_counter = 0
    best = float('inf')
    best_epoch = 0
    for epoch in range(epochs):
        loss_values.append(train_wrapper(epoch))
        if loss_values[-1] < best:
            # New best: checkpoint and reset the patience counter.
            torch.save(model.state_dict(), '{}.pkl'.format(epoch))
            best = loss_values[-1]
            best_epoch = epoch
            bad_counter = 0
        else:
            bad_counter += 1
        if bad_counter == patience:
            break
    # Remove every checkpoint except the best one.
    # NOTE(review): this globs *.pkl in the current working directory, so
    # unrelated .pkl files there would be parsed (and possibly removed).
    files = glob.glob('*.pkl')
    for file in files:
        epoch_nb = int(file.split('.')[0])
        if epoch_nb != best_epoch:
            os.remove(file)
    print('Total time elapsed: {:.4f}s'.format(time.time() - t_total))
    # Restore best model
    model.load_state_dict(torch.load('{}.pkl'.format(best_epoch)))
| 2,321
| 27.317073
| 89
|
py
|
dcstfn
|
dcstfn-master/experiment/run.py
|
import sys
sys.path.append('..')
import os
# Select the TF backend and silence TF's C++ logging before keras imports.
os.environ['KERAS_BACKEND'] = 'tensorflow'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import argparse
from functools import partial
import json
from keras import optimizers
from pathlib import Path
from toolbox.data import load_train_set
from toolbox.model import get_model
from toolbox.experiment import Experiment
# The single positional argument is a JSON config describing the model,
# optimizer, data patch sizes and the train/val/test sets.
parser = argparse.ArgumentParser()
parser.add_argument('config', type=Path)
args = parser.parse_args()
param = json.load(args.config.open())
# Model
scale = param['scale']
build_model = partial(get_model(param['model']['name']),
                      **param['model']['params'])
# Instantiate the optimizer from its name/params, defaulting to Adam.
if 'optimizer' in param:
    optimizer = getattr(optimizers, param['optimizer']['name'].lower())
    optimizer = optimizer(**param['optimizer']['params'])
else:
    optimizer = 'adam'
lr_block_size = tuple(param['lr_block_size'])
# Data
load_train_set = partial(load_train_set,
                         lr_sub_size=param['lr_sub_size'],
                         lr_sub_stride=param['lr_sub_stride'])
# Training
expt = Experiment(scale=param['scale'], load_set=load_train_set,
                  build_model=build_model, optimizer=optimizer,
                  save_dir=param['save_dir'])
print('training process...')
expt.train(train_set=param['train_set'], val_set=param['val_set'],
           epochs=param['epochs'], resume=True)
# Evaluation
print('evaluation process...')
for test_set in param['test_sets']:
    expt.test(test_set=test_set, lr_block_size=lr_block_size)
| 1,517
| 28.192308
| 71
|
py
|
dcstfn
|
dcstfn-master/experiment/__init__.py
| 0
| 0
| 0
|
py
|
|
dcstfn
|
dcstfn-master/toolbox/experiment.py
|
from functools import partial
from pathlib import Path
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras import backend as K
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.utils.vis_utils import plot_model
from keras.preprocessing.image import img_to_array
from osgeo import gdal_array
from toolbox.data import data_dir, load_image_pairs, load_test_set
from toolbox.metrics import psnr, r2
class Experiment(object):
    """Training/evaluation harness for the DCSTFN fusion model.

    Manages the on-disk layout under ``save_dir`` (model/config files,
    per-epoch weights, training history, test outputs) and wraps model
    compilation, resumable training and block-wise prediction on test images.
    """
    def __init__(self, scale=16, load_set=None, build_model=None,
                 optimizer='adam', save_dir='.'):
        self.scale = scale
        self.load_set = partial(load_set, scale=scale)
        self.build_model = partial(build_model)
        self.optimizer = optimizer
        # Directory layout: save_dir/{config.yaml, model.hdf5, model.eps,
        # train/{history.csv, weights/}, test/}.
        self.save_dir = Path(save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.config_file = self.save_dir / 'config.yaml'
        self.model_file = self.save_dir / 'model.hdf5'
        self.visual_file = self.save_dir / 'model.eps'
        self.train_dir = self.save_dir / 'train'
        self.train_dir.mkdir(exist_ok=True)
        self.history_file = self.train_dir / 'history.csv'
        self.weights_dir = self.train_dir / 'weights'
        self.weights_dir.mkdir(exist_ok=True)
        self.test_dir = self.save_dir / 'test'
        self.test_dir.mkdir(exist_ok=True)
    def weights_file(self, epoch=None):
        # Without an epoch this is the Keras template path used by
        # ModelCheckpoint; with one, the concrete checkpoint path.
        if epoch is None:
            return self.weights_dir / 'ep{epoch:04d}.hdf5'
        else:
            return self.weights_dir / 'ep{:04d}.hdf5'.format(epoch)
    @property
    def latest_epoch(self):
        # Last epoch recorded in history.csv, or -1 when there is none yet.
        try:
            return pd.read_csv(str(self.history_file))['epoch'].iloc[-1]
        except (FileNotFoundError, pd.io.common.EmptyDataError):
            pass
        return -1
    @staticmethod
    def _ensure_dimension(array, dim):
        # Prepend singleton axes until the array has `dim` dimensions.
        while len(array.shape) < dim:
            array = array[np.newaxis, ...]
        return array
    @staticmethod
    def _ensure_channel(array, c):
        # Keep only channel `c`, preserving the channel axis.
        return array[..., c:c + 1]
    @staticmethod
    def validate(array):
        """Coerce an array to 4-D (num, height, width, 1 channel)."""
        array = Experiment._ensure_dimension(array, 4)
        array = Experiment._ensure_channel(array, 0)
        return array
    def compile(self, model):
        """Compile model with default settings."""
        model.compile(optimizer=self.optimizer, loss='mse', metrics=[psnr, r2])
        return model
    def train(self, train_set, val_set, epochs=10, resume=True):
        """Fit the model, optionally resuming from the latest checkpoint,
        then plot the loss/PSNR/R2 history next to history.csv."""
        # Load and process data
        x_train, y_train = self.load_set(train_set)
        x_val, y_val = self.load_set(val_set)
        assert len(x_train) == 3 and len(x_val) == 3
        for i in range(3):
            x_train[i], x_val[i] = [self.validate(x) for x in [x_train[i], x_val[i]]]
        y_train, y_val = [self.validate(y) for y in [y_train, y_val]]
        # Compile model
        model = self.compile(self.build_model(*x_train))
        model.summary()
        self.config_file.write_text(model.to_yaml())
        plot_model(model, to_file=str(self.visual_file), show_shapes=True)
        # Inherit weights
        if resume:
            latest_epoch = self.latest_epoch
            if latest_epoch > -1:
                weights_file = self.weights_file(epoch=latest_epoch)
                model.load_weights(str(weights_file))
            initial_epoch = latest_epoch + 1
        else:
            initial_epoch = 0
        # Set up callbacks
        callbacks = []
        callbacks += [ModelCheckpoint(str(self.model_file))]
        callbacks += [ModelCheckpoint(str(self.weights_file()),
                                      save_weights_only=True)]
        callbacks += [CSVLogger(str(self.history_file), append=resume)]
        # Train
        model.fit(x_train, y_train, batch_size=320, epochs=epochs, callbacks=callbacks,
                  validation_data=(x_val, y_val), initial_epoch=initial_epoch)
        # Plot metrics history
        prefix = str(self.history_file).rsplit('.', maxsplit=1)[0]
        df = pd.read_csv(str(self.history_file))
        epoch = df['epoch']
        for metric in ['Loss', 'PSNR', 'R2']:
            train = df[metric.lower()]
            val = df['val_' + metric.lower()]
            plt.figure()
            plt.plot(epoch, train, label='train')
            plt.plot(epoch, val, label='val')
            plt.legend(loc='best')
            plt.xlabel('Epoch')
            plt.ylabel(metric)
            plt.savefig('.'.join([prefix, metric.lower(), 'eps']))
            plt.close()
    def test(self, test_set, lr_block_size=(20, 20), metrics=[psnr, r2]):
        """Evaluate every sample directory of ``test_set`` and write the
        per-image metrics plus their average to metrics.csv."""
        print('Test on', test_set)
        output_dir = self.test_dir / test_set
        output_dir.mkdir(exist_ok=True)
        # Evaluate metrics on each image
        rows = []
        for image_path in (data_dir / test_set).glob('*'):
            if image_path.is_dir():
                rows += [self.test_on_image(image_path, output_dir, lr_block_size=lr_block_size, metrics=metrics)]
        df = pd.DataFrame(rows)
        # Compute average metrics
        row = pd.Series()
        row['name'] = 'average'
        for col in df:
            if col != 'name':
                row[col] = df[col].mean()
        df = df.append(row, ignore_index=True)
        df.to_csv(str(self.test_dir / '{}/metrics.csv'.format(test_set)))
    def test_on_image(self, image_dir, output_dir, lr_block_size=(20, 20), metrics=[psnr, r2]):
        """Predict one image block-by-block, save the fused raster to
        ``output_dir`` and return a pandas Series of metrics (incl. runtime)."""
        # Load images
        print('loading image pairs from {}'.format(image_dir))
        input_images, valid_image = load_image_pairs(image_dir, scale=self.scale)
        assert len(input_images) == 3
        name = input_images[-1].filename.name if hasattr(input_images[-1], 'filename') else ''
        print('Predict on image {}'.format(name))
        # Generate output image and measure run time
        # x_inputs are 4-D arrays: (num, height, width, channels)
        x_inputs = [self.validate(img_to_array(im)) for im in input_images]
        assert x_inputs[0].shape[1] % lr_block_size[0] == 0
        assert x_inputs[0].shape[2] % lr_block_size[1] == 0
        x_train, _ = load_test_set((input_images, valid_image),
                                   lr_block_size=lr_block_size, scale=self.scale)
        model = self.compile(self.build_model(*x_train))
        if self.model_file.exists():
            model.load_weights(str(self.model_file))
        t_start = time.perf_counter()
        y_preds = model.predict(x_train, batch_size=1)  # predictions are 4-D
        # Stitch the predicted blocks back into one full-size image
        y_pred = np.empty(x_inputs[1].shape[-3:], dtype=np.float32)
        row_step = lr_block_size[0] * self.scale
        col_step = lr_block_size[1] * self.scale
        rows = x_inputs[0].shape[2] // lr_block_size[1]
        cols = x_inputs[0].shape[1] // lr_block_size[0]
        count = 0
        for j in range(rows):
            for i in range(cols):
                y_pred[i * row_step: (i + 1) * row_step, j * col_step: (j + 1) * col_step] = y_preds[count]
                count += 1
        assert count == rows * cols
        t_end = time.perf_counter()
        # Record metrics
        row = pd.Series()
        row['name'] = name
        row['time'] = t_end - t_start
        y_true = self.validate(img_to_array(valid_image))
        y_pred = self.validate(y_pred)
        for metric in metrics:
            row[metric.__name__] = K.eval(metric(y_true, y_pred))
        prototype = str(valid_image.filename) if hasattr(valid_image, 'filename') else None
        gdal_array.SaveArray(y_pred[0].squeeze().astype(np.int16),
                             str(output_dir / name),
                             prototype=prototype)
        return row
| 7,661
| 37.119403
| 114
|
py
|
dcstfn
|
dcstfn-master/toolbox/misc.py
|
import math
def factorize(n):
    """Return the prime factorization of ``n`` as an ascending list with multiplicity.

    factorize(26) -> [2, 13]; factorize(12) -> [2, 2, 3]; n < 2 -> [].

    Rewritten as plain trial division up to sqrt(n): the previous version
    re-checked every candidate for primality (and recursed), which was
    redundant -- the smallest divisor found this way is always prime.
    """
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        # Whatever remains is a prime factor larger than sqrt(original n).
        factors.append(n)
    return factors
if __name__ == '__main__':
    print(factorize(26))
| 453
| 24.222222
| 77
|
py
|
dcstfn
|
dcstfn-master/toolbox/model.py
|
import keras.layers
from keras.layers import Input, Conv2D, Conv2DTranspose, MaxPooling2D, Dense
from keras.models import Model, Sequential
##################################################################
# Deep Convolutional SpatioTemporal Fusion Network (DCSTFN)
##################################################################
def dcstfn(coarse_input, fine_input, coarse_pred, d=[32, 64, 128]):
    """Build the Deep Convolutional SpatioTemporal Fusion Network.

    Three branches: a CNN shared by the two coarse (MODIS) images and a CNN
    for the fine (Landsat) image.  Fusion follows fine - coarse_input +
    coarse_pred in feature space, then upsampling and two dense layers map
    back to the fine image's channels.

    NOTE: the mutable default ``d`` is safe here because it is only read.
    """
    pool_size = 2
    coarse_model = _htls_cnet(coarse_input, coarse_pred, d)
    fine_model = _hslt_cnet(fine_input, d)
    # Fuse the three branches (MODIS input, Landsat input, MODIS prediction).
    coarse_input_layer = Input(shape=coarse_input.shape[-3:])
    coarse_input_model = coarse_model(coarse_input_layer)
    fine_input_layer = Input(shape=fine_input.shape[-3:])
    fine_input_model = fine_model(fine_input_layer)
    subtracted_layer = keras.layers.subtract([fine_input_model, coarse_input_model])
    coarse_pred_layer = Input(shape=coarse_pred.shape[-3:])
    coarse_pred_model = coarse_model(coarse_pred_layer)
    added_layer = keras.layers.add([subtracted_layer, coarse_pred_model])
    # Undo the fine branch's pooling to return to full resolution.
    merged_layer = Conv2DTranspose(d[1], 3, strides=pool_size,
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   activation='relu')(added_layer)
    dense_layer = Dense(d[0], activation='relu')(merged_layer)
    final_out = Dense(fine_input.shape[-1])(dense_layer)
    model = Model([coarse_input_layer, fine_input_layer, coarse_pred_layer], final_out)
    return model
def _hslt_cnet(fine_input, d, pool_size=2):
    """Sequential CNN for the high-spatial/low-temporal (Landsat) image:
    conv(d0) -> conv(d1) -> maxpool -> conv(d1) -> conv(d2)."""
    def conv(filters, **extra):
        # All convolutions share the same 3x3 / same-padding / he-normal / relu setup.
        return Conv2D(filters, 3, padding='same',
                      kernel_initializer='he_normal',
                      activation='relu', **extra)
    net = Sequential()
    net.add(conv(d[0], input_shape=fine_input.shape[-3:]))
    net.add(conv(d[1]))
    net.add(MaxPooling2D(pool_size=pool_size, padding='same'))
    net.add(conv(d[1]))
    net.add(conv(d[2]))
    return net
def _htls_cnet(coarse_input, coarse_pred, d):
    """Sequential CNN shared by the two coarse (MODIS) images:
    conv(d0) -> conv(d1) -> three stride-2 deconvs (8x upsampling) -> conv(d2)."""
    assert coarse_input.shape == coarse_pred.shape
    net = Sequential()
    net.add(Conv2D(d[0], 3, padding='same',
                   kernel_initializer='he_normal',
                   activation='relu', input_shape=coarse_input.shape[-3:]))
    net.add(Conv2D(d[1], 3, padding='same',
                   kernel_initializer='he_normal',
                   activation='relu'))
    for stride in (2, 2, 2):
        net.add(Conv2DTranspose(d[1], 3, strides=stride, padding='same',
                                kernel_initializer='he_normal'))
    net.add(Conv2D(d[2], 3, padding='same',
                   kernel_initializer='he_normal',
                   activation='relu'))
    return net
def get_model(name):
    """Look up a model-builder function defined in this module by its name."""
    return globals()[name]
| 3,336
| 43.493333
| 87
|
py
|
dcstfn
|
dcstfn-master/toolbox/data.py
|
from pathlib import Path
import numpy as np
from functools import partial
from keras.preprocessing.image import img_to_array
from osgeo import gdal_array
from PIL import Image
repo_dir = Path(__file__).parents[1]
data_dir = repo_dir / 'data'
input_suffix = 'input'
pred_suffix = 'pred'
valid_suffix = 'valid'
modis_prefix = 'MOD09A1'
landsat_prefix = 'LC08'
def gen_patches(image, size, stride=None):
    """Yield crops of ``size`` from ``image``, scanning columns first.

    ``size`` and ``stride`` may be ints or (w, h) tuples; ``stride``
    defaults to ``size`` (non-overlapping tiles).
    """
    if not isinstance(size, tuple):
        size = (size, size)
    if stride is None:
        stride = size
    elif not isinstance(stride, tuple):
        stride = (stride, stride)
    width, height = image.size
    # Column-major scan: x (the first PIL axis) is the outer loop.
    for x in range(0, width - size[0] + 1, stride[0]):
        for y in range(0, height - size[1] + 1, stride[1]):
            yield image.crop([x, y, x + size[0], y + size[1]])
def load_image_pairs(directory, scale=16):
    """Load the four co-registered images of one sample directory.

    Expects exactly four .tif files: a MODIS input/pred pair (low resolution)
    and a Landsat input/valid pair (high resolution, ``scale`` times larger
    in each dimension).

    :return: ([modis_input, landsat_input, modis_pred], landsat_valid)
    """
    path_list = []
    for path in Path(directory).glob('*.tif'):
        path_list.append(path)
    assert len(path_list) == 4
    for path in path_list:
        img_name = path.name
        if pred_suffix in img_name:
            modis_pred_path = path
        elif valid_suffix in img_name:
            landsat_valid_path = path
        elif input_suffix in img_name:
            if img_name.startswith(modis_prefix):
                modis_input_path = path
            elif img_name.startswith(landsat_prefix):
                landsat_input_path = path
    path_list = [modis_input_path, landsat_input_path, modis_pred_path, landsat_valid_path]
    image_list = []
    for path in path_list:
        data = gdal_array.LoadFile(str(path)).astype(np.int32)
        image = Image.fromarray(data)
        setattr(image, 'filename', path)
        image_list.append(image)
    # Bug fix: these checks previously compared each image's size with
    # itself (always true).  The two MODIS images must match, the two
    # Landsat images must match, and Landsat must be exactly `scale`
    # times the MODIS size.
    assert image_list[0].size == image_list[2].size
    assert image_list[1].size == image_list[3].size
    assert image_list[1].size[0] == image_list[0].size[0] * scale
    assert image_list[1].size[1] == image_list[0].size[1] * scale
    return image_list[:3], image_list[-1]
def sample_to_array(samples, lr_gen_sub, hr_gen_sub, patches):
    """Split each of the four sample images into patch arrays appended to
    ``patches``.  Even indices are low-resolution images (MODIS), odd
    indices high-resolution (Landsat), so they use different splitters."""
    assert len(samples) == 4
    for idx, sample in enumerate(samples):
        splitter = lr_gen_sub if idx % 2 == 0 else hr_gen_sub
        patches[idx] += [img_to_array(img) for img in splitter(sample)]
def load_train_set(image_dir, lr_sub_size=10, lr_sub_stride=5, scale=16):
    """Load training patches from every sample directory under ``image_dir``.

    High-resolution patch size and stride are the low-resolution values
    multiplied by ``scale``.  Returns ([modis_input, landsat_input,
    modis_pred] patch arrays, landsat_valid patch array).
    """
    hr_sub_size = lr_sub_size * scale
    hr_sub_stride = lr_sub_stride * scale
    lr_gen_sub = partial(gen_patches, size=lr_sub_size, stride=lr_sub_stride)
    hr_gen_sub = partial(gen_patches, size=hr_sub_size, stride=hr_sub_stride)
    patches = [[] for _ in range(4)]
    for path in (data_dir / image_dir).glob('*'):
        if path.is_dir():
            print('loading image pairs from {}'.format(path))
            samples = load_image_pairs(path, scale=scale)
            samples = [*samples[0], samples[1]]
            sample_to_array(samples, lr_gen_sub, hr_gen_sub, patches)
    for i in range(4):
        patches[i] = np.stack(patches[i])
    # Each entry is a 4-D array: (num, height, width, channels).
    return patches[:3], patches[-1]
def load_test_set(samples, lr_block_size=(20, 20), scale=16):
    """Tile one (inputs, valid) sample into non-overlapping blocks.

    :param samples: ([modis_input, landsat_input, modis_pred], landsat_valid)
    :return: (input patch arrays, valid patch array), each 4-D
             (num, height, width, channels).
    """
    assert len(samples) == 2
    hr_block_size = [m * scale for m in lr_block_size]
    lr_gen_sub = partial(gen_patches, size=tuple(lr_block_size))
    hr_gen_sub = partial(gen_patches, size=tuple(hr_block_size))
    patches = [[] for _ in range(4)]
    sample_to_array([*samples[0], samples[1]], lr_gen_sub, hr_gen_sub, patches)
    stacked = [np.stack(p) for p in patches]
    return stacked[:3], stacked[-1]
| 3,794
| 32
| 91
|
py
|
dcstfn
|
dcstfn-master/toolbox/metrics.py
|
from keras import backend as K
import tensorflow as tf
import numpy as np
def cov(x, y):
    # NOTE(review): for non-scalar inputs this is the mean of the product of
    # the centred tensor and the transposed centred tensor (an outer-product
    # style term), not the usual element-wise covariance -- confirm this is
    # intended before reusing it outside ssim().
    return K.mean((x - K.mean(x)) * K.transpose((y - K.mean(y))))
def psnr(y_true, y_pred, data_range=10000):
    """Peak signal-to-noise ratio averaged over samples and channels."""
    per_image_mse = K.mean(K.square(y_true - y_pred), axis=(-3, -2))
    # 20*log10(range / rmse), computed via natural logs.
    ratio = data_range / K.sqrt(per_image_mse)
    return K.mean(20 * K.log(ratio) / np.log(10))
def ssim(y_true, y_pred, data_range=10000):
    """Structural similarity (SSIM, Wang et al. 2004) over whole tensors.

    SSIM = ((2*mu_x*mu_y + C1) * (2*sig_xy + C2))
           / ((mu_x^2 + mu_y^2 + C1) * (sig_x^2 + sig_y^2 + C2))

    Fixed two bugs in the original expression:
    * the covariance term read ``2*sig_xy * C2`` instead of ``2*sig_xy + C2``;
    * missing parentheses made the second denominator factor multiply the
      result instead of dividing it.
    """
    K1 = 0.01
    K2 = 0.03
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = cov(y_true, y_pred)
    L = data_range
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2
    return (((2 * mu_x * mu_y + C1) * (2 * sig_xy + C2)) /
            ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2)))
def r2(y_true, y_pred):
    """Coefficient of determination: R^2 = 1 - SS_res / SS_tot."""
    # K.mean needs a tensor attribute, so coerce a bare ndarray first.
    tf_true = y_true if isinstance(y_true, tf.Tensor) else tf.convert_to_tensor(y_true)
    ss_res = K.sum(K.square(y_true - y_pred))
    ss_tot = K.sum(K.square(y_true - K.mean(tf_true)))
    return 1 - ss_res / (ss_tot + K.epsilon())
| 1,180
| 25.840909
| 74
|
py
|
dcstfn
|
dcstfn-master/toolbox/__init__.py
| 1
| 0
| 0
|
py
|
|
dcstfn
|
dcstfn-master/utils/evaluate.py
|
import argparse
from pathlib import Path
import numpy as np
from osgeo import gdal_array
from math import sqrt
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from skimage.measure import compare_psnr, compare_ssim
def evaluate(y_true, y_pred, func):
    """Apply ``func`` to each band of a raster pair and return the values.

    2-D inputs are treated as a single band; 3-D inputs as (bands, rows, cols).
    """
    assert y_true.shape == y_pred.shape
    if y_true.ndim == 2:
        y_true = y_true[np.newaxis, :]
        y_pred = y_pred[np.newaxis, :]
    return [func(t, p) for t, p in zip(y_true, y_pred)]
def mae(y_true, y_pred):
return evaluate(y_true, y_pred,
lambda x, y: mean_absolute_error(x.ravel(), y.ravel()))
def rmse(y_true, y_pred):
return evaluate(y_true, y_pred,
lambda x, y: sqrt(mean_squared_error(x.ravel(), y.ravel())))
def r2(y_true, y_pred):
return evaluate(y_true, y_pred,
lambda x, y: r2_score(x.ravel(), y.ravel()))
def kge(y_true, y_pred):
def compute(x, y):
im_true = x.ravel()
im_pred = y.ravel()
r = np.corrcoef(im_true, im_pred)[1, 0]
m_true = np.mean(im_true)
m_pred = np.mean(im_pred)
std_true = np.std(im_true)
std_pred = np.std(im_pred)
return 1 - np.sqrt((r - 1) ** 2
+ (std_pred / std_true - 1) ** 2
+ (m_pred / m_true - 1) ** 2)
return evaluate(y_true, y_pred, compute)
def psnr(y_true, y_pred, data_range=10000):
return evaluate(y_true, y_pred,
lambda x, y: compare_psnr(x, y, data_range=data_range))
def ssim(y_true, y_pred, data_range=10000):
    """Band-wise structural similarity index."""
    def _ssim(x, y):
        return compare_ssim(x, y, data_range=data_range)
    return evaluate(y_true, y_pred, _ssim)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Two positional inputs: the ground-truth raster and the predicted raster.
    parser.add_argument('inputs', nargs='+', type=Path)
    args = parser.parse_args()
    inputs = args.inputs
    assert len(inputs) == 2
    # Load both rasters as arrays (bands x rows x cols, or rows x cols).
    ix = gdal_array.LoadFile(str(inputs[0].expanduser().resolve()))
    iy = gdal_array.LoadFile(str(inputs[1].expanduser().resolve()))
    # Print one value per band for each metric.
    print('RMSE: ', *rmse(ix, iy))
    print('R2: ', *r2(ix, iy))
    print('KGE: ', *kge(ix, iy))
    print('SSIM: ', *ssim(ix, iy))
| 2,272
| 28.519481
| 80
|
py
|
dcstfn
|
dcstfn-master/utils/draw_loss.py
|
# Plot training/validation MSE loss curves (log y-scale) of the three
# band-specific DCSTFN models over their first 50 epochs and save the
# figure as loss.png.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("paper", rc={'font.sans-serif': 'Helvetica',
                             'font.size': 12})
df_green = pd.read_csv('~/Resources/Experiments/dcfnex-12/dcstfn-green/train/history.csv')
df_red = pd.read_csv('~/Resources/Experiments/dcfnex-12/dcstfn-red/train/history.csv')
df_nir = pd.read_csv('~/Resources/Experiments/dcfnex-12/dcstfn-nir/train/history.csv')
# Only the first 50 epochs are plotted.
df_green = df_green.head(50)
df_red = df_red.head(50)
df_nir = df_nir.head(50)
epoch = df_green['epoch']
metrics = ('loss', 'val_loss')
labels = ('Green', 'Red', 'NIR')
colors = ('green', 'red', 'orange')
linestyles = ('-', '--')
fig, ax = plt.subplots()
# Solid lines = training loss, dashed = validation loss; one color per band.
for metric, linestyle in zip(metrics, linestyles):
    score = (df_green[metric], df_red[metric], df_nir[metric])
    for i in range(3):
        ax.plot(epoch + 1, score[i], label=labels[i], color=colors[i],
                linestyle=linestyle)
ax.set_yscale('log')
ax.set_xlabel('Epoch', fontsize=12)
ax.set_ylabel('MSE', fontsize=12)
ax.tick_params(axis='both', which='major', labelsize=9)
ax.tick_params(axis='both', which='minor', labelsize=8)
ax.set_xticks(range(0, epoch.size + 1, 10))
# NOTE(review): the minor-tick label indices below are hard-coded for this
# exact data range; they will point at the wrong ticks if the y-limits
# change -- verify when reusing.
ytick_labels = ax.yaxis.get_ticklabels(minor=True)
ytick_labels[16] = r'$2\times10^4$'
ytick_labels[17] = r'$3\times10^4$'
ytick_labels[18] = r'$4\times10^4$'
ytick_labels[24] = r'$2\times 10^5$'
ax.yaxis.set_ticklabels(ytick_labels, minor=True)
grid_color = (0.95, 0.95, 0.95)
ax.grid(True, color=grid_color)
# Extra horizontal grid lines at the relabeled minor ticks.
for n in (20000, 30000, 40000, 200000):
    ax.axhline(y=n, color=grid_color, linewidth=0.6)
# Two invisible black lines used only as legend handles for the linestyles.
for i in range(2):
    ax.plot([], [], color='black', linestyle=linestyles[i])
ax.grid(True)
lines = ax.get_lines()
color_legend = ax.legend(handles=[lines[i] for i in range(3)], labels=labels,
                         loc=1, bbox_to_anchor=(0.967, 1), fontsize=10, frameon=False)
line_legend = ax.legend(handles=[lines[i] for i in range(-2, 0)], labels=('Training', 'Validation'),
                        loc=1, bbox_to_anchor=(0.778, 1), fontsize=10, frameon=False)
ax.add_artist(color_legend)
ax.add_artist(line_legend)
ax.set_title('Loss Curve', fontsize=14, fontweight='bold')
plt.savefig('loss.png', dpi=900)
plt.close()
| 2,234
| 35.048387
| 100
|
py
|
dcstfn
|
dcstfn-master/utils/draw_sr.py
|
# Density scatter plot of observed vs. predicted surface reflectance for a
# chosen spectral band, annotated with the R^2 score and a 1:1 line.
import argparse
from pathlib import Path
import numpy as np
from scipy.stats import gaussian_kde
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from osgeo import gdal_array
import seaborn as sns
sns.set_context("paper", rc={'font.sans-serif': 'Arial',
                             'font.size': 12})
parser = argparse.ArgumentParser()
parser.add_argument('--true', '-t', type=Path, required=True,
                    help='the true observation data path')
parser.add_argument('--predict', '-p', type=Path, required=True,
                    help='the prediction data path')
parser.add_argument('--band', '-b', type=int, required=True,
                    help='the indicator for spectral band (0 for green, 1 for red, 2 for nir)')
parser.add_argument('--title', '-n', type=str, required=True,
                    help='the title of the image')
parser.add_argument('--output', '-o', type=str, required=True,
                    help='the output image file')
args = parser.parse_args()
true_file = args.true.expanduser()
pred_file = args.predict.expanduser()
band_ix = args.band
title = args.title
output_name = args.output
ix = gdal_array.LoadFile(str(true_file))
iy = gdal_array.LoadFile(str(pred_file))
# Multi-band rasters: keep only the requested band.
if ix.ndim == 3:
    ix = ix[band_ix]
    iy = iy[band_ix]
# Single-band data expected from here on.
assert ix.ndim == 2 and iy.ndim == 2
# Only a 500x500 subset is used -- gaussian_kde on the full image is slow.
x = ix[:500, :500].flatten()
y = iy[:500, :500].flatten()
r2 = r2_score(x, y)
# Color each point by local point density; plot densest points last so
# they stay visible.
xy = np.vstack([x, y])
z = gaussian_kde(xy)(xy)
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
fig = plt.figure()
ax = plt.gca()
ax.scatter(x, y, c=z, s=1, cmap=plt.cm.rainbow)
# Axis range depends on the band's typical reflectance scale.
max_sr = 3000 if band_ix in (0, 1) else 6000
ax.set_xlim((0, max_sr))
ax.set_ylim((0, max_sr))
ax.plot([0, max_sr], [0, max_sr], linewidth=1, color='gray')
ax.set_title(title, fontsize=14, fontweight='bold')
band_names = ['Green band', 'Red band', 'NIR band']
ax.text(max_sr * 0.1, max_sr * 0.9, band_names[band_ix], fontsize=10)
ax.text(max_sr * 0.8, max_sr * 0.1, r'$R^2=$' + '{:.3f}'.format(r2), fontsize=10)
ax.set_xlabel("Observed reflectance", fontsize=12)
ax.set_ylabel("Predicted reflectance", fontsize=12)
fig.savefig(output_name, dpi=900)
plt.close()
| 2,157
| 32.2
| 95
|
py
|
dcstfn
|
dcstfn-master/utils/draw_fit.py
|
# Plot the training/validation R^2 curves of the three band-specific
# DCSTFN models over their first 50 epochs and save the figure as r2.png.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("paper", rc={'font.sans-serif': 'Helvetica',
                             'font.size': 12})
df_green = pd.read_csv('~/Resources/Experiments/dcfnex-12/dcstfn-green/train/history.csv')
df_red = pd.read_csv('~/Resources/Experiments/dcfnex-12/dcstfn-red/train/history.csv')
df_nir = pd.read_csv('~/Resources/Experiments/dcfnex-12/dcstfn-nir/train/history.csv')
# Only the first 50 epochs are plotted.
df_green = df_green.head(50)
df_red = df_red.head(50)
df_nir = df_nir.head(50)
epoch = df_green['epoch']
metrics = ('r2', 'val_r2')
labels = ('Green', 'Red', 'NIR')
colors = ('green', 'red', 'orange')
linestyles = ('-', '--')
fig, ax = plt.subplots()
# Solid lines = training R^2, dashed = validation R^2; one color per band.
for metric, linestyle in zip(metrics, linestyles):
    score = (df_green[metric], df_red[metric], df_nir[metric])
    for i in range(3):
        ax.plot(epoch + 1, score[i], label=labels[i], color=colors[i],
                linestyle=linestyle)
ax.set_xlabel('Epoch', fontsize=12)
ax.set_ylabel(r'$R^2$', fontsize=12)
ax.tick_params(axis='both', which='both', labelsize=9)
ax.set_xticks(range(0, epoch.size + 1, 10))
ax.set_ylim([0.5, 0.9])
ax.grid(True, color=(0.95, 0.95, 0.95))
# Two invisible black lines used only as legend handles for the linestyles.
for i in range(2):
    ax.plot([], [], color='black', linestyle=linestyles[i])
ax.grid(True)
lines = ax.get_lines()
color_legend = ax.legend(handles=[lines[i] for i in range(3)], labels=labels,
                         loc=4, bbox_to_anchor=(0.967, 0.0), fontsize=10, frameon=False)
line_legend = ax.legend(handles=[lines[i] for i in range(-2, 0)], labels=('Training', 'Validation'),
                        loc=4, bbox_to_anchor=(0.778, 0.0), fontsize=10, frameon=False)
ax.add_artist(color_legend)
ax.add_artist(line_legend)
ax.set_title('Fitted Curve', fontsize=14, fontweight='bold')
plt.savefig('r2.png', dpi=900)
plt.close()
| 1,820
| 34.019231
| 100
|
py
|
dcstfn
|
dcstfn-master/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
MRI-ROI-prediction
|
MRI-ROI-prediction-main/lrmain.py
|
import os
import numpy as np
import time
import glob
import random
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
FLAGS = tf.compat.v1.flags.FLAGS
tf.compat.v1.flags.DEFINE_string('EXP','temp',"exp. name")
tf.compat.v1.flags.DEFINE_integer('mod', 0, "model") # 0=share, 1=chstack, 2=3D
class ConvNet(object):
    """TF1-style trainer that regresses ROI boundary coordinates of a
    512x512 MRI volume.  This variant trains on the left/right columns
    (label indices 3 and 2).  The backbone is chosen by --mod:
    0 = shared 2D encoder, 1 = channel-stacked 2D, 2 = 3D convolutions.
    """
    def __init__(self):
        self.lr = 0.0001
        self.batch_size = 1
        self.gstep = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        self.image_size=512
        if FLAGS.mod==1:
            from bmbn2D import inference
        elif FLAGS.mod==2:
            from bmbn import inference
        else:
            from share import inference
        # Bug fix: the selected `inference` function used to be imported and
        # then discarded, so build() -> self.inference() raised
        # AttributeError.  It is declared as `def inference(self)`, so bind
        # it to this instance as a method.
        self.inference = inference.__get__(self)
    def parser(self,serialized_example):
        """Parses a single tf.Example into image and label tensors."""
        features = tf.io.parse_single_example(serialized_example,
                                   features={
                                       'top': tf.io.FixedLenFeature([], tf.float32),
                                       'bottom': tf.io.FixedLenFeature([], tf.float32),
                                       'right': tf.io.FixedLenFeature([], tf.float32),
                                       'left': tf.io.FixedLenFeature([], tf.float32),
                                       'image': tf.io.FixedLenFeature([], tf.string),
                                   }, name='features')
        image = tf.io.decode_raw(features['image'], tf.float32)
        image = tf.reshape(image, [self.image_size,self.image_size,-1])
        # Label order is fixed: [top, bottom, right, left].
        label = tf.stack([features['top'],features['bottom'],features['right'],features['left']])
        return image,label
    def get_data(self):
        """Build the TFRecord input pipeline plus a random-shift placeholder
        used as translation augmentation."""
        with tf.name_scope('data'):
            self.filenames = tf.compat.v1.placeholder(tf.string, shape=[None])
            dataset = tf.data.TFRecordDataset(self.filenames)
            dataset=dataset.map(self.parser,num_parallel_calls=4)
            if FLAGS.mod!=1:
                dataset=dataset.batch(1)
            else:
                # The channel-stacked model needs a fixed depth of 40 slices.
                dataset=dataset.padded_batch(self.batch_size,padded_shapes=([512,512,40],[4]))
            dataset=dataset.shuffle(100)
            self.iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
            self.img, self.label= self.iterator.get_next()
            self.img=tf.image.per_image_standardization(self.img)
            self.shift = tf.compat.v1.placeholder(tf.int32, name='shift')
            # NOTE(review): the roll is applied over axes [0, 1] of what is a
            # [batch, H, W, C] tensor here, while only shift[1] is added to
            # the labels -- confirm the intended axes against the backbone.
            self.img=tf.roll(self.img,self.shift,[0,1])
            self.label+=tf.cast(self.shift[1],tf.float32)
    def loss(self):
        """MSE between the two predictions and the left/right labels."""
        with tf.name_scope('loss'):
            self.loss=tf.keras.losses.MSE(self.label[:,3],(self.logits[0]))+tf.keras.losses.MSE(self.label[:,2],self.logits[1])
    def optimize(self):
        self.opt = tf.compat.v1.train.AdamOptimizer(self.lr).minimize(self.loss,
                                        global_step=self.gstep)
    def summary(self):
        with tf.name_scope('summaries'):
            tf.compat.v1.summary.scalar('loss', self.loss)
            self.summary_op = tf.compat.v1.summary.merge_all()
    def build(self):
        """Assemble the graph: data pipeline, backbone, loss, optimizer."""
        self.get_data()
        self.inference()
        self.loss()
        self.optimize()
        self.summary()
    def train_one_epoch(self, sess, saver, init, writer, epoch, step):
        """Run one pass over the training TFRecords with random shifts."""
        train_filenames=sorted(glob.glob("/mnt/raid5/Loc/trainC/*.tfrecord"))
        sess.run(init.initializer, feed_dict={self.filenames: train_filenames})
        try:
            while True:
                # Random per-step translation for augmentation.
                shiftof=[-10,-5,0,5,10]
                dx,dy=(random.choice(shiftof),random.choice(shiftof))
                _, l, summaries,tsnr,tscore,img = sess.run([self.opt, self.loss, self.summary_op,self.label,self.logits,self.img], feed_dict={self.drop_prob:0.2, self.shift:[dy,dx]})
                writer.add_summary(summaries, global_step=step)
                if step % 100 == 0:
                    print('Loss at step {0}: {1}'.format(step, l))
                step += 1
        except tf.errors.OutOfRangeError:
            # Iterator exhausted: end of epoch.
            pass
        return step
    def eval_once(self, sess, init, writer, step):
        """Evaluate on the test TFRecords; returns the mean absolute
        boundary error (also logs a 1D IoU summary)."""
        eval_filenames=sorted(glob.glob("./testC/*.tfrecord"))
        sess.run(init.initializer, feed_dict={self.filenames:eval_filenames})
        scores=[]
        truepf=[]
        IoUs=[]
        try:
            while True:
                score,btrue_pf= sess.run([self.logits,self.label], feed_dict={self.drop_prob:0.0, self.shift:[0,0]})
                # Clamp predictions to the valid pixel range [0, 512].
                score=[max(0.0,score[0]),min(512.0,score[1])]
                scores+=[score[0],score[1]]
                truepf+=[btrue_pf[0][3],btrue_pf[0][2]]
                # 1D IoU of the predicted vs. true [left, right] interval.
                IoUs+=[(min(score[1],btrue_pf[0][2])-max(score[0],btrue_pf[0][3]))/(max(score[1],btrue_pf[0][2])-min(score[0],btrue_pf[0][3]))]
        except tf.errors.OutOfRangeError:
            pass
        print('score= ', scores, 'label= ', truepf)
        pf_error=np.mean(abs(np.array(scores)-np.array(truepf)))
        IoU=np.mean(np.array(IoUs))
        evalsum = tf.compat.v1.Summary()
        evalsum.value.add(tag='pf_error', simple_value=pf_error)
        evalsum.value.add(tag='IoU', simple_value=IoU)
        writer.add_summary(evalsum, global_step=step)
        return pf_error
    def train(self, n_epochs):
        """Training loop with periodic evaluation and best-checkpoint saving."""
        # Robustness: create parents too instead of swallowing every error.
        os.makedirs('checkpoints/'+FLAGS.EXP, exist_ok=True)
        writer = tf.compat.v1.summary.FileWriter('./graphs/'+FLAGS.EXP, tf.compat.v1.get_default_graph())
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        config.gpu_options.per_process_gpu_memory_fraction = 0.9
        with tf.compat.v1.Session(config=config) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            saver = tf.compat.v1.train.Saver()
            ckpt = tf.train.get_checkpoint_state('checkpoints/'+FLAGS.EXP)
            if ckpt and ckpt.model_checkpoint_path:
                # Resume from the latest checkpoint if one exists.
                saver.restore(sess, ckpt.model_checkpoint_path)
            step = self.gstep.eval()
            best_error=200
            for epoch in range(n_epochs):
                step = self.train_one_epoch(sess, saver, self.iterator, writer, epoch, step)
                if (epoch+1) % 10 == 1:
                    pf_error=self.eval_once(sess, self.iterator, writer, step)
                    if pf_error<=best_error:
                        best_error=min(pf_error,best_error)
                        saver.save(sess, 'checkpoints/'+FLAGS.EXP+'/ckpt', step)
            saver.save(sess, 'checkpoints/'+FLAGS.EXP+'/ckpt', step)
            print('DONE with best error ',best_error)
        writer.close()
if __name__ == '__main__':
    # Build the graph once, then run the long training loop.
    net = ConvNet()
    net.build()
    net.train(n_epochs=2000)
| 7,202
| 42.920732
| 201
|
py
|
MRI-ROI-prediction
|
MRI-ROI-prediction-main/main.py
|
import os
import numpy as np
import time
import glob
import random
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
FLAGS = tf.compat.v1.flags.FLAGS
tf.compat.v1.flags.DEFINE_string('EXP','temp',"exp. name")
tf.compat.v1.flags.DEFINE_integer('mod', 0, "model") # 0=share, 1=chstack, 2=3D
class ConvNet(object):
    """TF1-style trainer that regresses ROI boundary coordinates of a
    512x512 MRI volume.  This variant trains on the top/bottom rows
    (label indices 1 and 0).  The backbone is chosen by --mod:
    0 = shared 2D encoder, 1 = channel-stacked 2D, 2 = 3D convolutions.
    """
    def __init__(self):
        self.lr = 0.0001
        self.batch_size = 1
        self.gstep = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        self.image_size=512
        if FLAGS.mod==1:
            from bmbn2D import inference
        elif FLAGS.mod==2:
            from bmbn import inference
        else:
            from share import inference
        # Bug fix: the selected `inference` function used to be imported and
        # then discarded, so build() -> self.inference() raised
        # AttributeError.  It is declared as `def inference(self)`, so bind
        # it to this instance as a method.
        self.inference = inference.__get__(self)
    def parser(self,serialized_example):
        """Parses a single tf.Example into image and label tensors."""
        features = tf.io.parse_single_example(serialized_example,
                                   features={
                                       'top': tf.io.FixedLenFeature([], tf.float32),
                                       'bottom': tf.io.FixedLenFeature([], tf.float32),
                                       'right': tf.io.FixedLenFeature([], tf.float32),
                                       'left': tf.io.FixedLenFeature([], tf.float32),
                                       'image': tf.io.FixedLenFeature([], tf.string),
                                   }, name='features')
        image = tf.io.decode_raw(features['image'], tf.float32)
        image = tf.reshape(image, [self.image_size,self.image_size,-1])
        # Label order is fixed: [top, bottom, right, left].
        label = tf.stack([features['top'],features['bottom'],features['right'],features['left']])
        return image,label
    def get_data(self):
        """Build the TFRecord input pipeline plus a random-shift placeholder
        used as translation augmentation."""
        with tf.name_scope('data'):
            self.filenames = tf.compat.v1.placeholder(tf.string, shape=[None])
            dataset = tf.data.TFRecordDataset(self.filenames)
            dataset=dataset.map(self.parser,num_parallel_calls=4)
            if FLAGS.mod!=1:
                dataset=dataset.batch(1)
            else:
                # The channel-stacked model needs a fixed depth of 40 slices.
                dataset=dataset.padded_batch(self.batch_size,padded_shapes=([512,512,40],[4]))
            dataset=dataset.shuffle(100)
            self.iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
            self.img, self.label= self.iterator.get_next()
            self.img=tf.image.per_image_standardization(self.img)
            self.shift = tf.compat.v1.placeholder(tf.int32, name='shift')
            # NOTE(review): the roll is applied over axes [0, 1] of what is a
            # [batch, H, W, C] tensor here, while only shift[0] is added to
            # the labels -- confirm the intended axes against the backbone.
            self.img=tf.roll(self.img,self.shift,[0,1])
            self.label+=tf.cast(self.shift[0],tf.float32)
    def loss(self):
        """MSE between the two predictions and the bottom/top labels."""
        with tf.name_scope('loss'):
            self.loss=tf.keras.losses.MSE(self.label[:,1],(self.logits[0]))+tf.keras.losses.MSE(self.label[:,0],self.logits[1])
    def optimize(self):
        self.opt = tf.compat.v1.train.AdamOptimizer(self.lr).minimize(self.loss,
                                        global_step=self.gstep)
    def summary(self):
        with tf.name_scope('summaries'):
            tf.compat.v1.summary.scalar('loss', self.loss)
            self.summary_op = tf.compat.v1.summary.merge_all()
    def build(self):
        """Assemble the graph: data pipeline, backbone, loss, optimizer."""
        self.get_data()
        self.inference()
        self.loss()
        self.optimize()
        self.summary()
    def train_one_epoch(self, sess, saver, init, writer, epoch, step):
        """Run one pass over the training TFRecords with random shifts."""
        train_filenames=sorted(glob.glob("/mnt/raid5/kllei/Loc/trainab/*.tfrecord"))
        sess.run(init.initializer, feed_dict={self.filenames: train_filenames})
        try:
            while True:
                # Larger random vertical shifts, smaller horizontal ones.
                shiftof=[-30,-20,-10,0,10,20,30]
                shiftofx=[-6,-3,0,3,6]
                dx=random.choice(shiftofx)
                dy=random.choice(shiftof)
                _, l, summaries,tsnr,tscore,img = sess.run([self.opt, self.loss, self.summary_op,self.label,self.logits,self.img], feed_dict={self.drop_prob:0.2, self.shift:[dy,dx]})
                writer.add_summary(summaries, global_step=step)
                if step % 100 == 0:
                    print('Loss at step {0}: {1}'.format(step, l))
                step += 1
        except tf.errors.OutOfRangeError:
            # Iterator exhausted: end of epoch.
            pass
        return step
    def eval_once(self, sess, init, writer, step):
        """Evaluate on the test TFRecords; returns the mean absolute
        boundary error (also logs a 1D IoU summary)."""
        eval_filenames=sorted(glob.glob("./testab/*.tfrecord"))
        sess.run(init.initializer, feed_dict={self.filenames:eval_filenames})
        scores=[]
        truepf=[]
        IoUs=[]
        try:
            while True:
                score,btrue_pf= sess.run([self.logits,self.label], feed_dict={self.drop_prob:0.0, self.shift:[0,0]})
                scores+=[score[0],score[1]]
                truepf+=[btrue_pf[0][1],btrue_pf[0][0]]
                # 1D IoU of the predicted vs. true [bottom, top] interval.
                IoUs+=[(min(score[1],btrue_pf[0][0])-max(score[0],btrue_pf[0][1]))/(max(score[1],btrue_pf[0][0])-min(score[0],btrue_pf[0][1]))]
        except tf.errors.OutOfRangeError:
            pass
        print('score= ', scores, 'label= ', truepf)
        pf_error=np.mean(abs(np.array(scores)-np.array(truepf)))
        IoU=np.mean(np.array(IoUs))
        evalsum = tf.compat.v1.Summary()
        evalsum.value.add(tag='pf_error', simple_value=pf_error)
        evalsum.value.add(tag='IoU', simple_value=IoU)
        writer.add_summary(evalsum, global_step=step)
        return pf_error
    def train(self, n_epochs):
        """Training loop with periodic evaluation and best-checkpoint saving."""
        # Robustness: create parents too instead of swallowing every error.
        os.makedirs('checkpoints/'+FLAGS.EXP, exist_ok=True)
        writer = tf.compat.v1.summary.FileWriter('./graphs/'+FLAGS.EXP, tf.compat.v1.get_default_graph())
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        config.gpu_options.per_process_gpu_memory_fraction = 0.9
        with tf.compat.v1.Session(config=config) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            saver = tf.compat.v1.train.Saver()
            ckpt = tf.train.get_checkpoint_state('checkpoints/'+FLAGS.EXP)
            if ckpt and ckpt.model_checkpoint_path:
                # Resume from the latest checkpoint if one exists.
                saver.restore(sess, ckpt.model_checkpoint_path)
            step = self.gstep.eval()
            best_error=200
            for epoch in range(n_epochs):
                step = self.train_one_epoch(sess, saver, self.iterator, writer, epoch, step)
                if (epoch+1) % 10 == 1:
                    pf_error=self.eval_once(sess, self.iterator, writer, step)
                    if pf_error<=best_error:
                        best_error=min(pf_error,best_error)
                        saver.save(sess, 'checkpoints/'+FLAGS.EXP+'/ckpt', step)
            saver.save(sess, 'checkpoints/'+FLAGS.EXP+'/ckpt', step)
            print('DONE with best error ',best_error)
        writer.close()
if __name__ == '__main__':
    # Build the graph once, then run the long training loop.
    net = ConvNet()
    net.build()
    net.train(n_epochs=3000)
| 7,211
| 42.709091
| 201
|
py
|
MRI-ROI-prediction
|
MRI-ROI-prediction-main/bmbn2D.py
|
import tensorflow as tf
def inference(self):
    """Channel-stacked 2D CNN backbone.

    Treats the slice axis of self.img as input channels and regresses two
    boundary coordinates, stored in self.logits.
    """
    conv0 = tf.keras.layers.Conv2D(filters=16,
                            kernel_size=[5,5],
                            padding='SAME',
                            name='conv0')(self.img)
    pool0 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, name='pool0')(conv0)
    n0=tf.keras.layers.BatchNormalization()(pool0)
    a0=tf.keras.layers.ReLU()(n0)
    conv1 = tf.keras.layers.Conv2D(filters=32,
                            kernel_size=[5, 5],
                            padding='SAME',
                            name='conv1')(a0)
    pool1 = tf.keras.layers.MaxPool2D(pool_size=[2,2], strides=2, name='pool1')(conv1)
    n1=tf.keras.layers.BatchNormalization()(pool1)
    a1=tf.keras.layers.ReLU()(n1)
    conv2 = tf.keras.layers.Conv2D(filters=32,
                            kernel_size=[5, 5],
                            strides=[2,2],
                            padding='SAME',
                            name='conv2')(a1)
    n2=tf.keras.layers.BatchNormalization()(conv2)
    a2=tf.keras.layers.ReLU()(n2)
    conv3 = tf.keras.layers.Conv2D(filters=32,
                            kernel_size=[3,3],
                            strides=2,
                            padding='SAME',
                            name='conv3')(a2)
    n3=tf.keras.layers.BatchNormalization()(conv3)
    a3=tf.keras.layers.ReLU()(n3)
    conv31 = tf.keras.layers.Conv2D(filters=32,
                            kernel_size=[3,3],
                            strides=1,
                            padding='SAME',
                            name='conv31')(a3)
    n31=tf.keras.layers.BatchNormalization()(conv31)
    a31=tf.keras.layers.ReLU()(n31)
    # 1x1 stride-2 projection of a2: shortcut path for the residual add below.
    conv30 = tf.keras.layers.Conv2D(filters=32,
                            kernel_size=[1,1],
                            strides=2,
                            padding='SAME',
                            name='conv30')(a2)
    # Residual connection a31 + conv30; n3/a3 are intentionally rebound here.
    n3=tf.keras.layers.BatchNormalization()(a31+conv30)
    a3=tf.keras.layers.ReLU()(n3)
    conv4 = tf.keras.layers.Conv2D(filters=16,
                            kernel_size=[3, 3],
                            strides=2,
                            padding='SAME',
                            name='conv4')(a3)
    n4=tf.keras.layers.BatchNormalization()(conv4)
    a4=tf.keras.layers.ReLU()(n4)
    # Dropout stays active at inference (training=True); the rate is fed
    # through the keep_prob placeholder.
    self.drop_prob = tf.compat.v1.placeholder(tf.float32, name='keep_prob')
    dropout = tf.keras.layers.Dropout(self.drop_prob,
                            name='dropout')(a4,training=True)
    flat=tf.keras.layers.Flatten()(dropout)
    self.logits=tf.squeeze(tf.keras.layers.Dense(2)(flat))
| 2,898
| 44.296875
| 91
|
py
|
MRI-ROI-prediction
|
MRI-ROI-prediction-main/bmbn.py
|
import tensorflow as tf
def inference(self):
    """3D-convolution backbone with two dropout heads; self.logits is a
    (first, second) pair of scalar boundary predictions.
    """
    conv0 = tf.keras.layers.Conv3D(filters=16,
                                   kernel_size=[5, 5, 5],
                                   padding='SAME',
                                   name='conv0')(tf.expand_dims(self.img, axis=-1))
    pool0 = tf.keras.layers.MaxPool3D(pool_size=[2, 2, 1], strides=2, name='pool0')(conv0)
    n0 = tf.keras.layers.BatchNormalization()(pool0)
    a0 = tf.keras.layers.ReLU()(n0)
    conv1 = tf.keras.layers.Conv3D(filters=32,
                                   kernel_size=[5, 5, 5],
                                   padding='SAME',
                                   name='conv1')(a0)
    pool1 = tf.keras.layers.MaxPool3D(pool_size=[2, 2, 1], strides=2, name='pool1')(conv1)
    n1 = tf.keras.layers.BatchNormalization()(pool1)
    a1 = tf.keras.layers.ReLU()(n1)
    conv2 = tf.keras.layers.Conv3D(filters=32,
                                   kernel_size=[5, 5, 5],
                                   strides=[2, 2, 1],
                                   padding='SAME',
                                   name='conv2')(a1)
    n2 = tf.keras.layers.BatchNormalization()(conv2)
    a2 = tf.keras.layers.ReLU()(n2)
    conv3 = tf.keras.layers.Conv3D(filters=32,
                                   kernel_size=[3, 3, 3],
                                   strides=2,
                                   padding='SAME',
                                   name='conv3')(a2)
    n3 = tf.keras.layers.BatchNormalization()(conv3)
    a3 = tf.keras.layers.ReLU()(n3)
    # Two parallel heads branch from a3, one per boundary coordinate.
    conv4 = tf.keras.layers.Conv3D(filters=16,
                                   kernel_size=[3, 3, 3],
                                   strides=2,
                                   padding='SAME',
                                   name='conv4')(a3)
    n4 = tf.keras.layers.BatchNormalization()(conv4)
    a4 = tf.keras.layers.ReLU()(n4)
    conv42 = tf.keras.layers.Conv3D(filters=16,
                                    kernel_size=[3, 3, 3],
                                    strides=2,
                                    padding='SAME',
                                    name='conv42')(a3)
    n42 = tf.keras.layers.BatchNormalization()(conv42)
    a42 = tf.keras.layers.ReLU()(n42)
    # Dropout stays active at inference (training=True); the rate is fed
    # through the keep_prob placeholder.
    self.drop_prob = tf.compat.v1.placeholder(tf.float32, name='keep_prob')
    dropout = tf.keras.layers.Dropout(self.drop_prob,
                                      name='dropout')(a4, training=True)
    dropout2 = tf.keras.layers.Dropout(self.drop_prob,
                                       name='dropout2')(a42, training=True)
    flat = tf.keras.layers.Flatten()(dropout)
    mean = tf.math.reduce_mean(flat, keepdims=True)
    flat2 = tf.keras.layers.Flatten()(dropout2)
    # Bug fix: mean2 was previously computed from `flat`, which silently
    # discarded the entire conv42/dropout2 branch; it must come from flat2.
    mean2 = tf.math.reduce_mean(flat2, keepdims=True)
    self.logits = (tf.squeeze(tf.keras.layers.Dense(2)(mean))[0],
                   tf.squeeze(tf.keras.layers.Dense(2)(mean2))[0])
| 2,932
| 44.828125
| 114
|
py
|
MRI-ROI-prediction
|
MRI-ROI-prediction-main/share.py
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
def inference(self):
    """Shared-encoder backbone.

    Every slice of the input volume is encoded by the same small CNN, the
    per-slice feature maps are fused (attention-weighted sum, or a plain
    mean when attention is disabled), and a regression head writes the two
    boundary predictions into self.logits.
    """
    encoder_input = keras.Input(shape=(512, 512, 1), name="one_slice")
    x = layers.Conv2D(16, 5, activation="relu", strides=2)(encoder_input)
    x = layers.LayerNormalization()(x)
    x2 = layers.Conv2D(32, 5, activation="relu", strides=2)(x)
    encoder_output = layers.LayerNormalization()(x2)
    # NOTE(review): the x3/encoder_output2 branch feeds no model output and
    # appears dead; it is kept so that the auto-generated names of the
    # layers created below stay unchanged and existing checkpoints keep
    # loading -- confirm before removing.
    x3 = layers.Conv2D(32, 3, activation="relu", strides=2)(encoder_output)
    encoder_output2 = layers.BatchNormalization()(x3)
    encoder = keras.Model(encoder_input, encoder_output, name="encoder")
    # Per-slice scalar attention score head.
    h = layers.Conv2D(32, 3, activation="relu", strides=2)(encoder_output)
    h = layers.LayerNormalization()(h)
    h = layers.Flatten()(h)
    attnn_output = layers.Dense(1)(h)
    attnder = keras.Model(encoder_input, attnn_output, name="attentionnet")
    # Clarity fix: was the obscure `use_attn = (False,True)[1]`.
    use_attn = True
    # [batch, H, W, slices] -> [slices, H, W, batch] so the maps below
    # iterate over slices.
    self.img = tf.transpose(self.img,[3,1,2,0])
    stack=tf.vectorized_map(lambda x0:encoder(tf.expand_dims(x0, axis=0)), self.img)
    if use_attn:
        attention=tf.vectorized_map(lambda x0:attnder(tf.expand_dims(x0, axis=0)), self.img)
        self.alpha=layers.Softmax()(tf.squeeze(attention,[1,2]))
        # Attention-weighted sum of the per-slice feature maps.
        first=tf.math.reduce_sum(stack*tf.reshape(self.alpha,(-1,1,1,1,1)),axis=0)
    else:
        first=tf.math.reduce_mean(stack,axis=0)
    x = layers.Conv2D(32, 3, activation="relu", strides=2)(first)
    x = layers.BatchNormalization()(x)
    flat = layers.Flatten()(x)
    # Dropout stays active at inference (training=True); the rate is fed
    # through the keep_prob placeholder.
    self.drop_prob = tf.compat.v1.placeholder(tf.float32, name='keep_prob')
    dropout = layers.Dropout(self.drop_prob, name='dropout')(flat,training=True)
    self.logits = tf.squeeze(layers.Dense(2)(dropout))
| 1,724
| 42.125
| 92
|
py
|
MRI-ROI-prediction
|
MRI-ROI-prediction-main/demo.py
|
import os
import numpy as np
import time
import glob
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
FLAGS = tf.compat.v1.flags.FLAGS
tf.compat.v1.flags.DEFINE_string('EXP','newattn2AsS/ckpt-80546',"exp and ckpt name")
tf.compat.v1.flags.DEFINE_integer('mod', 0, "model") # 0=share, 1=chstack, 2=3D
class ConvNet(object):
    """Inference-only wrapper: restores a trained checkpoint (named via
    --EXP) and reports boundary error and 1D IoU on the test TFRecords.
    The backbone is chosen by --mod: 0 = shared 2D encoder,
    1 = channel-stacked 2D, 2 = 3D convolutions.
    """
    def __init__(self):
        self.lr = 0.0001
        self.batch_size = 1
        self.gstep = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        self.image_size=512
        if FLAGS.mod==1:
            from bmbn2D import inference
        elif FLAGS.mod==2:
            from bmbn import inference
        else:
            from share import inference
        # Bug fix: the selected `inference` function used to be imported and
        # then discarded, so build() -> self.inference() raised
        # AttributeError.  It is declared as `def inference(self)`, so bind
        # it to this instance as a method.
        self.inference = inference.__get__(self)
    def parser(self,serialized_example):
        """Parses a single tf.Example into image and label tensors."""
        features = tf.io.parse_single_example(serialized_example,
                                   features={
                                       'top': tf.io.FixedLenFeature([], tf.float32),
                                       'bottom': tf.io.FixedLenFeature([], tf.float32),
                                       'right': tf.io.FixedLenFeature([], tf.float32),
                                       'left': tf.io.FixedLenFeature([], tf.float32),
                                       'image': tf.io.FixedLenFeature([], tf.string),
                                   }, name='features')
        image = tf.io.decode_raw(features['image'], tf.float32)
        image = tf.reshape(image, [self.image_size,self.image_size,-1])
        # Label order is fixed: [top, bottom, right, left].
        label = tf.stack([features['top'],features['bottom'],features['right'],features['left']])
        return image,label
    def get_data(self):
        """Build the TFRecord input pipeline (no shuffling or shift
        augmentation -- this is evaluation only)."""
        with tf.name_scope('data'):
            self.filenames = tf.compat.v1.placeholder(tf.string, shape=[None])
            dataset = tf.data.TFRecordDataset(self.filenames)
            dataset=dataset.map(self.parser,num_parallel_calls=4)
            if FLAGS.mod!=1:
                dataset=dataset.batch(1)
            else:
                # The channel-stacked model needs a fixed depth of 40 slices.
                dataset=dataset.padded_batch(self.batch_size,padded_shapes=([512,512,40],[4]))
            self.iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
            self.img, self.label= self.iterator.get_next()
            self.img=tf.image.per_image_standardization(self.img)
    def build(self):
        """Assemble the graph: data pipeline plus the selected backbone."""
        self.get_data()
        self.inference()
    def eval_once(self, sess, init):
        """Run a full pass over ./test TFRecords and return
        (mean absolute boundary error, mean 1D IoU)."""
        eval_filenames=sorted(glob.glob("./test/*.tfrecord"))
        start_time = time.time()
        sess.run(init.initializer, feed_dict={self.filenames:eval_filenames})
        scores=[]
        truepf=[]
        IoUs=[]
        try:
            while True:
                score,btrue_pf= sess.run([self.logits,self.label], feed_dict={self.drop_prob:0.0})
                # Clamp predictions to the valid pixel range [0, 512].
                score=[max(0,score[0]),min(512.0, score[1])]
                scores+=[score[0],score[1]]
                truepf+=[btrue_pf[0][3],btrue_pf[0][2]]
                # 1D IoU of the predicted vs. true [left, right] interval.
                IoUs+=[(min(score[1],btrue_pf[0][2])-max(score[0],btrue_pf[0][3]))/(max(score[1],btrue_pf[0][2])-min(score[0],btrue_pf[0][3]))]
        except tf.errors.OutOfRangeError:
            pass
        end_time = time.time()
        print('TIME ', end_time-start_time)
        print(eval_filenames)
        print('score= ', scores, 'label= ', truepf)
        pf_error=np.mean(abs(np.array(scores)-np.array(truepf)))
        IoU=np.mean(np.array(IoUs))
        print('IoU= ', IoUs)
        return pf_error,IoU
    def train(self):
        """Restore the checkpoint named by --EXP and run one evaluation."""
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        config.gpu_options.per_process_gpu_memory_fraction = 0.95
        with tf.compat.v1.Session(config=config) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            saver = tf.compat.v1.train.Saver()
            saver.restore(sess, 'checkpoints/'+FLAGS.EXP)
            pf_error,iou=self.eval_once(sess, self.iterator)
            print('DONE with error ', pf_error, iou)
if __name__ == '__main__':
    # Build the graph, then restore the checkpoint and evaluate once.
    net = ConvNet()
    net.build()
    net.train()
| 4,214
| 39.142857
| 144
|
py
|
self-adaptive
|
self-adaptive-master/eval.py
|
import glob
from datetime import datetime
from tqdm import tqdm
from torch.utils.data import DataLoader
from utils.parser import val_parser
from loss.semantic_seg import CrossEntropyLoss
import models.backbone
import models
from utils.modeling import freeze_layers
from utils.self_adapt_norm import reinit_alpha
from utils.metrics import *
from utils.calibration import *
from datasets.labels import *
from datasets.self_adapt_augment import TrainTestAugDataset
torch.backends.cudnn.benchmark = True
# We set a maximum image size which can be fit on the GPU, in case the image is larger, we first downsample it
# to then upsample the prediction back to the original resolution. This is especially required for high resolution
# Mapillary images
img_max_size = [1024, 2048]
def main(opts):
    """Self-adaptive evaluation loop.

    For every test image: optionally build a pseudo ground truth by fusing
    augmented predictions, fine-tune the model on it for a few epochs
    (skipped under --only_inf; under --tta the fused prediction itself is
    scored), then evaluate the final prediction with mean IoU and,
    optionally, calibration (ECE).
    """
    # Setup metric
    time_stamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    iou_meter = runningScore(opts.num_classes)
    print(f"Current inference run {time_stamp} has started!")
    # Set device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Setup dataset and transforms
    test_dataset = TrainTestAugDataset(device=device,
                                       root=opts.dataset_root,
                                       only_inf=opts.only_inf,
                                       source=opts.source,
                                       crop_size=img_max_size,
                                       split=opts.dataset_split,
                                       threshold=opts.threshold,
                                       tta=opts.tta,
                                       flips=opts.flips,
                                       scales=opts.scales,
                                       grayscale=opts.grayscale)
    test_loader = DataLoader(test_dataset,
                             batch_size=opts.batch_size,
                             shuffle=False,
                             num_workers=opts.num_workers)
    # Load and setup model
    model = models.__dict__[opts.arch_type](backbone_name=opts.backbone_name,
                                            num_classes=opts.num_classes,
                                            update_source_bn=False,
                                            dropout=opts.dropout)
    model = torch.nn.DataParallel(model)
    # Pick newest checkpoints
    if os.path.exists(opts.checkpoints_root):
        checkpoint = max(glob.glob(os.path.join(opts.checkpoints_root, opts.checkpoint)), key=os.path.getctime)
        model.load_state_dict(torch.load(checkpoint, map_location=device), strict=True)
        # Reinitialize alpha if a custom alpha other than the one in the checkpoints is given
        if opts.alpha is not None:
            reinit_alpha(model, alpha=opts.alpha, device=device)
    else:
        raise ValueError(f"Checkpoints directory {opts.checkpoints_root} does not exist")
    model = model.to(device)
    # Set up Self-adaptive learning optimizer and loss
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=opts.base_lr,
        momentum=opts.momentum,
        weight_decay=opts.weight_decay
    )
    criterion = CrossEntropyLoss().to(device)
    if opts.calibration:
        # Calibration meter
        cal_meter = CalibrationMeter(
            device,
            n_bins=10,
            num_classes=opts.num_classes,
            num_images=len(test_loader)
        )
    model.eval()
    # Create GradScaler for mixed precision
    if opts.mixed_precision:
        scaler = torch.cuda.amp.GradScaler()
    for test_idx, (img_test, gt_test, crop_test, crop_transforms) in enumerate(tqdm(test_loader)):
        # Put img on GPU if available
        img_test = img_test.to(device)
        if opts.only_inf:
            # Forward pass with original image
            with torch.no_grad():
                if opts.mixed_precision:
                    with torch.cuda.amp.autocast():
                        out_test = model(img=img_test)['pred']
                else:
                    out_test = model(img=img_test)['pred']
        else:
            # Reload checkpoints
            # (the model is reset for every image so each adaptation starts
            # from the same source weights)
            model.load_state_dict(torch.load(checkpoint, map_location=device), strict=True)
            # Reinitialize alpha if a custom alpha other than the one in the checkpoints is given
            if opts.alpha is not None:
                reinit_alpha(model, alpha=opts.alpha, device=device)
            model = model.to(device)
            # Compute augmented predictions
            crop_test_fused = []
            for crop_test_sub in crop_test:
                with torch.no_grad():
                    if opts.mixed_precision:
                        with torch.cuda.amp.autocast():
                            out_test = model(img=crop_test_sub)['pred']
                    else:
                        out_test = model(img=crop_test_sub)['pred']
                crop_test_fused.append(torch.nn.functional.softmax(out_test, dim=1))
            # Create pseudo gt from augmentations based on their softmax probabilities
            pseudo_gt = test_dataset.create_pseudo_gt(
                crop_test_fused, crop_transforms, [1, opts.num_classes, *img_test.shape[-2:]]
            )
            pseudo_gt = pseudo_gt.to(device)
            if opts.tta:
                # Use pseudo gt for evaluation
                out_test = pseudo_gt
            else:
                model.train()
                # Freeze layers if given
                freeze_layers(opts, model)
                # Self-adaptive learning loop
                # (fine-tune on this single image against its pseudo gt)
                model = model.to(device)
                for epoch in range(opts.num_epochs):
                    if opts.mixed_precision:
                        with torch.cuda.amp.autocast():
                            out_test = model(img=img_test)['pred']
                    else:
                        out_test = model(img=img_test)['pred']
                    if opts.mixed_precision:
                        with torch.cuda.amp.autocast():
                            loss_train = criterion(out_test, pseudo_gt)
                    else:
                        loss_train = criterion(out_test, pseudo_gt)
                    optimizer.zero_grad()
                    if opts.mixed_precision:
                        scaler.scale(loss_train).backward()
                        scaler.step(optimizer)
                        scaler.update()
                    else:
                        loss_train.backward()
                        optimizer.step()
                # Do actual forward pass with updated model
                model.eval()
                with torch.no_grad():
                    if opts.mixed_precision:
                        with torch.cuda.amp.autocast():
                            out_test = model(img=img_test)['pred']
                    else:
                        out_test = model(img=img_test)['pred']
        # Upsample prediction to gt resolution
        out_test = torch.nn.functional.interpolate(out_test, size=gt_test.shape[-2:], mode='bilinear')
        # Update calibration meter
        if opts.calibration:
            cal_meter.calculate_bins(out_test, gt_test.to(device))
        # Add prediction
        iou_meter.update(gt_test.cpu().numpy(), torch.argmax(out_test, dim=1).cpu().numpy())
    # Save output
    score, _, _, _ = iou_meter.get_scores()
    mean_iou = score['Mean IoU :']
    # Compute ECE
    if opts.calibration:
        cal_meter.calculate_mean_over_dataset()
        print(f"ECE: {cal_meter.overall_ece}")
    print(f"Mean IoU: {mean_iou}")
    print(f"Current inference run {time_stamp} is finished!")
if __name__ == '__main__':
    # Parse CLI options, echo them for the log, then run the evaluation.
    cli_opts = val_parser()
    print(cli_opts)
    main(cli_opts)
| 7,737
| 38.886598
| 114
|
py
|
self-adaptive
|
self-adaptive-master/train.py
|
import pathlib, os
from torch.utils.data import DataLoader
from torch.nn import SyncBatchNorm
from datetime import datetime
from tqdm import tqdm
from shutil import copyfile
from utils.parser import train_parser
import models.backbone
from loss.semantic_seg import CrossEntropyLoss
import datasets
from optimizer.schedulers import *
from utils.metrics import *
from utils.distributed import init_process, clean_up
from utils import transforms
from utils.self_adapt_norm import reinit_alpha
import torch.distributed
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
# We set a maximum image size which can be fit on the GPU, in case the image is larger, we first downsample it
# to then upsample the prediction back to the original resolution. This is especially required for high resolution
# Mapillary images
img_max_size = (1024, 2048)
def main(opts):
    """Entry point: dispatch training to one process per GPU or a single process.

    Args:
        opts: Parsed command-line options (see utils.parser.train_parser).
    """
    # Distributed mode only makes sense with CUDA available.
    if not torch.cuda.is_available():
        opts.distributed = False
    if opts.distributed:
        # Split the global batch across the participating GPUs and fork
        # one training process per GPU.
        opts.batch_size = opts.batch_size // opts.gpus
        mp.spawn(train, nprocs=opts.gpus, args=(opts,))
    else:
        # Single process: DataParallel on GPU(s) or plain CPU execution.
        train(gpu=0, opts=opts)
def train(gpu: int,
          opts):
    """Train a self-adaptive segmentation model and periodically validate it.

    Builds datasets/transforms for the source (train) and target (val)
    domains, constructs the model, runs SGD training, and during validation
    sweeps over a grid of SaN ``alpha`` values, checkpointing the best model
    per alpha. At the end the overall best-alpha checkpoint is copied to
    ``<time_stamp>.pth``.

    Args:
        gpu: Local GPU index (also the distributed rank offset when spawned).
        opts: Parsed command-line options (see utils.parser.train_parser).
    """
    # Create checkpoints directory
    pathlib.Path(opts.checkpoints_root).mkdir(parents=True, exist_ok=True)
    # Setup dataset
    # Get target domain from dataset path
    target_train = os.path.basename(opts.dataset_root)
    target_val = os.path.basename(opts.val_dataset_root)
    train_transforms = transforms.Compose([transforms.RandomResizedCrop(opts.crop_size),
                                           transforms.RandomHFlip(),
                                           transforms.RandGaussianBlur(),
                                           transforms.ColorJitter(),
                                           transforms.MaskGrayscale(),
                                           transforms.ToTensor(),
                                           transforms.IdsToTrainIds(source=target_train, target=target_train),
                                           transforms.Normalize()])
    # Validation images are capped at img_max_size to fit on the GPU.
    val_transforms = transforms.Compose([transforms.ToTensor(),
                                         transforms.IdsToTrainIds(source=target_train, target=target_val),
                                         transforms.ImgResize(img_max_size),
                                         transforms.Normalize()])
    # Dataset classes are looked up by directory basename (e.g. "cityscapes").
    train_dataset = datasets.__dict__[target_train](root=opts.dataset_root,
                                                    split="train",
                                                    transforms=train_transforms)
    val_dataset = datasets.__dict__[target_val](root=opts.val_dataset_root,
                                                split="val",
                                                transforms=val_transforms)
    # Setup model
    model = models.__dict__[opts.arch_type](backbone_name=opts.backbone_name,
                                            num_classes=opts.num_classes,
                                            alpha=opts.alpha,
                                            dropout=opts.dropout,
                                            update_source_bn=True)
    if opts.distributed:
        # Initialize process group
        rank = init_process(opts, gpu)
        # Convert batch normalization to SyncBatchNorm and setup CUDA
        model = SyncBatchNorm.convert_sync_batchnorm(model)
        torch.cuda.set_device(gpu)
        model.cuda(gpu)
        # Wrap model in DistributedDataParallel
        model = torch.nn.parallel.DistributedDataParallel(module=model, device_ids=[gpu], find_unused_parameters=True)
        # Setup data sampler and loader
        train_sampler = DistributedSampler(dataset=train_dataset, num_replicas=opts.world_size, rank=rank, shuffle=True)
        val_sampler = DistributedSampler(dataset=val_dataset, num_replicas=opts.world_size, rank=rank, shuffle=False)
    else:
        # Run on GPU if available else on CPU
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        model = torch.nn.DataParallel(model).to(device)
        train_sampler = None
        val_sampler = None
    # Set main process and device
    main_process = not opts.distributed or (opts.distributed and rank == 0)
    device = gpu if opts.distributed else device
    # Add tensorboard writer and setup metric
    time_stamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    if main_process:
        print(f"Current training run {time_stamp} has started!")
    iou_meter = runningScore(opts.num_classes)
    # Alpha grid for self-adaptive normalization; a single fixed alpha when
    # num_alphas == 1.
    alphas = np.round(np.linspace(0, 1, opts.num_alphas), 5) if opts.num_alphas > 1 else [opts.alpha]
    # Setup dataloader
    train_loader = DataLoader(train_dataset,
                              batch_size=opts.batch_size,
                              num_workers=opts.num_workers,
                              sampler=train_sampler,
                              shuffle=(train_sampler is None),
                              pin_memory=True if torch.cuda.is_available() else False)
    val_loader = DataLoader(val_dataset,
                            batch_size=1,
                            num_workers=opts.num_workers,
                            sampler=val_sampler,
                            shuffle=False,
                            pin_memory=True if torch.cuda.is_available() else False)
    # Setup loss
    criterion = CrossEntropyLoss().to(device)
    # Setup lr scheduler, optimizer and loss
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=opts.base_lr,
                                momentum=opts.momentum,
                                weight_decay=opts.weight_decay)
    scheduler = get_scheduler(scheduler_type=opts.lr_scheduler,
                              optimizer=optimizer,
                              max_iter=len(train_loader) * opts.num_epochs + 1)
    # Training
    mean_iou_best_alphas = [0] * opts.num_alphas
    model.train()
    for epoch in tqdm(range(opts.num_epochs)):
        if opts.distributed:
            train_sampler.set_epoch(epoch)
        for train_idx, (img_train, gt_train) in enumerate(train_loader):
            # Put img and gt on GPU if available
            img_train, gt_train = img_train.to(device), gt_train.to(device)
            # Forward pass, backward pass and optimization
            out_train = model(img=img_train)
            loss_train = criterion(out_train['pred'], gt_train)
            # Zero the parameter gradients
            optimizer.zero_grad()
            loss_train.backward()
            optimizer.step()
            scheduler.step()
        # Validation
        if epoch >= opts.validation_start and epoch % opts.validation_step == 0:
            # NOTE(review): only rank 0 validates; other ranks skip straight to
            # model.train() below.
            if main_process:
                # Set model to eval
                model.eval()
                with torch.no_grad():
                    score_alphas, class_iou_epoch_alphas = [], []
                    for alpha_idx, alpha in enumerate(alphas):
                        # Re-initialize SaN mixing coefficient for this sweep value.
                        reinit_alpha(model, alpha, device)
                        for val_idx, (img_val, gt_val) in enumerate(val_loader):
                            # Put img and gt on GPU if available
                            img_val, gt_val = img_val.to(device), gt_val.to(device)
                            # Forward pass and loss calculation
                            out_val = model(img=img_val)['pred']
                            # Upsample prediction to gt resolution
                            out_val = torch.nn.functional.interpolate(out_val,
                                                                      size=gt_val.shape[-2:],
                                                                      mode='bilinear')
                            # Update iou meter
                            iou_meter.update(gt_val.cpu().numpy(), torch.argmax(out_val, dim=1).cpu().numpy())
                        score, class_iou_epoch, _, _ = iou_meter.get_scores()
                        mean_iou_epoch = score['Mean IoU :']
                        score_alphas.append(mean_iou_epoch)
                        iou_meter.reset()
                        # Save model if mean iou higher than before
                        if mean_iou_epoch > mean_iou_best_alphas[alpha_idx]:
                            checkpoints_path = os.path.join(opts.checkpoints_root,
                                                            time_stamp + f'_alpha_{alpha}.pth')
                            if os.path.isfile(checkpoints_path):
                                os.remove(checkpoints_path)
                            torch.save(model.state_dict(), checkpoints_path)
                            mean_iou_best_alphas[alpha_idx] = mean_iou_epoch
            # Switch model to train
            model.train()
    # Final result
    # NOTE(review): this runs after the epoch loop and relies on the leaked
    # loop variable `epoch`; with num_epochs == 0 `epoch` would be unbound.
    if main_process and epoch == opts.num_epochs - 1:
        print(f"alphas: {[i for i in alphas]}:")
        print(f"IoUs: {mean_iou_best_alphas}")
        checkpoints_path = os.path.join(opts.checkpoints_root, time_stamp + '.pth')
        if os.path.isfile(checkpoints_path):
            os.remove(checkpoints_path)
        # Promote the checkpoint of the best-performing alpha to the
        # canonical <time_stamp>.pth name.
        alpha_ind_max = torch.argmax(torch.tensor(mean_iou_best_alphas)).item()
        alpha = alphas[alpha_ind_max]
        checkpoints_alpha_path = os.path.join(opts.checkpoints_root,
                                              time_stamp + f'_alpha_{alpha}.pth')
        copyfile(checkpoints_alpha_path, checkpoints_path)
        print(f"Saved checkpoint based on alpha = {alpha}")
        print(f"Current training run {time_stamp} is finished!")
    if opts.distributed:
        clean_up()
if __name__ == '__main__':
    # Parse training options, echo them for the log, and launch training.
    args = train_parser()
    print(args)
    main(args)
| 9,999
| 44.248869
| 120
|
py
|
self-adaptive
|
self-adaptive-master/models/hrnet.py
|
"""Source: https://github.com/HRNet/HRNet-Semantic-Segmentation"""
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by RainbowSecret (yhyuan@pku.edu.cn)
# ------------------------------------------------------------------------------
import torch
import numpy as np
import logging
from typing import Dict
import torch.nn as nn
import torch.nn.functional as F
from torch.hub import load_state_dict_from_url
from utils.dropout import add_dropout
from utils.self_adapt_norm import replace_batchnorm
logger = logging.getLogger('hrnet_backbone')
ALIGN_CORNERS = None
__all__ = ['hrnet18', 'hrnet32', 'hrnet48']
model_urls = {
'hrnet18': 'https://opr0mq.dm.files.1drv.com/y4mIoWpP2n-LUohHHANpC0jrOixm1FZgO2OsUtP2DwIozH5RsoYVyv_De5wDgR6XuQmirMV3C0AljLeB-zQXevfLlnQpcNeJlT9Q8LwNYDwh3TsECkMTWXCUn3vDGJWpCxQcQWKONr5VQWO1hLEKPeJbbSZ6tgbWwJHgHF7592HY7ilmGe39o5BhHz7P9QqMYLBts6V7QGoaKrr0PL3wvvR4w',
'hrnet32': 'https://opr74a.dm.files.1drv.com/y4mKOuRSNGQQlp6wm_a9bF-UEQwp6a10xFCLhm4bqjDu6aSNW9yhDRM7qyx0vK0WTh42gEaniUVm3h7pg0H-W0yJff5qQtoAX7Zze4vOsqjoIthp-FW3nlfMD0-gcJi8IiVrMWqVOw2N3MbCud6uQQrTaEAvAdNjtjMpym1JghN-F060rSQKmgtq5R-wJe185IyW4-_c5_ItbhYpCyLxdqdEQ',
'hrnet48': 'https://optgaw.dm.files.1drv.com/y4mWNpya38VArcDInoPaL7GfPMgcop92G6YRkabO1QTSWkCbo7djk8BFZ6LK_KHHIYE8wqeSAChU58NVFOZEvqFaoz392OgcyBrq_f8XGkusQep_oQsuQ7DPQCUrdLwyze_NlsyDGWot0L9agkQ-M_SfNr10ETlCF5R7BdKDZdupmcMXZc-IE3Ysw1bVHdOH4l-XEbEKFAi6ivPUbeqlYkRMQ'
}
# model_urls = {
# 'resnet18_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet18_ibn_a-2f571257.pth',
# 'resnet34_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet34_ibn_a-94bc1577.pth',
# 'resnet50_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet50_ibn_a-d9d0bb7b.pth',
# 'resnet101_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet101_ibn_a-59ea0ac6.pth',
# 'resnet18_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet18_ibn_b-bc2f3c11.pth',
# 'resnet34_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet34_ibn_b-04134c37.pth',
# 'resnet50_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet50_ibn_b-9ca61e85.pth',
# 'resnet101_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet101_ibn_b-c55f6dba.pth',
# }
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution; padding equals dilation so spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 pointwise convolution (no padding, no bias)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions plus identity shortcut.

    Output channels equal ``planes`` (expansion factor 1). ``downsample`` is
    applied to the shortcut when shape or stride differs.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # The basic block does not support grouped/widened/dilated variants.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and the optional downsample) carry the spatial stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path (possibly reshaped by the downsample module).
        shortcut = x if self.downsample is None else self.downsample(x)
        # Main path: conv-bn-relu, conv-bn.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual addition followed by the final activation.
        out = out + shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    Output channels equal ``planes * expansion``. ``downsample`` adapts the
    shortcut when channels or stride change.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (wide/grouped variants).
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and the optional downsample) carry the spatial stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path (possibly reshaped by the downsample module).
        shortcut = x if self.downsample is None else self.downsample(x)
        # Main path: 1x1 reduce, 3x3, 1x1 expand.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Residual addition followed by the final activation.
        out = out + shortcut
        return self.relu(out)
class HighResolutionModule(nn.Module):
    """One HRNet stage module: parallel multi-resolution branches plus fusion.

    Each branch processes features at its own resolution; fuse layers then
    exchange information between all branch pairs (1x1 conv + upsample for
    low->high, strided 3x3 convs for high->low) and sum the results.
    """
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True, norm_layer=None):
        super(HighResolutionModule, self).__init__()
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.norm_layer = norm_layer
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        # When False, only the highest-resolution output is produced.
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=True)
    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        """Validate that per-branch config lists all match num_branches."""
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        """Build the residual-block stack for a single resolution branch."""
        downsample = None
        # A projection shortcut is needed when stride or channel count changes.
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(num_channels[branch_index] * block.expansion),
            )
        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride, downsample, norm_layer=self.norm_layer))
        # Track the branch's new channel count for subsequent blocks/fusion.
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index], norm_layer=self.norm_layer))
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build all parallel branches."""
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        """Build the cross-resolution exchange layers.

        fuse_layers[i][j] maps branch j's features to branch i's resolution:
        j > i uses a 1x1 conv (upsampling happens in forward), j == i is
        identity (None), j < i chains i-j strided 3x3 convs.
        """
        if self.num_branches == 1:
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        self.norm_layer(num_inchannels[i])))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        # Last conv in the chain also adapts channels to branch i;
                        # intermediate convs keep branch j's channels and add ReLU.
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3),
                                nn.ReLU(inplace=True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def get_num_inchannels(self):
        """Return the (possibly updated) per-branch channel counts."""
        return self.num_inchannels
    def forward(self, x):
        """Run branches on the list of per-resolution tensors, then fuse them."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    # Lower-resolution input: project then upsample to branch i.
                    width_output = x[i].shape[-1]
                    height_output = x[i].shape[-2]
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[height_output, width_output],
                        mode='bilinear',
                        align_corners=True
                    )
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
# Map the config's BLOCK string to the residual block class used when
# building branches (see HighResolutionNet / hrnet_config MODEL_CONFIGS).
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
    """HRNet backbone with a 1x1-conv segmentation head.

    The network maintains several parallel resolution branches (built from
    HighResolutionModule stages) and repeatedly fuses them. The head
    upsamples all branch outputs to the highest branch resolution,
    concatenates them and predicts ``num_classes`` channels.

    Args:
        cfg: Stage configuration mapping with 'STAGE1'..'STAGE4' sub-configs
            (e.g. ``MODEL_CONFIGS['hrnet18']``).
        norm_layer: Normalization layer factory; defaults to ``nn.BatchNorm2d``.
        num_classes: Number of output segmentation classes.
    """
    def __init__(self,
                 cfg,
                 norm_layer=None,
                 num_classes: int = 19):
        super(HighResolutionNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.norm_layer = norm_layer
        # Stem: two stride-2 3x3 convs -> 64 channels at 1/4 input resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = self.norm_layer(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = self.norm_layer(64)
        self.relu = nn.ReLU(inplace=True)
        # stage 1: single high-resolution branch.
        self.stage1_cfg = cfg['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = block.expansion * num_channels
        # stage 2: two branches.
        self.stage2_cfg = cfg['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)
        # stage 3: three branches.
        self.stage3_cfg = cfg['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)
        # stage 4: four branches.
        self.stage4_cfg = cfg['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24,
        # where it raised AttributeError at model construction. The builtin
        # int() is the documented drop-in replacement.
        last_inp_channels = int(np.sum(pre_stage_channels))
        # Segmentation head over the concatenated multi-branch features.
        self.last_layer = nn.Sequential(
            nn.Conv2d(
                in_channels=last_inp_channels,
                out_channels=last_inp_channels,
                kernel_size=1,
                stride=1,
                padding=0),
            self.norm_layer(last_inp_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(
                in_channels=last_inp_channels,
                out_channels=num_classes,
                kernel_size=1,
                stride=1,
                padding=0)
        )
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch adapters between consecutive stages.

        Existing branches get a 3x3 conv only when the channel count changes
        (None otherwise); each new, lower-resolution branch is created from
        the previous stage's lowest-resolution branch via strided 3x3 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        self.norm_layer(num_channels_cur_layer[i]),
                        nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    # Only the last conv of the chain switches to the new
                    # branch's channel count.
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        self.norm_layer(outchannels),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks, adding a projection shortcut
        on the first block when stride or channel count changes."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample, norm_layer=self.norm_layer))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes, norm_layer=self.norm_layer))
        return nn.Sequential(*layers)
    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Build a stage as a sequence of HighResolutionModules; returns the
        modules and the resulting per-branch channel counts."""
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # multi_scale_output is only used last module
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     reset_multi_scale_output,
                                     norm_layer=self.norm_layer)
            )
            num_inchannels = modules[-1].get_num_inchannels()
        return nn.Sequential(*modules), num_inchannels
    def forward(self, x):
        """Return segmentation logits at 1/4 of the input resolution."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        # Fan out to the stage-2 branches via transition1.
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                # New branches are derived from the lowest-resolution output.
                if i < self.stage2_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition2[i](y_list[i]))
                else:
                    x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                if i < self.stage3_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition3[i](y_list[i]))
                else:
                    x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)
        # Upsample all branches to the highest resolution and concatenate.
        # NOTE(review): ALIGN_CORNERS is the module-level global (None here,
        # i.e. PyTorch's default behavior) — confirm if it is set elsewhere.
        x0_h, x0_w = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x = torch.cat([x[0], x1, x2, x3], 1)
        x = self.last_layer(x)
        return x
def _hrnet(arch, pretrained, progress, num_classes: int = 19):
    """Instantiate a HighResolutionNet for ``arch`` and optionally load
    the published ImageNet-pretrained weights.

    Args:
        arch: Key into MODEL_CONFIGS / model_urls ('hrnet18'|'hrnet32'|'hrnet48').
        pretrained: Whether to download and load pretrained weights.
        progress: Show a download progress bar.
        num_classes: Number of segmentation classes for the head.
    """
    from models.hrnet_config import MODEL_CONFIGS
    net = HighResolutionNet(MODEL_CONFIGS[arch], num_classes=num_classes)
    if not pretrained:
        return net
    # strict=False: the checkpoint's head does not match the segmentation head.
    weights = load_state_dict_from_url(model_urls[arch], progress=progress)
    net.load_state_dict(weights, strict=False)
    return net
class HRNet(torch.nn.Module):
    """Segmentation wrapper around a pretrained HRNet backbone.

    Optionally inserts dropout layers and replaces BatchNorm with
    self-adaptive normalization (SaN) layers; the forward pass upsamples
    the logits back to the input resolution.
    """
    def __init__(self,
                 hrnet_name: str,
                 num_classes: int = 19,
                 dropout: bool = False,
                 alpha: float = 0.0,
                 update_source_bn: bool = True):
        super(HRNet, self).__init__()
        self.model = _hrnet(hrnet_name, pretrained=True, progress=True, num_classes=num_classes)
        if dropout:
            # Insert dropout layers after the ReLU activations.
            add_dropout(model=self)
        # Swap BatchNorm layers for self-adaptive normalization layers.
        replace_batchnorm(self, alpha=alpha, update_source_bn=update_source_bn)
    def forward(self,
                img: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Args:
            img: Batch of input images
        Returns:
            output:
                'pred': Segmentation output of images
        """
        logits = self.model(img)
        # Bring the 1/4-resolution logits back to the input size.
        logits = torch.nn.functional.interpolate(
            logits, size=img.shape[2:], mode='bilinear', align_corners=True)
        return {'pred': logits}
def hrnet18(backbone_name: str = None,
            num_classes: int = 19,
            alpha: float = 0.5,
            update_source_bn: bool = True,
            dropout: bool = False):
    """Factory for an HRNet-W18 segmentation model (``backbone_name`` is unused)."""
    return HRNet(
        "hrnet18",
        num_classes=num_classes,
        dropout=dropout,
        alpha=alpha,
        update_source_bn=update_source_bn,
    )
def hrnet32(backbone_name: str = None,
            num_classes: int = 19,
            alpha: float = 0.5,
            update_source_bn: bool = True,
            dropout: bool = False):
    """Factory for an HRNet-W32 segmentation model (``backbone_name`` is unused)."""
    return HRNet(
        "hrnet32",
        num_classes=num_classes,
        dropout=dropout,
        alpha=alpha,
        update_source_bn=update_source_bn,
    )
def hrnet48(backbone_name: str = None,
            num_classes: int = 19,
            alpha: float = 0.5,
            update_source_bn: bool = True,
            dropout: bool = False):
    """Factory for an HRNet-W48 segmentation model (``backbone_name`` is unused)."""
    return HRNet(
        "hrnet48",
        num_classes=num_classes,
        dropout=dropout,
        alpha=alpha,
        update_source_bn=update_source_bn,
    )
| 23,160
| 38.322581
| 268
|
py
|
self-adaptive
|
self-adaptive-master/models/hrnet_config.py
|
"""Source: https://github.com/HRNet/HRNet-Semantic-Segmentation"""
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Create by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Ke Sun (sunk@mail.ustc.edu.cn), Rainbowsecret (yuyua@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from yacs.config import CfgNode as CN
# Per-variant HRNet stage configurations. Each STAGEn node lists, per stage:
#   NUM_MODULES  - how many HighResolutionModules are stacked,
#   NUM_BRANCHES - parallel resolution branches,
#   NUM_BLOCKS   - residual blocks per branch,
#   NUM_CHANNELS - channels per branch,
#   BLOCK        - 'BASIC' or 'BOTTLENECK' (see models.hrnet.blocks_dict),
#   FUSE_METHOD  - branch fusion mode ('SUM' everywhere here).
# configs for HRNet48
HRNET_48 = CN()
HRNET_48.FINAL_CONV_KERNEL = 1
HRNET_48.STAGE1 = CN()
HRNET_48.STAGE1.NUM_MODULES = 1
HRNET_48.STAGE1.NUM_BRANCHES = 1
HRNET_48.STAGE1.NUM_BLOCKS = [4]
HRNET_48.STAGE1.NUM_CHANNELS = [64]
HRNET_48.STAGE1.BLOCK = 'BOTTLENECK'
HRNET_48.STAGE1.FUSE_METHOD = 'SUM'
HRNET_48.STAGE2 = CN()
HRNET_48.STAGE2.NUM_MODULES = 1
HRNET_48.STAGE2.NUM_BRANCHES = 2
HRNET_48.STAGE2.NUM_BLOCKS = [4, 4]
HRNET_48.STAGE2.NUM_CHANNELS = [48, 96]
HRNET_48.STAGE2.BLOCK = 'BASIC'
HRNET_48.STAGE2.FUSE_METHOD = 'SUM'
HRNET_48.STAGE3 = CN()
HRNET_48.STAGE3.NUM_MODULES = 4
HRNET_48.STAGE3.NUM_BRANCHES = 3
HRNET_48.STAGE3.NUM_BLOCKS = [4, 4, 4]
HRNET_48.STAGE3.NUM_CHANNELS = [48, 96, 192]
HRNET_48.STAGE3.BLOCK = 'BASIC'
HRNET_48.STAGE3.FUSE_METHOD = 'SUM'
HRNET_48.STAGE4 = CN()
HRNET_48.STAGE4.NUM_MODULES = 3
HRNET_48.STAGE4.NUM_BRANCHES = 4
HRNET_48.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
HRNET_48.STAGE4.NUM_CHANNELS = [48, 96, 192, 384]
HRNET_48.STAGE4.BLOCK = 'BASIC'
HRNET_48.STAGE4.FUSE_METHOD = 'SUM'
# configs for HRNet32
HRNET_32 = CN()
HRNET_32.FINAL_CONV_KERNEL = 1
HRNET_32.STAGE1 = CN()
HRNET_32.STAGE1.NUM_MODULES = 1
HRNET_32.STAGE1.NUM_BRANCHES = 1
HRNET_32.STAGE1.NUM_BLOCKS = [4]
HRNET_32.STAGE1.NUM_CHANNELS = [64]
HRNET_32.STAGE1.BLOCK = 'BOTTLENECK'
HRNET_32.STAGE1.FUSE_METHOD = 'SUM'
HRNET_32.STAGE2 = CN()
HRNET_32.STAGE2.NUM_MODULES = 1
HRNET_32.STAGE2.NUM_BRANCHES = 2
HRNET_32.STAGE2.NUM_BLOCKS = [4, 4]
HRNET_32.STAGE2.NUM_CHANNELS = [32, 64]
HRNET_32.STAGE2.BLOCK = 'BASIC'
HRNET_32.STAGE2.FUSE_METHOD = 'SUM'
HRNET_32.STAGE3 = CN()
HRNET_32.STAGE3.NUM_MODULES = 4
HRNET_32.STAGE3.NUM_BRANCHES = 3
HRNET_32.STAGE3.NUM_BLOCKS = [4, 4, 4]
HRNET_32.STAGE3.NUM_CHANNELS = [32, 64, 128]
HRNET_32.STAGE3.BLOCK = 'BASIC'
HRNET_32.STAGE3.FUSE_METHOD = 'SUM'
HRNET_32.STAGE4 = CN()
HRNET_32.STAGE4.NUM_MODULES = 3
HRNET_32.STAGE4.NUM_BRANCHES = 4
HRNET_32.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
HRNET_32.STAGE4.NUM_CHANNELS = [32, 64, 128, 256]
HRNET_32.STAGE4.BLOCK = 'BASIC'
HRNET_32.STAGE4.FUSE_METHOD = 'SUM'
# configs for HRNet18
HRNET_18 = CN()
HRNET_18.FINAL_CONV_KERNEL = 1
HRNET_18.STAGE1 = CN()
HRNET_18.STAGE1.NUM_MODULES = 1
HRNET_18.STAGE1.NUM_BRANCHES = 1
HRNET_18.STAGE1.NUM_BLOCKS = [4]
HRNET_18.STAGE1.NUM_CHANNELS = [64]
HRNET_18.STAGE1.BLOCK = 'BOTTLENECK'
HRNET_18.STAGE1.FUSE_METHOD = 'SUM'
HRNET_18.STAGE2 = CN()
HRNET_18.STAGE2.NUM_MODULES = 1
HRNET_18.STAGE2.NUM_BRANCHES = 2
HRNET_18.STAGE2.NUM_BLOCKS = [4, 4]
HRNET_18.STAGE2.NUM_CHANNELS = [18, 36]
HRNET_18.STAGE2.BLOCK = 'BASIC'
HRNET_18.STAGE2.FUSE_METHOD = 'SUM'
HRNET_18.STAGE3 = CN()
HRNET_18.STAGE3.NUM_MODULES = 4
HRNET_18.STAGE3.NUM_BRANCHES = 3
HRNET_18.STAGE3.NUM_BLOCKS = [4, 4, 4]
HRNET_18.STAGE3.NUM_CHANNELS = [18, 36, 72]
HRNET_18.STAGE3.BLOCK = 'BASIC'
HRNET_18.STAGE3.FUSE_METHOD = 'SUM'
HRNET_18.STAGE4 = CN()
HRNET_18.STAGE4.NUM_MODULES = 3
HRNET_18.STAGE4.NUM_BRANCHES = 4
HRNET_18.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
HRNET_18.STAGE4.NUM_CHANNELS = [18, 36, 72, 144]
HRNET_18.STAGE4.BLOCK = 'BASIC'
HRNET_18.STAGE4.FUSE_METHOD = 'SUM'
# Lookup used by models.hrnet._hrnet to pick a variant by name.
MODEL_CONFIGS = {
    'hrnet18': HRNET_18,
    'hrnet32': HRNET_32,
    'hrnet48': HRNET_48,
}
| 3,735
| 27.519084
| 81
|
py
|
self-adaptive
|
self-adaptive-master/models/deeplabv3.py
|
"""Source: https://github.com/VainF/DeepLabV3Plus-Pytorch"""
from torch import nn
from torch.nn import functional as F
import torch
from typing import Dict
from collections import OrderedDict
from utils.dropout import add_dropout
from utils.self_adapt_norm import replace_batchnorm
from models.backbone_v3 import resnet
__all__ = ["DeepLabV3"]
class _SimpleSegmentationModel(nn.Module):
def __init__(self, backbone, classifier):
super(_SimpleSegmentationModel, self).__init__()
self.backbone = backbone
self.classifier = classifier
def forward(self, x):
input_shape = x.shape[-2:]
features = self.backbone(x)
x = self.classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
return x
class IntermediateLayerGetter(nn.ModuleDict):
    """
    Module wrapper that returns intermediate layers from a model.

    It strongly assumes modules were registered on the model in the same
    order they are used in forward, and that each module is used only once.
    Only direct children of ``model`` can be queried (``model.feature1``
    works, ``model.feature1.layer2`` does not). Children after the last
    requested layer are dropped entirely.

    Arguments:
        model (nn.Module): model on which we will extract the features
        return_layers (Dict[name, new_name]): maps a child-module name to
            the key under which its activation is returned.

    Examples::
        >>> m = torchvision.models.resnet18(pretrained=True)
        >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
        >>>     {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = new_m(torch.rand(1, 3, 224, 224))
        >>> [(k, v.shape) for k, v in out.items()]
        [('feat1', torch.Size([1, 64, 56, 56])),
         ('feat2', torch.Size([1, 256, 14, 14]))]
    """

    def __init__(self, model, return_layers):
        child_names = [name for name, _ in model.named_children()]
        if not set(return_layers).issubset(child_names):
            raise ValueError("return_layers are not present in model")
        # Keep children up to (and including) the last requested layer.
        pending = {name: alias for name, alias in return_layers.items()}
        kept = OrderedDict()
        for name, module in model.named_children():
            kept[name] = module
            pending.pop(name, None)
            if not pending:
                break
        super(IntermediateLayerGetter, self).__init__(kept)
        self.return_layers = return_layers

    def forward(self, x):
        collected = OrderedDict()
        # Run the retained children in order, recording requested outputs.
        for name, module in self.named_children():
            x = module(x)
            if name in self.return_layers:
                collected[self.return_layers[name]] = x
        return collected
class DeepLabV3(_SimpleSegmentationModel):
    """
    Implements DeepLabV3 model from
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    All behavior (backbone -> classifier -> bilinear upsampling) is
    inherited from _SimpleSegmentationModel; this subclass only names it.

    Arguments:
        backbone (nn.Module): the network used to compute the features for the model.
            The backbone should return an OrderedDict[Tensor], with the key being
            "out" for the last feature map used, and "aux" if an auxiliary classifier
            is used.
        classifier (nn.Module): module that takes the "out" element returned from
            the backbone and returns a dense prediction.
        aux_classifier (nn.Module, optional): auxiliary classifier used during training
    """
    pass
class DeepLabHeadV3Plus(nn.Module):
    """DeepLabV3+ decoder head: projects low-level features to 48 channels
    and fuses them with upsampled ASPP output before classification."""

    def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
        super(DeepLabHeadV3Plus, self).__init__()
        # 1x1 projection of the backbone's low-level features.
        self.project = nn.Sequential(
            nn.Conv2d(low_level_channels, 48, 1, bias=False),
            nn.BatchNorm2d(48),
            nn.ReLU(inplace=True),
        )
        self.aspp = ASPP(in_channels, aspp_dilate)
        # 48 (projection) + 256 (ASPP) = 304 fused input channels.
        self.classifier = nn.Sequential(
            nn.Conv2d(304, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, num_classes, 1)
        )
        self._init_weight()

    def forward(self, feature):
        shallow = self.project(feature['low_level'])
        deep = self.aspp(feature['out'])
        # Bring the ASPP output up to the low-level feature resolution.
        deep = F.interpolate(deep, size=shallow.shape[2:], mode='bilinear',
                             align_corners=False)
        return self.classifier(torch.cat([shallow, deep], dim=1))

    def _init_weight(self):
        # Kaiming init for convs, unit scale / zero shift for norm layers.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
class DeepLabHead(nn.Module):
    """Plain DeepLabV3 head: ASPP followed by a 3x3 conv block and a 1x1
    classifier producing ``num_classes`` logit channels."""
    def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
        super(DeepLabHead, self).__init__()
        self.classifier = nn.Sequential(
            ASPP(in_channels, aspp_dilate),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, num_classes, 1)
        )
        self._init_weight()
    def forward(self, feature):
        # ``feature`` is a dict; only the 'out' entry (last backbone map) is used.
        return self.classifier(feature['out'])
    def _init_weight(self):
        # Kaiming init for convs; BN/GN to identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
class AtrousSeparableConvolution(nn.Module):
    """Atrous Separable Convolution.

    Replaces a dense Conv2d with a depthwise (groups=in_channels) k x k
    convolution followed by a pointwise 1x1 convolution with the same
    overall configuration.
    """
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, bias=True):
        super(AtrousSeparableConvolution, self).__init__()
        self.body = nn.Sequential(
            # Separable (depthwise) Conv
            nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding,
                      dilation=dilation, bias=bias, groups=in_channels),
            # PointWise Conv
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
        )
        self._init_weight()
    def forward(self, x):
        return self.body(x)
    def _init_weight(self):
        # Kaiming init for convs; BN/GN to identity (no norm layers exist in
        # ``body`` itself, but submodules added later would be covered).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
class ASPPConv(nn.Sequential):
    """One dilated-convolution branch of ASPP: 3x3 atrous conv -> BN -> ReLU."""

    def __init__(self, in_channels, out_channels, dilation):
        # padding == dilation keeps the spatial size of the input unchanged.
        super(ASPPConv, self).__init__(
            nn.Conv2d(in_channels, out_channels, 3,
                      padding=dilation, dilation=dilation, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )
class ASPPPooling(nn.Sequential):
    """Image-level pooling branch of ASPP: global average pool -> 1x1 conv ->
    BN -> ReLU, then bilinear upsampling back to the input spatial size so the
    result can be concatenated with the convolutional branches."""
    def __init__(self, in_channels, out_channels):
        super(ASPPPooling, self).__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True))
    def forward(self, x):
        # Remember the input resolution before pooling collapses it to 1x1.
        size = x.shape[-2:]
        x = super(ASPPPooling, self).forward(x)
        return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling.

    Five parallel branches — a 1x1 conv, three 3x3 atrous convs with the given
    rates, and an image-level pooling branch — each producing 256 channels.
    Branch outputs are concatenated (5 * 256 channels) and projected back to
    256 channels with dropout.
    """
    def __init__(self, in_channels, atrous_rates):
        super(ASPP, self).__init__()
        out_channels = 256
        modules = []
        # Branch 1: plain 1x1 conv.
        modules.append(nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)))
        # Branches 2-4: atrous convs at the three configured rates.
        rate1, rate2, rate3 = tuple(atrous_rates)
        modules.append(ASPPConv(in_channels, out_channels, rate1))
        modules.append(ASPPConv(in_channels, out_channels, rate2))
        modules.append(ASPPConv(in_channels, out_channels, rate3))
        # Branch 5: image-level pooling.
        modules.append(ASPPPooling(in_channels, out_channels))
        self.convs = nn.ModuleList(modules)
        # Fuse the concatenated branch outputs back to 256 channels.
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1), )
    def forward(self, x):
        res = []
        for conv in self.convs:
            res.append(conv(x))
        # Concatenate along the channel dimension, then project.
        res = torch.cat(res, dim=1)
        return self.project(res)
def convert_to_separable_conv(module):
    """Recursively replace every Conv2d with kernel size > 1 inside *module*
    by an :class:`AtrousSeparableConvolution` with the same configuration.

    NOTE(review): the replacement modules are freshly initialized — the
    original convolution weights are not copied over; confirm this matches
    the intended use (convert-then-train).

    Args:
        module: module tree to convert (converted in place for children,
            and the possibly-replaced root is returned).

    Returns:
        The converted module. (Bug fix: the original implementation had no
        return statement and therefore always handed callers ``None``.)
    """
    new_module = module
    if isinstance(module, nn.Conv2d) and module.kernel_size[0] > 1:
        new_module = AtrousSeparableConvolution(module.in_channels,
                                                module.out_channels,
                                                module.kernel_size,
                                                module.stride,
                                                module.padding,
                                                module.dilation,
                                                module.bias)
    for name, child in module.named_children():
        new_module.add_module(name, convert_to_separable_conv(child))
    return new_module
def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
    """Build a DeepLab segmentation model on a ResNet backbone.

    Args:
        name: architecture, 'deeplabv3' or 'deeplabv3plus'.
        backbone_name: attribute looked up in the ``resnet`` module (e.g. 'resnet101').
        num_classes: number of output logit channels.
        output_stride: 8 selects heavier dilation and ASPP rates [12, 24, 36];
            any other value uses dilation only in the last stage with rates [6, 12, 18].
        pretrained_backbone: load ImageNet weights into the backbone.

    Returns:
        A :class:`DeepLabV3` model wrapping the backbone and chosen head.

    Raises:
        ValueError: for an unknown ``name`` (previously this fell through and
            crashed later with a NameError on ``return_layers``).
    """
    if output_stride == 8:
        replace_stride_with_dilation = [False, True, True]
        aspp_dilate = [12, 24, 36]
    else:
        replace_stride_with_dilation = [False, False, True]
        aspp_dilate = [6, 12, 18]
    backbone = resnet.__dict__[backbone_name](
        pretrained=pretrained_backbone,
        replace_stride_with_dilation=replace_stride_with_dilation)
    # Channel counts of a Bottleneck-based ResNet: layer4 -> 2048, layer1 -> 256.
    inplanes = 2048
    low_level_planes = 256
    if name == 'deeplabv3plus':
        return_layers = {'layer4': 'out', 'layer1': 'low_level'}
        classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
    elif name == 'deeplabv3':
        return_layers = {'layer4': 'out'}
        classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)
    else:
        raise ValueError("unknown segmentation architecture: {}".format(name))
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
    model = DeepLabV3(backbone, classifier)
    return model
def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone):
    """Dispatch model construction by backbone family.

    Only ResNet backbones are supported; any other backbone name raises
    ``NotImplementedError``.
    """
    if not backbone.startswith('resnet'):
        raise NotImplementedError
    return _segm_resnet(arch_type, backbone, num_classes,
                        output_stride=output_stride,
                        pretrained_backbone=pretrained_backbone)
class DeepLabV3Plus(torch.nn.Module):
    """Wrapper around the DeepLabV3+ model with optional dropout insertion and
    self-adaptive-normalization (SaN) BatchNorm replacement."""
    def __init__(self,
                 backbone_name: str,
                 num_classes: int = 19,
                 dropout: bool = False,
                 alpha: float = 0.0,
                 update_source_bn: bool = True):
        super(DeepLabV3Plus, self).__init__()
        # output_stride=8 selects the heavily dilated backbone configuration.
        self.model = _load_model('deeplabv3plus', backbone_name, num_classes, output_stride=8, pretrained_backbone=True)
        # Add dropout layers after relu
        if dropout:
            add_dropout(model=self)
        # Replace BN layers with SaN layers
        replace_batchnorm(self, alpha=alpha, update_source_bn=update_source_bn)
    def forward(self,
                img: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Args:
            img: Batch of input images
        Returns:
            output:
                'pred': Segmentation output of images
        """
        # Create output dict of forward pass
        output_dict = {}
        # Compute probabilities for semantic classes
        if self.training and img.shape[0] == 1:
            # NOTE(review): a single-sample training batch is duplicated and
            # only the first result kept — presumably so BatchNorm sees a
            # batch of 2; confirm intent.
            output_dict['pred'] = self.model(torch.cat((img, img), dim=0))[0].unsqueeze(0)
        else:
            output_dict['pred'] = self.model(img)
        return output_dict
def deeplabv3plus(backbone_name: str,
                  num_classes: int = 19,
                  alpha: float = 0.5,
                  update_source_bn: bool = True,
                  dropout: bool = False):
    """Factory for a configured :class:`DeepLabV3Plus` instance."""
    model = DeepLabV3Plus(backbone_name, num_classes, dropout,
                          alpha=alpha,
                          update_source_bn=update_source_bn)
    return model
| 12,863
| 36.614035
| 120
|
py
|
self-adaptive
|
self-adaptive-master/models/deeplab.py
|
import torch
from typing import Dict
from utils.dropout import add_dropout
from utils.self_adapt_norm import replace_batchnorm
import models.backbone
class DeepLab(torch.nn.Module):
    """Simple DeepLab-style segmenter: backbone features -> 1x1 classifier ->
    bilinear upsampling to the input resolution. Supports optional dropout
    insertion and self-adaptive-normalization (SaN) BatchNorm replacement."""
    def __init__(self,
                 backbone_name: str,
                 num_classes: int = 19,
                 dropout: bool = False,
                 alpha: float = 0.0,
                 update_source_bn: bool = True):
        super(DeepLab, self).__init__()
        # Backbone is looked up by name in models.backbone (e.g. 'resnet50').
        self.backbone = models.backbone.__dict__[backbone_name](pretrained=True)
        # Initialize classification head (1x1 conv over backbone channels).
        self.cls_head = torch.nn.Conv2d(
            self.backbone.out_channels, num_classes, kernel_size=1, stride=1, padding=0
        )
        torch.nn.init.normal_(self.cls_head.weight.data, mean=0, std=0.01)
        torch.nn.init.constant_(self.cls_head.bias.data, 0.0)
        # Variable image size during forward pass (set per batch in forward()).
        self.img_size = None
        # Add dropout layers after relu
        if dropout:
            add_dropout(model=self)
        # Replace BN layers with SaN layers
        replace_batchnorm(self, alpha=alpha, update_source_bn=update_source_bn)
    def forward(self,
                img: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Args:
            img: Batch of input images
        Returns:
            output:
                'backbone': Output features of backbone
                'pred': Segmentation output of images
        """
        # Create output dict of forward pass
        output_dict = {}
        # Set image output size so the logits can be upsampled to match.
        self.img_size = img.shape[2:]
        # Compute probabilities for semantic classes at stride 8
        x = self.backbone(img)
        output_dict['backbone'] = x
        # Compute output logits
        x = self._backbone_to_logits(x)
        output_dict['pred'] = x
        return output_dict
    def _backbone_to_logits(self,
                            x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Backbone output features
        Returns:
            x: Upsampled semantic segmentation logits
        """
        # Compute class logits
        x = self.cls_head(x)
        # Bilinear upsampling to full resolution (uses size stored by forward()).
        x = torch.nn.functional.interpolate(x,
                                            size=self.img_size,
                                            mode='bilinear',
                                            align_corners=True)
        return x
def deeplab(backbone_name: str,
            num_classes: int = 19,
            alpha: float = 0.5,
            update_source_bn: bool = True,
            dropout: bool = False):
    """Factory for a configured :class:`DeepLab` instance."""
    model = DeepLab(backbone_name, num_classes, dropout,
                    alpha=alpha,
                    update_source_bn=update_source_bn)
    return model
| 2,839
| 29.869565
| 87
|
py
|
self-adaptive
|
self-adaptive-master/models/__init__.py
|
from models.deeplab import deeplab
from models.deeplabv3 import deeplabv3plus
from models.hrnet import hrnet18, hrnet32, hrnet48
| 129
| 31.5
| 50
|
py
|
self-adaptive
|
self-adaptive-master/models/backbone/resnet.py
|
'''
Source: torchvision
'''
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
# __all__ = {'resnet18': resnet18, 'resnet50': resnet50}
# Download URLs for ImageNet-pretrained checkpoints (torchvision releases),
# keyed by architecture name as used in _resnet().
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution, bias-free; padding equals dilation so the spatial
    size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution, bias-free (channel projection / downsampling)."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style): conv3x3 -> BN -> ReLU ->
    conv3x3 -> BN, plus the (optionally downsampled) identity shortcut."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Match the shortcut's shape/channels to the residual branch.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style)."""
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the 3x3 conv; scales with base_width and groups (ResNeXt/Wide).
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Match the shortcut's shape/channels to the residual branch.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet trunk modified for dense prediction.

    NOTE(review): unlike stock torchvision, layer3/layer4 are built with
    stride 1 and dilation 2/4, so the trunk's spatial output stride is 8.
    ``forward`` returns the layer4 feature map directly (no pooling / fc),
    although ``avgpool`` and ``fc`` are still constructed. The
    ``replace_stride_with_dilation`` argument is validated but effectively
    ignored by ``_make_layer`` (the dilate branch is commented out) —
    confirm this is intentional.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max pool (stride 4 total).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # layer3/layer4 keep stride 1 and dilate instead -> output stride 8.
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1, dilation=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilation=1)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Channel count of the last Conv2d anywhere in the network; used by
        # segmentation heads built on top of this trunk.
        self.out_channels = list(module
                                 for module in self.modules()
                                 if isinstance(module, torch.nn.Conv2d))[-1].out_channels
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilation=False):
        """Build one residual stage. NOTE(review): the first block always
        receives ``previous_dilation`` (constant 1 here), while the remaining
        blocks receive the stage's ``dilation`` argument — confirm this is
        the intended dilation pattern."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        #if dilate:
        #    self.dilation *= stride
        #    stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x):
        # See note [TorchScript super()]
        # Returns the layer4 feature map; avgpool/fc are intentionally skipped.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
    def forward(self, x):
        return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Instantiate a ResNet and optionally load its pretrained weights.

    Args:
        arch: key into ``model_urls`` for the checkpoint.
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks per stage.
        pretrained: download and load ImageNet weights when True.
        progress: show a download progress bar.
    """
    net = ResNet(block, layers, **kwargs)
    if pretrained:
        weights = load_state_dict_from_url(model_urls[arch], progress=progress)
        net.load_state_dict(weights)
    return net
def resnet101(pretrained=False,
              progress=True,
              **kwargs):
    """ResNet-101 (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    block_counts = [3, 4, 23, 3]
    return _resnet('resnet101', Bottleneck, block_counts, pretrained, progress, **kwargs)
def resnet50(pretrained=False,
             progress=True,
             **kwargs):
    """ResNet-50 (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    block_counts = [3, 4, 6, 3]
    return _resnet('resnet50', Bottleneck, block_counts, pretrained, progress, **kwargs)
def resnet18(pretrained=False, progress=True, **kwargs):
    """ResNet-18 (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    block_counts = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, block_counts, pretrained, progress, **kwargs)
| 10,374
| 38.150943
| 106
|
py
|
self-adaptive
|
self-adaptive-master/models/backbone/__init__.py
|
from models.backbone.resnet import *
| 36
| 36
| 36
|
py
|
self-adaptive
|
self-adaptive-master/models/backbone_v3/resnet.py
|
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
# Public API of this torchvision-derived module.
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
           'wide_resnet50_2', 'wide_resnet101_2']
# Download URLs for ImageNet-pretrained checkpoints (torchvision releases),
# keyed by architecture name as used in _resnet().
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Bias-free 3x3 convolution whose padding tracks the dilation rate."""
    pad = dilation
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=pad, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 convolution (pure channel projection)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Standard torchvision two-conv residual block (ResNet-18/34)."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Match the shortcut's shape/channels to the residual branch.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Standard torchvision 1x1 -> 3x3 -> 1x1 bottleneck block (ResNet-50+);
    the stride sits on the 3x3 conv (ResNet V1.5 variant)."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the 3x3 conv; scales with base_width and groups (ResNeXt/Wide).
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Match the shortcut's shape/channels to the residual branch.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Standard torchvision ResNet classifier.

    ``replace_stride_with_dilation`` swaps the stride-2 downsampling of
    layer2/3/4 for dilated convolutions (used by segmentation backbones to
    raise the output resolution). ``forward`` returns class logits.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage; when ``dilate`` is set, the stage's
        stride is converted into an equivalent increase in dilation."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when shape or channel count changes.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Construct a ResNet and optionally load ImageNet weights for *arch*."""
    net = ResNet(block, layers, **kwargs)
    if pretrained:
        weights = load_state_dict_from_url(model_urls[arch], progress=progress)
        net.load_state_dict(weights)
    return net
def resnet18(pretrained=False, progress=True, **kwargs):
    """ResNet-18 (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    block_counts = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, block_counts, pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
    """ResNet-34 (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    block_counts = [3, 4, 6, 3]
    return _resnet('resnet34', BasicBlock, block_counts, pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
    """ResNet-50 (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    block_counts = [3, 4, 6, 3]
    return _resnet('resnet50', Bottleneck, block_counts, pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
    """ResNet-101 (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    block_counts = [3, 4, 23, 3]
    return _resnet('resnet101', Bottleneck, block_counts, pretrained, progress, **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
    """ResNet-152 (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    block_counts = [3, 8, 36, 3]
    return _resnet('resnet152', Bottleneck, block_counts, pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    """ResNeXt-50 32x4d (https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    # Cardinality 32 with 4-channel-wide groups.
    kwargs.update(groups=32, width_per_group=4)
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    """ResNeXt-101 32x8d (https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    # Cardinality 32 with 8-channel-wide groups.
    kwargs.update(groups=32, width_per_group=8)
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    """Wide ResNet-50-2 (https://arxiv.org/pdf/1605.07146.pdf).

    Same as ResNet-50 except that every bottleneck's 3x3 conv is twice as
    wide; the outer 1x1 channel counts are unchanged (e.g. the last block
    stays 2048-in/2048-out but uses 1024 internal channels).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    """Wide ResNet-101-2 (https://arxiv.org/pdf/1605.07146.pdf).

    Same as ResNet-101 except that every bottleneck's 3x3 conv is twice as
    wide; the outer 1x1 channel counts are unchanged.

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr.
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
| 13,547
| 39.807229
| 107
|
py
|
self-adaptive
|
self-adaptive-master/datasets/labels.py
|
import torch
from collections import namedtuple
from cityscapesscripts.helpers.labels import labels as cs_labels
from cityscapesscripts.helpers.labels import Label
synthia_cs_labels = [
# name id trainId category catId hasInstances ignoreInEval color
Label('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
Label('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
Label('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
Label('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
Label('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
Label('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
Label('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
Label('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
Label('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
Label('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
Label('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
Label('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
Label('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
Label('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
Label('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
Label('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
Label('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
Label('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
Label('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
Label('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
Label('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
Label('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
Label('terrain', 22, 255, 'nature', 4, False, True, (152, 251, 152)),
# Removed because not present in Synthia dataset
Label('sky', 23, 9, 'sky', 5, False, False, (70, 130, 180)),
Label('person', 24, 10, 'human', 6, True, False, (220, 20, 60)),
Label('rider', 25, 11, 'human', 6, True, False, (255, 0, 0)),
Label('car', 26, 12, 'vehicle', 7, True, False, (0, 0, 142)),
Label('truck', 27, 255, 'vehicle', 7, True, True, (0, 0, 70)), # Removed because not present in Synthia dataset
Label('bus', 28, 13, 'vehicle', 7, True, False, (0, 60, 100)),
Label('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
Label('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
Label('train', 31, 255, 'vehicle', 7, True, True, (0, 80, 100)), # Removed because not present in Synthia dataset
Label('motorcycle', 32, 14, 'vehicle', 7, True, False, (0, 0, 230)),
Label('bicycle', 33, 15, 'vehicle', 7, True, False, (119, 11, 32)),
Label('license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)),
]
synthia_bdd_labels = [
Label('unlabeled', 255, 255, 'void', 0, False, True, (0, 0, 0)),
Label('dynamic', 255, 255, 'void', 0, False, True, (111, 74, 0)),
Label('ego vehicle', 255, 255, 'void', 0, False, True, (0, 0, 0)),
Label('ground', 255, 255, 'void', 0, False, True, (81, 0, 81)),
Label('static', 255, 255, 'void', 0, False, True, (0, 0, 0)),
Label('parking', 255, 255, 'flat', 1, False, True, (250, 170, 160)),
Label('rail track', 255, 255, 'flat', 1, False, True, (230, 150, 140)),
Label('road', 0, 0, 'flat', 1, False, False, (128, 64, 128)),
Label('sidewalk', 1, 1, 'flat', 1, False, False, (244, 35, 232)),
Label('bridge', 255, 255, 'construction', 2, False, True, (150, 100, 100)),
Label('building', 2, 2, 'construction', 2, False, False, (70, 70, 70)),
Label('wall', 3, 3, 'construction', 2, False, False, (102, 102, 156)),
Label('fence', 4, 4, 'construction', 2, False, False, (190, 153, 153)),
Label('garage', 255, 255, 'construction', 2, False, True, (180, 100, 180)),
Label('guard rail', 255, 255, 'construction', 2, False, True, (180, 165, 180)),
Label('tunnel', 255, 255, 'construction', 2, False, True, (150, 120, 90)),
Label('banner', 255, 255, 'object', 3, False, True, (250, 170, 100)),
Label('billboard', 255, 255, 'object', 3, False, True, (220, 220, 250)),
Label('lane divider', 255, 255, 'object', 3, False, True, (255, 165, 0)),
Label('parking sign', 255, 255, 'object', 3, False, False, (220, 20, 60)),
Label('pole', 5, 5, 'object', 3, False, False, (153, 153, 153)),
Label('polegroup', 255, 255, 'object', 3, False, True, (153, 153, 153)),
Label('street light', 255, 255, 'object', 3, False, True, (220, 220, 100)),
Label('traffic cone', 255, 255, 'object', 3, False, True, (255, 70, 0)),
Label('traffic device', 255, 255, 'object', 3, False, True, (220, 220, 220)),
Label('traffic light', 6, 6, 'object', 3, False, False, (250, 170, 30)),
Label('traffic sign', 7, 7, 'object', 3, False, False, (220, 220, 0)),
Label('traffic sign frame', 255, 255, 'object', 3, False, True, (250, 170, 250)),
Label('vegetation', 8, 8, 'nature', 4, False, False, (107, 142, 35)),
Label('terrain', 9, 255, 'nature', 4, False, True, (152, 251, 152)), # Removed from dataset
Label('sky', 10, 9, 'sky', 5, False, False, (70, 130, 180)),
Label('person', 11, 10, 'human', 6, True, False, (220, 20, 60)),
Label('rider', 12, 11, 'human', 6, True, False, (255, 0, 0)),
Label('car', 13, 12, 'vehicle', 7, True, False, (0, 0, 142)),
Label('bus', 15, 13, 'vehicle', 7, True, False, (0, 60, 100)),
Label('motorcycle', 17, 14, 'vehicle', 7, True, False, (0, 0, 230)),
Label('bicycle', 18, 15, 'vehicle', 7, True, False, (119, 11, 32)),
Label('caravan', 255, 255, 'vehicle', 7, True, True, (0, 0, 90)),
Label('trailer', 255, 255, 'vehicle', 7, True, True, (0, 0, 110)),
Label('truck', 14, 255, 'vehicle', 7, True, False, (0, 0, 70)),
Label('train', 16, 255, 'vehicle', 7, True, False, (0, 80, 100)),
]
# Lightweight label record for Synthia's native annotation ids; mirrors the
# fields of cityscapesscripts' Label that the conversion code actually reads.
SynthiaClass = namedtuple(
    "SynthiaClass",
    ["name", "id", "trainId", "ignoreInEval", "color"]
)
synthia_labels = [
SynthiaClass("road", 3, 0, False, (128, 64, 128)),
SynthiaClass("sidewalk", 4, 1, False, (244, 35, 232)),
SynthiaClass("building", 2, 2, False, (70, 70, 70)),
SynthiaClass("wall", 21, 3, False, (102, 102, 156)),
SynthiaClass("fence", 5, 4, False, (64, 64, 128)),
SynthiaClass("pole", 7, 5, False, (153, 153, 153)),
SynthiaClass("traffic light", 15, 6, False, (250, 170, 30)),
SynthiaClass("traffic sign", 9, 7, False, (220, 220, 0)),
SynthiaClass("vegetation", 6, 8, False, (107, 142, 35)),
SynthiaClass("terrain", 16, 255, True, (152, 251, 152)),
SynthiaClass("sky", 1, 9, False, (70, 130, 180)),
SynthiaClass("pedestrian", 10, 10, False, (220, 20, 60)),
SynthiaClass("rider", 17, 11, False, (255, 0, 0)),
SynthiaClass("car", 8, 12, False, (0, 0, 142)),
SynthiaClass("truck", 18, 255, True, (0, 0, 70)),
SynthiaClass("bus", 19, 13, False, (0, 60, 100)),
SynthiaClass("train", 20, 255, True, (0, 80, 100)),
SynthiaClass("motorcycle", 12, 14, False, (0, 0, 230)),
SynthiaClass("bicycle", 11, 15, False, (119, 11, 32)),
SynthiaClass("void", 0, 255, True, (0, 0, 0)),
SynthiaClass("parking slot", 13, 255, True, (250, 170, 160)),
SynthiaClass("road-work", 14, 255, True, (128, 64, 64)),
SynthiaClass("lanemarking", 22, 255, True, (102, 102, 156))
]
# Minimal (dataset id -> training id) record for Mapillary label remapping.
MapillaryClass = namedtuple(
    "MapillaryClass",
    ["id", "trainId"]
)
mapillary_labels = [
MapillaryClass(13, 0),
MapillaryClass(24, 0),
MapillaryClass(41, 0),
MapillaryClass(2, 1),
MapillaryClass(15, 1),
MapillaryClass(17, 2),
MapillaryClass(6, 3),
MapillaryClass(3, 4),
MapillaryClass(45, 5),
MapillaryClass(47, 5),
MapillaryClass(48, 6),
MapillaryClass(50, 7),
MapillaryClass(30, 8),
MapillaryClass(29, 9),
MapillaryClass(27, 10),
MapillaryClass(19, 11),
MapillaryClass(20, 12),
MapillaryClass(21, 12),
MapillaryClass(22, 12),
MapillaryClass(55, 13),
MapillaryClass(61, 14),
MapillaryClass(54, 15),
MapillaryClass(58, 16),
MapillaryClass(57, 17),
MapillaryClass(52, 18),
]
mapillary_synthia_labels = [
MapillaryClass(13, 0),
MapillaryClass(24, 0),
MapillaryClass(41, 0),
MapillaryClass(2, 1),
MapillaryClass(15, 1),
MapillaryClass(17, 2),
MapillaryClass(6, 3),
MapillaryClass(3, 4),
MapillaryClass(45, 5),
MapillaryClass(47, 5),
MapillaryClass(48, 6),
MapillaryClass(50, 7),
MapillaryClass(30, 8),
MapillaryClass(29, 255), #terrain
MapillaryClass(27, 9),
MapillaryClass(19, 10),
MapillaryClass(20, 11),
MapillaryClass(21, 11),
MapillaryClass(22, 11),
MapillaryClass(55, 12),
MapillaryClass(61, 255), #truck
MapillaryClass(54, 13),
MapillaryClass(58, 255), #train
MapillaryClass(57, 14),
MapillaryClass(52, 15),
]
# Minimal (dataset id -> training id) record for WildDash label remapping.
WilddashClass = namedtuple(
    "WilddashClass",
    ["id", "trainId"]
)
wilddash_labels = [
WilddashClass(0, 255),
WilddashClass(1, 255),
WilddashClass(2, 255),
WilddashClass(3, 255),
WilddashClass(4, 255),
WilddashClass(5, 255),
WilddashClass(6, 255),
WilddashClass(7, 0),
WilddashClass(8, 1),
WilddashClass(9, 255),
WilddashClass(10, 255),
WilddashClass(11, 2),
WilddashClass(12, 3),
WilddashClass(13, 4),
WilddashClass(14, 255),
WilddashClass(15, 255),
WilddashClass(16, 255),
WilddashClass(17, 5),
WilddashClass(18, 255),
WilddashClass(19, 6),
WilddashClass(20, 7),
WilddashClass(21, 8),
WilddashClass(22, 9),
WilddashClass(23, 10),
WilddashClass(24, 11),
WilddashClass(25, 12),
WilddashClass(26, 13),
WilddashClass(27, 14),
WilddashClass(28, 15),
WilddashClass(29, 255),
WilddashClass(30, 255),
WilddashClass(31, 16),
WilddashClass(32, 17),
WilddashClass(33, 18),
WilddashClass(34, 13),
WilddashClass(35, 13),
WilddashClass(36, 255),
WilddashClass(37, 255),
WilddashClass(38, 0),
]
wilddash_synthia_labels = [
WilddashClass(0, 255),
WilddashClass(1, 255),
WilddashClass(2, 255),
WilddashClass(3, 255),
WilddashClass(4, 255),
WilddashClass(5, 255),
WilddashClass(6, 255),
WilddashClass(7, 0),
WilddashClass(8, 1),
WilddashClass(9, 255),
WilddashClass(10, 255),
WilddashClass(11, 2),
WilddashClass(12, 3),
WilddashClass(13, 4),
WilddashClass(14, 255),
WilddashClass(15, 255),
WilddashClass(16, 255),
WilddashClass(17, 5),
WilddashClass(18, 255),
WilddashClass(19, 6),
WilddashClass(20, 7),
WilddashClass(21, 8),
WilddashClass(22, 255), #terrain
WilddashClass(23, 9),
WilddashClass(24, 10),
WilddashClass(25, 11),
WilddashClass(26, 12),
WilddashClass(27, 255), #truck
WilddashClass(28, 13),
WilddashClass(29, 255),
WilddashClass(30, 255),
WilddashClass(31, 255), #train
WilddashClass(32, 14),
WilddashClass(33, 15),
WilddashClass(34, 12),
WilddashClass(35, 12),
WilddashClass(36, 255),
WilddashClass(37, 255),
WilddashClass(38, 0),
]
def convert_ids_to_trainids(gt: torch.Tensor,
                            source: str,
                            target: str) -> torch.Tensor:
    """Remap dataset label ids in ``gt`` to training ids.

    Args:
        gt: Ground truth tensor with labels from 0 to 34 / 0 to 33 and -1
        source: Name of source domain
        target: Name of target domain

    Returns:
        Tensor with labels from 0 to 18 and 255 for non-training ids.
    """
    from_gta = source == "gta"
    # Pick the lookup table matching the (source, target) domain pair.
    if target == "bdd":
        # BDD already stores train ids [0, 19] for GTA-trained models.
        if from_gta:
            return gt
        labels = synthia_bdd_labels
    elif target == "idd":
        # IDD likewise already stores train ids for GTA-trained models.
        if from_gta:
            return gt
        # NOTE: reuses the Synthia/BDD table (no dedicated Synthia/IDD table).
        labels = synthia_bdd_labels
    elif target == "synthia" and source == "synthia":
        labels = synthia_labels
    elif target == "mapillary":
        labels = mapillary_labels if from_gta else mapillary_synthia_labels
    elif target == "wilddash":
        labels = wilddash_labels if from_gta else wilddash_synthia_labels
    elif target in ["cityscapes", "gta"]:
        labels = cs_labels if from_gta else synthia_cs_labels
    else:
        raise ValueError(f"Target domain {target} unknown")
    # Remap into a fresh tensor initialised to the ignore index so unmapped
    # ids fall through to 255.
    remapped = torch.full_like(gt, 255)
    for entry in labels:
        src_id, train_id = entry.id, entry.trainId
        # License plate carries id -1 in cityscapesscripts; store it as 34/255.
        if src_id == -1:
            src_id, train_id = 34, 255
        remapped[gt == src_id] = train_id
    return remapped
def convert_trainids_to_ids(pred: torch.Tensor,
                            source: str,
                            target: str) -> torch.Tensor:
    """
    Args:
        pred: Prediction tensor whose label values are remapped in place
        source: Name of source domain
        target: Name of target domain
    Returns:
        pred: The same tensor with remapped label values
    """
    # Check if target domain is BDD, if true check source domain
    if target == "bdd":
        # Check if source domain is GTA, if true, return gt without conversion, because BDD has train_ids -> [0, 19]
        if source == "gta":
            return pred
        else:
            # If source domain is Synthia, use Synthia/BDD lookup table
            labels = synthia_bdd_labels
    # If target is not BDD, check source domain
    else:
        # If source domain is GTA, use standard CS lookup table
        if source == "gta":
            labels = cs_labels
        # If source domain is Synthia, use Synthia/CS lookup table
        else:
            labels = synthia_cs_labels
    # NOTE(review): despite the function name, this loop assigns
    # `trainId` where `pred == id` (the same direction as
    # convert_ids_to_trainids), only walking the table in reverse.  Because
    # the remap is in place and the id/trainId value ranges overlap, earlier
    # assignments can be remapped again by later table entries; the reversed
    # order looks deliberate, but confirm the intended direction.
    for cs_label in labels[::-1]:
        orig_id = cs_label.id
        new_id = cs_label.trainId
        # Manually set license plate to id 34 and trainId 255
        if orig_id == -1:
            orig_id = 34
            new_id = 255
        pred[pred == orig_id] = new_id
    return pred
| 15,789
| 38.673367
| 118
|
py
|
self-adaptive
|
self-adaptive-master/datasets/wilddash.py
|
import os
import torch
from PIL import Image
from typing import Callable, Optional, Tuple, List
class WilddashDataset(object):
    """
    Unzip the downloaded wd_public_02.zip to /path/to/wilddash

    Expected folder layout after unzipping:
        wilddash/
            images/*.jpg
            labels/*.png
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(WilddashDataset, self).__init__()
        self.split = split
        self.transforms = transforms
        img_root = os.path.join(root, "images")
        lbl_root = os.path.join(root, "labels")
        self.images, self.targets = [], []
        # Each label shares the image's base name, with a .png extension.
        for name in os.listdir(img_root):
            self.images.append(os.path.join(img_root, name))
            self.targets.append(os.path.join(lbl_root, name.replace(".jpg", ".png")))

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load one (image, label) pair, applying joint transforms if set."""
        img = Image.open(self.images[index]).convert('RGB')
        lbl = Image.open(self.targets[index])
        if self.transforms is not None:
            img, lbl = self.transforms(img, lbl)
        return img, lbl

    def __len__(self) -> int:
        return len(self.images)
def wilddash(root: str,
             split: str,
             transforms: List[Callable]):
    """Factory returning a WilddashDataset for the given root and split."""
    return WilddashDataset(root=root, split=split, transforms=transforms)
| 1,608
| 30.54902
| 88
|
py
|
self-adaptive
|
self-adaptive-master/datasets/cityscapes.py
|
import torchvision
from typing import Any, List, Callable
class CityscapesDataset(torchvision.datasets.Cityscapes):
    # Thin wrapper over torchvision's Cityscapes dataset that fixes the target
    # type to semantic-segmentation masks and installs a joint image/target
    # transform pipeline.
    def __init__(self,
                 transforms: List[Callable],
                 *args: Any,
                 **kwargs: Any):
        super(CityscapesDataset, self).__init__(*args,
                                                **kwargs,
                                                target_type="semantic")
        # NOTE(review): assigned after super().__init__, so this overrides any
        # `transforms` attribute the torchvision base class set up internally;
        # presumably intentional so the joint transforms are used — confirm.
        self.transforms = transforms
def cityscapes(root: str,
               split: str,
               transforms: List[Callable]):
    """Factory returning a CityscapesDataset for the given root and split."""
    return CityscapesDataset(root=root, split=split, transforms=transforms)
| 704
| 29.652174
| 71
|
py
|
self-adaptive
|
self-adaptive-master/datasets/idd.py
|
import os
from typing import Tuple, List, Callable, Optional
from PIL import Image
import torch
class IDDDataset(object):
    """
    Follow these steps to prepare the IDD dataset:
    - Unpack the downloaded dataset: tar -xf idd-segmentation.tar.gz -C /path/to/IDD_Segmentation/
    - Rename the directory from IDD_Segmentation to idd: mv /path/to/IDD_Segmentation /path/to/idd

    Create train IDs from polygon annotations:
    - wget https://github.com/AutoNUE/public-code/archive/refs/heads/master.zip
    - unzip master.zip -d iddscripts
    - export PYTHONPATH="${PYTHONPATH}:iddscripts/public-code-master/helpers/"
    - pip install -r iddscripts/public-code-master/requirements.txt
    - python iddscripts/public-code-master/preperation/createLabels.py --datadir /path/to/idd --id-type csTrainId --num-workers 1
    - rm -rf iddscripts

    Expected folder layout:
        idd/
            leftImg8bit/{train,test,val}/city/*.png
            gtFine/{train,test,val}/city/*.png
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(IDDDataset, self).__init__()
        self.mode = 'gtFine'
        self.images_dir = os.path.join(root, 'leftImg8bit', split)
        self.targets_dir = os.path.join(root, self.mode, split)
        self.split = split
        self.transforms = transforms
        self.images, self.targets = [], []
        # Samples are grouped into per-city subdirectories, Cityscapes-style.
        for city in os.listdir(self.images_dir):
            city_imgs = os.path.join(self.images_dir, city)
            city_targets = os.path.join(self.targets_dir, city)
            for img_name in os.listdir(city_imgs):
                label_name = img_name.split("_leftImg8bit.png")[0] + "_gtFine_labelcsTrainIds.png"
                self.images.append(os.path.join(city_imgs, img_name))
                self.targets.append(os.path.join(city_targets, label_name))

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load one (image, label) pair, applying joint transforms if set."""
        img = Image.open(self.images[index]).convert('RGB')
        lbl = Image.open(self.targets[index])
        if self.transforms is not None:
            img, lbl = self.transforms(img, lbl)
        return img, lbl

    def __len__(self) -> int:
        return len(self.images)
def idd(root: str,
        split: str,
        transforms: List[Callable]):
    """Factory returning an IDDDataset for the given root and split."""
    return IDDDataset(root=root, split=split, transforms=transforms)
| 2,650
| 37.42029
| 129
|
py
|
self-adaptive
|
self-adaptive-master/datasets/self_adapt_augment.py
|
import torchvision.transforms.functional as F
import torchvision.transforms as tf
from PIL import Image, ImageFilter
import torch
from typing import List, Any
import os
import datasets
from utils import transforms
class TrainTestAugDataset:
    # Wraps a target-domain dataset: __getitem__ additionally returns
    # augmented duplicates of each image (per-scale, plus flip / grayscale /
    # color jitter / blur / rotation), whose soft model outputs
    # create_pseudo_gt later fuses into a pseudo ground truth for
    # self-adaptive training or test-time augmentation.
    def __init__(self,
                 device,
                 source,
                 crop_size: List[int],
                 transforms_list: transforms.Compose = transforms.Compose([]),
                 only_inf: bool = False,
                 combined_augmentation: bool = True,
                 ignore_index: int = 255,
                 threshold: float = 0.7,
                 getinfo: bool = False,
                 tta: bool = False,
                 flip_all_augs: bool = False,
                 flips: bool = True,
                 scales: list = [1.0],
                 grayscale: bool = False,
                 colorjitter: bool = False,
                 gaussblur: bool = False,
                 rotation: bool = False,
                 rot_angle: int = 30,
                 jitter_factor: float = 0.4,
                 gauss_radius: float = 1.0,
                 *args: Any,
                 **kwargs: Any):
        # NOTE(review): `transforms_list` and `scales` are mutable default
        # arguments shared across instances; harmless as long as they are
        # never mutated — confirm.
        self.root = kwargs['root']
        self.device = device
        self.source = source
        # Target-domain name is derived from the dataset directory name.
        self.target = os.path.basename(self.root)
        # Instantiate the matching dataset factory (bdd, idd, ...) by name.
        self.dataset = datasets.__dict__[self.target](root=self.root,
                                                      split=kwargs['split'],
                                                      transforms=transforms_list)
        self.combined_augmentation=combined_augmentation
        self.dataset.transforms = transforms_list
        self.ignore_index = ignore_index
        self.threshold = threshold
        self.getinfo = getinfo
        self.tta = tta
        self.scales = scales
        self.flip_all_augs = flip_all_augs
        self.flips = flips
        self.grayscale = grayscale
        self.colorjitter = colorjitter
        self.gaussblur = gaussblur
        self.rotation = rotation
        self.rot_angle = int(rot_angle)
        self.jitter_factor = jitter_factor
        self.gauss_radius = gauss_radius
        # First entry None = the un-augmented duplicate; one extra entry per
        # enabled augmentation (used when combined_augmentation is off).
        self.augs = [None]
        if self.flips: self.augs.append("flip")
        if self.grayscale: self.augs.append("gray")
        if self.colorjitter: self.augs.append("jitter")
        if self.gaussblur: self.augs.append("gauss")
        if self.rotation: self.augs.append("rot")
        self.resize_image_pre = transforms.ImgResizePIL(crop_size)
        self.only_inf = only_inf

    def __getitem__(self, idx: int):
        # Returns (image, target, crop_imgs, transforms_list): crop_imgs are
        # the augmented duplicates and transforms_list records, per duplicate,
        # the parameters needed to undo the augmentation in create_pseudo_gt.
        image, target = self.dataset.__getitem__(idx)
        # Resize image
        image = self.resize_image_pre(image)
        crop_imgs = []
        transforms_list = []
        trans = transforms.Compose([transforms.ToTensor(),
                                    transforms.IdsToTrainIds(source=self.source, target=self.target),
                                    transforms.Normalize()])
        if self.only_inf:
            # Inference-only mode: no augmented duplicates are produced.
            image, target = trans(image, target)
            return image, target, [], []
        # Combined mode: exactly two duplicates per scale — the plain image
        # and one with every enabled augmentation applied at once.
        if self.combined_augmentation: self.augs = [None, None]
        for scale in self.scales:
            # NOTE(review): this loop variable shadows the method's `idx`
            # parameter — confusing but apparently intentional; confirm.
            for idx, aug in enumerate(self.augs):
                # Apply scaling
                i, j = 0, 0
                w, h = [int(i*scale) for i in image.size]
                crop_img = image.resize((w, h), Image.BILINEAR)
                # Additional augmentations on every duplicate of the scale
                flip_action, rot_action, gray_action, jitter_action, gauss_action = False, False, False, False, False
                if self.flip_all_augs and idx != 0:
                    flip_action = True
                    crop_img = F.hflip(crop_img)
                if self.combined_augmentation and idx == 1:
                    if self.flips:
                        flip_action = True
                        crop_img = F.hflip(crop_img)
                    if self.rotation:
                        rot_action = True
                        crop_img = F.rotate(crop_img, angle=self.rot_angle, expand=True, fill=0)
                    if self.colorjitter:
                        jitter_action = True
                        crop_img = tf.ColorJitter(brightness=self.jitter_factor,
                                                  contrast=self.jitter_factor,
                                                  saturation=self.jitter_factor,
                                                  hue=min(0.1, self.jitter_factor))(crop_img)
                    if self.gaussblur:
                        gauss_action = True
                        crop_img = crop_img.filter(ImageFilter.GaussianBlur(self.gauss_radius))
                    if self.grayscale:
                        gray_action = True
                        crop_img = F.to_grayscale(crop_img, num_output_channels=3)
                if not self.combined_augmentation:
                    if aug == "flip":
                        flip_action = True
                        crop_img = F.hflip(crop_img)
                    if aug == "rot":
                        rot_action = True
                        crop_img = F.rotate(crop_img, angle=self.rot_angle, expand=True, fill=0)
                    if aug == "jitter":
                        jitter_action = True
                        crop_img = tf.ColorJitter(brightness=self.jitter_factor,
                                                  contrast=self.jitter_factor,
                                                  saturation=self.jitter_factor,
                                                  hue=min(0.1, self.jitter_factor))(crop_img)
                    if aug == "gauss":
                        gauss_action = True
                        crop_img = crop_img.filter(ImageFilter.GaussianBlur(self.gauss_radius))
                    if aug == "gray":
                        gray_action = True
                        crop_img = F.to_grayscale(crop_img, num_output_channels=3)
                # Only the tensorized image is kept; the second argument to
                # trans is a throwaway stand-in for the label.
                crop_img, _ = trans(crop_img, crop_img)
                # NOTE(review): stored as (i, j, w, h, ...) but unpacked as
                # (i, j, h, w, ...) in create_pseudo_gt — see note there.
                transforms_list.append((i, j, w, h, flip_action, rot_action,
                                        self.rot_angle, gray_action, jitter_action, gauss_action))
                crop_imgs.append(crop_img)
        image, target = trans(image, target)
        return image, target, crop_imgs, transforms_list

    def __len__(self) -> int:
        # Length of the wrapped dataset (number of image files).
        return len(self.dataset.images)

    def create_pseudo_gt(self,
                         crops_soft: torch.Tensor,
                         crop_transforms: List[List[torch.Tensor]],
                         out_shape: torch.Tensor) -> torch.Tensor:
        """
        Args:
            crops_soft: Tensor with model outputs of crops (N, C, H, W)
            crop_transforms: List with transformations (e.g. random crop and hflip parameters)
            out_shape: Tensor with output shape
        Returns:
            pseudo_gt: Pseudo ground truth based on softmax probabilities
        """
        num_classes = crops_soft[0].shape[1]
        # Buffer for the un-augmented soft predictions, pre-filled with the
        # ignore index so uncovered pixels don't count as zero probability.
        crops_soft_all = torch.ones(len(crops_soft), num_classes, *out_shape[-2:]) * self.ignore_index
        for crop_idx, (crop_soft, crop_transform) in enumerate(zip(crops_soft, crop_transforms)):
            # NOTE(review): __getitem__ stored (i, j, w, h, ...), so h and w
            # arrive swapped here; with i = j = 0 and the interpolation to
            # out_shape below this may be benign, but the CenterCrop receives
            # (stored_w, stored_h) — confirm the intended order.
            i, j, h, w, flip_action, rot_action, rot_angle, gray_action, jitter_action, gauss_action = crop_transform
            # Reaugment Images
            if rot_action:
                # Rotate back
                crop_soft = F.rotate(crop_soft, angle=-int(rot_angle))
                crop_soft = tf.CenterCrop(size=(h, w))(crop_soft)
            if flip_action:
                crop_soft = F.hflip(crop_soft)
            # Scale to original scale
            crop_soft = torch.nn.functional.interpolate(
                crop_soft, size=[*out_shape[-2:]], mode='bilinear', align_corners=True
            )
            h, w = out_shape[-2:]
            crops_soft_all[crop_idx, :, i:i+h, j:j+w] = crop_soft.squeeze(0)
        # Fuse duplicates by averaging their soft predictions.
        pseudo_gt = torch.mean(crops_soft_all, dim=0)
        if self.tta:
            pseudo_gt = pseudo_gt.unsqueeze(0)
        else:
            # Create mask to compare only max predictions
            compare_mask = torch.amax(pseudo_gt, dim=0, keepdim=True) == pseudo_gt
            # Per-class threshold: a fraction of that class's peak probability
            # over the whole image.
            class_threshold = self.threshold * torch.max(torch.max(pseudo_gt, dim=1)[0], dim=1)[0]
            if self.getinfo: print(f"Class thresholds: {class_threshold}")
            class_threshold = class_threshold.unsqueeze(1).unsqueeze(1).repeat(1, pseudo_gt.shape[1],
                                                                               pseudo_gt.shape[2])
            # Set ignore indices for pixels having not enough pixels or ignore indices
            threshold_mask = class_threshold < pseudo_gt
            threshold_mask = torch.amax(torch.mul(threshold_mask, compare_mask), dim=0)
            final_mask = threshold_mask.unsqueeze(0).unsqueeze(0)
            pseudo_gt = torch.argmax(pseudo_gt, dim=0, keepdim=True).unsqueeze(0)
            # Low-confidence pixels are marked with the ignore index.
            pseudo_gt[~final_mask] = self.ignore_index
        return pseudo_gt
| 9,098
| 43.385366
| 117
|
py
|
self-adaptive
|
self-adaptive-master/datasets/gta.py
|
import os
import glob
import argparse
import pathlib
import PIL.Image
import torch
from typing import List, Callable, Optional, Tuple
from tqdm import tqdm
import urllib.request
import shutil
import scipy.io
class GTADataset(object):
    """
    Download, unzip, and split data: python datasets/gta.py --dataset-root /path/to/gta --download-data --split-data
    This also removes samples with size mismatches between image and annotation

    Expected folder layout:
        gta/
            images/{train,test,val}/*.png
            labels/{train,test,val}/*.png
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(GTADataset, self).__init__()
        self.images_dir = os.path.join(root, "images", split)
        self.targets_dir = os.path.join(root, "labels", split)
        self.split = split
        self.transforms = transforms
        self.images, self.targets = [], []
        # GTA images and their labels share the exact same file name.
        for name in os.listdir(self.images_dir):
            self.images.append(os.path.join(self.images_dir, name))
            self.targets.append(os.path.join(self.targets_dir, name))

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load one (image, label) pair, applying joint transforms if set."""
        img = PIL.Image.open(self.images[index]).convert('RGB')
        lbl = PIL.Image.open(self.targets[index])
        if self.transforms is not None:
            img, lbl = self.transforms(img, lbl)
        return img, lbl

    def __len__(self) -> int:
        return len(self.images)
def gta(root: str,
        split: str,
        transforms: List[Callable]):
    """Factory returning a GTADataset for the given root and split."""
    return GTADataset(root=root, split=split, transforms=transforms)
def preprocess(dataset_root: str):
    """
    Remove data samples with size mismatches between image and annotation.

    Walks the train/val/test splits, pairs each image with the label of the
    same name, and deletes both files whenever their pixel sizes differ.

    Args:
        dataset_root: Root directory of the GTA dataset.
    """
    # Create catalog of every GTA image in dataset directory
    dataset_split = ["train", "val", "test"]
    # Count deleted files
    count_del = 0
    for split in dataset_split:
        images = sorted(glob.glob(os.path.join(dataset_root, "images", split, "*.png")))
        labels = sorted(glob.glob(os.path.join(dataset_root, "labels", split, "*.png")))
        assert len(images) == len(labels), "Length of catalogs does not match!"
        print("Preprocessing images and labels")
        for image, label in tqdm(zip(images, labels)):
            # Fix: use os.path.basename instead of splitting on "/" so the
            # name check also works with Windows path separators.
            image_name = os.path.basename(image)
            label_name = os.path.basename(label)
            # Assert that label corresponds to current image
            assert image_name == label_name
            # Open lazily just to read the header sizes; the context managers
            # release the file handles (the original leaked them, which can
            # exhaust descriptors on large splits).
            with PIL.Image.open(image) as img, PIL.Image.open(label) as gt:
                mismatch = img.size != gt.size
            if mismatch:
                print(f"Found data sample pair with unmatching size. Deleting file with name: {image_name} and {label_name}.")
                # Delete mismatching data samples
                os.remove(path=image)
                os.remove(path=label)
                count_del += 1
    print(f"{count_del} images have been removed from the dataset")
def download_dataset(dataset_root: str, download_path_main: str ="https://download.visinf.tu-darmstadt.de/data/from_games"):
    """Download and unpack the ten GTA image/label archives plus the split/mapping archive into ``dataset_root``."""
    # NOTE(review): os.path.join is used on URLs here; this relies on "/" being
    # the separator and would break on Windows — confirm.
    download_path = os.path.join(download_path_main, "data")
    # Make sure the destination directory exists before downloading.
    pathlib.Path(dataset_root).mkdir(exist_ok=True, parents=True)
    # Archives are numbered 01..10, one (images, labels) pair each.
    for i in tqdm(range(1, 11)):
        index = f"{i:02}"
        for file_name in ["images", "labels"]:
            file_name_zip = f"{index}_{file_name}.zip"
            file_path = os.path.join(download_path, file_name_zip)
            out_path = os.path.join(dataset_root, file_name_zip)
            urllib.request.urlretrieve(file_path, filename=out_path)
            # Unpack in place, then drop the archive to save disk space.
            shutil.unpack_archive(out_path, dataset_root)
            os.remove(out_path)
    # The mapping archive carries split.mat / mapping.mat used by split_dataset.
    mapping_name = "read_mapping.zip"
    download_path_map = os.path.join(download_path_main, "code", mapping_name)
    out_path = os.path.join(dataset_root, mapping_name)
    urllib.request.urlretrieve(download_path_map, filename=out_path)
    shutil.unpack_archive(out_path, os.path.join(dataset_root, "read_mapping"))
    os.remove(out_path)
def load_split(path: str):
    """Read the GTA train/val/test id arrays from a MATLAB ``split.mat`` file.

    Args:
        path: Path to the ``split.mat`` file.

    Returns:
        Tuple ``(trainIds, valIds, testIds)`` of numpy arrays.
    """
    split_mat = scipy.io.loadmat(path)
    return split_mat['trainIds'], split_mat['valIds'], split_mat['testIds']
def load_mapping(path: str):
    """Read class names and label-map matrices from a MATLAB mapping file.

    Args:
        path: Path to the ``mapping.mat`` file.

    Returns:
        Tuple ``(classes, cityscapesMap, camvidMap)`` of numpy arrays.
    """
    mapping = scipy.io.loadmat(path)
    return mapping['classes'], mapping['cityscapesMap'], mapping['camvidMap']
def split_dataset(dataset_root):
    """Move the flat images/labels folders into train/val/test subfolders according to ``split.mat``."""
    # Get trainIds, valIds, testIds
    path_to_map = os.path.join(dataset_root, "read_mapping")
    path_to_mat = os.path.join(path_to_map, "split.mat")
    trainIds, valIds, testIds = load_split(path=path_to_mat)
    split_ids = [trainIds.squeeze(), valIds.squeeze(), testIds.squeeze()]
    split_paths = ['train', 'val', 'test']
    img_dir = os.path.join(dataset_root, "images")
    label_dir = os.path.join(dataset_root, "labels")
    img_out_dir = os.path.join(dataset_root, "images")
    label_out_dir = os.path.join(dataset_root, "labels")
    for split_id, split_path in zip(split_ids, split_paths):
        path_split_image = os.path.join(img_out_dir, split_path)
        path_split_label = os.path.join(label_out_dir, split_path)
        pathlib.Path(path_split_label).mkdir(parents=True, exist_ok=True)
        pathlib.Path(path_split_image).mkdir(parents=True, exist_ok=True)
        # File names are the zero-padded 5-digit ids listed in split.mat.
        for img_id in tqdm(split_id):
            img_name = str(img_id).zfill(5) + '.png'
            shutil.move(os.path.join(img_dir, img_name), os.path.join(path_split_image, img_name))
            shutil.move(os.path.join(label_dir, img_name), os.path.join(path_split_label, img_name))
    # Clean up the unpacked mapping folder once splitting is done.
    shutil.rmtree(path_to_map)
    # NOTE(review): img_out_dir/label_out_dir are identical to
    # img_dir/label_dir above, so these two branches never fire; presumably
    # left over from a variant that wrote to a separate output root — confirm.
    if img_dir != img_out_dir:
        shutil.rmtree(img_dir)
    if label_dir != label_out_dir:
        shutil.rmtree(label_dir)
if __name__ == "__main__":
    # CLI entry point: optionally download the GTA archives, then split the
    # flat images/labels folders into train/val/test and delete corrupt pairs.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset-root", type=str, default=os.path.join(os.getcwd(), "datasets", "gta"))
    parser.add_argument("--download-data", action="store_true")
    parser.add_argument("--split-data", action="store_true")
    args = parser.parse_args()
    if args.download_data:
        download_dataset(args.dataset_root)
    if args.split_data:
        split_dataset(args.dataset_root)
        # Size-mismatch cleanup runs only after a fresh split.
        preprocess(args.dataset_root)
| 6,655
| 37.473988
| 126
|
py
|
self-adaptive
|
self-adaptive-master/datasets/bdd.py
|
import torch
import os
from PIL import Image
from typing import Callable, Optional, Tuple, List
class BerkeleyDataset(object):
    """
    First unzip the images: unzip bdd100k_images_10k.zip -d /path/to/bdd100k
    Second unzip the labels in the same directory: unzip bdd100k_sem_seg_labels_trainval.zip -d /path/to/bdd100k
    Third rename the directory from bdd100k to bdd: mv /path/to/bdd100k /path/to/bdd

    Expected folder layout:
        bdd/
            images/10k/{train,test,val}/*.jpg
            labels/sem_seg/masks/{train,val}/*.png
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(BerkeleyDataset, self).__init__()
        self.split = split
        self.transforms = transforms
        img_root = os.path.join(root, "images", "10k", split)
        lbl_root = os.path.join(root, "labels", "sem_seg", "masks", split)
        self.images, self.targets = [], []
        # Each mask shares the image's base name with a .png extension.
        for img_name in os.listdir(img_root):
            self.images.append(os.path.join(img_root, img_name))
            self.targets.append(os.path.join(lbl_root, img_name.replace(".jpg", ".png")))

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load one (image, label) pair, applying joint transforms if set."""
        img = Image.open(self.images[index]).convert('RGB')
        lbl = Image.open(self.targets[index])
        if self.transforms is not None:
            img, lbl = self.transforms(img, lbl)
        return img, lbl

    def __len__(self) -> int:
        return len(self.images)
def bdd(root: str,
        split: str,
        transforms: List[Callable]):
    """Factory returning a BerkeleyDataset for the given root and split."""
    return BerkeleyDataset(root=root, split=split, transforms=transforms)
| 2,054
| 31.619048
| 112
|
py
|
self-adaptive
|
self-adaptive-master/datasets/__init__.py
|
from datasets.bdd import *
from datasets.cityscapes import *
from datasets.synthia import *
from datasets.gta import *
from datasets.mapillary import *
from datasets.wilddash import *
from datasets.idd import *
| 210
| 29.142857
| 33
|
py
|
self-adaptive
|
self-adaptive-master/datasets/synthia.py
|
from PIL import Image
from typing import Optional, Callable, Tuple, List
import os
import torch
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class SynthiaDataset(object):
    """
    Expected folder layout:
        synthia/
            leftImg8bit/{train,val}/seq_id/*.png
            gtFine/{train,val}/seq_id/*.png
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(SynthiaDataset, self).__init__()
        self.mode = 'gtFine'
        self.images_dir = os.path.join(root, 'leftImg8bit', split)
        self.targets_dir = os.path.join(root, self.mode, split)
        self.split = split
        self.transforms = transforms
        self.images, self.targets = [], []
        # One subdirectory per sequence, Cityscapes-style.
        for city in os.listdir(self.images_dir):
            img_dir = os.path.join(self.images_dir, city)
            target_dir = os.path.join(self.targets_dir, city)
            for file_name in os.listdir(img_dir):
                stem = file_name.split('_leftImg8bit')[0]
                target_name = stem + "_gtFine_labelIds" + ".png"
                self.images.append(os.path.join(img_dir, file_name))
                self.targets.append(os.path.join(target_dir, target_name))

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load one (image, label) pair, applying joint transforms if set."""
        img = Image.open(self.images[index]).convert('RGB')
        lbl = Image.open(self.targets[index])
        if self.transforms is not None:
            img, lbl = self.transforms(img, lbl)
        return img, lbl

    def __len__(self) -> int:
        return len(self.images)
def synthia(root: str,
            split: str,
            transforms: List[Callable]):
    """Factory helper returning a SynthiaDataset for the given split."""
    dataset_args = {"root": root, "split": split, "transforms": transforms}
    return SynthiaDataset(**dataset_args)
| 2,133
| 32.34375
| 75
|
py
|
self-adaptive
|
self-adaptive-master/datasets/mapillary.py
|
import os
from PIL import Image
from typing import Callable, Optional, Tuple, List
import torch
class MapillaryDataset(object):
    """
    The Mapillary dataset is required to have following folder structure:
    mapillary/
        training/
            v1.2/labels/*.png
            images/*.jpg
    """

    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        """Index up to ``num_images`` (image, label) path pairs of the training folder.

        ``split`` is accepted for API symmetry with the other datasets but the
        paths always point at ``training/``.
        """
        super(MapillaryDataset, self).__init__()
        self.mode = 'gtFine'
        # Use only subset of 2000 the training images for val, as inference
        # otherwise takes too long.
        self.num_images = 2000
        self.images = []
        self.targets = []
        self.transforms = transforms
        base_dir = os.path.join(root, "training")
        labels_dir = os.path.join(base_dir, "v1.2", "labels")
        images_dir = os.path.join(base_dir, "images")
        # Cap at num_images; listdir order matches the original enumerate/break loop.
        for file_name in os.listdir(images_dir)[:self.num_images]:
            self.images.append(os.path.join(images_dir, file_name))
            self.targets.append(os.path.join(labels_dir, file_name.replace(".jpg", ".png")))

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load one (RGB image, label mask) pair and apply joint transforms."""
        sample = Image.open(self.images[index]).convert('RGB')
        label = Image.open(self.targets[index])
        if self.transforms is not None:
            sample, label = self.transforms(sample, label)
        return sample, label

    def __len__(self) -> int:
        return len(self.images)
def mapillary(root: str,
              split: str,
              transforms: List[Callable]):
    """Factory helper returning a MapillaryDataset for the given split."""
    dataset_args = {"root": root, "split": split, "transforms": transforms}
    return MapillaryDataset(**dataset_args)
| 1,962
| 31.716667
| 100
|
py
|
self-adaptive
|
self-adaptive-master/loss/semantic_seg.py
|
import torch
from typing import Dict
class CrossEntropyLoss(torch.nn.Module):
    """Pixel-wise cross entropy averaged over non-ignored pixels only."""

    def __init__(self,
                 ignore_index: int = 255):
        super(CrossEntropyLoss, self).__init__()
        # reduction="none" keeps the per-pixel loss map so the mean can be
        # taken explicitly over valid pixels.
        self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index, reduction="none")
        self.ignore_index = ignore_index

    def forward(self,
                output: torch.Tensor,
                gt: torch.Tensor):
        """Return the mean cross entropy over pixels not labeled ignore_index.

        Args:
            output: Class logits of shape (N, C, H, W).
            gt: Ground-truth labels of shape (N, 1, H, W).
        Returns:
            Scalar mean loss over valid pixels.
        """
        labels = gt.long().squeeze(1)
        per_pixel = self.criterion(output, labels)
        valid = labels != self.ignore_index
        return per_pixel[valid].mean()
class PSPNetLoss(torch.nn.Module):
    """Cross entropy on the final head plus alpha-weighted auxiliary head loss."""

    def __init__(self,
                 ignore_index: int = 255,
                 alpha: float = 0.0):
        super(PSPNetLoss, self).__init__()
        self.seg_criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
        self.cls_criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
        self.ignore_index = ignore_index
        self.alpha = alpha

    def forward(self,
                output_dict: Dict[str, torch.Tensor],
                gt: torch.Tensor):
        """Combine main ('final') and auxiliary ('aux') head losses.

        Args:
            output_dict: Dict with logits under keys 'final' and 'aux'.
            gt: Ground-truth labels of shape (N, 1, H, W).
        Returns:
            seg_loss('final') + alpha * seg_loss('aux'), a scalar tensor.
        """
        labels = gt.long().squeeze(1)
        main_loss = self.seg_criterion(output_dict['final'], labels)
        aux_loss = self.cls_criterion(output_dict['aux'], labels)
        return main_loss + self.alpha * aux_loss
| 1,961
| 30.645161
| 95
|
py
|
self-adaptive
|
self-adaptive-master/utils/parser.py
|
import argparse
import os
def base_parser():
    """Argparse options shared by all entry points.

    Returns the parser itself (not parsed args) so train/val parsers can
    extend it with their own options before calling parse_args().
    """
    parser = argparse.ArgumentParser()
    # Data / checkpoint locations.
    parser.add_argument("--dataset-root", type=str, default=os.path.join(os.getcwd(), "datasets", "gta"))
    parser.add_argument("--checkpoints-root", type=str, default=os.path.join(os.getcwd(), "checkpoints", "runs"))
    # Model configuration.
    parser.add_argument("--num-classes", type=int, default=19, choices=[19, 16], help="Set 19 for a GTA trained model and 16 for a SYNTHIA trained model")
    parser.add_argument("--backbone-name", type=str, default="resnet50", choices=["resnet50", "resnet101"])
    parser.add_argument("--arch-type", type=str, default="deeplab", choices=["deeplab", "deeplabv3plus", "hrnet18", "hrnet48"])
    # Runtime / optimization settings.
    parser.add_argument("--batch-size", type=int, default=1)
    parser.add_argument("--num-workers", type=int, default=8)
    parser.add_argument("--num-epochs", type=int, default=50)
    parser.add_argument("--dropout", action="store_true", help="Enable dropout during training/Use pre-trained Dropout model")
    parser.add_argument("--alpha", type=float, default=None, help="Between 0.0 and 1.0 for val; For inference: Only set this alpha to [0.0, 1.0] if you want to change the alpha from the checkpoint to a custom alpha")
    parser.add_argument("--base-lr", type=float, default=5e-3)
    parser.add_argument("--weight-decay", type=float, default=1e-4)
    parser.add_argument("--momentum", type=float, default=0.9)
    return parser
def train_parser():
    """Training-specific options; extends base_parser and returns parsed args."""
    parser = base_parser()
    parser.add_argument("--val-dataset-root", type=str, default=os.path.join(os.getcwd(), "datasets", "wilddash"))
    # Validation schedule (first epoch to validate on, and the interval).
    parser.add_argument("--validation-start", type=int, default=0)
    parser.add_argument("--validation-step", type=int, default=1)
    # Multi-GPU setup.
    parser.add_argument("--distributed", action="store_true")
    parser.add_argument("--gpus", type=int, default=1)
    parser.add_argument("--lr-scheduler", type=str, choices=["constant", "poly"], default="poly")
    parser.add_argument("--crop-size", nargs='+', type=int, default=[512, 512])
    parser.add_argument("--num-alphas", type=int, default=1, help="1: --alpha is chosen for val, >1: creates alpha linspace vector with [0:num-alphas:1] for val")
    return parser.parse_args()
def val_parser():
    """Validation/inference options; extends base_parser and returns parsed args."""
    parser = base_parser()
    parser.add_argument("--dataset-split", type=str, default="val")
    parser.add_argument("--source", type=str, default="gta", choices=["gta", "synthia"])
    parser.add_argument("--checkpoint", type=str, default=None, help="Name of checkpoint file")
    parser.add_argument("--threshold", type=float, default=0.7)
    # Test-time augmentation settings.
    parser.add_argument("--tta", action="store_true")
    parser.add_argument("--only-inf", action="store_true")
    parser.add_argument("--scales", nargs="+", type=float, default=[0.25, 0.5, 0.75])
    parser.add_argument("--flips", action="store_true", help="Apply augmentation flip to all images")
    parser.add_argument("--grayscale",action="store_true", help="Apply grayscaling for Self-adaptation")
    parser.add_argument("--calibration", action="store_true", help="Compute calibration during inference")
    # Layers to freeze during self-adaptation, per backbone family.
    parser.add_argument("--resnet-layers", nargs="+", type=int, default=[1, 2], help="1, 2, 3 and/or 4 which will be frozen for Self-adaptation")
    parser.add_argument("--hrnet-layers", nargs="+", type=int, default=[1, 2], help="1, 2 and/or 3 which will be frozen for Self-adaptation")
    parser.add_argument("--mixed-precision", action="store_true", help="Use mixed precision")
    return parser.parse_args()
| 3,512
| 72.1875
| 218
|
py
|
self-adaptive
|
self-adaptive-master/utils/montecarlo.py
|
import torch
import numpy as np
from typing import Union, List
class MonteCarloDropout(object):
    """Accumulators for Monte-Carlo-dropout inference.

    Stores per-pass prediction/confidence/softmax maps and reduces them to
    means and variances across the stochastic forward passes.
    """

    def __init__(self,
                 size: Union[List, int],
                 passes: int = 10,
                 classes: int = 19):
        # `size` is indexed as (H, W) below, so despite the annotation it must
        # be a 2-element sequence — TODO confirm intended type.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.vanilla_prediction = torch.zeros(size=(1, size[0], size[1]), device=self.device)
        self.vanilla_confidence = torch.zeros(size=(1, size[0], size[1]), device=self.device)
        # One slot per stochastic forward pass.
        self.mcd_predictions = torch.zeros(size=(passes, size[0], size[1]), device=self.device)
        self.mcd_confidences = torch.zeros(size=(passes, size[0], size[1]), device=self.device)
        self.softmax = torch.zeros(size=(passes, classes, size[0], size[1]), device=self.device)
        self.mean_softmax = None
        self.var_softmax = None
        self.passes = passes
        # Save Dropout layers for checking
        self.dropout_layers = []

    def enable_dropout(self,
                       model: torch.nn.Module):
        """
        Put every Dropout* submodule into train mode so it stays stochastic
        during inference.
        Args:
            model: Pytorch model
        """
        for m in model.modules():
            if m.__class__.__name__.startswith("Dropout"):
                m.train()
                self.dropout_layers.append(m)

    def save_predictions(self,
                         pass_idx: int,
                         current_prediction: torch.Tensor,
                         current_confidence: torch.Tensor):
        # Record the prediction/confidence maps of one pass.
        if type(current_prediction) == torch.Tensor:
            # Send tensors to CPU and convert to numpy
            # NOTE(review): the numpy arrays are then assigned into tensors
            # allocated on self.device — this only works when that device is
            # CPU; confirm GPU usage goes through save_softmax instead.
            current_prediction = current_prediction.squeeze(0).cpu().numpy()
            current_confidence = current_confidence.squeeze(0).cpu().numpy()
        self.mcd_predictions[pass_idx] = current_prediction
        self.mcd_confidences[pass_idx] = current_confidence

    def save_softmax(self,
                     pass_idx: int,
                     softmax: torch.Tensor):
        # Store the full softmax volume of one pass.
        self.softmax[pass_idx] = softmax

    def avg_softmax(self):
        """Return (mean confidence, mean prediction, mean softmax) over passes."""
        # Average softmax over all forward passes
        self.mean_softmax = torch.mean(self.softmax, dim=0, keepdim=True)
        self.var_softmax = torch.var(self.softmax, dim=0, keepdim=True)
        # Get mean confidence and prediction
        confidence, prediction = self.mean_softmax.max(dim=1)
        return confidence, prediction, self.mean_softmax

    def avg_predictions(self):
        """Return mean/variance of predictions and confidences over passes."""
        # Calculate mean and var over multiple MCD predictions
        # NOTE(review): np.mean/np.var are applied to torch tensors here —
        # this only works for CPU tensors; on CUDA it raises. Confirm.
        mean_pred = np.mean(self.mcd_predictions, axis=0)
        var_pred = np.var(self.mcd_predictions, axis=0)
        # Calculate mean and var over multiple MCD confidences
        mean_conf = np.mean(self.mcd_confidences, axis=0)
        var_conf = np.var(self.mcd_confidences, axis=0)
        return {"Mean prediction": mean_pred,
                "Variance prediction": var_pred,
                "Mean confidence": mean_conf,
                "Variance confidence": var_conf}
| 2,978
| 36.2375
| 96
|
py
|
self-adaptive
|
self-adaptive-master/utils/modeling.py
|
import functools
import torch
def rsetattr(obj, attr, val):
    """setattr that accepts a dotted path, e.g. rsetattr(m, 'a.b.c', v)."""
    parent_path, _, leaf = attr.rpartition('.')
    target = rgetattr(obj, parent_path) if parent_path else obj
    return setattr(target, leaf, val)
def rgetattr(obj, attr, *args):
    """getattr that accepts a dotted path; *args may carry a default value."""
    def _step(current, name):
        return getattr(current, name, *args)
    result = obj
    for name in attr.split('.'):
        result = _step(result, name)
    return result
def freeze_layers(opts, model: torch.nn.Module):
    """Freeze backbone parameters selected via CLI options for self-adaptation.

    ResNet-based DeepLab variants: freezes the first four parameter tensors
    (the input stem) plus every ``layerN`` listed in ``opts.resnet_layers``.
    HRNet: freezes the stem plus each ``transitionN``/``stage(N+1)`` pair
    listed in ``opts.hrnet_layers``.
    NOTE(review): accesses ``model.module`` throughout, so the model is
    assumed to be wrapped in DataParallel/DDP — confirm at call sites.
    """
    if len(opts.resnet_layers) != 0 and "resnet" in opts.backbone_name and "deeplab" in opts.arch_type:
        # Resolve the inner model that owns the backbone for each arch.
        if opts.arch_type == "deeplab":
            model_name = model.module
        elif opts.arch_type == "deeplabv3plus":
            model_name = model.module.model
        else:
            raise ValueError(f"{opts.arch_type} not compatible with resnet layer freezing")
        # The first four parameter tensors form the input stem.
        for idx, p in enumerate(model_name.named_parameters()):
            if idx <= 3:
                p[1].requires_grad = False
            else:
                break
        for layer in opts.resnet_layers:
            layer = "layer" + str(layer)
            for para in getattr(model_name.backbone, layer).named_parameters():
                para[1].requires_grad = False
    if len(opts.hrnet_layers) != 0 and "hrnet" in opts.arch_type:
        # Stem freezing, same convention as above.
        for idx, p in enumerate(model.module.model.named_parameters()):
            if idx <= 3:
                p[1].requires_grad = False
            else:
                break
        for layer_idx in opts.hrnet_layers:
            # Each HRNet block is a transitionN followed by stage(N+1).
            layer = "transition" + str(layer_idx)
            for para in getattr(model.module.model, layer).named_parameters():
                para[1].requires_grad = False
            layer = "stage" + str(layer_idx + 1)
            for para in getattr(model.module.model, layer).named_parameters():
                para[1].requires_grad = False
| 1,782
| 40.465116
| 103
|
py
|
self-adaptive
|
self-adaptive-master/utils/calibration.py
|
"""
Guo et al.: On Calibration of Modern Neural Networks, 2017, ICML
https://arxiv.org/abs/1706.04599
Code based on implementation of G. Pleiss: https://gist.github.com/gpleiss/0b17bc4bd118b49050056cfcd5446c71
"""
import torch
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
import pathlib
class CalibrationMeter(object):
    """Accumulates per-class confidence/accuracy bins over a dataset to compute
    expected calibration error (ECE) and draw reliability diagrams."""

    def __init__(self,
                 device,
                 n_bins: int = 10,
                 num_images: int = 500,
                 num_classes: int = 19):
        # Initiate bins
        self.device = device
        self.num_classes = num_classes
        self.num_images = num_images
        self.num_bins = n_bins
        self.width = 1.0 / n_bins
        # Bin edges in [0, 1] and their centers (for plotting).
        self.bins = torch.linspace(0, 1, n_bins + 1, device=self.device)
        self.bin_centers = np.linspace(0, 1.0 - self.width, n_bins) + self.width / 2
        self.bin_uppers = self.bins[1:]
        self.bin_lowers = self.bins[:-1]
        # Save bins per class
        self.scores_per_class = torch.zeros(size=(self.num_classes, self.num_bins), device=self.device)
        self.corrects_per_class = torch.zeros_like(self.scores_per_class, device=self.device)
        self.ece_per_class = torch.zeros(size=(self.num_classes, 1), device=self.device)
        self.class_pixels_total = torch.zeros(size=(self.num_classes, 1), device=self.device)
        # Save accuracy and confidence values per class per batch
        self.class_acc_per_batch = [torch.zeros(0, device=self.device) for _ in range(self.num_classes)]
        self.class_conf_per_batch = [torch.zeros(0, device=self.device) for _ in range(self.num_classes)]
        # For whole dataset
        self.overall_corrects = torch.from_numpy(np.zeros_like(self.bin_centers)).to(device)
        self.overall_scores = torch.from_numpy(np.zeros_like(self.bin_centers)).to(device)
        self.overall_ece = 0

    def calculate_bins(self,
                       output: torch.Tensor,
                       label: torch.Tensor,
                       mcd: bool = False):
        """
        Calculate accuracy and confidence values per class and per image. Then, partition confidences into bins.
        This results into accuracy/confidence bins for each class per image.

        Args:
            output: logits (or, if mcd=True, already-softmaxed probabilities).
            label: ground-truth label map with a leading batch dimension.
            mcd: set True when `output` comes from averaged MC-dropout softmax.
        """
        # Get rid of batch dimension
        label = label.squeeze(0)
        if mcd:
            softmaxes = output
        else:
            # Logits to predictions
            softmaxes = torch.nn.functional.softmax(output, dim=1)
        for cls in range(self.num_classes):
            # Filter predictions
            confidences, predictions = softmaxes.max(dim=1)
            # Mask out pixels predicted as any other class.
            predictions[predictions != cls] = 255
            # Compute accuracies
            class_accuracy = torch.eq(predictions[label == cls], label[label == cls])
            class_confidence = confidences[label == cls]
            class_pixels = predictions[label == cls].size()[0]
            # Partition bins
            bin_indices = [class_confidence.ge(bin_lower) * class_confidence.lt(bin_upper) for bin_lower, bin_upper in
                           zip(self.bins[:-1], self.bins[1:])]
            # Weight per-bin means by the class pixel count so bins can be
            # accumulated across images and normalized at the end.
            bin_corrects = class_pixels * torch.tensor([torch.mean(class_accuracy[bin_index].float())
                                                        for bin_index in bin_indices], device=self.device)
            bin_scores = class_pixels * torch.tensor([torch.mean(class_confidence[bin_index].float())
                                                      for bin_index in bin_indices], device=self.device)
            # Calculate ECE
            ece = class_pixels * self._calc_ece(class_accuracy, class_confidence,
                                                bin_lowers=self.bin_lowers, bin_uppers=self.bin_uppers)
            # Check nan (empty bins produce NaN means)
            bin_corrects[torch.isnan(bin_corrects) == True] = 0
            bin_scores[torch.isnan(bin_scores) == True] = 0
            self.corrects_per_class[cls] += bin_corrects
            self.scores_per_class[cls] += bin_scores
            self.ece_per_class[cls] += ece
            self.class_pixels_total[cls] += class_pixels

    def calculate_mean_over_dataset(self):
        """Normalize per-class accumulators and average them into overall bins/ECE."""
        for cls in range(self.num_classes):
            self.overall_corrects += \
                (self.corrects_per_class[cls] / (self.class_pixels_total[cls].item() + 1e-9)) / self.num_classes
            self.overall_scores += \
                (self.scores_per_class[cls] / (self.class_pixels_total[cls].item() + 1e-9)) / self.num_classes
            self.overall_ece += \
                (self.ece_per_class[cls].item() / (self.class_pixels_total[cls].item() + 1e-9)) / self.num_classes

    def save_data(self,
                  where: str,
                  what: str):
        """
        Save entire calibration meter object instance for later use.
        """
        # Create directory for storing results
        pathlib.Path(where).mkdir(parents=True, exist_ok=True)
        # Save results
        with open(os.path.join(where, what), "wb") as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def _calc_ece(accuracies, confidence, bin_lowers, bin_uppers):
        """Expected calibration error: sum over bins of |conf - acc| * p(bin)."""
        # Calculate ECE
        ece = 0
        for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
            # Calculated |confidence - accuracy| in each bin
            in_bin = confidence.gt(bin_lower.item()) * confidence.le(bin_upper.item())
            prop_in_bin = in_bin.float().mean()
            if prop_in_bin.item() > 0:
                accuracy_in_bin = accuracies[in_bin].float().mean()
                avg_confidence_in_bin = confidence[in_bin].mean()
                ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
        return ece

    def plot_mean(self):
        """
        Plots reliability diagram meant over all classes.
        Returns:
            Figure
        """
        # Calculate gaps
        gap = self.overall_scores - self.overall_corrects
        # Create figure
        fig, ax = plt.subplots(figsize=(9, 9))
        plt.grid()
        fontsize = 25
        # Create bars
        confs = plt.bar(self.bin_centers, self.overall_corrects, width=self.width, ec='black')
        gaps = plt.bar(self.bin_centers, gap, bottom=self.overall_corrects, color=[1, 0.7, 0.7],
                       alpha=0.5, width=self.width, hatch='//', edgecolor='r')
        plt.plot([0, 1], [0, 1], '--', color='gray')
        plt.legend([confs, gaps], ['Outputs', 'Gap'], loc='best', fontsize='xx-large')
        # Clean up
        bbox_props = dict(boxstyle="round", fc="lightgrey", ec="brown", lw=2)
        plt.text(0.2, 0.75, f"ECE: {np.round_(self.overall_ece, decimals=3)}", ha="center",
                 va="center", size=fontsize-2, weight='bold', bbox=bbox_props)
        plt.title("Reliability Diagram", size=fontsize + 2)
        plt.ylabel("Accuracy", size=fontsize)
        plt.xlabel("Confidence", size=fontsize)
        plt.xlim(0, 1)
        plt.ylim(0, 1)
        # NOTE(review): tick.label was removed in newer matplotlib
        # (use tick.label1) — confirm the pinned matplotlib version.
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(18)
        for tick in ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(18)
        return fig

    def plot_cls_diagrams(self):
        """
        Plots for each class a reliability diagram.
        Returns:
            List of Figures
        """
        list_figures = []
        for cls in range(self.num_classes):
            # Normalize accumulated bins by the class pixel count.
            bin_corrects = self.corrects_per_class[cls].cpu().numpy() / (self.class_pixels_total[cls].cpu().item() + 1e-9)
            bin_scores = self.scores_per_class[cls].cpu().numpy() / (self.class_pixels_total[cls].cpu().item() +1e-9)
            ece = self.ece_per_class[cls].cpu().item() / (self.class_pixels_total[cls].cpu().item() + 1e-9)
            # Calculate gaps
            gap = bin_scores - bin_corrects
            # Create figure
            figure = plt.figure(0, figsize=(8, 8))
            plt.grid()
            # Create bars
            confs = plt.bar(self.bin_centers, bin_corrects, width=self.width, ec='black')
            gaps = plt.bar(self.bin_centers, gap, bottom=bin_corrects, color=[1, 0.7, 0.7], alpha=0.5,
                           width=self.width, hatch='//', edgecolor='r')
            plt.plot([0, 1], [0, 1], '--', color='gray')
            plt.legend([confs, gaps], ['Outputs', 'Gap'], loc='best', fontsize='small')
            # Clean up
            bbox_props = dict(boxstyle="round", fc="lightgrey", ec="brown", lw=2)
            plt.text(0.2, 0.85, f"ECE: {np.round_(ece, decimals=3)}", ha="center", va="center", size=20,
                     weight='bold',
                     bbox=bbox_props)
            plt.title("Reliability Diagram", size=20)
            plt.ylabel("Accuracy", size=18)
            plt.xlabel("Confidence", size=18)
            plt.xlim(0, 1)
            plt.ylim(0, 1)
            list_figures.append(figure)
            # Clear current figure
            plt.close(figure)
        return list_figures
| 9,011
| 41.309859
| 122
|
py
|
self-adaptive
|
self-adaptive-master/utils/dropout.py
|
from utils.modeling import rsetattr
import torch, math
def add_dropout(model: torch.nn.Module,
                dropout_start_perc: float = 0.0,
                dropout_stop_perc: float = 1.0,
                dropout_prob: float = 0.1):
    """Insert Dropout after a fractional range of the net's ReLU activations.

    Each ReLU/ReLU6 whose ordinal position falls inside
    [dropout_start_perc, dropout_stop_perc] (as fractions of all such
    activations) is replaced in-place by Sequential(activation, Dropout(p)).
    """
    activation_types = (torch.nn.ReLU6, torch.nn.ReLU)
    total = len([m for m in model.modules() if isinstance(m, activation_types)])
    first = math.floor(dropout_start_perc * total)
    last = math.floor(dropout_stop_perc * total)
    seen = 0
    for module_name, module in model.named_modules():
        if isinstance(module, activation_types):
            seen += 1
            if first <= seen <= last:
                wrapped = torch.nn.Sequential(module, torch.nn.Dropout(p=dropout_prob))
                rsetattr(model, module_name, wrapped)
| 854
| 39.714286
| 92
|
py
|
self-adaptive
|
self-adaptive-master/utils/distributed.py
|
import os
import torch
import torch.distributed
def init_process(opts,
                 gpu: int) -> int:
    """Join the NCCL process group for distributed training.

    One process per GPU: the GPU index doubles as the process rank and the
    world size equals the number of GPUs. Returns this process's rank.
    """
    # Define world size
    opts.world_size = opts.gpus
    # Rendezvous via environment variables on localhost.
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '8888'
    rank = gpu
    # Initiate process group
    torch.distributed.init_process_group(backend='nccl',
                                         init_method='env://',
                                         world_size=opts.world_size,
                                         rank=rank)
    print(f"{rank + 1}/{opts.world_size} process initialized.\n")
    return rank
def clean_up():
    # Tear down the default process group created in init_process.
    torch.distributed.destroy_process_group()
| 702
| 24.107143
| 68
|
py
|
self-adaptive
|
self-adaptive-master/utils/metrics.py
|
# Adapted from score written by wkentaro
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
class runningScore():
    """Streaming semantic-segmentation metrics via an accumulated confusion matrix."""

    def __init__(self,
                 n_classes: int):
        self.n_classes = n_classes
        self.confusion_matrix = np.zeros((n_classes, n_classes))

    def _fast_hist(self,
                   label_true: np.ndarray,
                   label_pred: np.ndarray,
                   n_class: int):
        """Confusion matrix for one flattened (gt, pred) pair.

        Ground-truth values outside [0, n_class) are ignored.
        """
        valid = (label_true >= 0) & (label_true < n_class)
        flat_idx = n_class * label_true[valid].astype(int) + label_pred[valid]
        return np.bincount(flat_idx, minlength=n_class ** 2).reshape(n_class, n_class)

    def update(self,
               label_trues: np.ndarray,
               label_preds: np.ndarray):
        """Accumulate a batch of ground-truth / prediction maps."""
        for gt_map, pred_map in zip(label_trues, label_preds):
            self.confusion_matrix += self._fast_hist(gt_map.flatten(), pred_map.flatten(), self.n_classes)

    def get_scores(self):
        """
        Returns accuracy score evaluation result.
            - overall accuracy
            - mean accuracy
            - mean IU
            - fwavacc
        """
        hist = self.confusion_matrix
        diag = np.diag(hist)
        acc = diag.sum() / hist.sum()
        acc_cls = np.nanmean(diag / hist.sum(axis=1))
        iu = diag / (hist.sum(axis=1) + hist.sum(axis=0) - diag)
        mean_iu = np.nanmean(iu)
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))
        return (
            {
                "Overall Acc:": acc,
                "Mean Acc :": acc_cls,
                "FreqW Acc :t": fwavacc,
                "Mean IoU :": mean_iu,
            },
            cls_iu,
            hist,
            iu,
        )

    def reset(self):
        """Zero the confusion matrix for a fresh evaluation run."""
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
| 1,963
| 30.174603
| 96
|
py
|
self-adaptive
|
self-adaptive-master/utils/self_adapt_norm.py
|
import torch.nn as nn
from copy import deepcopy
from utils.modeling import *
class SelfAdaptiveNormalization(nn.Module):
    """BatchNorm variant that blends source-domain running statistics with the
    statistics of the current batch:
        stat = (1 - alpha) * running_stat + alpha * batch_stat
    with alpha clamped to [0, 1]. Falls back to the wrapped BatchNorm2d when
    no blending is requested."""

    def __init__(self,
                 num_features: int,
                 unweighted_stats: bool = False,
                 eps: float = 1e-5,
                 momentum: float = 0.1,
                 alpha: float = 0.5,
                 alpha_train: bool = False,
                 affine: bool = True,
                 track_running_stats: bool = True,
                 training: bool = False,
                 update_source: bool = True):
        super(SelfAdaptiveNormalization, self).__init__()
        # alpha is a Parameter so it can optionally be learned (alpha_train).
        self.alpha = nn.Parameter(torch.tensor(alpha), requires_grad=alpha_train)
        self.alpha_train = alpha_train
        # NOTE(review): shadows nn.Module's built-in `training` flag —
        # train()/eval() will overwrite it; confirm that is intended.
        self.training = training
        self.unweighted_stats = unweighted_stats
        self.eps = eps
        self.update_source = update_source
        # Wrapped source-domain BatchNorm holding running stats and affine params.
        self.batch_norm = nn.BatchNorm2d(
            num_features,
            eps,
            momentum,
            affine,
            track_running_stats
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Blended path: inference-time adaptation, or training a learnable alpha.
        if (not self.training and not self.unweighted_stats) or (self.training and self.alpha_train):
            if self.alpha_train:
                self.alpha.requires_grad_()
            # Compute statistics from batch
            x_mean = torch.mean(x, dim=(0, 2, 3))
            x_var = torch.var(x, dim=(0, 2, 3), unbiased=False)
            # Weigh batch statistics with running statistics
            alpha = torch.clamp(self.alpha, 0, 1)
            weighted_mean = (1 - alpha) * self.batch_norm.running_mean.detach() + alpha * x_mean
            weighted_var = (1 - alpha) * self.batch_norm.running_var.detach() + alpha * x_var
            # Update running statistics based on momentum
            if self.update_source and self.training:
                self.batch_norm.running_mean = (1 - self.batch_norm.momentum) * self.batch_norm.running_mean\
                                               + self.batch_norm.momentum * x_mean
                self.batch_norm.running_var = (1 - self.batch_norm.momentum) * self.batch_norm.running_var\
                                              + self.batch_norm.momentum * x_var
            return compute_bn(
                x, weighted_mean, weighted_var,
                self.batch_norm.weight, self.batch_norm.bias, self.eps
            )
        return self.batch_norm(x)
def compute_bn(input: torch.Tensor, weighted_mean: torch.Tensor, weighted_var: torch.Tensor,
               weight: torch.Tensor, bias: torch.Tensor, eps: float) -> torch.Tensor:
    """Batch-norm style affine normalization with externally supplied statistics.

    For an NCHW tensor, per channel c:
        ((x - mean[c]) / sqrt(var[c] + eps)) * weight[c] + bias[c]
    """
    mean = weighted_mean[None, :, None, None]
    std = torch.sqrt(weighted_var[None, :, None, None] + eps)
    normalized = (input - mean) / std
    return normalized * weight[None, :, None, None] + bias[None, :, None, None]
def replace_batchnorm(m: torch.nn.Module,
                      alpha: float,
                      update_source_bn: bool = True):
    """Recursively swap every BatchNorm2d in `m` for SelfAdaptiveNormalization,
    copying the original running statistics and affine parameters.

    A `None` alpha is treated as 0.0 (pure source statistics).
    """
    if alpha is None:
        alpha = 0.0
    for child_name, child in m.named_children():
        if not isinstance(child, torch.nn.BatchNorm2d):
            # Recurse into containers until BatchNorm leaves are found.
            replace_batchnorm(child, alpha=alpha, update_source_bn=update_source_bn)
            continue
        san = SelfAdaptiveNormalization(num_features=child.num_features,
                                        alpha=alpha, update_source=update_source_bn)
        # Transfer source-domain statistics and affine parameters.
        setattr(san.batch_norm, "running_mean", deepcopy(child.running_mean))
        setattr(san.batch_norm, "running_var", deepcopy(child.running_var))
        setattr(san.batch_norm, "weight", deepcopy(child.weight))
        setattr(san.batch_norm, "bias", deepcopy(child.bias))
        san.to(next(m.parameters()).device.type)
        setattr(m, child_name, san)
def reinit_alpha(m: torch.nn.Module,
                 alpha: float,
                 device: torch.device,
                 alpha_train: bool = False):
    """Reset the mixing coefficient of every SelfAdaptiveNormalization layer in `m`."""
    for san_layer in (mod for mod in m.modules() if isinstance(mod, SelfAdaptiveNormalization)):
        san_layer.alpha = nn.Parameter(torch.tensor(alpha).to(device), requires_grad=alpha_train)
| 4,140
| 43.053191
| 112
|
py
|
self-adaptive
|
self-adaptive-master/utils/transforms.py
|
import torch, random
import torchvision.transforms.functional as F
import torchvision.transforms as tf
import numpy as np
from PIL import Image, ImageFilter
from typing import Tuple, List, Callable
from datasets.labels import convert_ids_to_trainids, convert_trainids_to_ids
class Compose:
    """Chain joint (image, ground-truth) transforms and apply them in order."""

    def __init__(self,
                 transforms: List[Callable]):
        self.transforms = transforms

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[torch.Tensor, torch.Tensor]:
        for step in self.transforms:
            img, gt = step(img, gt)
        return img, gt
class ToTensor:
    """Convert a PIL pair to tensors: the image via to_tensor (CHW, scaled),
    the mask as raw values with a leading channel dimension."""

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[torch.Tensor, torch.Tensor]:
        img_tensor = F.to_tensor(np.array(img))
        gt_tensor = torch.from_numpy(np.array(gt)).unsqueeze(0)
        return img_tensor, gt_tensor
class Resize:
    """Resize image (bilinear) and mask (nearest) to the same target size."""

    def __init__(self,
                 resize: Tuple[int]):
        self.img_resize = tf.Resize(size=resize,
                                    interpolation=Image.BILINEAR)
        self.gt_resize = tf.Resize(size=resize,
                                   interpolation=Image.NEAREST)

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        # Nearest-neighbor on the mask keeps label ids intact.
        return self.img_resize(img), self.gt_resize(gt)
class ImgResize:
    """Downscale an image tensor to `resize` when it exceeds that pixel count;
    the ground truth is passed through untouched."""

    def __init__(self,
                 resize: Tuple[int, int]):
        self.resize = resize
        self.num_pixels = self.resize[0] * self.resize[1]

    def __call__(self,
                 img: torch.Tensor,
                 gt: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        height, width = img.shape[-2:]
        if height * width > self.num_pixels:
            img = torch.nn.functional.interpolate(
                img.unsqueeze(0), size=self.resize, mode='bilinear').squeeze(0)
        return img, gt
class ImgResizePIL:
    """Bilinearly shrink a PIL image when it exceeds the target pixel count."""

    def __init__(self,
                 resize: Tuple[int]):
        self.resize = resize
        self.num_pixels = self.resize[0] * self.resize[1]

    def __call__(self,
                 img: Image) -> Image:
        if img.height * img.width > self.num_pixels:
            # PIL expects (width, height); self.resize stores (height, width).
            img = img.resize((self.resize[1], self.resize[0]), Image.BILINEAR)
        return img
class Normalize:
    """Channel-wise normalize the image (ImageNet defaults); mask untouched."""

    def __init__(self,
                 mean: List[float] = [0.485, 0.456, 0.406],
                 std: List[float] = [0.229, 0.224, 0.225]):
        self.norm = tf.Normalize(mean=mean,
                                 std=std)

    def __call__(self,
                 img: torch.Tensor,
                 gt: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        return self.norm(img), gt
class RandomHFlip:
    """Horizontally flip image and mask together with probability `percentage`."""

    def __init__(self,
                 percentage: float = 0.5):
        self.percentage = percentage

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        if random.random() < self.percentage:
            return F.hflip(img), F.hflip(gt)
        return img, gt
class RandomResizedCrop:
    """Sample one random crop window and apply it to both image (bilinear)
    and mask (nearest) so they stay aligned."""

    def __init__(self,
                 crop_size: List[int]):
        self.crop = tf.RandomResizedCrop(size=tuple(crop_size))

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        top, left, height, width = self.crop.get_params(img=img,
                                                        scale=self.crop.scale,
                                                        ratio=self.crop.ratio)
        img_out = F.resized_crop(img, top, left, height, width, self.crop.size, Image.BILINEAR)
        gt_out = F.resized_crop(gt, top, left, height, width, self.crop.size, Image.NEAREST)
        return img_out, gt_out
class CenterCrop:
    """Apply the same center crop to image and mask."""

    def __init__(self,
                 crop_size: int):
        self.crop = tf.CenterCrop(size=crop_size)

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        return self.crop(img), self.crop(gt)
class IdsToTrainIds:
    """Map raw dataset label ids to train ids for the (source, target) pair;
    the image is passed through untouched."""

    def __init__(self,
                 source: str,
                 target: str):
        self.source = source
        self.target = target
        self.ids_to_trainids = convert_ids_to_trainids

    def __call__(self,
                 img: torch.Tensor,
                 gt: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        return img, self.ids_to_trainids(gt, source=self.source, target=self.target)
class TrainIdsToIds:
    """Map train ids back to raw dataset label ids for the (source, target)
    pair; the image is passed through untouched."""

    def __init__(self,
                 source: str,
                 target: str):
        self.source = source
        self.target = target
        self.trainids_to_ids = convert_trainids_to_ids

    def __call__(self,
                 img: torch.Tensor,
                 gt: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        return img, self.trainids_to_ids(gt, source=self.source, target=self.target)
class ColorJitter:
    """Randomly jitter image color with probability `percentage`; mask untouched."""

    def __init__(self, percentage: float = 0.5, brightness: float = 0.3,
                 contrast: float = 0.3, saturation: float = 0.3, hue: float = 0.1):
        self.percentage = percentage
        self.jitter = tf.ColorJitter(brightness=brightness,
                                     contrast=contrast,
                                     saturation=saturation,
                                     hue=hue)

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        if random.random() < self.percentage:
            img = self.jitter(img)
        return img, gt
class MaskGrayscale:
    """With probability `percentage`, convert the image to 3-channel grayscale."""

    def __init__(self, percentage: float = 0.1):
        self.percentage = percentage

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        if random.random() < self.percentage:
            img = F.to_grayscale(img, num_output_channels=3)
        return img, gt
class RandGaussianBlur:
    """Blur the image with a Gaussian kernel of random radius from `radius`."""

    def __init__(self, radius: List[float] = [.1, 2.]):
        self.radius = radius

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        blur_radius = random.uniform(self.radius[0], self.radius[1])
        return img.filter(ImageFilter.GaussianBlur(blur_radius)), gt
| 6,474
| 26.553191
| 113
|
py
|
self-adaptive
|
self-adaptive-master/optimizer/schedulers.py
|
'''
Source: https://github.com/meetshah1995/pytorch-semseg
'''
from torch.optim.lr_scheduler import _LRScheduler
import torch
from typing import List
def get_scheduler(scheduler_type: str,
                  optimizer: torch.optim.Optimizer,
                  max_iter: int) -> _LRScheduler:
    """Build a learning-rate scheduler by name.

    Args:
        scheduler_type: "constant" or "poly".
        optimizer: Optimizer whose learning rates are scheduled.
        max_iter: Total iterations (only used by the poly schedule).
    Raises:
        ValueError: for an unknown scheduler_type.
    """
    if scheduler_type == "constant":
        return ConstantLR(optimizer=optimizer)
    if scheduler_type == "poly":
        return PolyLR(optimizer=optimizer,
                      max_iter=max_iter)
    raise ValueError(f"Scheduler {scheduler_type} unknown")
class ConstantLR(_LRScheduler):
    """Scheduler that keeps every parameter group at its base learning rate."""

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 last_epoch: int = -1):
        super(ConstantLR, self).__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        """
        Returns:
            lr: The unchanged base learning rates.
        """
        return self.base_lrs
class PolyLR(_LRScheduler):
    """Polynomial decay: lr = base_lr * (1 - epoch / max_iter) ** gamma."""

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 max_iter: int,
                 decay_iter: int = 1,
                 gamma: float = 0.9,
                 last_epoch: int = -1):
        self.max_iter = max_iter
        self.decay_iter = decay_iter
        self.gamma = gamma
        self.factor: float
        super(PolyLR, self).__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        """
        Returns:
            lr: Current learning rate based on iteration; requires
                last_epoch < max_iter.
        """
        assert self.last_epoch < self.max_iter\
            , f"Last epoch is {self.last_epoch} but needs to be smaller than max iter {self.max_iter}"
        self.factor = (1 - self.last_epoch / float(self.max_iter)) ** self.gamma
        return [self.factor * base_lr for base_lr in self.base_lrs]
| 1,823
| 30.448276
| 102
|
py
|
drlviz
|
drlviz-master/distributions.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 11:35:22 2018
@author: edward
"""
import torch.nn as nn
import torch.nn.functional as F
class Categorical(nn.Module):
    """Categorical action head: a linear layer producing logits over actions,
    with helpers to sample actions and evaluate log-probs/entropy."""

    def __init__(self, num_inputs, num_outputs):
        super(Categorical, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        # Raw (unnormalized) logits; softmax is applied by the callers below.
        x = self.linear(x)
        return x

    def sample(self, x, deterministic):
        """Return one action index per batch row, shape (batch, 1).

        Stochastic mode samples from the softmax distribution; deterministic
        mode takes the argmax.
        """
        x = self(x)
        probs = F.softmax(x, dim=1)
        if deterministic is False:
            # BUG FIX: Tensor.multinomial requires num_samples in modern
            # PyTorch; the original bare probs.multinomial() raises TypeError.
            # One sample per row preserves the intended behavior.
            action = probs.multinomial(num_samples=1)
        else:
            action = probs.max(1, keepdim=True)[1]
        return action

    def logprobs_and_entropy(self, x, actions):
        """Return (log-probs of `actions`, mean entropy of the distribution).

        Args:
            x: Input features, shape (batch, num_inputs).
            actions: Action indices, shape (batch, 1).
        """
        x = self(x)
        log_probs = F.log_softmax(x, dim=1)
        probs = F.softmax(x, dim=1)
        action_log_probs = log_probs.gather(1, actions)
        dist_entropy = -(log_probs * probs).sum(-1).mean()
        return action_log_probs, dist_entropy
| 991
| 22.069767
| 58
|
py
|
drlviz
|
drlviz-master/multi_env.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 09:54:26 2018
@author: edward
A class that can be used to implement many parallel environments
"""
import multiprocessing as mp
import numpy as np
try:
from gym.spaces.box import Box
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
except ImportError:
print('Unable to import gym / OpenAI baselines, I assume you are running the doom env')
from arguments import parse_game_args
from environments import DoomEnvironment
def worker(in_queue, out_queue, params):
    # Worker-process loop: owns one DoomEnvironment and serves commands read
    # from in_queue, replying on out_queue.  Recognised commands:
    #   None          -> shut down the worker
    #   'reset'       -> reset env, reply with first observation
    #   'depth_trim'  -> reply with depth map cropped by 2 px on each side
    #   'depth'       -> reply with full depth map
    #   anything else -> treated as an action index; reply with the
    #                    (obs, reward, done, info) step tuple
    env = DoomEnvironment(params)
    while True:
        action = in_queue.get()
        if action is None:
            # Sentinel: terminate the process loop.
            break
        elif action == 'reset':
            out_queue.put(env.reset())
        elif action == 'depth_trim':
            out_queue.put(env.get_depth()[2:-2,2:-2])
        elif action == 'depth':
            out_queue.put(env.get_depth())
        else:
            obs, reward, done, info = env.step(action)
            out_queue.put((obs, reward, done, info))
class MultiEnvsMP(object):
    """Runs several DoomEnvironments in parallel, one worker process per env.

    Commands are distributed over per-env request queues and results are
    collected from matching response queues (see ``worker``).
    """

    def __init__(self, env_id, num_envs, num_processes, params):
        # One request/response queue pair per environment worker.
        self.in_queues = [mp.Queue() for _ in range(num_envs)]
        self.out_queues = [mp.Queue() for _ in range(num_envs)]
        self.workers = []
        for in_queue, out_queue in zip(self.in_queues, self.out_queues):
            print('Creating environment')
            process = mp.Process(target=worker, args=(in_queue, out_queue, params))
            self.workers.append(process)
            process.start()
        #print('There are {} workers'.format(len(self.workers)))
        assert env_id == 'doom', 'Multiprocessing only implemented for doom envirnment'
        # A throwaway local env provides the action/observation specs.
        tmp_env = DoomEnvironment(params)
        self.num_actions = tmp_env.num_actions
        self.obs_shape = (3, params.screen_height, params.screen_width)
        self.prep = False # Observations already in CxHxW order

    def reset(self):
        """Reset every worker env; returns stacked observations (N, C, H, W)."""
        new_obs = []
        for queue in self.in_queues:
            queue.put('reset')
        for queue in self.out_queues:
            obs = queue.get()
            new_obs.append(self.prep_obs(obs))
        return np.stack(new_obs)

    def get_depths(self, trim=True):
        """Collect depth maps from all workers; ``trim`` crops a 2-px border."""
        depths = []
        command = 'depth'
        if trim: command = 'depth_trim'
        for queue in self.in_queues:
            queue.put(command)
        for queue in self.out_queues:
            depths.append(queue.get())
        return np.stack(depths)

    def prep_obs(self, obs):
        # Transpose HxWxC -> CxHxW only when the env does not already do it.
        if self.prep:
            return obs.transpose(2,0,1)
        else:
            return obs

    def step(self, actions):
        """Step every env with its own action.

        Returns:
            (stacked observations, list of rewards, list of dones, list of infos)
        """
        new_obs = []
        rewards = []
        dones = []
        infos = []
        for action, queue in zip(actions, self.in_queues):
            queue.put(action)
        for queue in self.out_queues:
            obs, reward, done, info = queue.get()
            new_obs.append(self.prep_obs(obs))
            rewards.append(reward)
            dones.append(done)
            # BUG FIX: was infos.append(infos), which appended the list to
            # itself instead of the per-env info object.
            infos.append(info)
        return np.stack(new_obs), rewards, dones, infos
class MultiEnvs(object):
    """Vectorized wrapper running several environments in the current process."""

    def __init__(self, env_id, num_envs, num_processes, params):
        if env_id == 'doom':
            # for the doom scenarios
            self.envs = [DoomEnvironment(params) for i in range(num_envs)]
            self.num_actions = self.envs[0].num_actions
            self.obs_shape = (3, params.screen_height, params.screen_width)
            self.prep = False # Observations already in CxHxW order
        elif env_id == 'home':
            assert 0, 'HoME has not been implemented yet'
        else:
            # if testing on Atari games such as Pong etc
            self.envs = [wrap_deepmind(make_atari(env_id)) for i in range(num_envs)]
            observation_space = self.envs[0].observation_space
            obs_shape = observation_space.shape
            observation_space = Box(
                observation_space.low[0,0,0],
                observation_space.high[0,0,0],
                [obs_shape[2], obs_shape[1], obs_shape[0]]
            )
            action_space = self.envs[0].action_space
            self.num_actions = action_space.n
            self.obs_shape = observation_space.shape
            # Atari frames arrive HxWxC and must be transposed in prep_obs.
            self.prep = True

    def reset(self):
        """Reset all envs; returns stacked observations."""
        return np.stack([self.prep_obs(env.reset()) for env in self.envs])

    def get_depths(self, trim=True):
        """Stacked depth maps from all envs; ``trim`` crops a 2-px border."""
        if trim:
            return np.stack([env.get_depth()[2:-2,2:-2] for env in self.envs])
        else:
            return np.stack([env.get_depth() for env in self.envs])

    def prep_obs(self, obs):
        # Transpose HxWxC -> CxHxW only when the env does not already do it.
        if self.prep:
            return obs.transpose(2,0,1)
        else:
            return obs

    def step(self, actions):
        """Step every env with its own action.

        Returns:
            (stacked observations, list of rewards, list of dones, list of infos)
        """
        new_obs = []
        rewards = []
        dones = []
        infos = []
        for env, action in zip(self.envs, actions):
            obs, reward, done, info = env.step(action)
            # if done:
            #     obs = env.reset()
            new_obs.append(self.prep_obs(obs))
            rewards.append(reward)
            dones.append(done)
            # BUG FIX: was infos.append(infos), which appended the list to
            # itself instead of the per-env info object.
            infos.append(info)
        return np.stack(new_obs), rewards, dones, infos
if __name__ == '__main__':
    # BUG FIX: timeit is used below but was never imported at module level.
    import timeit

    params = parse_game_args()
    params.scenario_dir = '../resources/scenarios/'
    # Multiprocessing environments.
    mp_test_envs = MultiEnvsMP(params.simulator, params.num_environments, 1, params)
    mp_test_envs.reset()
    actions = [2]*16
    for i in range(10):
        new_obs, rewards, dones, infos = mp_test_envs.step(actions)
        print(mp_test_envs.get_depths().shape)
    print(rewards, np.stack(rewards))
    # Single-process environments, for comparison.
    envs = MultiEnvs(params.simulator, params.num_environments, 1, params)
    envs.reset()
    for i in range(10):
        new_obs, rewards, dones, infos = envs.step(actions)
        print(envs.get_depths().shape)
    print(rewards, np.stack(rewards))
    def test_mp_reset():
        mp_test_envs.reset()
    def test_mp_get_obs():
        actions = [2]*16
        new_obs, rewards, dones, infos = mp_test_envs.step(actions)
    def test_sp_reset():
        envs.reset()
    def test_sp_get_obs():
        actions = [2]*16
        new_obs, rewards, dones, infos = envs.step(actions)
    print('#'*80)
    print('#'*80)
    print('--- Running timing tests ---')
    print('#'*80)
    print('Multiprocessing')
    # BUG FIX: timeit.timeit was given a statement *string* with no globals,
    # which raises NameError inside timeit; pass the callable instead.
    print('MP Reset test 1000 trials', timeit.timeit(test_mp_reset, number=10))
    print('#'*80)
    print('Multiprocessing')
| 6,831
| 28.704348
| 98
|
py
|
drlviz
|
drlviz-master/arguments.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 10:37:33 2018
@author: edward
PongNoFrameskip-v4
"""
import argparse
def parse_game_args():
    """ Defines the arguments used for both training and testing the network"""
    # NOTE(review): arguments declared with type=bool are misleading on the
    # command line -- argparse calls bool("False"), which is True for any
    # non-empty string.  Consider action='store_true' (as --limit_actions and
    # --predict_depth already do) instead of type=bool.
    parser = argparse.ArgumentParser(description='Parameters')
    # =========================================================================
    # Environment Parameters
    # =========================================================================
    parser.add_argument('--simulator', type=str, default="doom", help='The environment')
    parser.add_argument('--scenario', type=str, default='health_gathering.cfg', help='The scenario')
    parser.add_argument('--screen_size', type=str, default='320X180', help='Size of Screen, width x height')
    parser.add_argument('--screen_height', type=int, default=64, help='Height of the screen')
    parser.add_argument('--screen_width', type=int, default=112, help='Width of the screen')
    parser.add_argument('--num_environments', type=int, default=16, help='the number of parallel enviroments')
    parser.add_argument('--limit_actions', default=False, action='store_true', help='limited the size of the action space to F, L, R, F+L, F+R')
    parser.add_argument('--use_depth', type=bool, default=False, help='Use the Depth Buffer')
    parser.add_argument('--scenario_dir', type=str, default='scenarios/', help='location of game scenarios')
    parser.add_argument('--show_window', type=bool, default=False, help='Show the game window')
    #parser.add_argument('--decimate', type=bool, default=True, help='Subsample the observations')
    parser.add_argument('--resize', type=bool, default=True, help='Use resize for decimation rather ran downsample')
    # NOTE(review): --norm_obs uses action='store_false', so passing the flag
    # keeps norm_obs False (same as the default) -- verify intended behaviour.
    parser.add_argument('--norm_obs', dest='norm_obs',default=False, action='store_false', help='Divide the obs by 255.0')
    # =========================================================================
    # Model Parameters
    # =========================================================================
    parser.add_argument('--hidden_size', type=int, default=512, help='LSTM hidden size')
    parser.add_argument('--conv_filters', type=int, default=32, help='Number of convolutional filters' )
    parser.add_argument('--predict_depth', default=False, action='store_true', help='make depth predictions')
    parser.add_argument('--reload_model', type=str, default='', help='directory and iter of model to load dir,iter')
    parser.add_argument('--model_checkpoint', type=str, default='', help='the name of a specific model to evaluate, used when making videos')
    # =========================================================================
    # Training Parameters
    # =========================================================================
    parser.add_argument('--learning_rate', type=float, default=7e-4, help='training learning rate')
    parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor')
    parser.add_argument('--frame_skip', type=int, default=4, help='number of frames to repeat last action')
    parser.add_argument('--train_freq', type=int, default=4, help='how often the model is updated')
    parser.add_argument('--train_report_freq', type=int, default=100, help='how often to report the train loss')
    parser.add_argument('--max_iters', type=int, default=5000000, help='maximum number of traning iterations')
    parser.add_argument('--eval_freq', type=int, default=5000, help='how often the model is evaluated, in games')
    parser.add_argument('--eval_games', type=int, default=10, help='how often the model is evaluated, in games')
    parser.add_argument('--cuda', type=bool, default=False, help='Use the GPU?')
    parser.add_argument('--model_save_rate', type=int, default=10000, help='How often to save the model in iters')
    parser.add_argument('--pretrained_head',type=str, default='', help='Name of pretrained convolutional head')
    parser.add_argument('--freeze_pretrained', type=bool, default=True, help='Whether to freeze the weights in pretrained head')
    parser.add_argument('--eps', type=float, default=1e-5, help='RMSprop optimizer epsilon (default: 1e-5)')
    parser.add_argument('--alpha', type=float, default=0.99, help='RMSprop optimizer alpha (default: 0.99)')
    parser.add_argument('--use-gae', action='store_true', default=False, help='use generalized advantage estimation')
    parser.add_argument('--tau', type=float, default=0.95, help='gae parameter (default: 0.95)')
    parser.add_argument('--entropy_coef', type=float, default=0.01, help='entropy term coefficient (default: 0.01)')
    parser.add_argument('--value_loss_coef', type=float, default=0.5, help='value loss coefficient (default: 0.5)')
    parser.add_argument('--max_grad_norm', type=float, default=0.5, help='max norm of gradients (default: 0.5)')
    parser.add_argument('--num_steps', type=int, default=5, help='number of forward steps in A2C (default: 5)')
    parser.add_argument('--num_stack', type=int, default=1,help='number of frames to stack (default: 4)')
    parser.add_argument('--recurrent_policy', action='store_true', default=True, help='use a recurrent policy')
    parser.add_argument('--num_frames', type=int, default=10000000, help='total number of frames')
    parser.add_argument('--depth_coef', type=float, default=0.01, help='weighting for depth loss')
    parser.add_argument('--no_reward_average', default=False, action='store_true', help='switch of reward averaging during frame skip')
    parser.add_argument('--use_em_loss', default=False, action='store_true', help='Use the discrete EM loss, optimal transport for depth preds')
    # =========================================================================
    # Logging Parameters
    # =========================================================================
    parser.add_argument('--user_dir', type=str, default='theo', help='Users home dir name')
    parser.add_argument('--log_interval', type=int, default=100, help='How often to log')
    return parser.parse_args()
if __name__ == '__main__':
    # Smoke test: parse the defaults and echo a few values.
    params = parse_game_args()
    print(params)
    # NOTE(review): no '--action_size' argument is defined above, so this
    # attribute lookup raises AttributeError -- verify intent.
    print(params.action_size)
    import os
    print(os.listdir(params.scenario_dir))
    print(params.scenario)
| 6,390
| 70.808989
| 144
|
py
|
drlviz
|
drlviz-master/reduce.py
|
import ujson
from random import randint
import numpy as np
import torch
from torch.autograd import Variable
from arguments import parse_game_args
from doom_evaluation import BaseAgent
from environments import DoomEnvironment
from models import CNNPolicy
import base64
import io
from PIL import Image
def gen_classic(selh, file):
params = parse_game_args()
params.scenario = "health_gathering_supreme.cfg"
env = DoomEnvironment(params)
device = torch.device("cuda" if False else "cpu")
num_actions = env.num_actions
network = CNNPolicy(3, num_actions, True, (3, 64, 112)).to(device)
checkpoint = torch.load('models/' + "health_gathering_supreme" + '.pth.tar', map_location=lambda storage, loc: storage)
network.load_state_dict(checkpoint['model'])
agent = BaseAgent(network, params)
ERU = {'env': env, 'agent': agent}
selh = torch.from_numpy(selh).type(torch.FloatTensor)
selh = Variable(selh, volatile=True)
ERU['env'].set_seed(randint(0, 999999999))
ERU['env'].reset()
scores = []
hiddens = []
inputs = []
saliencies = []
actions = []
probabilities = []
health = []
positions = []
orientations = []
velocities = []
items = []
fov = []
w = 0
while not ERU['env'].is_episode_finished():
obsvervation = io.BytesIO()
obs = ERU['env'].get_observation()
temp = ERU['env'].state.screen_buffer
Image.fromarray(temp.transpose(1, 2, 0)).save(obsvervation, format="JPEG")
action, value, action_probs, grads = ERU['agent'].get_action_value_and_probs_zeroes(obs, selh, epsilon=0.0)
hidden = ERU['agent'].model.get_gru_h()
h = ''
for elem in hidden[0][0]:
h += str(elem) + ","
h = h[:-1]
h = h.split(',')
probs = ""
for elem in action_probs[0]:
probs += str(elem) + ","
probs = probs[:-1]
probs = probs.split(',')
sa = io.BytesIO()
t = Image.fromarray(grads, 'L')
t.save(sa, format="JPEG")
scores.append(str(round(ERU['env'].game.get_total_reward(), 2)))
hiddens.append(h)
inputs.append(base64.b64encode(obsvervation.getvalue()))
saliencies.append(base64.b64encode(sa.getvalue()))
actions.append(str(action))
probabilities.append(probs)
health.append(ERU['env'].get_health())
positions.append(ERU['env'].get_pos())
orientations.append(ERU['env'].get_ori())
velocities.append(ERU['env'].get_velo())
items.append(ERU['env'].get_item())
fov.append(ERU['env'].get_fov())
ERU['env'].make_action(int(action))
print('Iteration', w, '/525')
w += 1
result = {'episode0': {
'inputs': inputs,
'actions': actions,
'probabilities': probabilities,
'saliencies': saliencies,
'scores': scores,
'positions': positions,
'health': health,
'hiddens': hiddens,
'orientations': orientations,
'velocities': velocities,
'items': items,
'fov': fov
}
}
with open(file, 'w') as f:
ujson.dump(result, f, indent=4, sort_keys=True)
return result
def remove_all():
    """Return a length-512 mask with every hidden unit damped to 0.02.

    Returns:
        np.ndarray of shape (512,), dtype float64, filled with 0.02.
    """
    # BUG FIX: the np.float alias was removed in NumPy 1.20+; the builtin
    # float is the documented replacement (same float64 dtype).
    return np.full(
        shape=512,
        fill_value=0.02,
        dtype=float)
def top(n):
top = [2, 13, 375, 105, 141, 203, 12, 381, 500, 496, 485, 455, 74, 315, 308, 75, 93, 223, 302, 207, 2, 108, 384, 177, 266, 129, 158, 182, 211, 85, 323, 205, 115, 421, 332, 400, 72, 21, 139, 220, 402, 499, 343, 215, 280, 194, 66, 65, 56, 284, 106, 86, 376, 161, 471, 262, 483, 312, 237, 195, 197, 335, 488, 260, 290, 146, 116, 11, 30, 477, 425, 458, 417, 379, 87, 448, 298, 79, 474, 208, 265, 213, 31, 169, 149, 219, 413, 270, 240, 256,
468, 288, 152, 18, 100, 15, 502, 258, 176, 187, 23, 244, 359, 168, 101, 17, 247, 493, 238, 320, 268, 319, 282, 487, 325, 420, 179, 392, 511, 482, 350, 239, 142, 200, 251, 148, 170, 112, 50, 344, 173, 193, 422, 189, 291, 371, 313, 113, 463, 339, 131, 469, 120, 362, 62, 435, 224, 406, 172, 78, 484, 295, 416, 346, 49, 164, 34, 150, 70, 160, 389, 236, 409, 67, 180, 159, 441, 69, 162, 190, 361, 145, 127, 370, 155, 281, 94, 329,
10,
137, 272, 27, 366, 16, 309, 460, 464, 333, 204, 229, 348, 278, 226, 466, 436, 7, 503, 428, 232, 257, 32, 221, 181, 218, 283, 405, 104, 60, 230, 241, 25, 19, 84, 191, 318, 286, 431, 461, 111, 263, 310, 399, 8, 107, 299, 233, 39, 356, 143, 430, 209, 360, 307, 28, 147, 134, 217, 125, 199, 490, 340, 188, 167, 401, 119, 98, 364, 103, 377, 216, 52, 453, 296, 0, 235, 114, 253, 274, 122, 465, 462, 358, 457, 89, 198, 373, 276, 443,
367, 354, 254, 285, 450, 345, 68, 398, 369, 41, 228, 243, 271, 365, 439, 480, 437, 479, 90, 294, 394, 6, 330, 418, 390, 37, 311, 432, 363, 178, 222, 368, 48, 407, 506, 433, 135, 20, 40, 374, 128, 51, 225, 404, 99, 410, 165, 138, 357, 470, 252, 349, 196, 509, 341, 35, 175, 46, 73, 97, 492, 316, 102, 423, 459, 227, 166, 117, 478, 391, 387, 412, 396, 395, 140, 475, 24, 314, 383, 264, 214, 382, 55, 242, 352, 334, 393, 76, 5,
328,
38, 255, 279, 124, 80, 126, 297, 451, 53, 110, 202, 45, 331, 505, 63, 275, 445, 419, 388, 163, 372, 206, 249, 261, 61, 118, 481, 301, 442, 136, 3, 43, 397, 324, 342, 183, 353, 336, 82, 44, 454, 501, 77, 347, 157, 305, 287, 59, 497, 438, 248, 486, 504, 472, 185, 91, 452, 22, 322, 408, 355, 133, 201, 429, 508, 132, 440, 317, 447, 449, 151, 427, 88, 415, 121, 234, 144, 351, 456, 269, 245, 434, 380, 473, 109, 337, 47, 385, 510,
58, 491, 489, 250, 14, 498, 386, 424, 231, 476, 156, 378, 192, 171, 277, 4, 300, 54, 411, 292, 36, 306, 210, 130, 83, 338, 186, 414, 123, 321, 293, 303, 184, 495, 9, 494, 246, 153, 446, 426, 174, 95, 96, 507, 81, 327, 64, 33, 1, 29, 42, 304, 403, 154, 467, 273, 57, 326, 289, 212, 26, 71, 444, 267, 259]
apply_oder(n, top)
def change(n):
ch = [215, 86, 290, 266, 108, 262, 106, 483, 448, 471, 417, 421, 265, 194, 502, 187, 320, 244, 176, 323, 413, 72, 169, 359, 17, 177, 100, 379, 268, 511, 500, 335, 463, 75, 30, 406, 308, 238, 161, 205, 312, 258, 219, 193, 474, 200, 240, 173, 62, 288, 208, 282, 344, 339, 31, 170, 485, 120, 224, 10, 332, 164, 291, 148, 67, 236, 409, 27, 50, 94, 101, 150, 87, 416, 487, 34, 23, 420, 56, 484, 428, 158, 260, 78, 168, 466, 272, 107, 189,
381, 422, 455, 49, 211, 460, 493, 441, 230, 159, 172, 162, 70, 221, 425, 251, 477, 142, 366, 464, 209, 333, 84, 191, 217, 213, 348, 469, 319, 298, 129, 160, 179, 435, 195, 364, 149, 443, 296, 468, 285, 313, 283, 458, 399, 69, 377, 12, 74, 239, 28, 488, 114, 263, 39, 188, 310, 218, 52, 450, 119, 294, 369, 181, 278, 330, 190, 6, 97, 392, 346, 387, 318, 104, 457, 178, 311, 360, 233, 68, 131, 367, 90, 41, 492, 390, 46, 180, 20,
398, 98, 365, 60, 480, 295, 357, 232, 499, 175, 165, 407, 167, 345, 430, 137, 220, 151, 418, 475, 490, 478, 243, 2, 111, 397, 43, 140, 470, 264, 152, 21, 48, 196, 439, 66, 383, 254, 166, 40, 415, 38, 404, 229, 16, 145, 204, 354, 15, 125, 394, 454, 362, 206, 432, 437, 456, 128, 506, 503, 257, 305, 25, 462, 117, 11, 325, 301, 99, 334, 393, 0, 352, 235, 297, 401, 508, 316, 479, 102, 127, 321, 228, 368, 287, 449, 274, 55, 198,
207, 347, 18, 391, 317, 302, 144, 85, 396, 331, 138, 340, 271, 118, 5, 14, 112, 380, 459, 389, 408, 185, 234, 465, 51, 431, 261, 374, 495, 280, 434, 77, 436, 497, 139, 29, 37, 315, 385, 45, 155, 253, 395, 245, 370, 19, 225, 141, 201, 80, 210, 400, 35, 223, 73, 372, 461, 322, 275, 47, 476, 110, 355, 307, 231, 4, 373, 36, 115, 303, 197, 501, 429, 136, 24, 95, 255, 358, 237, 89, 154, 281, 338, 489, 163, 328, 226, 121, 93, 496,
442, 445, 324, 342, 113, 183, 269, 71, 44, 382, 494, 58, 329, 453, 481, 227, 452, 314, 386, 216, 447, 88, 246, 133, 507, 505, 350, 132, 337, 504, 388, 199, 438, 124, 22, 378, 130, 286, 276, 63, 143, 53, 491, 351, 64, 343, 353, 83, 414, 509, 336, 473, 427, 419, 472, 433, 446, 411, 467, 153, 241, 412, 510, 122, 256, 57, 123, 156, 250, 192, 277, 384, 252, 202, 486, 279, 212, 3, 327, 146, 214, 424, 59, 82, 293, 134, 361, 304, 259,
306, 109, 81, 65, 184, 440, 135, 222, 341, 247, 498, 13, 103, 363, 1, 186, 426, 289, 91, 54, 403, 157, 482, 444, 147, 410, 423, 76, 42, 267, 451, 92, 116, 61, 375, 79, 249, 284, 33, 174, 126, 273, 376, 292, 182, 105, 26, 32, 96, 349, 326, 248, 242, 356, 8, 7, 402, 405, 203, 299, 171, 371, 270, 309, 9, 300]
apply_oder(n, ch)
def tsne_1d_projection(n):
proj = [381, 500, 203, 92, 141, 12, 485, 105, 375, 13, 308, 75, 455, 496, 74, 315, 93, 223, 302, 207, 2, 384, 158, 129, 211, 266, 108, 85, 182, 323, 205, 115, 400, 332, 139, 21, 220, 402, 177, 499, 343, 72, 280, 194, 215, 66, 65, 284, 56, 421, 197, 237, 195, 376, 11, 477, 30, 146, 290, 116, 312, 335, 79, 106, 260, 87, 213, 161, 458, 262, 488, 425, 86, 417, 471, 298, 31, 483, 474, 448, 265, 168, 208, 392, 288, 17, 379, 493, 18, 173,
256, 200, 100, 176, 344, 240, 502, 282, 291, 268, 189, 149, 320, 409, 187, 120, 23, 142, 148, 162, 295, 219, 67, 258, 27, 464, 359, 170, 484, 193, 377, 236, 468, 270, 181, 150, 247, 233, 413, 251, 244, 482, 319, 350, 172, 406, 101, 169, 160, 371, 272, 420, 416, 463, 164, 339, 50, 333, 62, 145, 428, 239, 511, 487, 441, 221, 466, 457, 179, 34, 238, 348, 224, 113, 329, 460, 422, 78, 362, 469, 309, 190, 313, 278, 10, 435, 281,
370, 131, 361, 299, 232, 241, 7, 127, 8, 399, 69, 119, 39, 436, 461, 49, 229, 159, 52, 307, 401, 318, 389, 104, 286, 230, 257, 94, 111, 112, 226, 465, 143, 134, 209, 431, 84, 366, 122, 354, 283, 254, 394, 137, 28, 46, 218, 325, 152, 15, 155, 405, 32, 16, 358, 503, 199, 346, 356, 263, 103, 147, 19, 216, 138, 98, 125, 274, 25, 490, 453, 204, 107, 135, 341, 180, 70, 242, 360, 128, 340, 367, 222, 225, 396, 369, 202, 509, 0, 432,
480, 478, 349, 363, 276, 364, 60, 310, 37, 437, 191, 433, 398, 334, 228, 214, 68, 506, 249, 390, 217, 185, 117, 252, 188, 316, 301, 41, 35, 279, 365, 423, 61, 439, 89, 430, 53, 44, 382, 479, 175, 20, 102, 178, 126, 504, 114, 294, 393, 82, 314, 388, 462, 271, 330, 77, 505, 124, 5, 336, 296, 196, 407, 374, 198, 51, 391, 412, 368, 450, 404, 55, 261, 165, 275, 206, 373, 80, 235, 324, 6, 167, 163, 443, 136, 383, 140, 264, 459,
40, 22, 442, 99, 372, 97, 73, 451, 447, 410, 438, 456, 91, 395, 497, 486, 380, 255, 473, 311, 76, 491, 253, 36, 342, 110, 351, 440, 508, 184, 90, 14, 243, 475, 418, 292, 38, 501, 183, 250, 59, 130, 328, 472, 434, 133, 397, 54, 285, 345, 386, 166, 492, 227, 88, 245, 331, 83, 449, 201, 297, 452, 498, 476, 454, 118, 427, 357, 355, 45, 429, 387, 510, 58, 470, 489, 121, 414, 156, 306, 385, 132, 186, 234, 305, 353, 347, 47, 300,
210, 144, 481, 494, 338, 337, 246, 446, 151, 411, 408, 9, 403, 445, 424, 293, 495, 415, 63, 273, 95, 33, 109, 212, 1, 507, 303, 153, 304, 71, 321, 57, 154, 259, 29, 317, 231, 287, 326, 43, 327, 64, 289, 322, 81, 267, 26, 42, 171, 277, 444, 174, 467, 378, 192, 426, 4, 123, 269, 352, 419, 96, 3, 48, 157, 248, 24]
return apply_oder(n, proj)
def apply_oder(n, order):
    """Build a 512-unit mask keeping the first ``n`` entries of ``order`` active.

    Args:
        n: number of hidden units to keep fully enabled (0 <= n <= 512).
        order: sequence of unit indices, most important first.

    Returns:
        np.ndarray of shape (512,): 1 at the kept indices, 0.02 elsewhere.
    """
    # FIX: was `n < 512`, an off-by-one that forbade keeping the full
    # 512-element permutation even though `order` contains 512 indices.
    assert n <= 512, "n must be <= 512"
    mask = remove_all()
    for i in range(n):
        mask[order[i]] = 1
    return mask
if __name__ == '__main__':
    # mask = top(20) # This line allows you to keep the top activated 20 elements
    # mask = change(20) # This line allows you to keep the top changing 20 elements
    # NOTE(review): top() and change() contain no return statement, so the two
    # commented-out lines above would set mask to None if re-enabled.
    mask = tsne_1d_projection(50) # This line allows you to keep the top tsne_1d_projection 50 elements
    # mask = remove_all() #This removes all elements.
    data = gen_classic(mask, "result.json")
| 11,617
| 64.638418
| 440
|
py
|
drlviz
|
drlviz-master/splitter.py
|
import ujson as ujson
def split_json(file):
    """Extract the 'episode0' object from *file* and write it to data/<file>."""
    with open(file, "r") as source:
        payload = ujson.load(source)
    with open("data/" + file, "w") as sink:
        ujson.dump(payload["episode0"], sink, indent=4)
if __name__ == '__main__':
split_json('health_gathering_supreme.json')
| 300
| 20.5
| 56
|
py
|
drlviz
|
drlviz-master/environments.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 8 11:03:06 2018
@author: edward
"""
from vizdoom import DoomGame, ScreenResolution, GameVariable, Button, AutomapMode
import numpy as np
from cv2 import resize
import cv2
class DoomEnvironment():
    """
    A wrapper class for the Doom Maze Environment
    """
    def __init__(self, params):
        # params: namespace from arguments.parse_game_args(); also mutated
        # here (params.num_actions is written back for the caller).
        self.game = DoomGame()
        VALID_SCENARIOS = ['my_way_home.cfg',
                           'health_gathering.cfg',
                           'health_gathering_supreme.cfg',
                           'health_gathering_supreme_no_death_penalty.cfg',
                           'custom_maze_001.cfg',
                           'custom_maze_002.cfg',
                           'take_cover.cfg']
        if params.scenario in VALID_SCENARIOS:
            self.game.load_config(params.scenario_dir + params.scenario)
        else:
            assert 0, 'Invalid environment {}'.format(params.scenario)
        # NOTE(review): the '320X180' setting actually selects RES_640X360;
        # observations are later resized to (screen_width, screen_height).
        if params.screen_size == '320X180':
            self.game.set_screen_resolution(ScreenResolution.RES_640X360)
        else:
            assert 0, 'Invalid screen_size {}'.format(params.screen_size)
        if params.use_depth or params.predict_depth:
            # Depth buffer is needed as model input or prediction target.
            self.game.set_depth_buffer_enabled(True)
        # Labels buffer is always on; get_fov() reads it unconditionally.
        self.game.set_labels_buffer_enabled(True)
        self.game.set_automap_buffer_enabled(False)
        # self.game.set_automap_mode(AutomapMode.OBJECTS)
        # self.game.set_automap_rotate(False)
        # self.game.set_automap_render_textures(False)
        self.predict_depth = params.predict_depth
        self.screen_width = params.screen_width
        self.screen_height = params.screen_height
        self.no_reward_average = params.no_reward_average
        self.game.set_window_visible(params.show_window)
        self.game.set_render_hud(False)
        self.game.init()
        # Health tracking is only possible in scenarios exposing HEALTH.
        if GameVariable.HEALTH in self.game.get_available_game_variables():
            self.previous_health = self.game.get_game_variable(GameVariable.HEALTH)
        self.resize = params.resize
        self.frame_skip = params.frame_skip
        self.norm_obs = params.norm_obs
        self.action_map = self._gen_actions(self.game, params.limit_actions, params.scenario)
        params.num_actions = len(self.action_map)
        self.num_actions = len(self.action_map)
        # print('Environment initialized')

    def _gen_actions(self, game, limit_action_space, sc):
        # Build {action index -> button state list} for the scenario.
        # NOTE(review): `buttons` is fetched but never used below.
        buttons = game.get_available_buttons()
        # if buttons == [Button.TURN_LEFT, Button.TURN_RIGHT, Button.MOVE_FORWARD, Button.MOVE_BACKWARD]:
        if sc == 'take_cover.cfg':
            # take_cover exposes only two buttons (move left / move right).
            feasible_actions = [[True, False], [False, True]]
        else:
            if limit_action_space:
                feasible_actions = [[True, False, False, False],  # Left
                                    [False, True, False, False],  # Right
                                    [False, False, True, False],  # Forward
                                    [True, False, True, False],   # Left + Forward
                                    [False, True, True, False]]   # Right + forward
            else:
                feasible_actions = [[True, False, False, False],  # Left
                                    [False, True, False, False],  # Right
                                    [False, False, True, False],  # Forward
                                    [False, False, False, True],  # Backward
                                    [True, False, True, False],   # Left + Forward
                                    [True, False, False, True],   # Left + Backward
                                    [False, True, True, False],   # Right + forward
                                    [False, True, False, True]]   # Right + backward
        action_map = {i: act for i, act in enumerate(feasible_actions)}
        # print(action_map)
        return action_map

    def reset(self):
        # Start a new episode and return the first observation.
        self.game.new_episode()
        if GameVariable.HEALTH in self.game.get_available_game_variables():
            self.previous_health = self.game.get_game_variable(GameVariable.HEALTH)
        return self.get_observation()

    def is_episode_finished(self):
        return self.game.is_episode_finished()

    def get_observation(self):
        # Caches the current game state on self.state (also used by
        # get_depth / get_fov) and returns a CxHxW observation.
        self.state = self.game.get_state()
        observation = self.state.screen_buffer
        if self.resize:
            # cv2 resize is 10x faster than skimage 1.37 ms -> 126 us
            observation = resize(
                observation.transpose(1, 2, 0),
                (self.screen_width, self.screen_height), cv2.INTER_AREA
            ).transpose(2, 0, 1)
        return self._normalize_observation(observation[:])

    def get_depth(self):
        # Requires get_observation() to have been called first (self.state).
        assert self.predict_depth, 'Trying to predict depth but this option was not enabled in arguments'
        depth = self.state.depth_buffer
        return self._prepare_depth(depth)

    def _prepare_depth(self, depth_buffer):
        """
        resize the depth buffer so it is the same size as the output of the models conv head
        discretize the values in range 0-7 so we can predict the depth in as a classification
        """
        resized_depth = resize(depth_buffer, (self.screen_width // 8, self.screen_height // 8), cv2.INTER_AREA).astype(
            np.float32) * (1.0 / 255.0)
        # Log-like bucketing into 8 classes (uint8 values 0..7).
        return np.clip(np.floor((10 ** resized_depth - 1.0) * 5.0), 0.0, 7.0).astype(np.uint8)

    def _normalize_observation(self, observation):
        """
        Normalize the observation by making it in the range 0.0-1.0
        type conversion first is 2x faster
        multiplication is 4x faster than division
        """
        if self.norm_obs:
            return observation.astype(np.float32) * (1.0 / 255.0)
        else:
            return observation.astype(np.float32)

    def make_action(self, action):
        """
        perform an action, includes an option to skip frames but repeat
        the same action.
        TODO: Is normalization of the reward by the count required here?
        """
        reward = self.game.make_action(self.action_map[action])
        reward += self._check_health()
        count = 1.0
        for skip in range(1, self.frame_skip):
            if self.is_episode_finished():
                break
            reward += self.game.make_action(self.action_map[action])
            reward += self._check_health()
            count += 1.0
        # With --no_reward_average the summed reward is returned as-is.
        if self.no_reward_average:
            count = 1.0
        return reward / count

    def step(self, action):
        # Gym-style step.  On episode end the env auto-resets, so the
        # returned obs is the first frame of the NEW episode; info is None.
        reward = self.make_action(action)
        done = self.is_episode_finished()
        if done:
            obs = self.reset()
        else:
            obs = self.get_observation()
        return obs, reward, done, None

    def _check_health(self):
        """
        Modification to reward function in order to reward the act of finding a health pack
        """
        health_reward = 0.0
        if GameVariable.HEALTH not in self.game.get_available_game_variables():
            self.previous_health = self.game.get_game_variable(GameVariable.HEALTH)
            return health_reward
        if self.game.get_game_variable(GameVariable.HEALTH) > self.previous_health:
            # print('found healthkit')
            health_reward = 1.0
        self.previous_health = self.game.get_game_variable(GameVariable.HEALTH)
        return health_reward

    def get_total_reward(self):
        return self.game.get_total_reward()

    def get_pos(self):
        # Player (x, y) world position.
        return [self.game.get_game_variable(GameVariable.POSITION_X), self.game.get_game_variable(GameVariable.POSITION_Y)]

    def get_map(self):
        # NOTE(review): the automap buffer is disabled in __init__, so this
        # presumably always returns 'nope' -- confirm before relying on it.
        if self.game.get_state().automap_buffer is not None:
            return self.game.get_state().automap_buffer
        else:
            return 'nope'

    def set_seed(self, seed):
        self.game.set_seed(seed)

    def get_seed(self):
        return self.game.get_seed()

    def get_health(self):
        return self.game.get_game_variable(GameVariable.HEALTH)

    def get_ori(self):
        # Player orientation angle.
        return self.game.get_game_variable(GameVariable.ANGLE)

    def get_secret(self):
        return self.game.get_game_variable(GameVariable.SECRETCOUNT)

    def get_item(self):
        return self.game.get_game_variable(GameVariable.ITEMCOUNT)

    def get_velo(self):
        # Player (vx, vy) velocity.
        return [self.game.get_game_variable(GameVariable.VELOCITY_X), self.game.get_game_variable(GameVariable.VELOCITY_Y)]

    def get_fov(self):
        # Labeled objects currently in the field of view, as plain dicts.
        res = []
        if len(self.game.get_state().labels) > 0:
            for i in range(len(self.game.get_state().labels)):
                lab = self.game.get_state().labels[i]
                res.append({"object_id": lab.object_id, "object_name": lab.object_name, "object_position_x": lab.object_position_x, "object_position_y": lab.object_position_y, "object_x": lab.x})
        return res
def test():
def simulate_rollout(env):
from random import choice
buffer = []
env.reset()
k = 0
while not env.is_episode_finished():
k += 1
obs = env.get_observation()
buffer.append(obs)
# Makes a random action and save the reward.
reward = env.make_action(choice(list(range(env.num_actions))))
print('Game finished in {} steps'.format(k))
print('Total rewards = {}'.format(env.get_total_reward()))
return k, buffer
# =============================================================================
# Test the environment
# =============================================================================
from arguments import parse_game_args
params = parse_game_args()
env = DoomEnvironment(params)
print(env.num_actions)
print(env.game.get_available_buttons())
print(len(env.action_map))
print(env.game.get_screen_height(), env.game.get_screen_width())
print(env.get_observation().shape)
import matplotlib.pyplot as plt
plt.imshow(env.get_observation().transpose(1, 2, 0))
plt.figure()
plt.imshow(env.get_observation().transpose(1, 2, 0))
env.decimate = False
def resize_obs(observation):
observation = observation.transpose(1, 2, 0)
observation = resize(observation, (observation.shape[0] / 2, observation.shape[1] / 2))
observation = observation.transpose(2, 0, 1)
return observation
data = env.get_observation().transpose(1, 2, 0)
from skimage.transform import rescale, resize, downscale_local_mean
data_resized = resize(data, (data.shape[0] / 2, data.shape[1] / 2))
plt.figure()
plt.imshow(data_resized)
obs = env.get_observation()
obs_rs = resize_obs(obs)
assert 0
for action in env.action_map.keys():
reward = env.make_action(action)
print(reward, env.is_episode_finished())
for i in range(100):
k, b = simulate_rollout(env)
print(env.game.get_available_game_variables())
print(env.game.get_game_variable(GameVariable.HEALTH))
def test_label_buffer():
"""Visual check of the game's labels buffer against RGB observations at several scales."""
import matplotlib.pyplot as plt
import random
from doom_rdqn.arguments import parse_game_args
params = parse_game_args()
params.decimate = False
env = DoomEnvironment(params)
# Advance a few random steps so the rendered scene is non-trivial.
for i in range(10):
env.make_action(random.choice(list(range(8))))
state = env.game.get_state()
labels_buffer = state.labels_buffer
label = state.labels
# Side by side: RGB observation and the labels buffer (presumably per-pixel
# object ids — confirm against ViZDoom documentation).
plt.subplot(1, 2, 1)
plt.imshow(env.get_observation().transpose(1, 2, 0))
plt.subplot(1, 2, 2)
plt.imshow(labels_buffer)
plt.figure()
plt.imshow(resize(labels_buffer, (56, 32), cv2.INTER_AREA))
plt.figure()
plt.imshow(resize(env.get_observation().transpose(1, 2, 0), (112, 64), cv2.INTER_AREA))
data = env.get_observation()
def resize_test(image):
# CHW -> HWC, resize to (112, 64), back to CHW.
return resize(image.transpose(1, 2, 0), (112, 64)).transpose(2, 0, 1)
if __name__ == '__main__':
# Ad-hoc manual check: build an environment from CLI args and grab the raw game state.
import matplotlib.pyplot as plt
import random
from doom_rdqn.arguments import parse_game_args
params = parse_game_args()
env = DoomEnvironment(params)
state = env.game.get_state()
| 12,227
| 34.239193
| 195
|
py
|
drlviz
|
drlviz-master/models.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 10:53:06 2018
@author: edward
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from distributions import Categorical
# A temporary solution from the master branch.
# https://github.com/pytorch/pytorch/blob/7752fe5d4e50052b3b0bbc9109e599f8157febc0/torch/nn/init.py#L312
# Remove after the next version of PyTorch gets release.
def orthogonal(tensor, gain=1):
    """Fill ``tensor`` in place with a (semi-)orthogonal matrix scaled by ``gain``.

    Port of ``torch.nn.init.orthogonal_`` (Saxe et al., 2013). A tensor with
    more than two dimensions is treated as a ``(rows, prod(other dims))``
    matrix.

    Args:
        tensor: an n-dimensional tensor with n >= 2.
        gain: optional multiplicative factor applied after orthogonalisation.

    Returns:
        The same ``tensor`` object, modified in place.

    Raises:
        ValueError: if ``tensor`` has fewer than 2 dimensions.
    """
    if tensor.ndimension() < 2:
        raise ValueError("Only tensors with 2 or more dimensions are supported")

    rows = tensor.size(0)
    cols = tensor[0].numel()
    flattened = torch.randn(rows, cols)

    if rows < cols:
        flattened.t_()

    # Compute the qr factorization. ``torch.qr`` is deprecated (the original
    # code carried a TODO to update this), so use the torch.linalg equivalent.
    q, r = torch.linalg.qr(flattened)
    # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
    d = torch.diag(r, 0)
    ph = d.sign()
    q *= ph.expand_as(q)

    if rows < cols:
        q.t_()
    tensor.view_as(q).copy_(q)
    tensor.mul_(gain)
    return tensor
def weights_init(m):
    """Orthogonally initialise Conv/Linear weights and zero their biases.

    Intended for use with ``nn.Module.apply``; other module types are left
    untouched.
    """
    name = m.__class__.__name__
    is_conv_or_linear = ('Conv' in name) or ('Linear' in name)
    if is_conv_or_linear:
        orthogonal(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0)
class FFPolicy(nn.Module):
"""Base actor-critic policy. Subclasses implement forward() -> (value, features, states);
``self.dist`` is expected to be a Categorical head over the feature vector."""
def __init__(self):
super(FFPolicy, self).__init__()
def forward(self, inputs, states, masks, masktry):
raise NotImplementedError
def act(self, inputs, states, masks, deterministic=False):
# Sample an action from the distribution head; returns (value, action, log-prob, states).
# NOTE(review): calls self(...) without the ``masktry`` argument that forward() declares —
# works only if subclasses give it a default; confirm against subclass signatures.
value, x, states = self(inputs, states, masks)
action = self.dist.sample(x, deterministic=deterministic)
action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, action)
return value, action, action_log_probs, states
def evaluate_actions(self, inputs, states, masks, actions, pred_depths=False):
# Recompute log-probs/entropy for given actions; optionally also return depth predictions.
if pred_depths:
value, x, states, depths = self(inputs, states, masks, pred_depths)
action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, actions)
return value, action_log_probs, dist_entropy, states, depths
else:
value, x, states = self(inputs, states, masks)
action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, actions)
return value, action_log_probs, dist_entropy, states, None
def get_action_value_and_probs(self, inputs, states, masks, masktry, deterministic=False):
# As act(), but also returns the softmax action probabilities and the raw feature vector x.
value, x, states = self(inputs, states, masks, masktry)
action = self.dist.sample(x, deterministic=deterministic)
action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, action)
return value, action, F.softmax(self.dist(x), dim=1), states, x
class CNNPolicy(FFPolicy):
"""Convolutional actor-critic policy with an optional GRU recurrent core.

Input images are scaled by 1/255 in forward(), passed through a 3-layer conv
stack, a 512-unit linear layer, then (optionally) a GRUCell whose hidden state
doubles as the policy feature vector."""
def __init__(self, num_inputs, num_actions, use_gru, input_shape):
super(CNNPolicy, self).__init__()
# self.conv1 = nn.Conv2d(num_inputs, 32, 8, stride=4)
# self.relu1 = nn.ReLU(True)
# self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
# self.relu2 = nn.ReLU(True)
# self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
# self.relu3 = nn.ReLU()
# Last GRU hidden state, kept for inspection via get_gru_h().
self.h = None
self.conv_head = nn.Sequential(nn.Conv2d(num_inputs, 32, 8, stride=4),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(True),
nn.Conv2d(64, 32, 3, stride=1),
nn.ReLU())
# Probe the conv stack with a dummy input to size the first linear layer.
conv_input = torch.autograd.Variable(torch.randn((1,) + input_shape))
self.conv_out_size = self.conv_head(conv_input).nelement()
self.hidden_size = 512
self.linear1 = nn.Linear(self.conv_out_size, self.hidden_size)
if use_gru:
self.gru = nn.GRUCell(512, 512)
self.critic_linear = nn.Linear(512, 1)
self.dist = Categorical(512, num_actions)
# NOTE(review): set to eval() here (CNNDepthPolicy uses train()) — confirm intended.
self.eval()
self.reset_parameters()
@property
def state_size(self):
# Size of the recurrent state carried between steps (1 = effectively stateless).
if hasattr(self, 'gru'):
return 512
else:
return 1
def reset_parameters(self):
"""Orthogonal init for all Conv/Linear layers plus ReLU gain on conv/linear weights."""
self.apply(weights_init)
relu_gain = nn.init.calculate_gain('relu')
# Indices 0, 2, 4 of conv_head are the Conv2d layers.
for i in range(0, 6, 2):
self.conv_head[i].weight.data.mul_(relu_gain)
self.linear1.weight.data.mul_(relu_gain)
if hasattr(self, 'gru'):
orthogonal(self.gru.weight_ih.data)
orthogonal(self.gru.weight_hh.data)
self.gru.bias_ih.data.fill_(0)
self.gru.bias_hh.data.fill_(0)
if self.dist.__class__.__name__ == "DiagGaussian":
self.dist.fc_mean.weight.data.mul_(0.01)
def forward(self, inputs, states, masks, masktry, pred_depth=False):
# ``masks`` zeroes the hidden state at episode boundaries; ``masktry`` is an
# optional extra multiplicative mask applied to the new hidden state.
x = self.conv_head(inputs * (1.0 / 255.0))
x = x.view(-1, self.conv_out_size)
x = self.linear1(x)
x = F.relu(x)
if hasattr(self, 'gru'):
if inputs.size(0) == states.size(0):
# Single-step path (acting): one GRU update per environment.
x = states = self.gru(x, states * masks)
if len(masktry) > 0:
x = states = states * masktry
self.h = x
else:
# Multi-step path (training): unroll the GRU over the time dimension.
x = x.view(-1, states.size(0), x.size(1))
masks = masks.view(-1, states.size(0), 1)
outputs = []
for i in range(x.size(0)):
hx = states = self.gru(x[i], states * masks[i])
outputs.append(hx)
x = torch.cat(outputs, 0)
return self.critic_linear(x), x, states
#
# def get_cnn_w(self):
# a = self.conv1.cpu().weight.data
# b = self.conv2.cpu().weight.data
# c = self.conv3.cpu().weight.data
#
# self.conv1.cuda()
# self.conv2.cuda()
# self.conv3.cuda()
# return [a, b, c]
#
# def get_cnn_f(self):
# a = self.x1.cpu().data.numpy()
# b = self.x2.cpu().data.numpy()
# c = self.x3.cpu().data.numpy()
#
# return [a, b, c]
#
def get_gru_h(self):
# Expose the last single-step GRU hidden state as a numpy array (for visualisation).
return [self.h.cpu().data.numpy()]
class CNNDepthPolicy(FFPolicy):
"""CNNPolicy variant with an auxiliary depth-prediction head on the conv features.

When forward() is called with pred_depth=True it additionally returns an
8-channel 1x1-conv projection of the conv feature map."""
def __init__(self, num_inputs, num_actions, use_gru, input_shape):
super(CNNDepthPolicy, self).__init__()
self.conv_head = nn.Sequential(nn.Conv2d(num_inputs, 32, 8, stride=4),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(True),
nn.Conv2d(64, 32, 3, stride=1),
nn.ReLU())
# Auxiliary depth head: 1x1 conv from 32 feature maps to 8 output channels.
self.depth_head = nn.Conv2d(32, 8, 1, 1)
# Probe the conv stack with a dummy input to size the first linear layer.
conv_input = torch.autograd.Variable(torch.randn((1,) + input_shape))
print(conv_input.size(), self.conv_head(conv_input).size())
self.conv_out_size = self.conv_head(conv_input).nelement()
self.linear1 = nn.Linear(self.conv_out_size, 512)
if use_gru:
self.gru = nn.GRUCell(512, 512)
self.critic_linear = nn.Linear(512, 1)
self.dist = Categorical(512, num_actions)
self.train()
self.reset_parameters()
@property
def state_size(self):
# Size of the recurrent state carried between steps (1 = effectively stateless).
if hasattr(self, 'gru'):
return 512
else:
return 1
def reset_parameters(self):
"""Orthogonal init for all Conv/Linear layers plus ReLU gain on conv/linear weights."""
self.apply(weights_init)
relu_gain = nn.init.calculate_gain('relu')
# Indices 0, 2, 4 of conv_head are the Conv2d layers.
for i in range(0, 6, 2):
self.conv_head[i].weight.data.mul_(relu_gain)
self.linear1.weight.data.mul_(relu_gain)
if hasattr(self, 'gru'):
orthogonal(self.gru.weight_ih.data)
orthogonal(self.gru.weight_hh.data)
self.gru.bias_ih.data.fill_(0)
self.gru.bias_hh.data.fill_(0)
if self.dist.__class__.__name__ == "DiagGaussian":
self.dist.fc_mean.weight.data.mul_(0.01)
def forward(self, inputs, states, masks, pred_depth=False):
x = self.conv_head(inputs * (1.0 / 255.0))
# Depth is predicted from the conv features, before flattening.
if pred_depth:
depth = self.depth_head(x)
x = x.view(-1, self.conv_out_size)
x = self.linear1(x)
x = F.relu(x)
if hasattr(self, 'gru'):
if inputs.size(0) == states.size(0):
# Single-step path (acting).
x = states = self.gru(x, states * masks)
else:
# Multi-step path (training): unroll the GRU over the time dimension.
x = x.view(-1, states.size(0), x.size(1))
masks = masks.view(-1, states.size(0), 1)
outputs = []
for i in range(x.size(0)):
hx = states = self.gru(x[i], states * masks[i])
outputs.append(hx)
x = torch.cat(outputs, 0)
if pred_depth:
return self.critic_linear(x), x, states, depth
else:
return self.critic_linear(x), x, states
if __name__ == '__main__':
# Ad-hoc shape sanity checks for the conv stack and the depth head.
depth_model = CNNDepthPolicy(3, 8, False, (3, 64, 112))
example_input = torch.autograd.Variable(torch.randn(1, 3, 64, 112))
c, x, s, d = depth_model(example_input, None, torch.autograd.Variable(torch.Tensor([1])), True)
d.size()
conv_head = nn.Sequential(nn.Conv2d(3, 32, 8, stride=4),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(True),
nn.Conv2d(64, 32, 3, stride=1),
nn.ReLU())
# Intermediate feature-map sizes after 1, 2 and 3 conv stages, printed below.
step1 = nn.Conv2d(3, 32, 8, stride=4)(example_input)
step2 = nn.Sequential(nn.Conv2d(3, 32, 8, stride=4),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, stride=2))(example_input)
step3 = nn.Sequential(nn.Conv2d(3, 32, 8, stride=4),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(True),
nn.Conv2d(64, 32, 3, stride=1),
nn.ReLU())(example_input)
print('Step1', step1.size())
print('Step2', step2.size())
print('Step3', step3.size())
| 10,104
| 33.370748
| 104
|
py
|
drlviz
|
drlviz-master/doom_evaluation.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 14:31:17 2018
@author: edward
"""
if __name__ == '__main__': # changes backend for animation tests
import matplotlib
matplotlib.use("Agg")
import numpy as np
from collections import deque
from moviepy.editor import ImageSequenceClip
from environments import DoomEnvironment
import torch
from torch import Tensor
from torch.autograd import Variable
from arguments import parse_game_args
from multi_env import MultiEnvs
from models import CNNPolicy
import matplotlib.pyplot as plt
class BaseAgent(object):
"""Wraps a recurrent policy for single-environment evaluation.

Keeps the policy's hidden state and a frame-stacking buffer between calls,
and exposes helpers that also return value/probabilities and input-gradient
saliency maps."""
def __init__(self, model, params):
self.model = model
self.cuda = params.cuda
self.gradients = None
self.step = 0
# self.update_relus()
# Frame stacking: keep the last num_stack observations when stacking is enabled.
if params.num_stack > 1:
self.exp_size = params.num_stack
self.short_term_memory = deque()
# NOTE(review): Variable(..., volatile=True) is pre-0.4 PyTorch; has no effect
# (with a warning) on modern versions.
self.state = Variable(torch.zeros(1, model.state_size), volatile=True)
self.mask = Variable(Tensor([1.0]), volatile=True)
print(self.mask)
if params.cuda:
self.state = self.state.cuda()
self.mask = self.mask.cuda()
def get_action(self, observation, epsilon=0.0):
# Greedy (deterministic) action for one observation; updates the hidden state.
if hasattr(self, 'short_term_memory'):
observation = self._prepare_observation(observation)
observation = Variable(torch.from_numpy(observation), volatile=True).unsqueeze(0)
if self.cuda:
print('la>')
observation = observation.cuda()
_, action, _, self.state = self.model.act(observation, self.state, self.mask, deterministic=True)
return action.cpu().data.numpy()[0, 0]
def get_action_value_and_probs(self, observation, epsilon=0.0):
# As get_action, but also returns the value estimate, action probabilities and a
# saliency map: gradient of the argmax logit w.r.t. the input, clamped to >= 0,
# max-reduced over channels and rescaled to 0..254.
if hasattr(self, 'short_term_memory'):
observation = self._prepare_observation(observation)
observation = Variable(torch.from_numpy(observation).unsqueeze(0), requires_grad=True)
if self.cuda:
observation = observation.cuda()
value, action, probs, self.state, x = self.model.get_action_value_and_probs(observation, self.state, self.mask, [], deterministic=True)
self.model.zero_grad()
te = probs.cpu().data.numpy()
# One-hot vector selecting the most probable action's feature gradient.
# NOTE(review): torch.cuda.FloatTensor requires a GPU — this path assumes CUDA.
one_hot_output = torch.cuda.FloatTensor(1, x.size()[-1]).zero_()
one_hot_output[0][te.argmax()] = 1
probs = Variable(probs.data, requires_grad=True)
x.backward(gradient=one_hot_output)
x.detach_()
grads = observation.grad.data.clamp(min=0)
grads.squeeze_()
# CHW -> HWC, then max over the channel axis for a 2D saliency map.
grads.transpose_(0, 1)
grads.transpose_(1, 2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 254
grads = grads.astype(np.int8)
return action.cpu().data.numpy()[0, 0], value.cpu().data.numpy(), probs.cpu().data.numpy(), grads
def get_action_value_and_probs_zeroes(self, observation, mask2, epsilon=0.0):
# Variant of the above that forwards an extra hidden-state mask (``mask2``) and
# backprops the raw features instead of a one-hot selection.
if hasattr(self, 'short_term_memory'):
observation = self._prepare_observation(observation)
observation = Variable(torch.from_numpy(observation).unsqueeze(0), requires_grad=True)
if self.cuda:
observation = observation.cuda()
value, action, probs, self.state, x = self.model.get_action_value_and_probs(observation, self.state, self.mask, mask2, deterministic=True)
self.model.zero_grad()
# te = probs.cpu().data.numpy()
# one_hot_output = torch.cuda.FloatTensor(1, x.size()[-1]).zero_()
# one_hot_output[0][te.argmax()] = 1
# probs = Variable(probs.data, requires_grad=True)
x.backward(gradient=x)
x.detach_()
grads = observation.grad.data.clamp(min=0)
grads.squeeze_()
grads.transpose_(0, 1)
grads.transpose_(1, 2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 254
grads = grads.astype(np.int8)
return action.cpu().data.numpy()[0, 0], value.cpu().data.numpy(), probs.cpu().data.numpy(), grads
def reset(self):
"""
reset the models hidden layer when starting a new rollout
"""
if hasattr(self, 'short_term_memory'):
self.short_term_memory = deque()
self.state = Variable(torch.zeros(1, self.model.state_size), volatile=True)
if self.cuda:
self.state = self.state.cuda()
self.step = 0
def _prepare_observation(self, observation):
"""
As the network expects an input of n frames, we must store a small
short term memory of frames. At input this is completely empty so
I pad with the first observations 4 times
"""
if len(self.short_term_memory) == 0:
for _ in range(self.exp_size):
self.short_term_memory.append(observation)
self.short_term_memory.popleft()
self.short_term_memory.append(observation)
return np.vstack(self.short_term_memory)
def get_step(self):
# Number of steps taken since the last reset().
return self.step
def eval_model(model, params, logger, step, train_iters, num_games):
    """Wrap ``model`` in a BaseAgent and evaluate it on a fresh DoomEnvironment."""
    environment = DoomEnvironment(params)
    wrapped_agent = BaseAgent(model, params)
    eval_agent(wrapped_agent, environment, logger, params, step, train_iters, num_games)
def eval_agent(agent, env, logger, params, step, train_iters, num_games=10):
"""
Evaluates an agents performance in an environment Two metrics are
computed: number of games suceeded and average total reward.
"""
# TODO: Back up the enviroment so the agent can start where it left off
# Track the best/worst episodes so their observations can be written out as movies.
best_obs = None
worst_obs = None
best_reward = -10000
worst_reward = 100000
accumulated_rewards = 0.0
reward_list = []
time_list = []
for game in range(num_games):
env.reset()
agent.reset()
k = 0
rewards = []
obss = []
while not env.is_episode_finished():
obs = env.get_observation()
action = agent.get_action(obs, epsilon=0.0)
reward = env.make_action(action)
rewards.append(reward)
# Normalise to 0..1 here only if the agent pipeline did not already do it.
if not params.norm_obs:
obs = obs * (1.0 / 255.0)
obss.append(obs)
k += 1
time_list.append(k)
reward_list.append(env.get_total_reward())
if env.get_total_reward() > best_reward:
best_obs = obss
best_reward = env.get_total_reward()
if env.get_total_reward() < worst_reward:
worst_obs = obss
worst_reward = env.get_total_reward()
accumulated_rewards += env.get_total_reward()
# step and step+1 keep the best and worst movie filenames distinct.
write_movie(params, logger, best_obs, step, best_reward)
write_movie(params, logger, worst_obs, step + 1, worst_reward)
logger.write('Step: {:0004}, Iter: {:000000008} Eval mean reward: {:0003.3f}'.format(step, train_iters, accumulated_rewards / num_games))
logger.write('Step: {:0004}, Game rewards: {}, Game times: {}'.format(step, reward_list, time_list))
def write_movie(params, logger, observations, step, score):
    """Dump a rollout's CHW observations to an mp4 in the logger's eval directory."""
    frames = []
    for obs in observations:
        frames.append(obs.transpose(1, 2, 0) * 255.0)
    observations = frames
    clip = ImageSequenceClip(observations, fps=int(30 / params.frame_skip))
    output_dir = logger.get_eval_output()
    filename = '{}eval{:0004}_{:00005.0f}.mp4'.format(output_dir, step, score * 100)
    clip.write_videofile(filename)
if __name__ == '__main__':
# Test to improve movie with action probs, values etc
params = parse_game_args()
params.norm_obs = False
params.recurrent_policy = True
envs = MultiEnvs(params.simulator, 1, 1, params)
obs_shape = envs.obs_shape
obs_shape = (obs_shape[0] * params.num_stack, *obs_shape[1:])
model = CNNPolicy(obs_shape[0], envs.num_actions, params.recurrent_policy, obs_shape)
env = DoomEnvironment(params)
agent = BaseAgent(model, params)
env.reset()
agent.reset()
# Roll out one episode, recording observations, actions, probs and values.
rewards = []
obss = []
actions = []
action_probss = []
values = []
while not env.is_episode_finished():
obs = env.get_observation()
# action = agent.get_action(obs, epsilon=0.0)
# NOTE(review): get_action_value_and_probs returns 4 values (incl. saliency grads);
# this 3-way unpack will raise ValueError — confirm which variant is intended.
action, value, action_probs = agent.get_action_value_and_probs(obs, epsilon=0.0)
# print(action)
reward = env.make_action(action)
rewards.append(reward)
obss.append(obs)
# NOTE(review): appends the list to itself; presumably meant actions.append(action).
actions.append(actions)
action_probss.append(action_probs)
values.append(value)
# Sliding 64-sample windows for the value/reward strip charts.
value_queue = deque()
reward_queue = deque()
for i in range(64):
value_queue.append(0.0)
reward_queue.append(0.0)
import matplotlib.animation as manimation
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Movie Test', artist='Edward Beeching',
comment='First movie with data')
writer = FFMpegWriter(fps=7.5, metadata=metadata)
# plt.style.use('seaborn-paper')
# Layout: observation on top, action-probability bars and two strip charts below.
fig = plt.figure(figsize=(16, 9))
ax1 = plt.subplot2grid((6, 6), (0, 0), colspan=6, rowspan=4)
ax2 = plt.subplot2grid((6, 6), (4, 3), colspan=3, rowspan=2)
ax3 = plt.subplot2grid((6, 6), (4, 0), colspan=3, rowspan=1)
ax4 = plt.subplot2grid((6, 6), (5, 0), colspan=3, rowspan=1)
# World plot
im = ax1.imshow(obs.transpose(1, 2, 0) / 255.0)
ax1.axis('off')
# Action plot
bar_object = ax2.bar('L, R, F, B, L + F, L + B, R + F, R + B'.split(','), action_probs.tolist()[0])
ax2.set_title('Action Probabilities', position=(0.5, 0.85))
# plt.title('Action probabilities')
# ax2.axis('on')
ax2.set_ylim([-0.01, 1.01])
# values
values_ob, = ax3.plot(value_queue)
ax3.set_title('State Values', position=(0.1, 0.05))
ax3.set_ylim([np.min(np.stack(values)) - 0.2, np.max(np.stack(values)) + 0.2])
ax3.get_xaxis().set_visible(False)
# plt.title('State values')
rewards_ob, = ax4.plot(reward_queue)
ax4.set_title('Rewards', position=(0.07, 0.05))
# plt.title('Reward values')
ax4.set_ylim([-0.01, 1.0])
fig.tight_layout()
print('writing')
# Replay the recorded episode frame by frame into the movie writer.
with writer.saving(fig, "writer_test.mp4", 100):
for observation, action_probs, value, reward in zip(obss, action_probss, values, rewards):
im.set_array(observation.transpose(1, 2, 0) / 255.0)
for b, v in zip(bar_object, action_probs.tolist()[0]):
b.set_height(v)
value_queue.popleft()
value_queue.append(value[0, 0])
reward_queue.popleft()
reward_queue.append(reward)
values_ob.set_ydata(value_queue)
rewards_ob.set_ydata(reward_queue)
writer.grab_frame()
| 10,654
| 32.296875
| 146
|
py
|
Halo-FDCA
|
Halo-FDCA-master/HaloFitting.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: J.M. Boxelaar
Version: October 2020
'''
from astropy.coordinates import SkyCoord
import logging
import os
from datetime import datetime
import astropy.units as u
import numpy as np
import argparse
import FDCA
def str2bool(v):
    """Parse a command-line value into a bool (argparse ``type=`` helper).

    Real bools pass through unchanged; strings are matched case-insensitively
    against common true/false spellings. Anything else raises
    ``argparse.ArgumentTypeError``.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean type expected.')
def init_logging(args):
    """Configure file logging for this run and return the ``logging`` module.

    A log file named after the input FITS file plus a timestamp is created
    under ``<out_path>/log/`` (the directory is created if needed) and
    installed as the root handler via dictConfig.

    Args:
        args: parsed CLI namespace; only ``out_path`` and ``d_file`` are used.

    Returns:
        The configured ``logging`` module.
    """
    # ``logging.config`` is a submodule that ``import logging`` alone does not
    # guarantee to load; the original code called logging.config.dictConfig
    # without importing it, which can raise AttributeError.
    import logging.config
    path = args.out_path
    if path[-1] == '/':
        path = path[:-1]
    now = str(datetime.now())[:19]
    filename = args.d_file.split('/')[-1]
    if not os.path.exists(path + '/log/'):
        os.makedirs(path + '/log/')

    d = {
        'version': 1,
        'formatters': {
            'detailed': {
                'class': 'logging.Formatter',
                'format': '%(asctime)s %(name)-12s %(processName)-2s %(levelname)-8s %(message)s'
            }
        },
        'handlers': {
            'file': {
                'class': 'logging.FileHandler',
                'filename': path + '/log/' + filename + '_' + now.replace(' ', '_') + '.log',
                'mode': 'w',
                'formatter': 'detailed',
            },
        },
        'root': {
            'level': 'INFO',
            'handlers': ['file']  # ,'console'
        },
    }
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    logging.config.dictConfig(d)
    return logging
def get_initial_guess(halo):
    """Build the MCMC starting point ``p0`` and parameter ``bounds`` for a halo fit.

    The e-folding radius guess comes from the halo radius in pixel units and
    is clipped so it stays inside the image; the position guess is the halo
    centre shifted by the absolute fov margins.
    """
    n_rows = halo.data.shape[0]
    n_cols = halo.data.shape[1]
    r_guess = halo.radius / (3.5 * halo.pix_size)
    if r_guess >= n_cols / 2.:
        r_guess = n_cols / 4.
    r_bound = n_rows / 2.
    diff = np.abs(halo.margin)
    x0 = halo.centre_pix[0] + diff[0]
    y0 = halo.centre_pix[1] + diff[2]
    p0 = (halo.I0, x0, y0, r_guess, r_guess, r_guess, r_guess, 0., 0., 0.)
    lower = [0., 0., 0., 0., 0., 0., 0., -np.inf, 0., -np.inf]
    upper = [np.inf, n_rows, n_cols,
             r_bound, r_bound, r_bound, r_bound, np.inf, np.inf, np.inf]
    return p0, (lower, upper)
if __name__=='__main__':
# CLI entry point: parse arguments, build the Radio_Halo object, optionally run
# the MCMC fit, then post-process (plots, chi2, flux and power estimates).
parser = argparse.ArgumentParser(description='Halo-FDCA: An automated flux density calculator for radio halos in galaxy clusters. (Boxelaar et al.)')
parser.add_argument('object', help='(str) Cluster object name', type=str)
parser.add_argument('d_file', help='(str) FITS image location (containing radio halo).', type=str)
parser.add_argument('-z', help='(float) cluster redshift', required=True, type=float)
parser.add_argument('-model', help='(str) Model to use. choose from (circle, ellipse, rotated_ellipse, skewed). Default: circle', choices=['circle', 'ellipse', 'rotated_ellipse', 'skewed'], default='circle', type=str)
parser.add_argument('-frame', help='(str) Coordinate frame. Default: ICRS', default='icrs', type=str)
parser.add_argument('-loc', help="(str) Sky coordinates of cluster. provide coordinates of the form: 'hh mm ss.ss -dd mm ss.s' in hourangle units. Default: None and image centre is chosen.", default = None, type=str)
parser.add_argument('-m', help='(bool) choose to include mask or not. If True, -maskPath should be specified. Default: True',default=True, type=str2bool)
parser.add_argument('-m_file', help='(str) Mask file location. Default: None', default=None, type=str)
parser.add_argument('-out_path', help='(str) Path to code output. Default: directory code is in.', default='./', type=str)
parser.add_argument('-fov', help='(bool) Declare if image size has to be decreased before MCMC-ing. Amount of decreasement has ben automatically set to 3.5*r_e. Default: True',default=True, type=str2bool)
parser.add_argument('-spectr_idx',help='(float) Set spectral index of cluster (S ~ nu^alpha). Used to calculate power and extrapolate flux to arbitrary frequencies. Default: -1.2',default=-1.2, type=float)
parser.add_argument('-walkers', help='(int) Number of walkers to deploy in the MCMC algorithm. Default: 200',default=200, type=int)
parser.add_argument('-steps', help='(int) Number of evauations each walker has to do. Default: 1200',default=1200, type=int)
parser.add_argument('-burntime', help='(int) Burn-in time for MCMC walkers. See emcee documentation for info. Default: None. this is 1/4th of the steps.',default=None, type=int)
parser.add_argument('-max_radius',help='(float) Maximum posiible radius cut-off. Fitted halos cannot have any r > max_radius. In units of kpc. Default: None (implying image_size/2).',default=None, type=float)
parser.add_argument('-gamma_prior',help='(bool) Whether to use a gamma distribution as a prior for radii. Default is False. For the gamma parameters: shape = 2.5, scale = 120 kpc. Default: False',default=False, type=str2bool)
parser.add_argument('-k_exp', help='(bool) Whether to use k exponent to change shape of exponential distribution. Default: False',default=False, type=str2bool)
parser.add_argument('-off', help='(bool) Whether to use an offset in the model (use this when radius is estimated to be too big). Default: False',default=False, type=str2bool)
parser.add_argument('-s', help='(bool) Whether to save the mcmc sampler chain in a fits file. Default: True.',default=True, type=str2bool)
parser.add_argument('-run_mcmc', help='(bool) Whether to run a MCMC routine or skip it to go straight to processing. can be done if a runned sample already exists in the output path. Default: True',default=True, type=str2bool)
parser.add_argument('-int_max', help='(float) Integration radius in r_e units. Default: inf',default=np.inf, type=float)
parser.add_argument('-freq', help='(float) frequency in MHz to calculate flux in. When given, the spectral index will be used. Default: image frequency',default=None, type=str)
parser.add_argument('-rms', help='(float) Set manual rms noise level to be used by the code in uJy/beam Default: rms calculated by code',default=0., type=float)
args = parser.parse_args()
loc = args.loc
#if args.freq != None:
# args.freq = args.freq*u.MHz
# Interpret -loc as hourangle/deg sky coordinates in the requested frame.
if loc is not None:
loc = SkyCoord(args.loc, unit=(u.hourangle, u.deg), frame=args.frame)
logging = init_logging(args)
logger = logging.getLogger(args.object)
logger.log(logging.INFO, 'Start Process for: '+ args.object)
logger.log(logging.INFO, 'Run Arguments: \n'+ str(args)+ '\n')
halo = FDCA.Radio_Halo(args.object, args.d_file, maskpath=args.m_file, mask=args.m,
decreased_fov=args.fov,logger=logger, loc=loc,
M500=None, R500=None, z=args.z,
outputpath=args.out_path, spectr_index=args.spectr_idx, rms=args.rms)
p0, bounds = get_initial_guess(halo)
if args.freq is None: args.freq = halo.freq.value
# Optionally run the sampler; skipping assumes a saved chain exists in out_path.
if args.run_mcmc:
fit = FDCA.markov_chain_monte_carlo.fitting(halo, halo.data_mcmc, args.model, p0,
bounds, walkers=args.walkers,
steps=args.steps, logger=halo.log,
burntime=args.burntime,
mask=args.m, maskpath=args.m_file,
max_radius=args.max_radius,
gamma_prior=args.gamma_prior,
k_exponent=args.k_exp, offset=args.off)
fit.__preFit__()
fit.__run__(save=args.s)
else: pass
# Post-processing: diagnostics plots, goodness of fit, flux and radio power.
processing = FDCA.markov_chain_monte_carlo.processing(halo, halo.data, args.model,
logger=halo.log,mask=args.m,
maskpath=args.m_file, save=args.s,
k_exponent=args.k_exp, offset=False,
burntime=args.burntime)
processing.plot_results()
processing.get_chi2_value()
frequency = float(args.freq)*u.MHz
processing.get_flux(int_max=args.int_max, freq=frequency)# error is one sigma (68%).
processing.get_power(freq=frequency)
halo.Close()
| 8,374
| 53.383117
| 231
|
py
|
Halo-FDCA
|
Halo-FDCA-master/FDCA/fdca_utils.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: J.M. Boxelaar
'''
from __future__ import division
import sys
import time
import os
import logging
import pyregion
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy import ndimage
from skimage.measure import block_reduce
from skimage.transform import rescale
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d import Axes3D
from astropy.io import fits
from astropy import wcs
import astropy.units as u
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
# Silence divide-by-zero / invalid-value warnings from numpy (NaNs are handled explicitly).
np.seterr(divide='ignore', invalid='ignore')
# Angle conversion factors and composite surface-brightness units used throughout.
rad2deg=180./np.pi
deg2rad=np.pi/180.
Jydeg2 = u.Jy/(u.deg*u.deg)
mJyarcsec2 = u.mJy/(u.arcsec*u.arcsec)
def add_parameter_labels(obj, array):
    """Scatter fitted values into a name-indexed pandas Series.

    ``obj.params`` is a boolean mask over the full parameter vector; entries
    that are not being fitted stay at zero. The result is labelled with
    ``obj.paramNames`` so parameters can be looked up by name.
    """
    full = np.zeros(obj.params.shape)
    full[obj.params] = np.array(array)
    frame = pd.DataFrame.from_dict({'params': full},
                                   orient='index', columns=obj.paramNames)
    return frame.loc['params']
def convolve_model(halo, Ir, rotate):
    """Optionally rotate a model image to beam orientation, convolve it with
    the restoring beam and return the result flattened to 1D."""
    if rotate:
        Ir = rotate_image(halo, Ir, decrease_fov=True)
    smoothed = convolve_with_gaussian(halo, Ir)
    return smoothed.ravel()
def gauss(x, mu, sigma, A):
    """Unnormalised Gaussian with peak amplitude ``A``, mean ``mu``, width ``sigma``."""
    z = (x - mu) / sigma
    return A * np.exp(-0.5 * z ** 2.)
def convolve_with_gaussian(obj, data):
    """Convolve ``data`` with the restoring-beam Gaussian of ``obj``.

    Beam FWHMs (``bmaj``/``bmin``, converted to pixels via ``pix_size``) are
    turned into Gaussian sigmas with FWHM = sqrt(8 ln 2) * sigma, and the
    kernel is rotated by the beam position angle ``bpa``.
    """
    sigma1 = (obj.bmaj / obj.pix_size) / np.sqrt(8 * np.log(2.))
    sigma2 = (obj.bmin / obj.pix_size) / np.sqrt(8 * np.log(2.))
    kernel = Gaussian2DKernel(sigma1, sigma2, obj.bpa.to(u.rad))
    # Unwrap an astropy Quantity to its ndarray; plain arrays pass through.
    # (Replaces a bare try/except that silently swallowed unrelated errors.)
    values = getattr(data, 'value', data)
    return convolve(values, kernel, boundary='extend', normalize_kernel=True)
def circle_model(obj, theta, rotate=False):
    """Circular exponential halo profile evaluated on the object's pixel grid."""
    r_sq = (obj.x_pix - theta['x0']) ** 2 + (obj.y_pix - theta['y0']) ** 2
    G = r_sq / theta['r1'] ** 2
    Ir = theta['I0'] * np.exp(-G ** (0.5 + theta['k_exp'])) + theta['off']
    return convolve_model(obj.halo, Ir, rotate).ravel()
def ellipse_model(obj, theta, rotate=False):
    """Axis-aligned elliptical exponential halo profile."""
    gx = (obj.x_pix - theta['x0']) / theta['r1']
    gy = (obj.y_pix - theta['y0']) / theta['r2']
    G = gx ** 2 + gy ** 2
    Ir = theta['I0'] * np.exp(-G ** (0.5 + theta['k_exp'])) + theta['off']
    return convolve_model(obj.halo, Ir, rotate).ravel()
def rotated_ellipse_model(obj, theta, rotate=False):
    """Elliptical exponential profile rotated by position angle ``theta['ang']``."""
    dx = obj.x_pix - theta['x0']
    dy = obj.y_pix - theta['y0']
    cos_a = np.cos(theta['ang'])
    sin_a = np.sin(theta['ang'])
    x = dx * cos_a + dy * sin_a
    y = -dx * sin_a + dy * cos_a
    G = (x / theta['r1']) ** 2. + (y / theta['r2']) ** 2.
    Ir = theta['I0'] * np.exp(-G ** (0.5 + theta['k_exp'])) + theta['off']
    return convolve_model(obj.halo, Ir, rotate).ravel()
def skewed_model(obj, theta, rotate=False):
    """Skewed exponential profile built from four quadrant terms, one per
    combination of x/y signs, each with its own e-folding radius."""
    base = (obj.x_pix, obj.y_pix, theta['I0'], theta['x0'], theta['y0'])
    ang = theta['ang']
    G_pp = G(*base, theta['r1'], theta['r3'], ang, 1., 1.)
    G_mm = G(*base, theta['r2'], theta['r4'], ang, -1., -1.)
    G_pm = G(*base, theta['r1'], theta['r4'], ang, 1., -1.)
    G_mp = G(*base, theta['r2'], theta['r3'], ang, -1., 1.)
    Ir = theta['I0'] * (G_pp + G_pm + G_mm + G_mp)
    return convolve_model(obj.halo, Ir, rotate).ravel()
def G(x, y, I0, x0, y0, re_x, re_y, ang, sign_x, sign_y):
    """One quadrant term of the skewed exponential model.

    Coordinates are rotated by ``ang`` about (x0, y0). The ``sqrt(...)**4``
    form yields NaN outside the quadrant selected by the sign arguments,
    and those NaNs are zeroed so each term only contributes in its quadrant.
    (``I0`` is accepted for signature compatibility but not used here.)
    """
    dx = x - x0
    dy = y - y0
    cos_a = np.cos(ang)
    sin_a = np.sin(ang)
    x_rot = dx * cos_a + dy * sin_a
    y_rot = -dx * sin_a + dy * cos_a
    radial = (np.sqrt(sign_x * x_rot) ** 4.) / (re_x ** 2.) \
        + (np.sqrt(sign_y * y_rot) ** 4.) / (re_y ** 2.)
    term = np.exp(-np.sqrt(radial))
    term[np.where(np.isnan(term))] = 0.
    return term
def noise_modelling(obj):
    """Generate beam-convolved Gaussian noise scaled by the map's rms level."""
    shape = (len(obj.halo.x_pix), len(obj.halo.y_pix))
    raw = 15. * (np.random.randn(*shape) - 0.030) * u.Jy
    smoothed = convolve_with_gaussian(obj.halo, raw)
    return (smoothed * obj.halo.rmsnoise).value
def noise_characterisation(obj, data):
    """Fit a Gaussian to the pixel-value histogram of ``data``.

    Histograms the pixel values between -5 and +8 rms and fits ``gauss`` to
    the bin counts.

    Returns:
        Best-fit ``(mu, sigma, amplitude)`` of the noise distribution.
    """
    sample = np.copy(data)
    #mask[obj.data.value>2*obj.rmsnoise.value]=np.nan
    nbin = 100
    edges = np.linspace(-5 * obj.rmsnoise.value,
                        8 * obj.rmsnoise.value, nbin)
    # (An unused 1000-point x grid was removed here.)
    binscenters = np.array([0.5 * (edges[i] + edges[i + 1]) for i in range(len(edges) - 1)])
    hist_data, data_bins = np.histogram(sample.ravel(), bins=edges)
    popt, pcov = curve_fit(gauss, xdata=binscenters, ydata=hist_data,
                           p0=(0, 0.000003, 5000))
    return popt
def advanced_noise_modeling(obj, seed=False):
    """Simulate beam-convolved noise matched to the observed noise statistics.

    White Gaussian noise is convolved with the beam, then rescaled so its
    standard deviation and mean match ``obj.halo.noise_char`` (mu, sigma).
    Pass ``seed=True`` for a reproducible realisation.
    """
    if seed:
        np.random.seed(12345)
    shape = (len(obj.halo.x_pix), len(obj.halo.y_pix))
    white = np.random.randn(*shape) * u.Jy
    smooth = convolve_with_gaussian(obj.halo, white)
    flat = smooth.ravel()
    variance = np.mean(flat ** 2.) - np.mean(flat) ** 2.
    smooth = smooth * (obj.halo.noise_char[1] / np.sqrt(variance))
    smooth -= np.mean(smooth.ravel()) - obj.halo.noise_char[0]
    #plot.quick_imshow(obj.halo, noise_conv*u.Jy, noise=False)
    return smooth * u.Jy
def create_artificial_halo(obj, model, seed):
    """Add simulated, beam-matched noise to a model image."""
    synthetic_noise = advanced_noise_modeling(obj, seed).value
    #plot.quick_imshow(obj.halo, (model+theory_noise)*u.Jy, noise=False)
    return model + synthetic_noise
def export_fits(data, path, header=None):
    """Write ``data`` (astropy Quantity or plain ndarray) to ``path`` as FITS.

    Existing files are overwritten.
    """
    # Unwrap a Quantity to its ndarray; plain arrays pass through.
    # (Replaces a bare try/except that hid unrelated errors.)
    values = getattr(data, 'value', data)
    hdu = fits.PrimaryHDU(values, header=header)
    hdul = fits.HDUList([hdu])
    hdul.writeto(path, overwrite=True)
def masking(obj, mask):
"""Build the 0/1 image mask for the MCMC fit.

Accepts either a fitter object (with .halo) or a Radio_Halo directly. If
masking is requested and the region file exists, it is rasterised via
mask_region() and cropped to the decreased fov; otherwise an all-zero mask
of the cropped image shape is returned. Returns (image_mask, mask_flag)."""
# NOTE(review): bare except — duck-types between a fitter (obj.halo) and a halo object.
try: halo = obj.halo
except: halo = obj
if mask:
'''FIND MASK:'''
if os.path.isfile(halo.maskPath):
mask = True
else:
mask=False
obj.log.log(logging.ERROR,'No regionfile found,continueing without mask')
'''SET MASK:'''
if mask:
regionpath = halo.maskPath
outfile = halo.basedir+'/'+halo.file.replace('.fits','')+'_MASK.fits'
mask_region(halo.path, regionpath, outfile)
'''In 'Radio_Halo', there is a function to decrease the fov of an image. The mask
is made wrt the entire image. fov_info makes the mask the same shape as
the image and overlays it'''
image_mask = fits.open(outfile)[0].data[0,0,
halo.fov_info[0]:halo.fov_info[1],
halo.fov_info[2]:halo.fov_info[3]]
obj.log.log(logging.INFO,'MCMC Mask set')
# The rasterised mask file is only an intermediate product; remove it.
os.remove(outfile)
else:
obj.log.log(logging.INFO,'MCMC No mask set')
mask=False
if mask==False:
image_mask = np.zeros_like(halo.original_image[halo.fov_info[0]:halo.fov_info[1],
halo.fov_info[2]:halo.fov_info[3]])
return image_mask, mask
def mask_region(infilename, ds9region, outfilename):
    """Burn a ds9 region file into a FITS image as a 0/1 mask and save it.

    Pixels inside the region become 1.0, all others 0.0. The region is
    rasterised against a flattened (2D) copy of the image so the WCS matches.

    Returns:
        ``outfilename`` for convenience.
    """
    hdu = fits.open(infilename)
    hduflat = flatten(hdu)
    # (An unused alias of hdu[0].data that shadowed the builtin ``map`` was removed.)
    region = pyregion.open(ds9region)
    manualmask = region.get_mask(hdu=hduflat)
    hdu[0].data[0][0][np.where(manualmask == False)] = 0.0
    hdu[0].data[0][0][np.where(manualmask == True)] = 1.0
    hdu.writeto(outfilename, overwrite=True)
    return outfilename
def flatten(f):
    """ Flatten a fits file so that it becomes a 2D image. Return new header and data

    Keeps only the celestial WCS axes; any extra (e.g. frequency/Stokes) axes
    are sliced at index 0. Beam and observation keywords are copied over.
    """
    naxis = f[0].header['NAXIS']
    if naxis < 2:
        # NOTE(review): RadioError is not defined in this module's visible scope —
        # confirm it exists at runtime or this raises NameError instead.
        raise RadioError('Can\'t make map from this')
    # Was ``naxis is 2`` — identity comparison with an int literal is a bug
    # (implementation-defined); use equality.
    if naxis == 2:
        return fits.PrimaryHDU(header=f[0].header, data=f[0].data)

    w = wcs.WCS(f[0].header)
    wn = wcs.WCS(naxis=2)
    wn.wcs.crpix[0] = w.wcs.crpix[0]
    wn.wcs.crpix[1] = w.wcs.crpix[1]
    wn.wcs.cdelt = w.wcs.cdelt[0:2]
    wn.wcs.crval = w.wcs.crval[0:2]
    wn.wcs.ctype[0] = w.wcs.ctype[0]
    wn.wcs.ctype[1] = w.wcs.ctype[1]

    header = wn.to_header()
    header["NAXIS"] = 2
    copy = ('EQUINOX', 'EPOCH', 'BMAJ', 'BMIN', 'BPA', 'RESTFRQ', 'TELESCOP', 'OBSERVER')
    for k in copy:
        r = f[0].header.get(k)
        if r is not None:
            header[k] = r

    # Slice leading (non-celestial) axes at 0; keep the last two in full.
    # (Renamed from ``slice``, which shadowed the builtin.)
    axis_slices = []
    for i in range(naxis, 0, -1):
        if i <= 2:
            axis_slices.append(np.s_[:])
        else:
            axis_slices.append(0)
    hdu = fits.PrimaryHDU(header=header, data=f[0].data[tuple(axis_slices)])
    return hdu
def get_rms(hdu, boxsize=1000, niter=200, eps=1e-6, verbose=False):
    """Estimate the rms noise of a FITS image by iterative 5-sigma clipping.

    Opens ``hdu`` (a file path), flattens the image plane, drops NaNs and
    repeatedly clips values beyond 5*rms until the relative change in the
    rms estimate falls below ``eps``.

    Raises
    ------
    Exception
        If the estimate has not converged after ``niter`` iterations.
    """
    with fits.open(hdu) as hdulist:
        data = hdulist[0].data
    # Select the 2D image plane regardless of cube dimensionality.
    if len(data.shape) == 4:
        _, _, ys, xs = data.shape
        pixels = data[0, 0, 0:ys, 0:xs].flatten()
    else:
        ys, xs = data.shape
        pixels = data[0:ys, 0:xs].flatten()
    pixels = np.delete(pixels, np.where(np.isnan(pixels)))
    previous = 1
    for _ in range(niter):
        current = np.std(pixels)
        if np.abs(previous - current) / current < eps:
            return current
        # Clip bright (source) pixels and iterate on what remains.
        pixels = pixels[np.abs(pixels) < 5 * current]
        previous = current
    raise Exception('Failed to converge')
def findrms(data, niter=100, maskSup=1e-7):
    """Estimate the image rms by iterative 3-sigma clipping about the median.

    Parameters
    ----------
    data : numpy.ndarray
        Image values; pixels with |value| <= ``maskSup`` are treated as
        blanked and ignored.
    niter : int
        Maximum number of clipping iterations.
    maskSup : float
        Threshold below which pixels count as masked/blank.

    Returns
    -------
    float
        The clipped standard deviation (rms noise estimate).
    """
    m = data[np.abs(data) > maskSup]
    rmsold = np.std(m)
    diff = 1e-1   # relative convergence tolerance
    cut = 3.      # clip at 3 sigma around the median
    med = np.median(m)
    # (removed an unused `bins` histogram-edge computation)
    for i in range(niter):
        ind = np.where(np.abs(m - med) < rmsold * cut)[0]
        rms = np.std(m[ind])
        if np.abs((rms - rmsold) / rmsold) < diff:
            break
        rmsold = rms
    return rms
def setMask(self, data):
    """Rasterise the halo's DS9 region file and store it as ``self.image_mask``.

    The mask is generated for the full-size image and then cut to the same
    field of view as the working data using ``halo.fov_info``, so the two
    overlay pixel-for-pixel.
    """
    mask_out = self.halo.basedir + 'Data/Masks/' + self.halo.target + '_mask.fits'
    mask_region(self.halo.path, self.halo.maskPath, mask_out)
    fov = self.halo.fov_info
    # Crop the full-size mask so it matches the (possibly reduced) image.
    self.image_mask = fits.open(mask_out)[0].data[0, 0,
                                                  fov[0]:fov[1],
                                                  fov[2]:fov[3]]
def regridding(obj, data, decrease_fov=False, mask=False):
    """Rotate ``data`` to align with the beam, then regrid to beam-sized pixels.

    ``data`` must be an astropy Quantity; its unit is stripped for the
    geometric operations and re-attached to the result.
    """
    rotated = rotate_image(obj, data.value, decrease_fov, mask)
    return regrid_to_beamsize(obj, rotated) * data.unit
def rotate_image(obj, img, decrease_fov=False, mask=False):
    """Rotate ``img`` by (minus) the beam position angle of ``obj``.

    In the default mode the image is padded around its centre pixel before
    rotating so nothing is lost at the corners, then unpadded. In
    ``decrease_fov`` mode it is rotated in place and cropped with the
    precomputed ``obj.margin`` indices instead.

    Parameters
    ----------
    obj : object
        Must expose ``bpa`` (an angle Quantity) and, when ``decrease_fov``
        is True, ``margin`` (crop indices).
    img : 2D numpy.ndarray
        Image to rotate.
    decrease_fov : bool
        See above.
    mask : bool
        If True, treat ``img`` as a binary mask: pad/fill with 1 (masked)
        instead of 0.

    Returns
    -------
    2D numpy.ndarray
        The rotated (and cropped/unpadded) image.
    """
    cval = 1 if mask else 0
    if not decrease_fov:
        # Force odd dimensions so the pivot lands on an exact pixel centre.
        # (fixed: was `% 2 is 0`, an identity comparison on ints)
        if np.array(img.shape)[0] % 2 == 0:
            img = np.delete(img, 0, 0)
        if np.array(img.shape)[1] % 2 == 0:
            img = np.delete(img, 0, 1)
        pivot = (np.array(img.shape) / 2).astype(np.int64)
        padX = [int(img.shape[1]) - pivot[0], pivot[0]]
        padY = [int(img.shape[0]) - pivot[1], pivot[1]]
        img_pad = np.pad(img, [padY, padX], 'constant', constant_values=(cval))
        img_rot = ndimage.rotate(img_pad, -obj.bpa.value, reshape=False,
                                 mode='constant', cval=cval)
        return img_rot[padY[0]:-padY[1], padX[0]:-padX[1]]
    else:
        img_rot = ndimage.rotate(img, -obj.bpa.value, reshape=False,
                                 mode='constant', cval=cval)
        return img_rot[obj.margin[2]:obj.margin[3], obj.margin[0]:obj.margin[1]]
def regrid_to_beamsize(obj, img, accuracy=100.):
    """Re-bin ``img`` so that one output pixel corresponds to one beam area.

    The image is first blown up onto a sub-pixel grid (controlled by
    ``accuracy``) so the subsequent integer block reduction can approximate
    the generally non-integer beam-to-pixel ratio; blocks are then summed,
    which conserves total flux.

    Parameters
    ----------
    obj : object
        Must expose ``beam_area``, ``bmin``, ``bmaj`` and ``pix_size``
        (astropy Quantities — TODO confirm units against Radio_Halo).
    img : 2D numpy.ndarray
        Image to regrid.
    accuracy : float
        Oversampling percentage; 100 means one sub-pixel per pixel.

    Returns
    -------
    2D numpy.ndarray
        Beam-sampled image.
    """
    # Target pixel edge lengths (in original pixels) such that the new
    # pixel area equals the beam area while keeping the bmin/bmaj aspect.
    y_scale = np.sqrt(obj.beam_area * obj.bmin / obj.bmaj).value
    x_scale = (obj.beam_area / y_scale).value
    new_pix_size = np.array((y_scale, x_scale))
    accuracy = int(1. / accuracy * 100)
    scale = np.round(accuracy * new_pix_size / obj.pix_size).astype(np.int64).value
    pseudo_size = (accuracy * np.array(img.shape)).astype(np.int64)
    pseudo_array = np.zeros((pseudo_size))
    orig_scale = (np.array(pseudo_array.shape) / np.array(img.shape)).astype(np.int64)
    elements = np.prod(np.array(orig_scale, dtype='float64'))
    # fixed: was `accuracy is 1` (identity comparison on an int)
    if accuracy == 1:
        # No oversampling requested: the pseudo grid equals the input grid.
        pseudo_array = np.copy(img)
    else:
        # Spread each input pixel's flux evenly over its sub-pixels.
        for j in range(img.shape[0]):
            for i in range(img.shape[1]):
                pseudo_array[orig_scale[1] * i:orig_scale[1] * (i + 1),
                             orig_scale[0] * j:orig_scale[0] * (j + 1)] = img[i, j] / elements
    f = block_reduce(pseudo_array, block_size=tuple(scale), func=np.sum, cval=0)
    # Drop the (partial) trailing row/column produced by block_reduce padding.
    f = np.delete(f, -1, axis=0)
    f = np.delete(f, -1, axis=1)
    return f
def gamma_dist(x, shape, scale):
    """Gamma probability density evaluated at ``x`` for the given shape/scale."""
    from scipy.special import gamma
    norm = gamma(shape) * (scale ** shape)
    return (x ** (shape - 1.) * np.exp(-x / scale)) / norm
| 12,390
| 35.337243
| 118
|
py
|
Halo-FDCA
|
Halo-FDCA-master/FDCA/HaloObject.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: J.M. Boxelaar
Version: 08 June 2020
'''
# Built in module imports
import sys
import os
import logging
import time
from multiprocessing import Pool
# Scipy, astropy, emcee imports
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import wcs
import astropy.units as u
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
from astroquery.vizier import Vizier
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM
from . import fdca_utils as utils
np.seterr(divide='ignore', invalid='ignore')
rad2deg = 180./np.pi
deg2rad = np.pi/180.
Jydeg2 = u.Jy/(u.deg*u.deg)
mJyarcsec2 = u.mJy/(u.arcsec*u.arcsec)
uJyarcsec2 = 1.e-3*u.mJy/(u.arcsec*u.arcsec)
class Radio_Halo(object):
    '''
    -CLASS DESCRIPTION-
    This class initiates a Radio_Halo object containing all image and physical
    information. A Halo object has to be passed to the MCMC module.
    The Halo class also performs preliminary processes to make MCMC possible

    -INPUT-
    object (str): Name of galaxy cluster. Currently only supports its PSZ2 or MCXC name.
                  If another object needs to be passed, fill in the physical
                  characteristics manually
    path (str): Path to data read from 'database.dat'. Compatible with
                Leiden Observatory data structure.
    decrease_fov (bool): Declare if image size has to be decreased before MCMCing. Amount
                         of decreasement has ben automatically set to 3.5*r_e in
                         self.exponentialFit().
    logger: Configured logging object to log info to a .log file. If not given,
            a new file will be created.
    loc (SkyCoord object): Manually inserted cluster location as an astropy.SkyCoord
                           object. If None: location is gathered from a Vizier query.
                           Otherwise: provide Astropy SkyCoord object with approximate
                           centre of radio halo.
    M500 (float): Manually inserted mass. If None: mass is gathered from a Vizier query
                  If not None: must be value given in 1e14 SolMass
    R500 (float): Manually inserted R500 radius. If None: radius is gathered from
                  a Vizier query (MCXC only). If not None, must be value given
                  in Mega Parsec.
    z (float): Manually inserted redshift. If None: redshift is gathered from
               a Vizier query
    spectr_index (float): Manually inserted halo spectral index (S_v = v^(spectr_index)).
                          Value is used when extrapolating flux density and calculating
                          power values. Default is -1.2 (No conclusions can be drawn
                          from using this default value in calculations).
    '''
    def __init__(self, object, path, decreased_fov=False, maskpath=None, mask=False,
                       logger=logging, loc=None, M500=None, R500=None, z=None,
                       outputpath='./', spectr_index=-1.2, rms=0):
        self.rmsnoise = rms  # manual noise level; converted with 1e-6 below — presumably microJy/beam, TODO confirm (docstring said mJy/beam)
        self.user_radius = R500
        self.user_loc = loc
        self.log = logger
        # Pick the Vizier catalogue matching the target-name prefix.
        if object[:4] == 'MCXC':
            self.cat = 'J/A+A/534/A109/mcxc'
        elif object[:4] == 'PSZ2':
            self.cat = 'J/A+A/594/A27/psz2'
        elif object[:3] == 'WHL':
            self.cat = 'J/MNRAS/436/275/table2'
        elif object[:5] == 'Abell':
            self.cat = 'VII/110A/table3'
        else:
            self.cat = None
            self.log.log(logging.ERROR,'Unknown what catalogue to use. If no costum values are given, filling values will be used')

        self.target = str(object)
        self.path = path
        self.alpha = spectr_index
        # BUGFIX: the replace calls must be chained. Previously each line
        # restarted from self.target, so only the final .replace('WHL','')
        # ever took effect and e.g. 'PSZ2G...' was queried unspaced.
        self.name = (self.target.replace('MCXC', 'MCXC ')
                                .replace('PSZ2', 'PSZ2 ')
                                .replace('Abell', 'Abell ')
                                .replace('WHL', ''))
        self.cosmology = FlatLambdaCDM(H0=70, Om0=0.3)
        self.table = Vizier.query_object(self.name, catalog=self.cat)
        self.initiatePaths(maskpath, outputpath)

        data = self.unpack_File()
        self.get_beam_area()
        self.original_image = np.copy(data)
        x = np.arange(0, data.shape[1], step=1, dtype='float')
        y = np.arange(0, data.shape[0], step=1, dtype='float')
        self.x_pix, self.y_pix = np.meshgrid(x, y)

        self.get_object_location(loc)
        self.extract_object_info(M500, R500, z)
        self.fov_info = [0, data.shape[0], 0, data.shape[1]]
        self.image_mask, self.mask = utils.masking(self, mask)
        self.exponentialFit(data, first=True)  # Find centre of the image: centre_pix

        if self.header['BUNIT'] == 'JY/BEAM' or self.header['BUNIT'] == 'Jy/beam':
            self.data = data * (u.Jy / self.beam2pix)
        else:
            self.log.log(logging.CRITICAL,'Possibly other units than jy/beam, CHECK HEADER UNITS!')
            sys.exit()

        self.pix_to_world()
        self.set_image_characteristics(decreased_fov)

    def initiatePaths(self, maskpath, outputpath):
        """Derive data/plot/model directories from ``path`` and create them."""
        self.basedir = outputpath
        if outputpath[-1] == '/':
            self.basedir = outputpath[:-1]
        txt = self.path.split('/')
        self.file = txt[-1]
        self.dataPath = '/' + '/'.join(txt[:-1]) + '/'
        self.plotPath = self.basedir + '/Plots/'
        self.modelPath = self.basedir + '/'
        if not os.path.isdir(self.modelPath):
            self.log.log(logging.INFO,'Creating modelling directory')
            os.makedirs(self.modelPath)
        if not os.path.isdir(self.plotPath):
            self.log.log(logging.INFO,'Creating plotting directory')
            os.makedirs(self.plotPath)
        if maskpath == None:
            # Fall back to the standard <basedir>/<target>.reg location.
            self.maskPath = self.basedir + '/' + self.target + '.reg'
        else:
            self.maskPath = maskpath

    def get_object_location(self, loc):
        """Set ``self.loc``; default to the image centre when none is given."""
        if loc is not None:
            self.loc = loc
            '''
            elif self.target[:4] == 'MCXC':
                coord = str(self.table[self.cat]['RAJ2000'][0])+' '\
                        + str(self.table[self.cat]['DEJ2000'][0])
                self.loc = SkyCoord(coord, unit=(u.hourangle,u.deg))
            elif self.target[:5] == 'Abell':
                coord = str(self.table[self.cat]['_RA.icrs'][0])+' '\
                        + str(self.table[self.cat]['_DE.icrs'][0])
                self.loc = SkyCoord(coord, unit=(u.hourangle,u.deg))
            elif self.target[:4] == 'PSZ2':
                coord = [self.table[self.cat]['RAJ2000'][0],self.table[self.cat]['DEJ2000'][0]]
                self.loc = SkyCoord(coord[0], coord[1], unit=u.deg)
            elif self.target[:3] == 'WHL':
                coord = [self.table[self.cat]['RAJ2000'][0],self.table[self.cat]['DEJ2000'][0]]
                self.loc = SkyCoord(coord[0], coord[1], unit=u.deg)
            '''
        else:
            self.log.log(logging.WARNING,'No halo sky location given. Assuming image centre.')
            self.log.log(logging.INFO,'- Not giving an approximate location can affect MCMC performance -')
            cent_pix = np.asarray(self.original_image.shape, dtype=np.float64).reshape(1, 2) / 2.
            w = wcs.WCS(self.header)
            coord = w.celestial.wcs_pix2world(cent_pix, 1)
            self.loc = SkyCoord(coord[0, 0], coord[0, 1], unit=u.deg)
            self.user_loc = False

    def extract_object_info(self, M500, R500, z):
        '''Written for MCXC catalogue. Information is gathered from there. If custom
        parameters are given, these will be used. if nothing is found, filling
        values are set. This is only a problem if you try to calculate radio power.'''
        try:
            if self.target[:4] == 'MCXC':
                self.M500 = float(self.table[self.cat]['M500'][0])*1.e14*u.Msun
                self.L500 = float(self.table[self.cat]['L500'][0])*1.e37*u.Watt
                self.R500 = float(self.table[self.cat]['R500'][0])*u.Mpc
                self.z = float(self.table[self.cat]['z'][0])
                self.M500_std = 0.*u.Msun
            elif self.target[:3] == 'WHL':
                self.z = float(self.table[self.cat]['z'][0])
                # WHL has no R500/M500 columns here: use filling values.
                self.R500 = 1.*u.Mpc
                self.M500 = 3.e14*u.Msun
                self.user_radius = False
            elif self.target[:5] == 'Abell':
                try:
                    self.z = float(self.table[self.cat]['z'][0])
                except Exception:
                    self.z = 0.1  # filling value; ignore if -z is given
                self.R500 = 1.*u.Mpc
                self.user_radius = False
            elif self.target[:4] == 'PSZ2':
                self.M500 = float(self.table[self.cat]['MSZ'][0])*1.e14*u.Msun
                self.M500_std = np.max([float(self.table[self.cat]['E_MSZ'][0]),
                                        float(self.table[self.cat]['e_MSZ'][0])])*1.e14*u.Msun
                self.z = float(self.table[self.cat]['z'][0])
                try:
                    self.R500 = float(self.table[self.cat]['R500'][0])*u.Mpc
                except Exception:
                    self.R500 = 1.*u.Mpc
                    self.user_radius = False
            else:
                self.R500 = 1.*u.Mpc
                self.z = 0.1
                self.user_radius = False
        except Exception:
            print('catalogue search FAILED')
            self.R500 = 1.*u.Mpc
            self.z = 0.1
            self.user_radius = False

        # User-supplied values always override catalogue values.
        if M500 is not None:
            self.M500 = float(M500)*1.e14*u.Msun
            self.M500_std = 0.*u.Msun
            self.log.log(logging.INFO,'Custom M500 mass set')
        if R500 is not None:
            self.R500 = float(R500)*u.Mpc
            self.log.log(logging.INFO,'Custom R500 radius set')
            self.user_radius = self.R500
        if z is not None:
            self.z = float(z)
            self.log.log(logging.INFO,'Custom redshift set')

        self.factor = self.cosmology.kpc_proper_per_arcmin(self.z).to(u.Mpc/u.deg)
        self.radius_real = self.R500/self.factor
        self.freq = (self.header['CRVAL3']*u.Hz).to(u.MHz)

    def set_image_characteristics(self, decrease_img_size):
        """Set noise levels and (optionally) crop/pad the image for MCMC."""
        # BUGFIX: the condition was inverted (`!= 0.`), which ignored a
        # user-supplied noise level and produced zero noise for the default
        # rms=0. Estimate from the image only when no manual value is given.
        if self.rmsnoise == 0.:
            self.rmsnoise, self.imagenoise = u.Jy*self.get_noise(self.data*self.beam2pix)/self.beam2pix
        else:
            self.rmsnoise = 1.e-6*(self.rmsnoise/self.beam2pix)*u.Jy
            self.imagenoise = 0.
        self.log.log(logging.INFO,'rms noise %f microJansky/beam' % (1.e6*(self.rmsnoise*self.beam2pix).value))
        self.log.log(logging.INFO,'rms noise %f microJansky/arcsec2' % (1.e6*(self.rmsnoise/self.pix_area).to(u.Jy/u.arcsec**2.).value))

        if decrease_img_size:
            self.decrease_fov(self.data)
            x = np.arange(0, np.shape(self.data.value)[1], step=1, dtype='float')
            y = np.arange(0, np.shape(self.data.value)[0], step=1, dtype='float')
            self.x_pix, self.y_pix = np.meshgrid(x, y)
            self.image_mask, self.mask = utils.masking(self, self.mask)
            self.exponentialFit(self.data.value)
        else:
            # Keep the full FoV but pad by sqrt(2)/2 so rotations in the
            # MCMC stage do not clip the image corners.
            pivot = ((np.sqrt(2.)/2.-0.5)*np.array(self.data.shape)).astype(np.int64)
            padX = [pivot[0], pivot[0]]
            padY = [pivot[1], pivot[1]]
            self.data_mcmc = np.pad(self.data, [padY, padX], 'constant')
            self.fov_info_mcmc = [-pivot[0], self.data.shape[0]+pivot[0],
                                  -pivot[1], self.data.shape[1]+pivot[1]]
            self.fov_info = [0, self.data.shape[0], 0, self.data.shape[1]]
            self.margin = np.array(self.fov_info)-np.array(self.fov_info_mcmc)
            self.data = self.data[self.fov_info[0]:self.fov_info[1],
                                  self.fov_info[2]:self.fov_info[3]]
            self.ra = self.ra[self.fov_info[2]:self.fov_info[3]]
            self.dec = self.dec[self.fov_info[0]:self.fov_info[1]]

        self.noise_char = utils.noise_characterisation(self, self.data.value)
        self.pix2kpc = self.pix_size*self.factor.to(u.kpc/u.deg)

    def get_beam_area(self):
        """Read the restoring beam from the header and derive area/pixel ratios."""
        try:
            self.bmaj = self.header['BMIN']*u.deg
            self.bmin = self.header['BMAJ']*u.deg
            self.bpa = self.header['BPA']*u.deg
        except KeyError:
            # Some imagers only record the beam in HISTORY cards.
            string = str(self.header['HISTORY'])
            self.bmaj = self.findstring(string, 'BMAJ')*u.deg
            self.bmin = self.findstring(string, 'BMIN')*u.deg
            self.bpa = self.findstring(string, 'BPA')*u.deg

        self.pix_size = abs(self.header['CDELT2'])*u.deg
        beammaj = self.bmaj/(2.*(2.*np.log(2.))**0.5)  # Convert to sigma
        beammin = self.bmin/(2.*(2.*np.log(2.))**0.5)  # Convert to sigma
        self.pix_area = abs(self.header['CDELT1']*self.header['CDELT2'])*u.deg*u.deg
        self.beam_area = 2.*np.pi*1.0*beammaj*beammin
        self.beam2pix = self.beam_area/self.pix_area

    def unpack_File(self):
        """Open the FITS file, return its 2D image plane with NaNs zeroed."""
        self.hdul = fits.open(self.path)
        try:
            data = self.hdul[0].data[0, 0, :, :]
        except Exception:
            data = self.hdul[0].data
        self.header = self.hdul[0].header
        data[np.isnan(data)] = 0
        return data

    def findstring(self, string, key):
        """Extract the float value of ``key`` from a CLEAN HISTORY line."""
        string = string.split('\n')
        for i in range(len(string)):
            if string[i].find(key) != -1 and string[i].find('CLEAN') != -1:
                line = string[i]
        the_key = line.find(key)
        start = line[the_key:].find('=')+the_key+1
        while line[start] == ' ':
            start += 1
        if line[start:].find(' ') == -1:
            return float(line[start:])
        end = line[start:].find(' ')+start
        return float(line[start:end])

    def get_noise(self, data, ampnoise=0.2):
        """Return (rms noise, image noise). Image noise is currently disabled."""
        rmsnoise = utils.findrms(data.value)
        imagenoise = 0.  # np.sqrt((ampnoise*data)**2+(rmsnoise*np.sqrt(1./self.beam2pix))**2)
        return rmsnoise, imagenoise

    def decrease_fov(self, data, width=2):
        ''' Function decreases image size based on first fit in exponentialFit.
        Slightly bigger image is used in MCMC. data is stored in self.data_mcmc'''
        self.cropped = False
        error = False
        image_width = width*self.radius/self.pix_size
        # Probe the larger (sqrt(2)) MCMC cut-out first: if it falls off the
        # image, fall back to padding instead of cropping.
        test_fov = [int(self.centre_pix[1] - np.sqrt(2.01)*image_width),
                    int(self.centre_pix[1] + np.sqrt(2.01)*image_width),
                    int(self.centre_pix[0] - np.sqrt(2.01)*image_width),
                    int(self.centre_pix[0] + np.sqrt(2.01)*image_width)]
        for margin in test_fov:
            if margin < 0 or margin > np.array(self.data.shape).min():
                error = True

        if error:
            self.log.log(logging.ERROR,'{}: Decreasing FoV not possible. Halo is too big'.format(self.target))
            pivot = ((np.sqrt(2.)/2.-0.5)*np.array(data.shape)).astype(np.int64)
            padX = [pivot[0], pivot[0]]
            padY = [pivot[1], pivot[1]]
            self.data_mcmc = np.pad(data, [padY, padX], 'constant')
            self.fov_info_mcmc = [-pivot[0], self.data.shape[0]+pivot[0],
                                  -pivot[1], self.data.shape[1]+pivot[1]]
            self.fov_info = [0, self.data.shape[0], 0, self.data.shape[1]]
        else:
            self.fov_info = [int(self.centre_pix[1] - image_width),
                             int(self.centre_pix[1] + image_width),
                             int(self.centre_pix[0] - image_width),
                             int(self.centre_pix[0] + image_width)]
            self.fov_info_mcmc = [int(self.centre_pix[1] - np.sqrt(2.01)*image_width),
                                  int(self.centre_pix[1] + np.sqrt(2.01)*image_width),
                                  int(self.centre_pix[0] - np.sqrt(2.01)*image_width),
                                  int(self.centre_pix[0] + np.sqrt(2.01)*image_width)]
            self.data_mcmc = data[self.fov_info_mcmc[0]:self.fov_info_mcmc[1],
                                  self.fov_info_mcmc[2]:self.fov_info_mcmc[3]]
            self.cropped = True

        self.margin = np.array(self.fov_info)-np.array(self.fov_info_mcmc)
        self.data = data[self.fov_info[0]:self.fov_info[1],
                         self.fov_info[2]:self.fov_info[3]]
        self.ra = self.ra[self.fov_info[2]:self.fov_info[3]]
        self.dec = self.dec[self.fov_info[0]:self.fov_info[1]]

    def pix_to_world(self):
        """Build RA/DEC coordinate axes anchored at the fitted halo centre."""
        w = wcs.WCS(self.header)
        centre_pix = np.array([[self.centre_pix[0], self.centre_pix[1]]])
        world_coord = w.celestial.wcs_pix2world(centre_pix, 1)
        if world_coord[0, 0] < 0.:
            world_coord[0, 0] += 360
        self.centre_wcs = (np.array([world_coord[0, 0], world_coord[0, 1]])*u.deg)
        self.ra = np.arange(0, len(self.x_pix))*self.pix_size
        self.dec = np.arange(0, len(self.y_pix))*self.pix_size
        # Shift the linear axes so they pass through the halo centre.
        self.ra -= self.ra[self.centre_pix[0]]-self.centre_wcs[0]
        self.dec -= self.dec[self.centre_pix[1]]-self.centre_wcs[1]

    def find_halo_centre(self, data, first):
        """Return the halo centre in pixel coordinates.

        On the first pass (full image) this converts ``self.loc`` to pixels;
        on later (cropped) passes the image centre is used.
        """
        if first or self.original_image.shape == self.data.shape:
            w = wcs.WCS(self.header)
            centre_wcs = np.array([[self.loc.ra.deg, self.loc.dec.deg]])
            world_coord = w.celestial.wcs_world2pix(centre_wcs, 1, ra_dec_order=True)
            return np.array([world_coord[0, 0], world_coord[0, 1]])
        else:
            return np.array((data.shape[1]/2., data.shape[0]/2.), dtype=np.int64)

    def pre_mcmc_func(self, obj, *theta):
        """Wrapper for curve_fit: evaluate the circular model, masked if needed."""
        I0, x0, y0, re = theta
        model = obj.circle_model((obj.x_pix, obj.y_pix), I0, x0, y0, re)
        if obj.mask:
            return model[obj.image_mask.ravel() == 0]
        else:
            return model

    def exponentialFit(self, data, first=False):
        """Fit a circular exponential profile to locate the halo and set
        ``self.radius``, ``self.centre_pix`` and ``self.I0``."""
        plotdata = np.copy(data)
        plotdata[self.image_mask == 1] = 0
        max_flux = np.max(plotdata)
        centre_pix = self.find_halo_centre(data, first)
        if not first:
            size = self.radius/(3.5*self.pix_size)
            max_flux = self.I0
        else:
            size = data.shape[1]/4.
        bounds = ([0., 0., 0., 0.,],
                  [np.inf, data.shape[0],
                   data.shape[1],
                   data.shape[1]/2.])
        if self.user_radius != False:
            size = (self.radius_real/2.)/self.pix_size

        image = data.ravel()
        if self.mask:
            image = data.ravel()[self.image_mask.ravel() == 0]
        # `self` is passed as the x-data so pre_mcmc_func can reach the grids.
        popt, pcov = curve_fit(self.pre_mcmc_func, self,
                               image, p0=(max_flux, centre_pix[0],
                               centre_pix[1], size), bounds=bounds)
        if (self.user_radius != False and self.radius_real < (3.5*popt[3]*self.pix_size)):
            popt[3] = size
            print('size overwrite')

        self.radius = 3.5*popt[3]*self.pix_size
        self.centre_pix = np.array([popt[1], popt[2]], dtype=np.int64)
        self.I0 = popt[0]

    def circle_model(self, coords, I0, x0, y0, re):
        """Circular exponential surface-brightness model, flattened for curve_fit."""
        x, y = coords
        r = np.sqrt((x-x0)**2+(y-y0)**2)
        Ir = I0 * np.exp(-(r/re))
        return Ir.ravel()

    def Close(self):
        """Close the underlying FITS file handle."""
        self.hdul.close()
        self.log.log(logging.INFO,'closed Halo object {}'.format(self.target))
| 20,320
| 43.85872
| 136
|
py
|
Halo-FDCA
|
Halo-FDCA-master/FDCA/plotting_fits.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: J.M. Boxelaar
'''
import numpy as np
import astropy.units as u
import sys
import scipy.stats as stats
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import os
#import aplpy
from scipy.optimize import curve_fit
import matplotlib.colors as mplc
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import ScalarFormatter
from scipy import ndimage
from scipy import signal
from . import fdca_utils as utils
Jydeg2 = u.Jy/(u.deg*u.deg)
mJyarcsec2 = u.mJy/(u.arcsec*u.arcsec)
uJyarcsec2 = 1.e-3*u.mJy/(u.arcsec*u.arcsec)
titlesize = 20
labelsize = 13
def fit_result(obj, model, data, noise, mask=False, regrid=False):
    """Create and save the three-panel fit figure: data | model | residual.

    Panels share a brightness normalisation; everything is converted to
    uJy/arcsec^2 for display. When ``regrid`` is True, data/model/mask are
    first regridded to beam-sized pixels and axes switch from degrees to
    pixels. The figure is written to ``halo.plotPath`` as a PDF.

    Parameters
    ----------
    obj : fitting-result object — must expose ``halo``, ``image_mask``,
        ``mask_treshold``, ``modelName`` and ``filename_append``.
    model, data : astropy Quantity 2D images (flux per pixel).
    noise : astropy Quantity, rms noise (re-estimated when regridding).
    mask : bool, unused here (masking is always applied via image_mask).
    regrid : bool, see above.
    """
    halo = obj.halo
    ra = halo.ra.value
    dec = halo.dec.value
    bmin = halo.bmin
    bmaj = halo.bmaj
    scale = 1.
    xlabel = 'RA [Deg]'
    ylabel = 'DEC [Deg]'
    scale = 1.  # NOTE(review): duplicate assignment, same value as above
    #if mask:
    image_mask = obj.image_mask
    if regrid:
        # Regrid to beam-sized pixels; noise must be re-measured afterwards.
        data = utils.regridding(obj.halo,data, decrease_fov=True)
        model = utils.regridding(obj.halo,model)
        #if mask:
        image_mask = utils.regridding(obj.halo, obj.image_mask*u.Jy, mask= not obj.halo.cropped).value
        noise = utils.findrms(data.value)*u.Jy
        # Beam axes expressed in (regridded) pixel units.
        scale = (np.array((bmin.value,bmaj.value))/halo.pix_size).value
        bmin = bmin/(scale[0]*halo.pix_size)
        bmaj = bmaj/(scale[1]*halo.pix_size)
        ra = np.arange(0,data.shape[1])#halo.ra.value
        dec = np.arange(0,data.shape[0])#halo.dec.value
        xlabel = 'Pixels'
        ylabel = 'Pixels'

    #plt.imshow(image_mask)
    #plt.show()
    fig, axes = plt.subplots(ncols=3, nrows=1, sharey=True)
    for axi in axes.flat:
        axi.xaxis.set_major_locator(plt.MaxNLocator(5))
        axi.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
        axi.yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
    fig.set_size_inches(3.2*5,5.1)
    # Scale bar and beam ellipse only on the first (data) panel.
    draw_sizebar(halo,axes[0], scale, regrid)
    draw_ellipse(halo,axes[0], bmin, bmaj, regrid)

    # Convert everything to uJy/arcsec^2 for display.
    data = (data/halo.pix_area).to(uJyarcsec2).value
    noise = (noise/halo.pix_area).to(uJyarcsec2).value
    model = (model/halo.pix_area).to(uJyarcsec2).value

    # Masked pixels are pushed to a sentinel value far below the colour range
    # so they render black and can be contoured at -9999.8 below.
    masked_data = np.copy(data)
    #if mask:
    if regrid:
        masked_data[image_mask > obj.mask_treshold*image_mask.max()] =-10000.
    else:
        masked_data[image_mask==1]= -10000.
    if regrid:
        NORMres = mplc.Normalize(vmin=-2.*noise, vmax=1.*masked_data.max())
    else: NORMres = mplc.Normalize(vmin=-2.*noise, vmax=1.*masked_data.max())

    #Trying two different functions since names were changed in recent matplotlib 3.3 update.
    try:
        Normdiv = mplc.TwoSlopeNorm(vcenter=0., vmin=0.8*(data-model).min(), vmax=0.8*(data-model).max())
    except:
        Normdiv = mplc.DivergingNorm(vcenter=0., vmin=0.8*(data-model).min(), vmax=0.8*(data-model).max())

    # Panel 1: masked data with model contours (powers of 2 times the rms).
    im1 = axes[0].imshow(masked_data,cmap='inferno', origin='lower',
                         extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres)
    LEVEL = np.array([1,2,4,8,16,32,64,128,256,512,1024,2048,4096])*noise
    cont1 = axes[0].contour(model,colors='white', levels=LEVEL, alpha=0.6,
                            extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.)
    # Green contour outlines the masked (sentinel-valued) regions.
    cont2 = axes[0].contour(masked_data,colors='lightgreen', levels=np.array([-9999.8]),
                            alpha=0.6, linestyles='-',extent=(ra.max(),ra.min(),dec.min(),dec.max()),
                            norm = NORMres,linewidths=1.5)
    axes[0].annotate('$V(x,y)$',xy=(0.5, 1), xycoords='axes fraction',
                     fontsize=titlesize, xytext=(0, -9), textcoords='offset points',
                     ha='center', va='top', color='white')
    axes[0].set_title("Radio data", fontsize=titlesize)
    axes[0].set_xlabel(xlabel, fontsize=labelsize)
    axes[0].set_ylabel(ylabel, fontsize=labelsize)
    axes[0].grid(color='white', linestyle='-', alpha=0.25)
    plt.tight_layout()

    # Panel 2: the fitted model image.
    im2 = axes[1].imshow(model,cmap='inferno', origin='lower',
                         extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres)
    axes[1].annotate('$I(x,y)$',xy=(0.5, 1), xycoords='axes fraction',
                     fontsize=titlesize, xytext=(0, -9), textcoords='offset points',
                     ha='center', va='top', color='white')
    axes[1].set_title(obj.modelName.replace('_',' ')+" model", fontsize=titlesize)
    axes[1].set_xlabel(xlabel, fontsize=labelsize)
    axes[1].grid(color='white', linestyle='-', alpha=0.25)
    cbar = fig.colorbar(im2,ax=axes[1])
    cbar.ax.set_ylabel('$\\mu$Jy arcsec$^{-2}$',fontsize=labelsize)
    #cbar.formatter = ScalarFormatter(useMathText=False)
    #cbar.formatter = ticker.LogFormatter(base=10.,labelOnlyBase=True)
    #cbar.formatter = ticker.StrMethodFormatter('%.2f')
    plt.tight_layout()

    # Panel 3: residual (data - model) on a diverging scale centred at 0.
    im3 = axes[2].imshow(data-model, cmap='PuOr_r', origin='lower',
                         extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = Normdiv)
    cont4 = axes[2].contour(masked_data,
                            colors='red', levels=np.array([-9999.8]), alpha=0.6, linestyles='-',
                            extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm = NORMres,linewidths=1.5)
    try:
        cont3 = axes[2].contour(model, alpha=0.7, colors='black', levels=[2*noise],
                                extent=(ra.max(),ra.min(),dec.min(),dec.max()), norm=NORMres)
        axes[2].clabel(cont3, fontsize=12, inline=1, fmt='2$\\sigma_{\\mathrm{rms}}$',colors='black')
    except: pass
    axes[2].annotate('$V(x,y)-I(x,y)$',xy=(0.5, 1), xycoords='axes fraction',
                     fontsize=titlesize, xytext=(0, -9), textcoords='offset points',
                     ha='center', va='top', color='black')
    axes[2].set_title("Residual image", fontsize=titlesize)
    axes[2].set_xlabel(xlabel, fontsize=labelsize)
    axes[2].grid(color='black', linestyle='-', alpha=0.25)
    plt.tight_layout()

    import matplotlib.ticker as ticker
    cbar = fig.colorbar(im3,ax=axes[2])
    cbar.ax.set_ylabel('$\\mu$Jy arcsec$^{-2}$',fontsize=labelsize)
    #cbar.formatter = ScalarFormatter(useMathText=False)
    #cbar.formatter = ticker.LogFormatter(base=10.,labelOnlyBase=True)
    #cbar.formatter = ticker.StrMethodFormatter('%.2f')

    if regrid:
        plt.savefig(halo.plotPath +halo.file.replace('.fits','')+'_mcmc_model'+obj.filename_append+'_REGRID.pdf')
    else:
        plt.savefig(halo.plotPath +halo.file.replace('.fits','')+'_mcmc_model'+obj.filename_append+'.pdf')
    #plt.show()
    plt.clf()
    plt.close(fig)
def draw_sizebar(obj, ax, scale, regrid=False):
    """Overlay an anchored 250 kpc scale bar at the bottom centre of ``ax``.

    The bar length is 2.5 times 0.1 Mpc converted to axis units: degrees
    normally, or regridded pixels when ``regrid`` is True.
    """
    from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
    # 0.1 Mpc in degrees on the sky at the cluster distance.
    length = 0.1 / obj.factor.to(u.Mpc / u.deg)
    if regrid:
        # Convert degrees to (beam-sized) pixel units.
        length = length / (scale[1] * obj.pix_size)
    bar = AnchoredSizeBar(ax.transData, length.value * 2.5,
                          r"250 kpc",
                          loc='lower center',
                          pad=0.1, borderpad=0.5, sep=5,
                          frameon=False, color='white')
    ax.add_artist(bar)
def draw_ellipse(obj, ax, bmin, bmaj, regrid=False):
    """Overlay the synthesised beam as an anchored ellipse, lower-left corner.

    After regridding the pixels are beam-aligned, so the position angle is
    drawn as zero in that case.
    """
    from mpl_toolkits.axes_grid1.anchored_artists import AnchoredEllipse
    bpa = obj.bpa.value
    if regrid:
        bpa = 0
    common = dict(width=bmaj.value, height=bmin.value, angle=-bpa,
                  loc='lower left', pad=0.3, borderpad=0.3, frameon=True)
    try:
        # Older matplotlib accepts a colour keyword here; fall back if not.
        beam = AnchoredEllipse(ax.transData, color='lightskyblue', **common)
    except:
        beam = AnchoredEllipse(ax.transData, **common)
    ax.add_artist(beam)
def model_comparisson(halo, mask=False):
    """Plot the masked radio data three times, overlaid with the contours of
    the circular (result4), elliptical (result6) and skewed (result8) model
    fits, and save the figure as ``*_mcmc_model_ALL.pdf``.

    Parameters
    ----------
    halo : Radio_Halo
        Must carry ``result4``/``result6``/``result8`` fit-result objects.
    mask : bool
        Unused; the mask stored in ``halo.result4.image_mask`` is always
        applied (kept for interface compatibility).
    """
    fig, axes = plt.subplots(ncols=3, nrows=1, sharey=True)
    bmin = halo.bmin
    bmaj = halo.bmaj
    scale = 1.
    ra = halo.ra.value
    dec = halo.dec.value
    for axi in axes.flat:
        axi.xaxis.set_major_locator(plt.MaxNLocator(5))
        axi.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
        axi.yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
    fig.set_size_inches(3.2*5, 5.1)

    # Shared display quantities (uJy/arcsec^2); masked pixels get a sentinel
    # value far below the colour range so they render black and can be
    # outlined with a contour at -999.8.
    data = (np.copy(halo.result4.data)/halo.pix_area).to(uJyarcsec2).value
    noise = (halo.rmsnoise/halo.pix_area).to(uJyarcsec2).value
    masked_data = data.copy()
    masked_data[halo.result4.image_mask == 1] = -10000.
    LEVEL = np.arange(1, 7)*noise
    NORMres = mplc.Normalize(vmin=-2.*noise, vmax=2.*masked_data.max())
    extent = (ra.max(), ra.min(), dec.min(), dec.max())

    # One panel per model. Note: previously the elliptical panel quoted
    # result8's flux error (copy-paste bug) and the scale bar / beam ellipse
    # for panels 2 and 3 were drawn onto axes[0]; both are fixed here.
    panels = [(axes[0], halo.result4, 'Circular'),
              (axes[1], halo.result6, 'Elliptical'),
              (axes[2], halo.result8, 'Skewed ')]
    im = None
    for ax, result, label in panels:
        im = ax.imshow(masked_data, cmap='inferno', origin='lower',
                       extent=extent, norm=NORMres)
        try:
            ax.contour((result.model/halo.pix_area).to(uJyarcsec2).value,
                       colors='white', levels=LEVEL, alpha=0.6,
                       extent=extent, norm=NORMres, linewidths=1.)
            ax.contour(masked_data, colors='lightgreen',
                       levels=np.array([-999.8]), alpha=0.9, linestyles='-',
                       extent=extent, norm=NORMres, linewidths=1.5)
        except:
            print('PROCESSING: Failed making contours')
        ax.set_title('%s\n $S_{\\mathrm{1.5 GHz}}=%.1f\\pm%.1f$ mJy'
                     % (label, result.flux_val.value, result.flux_err.value),
                     fontsize=15)
        ax.set_xlabel('RA [deg]', fontsize=labelsize)
        ax.set_ylabel('DEC [deg]', fontsize=labelsize)
        ax.grid(color='white', linestyle='-', alpha=0.25)
        draw_sizebar(halo, ax, scale)
        draw_ellipse(halo, ax, bmin, bmaj)
        plt.tight_layout()

    import matplotlib.ticker as ticker
    cbar = fig.colorbar(im)
    cbar.ax.set_ylabel('$\\mu$Jy arcsec$^{-2}$', fontsize=labelsize)
    # NOTE(review): StrMethodFormatter expects '{x:...}' syntax; '%.2f' will
    # render literally on the ticks — kept as-is pending confirmation.
    cbar.formatter = ticker.StrMethodFormatter('%.2f')

    plt.savefig(halo.plotPath + halo.file.replace('.fits','') + '_mcmc_model_ALL.pdf')
    plt.clf()
    plt.close(fig)
| 13,664
| 41.306502
| 152
|
py
|
Halo-FDCA
|
Halo-FDCA-master/FDCA/markov_chain_monte_carlo.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: J.M. Boxelaar
Version: 08 June 2020
'''
from __future__ import division
import sys
import os
import logging
from multiprocessing import Pool, cpu_count, freeze_support, set_start_method
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import scipy.stats as stats
from scipy import ndimage
from scipy.special import gammainc, gamma
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize, LogNorm
from skimage.measure import block_reduce
from skimage.transform import rescale
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
from astropy.io import fits
from astropy import wcs
from astropy import units as u
from astropy.coordinates import SkyCoord
import emcee
import corner
# Subfile imports
from . import fdca_utils as utils
from . import plotting_fits as plot
set_start_method("fork")
freeze_support()
rad2deg = 180./np.pi
deg2rad = np.pi/180.
Jydeg2 = u.Jy/(u.deg*u.deg)
mJyarcsec2 = u.mJy/(u.arcsec*u.arcsec)
uJyarcsec2 = 1.e-3*u.mJy/(u.arcsec*u.arcsec)
class fitting(object):
    '''
    -CLASS DESCRIPTION-
    -INPUT-
    _parent_ (Radio_Halo object): Radio_Halo object containing all relevant
                                  object information
    data (2D array): Data array to be fitted. It is adviced to
                     use 'Radio_Halo.data_mcmc'
    dim (str): model kind. Choose from ('circle','ellipse','rotated_ellipse','skewed').
    p0 (array like): Initial robust guess for fit parameters. Used for preliminary
                     scipy.optimize.curve_fit. See Scipy documentation for more info.
    bounds (2-tuple of array_like): Initial robust guess for fit parameter bounds.
                                    Used for preliminary scipy.curve_fit. See Scipy
                                    documentation for more info.
    walkers (int): Number of walkers to deploy in the MCMC algorithm
    steps (int): Number of evaluations each walker has to do.
    save (bool): Whether to save the mcmc sampler chain in a fits file. default is False
    burntime (int): burn-in time for MCMC walkers. See emcee documentation for info.
    logger: Configured logging object to log info to a .log file. If not given,
            nothing happens.
    rebin (bool): default is True. regridding data to beamsize to fit to independent
                  datapoints.
    Mask (bool): applying mask to image. If true: a DS9 .reg has to be present in the
                 Radio_halo.maskPath directory. Default is False.
    maskpath (str): Custom path to DS9 region file, read from database.dat.
                    If '--' is given, and mask=True, the standard
                    directory will be searched.
    max_radius (float): maximum possible radius cut-off. Fitted halos cannot have any
                        r > max_radius. In units of kpc.
                        Default is None (implying image_size/2).
    gamma_prior (bool): whether to use a gamma distribution as a prior for radii.
                        Default is False. For the gamma parameters:
                        shape = 2.5, scale = 120 kpc.
    '''

    def __init__(self, _parent_, data, dim, p0, bounds, walkers, steps,
                 burntime=None, logger=logging, rebin=True, mask=False,
                 maskpath='--', max_radius=None, gamma_prior=False,
                 k_exponent=False, offset=False):
        """Validate the model choice, store the fit configuration and build
        the pixel coordinate grid for the data."""
        if dim not in ['circle', 'ellipse', 'rotated_ellipse', 'skewed']:
            print('Provide valid function kind')
            sys.exit()

        p0 = list(p0)
        self.orig_shape = _parent_.data.shape
        self.rebin = rebin
        self.log = logger
        self.halo = _parent_
        self.noise = _parent_.imagenoise
        self.rms = _parent_.rmsnoise
        # RMS noise expressed per pixel instead of per beam.
        self.sigma = (self.rms*self.halo.beam2pix).value
        self.data = data
        self.steps = int(steps)
        self.mask_treshold = 0.5
        self.k_exponent = k_exponent
        self.offset = offset
        self.gamma_prior = gamma_prior

        self.p0 = p0
        self.bounds = bounds
        self.check_settings(dim, walkers, mask, burntime, maskpath, max_radius)

        x = np.arange(0, _parent_.data.shape[1], 1)
        y = np.arange(0, _parent_.data.shape[0], 1)
        self.x_pix, self.y_pix = np.meshgrid(x, y)
        self.dof = len(data.value.flat) - self.dim

    def __preFit__(self):
        """Least-squares pre-fit (scipy curve_fit) to obtain a robust MCMC
        starting point; stores the result in self.popt / self.perr."""
        self.pre_mcmc_fit(self.halo.data, p0=np.array(self.p0),
                          bounds=np.array(self.bounds))

    def __run__(self, save=False):
        """Run the emcee EnsembleSampler on the (optionally rebinned/masked)
        data and return the full sampler chain.

        save (bool): write the chain to a FITS file via __save__.
        """
        data = self.set_data_to_use(self.data)
        x = np.arange(0, self.data.shape[1])
        y = np.arange(0, self.data.shape[0])
        coord = np.meshgrid(x, y)

        # Initialise the walkers in a tiny Gaussian ball around the pre-fit optimum.
        theta_guess = self.popt[self.params]
        self.mcmc_noise = utils.findrms(data)
        pos = [theta_guess*(1.+1.e-3*np.random.randn(self.dim))
               for i in range(self.walkers)]

        # set_dictionary is called to create a dictionary with necessary attributes
        # because 'Pool' cannot pickle the fitting object.
        halo_info = set_dictionary(self)

        num_CPU = cpu_count()
        with Pool(num_CPU) as pool:
            sampler = emcee.EnsembleSampler(self.walkers, self.dim, lnprob, pool=pool,
                                            args=[data, coord, halo_info])
            sampler.run_mcmc(pos, self.steps, progress=True)

        self.sampler_chain = sampler.chain
        self.samples = self.sampler_chain[:, int(self.burntime):, :].reshape((-1, self.dim))

        if save:
            self.__save__()
        self.plotSampler()
        return self.sampler_chain

    def __save__(self):
        """Write the full sampler chain plus metadata header to a FITS file."""
        path = '%s%s_mcmc_samples%s.fits' % (self.halo.modelPath,
                                             self.halo.file.replace('.fits', ''),
                                             self.filename_append)
        self.hdu = fits.PrimaryHDU()
        self.hdu.data = self.sampler_chain
        self.set_sampler_header()
        self.hdu.writeto(path, overwrite=True)

    def check_settings(self, dim, walkers, mask, burntime, maskpath, max_radius):
        """Translate the model name into the model functions and the
        active-parameter mask; sanity-check walkers, burntime and max_radius."""
        self.modelName = dim
        self.paramNames = ['I0','x0','y0','r1','r2','r3','r4','ang','k_exp','off']
        if dim == 'circle':
            self._func_ = utils.circle_model
            self._func_mcmc = circle_model
            self.AppliedParameters = [True,True,True,True,False,False,False,False,False,False]
        elif dim == 'ellipse':
            self._func_ = utils.ellipse_model
            self._func_mcmc = ellipse_model
            self.AppliedParameters = [True,True,True,True,True,False,False,False,False,False]
        elif dim == 'rotated_ellipse':
            self._func_ = utils.rotated_ellipse_model
            self._func_mcmc = rotated_ellipse_model
            self.AppliedParameters = [True,True,True,True,True,False,False,True,False,False]
        elif dim == 'skewed':
            self._func_ = utils.skewed_model
            self._func_mcmc = skewed_model
            self.AppliedParameters = [True,True,True,True,True,True,True,True,False,False]
        else:
            self.log.log(logging.CRITICAL, 'CRITICAL: invalid model name')
            print('CRITICAL: invalid model name')
            sys.exit()

        if self.k_exponent: self.AppliedParameters[-2] = True
        if self.offset: self.AppliedParameters[-1] = True

        # Boolean Series marking which of the 10 possible parameters are fitted.
        self.params = pd.DataFrame.from_dict({'params': self.AppliedParameters},
                                             orient='index', columns=self.paramNames).loc['params']
        self.dim = len(self.params[self.params == True])

        if walkers >= 2*self.dim:
            self.walkers = int(walkers)
        else:
            # emcee requires nwalkers >= 2*ndim; pad with a small margin.
            self.walkers = int(2*self.dim+4)
            self.log.log(logging.WARNING, 'MCMC Too few walkers, nwalkers = {}'.format(self.walkers))

        self.image_mask, self.mask = utils.masking(self, mask)

        if burntime is None:
            self.burntime = int(0.125*self.steps)
        elif 0. > burntime or burntime >= 0.8*self.steps:
            self.log.log(logging.ERROR, 'MCMC Input burntime of {} is invalid. setting burntime to {}'\
                                        .format(burntime, 0.25*self.steps))
            self.burntime = int(0.25*self.steps)
        else:
            self.burntime = int(burntime)

        if max_radius is None:
            self.max_radius = self.data.shape[0]/2.
        else:
            # Convert the kpc cut-off to pixels.
            self.max_radius = max_radius/self.halo.pix2kpc.value

        filename_append = '_%s' % (self.modelName)
        if self.mask: filename_append += '_mask'
        if self.k_exponent: filename_append += '_exp'
        if self.offset: filename_append += '_offset'
        self.filename_append = filename_append

    def find_mask(self):
        """Enable masking only when a DS9 region file exists at halo.maskPath."""
        if os.path.isfile(self.halo.maskPath):
            self.mask = True
        else:
            self.mask = False
            self.log.log(logging.ERROR, 'No regionfile found,continueing without mask')

    def setMask(self, data):
        """Rasterise the DS9 region file to a FITS mask and crop it to the
        current field of view."""
        regionpath = self.halo.maskPath
        outfile = self.halo.basedir+'Data/Masks/'+self.halo.target+'_mask.fits'
        utils.mask_region(self.halo.path, regionpath, outfile)

        # In 'Radio_Halo', there is a function to decrease the fov of an image.
        # The mask is made w.r.t. the entire image; fov_info makes the mask the
        # same shape as the image and overlays it.
        self.image_mask = fits.open(outfile)[0].data[0, 0,
                                                     self.halo.fov_info[0]:self.halo.fov_info[1],
                                                     self.halo.fov_info[2]:self.halo.fov_info[3]]

    def at(self, parameter):
        """Index of `parameter` within the vector of *fitted* parameters."""
        par = np.array(self.paramNames)[self.params]
        return np.where(par == parameter)[0][0]

    def set_data_to_use(self, data):
        """Return the flattened data vector the likelihood is evaluated on:
        optionally regridded to beam-sized (independent) pixels, with masked
        pixels removed."""
        if self.rebin:
            binned_data = utils.regridding(self.halo, data, decrease_fov=True)
            if not self.mask:
                self.image_mask = np.zeros(self.halo.data.shape)
            self.binned_image_mask = utils.regridding(self.halo, self.image_mask*u.Jy,
                                                      mask=not self.halo.cropped).value
            use = binned_data.value
            return use.ravel()[self.binned_image_mask.ravel() <=
                               self.mask_treshold*self.binned_image_mask.max()]
        else:
            if self.mask:
                return self.data.value.ravel()[self.image_mask.ravel() <= 0.5]
            else:
                return self.data.value.ravel()

    def pre_mcmc_func(self, obj, *theta):
        """curve_fit wrapper: evaluate the model for a flat theta vector,
        dropping masked pixels."""
        theta = utils.add_parameter_labels(obj, theta)
        model = self._func_(obj, theta)
        if obj.mask:
            return model[obj.image_mask.ravel() == 0]
        else:
            return model

    def pre_mcmc_fit(self, image, p0, bounds):
        """Least-squares pre-fit of the model to `image`, followed by a
        normalisation of the radius/angle conventions (x-axis is the long
        axis; position angle in [0, pi); radii capped at max_radius)."""
        data = image.ravel()
        # Work in the cropped frame during the fit.
        p0[1] -= self.halo.margin[2]
        p0[2] -= self.halo.margin[0]
        if self.mask:
            data = data[self.image_mask.ravel() == 0]

        bounds = (list(bounds[0, self.params]), list(bounds[1, self.params]))
        popt, pcov = curve_fit(self.pre_mcmc_func, self, data,
                               p0=tuple(p0[self.params]), bounds=bounds)
        perr = np.sqrt(np.diag(pcov))

        # Shift the centre back to the uncropped frame.
        popt[1] += self.halo.margin[2]
        popt[2] += self.halo.margin[0]

        self.popt = utils.add_parameter_labels(self, popt)
        self.perr = perr

        if not self.k_exponent: self.popt['k_exp'] = 0.5
        if not self.offset: self.popt['off'] = 0.0

        if self.modelName == 'skewed':
            # Longest dimension of the elliptical shape should always be the
            # x-axis; switch x and y radii if necessary.
            if (self.popt['r1']+self.popt['r2']) <= (self.popt['r3']+self.popt['r4']):
                self.popt['r1'], self.popt['r3'] = self.popt['r3'], self.popt['r1']
                # BUGFIX: this swap previously read the already-overwritten r3
                # ("= r4, r3"), assigning the old r1 to r4 instead of the old r2.
                self.popt['r2'], self.popt['r4'] = self.popt['r4'], self.popt['r2']
                self.popt['ang'] += np.pi/2.

        if self.modelName in ['ellipse', 'rotated_ellipse']:
            if self.popt['r1'] <= self.popt['r2']:
                self.popt['r1'], self.popt['r2'] = self.popt['r2'], self.popt['r1']
                self.popt['ang'] += np.pi/2.

        if self.modelName in ['rotated_ellipse', 'skewed']:
            # Angle of ellipse from positive x should be between 0 and pi.
            self.popt['ang'] = self.popt['ang'] % (2*np.pi)
            if self.popt['ang'] >= np.pi:
                self.popt['ang'] -= np.pi

        # Cap all radii at the configured maximum.
        for r in range(4):
            r += 1
            if self.popt['r'+str(r)] > self.max_radius:
                self.popt['r'+str(r)] = self.max_radius

        self.centre_pix = np.array([self.popt['x0'], self.popt['y0']], dtype=np.int64)
        self.centre_wcs = np.array((self.halo.ra.value[self.centre_pix[1]],
                                    self.halo.dec.value[self.centre_pix[0]]))*u.deg

        popt_units = self.transform_units(np.copy(self.popt))
        popt_units = utils.add_parameter_labels(self, popt_units[self.params])
        self.log.log(logging.INFO,'MCMC initial guess: \n{} \n and units: muJy/arcsec2, deg, deg, r_e: kpc, rad'.format(popt_units, self.perr))

        x = np.arange(0, self.data.shape[1], 1)
        y = np.arange(0, self.data.shape[0], 1)
        self.x_pix, self.y_pix = np.meshgrid(x, y)

    def plotSampler(self):
        """Save walker trace plots and a corner plot of the post-burn samples."""
        fig, axes = plt.subplots(ncols=1, nrows=self.dim, sharex=True)
        axes[0].set_title('Number of walkers: '+str(self.walkers))
        for axi in axes.flat:
            axi.yaxis.set_major_locator(plt.MaxNLocator(3))
        fig.set_size_inches(2*10, 15)

        for i in range(self.dim):
            axes[i].plot(self.sampler_chain[:, int(self.burntime):, i].transpose(),
                         color='black', alpha=0.3)
            axes[i].set_ylabel('param '+str(i+1), fontsize=15)
            plt.tick_params(labelsize=15)

        plt.savefig('%s%s_walkers%s.pdf' % (self.halo.plotPath,
                                            self.halo.target, self.filename_append), dpi=300)
        plt.clf()
        plt.close(fig)

        labels = list()
        for i in range(self.dim):
            labels.append('Param '+str(i+1))

        fig = corner.corner(self.samples, labels=labels, quantiles=[0.160, 0.5, 0.840],
                            truths=np.asarray(self.popt[self.params]),
                            show_titles=True, title_fmt='.5f')
        plt.savefig('%s%s_cornerplot%s.pdf' % (self.halo.plotPath,
                                               self.halo.target, self.filename_append), dpi=300)
        plt.clf()
        plt.close(fig)

    def transform_units(self, params):
        """In-place conversion of a parameter vector from pixel units to
        physical units: Jy/pix -> muJy/arcsec^2, pixel centre -> WCS degrees,
        radii pix -> kpc; the angle stays in radians."""
        params[0] = ((u.Jy*params[0]/self.halo.pix_area).to(uJyarcsec2)).value
        params[1] = (params[1]-self.centre_pix[0])*self.halo.pix_size.value+self.centre_wcs[0].value
        params[2] = (params[2]-self.centre_pix[1])*self.halo.pix_size.value+self.centre_wcs[1].value
        params[3] = ((params[3]*self.halo.pix2kpc).to(u.kpc)).value
        if self.modelName in ['ellipse', 'rotated_ellipse', 'skewed']:
            params[4] = ((params[4]*self.halo.pix2kpc).to(u.kpc)).value
        if self.modelName == 'skewed':
            params[5] = ((params[5]*self.halo.pix2kpc).to(u.kpc)).value
            params[6] = ((params[6]*self.halo.pix2kpc).to(u.kpc)).value
        if self.modelName in ['rotated_ellipse', 'skewed']:
            # No-op: angle already in radians; kept for symmetry/readability.
            params[self.at('ang')] = params[self.at('ang')]
        return params

    def set_sampler_header(self):
        """Populate the FITS header of the chain file with run metadata,
        per-parameter units and the initial guess."""
        self.hdu.header['nwalkers'] = (self.walkers)
        self.hdu.header['steps'] = (self.steps)
        self.hdu.header['dim'] = (self.dim)
        self.hdu.header['burntime'] = (self.burntime)
        self.hdu.header['OBJECT'] = (self.halo.name, 'Object which was fitted')
        self.hdu.header['IMAGE'] = (self.halo.file)
        self.hdu.header['UNIT_0'] = ('JY/PIX', 'unit of fit parameter')
        self.hdu.header['UNIT_1'] = ('PIX', 'unit of fit parameter')
        self.hdu.header['UNIT_2'] = ('PIX', 'unit of fit parameter')
        self.hdu.header['UNIT_3'] = ('PIX', 'unit of fit parameter')

        # Extra radii/angle units depending on the model dimensionality.
        if self.dim >= 5:
            self.hdu.header['UNIT_4'] = ('PIX', 'unit of fit parameter')
        if self.dim == 8:
            self.hdu.header['UNIT_5'] = ('PIX', 'unit of fit parameter')
            self.hdu.header['UNIT_6'] = ('PIX', 'unit of fit parameter')
        if self.dim >= 6:
            self.hdu.header['UNIT_7'] = ('RAD', 'unit of fit parameter')
        if self.dim == 7:
            self.hdu.header['UNIT_P'] = ('NONE', 'unit of fit parameter')

        for i in range(len(self.popt[self.params])):
            self.hdu.header['INIT_'+str(i)] = (self.popt[self.params][i], 'MCMC initial guess')

        self.hdu.header['MASK'] = (self.mask, 'was the data masked during fitting')
def set_dictionary(obj):
    """Bundle the picklable attributes of a `fitting` instance into a plain
    dict so the multiprocessing Pool workers can receive them (the fitting
    object itself cannot be pickled)."""
    halo = obj.halo
    return {
        "modelName": obj.modelName,
        "bmaj": halo.bmaj,
        "bmin": halo.bmin,
        "bpa": halo.bpa,
        "pix_size": halo.pix_size,
        "beam_area": halo.beam_area,
        "beam2pix": halo.beam2pix,
        "pix2kpc": halo.pix2kpc,
        "mask": obj.mask,
        "sigma": obj.mcmc_noise,
        "margin": halo.margin,
        "_func_": obj._func_mcmc,
        "image_mask": obj.image_mask,
        "binned_image_mask": obj.binned_image_mask,
        "mask_treshold": obj.mask_treshold,
        "max_radius": obj.max_radius,
        "params": obj.params,
        "paramNames": obj.paramNames,
        "gamma_prior": obj.gamma_prior,
    }
def set_model_to_use(info, data):
    """Regrid a model image to beam-sized pixels and keep only the pixels
    that survive the binned mask threshold (same selection as the data)."""
    binned = regrid_to_beamsize(info, data.value)
    mask = info['binned_image_mask'].ravel()
    keep = mask <= info['mask_treshold']*mask.max()
    return binned.ravel()[keep]
def rotate_image(info, img, decrease_fov=False):
    """Rotate `img` by -BPA (aligning the beam with the pixel axes) and crop
    to the margins stored in `info`. `decrease_fov` is accepted but unused,
    kept for call-site compatibility."""
    m = info['margin']
    rotated = ndimage.rotate(img, -info['bpa'].value, reshape=False)
    return rotated[m[2]:m[3], m[0]:m[1]]
def regrid_to_beamsize(info, img, accuracy=100.):
    """Sum-rebin `img` so each output pixel covers roughly one beam area.

    The image is first oversampled onto a finer 'pseudo' grid (flux divided
    evenly over sub-pixels), then block-summed with a beam-sized block.
    """
    # sqrt(pi/(4 ln 2)) converts a Gaussian FWHM to the side of a square
    # with the same area as the beam.
    x_scale = np.sqrt(np.pi/(4*np.log(2.)))*info['bmaj'].value
    y_scale = np.sqrt(np.pi/(4*np.log(2.)))*info['bmin'].value
    new_pix_size = np.array((y_scale, x_scale))
    accuracy = int(1./accuracy*100)
    # Block size (in pseudo-pixels) of one beam-sized output pixel.
    scale = np.round(accuracy*new_pix_size/info['pix_size']).astype(np.int64).value
    pseudo_size = (accuracy*np.array(img.shape)).astype(np.int64)
    pseudo_array = np.zeros((pseudo_size))
    orig_scale = (np.array(pseudo_array.shape)/np.array(img.shape)).astype(np.int64)
    elements = np.prod(np.array(orig_scale, dtype='float64'))

    if accuracy == 1:
        # No oversampling requested: operate on the image directly.
        pseudo_array = np.copy(img)
    else:
        # Flux-conserving oversampling: spread every original pixel over an
        # orig_scale-sized patch. NOTE(review): loops use img[i,j] with i
        # ranging over shape[1] -- this assumes a square image; confirm.
        for j in range(img.shape[0]):
            for i in range(img.shape[1]):
                pseudo_array[orig_scale[1]*i:orig_scale[1]*(i+1),
                             orig_scale[0]*j:orig_scale[0]*(j+1)] = img[i, j]/elements

    # Sum-rebin to beam-sized pixels; the trailing (partial) row/column is dropped.
    f = block_reduce(pseudo_array, block_size=tuple(scale), func=np.sum, cval=0)
    f = np.delete(f, -1, axis=0)
    f = np.delete(f, -1, axis=1)
    return f
def convolve_with_gaussian(info, data, rotate):
    """Convolve a model image with the restoring beam.

    When `rotate` is True the image is first rotated/cropped so the beam is
    aligned with the pixel axes. The kernel sigmas are the beam FWHM divided
    by sqrt(8 ln 2), in pixel units.
    """
    if rotate:
        data = rotate_image(info, data, decrease_fov=True)
    fwhm_to_sigma = np.sqrt(8*np.log(2.))
    sigma_major = (info['bmaj']/info['pix_size'])/fwhm_to_sigma
    sigma_minor = (info['bmin']/info['pix_size'])/fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma_major, sigma_minor, info['bpa'])
    return convolve(data, kernel, boundary='extend', normalize_kernel=True)
def circle_model(info, coords, theta, rotate=False):
    """Circularly symmetric exponential profile, beam-convolved."""
    xx, yy = coords
    dx = xx - theta['x0']
    dy = yy - theta['y0']
    radial = (dx**2 + dy**2)/theta['r1']**2
    profile = theta['I0']*np.exp(-radial**(0.5 + theta['k_exp'])) + theta['off']
    return convolve_with_gaussian(info, profile, rotate)
def ellipse_model(info, coord, theta, rotate=False):
    """Axis-aligned elliptical exponential profile, beam-convolved."""
    xx, yy = coord
    radial = ((xx - theta['x0'])/theta['r1'])**2 + ((yy - theta['y0'])/theta['r2'])**2
    profile = theta['I0']*np.exp(-radial**(0.5 + theta['k_exp'])) + theta['off']
    return convolve_with_gaussian(info, profile, rotate)
def rotated_ellipse_model(info, coord, theta, rotate=False):
    """Elliptical exponential profile rotated by position angle `ang`,
    beam-convolved."""
    xx, yy = coord
    dx = xx - theta['x0']
    dy = yy - theta['y0']
    cos_a = np.cos(theta['ang'])
    sin_a = np.sin(theta['ang'])
    u_rot = dx*cos_a + dy*sin_a
    v_rot = -dx*sin_a + dy*cos_a
    radial = (u_rot/theta['r1'])**2. + (v_rot/theta['r2'])**2.
    profile = theta['I0']*np.exp(-radial**(0.5 + theta['k_exp'])) + theta['off']
    return convolve_with_gaussian(info, profile, rotate)
def skewed_model(info, coord, theta, rotate=False):
    """Skewed exponential profile: four quadrants with independent e-folding
    radii (r1/r2 along rotated x, r3/r4 along rotated y), beam-convolved."""
    xx, yy = coord
    quadrants = (
        G(xx, yy, theta['I0'], theta['x0'], theta['y0'], theta['r1'], theta['r3'], theta['ang'], 1., 1.),
        G(xx, yy, theta['I0'], theta['x0'], theta['y0'], theta['r2'], theta['r4'], theta['ang'], -1., -1.),
        G(xx, yy, theta['I0'], theta['x0'], theta['y0'], theta['r1'], theta['r4'], theta['ang'], 1., -1.),
        G(xx, yy, theta['I0'], theta['x0'], theta['y0'], theta['r2'], theta['r3'], theta['ang'], -1., 1.),
    )
    profile = theta['I0']*(quadrants[0] + quadrants[2] + quadrants[1] + quadrants[3])
    return convolve_with_gaussian(info, profile, rotate)
def G(x, y, I0, x0, y0, re_x, re_y, ang, sign_x, sign_y):
    """One quadrant of the skewed exponential profile.

    Points in the wrong quadrant produce sqrt of a negative number -> NaN,
    which is zeroed so the quadrants tile the plane. Note: `I0` is accepted
    but unused here; the amplitude is applied by the caller.
    """
    u_rot = (x - x0)*np.cos(ang) + (y - y0)*np.sin(ang)
    v_rot = -(x - x0)*np.sin(ang) + (y - y0)*np.cos(ang)
    arg = (np.sqrt(sign_x*u_rot)**4.)/(re_x**2.) + \
          (np.sqrt(sign_y*v_rot)**4.)/(re_y**2.)
    result = np.exp(-np.sqrt(arg))
    result[np.isnan(result)] = 0.
    return result
def lnL(theta, data, coord, info):
    """Gaussian log-likelihood of `data` given the model parameters.

    The model is evaluated rotated, regridded to beam-sized pixels and
    masked exactly like the data vector.
    """
    raw = info['_func_'](info, coord, theta, rotate=True)*u.Jy
    model = set_model_to_use(info, raw)
    sigma = info['sigma']
    residual = data - model
    return -np.sum(residual**2./(2*sigma**2.) + np.log(np.sqrt(2*np.pi)*sigma))
def lnprior(theta, shape, info):
    """Log-prior for the circle/ellipse/rotated_ellipse models.

    Flat within bounds (amplitude positive, centre inside the image, r1
    positive and below max_radius, r2 <= r1, sane angle/exponent); with
    gamma_prior a gamma distribution (shape 2.3, scale 120 kpc) on the radii.
    """
    within_bounds = (
        theta['I0'] > 0
        and -0.4 < theta['k_exp'] < 19
        and 0 <= theta['x0'] < shape[1]
        and 0 <= theta['y0'] < shape[0]
        and 0 < theta['r1'] < info['max_radius']
        and -np.pi/4. < theta['ang'] < 5*np.pi/4.
    )
    if not within_bounds or not (0 <= theta['r2'] <= theta['r1']):
        return -np.inf
    if info['gamma_prior']:
        if info['modelName'] == 'circle':
            radii = np.array([theta['r1']])
        else:
            radii = np.array([theta['r1'], theta['r2']])
        return np.sum(np.log(utils.gamma_dist(radii, 2.3, 120./info['pix2kpc'].value)))
    return 0.0
def lnprior8(theta, shape, info):
prior = -np.inf
if theta['I0']>0 and (0 < theta['x0'] < shape[1]) and (0 < theta['y0'] < shape[0]):
if theta['r1'] > 0. and theta['r2'] > 0. and theta['r3'] > 0. and theta['r4'] > 0.:
if (0. < (theta['r3']+theta['r4']) <= (theta['r1']+theta['r2'])) and ((theta['r1']+theta['r2']) < info['max_radius']*2.):
if -np.pi/4. < theta['ang'] < 5*np.pi/4.:
prior = 0.0
if prior != -np.inf and info['gamma_prior']:
#guess = 225./info['pix2kpc'] #average based on known sample of halos.
#prior = -np.sum(1./2*((theta['r1'])**2 + (theta['r2'])**2)/((info['max_radius']/4.)**2))
radii = np.array([theta['r1'],theta['r2'],theta['r3'],theta['r4']])
prior = np.sum(np.log(utils.gamma_dist(radii, 2.3, 120./info['pix2kpc'].value)))
return prior
def lnprob(theta, data, coord, info):
    """Log-posterior evaluated by the emcee walkers: prior + likelihood.

    The flat theta vector is first expanded to a labelled parameter Series.
    """
    labelled = add_parameter_labels(info['params'], info['paramNames'], theta)
    shape = coord[0].shape
    if info['modelName'] == 'skewed':
        lp = lnprior8(labelled, shape, info)
    else:
        lp = lnprior(labelled, shape, info)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnL(labelled, data, coord, info)
def add_parameter_labels(params, paramNames, array):
    """Expand the free-parameter vector `array` into a labelled Series over
    all possible parameters; parameters not being fitted are set to zero."""
    full_vector = np.zeros(params.shape)
    full_vector[params == True] = array
    frame = pd.DataFrame.from_dict({'params': full_vector},
                                   orient='index', columns=paramNames)
    return frame.loc['params']
class processing(object):
'''
-CLASS DESCRIPTION-
-INPUT-
_parent_ (Radio_Halo object): Radio_Halo object containing all relevant
object information
data (2D array): Data array to be fitted. It is adviced to
use 'Radio_Halo.data_mcmc'
dim (int): number of parameters of fitting model to use. Choose from (8,6,5,4).
Note: currently, only dim=8 works.
walkers (int): Number of walkers to deploy in the MCMC algorithm
steps (int): Number of evauations each walker has to do.
save (bool): Whether to save the mcmc sampler chain in a fits file. default is False
burntime (int): burn-in time for MCMC walkers. See emcee documentation for info.
logger: Configured logging object to log info to a .log file. If not given,
nothing happens.
rebin (bool): default is True. regridding data to beamsize to fit to indipendent
datapoints. Default is True.
Forward (bool): Depricated.
Mask (bool): applying mask to image. If true: a DS9 .reg has to be present in the
Radio_halo.maskPath direcory Default is False.
maskpath (str): Custom path to DS9 region file, read from database.dat.
If '--' is given, and mask=True, the standard
directory will be searched.
'''
def __init__(self, _parent_, data, dim, logger, save=True, mask=False, rebin=True,
             maskpath='--', k_exponent=False, offset=False, burntime=None):
    """Store the processing configuration, load the saved MCMC chain and
    derive best-fit parameters, model image and unit conversions."""
    xs = np.arange(0, data.shape[1], 1)
    ys = np.arange(0, data.shape[0], 1)
    self.x_pix, self.y_pix = np.meshgrid(xs, ys)

    self.log = logger
    self.log.log(logging.INFO, 'Model name: {}'.format(dim))
    self.noise = _parent_.imagenoise
    self.rms = _parent_.rmsnoise
    self.data = data
    self.save = save
    self.halo = _parent_
    self.alpha = _parent_.alpha  # spectral index guess
    self.k_exponent = k_exponent
    self.offset = offset
    self.mask_treshold = 0.5

    # Resolve the model configuration, then load + post-process the chain.
    self.check_settings(dim, mask, maskpath)
    self.extract_chain_file(rebin)
    self.retreive_mcmc_params()
    self.set_labels_and_units()

    self.dof = len(data.value.flat) - self.dim
def plot_results(self):
    """Produce all standard diagnostic figures for the fitted model:
    data/model/residual panels (native and regridded), walker traces and
    the corner plot."""
    for regrid, image in ((False, self.halo.data), (True, self.halo.data_mcmc)):
        plot.fit_result(self, self.model, image, self.halo.rmsnoise,
                        mask=self.mask, regrid=regrid)
    self.plotSampler()
    self.cornerplot()
def check_settings(self, dim, mask, maskpath):
    """Translate the model name into the model function and the
    active-parameter mask, and set up the image mask."""
    self.modelName = dim
    self.paramNames = ['I0','x0','y0','r1','r2','r3','r4','ang','k_exp','off']
    if dim=='circle':
        self._func_ = utils.circle_model
        self.AppliedParameters = [True,True,True,True,False,False,False,False,False,False]
    elif dim == 'ellipse':
        self._func_ = utils.ellipse_model
        self.AppliedParameters = [True,True,True,True,True,False,False,False,False,False]
    elif dim == 'rotated_ellipse':
        self._func_ = utils.rotated_ellipse_model
        self.AppliedParameters = [True,True,True,True,True,False,False,True,False,False]
    elif dim == 'skewed':
        self._func_ = utils.skewed_model
        self.AppliedParameters = [True,True,True,True,True,True,True,True,False,False]
    else:
        self.log.log(logging.CRITICAL,'CRITICAL: invalid model name')
        print('CRITICAL: invalid model name')
        sys.exit()

    # Optional extra free parameters: exponent k and constant offset.
    if self.k_exponent: self.AppliedParameters[-2] = True
    if self.offset: self.AppliedParameters[-1] = True

    # Boolean Series marking which of the 10 possible parameters are fitted.
    self.params = pd.DataFrame.from_dict({'params':self.AppliedParameters},
                            orient='index',columns=self.paramNames).loc['params']
    self.dim = len(self.params[self.params])
    self.image_mask = np.zeros(self.halo.data.shape)
    self.image_mask, self.mask = utils.masking(self, mask)
    # Superseded mask-loading logic, kept for reference (dead string literal):
    '''
    if mask:
        if maskpath == '--':
            self.halo.maskPath = self.halo.basedir+'Output/'+self.halo.target+'.reg'
        else:
            self.halo.maskPath = maskpath
        fitting.find_mask(self)
        if self.mask:
            fitting.setMask(self,self.data)
            self.log.log(logging.INFO,'MCMC Mask set')
        else:
            self.log.log(logging.INFO,'MCMC No mask set')
            self.mask=False
    '''
def extract_chain_file(self, rebin):
    """Rebuild the chain-file suffix from the model settings and load the
    saved sampler chain (data + header) from disk."""
    suffix = '_{}'.format(self.modelName)
    if self.mask: suffix += '_mask'
    if self.k_exponent: suffix += '_exp'
    if self.offset: suffix += '_offset'
    self.filename_append = suffix
    self.rebin = rebin

    path = (self.halo.modelPath + self.halo.file.replace('.fits', '')
            + '_mcmc_samples' + self.filename_append + '.fits')
    chain_hdul = fits.open(path)
    self.sampler = (chain_hdul[0].data)
    self.info = chain_hdul[0].header
def at(self, parameter):
par = np.array(self.paramNames)[self.params]
return np.where(par == parameter)[0][0]
def retreive_mcmc_params(self):
    """Read run metadata and the initial guess from the chain header,
    discard burn-in, shift sample positions to the cropped frame and build
    the median-parameter model image."""
    self.walkers = self.info['nwalkers']
    self.steps = self.info['steps']
    burntime = int(self.info['burntime'])
    self.popt = utils.add_parameter_labels(self, np.zeros(self.dim))
    for i in range(self.dim):
        self.popt[i] = self.info['INIT_'+str(i)]

    if burntime is None:
        # NOTE(review): unreachable after int() above -- kept from original.
        self.burntime = int(0.25*self.steps)
    elif 0. > burntime or burntime >= self.steps:
        self.log.log(logging.ERROR,'MCMC Input burntime of {} is invalid. setting burntime to {}'\
                                    .format(burntime, 0.25*self.steps))
        self.burntime = int(0.25*self.steps)
    else:
        self.burntime = int(burntime)

    samples = self.sampler[:, self.burntime:, :].reshape((-1, self.dim))
    # Translate samples for location to right FoV (cropped frame).
    samples[:,self.at('x0')] -= self.halo.margin[2]
    samples[:,self.at('y0')] -= self.halo.margin[0]

    self.percentiles = self.get_percentiles(samples)
    # Median (50th percentile) of each parameter as the best-fit value.
    self.parameters = utils.add_parameter_labels(self, self.percentiles[:,1].reshape(self.dim))
    self.centre_pix = np.array([self.parameters['x0'],self.parameters['y0']], dtype=np.int64)
    self.model = self._func_(self, self.parameters)\
                            .reshape(self.x_pix.shape)*u.Jy
    self.samples = samples
def get_percentiles(self, samples):
    """Return the (n_params, 3) array of 16/50/84 percentiles per parameter.

    For models with a position angle, the angle percentiles are derived from
    the percentiles of its sine and cosine to avoid wrap-around bias.
    """
    percentiles = np.ones((samples.shape[1],3))
    for i in range(samples.shape[1]):
        percentiles[i,:] = np.percentile(samples[:, i], [16, 50, 84])

    if self.modelName in ['rotated_ellipse', 'skewed']:
        cosine = np.percentile(np.cos(samples[:,self.at('ang')]), [16, 50, 84])
        sine = np.percentile(np.sin(samples[:,self.at('ang')]), [16, 50, 84])
        arccosine = np.arccos(cosine)
        arcsine = np.arcsin(sine)

        if arcsine[1] == arccosine[1]:
            ang = arcsine.copy()
        elif arcsine[1] == -arccosine[1]:
            # NOTE(review): same assignment as the branch above -- possibly
            # intended to be -arccosine or arccosine; confirm.
            ang = arcsine.copy()
        elif arcsine[1] != arccosine[1] and arcsine[1] != -arccosine[1]:
            # NOTE(review): if arcsine[1] == 0 here, neither sub-branch fires
            # and `ang` stays unbound (NameError below); confirm intent.
            if arcsine[1] < 0:
                ang = -arccosine.copy()
            elif arcsine[1] > 0:
                ang = arccosine.copy()
        else:
            # Unreachable given the conditions above; kept from original.
            self.log.log(logging.ERROR,'Angle matching failed in processing.get_percentiles. continueing with default.')
            ang = np.percentile(samples[:,self.at('ang')], [16, 50, 84])
        percentiles[self.at('ang'),:] = ang
    return percentiles
def cornerplot(self):
    """Corner plot of the posterior samples in physical units; falls back to
    a generic title format if the per-parameter formats are rejected."""
    shared = dict(labels=self.labels_units,
                  truths=self.popt_units[self.params],
                  quantiles=[0.160, 0.5, 0.840],
                  show_titles=True, max_n_ticks=3)
    try:
        fig = corner.corner(self.samples_units, title_fmt=self.fmt, **shared)
    except:
        fig = corner.corner(self.samples_units, title_fmt='1.2g', **shared)

    if self.save:
        plt.savefig(self.halo.plotPath + self.halo.file.replace('.fits', '')
                    + '_cornerplot' + self.filename_append + '.pdf')
        plt.clf()
        plt.close(fig)
    else:
        plt.show()
def plotSampler(self):
    """Trace plot of every walker for each fitted parameter, with the
    nominal burn-in boundary (30% of the chain) marked in red."""
    fig, axes = plt.subplots(ncols=1, nrows=self.dim, sharex=True)
    axes[0].set_title('Number of walkers: '+str(self.walkers), fontsize=25)
    for axi in axes.flat:
        axi.yaxis.set_major_locator(plt.MaxNLocator(3))
    fig.set_size_inches(2*10,15)

    for i in range(self.dim):
        axes[i].plot(self.sampler[:, :, i].transpose(),color='black', alpha=0.3,lw=0.5)
        axes[i].set_ylabel(self.labels[i], fontsize=20)
        # Redundant on all but the last iteration; harmless.
        axes[-1].set_xlabel('steps', fontsize=20)
        axes[i].axvline(0.3*self.sampler.shape[1], ls='dashed', color='red')
        axes[i].tick_params(labelsize=20)
    plt.xlim(0, self.sampler.shape[1])

    if self.save:
        plt.savefig(self.halo.plotPath+self.halo.file.replace('.fits','')+'_walkers'+self.filename_append+'.pdf')
        plt.clf()
        plt.close(fig)
    else:
        plt.show()
def set_labels_and_units(self):
    """Convert the raw pixel-space samples to physical units and derive the
    labelled best-fit parameter Series plus asymmetric uncertainties."""
    self.samples_units = self.samples.copy()
    samples_units = self.samples.copy()
    samples_list = list()

    # Median centre pixel in the cropped frame.
    # NOTE(review): x uses margin[1] here while retreive_mcmc_params shifts
    # x0 by margin[2] -- confirm which offset is intended for the x axis.
    x0 = np.percentile(self.samples.real[:, 1], [16, 50, 84])[1]-abs(self.halo.margin[1])
    y0 = np.percentile(self.samples.real[:, 2], [16, 50, 84])[1]-abs(self.halo.margin[0])
    self.centre_pix = np.array([x0,y0], dtype=np.int64)
    self.centre_wcs = np.array((self.halo.ra.value[self.centre_pix[1]],
                                self.halo.dec.value[self.centre_pix[0]]))*u.deg

    # Column-wise unit conversion (transform_units mutates the list in place).
    for i in range(self.dim):
        samples_list.append(samples_units[:,i])

    transformed = self.transform_units(samples_list)
    for i in range(self.dim):
        self.samples_units[:,i] = transformed[i]

    self.popt_units = self.transform_units(np.copy(self.popt))
    self.percentiles_units = self.get_percentiles(self.samples_units)
    self.params_units = utils.add_parameter_labels(self, self.percentiles_units[:,1].reshape(self.dim))
    self.get_units()

    # Asymmetric one-sigma uncertainties from the 16/50/84 percentiles.
    uncertainties1 = self.percentiles_units[:,1]-self.percentiles_units[:,0]
    uncertainties2 = self.percentiles_units[:,2]-self.percentiles_units[:,1]
    self.log.log(logging.INFO, '\n Parameters: \n%s \nOne sigma parameter uncertainties (lower, upper): \
\n%s \n%s \nIn Units: %s' % (str(self.params_units[self.params]),
            str(uncertainties1), str(uncertainties2), str(self.units)))
def transform_units(self, params):
    """In-place conversion of a parameter vector (or list of sample columns)
    from pixel units to physical units: Jy/pix -> muJy/arcsec^2, centre
    pixel -> WCS degrees, radii pix -> kpc; the angle stays in radians."""
    params[0] = ((u.Jy*params[0]/self.halo.pix_area).to(uJyarcsec2)).value
    params[1] = (params[1]-self.centre_pix[0])*self.halo.pix_size.value+self.centre_wcs[0].value
    params[2] = (params[2]-self.centre_pix[1])*self.halo.pix_size.value+self.centre_wcs[1].value
    params[3] = ((params[3]*self.halo.pix2kpc).to(u.kpc)).value
    if self.modelName in ['ellipse', 'rotated_ellipse', 'skewed']:
        params[4] = ((params[4]*self.halo.pix2kpc).to(u.kpc)).value
    if self.modelName == 'skewed':
        params[5] = ((params[5]*self.halo.pix2kpc).to(u.kpc)).value
        params[6] = ((params[6]*self.halo.pix2kpc).to(u.kpc)).value
    if self.modelName in ['rotated_ellipse', 'skewed']:
        # No-op: angle already in radians; kept for symmetry/readability.
        params[self.at('ang')] = params[self.at('ang')]
    return params
def get_units(self):
labels = ['$I_0$','$x_0$','$y_0$']
units = ['$\\mu$Jy arcsec$^{-2}$','deg','deg']
fmt = ['.2f','.4f','.4f']
if self.modelName == 'skewed':
labels.extend(('$r_{x^+}$','$r_{x^-}$','$r_{y^+}$','$r_{y^-}$'))
units.extend(('kpc','kpc','kpc','kpc'))
fmt.extend(('.0f','.0f','.0f','.0f'))
elif self.modelName in ['ellipse', 'rotated_ellipse']:
labels.extend(('$r_{x}$','$r_{y}$'))
units.extend(('kpc','kpc'))
fmt.extend(('.1f','.1f'))
elif self.modelName == 'circle':
labels.append('$r_{e}$')
units.append('kpc')
fmt.append('.1f')
if self.modelName in ['rotated_ellipse', 'skewed']:
labels.append('$\\phi_e$')
units.append('Rad')
fmt.append('.3f')
if self.k_exponent:
labels.append('$k$')
units.append(' ')
fmt.append('.3f')
if self.offset:
labels.append('$C$')
units.append(' ')
fmt.append('.3f')
self.labels = np.array(labels,dtype='<U30')
self.units = np.array(units, dtype='<U30')
self.fmt = np.array(fmt, dtype='<U30')
self.labels_units = np.copy(self.labels)
for i in range(self.dim):
self.labels_units[i] = self.labels[i]+' ['+self.units[i]+']'
def get_confidence_interval(self, percentage=95, units=True):
alpha = 1. - percentage/100.
z_alpha = stats.norm.ppf(1.-alpha/2.)
se = np.zeros(self.params.shape)
if units:
for i in range(self.dim):
se[self.params] = np.sqrt( np.mean(self.samples_units[:, i]**2.)\
-np.mean(self.samples_units[:, i])**2. )
conf_low = self.params_units-z_alpha*se
conf_up = self.params_units+z_alpha*se
for i in range(self.dim):
self.log.log(logging.INFO,'{}% Confidence interval of {}: ({:.5f}, {:.5f}) {}'\
.format(percentage,self.labels[i],conf_low[i],
conf_up[i],self.units[i]))
self.log.log(logging.INFO,'')
else:
for i in range(self.dim):
se[i] = np.sqrt( np.mean(self.samples[:, i]**2.)\
-np.mean(self.samples[:, i])**2. )
conf_low = self.parameters-z_alpha*se
conf_up = self.parameters+z_alpha*se
for i in range(self.dim):
self.log.log(logging.INFO,'{}% Confidence interval of {}: ({:.5f}, {:.5f})'\
.format(percentage,self.labels[i],conf_low[i],
conf_up[i]))
self.log.log(logging.INFO,'')
return [conf_low, conf_up]
def get_chi2_value(self, mask_treshold=0.4):
    """Compute chi^2, reduced chi^2, Gaussian log-likelihood and AIC on the
    beam-binned (approximately independent) pixels of the best-fit model."""
    self.mask_treshold = mask_treshold
    # Temporarily evaluate on the full (uncropped) mcmc grid.
    x = np.arange(0,self.halo.data_mcmc.shape[1],1)
    y = np.arange(0,self.halo.data_mcmc.shape[0],1)
    self.x_pix, self.y_pix = np.meshgrid(x,y)

    params = self.parameters.copy()
    # Shift the centre back to the uncropped frame.
    params[1] += self.halo.margin[2]
    params[2] += self.halo.margin[0]

    # Reuse the fitting-class data selection (regrid + mask) on this instance.
    binned_data = fitting.set_data_to_use(self, self.halo.data_mcmc)
    model = self._func_(self, params, rotate=True).reshape(self.halo.data.shape)*u.Jy
    binned_model = utils.regrid_to_beamsize(self.halo, model)
    self.rmsregrid = utils.findrms(binned_data)

    if not self.mask:
        self.image_mask = np.zeros(self.halo.data.shape)
    binned_image_mask = utils.regridding(self.halo, self.image_mask*u.Jy, mask=not self.halo.cropped).value
    # Apply the same mask selection to the model as to the data.
    binned_model = binned_model.ravel()[binned_image_mask.ravel() <=\
                                mask_treshold*binned_image_mask.max()]

    chi2 = np.sum( ((binned_data-binned_model)/(self.rmsregrid))**2. )
    binned_dof = len(binned_data)-self.dim
    self.chi2_red = chi2/binned_dof

    # Gaussian log-likelihood at the best-fit parameters.
    self.ln_likelihood = -np.sum( ((binned_data-binned_model)**2.)/(2*(self.rmsregrid)**2.)\
                            + np.log(np.sqrt(2*np.pi)*self.rmsregrid))
    self.AIC = 2*(self.dim-self.ln_likelihood)

    self.log.log(logging.INFO,'chi^2: {}'.format(chi2))
    self.log.log(logging.INFO,'effective DoF: {}'.format(binned_dof))
    self.log.log(logging.INFO,'chi^2_red: {}'.format(self.chi2_red))

    # Restore the meshgrid for the cropped working image.
    x = np.arange(0,self.data.shape[1],1)
    y = np.arange(0,self.data.shape[0],1)
    self.x_pix, self.y_pix = np.meshgrid(x,y)
    def get_flux(self, int_max=np.inf, freq=None):
        """Integrate the fitted model analytically to obtain the halo flux density.

        Uses the closed-form integral of the (generalised) exponential profile:
        gamma/gammainc terms handle the radial part, `factor` the model-dependent
        geometric cross terms. Results are stored on the instance
        (`self.flux`, `self.flux_val`, `self.flux_err`, `self.flux_freq`).

        Parameters
        ----------
        int_max : float
            Upper integration radius in units of e-folding radii (np.inf for
            the full analytic integral).
        freq : astropy Quantity or None
            Frequency to extrapolate the flux to using `self.alpha`;
            defaults to the image frequency.
        """
        if freq is None:
            freq = self.halo.freq
        # Convert fitted radii from pixels to angular size.
        a = self.samples[:,3]*self.halo.pix_size
        if self.modelName=='skewed':
            # Four independent radii -> sum of the four quadrant cross terms.
            b = self.samples[:,5]*self.halo.pix_size
            c = self.samples[:,4]*self.halo.pix_size
            d = self.samples[:,6]*self.halo.pix_size
            factor = (a*b+c*d+a*d+b*c)
        elif self.modelName in ['ellipse','rotated_ellipse']:
            b = self.samples[:,4]*self.halo.pix_size
            factor = 4*a*b
        else:
            # Circle model: single radius.
            factor = 4*a**2
        # Exponent of the radial profile; 0.5 corresponds to a pure exponential.
        if self.k_exponent: m = self.samples[:,self.at('k_exp')]+0.5
        else: m=0.5
        I0 = u.Jy*self.samples[:,0]/self.halo.pix_area
        # Analytic integral per MCMC sample, extrapolated in frequency with
        # spectral index self.alpha.
        flux = (gamma(1./m)*np.pi*I0/(4*m) * factor * gammainc(1./m, int_max**(2*m))\
                *(freq/self.halo.freq)**self.alpha).to(u.mJy)
        self.flux = np.copy(flux)
        self.flux_freq = freq
        # Median and 16th-84th percentile half-width of the sample distribution.
        self.flux_val = np.percentile(flux, 50)
        self.flux_err = ((np.percentile(flux, 84)-np.percentile(flux, 16))/2.)
        # NOTE(review): calibration/subtraction uncertainties below are disabled;
        # confirm whether they should be folded into flux_err (cf. get_power).
        #cal = 0.1
        #sub = 0.1 # Osinga et al. 2020
        #self.flux_std = np.sqrt((cal*self.flux_val.value)**2+sub**2+flux_err**2)*u.mJy
        #self.flux_err = np.sqrt((cal*self.flux.value)**2+sub**2+flux_err**2)*u.mJy
        self.log.log(logging.INFO,'MCMC Flux at {:.1f} {}: {:.2f} +/- {:.2f} {}'\
                                   .format(freq.value, freq.unit, self.flux_val.value,
                                           self.flux_err.value,flux.unit))
        self.log.log(logging.INFO,'Integration radius '+str(int_max))
        self.log.log(logging.INFO,'S/N based on flux {:.2f}'\
                                   .format(self.flux_val.value/self.flux_err.value))
def get_power(self, freq=None):
if freq is None:
freq = self.halo.freq
d_L = self.halo.cosmology.luminosity_distance(self.halo.z)
power = (4*np.pi*d_L**2. *((1.+self.halo.z)**((-1.*self.alpha) - 1.))*\
self.flux*((freq/self.flux_freq)**self.alpha)).to(u.W/u.Hz)
power_std = (4*np.pi*d_L**2. *((1.+self.halo.z)**((-1.*self.alpha) - 1.))*\
self.flux_err*((freq/self.flux_freq)**self.alpha)).to(u.W/u.Hz)
self.power_std = np.percentile(power_std,50)
cal = 0.1
sub = 0.1 # Osinga et al. 2020
self.power = np.copy(power)
self.power_val = np.percentile(power,[50])[0]
power_err = ((np.percentile(power, [84])[0]-np.percentile(power, [16])[0])/2.).value
self.power_std = np.sqrt((cal*self.power_val.value)**2+sub**2+power_err**2)
self.log.log(logging.INFO,'Power at {:.1f} {}: ({:.3g} +/- {:.3g}) {}'\
.format(freq.value, freq.unit,
np.percentile(power,[50])[0].value,
(np.percentile(power, [84])[0]-\
np.percentile(power, [16])[0]).value/2.,
power.unit))
| 44,842
| 43.050098
| 142
|
py
|
Halo-FDCA
|
Halo-FDCA-master/FDCA/__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: J.M. Boxelaar
Version: 13 October 2020
'''
import logging
import sys, os
import logging.config
import logging.handlers
from . import HaloObject
from . import markov_chain_monte_carlo
from . import fdca_utils as utils
#from . import plotting_fits
__version__ = '1.0.0'
def Radio_Halo(object, path, decreased_fov=True, maskpath=None, mask=False, loc=None,
               M500=None, R500=None, z=None, outputpath='./', spectr_index=-1.2,
               logger=logging, rms=0):
    """Package-level factory: build and return a `HaloObject.Radio_Halo`.

    All arguments are forwarded unchanged to the `HaloObject.Radio_Halo`
    constructor; this wrapper only exists as the public entry point.
    """
    options = dict(
        maskpath=maskpath, mask=mask, decreased_fov=decreased_fov,
        logger=logger, loc=loc, M500=M500, R500=R500, z=z,
        outputpath=outputpath, spectr_index=spectr_index, rms=rms,
    )
    return HaloObject.Radio_Halo(object, path, **options)
| 866
| 28.896552
| 85
|
py
|
Halo-FDCA
|
Halo-FDCA-master/FDCA/.ipynb_checkpoints/__init__-checkpoint.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: J.M. Boxelaar
Version: 13 October 2020
'''
import logging
import sys, os
import logging.config
import logging.handlers
from . import HaloObject
from . import markov_chain_monte_carlo
from . import fdca_utils as utils
#from . import plotting_fits
__version__ = '1.0.0'
def Radio_Halo(object, path, decreased_fov=True, maskpath=None, mask=False, loc=None,
               M500=None, R500=None, z=None, outputpath='./', spectr_index=-1.2,
               logger=logging, rms=0):
    """Thin convenience wrapper around the `HaloObject.Radio_Halo` constructor.

    Every keyword is passed through verbatim; the return value is the newly
    constructed halo object.
    """
    return HaloObject.Radio_Halo(
        object, path,
        maskpath=maskpath,
        mask=mask,
        decreased_fov=decreased_fov,
        logger=logger,
        loc=loc,
        M500=M500,
        R500=R500,
        z=z,
        outputpath=outputpath,
        spectr_index=spectr_index,
        rms=rms,
    )
| 866
| 28.896552
| 85
|
py
|
Halo-FDCA
|
Halo-FDCA-master/FDCA/.ipynb_checkpoints/HaloObject-checkpoint.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: J.M. Boxelaar
Version: 08 June 2020
'''
# Built in module imports
import sys
import os
import logging
import time
from multiprocessing import Pool
# Scipy, astropy, emcee imports
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import wcs
import astropy.units as u
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
from astroquery.vizier import Vizier
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM
from . import fdca_utils as utils
# Silence divide-by-zero / invalid-value warnings from numpy; the image data
# legitimately contains zeros (NaNs are zeroed on load).
np.seterr(divide='ignore', invalid='ignore')

# Angle conversions and commonly used surface-brightness units.
rad2deg = 180./np.pi
deg2rad = np.pi/180.
Jydeg2 = u.Jy/(u.deg*u.deg)
mJyarcsec2 = u.mJy/(u.arcsec*u.arcsec)
uJyarcsec2 = 1.e-3*u.mJy/(u.arcsec*u.arcsec)
class Radio_Halo(object):
    '''
    -CLASS DESCRIPTION-
    This class initiates a Radio_Halo object containing all image and physical
    information. A Halo obect has to be passed to the MCMC module.
    The Halo class aslo performs preliminary processes to make MCMC possible

    -INPUT-
    object (str): Name of galaxy cluster. Currently only supports its PSZ2 or MCXC name.
                  If another object needs to be passed, fill in the physical
                  characteristics manually
    path (str): Path to data read from 'database.dat'. Compatible with
                Leiden Observatory data structure.
    decrease_fov (bool): Declare if image size has to be decreased before MCMCing. Amount
                         of decreasement has ben automatically set to 3.5*r_e in
                         self.exponentialFit().
    logger: Configured logging object to log info to a .log file. If not given,
            a new file will be created.
    loc (SkyCoord object): Manually inserted cluster location as an astropy.SkyCoord
                           object. If None: location is gathered from a Vizier query.
                           Otherwise: provide Astropy SkyCoord object with approximate
                           centre of radio halo.
    M500 (float): Manually inserted mass. If None: mass is gathered from a Vizier query
                  If not None: must be value given in 1e14 SolMass
    R500 (float): Manually inserted R500 radius. If None: radius is gathered from
                  a Vizier query (MCXC only). If not None, must be value given
                  in Mega Parsec.
    z (float): Manually inserted redshift. If None: redshift is gathered from
               a Vizier query
    spectr_index (float): Manually inserted halo spectral index (S_v = v^(spectr_index)).
                          Value is used when extrapolating flux density and calculating
                          power values. Default is -1.2 (No conclusions can be drawn
                          from using this default value in calculations).
    '''
    def __init__(self, object, path, decreased_fov=False, maskpath=None, mask=False,
                 logger=logging, loc=None, M500=None, R500=None, z=None,
                 outputpath='./', spectr_index=-1.2, rms=0):
        self.rmsnoise = rms  # manual noise level mJy/beam
        self.user_radius = R500
        self.user_loc = loc
        self.log = logger
        # Choose the Vizier catalogue from the survey prefix in the target name.
        if object[:4] == 'MCXC':
            self.cat = 'J/A+A/534/A109/mcxc'
        elif object[:4] == 'PSZ2':
            self.cat = 'J/A+A/594/A27/psz2'
        elif object[:3] == 'WHL':
            self.cat = 'J/MNRAS/436/275/table2'
        elif object[:5] == 'Abell':
            self.cat = 'VII/110A/table3'
        else:
            self.cat = None
            self.log.log(logging.ERROR,'Unknown what catalogue to use. If no costum values are given, filling values will be used')

        self.target = str(object)
        self.path = path
        self.alpha = spectr_index
        # FIX: chain the replacements on self.name. Previously every line
        # restarted from self.target, so only the last replace ('WHL') ever
        # took effect and 'MCXC'/'PSZ2'/'Abell' names never got the space
        # inserted before the Vizier query.
        self.name = self.target.replace('MCXC', 'MCXC ')
        self.name = self.name.replace('PSZ2', 'PSZ2 ')
        self.name = self.name.replace('Abell', 'Abell ')
        self.name = self.name.replace('WHL', '')

        self.cosmology = FlatLambdaCDM(H0=70, Om0=0.3)
        self.table = Vizier.query_object(self.name, catalog=self.cat)
        self.initiatePaths(maskpath, outputpath)

        data = self.unpack_File()
        self.get_beam_area()
        self.original_image = np.copy(data)
        x = np.arange(0, data.shape[1], step=1, dtype='float')
        y = np.arange(0, data.shape[0], step=1, dtype='float')
        self.x_pix, self.y_pix = np.meshgrid(x, y)

        self.get_object_location(loc)
        self.extract_object_info(M500, R500, z)
        self.fov_info = [0, data.shape[0], 0, data.shape[1]]
        self.image_mask, self.mask = utils.masking(self, mask)
        self.exponentialFit(data, first=True)  # Find centre of the image centre_pix

        if self.header['BUNIT'] == 'JY/BEAM' or self.header['BUNIT'] == 'Jy/beam':
            self.data = data*(u.Jy/self.beam2pix)
        else:
            self.log.log(logging.CRITICAL,'Possibly other units than jy/beam, CHECK HEADER UNITS!')
            sys.exit()

        self.pix_to_world()
        self.set_image_characteristics(decreased_fov)

    def initiatePaths(self, maskpath, outputpath):
        """Set up output/plot directories and resolve the DS9 mask path."""
        self.basedir = outputpath
        if outputpath[-1] == '/': self.basedir = outputpath[:-1]
        txt = self.path.split('/')
        self.file = txt[-1]
        self.dataPath = '/'+'/'.join(txt[:-1])+'/'
        self.plotPath = self.basedir+'/Plots/'
        self.modelPath = self.basedir+'/'

        if not os.path.isdir(self.modelPath):
            self.log.log(logging.INFO,'Creating modelling directory')
            os.makedirs(self.modelPath)
        if not os.path.isdir(self.plotPath):
            self.log.log(logging.INFO,'Creating plotting directory')
            os.makedirs(self.plotPath)

        if maskpath == None:
            # Default: look for '<target>.reg' next to the output directory.
            self.maskPath = self.basedir+'/'+self.target+'.reg'
        else:
            self.maskPath = maskpath

    def get_object_location(self, loc):
        """Set self.loc from the user-supplied SkyCoord, or fall back to the
        image centre (converted through the FITS WCS)."""
        if loc is not None:
            self.loc = loc
            '''
            elif self.target[:4] == 'MCXC':
                coord = str(self.table[self.cat]['RAJ2000'][0])+' '\
                        + str(self.table[self.cat]['DEJ2000'][0])
                self.loc  = SkyCoord(coord, unit=(u.hourangle,u.deg))
            elif self.target[:5] == 'Abell':
                coord = str(self.table[self.cat]['_RA.icrs'][0])+' '\
                        + str(self.table[self.cat]['_DE.icrs'][0])
                self.loc  = SkyCoord(coord, unit=(u.hourangle,u.deg))
            elif self.target[:4] == 'PSZ2':
                coord = [self.table[self.cat]['RAJ2000'][0],self.table[self.cat]['DEJ2000'][0]]
                self.loc  = SkyCoord(coord[0], coord[1], unit=u.deg)
            elif self.target[:3] == 'WHL':
                coord = [self.table[self.cat]['RAJ2000'][0],self.table[self.cat]['DEJ2000'][0]]
                self.loc  = SkyCoord(coord[0], coord[1], unit=u.deg)
            '''
        else:
            self.log.log(logging.WARNING,'No halo sky location given. Assuming image centre.')
            self.log.log(logging.INFO,'- Not giving an approximate location can affect MCMC performance -')
            #cent_pix = (np.array([self.original_image.shape])/2).astype(np.int64)
            cent_pix = np.asarray(self.original_image.shape, dtype=np.float64).reshape(1,2)/2.
            w = wcs.WCS(self.header)
            coord = w.celestial.wcs_pix2world(cent_pix,1)
            self.loc = SkyCoord(coord[0,0], coord[0,1], unit=u.deg)
            self.user_loc = False

    def extract_object_info(self, M500, R500, z):
        '''Written for MCXC catalogue. Information is gathered from there. If custom
           parameters are given, these will be used. if nothing is found, filling
           values are set. This is only a problem if you try to calculate radio power.'''
        try:
            if self.target[:4] == 'MCXC':
                self.M500 = float(self.table[self.cat]['M500'][0])*1.e14*u.Msun
                self.L500 = float(self.table[self.cat]['L500'][0])*1.e37*u.Watt
                self.R500 = float(self.table[self.cat]['R500'][0])*u.Mpc
                self.z = float(self.table[self.cat]['z'][0])
                self.M500_std = 0.*u.Msun
            elif self.target[:3] == 'WHL':
                self.z = float(self.table[self.cat]['z'][0])
                self.R500 = 1.*u.Mpc
                self.M500 = 3.e14*u.Msun
                self.user_radius = False
                #self.log.log(logging.WARNING,'No R500 key found. setting R500='\
                #                                +str(self.R500.value)+'Mpc to continue')
            elif self.target[:5] == 'Abell':
                try:
                    self.z = float(self.table[self.cat]['z'][0])
                except:
                    self.z = 0.1
                    #self.log.log(logging.WARNING,'No valid z key found. setting z='\
                    #                +str(self.z)+' as filling to continue. Ignore this message if -z != None')
                self.R500 = 1.*u.Mpc
                self.user_radius = False
                #self.log.log(logging.WARNING,'No R500 key found. setting R500='\
                #                            +str(self.R500.value)+'Mpc to continue')
            elif self.target[:4] == 'PSZ2':
                self.M500 = float(self.table[self.cat]['MSZ'][0])*1.e14*u.Msun
                self.M500_std = np.max([float(self.table[self.cat]['E_MSZ'][0]),
                                        float(self.table[self.cat]['e_MSZ'][0])])*1.e14*u.Msun
                self.z = float(self.table[self.cat]['z'][0])
                try:
                    self.R500 = float(self.table[self.cat]['R500'][0])*u.Mpc
                except:
                    self.R500 = 1.*u.Mpc
                    self.user_radius = False
            else:
                # Unknown catalogue: filling values only.
                self.R500 = 1.*u.Mpc
                self.z = 0.1
                self.user_radius = False
        except:
            print('catalogue search FAILED')
            self.R500 = 1.*u.Mpc
            self.z = 0.1
            self.user_radius = False

        # User-supplied values always override catalogue values.
        if M500 is not None:
            self.M500 = float(M500)*1.e14*u.Msun
            self.M500_std = 0.*u.Msun
            self.log.log(logging.INFO,'Custom M500 mass set')
        if R500 is not None:
            self.R500 = float(R500)*u.Mpc
            self.log.log(logging.INFO,'Custom R500 radius set')
            self.user_radius = self.R500
        if z is not None:
            self.z = float(z)
            self.log.log(logging.INFO,'Custom redshift set')

        # Angular-to-physical scale and observing frequency.
        self.factor = self.cosmology.kpc_proper_per_arcmin(self.z).to(u.Mpc/u.deg)
        self.radius_real = self.R500/self.factor
        self.freq = (self.header['CRVAL3']*u.Hz).to(u.MHz)

    def set_image_characteristics(self, decrease_img_size):
        """Estimate the image noise and optionally crop/pad the image to the
        field of view used by the MCMC."""
        # NOTE(review): the branch condition looks inverted — a *non-zero*
        # manual rms triggers the automatic estimate while rms==0 yields zero
        # noise. Confirm intended behaviour before changing.
        if self.rmsnoise != 0.:
            self.rmsnoise, self.imagenoise = u.Jy*self.get_noise(self.data*self.beam2pix)/self.beam2pix
        else:
            self.rmsnoise = 1.e-6*(self.rmsnoise/self.beam2pix)*u.Jy
            self.imagenoise = 0.
        self.log.log(logging.INFO,'rms noise %f microJansky/beam' % (1.e6*(self.rmsnoise*self.beam2pix).value))
        self.log.log(logging.INFO,'rms noise %f microJansky/arcsec2' % (1.e6*(self.rmsnoise/self.pix_area).to(u.Jy/u.arcsec**2.).value))

        if decrease_img_size:
            self.decrease_fov(self.data)
            x = np.arange(0, np.shape(self.data.value)[1], step=1, dtype='float')
            y = np.arange(0, np.shape(self.data.value)[0], step=1, dtype='float')
            self.x_pix, self.y_pix = np.meshgrid(x,y)
            self.image_mask, self.mask = utils.masking(self, self.mask)
            self.exponentialFit(self.data.value)
        else:
            # No crop: pad the image by sqrt(2)/2 so rotated models still fit.
            pivot = ((np.sqrt(2.)/2.-0.5)*np.array(self.data.shape)).astype(np.int64)
            padX = [pivot[0], pivot[0]]
            padY = [pivot[1], pivot[1]]
            self.data_mcmc = np.pad(self.data, [padY, padX], 'constant')
            self.fov_info_mcmc = [-pivot[0],self.data.shape[0]+pivot[0],
                                  -pivot[1],self.data.shape[1]+pivot[1]]
            self.fov_info = [0,self.data.shape[0],0,self.data.shape[1]]
            self.margin = np.array(self.fov_info)-np.array(self.fov_info_mcmc)
            self.data = self.data[self.fov_info[0]:self.fov_info[1],
                                  self.fov_info[2]:self.fov_info[3]]
            self.ra = self.ra[self.fov_info[2]:self.fov_info[3]]
            self.dec = self.dec[self.fov_info[0]:self.fov_info[1]]

        self.noise_char = utils.noise_characterisation(self, self.data.value)
        self.pix2kpc = self.pix_size*self.factor.to(u.kpc/u.deg)

    def get_beam_area(self):
        """Read beam parameters from the header (or CLEAN HISTORY lines) and
        compute beam area and beam-to-pixel conversion."""
        try:
            # NOTE(review): BMAJ/BMIN appear cross-assigned (bmaj <- BMIN).
            # The beam *area* below is unaffected (product), but verify before
            # using bmaj/bmin individually.
            self.bmaj = self.header['BMIN']*u.deg
            self.bmin = self.header['BMAJ']*u.deg
            self.bpa = self.header['BPA']*u.deg
        except KeyError:
            # Some images only record the restoring beam in HISTORY cards.
            string = str(self.header['HISTORY'])
            self.bmaj = self.findstring(string, 'BMAJ')*u.deg
            self.bmin = self.findstring(string, 'BMIN')*u.deg
            self.bpa = self.findstring(string, 'BPA')*u.deg

        self.pix_size = abs(self.header['CDELT2'])*u.deg
        beammaj = self.bmaj/(2.*(2.*np.log(2.))**0.5)  # Convert to sigma
        beammin = self.bmin/(2.*(2.*np.log(2.))**0.5)  # Convert to sigma
        self.pix_area = abs(self.header['CDELT1']*self.header['CDELT2'])*u.deg*u.deg
        self.beam_area = 2.*np.pi*1.0*beammaj*beammin
        self.beam2pix = self.beam_area/self.pix_area

    def unpack_File(self):
        """Open the FITS file, return the 2D image data with NaNs zeroed."""
        self.hdul = fits.open(self.path)
        try:
            data = self.hdul[0].data[0,0,:,:]
        except:
            data = self.hdul[0].data
        self.header = self.hdul[0].header
        data[np.isnan(data)] = 0
        return data

    def findstring(self, string, key):
        """Parse a numeric `key=value` entry from CLEAN-related HISTORY lines."""
        string = string.split('\n')
        for i in range(len(string)):
            if string[i].find(key) != -1 and string[i].find('CLEAN') != -1:
                line = string[i]
        the_key = line.find(key)
        start = line[the_key:].find('=')+the_key+1
        while line[start] == ' ':
            start += 1
        if line[start:].find(' ') == -1:
            return float(line[start:])
        end = line[start:].find(' ')+start
        return float(line[start:end])

    def get_noise(self, data, ampnoise=0.2):
        """Return (rms noise, image noise) estimated from the data array."""
        rmsnoise = utils.findrms(data.value)
        #rmsnoise = utils.get_rms(self.path)
        imagenoise = 0.#np.sqrt((ampnoise*data)**2+(rmsnoise*np.sqrt(1./self.beam2pix))**2)
        return rmsnoise, imagenoise

    def decrease_fov(self, data, width=2):
        ''' Function decreases image size based on first fit in exponentialFit.
            Slightly bigger image is used in MCMC. data is stored in self.data_mcmc'''
        self.cropped = False
        error = False
        image_width = width*self.radius/self.pix_size
        # sqrt(2.01) margin so a model rotated by 45 deg still fits the cutout.
        test_fov = [int(self.centre_pix[1] - np.sqrt(2.01)*image_width),
                    int(self.centre_pix[1] + np.sqrt(2.01)*image_width),
                    int(self.centre_pix[0] - np.sqrt(2.01)*image_width),
                    int(self.centre_pix[0] + np.sqrt(2.01)*image_width)]
        for margin in test_fov:
            if margin < 0 or margin > np.array(self.data.shape).min():
                error = True

        if error:
            # Halo too big to crop: fall back to zero-padding instead.
            self.log.log(logging.ERROR,'{}: Decreasing FoV not possible. Halo is too big'.format(self.target))
            pivot = ((np.sqrt(2.)/2.-0.5)*np.array(data.shape)).astype(np.int64)
            padX = [pivot[0], pivot[0]]
            padY = [pivot[1], pivot[1]]
            self.data_mcmc = np.pad(data, [padY, padX], 'constant')
            self.fov_info_mcmc = [-pivot[0],self.data.shape[0]+pivot[0],
                                  -pivot[1],self.data.shape[1]+pivot[1]]
            self.fov_info = [0,self.data.shape[0],0,self.data.shape[1]]
        else:
            self.fov_info = [int(self.centre_pix[1] - image_width),
                             int(self.centre_pix[1] + image_width),
                             int(self.centre_pix[0] - image_width),
                             int(self.centre_pix[0] + image_width)]
            self.fov_info_mcmc = [int(self.centre_pix[1] - np.sqrt(2.01)*image_width),
                                  int(self.centre_pix[1] + np.sqrt(2.01)*image_width),
                                  int(self.centre_pix[0] - np.sqrt(2.01)*image_width),
                                  int(self.centre_pix[0] + np.sqrt(2.01)*image_width)]
            self.data_mcmc = data[self.fov_info_mcmc[0]:self.fov_info_mcmc[1],
                                  self.fov_info_mcmc[2]:self.fov_info_mcmc[3]]
            self.cropped = True

        self.margin = np.array(self.fov_info)-np.array(self.fov_info_mcmc)
        self.data = data[self.fov_info[0]:self.fov_info[1],
                         self.fov_info[2]:self.fov_info[3]]
        self.ra = self.ra[self.fov_info[2]:self.fov_info[3]]
        self.dec = self.dec[self.fov_info[0]:self.fov_info[1]]

    def pix_to_world(self):
        """Build RA/Dec axes anchored on the fitted halo centre via the WCS."""
        w = wcs.WCS(self.header)
        centre_pix = np.array([[self.centre_pix[0],self.centre_pix[1]]])
        world_coord = w.celestial.wcs_pix2world(centre_pix,1)
        if world_coord[0,0] < 0.: world_coord[0,0] += 360
        #if world_coord[0,1]<0.: world_coord[0,1] += 360
        self.centre_wcs = (np.array([world_coord[0,0],world_coord[0,1]])*u.deg)
        self.ra = np.arange(0,len(self.x_pix))*self.pix_size
        self.dec = np.arange(0,len(self.y_pix))*self.pix_size
        self.ra -= self.ra[self.centre_pix[0]]-self.centre_wcs[0]
        self.dec -= self.dec[self.centre_pix[1]]-self.centre_wcs[1]

    def find_halo_centre(self, data, first):
        """Return the halo centre in pixel coordinates: from self.loc on the
        first (full-image) pass, otherwise the image centre."""
        if first or self.original_image.shape == self.data.shape:
            w = wcs.WCS(self.header)
            centre_wcs = np.array([[self.loc.ra.deg,self.loc.dec.deg]])
            world_coord = w.celestial.wcs_world2pix(centre_wcs,1,ra_dec_order=True)
            return np.array([world_coord[0,0],world_coord[0,1]])
        else:
            return np.array((data.shape[1]/2.,data.shape[0]/2.),dtype=np.int64)

    def pre_mcmc_func(self, obj, *theta):
        """curve_fit wrapper: evaluate the circular exponential model, applying
        the image mask when enabled."""
        I0, x0, y0, re = theta
        model = obj.circle_model((obj.x_pix,obj.y_pix), I0, x0, y0, re)
        if obj.mask:
            return model[obj.image_mask.ravel() == 0]
        else: return model

    def exponentialFit(self, data, first=False):
        """Preliminary circular-exponential fit; sets self.radius,
        self.centre_pix and self.I0 used for cropping and the MCMC guess."""
        plotdata = np.copy(data)
        plotdata[self.image_mask == 1] = 0
        max_flux = np.max(plotdata)
        centre_pix = self.find_halo_centre(data, first)
        if not first:
            size = self.radius/(3.5*self.pix_size)
            max_flux = self.I0
        else: size = data.shape[1]/4.
        bounds = ([0.,0.,0.,0.,],
                  [np.inf,data.shape[0],
                   data.shape[1],
                   data.shape[1]/2.])
        if self.user_radius != False:
            size = (self.radius_real/2.)/self.pix_size

        image = data.ravel()
        if self.mask:
            image = data.ravel()[self.image_mask.ravel() == 0]
        popt, pcov = curve_fit(self.pre_mcmc_func, self,
                               image, p0=(max_flux,centre_pix[0],
                                          centre_pix[1],size), bounds=bounds)
        # If a user radius is set and the fit overshoots it, pin the size.
        if (self.user_radius != False and self.radius_real < (3.5*popt[3]*self.pix_size)):
            popt[3] = size
            print('size overwrite')
        #if first:
        self.radius = 3.5*popt[3]*self.pix_size
        self.centre_pix = np.array([popt[1],popt[2]], dtype=np.int64)
        self.I0 = popt[0]

    def circle_model(self, coords, I0, x0, y0, re):
        """Circular exponential surface-brightness profile, returned ravelled."""
        x, y = coords
        r = np.sqrt((x-x0)**2+(y-y0)**2)
        Ir = I0 * np.exp(-(r/re))
        return Ir.ravel()

    def Close(self):
        """Close the FITS file handle and log the shutdown."""
        self.hdul.close()
        self.log.log(logging.INFO,'closed Halo object {}'.format(self.target))
| 20,320
| 43.85872
| 136
|
py
|
Halo-FDCA
|
Halo-FDCA-master/FDCA/.ipynb_checkpoints/markov_chain_monte_carlo-checkpoint.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: J.M. Boxelaar
Version: 08 June 2020
'''
from __future__ import division
import sys
import os
import logging
from multiprocessing import Pool, cpu_count, freeze_support, set_start_method
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import scipy.stats as stats
from scipy import ndimage
from scipy.special import gammainc, gamma
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize, LogNorm
from skimage.measure import block_reduce
from skimage.transform import rescale
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
from astropy.io import fits
from astropy import wcs
from astropy import units as u
from astropy.coordinates import SkyCoord
import emcee
import corner
# Subfile imports
from . import fdca_utils as utils
from . import plotting_fits as plot
# Multiprocessing bootstrap for the emcee Pool.
# NOTE(review): "fork" is unavailable on Windows and set_start_method raises
# RuntimeError if the start method was already set — confirm this is only
# imported once, on POSIX platforms.
set_start_method("fork")
freeze_support()

# Angle conversions and surface-brightness units used throughout this module.
rad2deg = 180./np.pi
deg2rad = np.pi/180.
Jydeg2 = u.Jy/(u.deg*u.deg)
mJyarcsec2 = u.mJy/(u.arcsec*u.arcsec)
uJyarcsec2 = 1.e-3*u.mJy/(u.arcsec*u.arcsec)
class fitting(object):
'''
-CLASS DESCRIPTION-
-INPUT-
_parent_ (Radio_Halo object): Radio_Halo object containing all relevant
object information
data (2D array): Data array to be fitted. It is adviced to
use 'Radio_Halo.data_mcmc'
dim (int): number of parameters of fitting model to use. Choose from (8,6,5,4).
Note: currently, only dim=8 works.
p0 (array like): Initial robust guess for fit parameters. Used for preliminary
scipy.optimize.curve_fit. See Scipy documentation for more info.
bounds (2-tuple of array_like): Initial robust guess for fit parameter bounds.
Used for preliminary scipy.curve_fit. See Scipy
documentation for more info.
walkers (int): Number of walkers to deploy in the MCMC algorithm
steps (int): Number of evauations each walker has to do.
save (bool): Whether to save the mcmc sampler chain in a fits file. default is False
burntime (int): burn-in time for MCMC walkers. See emcee documentation for info.
logger: Configured logging object to log info to a .log file. If not given,
nothing happens.
rebin (bool): default is True. regridding data to beamsize to fit to indipendent
datapoints. Default is True.
Forward (bool): Depricated.
Mask (bool): applying mask to image. If true: a DS9 .reg has to be present in the
Radio_halo.maskPath direcory Default is False.
maskpath (str): Custom path to DS9 region file, read from database.dat.
If '--' is given, and mas=True, the standard
directory will be searched.
max_radius (float): maximum posiible radius cut-off. Fitted halos cannot have any
r > max_radius. In units of kpc.
Default is None (implying image_size/2).
gamma_prior (bool): wether to use a gamma distribution as a prior for radii.
Default is False. For the gamma parameters:
shape = 2.5, scale = 120 kpc.
'''
def __init__(self, _parent_, data, dim, p0, bounds, walkers, steps,
burntime=None, logger=logging, rebin=True, mask=False,
maskpath='--', max_radius=None, gamma_prior=False,
k_exponent=False, offset=False):
if dim not in ['circle','ellipse', 'rotated_ellipse', 'skewed']:
print('Provide valid function kind')
sys.exit()
p0 = list(p0)
self.orig_shape = _parent_.data.shape
self.rebin = rebin
self.log = logger
self.halo = _parent_
self.noise = _parent_.imagenoise
self.rms = _parent_.rmsnoise
self.sigma = (self.rms*self.halo.beam2pix).value
self.data = data
self.steps = int(steps)
self.mask_treshold = 0.5
self.k_exponent = k_exponent
self.offset = offset
self.gamma_prior = gamma_prior
self.p0 = p0
self.bounds = bounds
self.check_settings(dim, walkers, mask, burntime, maskpath, max_radius)
x = np.arange(0,_parent_.data.shape[1],1)
y = np.arange(0,_parent_.data.shape[0],1)
self.x_pix, self.y_pix = np.meshgrid(x,y)
self.dof = len(data.value.flat) - self.dim
def __preFit__(self):
#try:
self.pre_mcmc_fit(self.halo.data, p0=np.array(self.p0), bounds=np.array(self.bounds))
#except Exception as e:
# self.log.log(logging.CRITICAL,'MCMC Failed to execute pre-fit with error message:\n')
# self.log.log(logging.CRITICAL,e)
# sys.exit()
def __run__(self, save=False):
data = self.set_data_to_use(self.data)
x = np.arange(0, self.data.shape[1])
y = np.arange(0, self.data.shape[0])
coord = np.meshgrid(x,y)
theta_guess = self.popt[self.params]
self.mcmc_noise = utils.findrms(data)
pos = [theta_guess*(1.+1.e-3*np.random.randn(self.dim)) for i in range(self.walkers)]
# set_dictionary is called to create a dictionary with necessary atributes
# because 'Pool' cannot pickle the fitting object.
halo_info = set_dictionary(self)
num_CPU = cpu_count()
with Pool(num_CPU) as pool:
sampler = emcee.EnsembleSampler(self.walkers, self.dim, lnprob, pool=pool,
args=[data,coord,halo_info])
sampler.run_mcmc(pos, self.steps, progress=True)
self.sampler_chain = sampler.chain
self.samples = self.sampler_chain[:,int(self.burntime):,:].reshape((-1,self.dim))
if save:
self.__save__()
self.plotSampler()
return self.sampler_chain
def __save__(self):
path = '%s%s_mcmc_samples%s.fits' % (self.halo.modelPath,
self.halo.file.replace('.fits',''),
self.filename_append)
self.hdu = fits.PrimaryHDU()
self.hdu.data = self.sampler_chain
self.set_sampler_header()
self.hdu.writeto(path, overwrite=True)
def check_settings(self, dim, walkers, mask, burntime, maskpath, max_radius):
self.modelName = dim
self.paramNames = ['I0','x0','y0','r1','r2','r3','r4','ang','k_exp','off']
if dim=='circle':
self._func_ = utils.circle_model
self._func_mcmc = circle_model
self.AppliedParameters = [True,True,True,True,False,False,False,False,False,False]
elif dim == 'ellipse':
self._func_ = utils.ellipse_model
self._func_mcmc = ellipse_model
self.AppliedParameters = [True,True,True,True,True,False,False,False,False,False]
elif dim == 'rotated_ellipse':
self._func_ = utils.rotated_ellipse_model
self._func_mcmc = rotated_ellipse_model
self.AppliedParameters = [True,True,True,True,True,False,False,True,False,False]
elif dim == 'skewed':
self._func_ = utils.skewed_model
self._func_mcmc = skewed_model
self.AppliedParameters = [True,True,True,True,True,True,True,True,False,False]
else:
self.log.log(logging.CRITICAL,'CRITICAL: invalid model name')
print('CRITICAL: invalid model name')
sys.exit()
if self.k_exponent: self.AppliedParameters[-2] = True
if self.offset: self.AppliedParameters[-1] = True
self.params = pd.DataFrame.from_dict({'params':self.AppliedParameters},
orient='index',columns=self.paramNames).loc['params']
self.dim = len(self.params[self.params==True])
if walkers >= 2*self.dim:
self.walkers = int(walkers)
else:
self.walkers = int(2*self.dim+4)
self.log.log(logging.WARNING,'MCMC Too few walkers, nwalkers = {}'.format(self.walkers))
self.image_mask, self.mask = utils.masking(self, mask)
if burntime is None:
self.burntime = int(0.125*self.steps)
elif 0. > burntime or burntime >= 0.8*self.steps:
self.log.log(logging.ERROR,'MCMC Input burntime of {} is invalid. setting burntime to {}'\
.format(burntime, 0.25*self.steps))
self.burntime = int(0.25*self.steps)
else:
self.burntime = int(burntime)
if max_radius == None:
self.max_radius = self.data.shape[0]/2.
else:
self.max_radius = max_radius/self.halo.pix2kpc.value
filename_append = '_%s' % (self.modelName)
if self.mask: filename_append += '_mask'
if self.k_exponent: filename_append += '_exp'
if self.offset: filename_append += '_offset'
self.filename_append = filename_append
def find_mask(self):
if os.path.isfile(self.halo.maskPath):
self.mask = True
else:
self.mask=False
self.log.log(logging.ERROR,'No regionfile found,continueing without mask')
def setMask(self, data):
regionpath = self.halo.maskPath
outfile = self.halo.basedir+'Data/Masks/'+self.halo.target+'_mask.fits'
utils.mask_region(self.halo.path, regionpath, outfile)
'''In 'Radio_Halo', there is a function to decrease the fov of an image. The mask
is made wrt the entire image. fov_info makes the mask the same shape as
the image and overlays it'''
self.image_mask = fits.open(outfile)[0].data[0,0,
self.halo.fov_info[0]:self.halo.fov_info[1],
self.halo.fov_info[2]:self.halo.fov_info[3]]
def at(self, parameter):
par = np.array(self.paramNames)[self.params]
return np.where(par == parameter)[0][0]
def set_data_to_use(self,data):
if self.rebin:
binned_data = utils.regridding(self.halo, data, decrease_fov=True)
if not self.mask:
self.image_mask = np.zeros(self.halo.data.shape)
self.binned_image_mask = utils.regridding(self.halo, self.image_mask*u.Jy, mask = not self.halo.cropped).value
use = binned_data.value
return use.ravel()[self.binned_image_mask.ravel() <=\
self.mask_treshold*self.binned_image_mask.max()]
else:
if self.mask:
return self.data.value.ravel()[self.image_mask.ravel() <= 0.5]
else: return self.data.value.ravel()
def pre_mcmc_func(self, obj, *theta):
theta = utils.add_parameter_labels(obj, theta)
model = self._func_(obj, theta)
if obj.mask:
return model[obj.image_mask.ravel() == 0]
else: return model
def pre_mcmc_fit(self, image, p0, bounds):
data = image.ravel()
p0[1]-=self.halo.margin[2]
p0[2]-=self.halo.margin[0]
if self.mask:
data = data[self.image_mask.ravel() == 0]
bounds = (list(bounds[0,self.params]), list(bounds[1,self.params]))
popt, pcov = curve_fit(self.pre_mcmc_func,self,data,
p0=tuple(p0[self.params]),
bounds=bounds)
perr = np.sqrt(np.diag(pcov))
#plt.imshow(image.value)
#plt.contour(self._func_(self,*popt).reshape(image.shape))
#plt.show()
popt[1]+= self.halo.margin[2]
popt[2]+= self.halo.margin[0]
self.popt = utils.add_parameter_labels(self, popt)
self.perr = perr
if not self.k_exponent: self.popt['k_exp'] = 0.5
if not self.offset: self.popt['off'] = 0.0
if self.modelName == 'skewed':
'''longest dimension of elliptical shape should always be the x-axis.
This routine switches x and y if necessary to accomplish this.'''
if (self.popt['r1']+self.popt['r2']) <= (self.popt['r3']+self.popt['r4']):
self.popt['r1'], self.popt['r3'] = self.popt['r3'], self.popt['r1']
self.popt['r2'], self.popt['r4'] = self.popt['r4'], self.popt['r3']
self.popt['ang'] += np.pi/2.
if self.modelName in ['ellipse','rotated_ellipse']:
if self.popt['r1']<=self.popt['r2']:
self.popt['r1'],self.popt['r2'] = self.popt['r2'],self.popt['r1']
self.popt['ang'] += np.pi/2.
if self.modelName in ['rotated_ellipse', 'skewed']:
'''Angle of ellipse from positive x should be between 0 and pi.'''
self.popt['ang'] = self.popt['ang']%(2*np.pi)
if self.popt['ang']>=np.pi:
self.popt['ang'] -= np.pi
for r in range(4):
r += 1
if self.popt['r'+str(r)] > self.max_radius:
self.popt['r'+str(r)] = self.max_radius
self.centre_pix = np.array([self.popt['x0'],self.popt['y0']], dtype=np.int64)
self.centre_wcs = np.array((self.halo.ra.value[self.centre_pix[1]],
self.halo.dec.value[self.centre_pix[0]]))*u.deg
popt_units = self.transform_units(np.copy(self.popt))
popt_units = utils.add_parameter_labels(self, popt_units[self.params])
self.log.log(logging.INFO,'MCMC initial guess: \n{} \n and units: muJy/arcsec2, deg, deg, r_e: kpc, rad'.format(popt_units,self.perr))
x = np.arange(0,self.data.shape[1],1)
y = np.arange(0,self.data.shape[0],1)
self.x_pix, self.y_pix = np.meshgrid(x,y)
def plotSampler(self):
fig, axes = plt.subplots(ncols=1, nrows=self.dim, sharex=True)
axes[0].set_title('Number of walkers: '+str(self.walkers))
for axi in axes.flat:
axi.yaxis.set_major_locator(plt.MaxNLocator(3))
fig.set_size_inches(2*10,15)
for i in range(self.dim):
axes[i].plot(self.sampler_chain[:, int(self.burntime):, i].transpose(),
color='black', alpha=0.3)
axes[i].set_ylabel('param '+str(i+1), fontsize=15)
plt.tick_params(labelsize=15)
plt.savefig('%s%s_walkers%s.pdf' % (self.halo.plotPath,
self.halo.target,self.filename_append),dpi=300)
plt.clf()
plt.close(fig)
labels = list()
for i in range(self.dim):
labels.append('Param '+str(i+1))
fig = corner.corner(self.samples,labels=labels, quantiles=[0.160, 0.5, 0.840],
truths=np.asarray(self.popt[self.params]),
show_titles=True, title_fmt='.5f')
plt.savefig('%s%s_cornerplot%s.pdf' % (self.halo.plotPath,
self.halo.target,self.filename_append),dpi=300)
plt.clf()
plt.close(fig)
def transform_units(self, params):
params[0] = ((u.Jy*params[0]/self.halo.pix_area).to(uJyarcsec2)).value
params[1] = (params[1]-self.centre_pix[0])*self.halo.pix_size.value+self.centre_wcs[0].value
params[2] = (params[2]-self.centre_pix[1])*self.halo.pix_size.value+self.centre_wcs[1].value
params[3] = ((params[3]*self.halo.pix2kpc).to(u.kpc)).value
if self.modelName in ['ellipse', 'rotated_ellipse', 'skewed']:
params[4] = ((params[4]*self.halo.pix2kpc).to(u.kpc)).value
if self.modelName == 'skewed':
params[5] = ((params[5]*self.halo.pix2kpc).to(u.kpc)).value
params[6] = ((params[6]*self.halo.pix2kpc).to(u.kpc)).value
if self.modelName in ['rotated_ellipse', 'skewed']:
params[self.at('ang')] = params[self.at('ang')]
return params
    def set_sampler_header(self):
        # Record the MCMC run configuration in the FITS header of self.hdu so
        # that the 'processing' class can reconstruct the run from disk.
        self.hdu.header['nwalkers'] = (self.walkers)
        self.hdu.header['steps'] = (self.steps)
        self.hdu.header['dim'] = (self.dim)
        self.hdu.header['burntime'] = (self.burntime)
        self.hdu.header['OBJECT'] = (self.halo.name,'Object which was fitted')
        self.hdu.header['IMAGE'] = (self.halo.file)
        self.hdu.header['UNIT_0'] = ('JY/PIX','unit of fit parameter')
        self.hdu.header['UNIT_1'] = ('PIX','unit of fit parameter')
        self.hdu.header['UNIT_2'] = ('PIX','unit of fit parameter')
        self.hdu.header['UNIT_3'] = ('PIX','unit of fit parameter')
        # NOTE(review): the UNIT_* ladder below does not obviously track the
        # parameter vector layout for every model (UNIT_7 written for
        # dim >= 6, UNIT_5/6 only for dim == 8) — verify against paramNames.
        if self.dim>=5:
            self.hdu.header['UNIT_4'] = ('PIX','unit of fit parameter')
        if self.dim == 8:
            self.hdu.header['UNIT_5'] = ('PIX','unit of fit parameter')
            self.hdu.header['UNIT_6'] = ('PIX','unit of fit parameter')
        if self.dim >= 6:
            self.hdu.header['UNIT_7'] = ('RAD','unit of fit parameter')
        if self.dim == 7:
            self.hdu.header['UNIT_P'] = ('NONE','unit of fit parameter')
        # Initial (pre-MCMC) guess for each free parameter.
        for i in range(len(self.popt[self.params])):
            self.hdu.header['INIT_'+str(i)] = (self.popt[self.params][i], 'MCMC initial guess')
        self.hdu.header['MASK'] = (self.mask,'was the data masked during fitting')
def set_dictionary(obj):
    """Collect the fitting-related attributes of *obj* into a plain dict.

    The module-level model/prior functions (``lnprob`` and friends) receive
    this dictionary instead of the full fitting object, so they can be used
    by the sampler without pickling the whole object.
    """
    halo = obj.halo
    info = {}
    info["modelName"] = obj.modelName
    info["bmaj"] = halo.bmaj
    info["bmin"] = halo.bmin
    info["bpa"] = halo.bpa
    info["pix_size"] = halo.pix_size
    info["beam_area"] = halo.beam_area
    info["beam2pix"] = halo.beam2pix
    info["pix2kpc"] = halo.pix2kpc
    info["mask"] = obj.mask
    info["sigma"] = obj.mcmc_noise
    info["margin"] = halo.margin
    info["_func_"] = obj._func_mcmc
    info["image_mask"] = obj.image_mask
    info["binned_image_mask"] = obj.binned_image_mask
    info["mask_treshold"] = obj.mask_treshold
    info["max_radius"] = obj.max_radius
    info["params"] = obj.params
    info["paramNames"] = obj.paramNames
    info["gamma_prior"] = obj.gamma_prior
    return info
def set_model_to_use(info, data):
    """Regrid *data* to beam-sized pixels and keep only the unmasked entries.

    ``data`` carries astropy units (``.value`` strips them). The returned
    array is flat and excludes pixels where the binned image mask exceeds
    ``mask_treshold`` times its maximum.
    """
    binned_mask = info['binned_image_mask'].ravel()
    keep = binned_mask <= info['mask_treshold'] * binned_mask.max()
    regridded = regrid_to_beamsize(info, data.value).ravel()
    return regridded[keep]
def rotate_image(info, img, decrease_fov=False):
    """Rotate *img* by minus the beam position angle and crop to the margins.

    The rotation (``bpa`` in degrees) is applied about the image centre
    without reshaping; the result is sliced to ``info['margin']`` which is
    laid out as [x0, x1, y0, y1].
    NOTE(review): ``decrease_fov`` is accepted but currently unused.
    """
    x0, x1, y0, y1 = info['margin']
    rotated = ndimage.rotate(img, -info['bpa'].value, reshape=False)
    return rotated[y0:y1, x0:x1]
def regrid_to_beamsize(info, img, accuracy=100.):
    """Rebin *img* so that each output pixel covers roughly one beam area.

    The image is first spread onto a finer 'pseudo' grid (controlled by
    *accuracy*), then block-summed down to beam-sized pixels so that the
    resulting data points are approximately statistically independent.

    info : dict from set_dictionary(); 'bmaj'/'bmin' are the beam FWHMs and
           'pix_size' the pixel scale (quantities with .value).
    img : 2D plain array (no units).
    accuracy : upsampling control; the default (100.) yields factor 1,
               i.e. no upsampling.
    """
    # sqrt(pi/(4 ln 2)) converts a Gaussian FWHM into the side of a square
    # with the same area as the beam.
    x_scale = np.sqrt(np.pi/(4*np.log(2.)))*info['bmaj'].value
    y_scale = np.sqrt(np.pi/(4*np.log(2.)))*info['bmin'].value
    new_pix_size = np.array((y_scale,x_scale))
    accuracy = int(1./accuracy*100)
    # Block size, in pseudo pixels, corresponding to one beam.
    scale = np.round(accuracy*new_pix_size/info['pix_size']).astype(np.int64).value
    pseudo_size = (accuracy*np.array(img.shape) ).astype(np.int64)
    pseudo_array = np.zeros((pseudo_size))
    orig_scale = (np.array(pseudo_array.shape)/np.array(img.shape)).astype(np.int64)
    elements = np.prod(np.array(orig_scale,dtype='float64'))
    if accuracy == 1:
        # No upsampling requested: the pseudo grid is the image itself.
        pseudo_array = np.copy(img)
    else:
        # Spread each original pixel's flux evenly over its block of pseudo
        # pixels. NOTE(review): i/j index rows/columns transposed relative to
        # the usual convention — confirm intended for non-square images.
        for j in range(img.shape[0]):
            for i in range(img.shape[1]):
                pseudo_array[orig_scale[1]*i:orig_scale[1]*(i+1),
                             orig_scale[0]*j:orig_scale[0]*(j+1)] = img[i,j]/elements
    # Sum the pseudo grid down in beam-sized blocks, then drop the last
    # row/column, which block_reduce pads with cval=0.
    f= block_reduce(pseudo_array, block_size=tuple(scale), func=np.sum, cval=0)
    f=np.delete(f, -1, axis=0)
    f=np.delete(f, -1, axis=1)
    #plt.imshow(f)
    #plt.show()
    #print(pseudo_array.shape, scale, f.shape)
    return f
def convolve_with_gaussian(info, data, rotate):
    """Convolve *data* with the restoring-beam Gaussian.

    When *rotate* is true the image is first rotated and cropped via
    :func:`rotate_image`. Beam FWHMs are converted to Gaussian sigmas with
    FWHM = sqrt(8 ln 2) * sigma.
    """
    if rotate:
        data = rotate_image(info, data, decrease_fov=True)
    fwhm_to_sigma = np.sqrt(8*np.log(2.))
    sigma_major = (info['bmaj']/info['pix_size'])/fwhm_to_sigma
    sigma_minor = (info['bmin']/info['pix_size'])/fwhm_to_sigma
    beam_kernel = Gaussian2DKernel(sigma_major, sigma_minor, info['bpa'])
    return convolve(data, beam_kernel, boundary='extend', normalize_kernel=True)
def circle_model(info, coords, theta, rotate=False):
    """Circularly symmetric exponential profile, convolved with the beam.

    I(r) = I0 * exp(-(r^2/r1^2)^(0.5 + k_exp)) + off, evaluated on the
    pixel grids in *coords*.
    """
    x, y = coords
    r_sq = ((x - theta['x0'])**2 + (y - theta['y0'])**2)/theta['r1']**2
    intensity = theta['I0']*np.exp(-r_sq**(0.5 + theta['k_exp'])) + theta['off']
    return convolve_with_gaussian(info, intensity, rotate)
def ellipse_model(info, coord, theta, rotate=False):
    """Axis-aligned elliptical exponential profile, convolved with the beam."""
    x, y = coord
    gx = (x - theta['x0'])/theta['r1']
    gy = (y - theta['y0'])/theta['r2']
    profile = gx**2 + gy**2
    intensity = theta['I0']*np.exp(-profile**(0.5 + theta['k_exp'])) + theta['off']
    return convolve_with_gaussian(info, intensity, rotate)
def rotated_ellipse_model(info, coord, theta, rotate=False):
    """Elliptical exponential profile rotated by theta['ang'] (radians)."""
    x, y = coord
    dx = x - theta['x0']
    dy = y - theta['y0']
    cos_a = np.cos(theta['ang'])
    sin_a = np.sin(theta['ang'])
    # Rotate into the ellipse frame before applying the radial profile.
    x_rot = dx*cos_a + dy*sin_a
    y_rot = -dx*sin_a + dy*cos_a
    profile = (x_rot/theta['r1'])**2. + (y_rot/theta['r2'])**2.
    intensity = theta['I0']*np.exp(-profile**(0.5 + theta['k_exp'])) + theta['off']
    return convolve_with_gaussian(info, intensity, rotate)
def skewed_model(info, coord, theta, rotate=False):
    """Skewed exponential model: four quadrants with independent e-folding
    radii (r1/r2 along x+, x-; r3/r4 along y+, y-), rotated by 'ang'."""
    x, y = coord
    centre = (theta['I0'], theta['x0'], theta['y0'])
    ang = theta['ang']
    quad_pp = G(x, y, *centre, theta['r1'], theta['r3'], ang, 1., 1.)
    quad_pm = G(x, y, *centre, theta['r1'], theta['r4'], ang, 1., -1.)
    quad_mm = G(x, y, *centre, theta['r2'], theta['r4'], ang, -1., -1.)
    quad_mp = G(x, y, *centre, theta['r2'], theta['r3'], ang, -1., 1.)
    # Same summation order as the original: pp + pm + mm + mp.
    intensity = theta['I0']*(quad_pp + quad_pm + quad_mm + quad_mp)
    return convolve_with_gaussian(info, intensity, rotate)
def G(x, y, I0, x0, y0, re_x, re_y, ang, sign_x, sign_y):
    """One quadrant of the skewed exponential profile.

    Coordinates are rotated by *ang*; ``sqrt(sign*u)**4`` equals
    ``(sign*u)**2`` where ``sign*u >= 0`` and is NaN elsewhere, which
    restricts the profile to a single quadrant — the NaNs are zeroed
    before returning.
    NOTE(review): *I0* is accepted but unused; the caller applies the
    amplitude itself.
    """
    dx = x - x0
    dy = y - y0
    cos_a = np.cos(ang)
    sin_a = np.sin(ang)
    u_rot = dx*cos_a + dy*sin_a
    v_rot = -dx*sin_a + dy*cos_a
    radial = (np.sqrt(sign_x*u_rot)**4.)/(re_x**2.) + \
             (np.sqrt(sign_y*v_rot)**4.)/(re_y**2.)
    profile = np.exp(-np.sqrt(radial))
    profile[np.isnan(profile)] = 0.
    return profile
def lnL(theta, data, coord, info):
    """Gaussian log-likelihood of *data* given model parameters *theta*.

    The model image is evaluated with rotation enabled, regridded/masked to
    the same sampling as *data*, and compared with constant noise
    ``info['sigma']``.
    """
    raw_model = info['_func_'](info, coord, theta, rotate=True)*u.Jy
    model = set_model_to_use(info, raw_model)
    sigma = info['sigma']
    chi_term = ((data - model)**2.)/(2*sigma**2.)
    norm_term = np.log(np.sqrt(2*np.pi)*sigma)
    return -np.sum(chi_term + norm_term)
def lnprior(theta, shape, info):
    """Log-prior for the circle/ellipse/rotated_ellipse models.

    Returns -inf outside the allowed parameter box; otherwise 0 (flat) or,
    when ``info['gamma_prior']`` is set, the log of a gamma distribution
    over the e-folding radii.
    """
    in_bounds = (
        theta['I0'] > 0
        and -0.4 < theta['k_exp'] < 19
        and 0 <= theta['x0'] < shape[1]
        and 0 <= theta['y0'] < shape[0]
        and 0 < theta['r1'] < info['max_radius']
        and -np.pi/4. < theta['ang'] < 5*np.pi/4.
        and 0 <= theta['r2'] <= theta['r1']
    )
    if not in_bounds:
        return -np.inf
    if not info['gamma_prior']:
        return 0.0
    if info['modelName'] == 'circle':
        radii = np.array([theta['r1']])
    else:
        radii = np.array([theta['r1'], theta['r2']])
    return np.sum(np.log(utils.gamma_dist(radii, 2.3, 120./info['pix2kpc'].value)))
def lnprior8(theta, shape, info):
    """Log-prior for the 8-parameter skewed model.

    All four radii must be positive, the y-radii sum must not exceed the
    x-radii sum, and the x-radii sum must stay below twice ``max_radius``.
    Flat (0) inside the box, or a gamma prior on the radii when requested.
    """
    r_sum_x = theta['r1'] + theta['r2']
    r_sum_y = theta['r3'] + theta['r4']
    in_bounds = (
        theta['I0'] > 0
        and 0 < theta['x0'] < shape[1]
        and 0 < theta['y0'] < shape[0]
        and theta['r1'] > 0. and theta['r2'] > 0.
        and theta['r3'] > 0. and theta['r4'] > 0.
        and 0. < r_sum_y <= r_sum_x
        and r_sum_x < info['max_radius']*2.
        and -np.pi/4. < theta['ang'] < 5*np.pi/4.
    )
    if not in_bounds:
        return -np.inf
    if not info['gamma_prior']:
        return 0.0
    radii = np.array([theta['r1'], theta['r2'], theta['r3'], theta['r4']])
    return np.sum(np.log(utils.gamma_dist(radii, 2.3, 120./info['pix2kpc'].value)))
def lnprob(theta, data, coord, info):
    """Log-posterior: model-dependent log-prior plus Gaussian log-likelihood.

    *theta* arrives as the bare free-parameter vector and is expanded to a
    labelled series before evaluation. Returns -inf when the prior excludes
    the point, so the expensive likelihood is skipped.
    """
    labelled = add_parameter_labels(info['params'], info['paramNames'], theta)
    prior_fn = lnprior8 if info['modelName'] == 'skewed' else lnprior
    lp = prior_fn(labelled, coord[0].shape, info)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnL(labelled, data, coord, info)
def add_parameter_labels(params, paramNames, array):
    """Expand the free-parameter vector *array* into a labelled pandas Series.

    *params* is a boolean mask over *paramNames*; masked-out positions are
    filled with zero, so the result always has one entry per parameter name.
    """
    expanded = np.zeros(params.shape)
    expanded[params == True] = array
    frame = pd.DataFrame.from_dict({'params': expanded},
                                   orient='index', columns=paramNames)
    return frame.loc['params']
class processing(object):
    '''
    -CLASS DESCRIPTION-
    Post-processes a finished MCMC run: reads the sampler chain back from
    disk, derives parameter percentiles/uncertainties in physical units and
    computes derived quantities (flux density, radio power, chi^2).

    -INPUT-
    _parent_ (Radio_Halo object): Radio_Halo object containing all relevant
                                  object information
    data (2D array): Data array that was fitted. It is advised to
                     use 'Radio_Halo.data_mcmc'
    dim (str): model name; one of
               ('circle', 'ellipse', 'rotated_ellipse', 'skewed').
    logger: Configured logging object to log info to a .log file.
    save (bool): Whether to save diagnostic plots instead of showing them
                 interactively. Default is True.
    mask (bool): applying mask to image. If true: a DS9 .reg has to be present
                 in the Radio_halo.maskPath directory. Default is False.
    rebin (bool): default is True. Regridding data to beamsize to fit to
                  independent datapoints.
    maskpath (str): Custom path to DS9 region file, read from database.dat.
                    If '--' is given, and mask=True, the standard
                    directory will be searched.
    k_exponent (bool): whether the radial-profile exponent was a free
                       parameter in the fit.
    offset (bool): whether a constant offset was a free parameter in the fit.
    burntime (int): burn-in time for MCMC walkers. See emcee documentation.
    '''
    def __init__(self, _parent_, data, dim, logger, save=True, mask=False, rebin=True,
                       maskpath='--', k_exponent=False, offset=False, burntime=None):
        # Pixel coordinate grids matching the (cropped) data shape.
        x = np.arange(0, data.shape[1], 1)
        y = np.arange(0, data.shape[0], 1)
        self.x_pix, self.y_pix = np.meshgrid(x, y)

        self.log = logger
        self.log.log(logging.INFO, 'Model name: {}'.format(dim))
        self.noise = _parent_.imagenoise
        self.rms = _parent_.rmsnoise
        self.data = data
        self.save = save
        self.halo = _parent_
        self.alpha = _parent_.alpha  # spectral index guess
        self.k_exponent = k_exponent
        self.offset = offset
        self.mask_treshold = 0.5
        # NOTE(review): 'burntime' is accepted but unused here; the burn-in
        # is read back from the sampler FITS header in retreive_mcmc_params.

        self.check_settings(dim, mask, maskpath)
        self.extract_chain_file(rebin)
        self.retreive_mcmc_params()
        self.set_labels_and_units()

        # Degrees of freedom of the (unbinned) image fit.
        self.dof = len(data.value.flat) - self.dim

    def plot_results(self):
        """Plot the best-fit model against the data at full resolution and
        regridded to beam-sized pixels, plus walker and corner diagnostics."""
        plot.fit_result(self, self.model, self.halo.data,
                        self.halo.rmsnoise, mask=self.mask, regrid=False)
        plot.fit_result(self, self.model, self.halo.data_mcmc,
                        self.halo.rmsnoise, mask=self.mask, regrid=True)
        self.plotSampler()
        self.cornerplot()

    def check_settings(self, dim, mask, maskpath):
        """Translate the model name into a model function plus a boolean mask
        of free parameters, and set up the image mask."""
        self.modelName = dim
        self.paramNames = ['I0','x0','y0','r1','r2','r3','r4','ang','k_exp','off']
        if dim == 'circle':
            self._func_ = utils.circle_model
            self.AppliedParameters = [True,True,True,True,False,False,False,False,False,False]
        elif dim == 'ellipse':
            self._func_ = utils.ellipse_model
            self.AppliedParameters = [True,True,True,True,True,False,False,False,False,False]
        elif dim == 'rotated_ellipse':
            self._func_ = utils.rotated_ellipse_model
            self.AppliedParameters = [True,True,True,True,True,False,False,True,False,False]
        elif dim == 'skewed':
            self._func_ = utils.skewed_model
            self.AppliedParameters = [True,True,True,True,True,True,True,True,False,False]
        else:
            self.log.log(logging.CRITICAL,'CRITICAL: invalid model name')
            print('CRITICAL: invalid model name')
            sys.exit()

        # Optional extra free parameters (profile exponent, constant offset).
        if self.k_exponent: self.AppliedParameters[-2] = True
        if self.offset: self.AppliedParameters[-1] = True

        self.params = pd.DataFrame.from_dict({'params': self.AppliedParameters},
                            orient='index', columns=self.paramNames).loc['params']
        self.dim = len(self.params[self.params])
        self.image_mask = np.zeros(self.halo.data.shape)
        self.image_mask, self.mask = utils.masking(self, mask)

    def extract_chain_file(self, rebin):
        """Open the FITS file holding the sampler chain that matches the
        current model/mask/exponent/offset configuration."""
        filename_append = '_{}'.format(self.modelName)
        if self.mask: filename_append += '_mask'
        if self.k_exponent: filename_append += '_exp'
        if self.offset: filename_append += '_offset'
        self.filename_append = filename_append
        self.rebin = rebin
        # NOTE(review): the HDUList is deliberately not closed here;
        # self.sampler may be a memory-map into the open file.
        sampler_chain = fits.open(self.halo.modelPath+self.halo.file.replace('.fits','')+\
                                    '_mcmc_samples'+self.filename_append+'.fits')
        self.sampler = (sampler_chain[0].data)
        self.info = sampler_chain[0].header

    def at(self, parameter):
        """Return the index of *parameter* within the free-parameter vector."""
        par = np.array(self.paramNames)[self.params]
        return np.where(par == parameter)[0][0]

    def retreive_mcmc_params(self):
        """Reconstruct run settings and samples from the chain FITS header,
        apply the burn-in cut and compute the best-fit model image."""
        self.walkers = self.info['nwalkers']
        self.steps = self.info['steps']
        burntime = int(self.info['burntime'])
        self.popt = utils.add_parameter_labels(self, np.zeros(self.dim))
        for i in range(self.dim):
            self.popt[i] = self.info['INIT_'+str(i)]

        # NOTE(review): burntime is cast to int above, so the 'is None'
        # branch below can only trigger if that cast is removed.
        if burntime is None:
            self.burntime = int(0.25*self.steps)
        elif 0. > burntime or burntime >= self.steps:
            self.log.log(logging.ERROR,'MCMC Input burntime of {} is invalid. setting burntime to {}'\
                                        .format(burntime, 0.25*self.steps))
            self.burntime = int(0.25*self.steps)
        else:
            self.burntime = int(burntime)
        samples = self.sampler[:, self.burntime:, :].reshape((-1, self.dim))

        # Translate sample locations to the cropped field of view.
        samples[:, self.at('x0')] -= self.halo.margin[2]
        samples[:, self.at('y0')] -= self.halo.margin[0]

        self.percentiles = self.get_percentiles(samples)
        self.parameters = utils.add_parameter_labels(self, self.percentiles[:,1].reshape(self.dim))
        self.centre_pix = np.array([self.parameters['x0'], self.parameters['y0']], dtype=np.int64)
        self.model = self._func_(self, self.parameters)\
                            .reshape(self.x_pix.shape)*u.Jy
        self.samples = samples

    def get_percentiles(self, samples):
        """Return the (16, 50, 84) percentiles of each parameter column.

        The position angle is handled through cos/sin to avoid artefacts
        from angle wrapping near the domain edges."""
        percentiles = np.ones((samples.shape[1], 3))
        for i in range(samples.shape[1]):
            percentiles[i,:] = np.percentile(samples[:, i], [16, 50, 84])

        if self.modelName in ['rotated_ellipse', 'skewed']:
            cosine = np.percentile(np.cos(samples[:, self.at('ang')]), [16, 50, 84])
            sine = np.percentile(np.sin(samples[:, self.at('ang')]), [16, 50, 84])
            arccosine = np.arccos(cosine)
            arcsine = np.arcsin(sine)

            # Pick the quadrant on which arcsin and arccos agree.
            # NOTE(review): the final 'else' is unreachable (the three
            # branches above are exhaustive), and a median of exactly zero
            # would leave 'ang' unset — kept as-is pending confirmation.
            if arcsine[1] == arccosine[1]:
                ang = arcsine.copy()
            elif arcsine[1] == -arccosine[1]:
                ang = arcsine.copy()
            elif arcsine[1] != arccosine[1] and arcsine[1] != -arccosine[1]:
                if arcsine[1] < 0:
                    ang = -arccosine.copy()
                elif arcsine[1] > 0:
                    ang = arccosine.copy()
            else:
                self.log.log(logging.ERROR,'Angle matching failed in processing.get_percentiles. continueing with default.')
                ang = np.percentile(samples[:, self.at('ang')], [16, 50, 84])
            percentiles[self.at('ang'),:] = ang
        return percentiles

    def cornerplot(self):
        """Corner plot of the posterior samples in physical units."""
        try:
            fig = corner.corner(self.samples_units,labels=self.labels_units,truths=self.popt_units[self.params],
                                quantiles=[0.160, 0.5, 0.840], show_titles=True, max_n_ticks=3, title_fmt=self.fmt)
        except Exception:
            # BUGFIX(review): was a bare 'except:'. Some 'corner' versions do
            # not accept an array of title formats; fall back to one format.
            fig = corner.corner(self.samples_units,labels=self.labels_units,truths=self.popt_units[self.params],
                                quantiles=[0.160, 0.5, 0.840], show_titles=True, max_n_ticks=3, title_fmt='1.2g')
        if self.save:
            plt.savefig(self.halo.plotPath+self.halo.file.replace('.fits','')+'_cornerplot'+self.filename_append+'.pdf')
            plt.clf()
            plt.close(fig)
        else:
            plt.show()

    def plotSampler(self):
        """Trace plot: the path of every walker for each free parameter."""
        fig, axes = plt.subplots(ncols=1, nrows=self.dim, sharex=True)
        axes[0].set_title('Number of walkers: '+str(self.walkers), fontsize=25)
        for axi in axes.flat:
            axi.yaxis.set_major_locator(plt.MaxNLocator(3))
        fig.set_size_inches(2*10, 15)

        for i in range(self.dim):
            axes[i].plot(self.sampler[:, :, i].transpose(), color='black', alpha=0.3, lw=0.5)
            axes[i].set_ylabel(self.labels[i], fontsize=20)
            axes[-1].set_xlabel('steps', fontsize=20)
            # Mark the default burn-in cut (30% of the chain length).
            axes[i].axvline(0.3*self.sampler.shape[1], ls='dashed', color='red')
            axes[i].tick_params(labelsize=20)
        plt.xlim(0, self.sampler.shape[1])

        if self.save:
            plt.savefig(self.halo.plotPath+self.halo.file.replace('.fits','')+'_walkers'+self.filename_append+'.pdf')
            plt.clf()
            plt.close(fig)
        else:
            plt.show()

    def set_labels_and_units(self):
        """Convert samples and best-fit parameters to physical units and
        build matching axis labels."""
        self.samples_units = self.samples.copy()
        samples_units = self.samples.copy()
        samples_list = list()

        # NOTE(review): the margin index used for x0 here is [1], while
        # retreive_mcmc_params uses [2]; confirm which one is intended.
        x0 = np.percentile(self.samples.real[:, 1], [16, 50, 84])[1]-abs(self.halo.margin[1])
        y0 = np.percentile(self.samples.real[:, 2], [16, 50, 84])[1]-abs(self.halo.margin[0])
        self.centre_pix = np.array([x0, y0], dtype=np.int64)
        self.centre_wcs = np.array((self.halo.ra.value[self.centre_pix[1]],
                                    self.halo.dec.value[self.centre_pix[0]]))*u.deg

        for i in range(self.dim):
            samples_list.append(samples_units[:, i])

        transformed = self.transform_units(samples_list)
        for i in range(self.dim):
            self.samples_units[:, i] = transformed[i]

        self.popt_units = self.transform_units(np.copy(self.popt))
        self.percentiles_units = self.get_percentiles(self.samples_units)
        self.params_units = utils.add_parameter_labels(self, self.percentiles_units[:,1].reshape(self.dim))
        self.get_units()

        uncertainties1 = self.percentiles_units[:,1]-self.percentiles_units[:,0]
        uncertainties2 = self.percentiles_units[:,2]-self.percentiles_units[:,1]
        self.log.log(logging.INFO, '\n Parameters: \n%s \nOne sigma parameter uncertainties (lower, upper): \
                      \n%s \n%s \nIn Units: %s' % (str(self.params_units[self.params]),
                      str(uncertainties1), str(uncertainties2), str(self.units)))

    def transform_units(self, params):
        """Convert a parameter vector in place from image to physical units:
        I0 [Jy/pixel]->[uJy/arcsec^2], x0/y0 [pixel]->[deg], radii
        [pixel]->[kpc]. The position angle is already in radians."""
        params[0] = ((u.Jy*params[0]/self.halo.pix_area).to(uJyarcsec2)).value
        params[1] = (params[1]-self.centre_pix[0])*self.halo.pix_size.value+self.centre_wcs[0].value
        params[2] = (params[2]-self.centre_pix[1])*self.halo.pix_size.value+self.centre_wcs[1].value
        params[3] = ((params[3]*self.halo.pix2kpc).to(u.kpc)).value
        if self.modelName in ['ellipse', 'rotated_ellipse', 'skewed']:
            params[4] = ((params[4]*self.halo.pix2kpc).to(u.kpc)).value
        if self.modelName == 'skewed':
            params[5] = ((params[5]*self.halo.pix2kpc).to(u.kpc)).value
            params[6] = ((params[6]*self.halo.pix2kpc).to(u.kpc)).value
        # NOTE(review): a no-op self-assignment of the position angle that was
        # here in the original has been removed.
        return params

    def get_units(self):
        """Build label, unit and print-format arrays matching the free
        parameters of the current model."""
        labels = ['$I_0$', '$x_0$', '$y_0$']
        units = ['$\\mu$Jy arcsec$^{-2}$', 'deg', 'deg']
        fmt = ['.2f', '.4f', '.4f']

        if self.modelName == 'skewed':
            labels.extend(('$r_{x^+}$','$r_{x^-}$','$r_{y^+}$','$r_{y^-}$'))
            units.extend(('kpc','kpc','kpc','kpc'))
            fmt.extend(('.0f','.0f','.0f','.0f'))
        elif self.modelName in ['ellipse', 'rotated_ellipse']:
            labels.extend(('$r_{x}$','$r_{y}$'))
            units.extend(('kpc','kpc'))
            fmt.extend(('.1f','.1f'))
        elif self.modelName == 'circle':
            labels.append('$r_{e}$')
            units.append('kpc')
            fmt.append('.1f')
        if self.modelName in ['rotated_ellipse', 'skewed']:
            labels.append('$\\phi_e$')
            units.append('Rad')
            fmt.append('.3f')
        if self.k_exponent:
            labels.append('$k$')
            units.append(' ')
            fmt.append('.3f')
        if self.offset:
            labels.append('$C$')
            units.append(' ')
            fmt.append('.3f')

        self.labels = np.array(labels, dtype='<U30')
        self.units = np.array(units, dtype='<U30')
        self.fmt = np.array(fmt, dtype='<U30')
        self.labels_units = np.copy(self.labels)
        for i in range(self.dim):
            self.labels_units[i] = self.labels[i]+' ['+self.units[i]+']'

    def get_confidence_interval(self, percentage=95, units=True):
        """Log and return the normal-approximation confidence interval of all
        parameters, in physical units when *units* is True."""
        alpha = 1. - percentage/100.
        z_alpha = stats.norm.ppf(1.-alpha/2.)
        se = np.zeros(self.params.shape)

        if units:
            for i in range(self.dim):
                # BUGFIX(review): this used to assign to se[self.params],
                # overwriting every entry with the LAST parameter's standard
                # error each iteration; index by i as in the branch below.
                se[i] = np.sqrt( np.mean(self.samples_units[:, i]**2.)\
                                 -np.mean(self.samples_units[:, i])**2. )
            conf_low = self.params_units-z_alpha*se
            conf_up = self.params_units+z_alpha*se
            for i in range(self.dim):
                self.log.log(logging.INFO,'{}% Confidence interval of {}: ({:.5f}, {:.5f}) {}'\
                            .format(percentage,self.labels[i],conf_low[i],
                                    conf_up[i],self.units[i]))
            self.log.log(logging.INFO,'')
        else:
            for i in range(self.dim):
                se[i] = np.sqrt( np.mean(self.samples[:, i]**2.)\
                                 -np.mean(self.samples[:, i])**2. )
            conf_low = self.parameters-z_alpha*se
            conf_up = self.parameters+z_alpha*se
            for i in range(self.dim):
                self.log.log(logging.INFO,'{}% Confidence interval of {}: ({:.5f}, {:.5f})'\
                            .format(percentage,self.labels[i],conf_low[i],
                                    conf_up[i]))
            self.log.log(logging.INFO,'')
        return [conf_low, conf_up]

    def get_chi2_value(self, mask_treshold=0.4):
        """Compute chi^2 / reduced chi^2 / log-likelihood / AIC of the
        best-fit model on beam-binned, masked data."""
        self.mask_treshold = mask_treshold
        # Temporarily switch coordinate grids to the MCMC (uncropped) FoV.
        x = np.arange(0, self.halo.data_mcmc.shape[1], 1)
        y = np.arange(0, self.halo.data_mcmc.shape[0], 1)
        self.x_pix, self.y_pix = np.meshgrid(x, y)

        # Shift the centre back to the uncropped frame.
        params = self.parameters.copy()
        params[1] += self.halo.margin[2]
        params[2] += self.halo.margin[0]

        binned_data = fitting.set_data_to_use(self, self.halo.data_mcmc)
        model = self._func_(self, params, rotate=True).reshape(self.halo.data.shape)*u.Jy
        binned_model = utils.regrid_to_beamsize(self.halo, model)
        self.rmsregrid = utils.findrms(binned_data)

        if not self.mask:
            self.image_mask = np.zeros(self.halo.data.shape)
        binned_image_mask = utils.regridding(self.halo, self.image_mask*u.Jy, mask=not self.halo.cropped).value
        binned_model = binned_model.ravel()[binned_image_mask.ravel() <=\
                                    mask_treshold*binned_image_mask.max()]

        chi2 = np.sum( ((binned_data-binned_model)/(self.rmsregrid))**2. )
        binned_dof = len(binned_data)-self.dim
        self.chi2_red = chi2/binned_dof

        self.ln_likelihood = -np.sum( ((binned_data-binned_model)**2.)/(2*(self.rmsregrid)**2.)\
                                      + np.log(np.sqrt(2*np.pi)*self.rmsregrid))
        self.AIC = 2*(self.dim-self.ln_likelihood)

        self.log.log(logging.INFO,'chi^2: {}'.format(chi2))
        self.log.log(logging.INFO,'effective DoF: {}'.format(binned_dof))
        self.log.log(logging.INFO,'chi^2_red: {}'.format(self.chi2_red))

        # Restore the cropped coordinate grids.
        x = np.arange(0, self.data.shape[1], 1)
        y = np.arange(0, self.data.shape[0], 1)
        self.x_pix, self.y_pix = np.meshgrid(x, y)

    def get_flux(self, int_max=np.inf, freq=None):
        """Integrate the model to a total flux density at *freq* (defaults to
        the map frequency).

        Uses the analytic integral of the exponential profile; *int_max*
        (in e-folding radii) truncates the integral via the incomplete
        gamma function. Sets self.flux plus percentile-based
        self.flux_val / self.flux_err, and logs the result.
        """
        if freq is None:
            freq = self.halo.freq
        a = self.samples[:, 3]*self.halo.pix_size
        if self.modelName == 'skewed':
            b = self.samples[:, 5]*self.halo.pix_size
            c = self.samples[:, 4]*self.halo.pix_size
            d = self.samples[:, 6]*self.halo.pix_size
            factor = (a*b+c*d+a*d+b*c)
        elif self.modelName in ['ellipse', 'rotated_ellipse']:
            b = self.samples[:, 4]*self.halo.pix_size
            factor = 4*a*b
        else:
            factor = 4*a**2

        if self.k_exponent: m = self.samples[:, self.at('k_exp')]+0.5
        else: m = 0.5

        I0 = u.Jy*self.samples[:, 0]/self.halo.pix_area
        flux = (gamma(1./m)*np.pi*I0/(4*m) * factor * gammainc(1./m, int_max**(2*m))\
                          *(freq/self.halo.freq)**self.alpha).to(u.mJy)

        self.flux = np.copy(flux)
        self.flux_freq = freq
        self.flux_val = np.percentile(flux, 50)
        self.flux_err = ((np.percentile(flux, 84)-np.percentile(flux, 16))/2.)

        self.log.log(logging.INFO,'MCMC Flux at {:.1f} {}: {:.2f} +/- {:.2f} {}'\
                                   .format(freq.value, freq.unit, self.flux_val.value,
                                           self.flux_err.value, flux.unit))
        self.log.log(logging.INFO,'Integration radius '+str(int_max))
        self.log.log(logging.INFO,'S/N based on flux {:.2f}'\
                                   .format(self.flux_val.value/self.flux_err.value))

    def get_power(self, freq=None):
        """Compute the k-corrected radio power at *freq* (defaults to the map
        frequency) from the flux posterior, including 10% calibration and
        0.1 mJy subtraction uncertainties (Osinga et al. 2020)."""
        if freq is None:
            freq = self.halo.freq
        d_L = self.halo.cosmology.luminosity_distance(self.halo.z)
        power = (4*np.pi*d_L**2. *((1.+self.halo.z)**((-1.*self.alpha) - 1.))*\
                        self.flux*((freq/self.flux_freq)**self.alpha)).to(u.W/u.Hz)
        # BUGFIX(review): a preliminary self.power_std based only on the flux
        # uncertainty was computed here and immediately overwritten below;
        # the dead computation has been removed.
        cal = 0.1
        sub = 0.1  # Osinga et al. 2020
        self.power = np.copy(power)
        self.power_val = np.percentile(power, [50])[0]
        power_err = ((np.percentile(power, [84])[0]-np.percentile(power, [16])[0])/2.).value
        self.power_std = np.sqrt((cal*self.power_val.value)**2+sub**2+power_err**2)
        self.log.log(logging.INFO,'Power at {:.1f} {}: ({:.3g} +/- {:.3g}) {}'\
                                   .format(freq.value, freq.unit,
                                           np.percentile(power, [50])[0].value,
                                           (np.percentile(power, [84])[0]-\
                                            np.percentile(power, [16])[0]).value/2.,
                                           power.unit))
| 44,842
| 43.050098
| 142
|
py
|
spyn-repr
|
spyn-repr-master/scopes.py
|
from collections import deque
from collections import defaultdict
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.nodes import CategoricalIndicatorNode
from spn.linked.layers import CategoricalIndicatorLayer
from spn.linked.layers import SumLayer
from spn.linked.layers import ProductLayer
from spn.linked.spn import Spn as LinkedSpn
import numpy
import itertools
def topological_layer_sort(layers):
    """Return *layers* ordered so every layer appears after its input layers.

    Repeatedly sweeps the remaining layers and emits those whose inputs have
    all been emitted already. Raises RuntimeError when a full sweep makes no
    progress, which indicates a cyclic dependency.
    """
    pending = {layer: layer.input_layers for layer in layers}
    ordered = []
    while pending:
        progressed = False
        # Iterate over a snapshot; deletions within the sweep make freshly
        # emitted layers count as resolved for later entries of the sweep.
        for layer, inputs in list(pending.items()):
            if any(dep in pending for dep in inputs):
                continue
            progressed = True
            del pending[layer]
            ordered.append(layer)
        if not progressed:
            raise RuntimeError("A cyclic dependency occurred")
    return ordered
| 1,122
| 22.893617
| 65
|
py
|
spyn-repr
|
spyn-repr-master/ocr_letters.py
|
import numpy
import matplotlib
import matplotlib.pyplot as pyplot
import pickle
import os
def load_ocr_letters_data_split_from_txt(data_path):
    """Load one OCR-letters split from a space-separated text file.

    Each row holds 128 binary pixel features followed by the class label.
    Returns (features, labels) as int32 arrays.
    """
    raw = numpy.loadtxt(data_path, delimiter=' ')
    features = raw[:, :-1].astype(numpy.int32)
    labels = raw[:, -1].astype(numpy.int32)
    print('Loaded dataset:\n\tx: {}\ty: {}'.format(features.shape, labels.shape))
    # Sanity checks on the expected OCR-letters layout.
    assert features.shape[0] == labels.shape[0]
    assert labels.ndim == 1
    assert features.shape[1] == 128
    return features, labels
def load_ocr_letters_from_txt(data_dir, split_names=['ocr_letters_train.txt',
                                                     'ocr_letters_valid.txt',
                                                     'ocr_letters_test.txt']):
    """Load the train/valid/test OCR-letters splits from *data_dir*.

    NOTE(review): the mutable default list is kept for interface
    compatibility; it is never mutated here.
    """
    return [load_ocr_letters_data_split_from_txt(os.path.join(data_dir, name))
            for name in split_names]
def save_ocr_letters_pickle(data_splits, output_path):
    """Serialize the OCR-letters *data_splits* to *output_path* with pickle."""
    with open(output_path, 'wb') as out_file:
        pickle.dump(data_splits, out_file)
def load_ocr_letters_pickle(data_path):
    """Deserialize previously pickled OCR-letters splits from *data_path*."""
    with open(data_path, 'rb') as data_file:
        return pickle.load(data_file)
def plot_m_by_n_images(images,
                       m, n,
                       fig_size=(12, 12),
                       cmap=matplotlib.cm.binary):
    """Show an m-by-n grid of image matrices in one figure.

    Images are laid out with index ``n * y + x`` (column-major over the
    grid); axis ticks are hidden on every subplot.
    """
    fig = pyplot.figure(figsize=fig_size)
    for col in range(m):
        for row in range(n):
            axis = fig.add_subplot(m, n, n * row + col + 1)
            axis.matshow(images[n * row + col], cmap=cmap)
            pyplot.xticks(numpy.array([]))
            pyplot.yticks(numpy.array([]))
    pyplot.show()
def array_2_mat(array, n_rows=16):
    """Reshape a flat pixel *array* into an (n_rows, -1) image matrix."""
    return numpy.reshape(array, (n_rows, -1))
def plot_ocr_letters(image_arrays,
                     m, n,
                     n_rows=16,
                     fig_size=(12, 12),
                     invert=True,
                     cmap=matplotlib.cm.binary):
    """Plot OCR-letter bitmaps as an m-by-n grid.

    Each flat array is reshaped to (n_rows, -1); with *invert* (default)
    pixel values are flipped as ``1 - img`` before display.
    """
    if invert:
        matrices = [array_2_mat(1 - img, n_rows) for img in image_arrays]
    else:
        matrices = [array_2_mat(img, n_rows) for img in image_arrays]
    plot_m_by_n_images(matrices, m, n, fig_size, cmap)
| 2,250
| 27.858974
| 86
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.