code
stringlengths
17
6.64M
def run_experiment(argv):
    """Entry point for launching an rllab experiment from the command line.

    Parses flags from ``argv[1:]``, configures the global ``logger`` state
    (log dir, snapshot mode/gap, tabular/text outputs), then runs one of:
    a resumed joblib snapshot, a cloudpickled method call, or unpickled
    stub args. Restores the previous logger snapshot state afterwards.
    """
    default_log_dir = config.LOG_DIR
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    # Short random suffix keeps concurrently launched experiments distinct.
    rand_id = str(uuid.uuid4())[:5]
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S_%f_%Z')
    default_exp_name = ('experiment_%s_%s' % (timestamp, rand_id))
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_parallel', type=int, default=1,
                        help="Number of parallel workers to perform rollouts. 0 => don't start any workers")
    parser.add_argument('--exp_name', type=str, default=default_exp_name,
                        help='Name of the experiment.')
    parser.add_argument('--log_dir', type=str, default=None,
                        help='Path to save the log and iteration snapshot.')
    parser.add_argument('--snapshot_mode', type=str, default='all',
                        help='Mode to save the snapshot. Can be either "all" (all iterations will be saved), "last" (only the last iteration will be saved), or "none" (do not save snapshots)')
    parser.add_argument('--snapshot_gap', type=int, default=1,
                        help='Gap between snapshot iterations.')
    parser.add_argument('--tabular_log_file', type=str, default='progress.csv',
                        help='Name of the tabular log file (in csv).')
    parser.add_argument('--text_log_file', type=str, default='debug.log',
                        help='Name of the text log file (in pure text).')
    parser.add_argument('--params_log_file', type=str, default='params.json',
                        help='Name of the parameter log file (in json).')
    parser.add_argument('--variant_log_file', type=str, default='variant.json',
                        help='Name of the variant log file (in json).')
    parser.add_argument('--resume_from', type=str, default=None,
                        help='Name of the pickle file to resume experiment from.')
    parser.add_argument('--plot', type=ast.literal_eval, default=False,
                        help='Whether to plot the iteration results')
    parser.add_argument('--log_tabular_only', type=ast.literal_eval, default=False,
                        help='Whether to only print the tabular log information (in a horizontal format)')
    parser.add_argument('--seed', type=int,
                        help='Random seed for numpy')
    parser.add_argument('--args_data', type=str,
                        help='Pickled data for stub objects')
    parser.add_argument('--variant_data', type=str,
                        help='Pickled data for variant configuration')
    parser.add_argument('--use_cloudpickle', type=ast.literal_eval, default=False)
    args = parser.parse_args(argv[1:])
    if (args.seed is not None):
        set_seed(args.seed)
    if (args.n_parallel > 0):
        from rllab.sampler import parallel_sampler
        parallel_sampler.initialize(n_parallel=args.n_parallel)
        if (args.seed is not None):
            parallel_sampler.set_seed(args.seed)
    if args.plot:
        from rllab.plotter import plotter
        plotter.init_worker()
    # Default to LOG_DIR/<exp_name> unless an explicit directory was given.
    if (args.log_dir is None):
        log_dir = osp.join(default_log_dir, args.exp_name)
    else:
        log_dir = args.log_dir
    tabular_log_file = osp.join(log_dir, args.tabular_log_file)
    text_log_file = osp.join(log_dir, args.text_log_file)
    params_log_file = osp.join(log_dir, args.params_log_file)
    if (args.variant_data is not None):
        # NOTE(review): pickle.loads on CLI-supplied data is unsafe for
        # untrusted input; assumed to come from the trusted launcher itself.
        variant_data = pickle.loads(base64.b64decode(args.variant_data))
        variant_log_file = osp.join(log_dir, args.variant_log_file)
        logger.log_variant(variant_log_file, variant_data)
    else:
        variant_data = None
    if (not args.use_cloudpickle):
        logger.log_parameters_lite(params_log_file, args)
    logger.add_text_output(text_log_file)
    logger.add_tabular_output(tabular_log_file)
    # Remember previous global logger state so it can be restored on exit.
    prev_snapshot_dir = logger.get_snapshot_dir()
    prev_mode = logger.get_snapshot_mode()
    logger.set_snapshot_dir(log_dir)
    logger.set_snapshot_mode(args.snapshot_mode)
    logger.set_snapshot_gap(args.snapshot_gap)
    logger.set_log_tabular_only(args.log_tabular_only)
    logger.push_prefix(('[%s] ' % args.exp_name))
    if (args.resume_from is not None):
        # Resume a previously snapshotted algorithm.
        data = joblib.load(args.resume_from)
        assert ('algo' in data)
        algo = data['algo']
        algo.train()
    elif args.use_cloudpickle:
        import cloudpickle
        method_call = cloudpickle.loads(base64.b64decode(args.args_data))
        method_call(variant_data)
    else:
        data = pickle.loads(base64.b64decode(args.args_data))
        maybe_iter = concretize(data)
        # Stubbed calls may yield a generator; exhaust it to run the experiment.
        if is_iterable(maybe_iter):
            for _ in maybe_iter:
                pass
    logger.set_snapshot_mode(prev_mode)
    logger.set_snapshot_dir(prev_snapshot_dir)
    logger.remove_tabular_output(tabular_log_file)
    logger.remove_text_output(text_log_file)
    logger.pop_prefix()
def setup_iam():
    """Create (or recreate, after confirmation) the 'rllab' IAM role.

    Tears down any existing 'rllab' role — instance profiles, inline and
    attached policies — then creates the role with an EC2 assume-role trust
    policy, attaches S3/tag-editor managed policies, adds inline CreateTags
    and TerminateInstances policies, and binds the role to a fresh 'rllab'
    instance profile.
    """
    iam_client = boto3.client('iam', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=ACCESS_SECRET)
    iam = boto3.resource('iam', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=ACCESS_SECRET)
    try:
        existing_role = iam.Role('rllab')
        existing_role.load()
        if (not query_yes_no('There is an existing role named rllab. Proceed to delete everything rllab-related and recreate?', default='no')):
            sys.exit()
        print('Listing instance profiles...')
        inst_profiles = existing_role.instance_profiles.all()
        # A role cannot be deleted while referenced; detach everything first.
        for prof in inst_profiles:
            for role in prof.roles:
                print(('Removing role %s from instance profile %s' % (role.name, prof.name)))
                prof.remove_role(RoleName=role.name)
            print(('Deleting instance profile %s' % prof.name))
            prof.delete()
        for policy in existing_role.policies.all():
            print(('Deleting inline policy %s' % policy.name))
            policy.delete()
        for policy in existing_role.attached_policies.all():
            print(('Detaching policy %s' % policy.arn))
            existing_role.detach_policy(PolicyArn=policy.arn)
        print('Deleting role')
        existing_role.delete()
    except botocore.exceptions.ClientError as e:
        # No existing role is the normal first-run case.
        if (e.response['Error']['Code'] == 'NoSuchEntity'):
            pass
        else:
            raise e
    print('Creating role rllab')
    iam_client.create_role(Path='/', RoleName='rllab', AssumeRolePolicyDocument=json.dumps({'Version': '2012-10-17', 'Statement': [{'Action': 'sts:AssumeRole', 'Effect': 'Allow', 'Principal': {'Service': 'ec2.amazonaws.com'}}]}))
    role = iam.Role('rllab')
    print('Attaching policies')
    role.attach_policy(PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess')
    role.attach_policy(PolicyArn='arn:aws:iam::aws:policy/ResourceGroupsandTagEditorFullAccess')
    print('Creating inline policies')
    iam_client.put_role_policy(RoleName=role.name, PolicyName='CreateTags', PolicyDocument=json.dumps({'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Action': ['ec2:CreateTags'], 'Resource': ['*']}]}))
    iam_client.put_role_policy(RoleName=role.name, PolicyName='TerminateInstances', PolicyDocument=json.dumps({'Version': '2012-10-17', 'Statement': [{'Sid': 'Stmt1458019101000', 'Effect': 'Allow', 'Action': ['ec2:TerminateInstances'], 'Resource': ['*']}]}))
    print('Creating instance profile rllab')
    iam_client.create_instance_profile(InstanceProfileName='rllab', Path='/')
    print('Adding role rllab to instance profile rllab')
    iam_client.add_role_to_instance_profile(InstanceProfileName='rllab', RoleName='rllab')
def setup_s3():
    """Create the private rllab S3 bucket, tolerating reruns.

    Raises ValueError if the bucket name is taken by another account;
    a bucket already owned by this account is treated as success.
    """
    print(('Creating S3 bucket at s3://%s' % S3_BUCKET_NAME))
    client = boto3.client(
        's3',
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=ACCESS_SECRET,
    )
    try:
        client.create_bucket(
            ACL='private',
            Bucket=S3_BUCKET_NAME,
            CreateBucketConfiguration={'LocationConstraint': 'us-west-1'},
        )
    except botocore.exceptions.ClientError as err:
        error_code = err.response['Error']['Code']
        if error_code == 'BucketAlreadyExists':
            # Name is globally taken by someone else — user must pick another.
            raise ValueError(('Bucket %s already exists. Please reconfigure S3_BUCKET_NAME' % S3_BUCKET_NAME))
        if error_code == 'BucketAlreadyOwnedByYou':
            print('Bucket already created by you')
        else:
            raise err
    print('S3 bucket created')
def setup_ec2():
    """Provision per-region EC2 prerequisites: security group and SSH key pair.

    For each supported region: reuse (or create) the 'rllab-sg' security
    group in the first VPC, open SSH ingress, create an 'rllab-<region>'
    key pair (optionally deleting an existing one after confirmation),
    save the private key under private/key_pairs with 0600 permissions,
    and record ids/names into the module-level ALL_REGION_* dicts.
    """
    for region in ['us-west-1', 'us-west-2', 'us-east-1']:
        print(('Setting up region %s' % region))
        ec2 = boto3.resource('ec2', region_name=region, aws_access_key_id=ACCESS_KEY, aws_secret_access_key=ACCESS_SECRET)
        ec2_client = boto3.client('ec2', region_name=region, aws_access_key_id=ACCESS_KEY, aws_secret_access_key=ACCESS_SECRET)
        existing_vpcs = list(ec2.vpcs.all())
        # Requires a (default) VPC to already exist in the region.
        assert (len(existing_vpcs) >= 1)
        vpc = existing_vpcs[0]
        print(('Creating security group in VPC %s' % str(vpc.id)))
        try:
            security_group = vpc.create_security_group(GroupName='rllab-sg', Description='Security group for rllab')
        except botocore.exceptions.ClientError as e:
            # Reuse the existing group on reruns.
            if (e.response['Error']['Code'] == 'InvalidGroup.Duplicate'):
                sgs = list(vpc.security_groups.filter(GroupNames=['rllab-sg']))
                security_group = sgs[0]
            else:
                raise e
        ALL_REGION_AWS_SECURITY_GROUP_IDS[region] = [security_group.id]
        ec2_client.create_tags(Resources=[security_group.id], Tags=[{'Key': 'Name', 'Value': 'rllab-sg'}])
        try:
            # SSH access from anywhere.
            security_group.authorize_ingress(FromPort=22, ToPort=22, IpProtocol='tcp', CidrIp='0.0.0.0/0')
        except botocore.exceptions.ClientError as e:
            # Rule already present on reruns.
            if (e.response['Error']['Code'] == 'InvalidPermission.Duplicate'):
                pass
            else:
                raise e
        print(('Security group created with id %s' % str(security_group.id)))
        key_name = ('rllab-%s' % region)
        try:
            print(('Trying to create key pair with name %s' % key_name))
            key_pair = ec2_client.create_key_pair(KeyName=key_name)
        except botocore.exceptions.ClientError as e:
            if (e.response['Error']['Code'] == 'InvalidKeyPair.Duplicate'):
                # AWS never returns existing private key material, so the only
                # way to get a usable .pem is delete + recreate.
                if (not query_yes_no(('Key pair with name %s exists. Proceed to delete and recreate?' % key_name), 'no')):
                    sys.exit()
                print(('Deleting existing key pair with name %s' % key_name))
                ec2_client.delete_key_pair(KeyName=key_name)
                print(('Recreating key pair with name %s' % key_name))
                key_pair = ec2_client.create_key_pair(KeyName=key_name)
            else:
                raise e
        key_pair_folder_path = os.path.join(config.PROJECT_PATH, 'private', 'key_pairs')
        file_name = os.path.join(key_pair_folder_path, ('%s.pem' % key_name))
        print('Saving keypair file')
        console.mkdir_p(key_pair_folder_path)
        # 384 == 0o600: private key readable/writable by owner only.
        with os.fdopen(os.open(file_name, (os.O_WRONLY | os.O_CREAT), 384), 'w') as handle:
            handle.write((key_pair['KeyMaterial'] + '\n'))
        os.system(('ssh-add %s' % file_name))
        ALL_REGION_AWS_KEY_NAMES[region] = key_name
def write_config():
    """Render CONFIG_TEMPLATE with the collected AWS settings and write it
    to rllab/config_personal.py, asking before overwriting an existing file."""
    print('Writing config file...')
    rendered = CONFIG_TEMPLATE.substitute(
        all_region_aws_key_names=json.dumps(ALL_REGION_AWS_KEY_NAMES, indent=4),
        all_region_aws_security_group_ids=json.dumps(ALL_REGION_AWS_SECURITY_GROUP_IDS, indent=4),
        s3_bucket_name=S3_BUCKET_NAME,
    )
    target = os.path.join(config.PROJECT_PATH, 'rllab/config_personal.py')
    if os.path.exists(target) and not query_yes_no('rllab/config_personal.py exists. Override?', 'no'):
        sys.exit()
    with open(target, 'wb') as out:
        out.write(rendered.encode('utf-8'))
def setup():
    """Run the full provisioning sequence: S3 bucket, IAM role, per-region
    EC2 resources, then write the personal config file."""
    setup_s3()
    setup_iam()
    setup_ec2()
    write_config()
def query_yes_no(question, default='yes'):
    """Ask a yes/no question on stdin and return the answer as a bool.

    `question` is shown to the user. `default` is the answer assumed when the
    user just hits <Enter>; it must be 'yes', 'no' or None (None forces an
    explicit answer). Returns True for yes, False for no. Raises ValueError
    on an invalid `default`.
    """
    valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
    if default not in (None, 'yes', 'no'):
        raise ValueError(("invalid default answer: '%s'" % default))
    prompt = {None: ' [y/n] ', 'yes': ' [Y/n] ', 'no': ' [y/N] '}[default]
    while True:
        sys.stdout.write((question + prompt))
        choice = input().lower()
        if choice == '' and default is not None:
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def getcameras(self, context):
    """Return enum items (identifier, name, description) for every camera
    object in the current scene.

    Rewritten as a single comprehension (was a manual append loop followed
    by a second pass) and renamed the loop variable, which shadowed the
    builtin `object`.
    """
    return [
        (obj.name, obj.name, obj.name)
        for obj in context.scene.objects
        if obj.type == 'CAMERA'
    ]
def printLogo():
    """Log the uvHolographics ASCII-art banner (backslashes are escaped)."""
    banner = (
        '===========================================================================',
        ' __ __ __ __ _ ',
        ' __ ___ __/ / / /___ / /___ ____ __________ _____ / /_ (_)_________',
        ' / / / / | / / /_/ / __ \\/ / __ \\/ __ `/ ___/ __ `/ __ \\/ __ \\/ / ___/ ___/',
        '/ /_/ /| |/ / __ / /_/ / / /_/ / /_/ / / / /_/ / /_/ / / / / / /__(__ ) ',
        '\\__,_/ |___/_/ /_/\\____/_/\\____/\\__, /_/ \\__,_/ .___/_/ /_/_/\\___/____/ ',
        ' /____/ /_/ ',
        '===========================================================================',
    )
    for banner_line in banner:
        log(banner_line)
def log(s):
    """Print a debug message; silent unless the module-level DEBUG is truthy."""
    if not DEBUG:
        return
    print(s)
def create_image(name, k=1):
    """Create a blank square defect texture of side k*1024 px, unless an
    image with that name already exists."""
    if name in bpy.data.images:
        log('- create_image() : textures exists')
        return
    side = k * 1024
    bpy.ops.image.new(name=name, width=side, height=side, color=(0.0, 0.0, 0.0, 0.0))
def create_view_layers(context):
    """Ensure the scene has a 'real' view layer (first layer renamed in
    place) and a 'ground_truth' view layer.

    Bug fix: the existence check previously looked for 'Ground Truth',
    which never matches the layer actually created ('ground_truth'), so a
    duplicate layer was added on every call. The check now uses the same
    name that is created, making the function idempotent.
    """
    context.scene.view_layers[0].name = 'real'
    if 'ground_truth' not in context.scene.view_layers:
        context.scene.view_layers.new(name='ground_truth')
def create_mode_switcher_node_group():
    """Build the 'mode_switcher' shader node group if it does not exist.

    The group mixes a 'Real' shader input and a 'Ground Truth' color input
    through a Mix Shader whose factor is driven by the scene property
    uv_holographics.mode, exposing the result as the 'Switch' output.
    """
    if ('mode_switcher' not in bpy.data.node_groups):
        test_group = bpy.data.node_groups.new('mode_switcher', 'ShaderNodeTree')
        # Group interface: two inputs, one output.
        group_inputs = test_group.nodes.new('NodeGroupInput')
        group_inputs.location = ((- 350), 0)
        test_group.inputs.new('NodeSocketShader', 'Real')
        test_group.inputs.new('NodeSocketColor', 'Ground Truth')
        group_outputs = test_group.nodes.new('NodeGroupOutput')
        group_outputs.location = (300, 0)
        test_group.outputs.new('NodeSocketShader', 'Switch')
        node_mix = test_group.nodes.new('ShaderNodeMixShader')
        node_mix.location = (100, 0)
        # Drive the mix factor from Scene.uv_holographics.mode so toggling the
        # property switches every material using this group at once.
        modeDriver = bpy.data.node_groups['mode_switcher'].driver_add('nodes["Mix Shader"].inputs[0].default_value')
        modeDriver.driver.expression = 'mode'
        modeVariable = modeDriver.driver.variables.new()
        modeVariable.name = 'mode'
        modeVariable.type = 'SINGLE_PROP'
        modeVariable.targets[0].id_type = 'SCENE'
        # NOTE(review): hard-codes the scene name 'Scene' — confirm this holds
        # for renamed scenes.
        modeVariable.targets[0].id = bpy.data.scenes['Scene']
        modeVariable.targets[0].data_path = 'uv_holographics.mode'
        # Wire: inputs -> mix -> output.
        test_group.links.new(group_inputs.outputs['Real'], node_mix.inputs[1])
        test_group.links.new(group_inputs.outputs['Ground Truth'], node_mix.inputs[2])
        test_group.links.new(node_mix.outputs[0], group_outputs.inputs['Switch'])
    else:
        log('- create_mode_switcher_node_group() : node group already exists')
def add_camera_focus(context, cameraName, target):
    """Make the named camera track `target` with a TRACK_TO constraint.

    Idempotent: if a 'Track To' constraint already exists it is left alone.
    """
    camera = context.scene.objects[cameraName]
    if 'Track To' in camera.constraints:
        log('- add_camera_focus() : camera constraint already exists')
        return
    tracker = camera.constraints.new(type='TRACK_TO')
    tracker.target = target
    tracker.track_axis = 'TRACK_NEGATIVE_Z'
    tracker.up_axis = 'UP_Y'
def toggle_mode(context):
    """Flip the scene between realistic rendering (mode 0) and ground-truth
    visualization (mode 1), adjusting filter size and view transform."""
    scene = context.scene
    uvh = scene.uv_holographics
    if uvh.mode == 0:
        log('switching to GT')
        uvh.mode = 1
        scene.render.filter_size = 0
        scene.view_settings.view_transform = 'Standard'
    else:
        log('switching to realistic')
        uvh.mode = 0
        scene.render.filter_size = 1.5
        scene.view_settings.view_transform = 'Filmic'
    # NOTE(review): re-assigning the driver expression presumably forces a
    # re-evaluation of the mode_switcher driver — confirm.
    bpy.data.node_groups['mode_switcher'].animation_data.drivers[0].driver.expression = 'mode'
def render_layer(context, layer, id):
    """Render one view layer to '<output_dir><layer>/<id:04d>.png'.

    Mode agnostic — the caller is responsible for toggling real/GT first.
    """
    uvh = context.scene.uv_holographics
    context.scene.render.filepath = f'{uvh.output_dir}{layer}/{id:04d}.png'
    bpy.ops.render.render(write_still=True, layer=layer)
def run_variation(context):
    """Randomize the camera position on a spherical shell around the origin.

    Radius is camera_dist_mean +/- camera_dist_var; the polar angle is
    biased around the horizon. todo: scenarios; todo: read from XML file.
    """
    uvh = context.scene.uv_holographics
    camera = context.scene.objects['Camera']
    radius = uvh.camera_dist_mean + uniform(-uvh.camera_dist_var, uvh.camera_dist_var)
    theta = (np.pi / 2) + uniform(-np.pi / 4, np.pi / 8)
    phi = uniform(0, 2 * np.pi)
    sin_theta = np.sin(theta)
    camera.location = (
        radius * sin_theta * np.cos(phi),
        radius * sin_theta * np.sin(phi),
        radius * np.cos(theta),
    )
def insert_mode_switcher_node(context, material):
    """Inserts a mode_switcher group node in the materials that are in
    target_collection.

    Finds the link feeding the material's 'Surface' socket and, unless the
    switcher is already spliced in, rewires it through a new 'mode_switcher'
    group node.
    """
    log(f'checking for {material.name}')
    for l in material.node_tree.links:
        if (l.to_socket.name == 'Surface'):
            # Already coming from the switcher's 'Switch' output -> nothing to do.
            if (l.from_socket.name == 'Switch'):
                log('- mode_switcher already inserted')
            else:
                log('found end link, operating ..')
                open_node_pre = l.from_node
                open_node_post = l.to_node
                # NOTE(review): removes a link from the collection being
                # iterated — confirm Blender tolerates mutation here.
                material.node_tree.links.remove(l)
                group = material.node_tree.nodes.new(type='ShaderNodeGroup')
                group.node_tree = bpy.data.node_groups['mode_switcher']
                # Splice: previous shader -> switcher -> material output.
                material.node_tree.links.new(open_node_pre.outputs[0], group.inputs[0])
                material.node_tree.links.new(group.outputs[0], open_node_post.inputs[0])
                log('[[done]]')
class MyProperties(PropertyGroup):
    # Add-on settings registered as Scene.uv_holographics and edited via the
    # uvHolographics panel.
    # Whether the background gets its own annotation class.
    separate_background: BoolProperty(name='Separate background class', description='Extra class for background', default=True)
    # Number of defect classes (one blank texture is generated per class).
    n_defects: IntProperty(name='Defect classes', description='Number of defect classes', default=1, min=1, max=10)
    # How many camera variations to render.
    n_samples: IntProperty(name='Number of samples', description='Number of samples to generate', default=1, min=1, max=500)
    # Object the camera tracks.
    target_object: PointerProperty(type=bpy.types.Object)
    # Collection whose materials receive the mode_switcher node.
    target_collection: PointerProperty(type=bpy.types.Collection)
    output_dir: StringProperty(name='Output folder', description='Choose a directory:', default='../output/', maxlen=1024, subtype='DIR_PATH')
    # 0 = realistic, 1 = ground truth (drives the mode_switcher node group).
    mode: IntProperty(name='Visualization mode', description='Realistic/Ground truth', default=0, min=0, max=1)
    # Skip rendering the ground-truth pass when True.
    generate_real_only: BoolProperty(name='Generate real only', description='', default=False)
    min_camera_angle: FloatProperty(name='Min camera angle', default=0.0, min=0.0, max=1.0)
    max_camera_angle: FloatProperty(name='Max camera angle', default=1.0, min=0.0, max=1.0)
    # Camera distance is sampled as mean +/- var (see run_variation).
    camera_dist_mean: FloatProperty(name='Camera dist mean', default=5.0, min=0.0, max=10.0)
    camera_dist_var: FloatProperty(name='Camera dist var', default=1.0, min=0.0, max=4.0)
class WM_OT_GenerateComponents(Operator):
    'Generate blank texture maps and view layers'
    # Docstring above doubles as the operator tooltip; left unchanged.
    bl_label = 'Generate components'
    bl_idname = 'wm.gen_components'

    def execute(self, context):
        # Create all scene-level pieces the add-on needs: defect textures,
        # view layers, the mode_switcher node group and the camera tracker.
        scene = context.scene
        uvh = scene.uv_holographics
        log('- generating components')
        # One blank 2K (k=2 -> 2048px) texture per defect class.
        for i in range(uvh.n_defects):
            create_image(name=f'defect{i}', k=2)
        create_view_layers(context)
        create_mode_switcher_node_group()
        add_camera_focus(context, 'Camera', uvh.target_object)
        log('[[done]]')
        return {'FINISHED'}
class WM_OT_UpdateMaterials(Operator):
    'Updates existing material nodes of objects in target_collection'
    # Docstring above doubles as the operator tooltip; left unchanged.
    bl_label = 'Update Materials'
    bl_idname = 'wm.update_materials'

    def execute(self, context):
        # Splice the mode_switcher group into every material of every object
        # in the configured target collection.
        for o in context.scene.uv_holographics.target_collection.objects:
            for m in o.data.materials:
                insert_mode_switcher_node(context, m)
        return {'FINISHED'}
class WM_OT_ToggleMaterials(Operator):
    'Toggle between realistic view and ground truth'
    # Docstring above doubles as the operator tooltip; left unchanged.
    bl_label = 'Toggle Real/GT'
    bl_idname = 'wm.toggle_real_gt'

    def execute(self, context):
        toggle_mode(context)
        return {'FINISHED'}
class WM_OT_SampleVariation(Operator):
    'Runs a sample variation'
    # Docstring above doubles as the operator tooltip; left unchanged.
    bl_label = 'Sample variation'
    bl_idname = 'wm.sample_variation'

    def execute(self, context):
        # Preview a single randomized camera placement without rendering.
        run_variation(context)
        return {'FINISHED'}
class WM_OT_StartScenarios(Operator):
    'Vary camera positions'
    # Docstring above doubles as the operator tooltip; left unchanged.
    bl_label = 'Generate'
    bl_idname = 'wm.start_scenarios'

    def execute(self, context):
        # Render n_samples randomized views: the 'real' pass always, plus a
        # matching 'ground_truth' pass unless generate_real_only is set.
        scene = context.scene
        uvh = scene.uv_holographics
        # Always start in realistic mode.
        if (uvh.mode != 0):
            toggle_mode(context)
        for i in range(uvh.n_samples):
            run_variation(context)
            render_layer(context, 'real', (i + 1))
            if (not uvh.generate_real_only):
                # Switch to GT, render, switch back.
                toggle_mode(context)
                render_layer(context, 'ground_truth', (i + 1))
                toggle_mode(context)
        log('[[done]]')
        return {'FINISHED'}
class OBJECT_PT_CustomPanel(Panel):
    # Sidebar panel (View3D > N-panel > 'Annotation' tab) exposing the
    # add-on's settings and operators.
    bl_label = 'uvHolographics'
    bl_idname = 'OBJECT_PT_custom_panel'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Annotation'
    bl_context = 'objectmode'

    @classmethod
    def poll(self, context):
        # Only show the panel when an object is active.
        # NOTE(review): classmethod receiver is named `self`; works, but
        # `cls` is conventional.
        return (context.object is not None)

    def draw_header(self, context):
        # Show the custom logo icon loaded in register().
        global custom_icons
        self.layout.label(text='', icon_value=custom_icons['custom_icon'].icon_id)

    def draw(self, context):
        # Four sections: setup, camera parameters, operations, generation.
        layout = self.layout
        scene = context.scene
        uvh = scene.uv_holographics
        layout.label(text='Setup')
        box = layout.box()
        box.prop(uvh, 'separate_background')
        box.prop(uvh, 'n_defects')
        box.prop(uvh, 'target_object')
        box.operator('wm.gen_components')
        box.prop(uvh, 'target_collection')
        box.operator('wm.update_materials')
        layout.label(text='Camera parameters')
        box = layout.box()
        box.prop(uvh, 'min_camera_angle', slider=True)
        box.prop(uvh, 'max_camera_angle', slider=True)
        box.prop(uvh, 'camera_dist_mean', slider=True)
        box.prop(uvh, 'camera_dist_var', slider=True)
        layout.label(text='Operations')
        box = layout.box()
        box.operator('wm.toggle_real_gt')
        box.operator('wm.sample_variation')
        layout.label(text='Generation')
        box = layout.box()
        box.prop(uvh, 'generate_real_only')
        box.prop(uvh, 'n_samples')
        box.prop(uvh, 'output_dir')
        box.operator('wm.start_scenarios')
        box.separator()
def register():
    """Register all add-on classes, load the custom panel icon, and attach
    the MyProperties group to Scene.uv_holographics."""
    from bpy.utils import register_class
    global custom_icons
    printLogo()
    for cls in classes:
        register_class(cls)
    custom_icons = bpy.utils.previews.new()
    # NOTE(review): assumes the script runs from Blender's text editor
    # (bpy.context.space_data.text) — fails when run from elsewhere; confirm.
    script_path = bpy.context.space_data.text.filepath
    icons_dir = os.path.join(os.path.dirname(script_path), 'icons')
    custom_icons.load('custom_icon', os.path.join(icons_dir, 'logo.png'), 'IMAGE')
    bpy.types.Scene.uv_holographics = PointerProperty(type=MyProperties)
def unregister():
    """Undo register(): remove classes (reverse order), the scene property,
    and the loaded icon previews."""
    from bpy.utils import unregister_class
    global custom_icons
    for cls in reversed(classes):
        unregister_class(cls)
    del bpy.types.Scene.uv_holographics
    bpy.utils.previews.remove(custom_icons)
class S2VGraph(object):
    """Container pairing a networkx graph with its label and per-node data.

    g: a networkx graph
    label: an integer graph label
    node_tags: a list of integer node tags
    node_features: a float tensor, one-hot representation of the tag that is
        used as input to neural nets
    edge_mat: a long tensor edge list, later turned into a sparse tensor
    neighbors: list of neighbors (without self-loop)
    """

    def __init__(self, g, label, node_tags=None, node_features=None):
        self.g = g
        self.label = label
        self.node_tags = node_tags
        # Placeholders — populated later by the data-loading pipeline.
        self.neighbors = []
        self.node_features = 0
        self.edge_mat = 0
        self.max_neighbor = 0
def load_data(dataset, degree_as_tag):
    """Load 'dataset/<name>/<name>.txt' into a list of S2VGraph objects.

    dataset: name of dataset
    degree_as_tag: if True, replace node tags with node degrees

    File format: first line is the graph count; each graph starts with
    'n_nodes label', followed by one line per node:
    'tag n_neighbors neighbor...[optional float attrs]'.

    Returns (graph list, number of classes).
    """
    print('loading data')
    g_list = []
    label_dict = {}
    feat_dict = {}
    triggered = False
    with open(('dataset/%s/%s.txt' % (dataset, dataset)), 'r') as f:
        n_g = int(f.readline().strip())
        for i in range(n_g):
            row = f.readline().strip().split()
            (n, l) = [int(w) for w in row]
            # Map raw labels to contiguous ids.
            if (not (l in label_dict)):
                mapped = len(label_dict)
                label_dict[l] = mapped
            g = nx.Graph()
            node_tags = []
            node_features = []
            n_edges = 0
            for j in range(n):
                g.add_node(j)
                row = f.readline().strip().split()
                # tmp = index where optional float attributes would start.
                tmp = (int(row[1]) + 2)
                if (tmp == len(row)):
                    row = [int(w) for w in row]
                    attr = None
                else:
                    (row, attr) = ([int(w) for w in row[:tmp]], np.array([float(w) for w in row[tmp:]]))
                    triggered = True
                if triggered:
                    print('Triggered already')
                # Map raw node tags to contiguous ids.
                if (not (row[0] in feat_dict)):
                    mapped = len(feat_dict)
                    feat_dict[row[0]] = mapped
                node_tags.append(feat_dict[row[0]])
                # NOTE(review): after the slice above, len(row) == tmp, so this
                # condition never holds and `attr` is never collected; features
                # are instead rebuilt from tags below. Confirm intended.
                if (tmp > len(row)):
                    node_features.append(attr)
                n_edges += row[1]
                for k in range(2, len(row)):
                    g.add_edge(j, row[k])
            if (node_features != []):
                node_features = np.stack(node_features)
                node_feature_flag = True
            else:
                node_features = None
                node_feature_flag = False
            assert (len(g) == n)
            g_list.append(S2VGraph(g, l, node_tags))
    # Second pass: adjacency lists, max degree, remapped labels, edge matrices.
    for g in g_list:
        g.neighbors = [[] for i in range(len(g.g))]
        for (i, j) in g.g.edges():
            g.neighbors[i].append(j)
            g.neighbors[j].append(i)
        degree_list = []
        for i in range(len(g.g)):
            # NOTE(review): self-assignment is a no-op (kept as-is).
            g.neighbors[i] = g.neighbors[i]
            degree_list.append(len(g.neighbors[i]))
        g.max_neighbor = max(degree_list)
        g.label = label_dict[g.label]
        # Undirected edges stored in both directions.
        edges = [list(pair) for pair in g.g.edges()]
        edges.extend([[i, j] for (j, i) in edges])
        # NOTE(review): deg_list is computed but never used.
        deg_list = list(dict(g.g.degree(range(len(g.g)))).values())
        g.edge_mat = tf.transpose(tf.constant(edges))
    if degree_as_tag:
        for g in g_list:
            g.node_tags = list(dict(g.g.degree).values())
    # Union of all tags defines the one-hot feature dimension.
    tagset = set([])
    for g in g_list:
        tagset = tagset.union(set(g.node_tags))
    tagset = list(tagset)
    tag2index = {tagset[i]: i for i in range(len(tagset))}
    for g in g_list:
        node_features = np.zeros((len(g.node_tags), len(tagset)))
        node_features[(range(len(g.node_tags)), [tag2index[tag] for tag in g.node_tags])] = 1
        g.node_features = tf.constant(node_features)
    print(('# classes: %d' % len(label_dict)))
    print(('# maximum node tag: %d' % len(tagset)))
    print(('# data: %d' % len(g_list)))
    return (g_list, len(label_dict))
def separate_data(graph_list, seed, fold_idx):
    """Return (train, test) graph lists for fold `fold_idx` of a shuffled,
    label-stratified 10-fold split seeded by `seed`."""
    assert ((0 <= fold_idx) and (fold_idx < 10)), 'fold_idx must be from 0 to 9.'
    labels = [graph.label for graph in graph_list]
    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
    folds = list(skf.split(np.zeros(len(labels)), labels))
    train_idx, test_idx = folds[fold_idx]
    train_graph_list = [graph_list[i] for i in train_idx]
    test_graph_list = [graph_list[i] for i in test_idx]
    return (train_graph_list, test_graph_list)
class MLP(tf.keras.layers.Layer):
    """Multi-layer perceptron; degenerates to a single linear layer when
    num_layers == 1.

    num_layers: number of layers EXCLUDING the input layer
    hidden_dim: dimensionality of hidden units at ALL layers
    output_dim: number of classes for prediction
    """

    def __init__(self, num_layers, hidden_dim, output_dim):
        super(MLP, self).__init__()
        self.num_layers = num_layers
        if num_layers < 1:
            raise ValueError('number of layers should be positive!')
        self.linear_or_not = (num_layers == 1)
        if self.linear_or_not:
            self.linear = Linear_model(output_dim=output_dim)
        else:
            self.multi = Multi_model(layers=num_layers, hidden_dim=hidden_dim, output_dim=output_dim)

    def call(self, input_features):
        # Dispatch to whichever submodel was built in __init__.
        submodel = self.linear if self.linear_or_not else self.multi
        return submodel(input_features)
class Linear_model(tf.keras.layers.Layer):
    """Single dense layer mapping input features straight to `output_dim`
    logits (the num_layers == 1 case of MLP)."""

    def __init__(self, output_dim):
        super(Linear_model, self).__init__()
        self.output_layer = tf.keras.layers.Dense(units=output_dim)

    def call(self, input_features):
        logits = self.output_layer(input_features)
        return logits
class Multi_model(tf.keras.layers.Layer):
    """Stack of (layers - 1) Dense->BatchNorm->ReLU blocks followed by a
    final linear projection to `output_dim` (no norm/activation)."""

    def __init__(self, layers, hidden_dim, output_dim):
        super(Multi_model, self).__init__()
        self.layers = layers
        self.dense_list = []
        self.batch_list = []
        for _ in range(layers - 1):
            self.dense_list.append(tf.keras.layers.Dense(units=hidden_dim))
            self.batch_list.append(tf.keras.layers.BatchNormalization())
        # Output head appended last, paired with no batch norm.
        self.dense_list.append(tf.keras.layers.Dense(units=output_dim))

    def call(self, input_features):
        h = input_features
        # Hidden blocks: dense -> batch norm -> relu.
        for dense, bn in zip(self.dense_list[:-1], self.batch_list):
            h = tf.nn.relu(bn(dense(h)))
        return self.dense_list[-1](h)
class GraphCNN(tf.keras.Model):
    """GIN-style graph classifier with sum/average/max neighbor pooling.

    Fixes two NameErrors in the original: maxpool() referenced undefined
    `tf_output`/`tf_dummy` instead of `h`/`dummy`, and next_layer_eps()
    used `h2` on the max-pooling path where it was never defined.

    num_layers: number of layers in the neural networks (INCLUDING the input layer)
    num_mlp_layers: number of layers in mlps (EXCLUDING the input layer)
    hidden_dim: dimensionality of hidden units at ALL layers
    output_dim: number of classes for prediction
    final_dropout: dropout ratio on the final linear layer
    learn_eps: If True, learn epsilon to distinguish center nodes from
        neighboring nodes. If False, aggregate neighbors and center nodes altogether.
    neighbor_pooling_type: how to aggregate neighbors (mean, average, or max)
    graph_pooling_type: how to aggregate entire nodes in a graph (mean, average)
    """

    def __init__(self, num_layers, num_mlp_layers, hidden_dim, output_dim, final_dropout, learn_eps, graph_pooling_type, neighbor_pooling_type):
        super(GraphCNN, self).__init__()
        self.final_dropout = final_dropout
        self.num_layers = num_layers
        self.graph_pooling_type = graph_pooling_type
        self.neighbor_pooling_type = neighbor_pooling_type
        self.learn_eps = learn_eps
        # One learnable epsilon per message-passing layer.
        self.eps = tf.Variable(tf.zeros((self.num_layers - 1)))
        self.mlps = []
        self.batches = []
        self.linears = []
        self.drops = []
        for layer in range((self.num_layers - 1)):
            self.mlps.append(MLP(num_mlp_layers, hidden_dim, hidden_dim))
            self.batches.append(tf.keras.layers.BatchNormalization())
            self.linears.append(tf.keras.layers.Dense(output_dim))
            self.drops.append(tf.keras.layers.Dropout(final_dropout))
        # Extra prediction head: hidden_rep in call() also includes the raw
        # input features, giving num_layers readouts in total.
        self.linears.append(tf.keras.layers.Dense(output_dim))
        self.drops.append(tf.keras.layers.Dropout(final_dropout))

    def __preprocess_neighbors_maxpool(self, batch_graph):
        # Build a padded, batch-offset neighbor index table for max pooling.
        max_deg = max([graph.max_neighbor for graph in batch_graph])
        padded_neighbor_list = []
        start_idx = [0]
        for (i, graph) in enumerate(batch_graph):
            start_idx.append((start_idx[i] + len(graph.g)))
            padded_neighbors = []
            for j in range(len(graph.neighbors)):
                pad = [(n + start_idx[i]) for n in graph.neighbors[j]]
                # -1 padding is resolved against the dummy row appended in maxpool().
                pad.extend(([(- 1)] * (max_deg - len(pad))))
                if (not self.learn_eps):
                    # Include the center node itself when eps is not learned.
                    pad.append((j + start_idx[i]))
                padded_neighbors.append(pad)
            padded_neighbor_list.extend(padded_neighbors)
        return tf.constant(padded_neighbor_list)

    def __preprocess_neighbors_sumavepool(self, batch_graph):
        # Block-diagonal sparse adjacency matrix over the whole batch.
        edge_mat_list = []
        start_idx = [0]
        for (i, graph) in enumerate(batch_graph):
            start_idx.append((start_idx[i] + len(graph.g)))
            edge_mat_list.append((graph.edge_mat + start_idx[i]))
        Adj_block_idx = tf.concat(edge_mat_list, 1)
        Adj_block_elem = tf.ones(Adj_block_idx.shape[1])
        if (not self.learn_eps):
            # Add self-loops so center nodes aggregate with their neighbors.
            num_node = start_idx[(- 1)]
            self_loop_edge = tf.constant([range(num_node), range(num_node)])
            elem = tf.ones(num_node)
            Adj_block_idx = tf.concat([Adj_block_idx, self_loop_edge], 1)
            Adj_block_elem = tf.concat([Adj_block_elem, elem], 0)
        Adj_block_idx = tf.cast(tf.transpose(Adj_block_idx), tf.int64)
        Adj_block = tf.SparseTensor(indices=Adj_block_idx, values=Adj_block_elem, dense_shape=[start_idx[(- 1)], start_idx[(- 1)]])
        return Adj_block

    def __preprocess_graphpool(self, batch_graph):
        # Sparse (num_graphs x num_nodes) pooling matrix: mean or sum per graph.
        start_idx = [0]
        for (i, graph) in enumerate(batch_graph):
            start_idx.append((start_idx[i] + len(graph.g)))
        idx = []
        elem = []
        for (i, graph) in enumerate(batch_graph):
            if (self.graph_pooling_type == 'average'):
                elem.extend(([(1.0 / len(graph.g))] * len(graph.g)))
            else:
                elem.extend(([1] * len(graph.g)))
            idx.extend([[i, j] for j in range(start_idx[i], start_idx[(i + 1)], 1)])
        elem = tf.constant(elem)
        graph_pool = tf.SparseTensor(indices=idx, values=elem, dense_shape=[len(batch_graph), start_idx[(- 1)]])
        graph_pool = tf.cast(graph_pool, tf.float32)
        return graph_pool

    def maxpool(self, h, padded_neighbor_list):
        # Fix: original referenced undefined names `tf_output`/`tf_dummy`,
        # raising NameError whenever max pooling was selected.
        # The dummy row (per-feature minimum) absorbs the -1 padding indices,
        # which wrap to the last (dummy) row under numpy-style indexing.
        dummy = tf.reduce_min(h, axis=0, keepdims=True)
        h_with_dummy = tf.concat([h, dummy], 0)
        # NOTE(review): numpy-style fancy indexing on a tf tensor — confirm the
        # deployed TF version supports it (tf.gather is the usual spelling but
        # does not wrap negative indices).
        pooled_rep = tf.reduce_max(h_with_dummy[padded_neighbor_list], axis=1)
        return pooled_rep

    def next_layer_eps(self, h, layer, padded_neighbor_list=None, Adj_block=None):
        # Aggregate neighbors, then add (1 + eps) * the center representation.
        # Fix: h2 was only defined on the non-max path but used on both.
        h2 = tf.cast(h, tf.float32)
        if (self.neighbor_pooling_type == 'max'):
            pooled = self.maxpool(h, padded_neighbor_list)
        else:
            pooled = tf.sparse.sparse_dense_matmul(Adj_block, h2)
            if (self.neighbor_pooling_type == 'average'):
                degree = tf.sparse.sparse_dense_matmul(Adj_block, tf.ones([Adj_block.shape[0], 1]))
                pooled = (pooled / degree)
        pooled = (pooled + ((1 + self.eps[layer]) * h2))
        pooled_rep = self.mlps[layer](pooled)
        h = self.batches[layer](pooled_rep)
        h = tf.nn.relu(h)
        h = tf.cast(h, tf.float32)
        return h

    def next_layer(self, h, layer, padded_neighbor_list=None, Adj_block=None):
        # Aggregate neighbors; center nodes are already included upstream
        # (self-loops or center-index padding when learn_eps is False).
        if (self.neighbor_pooling_type == 'max'):
            pooled = self.maxpool(h, padded_neighbor_list)
        else:
            pooled = tf.sparse.sparse_dense_matmul(Adj_block, h)
            if (self.neighbor_pooling_type == 'average'):
                degree = tf.sparse.sparse_dense_matmul(Adj_block, tf.ones([Adj_block.shape[0], 1]))
                pooled = (pooled / degree)
        pooled_rep = self.mlps[layer](pooled)
        h = self.batches[layer](pooled_rep)
        h = tf.nn.relu(h)
        return h

    def call(self, batch_graph):
        # Concatenate node features across the batch, run message passing,
        # then sum per-layer pooled readouts into the final scores.
        X_concat = tf.concat([graph.node_features for graph in batch_graph], 0)
        graph_pool = self.__preprocess_graphpool(batch_graph)
        if (self.neighbor_pooling_type == 'max'):
            padded_neighbor_list = self.__preprocess_neighbors_maxpool(batch_graph)
        else:
            Adj_block = self.__preprocess_neighbors_sumavepool(batch_graph)
        hidden_rep = [X_concat]
        h = X_concat
        for layer in range((self.num_layers - 1)):
            if ((self.neighbor_pooling_type == 'max') and self.learn_eps):
                h = self.next_layer_eps(h, layer, padded_neighbor_list=padded_neighbor_list)
            elif ((not (self.neighbor_pooling_type == 'max')) and self.learn_eps):
                h = self.next_layer_eps(h, layer, Adj_block=Adj_block)
            elif ((self.neighbor_pooling_type == 'max') and (not self.learn_eps)):
                h = self.next_layer(h, layer, padded_neighbor_list=padded_neighbor_list)
            elif ((not (self.neighbor_pooling_type == 'max')) and (not self.learn_eps)):
                h = self.next_layer(h, layer, Adj_block=Adj_block)
            hidden_rep.append(h)
        score_over_layer = 0
        for (layer, h) in enumerate(hidden_rep):
            h = tf.cast(h, tf.float32)
            pooled_h = tf.sparse.sparse_dense_matmul(graph_pool, h)
            linear_outcome = self.linears[layer](pooled_h)
            dropped_outcome = self.drops[layer](linear_outcome)
            score_over_layer += dropped_outcome
        return score_over_layer
def train(args, model, train_graphs, opt, epoch):
    """Run one training epoch of randomly sampled minibatches.

    Args:
        args: namespace with `iters_per_epoch` and `batch_size`.
        model: callable Keras model mapping a list of graphs to logits.
        train_graphs: list of graph objects, each with a `.label`.
        opt: tf.keras optimizer used to apply the gradients.
        epoch: current epoch number (progress-bar label only).

    Returns:
        Average loss over the epoch's iterations (scalar tensor).
    """
    total_iters = args.iters_per_epoch
    pbar = tqdm(range(total_iters), unit='batch')
    # BUG FIX: the original reset `loss_accum = 0` inside the loop, so the
    # reported "average" was just the final batch loss / total_iters.
    loss_accum = 0
    for pos in pbar:
        # Random minibatch without replacement within the batch.
        selected_idx = np.random.permutation(len(train_graphs))[:args.batch_size]
        batch_graph = [train_graphs[idx] for idx in selected_idx]
        labels = tf.constant([graph.label for graph in batch_graph])
        with tf.GradientTape() as tape:
            output = model(batch_graph)
            loss = loss_object(labels, output)
        gradients = tape.gradient(loss, model.trainable_variables)
        opt.apply_gradients(zip(gradients, model.trainable_variables))
        loss_accum += loss
        pbar.set_description(f'epoch: {epoch}')
    average_loss = (loss_accum / total_iters)
    print(f'loss training: {average_loss}')
    return average_loss
def pass_data_iteratively(model, graphs, minibatch_size=64):
    """Evaluate `model` over `graphs` in fixed-size chunks; concat the logits."""
    indices = np.arange(len(graphs))
    chunks = []
    for start in range(0, len(graphs), minibatch_size):
        batch_idx = indices[start:(start + minibatch_size)]
        if len(batch_idx) == 0:
            continue
        batch = [graphs[j] for j in batch_idx]
        chunks.append(model(batch))
    return tf.concat(chunks, 0)
def tf_check_acc(pred, labels):
    """Count how many predictions match the labels.

    Args:
        pred: tensor of predicted class ids (castable to int32).
        labels: int32 tensor of ground-truth class ids, same shape as pred.

    Returns:
        Python int: number of positions where pred == labels.
    """
    pred = tf.cast(pred, tf.int32)
    correct = tf.equal(pred, labels)
    # Vectorized count instead of the original per-element Python loop,
    # which forced a host round-trip on every element.
    return int(tf.reduce_sum(tf.cast(correct, tf.int32)))
def test(args, model, train_graphs, test_graphs, epoch):
    """Compute and print train/test accuracy of the current model."""
    def _accuracy(graphs):
        # Shared helper: fraction of graphs whose argmax logit matches the label.
        logits = pass_data_iteratively(model, graphs)
        predictions = tf.argmax(logits, 1)
        targets = tf.constant([g.label for g in graphs])
        n_correct = tf_check_acc(predictions, targets)
        return n_correct / float(len(graphs))

    acc_train = _accuracy(train_graphs)
    acc_test = _accuracy(test_graphs)
    print(('accuracy train: %f test: %f' % (acc_train, acc_test)))
    return (acc_train, acc_test)
def train(args, model, train_graphs, opt, epoch):
    """Run one training epoch of randomly sampled minibatches.

    NOTE: this re-defines the identical `train()` that appears earlier in the
    file; at import time this later definition wins.

    Args:
        args: namespace with `iters_per_epoch` and `batch_size`.
        model: callable Keras model mapping a list of graphs to logits.
        train_graphs: list of graph objects, each with a `.label`.
        opt: tf.keras optimizer used to apply the gradients.
        epoch: current epoch number (progress-bar label only).

    Returns:
        Average loss over the epoch's iterations (scalar tensor).
    """
    total_iters = args.iters_per_epoch
    pbar = tqdm(range(total_iters), unit='batch')
    # BUG FIX: the original reset `loss_accum = 0` inside the loop, so the
    # reported "average" was just the final batch loss / total_iters.
    loss_accum = 0
    for pos in pbar:
        selected_idx = np.random.permutation(len(train_graphs))[:args.batch_size]
        batch_graph = [train_graphs[idx] for idx in selected_idx]
        labels = tf.constant([graph.label for graph in batch_graph])
        with tf.GradientTape() as tape:
            output = model(batch_graph)
            loss = loss_object(labels, output)
        gradients = tape.gradient(loss, model.trainable_variables)
        opt.apply_gradients(zip(gradients, model.trainable_variables))
        loss_accum += loss
        pbar.set_description(f'epoch: {epoch}')
    average_loss = (loss_accum / total_iters)
    print(f'loss training: {average_loss}')
    return average_loss
def pass_data_iteratively(model, graphs, minibatch_size=64):
    """Evaluate `model` over `graphs` in chunks of `minibatch_size`.

    NOTE: duplicate of the identical function defined earlier in this file.
    """
    position = np.arange(len(graphs))
    collected = []
    for offset in range(0, len(graphs), minibatch_size):
        window = position[offset:(offset + minibatch_size)]
        if len(window) == 0:
            continue
        collected.append(model([graphs[j] for j in window]))
    return tf.concat(collected, 0)
def tf_check_acc(pred, labels):
    """Count how many predictions match the labels.

    NOTE: duplicate of the identical function defined earlier in this file.

    Args:
        pred: tensor of predicted class ids (castable to int32).
        labels: int32 tensor of ground-truth class ids, same shape as pred.

    Returns:
        Python int: number of positions where pred == labels.
    """
    pred = tf.cast(pred, tf.int32)
    correct = tf.equal(pred, labels)
    # Vectorized count instead of the original per-element Python loop.
    return int(tf.reduce_sum(tf.cast(correct, tf.int32)))
def test(args, model, train_graphs, test_graphs, epoch):
    """Compute and print train/test accuracy of the current model.

    NOTE: duplicate of the identical function defined earlier in this file.
    """
    results = []
    for graphs in (train_graphs, test_graphs):
        logits = pass_data_iteratively(model, graphs)
        predicted = tf.argmax(logits, 1)
        gold = tf.constant([g.label for g in graphs])
        hits = tf_check_acc(predicted, gold)
        results.append(hits / float(len(graphs)))
    (acc_train, acc_test) = results
    print(('accuracy train: %f test: %f' % (acc_train, acc_test)))
    return (acc_train, acc_test)
class GraphCNN(tf.keras.Model):
    """Graph Isomorphism Network (GIN) classifier over batches of graphs.

    Args:
        num_layers: number of layers INCLUDING the input layer.
        num_mlp_layers: layers in each per-layer MLP (EXCLUDING its input layer).
        hidden_dim: hidden width used at every layer.
        output_dim: number of prediction classes.
        final_dropout: dropout rate on the per-layer linear readouts.
        learn_eps: if True, learn an epsilon distinguishing a node from its
            neighbors; if False, aggregate node and neighbors together.
        graph_pooling_type: whole-graph node aggregation ('average' or sum).
        neighbor_pooling_type: neighbor aggregation ('average', 'max', or sum).
    """

    def __init__(self, num_layers, num_mlp_layers, hidden_dim, output_dim,
                 final_dropout, learn_eps, graph_pooling_type,
                 neighbor_pooling_type):
        super(GraphCNN, self).__init__()
        self.final_dropout = final_dropout
        self.num_layers = num_layers
        self.graph_pooling_type = graph_pooling_type
        self.neighbor_pooling_type = neighbor_pooling_type
        self.learn_eps = learn_eps
        # One learnable eps per message-passing layer (used when learn_eps).
        self.eps = tf.Variable(tf.zeros((self.num_layers - 1)))
        self.mlps = []
        self.batches = []
        self.linears = []  # one readout head per hidden representation
        self.drops = []
        for layer in range(self.num_layers - 1):
            self.mlps.append(MLP(num_mlp_layers, hidden_dim, hidden_dim))
            self.batches.append(tf.keras.layers.BatchNormalization())
            self.linears.append(tf.keras.layers.Dense(output_dim))
            self.drops.append(tf.keras.layers.Dropout(final_dropout))
        # Extra readout head for the raw input representation.
        self.linears.append(tf.keras.layers.Dense(output_dim))
        self.drops.append(tf.keras.layers.Dropout(final_dropout))

    def __preprocess_neighbors_maxpool(self, batch_graph):
        # Pad every node's batch-offset neighbor list to the max degree with -1.
        max_deg = max([graph.max_neighbor for graph in batch_graph])
        padded_neighbor_list = []
        start_idx = [0]
        for (i, graph) in enumerate(batch_graph):
            start_idx.append(start_idx[i] + len(graph.g))
            padded_neighbors = []
            for j in range(len(graph.neighbors)):
                pad = [(n + start_idx[i]) for n in graph.neighbors[j]]
                pad.extend([-1] * (max_deg - len(pad)))
                if not self.learn_eps:
                    # Without learned eps the node aggregates itself too.
                    pad.append(j + start_idx[i])
                padded_neighbors.append(pad)
            padded_neighbor_list.extend(padded_neighbors)
        return tf.constant(padded_neighbor_list)

    def __preprocess_neighbors_sumavepool(self, batch_graph):
        # Block-diagonal sparse adjacency over the whole batch.
        edge_mat_list = []
        start_idx = [0]
        for (i, graph) in enumerate(batch_graph):
            start_idx.append(start_idx[i] + len(graph.g))
            edge_mat_list.append(graph.edge_mat + start_idx[i])
        Adj_block_idx = tf.concat(edge_mat_list, 1)
        Adj_block_elem = tf.ones(Adj_block_idx.shape[1])
        if not self.learn_eps:
            # Add self-loops so each node aggregates itself.
            num_node = start_idx[-1]
            # FIX: materialize ranges as lists; tf.constant does not accept
            # bare range objects reliably.
            self_loop_edge = tf.constant([list(range(num_node)), list(range(num_node))])
            elem = tf.ones(num_node)
            Adj_block_idx = tf.concat([Adj_block_idx, self_loop_edge], 1)
            Adj_block_elem = tf.concat([Adj_block_elem, elem], 0)
        Adj_block_idx = tf.cast(tf.transpose(Adj_block_idx), tf.int64)
        return tf.SparseTensor(indices=Adj_block_idx, values=Adj_block_elem,
                               dense_shape=[start_idx[-1], start_idx[-1]])

    def __preprocess_graphpool(self, batch_graph):
        # Sparse (num_graphs x total_nodes) pooling matrix: mean or sum per graph.
        start_idx = [0]
        for (i, graph) in enumerate(batch_graph):
            start_idx.append(start_idx[i] + len(graph.g))
        idx = []
        elem = []
        for (i, graph) in enumerate(batch_graph):
            if self.graph_pooling_type == 'average':
                elem.extend([1.0 / len(graph.g)] * len(graph.g))
            else:
                elem.extend([1] * len(graph.g))
            idx.extend([[i, j] for j in range(start_idx[i], start_idx[i + 1], 1)])
        elem = tf.constant(elem)
        graph_pool = tf.SparseTensor(indices=idx, values=elem,
                                     dense_shape=[len(batch_graph), start_idx[-1]])
        return tf.cast(graph_pool, tf.float32)

    def maxpool(self, h, padded_neighbor_list):
        """Element-wise max over each node's padded neighbor rows.

        BUG FIX: the original referenced undefined names `tf_output` and
        `tf_dummy` (NameError at runtime) and used NumPy-style fancy indexing,
        which tf.Tensor does not support. Mirror the reference implementation:
        append a per-feature-minimum dummy row and remap the -1 padding onto
        it via tf.gather.
        """
        dummy = tf.reduce_min(h, axis=0, keepdims=True)
        h_with_dummy = tf.concat([h, dummy], 0)
        num_nodes = tf.shape(h)[0]
        gather_idx = tf.where(padded_neighbor_list < 0,
                              tf.fill(tf.shape(padded_neighbor_list), num_nodes),
                              padded_neighbor_list)
        return tf.reduce_max(tf.gather(h_with_dummy, gather_idx), axis=1)

    def next_layer_eps(self, h, layer, padded_neighbor_list=None, Adj_block=None):
        # Aggregate neighbors, then add (1 + eps) * self before the MLP.
        # BUG FIX: the original only defined `h2` on the non-max branch but
        # used it unconditionally afterwards (NameError for max pooling).
        h2 = tf.cast(h, tf.float32)
        if self.neighbor_pooling_type == 'max':
            pooled = self.maxpool(h2, padded_neighbor_list)
        else:
            pooled = tf.sparse.sparse_dense_matmul(Adj_block, h2)
            if self.neighbor_pooling_type == 'average':
                degree = tf.sparse.sparse_dense_matmul(
                    Adj_block, tf.ones([Adj_block.shape[0], 1]))
                pooled = pooled / degree
        pooled = pooled + ((1 + self.eps[layer]) * h2)
        pooled_rep = self.mlps[layer](pooled)
        h = self.batches[layer](pooled_rep)
        h = tf.nn.relu(h)
        return tf.cast(h, tf.float32)

    def next_layer(self, h, layer, padded_neighbor_list=None, Adj_block=None):
        # Aggregate node + neighbors together (self-loops / self in pad list).
        if self.neighbor_pooling_type == 'max':
            pooled = self.maxpool(h, padded_neighbor_list)
        else:
            pooled = tf.sparse.sparse_dense_matmul(Adj_block, h)
            if self.neighbor_pooling_type == 'average':
                degree = tf.sparse.sparse_dense_matmul(
                    Adj_block, tf.ones([Adj_block.shape[0], 1]))
                pooled = pooled / degree
        pooled_rep = self.mlps[layer](pooled)
        h = self.batches[layer](pooled_rep)
        return tf.nn.relu(h)

    def call(self, batch_graph):
        """Forward pass over a list of graph objects; returns class scores."""
        X_concat = tf.concat([graph.node_features for graph in batch_graph], 0)
        graph_pool = self.__preprocess_graphpool(batch_graph)
        if self.neighbor_pooling_type == 'max':
            padded_neighbor_list = self.__preprocess_neighbors_maxpool(batch_graph)
        else:
            Adj_block = self.__preprocess_neighbors_sumavepool(batch_graph)
        hidden_rep = [X_concat]
        h = X_concat
        for layer in range(self.num_layers - 1):
            if (self.neighbor_pooling_type == 'max') and self.learn_eps:
                h = self.next_layer_eps(h, layer, padded_neighbor_list=padded_neighbor_list)
            elif (not (self.neighbor_pooling_type == 'max')) and self.learn_eps:
                h = self.next_layer_eps(h, layer, Adj_block=Adj_block)
            elif (self.neighbor_pooling_type == 'max') and (not self.learn_eps):
                h = self.next_layer(h, layer, padded_neighbor_list=padded_neighbor_list)
            else:
                h = self.next_layer(h, layer, Adj_block=Adj_block)
            hidden_rep.append(h)
        # Jumping-knowledge readout: sum dropout(linear(pool(h))) over layers.
        score_over_layer = 0
        for (layer, h) in enumerate(hidden_rep):
            h = tf.cast(h, tf.float32)
            pooled_h = tf.sparse.sparse_dense_matmul(graph_pool, h)
            score_over_layer += self.drops[layer](self.linears[layer](pooled_h))
        return score_over_layer
class MLP(tf.keras.layers.Layer):
    """Multi-layer perceptron; collapses to one Dense layer when num_layers == 1.

    Args:
        num_layers: layers EXCLUDING the input layer; 1 means a linear model.
        hidden_dim: hidden width for every intermediate layer.
        output_dim: output dimension (number of classes).
    """

    def __init__(self, num_layers, hidden_dim, output_dim):
        super(MLP, self).__init__()
        self.linear_or_not = True
        self.num_layers = num_layers
        if num_layers < 1:
            raise ValueError('number of layers should be positive!')
        if num_layers == 1:
            self.linear = Linear_model(output_dim=output_dim)
        else:
            self.linear_or_not = False
            self.multi = Multi_model(layers=num_layers, hidden_dim=hidden_dim,
                                     output_dim=output_dim)

    def call(self, input_features):
        branch = self.linear if self.linear_or_not else self.multi
        return branch(input_features)
class Linear_model(tf.keras.layers.Layer):
    """A single fully-connected (Dense) projection."""

    def __init__(self, output_dim):
        super(Linear_model, self).__init__()
        self.output_layer = tf.keras.layers.Dense(units=output_dim)

    def call(self, input_features):
        projected = self.output_layer(input_features)
        return projected
class Multi_model(tf.keras.layers.Layer):
    """Stack of Dense+BatchNorm+ReLU blocks with a final Dense projection."""

    def __init__(self, layers, hidden_dim, output_dim):
        super(Multi_model, self).__init__()
        self.layers = layers
        self.dense_list = []
        self.batch_list = []
        for _ in range(layers - 1):
            self.dense_list.append(tf.keras.layers.Dense(units=hidden_dim))
            self.batch_list.append(tf.keras.layers.BatchNormalization())
        # Final projection has no batch-norm / activation.
        self.dense_list.append(tf.keras.layers.Dense(units=output_dim))

    def call(self, input_features):
        hidden = input_features
        for dense, bn in zip(self.dense_list[:-1], self.batch_list):
            hidden = tf.nn.relu(bn(dense(hidden)))
        return self.dense_list[-1](hidden)
class S2VGraph(object):
    """Container for a single graph sample.

    Args:
        g: a networkx graph.
        label: integer graph label.
        node_tags: list of integer node tags.
        node_features: accepted for API compatibility but not stored here;
            `self.node_features` is filled in later by load_data().
    """

    def __init__(self, g, label, node_tags=None, node_features=None):
        self.label = label
        self.g = g
        self.node_tags = node_tags
        # The fields below are populated later by load_data():
        self.neighbors = []     # adjacency lists without self-loops
        self.node_features = 0  # one-hot tag feature tensor once built
        self.edge_mat = 0       # 2 x num_edges edge-index tensor once built
        self.max_neighbor = 0   # maximum node degree in the graph
def load_data(dataset, degree_as_tag):
    """Load graphs from 'dataset/<name>/<name>.txt' in the GIN text format.

    File format: first line is the number of graphs; each graph starts with
    "<num_nodes> <label>", followed by one line per node:
    "<tag> <num_neighbors> <neighbor ids...> [optional float attributes]".

    Args:
        dataset: dataset name (directory and file stem under 'dataset/').
        degree_as_tag: if True, replace node tags with node degrees.

    Returns:
        (list of S2VGraph, number of distinct labels).
    """
    print('loading data')
    g_list = []
    label_dict = {}  # raw label -> contiguous class index
    feat_dict = {}   # raw node tag -> contiguous tag index
    triggered = False
    with open(('dataset/%s/%s.txt' % (dataset, dataset)), 'r') as f:
        n_g = int(f.readline().strip())
        for i in range(n_g):
            row = f.readline().strip().split()
            (n, l) = [int(w) for w in row]
            if (not (l in label_dict)):
                mapped = len(label_dict)
                label_dict[l] = mapped
            g = nx.Graph()
            node_tags = []
            node_features = []
            n_edges = 0
            for j in range(n):
                g.add_node(j)
                row = f.readline().strip().split()
                # tmp = index just past the neighbor list; extra columns (if
                # any) are float node attributes.
                tmp = (int(row[1]) + 2)
                if (tmp == len(row)):
                    row = [int(w) for w in row]
                    attr = None
                else:
                    (row, attr) = ([int(w) for w in row[:tmp]], np.array([float(w) for w in row[tmp:]]))
                    triggered = True
                if triggered:
                    print('Triggered already')
                if (not (row[0] in feat_dict)):
                    mapped = len(feat_dict)
                    feat_dict[row[0]] = mapped
                node_tags.append(feat_dict[row[0]])
                # NOTE(review): after the reassignment above len(row) == tmp,
                # so this condition looks always-False and node_features stays
                # empty — kept as-is (matches the reference implementation);
                # confirm before relying on attribute features.
                if (tmp > len(row)):
                    node_features.append(attr)
                n_edges += row[1]
                for k in range(2, len(row)):
                    g.add_edge(j, row[k])
            if (node_features != []):
                node_features = np.stack(node_features)
                node_feature_flag = True
            else:
                node_features = None
                node_feature_flag = False
            assert (len(g) == n)
            g_list.append(S2VGraph(g, l, node_tags))
    # Second pass: derive adjacency lists, degrees, and edge matrices.
    for g in g_list:
        g.neighbors = [[] for i in range(len(g.g))]
        for (i, j) in g.g.edges():
            g.neighbors[i].append(j)
            g.neighbors[j].append(i)
        degree_list = []
        for i in range(len(g.g)):
            # No-op kept from the reference code (it reassigns in place).
            g.neighbors[i] = g.neighbors[i]
            degree_list.append(len(g.neighbors[i]))
        g.max_neighbor = max(degree_list)
        g.label = label_dict[g.label]
        # Store both edge directions for the sparse adjacency build.
        edges = [list(pair) for pair in g.g.edges()]
        edges.extend([[i, j] for (j, i) in edges])
        deg_list = list(dict(g.g.degree(range(len(g.g)))).values())
        g.edge_mat = tf.transpose(tf.constant(edges))
    if degree_as_tag:
        for g in g_list:
            g.node_tags = list(dict(g.g.degree).values())
    # Union of all tags across graphs -> one-hot feature dimension.
    tagset = set([])
    for g in g_list:
        tagset = tagset.union(set(g.node_tags))
    tagset = list(tagset)
    tag2index = {tagset[i]: i for i in range(len(tagset))}
    for g in g_list:
        node_features = np.zeros((len(g.node_tags), len(tagset)))
        node_features[(range(len(g.node_tags)), [tag2index[tag] for tag in g.node_tags])] = 1
        g.node_features = tf.constant(node_features)
    print(('# classes: %d' % len(label_dict)))
    print(('# maximum node tag: %d' % len(tagset)))
    print(('# data: %d' % len(g_list)))
    return (g_list, len(label_dict))
def separate_data(graph_list, seed, fold_idx):
    """Split graphs into train/test using fold `fold_idx` of stratified 10-fold CV."""
    assert ((0 <= fold_idx) and (fold_idx < 10)), 'fold_idx must be from 0 to 9.'
    splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
    labels = [graph.label for graph in graph_list]
    folds = list(splitter.split(np.zeros(len(labels)), labels))
    (train_idx, test_idx) = folds[fold_idx]
    train_graph_list = [graph_list[i] for i in train_idx]
    test_graph_list = [graph_list[i] for i in test_idx]
    return (train_graph_list, test_graph_list)
def remap_module(module_type, k, v):
    """Rename a timm state-dict key to the export naming scheme.

    Args:
        module_type: class name of the parameter's parent module.
        k: original state-dict key.
        v: parameter value (passed through unchanged).

    Returns:
        Tuple of (possibly rewritten key, unchanged value).
    """
    # Per-parent-type (old prefix, new prefix) substitutions. Table-driven
    # instead of the original if/elif chain, which also contained a second,
    # unreachable 'EdgeResidual' branch (dead code with a conflicting
    # bn3->bn_pwl mapping) — removed here.
    rules = {
        'ConvBnAct': [('bn1.', 'bn.')],
        'InvertedResidual': [('conv_pw.', 'conv_exp.'), ('bn1.', 'bn_exp.'),
                             ('bn2.', 'bn_dw.'), ('bn3.', 'bn_pwl.')],
        'EdgeResidual': [('bn1.', 'bn_exp.'), ('bn2.', 'bn_pwl.')],
        'DepthwiseSeparableConv': [('bn1.', 'bn_dw.'), ('bn2.', 'bn_pw.')],
        'SqueezeExcite': [('conv_reduce.', 'reduce.'),
                          ('conv_expand.', 'expand.')],
        'EfficientNet': [('conv_stem.', 'stem.conv.'), ('bn1.', 'stem.bn.'),
                         ('conv_head.', 'head.conv_pw.'), ('bn2.', 'head.bn.'),
                         ('classifier.', 'head.classifier.')],
    }
    # MobileNetV3 uses exactly the same stem/head renames as EfficientNet.
    rules['MobileNetV3'] = rules['EfficientNet']
    for old, new in rules.get(module_type, ()):
        k = k.replace(old, new)
    return (k, v)
def export_model(model_name, output_dir=''):
    """Export a pretrained timm model's weights to a content-hashed .npz file.

    Args:
        model_name: model name; a leading 'pt_' prefix is stripped before the
            timm lookup but kept in the output filename.
        output_dir: existing directory for the output; defaults to './'.
    """
    timm_model_name = model_name.replace('pt_', '')
    m = timm.create_model(timm_model_name, pretrained=True)
    d = dict(m.named_modules())
    data = {}
    names = []
    types = []
    for (k, v) in m.state_dict().items():
        # Skip BatchNorm bookkeeping tensors.
        if ('num_batches' in k):
            continue
        k_split = k.split('.')
        layer_name = '.'.join(k_split[:(- 1)])
        parent_name = '.'.join(k_split[:(- 2)])
        parent_module = d[parent_name]
        parent_type = type(parent_module).__name__
        if ('MixedConv' in parent_type):
            # For MixedConv the naming-relevant parent is one level higher.
            parent_name = '.'.join(k_split[:(- 3)])
            parent_module = d[parent_name]
            parent_type = type(parent_module).__name__
        (k, v) = remap_module(parent_type, k, v)
        type_str = ''
        if (layer_name in d):
            type_str = type(d[layer_name]).__name__
            # Normalize timm's same-padding conv to plain Conv2d.
            if (type_str == 'Conv2dSame'):
                type_str = 'Conv2d'
        types.append(type_str)
        print(k, type_str, v.shape)
        # Tensors are stored under stringified running indices; the parallel
        # 'names' array carries the remapped keys.
        data[str(len(data))] = v.numpy()
        names.append(k)
    # Write to a temp file first so the final name can embed the SHA-256.
    tempf = tempfile.NamedTemporaryFile(delete=False, dir='./')
    np.savez(tempf, names=np.array(names), types=types, **data)
    tempf.close()
    with open(tempf.name, 'rb') as f:
        sha_hash = hashlib.sha256(f.read()).hexdigest()
    if output_dir:
        assert os.path.isdir(output_dir)
    else:
        output_dir = './'
    final_filename = ('-'.join([model_name, sha_hash[:8]]) + '.npz')
    shutil.move(tempf.name, os.path.join(output_dir, final_filename))
def main():
    """CLI entry point: export one model, or every pretrained model for 'all'."""
    cli_args = parser.parse_args()
    all_models = list_models(pretrained=True)
    if cli_args.model != 'all':
        export_model(cli_args.model, cli_args.output)
    else:
        for model_name in all_models:
            export_model(model_name, cli_args.output)
def _parse_ksize(ss): if ss.isdigit(): return int(ss) else: return [int(k) for k in ss.split('.')]
def _decode_block_str(block_str): " Decode block definition string\n\n Gets a list of block arg (dicts) through a string notation of arguments.\n E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip\n\n All args can exist in any order with the exception of the leading string which\n is assumed to indicate the block type.\n\n leading string - block type (\n ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct)\n r - number of repeat blocks,\n k - kernel size,\n s - strides (1-9),\n e - expansion ratio,\n c - output channels,\n se - squeeze/excitation ratio\n n - activation fn ('re', 'r6', 'hs', or 'sw')\n Args:\n block_str: a string representation of block arguments.\n Returns:\n A list of block args (dicts)\n Raises:\n ValueError: if the string def not properly specified (TODO)\n " assert isinstance(block_str, str) ops = block_str.split('_') block_type = ops[0] ops = ops[1:] options = {} noskip = False for op in ops: if (op == 'noskip'): noskip = True elif op.startswith('n'): key = op[0] v = op[1:] if (v == 're'): value = 'relu' elif (v == 'r6'): value = 'relu6' elif (v == 'hs'): value = 'hard_silu' elif (v == 'sw'): value = 'swish' else: continue options[key] = value else: splits = re.split('(\\d.*)', op) if (len(splits) >= 2): (key, value) = splits[:2] options[key] = value act_fn = (options['n'] if ('n' in options) else None) exp_kernel_size = (_parse_ksize(options['a']) if ('a' in options) else 1) pw_kernel_size = (_parse_ksize(options['p']) if ('p' in options) else 1) fake_in_chs = (int(options['fc']) if ('fc' in options) else 0) se_ratio = (float(options['se']) if ('se' in options) else 0.0) num_repeat = int(options['r']) if (block_type == 'ir'): block_args = dict(dw_kernel_size=_parse_ksize(options['k']), exp_kernel_size=exp_kernel_size, pw_kernel_size=pw_kernel_size, out_chs=int(options['c']), exp_ratio=float(options['e']), se_ratio=se_ratio, stride=int(options['s']), act_fn=act_fn, noskip=noskip) elif ((block_type == 'ds') or 
(block_type == 'dsa')): block_args = dict(dw_kernel_size=_parse_ksize(options['k']), pw_kernel_size=pw_kernel_size, out_chs=int(options['c']), se_ratio=se_ratio, stride=int(options['s']), act_fn=act_fn, pw_act=(block_type == 'dsa'), noskip=((block_type == 'dsa') or noskip)) elif (block_type == 'er'): block_args = dict(exp_kernel_size=_parse_ksize(options['k']), pw_kernel_size=pw_kernel_size, out_chs=int(options['c']), exp_ratio=float(options['e']), fake_in_chs=fake_in_chs, se_ratio=se_ratio, stride=int(options['s']), act_fn=act_fn, noskip=noskip) elif (block_type == 'cn'): block_args = dict(kernel_size=int(options['k']), out_chs=int(options['c']), stride=int(options['s']), act_fn=act_fn) else: assert False, ('Unknown block type (%s)' % block_type) return (block_type, block_args, num_repeat)
def _scale_stage_depth(stage_defs, repeats, depth_multiplier=1.0, depth_trunc='ceil'): ' Per-stage depth scaling\n Scales the block repeats in each stage. This depth scaling impl maintains\n compatibility with the EfficientNet scaling method, while allowing sensible\n scaling for other models that may have multiple block arg definitions in each stage.\n ' num_repeat = sum(repeats) if (depth_trunc == 'round'): num_repeat_scaled = max(1, round((num_repeat * depth_multiplier))) else: num_repeat_scaled = int(math.ceil((num_repeat * depth_multiplier))) repeats_scaled = [] for r in repeats[::(- 1)]: rs = max(1, round(((r / num_repeat) * num_repeat_scaled))) repeats_scaled.append(rs) num_repeat -= r num_repeat_scaled -= rs repeats_scaled = repeats_scaled[::(- 1)] defs_scaled = [] for (d, rep) in zip(stage_defs, repeats_scaled): defs_scaled.extend([deepcopy(d) for _ in range(rep)]) return defs_scaled
def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False):
    """Decode per-stage lists of block strings into depth-scaled block args.

    Args:
        arch_def: list of stages; each stage is a list of block strings.
        depth_multiplier: repeat-count multiplier applied per stage.
        depth_trunc: 'ceil' or 'round' truncation for the scaled total.
        experts_multiplier: accepted for API compatibility (unused here).
        fix_first_last: keep multiplier 1.0 for the first and last stage.

    Returns:
        List (per stage) of lists of (block_type, block_args) entries.
    """
    arch_args = []
    last_stage = len(arch_def) - 1
    for (stage_idx, block_strings) in enumerate(arch_def):
        assert isinstance(block_strings, list)
        stage_args = []
        repeats = []
        for block_str in block_strings:
            assert isinstance(block_str, str)
            (block_type, block_args, repeat) = _decode_block_str(block_str)
            stage_args.append((block_type, block_args))
            repeats.append(repeat)
        pinned = fix_first_last and (stage_idx == 0 or stage_idx == last_stage)
        multiplier = 1.0 if pinned else depth_multiplier
        arch_args.append(_scale_stage_depth(stage_args, repeats, multiplier, depth_trunc))
    return arch_args
def arch_mnasnet_a1(variant, feat_multiplier=1.0, **kwargs):
    """Build model kwargs for a MnasNet-A1.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
    Paper: https://arxiv.org/pdf/1807.11626.pdf

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_noskip'],
        ['ir_r2_k3_s2_e6_c24'],
        ['ir_r3_k5_s2_e3_c40_se0.25'],
        ['ir_r4_k3_s2_e6_c80'],
        ['ir_r2_k3_s1_e6_c112_se0.25'],
        ['ir_r3_k5_s2_e6_c160_se0.25'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def),
        stem_size=32,
        feat_multiplier=feat_multiplier,
        **kwargs,
    )
def arch_mnasnet_b1(variant, feat_multiplier=1.0, **kwargs):
    """Build model kwargs for a MnasNet-B1.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
    Paper: https://arxiv.org/pdf/1807.11626.pdf

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
    """
    arch_def = [
        ['ds_r1_k3_s1_c16_noskip'],
        ['ir_r3_k3_s2_e3_c24'],
        ['ir_r3_k5_s2_e3_c40'],
        ['ir_r3_k5_s2_e6_c80'],
        ['ir_r2_k3_s1_e6_c96'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320_noskip'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def),
        stem_size=32,
        feat_multiplier=feat_multiplier,
        **kwargs,
    )
def arch_mnasnet_small(variant, feat_multiplier=1.0, **kwargs):
    """Build model kwargs for a MnasNet-Small.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
    Paper: https://arxiv.org/pdf/1807.11626.pdf

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
    """
    arch_def = [
        ['ds_r1_k3_s1_c8'],
        ['ir_r1_k3_s2_e3_c16'],
        ['ir_r2_k3_s2_e6_c16'],
        ['ir_r4_k5_s2_e6_c32_se0.25'],
        ['ir_r3_k3_s1_e6_c32_se0.25'],
        ['ir_r3_k5_s2_e6_c88_se0.25'],
        ['ir_r1_k3_s1_e6_c144'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def),
        stem_size=8,
        feat_multiplier=feat_multiplier,
        **kwargs,
    )
def arch_mobilenet_v2(variant, feat_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, **kwargs):
    """Build model kwargs for a MobileNet-V2.

    Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py
    Paper: https://arxiv.org/abs/1801.04381

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
        depth_multiplier: per-stage repeat multiplier.
        fix_stem_head: keep stem/head widths fixed regardless of multiplier.
    """
    arch_def = [
        ['ds_r1_k3_s1_c16'],
        ['ir_r2_k3_s2_e6_c24'],
        ['ir_r3_k3_s2_e6_c32'],
        ['ir_r4_k3_s2_e6_c64'],
        ['ir_r3_k3_s1_e6_c96'],
        ['ir_r3_k3_s2_e6_c160'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    head_features = 1280 if fix_stem_head else round_features(1280, feat_multiplier, 8, None)
    return dict(
        block_defs=decode_arch_def(arch_def, depth_multiplier=depth_multiplier,
                                   fix_first_last=fix_stem_head),
        num_features=head_features,
        stem_size=32,
        fix_stem=fix_stem_head,
        feat_multiplier=feat_multiplier,
        act_fn=kwargs.pop('act_fn', 'relu6'),
        **kwargs,
    )
def arch_fbnetc(variant, feat_multiplier=1.0, **kwargs):
    """Build model kwargs for FBNet-C.

    Paper: https://arxiv.org/abs/1812.03443
    Ref impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py
    (the 'C' variant itself was derived from the paper; the ref impl was used
    to confirm building-block details).

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
    """
    arch_def = [
        ['ir_r1_k3_s1_e1_c16'],
        ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'],
        ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'],
        ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'],
        ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'],
        ['ir_r4_k5_s2_e6_c184'],
        ['ir_r1_k3_s1_e6_c352'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def),
        stem_size=16,
        num_features=1984,
        feat_multiplier=feat_multiplier,
        **kwargs,
    )
def arch_spnasnet(variant, feat_multiplier=1.0, **kwargs):
    """Build model kwargs for Single-Path NAS (Pixel1-targeted search).

    Paper: https://arxiv.org/abs/1904.02877

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
    """
    arch_def = [
        ['ds_r1_k3_s1_c16_noskip'],
        ['ir_r3_k3_s2_e3_c24'],
        ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'],
        ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'],
        ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320_noskip'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def),
        stem_size=32,
        feat_multiplier=feat_multiplier,
        **kwargs,
    )
def arch_efficientnet(variant, feat_multiplier=1.0, depth_multiplier=1.0, **kwargs):
    """Build model kwargs for an EfficientNet.

    Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
    Paper: https://arxiv.org/abs/1905.11946

    EfficientNet params — name: (feat_multiplier, depth_multiplier, resolution, dropout):
        b0 (1.0, 1.0, 224, 0.2)   b1 (1.0, 1.1, 240, 0.2)
        b2 (1.1, 1.2, 260, 0.3)   b3 (1.2, 1.4, 300, 0.3)
        b4 (1.4, 1.8, 380, 0.4)   b5 (1.6, 2.2, 456, 0.4)
        b6 (1.8, 2.6, 528, 0.5)   b7 (2.0, 3.1, 600, 0.5)
        b8 (2.2, 3.6, 672, 0.5)   l2 (4.3, 5.3, 800, 0.5)

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
        depth_multiplier: per-stage repeat multiplier.
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_se0.25'],
        ['ir_r2_k3_s2_e6_c24_se0.25'],
        ['ir_r2_k5_s2_e6_c40_se0.25'],
        ['ir_r3_k3_s2_e6_c80_se0.25'],
        ['ir_r3_k5_s1_e6_c112_se0.25'],
        ['ir_r4_k5_s2_e6_c192_se0.25'],
        ['ir_r1_k3_s1_e6_c320_se0.25'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def, depth_multiplier),
        num_features=round_features(1280, feat_multiplier, 8, None),
        stem_size=32,
        feat_multiplier=feat_multiplier,
        act_fn=kwargs.pop('act_fn', 'swish'),
        **kwargs,
    )
def arch_efficientnet_edge(variant, feat_multiplier=1.0, depth_multiplier=1.0, **kwargs):
    """Build model kwargs for an EfficientNet-EdgeTPU.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
        depth_multiplier: per-stage repeat multiplier.
    """
    arch_def = [
        ['er_r1_k3_s1_e4_c24_fc24_noskip'],
        ['er_r2_k3_s2_e8_c32'],
        ['er_r4_k3_s2_e8_c48'],
        ['ir_r5_k5_s2_e8_c96'],
        ['ir_r4_k5_s1_e8_c144'],
        ['ir_r2_k5_s2_e8_c192'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def, depth_multiplier),
        num_features=round_features(1280, feat_multiplier, 8, None),
        stem_size=32,
        feat_multiplier=feat_multiplier,
        act_fn=kwargs.pop('act_fn', 'relu'),
        **kwargs,
    )
def arch_efficientnet_lite(variant, feat_multiplier=1.0, depth_multiplier=1.0, **kwargs):
    """Build model kwargs for an EfficientNet-Lite.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite
    Paper: https://arxiv.org/abs/1905.11946

    EfficientNet-Lite params — name: (feat_multiplier, depth_multiplier, resolution, dropout):
        lite0 (1.0, 1.0, 224, 0.2)   lite1 (1.0, 1.1, 240, 0.2)
        lite2 (1.1, 1.2, 260, 0.3)   lite3 (1.2, 1.4, 280, 0.3)
        lite4 (1.4, 1.8, 300, 0.3)

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
        depth_multiplier: per-stage repeat multiplier.
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16'],
        ['ir_r2_k3_s2_e6_c24'],
        ['ir_r2_k5_s2_e6_c40'],
        ['ir_r3_k3_s2_e6_c80'],
        ['ir_r3_k5_s1_e6_c112'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True),
        num_features=1280,
        stem_size=32,
        fix_stem=True,
        feat_multiplier=feat_multiplier,
        act_fn=kwargs.pop('act_fn', 'relu6'),
        **kwargs,
    )
def arch_mixnet_s(variant, feat_multiplier=1.0, **kwargs):
    """Build model kwargs for a MixNet-Small.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16'],
        ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'],
        ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],
        ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'],
        ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],
        ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def),
        num_features=1536,
        stem_size=16,
        feat_multiplier=feat_multiplier,
        **kwargs,
    )
def arch_mixnet_m(variant, feat_multiplier=1.0, depth_multiplier=1.0, **kwargs):
    """Build model kwargs for a MixNet Medium/Large.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595

    Args:
        variant: variant name (unused here).
        feat_multiplier: channel-width multiplier.
        depth_multiplier: per-stage repeat multiplier ('round' truncation).
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c24'],
        ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'],
        ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],
        ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'],
        ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],
        ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],
    ]
    return dict(
        block_defs=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),
        num_features=1536,
        stem_size=24,
        feat_multiplier=feat_multiplier,
        **kwargs,
    )
def arch_mobilenet_v3(variant, feat_multiplier=1.0, **kwargs):
    """Build model kwargs for a MobileNet-V3 (small/large, optionally minimal).

    Paper: https://arxiv.org/abs/1905.02244

    Args:
        variant: variant name; 'small' selects the small topology, 'minimal'
            selects the relu-only variant without squeeze/excite.
        feat_multiplier: channel-width multiplier.
    """
    small = 'small' in variant
    minimal = 'minimal' in variant
    num_features = 1024 if small else 1280
    if minimal:
        act_fn = kwargs.pop('act_fn', 'relu')
    else:
        act_fn = kwargs.pop('act_fn', 'hard_swish')
    if small and minimal:
        arch_def = [
            ['ds_r1_k3_s2_e1_c16'],
            ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'],
            ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'],
            ['ir_r2_k3_s1_e3_c48'],
            ['ir_r3_k3_s2_e6_c96'],
            ['cn_r1_k1_s1_c576'],
        ]
    elif small:
        arch_def = [
            ['ds_r1_k3_s2_e1_c16_se0.25_nre'],
            ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'],
            ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'],
            ['ir_r2_k5_s1_e3_c48_se0.25'],
            ['ir_r3_k5_s2_e6_c96_se0.25'],
            ['cn_r1_k1_s1_c576'],
        ]
    elif minimal:
        arch_def = [
            ['ds_r1_k3_s1_e1_c16'],
            ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'],
            ['ir_r3_k3_s2_e3_c40'],
            ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'],
            ['ir_r2_k3_s1_e6_c112'],
            ['ir_r3_k3_s2_e6_c160'],
            ['cn_r1_k1_s1_c960'],
        ]
    else:
        arch_def = [
            ['ds_r1_k3_s1_e1_c16_nre'],
            ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'],
            ['ir_r3_k5_s2_e3_c40_se0.25_nre'],
            ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'],
            ['ir_r2_k3_s1_e6_c112_se0.25'],
            ['ir_r3_k5_s2_e6_c160_se0.25'],
            ['cn_r1_k1_s1_c960'],
        ]
    return dict(
        block_defs=decode_arch_def(arch_def),
        num_features=num_features,
        stem_size=16,
        feat_multiplier=feat_multiplier,
        act_fn=act_fn,
        se_cfg=dict(bound_act_fn='relu', gate_fn='hard_sigmoid',
                    reduce_from_block=False, divisor=8),
        efficient_head=True,
        **kwargs,
    )
def make_divisible(v, divisor=8, min_value=None):
    """Round ``v`` to the nearest multiple of ``divisor``.

    The result never falls below ``min_value`` (defaults to ``divisor``), and
    rounding down is not allowed to remove more than 10% of the original value.
    """
    if min_value is None:
        min_value = divisor
    rounded = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Ensure round-down does not go more than 10% below v.
    if rounded < 0.9 * v:
        rounded += divisor
    return rounded
def round_features(features, multiplier=1.0, divisor=8, feat_min=None):
    """Scale a channel count by ``multiplier`` and round it to ``divisor``."""
    if not multiplier:
        # Zero/None multiplier means "leave the channel count untouched".
        return features
    return make_divisible(features * multiplier, divisor, feat_min)
def _log_info_if(msg, condition):
    """Emit an info-level log line only when ``condition`` is truthy."""
    if not condition:
        return
    _logger.info(msg)
class EfficientNetBuilder():
    """ Build Trunk Blocks

    This ended up being somewhat of a cross between
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py
    and
    https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py

    """

    def __init__(self, in_chs, block_defs, block_factory, feat_multiplier=1.0, feat_divisor=8,
                 feat_min=None, output_stride=32, pad_type='', conv_layer=None, norm_layer=None,
                 se_layer=None, act_fn=None, drop_path_rate=0.0, feature_location='', verbose=False):
        assert (output_stride in (32, 16, 8, 4, 2))
        self.in_chs = in_chs  # running channel count; updated after each built block
        self.block_defs = block_defs
        self.block_factory = block_factory
        self.feat_multiplier = feat_multiplier
        self.feat_divisor = feat_divisor
        self.feat_min = feat_min
        self.output_stride = output_stride
        self.act_fn = act_fn  # fallback activation when a block def does not specify one
        self.drop_path_rate = drop_path_rate
        # Args passed to every block regardless of block type.
        self.default_args = dict(
            pad_type=pad_type, conv_layer=conv_layer, norm_layer=norm_layer, se_layer=se_layer)
        self.feature_location = feature_location
        assert (feature_location in ('bottleneck', 'expansion', ''))
        self.verbose = verbose
        self.features = []

    def _round_channels(self, chs):
        # Apply width multiplier and round to the configured divisor / minimum.
        return round_features(chs, self.feat_multiplier, self.feat_divisor, self.feat_min)

    def _make_block(self, block_type, block_args, stage_idx, block_idx, flat_idx, block_count):
        """Instantiate one block from its decoded args via the block factory."""
        # Drop-path rate increases linearly with flat block index.
        drop_path_rate = ((self.drop_path_rate * flat_idx) / block_count)
        act_fn = (block_args['act_fn'] if (block_args['act_fn'] is not None) else self.act_fn)
        act_fn = self.block_factory.get_act_fn(act_fn)
        ba_overlay = dict(
            in_chs=self.in_chs, out_chs=self._round_channels(block_args['out_chs']),
            act_fn=act_fn, drop_path_rate=drop_path_rate, **self.default_args)
        block_args.update(ba_overlay)
        if (('fake_in_chs' in block_args) and block_args['fake_in_chs']):
            # fake_in_chs is scaled by the multiplier like a real channel count.
            block_args['fake_in_chs'] = self._round_channels(block_args['fake_in_chs'])
        assert (block_args['act_fn'] is not None)
        _log_info_if(f' {block_type.upper()} {block_idx}, Args: {str(block_args)}', self.verbose)
        if (block_type == 'ir'):
            block = self.block_factory.InvertedResidual(stage_idx, block_idx, **block_args)
        elif ((block_type == 'ds') or (block_type == 'dsa')):
            block = self.block_factory.DepthwiseSeparable(stage_idx, block_idx, **block_args)
        elif (block_type == 'er'):
            block = self.block_factory.EdgeResidual(stage_idx, block_idx, **block_args)
        elif (block_type == 'cn'):
            block = self.block_factory.ConvBnAct(stage_idx, block_idx, **block_args)
        else:
            # FIX: error message previously read 'Uknkown'.
            assert False, ('Unknown block type (%s) while building model.' % block_type)
        self.in_chs = block_args['out_chs']
        return block

    def __call__(self):
        """ Build the blocks
        Return:
             List of stages (each stage being a list of blocks)
        """
        _log_info_if(('Building model trunk with %d stages...' % len(self.block_defs)), self.verbose)
        num_blocks = sum([len(x) for x in self.block_defs])
        flat_idx = 0
        current_stride = 2  # the stem is assumed to have already applied stride 2
        current_dilation = 1
        stages = []
        for (stage_idx, stage_defs) in enumerate(self.block_defs):
            _log_info_if('Stack: {}'.format(stage_idx), self.verbose)
            blocks = []
            for (block_idx, block_def) in enumerate(stage_defs):
                _log_info_if(' Block: {}'.format(block_idx), self.verbose)
                (block_type, block_args) = block_def
                block_args = dict(**block_args)  # shallow copy; decoded defs stay pristine
                assert (block_args['stride'] in (1, 2))
                if (block_idx >= 1):
                    # Only the first block in a stage may downsample.
                    block_args['stride'] = 1
                next_dilation = current_dilation
                if (block_args['stride'] > 1):
                    next_output_stride = (current_stride * block_args['stride'])
                    if (next_output_stride > self.output_stride):
                        # Swap stride for dilation to cap the network output stride.
                        next_dilation = (current_dilation * block_args['stride'])
                        block_args['stride'] = 1
                        _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format(
                            self.output_stride), self.verbose)
                    else:
                        current_stride = next_output_stride
                block_args['dilation'] = current_dilation
                if (next_dilation != current_dilation):
                    current_dilation = next_dilation
                blocks.append(self._make_block(
                    block_type, block_args, stage_idx, block_idx, flat_idx, num_blocks))
                flat_idx += 1
            stages.append(blocks)
        return stages
def get_bn_args_tf():
    """Return a copy of the default BatchNorm args for Tensorflow-origin models."""
    return _BN_ARGS_TF.copy()
def get_bn_args_pt():
    """Return a copy of the default BatchNorm args for PyTorch-origin models."""
    return _BN_ARGS_PT.copy()
def load_state_dict(filename, include_type_map=False, transpose=False):
    """Load a numpy ``.npz`` checkpoint into a flat jax state dict.

    The archive stores a ``names`` array, optional ``types`` array, and one
    array per index ('0', '1', ...). When ``transpose`` is set, 4D weights are
    converted OIHW -> HWIO and 2D weights are transposed (PyTorch -> TF layout).

    Returns the state dict, or (state dict, type map) when ``include_type_map``
    is set and the archive carries type info.
    """
    archive = np.load(filename)
    names = archive['names']
    types = archive['types'] if ('types' in archive) else []
    values = [archive[str(idx)] for idx in range(len(names))]
    state = {}
    type_map = {}
    for idx, (name, value) in enumerate(zip(names, values)):
        if transpose:
            assert len(value.shape) in (1, 2, 4)
            if len(value.shape) == 4:
                value = value.transpose((2, 3, 1, 0))  # OIHW -> HWIO
            elif len(value.shape) == 2:
                value = value.transpose()
        state[name] = jnp.array(value)
        if include_type_map and (len(types) == len(names)):
            type_map[name] = types[idx].lower()
    if len(type_map):
        return (state, type_map)
    return state
def split_state_dict(state_dict):
    """Partition a state dict into (params, other state).

    FIXME currently other state is assumed to be norm running state.
    """
    out_params = {}
    out_state = {}
    for name, value in state_dict.items():
        # Keys containing any known state marker go to the state dict.
        bucket = out_state if any((marker in name) for marker in _STATE_NAMES) else out_params
        bucket[name] = value
    return (out_params, out_state)
def get_outdir(path, *paths, retry_inc=False):
    """Join and create an output directory.

    When the directory already exists and ``retry_inc`` is set, the first free
    ``<dir>-<n>`` suffix (n < 100) is created and returned instead.
    """
    outdir = os.path.join(path, *paths)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
        return outdir
    if retry_inc:
        count = 1
        candidate = outdir + '-' + str(count)
        while os.path.exists(candidate):
            count = count + 1
            candidate = outdir + '-' + str(count)
            assert count < 100  # give up rather than scan forever
        outdir = candidate
        os.makedirs(outdir)
    return outdir
def cross_entropy_loss(logits, labels, label_smoothing=0.0, dtype=jnp.float32):
    """Mean cross entropy between logits and integer labels w/ label smoothing.

    Args:
        logits: [batch, length, num_classes] float array.
        labels: categorical labels [batch, length] int array.
        label_smoothing: label smoothing constant, used to determine the on and off values.
        dtype: dtype to perform loss calcs in, including log_softmax.
    """
    num_classes = logits.shape[-1]
    targets = jax.nn.one_hot(labels, num_classes, dtype=dtype)
    if label_smoothing > 0:
        # Distribute label_smoothing mass uniformly over all classes.
        targets = targets * (1 - label_smoothing) + label_smoothing / num_classes
    log_probs = jax.nn.log_softmax(logits.astype(dtype))
    return -jnp.mean(jnp.sum(log_probs * targets, axis=-1))
def onehot(labels, num_classes, on_value=1.0, off_value=0.0, dtype=jnp.float32):
    """One-hot encode integer ``labels`` with configurable on/off values."""
    hits = labels[..., None] == jnp.arange(num_classes)[None]
    encoded = lax.select(hits, jnp.full(hits.shape, on_value), jnp.full(hits.shape, off_value))
    return encoded.astype(dtype)
def weighted_cross_entropy_loss(logits, labels, weights=None, label_smoothing=0.0, dtype=jnp.float32):
    """Weighted mean cross entropy for logits and labels w/ label smoothing.

    Args:
        logits: [batch, length, num_classes] float array.
        labels: categorical labels [batch, length] int array.
        weights: None or array of shape [batch, length].
        label_smoothing: label smoothing constant, used to determine the on and off values.
        dtype: dtype to perform loss calcs in, including log_softmax.

    Raises:
        ValueError: when logits rank is not labels rank + 1.
    """
    if logits.ndim != labels.ndim + 1:
        raise ValueError(f'Incorrect shapes. Got shape {logits.shape} logits and {labels.shape} targets')
    num_classes = logits.shape[-1]
    off_value = label_smoothing / num_classes
    on_value = 1.0 - label_smoothing + off_value
    soft_targets = onehot(labels, num_classes, on_value=on_value, off_value=off_value, dtype=dtype)
    log_probs = jax.nn.log_softmax(logits.astype(dtype))
    per_example = jnp.sum(log_probs * soft_targets, axis=-1)
    if weights is not None:
        per_example = per_example * weights
    return -per_example.mean()
def create_lr_schedule_epochs(base_lr, decay_type, steps_per_epoch, total_epochs,
                              decay_rate=0.1, decay_epochs=0, warmup_epochs=5.0,
                              power=1.0, min_lr=1e-05):
    """Epoch-based wrapper for create_lr_schedule (converts epochs to steps)."""
    return create_lr_schedule(
        base_lr=base_lr,
        decay_type=decay_type,
        total_steps=int(total_epochs * steps_per_epoch),
        decay_rate=decay_rate,
        decay_steps=int(decay_epochs * steps_per_epoch),
        warmup_steps=int(warmup_epochs * steps_per_epoch),
        power=power,
        min_lr=min_lr)
def create_lr_schedule(base_lr, decay_type, total_steps, decay_rate=0.1, decay_steps=0,
                       warmup_steps=0, power=1.0, min_lr=1e-05):
    """Create a learning rate schedule.

    Currently only warmup + {linear,cosine} but will be a proper mini-language
    like preprocessing one in the future.

    Args:
        base_lr: the starting learning-rate (without warmup).
        decay_type: one of 'cosine', 'step', 'poly', 'exponential', 'constant'.
        total_steps: the total number of steps to run.
        decay_rate: decay fraction for step / exponential schedules.
        decay_steps: number of steps for each application of decay_rate.
        warmup_steps: how many steps to warm up for.
        power: polynomial power for 'poly' decay.
        min_lr: minimum learning rate.

    Returns:
        A function step -> learning rate (jnp.float32 scalar).
    """

    def step_fn(step):
        """Compute the learning rate for a given step."""
        steps_past_warmup = jnp.maximum(0.0, step - warmup_steps)
        progress = jnp.clip(steps_past_warmup / float(total_steps - warmup_steps), 0.0, 1.0)
        if decay_type == 'cosine':
            lr = min_lr + base_lr * 0.5 * (1.0 + jnp.cos(jnp.pi * progress))
        elif decay_type == 'step':
            assert decay_steps > 0
            lr = base_lr * decay_rate ** (steps_past_warmup // decay_steps)
        elif decay_type.startswith('poly'):
            lr = min_lr + (base_lr - min_lr) * (1.0 - progress) ** power
        elif decay_type.startswith('exp'):
            assert decay_steps > 0
            lr = base_lr * decay_rate ** (steps_past_warmup / decay_steps)
        elif (not decay_type) or decay_type.startswith('const'):
            lr = base_lr
        else:
            raise ValueError(f'Unknown lr type {decay_type}')
        lr = jnp.maximum(min_lr, lr)
        if warmup_steps:
            # Linear warmup applied on top of the decayed rate.
            lr = lr * jnp.minimum(1.0, step / warmup_steps)
        return jnp.asarray(lr, dtype=jnp.float32)

    return step_fn
class AverageMeter():
    'Computes and stores the average and current value'

    def __init__(self):
        self.reset()

    def reset(self):
        # Zero out all accumulated statistics.
        self.val = 0    # most recent value seen
        self.avg = 0    # running average (sum / count)
        self.sum = 0    # weighted sum of all values
        self.count = 0  # total weight (number of observations)

    def update(self, val, n=1):
        # Record `val` observed `n` times and refresh the running average.
        self.val = val
        self.sum += (val * n)
        self.count += n
        self.avg = (self.sum / self.count)
def correct_topk(logits, labels, topk=(1,)):
    """Count, for each k, the samples whose label is within the top-k logits."""
    # ranked: [max_k, batch] of class indices, best first.
    ranked = lax.top_k(logits, max(topk))[1].transpose()
    hits = ranked == labels.reshape(1, -1)
    return [hits[:k].reshape(-1).sum(axis=0) for k in topk]
def acc_topk(logits, labels, topk=(1,)):
    """Top-k accuracy (percent) for each requested k."""
    # ranked: [max_k, batch] of class indices, best first.
    ranked = lax.top_k(logits, max(topk))[1].transpose()
    hits = ranked == labels.reshape(1, -1)
    batch = labels.shape[0]
    return [hits[:k].reshape(-1).sum(axis=0) * 100 / batch for k in topk]
def get_model_cfg(name):
    """Return a deep copy of the registered model config for ``name``."""
    # Deep copy so callers can mutate the config without touching the registry.
    cfg = _model_cfg[name]
    return deepcopy(cfg)
def list_models(pattern='', pretrained=True):
    """List registered model names.

    Args:
        pattern: optional fnmatch-style filter on the names.
        pretrained: when True, only models with a pretrained weight URL.
    """
    model_names = [
        name for name, cfg in _model_cfg.items()
        if (not pretrained) or cfg['default_cfg']['url']
    ]
    if pattern:
        model_names = fnmatch.filter(model_names, pattern)
    return model_names
def dcfg(url='', **kwargs):
    """Default dataset config (ie ImageNet pretraining config), overridable via kwargs."""
    return dict(
        dict(url=url, num_classes=1000, input_size=(3, 224, 224), pool_size=(7, 7),
             crop_pct=0.875, interpolation='bicubic', mean=IMAGENET_MEAN, std=IMAGENET_STD),
        **kwargs)
def pt_acfg(**kwargs):
    """Architecture config for models of PyTorch origin ('LIKE' padding)."""
    bn_cfg = kwargs.pop('bn_cfg', None)
    bn_cfg = bn_cfg or get_bn_args_pt()
    return {'pad_type': 'LIKE', 'bn_cfg': bn_cfg, **kwargs}
def tf_acfg(**kwargs):
    """Architecture config for models of Tensorflow origin ('SAME' padding)."""
    bn_cfg = kwargs.pop('bn_cfg', None)
    bn_cfg = bn_cfg or get_bn_args_tf()
    return {'pad_type': 'SAME', 'bn_cfg': bn_cfg, **kwargs}
def get_jax_dir():
    """Resolve the jax cache directory: $JAX_HOME, else <xdg cache home>/jax."""
    cache_root = os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR)
    default_home = os.path.join(cache_root, 'jax')
    return os.path.expanduser(os.getenv(ENV_JAX_HOME, default_home))
def download_url_to_file(url, dst, hash_prefix=None, progress=True):
    """Download object at the given URL to a local path.

    Args:
        url (string): URL of the object to download
        dst (string): Full path where object will be saved, e.g. `/tmp/temporary_file`
        hash_prefix (string, optional): If not None, the SHA256 downloaded file should start with `hash_prefix`.
            Default: None
        progress (bool, optional): whether or not to display a progress bar to stderr
            Default: True

    Raises:
        RuntimeError: if `hash_prefix` is given and the downloaded content does not match it.
    """
    file_size = None
    req = Request(url, headers={'User-Agent': 'jax.zoo'})
    u = urlopen(req)
    meta = u.info()
    # getheaders() existed on older http message objects; get_all is the modern API.
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders('Content-Length')
    else:
        content_length = meta.get_all('Content-Length')
    if ((content_length is not None) and (len(content_length) > 0)):
        file_size = int(content_length[0])
    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    # Download into a temp file in the destination directory so the final move
    # is atomic (same filesystem); delete=False so we can move/remove it by name.
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        if (hash_prefix is not None):
            sha256 = hashlib.sha256()
        with tqdm(total=file_size, disable=(not progress), unit='B', unit_scale=True, unit_divisor=1024) as pbar:
            while True:
                # Stream in 8 KiB chunks; empty read signals EOF.
                buffer = u.read(8192)
                if (len(buffer) == 0):
                    break
                f.write(buffer)
                if (hash_prefix is not None):
                    sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        if (hash_prefix is not None):
            # Only a prefix of the digest needs to match.
            digest = sha256.hexdigest()
            if (digest[:len(hash_prefix)] != hash_prefix):
                raise RuntimeError('invalid hash value (expected "{}", got "{}")'.format(hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        # On success the temp file was moved away; on failure remove it.
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def load_state_dict_from_url(url, model_dir=None, transpose=False, progress=True, check_hash=True, file_name=None):
    """Loads the serialized npz object at the given URL.

    If the object is already present in `model_dir`, it's deserialized and returned.

    The default value of `model_dir` is ``<jax_dir>/checkpoints`` where
    `jax_dir` is the directory returned by :func:`get_dir`.

    Args:
        url (string): URL of the object to download
        model_dir (string, optional): directory in which to save the object
        transpose (bool): transpose the weights from PyTorch (ie OIHW) style layouts to Tensorflow (ie HWIO conv2d)
        progress (bool, optional): whether or not to display a progress bar to stderr. Default: True
        check_hash(bool, optional): If True and the filename part of the URL follows the naming convention
            ``filename-<sha256>.ext``, the embedded hash prefix is used to verify the download.
        file_name (string, optional): name for the downloaded file. Filename from `url` will be used if not set.
    """
    if os.getenv('TORCH_MODEL_ZOO'):
        warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')
    if (model_dir is None):
        model_dir = os.path.join(get_jax_dir(), 'checkpoints')
    try:
        os.makedirs(model_dir)
    except OSError as e:
        if (e.errno == errno.EEXIST):
            pass  # directory already exists; nothing to do
        else:
            raise
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    if (file_name is not None):
        filename = file_name
    cached_file = os.path.join(model_dir, filename)
    if (not os.path.exists(cached_file)):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = None
        if check_hash:
            # FIX: a filename without an embedded hash previously crashed with
            # AttributeError on .group(); treat it as "nothing to verify".
            match = HASH_REGEX.search(filename)
            hash_prefix = match.group(1) if match else None
        download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    return load_state_dict(cached_file, transpose=transpose)
def scale_by_learning_rate(learning_rate: ScalarOrSchedule):
    """Optax transform scaling updates by the negated learning rate.

    Accepts either a constant scalar or a schedule (callable of step count).
    """
    if callable(learning_rate):
        def negated_schedule(count):
            return -learning_rate(count)
        return optax.scale_by_schedule(negated_schedule)
    return optax.scale(-learning_rate)
def update_moment(updates, moments, decay, order):
    """EMA update of the ``order``-th moment: (1 - decay) * g**order + decay * t.

    Note: uses jax.tree_util.tree_map — jax.tree_multimap was deprecated and
    removed from JAX; tree_map with multiple trees is the drop-in equivalent.
    """
    return jax.tree_util.tree_map(
        lambda g, t: (1 - decay) * (g ** order) + decay * t,
        updates, moments)
def exclude_bias_and_norm(path: Tuple[Any, ...], val: jnp.ndarray) -> bool:
    """Filter to exclude biases and normalization (scale) weights.

    Args:
        path: tuple of names leading to the parameter (annotation fixed from
            ``Tuple[Any]``, which would mean a 1-tuple).
        val: the parameter value (unused; only the path matters).

    Returns:
        False for parameters whose path ends in 'bias' or 'scale', else True.
        (Return annotation fixed: the function returns a plain bool.)
    """
    del val
    return path[-1] not in ('bias', 'scale')
def _partial_update(updates: optax.Updates, new_updates: optax.Updates, params: optax.Params,
                    filter_fn: Optional[FilterFn]=None) -> optax.Updates:
    """Returns new_updates for params where filter_fn is True, else updates.

    Note: uses jax.tree_util.tree_map — jax.tree_multimap was deprecated and
    removed from JAX; tree_map with multiple trees is the drop-in equivalent.
    """
    if (filter_fn is None):
        return new_updates
    # Per-leaf 0/1 mask from the filter decision on each parameter path.
    wrapped_filter_fn = (lambda x, y: jnp.array(filter_fn(x, y)))
    params_to_filter = nest.map_structure_with_path(wrapped_filter_fn, params)

    def _update_fn(g: jnp.ndarray, t: jnp.ndarray, m: jnp.ndarray) -> jnp.ndarray:
        # Blend: mask m selects the new update t, (1 - m) keeps the old g.
        m = m.astype(g.dtype)
        return ((g * (1.0 - m)) + (t * m))

    return jax.tree_util.tree_map(_update_fn, updates, new_updates, params_to_filter)
class ScaleByLarsState(NamedTuple):
    """Optimizer state for the LARS transformation."""
    # Per-parameter momentum buffers (same tree structure as the params).
    mu: jnp.ndarray
def scale_by_lars(momentum: float=0.9, eta: float=0.001,
                  filter_fn: Optional[FilterFn]=None) -> optax.GradientTransformation:
    """Rescales updates according to the LARS algorithm.

    Does not include weight decay.
    References:
        [You et al, 2017](https://arxiv.org/abs/1708.03888)

    Args:
        momentum: momentum coefficient.
        eta: LARS coefficient.
        filter_fn: an optional filter function.

    Returns:
        An (init_fn, update_fn) tuple.

    Note: uses jax.tree_util.tree_map — jax.tree_multimap was deprecated and
    removed from JAX; tree_map with multiple trees is the drop-in equivalent.
    """

    def init_fn(params: optax.Params) -> ScaleByLarsState:
        # One zeroed momentum buffer per parameter leaf.
        mu = jax.tree_util.tree_map(jnp.zeros_like, params)
        return ScaleByLarsState(mu=mu)

    def update_fn(updates: optax.Updates, state: ScaleByLarsState,
                  params: optax.Params) -> Tuple[(optax.Updates, ScaleByLarsState)]:

        def lars_adaptation(update: jnp.ndarray, param: jnp.ndarray) -> jnp.ndarray:
            # Trust ratio eta * ||param|| / ||update||, guarded against zero norms.
            param_norm = jnp.linalg.norm(param)
            update_norm = jnp.linalg.norm(update)
            return (update * jnp.where((param_norm > 0.0),
                                       jnp.where((update_norm > 0), ((eta * param_norm) / update_norm), 1.0),
                                       1.0))

        adapted_updates = jax.tree_util.tree_map(lars_adaptation, updates, params)
        # Optionally keep the raw updates for filtered-out params (eg bias/scale).
        adapted_updates = _partial_update(updates, adapted_updates, params, filter_fn)
        mu = jax.tree_util.tree_map((lambda g, t: ((momentum * g) + t)), state.mu, adapted_updates)
        return (mu, ScaleByLarsState(mu=mu))

    return optax.GradientTransformation(init_fn, update_fn)
class AddWeightDecayState(NamedTuple):
    """Stateless transformation: adding weight decay needs no optimizer state."""