body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
f2d6e9606c25846d104c63d147a9f7b90fcfd9468e162e1f0e4988e8db4b1ebb
def load_object(self, obj_path, scale=np.array([1, 1, 1]), transform_orn=None, transform_pos=None, input_kd=None, texture_scale=1.0, load_texture=True, overwrite_material=None): '\n Load a wavefront obj file into the renderer and create a VisualObject to manage it.\n\n :param obj_path: path of obj file\n :param scale: scale, default 1\n :param transform_orn: rotation quaternion, convention xyzw\n :param transform_pos: translation for loading, it is a list of length 3\n :param input_kd: if loading material fails, use this default material. input_kd should be a list of length 3\n :param texture_scale: texture scale for the object, downsample to save memory.\n :param load_texture: load texture or not\n :param overwrite_material: whether to overwrite the default Material (usually with a RandomizedMaterial for material randomization)\n :return: VAO_ids\n ' if (self.optimization_process_executed and self.optimized): logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects') return reader = tinyobjloader.ObjReader() logging.info('Loading {}'.format(obj_path)) if obj_path.endswith('encrypted.obj'): ret = reader.ParseFromFileWithKey(obj_path, igibson.key_path) else: ret = reader.ParseFromFile(obj_path) vertex_data_indices = [] face_indices = [] if (not ret): logging.error('Warning: {}'.format(reader.Warning())) logging.error('Error: {}'.format(reader.Error())) logging.error('Failed to load: {}'.format(obj_path)) sys.exit((- 1)) if reader.Warning(): logging.warning('Warning: {}'.format(reader.Warning())) attrib = reader.GetAttrib() logging.debug('Num vertices = {}'.format(len(attrib.vertices))) logging.debug('Num normals = {}'.format(len(attrib.normals))) logging.debug('Num texcoords = {}'.format(len(attrib.texcoords))) materials = reader.GetMaterials() logging.debug('Num materials: {}'.format(len(materials))) if (logging.root.level <= logging.DEBUG): for m in materials: logging.debug('Material name: {}'.format(m.name)) 
logging.debug('Material diffuse: {}'.format(m.diffuse)) shapes = reader.GetShapes() logging.debug('Num shapes: {}'.format(len(shapes))) material_count = len(self.materials_mapping) if ((overwrite_material is not None) and (len(materials) > 1)): logging.warning('passed in one material ends up overwriting multiple materials') for (i, item) in enumerate(materials): if (overwrite_material is not None): if isinstance(overwrite_material, RandomizedMaterial): self.load_randomized_material(overwrite_material) elif isinstance(overwrite_material, ProceduralMaterial): self.load_procedural_material(overwrite_material) material = overwrite_material elif ((item.diffuse_texname != '') and load_texture): obj_dir = os.path.dirname(obj_path) texture = self.load_texture_file(os.path.join(obj_dir, item.diffuse_texname)) texture_metallic = self.load_texture_file(os.path.join(obj_dir, item.metallic_texname)) texture_roughness = self.load_texture_file(os.path.join(obj_dir, item.roughness_texname)) texture_normal = self.load_texture_file(os.path.join(obj_dir, item.bump_texname)) material = Material('texture', texture_id=texture, metallic_texture_id=texture_metallic, roughness_texture_id=texture_roughness, normal_texture_id=texture_normal) else: material = Material('color', kd=item.diffuse) self.materials_mapping[(i + material_count)] = material if (input_kd is not None): self.materials_mapping[(len(materials) + material_count)] = Material('color', kd=input_kd, texture_id=(- 1)) else: self.materials_mapping[(len(materials) + material_count)] = Material('color', kd=[0.5, 0.5, 0.5], texture_id=(- 1)) VAO_ids = [] vertex_position = np.array(attrib.vertices).reshape(((len(attrib.vertices) // 3), 3)) vertex_normal = np.array(attrib.normals).reshape(((len(attrib.normals) // 3), 3)) vertex_texcoord = np.array(attrib.texcoords).reshape(((len(attrib.texcoords) // 2), 2)) for shape in shapes: logging.debug('Shape name: {}'.format(shape.name)) material_id = shape.mesh.material_ids[0] 
logging.debug('material_id = {}'.format(material_id)) logging.debug('num_indices = {}'.format(len(shape.mesh.indices))) n_indices = len(shape.mesh.indices) np_indices = shape.mesh.numpy_indices().reshape((n_indices, 3)) shape_vertex_index = np_indices[(:, 0)] shape_normal_index = np_indices[(:, 1)] shape_texcoord_index = np_indices[(:, 2)] shape_vertex = vertex_position[shape_vertex_index] if (len(vertex_normal) == 0): shape_normal = np.zeros((shape_vertex.shape[0], 3)) else: shape_normal = vertex_normal[shape_normal_index] for i in range(3): shape_vertex[(:, i)] *= scale[i] if (scale[i] < 0): shape_normal[(:, i)] *= (- 1) if (len(vertex_texcoord) == 0): shape_texcoord = np.zeros((shape_vertex.shape[0], 2)) else: shape_texcoord = vertex_texcoord[shape_texcoord_index] if (transform_orn is not None): orn = quat2rotmat(xyzw2wxyz(transform_orn)) shape_vertex = shape_vertex.dot(orn[(:3, :3)].T) shape_normal = shape_normal.dot(orn[(:3, :3)].T) if (transform_pos is not None): shape_vertex += np.array(transform_pos) v0 = shape_vertex[(0::3, :)] v1 = shape_vertex[(1::3, :)] v2 = shape_vertex[(2::3, :)] uv0 = shape_texcoord[(0::3, :)] uv1 = shape_texcoord[(1::3, :)] uv2 = shape_texcoord[(2::3, :)] delta_pos1 = (v1 - v0) delta_pos2 = (v2 - v0) delta_uv1 = (uv1 - uv0) delta_uv2 = (uv2 - uv0) r = (1.0 / ((delta_uv1[(:, 0)] * delta_uv2[(:, 1)]) - (delta_uv1[(:, 1)] * delta_uv2[(:, 0)]))) tangent = (((delta_pos1 * delta_uv2[(:, 1)][(:, None)]) - (delta_pos2 * delta_uv1[(:, 1)][(:, None)])) * r[(:, None)]) bitangent = (((delta_pos2 * delta_uv1[(:, 0)][(:, None)]) - (delta_pos1 * delta_uv2[(:, 0)][(:, None)])) * r[(:, None)]) bitangent = bitangent.repeat(3, axis=0) tangent = tangent.repeat(3, axis=0) vertices = np.concatenate([shape_vertex, shape_normal, shape_texcoord, tangent, bitangent], axis=(- 1)) faces = np.array(range(len(vertices))).reshape(((len(vertices) // 3), 3)) vertexData = vertices.astype(np.float32) [VAO, VBO] = self.r.load_object_meshrenderer(self.shaderProgram, 
vertexData) self.VAOs.append(VAO) self.VBOs.append(VBO) face_indices.append(len(self.faces)) self.faces.append(faces) self.objects.append(obj_path) vertex_data_indices.append(len(self.vertex_data)) self.vertex_data.append(vertexData) self.shapes.append(shape) if (material_id == (- 1)): self.mesh_materials.append((len(materials) + material_count)) else: self.mesh_materials.append((material_id + material_count)) logging.debug('mesh_materials: {}'.format(self.mesh_materials)) VAO_ids.append((self.get_num_objects() - 1)) new_obj = VisualObject(obj_path, VAO_ids=VAO_ids, vertex_data_indices=vertex_data_indices, face_indices=face_indices, id=len(self.visual_objects), renderer=self) self.visual_objects.append(new_obj) return VAO_ids
Load a wavefront obj file into the renderer and create a VisualObject to manage it. :param obj_path: path of obj file :param scale: scale, default 1 :param transform_orn: rotation quaternion, convention xyzw :param transform_pos: translation for loading, it is a list of length 3 :param input_kd: if loading material fails, use this default material. input_kd should be a list of length 3 :param texture_scale: texture scale for the object, downsample to save memory. :param load_texture: load texture or not :param overwrite_material: whether to overwrite the default Material (usually with a RandomizedMaterial for material randomization) :return: VAO_ids
igibson/render/mesh_renderer/mesh_renderer_cpu.py
load_object
suresh-guttikonda/iGibson
0
python
def load_object(self, obj_path, scale=np.array([1, 1, 1]), transform_orn=None, transform_pos=None, input_kd=None, texture_scale=1.0, load_texture=True, overwrite_material=None): '\n Load a wavefront obj file into the renderer and create a VisualObject to manage it.\n\n :param obj_path: path of obj file\n :param scale: scale, default 1\n :param transform_orn: rotation quaternion, convention xyzw\n :param transform_pos: translation for loading, it is a list of length 3\n :param input_kd: if loading material fails, use this default material. input_kd should be a list of length 3\n :param texture_scale: texture scale for the object, downsample to save memory.\n :param load_texture: load texture or not\n :param overwrite_material: whether to overwrite the default Material (usually with a RandomizedMaterial for material randomization)\n :return: VAO_ids\n ' if (self.optimization_process_executed and self.optimized): logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects') return reader = tinyobjloader.ObjReader() logging.info('Loading {}'.format(obj_path)) if obj_path.endswith('encrypted.obj'): ret = reader.ParseFromFileWithKey(obj_path, igibson.key_path) else: ret = reader.ParseFromFile(obj_path) vertex_data_indices = [] face_indices = [] if (not ret): logging.error('Warning: {}'.format(reader.Warning())) logging.error('Error: {}'.format(reader.Error())) logging.error('Failed to load: {}'.format(obj_path)) sys.exit((- 1)) if reader.Warning(): logging.warning('Warning: {}'.format(reader.Warning())) attrib = reader.GetAttrib() logging.debug('Num vertices = {}'.format(len(attrib.vertices))) logging.debug('Num normals = {}'.format(len(attrib.normals))) logging.debug('Num texcoords = {}'.format(len(attrib.texcoords))) materials = reader.GetMaterials() logging.debug('Num materials: {}'.format(len(materials))) if (logging.root.level <= logging.DEBUG): for m in materials: logging.debug('Material name: {}'.format(m.name)) 
logging.debug('Material diffuse: {}'.format(m.diffuse)) shapes = reader.GetShapes() logging.debug('Num shapes: {}'.format(len(shapes))) material_count = len(self.materials_mapping) if ((overwrite_material is not None) and (len(materials) > 1)): logging.warning('passed in one material ends up overwriting multiple materials') for (i, item) in enumerate(materials): if (overwrite_material is not None): if isinstance(overwrite_material, RandomizedMaterial): self.load_randomized_material(overwrite_material) elif isinstance(overwrite_material, ProceduralMaterial): self.load_procedural_material(overwrite_material) material = overwrite_material elif ((item.diffuse_texname != ) and load_texture): obj_dir = os.path.dirname(obj_path) texture = self.load_texture_file(os.path.join(obj_dir, item.diffuse_texname)) texture_metallic = self.load_texture_file(os.path.join(obj_dir, item.metallic_texname)) texture_roughness = self.load_texture_file(os.path.join(obj_dir, item.roughness_texname)) texture_normal = self.load_texture_file(os.path.join(obj_dir, item.bump_texname)) material = Material('texture', texture_id=texture, metallic_texture_id=texture_metallic, roughness_texture_id=texture_roughness, normal_texture_id=texture_normal) else: material = Material('color', kd=item.diffuse) self.materials_mapping[(i + material_count)] = material if (input_kd is not None): self.materials_mapping[(len(materials) + material_count)] = Material('color', kd=input_kd, texture_id=(- 1)) else: self.materials_mapping[(len(materials) + material_count)] = Material('color', kd=[0.5, 0.5, 0.5], texture_id=(- 1)) VAO_ids = [] vertex_position = np.array(attrib.vertices).reshape(((len(attrib.vertices) // 3), 3)) vertex_normal = np.array(attrib.normals).reshape(((len(attrib.normals) // 3), 3)) vertex_texcoord = np.array(attrib.texcoords).reshape(((len(attrib.texcoords) // 2), 2)) for shape in shapes: logging.debug('Shape name: {}'.format(shape.name)) material_id = shape.mesh.material_ids[0] 
logging.debug('material_id = {}'.format(material_id)) logging.debug('num_indices = {}'.format(len(shape.mesh.indices))) n_indices = len(shape.mesh.indices) np_indices = shape.mesh.numpy_indices().reshape((n_indices, 3)) shape_vertex_index = np_indices[(:, 0)] shape_normal_index = np_indices[(:, 1)] shape_texcoord_index = np_indices[(:, 2)] shape_vertex = vertex_position[shape_vertex_index] if (len(vertex_normal) == 0): shape_normal = np.zeros((shape_vertex.shape[0], 3)) else: shape_normal = vertex_normal[shape_normal_index] for i in range(3): shape_vertex[(:, i)] *= scale[i] if (scale[i] < 0): shape_normal[(:, i)] *= (- 1) if (len(vertex_texcoord) == 0): shape_texcoord = np.zeros((shape_vertex.shape[0], 2)) else: shape_texcoord = vertex_texcoord[shape_texcoord_index] if (transform_orn is not None): orn = quat2rotmat(xyzw2wxyz(transform_orn)) shape_vertex = shape_vertex.dot(orn[(:3, :3)].T) shape_normal = shape_normal.dot(orn[(:3, :3)].T) if (transform_pos is not None): shape_vertex += np.array(transform_pos) v0 = shape_vertex[(0::3, :)] v1 = shape_vertex[(1::3, :)] v2 = shape_vertex[(2::3, :)] uv0 = shape_texcoord[(0::3, :)] uv1 = shape_texcoord[(1::3, :)] uv2 = shape_texcoord[(2::3, :)] delta_pos1 = (v1 - v0) delta_pos2 = (v2 - v0) delta_uv1 = (uv1 - uv0) delta_uv2 = (uv2 - uv0) r = (1.0 / ((delta_uv1[(:, 0)] * delta_uv2[(:, 1)]) - (delta_uv1[(:, 1)] * delta_uv2[(:, 0)]))) tangent = (((delta_pos1 * delta_uv2[(:, 1)][(:, None)]) - (delta_pos2 * delta_uv1[(:, 1)][(:, None)])) * r[(:, None)]) bitangent = (((delta_pos2 * delta_uv1[(:, 0)][(:, None)]) - (delta_pos1 * delta_uv2[(:, 0)][(:, None)])) * r[(:, None)]) bitangent = bitangent.repeat(3, axis=0) tangent = tangent.repeat(3, axis=0) vertices = np.concatenate([shape_vertex, shape_normal, shape_texcoord, tangent, bitangent], axis=(- 1)) faces = np.array(range(len(vertices))).reshape(((len(vertices) // 3), 3)) vertexData = vertices.astype(np.float32) [VAO, VBO] = self.r.load_object_meshrenderer(self.shaderProgram, 
vertexData) self.VAOs.append(VAO) self.VBOs.append(VBO) face_indices.append(len(self.faces)) self.faces.append(faces) self.objects.append(obj_path) vertex_data_indices.append(len(self.vertex_data)) self.vertex_data.append(vertexData) self.shapes.append(shape) if (material_id == (- 1)): self.mesh_materials.append((len(materials) + material_count)) else: self.mesh_materials.append((material_id + material_count)) logging.debug('mesh_materials: {}'.format(self.mesh_materials)) VAO_ids.append((self.get_num_objects() - 1)) new_obj = VisualObject(obj_path, VAO_ids=VAO_ids, vertex_data_indices=vertex_data_indices, face_indices=face_indices, id=len(self.visual_objects), renderer=self) self.visual_objects.append(new_obj) return VAO_ids
def load_object(self, obj_path, scale=np.array([1, 1, 1]), transform_orn=None, transform_pos=None, input_kd=None, texture_scale=1.0, load_texture=True, overwrite_material=None): '\n Load a wavefront obj file into the renderer and create a VisualObject to manage it.\n\n :param obj_path: path of obj file\n :param scale: scale, default 1\n :param transform_orn: rotation quaternion, convention xyzw\n :param transform_pos: translation for loading, it is a list of length 3\n :param input_kd: if loading material fails, use this default material. input_kd should be a list of length 3\n :param texture_scale: texture scale for the object, downsample to save memory.\n :param load_texture: load texture or not\n :param overwrite_material: whether to overwrite the default Material (usually with a RandomizedMaterial for material randomization)\n :return: VAO_ids\n ' if (self.optimization_process_executed and self.optimized): logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects') return reader = tinyobjloader.ObjReader() logging.info('Loading {}'.format(obj_path)) if obj_path.endswith('encrypted.obj'): ret = reader.ParseFromFileWithKey(obj_path, igibson.key_path) else: ret = reader.ParseFromFile(obj_path) vertex_data_indices = [] face_indices = [] if (not ret): logging.error('Warning: {}'.format(reader.Warning())) logging.error('Error: {}'.format(reader.Error())) logging.error('Failed to load: {}'.format(obj_path)) sys.exit((- 1)) if reader.Warning(): logging.warning('Warning: {}'.format(reader.Warning())) attrib = reader.GetAttrib() logging.debug('Num vertices = {}'.format(len(attrib.vertices))) logging.debug('Num normals = {}'.format(len(attrib.normals))) logging.debug('Num texcoords = {}'.format(len(attrib.texcoords))) materials = reader.GetMaterials() logging.debug('Num materials: {}'.format(len(materials))) if (logging.root.level <= logging.DEBUG): for m in materials: logging.debug('Material name: {}'.format(m.name)) 
logging.debug('Material diffuse: {}'.format(m.diffuse)) shapes = reader.GetShapes() logging.debug('Num shapes: {}'.format(len(shapes))) material_count = len(self.materials_mapping) if ((overwrite_material is not None) and (len(materials) > 1)): logging.warning('passed in one material ends up overwriting multiple materials') for (i, item) in enumerate(materials): if (overwrite_material is not None): if isinstance(overwrite_material, RandomizedMaterial): self.load_randomized_material(overwrite_material) elif isinstance(overwrite_material, ProceduralMaterial): self.load_procedural_material(overwrite_material) material = overwrite_material elif ((item.diffuse_texname != ) and load_texture): obj_dir = os.path.dirname(obj_path) texture = self.load_texture_file(os.path.join(obj_dir, item.diffuse_texname)) texture_metallic = self.load_texture_file(os.path.join(obj_dir, item.metallic_texname)) texture_roughness = self.load_texture_file(os.path.join(obj_dir, item.roughness_texname)) texture_normal = self.load_texture_file(os.path.join(obj_dir, item.bump_texname)) material = Material('texture', texture_id=texture, metallic_texture_id=texture_metallic, roughness_texture_id=texture_roughness, normal_texture_id=texture_normal) else: material = Material('color', kd=item.diffuse) self.materials_mapping[(i + material_count)] = material if (input_kd is not None): self.materials_mapping[(len(materials) + material_count)] = Material('color', kd=input_kd, texture_id=(- 1)) else: self.materials_mapping[(len(materials) + material_count)] = Material('color', kd=[0.5, 0.5, 0.5], texture_id=(- 1)) VAO_ids = [] vertex_position = np.array(attrib.vertices).reshape(((len(attrib.vertices) // 3), 3)) vertex_normal = np.array(attrib.normals).reshape(((len(attrib.normals) // 3), 3)) vertex_texcoord = np.array(attrib.texcoords).reshape(((len(attrib.texcoords) // 2), 2)) for shape in shapes: logging.debug('Shape name: {}'.format(shape.name)) material_id = shape.mesh.material_ids[0] 
logging.debug('material_id = {}'.format(material_id)) logging.debug('num_indices = {}'.format(len(shape.mesh.indices))) n_indices = len(shape.mesh.indices) np_indices = shape.mesh.numpy_indices().reshape((n_indices, 3)) shape_vertex_index = np_indices[(:, 0)] shape_normal_index = np_indices[(:, 1)] shape_texcoord_index = np_indices[(:, 2)] shape_vertex = vertex_position[shape_vertex_index] if (len(vertex_normal) == 0): shape_normal = np.zeros((shape_vertex.shape[0], 3)) else: shape_normal = vertex_normal[shape_normal_index] for i in range(3): shape_vertex[(:, i)] *= scale[i] if (scale[i] < 0): shape_normal[(:, i)] *= (- 1) if (len(vertex_texcoord) == 0): shape_texcoord = np.zeros((shape_vertex.shape[0], 2)) else: shape_texcoord = vertex_texcoord[shape_texcoord_index] if (transform_orn is not None): orn = quat2rotmat(xyzw2wxyz(transform_orn)) shape_vertex = shape_vertex.dot(orn[(:3, :3)].T) shape_normal = shape_normal.dot(orn[(:3, :3)].T) if (transform_pos is not None): shape_vertex += np.array(transform_pos) v0 = shape_vertex[(0::3, :)] v1 = shape_vertex[(1::3, :)] v2 = shape_vertex[(2::3, :)] uv0 = shape_texcoord[(0::3, :)] uv1 = shape_texcoord[(1::3, :)] uv2 = shape_texcoord[(2::3, :)] delta_pos1 = (v1 - v0) delta_pos2 = (v2 - v0) delta_uv1 = (uv1 - uv0) delta_uv2 = (uv2 - uv0) r = (1.0 / ((delta_uv1[(:, 0)] * delta_uv2[(:, 1)]) - (delta_uv1[(:, 1)] * delta_uv2[(:, 0)]))) tangent = (((delta_pos1 * delta_uv2[(:, 1)][(:, None)]) - (delta_pos2 * delta_uv1[(:, 1)][(:, None)])) * r[(:, None)]) bitangent = (((delta_pos2 * delta_uv1[(:, 0)][(:, None)]) - (delta_pos1 * delta_uv2[(:, 0)][(:, None)])) * r[(:, None)]) bitangent = bitangent.repeat(3, axis=0) tangent = tangent.repeat(3, axis=0) vertices = np.concatenate([shape_vertex, shape_normal, shape_texcoord, tangent, bitangent], axis=(- 1)) faces = np.array(range(len(vertices))).reshape(((len(vertices) // 3), 3)) vertexData = vertices.astype(np.float32) [VAO, VBO] = self.r.load_object_meshrenderer(self.shaderProgram, 
vertexData) self.VAOs.append(VAO) self.VBOs.append(VBO) face_indices.append(len(self.faces)) self.faces.append(faces) self.objects.append(obj_path) vertex_data_indices.append(len(self.vertex_data)) self.vertex_data.append(vertexData) self.shapes.append(shape) if (material_id == (- 1)): self.mesh_materials.append((len(materials) + material_count)) else: self.mesh_materials.append((material_id + material_count)) logging.debug('mesh_materials: {}'.format(self.mesh_materials)) VAO_ids.append((self.get_num_objects() - 1)) new_obj = VisualObject(obj_path, VAO_ids=VAO_ids, vertex_data_indices=vertex_data_indices, face_indices=face_indices, id=len(self.visual_objects), renderer=self) self.visual_objects.append(new_obj) return VAO_ids<|docstring|>Load a wavefront obj file into the renderer and create a VisualObject to manage it. :param obj_path: path of obj file :param scale: scale, default 1 :param transform_orn: rotation quaternion, convention xyzw :param transform_pos: translation for loading, it is a list of length 3 :param input_kd: if loading material fails, use this default material. input_kd should be a list of length 3 :param texture_scale: texture scale for the object, downsample to save memory. :param load_texture: load texture or not :param overwrite_material: whether to overwrite the default Material (usually with a RandomizedMaterial for material randomization) :return: VAO_ids<|endoftext|>
b7f082a7c5372f5b6fb7fae64ca0a9576f3c9176ba8c906d4edfb8f766e37232
def add_instance(self, object_id, pybullet_uuid=None, class_id=0, pose_trans=np.eye(4), pose_rot=np.eye(4), dynamic=False, softbody=False, use_pbr=True, use_pbr_mapping=True, shadow_caster=True): '\n Create instance for a visual object and link it to pybullet\n\n :param object_id: id of visual object\n :param pybullet_uuid: body id in pybullet\n :param class_id: class_id to render semantics\n :param pose_trans: initial translations for the visual object\n :param pose_rot: initial rotation matrix for the visual object\n :param dynamic: whether the instance is dynamic\n :param softbody: whether the instance is soft body\n :param use_pbr: whether to use PBR\n :param use_pbr_mapping: whether to use PBR mapping\n :param shadow_caster: whether to cast shadow\n ' if (self.optimization_process_executed and self.optimized): logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects') return use_pbr = (use_pbr and self.rendering_settings.enable_pbr) use_pbr_mapping = (use_pbr_mapping and self.rendering_settings.enable_pbr) instance = Instance(self.visual_objects[object_id], id=len(self.instances), pybullet_uuid=pybullet_uuid, class_id=class_id, pose_trans=pose_trans, pose_rot=pose_rot, dynamic=dynamic, softbody=softbody, use_pbr=use_pbr, use_pbr_mapping=use_pbr_mapping, shadow_caster=shadow_caster) self.instances.append(instance)
Create instance for a visual object and link it to pybullet :param object_id: id of visual object :param pybullet_uuid: body id in pybullet :param class_id: class_id to render semantics :param pose_trans: initial translations for the visual object :param pose_rot: initial rotation matrix for the visual object :param dynamic: whether the instance is dynamic :param softbody: whether the instance is soft body :param use_pbr: whether to use PBR :param use_pbr_mapping: whether to use PBR mapping :param shadow_caster: whether to cast shadow
igibson/render/mesh_renderer/mesh_renderer_cpu.py
add_instance
suresh-guttikonda/iGibson
0
python
def add_instance(self, object_id, pybullet_uuid=None, class_id=0, pose_trans=np.eye(4), pose_rot=np.eye(4), dynamic=False, softbody=False, use_pbr=True, use_pbr_mapping=True, shadow_caster=True): '\n Create instance for a visual object and link it to pybullet\n\n :param object_id: id of visual object\n :param pybullet_uuid: body id in pybullet\n :param class_id: class_id to render semantics\n :param pose_trans: initial translations for the visual object\n :param pose_rot: initial rotation matrix for the visual object\n :param dynamic: whether the instance is dynamic\n :param softbody: whether the instance is soft body\n :param use_pbr: whether to use PBR\n :param use_pbr_mapping: whether to use PBR mapping\n :param shadow_caster: whether to cast shadow\n ' if (self.optimization_process_executed and self.optimized): logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects') return use_pbr = (use_pbr and self.rendering_settings.enable_pbr) use_pbr_mapping = (use_pbr_mapping and self.rendering_settings.enable_pbr) instance = Instance(self.visual_objects[object_id], id=len(self.instances), pybullet_uuid=pybullet_uuid, class_id=class_id, pose_trans=pose_trans, pose_rot=pose_rot, dynamic=dynamic, softbody=softbody, use_pbr=use_pbr, use_pbr_mapping=use_pbr_mapping, shadow_caster=shadow_caster) self.instances.append(instance)
def add_instance(self, object_id, pybullet_uuid=None, class_id=0, pose_trans=np.eye(4), pose_rot=np.eye(4), dynamic=False, softbody=False, use_pbr=True, use_pbr_mapping=True, shadow_caster=True): '\n Create instance for a visual object and link it to pybullet\n\n :param object_id: id of visual object\n :param pybullet_uuid: body id in pybullet\n :param class_id: class_id to render semantics\n :param pose_trans: initial translations for the visual object\n :param pose_rot: initial rotation matrix for the visual object\n :param dynamic: whether the instance is dynamic\n :param softbody: whether the instance is soft body\n :param use_pbr: whether to use PBR\n :param use_pbr_mapping: whether to use PBR mapping\n :param shadow_caster: whether to cast shadow\n ' if (self.optimization_process_executed and self.optimized): logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects') return use_pbr = (use_pbr and self.rendering_settings.enable_pbr) use_pbr_mapping = (use_pbr_mapping and self.rendering_settings.enable_pbr) instance = Instance(self.visual_objects[object_id], id=len(self.instances), pybullet_uuid=pybullet_uuid, class_id=class_id, pose_trans=pose_trans, pose_rot=pose_rot, dynamic=dynamic, softbody=softbody, use_pbr=use_pbr, use_pbr_mapping=use_pbr_mapping, shadow_caster=shadow_caster) self.instances.append(instance)<|docstring|>Create instance for a visual object and link it to pybullet :param object_id: id of visual object :param pybullet_uuid: body id in pybullet :param class_id: class_id to render semantics :param pose_trans: initial translations for the visual object :param pose_rot: initial rotation matrix for the visual object :param dynamic: whether the instance is dynamic :param softbody: whether the instance is soft body :param use_pbr: whether to use PBR :param use_pbr_mapping: whether to use PBR mapping :param shadow_caster: whether to cast shadow<|endoftext|>
d3c3a97d3a909c081900aca7ec4728ed11b64b41eb0b1de2ab2551472e7cf885
def add_instance_group(self, object_ids, link_ids, poses_trans, poses_rot, pybullet_uuid=None, class_id=0, dynamic=False, robot=None, use_pbr=True, use_pbr_mapping=True, shadow_caster=True): '\n Create an instance group for a list of visual objects and link it to pybullet\n\n :param object_ids: object ids of the visual objects\n :param link_ids: link_ids in pybullet\n :param poses_trans: initial translations for each visual object\n :param poses_rot: initial rotation matrix for each visual object\n :param pybullet_uuid: body id in pybullet\n :param class_id: class_id to render semantics\n :param dynamic: whether the instance group is dynamic\n :param robot: The robot associated with this InstanceGroup\n :param use_pbr: whether to use PBR\n :param use_pbr_mapping: whether to use PBR mapping\n :param shadow_caster: whether to cast shadow\n ' if (self.optimization_process_executed and self.optimized): logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects') return use_pbr = (use_pbr and self.rendering_settings.enable_pbr) use_pbr_mapping = (use_pbr_mapping and self.rendering_settings.enable_pbr) instance_group = InstanceGroup([self.visual_objects[object_id] for object_id in object_ids], id=len(self.instances), link_ids=link_ids, pybullet_uuid=pybullet_uuid, class_id=class_id, poses_trans=poses_trans, poses_rot=poses_rot, dynamic=dynamic, robot=robot, use_pbr=use_pbr, use_pbr_mapping=use_pbr_mapping, shadow_caster=shadow_caster) self.instances.append(instance_group)
Create an instance group for a list of visual objects and link it to pybullet :param object_ids: object ids of the visual objects :param link_ids: link_ids in pybullet :param poses_trans: initial translations for each visual object :param poses_rot: initial rotation matrix for each visual object :param pybullet_uuid: body id in pybullet :param class_id: class_id to render semantics :param dynamic: whether the instance group is dynamic :param robot: The robot associated with this InstanceGroup :param use_pbr: whether to use PBR :param use_pbr_mapping: whether to use PBR mapping :param shadow_caster: whether to cast shadow
igibson/render/mesh_renderer/mesh_renderer_cpu.py
add_instance_group
suresh-guttikonda/iGibson
0
python
def add_instance_group(self, object_ids, link_ids, poses_trans, poses_rot, pybullet_uuid=None, class_id=0, dynamic=False, robot=None, use_pbr=True, use_pbr_mapping=True, shadow_caster=True):
    """Create an instance group for a list of visual objects and link it to pybullet.

    :param object_ids: object ids of the visual objects
    :param link_ids: link_ids in pybullet
    :param poses_trans: initial translations for each visual object
    :param poses_rot: initial rotation matrix for each visual object
    :param pybullet_uuid: body id in pybullet
    :param class_id: class_id to render semantics
    :param dynamic: whether the instance group is dynamic
    :param robot: The robot associated with this InstanceGroup
    :param use_pbr: whether to use PBR
    :param use_pbr_mapping: whether to use PBR mapping
    :param shadow_caster: whether to cast shadow
    """
    # Once the optimized renderer has flattened its buffers, new geometry can
    # no longer be registered.
    if self.optimization_process_executed and self.optimized:
        logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects')
        return
    # The per-instance PBR flags are only honored when the renderer itself
    # has PBR enabled in its settings.
    if not self.rendering_settings.enable_pbr:
        use_pbr = False
        use_pbr_mapping = False
    visual_objects = [self.visual_objects[oid] for oid in object_ids]
    group = InstanceGroup(
        visual_objects,
        id=len(self.instances),
        link_ids=link_ids,
        pybullet_uuid=pybullet_uuid,
        class_id=class_id,
        poses_trans=poses_trans,
        poses_rot=poses_rot,
        dynamic=dynamic,
        robot=robot,
        use_pbr=use_pbr,
        use_pbr_mapping=use_pbr_mapping,
        shadow_caster=shadow_caster,
    )
    self.instances.append(group)
b825eadcd6418c689f1f49377cdedabc3cb4541c30b2726e4d0074599326806f
def add_robot(self, object_ids, link_ids, poses_trans, poses_rot, pybullet_uuid=None, class_id=0, dynamic=False, robot=None): '\n Create an instance group (a robot) for a list of visual objects and link it to pybullet\n\n :param object_ids: object ids of the visual objects\n :param link_ids: link_ids in pybullet\n :param poses_trans: initial translations for each visual object\n :param poses_rot: initial rotation matrix for each visual object\n :param pybullet_uuid: body id in pybullet\n :param class_id: class_id to render semantics\n :param dynamic: whether the instance group is dynamic\n :param robot: The robot associated with this InstanceGroup\n ' if (self.optimization_process_executed and self.optimized): logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects') return robot = Robot([self.visual_objects[object_id] for object_id in object_ids], id=len(self.instances), link_ids=link_ids, pybullet_uuid=pybullet_uuid, class_id=class_id, poses_trans=poses_trans, poses_rot=poses_rot, dynamic=dynamic, robot=robot, use_pbr=False, use_pbr_mapping=False) self.instances.append(robot)
Create an instance group (a robot) for a list of visual objects and link it to pybullet :param object_ids: object ids of the visual objects :param link_ids: link_ids in pybullet :param poses_trans: initial translations for each visual object :param poses_rot: initial rotation matrix for each visual object :param pybullet_uuid: body id in pybullet :param class_id: class_id to render semantics :param dynamic: whether the instance group is dynamic :param robot: The robot associated with this InstanceGroup
igibson/render/mesh_renderer/mesh_renderer_cpu.py
add_robot
suresh-guttikonda/iGibson
0
python
def add_robot(self, object_ids, link_ids, poses_trans, poses_rot, pybullet_uuid=None, class_id=0, dynamic=False, robot=None):
    """Create an instance group (a robot) for a list of visual objects and link it to pybullet.

    :param object_ids: object ids of the visual objects
    :param link_ids: link_ids in pybullet
    :param poses_trans: initial translations for each visual object
    :param poses_rot: initial rotation matrix for each visual object
    :param pybullet_uuid: body id in pybullet
    :param class_id: class_id to render semantics
    :param dynamic: whether the instance group is dynamic
    :param robot: The robot associated with this InstanceGroup
    """
    # Once the optimized renderer has flattened its buffers, new geometry can
    # no longer be registered.
    if self.optimization_process_executed and self.optimized:
        logging.error('Using optimized renderer and optimization process is already excuted, cannot add new objects')
        return
    # Robots are rendered without PBR (matches the original behavior).
    robot_instance = Robot(
        [self.visual_objects[oid] for oid in object_ids],
        id=len(self.instances),
        link_ids=link_ids,
        pybullet_uuid=pybullet_uuid,
        class_id=class_id,
        poses_trans=poses_trans,
        poses_rot=poses_rot,
        dynamic=dynamic,
        robot=robot,
        use_pbr=False,
        use_pbr_mapping=False,
    )
    self.instances.append(robot_instance)
12aa1c4a6dfe48e186b10b39cd36dcb533eac6b0cce3b9c87bbe97a4f58a1692
def add_text(self, text_data='PLACEHOLDER: PLEASE REPLACE!', font_name='OpenSans', font_style='Regular', font_size=48, color=[0, 0, 0], pixel_pos=[0, 0], pixel_size=[200, 200], scale=1.0, background_color=None, render_to_tex=False): '\n Creates a Text object with the given parameters. Returns the text object to the caller,\n so various settings can be changed - eg. text content, position, scale, etc.\n :param text_data: starting text to display (can be changed at a later time by set_text)\n :param font_name: name of font to render - same as font folder in iGibson assets\n :param font_style: style of font - one of [regular, italic, bold]\n :param font_size: size of font to render\n :param color: [r, g, b] color\n :param pixel_pos: [x, y] position of top-left corner of text box, in pixel coordinates\n :param pixel_size: [w, h] size of text box in pixel coordinates\n :param scale: scale factor for resizing text\n :param background_color: color of the background in form [r, g, b, a] - background will only appear if this is not None\n :param render_to_tex: whether text should be rendered to an OpenGL texture or the screen (the default)\n ' text = Text(text_data=text_data, font_name=font_name, font_style=font_style, font_size=font_size, color=color, pos=pixel_pos, scale=scale, tbox_height=pixel_size[1], tbox_width=pixel_size[0], render_to_tex=render_to_tex, background_color=background_color, text_manager=self.text_manager) self.texts.append(text) return text
Creates a Text object with the given parameters. Returns the text object to the caller, so various settings can be changed - eg. text content, position, scale, etc. :param text_data: starting text to display (can be changed at a later time by set_text) :param font_name: name of font to render - same as font folder in iGibson assets :param font_style: style of font - one of [regular, italic, bold] :param font_size: size of font to render :param color: [r, g, b] color :param pixel_pos: [x, y] position of top-left corner of text box, in pixel coordinates :param pixel_size: [w, h] size of text box in pixel coordinates :param scale: scale factor for resizing text :param background_color: color of the background in form [r, g, b, a] - background will only appear if this is not None :param render_to_tex: whether text should be rendered to an OpenGL texture or the screen (the default)
igibson/render/mesh_renderer/mesh_renderer_cpu.py
add_text
suresh-guttikonda/iGibson
0
python
def add_text(self, text_data='PLACEHOLDER: PLEASE REPLACE!', font_name='OpenSans', font_style='Regular', font_size=48, color=None, pixel_pos=None, pixel_size=None, scale=1.0, background_color=None, render_to_tex=False):
    """Creates a Text object with the given parameters. Returns the text object to the caller,
    so various settings can be changed - eg. text content, position, scale, etc.

    :param text_data: starting text to display (can be changed at a later time by set_text)
    :param font_name: name of font to render - same as font folder in iGibson assets
    :param font_style: style of font - one of [regular, italic, bold]
    :param font_size: size of font to render
    :param color: [r, g, b] color (defaults to black, [0, 0, 0])
    :param pixel_pos: [x, y] position of top-left corner of text box, in pixel coordinates
        (defaults to [0, 0])
    :param pixel_size: [w, h] size of text box in pixel coordinates (defaults to [200, 200])
    :param scale: scale factor for resizing text
    :param background_color: color of the background in form [r, g, b, a] - background will
        only appear if this is not None
    :param render_to_tex: whether text should be rendered to an OpenGL texture or the screen
        (the default)
    :return: the newly created Text object
    """
    # Fix: the previous version used mutable list defaults ([0, 0, 0], [0, 0],
    # [200, 200]). A list default is created once and shared by every call, and
    # each Text object would hold a reference to the very same list, so mutating
    # one text's color/position could silently affect all others. Use None
    # sentinels and build a fresh list per call instead.
    if color is None:
        color = [0, 0, 0]
    if pixel_pos is None:
        pixel_pos = [0, 0]
    if pixel_size is None:
        pixel_size = [200, 200]
    text = Text(
        text_data=text_data,
        font_name=font_name,
        font_style=font_style,
        font_size=font_size,
        color=color,
        pos=pixel_pos,
        scale=scale,
        tbox_height=pixel_size[1],
        tbox_width=pixel_size[0],
        render_to_tex=render_to_tex,
        background_color=background_color,
        text_manager=self.text_manager,
    )
    self.texts.append(text)
    return text
06d033a12627147b46363c68cb89fc3a77d7664567248ae43bb8ee9c4c892840
def set_camera(self, camera, target, up, cache=False): '\n Set camera pose\n\n :param camera: camera position\n :param target: camera target\n :param up: up direction\n :param cache: whether to cache pose\n ' self.camera = camera self.target = target self.up = up if cache: self.last_V = np.copy(self.cache) V = lookat(self.camera, self.target, up=self.up) self.V = np.ascontiguousarray(V, np.float32) self.set_light_position_direction([self.camera[0], self.camera[1], 10], [self.camera[0], self.camera[1], 0]) if cache: self.cache = self.V
Set camera pose :param camera: camera position :param target: camera target :param up: up direction :param cache: whether to cache pose
igibson/render/mesh_renderer/mesh_renderer_cpu.py
set_camera
suresh-guttikonda/iGibson
0
python
def set_camera(self, camera, target, up, cache=False):
    """Set the camera pose and recompute the view matrix.

    :param camera: camera position
    :param target: camera target
    :param up: up direction
    :param cache: whether to cache the pose; when True, the previously cached
        view matrix is saved into ``last_V`` and the new one is cached
    """
    self.camera = camera
    self.target = target
    self.up = up
    # Remember the previously cached view matrix before overwriting V.
    if cache:
        self.last_V = np.copy(self.cache)
    view = lookat(self.camera, self.target, up=self.up)
    self.V = np.ascontiguousarray(view, np.float32)
    # Keep the light directly above the camera (z=10), aimed straight down
    # at the ground plane (z=0) under the camera.
    light_pos = [self.camera[0], self.camera[1], 10]
    light_dir = [self.camera[0], self.camera[1], 0]
    self.set_light_position_direction(light_pos, light_dir)
    if cache:
        self.cache = self.V
f736b32cb98be41b3db3ee5cc287632c51728babe5ee374979956712893c61cb
def set_z_near_z_far(self, znear, zfar): '\n Set z limit for camera\n\n :param znear: lower limit for z\n :param zfar: upper limit for z\n ' self.znear = znear self.zfar = zfar
Set z limit for camera :param znear: lower limit for z :param zfar: upper limit for z
igibson/render/mesh_renderer/mesh_renderer_cpu.py
set_z_near_z_far
suresh-guttikonda/iGibson
0
python
def set_z_near_z_far(self, znear, zfar):
    """Set the near and far clipping limits for the camera.

    :param znear: lower limit for z
    :param zfar: upper limit for z
    """
    self.znear, self.zfar = znear, zfar
bd4421f2d6e8563b10975dd7ecc1c06b88f14ce5954554d482b2ec3c3281d6f5
def set_fov(self, fov): '\n Set the field of view. Given the vertical fov, set it.\n Also, compute the horizontal fov based on the aspect ratio, and set it.\n\n :param fov: vertical fov\n ' self.vertical_fov = fov self.horizontal_fov = (((2 * np.arctan(((np.tan((((self.vertical_fov / 180.0) * np.pi) / 2.0)) * self.width) / self.height))) / np.pi) * 180.0) P = perspective(self.vertical_fov, (float(self.width) / float(self.height)), self.znear, self.zfar) self.P = np.ascontiguousarray(P, np.float32)
Set the field of view. Given the vertical fov, set it. Also, compute the horizontal fov based on the aspect ratio, and set it. :param fov: vertical fov
igibson/render/mesh_renderer/mesh_renderer_cpu.py
set_fov
suresh-guttikonda/iGibson
0
python
def set_fov(self, fov):
    """Set the vertical field of view and refresh the projection matrix.

    The horizontal fov is derived from the vertical fov and the viewport
    aspect ratio.

    :param fov: vertical fov, in degrees
    """
    self.vertical_fov = fov
    aspect = float(self.width) / float(self.height)
    # tan(v/2) scaled by the aspect ratio gives tan(h/2).
    half_v_rad = np.radians(self.vertical_fov) / 2.0
    self.horizontal_fov = np.degrees(2.0 * np.arctan(np.tan(half_v_rad) * aspect))
    proj = perspective(self.vertical_fov, aspect, self.znear, self.zfar)
    self.P = np.ascontiguousarray(proj, np.float32)
5ea76c01bace93c3d083f5a007f66c71e6706cda77094a1e7c0528254fa6b1fe
def set_light_color(self, color): '\n Set light color\n\n :param color: light color\n ' self.lightcolor = color
Set light color :param color: light color
igibson/render/mesh_renderer/mesh_renderer_cpu.py
set_light_color
suresh-guttikonda/iGibson
0
python
def set_light_color(self, color):
    """Set the color of the light source.

    :param color: light color
    """
    self.lightcolor = color
3aacf7efd345759429962b2f86b3b27256a54d0654be02797ad9b8e56111fa3e
def get_intrinsics(self): '\n Get camera intrinsics\n\n :return: camera instrincs\n ' P = self.P (w, h) = (self.width, self.height) znear = self.znear a = ((2.0 * znear) / P[(0, 0)]) b = (P[(2, 0)] * a) right = ((a + b) / 2.0) left = (b - right) c = ((- (2.0 * znear)) / P[(1, 1)]) d = (P[(2, 1)] * c) top = ((c + d) / 2.0) bottom = (d - top) fu = ((w * znear) / (right - left)) fv = (((- h) * znear) / (top - bottom)) u0 = (w - ((right * fu) / znear)) v0 = (h - ((bottom * fv) / znear)) return np.array([[fu, 0, u0], [0, fv, v0], [0, 0, 1]])
Get camera intrinsics :return: camera intrinsics
igibson/render/mesh_renderer/mesh_renderer_cpu.py
get_intrinsics
suresh-guttikonda/iGibson
0
python
def get_intrinsics(self):
    """Recover the pinhole camera intrinsics from the projection matrix.

    Inverts the construction used by ``set_projection_matrix``: first the
    view-frustum extents at the near plane are extracted from ``self.P``,
    then the focal lengths and principal point are derived from them.

    :return: 3x3 camera intrinsics matrix [[fu, 0, u0], [0, fv, v0], [0, 0, 1]]
    """
    P = self.P
    w, h = self.width, self.height
    znear = self.znear
    # Frustum extents (left/right/top/bottom) at the near plane.
    a = 2.0 * znear / P[0, 0]
    b = P[2, 0] * a
    right = (a + b) / 2.0
    left = b - right
    c = -(2.0 * znear) / P[1, 1]
    d = P[2, 1] * c
    top = (c + d) / 2.0
    bottom = d - top
    # Focal lengths in pixels and principal point.
    fu = w * znear / (right - left)
    fv = -h * znear / (top - bottom)
    u0 = w - right * fu / znear
    v0 = h - bottom * fv / znear
    return np.array([[fu, 0, u0], [0, fv, v0], [0, 0, 1]])
80730bf038c409b9a75b6c073932cf5efdef467423a1323590f74cb7e5031d1b
def set_projection_matrix(self, fu, fv, u0, v0, znear, zfar): '\n Set projection matrix, given camera intrincs parameters\n ' w = self.width h = self.height self.znear = znear self.zfar = zfar L = (((- u0) * znear) / fu) R = (((+ (w - u0)) * znear) / fu) T = (((- v0) * znear) / fv) B = (((+ (h - v0)) * znear) / fv) P = np.zeros((4, 4), dtype=np.float32) P[(0, 0)] = ((2 * znear) / (R - L)) P[(1, 1)] = (((- 2) * znear) / (T - B)) P[(2, 0)] = ((R + L) / (R - L)) P[(2, 1)] = ((T + B) / (T - B)) P[(2, 2)] = ((- (zfar + znear)) / (zfar - znear)) P[(2, 3)] = (- 1.0) P[(3, 2)] = (((2 * zfar) * znear) / (znear - zfar)) self.P = P
Set projection matrix, given camera intrinsics parameters
igibson/render/mesh_renderer/mesh_renderer_cpu.py
set_projection_matrix
suresh-guttikonda/iGibson
0
python
def set_projection_matrix(self, fu, fv, u0, v0, znear, zfar):
    """Build the OpenGL projection matrix from pinhole camera intrinsics.

    :param fu: horizontal focal length, in pixels
    :param fv: vertical focal length, in pixels
    :param u0: principal point x coordinate, in pixels
    :param v0: principal point y coordinate, in pixels
    :param znear: near clipping distance
    :param zfar: far clipping distance
    """
    w, h = self.width, self.height
    self.znear = znear
    self.zfar = zfar
    # Frustum extents of the image plane at the near clipping distance.
    left = -u0 * znear / fu
    right = (w - u0) * znear / fu
    top = -v0 * znear / fv
    bottom = (h - v0) * znear / fv
    P = np.zeros((4, 4), dtype=np.float32)
    P[0, 0] = 2 * znear / (right - left)
    P[1, 1] = -2 * znear / (top - bottom)
    P[2, 0] = (right + left) / (right - left)
    P[2, 1] = (top + bottom) / (top - bottom)
    P[2, 2] = -(zfar + znear) / (zfar - znear)
    P[2, 3] = -1.0
    P[3, 2] = 2 * zfar * znear / (znear - zfar)
    self.P = P
ce329165d1982948961d36f3a48b00aa168020ccd5ad3be5d00e6429bbd4abb2
def readbuffer(self, modes=AVAILABLE_MODALITIES): "\n Read framebuffer of rendering.\n\n :param modes: it should be a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow').\n :return: a list of numpy arrays corresponding to `modes`\n " results = [] if isinstance(modes, str): modes = [modes] for mode in modes: if (mode not in AVAILABLE_MODALITIES): raise Exception('unknown rendering mode: {}'.format(mode)) frame = self.r.readbuffer_meshrenderer(mode, self.width, self.height, self.fbo) frame = frame.reshape(self.height, self.width, 4)[(::(- 1), :)] results.append(frame) return results
Read framebuffer of rendering. :param modes: it should be a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow'). :return: a list of numpy arrays corresponding to `modes`
igibson/render/mesh_renderer/mesh_renderer_cpu.py
readbuffer
suresh-guttikonda/iGibson
0
python
def readbuffer(self, modes=AVAILABLE_MODALITIES):
    """Read the rendering output back from the framebuffer.

    :param modes: a single mode name, or a tuple consisting of a subset of
        ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow')
    :return: a list of numpy arrays corresponding to `modes`
    """
    if isinstance(modes, str):
        modes = [modes]
    results = []
    for mode in modes:
        if mode not in AVAILABLE_MODALITIES:
            raise Exception('unknown rendering mode: {}'.format(mode))
        frame = self.r.readbuffer_meshrenderer(mode, self.width, self.height, self.fbo)
        # Reshape to (H, W, 4) and flip vertically — OpenGL's framebuffer
        # origin is the bottom-left corner.
        results.append(frame.reshape(self.height, self.width, 4)[::-1, :])
    return results
d6f17dab613a215645a48eaf44c4d2fbc092bbfba21e03e5a3f2770cf34c2b8e
def render(self, modes=AVAILABLE_MODALITIES, hidden=(), return_buffer=True, render_shadow_pass=True, render_text_pass=True): "\n A function to render all the instances in the renderer and read the output from framebuffer.\n\n :param modes: a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow').\n :param hidden: hidden instances to skip. When rendering from a robot's perspective, it's own body can be hidden\n :param return_buffer: whether to return the frame buffers as numpy arrays\n :param render_shadow_pass: whether to render shadow\n :return: a list of float32 numpy arrays of shape (H, W, 4) corresponding to `modes`, where last channel is alpha\n " if (self.optimized and (not self.optimization_process_executed)): self.optimize_vertex_and_texture() if self.optimized: self.update_optimized_texture() if (('seg' in modes) and self.rendering_settings.msaa): logging.warning('Rendering segmentation masks with MSAA on may generate interpolation artifacts. 
It is recommended to turn MSAA off when rendering segmentation.') render_shadow_pass = (render_shadow_pass and ('rgb' in modes)) need_flow_info = (('optical_flow' in modes) or ('scene_flow' in modes)) self.update_dynamic_positions(need_flow_info=need_flow_info) if (self.enable_shadow and render_shadow_pass): if self.msaa: self.r.render_meshrenderer_pre(1, self.fbo_ms, self.fbo) else: self.r.render_meshrenderer_pre(0, 0, self.fbo) if self.optimized: shadow_hidden_instances = [i for i in self.instances if ((not i.shadow_caster) and (not i.hidden))] for instance in shadow_hidden_instances: instance.hidden = True self.update_hidden_highlight_state(shadow_hidden_instances) self.r.updateDynamicData(self.shaderProgram, self.pose_trans_array, self.pose_rot_array, self.last_trans_array, self.last_rot_array, self.V, self.last_V, self.P, self.lightV, self.lightP, ShadowPass.HAS_SHADOW_RENDER_SHADOW, self.camera) self.r.renderOptimized(self.optimized_VAO) for instance in shadow_hidden_instances: instance.hidden = False self.update_hidden_highlight_state(shadow_hidden_instances) else: for instance in self.instances: if (((instance not in hidden) and (not instance.hidden)) and instance.shadow_caster): instance.render(shadow_pass=ShadowPass.HAS_SHADOW_RENDER_SHADOW) self.r.render_meshrenderer_post() if self.msaa: self.r.blit_buffer(self.width, self.height, self.fbo_ms, self.fbo) self.r.readbuffer_meshrenderer_shadow_depth(self.width, self.height, self.fbo, self.depth_tex_shadow) if self.optimized: all_instances = [i for i in self.instances if (i.or_buffer_indices is not None)] self.update_hidden_highlight_state(all_instances) if self.msaa: self.r.render_meshrenderer_pre(1, self.fbo_ms, self.fbo) else: self.r.render_meshrenderer_pre(0, 0, self.fbo) if self.rendering_settings.enable_pbr: self.r.renderSkyBox(self.skyboxShaderProgram, self.V, self.P) if self.optimized: if self.enable_shadow: self.r.updateDynamicData(self.shaderProgram, self.pose_trans_array, self.pose_rot_array, 
self.last_trans_array, self.last_rot_array, self.V, self.last_V, self.P, self.lightV, self.lightP, ShadowPass.HAS_SHADOW_RENDER_SCENE, self.camera) else: self.r.updateDynamicData(self.shaderProgram, self.pose_trans_array, self.pose_rot_array, self.last_trans_array, self.last_rot_array, self.V, self.last_V, self.P, self.lightV, self.lightP, ShadowPass.NO_SHADOW, self.camera) self.r.renderOptimized(self.optimized_VAO) else: for instance in self.instances: if ((instance not in hidden) and (not instance.hidden)): if self.enable_shadow: instance.render(shadow_pass=ShadowPass.HAS_SHADOW_RENDER_SCENE) else: instance.render(shadow_pass=ShadowPass.NO_SHADOW) if render_text_pass: self.r.preRenderTextFramebufferSetup(self.text_manager.FBO) for text in self.texts: text.render() self.r.render_meshrenderer_post() if self.msaa: self.r.blit_buffer(self.width, self.height, self.fbo_ms, self.fbo) if return_buffer: return self.readbuffer(modes)
A function to render all the instances in the renderer and read the output from framebuffer. :param modes: a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow'). :param hidden: hidden instances to skip. When rendering from a robot's perspective, it's own body can be hidden :param return_buffer: whether to return the frame buffers as numpy arrays :param render_shadow_pass: whether to render shadow :return: a list of float32 numpy arrays of shape (H, W, 4) corresponding to `modes`, where last channel is alpha
igibson/render/mesh_renderer/mesh_renderer_cpu.py
render
suresh-guttikonda/iGibson
0
python
def render(self, modes=AVAILABLE_MODALITIES, hidden=(), return_buffer=True, render_shadow_pass=True, render_text_pass=True):
    """Render all instances in the renderer and read the output from the framebuffer.

    :param modes: a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d',
        'scene_flow', 'optical_flow')
    :param hidden: hidden instances to skip. When rendering from a robot's
        perspective, its own body can be hidden
    :param return_buffer: whether to return the frame buffers as numpy arrays
    :param render_shadow_pass: whether to render shadow
    :param render_text_pass: whether to render the text overlay pass
    :return: a list of float32 numpy arrays of shape (H, W, 4) corresponding to
        `modes`, where last channel is alpha
    """
    # Lazily run the one-time buffer optimization, then refresh textures.
    if self.optimized and not self.optimization_process_executed:
        self.optimize_vertex_and_texture()
    if self.optimized:
        self.update_optimized_texture()
    if 'seg' in modes and self.rendering_settings.msaa:
        logging.warning('Rendering segmentation masks with MSAA on may generate interpolation artifacts. It is recommended to turn MSAA off when rendering segmentation.')
    # Shadows are only meaningful for the rgb modality.
    render_shadow_pass = render_shadow_pass and 'rgb' in modes
    need_flow_info = 'optical_flow' in modes or 'scene_flow' in modes
    self.update_dynamic_positions(need_flow_info=need_flow_info)

    # --- Shadow pass: render depth from the light's point of view -----------
    if self.enable_shadow and render_shadow_pass:
        if self.msaa:
            self.r.render_meshrenderer_pre(1, self.fbo_ms, self.fbo)
        else:
            self.r.render_meshrenderer_pre(0, 0, self.fbo)
        if self.optimized:
            # Temporarily hide instances that do not cast shadows.
            temporarily_hidden = [ins for ins in self.instances if not ins.shadow_caster and not ins.hidden]
            for ins in temporarily_hidden:
                ins.hidden = True
            self.update_hidden_highlight_state(temporarily_hidden)
            self.r.updateDynamicData(self.shaderProgram, self.pose_trans_array, self.pose_rot_array, self.last_trans_array, self.last_rot_array, self.V, self.last_V, self.P, self.lightV, self.lightP, ShadowPass.HAS_SHADOW_RENDER_SHADOW, self.camera)
            self.r.renderOptimized(self.optimized_VAO)
            for ins in temporarily_hidden:
                ins.hidden = False
            self.update_hidden_highlight_state(temporarily_hidden)
        else:
            for ins in self.instances:
                if ins not in hidden and not ins.hidden and ins.shadow_caster:
                    ins.render(shadow_pass=ShadowPass.HAS_SHADOW_RENDER_SHADOW)
        self.r.render_meshrenderer_post()
        if self.msaa:
            self.r.blit_buffer(self.width, self.height, self.fbo_ms, self.fbo)
        self.r.readbuffer_meshrenderer_shadow_depth(self.width, self.height, self.fbo, self.depth_tex_shadow)

    # Restore hidden/highlight state for every instance with buffer indices.
    if self.optimized:
        buffered = [ins for ins in self.instances if ins.or_buffer_indices is not None]
        self.update_hidden_highlight_state(buffered)

    # --- Main pass ----------------------------------------------------------
    if self.msaa:
        self.r.render_meshrenderer_pre(1, self.fbo_ms, self.fbo)
    else:
        self.r.render_meshrenderer_pre(0, 0, self.fbo)
    if self.rendering_settings.enable_pbr:
        self.r.renderSkyBox(self.skyboxShaderProgram, self.V, self.P)
    if self.optimized:
        scene_shadow_pass = ShadowPass.HAS_SHADOW_RENDER_SCENE if self.enable_shadow else ShadowPass.NO_SHADOW
        self.r.updateDynamicData(self.shaderProgram, self.pose_trans_array, self.pose_rot_array, self.last_trans_array, self.last_rot_array, self.V, self.last_V, self.P, self.lightV, self.lightP, scene_shadow_pass, self.camera)
        self.r.renderOptimized(self.optimized_VAO)
    else:
        for ins in self.instances:
            if ins not in hidden and not ins.hidden:
                if self.enable_shadow:
                    ins.render(shadow_pass=ShadowPass.HAS_SHADOW_RENDER_SCENE)
                else:
                    ins.render(shadow_pass=ShadowPass.NO_SHADOW)

    # --- Text overlay pass --------------------------------------------------
    if render_text_pass:
        self.r.preRenderTextFramebufferSetup(self.text_manager.FBO)
        for text in self.texts:
            text.render()
    self.r.render_meshrenderer_post()
    if self.msaa:
        self.r.blit_buffer(self.width, self.height, self.fbo_ms, self.fbo)
    if return_buffer:
        return self.readbuffer(modes)
def render(self, modes=AVAILABLE_MODALITIES, hidden=(), return_buffer=True, render_shadow_pass=True, render_text_pass=True): "\n A function to render all the instances in the renderer and read the output from framebuffer.\n\n :param modes: a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow').\n :param hidden: hidden instances to skip. When rendering from a robot's perspective, it's own body can be hidden\n :param return_buffer: whether to return the frame buffers as numpy arrays\n :param render_shadow_pass: whether to render shadow\n :return: a list of float32 numpy arrays of shape (H, W, 4) corresponding to `modes`, where last channel is alpha\n " if (self.optimized and (not self.optimization_process_executed)): self.optimize_vertex_and_texture() if self.optimized: self.update_optimized_texture() if (('seg' in modes) and self.rendering_settings.msaa): logging.warning('Rendering segmentation masks with MSAA on may generate interpolation artifacts. 
It is recommended to turn MSAA off when rendering segmentation.') render_shadow_pass = (render_shadow_pass and ('rgb' in modes)) need_flow_info = (('optical_flow' in modes) or ('scene_flow' in modes)) self.update_dynamic_positions(need_flow_info=need_flow_info) if (self.enable_shadow and render_shadow_pass): if self.msaa: self.r.render_meshrenderer_pre(1, self.fbo_ms, self.fbo) else: self.r.render_meshrenderer_pre(0, 0, self.fbo) if self.optimized: shadow_hidden_instances = [i for i in self.instances if ((not i.shadow_caster) and (not i.hidden))] for instance in shadow_hidden_instances: instance.hidden = True self.update_hidden_highlight_state(shadow_hidden_instances) self.r.updateDynamicData(self.shaderProgram, self.pose_trans_array, self.pose_rot_array, self.last_trans_array, self.last_rot_array, self.V, self.last_V, self.P, self.lightV, self.lightP, ShadowPass.HAS_SHADOW_RENDER_SHADOW, self.camera) self.r.renderOptimized(self.optimized_VAO) for instance in shadow_hidden_instances: instance.hidden = False self.update_hidden_highlight_state(shadow_hidden_instances) else: for instance in self.instances: if (((instance not in hidden) and (not instance.hidden)) and instance.shadow_caster): instance.render(shadow_pass=ShadowPass.HAS_SHADOW_RENDER_SHADOW) self.r.render_meshrenderer_post() if self.msaa: self.r.blit_buffer(self.width, self.height, self.fbo_ms, self.fbo) self.r.readbuffer_meshrenderer_shadow_depth(self.width, self.height, self.fbo, self.depth_tex_shadow) if self.optimized: all_instances = [i for i in self.instances if (i.or_buffer_indices is not None)] self.update_hidden_highlight_state(all_instances) if self.msaa: self.r.render_meshrenderer_pre(1, self.fbo_ms, self.fbo) else: self.r.render_meshrenderer_pre(0, 0, self.fbo) if self.rendering_settings.enable_pbr: self.r.renderSkyBox(self.skyboxShaderProgram, self.V, self.P) if self.optimized: if self.enable_shadow: self.r.updateDynamicData(self.shaderProgram, self.pose_trans_array, self.pose_rot_array, 
self.last_trans_array, self.last_rot_array, self.V, self.last_V, self.P, self.lightV, self.lightP, ShadowPass.HAS_SHADOW_RENDER_SCENE, self.camera) else: self.r.updateDynamicData(self.shaderProgram, self.pose_trans_array, self.pose_rot_array, self.last_trans_array, self.last_rot_array, self.V, self.last_V, self.P, self.lightV, self.lightP, ShadowPass.NO_SHADOW, self.camera) self.r.renderOptimized(self.optimized_VAO) else: for instance in self.instances: if ((instance not in hidden) and (not instance.hidden)): if self.enable_shadow: instance.render(shadow_pass=ShadowPass.HAS_SHADOW_RENDER_SCENE) else: instance.render(shadow_pass=ShadowPass.NO_SHADOW) if render_text_pass: self.r.preRenderTextFramebufferSetup(self.text_manager.FBO) for text in self.texts: text.render() self.r.render_meshrenderer_post() if self.msaa: self.r.blit_buffer(self.width, self.height, self.fbo_ms, self.fbo) if return_buffer: return self.readbuffer(modes)<|docstring|>A function to render all the instances in the renderer and read the output from framebuffer. :param modes: a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow'). :param hidden: hidden instances to skip. When rendering from a robot's perspective, it's own body can be hidden :param return_buffer: whether to return the frame buffers as numpy arrays :param render_shadow_pass: whether to render shadow :return: a list of float32 numpy arrays of shape (H, W, 4) corresponding to `modes`, where last channel is alpha<|endoftext|>
731075939dc88febd29a484c260a53a96440045609df02ebddc83e3baa900ca2
def render_companion_window(self): '\n Render companion window.\n The viewer is responsible for calling this to update the window,\n if cv2 is not being used for window display\n ' self.r.render_companion_window_from_buffer(self.fbo)
Render companion window. The viewer is responsible for calling this to update the window, if cv2 is not being used for window display
igibson/render/mesh_renderer/mesh_renderer_cpu.py
render_companion_window
suresh-guttikonda/iGibson
0
python
def render_companion_window(self): '\n Render companion window.\n The viewer is responsible for calling this to update the window,\n if cv2 is not being used for window display\n ' self.r.render_companion_window_from_buffer(self.fbo)
def render_companion_window(self): '\n Render companion window.\n The viewer is responsible for calling this to update the window,\n if cv2 is not being used for window display\n ' self.r.render_companion_window_from_buffer(self.fbo)<|docstring|>Render companion window. The viewer is responsible for calling this to update the window, if cv2 is not being used for window display<|endoftext|>
7a38919deda40805c48d13e13d347493e1edcb22a0cad27f096bb97b71796366
def get_visual_objects(self): '\n Return visual objects\n ' return self.visual_objects
Return visual objects
igibson/render/mesh_renderer/mesh_renderer_cpu.py
get_visual_objects
suresh-guttikonda/iGibson
0
python
def get_visual_objects(self): '\n \n ' return self.visual_objects
def get_visual_objects(self): '\n \n ' return self.visual_objects<|docstring|>Return visual objects<|endoftext|>
c37bfd7adb1f6138bbd6f97834436d74f64261a65898f844dd59a179c7ba7237
def get_instances(self): '\n Return instances\n ' return self.instances
Return instances
igibson/render/mesh_renderer/mesh_renderer_cpu.py
get_instances
suresh-guttikonda/iGibson
0
python
def get_instances(self): '\n \n ' return self.instances
def get_instances(self): '\n \n ' return self.instances<|docstring|>Return instances<|endoftext|>
96b5b55d111982019210bed31c89a67ad293a4bc898b2945857f415ca13210a1
def dump(self): '\n Dump instance vertex and face information\n ' instances_vertices = [] instances_faces = [] len_v = 0 for instance in self.instances: (vertex_info, face_info) = instance.dump() for (v, f) in zip(vertex_info, face_info): instances_vertices.append(v) instances_faces.append((f + len_v)) len_v += len(v) instances_vertices = np.concatenate(instances_vertices, axis=0) instances_faces = np.concatenate(instances_faces, axis=0) return (instances_vertices, instances_faces)
Dump instance vertex and face information
igibson/render/mesh_renderer/mesh_renderer_cpu.py
dump
suresh-guttikonda/iGibson
0
python
def dump(self): '\n \n ' instances_vertices = [] instances_faces = [] len_v = 0 for instance in self.instances: (vertex_info, face_info) = instance.dump() for (v, f) in zip(vertex_info, face_info): instances_vertices.append(v) instances_faces.append((f + len_v)) len_v += len(v) instances_vertices = np.concatenate(instances_vertices, axis=0) instances_faces = np.concatenate(instances_faces, axis=0) return (instances_vertices, instances_faces)
def dump(self): '\n \n ' instances_vertices = [] instances_faces = [] len_v = 0 for instance in self.instances: (vertex_info, face_info) = instance.dump() for (v, f) in zip(vertex_info, face_info): instances_vertices.append(v) instances_faces.append((f + len_v)) len_v += len(v) instances_vertices = np.concatenate(instances_vertices, axis=0) instances_faces = np.concatenate(instances_faces, axis=0) return (instances_vertices, instances_faces)<|docstring|>Dump instance vertex and face information<|endoftext|>
113d49263cd6f8fbce7d8e64d3b3eb2473fb59dfb082392d3e428208a529db9f
def set_light_pos(self, light): '\n Set light position\n\n :param light: light position\n ' self.lightpos = light
Set light position :param light: light position
igibson/render/mesh_renderer/mesh_renderer_cpu.py
set_light_pos
suresh-guttikonda/iGibson
0
python
def set_light_pos(self, light): '\n Set light position\n\n :param light: light position\n ' self.lightpos = light
def set_light_pos(self, light): '\n Set light position\n\n :param light: light position\n ' self.lightpos = light<|docstring|>Set light position :param light: light position<|endoftext|>
856b46e6cd08d0a57c89d239afe7cb997fca80fe000e95031d92753bbdf83c2c
def get_num_objects(self): '\n Return the number of objects\n ' return len(self.objects)
Return the number of objects
igibson/render/mesh_renderer/mesh_renderer_cpu.py
get_num_objects
suresh-guttikonda/iGibson
0
python
def get_num_objects(self): '\n \n ' return len(self.objects)
def get_num_objects(self): '\n \n ' return len(self.objects)<|docstring|>Return the number of objects<|endoftext|>
190c79f3bbb1562e44ce7aa67e853ed2fc0ca79bb98d005d1ad52319bd76ba25
def set_pose(self, pose, idx): '\n Set pose for a specific instance\n\n :param pose: instance pose\n :param idx: instance id\n ' self.instances[idx].last_rot = np.copy(self.instances[idx].pose_rot) self.instances[idx].last_trans = np.copy(self.instances[idx].pose_trans) self.instances[idx].pose_rot = np.ascontiguousarray(quat2rotmat(pose[3:])) self.instances[idx].pose_trans = np.ascontiguousarray(xyz2mat(pose[:3]))
Set pose for a specific instance :param pose: instance pose :param idx: instance id
igibson/render/mesh_renderer/mesh_renderer_cpu.py
set_pose
suresh-guttikonda/iGibson
0
python
def set_pose(self, pose, idx): '\n Set pose for a specific instance\n\n :param pose: instance pose\n :param idx: instance id\n ' self.instances[idx].last_rot = np.copy(self.instances[idx].pose_rot) self.instances[idx].last_trans = np.copy(self.instances[idx].pose_trans) self.instances[idx].pose_rot = np.ascontiguousarray(quat2rotmat(pose[3:])) self.instances[idx].pose_trans = np.ascontiguousarray(xyz2mat(pose[:3]))
def set_pose(self, pose, idx): '\n Set pose for a specific instance\n\n :param pose: instance pose\n :param idx: instance id\n ' self.instances[idx].last_rot = np.copy(self.instances[idx].pose_rot) self.instances[idx].last_trans = np.copy(self.instances[idx].pose_trans) self.instances[idx].pose_rot = np.ascontiguousarray(quat2rotmat(pose[3:])) self.instances[idx].pose_trans = np.ascontiguousarray(xyz2mat(pose[:3]))<|docstring|>Set pose for a specific instance :param pose: instance pose :param idx: instance id<|endoftext|>
b197e24950b797fbfe49fa45229caac52f4370a5e1cc688362221cbcacddb694
def release(self): '\n Clean everything, and release the openGL context.\n ' logging.debug('Releasing. {}'.format(self.glstring)) self.clean() self.r.release()
Clean everything, and release the openGL context.
igibson/render/mesh_renderer/mesh_renderer_cpu.py
release
suresh-guttikonda/iGibson
0
python
def release(self): '\n \n ' logging.debug('Releasing. {}'.format(self.glstring)) self.clean() self.r.release()
def release(self): '\n \n ' logging.debug('Releasing. {}'.format(self.glstring)) self.clean() self.r.release()<|docstring|>Clean everything, and release the openGL context.<|endoftext|>
5f2f1c145bf4a6deda2f6e99693b73b7c1a7190edb1dc743dc72c81fb7095dc1
def clean(self): '\n Clean all the framebuffers, objects and instances\n ' clean_list = ([self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_3d, self.depth_tex, self.color_tex_scene_flow, self.color_tex_optical_flow, self.color_tex_ins_seg, self.text_manager.render_tex] + [i for i in self.text_manager.tex_ids]) fbo_list = [self.fbo, self.text_manager.FBO] if self.msaa: clean_list += [self.color_tex_rgb_ms, self.color_tex_normal_ms, self.color_tex_semantics_ms, self.color_tex_3d_ms, self.depth_tex_ms, self.color_tex_scene_flow_ms, self.color_tex_optical_flow_ms, self.color_tex_ins_seg_ms] fbo_list += [self.fbo_ms] text_vaos = [t.VAO for t in self.texts] text_vbos = [t.VBO for t in self.texts] if (self.optimized and self.optimization_process_executed): self.r.clean_meshrenderer_optimized(clean_list, [self.tex_id_1, self.tex_id_2], fbo_list, ([self.optimized_VAO] + text_vaos), ([self.optimized_VBO] + text_vbos), [self.optimized_EBO]) else: self.r.clean_meshrenderer(clean_list, self.textures, fbo_list, (self.VAOs + text_vaos), (self.VBOs + text_vbos)) self.text_manager.tex_ids = [] self.color_tex_rgb = None self.color_tex_normal = None self.color_tex_semantics = None self.color_tex_3d = None self.color_tex_scene_flow = None self.color_tex_optical_flow = None self.color_tex_ins_seg = None self.depth_tex = None self.fbo = None self.VAOs = [] self.VBOs = [] self.textures = [] self.objects = [] self.faces = [] self.visual_objects = [] self.instances = [] self.vertex_data = [] self.shapes = [] save_path = os.path.join(igibson.ig_dataset_path, 'tmp') if os.path.isdir(save_path): shutil.rmtree(save_path)
Clean all the framebuffers, objects and instances
igibson/render/mesh_renderer/mesh_renderer_cpu.py
clean
suresh-guttikonda/iGibson
0
python
def clean(self): '\n \n ' clean_list = ([self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_3d, self.depth_tex, self.color_tex_scene_flow, self.color_tex_optical_flow, self.color_tex_ins_seg, self.text_manager.render_tex] + [i for i in self.text_manager.tex_ids]) fbo_list = [self.fbo, self.text_manager.FBO] if self.msaa: clean_list += [self.color_tex_rgb_ms, self.color_tex_normal_ms, self.color_tex_semantics_ms, self.color_tex_3d_ms, self.depth_tex_ms, self.color_tex_scene_flow_ms, self.color_tex_optical_flow_ms, self.color_tex_ins_seg_ms] fbo_list += [self.fbo_ms] text_vaos = [t.VAO for t in self.texts] text_vbos = [t.VBO for t in self.texts] if (self.optimized and self.optimization_process_executed): self.r.clean_meshrenderer_optimized(clean_list, [self.tex_id_1, self.tex_id_2], fbo_list, ([self.optimized_VAO] + text_vaos), ([self.optimized_VBO] + text_vbos), [self.optimized_EBO]) else: self.r.clean_meshrenderer(clean_list, self.textures, fbo_list, (self.VAOs + text_vaos), (self.VBOs + text_vbos)) self.text_manager.tex_ids = [] self.color_tex_rgb = None self.color_tex_normal = None self.color_tex_semantics = None self.color_tex_3d = None self.color_tex_scene_flow = None self.color_tex_optical_flow = None self.color_tex_ins_seg = None self.depth_tex = None self.fbo = None self.VAOs = [] self.VBOs = [] self.textures = [] self.objects = [] self.faces = [] self.visual_objects = [] self.instances = [] self.vertex_data = [] self.shapes = [] save_path = os.path.join(igibson.ig_dataset_path, 'tmp') if os.path.isdir(save_path): shutil.rmtree(save_path)
def clean(self): '\n \n ' clean_list = ([self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_3d, self.depth_tex, self.color_tex_scene_flow, self.color_tex_optical_flow, self.color_tex_ins_seg, self.text_manager.render_tex] + [i for i in self.text_manager.tex_ids]) fbo_list = [self.fbo, self.text_manager.FBO] if self.msaa: clean_list += [self.color_tex_rgb_ms, self.color_tex_normal_ms, self.color_tex_semantics_ms, self.color_tex_3d_ms, self.depth_tex_ms, self.color_tex_scene_flow_ms, self.color_tex_optical_flow_ms, self.color_tex_ins_seg_ms] fbo_list += [self.fbo_ms] text_vaos = [t.VAO for t in self.texts] text_vbos = [t.VBO for t in self.texts] if (self.optimized and self.optimization_process_executed): self.r.clean_meshrenderer_optimized(clean_list, [self.tex_id_1, self.tex_id_2], fbo_list, ([self.optimized_VAO] + text_vaos), ([self.optimized_VBO] + text_vbos), [self.optimized_EBO]) else: self.r.clean_meshrenderer(clean_list, self.textures, fbo_list, (self.VAOs + text_vaos), (self.VBOs + text_vbos)) self.text_manager.tex_ids = [] self.color_tex_rgb = None self.color_tex_normal = None self.color_tex_semantics = None self.color_tex_3d = None self.color_tex_scene_flow = None self.color_tex_optical_flow = None self.color_tex_ins_seg = None self.depth_tex = None self.fbo = None self.VAOs = [] self.VBOs = [] self.textures = [] self.objects = [] self.faces = [] self.visual_objects = [] self.instances = [] self.vertex_data = [] self.shapes = [] save_path = os.path.join(igibson.ig_dataset_path, 'tmp') if os.path.isdir(save_path): shutil.rmtree(save_path)<|docstring|>Clean all the framebuffers, objects and instances<|endoftext|>
6b0c501f1b415a84fb8585e3cccbfc79f2b1ef4f0a505c93c6b966e41cdfa4ed
def transform_pose(self, pose): '\n Transform pose from world frame to camera frame\n\n :param pose: pose in world frame\n :return: pose in camera frame\n ' pose_rot = quat2rotmat(pose[3:]) pose_trans = xyz2mat(pose[:3]) pose_cam = self.V.dot(pose_trans.T).dot(pose_rot).T return np.concatenate([mat2xyz(pose_cam), safemat2quat(pose_cam[(:3, :3)].T)])
Transform pose from world frame to camera frame :param pose: pose in world frame :return: pose in camera frame
igibson/render/mesh_renderer/mesh_renderer_cpu.py
transform_pose
suresh-guttikonda/iGibson
0
python
def transform_pose(self, pose): '\n Transform pose from world frame to camera frame\n\n :param pose: pose in world frame\n :return: pose in camera frame\n ' pose_rot = quat2rotmat(pose[3:]) pose_trans = xyz2mat(pose[:3]) pose_cam = self.V.dot(pose_trans.T).dot(pose_rot).T return np.concatenate([mat2xyz(pose_cam), safemat2quat(pose_cam[(:3, :3)].T)])
def transform_pose(self, pose): '\n Transform pose from world frame to camera frame\n\n :param pose: pose in world frame\n :return: pose in camera frame\n ' pose_rot = quat2rotmat(pose[3:]) pose_trans = xyz2mat(pose[:3]) pose_cam = self.V.dot(pose_trans.T).dot(pose_rot).T return np.concatenate([mat2xyz(pose_cam), safemat2quat(pose_cam[(:3, :3)].T)])<|docstring|>Transform pose from world frame to camera frame :param pose: pose in world frame :return: pose in camera frame<|endoftext|>
9b81a182c017edaaf593265a9f34a9ca4119438f8d9cc4436c555850126692c9
def render_robot_cameras(self, modes='rgb'): '\n Render robot camera images\n\n :return: a list of frames (number of modalities x number of robots)\n ' frames = [] for instance in self.instances: if isinstance(instance, Robot): camera_pos = instance.robot.eyes.get_position() orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[(:3, :3)] view_direction = mat.dot(np.array([1, 0, 0])) self.set_camera(camera_pos, (camera_pos + view_direction), [0, 0, 1], cache=True) hidden_instances = [] if self.rendering_settings.hide_robot: hidden_instances.append(instance) for item in self.render(modes=modes, hidden=hidden_instances): frames.append(item) behavior_robots = (robot for robot in self.simulator.robots if isinstance(robot, BehaviorRobot)) for robot in behavior_robots: frames.extend(robot.render_camera_image(modes=modes)) return frames
Render robot camera images :return: a list of frames (number of modalities x number of robots)
igibson/render/mesh_renderer/mesh_renderer_cpu.py
render_robot_cameras
suresh-guttikonda/iGibson
0
python
def render_robot_cameras(self, modes='rgb'): '\n Render robot camera images\n\n :return: a list of frames (number of modalities x number of robots)\n ' frames = [] for instance in self.instances: if isinstance(instance, Robot): camera_pos = instance.robot.eyes.get_position() orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[(:3, :3)] view_direction = mat.dot(np.array([1, 0, 0])) self.set_camera(camera_pos, (camera_pos + view_direction), [0, 0, 1], cache=True) hidden_instances = [] if self.rendering_settings.hide_robot: hidden_instances.append(instance) for item in self.render(modes=modes, hidden=hidden_instances): frames.append(item) behavior_robots = (robot for robot in self.simulator.robots if isinstance(robot, BehaviorRobot)) for robot in behavior_robots: frames.extend(robot.render_camera_image(modes=modes)) return frames
def render_robot_cameras(self, modes='rgb'): '\n Render robot camera images\n\n :return: a list of frames (number of modalities x number of robots)\n ' frames = [] for instance in self.instances: if isinstance(instance, Robot): camera_pos = instance.robot.eyes.get_position() orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[(:3, :3)] view_direction = mat.dot(np.array([1, 0, 0])) self.set_camera(camera_pos, (camera_pos + view_direction), [0, 0, 1], cache=True) hidden_instances = [] if self.rendering_settings.hide_robot: hidden_instances.append(instance) for item in self.render(modes=modes, hidden=hidden_instances): frames.append(item) behavior_robots = (robot for robot in self.simulator.robots if isinstance(robot, BehaviorRobot)) for robot in behavior_robots: frames.extend(robot.render_camera_image(modes=modes)) return frames<|docstring|>Render robot camera images :return: a list of frames (number of modalities x number of robots)<|endoftext|>
9ead40ffe1c2b48df1097850f1e2ffa209a285944caaa1eb297f257c9305993c
def optimize_vertex_and_texture(self): '\n Optimize vertex and texture for optimized renderer\n ' for tex_file in self.texture_files: print('Texture: ', tex_file) cutoff = (5000 * 5000) shouldShrinkSmallTextures = True smallTexSize = 512 texture_files = sorted(self.texture_files.items(), key=(lambda x: x[1])) texture_files = [item[0] for item in texture_files] (self.tex_id_1, self.tex_id_2, self.tex_id_layer_mapping) = self.r.generateArrayTextures(texture_files, cutoff, shouldShrinkSmallTextures, smallTexSize, igibson.key_path) print(self.tex_id_layer_mapping) print(len(self.texture_files), self.texture_files) self.textures.append(self.tex_id_1) self.textures.append(self.tex_id_2) offset_faces = [] curr_index_offset = 0 for i in range(len(self.vertex_data)): face_idxs = self.faces[i] offset_face_idxs = (face_idxs + curr_index_offset) offset_faces.append(offset_face_idxs) curr_index_offset += len(self.vertex_data[i]) duplicate_vao_ids = [] class_id_array = [] instance_id_array = [] pbr_data_array = [] hidden_array = [] for instance in self.instances: if isinstance(instance, Instance): ids = instance.object.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end)) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * len(ids))) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * len(ids))) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids))) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids))) elif (isinstance(instance, InstanceGroup) or isinstance(instance, Robot)): id_sum = 0 temp_or_buffer_indices = [] for vo in instance.objects: ids = vo.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, 
or_buffer_idx_end))) id_sum += len(ids) instance.or_buffer_indices = list(temp_or_buffer_indices) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * id_sum)) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * id_sum)) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)) self.or_buffer_shape_num = len(duplicate_vao_ids) self.trans_data = np.zeros((self.or_buffer_shape_num, 4, 4)) self.rot_data = np.zeros((self.or_buffer_shape_num, 4, 4)) index_ptr_offsets = [] index_counts = [] indices = [] diffuse_color_array = [] tex_num_array = [] tex_layer_array = [] roughness_tex_num_array = [] roughness_tex_layer_array = [] metallic_tex_num_array = [] metallic_tex_layer_array = [] normal_tex_num_array = [] normal_tex_layer_array = [] transform_param_array = [] index_offset = 0 for id in duplicate_vao_ids: index_ptr_offsets.append(index_offset) id_idxs = list(offset_faces[id].flatten()) indices.extend(id_idxs) index_count = len(id_idxs) index_counts.append(index_count) index_offset += index_count id_material = self.materials_mapping[self.mesh_materials[id]] texture_id = id_material.texture_id if ((texture_id == (- 1)) or (texture_id is None)): tex_num_array.append((- 1)) tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[texture_id] tex_num_array.append(tex_num) tex_layer_array.append(tex_layer) roughness_texture_id = id_material.roughness_texture_id if ((roughness_texture_id == (- 1)) or (roughness_texture_id is None)): roughness_tex_num_array.append((- 1)) roughness_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[roughness_texture_id] roughness_tex_num_array.append(tex_num) roughness_tex_layer_array.append(tex_layer) metallic_texture_id = id_material.metallic_texture_id if ((metallic_texture_id == (- 1)) or (metallic_texture_id is None)): 
metallic_tex_num_array.append((- 1)) metallic_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[metallic_texture_id] metallic_tex_num_array.append(tex_num) metallic_tex_layer_array.append(tex_layer) normal_texture_id = id_material.normal_texture_id if ((normal_texture_id == (- 1)) or (normal_texture_id is None)): normal_tex_num_array.append((- 1)) normal_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[normal_texture_id] normal_tex_num_array.append(tex_num) normal_tex_layer_array.append(tex_layer) transform_param = id_material.transform_param transform_param_array.append([transform_param[0], transform_param[1], transform_param[2], 1.0]) kd = np.asarray(id_material.kd, dtype=np.float32) kd_vec_4 = [kd[0], kd[1], kd[2], 1.0] diffuse_color_array.append(np.ascontiguousarray(kd_vec_4, dtype=np.float32)) index_ptr_offsets = np.ascontiguousarray(index_ptr_offsets, dtype=np.int32) index_counts = np.ascontiguousarray(index_counts, dtype=np.int32) indices = np.ascontiguousarray(indices, dtype=np.int32) frag_shader_data = [] pbr_data = [] hidden_data = [] uv_data = [] frag_shader_roughness_metallic_data = [] frag_shader_normal_data = [] for i in range(len(duplicate_vao_ids)): data_list = [float(tex_num_array[i]), float(tex_layer_array[i]), class_id_array[i], instance_id_array[i]] frag_shader_data.append(np.ascontiguousarray(data_list, dtype=np.float32)) pbr_data.append(np.ascontiguousarray(pbr_data_array[i], dtype=np.float32)) hidden_data.append(np.ascontiguousarray(hidden_array[i], dtype=np.float32)) roughness_metallic_data_list = [float(roughness_tex_num_array[i]), float(roughness_tex_layer_array[i]), float(metallic_tex_num_array[i]), float(metallic_tex_layer_array[i])] frag_shader_roughness_metallic_data.append(np.ascontiguousarray(roughness_metallic_data_list, dtype=np.float32)) normal_data_list = [float(normal_tex_num_array[i]), float(normal_tex_layer_array[i]), 0.0, 0.0] 
frag_shader_normal_data.append(np.ascontiguousarray(normal_data_list, dtype=np.float32)) uv_data.append(np.ascontiguousarray(transform_param_array[i], dtype=np.float32)) merged_frag_shader_data = np.ascontiguousarray(np.concatenate(frag_shader_data, axis=0), np.float32) merged_frag_shader_roughness_metallic_data = np.ascontiguousarray(np.concatenate(frag_shader_roughness_metallic_data, axis=0), np.float32) merged_frag_shader_normal_data = np.ascontiguousarray(np.concatenate(frag_shader_normal_data, axis=0), np.float32) merged_diffuse_color_array = np.ascontiguousarray(np.concatenate(diffuse_color_array, axis=0), np.float32) merged_pbr_data = np.ascontiguousarray(np.concatenate(pbr_data, axis=0), np.float32) self.merged_hidden_data = np.ascontiguousarray(np.concatenate(hidden_data, axis=0), np.float32) self.merged_uv_data = np.ascontiguousarray(np.concatenate(uv_data, axis=0), np.float32) merged_vertex_data = np.concatenate(self.vertex_data, axis=0) print('Merged vertex data shape:') print(merged_vertex_data.shape) print('Enable pbr: {}'.format(self.rendering_settings.enable_pbr)) if self.msaa: buffer = self.fbo_ms else: buffer = self.fbo (self.optimized_VAO, self.optimized_VBO, self.optimized_EBO) = self.r.renderSetup(self.shaderProgram, self.V, self.P, self.lightpos, self.lightcolor, merged_vertex_data, index_ptr_offsets, index_counts, indices, merged_frag_shader_data, merged_frag_shader_roughness_metallic_data, merged_frag_shader_normal_data, merged_diffuse_color_array, merged_pbr_data, self.merged_hidden_data, self.merged_uv_data, self.tex_id_1, self.tex_id_2, buffer, float(self.rendering_settings.enable_pbr), float(self.rendering_settings.blend_highlight), self.depth_tex_shadow) self.optimization_process_executed = True
Optimize vertex and texture for optimized renderer
igibson/render/mesh_renderer/mesh_renderer_cpu.py
optimize_vertex_and_texture
suresh-guttikonda/iGibson
0
python
def optimize_vertex_and_texture(self): '\n \n ' for tex_file in self.texture_files: print('Texture: ', tex_file) cutoff = (5000 * 5000) shouldShrinkSmallTextures = True smallTexSize = 512 texture_files = sorted(self.texture_files.items(), key=(lambda x: x[1])) texture_files = [item[0] for item in texture_files] (self.tex_id_1, self.tex_id_2, self.tex_id_layer_mapping) = self.r.generateArrayTextures(texture_files, cutoff, shouldShrinkSmallTextures, smallTexSize, igibson.key_path) print(self.tex_id_layer_mapping) print(len(self.texture_files), self.texture_files) self.textures.append(self.tex_id_1) self.textures.append(self.tex_id_2) offset_faces = [] curr_index_offset = 0 for i in range(len(self.vertex_data)): face_idxs = self.faces[i] offset_face_idxs = (face_idxs + curr_index_offset) offset_faces.append(offset_face_idxs) curr_index_offset += len(self.vertex_data[i]) duplicate_vao_ids = [] class_id_array = [] instance_id_array = [] pbr_data_array = [] hidden_array = [] for instance in self.instances: if isinstance(instance, Instance): ids = instance.object.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end)) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * len(ids))) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * len(ids))) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids))) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids))) elif (isinstance(instance, InstanceGroup) or isinstance(instance, Robot)): id_sum = 0 temp_or_buffer_indices = [] for vo in instance.objects: ids = vo.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end))) id_sum += len(ids) 
instance.or_buffer_indices = list(temp_or_buffer_indices) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * id_sum)) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * id_sum)) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)) self.or_buffer_shape_num = len(duplicate_vao_ids) self.trans_data = np.zeros((self.or_buffer_shape_num, 4, 4)) self.rot_data = np.zeros((self.or_buffer_shape_num, 4, 4)) index_ptr_offsets = [] index_counts = [] indices = [] diffuse_color_array = [] tex_num_array = [] tex_layer_array = [] roughness_tex_num_array = [] roughness_tex_layer_array = [] metallic_tex_num_array = [] metallic_tex_layer_array = [] normal_tex_num_array = [] normal_tex_layer_array = [] transform_param_array = [] index_offset = 0 for id in duplicate_vao_ids: index_ptr_offsets.append(index_offset) id_idxs = list(offset_faces[id].flatten()) indices.extend(id_idxs) index_count = len(id_idxs) index_counts.append(index_count) index_offset += index_count id_material = self.materials_mapping[self.mesh_materials[id]] texture_id = id_material.texture_id if ((texture_id == (- 1)) or (texture_id is None)): tex_num_array.append((- 1)) tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[texture_id] tex_num_array.append(tex_num) tex_layer_array.append(tex_layer) roughness_texture_id = id_material.roughness_texture_id if ((roughness_texture_id == (- 1)) or (roughness_texture_id is None)): roughness_tex_num_array.append((- 1)) roughness_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[roughness_texture_id] roughness_tex_num_array.append(tex_num) roughness_tex_layer_array.append(tex_layer) metallic_texture_id = id_material.metallic_texture_id if ((metallic_texture_id == (- 1)) or (metallic_texture_id is None)): metallic_tex_num_array.append((- 1)) 
metallic_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[metallic_texture_id] metallic_tex_num_array.append(tex_num) metallic_tex_layer_array.append(tex_layer) normal_texture_id = id_material.normal_texture_id if ((normal_texture_id == (- 1)) or (normal_texture_id is None)): normal_tex_num_array.append((- 1)) normal_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[normal_texture_id] normal_tex_num_array.append(tex_num) normal_tex_layer_array.append(tex_layer) transform_param = id_material.transform_param transform_param_array.append([transform_param[0], transform_param[1], transform_param[2], 1.0]) kd = np.asarray(id_material.kd, dtype=np.float32) kd_vec_4 = [kd[0], kd[1], kd[2], 1.0] diffuse_color_array.append(np.ascontiguousarray(kd_vec_4, dtype=np.float32)) index_ptr_offsets = np.ascontiguousarray(index_ptr_offsets, dtype=np.int32) index_counts = np.ascontiguousarray(index_counts, dtype=np.int32) indices = np.ascontiguousarray(indices, dtype=np.int32) frag_shader_data = [] pbr_data = [] hidden_data = [] uv_data = [] frag_shader_roughness_metallic_data = [] frag_shader_normal_data = [] for i in range(len(duplicate_vao_ids)): data_list = [float(tex_num_array[i]), float(tex_layer_array[i]), class_id_array[i], instance_id_array[i]] frag_shader_data.append(np.ascontiguousarray(data_list, dtype=np.float32)) pbr_data.append(np.ascontiguousarray(pbr_data_array[i], dtype=np.float32)) hidden_data.append(np.ascontiguousarray(hidden_array[i], dtype=np.float32)) roughness_metallic_data_list = [float(roughness_tex_num_array[i]), float(roughness_tex_layer_array[i]), float(metallic_tex_num_array[i]), float(metallic_tex_layer_array[i])] frag_shader_roughness_metallic_data.append(np.ascontiguousarray(roughness_metallic_data_list, dtype=np.float32)) normal_data_list = [float(normal_tex_num_array[i]), float(normal_tex_layer_array[i]), 0.0, 0.0] 
frag_shader_normal_data.append(np.ascontiguousarray(normal_data_list, dtype=np.float32)) uv_data.append(np.ascontiguousarray(transform_param_array[i], dtype=np.float32)) merged_frag_shader_data = np.ascontiguousarray(np.concatenate(frag_shader_data, axis=0), np.float32) merged_frag_shader_roughness_metallic_data = np.ascontiguousarray(np.concatenate(frag_shader_roughness_metallic_data, axis=0), np.float32) merged_frag_shader_normal_data = np.ascontiguousarray(np.concatenate(frag_shader_normal_data, axis=0), np.float32) merged_diffuse_color_array = np.ascontiguousarray(np.concatenate(diffuse_color_array, axis=0), np.float32) merged_pbr_data = np.ascontiguousarray(np.concatenate(pbr_data, axis=0), np.float32) self.merged_hidden_data = np.ascontiguousarray(np.concatenate(hidden_data, axis=0), np.float32) self.merged_uv_data = np.ascontiguousarray(np.concatenate(uv_data, axis=0), np.float32) merged_vertex_data = np.concatenate(self.vertex_data, axis=0) print('Merged vertex data shape:') print(merged_vertex_data.shape) print('Enable pbr: {}'.format(self.rendering_settings.enable_pbr)) if self.msaa: buffer = self.fbo_ms else: buffer = self.fbo (self.optimized_VAO, self.optimized_VBO, self.optimized_EBO) = self.r.renderSetup(self.shaderProgram, self.V, self.P, self.lightpos, self.lightcolor, merged_vertex_data, index_ptr_offsets, index_counts, indices, merged_frag_shader_data, merged_frag_shader_roughness_metallic_data, merged_frag_shader_normal_data, merged_diffuse_color_array, merged_pbr_data, self.merged_hidden_data, self.merged_uv_data, self.tex_id_1, self.tex_id_2, buffer, float(self.rendering_settings.enable_pbr), float(self.rendering_settings.blend_highlight), self.depth_tex_shadow) self.optimization_process_executed = True
def optimize_vertex_and_texture(self): '\n \n ' for tex_file in self.texture_files: print('Texture: ', tex_file) cutoff = (5000 * 5000) shouldShrinkSmallTextures = True smallTexSize = 512 texture_files = sorted(self.texture_files.items(), key=(lambda x: x[1])) texture_files = [item[0] for item in texture_files] (self.tex_id_1, self.tex_id_2, self.tex_id_layer_mapping) = self.r.generateArrayTextures(texture_files, cutoff, shouldShrinkSmallTextures, smallTexSize, igibson.key_path) print(self.tex_id_layer_mapping) print(len(self.texture_files), self.texture_files) self.textures.append(self.tex_id_1) self.textures.append(self.tex_id_2) offset_faces = [] curr_index_offset = 0 for i in range(len(self.vertex_data)): face_idxs = self.faces[i] offset_face_idxs = (face_idxs + curr_index_offset) offset_faces.append(offset_face_idxs) curr_index_offset += len(self.vertex_data[i]) duplicate_vao_ids = [] class_id_array = [] instance_id_array = [] pbr_data_array = [] hidden_array = [] for instance in self.instances: if isinstance(instance, Instance): ids = instance.object.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end)) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * len(ids))) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * len(ids))) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids))) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids))) elif (isinstance(instance, InstanceGroup) or isinstance(instance, Robot)): id_sum = 0 temp_or_buffer_indices = [] for vo in instance.objects: ids = vo.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end))) id_sum += len(ids) 
instance.or_buffer_indices = list(temp_or_buffer_indices) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * id_sum)) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * id_sum)) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)) self.or_buffer_shape_num = len(duplicate_vao_ids) self.trans_data = np.zeros((self.or_buffer_shape_num, 4, 4)) self.rot_data = np.zeros((self.or_buffer_shape_num, 4, 4)) index_ptr_offsets = [] index_counts = [] indices = [] diffuse_color_array = [] tex_num_array = [] tex_layer_array = [] roughness_tex_num_array = [] roughness_tex_layer_array = [] metallic_tex_num_array = [] metallic_tex_layer_array = [] normal_tex_num_array = [] normal_tex_layer_array = [] transform_param_array = [] index_offset = 0 for id in duplicate_vao_ids: index_ptr_offsets.append(index_offset) id_idxs = list(offset_faces[id].flatten()) indices.extend(id_idxs) index_count = len(id_idxs) index_counts.append(index_count) index_offset += index_count id_material = self.materials_mapping[self.mesh_materials[id]] texture_id = id_material.texture_id if ((texture_id == (- 1)) or (texture_id is None)): tex_num_array.append((- 1)) tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[texture_id] tex_num_array.append(tex_num) tex_layer_array.append(tex_layer) roughness_texture_id = id_material.roughness_texture_id if ((roughness_texture_id == (- 1)) or (roughness_texture_id is None)): roughness_tex_num_array.append((- 1)) roughness_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[roughness_texture_id] roughness_tex_num_array.append(tex_num) roughness_tex_layer_array.append(tex_layer) metallic_texture_id = id_material.metallic_texture_id if ((metallic_texture_id == (- 1)) or (metallic_texture_id is None)): metallic_tex_num_array.append((- 1)) 
metallic_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[metallic_texture_id] metallic_tex_num_array.append(tex_num) metallic_tex_layer_array.append(tex_layer) normal_texture_id = id_material.normal_texture_id if ((normal_texture_id == (- 1)) or (normal_texture_id is None)): normal_tex_num_array.append((- 1)) normal_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[normal_texture_id] normal_tex_num_array.append(tex_num) normal_tex_layer_array.append(tex_layer) transform_param = id_material.transform_param transform_param_array.append([transform_param[0], transform_param[1], transform_param[2], 1.0]) kd = np.asarray(id_material.kd, dtype=np.float32) kd_vec_4 = [kd[0], kd[1], kd[2], 1.0] diffuse_color_array.append(np.ascontiguousarray(kd_vec_4, dtype=np.float32)) index_ptr_offsets = np.ascontiguousarray(index_ptr_offsets, dtype=np.int32) index_counts = np.ascontiguousarray(index_counts, dtype=np.int32) indices = np.ascontiguousarray(indices, dtype=np.int32) frag_shader_data = [] pbr_data = [] hidden_data = [] uv_data = [] frag_shader_roughness_metallic_data = [] frag_shader_normal_data = [] for i in range(len(duplicate_vao_ids)): data_list = [float(tex_num_array[i]), float(tex_layer_array[i]), class_id_array[i], instance_id_array[i]] frag_shader_data.append(np.ascontiguousarray(data_list, dtype=np.float32)) pbr_data.append(np.ascontiguousarray(pbr_data_array[i], dtype=np.float32)) hidden_data.append(np.ascontiguousarray(hidden_array[i], dtype=np.float32)) roughness_metallic_data_list = [float(roughness_tex_num_array[i]), float(roughness_tex_layer_array[i]), float(metallic_tex_num_array[i]), float(metallic_tex_layer_array[i])] frag_shader_roughness_metallic_data.append(np.ascontiguousarray(roughness_metallic_data_list, dtype=np.float32)) normal_data_list = [float(normal_tex_num_array[i]), float(normal_tex_layer_array[i]), 0.0, 0.0] 
frag_shader_normal_data.append(np.ascontiguousarray(normal_data_list, dtype=np.float32)) uv_data.append(np.ascontiguousarray(transform_param_array[i], dtype=np.float32)) merged_frag_shader_data = np.ascontiguousarray(np.concatenate(frag_shader_data, axis=0), np.float32) merged_frag_shader_roughness_metallic_data = np.ascontiguousarray(np.concatenate(frag_shader_roughness_metallic_data, axis=0), np.float32) merged_frag_shader_normal_data = np.ascontiguousarray(np.concatenate(frag_shader_normal_data, axis=0), np.float32) merged_diffuse_color_array = np.ascontiguousarray(np.concatenate(diffuse_color_array, axis=0), np.float32) merged_pbr_data = np.ascontiguousarray(np.concatenate(pbr_data, axis=0), np.float32) self.merged_hidden_data = np.ascontiguousarray(np.concatenate(hidden_data, axis=0), np.float32) self.merged_uv_data = np.ascontiguousarray(np.concatenate(uv_data, axis=0), np.float32) merged_vertex_data = np.concatenate(self.vertex_data, axis=0) print('Merged vertex data shape:') print(merged_vertex_data.shape) print('Enable pbr: {}'.format(self.rendering_settings.enable_pbr)) if self.msaa: buffer = self.fbo_ms else: buffer = self.fbo (self.optimized_VAO, self.optimized_VBO, self.optimized_EBO) = self.r.renderSetup(self.shaderProgram, self.V, self.P, self.lightpos, self.lightcolor, merged_vertex_data, index_ptr_offsets, index_counts, indices, merged_frag_shader_data, merged_frag_shader_roughness_metallic_data, merged_frag_shader_normal_data, merged_diffuse_color_array, merged_pbr_data, self.merged_hidden_data, self.merged_uv_data, self.tex_id_1, self.tex_id_2, buffer, float(self.rendering_settings.enable_pbr), float(self.rendering_settings.blend_highlight), self.depth_tex_shadow) self.optimization_process_executed = True<|docstring|>Optimize vertex and texture for optimized renderer<|endoftext|>
c5b52aefa9c9cb42e48d25af338e686dd30671d8cf0b474b3786f4f8cdb92b68
def update_optimized_texture_internal(self): '\n Update the texture_id for optimized renderer\n ' duplicate_vao_ids = [] class_id_array = [] instance_id_array = [] pbr_data_array = [] hidden_array = [] for instance in self.instances: if isinstance(instance, Instance): ids = instance.object.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end)) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * len(ids))) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * len(ids))) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids))) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids))) elif (isinstance(instance, InstanceGroup) or isinstance(instance, Robot)): id_sum = 0 temp_or_buffer_indices = [] for vo in instance.objects: ids = vo.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end))) id_sum += len(ids) instance.or_buffer_indices = list(temp_or_buffer_indices) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * id_sum)) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * id_sum)) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)) index_ptr_offsets = [] index_counts = [] indices = [] diffuse_color_array = [] tex_num_array = [] tex_layer_array = [] roughness_tex_num_array = [] roughness_tex_layer_array = [] metallic_tex_num_array = [] metallic_tex_layer_array = [] normal_tex_num_array = [] normal_tex_layer_array = [] transform_param_array = [] for id in duplicate_vao_ids: id_material = self.materials_mapping[self.mesh_materials[id]] texture_id = 
id_material.texture_id if ((texture_id == (- 1)) or (texture_id is None)): tex_num_array.append((- 1)) tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[texture_id] tex_num_array.append(tex_num) tex_layer_array.append(tex_layer) roughness_texture_id = id_material.roughness_texture_id if ((roughness_texture_id == (- 1)) or (roughness_texture_id is None)): roughness_tex_num_array.append((- 1)) roughness_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[roughness_texture_id] roughness_tex_num_array.append(tex_num) roughness_tex_layer_array.append(tex_layer) metallic_texture_id = id_material.metallic_texture_id if ((metallic_texture_id == (- 1)) or (metallic_texture_id is None)): metallic_tex_num_array.append((- 1)) metallic_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[metallic_texture_id] metallic_tex_num_array.append(tex_num) metallic_tex_layer_array.append(tex_layer) normal_texture_id = id_material.normal_texture_id if ((normal_texture_id == (- 1)) or (normal_texture_id is None)): normal_tex_num_array.append((- 1)) normal_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[normal_texture_id] normal_tex_num_array.append(tex_num) normal_tex_layer_array.append(tex_layer) transform_param = id_material.transform_param transform_param_array.append([transform_param[0], transform_param[1], transform_param[2], 1.0]) kd = np.asarray(id_material.kd, dtype=np.float32) kd_vec_4 = [kd[0], kd[1], kd[2], 1.0] diffuse_color_array.append(np.ascontiguousarray(kd_vec_4, dtype=np.float32)) frag_shader_data = [] pbr_data = [] hidden_data = [] uv_data = [] frag_shader_roughness_metallic_data = [] frag_shader_normal_data = [] for i in range(len(duplicate_vao_ids)): data_list = [float(tex_num_array[i]), float(tex_layer_array[i]), class_id_array[i], instance_id_array[i]] frag_shader_data.append(np.ascontiguousarray(data_list, dtype=np.float32)) 
pbr_data.append(np.ascontiguousarray(pbr_data_array[i], dtype=np.float32)) hidden_data.append(np.ascontiguousarray(hidden_array[i], dtype=np.float32)) roughness_metallic_data_list = [float(roughness_tex_num_array[i]), float(roughness_tex_layer_array[i]), float(metallic_tex_num_array[i]), float(metallic_tex_layer_array[i])] frag_shader_roughness_metallic_data.append(np.ascontiguousarray(roughness_metallic_data_list, dtype=np.float32)) normal_data_list = [float(normal_tex_num_array[i]), float(normal_tex_layer_array[i]), 0.0, 0.0] frag_shader_normal_data.append(np.ascontiguousarray(normal_data_list, dtype=np.float32)) uv_data.append(np.ascontiguousarray(transform_param_array[i], dtype=np.float32)) merged_frag_shader_data = np.ascontiguousarray(np.concatenate(frag_shader_data, axis=0), np.float32) merged_frag_shader_roughness_metallic_data = np.ascontiguousarray(np.concatenate(frag_shader_roughness_metallic_data, axis=0), np.float32) merged_frag_shader_normal_data = np.ascontiguousarray(np.concatenate(frag_shader_normal_data, axis=0), np.float32) merged_diffuse_color_array = np.ascontiguousarray(np.concatenate(diffuse_color_array, axis=0), np.float32) merged_pbr_data = np.ascontiguousarray(np.concatenate(pbr_data, axis=0), np.float32) self.merged_hidden_data = np.ascontiguousarray(np.concatenate(hidden_data, axis=0), np.float32) self.merged_uv_data = np.ascontiguousarray(np.concatenate(uv_data, axis=0), np.float32) self.r.updateTextureIdArrays(self.shaderProgram, merged_frag_shader_data, merged_frag_shader_roughness_metallic_data, merged_frag_shader_normal_data, merged_diffuse_color_array, merged_pbr_data, self.merged_hidden_data, self.merged_uv_data)
Update the texture_id for optimized renderer
igibson/render/mesh_renderer/mesh_renderer_cpu.py
update_optimized_texture_internal
suresh-guttikonda/iGibson
0
python
def update_optimized_texture_internal(self): '\n \n ' duplicate_vao_ids = [] class_id_array = [] instance_id_array = [] pbr_data_array = [] hidden_array = [] for instance in self.instances: if isinstance(instance, Instance): ids = instance.object.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end)) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * len(ids))) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * len(ids))) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids))) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids))) elif (isinstance(instance, InstanceGroup) or isinstance(instance, Robot)): id_sum = 0 temp_or_buffer_indices = [] for vo in instance.objects: ids = vo.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end))) id_sum += len(ids) instance.or_buffer_indices = list(temp_or_buffer_indices) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * id_sum)) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * id_sum)) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)) index_ptr_offsets = [] index_counts = [] indices = [] diffuse_color_array = [] tex_num_array = [] tex_layer_array = [] roughness_tex_num_array = [] roughness_tex_layer_array = [] metallic_tex_num_array = [] metallic_tex_layer_array = [] normal_tex_num_array = [] normal_tex_layer_array = [] transform_param_array = [] for id in duplicate_vao_ids: id_material = self.materials_mapping[self.mesh_materials[id]] texture_id = id_material.texture_id if ((texture_id == (- 
1)) or (texture_id is None)): tex_num_array.append((- 1)) tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[texture_id] tex_num_array.append(tex_num) tex_layer_array.append(tex_layer) roughness_texture_id = id_material.roughness_texture_id if ((roughness_texture_id == (- 1)) or (roughness_texture_id is None)): roughness_tex_num_array.append((- 1)) roughness_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[roughness_texture_id] roughness_tex_num_array.append(tex_num) roughness_tex_layer_array.append(tex_layer) metallic_texture_id = id_material.metallic_texture_id if ((metallic_texture_id == (- 1)) or (metallic_texture_id is None)): metallic_tex_num_array.append((- 1)) metallic_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[metallic_texture_id] metallic_tex_num_array.append(tex_num) metallic_tex_layer_array.append(tex_layer) normal_texture_id = id_material.normal_texture_id if ((normal_texture_id == (- 1)) or (normal_texture_id is None)): normal_tex_num_array.append((- 1)) normal_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[normal_texture_id] normal_tex_num_array.append(tex_num) normal_tex_layer_array.append(tex_layer) transform_param = id_material.transform_param transform_param_array.append([transform_param[0], transform_param[1], transform_param[2], 1.0]) kd = np.asarray(id_material.kd, dtype=np.float32) kd_vec_4 = [kd[0], kd[1], kd[2], 1.0] diffuse_color_array.append(np.ascontiguousarray(kd_vec_4, dtype=np.float32)) frag_shader_data = [] pbr_data = [] hidden_data = [] uv_data = [] frag_shader_roughness_metallic_data = [] frag_shader_normal_data = [] for i in range(len(duplicate_vao_ids)): data_list = [float(tex_num_array[i]), float(tex_layer_array[i]), class_id_array[i], instance_id_array[i]] frag_shader_data.append(np.ascontiguousarray(data_list, dtype=np.float32)) 
pbr_data.append(np.ascontiguousarray(pbr_data_array[i], dtype=np.float32)) hidden_data.append(np.ascontiguousarray(hidden_array[i], dtype=np.float32)) roughness_metallic_data_list = [float(roughness_tex_num_array[i]), float(roughness_tex_layer_array[i]), float(metallic_tex_num_array[i]), float(metallic_tex_layer_array[i])] frag_shader_roughness_metallic_data.append(np.ascontiguousarray(roughness_metallic_data_list, dtype=np.float32)) normal_data_list = [float(normal_tex_num_array[i]), float(normal_tex_layer_array[i]), 0.0, 0.0] frag_shader_normal_data.append(np.ascontiguousarray(normal_data_list, dtype=np.float32)) uv_data.append(np.ascontiguousarray(transform_param_array[i], dtype=np.float32)) merged_frag_shader_data = np.ascontiguousarray(np.concatenate(frag_shader_data, axis=0), np.float32) merged_frag_shader_roughness_metallic_data = np.ascontiguousarray(np.concatenate(frag_shader_roughness_metallic_data, axis=0), np.float32) merged_frag_shader_normal_data = np.ascontiguousarray(np.concatenate(frag_shader_normal_data, axis=0), np.float32) merged_diffuse_color_array = np.ascontiguousarray(np.concatenate(diffuse_color_array, axis=0), np.float32) merged_pbr_data = np.ascontiguousarray(np.concatenate(pbr_data, axis=0), np.float32) self.merged_hidden_data = np.ascontiguousarray(np.concatenate(hidden_data, axis=0), np.float32) self.merged_uv_data = np.ascontiguousarray(np.concatenate(uv_data, axis=0), np.float32) self.r.updateTextureIdArrays(self.shaderProgram, merged_frag_shader_data, merged_frag_shader_roughness_metallic_data, merged_frag_shader_normal_data, merged_diffuse_color_array, merged_pbr_data, self.merged_hidden_data, self.merged_uv_data)
def update_optimized_texture_internal(self): '\n \n ' duplicate_vao_ids = [] class_id_array = [] instance_id_array = [] pbr_data_array = [] hidden_array = [] for instance in self.instances: if isinstance(instance, Instance): ids = instance.object.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end)) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * len(ids))) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * len(ids))) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids))) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids))) elif (isinstance(instance, InstanceGroup) or isinstance(instance, Robot)): id_sum = 0 temp_or_buffer_indices = [] for vo in instance.objects: ids = vo.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end))) id_sum += len(ids) instance.or_buffer_indices = list(temp_or_buffer_indices) class_id_array.extend(([(float(instance.class_id) / MAX_CLASS_COUNT)] * id_sum)) instance_id_array.extend(([(float(instance.id) / MAX_INSTANCE_COUNT)] * id_sum)) pbr_data_array.extend(([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum)) hidden_array.extend(([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum)) index_ptr_offsets = [] index_counts = [] indices = [] diffuse_color_array = [] tex_num_array = [] tex_layer_array = [] roughness_tex_num_array = [] roughness_tex_layer_array = [] metallic_tex_num_array = [] metallic_tex_layer_array = [] normal_tex_num_array = [] normal_tex_layer_array = [] transform_param_array = [] for id in duplicate_vao_ids: id_material = self.materials_mapping[self.mesh_materials[id]] texture_id = id_material.texture_id if ((texture_id == (- 
1)) or (texture_id is None)): tex_num_array.append((- 1)) tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[texture_id] tex_num_array.append(tex_num) tex_layer_array.append(tex_layer) roughness_texture_id = id_material.roughness_texture_id if ((roughness_texture_id == (- 1)) or (roughness_texture_id is None)): roughness_tex_num_array.append((- 1)) roughness_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[roughness_texture_id] roughness_tex_num_array.append(tex_num) roughness_tex_layer_array.append(tex_layer) metallic_texture_id = id_material.metallic_texture_id if ((metallic_texture_id == (- 1)) or (metallic_texture_id is None)): metallic_tex_num_array.append((- 1)) metallic_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[metallic_texture_id] metallic_tex_num_array.append(tex_num) metallic_tex_layer_array.append(tex_layer) normal_texture_id = id_material.normal_texture_id if ((normal_texture_id == (- 1)) or (normal_texture_id is None)): normal_tex_num_array.append((- 1)) normal_tex_layer_array.append((- 1)) else: (tex_num, tex_layer) = self.tex_id_layer_mapping[normal_texture_id] normal_tex_num_array.append(tex_num) normal_tex_layer_array.append(tex_layer) transform_param = id_material.transform_param transform_param_array.append([transform_param[0], transform_param[1], transform_param[2], 1.0]) kd = np.asarray(id_material.kd, dtype=np.float32) kd_vec_4 = [kd[0], kd[1], kd[2], 1.0] diffuse_color_array.append(np.ascontiguousarray(kd_vec_4, dtype=np.float32)) frag_shader_data = [] pbr_data = [] hidden_data = [] uv_data = [] frag_shader_roughness_metallic_data = [] frag_shader_normal_data = [] for i in range(len(duplicate_vao_ids)): data_list = [float(tex_num_array[i]), float(tex_layer_array[i]), class_id_array[i], instance_id_array[i]] frag_shader_data.append(np.ascontiguousarray(data_list, dtype=np.float32)) 
pbr_data.append(np.ascontiguousarray(pbr_data_array[i], dtype=np.float32)) hidden_data.append(np.ascontiguousarray(hidden_array[i], dtype=np.float32)) roughness_metallic_data_list = [float(roughness_tex_num_array[i]), float(roughness_tex_layer_array[i]), float(metallic_tex_num_array[i]), float(metallic_tex_layer_array[i])] frag_shader_roughness_metallic_data.append(np.ascontiguousarray(roughness_metallic_data_list, dtype=np.float32)) normal_data_list = [float(normal_tex_num_array[i]), float(normal_tex_layer_array[i]), 0.0, 0.0] frag_shader_normal_data.append(np.ascontiguousarray(normal_data_list, dtype=np.float32)) uv_data.append(np.ascontiguousarray(transform_param_array[i], dtype=np.float32)) merged_frag_shader_data = np.ascontiguousarray(np.concatenate(frag_shader_data, axis=0), np.float32) merged_frag_shader_roughness_metallic_data = np.ascontiguousarray(np.concatenate(frag_shader_roughness_metallic_data, axis=0), np.float32) merged_frag_shader_normal_data = np.ascontiguousarray(np.concatenate(frag_shader_normal_data, axis=0), np.float32) merged_diffuse_color_array = np.ascontiguousarray(np.concatenate(diffuse_color_array, axis=0), np.float32) merged_pbr_data = np.ascontiguousarray(np.concatenate(pbr_data, axis=0), np.float32) self.merged_hidden_data = np.ascontiguousarray(np.concatenate(hidden_data, axis=0), np.float32) self.merged_uv_data = np.ascontiguousarray(np.concatenate(uv_data, axis=0), np.float32) self.r.updateTextureIdArrays(self.shaderProgram, merged_frag_shader_data, merged_frag_shader_roughness_metallic_data, merged_frag_shader_normal_data, merged_diffuse_color_array, merged_pbr_data, self.merged_hidden_data, self.merged_uv_data)<|docstring|>Update the texture_id for optimized renderer<|endoftext|>
c5138b306a93c6b4faf92dbbfb0fa816b586c8f22da4a6dfefa9269d43da0166
def update_hidden_highlight_state(self, instances): '\n Updates the hidden state of a list of instances\n This function is called by instances and not every frame, since hiding is a very infrequent operation.\n ' if (not self.optimization_process_executed): logging.warning('Trying to set hidden state before vertices are merged, converted to no-op') return for instance in instances: buf_idxs = instance.or_buffer_indices vec4_buf_idxs = [(idx * 4) for idx in buf_idxs] vec4_buf_idxs_highlight = [((idx * 4) + 1) for idx in buf_idxs] self.merged_hidden_data[vec4_buf_idxs] = float(instance.hidden) self.merged_hidden_data[vec4_buf_idxs_highlight] = float(instance.highlight) self.r.updateHiddenData(self.shaderProgram, np.ascontiguousarray(self.merged_hidden_data, dtype=np.float32))
Updates the hidden state of a list of instances This function is called by instances and not every frame, since hiding is a very infrequent operation.
igibson/render/mesh_renderer/mesh_renderer_cpu.py
update_hidden_highlight_state
suresh-guttikonda/iGibson
0
python
def update_hidden_highlight_state(self, instances): '\n Updates the hidden state of a list of instances\n This function is called by instances and not every frame, since hiding is a very infrequent operation.\n ' if (not self.optimization_process_executed): logging.warning('Trying to set hidden state before vertices are merged, converted to no-op') return for instance in instances: buf_idxs = instance.or_buffer_indices vec4_buf_idxs = [(idx * 4) for idx in buf_idxs] vec4_buf_idxs_highlight = [((idx * 4) + 1) for idx in buf_idxs] self.merged_hidden_data[vec4_buf_idxs] = float(instance.hidden) self.merged_hidden_data[vec4_buf_idxs_highlight] = float(instance.highlight) self.r.updateHiddenData(self.shaderProgram, np.ascontiguousarray(self.merged_hidden_data, dtype=np.float32))
def update_hidden_highlight_state(self, instances): '\n Updates the hidden state of a list of instances\n This function is called by instances and not every frame, since hiding is a very infrequent operation.\n ' if (not self.optimization_process_executed): logging.warning('Trying to set hidden state before vertices are merged, converted to no-op') return for instance in instances: buf_idxs = instance.or_buffer_indices vec4_buf_idxs = [(idx * 4) for idx in buf_idxs] vec4_buf_idxs_highlight = [((idx * 4) + 1) for idx in buf_idxs] self.merged_hidden_data[vec4_buf_idxs] = float(instance.hidden) self.merged_hidden_data[vec4_buf_idxs_highlight] = float(instance.highlight) self.r.updateHiddenData(self.shaderProgram, np.ascontiguousarray(self.merged_hidden_data, dtype=np.float32))<|docstring|>Updates the hidden state of a list of instances This function is called by instances and not every frame, since hiding is a very infrequent operation.<|endoftext|>
6565eefbbbc7427dc93a1d2ce4854d3a78da294137213b115326f9c38de12a22
def update_dynamic_positions(self, need_flow_info=False): '\n Update all dynamic positions\n\n :param need_flow_info: whether flow information is required\n ' for instance in self.instances: if isinstance(instance, Instance): buf_idxs = instance.or_buffer_indices if (not buf_idxs): continue self.trans_data[buf_idxs] = np.array(instance.pose_trans) self.rot_data[buf_idxs] = np.array(instance.pose_rot) elif (isinstance(instance, InstanceGroup) or isinstance(instance, Robot)): buf_idxs = instance.or_buffer_indices if (not buf_idxs): continue self.trans_data[buf_idxs] = np.array(instance.poses_trans) self.rot_data[buf_idxs] = np.array(instance.poses_rot) if need_flow_info: if (self.pose_trans_array is not None): self.last_trans_array = np.copy(self.pose_trans_array) else: self.last_trans_array = np.ascontiguousarray(np.concatenate(self.trans_data, axis=0)) if (self.pose_rot_array is not None): self.last_rot_array = np.copy(self.pose_rot_array) else: self.last_rot_array = np.ascontiguousarray(np.concatenate(self.rot_data, axis=0)) else: self.last_rot_array = self.pose_rot_array self.last_trans_array = self.pose_trans_array self.pose_trans_array = np.ascontiguousarray(self.trans_data) self.pose_rot_array = np.ascontiguousarray(self.rot_data)
Update all dynamic positions :param need_flow_info: whether flow information is required
igibson/render/mesh_renderer/mesh_renderer_cpu.py
update_dynamic_positions
suresh-guttikonda/iGibson
0
python
def update_dynamic_positions(self, need_flow_info=False): '\n Update all dynamic positions\n\n :param need_flow_info: whether flow information is required\n ' for instance in self.instances: if isinstance(instance, Instance): buf_idxs = instance.or_buffer_indices if (not buf_idxs): continue self.trans_data[buf_idxs] = np.array(instance.pose_trans) self.rot_data[buf_idxs] = np.array(instance.pose_rot) elif (isinstance(instance, InstanceGroup) or isinstance(instance, Robot)): buf_idxs = instance.or_buffer_indices if (not buf_idxs): continue self.trans_data[buf_idxs] = np.array(instance.poses_trans) self.rot_data[buf_idxs] = np.array(instance.poses_rot) if need_flow_info: if (self.pose_trans_array is not None): self.last_trans_array = np.copy(self.pose_trans_array) else: self.last_trans_array = np.ascontiguousarray(np.concatenate(self.trans_data, axis=0)) if (self.pose_rot_array is not None): self.last_rot_array = np.copy(self.pose_rot_array) else: self.last_rot_array = np.ascontiguousarray(np.concatenate(self.rot_data, axis=0)) else: self.last_rot_array = self.pose_rot_array self.last_trans_array = self.pose_trans_array self.pose_trans_array = np.ascontiguousarray(self.trans_data) self.pose_rot_array = np.ascontiguousarray(self.rot_data)
def update_dynamic_positions(self, need_flow_info=False): '\n Update all dynamic positions\n\n :param need_flow_info: whether flow information is required\n ' for instance in self.instances: if isinstance(instance, Instance): buf_idxs = instance.or_buffer_indices if (not buf_idxs): continue self.trans_data[buf_idxs] = np.array(instance.pose_trans) self.rot_data[buf_idxs] = np.array(instance.pose_rot) elif (isinstance(instance, InstanceGroup) or isinstance(instance, Robot)): buf_idxs = instance.or_buffer_indices if (not buf_idxs): continue self.trans_data[buf_idxs] = np.array(instance.poses_trans) self.rot_data[buf_idxs] = np.array(instance.poses_rot) if need_flow_info: if (self.pose_trans_array is not None): self.last_trans_array = np.copy(self.pose_trans_array) else: self.last_trans_array = np.ascontiguousarray(np.concatenate(self.trans_data, axis=0)) if (self.pose_rot_array is not None): self.last_rot_array = np.copy(self.pose_rot_array) else: self.last_rot_array = np.ascontiguousarray(np.concatenate(self.rot_data, axis=0)) else: self.last_rot_array = self.pose_rot_array self.last_trans_array = self.pose_trans_array self.pose_trans_array = np.ascontiguousarray(self.trans_data) self.pose_rot_array = np.ascontiguousarray(self.rot_data)<|docstring|>Update all dynamic positions :param need_flow_info: whether flow information is required<|endoftext|>
f9ff9bfa3c3ce0d4dd3486f61d19d08c0da7cb9b1ec0e4b16d3ceda748fe5509
def use_pbr(self, use_pbr, use_pbr_mapping): '\n Apply PBR setting to every instance\n\n :param use_pbr: whether to use pbr\n :param use_pbr_mapping: whether to use pbr mapping\n ' for instance in self.instances: instance.use_pbr = use_pbr instance.use_pbr_mapping = use_pbr_mapping
Apply PBR setting to every instance :param use_pbr: whether to use pbr :param use_pbr_mapping: whether to use pbr mapping
igibson/render/mesh_renderer/mesh_renderer_cpu.py
use_pbr
suresh-guttikonda/iGibson
0
python
def use_pbr(self, use_pbr, use_pbr_mapping): '\n Apply PBR setting to every instance\n\n :param use_pbr: whether to use pbr\n :param use_pbr_mapping: whether to use pbr mapping\n ' for instance in self.instances: instance.use_pbr = use_pbr instance.use_pbr_mapping = use_pbr_mapping
def use_pbr(self, use_pbr, use_pbr_mapping): '\n Apply PBR setting to every instance\n\n :param use_pbr: whether to use pbr\n :param use_pbr_mapping: whether to use pbr mapping\n ' for instance in self.instances: instance.use_pbr = use_pbr instance.use_pbr_mapping = use_pbr_mapping<|docstring|>Apply PBR setting to every instance :param use_pbr: whether to use pbr :param use_pbr_mapping: whether to use pbr mapping<|endoftext|>
c7afd2c76e322d9ae8e4c4cdb3beac2d7482fd0446ce3bffcec710cc5c690cf5
def setup_lidar_param(self): '\n Set up LiDAR params\n ' lidar_vertical_low = (((- 15) / 180.0) * np.pi) lidar_vertical_high = ((15 / 180.0) * np.pi) lidar_vertical_n_beams = 16 lidar_vertical_beams = np.arange(lidar_vertical_low, (lidar_vertical_high + ((lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1))), ((lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1))) lidar_horizontal_low = (((- 45) / 180.0) * np.pi) lidar_horizontal_high = ((45 / 180.0) * np.pi) lidar_horizontal_n_beams = 468 lidar_horizontal_beams = np.arange(lidar_horizontal_low, lidar_horizontal_high, ((lidar_horizontal_high - lidar_horizontal_low) / lidar_horizontal_n_beams)) (xx, yy) = np.meshgrid(lidar_vertical_beams, lidar_horizontal_beams) xx = xx.flatten() yy = yy.flatten() x_samples = ((((np.tan(xx) / np.cos(yy)) * self.height) // 2) + (self.height // 2)).astype(np.int) y_samples = (((np.tan(yy) * self.height) // 2) + (self.height // 2)).astype(np.int) self.x_samples = x_samples.flatten() self.y_samples = y_samples.flatten()
Set up LiDAR params
igibson/render/mesh_renderer/mesh_renderer_cpu.py
setup_lidar_param
suresh-guttikonda/iGibson
0
python
def setup_lidar_param(self): '\n \n ' lidar_vertical_low = (((- 15) / 180.0) * np.pi) lidar_vertical_high = ((15 / 180.0) * np.pi) lidar_vertical_n_beams = 16 lidar_vertical_beams = np.arange(lidar_vertical_low, (lidar_vertical_high + ((lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1))), ((lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1))) lidar_horizontal_low = (((- 45) / 180.0) * np.pi) lidar_horizontal_high = ((45 / 180.0) * np.pi) lidar_horizontal_n_beams = 468 lidar_horizontal_beams = np.arange(lidar_horizontal_low, lidar_horizontal_high, ((lidar_horizontal_high - lidar_horizontal_low) / lidar_horizontal_n_beams)) (xx, yy) = np.meshgrid(lidar_vertical_beams, lidar_horizontal_beams) xx = xx.flatten() yy = yy.flatten() x_samples = ((((np.tan(xx) / np.cos(yy)) * self.height) // 2) + (self.height // 2)).astype(np.int) y_samples = (((np.tan(yy) * self.height) // 2) + (self.height // 2)).astype(np.int) self.x_samples = x_samples.flatten() self.y_samples = y_samples.flatten()
def setup_lidar_param(self): '\n \n ' lidar_vertical_low = (((- 15) / 180.0) * np.pi) lidar_vertical_high = ((15 / 180.0) * np.pi) lidar_vertical_n_beams = 16 lidar_vertical_beams = np.arange(lidar_vertical_low, (lidar_vertical_high + ((lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1))), ((lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1))) lidar_horizontal_low = (((- 45) / 180.0) * np.pi) lidar_horizontal_high = ((45 / 180.0) * np.pi) lidar_horizontal_n_beams = 468 lidar_horizontal_beams = np.arange(lidar_horizontal_low, lidar_horizontal_high, ((lidar_horizontal_high - lidar_horizontal_low) / lidar_horizontal_n_beams)) (xx, yy) = np.meshgrid(lidar_vertical_beams, lidar_horizontal_beams) xx = xx.flatten() yy = yy.flatten() x_samples = ((((np.tan(xx) / np.cos(yy)) * self.height) // 2) + (self.height // 2)).astype(np.int) y_samples = (((np.tan(yy) * self.height) // 2) + (self.height // 2)).astype(np.int) self.x_samples = x_samples.flatten() self.y_samples = y_samples.flatten()<|docstring|>Set up LiDAR params<|endoftext|>
7e3d0a17eba4b1e268c7acb7913dab5ac0ade39037a094b02e0fa365e1acef59
def get_lidar_from_depth(self): '\n Get partial LiDAR readings from depth sensors with limited FOV\n :return: partial LiDAR readings with limited FOV\n ' lidar_readings = self.render(modes='3d')[0] lidar_readings = lidar_readings[(self.x_samples, self.y_samples, :3)] dist = np.linalg.norm(lidar_readings, axis=1) lidar_readings = lidar_readings[(dist > 0)] lidar_readings[(:, 2)] = (- lidar_readings[(:, 2)]) return lidar_readings
Get partial LiDAR readings from depth sensors with limited FOV :return: partial LiDAR readings with limited FOV
igibson/render/mesh_renderer/mesh_renderer_cpu.py
get_lidar_from_depth
suresh-guttikonda/iGibson
0
python
def get_lidar_from_depth(self): '\n Get partial LiDAR readings from depth sensors with limited FOV\n :return: partial LiDAR readings with limited FOV\n ' lidar_readings = self.render(modes='3d')[0] lidar_readings = lidar_readings[(self.x_samples, self.y_samples, :3)] dist = np.linalg.norm(lidar_readings, axis=1) lidar_readings = lidar_readings[(dist > 0)] lidar_readings[(:, 2)] = (- lidar_readings[(:, 2)]) return lidar_readings
def get_lidar_from_depth(self): '\n Get partial LiDAR readings from depth sensors with limited FOV\n :return: partial LiDAR readings with limited FOV\n ' lidar_readings = self.render(modes='3d')[0] lidar_readings = lidar_readings[(self.x_samples, self.y_samples, :3)] dist = np.linalg.norm(lidar_readings, axis=1) lidar_readings = lidar_readings[(dist > 0)] lidar_readings[(:, 2)] = (- lidar_readings[(:, 2)]) return lidar_readings<|docstring|>Get partial LiDAR readings from depth sensors with limited FOV :return: partial LiDAR readings with limited FOV<|endoftext|>
d4dc6e16c53ec9d2e51d0762ed6f4bb99b9d68453f1ff785adcd0e8c4835c541
def get_lidar_all(self, offset_with_camera=np.array([0, 0, 0])): '\n Get complete LiDAR readings by patching together partial ones\n :param offset_with_camera: optionally place the lidar scanner\n with an offset to the camera\n :return: complete 360 degree LiDAR readings\n ' for instance in self.instances: if isinstance(instance, Robot): camera_pos = (instance.robot.eyes.get_position() + offset_with_camera) orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[(:3, :3)] view_direction = mat.dot(np.array([1, 0, 0])) self.set_camera(camera_pos, (camera_pos + view_direction), [0, 0, 1]) original_fov = self.vertical_fov self.set_fov(90) lidar_readings = [] r = np.array([[np.cos(((- np.pi) / 2)), 0, (- np.sin(((- np.pi) / 2))), 0], [0, 1, 0, 0], [np.sin(((- np.pi) / 2)), 0, np.cos(((- np.pi) / 2)), 0], [0, 0, 0, 1]]) transformation_matrix = np.eye(4) for i in range(4): lidar_one_view = self.get_lidar_from_depth() lidar_readings.append(lidar_one_view.dot(transformation_matrix[(:3, :3)])) self.V = r.dot(self.V) transformation_matrix = np.linalg.inv(r).dot(transformation_matrix) lidar_readings = np.concatenate(lidar_readings, axis=0) lidar_readings = lidar_readings.dot(np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])) self.set_fov(original_fov) return lidar_readings
Get complete LiDAR readings by patching together partial ones :param offset_with_camera: optionally place the lidar scanner with an offset to the camera :return: complete 360 degree LiDAR readings
igibson/render/mesh_renderer/mesh_renderer_cpu.py
get_lidar_all
suresh-guttikonda/iGibson
0
python
def get_lidar_all(self, offset_with_camera=np.array([0, 0, 0])): '\n Get complete LiDAR readings by patching together partial ones\n :param offset_with_camera: optionally place the lidar scanner\n with an offset to the camera\n :return: complete 360 degree LiDAR readings\n ' for instance in self.instances: if isinstance(instance, Robot): camera_pos = (instance.robot.eyes.get_position() + offset_with_camera) orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[(:3, :3)] view_direction = mat.dot(np.array([1, 0, 0])) self.set_camera(camera_pos, (camera_pos + view_direction), [0, 0, 1]) original_fov = self.vertical_fov self.set_fov(90) lidar_readings = [] r = np.array([[np.cos(((- np.pi) / 2)), 0, (- np.sin(((- np.pi) / 2))), 0], [0, 1, 0, 0], [np.sin(((- np.pi) / 2)), 0, np.cos(((- np.pi) / 2)), 0], [0, 0, 0, 1]]) transformation_matrix = np.eye(4) for i in range(4): lidar_one_view = self.get_lidar_from_depth() lidar_readings.append(lidar_one_view.dot(transformation_matrix[(:3, :3)])) self.V = r.dot(self.V) transformation_matrix = np.linalg.inv(r).dot(transformation_matrix) lidar_readings = np.concatenate(lidar_readings, axis=0) lidar_readings = lidar_readings.dot(np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])) self.set_fov(original_fov) return lidar_readings
def get_lidar_all(self, offset_with_camera=np.array([0, 0, 0])): '\n Get complete LiDAR readings by patching together partial ones\n :param offset_with_camera: optionally place the lidar scanner\n with an offset to the camera\n :return: complete 360 degree LiDAR readings\n ' for instance in self.instances: if isinstance(instance, Robot): camera_pos = (instance.robot.eyes.get_position() + offset_with_camera) orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[(:3, :3)] view_direction = mat.dot(np.array([1, 0, 0])) self.set_camera(camera_pos, (camera_pos + view_direction), [0, 0, 1]) original_fov = self.vertical_fov self.set_fov(90) lidar_readings = [] r = np.array([[np.cos(((- np.pi) / 2)), 0, (- np.sin(((- np.pi) / 2))), 0], [0, 1, 0, 0], [np.sin(((- np.pi) / 2)), 0, np.cos(((- np.pi) / 2)), 0], [0, 0, 0, 1]]) transformation_matrix = np.eye(4) for i in range(4): lidar_one_view = self.get_lidar_from_depth() lidar_readings.append(lidar_one_view.dot(transformation_matrix[(:3, :3)])) self.V = r.dot(self.V) transformation_matrix = np.linalg.inv(r).dot(transformation_matrix) lidar_readings = np.concatenate(lidar_readings, axis=0) lidar_readings = lidar_readings.dot(np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])) self.set_fov(original_fov) return lidar_readings<|docstring|>Get complete LiDAR readings by patching together partial ones :param offset_with_camera: optionally place the lidar scanner with an offset to the camera :return: complete 360 degree LiDAR readings<|endoftext|>
8a6ef209e98ab0f5f736ed013fdf0bb6b031b6add2a1290918f33246e28d9810
def get_cube(self, mode='rgb', use_robot_camera=False): "\n :param mode: simulator rendering mode, 'rgb' or '3d'\n :param use_robot_camera: use the camera pose from robot\n\n :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D] * n_cameras\n " orig_fov = self.vertical_fov self.set_fov(90) org_V = np.copy(self.V) if use_robot_camera: for instance in self.instances: if isinstance(instance, Robot): camera_pos = instance.robot.eyes.get_position() orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[(:3, :3)] view_direction = mat.dot(np.array([1, 0, 0])) self.set_camera(camera_pos, (camera_pos + view_direction), [0, 0, 1]) def render_cube(): frames = [] r = np.array([[np.cos(((- np.pi) / 2)), 0, (- np.sin(((- np.pi) / 2))), 0], [0, 1, 0, 0], [np.sin(((- np.pi) / 2)), 0, np.cos(((- np.pi) / 2)), 0], [0, 0, 0, 1]]) for i in range(4): frames.append(self.render(modes=mode)[0]) self.V = r.dot(self.V) r_up = np.array([[1, 0, 0, 0], [0, 0, (- 1), 0], [0, (- 1), 0, 0], [0, 0, 0, 1]]) self.V = r_up.dot(org_V) frames.append(self.render(modes=mode)[0]) r_down = np.array([[1, 0, 0, 0], [0, 0, (- 1), 0], [0, 1, 0, 0], [0, 0, 0, 1]]) self.V = r_down.dot(org_V) frames.append(self.render(modes=mode)[0]) return frames frames = render_cube() self.V = org_V self.set_fov(orig_fov) return frames
:param mode: simulator rendering mode, 'rgb' or '3d' :param use_robot_camera: use the camera pose from robot :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D] * n_cameras
igibson/render/mesh_renderer/mesh_renderer_cpu.py
get_cube
suresh-guttikonda/iGibson
0
python
def get_cube(self, mode='rgb', use_robot_camera=False): "\n :param mode: simulator rendering mode, 'rgb' or '3d'\n :param use_robot_camera: use the camera pose from robot\n\n :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D] * n_cameras\n " orig_fov = self.vertical_fov self.set_fov(90) org_V = np.copy(self.V) if use_robot_camera: for instance in self.instances: if isinstance(instance, Robot): camera_pos = instance.robot.eyes.get_position() orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[(:3, :3)] view_direction = mat.dot(np.array([1, 0, 0])) self.set_camera(camera_pos, (camera_pos + view_direction), [0, 0, 1]) def render_cube(): frames = [] r = np.array([[np.cos(((- np.pi) / 2)), 0, (- np.sin(((- np.pi) / 2))), 0], [0, 1, 0, 0], [np.sin(((- np.pi) / 2)), 0, np.cos(((- np.pi) / 2)), 0], [0, 0, 0, 1]]) for i in range(4): frames.append(self.render(modes=mode)[0]) self.V = r.dot(self.V) r_up = np.array([[1, 0, 0, 0], [0, 0, (- 1), 0], [0, (- 1), 0, 0], [0, 0, 0, 1]]) self.V = r_up.dot(org_V) frames.append(self.render(modes=mode)[0]) r_down = np.array([[1, 0, 0, 0], [0, 0, (- 1), 0], [0, 1, 0, 0], [0, 0, 0, 1]]) self.V = r_down.dot(org_V) frames.append(self.render(modes=mode)[0]) return frames frames = render_cube() self.V = org_V self.set_fov(orig_fov) return frames
def get_cube(self, mode='rgb', use_robot_camera=False): "\n :param mode: simulator rendering mode, 'rgb' or '3d'\n :param use_robot_camera: use the camera pose from robot\n\n :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D] * n_cameras\n " orig_fov = self.vertical_fov self.set_fov(90) org_V = np.copy(self.V) if use_robot_camera: for instance in self.instances: if isinstance(instance, Robot): camera_pos = instance.robot.eyes.get_position() orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[(:3, :3)] view_direction = mat.dot(np.array([1, 0, 0])) self.set_camera(camera_pos, (camera_pos + view_direction), [0, 0, 1]) def render_cube(): frames = [] r = np.array([[np.cos(((- np.pi) / 2)), 0, (- np.sin(((- np.pi) / 2))), 0], [0, 1, 0, 0], [np.sin(((- np.pi) / 2)), 0, np.cos(((- np.pi) / 2)), 0], [0, 0, 0, 1]]) for i in range(4): frames.append(self.render(modes=mode)[0]) self.V = r.dot(self.V) r_up = np.array([[1, 0, 0, 0], [0, 0, (- 1), 0], [0, (- 1), 0, 0], [0, 0, 0, 1]]) self.V = r_up.dot(org_V) frames.append(self.render(modes=mode)[0]) r_down = np.array([[1, 0, 0, 0], [0, 0, (- 1), 0], [0, 1, 0, 0], [0, 0, 0, 1]]) self.V = r_down.dot(org_V) frames.append(self.render(modes=mode)[0]) return frames frames = render_cube() self.V = org_V self.set_fov(orig_fov) return frames<|docstring|>:param mode: simulator rendering mode, 'rgb' or '3d' :param use_robot_camera: use the camera pose from robot :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D] * n_cameras<|endoftext|>
941ac89616d95cfa33bef89c1edfeb2b427b91e6a30a62c03903945e53ec9674
def get_equi(self, mode='rgb', use_robot_camera=False): "\n :param mode: simulator rendering mode, 'rgb' or '3d'\n :param use_robot_camera: use the camera pose from robot\n :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D]\n " frames = self.get_cube(mode=mode, use_robot_camera=use_robot_camera) frames = [frames[0], frames[1][(:, ::(- 1), :)], frames[2][(:, ::(- 1), :)], frames[3], frames[4], frames[5]] equi = py360convert.c2e(cubemap=frames, h=frames[0].shape[0], w=(frames[0].shape[0] * 2), cube_format='list') return equi
:param mode: simulator rendering mode, 'rgb' or '3d' :param use_robot_camera: use the camera pose from robot :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D]
igibson/render/mesh_renderer/mesh_renderer_cpu.py
get_equi
suresh-guttikonda/iGibson
0
python
def get_equi(self, mode='rgb', use_robot_camera=False): "\n :param mode: simulator rendering mode, 'rgb' or '3d'\n :param use_robot_camera: use the camera pose from robot\n :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D]\n " frames = self.get_cube(mode=mode, use_robot_camera=use_robot_camera) frames = [frames[0], frames[1][(:, ::(- 1), :)], frames[2][(:, ::(- 1), :)], frames[3], frames[4], frames[5]] equi = py360convert.c2e(cubemap=frames, h=frames[0].shape[0], w=(frames[0].shape[0] * 2), cube_format='list') return equi
def get_equi(self, mode='rgb', use_robot_camera=False): "\n :param mode: simulator rendering mode, 'rgb' or '3d'\n :param use_robot_camera: use the camera pose from robot\n :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D]\n " frames = self.get_cube(mode=mode, use_robot_camera=use_robot_camera) frames = [frames[0], frames[1][(:, ::(- 1), :)], frames[2][(:, ::(- 1), :)], frames[3], frames[4], frames[5]] equi = py360convert.c2e(cubemap=frames, h=frames[0].shape[0], w=(frames[0].shape[0] * 2), cube_format='list') return equi<|docstring|>:param mode: simulator rendering mode, 'rgb' or '3d' :param use_robot_camera: use the camera pose from robot :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D]<|endoftext|>
cab123a9789f215a19a54902109e4f99d390ad14fe806ca9dcdbe6d13c95766a
def reset_widgets_over() -> None: '\n Reset widget over.\n\n :return: None\n ' check_widget_mouseleave(force=True)
Reset widget over. :return: None
test/_utils.py
reset_widgets_over
mseyne/pygame-menu
419
python
def reset_widgets_over() -> None: '\n Reset widget over.\n\n :return: None\n ' check_widget_mouseleave(force=True)
def reset_widgets_over() -> None: '\n Reset widget over.\n\n :return: None\n ' check_widget_mouseleave(force=True)<|docstring|>Reset widget over. :return: None<|endoftext|>
76a9e64a1362fefe2051b2b1e877e427398401c2b630e86484b828da4846a1fe
def test_reset_surface() -> None: '\n Reset test surface.\n\n :return: None\n ' global surface surface = pygame.display.set_mode(WINDOW_SIZE)
Reset test surface. :return: None
test/_utils.py
test_reset_surface
mseyne/pygame-menu
419
python
def test_reset_surface() -> None: '\n Reset test surface.\n\n :return: None\n ' global surface surface = pygame.display.set_mode(WINDOW_SIZE)
def test_reset_surface() -> None: '\n Reset test surface.\n\n :return: None\n ' global surface surface = pygame.display.set_mode(WINDOW_SIZE)<|docstring|>Reset test surface. :return: None<|endoftext|>
b3ff817e358c5b43cac420520e58f26d92cd487a87b3b5aadb7c9f155d719950
def setUp(self) -> None: '\n Reset the surface.\n ' test_reset_surface()
Reset the surface.
test/_utils.py
setUp
mseyne/pygame-menu
419
python
def setUp(self) -> None: '\n \n ' test_reset_surface()
def setUp(self) -> None: '\n \n ' test_reset_surface()<|docstring|>Reset the surface.<|endoftext|>
ba3f53e9a7a6d64e2f6255d63a064b7880bcbcef5497149f04e446ada41aa646
@staticmethod def joy_motion(x: NumberType=0, y: NumberType=0, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a pygame joy controller motion event.\n\n :param x: X-axis movement\n :param y: Y-axis movement\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' if ((x != 0) and (y != 0)): return [PygameEventUtils.joy_motion(x=x, inlist=False, testmode=testmode), PygameEventUtils.joy_motion(y=y, inlist=False, testmode=testmode)] event_obj = None if (x != 0): event_obj = pygame.event.Event(pygame.JOYAXISMOTION, {'value': x, 'axis': pygame_menu.controls.JOY_AXIS_X, 'test': testmode}) if (y != 0): event_obj = pygame.event.Event(pygame.JOYAXISMOTION, {'value': y, 'axis': pygame_menu.controls.JOY_AXIS_Y, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
Create a pygame joy controller motion event. :param x: X-axis movement :param y: Y-axis movement :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event
test/_utils.py
joy_motion
mseyne/pygame-menu
419
python
@staticmethod def joy_motion(x: NumberType=0, y: NumberType=0, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a pygame joy controller motion event.\n\n :param x: X-axis movement\n :param y: Y-axis movement\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' if ((x != 0) and (y != 0)): return [PygameEventUtils.joy_motion(x=x, inlist=False, testmode=testmode), PygameEventUtils.joy_motion(y=y, inlist=False, testmode=testmode)] event_obj = None if (x != 0): event_obj = pygame.event.Event(pygame.JOYAXISMOTION, {'value': x, 'axis': pygame_menu.controls.JOY_AXIS_X, 'test': testmode}) if (y != 0): event_obj = pygame.event.Event(pygame.JOYAXISMOTION, {'value': y, 'axis': pygame_menu.controls.JOY_AXIS_Y, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
@staticmethod def joy_motion(x: NumberType=0, y: NumberType=0, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a pygame joy controller motion event.\n\n :param x: X-axis movement\n :param y: Y-axis movement\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' if ((x != 0) and (y != 0)): return [PygameEventUtils.joy_motion(x=x, inlist=False, testmode=testmode), PygameEventUtils.joy_motion(y=y, inlist=False, testmode=testmode)] event_obj = None if (x != 0): event_obj = pygame.event.Event(pygame.JOYAXISMOTION, {'value': x, 'axis': pygame_menu.controls.JOY_AXIS_X, 'test': testmode}) if (y != 0): event_obj = pygame.event.Event(pygame.JOYAXISMOTION, {'value': y, 'axis': pygame_menu.controls.JOY_AXIS_Y, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj<|docstring|>Create a pygame joy controller motion event. :param x: X-axis movement :param y: Y-axis movement :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event<|endoftext|>
1d81eed317e7925c30268a96b487bd334b78d98267d135a251906f01e7adffef
@staticmethod def joy_center(testmode: bool=True, inlist: bool=True) -> EventListType: '\n Centers the joy.\n\n :param testmode: Event is in test mode\n :param inlist: Event is within a list\n :return: Center joy event\n ' event_obj = pygame.event.Event(pygame.JOYAXISMOTION, {'value': 0, 'axis': pygame_menu.controls.JOY_AXIS_Y, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
Centers the joy. :param testmode: Event is in test mode :param inlist: Event is within a list :return: Center joy event
test/_utils.py
joy_center
mseyne/pygame-menu
419
python
@staticmethod def joy_center(testmode: bool=True, inlist: bool=True) -> EventListType: '\n Centers the joy.\n\n :param testmode: Event is in test mode\n :param inlist: Event is within a list\n :return: Center joy event\n ' event_obj = pygame.event.Event(pygame.JOYAXISMOTION, {'value': 0, 'axis': pygame_menu.controls.JOY_AXIS_Y, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
@staticmethod def joy_center(testmode: bool=True, inlist: bool=True) -> EventListType: '\n Centers the joy.\n\n :param testmode: Event is in test mode\n :param inlist: Event is within a list\n :return: Center joy event\n ' event_obj = pygame.event.Event(pygame.JOYAXISMOTION, {'value': 0, 'axis': pygame_menu.controls.JOY_AXIS_Y, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj<|docstring|>Centers the joy. :param testmode: Event is in test mode :param inlist: Event is within a list :return: Center joy event<|endoftext|>
509000d47faaa6dd2b99fcdea97e922cb551984963cf0a7ad7a110aad6f64a26
@staticmethod def joy_hat_motion(key: Tuple[(int, int)], inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a pygame joy controller key event.\n\n :param key: Key to press\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' event_obj = pygame.event.Event(pygame.JOYHATMOTION, {'value': key, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
Create a pygame joy controller key event. :param key: Key to press :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event
test/_utils.py
joy_hat_motion
mseyne/pygame-menu
419
python
@staticmethod def joy_hat_motion(key: Tuple[(int, int)], inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a pygame joy controller key event.\n\n :param key: Key to press\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' event_obj = pygame.event.Event(pygame.JOYHATMOTION, {'value': key, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
@staticmethod def joy_hat_motion(key: Tuple[(int, int)], inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a pygame joy controller key event.\n\n :param key: Key to press\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' event_obj = pygame.event.Event(pygame.JOYHATMOTION, {'value': key, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj<|docstring|>Create a pygame joy controller key event. :param key: Key to press :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event<|endoftext|>
356495d254c332ce9410bc06335f43568874e3ff2bd679a258c3888a70a49f52
@staticmethod def joy_button(button: int, evtype: int=pygame.JOYBUTTONDOWN, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a pygame joy controller key event.\n\n :param button: Button to press\n :param evtype: Event type\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' event_obj = pygame.event.Event(evtype, {'button': button, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
Create a pygame joy controller key event. :param button: Button to press :param evtype: Event type :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event
test/_utils.py
joy_button
mseyne/pygame-menu
419
python
@staticmethod def joy_button(button: int, evtype: int=pygame.JOYBUTTONDOWN, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a pygame joy controller key event.\n\n :param button: Button to press\n :param evtype: Event type\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' event_obj = pygame.event.Event(evtype, {'button': button, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
@staticmethod def joy_button(button: int, evtype: int=pygame.JOYBUTTONDOWN, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a pygame joy controller key event.\n\n :param button: Button to press\n :param evtype: Event type\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' event_obj = pygame.event.Event(evtype, {'button': button, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj<|docstring|>Create a pygame joy controller key event. :param button: Button to press :param evtype: Event type :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event<|endoftext|>
57eb0116500ea138b013361a1575cef5b3aef071a029c5a2be4648ee2fdcc0a4
@staticmethod def test_widget_key_press(widget: 'pygame_menu.widgets.Widget', testmode: bool=True) -> None: '\n Test keypress widget.\n\n :param widget: Widget object\n :param testmode: Event is in test mode\n :return: None\n ' widget.update(PygameEventUtils.key(pygame.K_BACKSPACE, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_DELETE, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_LEFT, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_RIGHT, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_END, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_HOME, keydown=True, testmode=testmode))
Test keypress widget. :param widget: Widget object :param testmode: Event is in test mode :return: None
test/_utils.py
test_widget_key_press
mseyne/pygame-menu
419
python
@staticmethod def test_widget_key_press(widget: 'pygame_menu.widgets.Widget', testmode: bool=True) -> None: '\n Test keypress widget.\n\n :param widget: Widget object\n :param testmode: Event is in test mode\n :return: None\n ' widget.update(PygameEventUtils.key(pygame.K_BACKSPACE, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_DELETE, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_LEFT, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_RIGHT, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_END, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_HOME, keydown=True, testmode=testmode))
@staticmethod def test_widget_key_press(widget: 'pygame_menu.widgets.Widget', testmode: bool=True) -> None: '\n Test keypress widget.\n\n :param widget: Widget object\n :param testmode: Event is in test mode\n :return: None\n ' widget.update(PygameEventUtils.key(pygame.K_BACKSPACE, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_DELETE, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_LEFT, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_RIGHT, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_END, keydown=True, testmode=testmode)) widget.update(PygameEventUtils.key(pygame.K_HOME, keydown=True, testmode=testmode))<|docstring|>Test keypress widget. :param widget: Widget object :param testmode: Event is in test mode :return: None<|endoftext|>
a5bda0aaed25b7f054ef4b67273f62228130543a4143a4bf074ea9856b634ce3
@staticmethod def keydown_mod_ctrl(key: int, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a mod ctrl keydown event (Ctrl+Key).\n\n :param key: Key to press\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' pygame.key.set_mods(pygame.KMOD_CTRL) event_obj = pygame.event.Event(pygame.KEYDOWN, {'key': key, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
Create a mod ctrl keydown event (Ctrl+Key). :param key: Key to press :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event
test/_utils.py
keydown_mod_ctrl
mseyne/pygame-menu
419
python
@staticmethod def keydown_mod_ctrl(key: int, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a mod ctrl keydown event (Ctrl+Key).\n\n :param key: Key to press\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' pygame.key.set_mods(pygame.KMOD_CTRL) event_obj = pygame.event.Event(pygame.KEYDOWN, {'key': key, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
@staticmethod def keydown_mod_ctrl(key: int, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a mod ctrl keydown event (Ctrl+Key).\n\n :param key: Key to press\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' pygame.key.set_mods(pygame.KMOD_CTRL) event_obj = pygame.event.Event(pygame.KEYDOWN, {'key': key, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj<|docstring|>Create a mod ctrl keydown event (Ctrl+Key). :param key: Key to press :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event<|endoftext|>
39fe3086016d2658829dc586395b6963f738ff52b31bf8194e3153c0381abbf3
@staticmethod def release_key_mod() -> None: '\n Release pygame key mods.\n\n :return: None\n ' pygame.key.set_mods(pygame.KMOD_NONE)
Release pygame key mods. :return: None
test/_utils.py
release_key_mod
mseyne/pygame-menu
419
python
@staticmethod def release_key_mod() -> None: '\n Release pygame key mods.\n\n :return: None\n ' pygame.key.set_mods(pygame.KMOD_NONE)
@staticmethod def release_key_mod() -> None: '\n Release pygame key mods.\n\n :return: None\n ' pygame.key.set_mods(pygame.KMOD_NONE)<|docstring|>Release pygame key mods. :return: None<|endoftext|>
07e5c09a513799d6445c21e93f7c2844d49ddc571b28a79a03340b5d66431097
@staticmethod def keydown_mod_alt(key: int, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a mod alt keydown event (Alt+Key).\n\n :param key: Key to press\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' pygame.key.set_mods(pygame.KMOD_ALT) event_obj = pygame.event.Event(pygame.KEYDOWN, {'key': key, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
Create a mod alt keydown event (Alt+Key). :param key: Key to press :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event
test/_utils.py
keydown_mod_alt
mseyne/pygame-menu
419
python
@staticmethod def keydown_mod_alt(key: int, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a mod alt keydown event (Alt+Key).\n\n :param key: Key to press\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' pygame.key.set_mods(pygame.KMOD_ALT) event_obj = pygame.event.Event(pygame.KEYDOWN, {'key': key, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
@staticmethod def keydown_mod_alt(key: int, inlist: bool=True, testmode: bool=True) -> EventListType: '\n Create a mod alt keydown event (Alt+Key).\n\n :param key: Key to press\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' pygame.key.set_mods(pygame.KMOD_ALT) event_obj = pygame.event.Event(pygame.KEYDOWN, {'key': key, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj<|docstring|>Create a mod alt keydown event (Alt+Key). :param key: Key to press :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event<|endoftext|>
f40febde0a44021684de29a543acc46cad88e21da26cc459c0810000dba241dc
@staticmethod def keydown(key: Union[(int, VectorIntType)], testmode: bool=True, inlist: bool=True) -> EventListType: '\n Keydown list.\n\n :param key: Key to press\n :param testmode: Event is in test mode\n :param inlist: Return event in a list\n :return: Event list\n ' if isinstance(key, int): key = [key] ev = [] for k in key: assert isinstance(k, int) ev.append(PygameEventUtils.key(k, keydown=True, inlist=False, testmode=testmode)) if (not inlist): assert (len(ev) == 1) return ev[0] return ev
Keydown list. :param key: Key to press :param testmode: Event is in test mode :param inlist: Return event in a list :return: Event list
test/_utils.py
keydown
mseyne/pygame-menu
419
python
@staticmethod def keydown(key: Union[(int, VectorIntType)], testmode: bool=True, inlist: bool=True) -> EventListType: '\n Keydown list.\n\n :param key: Key to press\n :param testmode: Event is in test mode\n :param inlist: Return event in a list\n :return: Event list\n ' if isinstance(key, int): key = [key] ev = [] for k in key: assert isinstance(k, int) ev.append(PygameEventUtils.key(k, keydown=True, inlist=False, testmode=testmode)) if (not inlist): assert (len(ev) == 1) return ev[0] return ev
@staticmethod def keydown(key: Union[(int, VectorIntType)], testmode: bool=True, inlist: bool=True) -> EventListType: '\n Keydown list.\n\n :param key: Key to press\n :param testmode: Event is in test mode\n :param inlist: Return event in a list\n :return: Event list\n ' if isinstance(key, int): key = [key] ev = [] for k in key: assert isinstance(k, int) ev.append(PygameEventUtils.key(k, keydown=True, inlist=False, testmode=testmode)) if (not inlist): assert (len(ev) == 1) return ev[0] return ev<|docstring|>Keydown list. :param key: Key to press :param testmode: Event is in test mode :param inlist: Return event in a list :return: Event list<|endoftext|>
df55d091c849bd8f0e957c87309ad397ea0d9a3010af2529aec2640c876dfe21
@staticmethod def key(key: int, char: str=' ', inlist: bool=True, keydown: bool=False, keyup: bool=False, testmode: bool=True) -> EventListType: '\n Create a keyboard event.\n\n :param key: Key to press\n :param char: Char representing the key\n :param inlist: Return event in a list\n :param keydown: Event is keydown\n :param keyup: Event is keyup\n :param testmode: Event is in test mode\n :return: Event\n ' if (keyup and keydown): raise ValueError('keyup and keydown cannot be active at the same time') if ((keydown == keyup) and (not keydown)): raise ValueError('keyup and keydown cannot be false at the same time') event = (- 1) if keydown: event = pygame.KEYDOWN if keyup: event = pygame.KEYUP event_obj = pygame.event.Event(event, {'key': key, 'test': testmode}) if (len(char) == 1): event_obj.dict['unicode'] = char if inlist: event_obj = [event_obj] return event_obj
Create a keyboard event. :param key: Key to press :param char: Char representing the key :param inlist: Return event in a list :param keydown: Event is keydown :param keyup: Event is keyup :param testmode: Event is in test mode :return: Event
test/_utils.py
key
mseyne/pygame-menu
419
python
@staticmethod def key(key: int, char: str=' ', inlist: bool=True, keydown: bool=False, keyup: bool=False, testmode: bool=True) -> EventListType: '\n Create a keyboard event.\n\n :param key: Key to press\n :param char: Char representing the key\n :param inlist: Return event in a list\n :param keydown: Event is keydown\n :param keyup: Event is keyup\n :param testmode: Event is in test mode\n :return: Event\n ' if (keyup and keydown): raise ValueError('keyup and keydown cannot be active at the same time') if ((keydown == keyup) and (not keydown)): raise ValueError('keyup and keydown cannot be false at the same time') event = (- 1) if keydown: event = pygame.KEYDOWN if keyup: event = pygame.KEYUP event_obj = pygame.event.Event(event, {'key': key, 'test': testmode}) if (len(char) == 1): event_obj.dict['unicode'] = char if inlist: event_obj = [event_obj] return event_obj
@staticmethod def key(key: int, char: str=' ', inlist: bool=True, keydown: bool=False, keyup: bool=False, testmode: bool=True) -> EventListType: '\n Create a keyboard event.\n\n :param key: Key to press\n :param char: Char representing the key\n :param inlist: Return event in a list\n :param keydown: Event is keydown\n :param keyup: Event is keyup\n :param testmode: Event is in test mode\n :return: Event\n ' if (keyup and keydown): raise ValueError('keyup and keydown cannot be active at the same time') if ((keydown == keyup) and (not keydown)): raise ValueError('keyup and keydown cannot be false at the same time') event = (- 1) if keydown: event = pygame.KEYDOWN if keyup: event = pygame.KEYUP event_obj = pygame.event.Event(event, {'key': key, 'test': testmode}) if (len(char) == 1): event_obj.dict['unicode'] = char if inlist: event_obj = [event_obj] return event_obj<|docstring|>Create a keyboard event. :param key: Key to press :param char: Char representing the key :param inlist: Return event in a list :param keydown: Event is keydown :param keyup: Event is keyup :param testmode: Event is in test mode :return: Event<|endoftext|>
0e378c09bbccde5c3c0083a9b63ccc2c46eb108b5837c1db205f644c08a69b2a
@staticmethod def enter_window(inlist: bool=True, testmode: bool=True) -> EventListType: '\n Enter window event.\n\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' ev = pygame.event.Event(pygame.ACTIVEEVENT, {'gain': 1, 'test': testmode}) if inlist: ev = [ev] return ev
Enter window event. :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event
test/_utils.py
enter_window
mseyne/pygame-menu
419
python
@staticmethod def enter_window(inlist: bool=True, testmode: bool=True) -> EventListType: '\n Enter window event.\n\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' ev = pygame.event.Event(pygame.ACTIVEEVENT, {'gain': 1, 'test': testmode}) if inlist: ev = [ev] return ev
@staticmethod def enter_window(inlist: bool=True, testmode: bool=True) -> EventListType: '\n Enter window event.\n\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' ev = pygame.event.Event(pygame.ACTIVEEVENT, {'gain': 1, 'test': testmode}) if inlist: ev = [ev] return ev<|docstring|>Enter window event. :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event<|endoftext|>
45f9c5e48d40abf80399e429285ffb0c80ec7cca58bc2d3cc4714c90d042d3e5
@staticmethod def leave_window(inlist: bool=True, testmode: bool=True) -> EventListType: '\n Leave window event.\n\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' ev = pygame.event.Event(pygame.ACTIVEEVENT, {'gain': 0, 'test': testmode}) if inlist: ev = [ev] return ev
Leave window event. :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event
test/_utils.py
leave_window
mseyne/pygame-menu
419
python
@staticmethod def leave_window(inlist: bool=True, testmode: bool=True) -> EventListType: '\n Leave window event.\n\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' ev = pygame.event.Event(pygame.ACTIVEEVENT, {'gain': 0, 'test': testmode}) if inlist: ev = [ev] return ev
@staticmethod def leave_window(inlist: bool=True, testmode: bool=True) -> EventListType: '\n Leave window event.\n\n :param inlist: Return event in a list\n :param testmode: Event is in test mode\n :return: Event\n ' ev = pygame.event.Event(pygame.ACTIVEEVENT, {'gain': 0, 'test': testmode}) if inlist: ev = [ev] return ev<|docstring|>Leave window event. :param inlist: Return event in a list :param testmode: Event is in test mode :return: Event<|endoftext|>
6cb78706ba60b478ecce400370383835250b802687e854988c197b7fb7489600
@staticmethod def mouse_click(x: NumberType, y: NumberType, inlist: bool=True, evtype: int=pygame.MOUSEBUTTONUP, rel: Tuple2IntType=(0, 0), button: int=3, testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Generate a mouse click event.\n\n :param x: X coordinate in px\n :param y: Y coordinate in px\n :param inlist: Return event in a list\n :param evtype: event type, it can be MOUSEBUTTONUP or MOUSEBUTTONDOWN\n :param rel: Rel position (relative movement)\n :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' assert (isinstance(button, int) and (button > 0)) assert isinstance(x, NumberInstance) assert isinstance(y, NumberInstance) assert_vector(rel, 2, int) x = int(x) y = int(y) event_obj = pygame.event.Event(evtype, {'button': button, 'pos': (x, y), 'rel': rel, 'test': testmode}) if update_mouse: pygame.mouse.set_pos((x, y)) if inlist: event_obj = [event_obj] return event_obj
Generate a mouse click event. :param x: X coordinate in px :param y: Y coordinate in px :param inlist: Return event in a list :param evtype: event type, it can be MOUSEBUTTONUP or MOUSEBUTTONDOWN :param rel: Rel position (relative movement) :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel :param testmode: Event is in test mode :param update_mouse: If ``True`` updates the mouse position :return: Event
test/_utils.py
mouse_click
mseyne/pygame-menu
419
python
@staticmethod def mouse_click(x: NumberType, y: NumberType, inlist: bool=True, evtype: int=pygame.MOUSEBUTTONUP, rel: Tuple2IntType=(0, 0), button: int=3, testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Generate a mouse click event.\n\n :param x: X coordinate in px\n :param y: Y coordinate in px\n :param inlist: Return event in a list\n :param evtype: event type, it can be MOUSEBUTTONUP or MOUSEBUTTONDOWN\n :param rel: Rel position (relative movement)\n :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' assert (isinstance(button, int) and (button > 0)) assert isinstance(x, NumberInstance) assert isinstance(y, NumberInstance) assert_vector(rel, 2, int) x = int(x) y = int(y) event_obj = pygame.event.Event(evtype, {'button': button, 'pos': (x, y), 'rel': rel, 'test': testmode}) if update_mouse: pygame.mouse.set_pos((x, y)) if inlist: event_obj = [event_obj] return event_obj
@staticmethod def mouse_click(x: NumberType, y: NumberType, inlist: bool=True, evtype: int=pygame.MOUSEBUTTONUP, rel: Tuple2IntType=(0, 0), button: int=3, testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Generate a mouse click event.\n\n :param x: X coordinate in px\n :param y: Y coordinate in px\n :param inlist: Return event in a list\n :param evtype: event type, it can be MOUSEBUTTONUP or MOUSEBUTTONDOWN\n :param rel: Rel position (relative movement)\n :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' assert (isinstance(button, int) and (button > 0)) assert isinstance(x, NumberInstance) assert isinstance(y, NumberInstance) assert_vector(rel, 2, int) x = int(x) y = int(y) event_obj = pygame.event.Event(evtype, {'button': button, 'pos': (x, y), 'rel': rel, 'test': testmode}) if update_mouse: pygame.mouse.set_pos((x, y)) if inlist: event_obj = [event_obj] return event_obj<|docstring|>Generate a mouse click event. :param x: X coordinate in px :param y: Y coordinate in px :param inlist: Return event in a list :param evtype: event type, it can be MOUSEBUTTONUP or MOUSEBUTTONDOWN :param rel: Rel position (relative movement) :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel :param testmode: Event is in test mode :param update_mouse: If ``True`` updates the mouse position :return: Event<|endoftext|>
4e203be2f99754a86bbe0e018df541f4f280a53276f0871db34dcc9e616f96ff
@staticmethod def touch_click(x: NumberType, y: NumberType, inlist: bool=True, evtype: int=FINGERUP, rel: Tuple2IntType=(0, 0), normalize: bool=True, menu: Union[('pygame_menu.Menu', None)]=None, testmode: bool=True) -> EventListType: '\n Generate a mouse click event.\n\n :param x: X coordinate\n :param y: Y coordinate\n :param inlist: Return event in a list\n :param evtype: Event type, it can be FINGERUP, FINGERDOWN or FINGERMOTION\n :param rel: Rel position (relative movement)\n :param normalize: Normalize event position\n :param menu: Menu reference\n :param testmode: Event is in test mode\n :return: Event\n ' assert isinstance(x, NumberInstance) assert isinstance(y, NumberInstance) assert_vector(rel, 2, int) if normalize: assert (menu is not None), 'menu reference must be provided if normalize is used (related to touch events)' display_size = menu.get_window_size() x /= display_size[0] y /= display_size[1] event_obj = pygame.event.Event(evtype, {'x': x, 'y': y, 'rel': rel, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
Generate a mouse click event. :param x: X coordinate :param y: Y coordinate :param inlist: Return event in a list :param evtype: Event type, it can be FINGERUP, FINGERDOWN or FINGERMOTION :param rel: Rel position (relative movement) :param normalize: Normalize event position :param menu: Menu reference :param testmode: Event is in test mode :return: Event
test/_utils.py
touch_click
mseyne/pygame-menu
419
python
@staticmethod def touch_click(x: NumberType, y: NumberType, inlist: bool=True, evtype: int=FINGERUP, rel: Tuple2IntType=(0, 0), normalize: bool=True, menu: Union[('pygame_menu.Menu', None)]=None, testmode: bool=True) -> EventListType: '\n Generate a mouse click event.\n\n :param x: X coordinate\n :param y: Y coordinate\n :param inlist: Return event in a list\n :param evtype: Event type, it can be FINGERUP, FINGERDOWN or FINGERMOTION\n :param rel: Rel position (relative movement)\n :param normalize: Normalize event position\n :param menu: Menu reference\n :param testmode: Event is in test mode\n :return: Event\n ' assert isinstance(x, NumberInstance) assert isinstance(y, NumberInstance) assert_vector(rel, 2, int) if normalize: assert (menu is not None), 'menu reference must be provided if normalize is used (related to touch events)' display_size = menu.get_window_size() x /= display_size[0] y /= display_size[1] event_obj = pygame.event.Event(evtype, {'x': x, 'y': y, 'rel': rel, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj
@staticmethod def touch_click(x: NumberType, y: NumberType, inlist: bool=True, evtype: int=FINGERUP, rel: Tuple2IntType=(0, 0), normalize: bool=True, menu: Union[('pygame_menu.Menu', None)]=None, testmode: bool=True) -> EventListType: '\n Generate a mouse click event.\n\n :param x: X coordinate\n :param y: Y coordinate\n :param inlist: Return event in a list\n :param evtype: Event type, it can be FINGERUP, FINGERDOWN or FINGERMOTION\n :param rel: Rel position (relative movement)\n :param normalize: Normalize event position\n :param menu: Menu reference\n :param testmode: Event is in test mode\n :return: Event\n ' assert isinstance(x, NumberInstance) assert isinstance(y, NumberInstance) assert_vector(rel, 2, int) if normalize: assert (menu is not None), 'menu reference must be provided if normalize is used (related to touch events)' display_size = menu.get_window_size() x /= display_size[0] y /= display_size[1] event_obj = pygame.event.Event(evtype, {'x': x, 'y': y, 'rel': rel, 'test': testmode}) if inlist: event_obj = [event_obj] return event_obj<|docstring|>Generate a mouse click event. :param x: X coordinate :param y: Y coordinate :param inlist: Return event in a list :param evtype: Event type, it can be FINGERUP, FINGERDOWN or FINGERMOTION :param rel: Rel position (relative movement) :param normalize: Normalize event position :param menu: Menu reference :param testmode: Event is in test mode :return: Event<|endoftext|>
4cbf8447519581dffe0e3dcf78e614eac6006a67bded1b96f8db9942493b413c
@staticmethod def topleft_rect_mouse_motion(rect: Union[('pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType)], inlist: bool=True, delta: Tuple2IntType=(0, 0), testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Mouse motion event.\n\n :param rect: Widget, Rect object, or Tuple\n :param inlist: If ``True`` return the event within a list\n :param delta: Add tuple to rect position\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' if isinstance(rect, pygame_menu.widgets.Widget): (x, y) = rect.get_rect(to_real_position=True, render=True).topleft elif isinstance(rect, pygame.Rect): (x, y) = rect.topleft elif isinstance(rect, VectorInstance): (x, y) = (rect[0], rect[1]) else: raise ValueError('unknown rect type') return PygameEventUtils.middle_rect_click(rect=(x, y), evtype=pygame.MOUSEMOTION, inlist=inlist, delta=delta, testmode=testmode, update_mouse=update_mouse)
Mouse motion event. :param rect: Widget, Rect object, or Tuple :param inlist: If ``True`` return the event within a list :param delta: Add tuple to rect position :param testmode: Event is in test mode :param update_mouse: If ``True`` updates the mouse position :return: Event
test/_utils.py
topleft_rect_mouse_motion
mseyne/pygame-menu
419
python
@staticmethod def topleft_rect_mouse_motion(rect: Union[('pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType)], inlist: bool=True, delta: Tuple2IntType=(0, 0), testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Mouse motion event.\n\n :param rect: Widget, Rect object, or Tuple\n :param inlist: If ``True`` return the event within a list\n :param delta: Add tuple to rect position\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' if isinstance(rect, pygame_menu.widgets.Widget): (x, y) = rect.get_rect(to_real_position=True, render=True).topleft elif isinstance(rect, pygame.Rect): (x, y) = rect.topleft elif isinstance(rect, VectorInstance): (x, y) = (rect[0], rect[1]) else: raise ValueError('unknown rect type') return PygameEventUtils.middle_rect_click(rect=(x, y), evtype=pygame.MOUSEMOTION, inlist=inlist, delta=delta, testmode=testmode, update_mouse=update_mouse)
@staticmethod def topleft_rect_mouse_motion(rect: Union[('pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType)], inlist: bool=True, delta: Tuple2IntType=(0, 0), testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Mouse motion event.\n\n :param rect: Widget, Rect object, or Tuple\n :param inlist: If ``True`` return the event within a list\n :param delta: Add tuple to rect position\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' if isinstance(rect, pygame_menu.widgets.Widget): (x, y) = rect.get_rect(to_real_position=True, render=True).topleft elif isinstance(rect, pygame.Rect): (x, y) = rect.topleft elif isinstance(rect, VectorInstance): (x, y) = (rect[0], rect[1]) else: raise ValueError('unknown rect type') return PygameEventUtils.middle_rect_click(rect=(x, y), evtype=pygame.MOUSEMOTION, inlist=inlist, delta=delta, testmode=testmode, update_mouse=update_mouse)<|docstring|>Mouse motion event. :param rect: Widget, Rect object, or Tuple :param inlist: If ``True`` return the event within a list :param delta: Add tuple to rect position :param testmode: Event is in test mode :param update_mouse: If ``True`` updates the mouse position :return: Event<|endoftext|>
1f2a8cb214dfb4a4d421dfea1fc22c5cdcb3bade5b161f46577bd682804c1024
@staticmethod def mouse_motion(rect: Union[('pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType)], inlist: bool=True, rel: Tuple2IntType=(0, 0), delta: Tuple2IntType=(0, 0), testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Mouse motion event.\n\n :param rect: Widget, Rect object, or Tuple\n :param inlist: If ``True`` return the event within a list\n :param rel: Rel position (relative movement)\n :param delta: Add tuple to rect position\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' return PygameEventUtils.middle_rect_click(rect=rect, evtype=pygame.MOUSEMOTION, rel=rel, inlist=inlist, delta=delta, testmode=testmode, update_mouse=update_mouse)
Mouse motion event. :param rect: Widget, Rect object, or Tuple :param inlist: If ``True`` return the event within a list :param rel: Rel position (relative movement) :param delta: Add tuple to rect position :param testmode: Event is in test mode :param update_mouse: If ``True`` updates the mouse position :return: Event
test/_utils.py
mouse_motion
mseyne/pygame-menu
419
python
@staticmethod def mouse_motion(rect: Union[('pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType)], inlist: bool=True, rel: Tuple2IntType=(0, 0), delta: Tuple2IntType=(0, 0), testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Mouse motion event.\n\n :param rect: Widget, Rect object, or Tuple\n :param inlist: If ``True`` return the event within a list\n :param rel: Rel position (relative movement)\n :param delta: Add tuple to rect position\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' return PygameEventUtils.middle_rect_click(rect=rect, evtype=pygame.MOUSEMOTION, rel=rel, inlist=inlist, delta=delta, testmode=testmode, update_mouse=update_mouse)
@staticmethod def mouse_motion(rect: Union[('pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType)], inlist: bool=True, rel: Tuple2IntType=(0, 0), delta: Tuple2IntType=(0, 0), testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Mouse motion event.\n\n :param rect: Widget, Rect object, or Tuple\n :param inlist: If ``True`` return the event within a list\n :param rel: Rel position (relative movement)\n :param delta: Add tuple to rect position\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' return PygameEventUtils.middle_rect_click(rect=rect, evtype=pygame.MOUSEMOTION, rel=rel, inlist=inlist, delta=delta, testmode=testmode, update_mouse=update_mouse)<|docstring|>Mouse motion event. :param rect: Widget, Rect object, or Tuple :param inlist: If ``True`` return the event within a list :param rel: Rel position (relative movement) :param delta: Add tuple to rect position :param testmode: Event is in test mode :param update_mouse: If ``True`` updates the mouse position :return: Event<|endoftext|>
a25620faaff1a824ed1a770bf1cdf3ee182f9ac3d2f13fb3f81482206cb5db19
@staticmethod def middle_rect_click(rect: Union[('pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType)], menu: Optional['pygame_menu.Menu']=None, evtype: int=pygame.MOUSEBUTTONUP, inlist: bool=True, rel: Tuple2IntType=(0, 0), button: int=3, delta: Tuple2IntType=(0, 0), testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Return event clicking the middle of a given rect.\n\n :param rect: Widget, Rect object, or Tuple\n :param menu: Menu object\n :param evtype: event type, it can be MOUSEBUTTONUP, MOUSEBUTTONDOWN, MOUSEMOTION, FINGERUP, FINGERDOWN, FINGERMOTION\n :param inlist: If ``True`` return the event within a list\n :param rel: Rel position (relative movement)\n :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel\n :param delta: Add tuple to rect position\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' assert (isinstance(button, int) and (button > 0)) assert_vector(rel, 2, int) assert_vector(delta, 2, int) if isinstance(rect, pygame_menu.widgets.Widget): (x, y) = rect.get_rect(to_real_position=True, render=True, apply_padding=False).center menu = rect.get_menu() elif isinstance(rect, pygame.Rect): (x, y) = rect.center elif isinstance(rect, VectorInstance): (x, y) = (rect[0], rect[1]) else: raise ValueError('unknown rect type') if ((evtype == FINGERDOWN) or (evtype == FINGERUP) or (evtype == FINGERMOTION)): assert (menu is not None), 'menu cannot be none if FINGERDOWN, FINGERUP, or FINGERMOTION' display = menu.get_window_size() evt = pygame.event.Event(evtype, {'button': button, 'rel': rel, 'test': testmode, 'x': ((x + delta[0]) / display[0]), 'y': ((y + delta[1]) / display[1])}) if inlist: evt = [evt] return evt return PygameEventUtils.mouse_click(x=(x + delta[0]), y=(y + delta[1]), inlist=inlist, evtype=evtype, rel=rel, button=button, testmode=testmode, update_mouse=update_mouse)
Return event clicking the middle of a given rect. :param rect: Widget, Rect object, or Tuple :param menu: Menu object :param evtype: event type, it can be MOUSEBUTTONUP, MOUSEBUTTONDOWN, MOUSEMOTION, FINGERUP, FINGERDOWN, FINGERMOTION :param inlist: If ``True`` return the event within a list :param rel: Rel position (relative movement) :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel :param delta: Add tuple to rect position :param testmode: Event is in test mode :param update_mouse: If ``True`` updates the mouse position :return: Event
test/_utils.py
middle_rect_click
mseyne/pygame-menu
419
python
@staticmethod def middle_rect_click(rect: Union[('pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType)], menu: Optional['pygame_menu.Menu']=None, evtype: int=pygame.MOUSEBUTTONUP, inlist: bool=True, rel: Tuple2IntType=(0, 0), button: int=3, delta: Tuple2IntType=(0, 0), testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Return event clicking the middle of a given rect.\n\n :param rect: Widget, Rect object, or Tuple\n :param menu: Menu object\n :param evtype: event type, it can be MOUSEBUTTONUP, MOUSEBUTTONDOWN, MOUSEMOTION, FINGERUP, FINGERDOWN, FINGERMOTION\n :param inlist: If ``True`` return the event within a list\n :param rel: Rel position (relative movement)\n :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel\n :param delta: Add tuple to rect position\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' assert (isinstance(button, int) and (button > 0)) assert_vector(rel, 2, int) assert_vector(delta, 2, int) if isinstance(rect, pygame_menu.widgets.Widget): (x, y) = rect.get_rect(to_real_position=True, render=True, apply_padding=False).center menu = rect.get_menu() elif isinstance(rect, pygame.Rect): (x, y) = rect.center elif isinstance(rect, VectorInstance): (x, y) = (rect[0], rect[1]) else: raise ValueError('unknown rect type') if ((evtype == FINGERDOWN) or (evtype == FINGERUP) or (evtype == FINGERMOTION)): assert (menu is not None), 'menu cannot be none if FINGERDOWN, FINGERUP, or FINGERMOTION' display = menu.get_window_size() evt = pygame.event.Event(evtype, {'button': button, 'rel': rel, 'test': testmode, 'x': ((x + delta[0]) / display[0]), 'y': ((y + delta[1]) / display[1])}) if inlist: evt = [evt] return evt return PygameEventUtils.mouse_click(x=(x + delta[0]), y=(y + delta[1]), inlist=inlist, evtype=evtype, rel=rel, button=button, testmode=testmode, update_mouse=update_mouse)
@staticmethod def middle_rect_click(rect: Union[('pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType)], menu: Optional['pygame_menu.Menu']=None, evtype: int=pygame.MOUSEBUTTONUP, inlist: bool=True, rel: Tuple2IntType=(0, 0), button: int=3, delta: Tuple2IntType=(0, 0), testmode: bool=True, update_mouse: bool=False) -> EventListType: '\n Return event clicking the middle of a given rect.\n\n :param rect: Widget, Rect object, or Tuple\n :param menu: Menu object\n :param evtype: event type, it can be MOUSEBUTTONUP, MOUSEBUTTONDOWN, MOUSEMOTION, FINGERUP, FINGERDOWN, FINGERMOTION\n :param inlist: If ``True`` return the event within a list\n :param rel: Rel position (relative movement)\n :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel\n :param delta: Add tuple to rect position\n :param testmode: Event is in test mode\n :param update_mouse: If ``True`` updates the mouse position\n :return: Event\n ' assert (isinstance(button, int) and (button > 0)) assert_vector(rel, 2, int) assert_vector(delta, 2, int) if isinstance(rect, pygame_menu.widgets.Widget): (x, y) = rect.get_rect(to_real_position=True, render=True, apply_padding=False).center menu = rect.get_menu() elif isinstance(rect, pygame.Rect): (x, y) = rect.center elif isinstance(rect, VectorInstance): (x, y) = (rect[0], rect[1]) else: raise ValueError('unknown rect type') if ((evtype == FINGERDOWN) or (evtype == FINGERUP) or (evtype == FINGERMOTION)): assert (menu is not None), 'menu cannot be none if FINGERDOWN, FINGERUP, or FINGERMOTION' display = menu.get_window_size() evt = pygame.event.Event(evtype, {'button': button, 'rel': rel, 'test': testmode, 'x': ((x + delta[0]) / display[0]), 'y': ((y + delta[1]) / display[1])}) if inlist: evt = [evt] return evt return PygameEventUtils.mouse_click(x=(x + delta[0]), y=(y + delta[1]), inlist=inlist, evtype=evtype, rel=rel, button=button, testmode=testmode, update_mouse=update_mouse)<|docstring|>Return event 
clicking the middle of a given rect. :param rect: Widget, Rect object, or Tuple :param menu: Menu object :param evtype: event type, it can be MOUSEBUTTONUP, MOUSEBUTTONDOWN, MOUSEMOTION, FINGERUP, FINGERDOWN, FINGERMOTION :param inlist: If ``True`` return the event within a list :param rel: Rel position (relative movement) :param button: Which button presses, ``1`` to ``3`` are the main buttons; ``4`` and ``5`` is the wheel :param delta: Add tuple to rect position :param testmode: Event is in test mode :param update_mouse: If ``True`` updates the mouse position :return: Event<|endoftext|>
611336249c9f10f8942c69762ba6b85e8815a37b0b6cb6221f29b94610c6d346
@staticmethod def get_font(name: str, size: int) -> 'pygame.font.Font': '\n Returns a font.\n\n :param name: Font name\n :param size: Font size\n :return: Font\n ' return pygame_menu.font.get_font(name, size)
Returns a font. :param name: Font name :param size: Font size :return: Font
test/_utils.py
get_font
mseyne/pygame-menu
419
python
@staticmethod def get_font(name: str, size: int) -> 'pygame.font.Font': '\n Returns a font.\n\n :param name: Font name\n :param size: Font size\n :return: Font\n ' return pygame_menu.font.get_font(name, size)
@staticmethod def get_font(name: str, size: int) -> 'pygame.font.Font': '\n Returns a font.\n\n :param name: Font name\n :param size: Font size\n :return: Font\n ' return pygame_menu.font.get_font(name, size)<|docstring|>Returns a font. :param name: Font name :param size: Font size :return: Font<|endoftext|>
8d8b52c03d0c247208bbf78659975a67879b0c2b2a2858215062038263d70cc9
@staticmethod def random_font() -> str: '\n Return a random font from the library.\n\n :return: Font file\n ' opt = random.randrange(0, len(FONT_EXAMPLES)) return FONT_EXAMPLES[opt]
Return a random font from the library. :return: Font file
test/_utils.py
random_font
mseyne/pygame-menu
419
python
@staticmethod def random_font() -> str: '\n Return a random font from the library.\n\n :return: Font file\n ' opt = random.randrange(0, len(FONT_EXAMPLES)) return FONT_EXAMPLES[opt]
@staticmethod def random_font() -> str: '\n Return a random font from the library.\n\n :return: Font file\n ' opt = random.randrange(0, len(FONT_EXAMPLES)) return FONT_EXAMPLES[opt]<|docstring|>Return a random font from the library. :return: Font file<|endoftext|>
e2b1b76df234651e970fea6368edba11d0b7e11670a815e02d6961b873717b0e
@staticmethod def load_font(font: str, size: int) -> 'pygame.font.Font': '\n Load font from file.\n\n :param font: Font name\n :param size: Font size\n :return: Font object\n ' return pygame_menu.font.get_font(font, size)
Load font from file. :param font: Font name :param size: Font size :return: Font object
test/_utils.py
load_font
mseyne/pygame-menu
419
python
@staticmethod def load_font(font: str, size: int) -> 'pygame.font.Font': '\n Load font from file.\n\n :param font: Font name\n :param size: Font size\n :return: Font object\n ' return pygame_menu.font.get_font(font, size)
@staticmethod def load_font(font: str, size: int) -> 'pygame.font.Font': '\n Load font from file.\n\n :param font: Font name\n :param size: Font size\n :return: Font object\n ' return pygame_menu.font.get_font(font, size)<|docstring|>Load font from file. :param font: Font name :param size: Font size :return: Font object<|endoftext|>
b54ea0cb88e4647e744fd608bb5abf06011469ec4c462411642f8623e58197b4
@staticmethod def random_system_font() -> str: '\n Return random system font.\n\n :return: System font name\n ' fonts = pygame.font.get_fonts() fonts.sort() fonts.pop(0) return fonts[int(random.randrange(0, len(fonts)))]
Return random system font. :return: System font name
test/_utils.py
random_system_font
mseyne/pygame-menu
419
python
@staticmethod def random_system_font() -> str: '\n Return random system font.\n\n :return: System font name\n ' fonts = pygame.font.get_fonts() fonts.sort() fonts.pop(0) return fonts[int(random.randrange(0, len(fonts)))]
@staticmethod def random_system_font() -> str: '\n Return random system font.\n\n :return: System font name\n ' fonts = pygame.font.get_fonts() fonts.sort() fonts.pop(0) return fonts[int(random.randrange(0, len(fonts)))]<|docstring|>Return random system font. :return: System font name<|endoftext|>
24c38c7c3703b898432bd15c656b86362e1bd8d1aafc882e4417c8d1119f42a9
@staticmethod def generic_menu(center_content: bool=True, column_max_width: MenuColumnMaxWidthType=None, column_min_width: MenuColumnMinWidthType=0, columns: int=1, enabled: bool=True, height: NumberType=400, mouse_motion_selection: bool=False, onclose: Any=None, onreset: Any=None, position_x: NumberType=50, position_y: NumberType=50, rows: MenuRowsType=None, theme: 'pygame_menu.themes.Theme'=pygame_menu.themes.THEME_DEFAULT, title: str='', width: NumberType=600, *args, **kwargs) -> 'pygame_menu.Menu': "\n Generate a generic test menu.\n\n :param center_content: Center menu content\n :param column_max_width: List/Tuple representing the maximum width of each column in px, ``None`` equals no limit. For example ``column_max_width=500`` (each column width can be 500px max), or ``column_max_width=(400, 500)`` (first column 400px, second 500). If ``0` is given uses the menu width. This method does not resize the widgets, only determines the dynamic width of the column layout\n :param column_min_width: List/Tuple representing the minimum width of each column in px. For example ``column_min_width=500`` (each column width is 500px min), or ``column_max_width=(400, 500)`` (first column 400px, second 500). By default it's ``0``. Negative values are not accepted\n :param columns: Number of columns\n :param enabled: Menu is enabled. If ``False`` Menu cannot be drawn\n :param height: Menu height in px\n :param mouse_motion_selection: Select widgets using mouse motion. 
If ``True`` menu draws a ``focus`` on the selected widget\n :param onclose: Event or function applied when closing the Menu\n :param onreset: Function executed when resetting the Menu\n :param position_x: X position of the menu\n :param position_y: Y position of the menu\n :param rows: Number of rows\n :param theme: Menu theme\n :param title: Menu title\n :param width: Menu width in px\n :param args: Additional args\n :param kwargs: Optional keyword arguments\n :return: Menu\n " return pygame_menu.Menu(*args, center_content=center_content, column_max_width=column_max_width, column_min_width=column_min_width, columns=columns, enabled=enabled, height=height, mouse_motion_selection=mouse_motion_selection, onclose=onclose, onreset=onreset, position=(position_x, position_y), rows=rows, theme=theme, title=title, width=width, **kwargs)
Generate a generic test menu. :param center_content: Center menu content :param column_max_width: List/Tuple representing the maximum width of each column in px, ``None`` equals no limit. For example ``column_max_width=500`` (each column width can be 500px max), or ``column_max_width=(400, 500)`` (first column 400px, second 500). If ``0` is given uses the menu width. This method does not resize the widgets, only determines the dynamic width of the column layout :param column_min_width: List/Tuple representing the minimum width of each column in px. For example ``column_min_width=500`` (each column width is 500px min), or ``column_max_width=(400, 500)`` (first column 400px, second 500). By default it's ``0``. Negative values are not accepted :param columns: Number of columns :param enabled: Menu is enabled. If ``False`` Menu cannot be drawn :param height: Menu height in px :param mouse_motion_selection: Select widgets using mouse motion. If ``True`` menu draws a ``focus`` on the selected widget :param onclose: Event or function applied when closing the Menu :param onreset: Function executed when resetting the Menu :param position_x: X position of the menu :param position_y: Y position of the menu :param rows: Number of rows :param theme: Menu theme :param title: Menu title :param width: Menu width in px :param args: Additional args :param kwargs: Optional keyword arguments :return: Menu
test/_utils.py
generic_menu
mseyne/pygame-menu
419
python
@staticmethod def generic_menu(center_content: bool=True, column_max_width: MenuColumnMaxWidthType=None, column_min_width: MenuColumnMinWidthType=0, columns: int=1, enabled: bool=True, height: NumberType=400, mouse_motion_selection: bool=False, onclose: Any=None, onreset: Any=None, position_x: NumberType=50, position_y: NumberType=50, rows: MenuRowsType=None, theme: 'pygame_menu.themes.Theme'=pygame_menu.themes.THEME_DEFAULT, title: str=, width: NumberType=600, *args, **kwargs) -> 'pygame_menu.Menu': "\n Generate a generic test menu.\n\n :param center_content: Center menu content\n :param column_max_width: List/Tuple representing the maximum width of each column in px, ``None`` equals no limit. For example ``column_max_width=500`` (each column width can be 500px max), or ``column_max_width=(400, 500)`` (first column 400px, second 500). If ``0` is given uses the menu width. This method does not resize the widgets, only determines the dynamic width of the column layout\n :param column_min_width: List/Tuple representing the minimum width of each column in px. For example ``column_min_width=500`` (each column width is 500px min), or ``column_max_width=(400, 500)`` (first column 400px, second 500). By default it's ``0``. Negative values are not accepted\n :param columns: Number of columns\n :param enabled: Menu is enabled. If ``False`` Menu cannot be drawn\n :param height: Menu height in px\n :param mouse_motion_selection: Select widgets using mouse motion. 
If ``True`` menu draws a ``focus`` on the selected widget\n :param onclose: Event or function applied when closing the Menu\n :param onreset: Function executed when resetting the Menu\n :param position_x: X position of the menu\n :param position_y: Y position of the menu\n :param rows: Number of rows\n :param theme: Menu theme\n :param title: Menu title\n :param width: Menu width in px\n :param args: Additional args\n :param kwargs: Optional keyword arguments\n :return: Menu\n " return pygame_menu.Menu(*args, center_content=center_content, column_max_width=column_max_width, column_min_width=column_min_width, columns=columns, enabled=enabled, height=height, mouse_motion_selection=mouse_motion_selection, onclose=onclose, onreset=onreset, position=(position_x, position_y), rows=rows, theme=theme, title=title, width=width, **kwargs)
@staticmethod def generic_menu(center_content: bool=True, column_max_width: MenuColumnMaxWidthType=None, column_min_width: MenuColumnMinWidthType=0, columns: int=1, enabled: bool=True, height: NumberType=400, mouse_motion_selection: bool=False, onclose: Any=None, onreset: Any=None, position_x: NumberType=50, position_y: NumberType=50, rows: MenuRowsType=None, theme: 'pygame_menu.themes.Theme'=pygame_menu.themes.THEME_DEFAULT, title: str=, width: NumberType=600, *args, **kwargs) -> 'pygame_menu.Menu': "\n Generate a generic test menu.\n\n :param center_content: Center menu content\n :param column_max_width: List/Tuple representing the maximum width of each column in px, ``None`` equals no limit. For example ``column_max_width=500`` (each column width can be 500px max), or ``column_max_width=(400, 500)`` (first column 400px, second 500). If ``0` is given uses the menu width. This method does not resize the widgets, only determines the dynamic width of the column layout\n :param column_min_width: List/Tuple representing the minimum width of each column in px. For example ``column_min_width=500`` (each column width is 500px min), or ``column_max_width=(400, 500)`` (first column 400px, second 500). By default it's ``0``. Negative values are not accepted\n :param columns: Number of columns\n :param enabled: Menu is enabled. If ``False`` Menu cannot be drawn\n :param height: Menu height in px\n :param mouse_motion_selection: Select widgets using mouse motion. 
If ``True`` menu draws a ``focus`` on the selected widget\n :param onclose: Event or function applied when closing the Menu\n :param onreset: Function executed when resetting the Menu\n :param position_x: X position of the menu\n :param position_y: Y position of the menu\n :param rows: Number of rows\n :param theme: Menu theme\n :param title: Menu title\n :param width: Menu width in px\n :param args: Additional args\n :param kwargs: Optional keyword arguments\n :return: Menu\n " return pygame_menu.Menu(*args, center_content=center_content, column_max_width=column_max_width, column_min_width=column_min_width, columns=columns, enabled=enabled, height=height, mouse_motion_selection=mouse_motion_selection, onclose=onclose, onreset=onreset, position=(position_x, position_y), rows=rows, theme=theme, title=title, width=width, **kwargs)<|docstring|>Generate a generic test menu. :param center_content: Center menu content :param column_max_width: List/Tuple representing the maximum width of each column in px, ``None`` equals no limit. For example ``column_max_width=500`` (each column width can be 500px max), or ``column_max_width=(400, 500)`` (first column 400px, second 500). If ``0` is given uses the menu width. This method does not resize the widgets, only determines the dynamic width of the column layout :param column_min_width: List/Tuple representing the minimum width of each column in px. For example ``column_min_width=500`` (each column width is 500px min), or ``column_max_width=(400, 500)`` (first column 400px, second 500). By default it's ``0``. Negative values are not accepted :param columns: Number of columns :param enabled: Menu is enabled. If ``False`` Menu cannot be drawn :param height: Menu height in px :param mouse_motion_selection: Select widgets using mouse motion. 
If ``True`` menu draws a ``focus`` on the selected widget :param onclose: Event or function applied when closing the Menu :param onreset: Function executed when resetting the Menu :param position_x: X position of the menu :param position_y: Y position of the menu :param rows: Number of rows :param theme: Menu theme :param title: Menu title :param width: Menu width in px :param args: Additional args :param kwargs: Optional keyword arguments :return: Menu<|endoftext|>
1a1720dfe5fc6b67b404169ad0870db1143db40963a05ba2602b8e270b418ecb
@fill_in_docstring def __init__(self, interval: IntervalData=1, filename: Optional[str]=None, *, method: str='structure_factor_mean', source: Union[(None, int, Callable)]=None, verbose: bool=False): '\n Args:\n interval:\n {ARG_TRACKER_INTERVAL}\n filename (str, optional):\n Determines the file to which the data is written in JSON format\n method (str):\n Method used for determining the length scale. Details are explained in\n the function :func:`~droplets.image_analysis.get_length_scale`.\n source (int or callable, optional):\n Determines how a field is extracted from `fields`. If `None`, `fields`\n is passed as is, assuming it is already a scalar field. This works for\n the simple, standard case where only a single \n :class:`~pde.fields.scalar.ScalarField` is treated. Alternatively,\n `source` can be an integer, indicating which field is extracted from an\n instance of :class:`~pde.fields.collection.FieldCollection`. Lastly,\n `source` can be a function that takes `fields` as an argument and\n returns the desired field.\n verbose (bool):\n Determines whether errors in determining the length scales are logged.\n ' super().__init__(interval=interval) self.length_scales: List[float] = [] self.times: List[float] = [] self.filename = filename self.method = method self.source = source self.verbose = verbose
Args: interval: {ARG_TRACKER_INTERVAL} filename (str, optional): Determines the file to which the data is written in JSON format method (str): Method used for determining the length scale. Details are explained in the function :func:`~droplets.image_analysis.get_length_scale`. source (int or callable, optional): Determines how a field is extracted from `fields`. If `None`, `fields` is passed as is, assuming it is already a scalar field. This works for the simple, standard case where only a single :class:`~pde.fields.scalar.ScalarField` is treated. Alternatively, `source` can be an integer, indicating which field is extracted from an instance of :class:`~pde.fields.collection.FieldCollection`. Lastly, `source` can be a function that takes `fields` as an argument and returns the desired field. verbose (bool): Determines whether errors in determining the length scales are logged.
droplets/trackers.py
__init__
zwicker-group/py-droplets
2
python
@fill_in_docstring def __init__(self, interval: IntervalData=1, filename: Optional[str]=None, *, method: str='structure_factor_mean', source: Union[(None, int, Callable)]=None, verbose: bool=False): '\n Args:\n interval:\n {ARG_TRACKER_INTERVAL}\n filename (str, optional):\n Determines the file to which the data is written in JSON format\n method (str):\n Method used for determining the length scale. Details are explained in\n the function :func:`~droplets.image_analysis.get_length_scale`.\n source (int or callable, optional):\n Determines how a field is extracted from `fields`. If `None`, `fields`\n is passed as is, assuming it is already a scalar field. This works for\n the simple, standard case where only a single \n :class:`~pde.fields.scalar.ScalarField` is treated. Alternatively,\n `source` can be an integer, indicating which field is extracted from an\n instance of :class:`~pde.fields.collection.FieldCollection`. Lastly,\n `source` can be a function that takes `fields` as an argument and\n returns the desired field.\n verbose (bool):\n Determines whether errors in determining the length scales are logged.\n ' super().__init__(interval=interval) self.length_scales: List[float] = [] self.times: List[float] = [] self.filename = filename self.method = method self.source = source self.verbose = verbose
@fill_in_docstring def __init__(self, interval: IntervalData=1, filename: Optional[str]=None, *, method: str='structure_factor_mean', source: Union[(None, int, Callable)]=None, verbose: bool=False): '\n Args:\n interval:\n {ARG_TRACKER_INTERVAL}\n filename (str, optional):\n Determines the file to which the data is written in JSON format\n method (str):\n Method used for determining the length scale. Details are explained in\n the function :func:`~droplets.image_analysis.get_length_scale`.\n source (int or callable, optional):\n Determines how a field is extracted from `fields`. If `None`, `fields`\n is passed as is, assuming it is already a scalar field. This works for\n the simple, standard case where only a single \n :class:`~pde.fields.scalar.ScalarField` is treated. Alternatively,\n `source` can be an integer, indicating which field is extracted from an\n instance of :class:`~pde.fields.collection.FieldCollection`. Lastly,\n `source` can be a function that takes `fields` as an argument and\n returns the desired field.\n verbose (bool):\n Determines whether errors in determining the length scales are logged.\n ' super().__init__(interval=interval) self.length_scales: List[float] = [] self.times: List[float] = [] self.filename = filename self.method = method self.source = source self.verbose = verbose<|docstring|>Args: interval: {ARG_TRACKER_INTERVAL} filename (str, optional): Determines the file to which the data is written in JSON format method (str): Method used for determining the length scale. Details are explained in the function :func:`~droplets.image_analysis.get_length_scale`. source (int or callable, optional): Determines how a field is extracted from `fields`. If `None`, `fields` is passed as is, assuming it is already a scalar field. This works for the simple, standard case where only a single :class:`~pde.fields.scalar.ScalarField` is treated. 
Alternatively, `source` can be an integer, indicating which field is extracted from an instance of :class:`~pde.fields.collection.FieldCollection`. Lastly, `source` can be a function that takes `fields` as an argument and returns the desired field. verbose (bool): Determines whether errors in determining the length scales are logged.<|endoftext|>
15b3a93507a8a7fee9396e0130426ce85036534b06acf9ee9b9b35262e44db18
def handle(self, field: FieldBase, t: float): 'handle data supplied to this tracker\n\n Args:\n field (:class:`~pde.fields.FieldBase`):\n The current state of the simulation\n t (float):\n The associated time\n ' from pde.visualization.plotting import extract_field from .image_analysis import get_length_scale scalar_field = extract_field(field, self.source, 0) try: length = get_length_scale(scalar_field, method=self.method) except Exception: if self.verbose: self._logger.exception('Could not determine length scale') length = np.nan self.times.append(t) self.length_scales.append(length)
handle data supplied to this tracker Args: field (:class:`~pde.fields.FieldBase`): The current state of the simulation t (float): The associated time
droplets/trackers.py
handle
zwicker-group/py-droplets
2
python
def handle(self, field: FieldBase, t: float): 'handle data supplied to this tracker\n\n Args:\n field (:class:`~pde.fields.FieldBase`):\n The current state of the simulation\n t (float):\n The associated time\n ' from pde.visualization.plotting import extract_field from .image_analysis import get_length_scale scalar_field = extract_field(field, self.source, 0) try: length = get_length_scale(scalar_field, method=self.method) except Exception: if self.verbose: self._logger.exception('Could not determine length scale') length = np.nan self.times.append(t) self.length_scales.append(length)
def handle(self, field: FieldBase, t: float): 'handle data supplied to this tracker\n\n Args:\n field (:class:`~pde.fields.FieldBase`):\n The current state of the simulation\n t (float):\n The associated time\n ' from pde.visualization.plotting import extract_field from .image_analysis import get_length_scale scalar_field = extract_field(field, self.source, 0) try: length = get_length_scale(scalar_field, method=self.method) except Exception: if self.verbose: self._logger.exception('Could not determine length scale') length = np.nan self.times.append(t) self.length_scales.append(length)<|docstring|>handle data supplied to this tracker Args: field (:class:`~pde.fields.FieldBase`): The current state of the simulation t (float): The associated time<|endoftext|>
9c4fff596cebd58fef36645d85395fd410a7afcdfd20663168090316e98b8157
def finalize(self, info: InfoDict=None) -> None: 'finalize the tracker, supplying additional information\n\n Args:\n info (dict):\n Extra information from the simulation\n ' super().finalize(info) if self.filename: import json data = {'times': self.times, 'length_scales': self.length_scales} with open(self.filename, 'w') as fp: json.dump(data, fp)
finalize the tracker, supplying additional information Args: info (dict): Extra information from the simulation
droplets/trackers.py
finalize
zwicker-group/py-droplets
2
python
def finalize(self, info: InfoDict=None) -> None: 'finalize the tracker, supplying additional information\n\n Args:\n info (dict):\n Extra information from the simulation\n ' super().finalize(info) if self.filename: import json data = {'times': self.times, 'length_scales': self.length_scales} with open(self.filename, 'w') as fp: json.dump(data, fp)
def finalize(self, info: InfoDict=None) -> None: 'finalize the tracker, supplying additional information\n\n Args:\n info (dict):\n Extra information from the simulation\n ' super().finalize(info) if self.filename: import json data = {'times': self.times, 'length_scales': self.length_scales} with open(self.filename, 'w') as fp: json.dump(data, fp)<|docstring|>finalize the tracker, supplying additional information Args: info (dict): Extra information from the simulation<|endoftext|>
a61dadd4a3487c87ae09c18faee33d8320491afbe331c13df0ed6115a7bf712f
@fill_in_docstring def __init__(self, interval: IntervalData=1, filename: Optional[str]=None, *, emulsion_timecourse=None, source: Union[(None, int, Callable)]=None, threshold: Union[(float, str)]=0.5, minimal_radius: float=0, refine: bool=False, perturbation_modes: int=0): "\n Args:\n interval:\n {ARG_TRACKER_INTERVAL}\n filename (str, optional):\n Determines the file to which the final data is written as an HDF5 file.\n emulsion_timecourse (:class:`EmulsionTimeCourse`, optional):\n Can be an instance of :class:`~droplets.emulsions.EmulsionTimeCourse`\n that is used to store the data.\n source (int or callable, optional):\n Determines how a field is extracted from `fields`. If `None`, `fields`\n is passed as is, assuming it is already a scalar field. This works for\n the simple, standard case where only a single ScalarField is treated.\n Alternatively, `source` can be an integer, indicating which field is\n extracted from an instance of :class:`~pde.fields.FieldCollection`.\n Lastly, `source` can be a function that takes `fields` as an argument\n and returns the desired field.\n threshold (float or str):\n The threshold for binarizing the image. The special value 'auto' takes\n the mean between the minimum and the maximum of the data as a guess.\n minimal_radius (float):\n Minimal radius of droplets that will be retained.\n refine (bool):\n Flag determining whether the droplet coordinates should be\n refined using fitting. This is a potentially slow procedure.\n perturbation_modes (int):\n An option describing how many perturbation modes should be\n considered when refining droplets.\n\n " super().__init__(interval=interval) if (emulsion_timecourse is None): self.data = EmulsionTimeCourse() else: self.data = emulsion_timecourse self.filename = filename self.source = source self.threshold = threshold self.minimal_radius = minimal_radius self.refine = refine self.perturbation_modes = perturbation_modes
Args: interval: {ARG_TRACKER_INTERVAL} filename (str, optional): Determines the file to which the final data is written as an HDF5 file. emulsion_timecourse (:class:`EmulsionTimeCourse`, optional): Can be an instance of :class:`~droplets.emulsions.EmulsionTimeCourse` that is used to store the data. source (int or callable, optional): Determines how a field is extracted from `fields`. If `None`, `fields` is passed as is, assuming it is already a scalar field. This works for the simple, standard case where only a single ScalarField is treated. Alternatively, `source` can be an integer, indicating which field is extracted from an instance of :class:`~pde.fields.FieldCollection`. Lastly, `source` can be a function that takes `fields` as an argument and returns the desired field. threshold (float or str): The threshold for binarizing the image. The special value 'auto' takes the mean between the minimum and the maximum of the data as a guess. minimal_radius (float): Minimal radius of droplets that will be retained. refine (bool): Flag determining whether the droplet coordinates should be refined using fitting. This is a potentially slow procedure. perturbation_modes (int): An option describing how many perturbation modes should be considered when refining droplets.
droplets/trackers.py
__init__
zwicker-group/py-droplets
2
python
@fill_in_docstring def __init__(self, interval: IntervalData=1, filename: Optional[str]=None, *, emulsion_timecourse=None, source: Union[(None, int, Callable)]=None, threshold: Union[(float, str)]=0.5, minimal_radius: float=0, refine: bool=False, perturbation_modes: int=0): "\n Args:\n interval:\n {ARG_TRACKER_INTERVAL}\n filename (str, optional):\n Determines the file to which the final data is written as an HDF5 file.\n emulsion_timecourse (:class:`EmulsionTimeCourse`, optional):\n Can be an instance of :class:`~droplets.emulsions.EmulsionTimeCourse`\n that is used to store the data.\n source (int or callable, optional):\n Determines how a field is extracted from `fields`. If `None`, `fields`\n is passed as is, assuming it is already a scalar field. This works for\n the simple, standard case where only a single ScalarField is treated.\n Alternatively, `source` can be an integer, indicating which field is\n extracted from an instance of :class:`~pde.fields.FieldCollection`.\n Lastly, `source` can be a function that takes `fields` as an argument\n and returns the desired field.\n threshold (float or str):\n The threshold for binarizing the image. The special value 'auto' takes\n the mean between the minimum and the maximum of the data as a guess.\n minimal_radius (float):\n Minimal radius of droplets that will be retained.\n refine (bool):\n Flag determining whether the droplet coordinates should be\n refined using fitting. This is a potentially slow procedure.\n perturbation_modes (int):\n An option describing how many perturbation modes should be\n considered when refining droplets.\n\n " super().__init__(interval=interval) if (emulsion_timecourse is None): self.data = EmulsionTimeCourse() else: self.data = emulsion_timecourse self.filename = filename self.source = source self.threshold = threshold self.minimal_radius = minimal_radius self.refine = refine self.perturbation_modes = perturbation_modes
@fill_in_docstring def __init__(self, interval: IntervalData=1, filename: Optional[str]=None, *, emulsion_timecourse=None, source: Union[(None, int, Callable)]=None, threshold: Union[(float, str)]=0.5, minimal_radius: float=0, refine: bool=False, perturbation_modes: int=0): "\n Args:\n interval:\n {ARG_TRACKER_INTERVAL}\n filename (str, optional):\n Determines the file to which the final data is written as an HDF5 file.\n emulsion_timecourse (:class:`EmulsionTimeCourse`, optional):\n Can be an instance of :class:`~droplets.emulsions.EmulsionTimeCourse`\n that is used to store the data.\n source (int or callable, optional):\n Determines how a field is extracted from `fields`. If `None`, `fields`\n is passed as is, assuming it is already a scalar field. This works for\n the simple, standard case where only a single ScalarField is treated.\n Alternatively, `source` can be an integer, indicating which field is\n extracted from an instance of :class:`~pde.fields.FieldCollection`.\n Lastly, `source` can be a function that takes `fields` as an argument\n and returns the desired field.\n threshold (float or str):\n The threshold for binarizing the image. The special value 'auto' takes\n the mean between the minimum and the maximum of the data as a guess.\n minimal_radius (float):\n Minimal radius of droplets that will be retained.\n refine (bool):\n Flag determining whether the droplet coordinates should be\n refined using fitting. 
This is a potentially slow procedure.\n perturbation_modes (int):\n An option describing how many perturbation modes should be\n considered when refining droplets.\n\n " super().__init__(interval=interval) if (emulsion_timecourse is None): self.data = EmulsionTimeCourse() else: self.data = emulsion_timecourse self.filename = filename self.source = source self.threshold = threshold self.minimal_radius = minimal_radius self.refine = refine self.perturbation_modes = perturbation_modes<|docstring|>Args: interval: {ARG_TRACKER_INTERVAL} filename (str, optional): Determines the file to which the final data is written as an HDF5 file. emulsion_timecourse (:class:`EmulsionTimeCourse`, optional): Can be an instance of :class:`~droplets.emulsions.EmulsionTimeCourse` that is used to store the data. source (int or callable, optional): Determines how a field is extracted from `fields`. If `None`, `fields` is passed as is, assuming it is already a scalar field. This works for the simple, standard case where only a single ScalarField is treated. Alternatively, `source` can be an integer, indicating which field is extracted from an instance of :class:`~pde.fields.FieldCollection`. Lastly, `source` can be a function that takes `fields` as an argument and returns the desired field. threshold (float or str): The threshold for binarizing the image. The special value 'auto' takes the mean between the minimum and the maximum of the data as a guess. minimal_radius (float): Minimal radius of droplets that will be retained. refine (bool): Flag determining whether the droplet coordinates should be refined using fitting. This is a potentially slow procedure. perturbation_modes (int): An option describing how many perturbation modes should be considered when refining droplets.<|endoftext|>
c159d915aaff2eb9cb0ad7e749da96e5b8f098d46702b6c208009e0a253e0dea
def initialize(self, field: FieldBase, info: InfoDict=None) -> float: '\n Args:\n field (:class:`~pde.fields.base.FieldBase`):\n An example of the data that will be analyzed by the tracker\n info (dict):\n Extra information from the simulation\n\n Returns:\n float: The first time the tracker needs to handle data\n ' if (self.data.grid is None): self.data.grid = field.grid elif (not self.data.grid.compatible_with(field.grid)): raise RuntimeError('Grid of the Emulsion is incompatible with the grid of current state') return super().initialize(field, info)
Args: field (:class:`~pde.fields.base.FieldBase`): An example of the data that will be analyzed by the tracker info (dict): Extra information from the simulation Returns: float: The first time the tracker needs to handle data
droplets/trackers.py
initialize
zwicker-group/py-droplets
2
python
def initialize(self, field: FieldBase, info: InfoDict=None) -> float: '\n Args:\n field (:class:`~pde.fields.base.FieldBase`):\n An example of the data that will be analyzed by the tracker\n info (dict):\n Extra information from the simulation\n\n Returns:\n float: The first time the tracker needs to handle data\n ' if (self.data.grid is None): self.data.grid = field.grid elif (not self.data.grid.compatible_with(field.grid)): raise RuntimeError('Grid of the Emulsion is incompatible with the grid of current state') return super().initialize(field, info)
def initialize(self, field: FieldBase, info: InfoDict=None) -> float: '\n Args:\n field (:class:`~pde.fields.base.FieldBase`):\n An example of the data that will be analyzed by the tracker\n info (dict):\n Extra information from the simulation\n\n Returns:\n float: The first time the tracker needs to handle data\n ' if (self.data.grid is None): self.data.grid = field.grid elif (not self.data.grid.compatible_with(field.grid)): raise RuntimeError('Grid of the Emulsion is incompatible with the grid of current state') return super().initialize(field, info)<|docstring|>Args: field (:class:`~pde.fields.base.FieldBase`): An example of the data that will be analyzed by the tracker info (dict): Extra information from the simulation Returns: float: The first time the tracker needs to handle data<|endoftext|>
791216dad8ab4ea05f631491de2d601bac3159c2e5cbfc7028fef6bdaea24f5e
def handle(self, field: FieldBase, t: float) -> None: 'handle data supplied to this tracker\n\n Args:\n field (:class:`~pde.fields.base.FieldBase`):\n The current state of the simulation\n t (float):\n The associated time\n ' from pde.visualization.plotting import extract_field from .image_analysis import locate_droplets scalar_field = extract_field(field, self.source, 0) emulsion = locate_droplets(scalar_field, threshold=self.threshold, refine=self.refine, modes=self.perturbation_modes, minimal_radius=self.minimal_radius) self.data.append(emulsion, t)
handle data supplied to this tracker Args: field (:class:`~pde.fields.base.FieldBase`): The current state of the simulation t (float): The associated time
droplets/trackers.py
handle
zwicker-group/py-droplets
2
python
def handle(self, field: FieldBase, t: float) -> None: 'handle data supplied to this tracker\n\n Args:\n field (:class:`~pde.fields.base.FieldBase`):\n The current state of the simulation\n t (float):\n The associated time\n ' from pde.visualization.plotting import extract_field from .image_analysis import locate_droplets scalar_field = extract_field(field, self.source, 0) emulsion = locate_droplets(scalar_field, threshold=self.threshold, refine=self.refine, modes=self.perturbation_modes, minimal_radius=self.minimal_radius) self.data.append(emulsion, t)
def handle(self, field: FieldBase, t: float) -> None: 'handle data supplied to this tracker\n\n Args:\n field (:class:`~pde.fields.base.FieldBase`):\n The current state of the simulation\n t (float):\n The associated time\n ' from pde.visualization.plotting import extract_field from .image_analysis import locate_droplets scalar_field = extract_field(field, self.source, 0) emulsion = locate_droplets(scalar_field, threshold=self.threshold, refine=self.refine, modes=self.perturbation_modes, minimal_radius=self.minimal_radius) self.data.append(emulsion, t)<|docstring|>handle data supplied to this tracker Args: field (:class:`~pde.fields.base.FieldBase`): The current state of the simulation t (float): The associated time<|endoftext|>
02df24286690ab34c7144397f43a489b14a9da0dab39f9d421aa83e9e3813560
def finalize(self, info: InfoDict=None) -> None: 'finalize the tracker, supplying additional information\n\n Args:\n info (dict):\n Extra information from the simulation\n ' super().finalize(info) if self.filename: self.data.to_file(self.filename)
finalize the tracker, supplying additional information Args: info (dict): Extra information from the simulation
droplets/trackers.py
finalize
zwicker-group/py-droplets
2
python
def finalize(self, info: InfoDict=None) -> None: 'finalize the tracker, supplying additional information\n\n Args:\n info (dict):\n Extra information from the simulation\n ' super().finalize(info) if self.filename: self.data.to_file(self.filename)
def finalize(self, info: InfoDict=None) -> None: 'finalize the tracker, supplying additional information\n\n Args:\n info (dict):\n Extra information from the simulation\n ' super().finalize(info) if self.filename: self.data.to_file(self.filename)<|docstring|>finalize the tracker, supplying additional information Args: info (dict): Extra information from the simulation<|endoftext|>
dfff71494e1dd16009b1c37c3fbba08c0c9f6b14782c5d7130a034e0c597306a
@staticmethod def swap(index1: int, index2: int, array_list: list): '\n This method swaps elements in a list corresponding to indexes given in parameters.\n\n :param index1: The first index.\n :param index2: The second index.\n :param array_list: The list in which we need to swap elements.\n :type index1: int\n :type index2: int\n :type array_list: list\n ' temp = array_list[index1] array_list[index1] = array_list[index2] array_list[index2] = temp
This method swaps elements in a list corresponding to indexes given in parameters. :param index1: The first index. :param index2: The second index. :param array_list: The list in which we need to swap elements. :type index1: int :type index2: int :type array_list: list
src/sorting/quick_sort.py
swap
pranaychandekar/dsa
2
python
@staticmethod def swap(index1: int, index2: int, array_list: list): '\n This method swaps elements in a list corresponding to indexes given in parameters.\n\n :param index1: The first index.\n :param index2: The second index.\n :param array_list: The list in which we need to swap elements.\n :type index1: int\n :type index2: int\n :type array_list: list\n ' temp = array_list[index1] array_list[index1] = array_list[index2] array_list[index2] = temp
@staticmethod def swap(index1: int, index2: int, array_list: list): '\n This method swaps elements in a list corresponding to indexes given in parameters.\n\n :param index1: The first index.\n :param index2: The second index.\n :param array_list: The list in which we need to swap elements.\n :type index1: int\n :type index2: int\n :type array_list: list\n ' temp = array_list[index1] array_list[index1] = array_list[index2] array_list[index2] = temp<|docstring|>This method swaps elements in a list corresponding to indexes given in parameters. :param index1: The first index. :param index2: The second index. :param array_list: The list in which we need to swap elements. :type index1: int :type index2: int :type array_list: list<|endoftext|>
f684682015eb31ca68cfa7a5aef0b3468cdee969d35aaf144dcf1eabefae2123
def partition(self, start: int, end: int, array_list: list): '\n This method creates partition in the list corresponding to the pivot element and return the pivot index.\n\n :param start: The start index.\n :param end: The end index\n :param array_list: The list of numbers.\n :type start: int\n :type end: int\n :type array_list: list\n :return: The partition pivot index\n :rtype: int\n ' pivot = array_list[end] partition_index = start for i in range(start, end): if (array_list[i] <= pivot): self.swap(i, partition_index, array_list) partition_index = (partition_index + 1) self.swap(partition_index, end, array_list) return partition_index
This method creates partition in the list corresponding to the pivot element and return the pivot index. :param start: The start index. :param end: The end index :param array_list: The list of numbers. :type start: int :type end: int :type array_list: list :return: The partition pivot index :rtype: int
src/sorting/quick_sort.py
partition
pranaychandekar/dsa
2
python
def partition(self, start: int, end: int, array_list: list): '\n This method creates partition in the list corresponding to the pivot element and return the pivot index.\n\n :param start: The start index.\n :param end: The end index\n :param array_list: The list of numbers.\n :type start: int\n :type end: int\n :type array_list: list\n :return: The partition pivot index\n :rtype: int\n ' pivot = array_list[end] partition_index = start for i in range(start, end): if (array_list[i] <= pivot): self.swap(i, partition_index, array_list) partition_index = (partition_index + 1) self.swap(partition_index, end, array_list) return partition_index
def partition(self, start: int, end: int, array_list: list): '\n This method creates partition in the list corresponding to the pivot element and return the pivot index.\n\n :param start: The start index.\n :param end: The end index\n :param array_list: The list of numbers.\n :type start: int\n :type end: int\n :type array_list: list\n :return: The partition pivot index\n :rtype: int\n ' pivot = array_list[end] partition_index = start for i in range(start, end): if (array_list[i] <= pivot): self.swap(i, partition_index, array_list) partition_index = (partition_index + 1) self.swap(partition_index, end, array_list) return partition_index<|docstring|>This method creates partition in the list corresponding to the pivot element and return the pivot index. :param start: The start index. :param end: The end index :param array_list: The list of numbers. :type start: int :type end: int :type array_list: list :return: The partition pivot index :rtype: int<|endoftext|>
db2ae1b9a373680d94d77cf7d1a0892b2b0e4fc93225ee12b3021747fffcb6ac
def random_partition(self, start: int, end: int, unsorted_list: list): '\n This method randomly picks the pivot index and then swaps it with the last index before partitioning to\n avoid the worst case time complexity.\n\n :param start: The start index.\n :param end: The end index\n :param unsorted_list: The original list.\n :type start: int\n :type end: int\n :type unsorted_list: list\n :return: The partition pivot index.\n :rtype: int\n ' pivot_index = random.randint(start, end) self.swap(pivot_index, end, unsorted_list) return self.partition(start, end, unsorted_list)
This method randomly picks the pivot index and then swaps it with the last index before partitioning to avoid the worst case time complexity. :param start: The start index. :param end: The end index :param unsorted_list: The original list. :type start: int :type end: int :type unsorted_list: list :return: The partition pivot index. :rtype: int
src/sorting/quick_sort.py
random_partition
pranaychandekar/dsa
2
python
def random_partition(self, start: int, end: int, unsorted_list: list): '\n This method randomly picks the pivot index and then swaps it with the last index before partitioning to\n avoid the worst case time complexity.\n\n :param start: The start index.\n :param end: The end index\n :param unsorted_list: The original list.\n :type start: int\n :type end: int\n :type unsorted_list: list\n :return: The partition pivot index.\n :rtype: int\n ' pivot_index = random.randint(start, end) self.swap(pivot_index, end, unsorted_list) return self.partition(start, end, unsorted_list)
def random_partition(self, start: int, end: int, unsorted_list: list): '\n This method randomly picks the pivot index and then swaps it with the last index before partitioning to\n avoid the worst case time complexity.\n\n :param start: The start index.\n :param end: The end index\n :param unsorted_list: The original list.\n :type start: int\n :type end: int\n :type unsorted_list: list\n :return: The partition pivot index.\n :rtype: int\n ' pivot_index = random.randint(start, end) self.swap(pivot_index, end, unsorted_list) return self.partition(start, end, unsorted_list)<|docstring|>This method randomly picks the pivot index and then swaps it with the last index before partitioning to avoid the worst case time complexity. :param start: The start index. :param end: The end index :param unsorted_list: The original list. :type start: int :type end: int :type unsorted_list: list :return: The partition pivot index. :rtype: int<|endoftext|>
34c64cbef4e485b6c376bbd2b408381c64cf5b805ea3c7fd9eccfcc0f2d6aaed
def quick_sort(self, start: int, end: int, unsorted_list: list): '\n This method sorts a given list in ascending order using Quick Sort algorithm.\n\n :param start: The start index.\n :param end: The end index\n :param unsorted_list: The original list.\n :type start: int\n :type end: int\n :type unsorted_list: list\n ' if (start < end): partition_index = self.random_partition(start, end, unsorted_list) self.quick_sort(start, (partition_index - 1), unsorted_list) self.quick_sort((partition_index + 1), end, unsorted_list)
This method sorts a given list in ascending order using Quick Sort algorithm. :param start: The start index. :param end: The end index :param unsorted_list: The original list. :type start: int :type end: int :type unsorted_list: list
src/sorting/quick_sort.py
quick_sort
pranaychandekar/dsa
2
python
def quick_sort(self, start: int, end: int, unsorted_list: list): '\n This method sorts a given list in ascending order using Quick Sort algorithm.\n\n :param start: The start index.\n :param end: The end index\n :param unsorted_list: The original list.\n :type start: int\n :type end: int\n :type unsorted_list: list\n ' if (start < end): partition_index = self.random_partition(start, end, unsorted_list) self.quick_sort(start, (partition_index - 1), unsorted_list) self.quick_sort((partition_index + 1), end, unsorted_list)
def quick_sort(self, start: int, end: int, unsorted_list: list): '\n This method sorts a given list in ascending order using Quick Sort algorithm.\n\n :param start: The start index.\n :param end: The end index\n :param unsorted_list: The original list.\n :type start: int\n :type end: int\n :type unsorted_list: list\n ' if (start < end): partition_index = self.random_partition(start, end, unsorted_list) self.quick_sort(start, (partition_index - 1), unsorted_list) self.quick_sort((partition_index + 1), end, unsorted_list)<|docstring|>This method sorts a given list in ascending order using Quick Sort algorithm. :param start: The start index. :param end: The end index :param unsorted_list: The original list. :type start: int :type end: int :type unsorted_list: list<|endoftext|>
488bf379eb9830ee758bbe61d6f35ee92d18fe7425b122c173764d68e4cb154b
def get_key(): 'Wait for the next key pressed and return its ascii code.' result = None if (os.name == 'nt'): import msvcrt result = msvcrt.getch() else: import termios fd = sys.stdin.fileno() oldterm = termios.tcgetattr(fd) newattr = termios.tcgetattr(fd) newattr[3] = ((newattr[3] & (~ termios.ICANON)) & (~ termios.ECHO)) termios.tcsetattr(fd, termios.TCSANOW, newattr) try: result = sys.stdin.read(1) except IOError: pass finally: termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm) return ord(result)
Wait for the next key pressed and return its ascii code.
vigenere_solver/input.py
get_key
nobe4/vigenere-solver
0
python
def get_key(): result = None if (os.name == 'nt'): import msvcrt result = msvcrt.getch() else: import termios fd = sys.stdin.fileno() oldterm = termios.tcgetattr(fd) newattr = termios.tcgetattr(fd) newattr[3] = ((newattr[3] & (~ termios.ICANON)) & (~ termios.ECHO)) termios.tcsetattr(fd, termios.TCSANOW, newattr) try: result = sys.stdin.read(1) except IOError: pass finally: termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm) return ord(result)
def get_key(): result = None if (os.name == 'nt'): import msvcrt result = msvcrt.getch() else: import termios fd = sys.stdin.fileno() oldterm = termios.tcgetattr(fd) newattr = termios.tcgetattr(fd) newattr[3] = ((newattr[3] & (~ termios.ICANON)) & (~ termios.ECHO)) termios.tcsetattr(fd, termios.TCSANOW, newattr) try: result = sys.stdin.read(1) except IOError: pass finally: termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm) return ord(result)<|docstring|>Wait for the next key pressed and return its ascii code.<|endoftext|>
b8714e39e4c7d672bf9a87235bdd1bf793c0c8b8af8cb2e0e9b823cc2211eff9
def __init__(self, num_batches, BATCH_SIZE, model_kwargs, shuffle=True, corr=False, train=True, mask=False): '\n Args: \n num_batches: Number of batches of synthetic data\n BATCH_SIZE: batchsize of synthetic data\n model_kwargs: dictionary containing "x_dim" which indicates input data size\n shuffle: True sets condition vector in input data to 0 for all possible permutations\n corr: True sets dependent input dimensions via a correlation matrix \n ' self.num_batches = num_batches self.BATCH_SIZE = BATCH_SIZE self.corr = corr self.shuffle = shuffle self.model_kwargs = model_kwargs self.train = train (Batches_X, Batches_C, Batches_conds) = (torch.empty([0]), torch.empty([0]), torch.empty([0])) for (j, i) in enumerate(range(self.num_batches)): if (self.corr is False): m = MultivariateNormal(torch.zeros(self.model_kwargs['x_dim']), torch.eye(self.model_kwargs['x_dim'])) else: if (j == 0): corr_matrix = self.random_corr_mat(D=self.model_kwargs['x_dim']) corr_matrix = torch.from_numpy(corr_matrix) m = MultivariateNormal(torch.zeros(self.model_kwargs['x_dim']).float(), corr_matrix.float()) X = m.sample((self.BATCH_SIZE,)) C = X.clone() count = 0 if (self.shuffle is True): while (count == 0): C_mask = torch.zeros(C.shape).bernoulli_(0.5) count = 1 else: C_mask = torch.zeros(C.shape).bernoulli_(0) C[C_mask.byte()] = 0 C_indicator = (C_mask == 0) C = torch.cat([C.float(), C_indicator.float()], 1) X = X.view([1, (- 1), self.model_kwargs['x_dim']]) C = C.view([1, (- 1), (self.model_kwargs['x_dim'] * 2)]) conds = C[(:, :, self.model_kwargs['x_dim']:)].sum(2) Batches_X = torch.cat([Batches_X, X], 0) Batches_C = torch.cat([Batches_C, C], 0) Batches_conds = torch.cat([Batches_conds, conds], 0) self._batches_x = Batches_X self._batches_c = Batches_C self._batches_conds = Batches_conds
Args: num_batches: Number of batches of synthetic data BATCH_SIZE: batchsize of synthetic data model_kwargs: dictionary containing "x_dim" which indicates input data size shuffle: True sets condition vector in input data to 0 for all possible permutations corr: True sets dependent input dimensions via a correlation matrix
CVAE_testbed/datasets/synthetic.py
__init__
AllenCellModeling/CVAE_testbed
2
python
def __init__(self, num_batches, BATCH_SIZE, model_kwargs, shuffle=True, corr=False, train=True, mask=False): '\n Args: \n num_batches: Number of batches of synthetic data\n BATCH_SIZE: batchsize of synthetic data\n model_kwargs: dictionary containing "x_dim" which indicates input data size\n shuffle: True sets condition vector in input data to 0 for all possible permutations\n corr: True sets dependent input dimensions via a correlation matrix \n ' self.num_batches = num_batches self.BATCH_SIZE = BATCH_SIZE self.corr = corr self.shuffle = shuffle self.model_kwargs = model_kwargs self.train = train (Batches_X, Batches_C, Batches_conds) = (torch.empty([0]), torch.empty([0]), torch.empty([0])) for (j, i) in enumerate(range(self.num_batches)): if (self.corr is False): m = MultivariateNormal(torch.zeros(self.model_kwargs['x_dim']), torch.eye(self.model_kwargs['x_dim'])) else: if (j == 0): corr_matrix = self.random_corr_mat(D=self.model_kwargs['x_dim']) corr_matrix = torch.from_numpy(corr_matrix) m = MultivariateNormal(torch.zeros(self.model_kwargs['x_dim']).float(), corr_matrix.float()) X = m.sample((self.BATCH_SIZE,)) C = X.clone() count = 0 if (self.shuffle is True): while (count == 0): C_mask = torch.zeros(C.shape).bernoulli_(0.5) count = 1 else: C_mask = torch.zeros(C.shape).bernoulli_(0) C[C_mask.byte()] = 0 C_indicator = (C_mask == 0) C = torch.cat([C.float(), C_indicator.float()], 1) X = X.view([1, (- 1), self.model_kwargs['x_dim']]) C = C.view([1, (- 1), (self.model_kwargs['x_dim'] * 2)]) conds = C[(:, :, self.model_kwargs['x_dim']:)].sum(2) Batches_X = torch.cat([Batches_X, X], 0) Batches_C = torch.cat([Batches_C, C], 0) Batches_conds = torch.cat([Batches_conds, conds], 0) self._batches_x = Batches_X self._batches_c = Batches_C self._batches_conds = Batches_conds
def __init__(self, num_batches, BATCH_SIZE, model_kwargs, shuffle=True, corr=False, train=True, mask=False): '\n Args: \n num_batches: Number of batches of synthetic data\n BATCH_SIZE: batchsize of synthetic data\n model_kwargs: dictionary containing "x_dim" which indicates input data size\n shuffle: True sets condition vector in input data to 0 for all possible permutations\n corr: True sets dependent input dimensions via a correlation matrix \n ' self.num_batches = num_batches self.BATCH_SIZE = BATCH_SIZE self.corr = corr self.shuffle = shuffle self.model_kwargs = model_kwargs self.train = train (Batches_X, Batches_C, Batches_conds) = (torch.empty([0]), torch.empty([0]), torch.empty([0])) for (j, i) in enumerate(range(self.num_batches)): if (self.corr is False): m = MultivariateNormal(torch.zeros(self.model_kwargs['x_dim']), torch.eye(self.model_kwargs['x_dim'])) else: if (j == 0): corr_matrix = self.random_corr_mat(D=self.model_kwargs['x_dim']) corr_matrix = torch.from_numpy(corr_matrix) m = MultivariateNormal(torch.zeros(self.model_kwargs['x_dim']).float(), corr_matrix.float()) X = m.sample((self.BATCH_SIZE,)) C = X.clone() count = 0 if (self.shuffle is True): while (count == 0): C_mask = torch.zeros(C.shape).bernoulli_(0.5) count = 1 else: C_mask = torch.zeros(C.shape).bernoulli_(0) C[C_mask.byte()] = 0 C_indicator = (C_mask == 0) C = torch.cat([C.float(), C_indicator.float()], 1) X = X.view([1, (- 1), self.model_kwargs['x_dim']]) C = C.view([1, (- 1), (self.model_kwargs['x_dim'] * 2)]) conds = C[(:, :, self.model_kwargs['x_dim']:)].sum(2) Batches_X = torch.cat([Batches_X, X], 0) Batches_C = torch.cat([Batches_C, C], 0) Batches_conds = torch.cat([Batches_conds, conds], 0) self._batches_x = Batches_X self._batches_c = Batches_C self._batches_conds = Batches_conds<|docstring|>Args: num_batches: Number of batches of synthetic data BATCH_SIZE: batchsize of synthetic data model_kwargs: dictionary containing "x_dim" which indicates input data size shuffle: True sets 
condition vector in input data to 0 for all possible permutations corr: True sets dependent input dimensions via a correlation matrix<|endoftext|>
2b96e70b0921469f03f95779838445c59b98b9773c05b59b2b0f8debb3f3264a
def __getitem__(self, idx): '\n Returns a tuple. (X, C, sum(C[mid:end])). \n X is the input, \n C is the condition, \n sum(C[mid:end]) is the sum of the indicators in C. It tells us how many of the condition\n columns have been masked\n ' return (self._batches_x[idx], self._batches_c[idx], self._batches_conds[idx])
Returns a tuple. (X, C, sum(C[mid:end])). X is the input, C is the condition, sum(C[mid:end]) is the sum of the indicators in C. It tells us how many of the condition columns have been masked
CVAE_testbed/datasets/synthetic.py
__getitem__
AllenCellModeling/CVAE_testbed
2
python
def __getitem__(self, idx): '\n Returns a tuple. (X, C, sum(C[mid:end])). \n X is the input, \n C is the condition, \n sum(C[mid:end]) is the sum of the indicators in C. It tells us how many of the condition\n columns have been masked\n ' return (self._batches_x[idx], self._batches_c[idx], self._batches_conds[idx])
def __getitem__(self, idx): '\n Returns a tuple. (X, C, sum(C[mid:end])). \n X is the input, \n C is the condition, \n sum(C[mid:end]) is the sum of the indicators in C. It tells us how many of the condition\n columns have been masked\n ' return (self._batches_x[idx], self._batches_c[idx], self._batches_conds[idx])<|docstring|>Returns a tuple. (X, C, sum(C[mid:end])). X is the input, C is the condition, sum(C[mid:end]) is the sum of the indicators in C. It tells us how many of the condition columns have been masked<|endoftext|>
86fcf20f79b155eebb219442de015e6bee6b6757d53895e052c9deeb1dfaf05f
def random_corr_mat(self, D=10, beta=1): 'Generate random valid correlation matrix of dimension D.\n Smaller beta gives larger off diagonal correlations (beta > 0).' P = np.zeros([D, D]) S = np.eye(D) for k in range(0, (D - 1)): for i in range((k + 1), D): P[(k, i)] = ((2 * np.random.beta(beta, beta)) - 1) p = P[(k, i)] for l in reversed(range(k)): p = ((p * np.sqrt(((1 - (P[(l, i)] ** 2)) * (1 - (P[(l, k)] ** 2))))) + (P[(l, i)] * P[(l, k)])) S[(k, i)] = S[(i, k)] = p p = np.random.permutation(D) for i in range(D): S[(:, i)] = S[(p, i)] for i in range(D): S[(i, :)] = S[(i, p)] return S
Generate random valid correlation matrix of dimension D. Smaller beta gives larger off diagonal correlations (beta > 0).
CVAE_testbed/datasets/synthetic.py
random_corr_mat
AllenCellModeling/CVAE_testbed
2
python
def random_corr_mat(self, D=10, beta=1): 'Generate random valid correlation matrix of dimension D.\n Smaller beta gives larger off diagonal correlations (beta > 0).' P = np.zeros([D, D]) S = np.eye(D) for k in range(0, (D - 1)): for i in range((k + 1), D): P[(k, i)] = ((2 * np.random.beta(beta, beta)) - 1) p = P[(k, i)] for l in reversed(range(k)): p = ((p * np.sqrt(((1 - (P[(l, i)] ** 2)) * (1 - (P[(l, k)] ** 2))))) + (P[(l, i)] * P[(l, k)])) S[(k, i)] = S[(i, k)] = p p = np.random.permutation(D) for i in range(D): S[(:, i)] = S[(p, i)] for i in range(D): S[(i, :)] = S[(i, p)] return S
def random_corr_mat(self, D=10, beta=1): 'Generate random valid correlation matrix of dimension D.\n Smaller beta gives larger off diagonal correlations (beta > 0).' P = np.zeros([D, D]) S = np.eye(D) for k in range(0, (D - 1)): for i in range((k + 1), D): P[(k, i)] = ((2 * np.random.beta(beta, beta)) - 1) p = P[(k, i)] for l in reversed(range(k)): p = ((p * np.sqrt(((1 - (P[(l, i)] ** 2)) * (1 - (P[(l, k)] ** 2))))) + (P[(l, i)] * P[(l, k)])) S[(k, i)] = S[(i, k)] = p p = np.random.permutation(D) for i in range(D): S[(:, i)] = S[(p, i)] for i in range(D): S[(i, :)] = S[(i, p)] return S<|docstring|>Generate random valid correlation matrix of dimension D. Smaller beta gives larger off diagonal correlations (beta > 0).<|endoftext|>
c153d4267ec13d13f78d666a8040ea4e74549404968e1466a81100581964118b
def glInitInstancedArraysARB(): 'Return boolean indicating whether this extension is available' from OpenGL import extensions return extensions.hasGLExtension(EXTENSION_NAME)
Return boolean indicating whether this extension is available
PyOpenGL-3.0.2/OpenGL/raw/GL/ARB/instanced_arrays.py
glInitInstancedArraysARB
frederica07/Dragon_Programming_Process
0
python
def glInitInstancedArraysARB(): from OpenGL import extensions return extensions.hasGLExtension(EXTENSION_NAME)
def glInitInstancedArraysARB(): from OpenGL import extensions return extensions.hasGLExtension(EXTENSION_NAME)<|docstring|>Return boolean indicating whether this extension is available<|endoftext|>
3a1a21f86342d3e84a0b1e58b8d7087bc06e5a9e2e7eb64c104ce721fa4f8baa
@user_passes_test(login_allowed) def index(request): '\n Groups listing/index.\n ' client = get_httpclient_instance(request) users = client.users.get() collab_groups = client.collaborationgroups.get() new_notifications_count = len(client.notificationlogs.unread.get()) for group in collab_groups: group['member_ids'] = [member.id for member in group.members] return render(request, 'web/collaborationgroups/index.html', {'title': 'Groups', 'groups': collab_groups, 'users': users, 'new_notifications_count': new_notifications_count})
Groups listing/index.
coco/web/views/collaborationgroups.py
index
coco-project/ipynbsrv
0
python
@user_passes_test(login_allowed) def index(request): '\n \n ' client = get_httpclient_instance(request) users = client.users.get() collab_groups = client.collaborationgroups.get() new_notifications_count = len(client.notificationlogs.unread.get()) for group in collab_groups: group['member_ids'] = [member.id for member in group.members] return render(request, 'web/collaborationgroups/index.html', {'title': 'Groups', 'groups': collab_groups, 'users': users, 'new_notifications_count': new_notifications_count})
@user_passes_test(login_allowed) def index(request): '\n \n ' client = get_httpclient_instance(request) users = client.users.get() collab_groups = client.collaborationgroups.get() new_notifications_count = len(client.notificationlogs.unread.get()) for group in collab_groups: group['member_ids'] = [member.id for member in group.members] return render(request, 'web/collaborationgroups/index.html', {'title': 'Groups', 'groups': collab_groups, 'users': users, 'new_notifications_count': new_notifications_count})<|docstring|>Groups listing/index.<|endoftext|>
a8d6eb4f6cbf49f98adcc316f5f1b46115e345204106e774e851bdd90ade89a8
@user_passes_test(login_allowed) def manage(request, group_id): '\n Manage single group.\n ' client = get_httpclient_instance(request) group = client.collaborationgroups(group_id).get() members = group.members users = client.users.get() group['member_ids'] = [member.id for member in members] new_notifications_count = len(client.notificationlogs.unread.get()) return render(request, 'web/collaborationgroups/manage.html', {'title': 'Group', 'group': group, 'members': members, 'users': users, 'new_notifications_count': new_notifications_count})
Manage single group.
coco/web/views/collaborationgroups.py
manage
coco-project/ipynbsrv
0
python
@user_passes_test(login_allowed) def manage(request, group_id): '\n \n ' client = get_httpclient_instance(request) group = client.collaborationgroups(group_id).get() members = group.members users = client.users.get() group['member_ids'] = [member.id for member in members] new_notifications_count = len(client.notificationlogs.unread.get()) return render(request, 'web/collaborationgroups/manage.html', {'title': 'Group', 'group': group, 'members': members, 'users': users, 'new_notifications_count': new_notifications_count})
@user_passes_test(login_allowed) def manage(request, group_id): '\n \n ' client = get_httpclient_instance(request) group = client.collaborationgroups(group_id).get() members = group.members users = client.users.get() group['member_ids'] = [member.id for member in members] new_notifications_count = len(client.notificationlogs.unread.get()) return render(request, 'web/collaborationgroups/manage.html', {'title': 'Group', 'group': group, 'members': members, 'users': users, 'new_notifications_count': new_notifications_count})<|docstring|>Manage single group.<|endoftext|>
17c1d790dc8cff3761893ce81654f2d95f873b8e2f6477e32117343ea95a915c
def user_data_processing(): '\n\t对原始user数据进行处理\n\tUserID:不做处理\n\tJobID:不做处理\n\tGender字段:需要将‘F’和‘M’转换成0和1。\n\tAge字段:要转成7个连续数字0~6。\n\t舍弃: zip-code\n\t' print('user_data_processing....') user_title = ['UserID', 'Gender', 'Age', 'JobID', 'Zip-code'] users = pd.read_table('./ml-1m/users.dat', sep='::', header=None, names=user_title, engine='python') users = users.filter(regex='UserID|Gender|Age|JobID') users_orig = users.values gender_to_int = {'F': 0, 'M': 1} users['Gender'] = users['Gender'].map(gender_to_int) age2int = {val: ii for (ii, val) in enumerate(set(users['Age']))} users['Age'] = users['Age'].map(age2int) return (users, users_orig)
对原始user数据进行处理 UserID:不做处理 JobID:不做处理 Gender字段:需要将‘F’和‘M’转换成0和1。 Age字段:要转成7个连续数字0~6。 舍弃: zip-code
data_processing.py
user_data_processing
BlackFeather0303/Movie-Recommendation-System
8
python
def user_data_processing(): '\n\t对原始user数据进行处理\n\tUserID:不做处理\n\tJobID:不做处理\n\tGender字段:需要将‘F’和‘M’转换成0和1。\n\tAge字段:要转成7个连续数字0~6。\n\t舍弃: zip-code\n\t' print('user_data_processing....') user_title = ['UserID', 'Gender', 'Age', 'JobID', 'Zip-code'] users = pd.read_table('./ml-1m/users.dat', sep='::', header=None, names=user_title, engine='python') users = users.filter(regex='UserID|Gender|Age|JobID') users_orig = users.values gender_to_int = {'F': 0, 'M': 1} users['Gender'] = users['Gender'].map(gender_to_int) age2int = {val: ii for (ii, val) in enumerate(set(users['Age']))} users['Age'] = users['Age'].map(age2int) return (users, users_orig)
def user_data_processing(): '\n\t对原始user数据进行处理\n\tUserID:不做处理\n\tJobID:不做处理\n\tGender字段:需要将‘F’和‘M’转换成0和1。\n\tAge字段:要转成7个连续数字0~6。\n\t舍弃: zip-code\n\t' print('user_data_processing....') user_title = ['UserID', 'Gender', 'Age', 'JobID', 'Zip-code'] users = pd.read_table('./ml-1m/users.dat', sep='::', header=None, names=user_title, engine='python') users = users.filter(regex='UserID|Gender|Age|JobID') users_orig = users.values gender_to_int = {'F': 0, 'M': 1} users['Gender'] = users['Gender'].map(gender_to_int) age2int = {val: ii for (ii, val) in enumerate(set(users['Age']))} users['Age'] = users['Age'].map(age2int) return (users, users_orig)<|docstring|>对原始user数据进行处理 UserID:不做处理 JobID:不做处理 Gender字段:需要将‘F’和‘M’转换成0和1。 Age字段:要转成7个连续数字0~6。 舍弃: zip-code<|endoftext|>
7e39c5c267712042ec63eee371e31de9fc335b1b67ebad6341146561f3059753
def movie_data_processing(title_length=16): '\n\t对原始movie数据不作处理\n\tGenres字段:进行int映射,因为有些电影是多个Genres的组合,需要再将每个电影的Genres字段转成数字列表.\n\tTitle字段:首先去除掉title中的year。然后将title映射成数字列表。(int映射粒度为单词而不是整个title)\n\tGenres和Title字段需要将长度统一,这样在神经网络中方便处理。\n\t空白部分用‘< PAD >’对应的数字填充。\n\t' print('movie_data_processing....') movies_title = ['MovieID', 'Title', 'Genres'] movies = pd.read_table('./ml-1m/movies.dat', sep='::', header=None, names=movies_title, engine='python') movies_orig = movies.values pattern = re.compile('^(.*)\\((\\d+)\\)$') title_re_year = {val: pattern.match(val).group(1) for val in set(movies['Title'])} movies['Title'] = movies['Title'].map(title_re_year) title_set = set() for val in movies['Title'].str.split(): title_set.update(val) title_set.add('PADDING') title2int = {val: ii for (ii, val) in enumerate(title_set)} title_map = {val: [title2int[row] for row in val.split()] for val in set(movies['Title'])} for key in title_map.keys(): padding_length = (title_length - len(title_map[key])) padding = ([title2int['PADDING']] * padding_length) title_map[key].extend(padding) movies['Title'] = movies['Title'].map(title_map) print(len(movies['Title'][0])) genres_set = set() for val in movies['Genres'].str.split('|'): genres_set.update(val) genres_set.add('PADDING') genres2int = {val: ii for (ii, val) in enumerate(genres_set)} genres_map = {val: [genres2int[row] for row in val.split('|')] for val in set(movies['Genres'])} for key in genres_map: padding_length = (len(genres_set) - len(genres_map[key])) padding = ([genres2int['PADDING']] * padding_length) genres_map[key].extend(padding) movies['Genres'] = movies['Genres'].map(genres_map) return (movies, movies_orig, genres2int, title_set)
对原始movie数据不作处理 Genres字段:进行int映射,因为有些电影是多个Genres的组合,需要再将每个电影的Genres字段转成数字列表. Title字段:首先去除掉title中的year。然后将title映射成数字列表。(int映射粒度为单词而不是整个title) Genres和Title字段需要将长度统一,这样在神经网络中方便处理。 空白部分用‘< PAD >’对应的数字填充。
data_processing.py
movie_data_processing
BlackFeather0303/Movie-Recommendation-System
8
python
def movie_data_processing(title_length=16): '\n\t对原始movie数据不作处理\n\tGenres字段:进行int映射,因为有些电影是多个Genres的组合,需要再将每个电影的Genres字段转成数字列表.\n\tTitle字段:首先去除掉title中的year。然后将title映射成数字列表。(int映射粒度为单词而不是整个title)\n\tGenres和Title字段需要将长度统一,这样在神经网络中方便处理。\n\t空白部分用‘< PAD >’对应的数字填充。\n\t' print('movie_data_processing....') movies_title = ['MovieID', 'Title', 'Genres'] movies = pd.read_table('./ml-1m/movies.dat', sep='::', header=None, names=movies_title, engine='python') movies_orig = movies.values pattern = re.compile('^(.*)\\((\\d+)\\)$') title_re_year = {val: pattern.match(val).group(1) for val in set(movies['Title'])} movies['Title'] = movies['Title'].map(title_re_year) title_set = set() for val in movies['Title'].str.split(): title_set.update(val) title_set.add('PADDING') title2int = {val: ii for (ii, val) in enumerate(title_set)} title_map = {val: [title2int[row] for row in val.split()] for val in set(movies['Title'])} for key in title_map.keys(): padding_length = (title_length - len(title_map[key])) padding = ([title2int['PADDING']] * padding_length) title_map[key].extend(padding) movies['Title'] = movies['Title'].map(title_map) print(len(movies['Title'][0])) genres_set = set() for val in movies['Genres'].str.split('|'): genres_set.update(val) genres_set.add('PADDING') genres2int = {val: ii for (ii, val) in enumerate(genres_set)} genres_map = {val: [genres2int[row] for row in val.split('|')] for val in set(movies['Genres'])} for key in genres_map: padding_length = (len(genres_set) - len(genres_map[key])) padding = ([genres2int['PADDING']] * padding_length) genres_map[key].extend(padding) movies['Genres'] = movies['Genres'].map(genres_map) return (movies, movies_orig, genres2int, title_set)
def movie_data_processing(title_length=16): '\n\t对原始movie数据不作处理\n\tGenres字段:进行int映射,因为有些电影是多个Genres的组合,需要再将每个电影的Genres字段转成数字列表.\n\tTitle字段:首先去除掉title中的year。然后将title映射成数字列表。(int映射粒度为单词而不是整个title)\n\tGenres和Title字段需要将长度统一,这样在神经网络中方便处理。\n\t空白部分用‘< PAD >’对应的数字填充。\n\t' print('movie_data_processing....') movies_title = ['MovieID', 'Title', 'Genres'] movies = pd.read_table('./ml-1m/movies.dat', sep='::', header=None, names=movies_title, engine='python') movies_orig = movies.values pattern = re.compile('^(.*)\\((\\d+)\\)$') title_re_year = {val: pattern.match(val).group(1) for val in set(movies['Title'])} movies['Title'] = movies['Title'].map(title_re_year) title_set = set() for val in movies['Title'].str.split(): title_set.update(val) title_set.add('PADDING') title2int = {val: ii for (ii, val) in enumerate(title_set)} title_map = {val: [title2int[row] for row in val.split()] for val in set(movies['Title'])} for key in title_map.keys(): padding_length = (title_length - len(title_map[key])) padding = ([title2int['PADDING']] * padding_length) title_map[key].extend(padding) movies['Title'] = movies['Title'].map(title_map) print(len(movies['Title'][0])) genres_set = set() for val in movies['Genres'].str.split('|'): genres_set.update(val) genres_set.add('PADDING') genres2int = {val: ii for (ii, val) in enumerate(genres_set)} genres_map = {val: [genres2int[row] for row in val.split('|')] for val in set(movies['Genres'])} for key in genres_map: padding_length = (len(genres_set) - len(genres_map[key])) padding = ([genres2int['PADDING']] * padding_length) genres_map[key].extend(padding) movies['Genres'] = movies['Genres'].map(genres_map) return (movies, movies_orig, genres2int, title_set)<|docstring|>对原始movie数据不作处理 Genres字段:进行int映射,因为有些电影是多个Genres的组合,需要再将每个电影的Genres字段转成数字列表. Title字段:首先去除掉title中的year。然后将title映射成数字列表。(int映射粒度为单词而不是整个title) Genres和Title字段需要将长度统一,这样在神经网络中方便处理。 空白部分用‘< PAD >’对应的数字填充。<|endoftext|>
422a8cdf935e202e4409255579713cbcb0390e743f15a5bae555b51146e599ff
def rating_data_processing(): '\n\trating数据处理,只需要将timestamps舍去,保留其他属性即可\n\t' print('rating_data_processing....') ratings_title = ['UserID', 'MovieID', 'ratings', 'timestamps'] ratings = pd.read_table('./ml-1m/ratings.dat', sep='::', header=None, names=ratings_title, engine='python') ratings = ratings.filter(regex='UserID|MovieID|ratings') return ratings
rating数据处理,只需要将timestamps舍去,保留其他属性即可
data_processing.py
rating_data_processing
BlackFeather0303/Movie-Recommendation-System
8
python
def rating_data_processing(): '\n\t\n\t' print('rating_data_processing....') ratings_title = ['UserID', 'MovieID', 'ratings', 'timestamps'] ratings = pd.read_table('./ml-1m/ratings.dat', sep='::', header=None, names=ratings_title, engine='python') ratings = ratings.filter(regex='UserID|MovieID|ratings') return ratings
def rating_data_processing(): '\n\t\n\t' print('rating_data_processing....') ratings_title = ['UserID', 'MovieID', 'ratings', 'timestamps'] ratings = pd.read_table('./ml-1m/ratings.dat', sep='::', header=None, names=ratings_title, engine='python') ratings = ratings.filter(regex='UserID|MovieID|ratings') return ratings<|docstring|>rating数据处理,只需要将timestamps舍去,保留其他属性即可<|endoftext|>
3295f2bf388130b81e2c98a81146840b8800ff840fcef3612c7d4ecec765b5dc
def get_feature(): '\n\t将多个方法整合在一起,得到movie数据,user数据,rating数据。\n\t然后将三个table合并到一起,组成一个大table。\n\t最后将table切割,分别得到features 和 target(rating)\n\t' title_length = 16 (users, users_orig) = user_data_processing() (movies, movies_orig, genres2int, title_set) = movie_data_processing() ratings = rating_data_processing() data = pd.merge(pd.merge(ratings, users), movies) (feature_pd, tragets_pd) = (data.drop(['ratings'], axis=1), data['ratings']) features = feature_pd.values targets = tragets_pd.values (train_x, test_x, train_y, test_y) = train_test_split(features, targets, test_size=0.2) pickle.dump([train_x, train_y], open('model/processed_data/train_data.p', 'wb')) pickle.dump([test_x, test_y], open('model/processed_data/test_data.p', 'wb')) pickle.dump(features, open('model/features.p', 'wb')) pickle.dump(targets, open('model/target.p', 'wb')) pickle.dump((users, users_orig, movies, movies_orig), open('model/processed_data/original_data.p', 'wb')) title_vocb_num = (len(title_set) + 1) genres_num = len(genres2int) movie_id_num = (max(movies['MovieID']) + 1) pickle.dump((movie_id_num, title_length, title_vocb_num, genres_num), open('model/argument.p', 'wb')) return (features, targets)
将多个方法整合在一起,得到movie数据,user数据,rating数据。 然后将三个table合并到一起,组成一个大table。 最后将table切割,分别得到features 和 target(rating)
data_processing.py
get_feature
BlackFeather0303/Movie-Recommendation-System
8
python
def get_feature(): '\n\t将多个方法整合在一起,得到movie数据,user数据,rating数据。\n\t然后将三个table合并到一起,组成一个大table。\n\t最后将table切割,分别得到features 和 target(rating)\n\t' title_length = 16 (users, users_orig) = user_data_processing() (movies, movies_orig, genres2int, title_set) = movie_data_processing() ratings = rating_data_processing() data = pd.merge(pd.merge(ratings, users), movies) (feature_pd, tragets_pd) = (data.drop(['ratings'], axis=1), data['ratings']) features = feature_pd.values targets = tragets_pd.values (train_x, test_x, train_y, test_y) = train_test_split(features, targets, test_size=0.2) pickle.dump([train_x, train_y], open('model/processed_data/train_data.p', 'wb')) pickle.dump([test_x, test_y], open('model/processed_data/test_data.p', 'wb')) pickle.dump(features, open('model/features.p', 'wb')) pickle.dump(targets, open('model/target.p', 'wb')) pickle.dump((users, users_orig, movies, movies_orig), open('model/processed_data/original_data.p', 'wb')) title_vocb_num = (len(title_set) + 1) genres_num = len(genres2int) movie_id_num = (max(movies['MovieID']) + 1) pickle.dump((movie_id_num, title_length, title_vocb_num, genres_num), open('model/argument.p', 'wb')) return (features, targets)
def get_feature(): '\n\t将多个方法整合在一起,得到movie数据,user数据,rating数据。\n\t然后将三个table合并到一起,组成一个大table。\n\t最后将table切割,分别得到features 和 target(rating)\n\t' title_length = 16 (users, users_orig) = user_data_processing() (movies, movies_orig, genres2int, title_set) = movie_data_processing() ratings = rating_data_processing() data = pd.merge(pd.merge(ratings, users), movies) (feature_pd, tragets_pd) = (data.drop(['ratings'], axis=1), data['ratings']) features = feature_pd.values targets = tragets_pd.values (train_x, test_x, train_y, test_y) = train_test_split(features, targets, test_size=0.2) pickle.dump([train_x, train_y], open('model/processed_data/train_data.p', 'wb')) pickle.dump([test_x, test_y], open('model/processed_data/test_data.p', 'wb')) pickle.dump(features, open('model/features.p', 'wb')) pickle.dump(targets, open('model/target.p', 'wb')) pickle.dump((users, users_orig, movies, movies_orig), open('model/processed_data/original_data.p', 'wb')) title_vocb_num = (len(title_set) + 1) genres_num = len(genres2int) movie_id_num = (max(movies['MovieID']) + 1) pickle.dump((movie_id_num, title_length, title_vocb_num, genres_num), open('model/argument.p', 'wb')) return (features, targets)<|docstring|>将多个方法整合在一起,得到movie数据,user数据,rating数据。 然后将三个table合并到一起,组成一个大table。 最后将table切割,分别得到features 和 target(rating)<|endoftext|>
1b72150c4d7730eee863ff3ba877d03805ecab43b283d1381d29b3004281fc42
def find_peaks(flux, window=51, niter=5, clip_iter=5, clip_sigma_upper=5.0, clip_sigma_lower=5.0, detection_sigma=3.0, min_peak_dist_sigma=5.0, gaussian_width=1.0, make_fig=False): '\n * Subtract median filter (param "window")\n * Iterate: (param "niter")\n * Sigma clip, estimate noise (params clip_iter, clip_sigma_upper clip_sigma_lower)\n * Find peaks (param detection_sigma)\n * Remove peaks too close to previous (param min_peak_dist_sigma)\n * Fit Gaussians to peaks (initialize width at param gaussian_width)\n Returns:\n allpeakx: locations of peaks\n fullmodel: the model of all the gaussians\n If make_fig=True: fig, a plot showing all the peaks found at each iteration.\n ' xarr = np.arange(len(flux)) flux = (flux - signal.medfilt(flux, window)) continuum = models.Linear1D(slope=0, intercept=0) fullmodel = continuum allpeakx = [] allpeaksigma = [] fitter = fitting.LevMarLSQFitter() if make_fig: (fig, axes) = plt.subplots(niter) for iiter in range(niter): tflux = (flux - fullmodel(xarr)) cflux = sigma_clip(tflux, iters=clip_iter, sigma_upper=clip_sigma_upper, sigma_lower=clip_sigma_lower) noise = np.std(cflux) deriv = np.gradient(tflux) peaklocs = (((deriv[:(- 1)] >= 0) & (deriv[1:] < 0)) & (tflux[:(- 1)] > (detection_sigma * noise))) peakx = np.where(peaklocs)[0] peaky = flux[:(- 1)][peaklocs] peaks_to_keep = np.ones_like(peakx, dtype=bool) for (ix, x) in enumerate(peakx): z = ((x - np.array(allpeakx)) / np.array(allpeaksigma)) if np.any((np.abs(z) < min_peak_dist_sigma)): peaks_to_keep[ix] = False peakx = peakx[peaks_to_keep] peaky = peaky[peaks_to_keep] for (x, y) in zip(peakx, peaky): g = models.Gaussian1D(amplitude=y, mean=x, stddev=gaussian_width) fullmodel = (fullmodel + g) print('iter {}: {} peaks (found {}, added {})'.format(iiter, (fullmodel.n_submodels() - 1), len(peaks_to_keep), len(peakx))) fullmodel = fitter(fullmodel, xarr, flux, maxiter=(200 * (fullmodel.parameters.size + 1))) print(fitter.fit_info['message'], fitter.fit_info['ierr']) 
peak_x_indices = np.where([('mean_' in param) for param in fullmodel.param_names])[0] peak_y_indices = (peak_x_indices - 1) peak_sigma_indices = (peak_x_indices + 1) allpeakx = fullmodel.parameters[peak_x_indices] allpeaky = fullmodel.parameters[peak_y_indices] allpeaksigma = fullmodel.parameters[peak_sigma_indices] if make_fig: try: ax = axes[iiter] except: ax = axes ax.plot(xarr, flux) ax.plot(peakx, peaky, 'ro') ax.plot(xarr, fullmodel(xarr), lw=1) ax.axhspan((- noise), (+ noise), color='k', alpha=0.2) ax.plot(xarr, (flux - fullmodel(xarr))) ax.vlines(allpeakx, (allpeaky * 1.1), ((allpeaky * 1.1) + 300), color='r', lw=1) if make_fig: return (allpeakx, fullmodel, fig) return (allpeakx, fullmodel)
* Subtract median filter (param "window") * Iterate: (param "niter") * Sigma clip, estimate noise (params clip_iter, clip_sigma_upper clip_sigma_lower) * Find peaks (param detection_sigma) * Remove peaks too close to previous (param min_peak_dist_sigma) * Fit Gaussians to peaks (initialize width at param gaussian_width) Returns: allpeakx: locations of peaks fullmodel: the model of all the gaussians If make_fig=True: fig, a plot showing all the peaks found at each iteration.
smhr_session/specutils/reduction.py
find_peaks
alexji/smhr-session
0
python
def find_peaks(flux, window=51, niter=5, clip_iter=5, clip_sigma_upper=5.0, clip_sigma_lower=5.0, detection_sigma=3.0, min_peak_dist_sigma=5.0, gaussian_width=1.0, make_fig=False): '\n * Subtract median filter (param "window")\n * Iterate: (param "niter")\n * Sigma clip, estimate noise (params clip_iter, clip_sigma_upper clip_sigma_lower)\n * Find peaks (param detection_sigma)\n * Remove peaks too close to previous (param min_peak_dist_sigma)\n * Fit Gaussians to peaks (initialize width at param gaussian_width)\n Returns:\n allpeakx: locations of peaks\n fullmodel: the model of all the gaussians\n If make_fig=True: fig, a plot showing all the peaks found at each iteration.\n ' xarr = np.arange(len(flux)) flux = (flux - signal.medfilt(flux, window)) continuum = models.Linear1D(slope=0, intercept=0) fullmodel = continuum allpeakx = [] allpeaksigma = [] fitter = fitting.LevMarLSQFitter() if make_fig: (fig, axes) = plt.subplots(niter) for iiter in range(niter): tflux = (flux - fullmodel(xarr)) cflux = sigma_clip(tflux, iters=clip_iter, sigma_upper=clip_sigma_upper, sigma_lower=clip_sigma_lower) noise = np.std(cflux) deriv = np.gradient(tflux) peaklocs = (((deriv[:(- 1)] >= 0) & (deriv[1:] < 0)) & (tflux[:(- 1)] > (detection_sigma * noise))) peakx = np.where(peaklocs)[0] peaky = flux[:(- 1)][peaklocs] peaks_to_keep = np.ones_like(peakx, dtype=bool) for (ix, x) in enumerate(peakx): z = ((x - np.array(allpeakx)) / np.array(allpeaksigma)) if np.any((np.abs(z) < min_peak_dist_sigma)): peaks_to_keep[ix] = False peakx = peakx[peaks_to_keep] peaky = peaky[peaks_to_keep] for (x, y) in zip(peakx, peaky): g = models.Gaussian1D(amplitude=y, mean=x, stddev=gaussian_width) fullmodel = (fullmodel + g) print('iter {}: {} peaks (found {}, added {})'.format(iiter, (fullmodel.n_submodels() - 1), len(peaks_to_keep), len(peakx))) fullmodel = fitter(fullmodel, xarr, flux, maxiter=(200 * (fullmodel.parameters.size + 1))) print(fitter.fit_info['message'], fitter.fit_info['ierr']) 
peak_x_indices = np.where([('mean_' in param) for param in fullmodel.param_names])[0] peak_y_indices = (peak_x_indices - 1) peak_sigma_indices = (peak_x_indices + 1) allpeakx = fullmodel.parameters[peak_x_indices] allpeaky = fullmodel.parameters[peak_y_indices] allpeaksigma = fullmodel.parameters[peak_sigma_indices] if make_fig: try: ax = axes[iiter] except: ax = axes ax.plot(xarr, flux) ax.plot(peakx, peaky, 'ro') ax.plot(xarr, fullmodel(xarr), lw=1) ax.axhspan((- noise), (+ noise), color='k', alpha=0.2) ax.plot(xarr, (flux - fullmodel(xarr))) ax.vlines(allpeakx, (allpeaky * 1.1), ((allpeaky * 1.1) + 300), color='r', lw=1) if make_fig: return (allpeakx, fullmodel, fig) return (allpeakx, fullmodel)
def find_peaks(flux, window=51, niter=5, clip_iter=5, clip_sigma_upper=5.0, clip_sigma_lower=5.0, detection_sigma=3.0, min_peak_dist_sigma=5.0, gaussian_width=1.0, make_fig=False): '\n * Subtract median filter (param "window")\n * Iterate: (param "niter")\n * Sigma clip, estimate noise (params clip_iter, clip_sigma_upper clip_sigma_lower)\n * Find peaks (param detection_sigma)\n * Remove peaks too close to previous (param min_peak_dist_sigma)\n * Fit Gaussians to peaks (initialize width at param gaussian_width)\n Returns:\n allpeakx: locations of peaks\n fullmodel: the model of all the gaussians\n If make_fig=True: fig, a plot showing all the peaks found at each iteration.\n ' xarr = np.arange(len(flux)) flux = (flux - signal.medfilt(flux, window)) continuum = models.Linear1D(slope=0, intercept=0) fullmodel = continuum allpeakx = [] allpeaksigma = [] fitter = fitting.LevMarLSQFitter() if make_fig: (fig, axes) = plt.subplots(niter) for iiter in range(niter): tflux = (flux - fullmodel(xarr)) cflux = sigma_clip(tflux, iters=clip_iter, sigma_upper=clip_sigma_upper, sigma_lower=clip_sigma_lower) noise = np.std(cflux) deriv = np.gradient(tflux) peaklocs = (((deriv[:(- 1)] >= 0) & (deriv[1:] < 0)) & (tflux[:(- 1)] > (detection_sigma * noise))) peakx = np.where(peaklocs)[0] peaky = flux[:(- 1)][peaklocs] peaks_to_keep = np.ones_like(peakx, dtype=bool) for (ix, x) in enumerate(peakx): z = ((x - np.array(allpeakx)) / np.array(allpeaksigma)) if np.any((np.abs(z) < min_peak_dist_sigma)): peaks_to_keep[ix] = False peakx = peakx[peaks_to_keep] peaky = peaky[peaks_to_keep] for (x, y) in zip(peakx, peaky): g = models.Gaussian1D(amplitude=y, mean=x, stddev=gaussian_width) fullmodel = (fullmodel + g) print('iter {}: {} peaks (found {}, added {})'.format(iiter, (fullmodel.n_submodels() - 1), len(peaks_to_keep), len(peakx))) fullmodel = fitter(fullmodel, xarr, flux, maxiter=(200 * (fullmodel.parameters.size + 1))) print(fitter.fit_info['message'], fitter.fit_info['ierr']) 
peak_x_indices = np.where([('mean_' in param) for param in fullmodel.param_names])[0] peak_y_indices = (peak_x_indices - 1) peak_sigma_indices = (peak_x_indices + 1) allpeakx = fullmodel.parameters[peak_x_indices] allpeaky = fullmodel.parameters[peak_y_indices] allpeaksigma = fullmodel.parameters[peak_sigma_indices] if make_fig: try: ax = axes[iiter] except: ax = axes ax.plot(xarr, flux) ax.plot(peakx, peaky, 'ro') ax.plot(xarr, fullmodel(xarr), lw=1) ax.axhspan((- noise), (+ noise), color='k', alpha=0.2) ax.plot(xarr, (flux - fullmodel(xarr))) ax.vlines(allpeakx, (allpeaky * 1.1), ((allpeaky * 1.1) + 300), color='r', lw=1) if make_fig: return (allpeakx, fullmodel, fig) return (allpeakx, fullmodel)<|docstring|>* Subtract median filter (param "window") * Iterate: (param "niter") * Sigma clip, estimate noise (params clip_iter, clip_sigma_upper clip_sigma_lower) * Find peaks (param detection_sigma) * Remove peaks too close to previous (param min_peak_dist_sigma) * Fit Gaussians to peaks (initialize width at param gaussian_width) Returns: allpeakx: locations of peaks fullmodel: the model of all the gaussians If make_fig=True: fig, a plot showing all the peaks found at each iteration.<|endoftext|>
4fe81e366b3668749d59b976b15ed267f6b6a79fdaa179cb8b1d8f485619e9d8
def __init__(self, gen_model, disc_model, hr_shape, learning_rate=0.001, gen_model_save_path='models/generator_upscale_2_times.h5', disc_model_save_path='models/discriminator_upscale_2_times.h5'): 'disc_patch: output shape of last layer of discriminator model.\n hr_shape: height resolution shape.\n ' self.gen_model = gen_model self.disc_model = disc_model self.disc_patch = disc_model.get_layer('disc_output_layer').output_shape[1:] self.pretrain_iteration = 1 self.pretrain_iteration = 1 self.hr_shape = hr_shape self.gen_model_save_path = gen_model_save_path self.disc_model_save_path = disc_model_save_path self.vgg = keras.applications.VGG19(weights='imagenet', input_shape=self.hr_shape, include_top=False) self.vgg.trainable = False self.vgg_model = keras.models.Model(inputs=self.vgg.input, outputs=self.vgg.get_layer('block5_conv4').output) self.lr = learning_rate gen_schedule = keras.optimizers.schedules.ExponentialDecay(self.lr, decay_steps=100000, decay_rate=0.95, staircase=True) disc_schedule = keras.optimizers.schedules.ExponentialDecay((self.lr * 5), decay_steps=100000, decay_rate=0.95, staircase=True) self.gen_optimizer = keras.optimizers.Adam(learning_rate=gen_schedule) self.disc_optimizer = keras.optimizers.Adam(learning_rate=disc_schedule)
disc_patch: output shape of last layer of discriminator model. hr_shape: height resolution shape.
model_training.py
__init__
duongtruongtrong/super_resolution_video_srgan
1
python
def __init__(self, gen_model, disc_model, hr_shape, learning_rate=0.001, gen_model_save_path='models/generator_upscale_2_times.h5', disc_model_save_path='models/discriminator_upscale_2_times.h5'): 'disc_patch: output shape of last layer of discriminator model.\n hr_shape: height resolution shape.\n ' self.gen_model = gen_model self.disc_model = disc_model self.disc_patch = disc_model.get_layer('disc_output_layer').output_shape[1:] self.pretrain_iteration = 1 self.pretrain_iteration = 1 self.hr_shape = hr_shape self.gen_model_save_path = gen_model_save_path self.disc_model_save_path = disc_model_save_path self.vgg = keras.applications.VGG19(weights='imagenet', input_shape=self.hr_shape, include_top=False) self.vgg.trainable = False self.vgg_model = keras.models.Model(inputs=self.vgg.input, outputs=self.vgg.get_layer('block5_conv4').output) self.lr = learning_rate gen_schedule = keras.optimizers.schedules.ExponentialDecay(self.lr, decay_steps=100000, decay_rate=0.95, staircase=True) disc_schedule = keras.optimizers.schedules.ExponentialDecay((self.lr * 5), decay_steps=100000, decay_rate=0.95, staircase=True) self.gen_optimizer = keras.optimizers.Adam(learning_rate=gen_schedule) self.disc_optimizer = keras.optimizers.Adam(learning_rate=disc_schedule)
def __init__(self, gen_model, disc_model, hr_shape, learning_rate=0.001, gen_model_save_path='models/generator_upscale_2_times.h5', disc_model_save_path='models/discriminator_upscale_2_times.h5'): 'disc_patch: output shape of last layer of discriminator model.\n hr_shape: height resolution shape.\n ' self.gen_model = gen_model self.disc_model = disc_model self.disc_patch = disc_model.get_layer('disc_output_layer').output_shape[1:] self.pretrain_iteration = 1 self.pretrain_iteration = 1 self.hr_shape = hr_shape self.gen_model_save_path = gen_model_save_path self.disc_model_save_path = disc_model_save_path self.vgg = keras.applications.VGG19(weights='imagenet', input_shape=self.hr_shape, include_top=False) self.vgg.trainable = False self.vgg_model = keras.models.Model(inputs=self.vgg.input, outputs=self.vgg.get_layer('block5_conv4').output) self.lr = learning_rate gen_schedule = keras.optimizers.schedules.ExponentialDecay(self.lr, decay_steps=100000, decay_rate=0.95, staircase=True) disc_schedule = keras.optimizers.schedules.ExponentialDecay((self.lr * 5), decay_steps=100000, decay_rate=0.95, staircase=True) self.gen_optimizer = keras.optimizers.Adam(learning_rate=gen_schedule) self.disc_optimizer = keras.optimizers.Adam(learning_rate=disc_schedule)<|docstring|>disc_patch: output shape of last layer of discriminator model. hr_shape: height resolution shape.<|endoftext|>
885e7eb657155f9ffe6b5cc37042e38ae9f12433a2829e8542111a45299b3b5d
@tf.function
def _feature_loss(self, hr, sr):
    """Mean squared error between VGG19 features of real and generated images.

    Args:
        hr: Tensor holding the original high-resolution image (y).
        sr: Tensor holding the generated image (y_hat).

    Returns:
        Scalar MSE between the scaled VGG19 feature maps of hr and sr.
    """
    # Map [-1, 1]-style tensors to [0, 255] and apply VGG19 preprocessing.
    generated = keras.applications.vgg19.preprocess_input((sr + 1.0) * 255 / 2.0)
    original = keras.applications.vgg19.preprocess_input((hr + 1.0) * 255 / 2.0)
    # The 1/12.75 rescaling matches the feature scaling used by SRGAN.
    generated_features = self.vgg_model(generated) / 12.75
    original_features = self.vgg_model(original) / 12.75
    return keras.losses.MeanSquaredError()(original_features, generated_features)
Returns Mean Square Error of VGG19 feature extracted original image (y) and VGG19 feature extracted generated image (y_hat). Args: hr: A tf tensor of original image (y) sr: A tf tensor of generated image (y_hat) Returns: mse: Mean Square Error.
model_training.py
_feature_loss
duongtruongtrong/super_resolution_video_srgan
1
python
@tf.function def _feature_loss(self, hr, sr): '\n Returns Mean Square Error of VGG19 feature extracted original image (y) and VGG19 feature extracted generated image (y_hat).\n Args:\n hr: A tf tensor of original image (y)\n sr: A tf tensor of generated image (y_hat)\n Returns:\n mse: Mean Square Error.\n ' sr = keras.applications.vgg19.preprocess_input((((sr + 1.0) * 255) / 2.0)) hr = keras.applications.vgg19.preprocess_input((((hr + 1.0) * 255) / 2.0)) sr_features = (self.vgg_model(sr) / 12.75) hr_features = (self.vgg_model(hr) / 12.75) mse = keras.losses.MeanSquaredError()(hr_features, sr_features) return mse
@tf.function def _feature_loss(self, hr, sr): '\n Returns Mean Square Error of VGG19 feature extracted original image (y) and VGG19 feature extracted generated image (y_hat).\n Args:\n hr: A tf tensor of original image (y)\n sr: A tf tensor of generated image (y_hat)\n Returns:\n mse: Mean Square Error.\n ' sr = keras.applications.vgg19.preprocess_input((((sr + 1.0) * 255) / 2.0)) hr = keras.applications.vgg19.preprocess_input((((hr + 1.0) * 255) / 2.0)) sr_features = (self.vgg_model(sr) / 12.75) hr_features = (self.vgg_model(hr) / 12.75) mse = keras.losses.MeanSquaredError()(hr_features, sr_features) return mse<|docstring|>Returns Mean Square Error of VGG19 feature extracted original image (y) and VGG19 feature extracted generated image (y_hat). Args: hr: A tf tensor of original image (y) sr: A tf tensor of generated image (y_hat) Returns: mse: Mean Square Error.<|endoftext|>
a202e88b1afbfcd0fb47c9a7c17cb58cfeffeaab8e8b71b3d6d02a758dea7d2f
@tf.function
def _pretrain_step(self, x, y):
    """Run one generator pre-training step on a single batch.

    Args:
        x: Low-resolution input image batch.
        y: Matching high-resolution target batch.

    Returns:
        The MSE between the target batch and the generator output.
    """
    with tf.GradientTape() as tape:
        generated = self.gen_model(x)
        mse = keras.losses.MeanSquaredError()(y, generated)
    gradients = tape.gradient(mse, self.gen_model.trainable_variables)
    self.gen_optimizer.apply_gradients(zip(gradients, self.gen_model.trainable_variables))
    return mse
Single step of generator pre-training. Args: gen_model: A compiled generator model. x: The low resolution image tensor. y: The high resolution image tensor.
model_training.py
_pretrain_step
duongtruongtrong/super_resolution_video_srgan
1
python
@tf.function def _pretrain_step(self, x, y): '\n Single step of generator pre-training.\n Args:\n gen_model: A compiled generator model.\n x: The low resolution image tensor.\n y: The high resolution image tensor.\n ' with tf.GradientTape() as tape: fake_hr = self.gen_model(x) loss_mse = keras.losses.MeanSquaredError()(y, fake_hr) grads = tape.gradient(loss_mse, self.gen_model.trainable_variables) self.gen_optimizer.apply_gradients(zip(grads, self.gen_model.trainable_variables)) return loss_mse
@tf.function def _pretrain_step(self, x, y): '\n Single step of generator pre-training.\n Args:\n gen_model: A compiled generator model.\n x: The low resolution image tensor.\n y: The high resolution image tensor.\n ' with tf.GradientTape() as tape: fake_hr = self.gen_model(x) loss_mse = keras.losses.MeanSquaredError()(y, fake_hr) grads = tape.gradient(loss_mse, self.gen_model.trainable_variables) self.gen_optimizer.apply_gradients(zip(grads, self.gen_model.trainable_variables)) return loss_mse<|docstring|>Single step of generator pre-training. Args: gen_model: A compiled generator model. x: The low resolution image tensor. y: The high resolution image tensor.<|endoftext|>
bd88368ff1a6f4ace9075fd8a8cbab73784be0198efff3d3e6140db106ef1ec8
def pretrain_generator(self, dataset, writer, log_iter=200):
    """Pre-train the generator on MSE alone, to avoid poor local minima.

    Args:
        dataset: A tf dataset object of (low_res, high_res) image batches.
        writer: A summary writer object for TensorBoard logging.
        log_iter: Log the loss every `log_iter` steps.

    Returns:
        None
    """
    with writer.as_default():
        # Single pass over the dataset. (The original wrapped this in a
        # redundant one-iteration `for _ in range(1)` loop.)
        for (x, y) in dataset:
            loss = self._pretrain_step(x, y)
            if (self.pretrain_iteration % log_iter) == 0:
                print(f'Pretrain Step: {self.pretrain_iteration}, Pretrain MSE Loss: {loss}')
                tf.summary.scalar('MSE Loss', loss, step=tf.cast(self.pretrain_iteration, tf.int64))
                writer.flush()
            self.pretrain_iteration += 1
Function that pretrains the generator slightly, to avoid local minima. Args: gen_model: A compiled generator model. dataset: A tf dataset object of low and high res images to pretrain over. writer: A summary writer object. Returns: None
model_training.py
pretrain_generator
duongtruongtrong/super_resolution_video_srgan
1
python
def pretrain_generator(self, dataset, writer, log_iter=200): 'Function that pretrains the generator slightly, to avoid local minima.\n Args:\n gen_model: A compiled generator model.\n dataset: A tf dataset object of low and high res images to pretrain over.\n writer: A summary writer object.\n Returns:\n None\n ' with writer.as_default(): for _ in range(1): for (x, y) in dataset: loss = self._pretrain_step(x, y) if ((self.pretrain_iteration % log_iter) == 0): print(f'Pretrain Step: {self.pretrain_iteration}, Pretrain MSE Loss: {loss}') tf.summary.scalar('MSE Loss', loss, step=tf.cast(self.pretrain_iteration, tf.int64)) writer.flush() self.pretrain_iteration += 1
def pretrain_generator(self, dataset, writer, log_iter=200): 'Function that pretrains the generator slightly, to avoid local minima.\n Args:\n gen_model: A compiled generator model.\n dataset: A tf dataset object of low and high res images to pretrain over.\n writer: A summary writer object.\n Returns:\n None\n ' with writer.as_default(): for _ in range(1): for (x, y) in dataset: loss = self._pretrain_step(x, y) if ((self.pretrain_iteration % log_iter) == 0): print(f'Pretrain Step: {self.pretrain_iteration}, Pretrain MSE Loss: {loss}') tf.summary.scalar('MSE Loss', loss, step=tf.cast(self.pretrain_iteration, tf.int64)) writer.flush() self.pretrain_iteration += 1<|docstring|>Function that pretrains the generator slightly, to avoid local minima. Args: gen_model: A compiled generator model. dataset: A tf dataset object of low and high res images to pretrain over. writer: A summary writer object. Returns: None<|endoftext|>
4dd45fcdb7a6bdd774e239b58b80697b2ae6736eee4bd4f19b1a3c41c40bbb58
@tf.function
def _train_step(self, x, y):
    """Single train step function for the SRGAN.

    Args:
        x: The low resolution input image batch.
        y: The desired high resolution output image batch.

    Returns:
        disc_loss: Total discriminator loss (real BCE + fake BCE).
        adv_loss: 0.001-weighted BCE between real labels and the
            discriminator's prediction on generated images.
        feat_loss: VGG19 feature-space MSE between y and the generated image.
        mse_loss: 0.1-weighted pixel MSE between y and the generated image.
    """
    # Patch-GAN targets: all-ones for real, all-zeros for fake.
    valid = tf.ones(((x.shape[0],) + self.disc_patch))
    fake = tf.zeros(((x.shape[0],) + self.disc_patch))
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        fake_hr = self.gen_model(x)
        valid_prediction = self.disc_model(y)
        fake_prediction = self.disc_model(fake_hr)
        # Generator (perceptual) loss = feature + adversarial + pixel terms.
        feat_loss = self._feature_loss(y, fake_hr)
        adv_loss = (0.001 * keras.losses.BinaryCrossentropy()(valid, fake_prediction))
        mse_loss = (0.1 * keras.losses.MeanSquaredError()(y, fake_hr))
        perceptual_loss = ((feat_loss + adv_loss) + mse_loss)
        # Discriminator loss: real images labelled valid, generated labelled fake.
        valid_loss = keras.losses.BinaryCrossentropy()(valid, valid_prediction)
        fake_loss = keras.losses.BinaryCrossentropy()(fake, fake_prediction)
        disc_loss = tf.add(valid_loss, fake_loss)
    # Apply generator and discriminator updates from their respective tapes.
    # NOTE(review): nesting reconstructed from a flattened one-line source;
    # the conventional placement (gradients after the tape context) is assumed.
    gen_grads = gen_tape.gradient(perceptual_loss, self.gen_model.trainable_variables)
    self.gen_optimizer.apply_gradients(zip(gen_grads, self.gen_model.trainable_variables))
    disc_grads = disc_tape.gradient(disc_loss, self.disc_model.trainable_variables)
    self.disc_optimizer.apply_gradients(zip(disc_grads, self.disc_model.trainable_variables))
    return (disc_loss, adv_loss, feat_loss, mse_loss)
Single train step function for the SRGAN. Args: gen_model: A compiled generator model. disc_model: A compiled discriminator model. x: The low resolution input image. y: The desired high resolution output image. Returns: disc_loss: The mean loss of the discriminator. adv_loss: The Binary Crossentropy loss between real label and predicted label. cont_loss: The Mean Square Error of VGG19 feature extracted original image (y) and VGG19 feature extracted generated image (y_hat). mse_loss: The Mean Square Error of original image (y) and generated image (y_hat).
model_training.py
_train_step
duongtruongtrong/super_resolution_video_srgan
1
python
@tf.function def _train_step(self, x, y): 'Single train step function for the SRGAN.\n Args:\n gen_model: A compiled generator model.\n disc_model: A compiled discriminator model.\n x: The low resolution input image.\n y: The desired high resolution output image.\n Returns:\n disc_loss: The mean loss of the discriminator.\n adv_loss: The Binary Crossentropy loss between real label and predicted label.\n cont_loss: The Mean Square Error of VGG19 feature extracted original image (y) and VGG19 feature extractedgenerated image (y_hat).\n mse_loss: The Mean Square Error of original image (y) and generated image (y_hat).\n ' valid = tf.ones(((x.shape[0],) + self.disc_patch)) fake = tf.zeros(((x.shape[0],) + self.disc_patch)) with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: fake_hr = self.gen_model(x) valid_prediction = self.disc_model(y) fake_prediction = self.disc_model(fake_hr) feat_loss = self._feature_loss(y, fake_hr) adv_loss = (0.001 * keras.losses.BinaryCrossentropy()(valid, fake_prediction)) mse_loss = (0.1 * keras.losses.MeanSquaredError()(y, fake_hr)) perceptual_loss = ((feat_loss + adv_loss) + mse_loss) valid_loss = keras.losses.BinaryCrossentropy()(valid, valid_prediction) fake_loss = keras.losses.BinaryCrossentropy()(fake, fake_prediction) disc_loss = tf.add(valid_loss, fake_loss) gen_grads = gen_tape.gradient(perceptual_loss, self.gen_model.trainable_variables) self.gen_optimizer.apply_gradients(zip(gen_grads, self.gen_model.trainable_variables)) disc_grads = disc_tape.gradient(disc_loss, self.disc_model.trainable_variables) self.disc_optimizer.apply_gradients(zip(disc_grads, self.disc_model.trainable_variables)) return (disc_loss, adv_loss, feat_loss, mse_loss)
@tf.function def _train_step(self, x, y): 'Single train step function for the SRGAN.\n Args:\n gen_model: A compiled generator model.\n disc_model: A compiled discriminator model.\n x: The low resolution input image.\n y: The desired high resolution output image.\n Returns:\n disc_loss: The mean loss of the discriminator.\n adv_loss: The Binary Crossentropy loss between real label and predicted label.\n cont_loss: The Mean Square Error of VGG19 feature extracted original image (y) and VGG19 feature extractedgenerated image (y_hat).\n mse_loss: The Mean Square Error of original image (y) and generated image (y_hat).\n ' valid = tf.ones(((x.shape[0],) + self.disc_patch)) fake = tf.zeros(((x.shape[0],) + self.disc_patch)) with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: fake_hr = self.gen_model(x) valid_prediction = self.disc_model(y) fake_prediction = self.disc_model(fake_hr) feat_loss = self._feature_loss(y, fake_hr) adv_loss = (0.001 * keras.losses.BinaryCrossentropy()(valid, fake_prediction)) mse_loss = (0.1 * keras.losses.MeanSquaredError()(y, fake_hr)) perceptual_loss = ((feat_loss + adv_loss) + mse_loss) valid_loss = keras.losses.BinaryCrossentropy()(valid, valid_prediction) fake_loss = keras.losses.BinaryCrossentropy()(fake, fake_prediction) disc_loss = tf.add(valid_loss, fake_loss) gen_grads = gen_tape.gradient(perceptual_loss, self.gen_model.trainable_variables) self.gen_optimizer.apply_gradients(zip(gen_grads, self.gen_model.trainable_variables)) disc_grads = disc_tape.gradient(disc_loss, self.disc_model.trainable_variables) self.disc_optimizer.apply_gradients(zip(disc_grads, self.disc_model.trainable_variables)) return (disc_loss, adv_loss, feat_loss, mse_loss)<|docstring|>Single train step function for the SRGAN. Args: gen_model: A compiled generator model. disc_model: A compiled discriminator model. x: The low resolution input image. y: The desired high resolution output image. Returns: disc_loss: The mean loss of the discriminator. 
adv_loss: The Binary Crossentropy loss between real label and predicted label. cont_loss: The Mean Square Error of VGG19 feature extracted original image (y) and VGG19 feature extractedgenerated image (y_hat). mse_loss: The Mean Square Error of original image (y) and generated image (y_hat).<|endoftext|>
568775c451c6633ccd412a0507d09f73caab8cf6dd8cd0fe1a0ffcf84bce2e39
def train(self, dataset, writer, log_iter=200):
    """Run adversarial SR-GAN training over every batch in `dataset`.

    Args:
        dataset: A tf data object that contains low and high res images.
        writer: Summary writer for TensorBoard.
        log_iter: Number of iterations after which to add logs in
            tensorboard.
    """
    # NOTE(review): the step counter `pretrain_iteration` is shared with
    # generator pretraining, so adversarial steps continue its numbering.
    with writer.as_default():
        for (x, y) in dataset:
            (disc_loss, adv_loss, feat_loss, mse_loss) = self._train_step(x, y)
            if ((self.pretrain_iteration % log_iter) == 0):
                print(f'Train Step: {self.pretrain_iteration}, Adversarial Loss: {adv_loss}, Feature Loss: {feat_loss}, MSE Loss: {mse_loss}, Discriminator Loss: {disc_loss}')
                tf.summary.scalar('Adversarial Loss', adv_loss, step=self.pretrain_iteration)
                tf.summary.scalar('Feature Loss', feat_loss, step=self.pretrain_iteration)
                tf.summary.scalar('MSE Loss', mse_loss, step=self.pretrain_iteration)
                tf.summary.scalar('Discriminator Loss', disc_loss, step=self.pretrain_iteration)
            if ((self.pretrain_iteration % (log_iter * 10)) == 0):
                # Low-res input is logged as-is from [0, 1]; HR and generated
                # images are rescaled from [-1, 1] before the uint8 cast.
                tf.summary.image('Low Res', tf.cast((255 * x), tf.uint8), step=self.pretrain_iteration)
                tf.summary.image('High Res', tf.cast(((255 * (y + 1.0)) / 2.0), tf.uint8), step=self.pretrain_iteration)
                tf.summary.image('Generated', tf.cast(((255 * (self.gen_model.predict(x) + 1.0)) / 2.0), tf.uint8), step=self.pretrain_iteration)
                # Checkpoint both models at the same cadence as image logging.
                # NOTE(review): nesting reconstructed from a flattened
                # one-line source — confirm against the original repository.
                self.gen_model.save(self.gen_model_save_path)
                self.disc_model.save(self.disc_model_save_path)
            writer.flush()
            self.pretrain_iteration += 1
Function that defines a single training step for the SR-GAN. Args: gen_model: A compiled generator model. disc_model: A compiled discriminator model. dataset: A tf data object that contains low and high res images. log_iter: Number of iterations after which to add logs in tensorboard. writer: Summary writer
model_training.py
train
duongtruongtrong/super_resolution_video_srgan
1
python
def train(self, dataset, writer, log_iter=200): '\n Function that defines a single training step for the SR-GAN.\n Args:\n gen_model: A compiled generator model.\n disc_model: A compiled discriminator model.\n dataset: A tf data object that contains low and high res images.\n log_iter: Number of iterations after which to add logs in \n tensorboard.\n writer: Summary writer\n ' with writer.as_default(): for (x, y) in dataset: (disc_loss, adv_loss, feat_loss, mse_loss) = self._train_step(x, y) if ((self.pretrain_iteration % log_iter) == 0): print(f'Train Step: {self.pretrain_iteration}, Adversarial Loss: {adv_loss}, Feature Loss: {feat_loss}, MSE Loss: {mse_loss}, Discriminator Loss: {disc_loss}') tf.summary.scalar('Adversarial Loss', adv_loss, step=self.pretrain_iteration) tf.summary.scalar('Feature Loss', feat_loss, step=self.pretrain_iteration) tf.summary.scalar('MSE Loss', mse_loss, step=self.pretrain_iteration) tf.summary.scalar('Discriminator Loss', disc_loss, step=self.pretrain_iteration) if ((self.pretrain_iteration % (log_iter * 10)) == 0): tf.summary.image('Low Res', tf.cast((255 * x), tf.uint8), step=self.pretrain_iteration) tf.summary.image('High Res', tf.cast(((255 * (y + 1.0)) / 2.0), tf.uint8), step=self.pretrain_iteration) tf.summary.image('Generated', tf.cast(((255 * (self.gen_model.predict(x) + 1.0)) / 2.0), tf.uint8), step=self.pretrain_iteration) self.gen_model.save(self.gen_model_save_path) self.disc_model.save(self.disc_model_save_path) writer.flush() self.pretrain_iteration += 1
def train(self, dataset, writer, log_iter=200): '\n Function that defines a single training step for the SR-GAN.\n Args:\n gen_model: A compiled generator model.\n disc_model: A compiled discriminator model.\n dataset: A tf data object that contains low and high res images.\n log_iter: Number of iterations after which to add logs in \n tensorboard.\n writer: Summary writer\n ' with writer.as_default(): for (x, y) in dataset: (disc_loss, adv_loss, feat_loss, mse_loss) = self._train_step(x, y) if ((self.pretrain_iteration % log_iter) == 0): print(f'Train Step: {self.pretrain_iteration}, Adversarial Loss: {adv_loss}, Feature Loss: {feat_loss}, MSE Loss: {mse_loss}, Discriminator Loss: {disc_loss}') tf.summary.scalar('Adversarial Loss', adv_loss, step=self.pretrain_iteration) tf.summary.scalar('Feature Loss', feat_loss, step=self.pretrain_iteration) tf.summary.scalar('MSE Loss', mse_loss, step=self.pretrain_iteration) tf.summary.scalar('Discriminator Loss', disc_loss, step=self.pretrain_iteration) if ((self.pretrain_iteration % (log_iter * 10)) == 0): tf.summary.image('Low Res', tf.cast((255 * x), tf.uint8), step=self.pretrain_iteration) tf.summary.image('High Res', tf.cast(((255 * (y + 1.0)) / 2.0), tf.uint8), step=self.pretrain_iteration) tf.summary.image('Generated', tf.cast(((255 * (self.gen_model.predict(x) + 1.0)) / 2.0), tf.uint8), step=self.pretrain_iteration) self.gen_model.save(self.gen_model_save_path) self.disc_model.save(self.disc_model_save_path) writer.flush() self.pretrain_iteration += 1<|docstring|>Function that defines a single training step for the SR-GAN. Args: gen_model: A compiled generator model. disc_model: A compiled discriminator model. dataset: A tf data object that contains low and high res images. log_iter: Number of iterations after which to add logs in tensorboard. writer: Summary writer<|endoftext|>
63820232f3d7249c037065fb83aed63e4ae7f51a648a6bf258a27744be7ac04b
def makecookie(userconfig, password, cookiepath):
    """Return the current valid cookie header for the values supplied in
    the userconfig, the straight password and the cookiepath.
    """
    from login import encodestring
    from Cookie import SimpleCookie
    cookie = SimpleCookie()
    # The cookie value is the encoded username/password pair.
    cookie['userid'] = encodestring(userconfig['username'], password)
    max_age = userconfig['max-age']
    # Only set max-age when it is present and non-zero.
    if max_age and int(max_age):
        cookie['userid']['max-age'] = int(max_age)
    if cookiepath:
        cookie['userid']['path'] = cookiepath
    return cookie.output()
Return the current valid cookie header for the values supplied in the userconfig, the straight password and the cookiepath.
modules/logintools/loginutils.py
makecookie
rknaebel/rstWeb
32
python
def makecookie(userconfig, password, cookiepath): '\n Return the current valid cookie heaader for the values supplied in the\n userconfig, the straight password and the cookiepath.\n ' from login import encodestring from Cookie import SimpleCookie thecookie = SimpleCookie() cookiestring = encodestring(userconfig['username'], password) maxage = userconfig['max-age'] thecookie['userid'] = cookiestring if (maxage and int(maxage)): thecookie['userid']['max-age'] = int(maxage) if cookiepath: thecookie['userid']['path'] = cookiepath return thecookie.output()
def makecookie(userconfig, password, cookiepath): '\n Return the current valid cookie heaader for the values supplied in the\n userconfig, the straight password and the cookiepath.\n ' from login import encodestring from Cookie import SimpleCookie thecookie = SimpleCookie() cookiestring = encodestring(userconfig['username'], password) maxage = userconfig['max-age'] thecookie['userid'] = cookiestring if (maxage and int(maxage)): thecookie['userid']['max-age'] = int(maxage) if cookiepath: thecookie['userid']['path'] = cookiepath return thecookie.output()<|docstring|>Return the current valid cookie heaader for the values supplied in the userconfig, the straight password and the cookiepath.<|endoftext|>
8d4718b2cf2197c6e4950758dfc3e8b1be809d63bd2344f46d299cbeead76dee
def emptycookie(cookiepath=None):
    """Return an empty 'userid' cookie with max-age 0.

    Used for logout features.
    """
    from Cookie import SimpleCookie
    cookie = SimpleCookie()
    # Empty value plus max-age 0 instructs the browser to discard the cookie.
    cookie['userid'] = ''
    cookie['userid']['max-age'] = 0
    if cookiepath:
        cookie['userid']['path'] = cookiepath
    return cookie.output()
Return an empty cookie with max-age 0. Used for logout features.
modules/logintools/loginutils.py
emptycookie
rknaebel/rstWeb
32
python
def emptycookie(cookiepath=None): 'Return an empty cookie with max-age 0.\n    Used for logout features.\n    ' from Cookie import SimpleCookie thecookie = SimpleCookie() thecookie['userid'] = '' thecookie['userid']['max-age'] = 0 if cookiepath: thecookie['userid']['path'] = cookiepath return thecookie.output()
def emptycookie(cookiepath=None): 'Return an empty cookie with max-age 0.\n    Used for logout features.\n    ' from Cookie import SimpleCookie thecookie = SimpleCookie() thecookie['userid'] = '' thecookie['userid']['max-age'] = 0 if cookiepath: thecookie['userid']['path'] = cookiepath return thecookie.output()<|docstring|>Return an empty cookie with max-age 0. Used for logout features.<|endoftext|>
be7b55f2292a460d06da2378fb89a629a2217b4d2d72ae66fb34e83ef746fdb7
def createuser(userdir, realname, username, email, password, adminlev):
    """Create a new user .ini file in `userdir`, based on the default template."""
    from time import time
    from modules.dataenc import pass_enc
    from modules.configobj import ConfigObj
    # Start from the default template, then save under the user's own name.
    new_user = ConfigObj(userdir + 'default.ini')
    new_user.filename = userdir + username + '.ini'
    new_user['username'] = username
    new_user['realname'] = realname
    new_user['email'] = email
    new_user['admin'] = adminlev
    # Store the password encrypted, stamped with time and day number.
    new_user['password'] = pass_enc(password, timestamp=True, daynumber=True)
    new_user['created'] = str(time())
    new_user.write()
Create a new user.
modules/logintools/loginutils.py
createuser
rknaebel/rstWeb
32
python
def createuser(userdir, realname, username, email, password, adminlev): from time import time from modules.dataenc import pass_enc from modules.configobj import ConfigObj user = ConfigObj((userdir + 'default.ini')) user.filename = ((userdir + username) + '.ini') user['username'] = username user['realname'] = realname user['email'] = email user['admin'] = adminlev user['password'] = pass_enc(password, timestamp=True, daynumber=True) user['created'] = str(time()) user.write()
def createuser(userdir, realname, username, email, password, adminlev): from time import time from modules.dataenc import pass_enc from modules.configobj import ConfigObj user = ConfigObj((userdir + 'default.ini')) user.filename = ((userdir + username) + '.ini') user['username'] = username user['realname'] = realname user['email'] = email user['admin'] = adminlev user['password'] = pass_enc(password, timestamp=True, daynumber=True) user['created'] = str(time()) user.write()<|docstring|>Create a new user.<|endoftext|>
2c81258e3d931c4cbb0aae2f99efa1be3c9254a781025eafe49ec22217b914bb
def __init__(self, json_block):
    """Constructor.

    :param json_block: content of a "FILTER" key
    :type json_block: Dict
    """
    # The name is optional in the JSON block; fall back to a placeholder.
    self.name = json_block.get('@name', 'NoName')
    self.value = json_block['@value']
    self.key = json_block['@ref']
    # Column index is resolved later by map_col_number(); -1 means unmapped.
    self.col_number = -1
Constructor :param json_block: content of a "FILTER" key :type json_block: Dict
python/client/inst_builder/row_filter.py
__init__
loumir/modelinstanceinvot-code
0
python
def __init__(self, json_block): '\n Constructor\n :param json_block: content of a "FILTER" key\n :type json_block: Dict\n ' if ('@name' in json_block.keys()): self.name = json_block['@name'] else: self.name = 'NoName' self.value = json_block['@value'] self.key = json_block['@ref'] self.col_number = (- 1)
def __init__(self, json_block): '\n Constructor\n :param json_block: content of a "FILTER" key\n :type json_block: Dict\n ' if ('@name' in json_block.keys()): self.name = json_block['@name'] else: self.name = 'NoName' self.value = json_block['@value'] self.key = json_block['@ref'] self.col_number = (- 1)<|docstring|>Constructor :param json_block: content of a "FILTER" key :type json_block: Dict<|endoftext|>
928d13a53221e92b6eecd0922f53e6349e8f2d66deb9001834d9e42491fd28a2
def map_col_number(self, column_mapping):
    """Resolve and store the index of the column the filter operates on.

    :param column_mapping: needed to bind self.key with column index
    :type column_mapping: ColumnMapping instance
    """
    # Delegate the name-to-index lookup to the mapping object.
    index = column_mapping.get_col_index_by_name(self.key)
    self.col_number = index
Set the number of the column to be used by the filter :param column_mapping: needed to bind self.key with column index :type column_mapping: ColumnMapping instance
python/client/inst_builder/row_filter.py
map_col_number
loumir/modelinstanceinvot-code
0
python
def map_col_number(self, column_mapping): '\n Set the number of the column to be used by the filter\n :param column_mapping: needed to bind self.key with column index\n :type column_mapping: ColumnMapping instance\n ' self.col_number = column_mapping.get_col_index_by_name(self.key)
def map_col_number(self, column_mapping): '\n Set the number of the column to be used by the filter\n :param column_mapping: needed to bind self.key with column index\n :type column_mapping: ColumnMapping instance\n ' self.col_number = column_mapping.get_col_index_by_name(self.key)<|docstring|>Set the number of the column to be used by the filter :param column_mapping: needed to bind self.key with column index :type column_mapping: ColumnMapping instance<|endoftext|>
c4e1ab8a1388ac535f4874c518e9510ece98cb30bfc7aa4aa2bc026398440bcd
def row_match(self, row):
    """Tell whether a data row matches the filtering condition.

    :param row: data row
    :type row: numpy table row
    :return: True if the value in the filter's column equals the filter
        value (both compared as strings)
    :rtype: boolean
    """
    # Compare as strings so numeric cells and string filter values still
    # match; collapses the original if/return-True/return-False pattern.
    return str(row[self.col_number]) == str(self.value)
:param row: data row :type row: numpy table row :return: true if the data row matches the filtering condition :rtype: boolean
python/client/inst_builder/row_filter.py
row_match
loumir/modelinstanceinvot-code
0
python
def row_match(self, row): '\n :param row: data row\n :type row: numpy table row\n :return: true if the data row matches the filtering condition\n :rtype: boolean\n ' if (str(row[self.col_number]) == str(self.value)): return True return False
def row_match(self, row): '\n :param row: data row\n :type row: numpy table row\n :return: true if the data row matches the filtering condition\n :rtype: boolean\n ' if (str(row[self.col_number]) == str(self.value)): return True return False<|docstring|>:param row: data row :type row: numpy table row :return: true if the data row matches the filtering condition :rtype: boolean<|endoftext|>
04f5bac4a2ddc38c78dec5417d4c97abe35bc528f16e2a8fa76bf62df4599e7e
def shutting_down(globals=globals):
    """Whether the interpreter is currently shutting down.

    For use in finalizers, __del__ methods, and similar; it is advised
    to early bind this function rather than look it up when calling it,
    since at shutdown module globals may be cleared.
    """
    # The flag may have been cleared to None during teardown, which also
    # counts as shutting down; only an explicit True otherwise matches.
    flag = globals().get('_shutting_down')
    return flag is None or flag is True
Whether the interpreter is currently shutting down. For use in finalizers, __del__ methods, and similar; it is advised to early bind this function rather than look it up when calling it, since at shutdown module globals may be cleared.
numba/utils.py
shutting_down
meawoppl/numba
1
python
def shutting_down(globals=globals): '\n Whether the interpreter is currently shutting down.\n For use in finalizers, __del__ methods, and similar; it is advised\n to early bind this function rather than look it up when calling it,\n since at shutdown module globals may be cleared.\n ' v = globals().get('_shutting_down') return ((v is True) or (v is None))
def shutting_down(globals=globals): '\n Whether the interpreter is currently shutting down.\n For use in finalizers, __del__ methods, and similar; it is advised\n to early bind this function rather than look it up when calling it,\n since at shutdown module globals may be cleared.\n ' v = globals().get('_shutting_down') return ((v is True) or (v is None))<|docstring|>Whether the interpreter is currently shutting down. For use in finalizers, __del__ methods, and similar; it is advised to early bind this function rather than look it up when calling it, since at shutdown module globals may be cleared.<|endoftext|>
14b82f17c770d13a9ec93479adeaa99c2010869c1ba271875b7a65da90e1f767
def bit_length(intval):
    """Return the number of bits necessary to represent integer `intval`."""
    assert isinstance(intval, INT_TYPES)
    # bin() yields '0b...' for the magnitude; dropping the two-character
    # prefix leaves exactly the significant bits.
    binary = bin(abs(intval))
    return len(binary) - 2
Return the number of bits necessary to represent integer `intval`.
numba/utils.py
bit_length
meawoppl/numba
1
python
def bit_length(intval): '\n \n ' assert isinstance(intval, INT_TYPES) return (len(bin(abs(intval))) - 2)
def bit_length(intval): '\n \n ' assert isinstance(intval, INT_TYPES) return (len(bin(abs(intval))) - 2)<|docstring|>Return the number of bits necessary to represent integer `intval`.<|endoftext|>
ed5b39b7a960f8889ec38594ba71366e4d67e7c05b8a9801963fe101cfbbc21d
def _is_inherited_from_object(cls, op):
    """Whether operator *op* on *cls* is inherited from the root object type."""
    if PYVERSION < (3,):
        # Python 2: plain classes simply don't list inherited rich-comparison
        # slots, so absence from dir() means "inherited".
        return op not in dir(cls)
    # Python 3: the method is inherited iff the class attribute is the very
    # same object as the one defined on `object`.
    return getattr(cls, op) is getattr(object, op)
Whether operator *op* on *cls* is inherited from the root object type.
numba/utils.py
_is_inherited_from_object
meawoppl/numba
1
python
def _is_inherited_from_object(cls, op): '\n \n ' if (PYVERSION >= (3,)): object_op = getattr(object, op) cls_op = getattr(cls, op) return (object_op is cls_op) else: return (op not in dir(cls))
def _is_inherited_from_object(cls, op): '\n \n ' if (PYVERSION >= (3,)): object_op = getattr(object, op) cls_op = getattr(cls, op) return (object_op is cls_op) else: return (op not in dir(cls))<|docstring|>Whether operator *op* on *cls* is inherited from the root object type.<|endoftext|>
686618bcba0ea962d7b610f9c770eed9d5cc12847560492ad953104cb9b0709a
def total_ordering(cls): 'Class decorator that fills in missing ordering methods' convert = {'__lt__': [('__gt__', (lambda self, other: _not_op_and_not_eq(self.__lt__, self, other))), ('__le__', (lambda self, other: _op_or_eq(self.__lt__, self, other))), ('__ge__', (lambda self, other: _not_op(self.__lt__, other)))], '__le__': [('__ge__', (lambda self, other: _not_op_or_eq(self.__le__, self, other))), ('__lt__', (lambda self, other: _op_and_not_eq(self.__le__, self, other))), ('__gt__', (lambda self, other: _not_op(self.__le__, other)))], '__gt__': [('__lt__', (lambda self, other: _not_op_and_not_eq(self.__gt__, self, other))), ('__ge__', (lambda self, other: _op_or_eq(self.__gt__, self, other))), ('__le__', (lambda self, other: _not_op(self.__gt__, other)))], '__ge__': [('__le__', (lambda self, other: _not_op_or_eq(self.__ge__, self, other))), ('__gt__', (lambda self, other: _op_and_not_eq(self.__ge__, self, other))), ('__lt__', (lambda self, other: _not_op(self.__ge__, other)))]} roots = [op for op in convert if (not _is_inherited_from_object(cls, op))] if (not roots): raise ValueError('must define at least one ordering operation: < > <= >=') root = max(roots) for (opname, opfunc) in convert[root]: if (opname not in roots): opfunc.__name__ = opname opfunc.__doc__ = getattr(int, opname).__doc__ setattr(cls, opname, opfunc) return cls
Class decorator that fills in missing ordering methods
numba/utils.py
total_ordering
meawoppl/numba
1
python
def total_ordering(cls): convert = {'__lt__': [('__gt__', (lambda self, other: _not_op_and_not_eq(self.__lt__, self, other))), ('__le__', (lambda self, other: _op_or_eq(self.__lt__, self, other))), ('__ge__', (lambda self, other: _not_op(self.__lt__, other)))], '__le__': [('__ge__', (lambda self, other: _not_op_or_eq(self.__le__, self, other))), ('__lt__', (lambda self, other: _op_and_not_eq(self.__le__, self, other))), ('__gt__', (lambda self, other: _not_op(self.__le__, other)))], '__gt__': [('__lt__', (lambda self, other: _not_op_and_not_eq(self.__gt__, self, other))), ('__ge__', (lambda self, other: _op_or_eq(self.__gt__, self, other))), ('__le__', (lambda self, other: _not_op(self.__gt__, other)))], '__ge__': [('__le__', (lambda self, other: _not_op_or_eq(self.__ge__, self, other))), ('__gt__', (lambda self, other: _op_and_not_eq(self.__ge__, self, other))), ('__lt__', (lambda self, other: _not_op(self.__ge__, other)))]} roots = [op for op in convert if (not _is_inherited_from_object(cls, op))] if (not roots): raise ValueError('must define at least one ordering operation: < > <= >=') root = max(roots) for (opname, opfunc) in convert[root]: if (opname not in roots): opfunc.__name__ = opname opfunc.__doc__ = getattr(int, opname).__doc__ setattr(cls, opname, opfunc) return cls
def total_ordering(cls): convert = {'__lt__': [('__gt__', (lambda self, other: _not_op_and_not_eq(self.__lt__, self, other))), ('__le__', (lambda self, other: _op_or_eq(self.__lt__, self, other))), ('__ge__', (lambda self, other: _not_op(self.__lt__, other)))], '__le__': [('__ge__', (lambda self, other: _not_op_or_eq(self.__le__, self, other))), ('__lt__', (lambda self, other: _op_and_not_eq(self.__le__, self, other))), ('__gt__', (lambda self, other: _not_op(self.__le__, other)))], '__gt__': [('__lt__', (lambda self, other: _not_op_and_not_eq(self.__gt__, self, other))), ('__ge__', (lambda self, other: _op_or_eq(self.__gt__, self, other))), ('__le__', (lambda self, other: _not_op(self.__gt__, other)))], '__ge__': [('__le__', (lambda self, other: _not_op_or_eq(self.__ge__, self, other))), ('__gt__', (lambda self, other: _op_and_not_eq(self.__ge__, self, other))), ('__lt__', (lambda self, other: _not_op(self.__ge__, other)))]} roots = [op for op in convert if (not _is_inherited_from_object(cls, op))] if (not roots): raise ValueError('must define at least one ordering operation: < > <= >=') root = max(roots) for (opname, opfunc) in convert[root]: if (opname not in roots): opfunc.__name__ = opname opfunc.__doc__ = getattr(int, opname).__doc__ setattr(cls, opname, opfunc) return cls<|docstring|>Class decorator that fills in missing ordering methods<|endoftext|>
d0a16f0d2798bd48f6512e55ebbb8b8a845c714a0194ce396cd700e7d50fe9b8
def __call__(self, _=None): 'If alive then mark as dead and return func(*args, **kwargs);\n otherwise return None' info = self._registry.pop(self, None) if (info and (not self._shutdown)): return info.func(*info.args, **(info.kwargs or {}))
If alive then mark as dead and return func(*args, **kwargs); otherwise return None
numba/utils.py
__call__
meawoppl/numba
1
python
def __call__(self, _=None): 'If alive then mark as dead and return func(*args, **kwargs);\n otherwise return None' info = self._registry.pop(self, None) if (info and (not self._shutdown)): return info.func(*info.args, **(info.kwargs or {}))
def __call__(self, _=None): 'If alive then mark as dead and return func(*args, **kwargs);\n otherwise return None' info = self._registry.pop(self, None) if (info and (not self._shutdown)): return info.func(*info.args, **(info.kwargs or {}))<|docstring|>If alive then mark as dead and return func(*args, **kwargs); otherwise return None<|endoftext|>
7ce071e866424377ad7d61781eec2693a0e77faed9df255e4270996c94851efe
def detach(self): 'If alive then mark as dead and return (obj, func, args, kwargs);\n otherwise return None' info = self._registry.get(self) obj = (info and info.weakref()) if ((obj is not None) and self._registry.pop(self, None)): return (obj, info.func, info.args, (info.kwargs or {}))
If alive then mark as dead and return (obj, func, args, kwargs); otherwise return None
numba/utils.py
detach
meawoppl/numba
1
python
def detach(self): 'If alive then mark as dead and return (obj, func, args, kwargs);\n otherwise return None' info = self._registry.get(self) obj = (info and info.weakref()) if ((obj is not None) and self._registry.pop(self, None)): return (obj, info.func, info.args, (info.kwargs or {}))
def detach(self): 'If alive then mark as dead and return (obj, func, args, kwargs);\n otherwise return None' info = self._registry.get(self) obj = (info and info.weakref()) if ((obj is not None) and self._registry.pop(self, None)): return (obj, info.func, info.args, (info.kwargs or {}))<|docstring|>If alive then mark as dead and return (obj, func, args, kwargs); otherwise return None<|endoftext|>