index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
39,078,728
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/engine/camera.py
|
from .transform import Transform
class Camera:
    """Perspective/orthographic camera with a transform, FOV and clip planes."""

    ORTOGRAPHIC = 1
    PERSPECTIVE = 2

    def __init__(self):
        # Defaults: perspective projection, 60-degree FOV, standard clip planes.
        self.mode = Camera.PERSPECTIVE
        self.fov = 60
        self.near = 0.01
        self.far = 1000
        self.transform = Transform()

    @staticmethod
    def parse(data):
        """Build a Camera from a parsed scene dictionary; missing keys keep defaults."""
        camera = Camera()
        if 'mode' in data:
            # Anything other than the literal "perspective" selects orthographic.
            if data['mode'] == "perspective":
                camera.mode = Camera.PERSPECTIVE
            else:
                camera.mode = Camera.ORTOGRAPHIC
        if 'fov' in data:
            camera.fov = int(data['fov'])
        if 'near' in data:
            camera.near = float(data['near'])
        if 'far' in data:
            camera.far = float(data['far'])
        if 'transform' in data:
            camera.transform.parse(data['transform'])
        return camera
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,729
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/ray.py
|
class Ray:
    """A ray through two points: origin at p1, unit direction from p1 towards p2."""

    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
        self.origin = p1
        # Direction is the normalized vector pointing from p1 to p2.
        self.direction = p2.minus(p1).normalized()

    def __str__(self):
        return "( {} , {} )".format(self.origin, self.direction)
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,730
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raymarching/sdfcube.py
|
from miru.engine.transform import Transform
from miru.engine.color import Color
from miru.engine.vector import Vector3
class SDFCube:
    """Axis-aligned box for SDF raymarching, defined by a size vector and a transform."""

    def __init__(self, size):
        self.size = size
        self.transform = Transform()
        self.color = Color.random()

    def pre_render(self):
        # No per-frame preparation is needed for a static box.
        pass

    def distance(self, position):
        """Distance from `position` to the box: max of the per-axis distances
        to the box faces (negative inside the box)."""
        center = self.transform.position
        half_x = self.size.x / 2.0
        half_y = self.size.y / 2.0
        half_z = self.size.z / 2.0
        dx = max(position.x - center.x - half_x, center.x - position.x - half_x)
        dy = max(position.y - center.y - half_y, center.y - position.y - half_y)
        dz = max(position.z - center.z - half_z, center.z - position.z - half_z)
        return max(dx, dy, dz)

    def render(self, scene, position):
        return self.color

    @staticmethod
    def parse(data):
        """Build an SDFCube from a parsed scene dictionary.

        NOTE(review): without a 'size' entry this returns None, and a
        'transform'/'color' entry would then raise AttributeError — confirm
        scene files always carry 'size'.
        """
        cube = None
        if 'size' in data:
            size = data['size']
            cube = SDFCube(Vector3(size[0], size[1], size[2]))
        if 'transform' in data:
            cube.transform.parse(data['transform'])
        if 'color' in data:
            color = data['color']
            cube.color = Color(color[0], color[1], color[2], color[3])
        return cube
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,731
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/quadmesh.py
|
from vector import Vector2
def create(mesh, vector1, vector2, initial_position):
    """Append a quad spanned by two edge vectors from initial_position to `mesh`.

    Adds the quad's four corner vertices and its triangles in both
    windings (indices (0,1,3)/(1,3,2) and their reversals), so the quad
    is visible from either side.
    """
    base = len(mesh.vertices)
    mesh.vertices.append(initial_position)
    mesh.vertices.append(initial_position.add(vector1))
    mesh.vertices.append(initial_position.add(vector1).add(vector2))
    mesh.vertices.append(initial_position.add(vector2))
    # NOTE(review): this replaces any UVs already on the mesh, so earlier
    # vertices lose their coordinates — confirm the mesh is always empty here.
    mesh.uvs = [Vector2(0, 0), Vector2(1, 0), Vector2(1, 1), Vector2(0, 1)]
    mesh.add_triangle(base + 0, base + 1, base + 3)
    mesh.add_triangle(base + 1, base + 3, base + 2)
    mesh.add_triangle(base + 0, base + 3, base + 1)
    mesh.add_triangle(base + 1, base + 2, base + 3)
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,732
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raymarching/scene.py
|
from PIL import Image
import numpy as np
import os
from miru.engine.vector import Vector3
from miru.engine.color import Color
from miru.engine.camera import Camera
from miru.engine.sceneparser import SceneParser
from miru.engine.material import Material
from miru.raymarching.sdfobjects import SDFCube, SDFSphere
# Python 2 compatibility: rebind `range` to the lazy `xrange` iterator.
# On Python 3 `xrange` does not exist, the NameError is swallowed and the
# built-in (already lazy) `range` is used unchanged.
try:
    range = xrange
except NameError:
    pass
class RenderData:
    """Mutable container for an in-progress render target.

    Holds the PIL image, its dimensions in pixels and the writable
    pixel-access object obtained from Image.load().
    """

    def __init__(self):
        self.img = None        # PIL Image, assigned by Scene.render
        self.pixel_width = 0   # image width in pixels
        self.pixel_height = 0  # image height in pixels
        self.pixels = []       # pixel-access map once img exists
class Scene:
    """Raymarched scene: SDF objects, a camera, a light and post-effects.

    Rendering marches a ray from the camera through every pixel of a
    (possibly supersampled) image, shades hits, applies post-processing
    effects and finally downsamples for SSAA when enabled.
    """

    def __init__(self):
        self.objects = []
        self.post_processing_effects = []
        self.background_color = Color(0.0, 0.0, 0.0, 1.0)
        self.light = None
        self.camera = None  # must be provided via set_camera() before render()
        self.ssaa_level = 1
        # Raymarching tuning: hit threshold and step budget per ray.
        self.min_marching_distance = 0.06
        self.max_marching_steps = 40
        self.render_height = 50
        self.render_width = 50
        self.target_image_file = "test.jpg"

    def add_objects(self, obj):
        self.objects.append(obj)

    def add_post_processing(self, effect):
        self.post_processing_effects.append(effect)

    def set_ssaa(self, level):
        """Set the supersampling factor (1 = off)."""
        self.ssaa_level = level

    def set_camera(self, camera):
        self.camera = camera

    def set_light(self, light_obj):
        self.light = light_obj

    def get_light(self):
        return self.light

    def raymarching(self, position, view_direction):
        """Sphere-trace from `position` along `view_direction`.

        Returns the color of the first surface hit (any object closer
        than min_marching_distance), or the background color when no
        surface is reached within max_marching_steps.
        """
        pixel_color = self.background_color
        for _ in range(0, self.max_marching_steps):
            # BUG FIX: the nearest distance is now reset on every step.
            # Previously it was tracked across the whole march, so the step
            # size could only shrink and never recover after passing close
            # to (but missing) an object.
            nearest = None
            hit_object = None
            for obj in self.objects:
                distance = obj.distance(position)
                if distance < self.min_marching_distance:
                    hit_object = obj
                    break
                if nearest is None or distance < nearest:
                    nearest = distance
            if hit_object is not None:
                pixel_color = hit_object.render(self, {"hit_point": position})
                break
            if nearest is None:
                break  # no objects in the scene: keep the background color
            # It is safe to advance by the distance to the nearest surface.
            position = position.add(view_direction.multiply(nearest))
        return pixel_color

    def render(self, pixel_height=-1, pixel_width=-1, image_file=""):
        """Render the scene and save it to `image_file` via PIL.

        Non-positive pixel_height/pixel_width fall back to the scene's
        render_height/render_width. An empty image_file falls back to
        self.target_image_file; if that is also empty, the pixel array is
        returned instead of being written to disk.
        """
        target_pixel_height = pixel_height if pixel_height > 0 else self.render_height
        target_pixel_width = pixel_width if pixel_width > 0 else self.render_width
        if image_file == "":
            image_file = self.target_image_file

        # Supersampled working resolution.
        # BUG FIX: the RenderData dimensions were previously computed from
        # the raw parameters (which default to -1) instead of the resolved
        # target sizes, which broke post-processing bounds checks and SSAA
        # downsampling whenever render() was called with default sizes.
        ssaa_width = self.ssaa_level * target_pixel_width
        ssaa_height = self.ssaa_level * target_pixel_height
        ssaa_render_data = RenderData()
        ssaa_render_data.pixel_width = ssaa_width
        ssaa_render_data.pixel_height = ssaa_height

        # Near-plane geometry derived from the camera's vertical FOV.
        nearplane_pos = self.camera.transform.position.add(
            self.camera.transform.forward.multiply(self.camera.near))
        height_size = 2 * self.camera.near * np.tan(self.camera.fov * 0.5 * np.pi / 180)
        width_size = (ssaa_width * 1.0 / ssaa_height) * height_size

        # PIL accesses images in Cartesian coordinates: Image[column, row].
        ssaa_render_data.img = Image.new('RGB', (ssaa_width, ssaa_height), "black")
        ssaa_render_data.pixels = ssaa_render_data.img.load()

        for obj in self.objects:
            obj.pre_render()

        for x in range(0, ssaa_width):
            for y in range(0, ssaa_height):
                # World position of the target pixel on the near plane.
                right_offset = self.camera.transform.right.multiply(
                    (x - 0.5 * ssaa_width) / ssaa_width * width_size)
                up_offset = self.camera.transform.up.multiply(
                    (-1) * (y - 0.5 * ssaa_height) / ssaa_height * height_size)
                pixel_pos = right_offset.add(up_offset).add(nearplane_pos)
                view_direction = pixel_pos.minus(self.camera.transform.position).normalized()
                pixel_color = self.raymarching(pixel_pos, view_direction)
                ssaa_render_data.pixels[x, y] = pixel_color.to_tuple(3)

        for post_fx in self.post_processing_effects:
            post_fx.apply_effect(ssaa_render_data)

        if self.ssaa_level > 1:
            render_data = self._downsample_ssaa(ssaa_render_data,
                                                target_pixel_width, target_pixel_height)
        else:
            render_data = ssaa_render_data

        if image_file == '':
            # Only reachable when self.target_image_file is also empty.
            # BUG FIX: was np.ndarray(render_data.pixels), which is not a
            # conversion constructor and would raise; np.array on the PIL
            # image yields the pixel array.
            return np.array(render_data.img)
        render_data.img.save(image_file)

    def _downsample_ssaa(self, ssaa_render_data, target_pixel_width, target_pixel_height):
        """Box-average the supersampled image down to the target resolution."""
        render_data = RenderData()
        render_data.pixel_height = target_pixel_height
        render_data.pixel_width = target_pixel_width
        render_data.img = Image.new('RGB', (target_pixel_width, target_pixel_height), "black")
        render_data.pixels = render_data.img.load()
        half_ssaa_level = 0.5 * self.ssaa_level
        for x in range(0, target_pixel_width):
            for y in range(0, target_pixel_height):
                colors = [0, 0, 0]
                for c in range(0, 3):
                    sum_values = 0
                    total_values = 0
                    for fx in range(0, self.ssaa_level):
                        for fy in range(0, self.ssaa_level):
                            ix = int(x * self.ssaa_level + fx - half_ssaa_level)
                            iy = int(y * self.ssaa_level + fy - half_ssaa_level)
                            if 0 <= ix < ssaa_render_data.pixel_width and \
                               0 <= iy < ssaa_render_data.pixel_height:
                                sum_values += ssaa_render_data.pixels[ix, iy][c]
                                total_values += 1
                    colors[c] = int(sum_values * 1.0 / total_values)
                render_data.pixels[x, y] = tuple(colors)
        return render_data
if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1:
        target_scene_file = sys.argv[1]
        print('Parsing file %s' % (target_scene_file))
        # Map scene-file object names to their parser callbacks.
        object_parsers = {
            "cube": SDFCube.parse,
            "sphere": SDFSphere.parse,
            "camera": Camera.parse,
        }
        target_scene = Scene()
        parser = SceneParser(object_parsers)
        parser.parse(target_scene_file, target_scene)
    # NOTE(review): with no CLI arguments target_scene is never created and
    # the render() calls below raise NameError — confirm a scene file is a
    # required argument.
    if len(sys.argv) > 2:
        target_image_file = sys.argv[2]
        target_scene.render(image_file=target_image_file)
    else:
        target_scene.render()
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,733
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/test.py
|
from scene import Scene
from camera import Camera
from sphere import Sphere
from plane import Plane
from cube import Cube
from vector import Vector3
from mesh import Mesh
import quadmesh
from light import Light
import random
import os
from color import Color
from meanfilter import MeanFilter
from kernelfilter import KernelFilter
import numpy as np
# Smoke-test script: renders a scene of randomly placed spheres at several
# supersampling levels and writes each result as a JPEG image.

try:
    range = xrange  # Python 2: use the lazy iterator
except NameError:
    pass  # Python 3: range is already lazy

total_sphere = random.randint(1, 15)

scene_test = Scene()

# Single white light above and behind the camera.
light = Light(Color(1.0, 1.0, 1.0, 1.0), 1.0)
light.transform.position = Vector3(0.0, 2.0, -2.0)
scene_test.set_light(light)

# Populate the scene with randomly sized, placed and colored spheres.
for _ in range(0, total_sphere):
    sphere = Sphere(random.randint(10, 200) / 100.0)
    sphere.transform.position = Vector3(random.randint(-3, 3),
                                        random.randint(-3, 3),
                                        random.randint(2, 10))
    sphere.albedo = Color(float(random.randint(20, 255)) * 1.0 / 255.0,
                          float(random.randint(20, 255)) * 1.0 / 255.0,
                          float(random.randint(20, 255)) * 1.0 / 255.0,
                          1.0)
    print("Sphere got color " + str(sphere.albedo))
    scene_test.add_objects(sphere)

camera = Camera()
camera.fov = 90
scene_test.set_ssaa(1)
scene_test.set_camera(camera)

# Example convolution kernels for the (currently unused) post-processing
# filters; enable via scene_test.add_post_processing(KernelFilter(...)).
blur_kernel = np.matrix([[0.0625, 0.125, 0.0625],
                         [0.125, 0.25, 0.125],
                         [0.0625, 0.125, 0.0625]])
sharpen_kernel = np.matrix([[0, -1, 0],
                            [-1, 5, -1],
                            [0, -1, 0]])
unsharp_kernel = (-1.0 / 256) * np.matrix([[1, 4, 6, 4, 1],
                                           [4, 16, 24, 16, 4],
                                           [6, 24, -476, 24, 6],
                                           [4, 16, 24, 16, 4],
                                           [1, 4, 6, 4, 1]])

# Prefer the Android shared-storage folder when it exists.
if os.path.exists("/sdcard/Raytracing/"):
    render_image = "/sdcard/Raytracing/test"
else:
    render_image = 'test'
render_extension = '.jpg'
render_sizex = 200
render_sizey = 200

# Render the same scene at SSAA levels 1-4 for comparison.
scene_test.render(render_sizex, render_sizey, render_image + render_extension)
for ssaa_level in (2, 3, 4):
    scene_test.set_ssaa(ssaa_level)
    scene_test.render(render_sizex, render_sizey,
                      render_image + '_ssaa' + str(ssaa_level) + render_extension)
print('Scene rasterized in image path: %s' % (render_image,))
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,734
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/test_video.py
|
import imageio
from scene import Scene
from camera import Camera
from sphere import Sphere
from plane import Plane
from cube import Cube
from vector import Vector3
from mesh import Mesh
import quadmesh
from light import Light
import random
import os
from color import Color
from meanfilter import MeanFilter
from kernelfilter import KernelFilter
from material import Material
from shader_lambertiantint import LambertianTintShader
from texture import Texture
import numpy as np
import datetime
# Python 2 compatibility: rebind `range` to the lazy `xrange` iterator.
# On Python 3 `xrange` does not exist, the NameError is swallowed and the
# built-in (already lazy) `range` is used unchanged.
try:
    range = xrange
except NameError:
    pass
def render_random_spheres(scene):
    """Fill `scene` with random spheres and a light; return an animation callback.

    The returned callback(t) moves the light along an ellipse over time.
    """
    total_sphere = random.randint(1, 15)

    light = Light(Color(1.0, 1.0, 1.0, 1.0), 1.0)
    light.transform.position = Vector3(0.0, 2.0, -2.0)
    scene.set_light(light)

    for _ in range(0, total_sphere):
        sphere = Sphere(random.randint(10, 200) / 100.0)
        sphere.transform.position = Vector3(random.randint(-3, 3),
                                            random.randint(-3, 3),
                                            random.randint(2, 10))
        sphere.albedo = Color(float(random.randint(20, 255)) * 1.0 / 255.0,
                              float(random.randint(20, 255)) * 1.0 / 255.0,
                              float(random.randint(20, 255)) * 1.0 / 255.0,
                              1.0)
        print("Sphere got color " + str(sphere.albedo))
        scene.add_objects(sphere)

    # Ellipse axes and angular speed for the light animation.
    v1 = Vector3(8.0, 0.0, -1.0)
    v2 = Vector3(0.0, 8.0, -3.0)
    animation_velocity = 0.5

    def update_method(t):
        angle = animation_velocity * t * np.pi
        position = Vector3(0.0, 2.0, -2.0)
        position = position.add(v1.multiply(np.cos(angle)))
        position = position.add(v2.multiply(np.sin(angle)))
        light.transform.position = position

    return update_method
def render_moon(scene):
    """Set up an earth/moon sphere pair in `scene`; return an animation callback.

    The returned callback(t) orbits the moon around the earth and slowly
    spins it around its own Y axis.
    """
    light = Light(Color(1.0, 1.0, 1.0, 1.0), 1.0)
    light.transform.position = Vector3(0.0, 2.0, -2.0)
    scene.set_light(light)

    # Shared base material; each sphere gets its own clone.
    lambertian_material = Material()
    lambertian_material.albedo = Color(1.0, 1.0, 1.0, 1.0)
    lambertian_material.shader = LambertianTintShader()

    earth = Sphere(0.6)
    earth.transform.position = Vector3(0, 0, 1.5)
    earth.material = lambertian_material.clone()
    scene.add_objects(earth)

    moon = Sphere(0.4)
    moon.transform.position = Vector3(-0.2, -0.5, 1.2)
    moon.material = lambertian_material.clone()
    moon.material.set_texture(Texture("images/moon.jpg"))
    scene.add_objects(moon)

    # Orbit axes and angular speed for the moon animation.
    v1 = Vector3(0.0, 1.5, 0.5)
    v2 = Vector3(0.5, -1.0, 0.0)
    animation_velocity = 0.4

    def update_method(t):
        angle = animation_velocity * t * np.pi
        position = Vector3(-0.2, -0.5, 1.2)
        position = position.add(v1.multiply(np.cos(angle)))
        position = position.add(v2.multiply(np.sin(angle)))
        moon.transform.position = position
        # Spin the moon around its Y axis (degrees, wrapped to 360).
        moon.transform.rotation = Vector3(0.0, np.mod(0.5 * animation_velocity * t, 360), 0.0)

    return update_method
def render(scene, update_method, render_sizex=224, render_sizey=256,
           render_extension='.mp4', total_time=10.0, fps=15):
    """Render `scene` as a video, calling `update_method(t)` before each frame.

    Frames are rendered to temporary JPEGs and appended to a timestamped
    video file via imageio.

    @param scene - Scene to render (must already have a camera set)
    @param update_method - callback(t) that animates the scene at time t
    @param render_sizex / render_sizey - frame size passed to scene.render
    @param render_extension - video container extension
    @param total_time - total video length in seconds
    @param fps - frames per second
    """
    # Prefer the Android shared-storage folder when it exists.
    if os.path.exists("/sdcard/Raytracing/"):
        render_image = "/sdcard/Raytracing/test_video"
    else:
        render_image = 'rendered/test_video'
    now = datetime.datetime.now()
    render_image = render_image + now.strftime("_%Y_%m_%d_%H_%M_%S")

    delta_time = 1.0 / fps
    t = 0
    frame_count = 0
    render_image = render_image + render_extension
    with imageio.get_writer(render_image, fps=fps) as writer:
        # BUG FIX: this loop used `xrange`, which raises NameError on
        # Python 3 (the module-level shim rebinds `range`, not `xrange`).
        for _ in range(0, int(fps * total_time)):
            update_method(t)
            t = t + delta_time
            frame_count = frame_count + 1
            frame_path = 'temp/git_video_img_' + str(frame_count) + '.jpg'
            scene.render(render_sizex, render_sizey, frame_path)
            writer.append_data(imageio.imread(frame_path))
    print('Scene rasterized in image path: %s' % (render_image,))
def render_setup():
    """Build the demo scene (orbiting moon) and render it as a short video."""
    scene = Scene()
    camera = Camera()
    camera.fov = 60
    scene.set_ssaa(1)
    scene.set_camera(camera)
    # Alternative demo: update_method = render_random_spheres(scene)
    update_method = render_moon(scene)
    render(scene, update_method, total_time=5.0)


if __name__ == '__main__':
    render_setup()
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,735
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/meanfilter.py
|
class MeanFilter:
    """Box (mean) blur applied in place to a RenderData, per color channel.

    BUG FIX (docs): the original docstring claimed a 3x3 filter, but the
    window is filter_size_x x filter_size_y = 5x5. Window samples falling
    outside the image are skipped. Note the filter reads pixels it has
    already written, so the blur propagates left-to-right, top-to-bottom.
    """

    def __init__(self):
        # Window size in pixels for both axes.
        self.filter_size_x = 5
        self.filter_size_y = 5

    def apply_effect(self, render_data):
        """Blur render_data.pixels in place (pixels maps [x, y] -> (r, g, b))."""
        for x in range(0, render_data.pixel_width):
            for y in range(0, render_data.pixel_height):
                colors = [0, 0, 0]
                for c in range(0, 3):
                    sum_kernel = 0
                    total_kernel = 0
                    for fx in range(0, self.filter_size_x):
                        for fy in range(0, self.filter_size_y):
                            # int() truncates toward zero, matching the
                            # original sampling offsets exactly.
                            ix = int(x + fx - 0.5 * self.filter_size_x)
                            iy = int(y + fy - 0.5 * self.filter_size_y)
                            if 0 <= ix < render_data.pixel_width and \
                               0 <= iy < render_data.pixel_height:
                                sum_kernel += render_data.pixels[ix, iy][c]
                                total_kernel += 1
                    if total_kernel > 0:
                        colors[c] = int(sum_kernel * 1.0 / total_kernel)
                render_data.pixels[x, y] = tuple(colors)
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,736
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/engine/material.py
|
from .color import Color
from miru.engine.shaders import UnlitShader, LambertianTintShader
class Material:
    """Surface appearance: albedo color, optional texture and a shader.

    Rendering is delegated to the shader unless debug mode is enabled, in
    which case a debug visualization (e.g. normals) is returned instead.
    """

    def __init__(self):
        self.texture = None
        self.albedo = Color(1.0, 1.0, 1.0, 1.0)
        self.shader = UnlitShader()
        self.debug_mode = False
        self.debug_render_type = "None"

    def clone(self):
        """Return a copy with a cloned albedo; texture and shader are shared."""
        duplicate = Material()
        duplicate.albedo = self.albedo.clone()
        duplicate.texture = self.texture
        duplicate.shader = self.shader
        return duplicate

    def set_texture(self, texture):
        self.texture = texture

    def set_shader(self, shader):
        self.shader = shader

    def render(self, scene, interception):
        """Shade an interception, or return a debug visualization in debug mode."""
        if self.debug_mode:
            return self.debug_render(scene, interception)
        return self.shader.frag_render(self, scene, interception)

    def debug_render(self, scene, interception):
        color = Color.white()
        if self.debug_render_type == "normal":
            # Visualize the interception's surface normal as an RGB color.
            color.set_rgb(interception['normal'])
        return color

    @staticmethod
    def default():
        """Return a plain unlit material."""
        material = Material()
        material.shader = UnlitShader()
        return material

    @staticmethod
    def parse(data):
        """Build a Material from a parsed scene dictionary; missing keys keep defaults."""
        material = Material()
        if 'shader' in data:
            shader_name = data['shader']
            if shader_name == 'unlit':
                material.shader = UnlitShader()
            elif shader_name == "lambertian":
                material.shader = LambertianTintShader()
        if 'albedo' in data:
            material.albedo = Color.from_array(data['albedo'])
        if 'debug' in data:
            debug_data = data['debug']
            if 'active' in debug_data:
                # Debug flag is serialized as the string "True"/"False".
                material.debug_mode = debug_data['active'] == "True"
            else:
                material.debug_mode = True
            if 'render' in debug_data:
                material.debug_render_type = debug_data['render']
        return material
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,737
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/camera.py
|
from miru.engine.transform import Transform
class Camera:
    """Camera with projection mode, field of view and near/far clip planes."""

    ORTOGRAPHIC = 1
    PERSPECTIVE = 2

    def __init__(self):
        # Defaults: perspective projection with a 60-degree FOV.
        self.mode = Camera.PERSPECTIVE
        self.fov = 60
        self.near = 0.01
        self.far = 1000
        self.transform = Transform()
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,738
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/scenetests.py
|
from miru.raytracing.scene import render_scene
import os
def run_scenes_tests():
    """Render each reference scene into the miru_test_results folder.

    Requires the MIRU_PROJ_PATH environment variable to point at the
    project root containing the scene files.
    """
    test_folder = "miru_test_results"
    miru_folder = os.environ["MIRU_PROJ_PATH"]
    if not os.path.exists(test_folder):
        os.makedirs(test_folder)

    # 1st test: scenes/test/four_spheres_in_corners.scene
    scene_file = os.path.join(miru_folder, "scenes/test/four_spheres_in_corners.scene")
    image_path = os.path.join(test_folder, "four_spheres_in_corners.png")
    render_scene(scene_file, image_path)


if __name__ == '__main__':
    run_scenes_tests()
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,739
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/kernelfilter.py
|
class KernelFilter:
    """Convolution filter applied in place to a RenderData, per color channel.

    `kernel` is indexed as kernel[fx, fy] (e.g. a numpy matrix) with
    dimensions sizex x sizey. Samples falling outside the image are
    skipped, and already-filtered pixels are read back as the window
    moves (the convolution is not double-buffered).
    """

    def __init__(self, kernel, sizex, sizey):
        self.kernel = kernel
        self.filter_size_x = sizex
        self.filter_size_y = sizey

    def apply_effect(self, render_data):
        """Convolve render_data.pixels in place (pixels maps [x, y] -> (r, g, b))."""
        for x in range(0, render_data.pixel_width):
            for y in range(0, render_data.pixel_height):
                colors = [0, 0, 0]
                for c in range(0, 3):
                    accumulated = 0.0
                    for fx in range(0, self.filter_size_x):
                        for fy in range(0, self.filter_size_y):
                            # int() truncates toward zero, matching the
                            # original sampling offsets.
                            ix = int(x + fx - 0.5 * self.filter_size_x)
                            iy = int(y + fy - 0.5 * self.filter_size_y)
                            if 0 <= ix < render_data.pixel_width and \
                               0 <= iy < render_data.pixel_height:
                                accumulated += self.kernel[fx, fy] * float(render_data.pixels[ix, iy][c])
                    colors[c] = int(accumulated)
                render_data.pixels[x, y] = tuple(colors)
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,740
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/mesh.py
|
import sys
from vector import Vector3, Vector2
import numpy as np
import meshtools
class Mesh:
    """Indexed triangle mesh with optional UVs, normals and a material."""

    def __init__(self, vertices=None, triangles=None):
        # BUG FIX: the defaults were mutable lists ([], []), which Python
        # evaluates once — every Mesh() shared the same vertex/triangle
        # lists. Use None sentinels and create fresh lists per instance.
        self.vertices = [] if vertices is None else vertices
        self.triangles = [] if triangles is None else triangles
        self.uvs = None
        self.normals = None
        self.material = None

    def add_triangle(self, t1, t2, t3):
        """Append one triangle given three vertex indices."""
        self.triangles.append(t1)
        self.triangles.append(t2)
        self.triangles.append(t3)

    def pre_render(self):
        # NOTE(review): self.transform is never set in __init__ — callers
        # must attach one before rendering or this raises AttributeError.
        self.transform.update_internals()

    def render(self, scene, interception):
        """Delegate shading to the material; returns None when there is none."""
        if self.material != None:
            return self.material.render(scene, interception)

    def intercepts(self, ray):
        """Test `ray` against every triangle and return the first hit's data.

        Returns a dict with keys result/hit_point/normal/uv; 'result' is
        False when no triangle is hit.
        """
        # BUG FIX: was `xrange(0, len(self.triangles)/3)` — xrange does not
        # exist on Python 3 and `/` yields a float there; use range with
        # integer floor division.
        for t in range(0, len(self.triangles) // 3):
            intersection_data = self.intercept_triangle(t, ray)
            if intersection_data['result']:
                return intersection_data
        return {'result': False, 'hit_point': Vector3.zero, 'normal': None, 'uv': Vector2.zero}

    def get_mean_triangle_vector(self, t):
        """Return the centroid of triangle `t`."""
        p1 = self.vertices[self.triangles[3 * t]]
        p2 = self.vertices[self.triangles[3 * t + 1]]
        p3 = self.vertices[self.triangles[3 * t + 2]]
        return p1.add(p2).add(p3).multiply(1.0 / 3.0)

    def intercept_triangle(self, t, ray):
        """Moller-Trumbore-style ray/triangle intersection for triangle `t`.

        Returns a dict: result (bool), hit_point, normal, and uv
        (interpolated from the mesh UVs via barycentric coordinates, or
        None when the mesh has no UVs).
        """
        result = {'result': False, 'hit_point': Vector3.zero, 'normal': None, 'uv': Vector2.zero}
        t1 = self.triangles[3 * t]
        t2 = self.triangles[3 * t + 1]
        t3 = self.triangles[3 * t + 2]
        p1 = self.vertices[t1]
        p2 = self.vertices[t2]
        p3 = self.vertices[t3]
        # Two edges sharing vertex p1.
        e1 = p2.minus(p1)
        e2 = p3.minus(p1)
        pvec = ray.direction.cross_product(e2)
        det = e1.dot_product(pvec)
        epsilon = sys.float_info.epsilon
        # A near-zero determinant means the ray lies in the triangle's plane.
        if det > -epsilon and det < epsilon:
            return result
        inv_det = 1.0 / det
        # Vector from p1 to the ray origin. (Renamed from the original `t`,
        # which shadowed the triangle-index parameter.)
        tvec = ray.origin.minus(p1)
        u = tvec.dot_product(pvec) * inv_det
        if u < 0 or u > 1:
            return result
        qvec = tvec.cross_product(e1)
        v = ray.direction.dot_product(qvec) * inv_det
        if v < 0 or u + v > 1:
            return result
        if (e2.dot_product(qvec) * inv_det) > epsilon:
            normal = e1.normalized().cross_product(e2.normalized()).normalized()
            denom = normal.dot_product(ray.direction)
            if np.abs(denom) > 1e-6:
                # Back-facing hits (denom < 0) are rejected.
                if denom < 0:
                    return result
                p1l0 = p1.minus(ray.origin)
                # NOTE(review): this plane distance is not divided by denom
                # as the standard ray/plane formula would be — verify this
                # is intentional for unit directions.
                distance = p1l0.dot_product(normal)
                if distance < 0:
                    return result
                hit_point = ray.origin.add(ray.direction.multiply(distance))
                bary_coord = meshtools.barycentric(hit_point, p1, p2, p3)
                if self.uvs != None:
                    uv = self.uvs[t1].multiply(bary_coord.x)
                    uv = uv.add(self.uvs[t2].multiply(bary_coord.y))
                    uv = uv.add(self.uvs[t3].multiply(bary_coord.z))
                else:
                    uv = None
                result['result'] = True
                result['hit_point'] = hit_point
                # Normal is flipped so it faces against the incoming ray.
                result['normal'] = normal.multiply(-1)
                result['uv'] = uv
                return result
        return result
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,078,741
|
adrianogil/miru
|
refs/heads/main
|
/src/python/miru/raytracing/test_texture.py
|
from scene import Scene
from camera import Camera
from sphere import Sphere
from plane import Plane
from cube import Cube
from miru.engine.vector import Vector3
from mesh import Mesh
import quadmesh
from light import Light
import random
import os
from color import Color
from texture import Texture
from miru.engine.material import Material
from miru.engine.shader_lambertiantint import LambertianTintShader
from meanfilter import MeanFilter
from kernelfilter import KernelFilter
import numpy as np
# Texture-mapping smoke test: renders a textured quad at several SSAA
# levels and writes each result as a JPEG image.

try:
    range = xrange  # Python 2: use the lazy iterator
except NameError:
    pass  # Python 3: range is already lazy

# NOTE(review): unused in this script, but kept — it advances the RNG state.
total_sphere = random.randint(1, 15)

scene_test = Scene()
scene_test.background_color = Color.random()

light = Light(Color(1.0, 1.0, 1.0, 1.0), 1.0)
light.transform.position = Vector3(1.0, 2.0, 1.0)
scene_test.set_light(light)

lambertianTintMaterial = Material()
lambertianTintMaterial.albedo = Color(1.0, 1.0, 1.0, 1.0)
lambertianTintMaterial.shader = LambertianTintShader()

# A quad carrying the test texture, sized to keep the image's aspect ratio.
zelda_texture = Texture("images/zelda.jpg")
position = Vector3(-2.0, 0.0, 3.0)
v1 = Vector3(2.0, 0.0, 0.0).multiply(zelda_texture.aspect_ratio)
v2 = Vector3(0.0, 2.0, 0.0)
mesh = Mesh()
quadmesh.create(mesh, v1, v2, position)
mesh.material = lambertianTintMaterial.clone()
mesh.material.albedo = Color(1.0, 0.5, 0.5, 1.0)
mesh.material.texture = zelda_texture
scene_test.add_objects(mesh)

c = Camera()
c.fov = 90
scene_test.set_ssaa(1)
scene_test.set_camera(c)

# Prefer the Android shared-storage folder when it exists.
if os.path.exists("/sdcard/Raytracing/"):
    render_image = "/sdcard/Raytracing/test"
else:
    render_image = 'test_texture'
render_extension = '.jpg'
render_sizex = 200
render_sizey = 200

# Render at SSAA levels 1-4 for comparison.
scene_test.render(render_sizex, render_sizey, render_image + '_ssaa1' + render_extension)
for ssaa_level in (2, 3, 4):
    scene_test.set_ssaa(ssaa_level)
    scene_test.render(render_sizex, render_sizey,
                      render_image + '_ssaa' + str(ssaa_level) + render_extension)
print('Scene rasterized in image path: %s' % (render_image,))
|
{"/src/python/miru/engine/transform.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/raytracing/scene.py": ["/src/python/miru/raytracing/ray.py"], "/src/python/miru/engine/color.py": ["/src/python/miru/engine/vector.py"], "/src/python/miru/engine/light.py": ["/src/python/miru/engine/transform.py", "/src/python/miru/engine/color.py"], "/src/python/miru/engine/camera.py": ["/src/python/miru/engine/transform.py"], "/src/python/miru/engine/material.py": ["/src/python/miru/engine/color.py"]}
|
39,088,705
|
andirigible/wwplando
|
refs/heads/main
|
/randomizers/items.py
|
import os
import re
from collections import OrderedDict
from fs_helpers import *
import tweaks
def randomize_items(self):
    """Top-level item shuffle: boss rewards (race mode), dungeon items,
    progress items, unique non-progress items, then consumable filler."""
    print("Randomizing items...")

    if self.options.get("race_mode"):
        randomize_boss_rewards(self)

    if not self.options.get("keylunacy"):
        randomize_dungeon_items(self)

    randomize_progression_items(self)

    # Place unique non-progress items.
    while self.logic.unplaced_nonprogress_items:
        accessible_undone_locations = self.logic.get_accessible_remaining_locations()

        item_name = self.rng.choice(self.logic.unplaced_nonprogress_items)

        possible_locations = self.logic.filter_locations_valid_for_item(accessible_undone_locations, item_name)

        if not possible_locations:
            raise Exception("No valid locations left to place non-progress items!")

        location_name = self.rng.choice(possible_locations)
        self.logic.set_location_to_item(location_name, item_name)

    # Report (but do not fail on) locations that ended up unreachable.
    accessible_undone_locations = self.logic.get_accessible_remaining_locations()
    inaccessible_locations = [
        loc for loc in self.logic.remaining_item_locations
        if loc not in accessible_undone_locations
    ]
    if inaccessible_locations:
        print("Inaccessible locations:")
        for location_name in inaccessible_locations:
            print(location_name)

    # Fill remaining unused locations with consumables (Rupees, spoils, and bait).
    locations_to_place_consumables_at = self.logic.remaining_item_locations.copy()
    for location_name in locations_to_place_consumables_at:
        # Prefer the fixed (limited-count) consumables; fall back to
        # infinitely duplicatable ones when none of the fixed items fit.
        possible_items = self.logic.filter_items_valid_for_location(self.logic.unplaced_fixed_consumable_items, location_name)
        if len(possible_items) == 0:
            possible_items = self.logic.filter_items_valid_for_location(self.logic.duplicatable_consumable_items, location_name)
            if len(possible_items) == 0:
                raise Exception("No valid consumable items for location %s" % location_name)

        item_name = self.rng.choice(possible_items)
        self.logic.set_location_to_item(location_name, item_name)
def randomize_boss_rewards(self):
    """Retry boss-reward placement until a valid set of locations is found.

    Gives up after 50 attempts, which means the chosen seed cannot support
    race-mode boss rewards.
    """
    for _ in range(50):
        if try_randomize_boss_rewards(self):
            return
    raise Exception("Cannot randomize boss rewards! Please try randomizing with a different seed.")
def try_randomize_boss_rewards(self):
    """Attempt one random assignment of boss reward items to boss locations.

    Returns True on success; returns False when the generated assignment is
    not logically reachable (caller retries). Raises when prerequisites for
    race mode are not met at all.
    """
    if not self.options.get("progression_dungeons"):
        raise Exception("Cannot randomize boss rewards when progress items are not allowed in dungeons.")

    boss_reward_items = []
    total_num_rewards = int(self.options.get("num_race_mode_dungeons"))

    # Flatten grouped progress items into a single list of individual items.
    unplaced_progress_items_degrouped = []
    for item_name in self.logic.unplaced_progress_items:
        if item_name in self.logic.progress_item_groups:
            unplaced_progress_items_degrouped += self.logic.progress_item_groups[item_name]
        else:
            unplaced_progress_items_degrouped.append(item_name)

    # Try to make all the rewards be Triforce Shards.
    # May not be possible if the player chose to start with too many shards.
    num_additional_rewards_needed = total_num_rewards
    triforce_shards = [
        item_name for item_name in unplaced_progress_items_degrouped
        if item_name.startswith("Triforce Shard ")
    ]
    self.rng.shuffle(triforce_shards)
    boss_reward_items += triforce_shards[0:num_additional_rewards_needed]

    # If we still need more rewards, use sword upgrades.
    # May still not fill up all 4 slots if the player starts with 8 shards and a sword.
    num_additional_rewards_needed = total_num_rewards - len(boss_reward_items)
    if num_additional_rewards_needed > 0:
        sword_upgrades = [
            item_name for item_name in unplaced_progress_items_degrouped
            if item_name == "Progressive Sword"
        ]
        boss_reward_items += sword_upgrades[0:num_additional_rewards_needed]

    # If we still need more rewards, use bow upgrades.
    # May still not fill up all 4 slots if the player starts with 8 shards and is in swordless mode.
    num_additional_rewards_needed = total_num_rewards - len(boss_reward_items)
    if num_additional_rewards_needed > 0:
        bow_upgrades = [
            item_name for item_name in unplaced_progress_items_degrouped
            if item_name == "Progressive Bow"
        ]
        boss_reward_items += bow_upgrades[0:num_additional_rewards_needed]

    # If we STILL need more rewards, use the Hookshot, Mirror Shield, and Boomerang.
    possible_additional_rewards = ["Hookshot", "Mirror Shield", "Boomerang"]
    num_additional_rewards_needed = total_num_rewards - len(boss_reward_items)
    if num_additional_rewards_needed > 0:
        additional_rewards = [
            item_name for item_name in unplaced_progress_items_degrouped
            if item_name in possible_additional_rewards
        ]
        boss_reward_items += additional_rewards[0:num_additional_rewards_needed]

    self.rng.shuffle(boss_reward_items)

    if len(boss_reward_items) != total_num_rewards:
        raise Exception("Number of boss reward items is incorrect: " + ", ".join(boss_reward_items))

    # Boss locations are identified by originally holding a Heart Container.
    possible_boss_locations = [
        loc for loc in self.logic.remaining_item_locations
        if self.logic.item_locations[loc]["Original item"] == "Heart Container"
    ]
    if len(possible_boss_locations) != 6:
        raise Exception("Number of boss item locations is incorrect: " + ", ".join(possible_boss_locations))

    # Decide what reward item to place in each boss location.
    boss_reward_locations = OrderedDict()
    for item_name in boss_reward_items:
        # In a dungeons-only start, prefer the first two dungeons so the
        # seed stays completable.
        if self.dungeons_only_start and "Dragon Roost Cavern - Gohma Heart Container" in possible_boss_locations:
            location_name = "Dragon Roost Cavern - Gohma Heart Container"
        elif self.dungeons_only_start and "Forbidden Woods - Kalle Demos Heart Container" in possible_boss_locations:
            location_name = "Forbidden Woods - Kalle Demos Heart Container"
        else:
            location_name = self.rng.choice(possible_boss_locations)
        possible_boss_locations.remove(location_name)
        boss_reward_locations[location_name] = item_name

    # Verify that the dungeon boss rewards were placed in a way that allows them all to be accessible.
    locations_valid = validate_boss_reward_locations(self, boss_reward_locations)

    # If the dungeon boss reward locations are not valid, a new set of dungeon boss reward locations will be generated.
    if not locations_valid:
        return False

    # Remove any Triforce Shards we're about to use from the progress item group,
    # and add them as ungrouped progress items instead.
    for group_name, group_item_names in self.logic.progress_item_groups.items():
        items_to_remove_from_group = [
            item_name for item_name in group_item_names
            if item_name in boss_reward_items
        ]
        for item_name in items_to_remove_from_group:
            self.logic.progress_item_groups[group_name].remove(item_name)
        if group_name in self.logic.unplaced_progress_items:
            for item_name in items_to_remove_from_group:
                self.logic.unplaced_progress_items.append(item_name)

        # Drop emptied groups from the unplaced list entirely.
        if len(self.logic.progress_item_groups[group_name]) == 0:
            if group_name in self.logic.unplaced_progress_items:
                self.logic.unplaced_progress_items.remove(group_name)

    # Commit the chosen placements and record the required dungeons.
    for location_name, item_name in boss_reward_locations.items():
        self.logic.set_prerandomization_item_location(location_name, item_name)
        self.race_mode_required_locations.append(location_name)
        dungeon_name, _ = self.logic.split_location_name_by_zone(location_name)
        self.race_mode_required_dungeons.append(dungeon_name)

    # Dungeons whose boss location was NOT chosen are banned from holding
    # progress items, along with their mail rewards.
    banned_dungeons = []
    for boss_location_name in possible_boss_locations:
        dungeon_name, _ = self.logic.split_location_name_by_zone(boss_location_name)
        banned_dungeons.append(dungeon_name)

    for location_name in self.logic.item_locations:
        zone_name, _ = self.logic.split_location_name_by_zone(location_name)
        if self.logic.is_dungeon_location(location_name) and zone_name in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)
        elif location_name == "Mailbox - Letter from Orca" and "Forbidden Woods" in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)
        elif location_name == "Mailbox - Letter from Baito" and "Earth Temple" in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)
        elif location_name == "Mailbox - Letter from Aryll" and "Forsaken Fortress" in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)
        elif location_name == "Mailbox - Letter from Tingle" and "Forsaken Fortress" in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)

    return True
def validate_boss_reward_locations(self, boss_reward_locations):
    """Check that every boss reward in *boss_reward_locations* can eventually
    be reached, simulating item collection. Returns True when valid."""
    boss_reward_items = list(boss_reward_locations.values())

    # Temporarily own every item that is not a dungeon boss reward.
    items_to_temporarily_add = self.logic.unplaced_progress_items.copy()
    for item_name in boss_reward_items:
        if item_name in items_to_temporarily_add:
            items_to_temporarily_add.remove(item_name)
    for item_name in items_to_temporarily_add:
        self.logic.add_owned_item_or_item_group(item_name)

    locations_valid = True
    temporary_boss_reward_items = []
    remaining_boss_reward_items = boss_reward_items.copy()
    remaining_boss_locations = list(boss_reward_locations.keys())
    while remaining_boss_reward_items:
        # Consider a dungeon boss reward to be accessible when every location
        # in that dungeon is accessible.
        accessible_undone_locations = self.logic.get_accessible_remaining_locations()
        inaccessible_dungeons = []
        for location_name in self.logic.remaining_item_locations:
            if self.logic.is_dungeon_location(location_name) and location_name not in accessible_undone_locations:
                dungeon_name, _ = self.logic.split_location_name_by_zone(location_name)
                inaccessible_dungeons.append(dungeon_name)

        newly_accessible_boss_locations = []
        for boss_location in remaining_boss_locations:
            dungeon_name, _ = self.logic.split_location_name_by_zone(boss_location)
            if dungeon_name not in inaccessible_dungeons:
                newly_accessible_boss_locations.append(boss_location)

        # If there are no more accessible dungeon boss rewards, the boss
        # locations are invalid (we're stuck before collecting everything).
        if not newly_accessible_boss_locations:
            locations_valid = False
            break

        # Temporarily own dungeon boss rewards that are now accessible.
        for location_name in newly_accessible_boss_locations:
            item_name = boss_reward_locations[location_name]
            self.logic.add_owned_item_or_item_group(item_name)
            temporary_boss_reward_items.append(item_name)
            remaining_boss_reward_items.remove(item_name)
            remaining_boss_locations.remove(location_name)

    # Remove temporarily owned items so logic state is left untouched.
    for item_name in items_to_temporarily_add:
        self.logic.remove_owned_item_or_item_group(item_name)
    for item_name in temporary_boss_reward_items:
        self.logic.remove_owned_item_or_item_group(item_name)

    return locations_valid
def randomize_dungeon_items(self):
    """Place dungeon-specific items (keys, maps, compasses) before anything
    else so dungeon locations don't get used up by other items."""
    # Temporarily add all items except for dungeon keys while we randomize them.
    items_to_temporarily_add = [
        item_name for item_name in (self.logic.unplaced_progress_items + self.logic.unplaced_nonprogress_items)
        if not self.logic.is_dungeon_item(item_name)
    ]
    for item_name in items_to_temporarily_add:
        self.logic.add_owned_item_or_item_group(item_name)

    if self.dungeons_only_start:
        # Choose a random location out of the 6 easiest locations to access in DRC.
        # This location will not have the big key, dungeon map, or compass on this
        # seed. (But can still have small keys/non-dungeon items.)
        # This is to prevent a rare error in dungeons-only-start.
        self.drc_failsafe_location = self.rng.choice([
            "Dragon Roost Cavern - First Room",
            "Dragon Roost Cavern - Alcove With Water Jugs",
            "Dragon Roost Cavern - Boarded Up Chest",
            "Dragon Roost Cavern - Rat Room",
            "Dragon Roost Cavern - Rat Room Boarded Up Chest",
            "Dragon Roost Cavern - Bird's Nest",
        ])

    # Randomize small keys.
    small_keys_to_place = [
        item_name for item_name in (self.logic.unplaced_progress_items + self.logic.unplaced_nonprogress_items)
        if item_name.endswith(" Small Key")
    ]
    assert len(small_keys_to_place) > 0
    for item_name in small_keys_to_place:
        place_dungeon_item(self, item_name)
        self.logic.add_owned_item(item_name)  # Temporarily add small keys to the player's inventory while placing them.

    # Randomize big keys.
    big_keys_to_place = [
        item_name for item_name in (self.logic.unplaced_progress_items + self.logic.unplaced_nonprogress_items)
        if item_name.endswith(" Big Key")
    ]
    assert len(big_keys_to_place) > 0
    for item_name in big_keys_to_place:
        place_dungeon_item(self, item_name)
        self.logic.add_owned_item(item_name)  # Temporarily add big keys to the player's inventory while placing them.

    # Randomize dungeon maps and compasses.
    other_dungeon_items_to_place = [
        item_name for item_name in (self.logic.unplaced_progress_items + self.logic.unplaced_nonprogress_items)
        if item_name.endswith(" Dungeon Map")
        or item_name.endswith(" Compass")
    ]
    assert len(other_dungeon_items_to_place) > 0
    for item_name in other_dungeon_items_to_place:
        place_dungeon_item(self, item_name)

    # Remove the items we temporarily added.
    for item_name in items_to_temporarily_add:
        self.logic.remove_owned_item_or_item_group(item_name)
    for item_name in small_keys_to_place:
        self.logic.remove_owned_item(item_name)
    for item_name in big_keys_to_place:
        self.logic.remove_owned_item(item_name)
def place_dungeon_item(self, item_name):
    """Pick a valid, currently accessible location for one dungeon item and
    record it as a predetermined placement."""
    accessible_undone_locations = self.logic.get_accessible_remaining_locations()
    accessible_undone_locations = [
        loc for loc in accessible_undone_locations
        if loc not in self.logic.prerandomization_item_locations
    ]
    if not self.options.get("progression_tingle_chests"):
        accessible_undone_locations = [
            loc for loc in accessible_undone_locations
            if not "Tingle Chest" in self.logic.item_locations[loc]["Types"]
        ]

    possible_locations = self.logic.filter_locations_valid_for_item(accessible_undone_locations, item_name)

    if self.dungeons_only_start and item_name == "DRC Small Key":
        # If we're in a dungeons-only-start, we have to ban small keys from appearing
        # in the path that sequence breaks the hanging platform.
        # A key you need to progress appearing there can cause issues that dead-end
        # the item placement logic when there are no locations outside DRC for the
        # randomizer to give you other items at.
        possible_locations = [
            loc for loc in possible_locations
            if not loc in ["Dragon Roost Cavern - Big Key Chest", "Dragon Roost Cavern - Tingle Statue Chest"]
        ]

    if self.dungeons_only_start and item_name in ["DRC Big Key", "DRC Dungeon Map", "DRC Compass"]:
        # In a dungeons-only start, ban non-key dungeon items from one randomly
        # chosen easy DRC location (the failsafe), so those 6 locations can never
        # all be filled with keys/map/compass and dead-end the placement logic.
        possible_locations = [
            loc for loc in possible_locations
            if loc != self.drc_failsafe_location
        ]

    if not possible_locations:
        raise Exception("No valid locations left to place dungeon items!")

    location_name = self.rng.choice(possible_locations)
    self.logic.set_prerandomization_item_location(location_name, item_name)
def randomize_progression_items(self):
    """Place every progress item, weighting towards newly accessible locations
    and preferring items that immediately unlock further progress."""
    accessible_undone_locations = self.logic.get_accessible_remaining_locations(for_progression=True)
    if len(accessible_undone_locations) == 0:
        raise Exception("No progress locations are accessible at the very start of the game!")

    # Place progress items.
    location_weights = {}
    current_weight = 1
    while self.logic.unplaced_progress_items:
        accessible_undone_locations = self.logic.get_accessible_remaining_locations(for_progression=True)

        if not accessible_undone_locations:
            raise Exception("No locations left to place progress items!")

        # If the player gained access to any predetermined item locations,
        # we need to give them those items.
        newly_accessible_predetermined_item_locations = [
            loc for loc in accessible_undone_locations
            if loc in self.logic.prerandomization_item_locations
        ]
        if newly_accessible_predetermined_item_locations:
            for predetermined_item_location_name in newly_accessible_predetermined_item_locations:
                predetermined_item_name = self.logic.prerandomization_item_locations[predetermined_item_location_name]
                self.logic.set_location_to_item(predetermined_item_location_name, predetermined_item_name)
            continue  # Redo this loop iteration with the predetermined item locations no longer being considered 'remaining'.

        # Older (still-unfilled) locations decay towards weight 1; brand-new
        # locations get the current (highest) weight.
        for location in accessible_undone_locations:
            if location not in location_weights:
                location_weights[location] = current_weight
            elif location_weights[location] > 1:
                location_weights[location] -= 1
        current_weight += 1

        possible_items = self.logic.unplaced_progress_items.copy()

        # Don't randomly place items that already had their location predetermined.
        unfound_prerand_locs = [
            loc for loc in self.logic.prerandomization_item_locations
            if loc in self.logic.remaining_item_locations
        ]
        for location_name in unfound_prerand_locs:
            prerand_item = self.logic.prerandomization_item_locations[location_name]
            if prerand_item not in self.logic.all_progress_items:
                continue
            possible_items.remove(prerand_item)

        if len(possible_items) == 0:
            raise Exception("Only items left to place are predetermined items at inaccessible locations!")

        # Filter out items that are not valid in any of the locations we might use.
        possible_items = self.logic.filter_items_by_any_valid_location(possible_items, accessible_undone_locations)
        if len(possible_items) == 0:
            raise Exception("No valid locations left for any of the unplaced progress items!")

        # Remove duplicates from the list so items like swords and bows aren't
        # so likely to show up early.
        unique_possible_items = []
        for item_name in possible_items:
            if item_name not in unique_possible_items:
                unique_possible_items.append(item_name)
        possible_items = unique_possible_items

        must_place_useful_item = False
        should_place_useful_item = True
        if len(accessible_undone_locations) == 1 and len(possible_items) > 1:
            # If we're on the last accessible location but not the last item we
            # HAVE to place an item that unlocks new locations.
            must_place_useful_item = True
        elif len(accessible_undone_locations) >= 17:
            # With many locations open we don't need to be so strict about
            # prioritizing currently useful items. This gives the randomizer a
            # chance to place things like Delivery Bag or small keys for
            # dungeons that need x2 to do anything.
            should_place_useful_item = False

        # If we wind up placing a useful item it can be a single item or a group.
        # But when placing an item that is not yet useful, exclude groups that
        # are not useful, so a group doesn't consume every remaining location
        # while opening none.
        possible_groups = [name for name in possible_items if name in self.logic.progress_item_groups]
        useless_groups = self.logic.get_all_useless_items(possible_groups)
        possible_items_when_not_placing_useful = [name for name in possible_items if name not in useless_groups]

        # Only exception is when there's exclusively groups left to place.
        # Then we allow groups even if they're not useful.
        if len(possible_items_when_not_placing_useful) == 0 and len(possible_items) > 0:
            possible_items_when_not_placing_useful = possible_items

        if must_place_useful_item or should_place_useful_item:
            shuffled_list = possible_items.copy()
            self.rng.shuffle(shuffled_list)
            item_name = self.logic.get_first_useful_item(shuffled_list)
            if item_name is None:
                if must_place_useful_item:
                    raise Exception("No useful progress items to place!")
                else:
                    # No single item is useful yet; choose the item with the
                    # highest usefulness fraction (fewest other items needed
                    # before it unlocks a new location, e.g. 1/2 beats 1/5).
                    item_by_usefulness_fraction = self.logic.get_items_by_usefulness_fraction(possible_items_when_not_placing_useful)
                    # The values are the DENOMINATOR of the fraction, so the
                    # maximum usefulness is the minimum value.
                    max_usefulness = min(item_by_usefulness_fraction.values())
                    items_at_max_usefulness = [
                        item_name for item_name, usefulness in item_by_usefulness_fraction.items()
                        if usefulness == max_usefulness
                    ]
                    item_name = self.rng.choice(items_at_max_usefulness)
        else:
            item_name = self.rng.choice(possible_items_when_not_placing_useful)

        if self.options.get("race_mode"):
            # Keep progress items out of dungeons banned for race mode,
            # as long as enough non-banned locations remain.
            locations_filtered = [
                loc for loc in accessible_undone_locations
                if loc not in self.race_mode_banned_locations
            ]
            if item_name in self.logic.progress_item_groups:
                num_locs_needed = len(self.logic.progress_item_groups[item_name])
            else:
                num_locs_needed = 1
            if len(locations_filtered) >= num_locs_needed:
                accessible_undone_locations = locations_filtered
            else:
                raise Exception("Failed to prevent progress items from appearing in unchosen dungeons for race mode.")

        if item_name in self.logic.progress_item_groups:
            # If we're placing an entire item group, we use different logic for
            # deciding the location: no weighting towards newly accessible
            # locations, and multiple locations are selected, one per item.
            group_name = item_name
            possible_locations_for_group = accessible_undone_locations.copy()
            self.rng.shuffle(possible_locations_for_group)
            self.logic.set_multiple_locations_to_group(possible_locations_for_group, group_name)
        else:
            possible_locations = self.logic.filter_locations_valid_for_item(accessible_undone_locations, item_name)

            # Try to prevent chains of charts that lead to sunken treasures with
            # more charts in them. If the only locations available are sunken
            # treasures we don't have much choice, so still allow it then.
            if item_name.startswith("Treasure Chart") or item_name.startswith("Triforce Chart"):
                possible_locations_without_sunken_treasures = [
                    loc for loc in possible_locations
                    if not "Sunken Treasure" in self.logic.item_locations[loc]["Types"]
                ]
                if possible_locations_without_sunken_treasures:
                    possible_locations = possible_locations_without_sunken_treasures

            # We weight it so newly accessible locations are more likely to be
            # chosen, while still leaving a good chance of an older location.
            possible_locations_with_weighting = []
            for location_name in possible_locations:
                weight = location_weights[location_name]
                possible_locations_with_weighting += [location_name] * weight

            location_name = self.rng.choice(possible_locations_with_weighting)
            self.logic.set_location_to_item(location_name, item_name)

    # Make sure locations that should have predetermined items in them have them
    # properly placed, even if the above logic missed them for some reason.
    for location_name in self.logic.prerandomization_item_locations:
        if location_name in self.logic.remaining_item_locations:
            dungeon_item_name = self.logic.prerandomization_item_locations[location_name]
            self.logic.set_location_to_item(location_name, dungeon_item_name)

    game_beatable = self.logic.check_requirement_met("Can Reach and Defeat Ganondorf")
    if not game_beatable:
        raise Exception("Game is not beatable on this seed! This error shouldn't happen.")
def write_changed_items(self):
    """Write every finalized item placement into the game files by patching
    each of the location's listed paths."""
    for location_name, item_name in self.logic.done_item_locations.items():
        paths = self.logic.item_locations[location_name]["Paths"]
        for path in paths:
            change_item(self, path, item_name)
def change_item(self, path, item_name):
    """Dispatch one item patch to the correct patcher based on the path format.

    Path formats handled: REL offsets, main.dol addresses, custom symbols,
    and in-arc chests / events / SCOBs / actors (optionally layered).
    Raises for any path that matches none of the known formats.
    """
    item_id = self.item_name_to_id[item_name]

    rel_match = re.search(r"^(rels/[^.]+\.rel)@([0-9A-F]{4})$", path)
    main_dol_match = re.search(r"^main.dol@(8[0-9A-F]{7})$", path)
    custom_symbol_match = re.search(r"^CustomSymbol:(.+)$", path)
    chest_match = re.search(r"^([^/]+/[^/]+\.arc)(?:/Layer([0-9a-b]))?/Chest([0-9A-F]{3})$", path)
    event_match = re.search(r"^([^/]+/[^/]+\.arc)/Event([0-9A-F]{3}):[^/]+/Actor([0-9A-F]{3})/Action([0-9A-F]{3})$", path)
    scob_match = re.search(r"^([^/]+/[^/]+\.arc)(?:/Layer([0-9a-b]))?/ScalableObject([0-9A-F]{3})$", path)
    actor_match = re.search(r"^([^/]+/[^/]+\.arc)(?:/Layer([0-9a-b]))?/Actor([0-9A-F]{3})$", path)

    if rel_match:
        rel_path = rel_match.group(1)
        offset = int(rel_match.group(2), 16)
        path = os.path.join("files", rel_path)
        change_hardcoded_item_in_rel(self, path, offset, item_id)
    elif main_dol_match:
        address = int(main_dol_match.group(1), 16)
        change_hardcoded_item_in_dol(self, address, item_id)
    elif custom_symbol_match:
        custom_symbol = custom_symbol_match.group(1)
        if custom_symbol not in self.main_custom_symbols:
            raise Exception("Invalid custom symbol: %s" % custom_symbol)
        address = self.main_custom_symbols[custom_symbol]
        change_hardcoded_item_in_dol(self, address, item_id)
    elif chest_match:
        arc_path = "files/res/Stage/" + chest_match.group(1)
        if chest_match.group(2):
            layer = int(chest_match.group(2), 16)
        else:
            layer = None
        chest_index = int(chest_match.group(3), 16)
        change_chest_item(self, arc_path, chest_index, layer, item_id)
    elif event_match:
        arc_path = "files/res/Stage/" + event_match.group(1)
        event_index = int(event_match.group(2), 16)
        actor_index = int(event_match.group(3), 16)
        action_index = int(event_match.group(4), 16)
        change_event_item(self, arc_path, event_index, actor_index, action_index, item_id)
    elif scob_match:
        arc_path = "files/res/Stage/" + scob_match.group(1)
        if scob_match.group(2):
            layer = int(scob_match.group(2), 16)
        else:
            layer = None
        scob_index = int(scob_match.group(3), 16)
        change_scob_item(self, arc_path, scob_index, layer, item_id)
    elif actor_match:
        arc_path = "files/res/Stage/" + actor_match.group(1)
        if actor_match.group(2):
            layer = int(actor_match.group(2), 16)
        else:
            layer = None
        actor_index = int(actor_match.group(3), 16)
        change_actor_item(self, arc_path, actor_index, layer, item_id)
    else:
        raise Exception("Invalid item path: " + path)
def change_hardcoded_item_in_dol(self, address, item_id):
    # Patch one hardcoded item ID byte in the main executable at the given address.
    self.dol.write_data(write_u8, address, item_id)
def change_hardcoded_item_in_rel(self, path, offset, item_id):
    # Patch one hardcoded item ID byte inside a relocatable module (.rel).
    rel = self.get_rel(path)
    rel.write_data(write_u8, offset, item_id)
def change_chest_item(self, arc_path, chest_index, layer, item_id):
    """Set the item inside a chest (TRES entry) of a stage or room arc."""
    # Stage-wide data lives in stage.dzs; per-room data lives in room.dzr.
    if arc_path.endswith("Stage.arc"):
        dzx = self.get_arc(arc_path).get_file("stage.dzs")
    else:
        dzx = self.get_arc(arc_path).get_file("room.dzr")
    chest = dzx.entries_by_type_and_layer("TRES", layer)[chest_index]
    chest.item_id = item_id
    chest.save_changes()
def change_event_item(self, arc_path, event_index, actor_index, action_index, item_id):
    """Rewrite an event action so it grants the given item (or song)."""
    event_list = self.get_arc(arc_path).get_file("event_list.dat")
    action = event_list.events[event_index].actors[actor_index].actions[action_index]

    if 0x6D <= item_id <= 0x72:  # Song item IDs use a dedicated dance action.
        action.name = "059get_dance"
        action.properties[0].value = [item_id - 0x6D]
    else:
        action.name = "011get_item"
        action.properties[0].value = [item_id]
def change_scob_item(self, arc_path, scob_index, layer, item_id):
    """Set the item held by a scalable object (SCOB), e.g. salvage points."""
    if arc_path.endswith("Stage.arc"):
        dzx = self.get_arc(arc_path).get_file("stage.dzs")
    else:
        dzx = self.get_arc(arc_path).get_file("room.dzr")
    scob = dzx.entries_by_type_and_layer("SCOB", layer)[scob_index]
    # Only salvage points and item-drop tags carry an item ID.
    if scob.actor_class_name in ["d_a_salvage", "d_a_tag_kb_item"]:
        scob.item_id = item_id
        scob.save_changes()
    else:
        raise Exception("%s/SCOB%03X is an unknown type of SCOB" % (arc_path, scob_index))
def change_actor_item(self, arc_path, actor_index, layer, item_id):
    """Set the item carried by a free-standing item actor (ACTR)."""
    if arc_path.endswith("Stage.arc"):
        dzx = self.get_arc(arc_path).get_file("stage.dzs")
    else:
        dzx = self.get_arc(arc_path).get_file("room.dzr")
    actr = dzx.entries_by_type_and_layer("ACTR", layer)[actor_index]
    if actr.actor_class_name in ["d_a_item", "d_a_boss_item"]:
        actr.item_id = item_id
    else:
        raise Exception("%s/ACTR%03X is not an item" % (arc_path, actor_index))
    actr.save_changes()
|
{"/wwlib/rarc.py": ["/wwlib/bmg.py", "/wwlib/bdl.py"], "/randomizer.py": ["/wwlib/rarc.py", "/wwlib/gcm.py", "/customizer.py"]}
|
39,088,706
|
andirigible/wwplando
|
refs/heads/main
|
/randomizers/charts.py
|
import copy
def randomize_charts(self):
    """Shuffle which treasure/triforce chart points to each sea sector."""
    # Only chart types 0, 1, 2, and 6 are shuffled.
    randomizable_charts = [chart for chart in self.chart_list.charts if chart.type in [0, 1, 2, 6]]

    original_charts = copy.deepcopy(randomizable_charts)
    # Sort the source pool by texture ID so re-randomizing the same seed gives
    # identical results regardless of prior iteration order.
    original_charts.sort(key=lambda chart: chart.texture_id)
    self.rng.shuffle(original_charts)

    for chart in randomizable_charts:
        chart_to_copy_from = original_charts.pop()

        # Copy the donor chart's appearance and target sector onto this chart.
        chart.texture_id = chart_to_copy_from.texture_id
        chart.sector_x = chart_to_copy_from.sector_x
        chart.sector_y = chart_to_copy_from.sector_y

        for random_pos_index in range(4):
            possible_pos = chart.possible_random_positions[random_pos_index]
            possible_pos_to_copy_from = chart_to_copy_from.possible_random_positions[random_pos_index]

            possible_pos.chart_texture_x_offset = possible_pos_to_copy_from.chart_texture_x_offset
            possible_pos.chart_texture_y_offset = possible_pos_to_copy_from.chart_texture_y_offset
            possible_pos.salvage_x_pos = possible_pos_to_copy_from.salvage_x_pos
            possible_pos.salvage_y_pos = possible_pos_to_copy_from.salvage_y_pos

        chart.save_changes()

        # Then update the salvage object on the sea so it knows what chart
        # corresponds to it now.
        dzx = self.get_arc("files/res/Stage/sea/Room%d.arc" % chart.island_number).get_file("room.dzr")
        for scob in dzx.entries_by_type("SCOB"):
            if scob.actor_class_name == "d_a_salvage" and scob.salvage_type == 0:
                scob.chart_index_plus_1 = chart.owned_chart_index_plus_1
                scob.save_changes()

        self.island_number_to_chart_name[chart_to_copy_from.island_number] = chart.item_name

    self.logic.update_chart_macros()
|
{"/wwlib/rarc.py": ["/wwlib/bmg.py", "/wwlib/bdl.py"], "/randomizer.py": ["/wwlib/rarc.py", "/wwlib/gcm.py", "/customizer.py"]}
|
39,088,707
|
andirigible/wwplando
|
refs/heads/main
|
/wwlib/j3d.py
|
import os
from enum import Enum
from io import BytesIO
from collections import OrderedDict
from wwlib.bti import BTI
from fs_helpers import *
IMPLEMENTED_CHUNK_TYPES = [
"TEX1",
"MAT3",
"MDL3",
"TRK1",
]
class J3DFile:
    """A J3D container (BMD/BDL/BMT/BRK...): a header followed by chunks."""

    def __init__(self, data):
        # NOTE(review): `Yaz0` is referenced here but no import of it is
        # visible in this file -- confirm it is in scope before this path is
        # hit with compressed input.
        if try_read_str(data, 0, 4) == "Yaz0":
            data = Yaz0.decompress(data)
        self.data = data

        self.read()

    def read(self):
        """Parse the header and every chunk from self.data."""
        data = self.data

        self.magic = read_str(data, 0, 4)
        assert self.magic.startswith("J3D")
        self.file_type = read_str(data, 4, 4)
        self.length = read_u32(data, 8)
        self.num_chunks = read_u32(data, 0x0C)

        self.chunks = []
        self.chunk_by_type = {}
        offset = 0x20
        for chunk_index in range(self.num_chunks):
            if offset == data_len(data):
                # Normally the number of chunks tells us when to stop reading.
                # But in rare cases like Bk.arc/bk_boko.bmt, the chunk count can
                # exceed the chunks actually present, so also stop at EOF.
                break

            chunk_magic = read_str(data, offset, 4)
            if chunk_magic in IMPLEMENTED_CHUNK_TYPES:
                # Each implemented chunk type has a same-named class in this module.
                chunk_class = globals().get(chunk_magic, None)
            else:
                chunk_class = J3DChunk
            chunk = chunk_class()
            chunk.read(data, offset)
            self.chunks.append(chunk)
            self.chunk_by_type[chunk.magic] = chunk

            if chunk.magic in IMPLEMENTED_CHUNK_TYPES:
                # Expose implemented chunks as attributes, e.g. self.tex1.
                setattr(self, chunk.magic.lower(), chunk)

            offset += chunk.size

    def save_changes(self):
        """Serialize all chunks back into self.data and refresh the header."""
        data = self.data

        # Cut off the chunk data first since we're replacing this data entirely.
        data.truncate(0x20)
        data.seek(0x20)

        for chunk in self.chunks:
            chunk.save_changes()

            chunk.data.seek(0)
            chunk_data = chunk.data.read()
            data.write(chunk_data)

        self.length = data_len(data)
        self.num_chunks = len(self.chunks)

        write_magic_str(data, 0, self.magic, 4)
        write_magic_str(data, 4, self.file_type, 4)
        write_u32(data, 8, self.length)
        write_u32(data, 0xC, self.num_chunks)
class J3DFileEntry(J3DFile):
    """A J3DFile backed by an archive file entry (decompressed on demand)."""

    def __init__(self, file_entry):
        self.file_entry = file_entry
        self.file_entry.decompress_data_if_necessary()
        super(J3DFileEntry, self).__init__(self.file_entry.data)
class BDL(J3DFileEntry):
    """Binary Display List model file (J3D2/bdl4)."""

    def __init__(self, file_entry):
        super().__init__(file_entry)
        assert self.magic == "J3D2"
        assert self.file_type == "bdl4"
class BMD(J3DFileEntry):
    """Binary Model file (J3D2/bmd3 or bmd2)."""

    def __init__(self, file_entry):
        super().__init__(file_entry)
        assert self.magic == "J3D2"
        assert self.file_type == "bmd3" or self.file_type == "bmd2"
class BMT(J3DFileEntry):
    """Binary Material Table file (J3D2/bmt3)."""

    def __init__(self, file_entry):
        super().__init__(file_entry)
        assert self.magic == "J3D2"
        assert self.file_type == "bmt3"
class BRK(J3DFileEntry):
    """A .brk animation file; validates the J3D1/brk1 signature on load."""

    def __init__(self, file_entry):
        super().__init__(file_entry)
        assert (self.magic, self.file_type) == ("J3D1", "brk1")
class J3DChunk:
    """One chunk of a J3D file: a 4-byte magic, a u32 total size, and data.

    Subclasses override read_chunk_specific_data/save_chunk_specific_data to
    parse and serialize their payload; this base class handles the header,
    the data copy, and the shared string-table format.
    """

    def __init__(self):
        self.magic = None   # 4-character chunk type identifier
        self.size = None    # total chunk size in bytes, including the header
        self.data = None    # BytesIO copy of the whole chunk

    def read(self, file_data, chunk_offset):
        """Read this chunk's header and copy its full data out of file_data."""
        self.magic = read_str(file_data, chunk_offset, 4)
        self.size = read_u32(file_data, chunk_offset+4)
        file_data.seek(chunk_offset)
        self.data = BytesIO(file_data.read(self.size))

        self.read_chunk_specific_data()

    def read_chunk_specific_data(self):
        # Overridden by subclasses that parse their payload.
        pass

    def save_changes(self):
        """Serialize the payload, pad the chunk, and rewrite its header."""
        self.save_chunk_specific_data()

        # Pad the size of this chunk to the next 0x20 bytes.
        align_data_to_nearest(self.data, 0x20)
        self.size = data_len(self.data)
        write_magic_str(self.data, 0, self.magic, 4)
        write_u32(self.data, 4, self.size)

    def save_chunk_specific_data(self):
        # Overridden by subclasses that serialize their payload.
        pass

    def read_string_table(self, string_table_offset):
        """Read a J3D string table at the given offset and return its strings.

        Layout: u16 count, u16 padding, then per string a u16 hash and a u16
        offset (relative to the table start) to null-terminated string data.
        """
        num_strings = read_u16(self.data, string_table_offset+0x00)
        #padding = read_u16(self.data, string_table_offset+0x02)
        #assert padding == 0xFFFF

        strings = []
        offset = string_table_offset + 4
        for _ in range(num_strings):
            # The stored hash is not validated when reading.
            string_hash = read_u16(self.data, offset+0x00)
            string_data_offset = read_u16(self.data, offset+0x02)
            string = read_str_until_null_character(self.data, string_table_offset + string_data_offset)
            strings.append(string)
            offset += 4

        return strings

    def write_string_table(self, string_table_offset, strings):
        """Write the given strings as a J3D string table at string_table_offset."""
        num_strings = len(strings)
        write_u16(self.data, string_table_offset+0x00, num_strings)
        write_u16(self.data, string_table_offset+0x02, 0xFFFF)

        offset = string_table_offset + 4
        next_string_data_offset = 4 + num_strings*4
        for string in strings:
            # J3D's string hash: hash = hash*3 + char_code, kept to 16 bits.
            # (Renamed from `hash` so the builtin is not shadowed.)
            string_hash = 0
            for char in string:
                string_hash *= 3
                string_hash += ord(char)
                string_hash &= 0xFFFF

            write_u16(self.data, offset+0x00, string_hash)
            write_u16(self.data, offset+0x02, next_string_data_offset)
            write_str_with_null_byte(self.data, string_table_offset+next_string_data_offset, string)

            offset += 4
            next_string_data_offset += len(string) + 1
class TEX1(J3DChunk):
    """TEX1 chunk: a model's textures, stored as a list of BTI headers
    followed by shared image/palette data and a name string table."""

    def read_chunk_specific_data(self):
        self.textures = []
        self.num_textures = read_u16(self.data, 8)
        self.texture_header_list_offset = read_u32(self.data, 0x0C)
        for texture_index in range(self.num_textures):
            # Each BTI header is 0x20 bytes long.
            bti_header_offset = self.texture_header_list_offset + texture_index*0x20
            texture = BTI(self.data, bti_header_offset)
            self.textures.append(texture)

        self.string_table_offset = read_u32(self.data, 0x10)
        self.texture_names = self.read_string_table(self.string_table_offset)
        # Several texture headers can share one name, so each name maps to a
        # list of textures rather than a single texture.
        self.textures_by_name = OrderedDict()
        for i, texture in enumerate(self.textures):
            texture_name = self.texture_names[i]
            if texture_name not in self.textures_by_name:
                self.textures_by_name[texture_name] = []
            self.textures_by_name[texture_name].append(texture)

    def save_chunk_specific_data(self):
        # Does not support adding new textures currently.
        assert len(self.textures) == self.num_textures

        next_available_data_offset = 0x20 + self.num_textures*0x20 # Right after the last header ends
        self.data.truncate(next_available_data_offset)
        self.data.seek(next_available_data_offset)

        # Write each texture's image data. Textures sharing the same name
        # share one copy of the image data (deduplicated by filename).
        image_data_offsets = {}
        for i, texture in enumerate(self.textures):
            filename = self.texture_names[i]
            if filename in image_data_offsets:
                # Offsets stored in a BTI header are relative to that header.
                texture.image_data_offset = image_data_offsets[filename] - texture.header_offset
                continue
            self.data.seek(next_available_data_offset)
            texture.image_data_offset = next_available_data_offset - texture.header_offset
            image_data_offsets[filename] = next_available_data_offset
            texture.image_data.seek(0)
            self.data.write(texture.image_data.read())
            # Keep each data section 0x20-aligned.
            align_data_to_nearest(self.data, 0x20)
            next_available_data_offset = data_len(self.data)

        # Then write palette data, deduplicated the same way.
        palette_data_offsets = {}
        for i, texture in enumerate(self.textures):
            filename = self.texture_names[i]
            if filename in palette_data_offsets:
                texture.palette_data_offset = palette_data_offsets[filename] - texture.header_offset
                continue
            self.data.seek(next_available_data_offset)
            if texture.needs_palettes():
                texture.palette_data_offset = next_available_data_offset - texture.header_offset
                palette_data_offsets[filename] = next_available_data_offset
                texture.palette_data.seek(0)
                self.data.write(texture.palette_data.read())
                align_data_to_nearest(self.data, 0x20)
                next_available_data_offset = data_len(self.data)
            else:
                # If the image doesn't use palettes its palette offset is just the same as the first texture's image offset.
                first_texture = self.textures[0]
                texture.palette_data_offset = first_texture.image_data_offset + first_texture.header_offset - texture.header_offset
                palette_data_offsets[filename] = first_texture.image_data_offset + first_texture.header_offset

        # Now that all offsets are known, rewrite every BTI header.
        for texture in self.textures:
            texture.save_header_changes()

        # The string table goes after all the image and palette data.
        self.string_table_offset = next_available_data_offset
        write_u32(self.data, 0x10, self.string_table_offset)
        self.write_string_table(self.string_table_offset, self.texture_names)
class MAT3(J3DChunk):
    """MAT3 chunk: materials. Only the TEV register colors, TEV konst colors,
    and material names are parsed; the rest stays as raw bytes."""

    def read_chunk_specific_data(self):
        self.tev_reg_colors_offset = read_u32(self.data, 0x50)
        self.tev_konst_colors_offset = read_u32(self.data, 0x54)
        self.tev_stages_offset = read_u32(self.data, 0x58)

        # Register colors: s16 RGBA, 8 bytes each, packed between the reg
        # color section and the konst color section.
        self.num_reg_colors = (self.tev_konst_colors_offset - self.tev_reg_colors_offset) // 8
        self.reg_colors = []
        for idx in range(self.num_reg_colors):
            base = self.tev_reg_colors_offset + idx*8
            color = tuple(read_s16(self.data, base + comp*2) for comp in range(4))
            self.reg_colors.append(color)

        # Konst colors: u8 RGBA, 4 bytes each, between the konst color
        # section and the TEV stages section.
        self.num_konst_colors = (self.tev_stages_offset - self.tev_konst_colors_offset) // 4
        self.konst_colors = []
        for idx in range(self.num_konst_colors):
            base = self.tev_konst_colors_offset + idx*4
            color = tuple(read_u8(self.data, base + comp) for comp in range(4))
            self.konst_colors.append(color)

        self.string_table_offset = read_u32(self.data, 0x14)
        self.mat_names = self.read_string_table(self.string_table_offset)

    def save_chunk_specific_data(self):
        # Write the colors back in place; counts and offsets are unchanged.
        for idx in range(self.num_reg_colors):
            base = self.tev_reg_colors_offset + idx*8
            r, g, b, a = self.reg_colors[idx]
            write_s16(self.data, base + 0, r)
            write_s16(self.data, base + 2, g)
            write_s16(self.data, base + 4, b)
            write_s16(self.data, base + 6, a)
        for idx in range(self.num_konst_colors):
            base = self.tev_konst_colors_offset + idx*4
            r, g, b, a = self.konst_colors[idx]
            write_u8(self.data, base + 0, r)
            write_u8(self.data, base + 1, g)
            write_u8(self.data, base + 2, b)
            write_u8(self.data, base + 3, a)
class MDL3(J3DChunk):
    """MDL3 chunk: precompiled GPU command packets, one entry per material."""

    def read_chunk_specific_data(self):
        self.num_entries = read_u16(self.data, 0x08)
        self.packets_offset = read_u32(self.data, 0x0C)

        self.entries = []
        for idx in range(self.num_entries):
            # Each packet record is 8 bytes: u32 offset (relative to the
            # record itself) and u32 size.
            record_offset = self.packets_offset + idx*8
            entry_offset = read_u32(self.data, record_offset + 0x00)
            entry_size = read_u32(self.data, record_offset + 0x04)
            self.entries.append(MDLEntry(self.data, entry_offset+record_offset, entry_size))

        self.string_table_offset = read_u32(self.data, 0x20)
        self.mat_names = self.read_string_table(self.string_table_offset)

    def save_chunk_specific_data(self):
        # Each entry is written back in place at its original offset.
        for entry in self.entries:
            entry.save_changes()
            entry.data.seek(0)
            self.data.seek(entry.entry_offset)
            self.data.write(entry.data.read())
class MDLEntry:
    """One MDL3 display list: a run of BP and XF commands, optionally ended
    by an END_MARKER byte before the entry's size is reached."""

    def __init__(self, chunk_data, entry_offset, size):
        self.entry_offset = entry_offset
        self.size = size
        chunk_data.seek(self.entry_offset)
        self.data = BytesIO(chunk_data.read(self.size))

        self.read()

    def read(self):
        """Parse the command stream into bp_commands and xf_commands."""
        self.bp_commands = []
        self.xf_commands = []
        offset = 0
        while offset < self.size:
            opcode = read_u8(self.data, offset)
            if opcode == MDLCommandType.BP.value:
                cmd = BPCommand(self.data)
                offset = cmd.read(offset)
                self.bp_commands.append(cmd)
            elif opcode == MDLCommandType.XF.value:
                cmd = XFCommand(self.data)
                offset = cmd.read(offset)
                self.xf_commands.append(cmd)
            elif opcode == MDLCommandType.END_MARKER.value:
                break
            else:
                raise Exception("Invalid MDL3 command type: %02X" % opcode)

    def save_changes(self):
        """Re-serialize all commands (BP first, then XF) and pad to 0x20."""
        offset = 0
        for cmd in self.bp_commands:
            offset = cmd.save(offset)
        for cmd in self.xf_commands:
            offset = cmd.save(offset)

        # Zero-fill up to the next 0x20-byte boundary.
        remainder = offset % 0x20
        if remainder != 0:
            padding_bytes_needed = 0x20 - remainder
            write_bytes(self.data, offset, b"\0"*padding_bytes_needed)
            offset += padding_bytes_needed

        # Adding new commands not supported.
        assert offset <= self.size
class MDLCommandType(Enum):
    """First byte of each command in an MDL3 display list entry."""
    END_MARKER = 0x00  # terminates the command stream early
    XF = 0x10          # parsed by XFCommand
    BP = 0x61          # parsed by BPCommand
class BPRegister(Enum):
    """BP register IDs: the high byte of a BPCommand's packed 32-bit value.

    NOTE(review): `field_mask` and `clock` break the UPPER_SNAKE_CASE naming
    of the other members, but renaming them would change the public interface
    for any code referencing these members by name.
    """
    GEN_MODE = 0x00

    # Indirect texture matrices and commands.
    IND_MTXA0 = 0x06
    IND_MTXB0 = 0x07
    IND_MTXC0 = 0x08
    IND_MTXA1 = 0x09
    IND_MTXB1 = 0x0A
    IND_MTXC1 = 0x0B
    IND_MTXA2 = 0x0C
    IND_MTXB2 = 0x0D
    IND_MTXC2 = 0x0E
    IND_IMASK = 0x0F
    IND_CMD0 = 0x10
    IND_CMD1 = 0x11
    IND_CMD2 = 0x12
    IND_CMD3 = 0x13
    IND_CMD4 = 0x14
    IND_CMD5 = 0x15
    IND_CMD6 = 0x16
    IND_CMD7 = 0x17
    IND_CMD8 = 0x18
    IND_CMD9 = 0x19
    IND_CMDA = 0x1A
    IND_CMDB = 0x1B
    IND_CMDC = 0x1C
    IND_CMDD = 0x1D
    IND_CMDE = 0x1E
    IND_CMDF = 0x1F

    SCISSOR_0 = 0x20
    SCISSOR_1 = 0x21
    SU_LPSIZE = 0x22
    SU_COUNTER = 0x23
    RAS_COUNTER = 0x24
    RAS1_SS0 = 0x25
    RAS1_SS1 = 0x26
    RAS1_IREF = 0x27
    RAS1_TREF0 = 0x28
    RAS1_TREF1 = 0x29
    RAS1_TREF2 = 0x2A
    RAS1_TREF3 = 0x2B
    RAS1_TREF4 = 0x2C
    RAS1_TREF5 = 0x2D
    RAS1_TREF6 = 0x2E
    RAS1_TREF7 = 0x2F

    # Texture coordinate scale registers, one S/T pair per texture unit.
    SU_SSIZE0 = 0x30
    SU_TSIZE0 = 0x31
    SU_SSIZE1 = 0x32
    SU_TSIZE1 = 0x33
    SU_SSIZE2 = 0x34
    SU_TSIZE2 = 0x35
    SU_SSIZE3 = 0x36
    SU_TSIZE3 = 0x37
    SU_SSIZE4 = 0x38
    SU_TSIZE4 = 0x39
    SU_SSIZE5 = 0x3A
    SU_TSIZE5 = 0x3B
    SU_SSIZE6 = 0x3C
    SU_TSIZE6 = 0x3D
    SU_SSIZE7 = 0x3E
    SU_TSIZE7 = 0x3F

    # Pixel engine registers.
    PE_ZMODE = 0x40
    PE_CMODE0 = 0x41
    PE_CMODE1 = 0x42
    PE_CONTROL = 0x43
    field_mask = 0x44
    PE_DONE = 0x45
    clock = 0x46
    PE_TOKEN = 0x47
    PE_TOKEN_INT = 0x48
    EFB_SOURCE_RECT_TOP_LEFT = 0x49
    EFB_SOURCE_RECT_WIDTH_HEIGHT = 0x4A
    XFB_TARGET_ADDRESS = 0x4B

    DISP_COPY_Y_SCALE = 0x4E
    PE_COPY_CLEAR_AR = 0x4F
    PE_COPY_CLEAR_GB = 0x50
    PE_COPY_CLEAR_Z = 0x51
    PE_COPY_EXECUTE = 0x52

    SCISSOR_BOX_OFFSET = 0x59

    TEX_LOADTLUT0 = 0x64
    TEX_LOADTLUT1 = 0x65

    # Texture unit setup registers for texture units 0-3.
    TX_SET_MODE0_I0 = 0x80
    TX_SET_MODE0_I1 = 0x81
    TX_SET_MODE0_I2 = 0x82
    TX_SET_MODE0_I3 = 0x83
    TX_SET_MODE1_I0 = 0x84
    TX_SET_MODE1_I1 = 0x85
    TX_SET_MODE1_I2 = 0x86
    TX_SET_MODE1_I3 = 0x87
    TX_SETIMAGE0_I0 = 0x88
    TX_SETIMAGE0_I1 = 0x89
    TX_SETIMAGE0_I2 = 0x8A
    TX_SETIMAGE0_I3 = 0x8B
    TX_SETIMAGE1_I0 = 0x8C
    TX_SETIMAGE1_I1 = 0x8D
    TX_SETIMAGE1_I2 = 0x8E
    TX_SETIMAGE1_I3 = 0x8F
    TX_SETIMAGE2_I0 = 0x90
    TX_SETIMAGE2_I1 = 0x91
    TX_SETIMAGE2_I2 = 0x92
    TX_SETIMAGE2_I3 = 0x93
    TX_SETIMAGE3_I0 = 0x94
    TX_SETIMAGE3_I1 = 0x95
    TX_SETIMAGE3_I2 = 0x96
    TX_SETIMAGE3_I3 = 0x97
    TX_LOADTLUT0 = 0x98
    TX_LOADTLUT1 = 0x99
    TX_LOADTLUT2 = 0x9A
    TX_LOADTLUT3 = 0x9B

    # Texture unit setup registers for texture units 4-7.
    TX_SET_MODE0_I4 = 0xA0
    TX_SET_MODE0_I5 = 0xA1
    TX_SET_MODE0_I6 = 0xA2
    TX_SET_MODE0_I7 = 0xA3
    TX_SET_MODE1_I4 = 0xA4
    TX_SET_MODE1_I5 = 0xA5
    TX_SET_MODE1_I6 = 0xA6
    TX_SET_MODE1_I7 = 0xA7
    TX_SETIMAGE0_I4 = 0xA8
    TX_SETIMAGE0_I5 = 0xA9
    TX_SETIMAGE0_I6 = 0xAA
    TX_SETIMAGE0_I7 = 0xAB
    TX_SETIMAGE1_I4 = 0xAC
    TX_SETIMAGE1_I5 = 0xAD
    TX_SETIMAGE1_I6 = 0xAE
    TX_SETIMAGE1_I7 = 0xAF
    TX_SETIMAGE2_I4 = 0xB0
    TX_SETIMAGE2_I5 = 0xB1
    TX_SETIMAGE2_I6 = 0xB2
    TX_SETIMAGE2_I7 = 0xB3
    TX_SETIMAGE3_I4 = 0xB4
    TX_SETIMAGE3_I5 = 0xB5
    TX_SETIMAGE3_I6 = 0xB6
    TX_SETIMAGE3_I7 = 0xB7
    TX_SETTLUT_I4 = 0xB8
    TX_SETTLUT_I5 = 0xB9
    TX_SETTLUT_I6 = 0xBA
    TX_SETTLUT_I7 = 0xBB

    # TEV stage color/alpha combiner settings, 16 stages.
    TEV_COLOR_ENV_0 = 0xC0
    TEV_ALPHA_ENV_0 = 0xC1
    TEV_COLOR_ENV_1 = 0xC2
    TEV_ALPHA_ENV_1 = 0xC3
    TEV_COLOR_ENV_2 = 0xC4
    TEV_ALPHA_ENV_2 = 0xC5
    TEV_COLOR_ENV_3 = 0xC6
    TEV_ALPHA_ENV_3 = 0xC7
    TEV_COLOR_ENV_4 = 0xC8
    TEV_ALPHA_ENV_4 = 0xC9
    TEV_COLOR_ENV_5 = 0xCA
    TEV_ALPHA_ENV_5 = 0xCB
    TEV_COLOR_ENV_6 = 0xCC
    TEV_ALPHA_ENV_6 = 0xCD
    TEV_COLOR_ENV_7 = 0xCE
    TEV_ALPHA_ENV_7 = 0xCF
    TEV_COLOR_ENV_8 = 0xD0
    TEV_ALPHA_ENV_8 = 0xD1
    TEV_COLOR_ENV_9 = 0xD2
    TEV_ALPHA_ENV_9 = 0xD3
    TEV_COLOR_ENV_A = 0xD4
    TEV_ALPHA_ENV_A = 0xD5
    TEV_COLOR_ENV_B = 0xD6
    TEV_ALPHA_ENV_B = 0xD7
    TEV_COLOR_ENV_C = 0xD8
    TEV_ALPHA_ENV_C = 0xD9
    TEV_COLOR_ENV_D = 0xDA
    TEV_ALPHA_ENV_D = 0xDB
    TEV_COLOR_ENV_E = 0xDC
    TEV_ALPHA_ENV_E = 0xDD
    TEV_COLOR_ENV_F = 0xDE
    TEV_ALPHA_ENV_F = 0xDF

    # TEV color registers (low/high halves of 4 registers).
    TEV_REGISTERL_0 = 0xE0
    TEV_REGISTERH_0 = 0xE1
    TEV_REGISTERL_1 = 0xE2
    TEV_REGISTERH_1 = 0xE3
    TEV_REGISTERL_2 = 0xE4
    TEV_REGISTERH_2 = 0xE5
    TEV_REGISTERL_3 = 0xE6
    TEV_REGISTERH_3 = 0xE7

    # Fog, alpha test, Z textures, and konst selection.
    FOG_RANGE = 0xE8
    TEV_FOG_PARAM_0 = 0xEE
    TEV_FOG_PARAM_1 = 0xEF
    TEV_FOG_PARAM_2 = 0xF0
    TEV_FOG_PARAM_3 = 0xF1
    TEV_FOG_COLOR = 0xF2
    TEV_ALPHAFUNC = 0xF3
    TEV_Z_ENV_0 = 0xF4
    TEV_Z_ENV_1 = 0xF5
    TEV_KSEL_0 = 0xF6
    TEV_KSEL_1 = 0xF7
    TEV_KSEL_2 = 0xF8
    TEV_KSEL_3 = 0xF9
    TEV_KSEL_4 = 0xFA
    TEV_KSEL_5 = 0xFB
    TEV_KSEL_6 = 0xFC
    TEV_KSEL_7 = 0xFD
    BP_MASK = 0xFE
class BPCommand:
    """A 5-byte BP command: the 0x61 opcode, then one u32 whose top byte is
    the register ID and whose low 24 bits hold the value to load."""

    def __init__(self, data):
        self.data = data

    def read(self, offset):
        """Parse the command at offset; returns the offset just past it."""
        assert read_u8(self.data, offset) == MDLCommandType.BP.value
        offset += 1

        packed = read_u32(self.data, offset)
        offset += 4
        self.register = (packed & 0xFF000000) >> 24
        self.value = packed & 0x00FFFFFF

        return offset

    def save(self, offset):
        """Write the command at offset; returns the offset just past it."""
        write_u8(self.data, offset, MDLCommandType.BP.value)
        offset += 1

        packed = ((self.register << 24) & 0xFF000000) | (self.value & 0x00FFFFFF)
        write_u32(self.data, offset, packed)
        offset += 4

        return offset
class XFRegister(Enum):
    """XF register IDs: the u16 register field of an XFCommand."""
    SETNUMCHAN = 0x1009
    SETCHAN0_AMBCOLOR = 0x100A
    SETCHAN0_MATCOLOR = 0x100C
    SETCHAN0_COLOR = 0x100E
    SETNUMTEXGENS = 0x103F
    SETTEXMTXINFO = 0x1040
    SETPOSMTXINFO = 0x1050
class XFCommand:
    """An XF command: the 0x10 opcode, a u16 argument count minus one, a u16
    register ID, then that many u32 arguments."""

    def __init__(self, data):
        self.data = data

    def read(self, offset):
        """Parse the command at offset; returns the offset just past it."""
        assert read_u8(self.data, offset) == MDLCommandType.XF.value
        offset += 1

        # The stored count is one less than the actual number of arguments.
        num_args = read_u16(self.data, offset) + 1
        offset += 2
        self.register = read_u16(self.data, offset)
        offset += 2

        self.args = []
        for _ in range(num_args):
            self.args.append(read_u32(self.data, offset))
            offset += 4

        return offset

    def save(self, offset):
        """Write the command at offset; returns the offset just past it."""
        write_u8(self.data, offset, MDLCommandType.XF.value)
        offset += 1

        write_u16(self.data, offset, len(self.args)-1)
        offset += 2
        write_u16(self.data, offset, self.register)
        offset += 2

        for arg in self.args:
            write_u32(self.data, offset, arg)
            offset += 4

        return offset
class TRK1(J3DChunk):
    """TRK1 chunk: keyframed TEV color animations, grouped by material name.

    Holds two groups of ColorAnimations — "reg" colors (stored as s16 RGBA)
    and "konst" colors (cf. MAT3's tev_reg_colors/tev_konst_colors) — with
    all channel values pooled into shared per-channel track arrays.
    """

    def read_chunk_specific_data(self):
        assert read_str(self.data, 0, 4) == "TRK1"
        self.loop_mode = LoopMode(read_u8(self.data, 0x08))
        assert read_u8(self.data, 0x09) == 0xFF
        self.duration = read_u16(self.data, 0x0A)

        # Animation counts and the length of each pooled value array.
        reg_color_anims_count = read_u16(self.data, 0x0C)
        konst_color_anims_count = read_u16(self.data, 0x0E)
        reg_r_count = read_u16(self.data, 0x10)
        reg_g_count = read_u16(self.data, 0x12)
        reg_b_count = read_u16(self.data, 0x14)
        reg_a_count = read_u16(self.data, 0x16)
        konst_r_count = read_u16(self.data, 0x18)
        konst_g_count = read_u16(self.data, 0x1A)
        konst_b_count = read_u16(self.data, 0x1C)
        konst_a_count = read_u16(self.data, 0x1E)

        # Section offsets (relative to the start of the chunk).
        reg_color_anims_offset = read_u32(self.data, 0x20)
        konst_color_anims_offset = read_u32(self.data, 0x24)
        reg_remap_table_offset = read_u32(self.data, 0x28)
        konst_remap_table_offset = read_u32(self.data, 0x2C)
        reg_mat_names_table_offset = read_u32(self.data, 0x30)
        konst_mat_names_table_offset = read_u32(self.data, 0x34)
        reg_r_offset = read_u32(self.data, 0x38)
        reg_g_offset = read_u32(self.data, 0x3C)
        reg_b_offset = read_u32(self.data, 0x40)
        reg_a_offset = read_u32(self.data, 0x44)
        konst_r_offset = read_u32(self.data, 0x48)
        konst_g_offset = read_u32(self.data, 0x4C)
        konst_b_offset = read_u32(self.data, 0x50)
        konst_a_offset = read_u32(self.data, 0x54)

        # Ensure the remap tables are identity.
        # Actual remapping not currently supported by this implementation.
        for i in range(reg_color_anims_count):
            assert i == read_u16(self.data, reg_remap_table_offset+i*2)
        for i in range(konst_color_anims_count):
            assert i == read_u16(self.data, konst_remap_table_offset+i*2)

        reg_mat_names = self.read_string_table(reg_mat_names_table_offset)
        konst_mat_names = self.read_string_table(konst_mat_names_table_offset)

        # Read the pooled s16 value arrays, one per color channel.
        reg_r_track_data = []
        for i in range(reg_r_count):
            r = read_s16(self.data, reg_r_offset+i*2)
            reg_r_track_data.append(r)
        reg_g_track_data = []
        for i in range(reg_g_count):
            g = read_s16(self.data, reg_g_offset+i*2)
            reg_g_track_data.append(g)
        reg_b_track_data = []
        for i in range(reg_b_count):
            b = read_s16(self.data, reg_b_offset+i*2)
            reg_b_track_data.append(b)
        reg_a_track_data = []
        for i in range(reg_a_count):
            a = read_s16(self.data, reg_a_offset+i*2)
            reg_a_track_data.append(a)
        konst_r_track_data = []
        for i in range(konst_r_count):
            r = read_s16(self.data, konst_r_offset+i*2)
            konst_r_track_data.append(r)
        konst_g_track_data = []
        for i in range(konst_g_count):
            g = read_s16(self.data, konst_g_offset+i*2)
            konst_g_track_data.append(g)
        konst_b_track_data = []
        for i in range(konst_b_count):
            b = read_s16(self.data, konst_b_offset+i*2)
            konst_b_track_data.append(b)
        konst_a_track_data = []
        for i in range(konst_a_count):
            a = read_s16(self.data, konst_a_offset+i*2)
            konst_a_track_data.append(a)

        reg_animations = []
        konst_animations = []
        self.mat_name_to_reg_anims = OrderedDict()
        self.mat_name_to_konst_anims = OrderedDict()

        # A material can have several animations, so group them per name.
        offset = reg_color_anims_offset
        for i in range(reg_color_anims_count):
            anim = ColorAnimation()
            anim.read(self.data, offset, reg_r_track_data, reg_g_track_data, reg_b_track_data, reg_a_track_data)
            offset += ColorAnimation.DATA_SIZE
            reg_animations.append(anim)
            mat_name = reg_mat_names[i]
            if mat_name not in self.mat_name_to_reg_anims:
                self.mat_name_to_reg_anims[mat_name] = []
            self.mat_name_to_reg_anims[mat_name].append(anim)

        offset = konst_color_anims_offset
        for i in range(konst_color_anims_count):
            anim = ColorAnimation()
            anim.read(self.data, offset, konst_r_track_data, konst_g_track_data, konst_b_track_data, konst_a_track_data)
            offset += ColorAnimation.DATA_SIZE
            konst_animations.append(anim)
            mat_name = konst_mat_names[i]
            if mat_name not in self.mat_name_to_konst_anims:
                self.mat_name_to_konst_anims[mat_name] = []
            self.mat_name_to_konst_anims[mat_name].append(anim)

    def save_chunk_specific_data(self):
        # Cut off all the data, we're rewriting it entirely.
        self.data.truncate(0)

        # Placeholder for the header.
        self.data.seek(0)
        self.data.write(b"\0"*0x58)

        align_data_to_nearest(self.data, 0x20)
        offset = self.data.tell()

        # Flatten the per-material dicts back into parallel anim/name lists.
        reg_animations = []
        konst_animations = []
        reg_mat_names = []
        konst_mat_names = []
        for mat_name, anims in self.mat_name_to_reg_anims.items():
            for anim in anims:
                reg_animations.append(anim)
                reg_mat_names.append(mat_name)
        for mat_name, anims in self.mat_name_to_konst_anims.items():
            for anim in anims:
                konst_animations.append(anim)
                konst_mat_names.append(mat_name)

        # Saving each animation also fills in the pooled track data lists.
        # Empty sections record offset 0 in the header.
        reg_r_track_data = []
        reg_g_track_data = []
        reg_b_track_data = []
        reg_a_track_data = []
        reg_color_anims_offset = offset
        if not reg_animations:
            reg_color_anims_offset = 0
        for anim in reg_animations:
            anim.save_changes(self.data, offset, reg_r_track_data, reg_g_track_data, reg_b_track_data, reg_a_track_data)
            offset += ColorAnimation.DATA_SIZE
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()

        konst_r_track_data = []
        konst_g_track_data = []
        konst_b_track_data = []
        konst_a_track_data = []
        konst_color_anims_offset = offset
        if not konst_animations:
            konst_color_anims_offset = 0
        for anim in konst_animations:
            anim.save_changes(self.data, offset, konst_r_track_data, konst_g_track_data, konst_b_track_data, konst_a_track_data)
            offset += ColorAnimation.DATA_SIZE
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()

        # Write out each pooled value array, 4-byte aligning between them.
        reg_r_offset = offset
        if not reg_r_track_data:
            reg_r_offset = 0
        for r in reg_r_track_data:
            write_s16(self.data, offset, r)
            offset += 2
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()
        reg_g_offset = offset
        if not reg_g_track_data:
            reg_g_offset = 0
        for g in reg_g_track_data:
            write_s16(self.data, offset, g)
            offset += 2
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()
        reg_b_offset = offset
        if not reg_b_track_data:
            reg_b_offset = 0
        for b in reg_b_track_data:
            write_s16(self.data, offset, b)
            offset += 2
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()
        reg_a_offset = offset
        if not reg_a_track_data:
            reg_a_offset = 0
        for a in reg_a_track_data:
            write_s16(self.data, offset, a)
            offset += 2
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()
        konst_r_offset = offset
        if not konst_r_track_data:
            konst_r_offset = 0
        for r in konst_r_track_data:
            write_s16(self.data, offset, r)
            offset += 2
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()
        konst_g_offset = offset
        if not konst_g_track_data:
            konst_g_offset = 0
        for g in konst_g_track_data:
            write_s16(self.data, offset, g)
            offset += 2
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()
        konst_b_offset = offset
        if not konst_b_track_data:
            konst_b_offset = 0
        for b in konst_b_track_data:
            write_s16(self.data, offset, b)
            offset += 2
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()
        konst_a_offset = offset
        if not konst_a_track_data:
            konst_a_offset = 0
        for a in konst_a_track_data:
            write_s16(self.data, offset, a)
            offset += 2
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()

        # Remaps tables always written as identity, remapping not supported.
        reg_remap_table_offset = offset
        if not reg_animations:
            reg_remap_table_offset = 0
        for i in range(len(reg_animations)):
            write_u16(self.data, offset, i)
            offset += 2
        konst_remap_table_offset = offset
        if not konst_animations:
            konst_remap_table_offset = 0
        for i in range(len(konst_animations)):
            write_u16(self.data, offset, i)
            offset += 2
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()

        # Material name string tables come last.
        reg_mat_names_table_offset = offset
        self.write_string_table(reg_mat_names_table_offset, reg_mat_names)
        align_data_to_nearest(self.data, 4)
        offset = self.data.tell()
        konst_mat_names_table_offset = offset
        self.write_string_table(konst_mat_names_table_offset, konst_mat_names)

        # Write the header.
        write_magic_str(self.data, 0, "TRK1", 4)
        write_u8(self.data, 0x08, self.loop_mode.value)
        write_u8(self.data, 0x09, 0xFF)
        write_u16(self.data, 0x0A, self.duration)
        write_u16(self.data, 0x0C, len(reg_animations))
        write_u16(self.data, 0x0E, len(konst_animations))
        write_s16(self.data, 0x10, len(reg_r_track_data))
        write_s16(self.data, 0x12, len(reg_g_track_data))
        write_s16(self.data, 0x14, len(reg_b_track_data))
        write_s16(self.data, 0x16, len(reg_a_track_data))
        write_s16(self.data, 0x18, len(konst_r_track_data))
        write_s16(self.data, 0x1A, len(konst_g_track_data))
        write_s16(self.data, 0x1C, len(konst_b_track_data))
        write_s16(self.data, 0x1E, len(konst_a_track_data))
        write_u32(self.data, 0x20, reg_color_anims_offset)
        write_u32(self.data, 0x24, konst_color_anims_offset)
        write_u32(self.data, 0x28, reg_remap_table_offset)
        write_u32(self.data, 0x2C, konst_remap_table_offset)
        write_u32(self.data, 0x30, reg_mat_names_table_offset)
        write_u32(self.data, 0x34, konst_mat_names_table_offset)
        write_u32(self.data, 0x38, reg_r_offset)
        write_u32(self.data, 0x3C, reg_g_offset)
        write_u32(self.data, 0x40, reg_b_offset)
        write_u32(self.data, 0x44, reg_a_offset)
        write_u32(self.data, 0x48, konst_r_offset)
        write_u32(self.data, 0x4C, konst_g_offset)
        write_u32(self.data, 0x50, konst_b_offset)
        write_u32(self.data, 0x54, konst_a_offset)
class LoopMode(Enum):
    """How an animation behaves after it reaches its final frame."""
    ONCE = 0
    ONCE_AND_RESET = 1
    REPEAT = 2
    MIRRORED_ONCE = 3
    MIRRORED_REPEAT = 4
class TangentType(Enum):
    """Keyframe tangent storage: IN stores one shared tangent per keyframe
    (3 values each); IN_OUT stores separate in/out tangents (4 values each)."""
    IN = 0
    IN_OUT = 1
class AnimationTrack:
    """One animated channel: a 6-byte header (count, index, tangent type)
    referencing values in a shared per-channel s16 track data pool."""

    DATA_SIZE = 6

    def __init__(self):
        self.tangent_type = TangentType.IN_OUT
        self.keyframes = []

    def read(self, data, offset, track_data):
        """Parse the track header at offset and build keyframes from track_data."""
        self.count = read_u16(data, offset+0)
        self.index = read_u16(data, offset+2)
        self.tangent_type = TangentType(read_u16(data, offset+4))

        self.keyframes = []
        if self.count == 1:
            # A single-keyframe track stores only the constant value.
            self.keyframes.append(AnimationKeyframe(0, track_data[self.index], 0, 0))
            return

        if self.tangent_type == TangentType.IN:
            stride = 3
        elif self.tangent_type == TangentType.IN_OUT:
            stride = 4
        else:
            raise Exception("Invalid tangent type")
        for i in range(self.index, self.index + self.count*stride, stride):
            time = track_data[i+0]
            value = track_data[i+1]
            tangent_in = track_data[i+2]
            # For IN tracks the single stored tangent serves as both in and out.
            tangent_out = track_data[i + stride - 1]
            self.keyframes.append(AnimationKeyframe(time, value, tangent_in, tangent_out))

    def save_changes(self, data, offset, track_data):
        """Write the track header at offset, appending this track's values to
        track_data unless an identical run already exists in the pool."""
        self.count = len(self.keyframes)

        flattened = []
        if self.count == 1:
            flattened.append(self.keyframes[0].value)
        elif self.tangent_type == TangentType.IN:
            for kf in self.keyframes:
                flattened += [kf.time, kf.value, kf.tangent_in]
        elif self.tangent_type == TangentType.IN_OUT:
            for kf in self.keyframes:
                flattened += [kf.time, kf.value, kf.tangent_in, kf.tangent_out]
        else:
            raise Exception("Invalid tangent type")

        # Try to find if this track's data is already in the full track list
        # to avoid duplicating data.
        self.index = None
        span = len(flattened)
        for start in range(len(track_data) - span + 1):
            if track_data[start:start+span] == flattened:
                self.index = start
                break
        if self.index is None:
            # If this data isn't already in the list, we append it to the end.
            self.index = len(track_data)
            track_data += flattened

        write_u16(data, offset+0, self.count)
        write_u16(data, offset+2, self.index)
        write_u16(data, offset+4, self.tangent_type.value)
class AnimationKeyframe:
    """A single keyframe: a time, a value, and in/out tangents."""

    def __init__(self, time, value, tangent_in, tangent_out):
        self.time, self.value = time, value
        self.tangent_in, self.tangent_out = tangent_in, tangent_out
class ColorAnimation:
    """Animation of one RGBA color: four AnimationTracks (r, g, b, a) plus
    the index of the color being animated (color_id)."""

    DATA_SIZE = 4*AnimationTrack.DATA_SIZE + 4

    def __init__(self):
        pass

    def read(self, data, offset, r_track_data, g_track_data, b_track_data, a_track_data):
        """Parse four channel tracks followed by the color ID byte."""
        channels = [
            ("r", r_track_data),
            ("g", g_track_data),
            ("b", b_track_data),
            ("a", a_track_data),
        ]
        for attr_name, track_data in channels:
            track = AnimationTrack()
            track.read(data, offset, track_data)
            setattr(self, attr_name, track)
            offset += AnimationTrack.DATA_SIZE

        self.color_id = read_u8(data, offset)
        offset += 4

    def save_changes(self, data, offset, r_track_data, g_track_data, b_track_data, a_track_data):
        """Serialize the four channel tracks and the color ID, appending any
        new channel values to the shared track data lists."""
        channels = (
            (self.r, r_track_data),
            (self.g, g_track_data),
            (self.b, b_track_data),
            (self.a, a_track_data),
        )
        for track, track_data in channels:
            track.save_changes(data, offset, track_data)
            offset += AnimationTrack.DATA_SIZE

        write_u8(data, offset, self.color_id)
        # The three bytes after the color ID are 0xFF padding.
        for pad in range(1, 4):
            write_u8(data, offset+pad, 0xFF)
        offset += 4
|
{"/wwlib/rarc.py": ["/wwlib/bmg.py", "/wwlib/bdl.py"], "/randomizer.py": ["/wwlib/rarc.py", "/wwlib/gcm.py", "/customizer.py"]}
|
39,088,708
|
andirigible/wwplando
|
refs/heads/main
|
/randomizer.py
|
import os
from io import BytesIO
import shutil
from pathlib import Path
import re
from random import Random
from collections import OrderedDict
import hashlib
import yaml
from fs_helpers import *
from wwlib.yaz0 import Yaz0
from wwlib.rarc import RARC
from wwlib.dol import DOL
from wwlib.rel import REL, RELRelocation, RELRelocationType
from wwlib.gcm import GCM
from wwlib.jpc import JPC
import tweaks
from asm import patcher
from logic.logic import Logic
from paths import DATA_PATH, ASM_PATH, RANDO_ROOT_PATH, IS_RUNNING_FROM_SOURCE
import customizer
from wwlib import stage_searcher
from asm import disassemble
try:
from keys.seed_key import SEED_KEY
except ImportError:
SEED_KEY = ""
from randomizers import items
from randomizers import charts
from randomizers import starting_island
from randomizers import entrances
from randomizers import music
from randomizers import enemies
from randomizers import palettes
# Read the base version number from version.txt in the randomizer root.
with open(os.path.join(RANDO_ROOT_PATH, "version.txt"), "r") as f:
    VERSION = f.read().strip()

VERSION_WITHOUT_COMMIT = VERSION

# Try to add the git commit hash to the version number if running from source.
if IS_RUNNING_FROM_SOURCE:
    # Fallback suffix used when no .git metadata can be found.
    version_suffix = "_NOGIT"

    git_commit_head_file = os.path.join(RANDO_ROOT_PATH, ".git", "HEAD")
    if os.path.isfile(git_commit_head_file):
        with open(git_commit_head_file, "r") as f:
            head_file_contents = f.read().strip()
        if head_file_contents.startswith("ref: "):
            # Normal head, HEAD file has a reference to a branch which contains the commit hash
            relative_path_to_hash_file = head_file_contents[len("ref: "):]
            path_to_hash_file = os.path.join(RANDO_ROOT_PATH, ".git", relative_path_to_hash_file)
            if os.path.isfile(path_to_hash_file):
                with open(path_to_hash_file, "r") as f:
                    hash_file_contents = f.read()
                # Use the 7-character short hash as the suffix.
                version_suffix = "_" + hash_file_contents[:7]
        elif re.search(r"^[0-9a-f]{40}$", head_file_contents):
            # Detached head, commit hash directly in the HEAD file
            version_suffix = "_" + head_file_contents[:7]
    VERSION += version_suffix

# MD5 of an unmodified North American Wind Waker ISO.
# NOTE(review): presumably compared against the user's input ISO during
# verification — the comparison site is not visible here; confirm.
CLEAN_WIND_WAKER_ISO_MD5 = 0xd8e4d45af2032a081a0f446384e9261b
class TooFewProgressionLocationsError(Exception):
    # NOTE(review): raise site not visible in this chunk — presumably raised
    # when the chosen options leave too few item locations for progression
    # items; confirm against the Randomizer class.
    pass
class InvalidCleanISOError(Exception):
    # Raised when the clean ISO path does not exist (see Randomizer.__init__);
    # possibly also raised by verify_supported_version — confirm.
    pass
class Randomizer:
  def __init__(self, seed, clean_iso_path, randomized_output_folder, options, permalink=None, cmd_line_args=OrderedDict()):
    """Set up a randomization run: parse command line flags, load the clean ISO and data files, and validate the options.

    Raises InvalidCleanISOError if the ISO is missing or unsupported, and
    TooFewProgressionLocationsError if the chosen options don't enable enough
    progression locations to hold all progress items.
    NOTE(review): the mutable default for cmd_line_args is safe here because the
    dict is only ever read, never mutated.
    """
    self.randomized_output_folder = randomized_output_folder
    self.options = options
    self.seed = seed
    self.permalink = permalink
    # Developer/debug command line flags.
    self.dry_run = ("-dry" in cmd_line_args)
    self.disassemble = ("-disassemble" in cmd_line_args)
    self.export_disc_to_folder = ("-exportfolder" in cmd_line_args)
    self.no_logs = ("-nologs" in cmd_line_args)
    self.bulk_test = ("-bulk" in cmd_line_args)
    if self.bulk_test:
      # Bulk testing implies no ISO output and no log files.
      self.dry_run = True
      self.no_logs = True
    self.print_used_flags = ("-printflags" in cmd_line_args)
    if ("-noitemrando" in cmd_line_args) and IS_RUNNING_FROM_SOURCE:
      self.randomize_items = False
    else:
      self.randomize_items = True
    self.map_select = ("-mapselect" in cmd_line_args)
    self.heap_display = ("-heap" in cmd_line_args)
    self.test_room_args = None
    if "-test" in cmd_line_args:
      args = cmd_line_args["-test"]
      if args is not None:
        # Expected format: "stage,room,spawn".
        stage, room, spawn = args.split(",")
        self.test_room_args = {"stage": stage, "room": int(room), "spawn": int(spawn)}
    seed_string = self.seed
    if self.options.get("do_not_generate_spoiler_log"):
      # Mix the private SEED_KEY into the seed string when spoiler logs are disabled.
      seed_string += SEED_KEY
    self.integer_seed = self.convert_string_to_integer_md5(seed_string)
    self.rng = self.get_new_rng()
    # Caches of parsed game files, keyed by their path within the disc image.
    self.arcs_by_path = {}
    self.jpcs_by_path = {}
    self.rels_by_path = {}
    self.symbol_maps_by_path = {}
    self.raw_files_by_path = {}
    # Actor IDs 0x000-0x1F5 are taken by the vanilla game; custom RELs must use IDs beyond these.
    self.used_actor_ids = list(range(0x1F6))
    self.read_text_file_lists()
    if not self.dry_run:
      if not os.path.isfile(clean_iso_path):
        raise InvalidCleanISOError("Clean WW ISO does not exist: %s" % clean_iso_path)
      self.verify_supported_version(clean_iso_path)
      self.gcm = GCM(clean_iso_path)
      self.gcm.read_entire_disc()
      dol_data = self.gcm.read_file_data("sys/main.dol")
      self.dol = DOL()
      self.dol.read(dol_data)
      try:
        self.chart_list = self.get_arc("files/res/Msg/fmapres.arc").get_file("cmapdat.bin")
      except (InvalidOffsetError, AssertionError):
        # An invalid offset error when reading fmapres.arc seems to happen when the user has a corrupted clean ISO.
        # Alternatively, fmapres.arc's magic bytes not being RARC can also happen here, also caused by a corrupted clean ISO.
        # The reason for this is unknown, but when this happens check the ISO's MD5 and if it's wrong say so in an error message.
        self.verify_correct_clean_iso_md5(clean_iso_path)
        # But if the ISO's MD5 is correct just raise the normal offset error.
        raise
      self.bmg = self.get_arc("files/res/Msg/bmgres.arc").get_file("zel_00.bmg")
      if self.disassemble:
        self.disassemble_all_code()
      if self.print_used_flags:
        stage_searcher.print_all_used_item_pickup_flags(self)
        stage_searcher.print_all_used_chest_open_flags(self)
        stage_searcher.print_all_event_flags_used_by_stb_cutscenes(self)
    # Starting items. This list is read by the Logic when initializing your currently owned items list.
    self.starting_items = [
      "Wind Waker",
      "Wind's Requiem",
      "Ballad of Gales",
      "Song of Passing",
      "Hero's Shield",
      "Boat's Sail",
    ]
    self.starting_items += self.options.get("starting_gear", [])
    if self.options.get("sword_mode") == "Start with Hero's Sword":
      self.starting_items.append("Progressive Sword")
    # Add starting Triforce Shards.
    num_starting_triforce_shards = int(self.options.get("num_starting_triforce_shards", 0))
    for i in range(num_starting_triforce_shards):
      self.starting_items.append("Triforce Shard %d" % (i+1))
    # Add starting Pieces of Heart and Heart Containers.
    starting_pohs = self.options.get("starting_pohs")
    for i in range(starting_pohs):
      self.starting_items.append("Piece of Heart")
    starting_hcs = self.options.get("starting_hcs")
    for i in range(starting_hcs):
      self.starting_items.append("Heart Container")
    # Default entrances connections to be used if the entrance randomizer is not on.
    self.entrance_connections = OrderedDict([
      ("Dungeon Entrance On Dragon Roost Island", "Dragon Roost Cavern"),
      ("Dungeon Entrance In Forest Haven Sector", "Forbidden Woods"),
      ("Dungeon Entrance In Tower of the Gods Sector", "Tower of the Gods"),
      ("Dungeon Entrance On Headstone Island", "Earth Temple"),
      ("Dungeon Entrance On Gale Isle", "Wind Temple"),
      ("Secret Cave Entrance on Outset Island", "Savage Labyrinth"),
      ("Secret Cave Entrance on Dragon Roost Island", "Dragon Roost Island Secret Cave"),
      ("Secret Cave Entrance on Fire Mountain", "Fire Mountain Secret Cave"),
      ("Secret Cave Entrance on Ice Ring Isle", "Ice Ring Isle Secret Cave"),
      ("Secret Cave Entrance on Private Oasis", "Cabana Labyrinth"),
      ("Secret Cave Entrance on Needle Rock Isle", "Needle Rock Isle Secret Cave"),
      ("Secret Cave Entrance on Angular Isles", "Angular Isles Secret Cave"),
      ("Secret Cave Entrance on Boating Course", "Boating Course Secret Cave"),
      ("Secret Cave Entrance on Stone Watcher Island", "Stone Watcher Island Secret Cave"),
      ("Secret Cave Entrance on Overlook Island", "Overlook Island Secret Cave"),
      ("Secret Cave Entrance on Bird's Peak Rock", "Bird's Peak Rock Secret Cave"),
      ("Secret Cave Entrance on Pawprint Isle", "Pawprint Isle Chuchu Cave"),
      ("Secret Cave Entrance on Pawprint Isle Side Isle", "Pawprint Isle Wizzrobe Cave"),
      ("Secret Cave Entrance on Diamond Steppe Island", "Diamond Steppe Island Warp Maze Cave"),
      ("Secret Cave Entrance on Bomb Island", "Bomb Island Secret Cave"),
      ("Secret Cave Entrance on Rock Spire Isle", "Rock Spire Isle Secret Cave"),
      ("Secret Cave Entrance on Shark Island", "Shark Island Secret Cave"),
      ("Secret Cave Entrance on Cliff Plateau Isles", "Cliff Plateau Isles Secret Cave"),
      ("Secret Cave Entrance on Horseshoe Island", "Horseshoe Island Secret Cave"),
      ("Secret Cave Entrance on Star Island", "Star Island Secret Cave"),
    ])
    # Which island each dungeon/secret-cave entrance is on (used for hints/logs).
    self.dungeon_and_cave_island_locations = OrderedDict([
      ("Dragon Roost Cavern", "Dragon Roost Island"),
      ("Forbidden Woods", "Forest Haven"),
      ("Tower of the Gods", "Tower of the Gods"),
      ("Earth Temple", "Headstone Island"),
      ("Wind Temple", "Gale Isle"),
      ("Secret Cave Entrance on Outset Island", "Outset Island"),
      ("Secret Cave Entrance on Dragon Roost Island", "Dragon Roost Island"),
      ("Secret Cave Entrance on Fire Mountain", "Fire Mountain"),
      ("Secret Cave Entrance on Ice Ring Isle", "Ice Ring Isle"),
      ("Secret Cave Entrance on Private Oasis", "Private Oasis"),
      ("Secret Cave Entrance on Needle Rock Isle", "Needle Rock Isle"),
      ("Secret Cave Entrance on Angular Isles", "Angular Isles"),
      ("Secret Cave Entrance on Boating Course", "Boating Course"),
      ("Secret Cave Entrance on Stone Watcher Island", "Stone Watcher Island"),
      ("Secret Cave Entrance on Overlook Island", "Overlook Island"),
      ("Secret Cave Entrance on Bird's Peak Rock", "Bird's Peak Rock"),
      ("Secret Cave Entrance on Pawprint Isle", "Pawprint Isle"),
      ("Secret Cave Entrance on Pawprint Isle Side Isle", "Pawprint Isle"),
      ("Secret Cave Entrance on Diamond Steppe Island", "Diamond Steppe Island"),
      ("Secret Cave Entrance on Bomb Island", "Bomb Island"),
      ("Secret Cave Entrance on Rock Spire Isle", "Rock Spire Isle"),
      ("Secret Cave Entrance on Shark Island", "Shark Island"),
      ("Secret Cave Entrance on Cliff Plateau Isles", "Cliff Plateau Isles"),
      ("Secret Cave Entrance on Horseshoe Island", "Horseshoe Island"),
      ("Secret Cave Entrance on Star Island", "Star Island"),
    ])
    # Default starting island (Outset) if the starting island randomizer is not on.
    self.starting_island_index = 44
    # Default charts for each island.
    self.island_number_to_chart_name = OrderedDict([
      (1, "Treasure Chart 25"),
      (2, "Treasure Chart 7"),
      (3, "Treasure Chart 24"),
      (4, "Triforce Chart 2"),
      (5, "Treasure Chart 11"),
      (6, "Triforce Chart 7"),
      (7, "Treasure Chart 13"),
      (8, "Treasure Chart 41"),
      (9, "Treasure Chart 29"),
      (10, "Treasure Chart 22"),
      (11, "Treasure Chart 18"),
      (12, "Treasure Chart 30"),
      (13, "Treasure Chart 39"),
      (14, "Treasure Chart 19"),
      (15, "Treasure Chart 8"),
      (16, "Treasure Chart 2"),
      (17, "Treasure Chart 10"),
      (18, "Treasure Chart 26"),
      (19, "Treasure Chart 3"),
      (20, "Treasure Chart 37"),
      (21, "Treasure Chart 27"),
      (22, "Treasure Chart 38"),
      (23, "Triforce Chart 1"),
      (24, "Treasure Chart 21"),
      (25, "Treasure Chart 6"),
      (26, "Treasure Chart 14"),
      (27, "Treasure Chart 34"),
      (28, "Treasure Chart 5"),
      (29, "Treasure Chart 28"),
      (30, "Treasure Chart 35"),
      (31, "Triforce Chart 3"),
      (32, "Triforce Chart 6"),
      (33, "Treasure Chart 1"),
      (34, "Treasure Chart 20"),
      (35, "Treasure Chart 36"),
      (36, "Treasure Chart 23"),
      (37, "Treasure Chart 12"),
      (38, "Treasure Chart 16"),
      (39, "Treasure Chart 4"),
      (40, "Treasure Chart 17"),
      (41, "Treasure Chart 31"),
      (42, "Triforce Chart 5"),
      (43, "Treasure Chart 9"),
      (44, "Triforce Chart 4"),
      (45, "Treasure Chart 40"),
      (46, "Triforce Chart 8"),
      (47, "Treasure Chart 15"),
      (48, "Treasure Chart 32"),
      (49, "Treasure Chart 33"),
    ])
    # This list will hold the randomly selected dungeon boss locations that are required in race mode.
    # If race mode is not on, this list will remain empty.
    self.race_mode_required_locations = []
    # This list will hold the dungeon names of the race mode required locations.
    # If race mode is not on, this list will remain empty.
    self.race_mode_required_dungeons = []
    # This list will hold all item location names that should not have any items in them in race mode.
    # If race mode is not on, this list will remain empty.
    self.race_mode_banned_locations = []
    self.custom_model_name = "Link"
    self.using_custom_sail_texture = False
    self.logic = Logic(self)
    # Sanity-check that the enabled location categories can hold every progress item.
    num_progress_locations = self.logic.get_num_progression_locations()
    max_race_mode_banned_locations = self.logic.get_max_race_mode_banned_locations()
    num_progress_items = self.logic.get_num_progression_items()
    if num_progress_locations - max_race_mode_banned_locations < num_progress_items:
      error_message = "Not enough progress locations to place all progress items.\n\n"
      error_message += "Total progress items: %d\n" % num_progress_items
      error_message += "Progress locations with current options: %d\n" % num_progress_locations
      if max_race_mode_banned_locations > 0:
        error_message += "Maximum Race Mode banned locations: %d\n" % max_race_mode_banned_locations
      error_message += "\nYou need to check more of the progress location options in order to give the randomizer enough space to place all the items."
      raise TooFewProgressionLocationsError(error_message)
    # We need to determine if the user's selected options result in a dungeons-only-start.
    # Dungeons-only-start meaning that the only locations accessible at the start of the run are dungeon locations.
    # e.g. If the user selects Dungeons, Expensive Purchases, and Sunken Treasures, the dungeon locations are the only ones the player can check first.
    # We need to distinguish this situation because it can cause issues for the randomizer's item placement logic (specifically when placing keys in DRC).
    self.logic.temporarily_make_dungeon_entrance_macros_impossible()
    accessible_undone_locations = self.logic.get_accessible_remaining_locations(for_progression=True)
    if len(accessible_undone_locations) == 0:
      self.dungeons_only_start = True
    else:
      self.dungeons_only_start = False
    self.logic.update_entrance_connection_macros() # Reset the dungeon entrance macros.
    # Also determine if these options result in a dungeons-and-caves-only-start.
    # Dungeons-and-caves-only-start means the only locations accessible at the start of the run are dungeon or secret cave locations.
    # This situation can also cause issues for the item placement logic (specifically when placing the first item of the run).
    self.logic.temporarily_make_entrance_macros_impossible()
    accessible_undone_locations = self.logic.get_accessible_remaining_locations(for_progression=True)
    if len(accessible_undone_locations) == 0:
      self.dungeons_and_caves_only_start = True
    else:
      self.dungeons_and_caves_only_start = False
    self.logic.update_entrance_connection_macros() # Reset the entrance macros.
  def randomize(self):
    """Run every randomization stage in order as a generator.

    Yields (progress_text, progress_value) tuples so a GUI can drive a progress
    bar; the final yield is ("Done", -1). The RNG is re-seeded before each stage
    via reset_rng().
    """
    options_completed = 0
    yield("Modifying game code...", options_completed)
    customizer.decide_on_link_model(self)
    if not self.dry_run:
      # Base patches required for every seed, then option-dependent patches.
      self.apply_necessary_tweaks()
      if self.options.get("swift_sail"):
        tweaks.make_sail_behave_like_swift_sail(self)
      if self.options.get("instant_text_boxes"):
        tweaks.make_all_text_instant(self)
      if self.options.get("reveal_full_sea_chart"):
        patcher.apply_patch(self, "reveal_sea_chart")
      if self.options.get("add_shortcut_warps_between_dungeons"):
        tweaks.add_inter_dungeon_warp_pots(self)
      if self.options.get("invert_camera_x_axis"):
        patcher.apply_patch(self, "invert_camera_x_axis")
      tweaks.update_skip_rematch_bosses_game_variable(self)
      tweaks.update_sword_mode_game_variable(self)
      if self.options.get("sword_mode") == "Swordless":
        patcher.apply_patch(self, "swordless")
        tweaks.update_text_for_swordless(self)
      if self.options.get("randomize_entrances") not in ["Disabled", None, "Dungeons"]:
        tweaks.disable_ice_ring_isle_and_fire_mountain_effects_indoors(self)
      tweaks.update_starting_gear(self)
      if self.options.get("disable_tingle_chests_with_tingle_bombs"):
        patcher.apply_patch(self, "disable_tingle_bombs_on_tingle_chests")
      if self.options.get("remove_title_and_ending_videos"):
        tweaks.remove_title_and_ending_videos(self)
      if self.options.get("remove_music"):
        patcher.apply_patch(self, "remove_music")
      # Developer-only patches.
      if self.map_select:
        patcher.apply_patch(self, "map_select")
      if IS_RUNNING_FROM_SOURCE or "BETA" in VERSION_WITHOUT_COMMIT:
        tweaks.enable_developer_mode(self)
      if self.heap_display:
        tweaks.enable_heap_display(self)
      if self.test_room_args is not None:
        tweaks.test_room(self)
    options_completed += 1
    yield("Randomizing...", options_completed)
    if self.options.get("randomize_charts"):
      self.reset_rng()
      charts.randomize_charts(self)
    if self.options.get("randomize_starting_island"):
      self.reset_rng()
      starting_island.randomize_starting_island(self)
    if self.options.get("randomize_entrances") not in ["Disabled", None]:
      self.reset_rng()
      entrances.randomize_entrances(self)
    if self.options.get("randomize_music"):
      self.reset_rng()
      music.randomize_music(self)
    options_completed += 1
    # Enemies must be randomized before items in order for the enemy logic to properly take into account what items you do and don't start with.
    if self.options.get("randomize_enemies"):
      yield("Randomizing enemy locations...", options_completed)
      self.reset_rng()
      enemies.randomize_enemies(self)
    if self.options.get("randomize_enemy_palettes"):
      yield("Randomizing enemy colors...", options_completed)
      self.reset_rng()
      palettes.randomize_enemy_palettes(self)
    options_completed += 10
    yield("Randomizing items...", options_completed)
    if self.randomize_items:
      self.reset_rng()
      items.randomize_items(self)
    options_completed += 2
    yield("Saving items...", options_completed)
    if self.randomize_items and not self.dry_run:
      items.write_changed_items(self)
      tweaks.randomize_and_update_hints(self)
    if not self.dry_run:
      self.apply_necessary_post_randomization_tweaks()
    options_completed += 7
    yield("Saving randomized ISO...", options_completed)
    if not self.dry_run:
      generator = self.save_randomized_iso()
      while True:
        # Need to use a while loop to go through the generator instead of a for loop, as a for loop would silently exit if a StopIteration error ever happened for any reason.
        next_progress_text, files_done = next(generator)
        if files_done == -1:
          break
        percentage_done = files_done/len(self.gcm.files_by_path)
        yield("Saving randomized ISO...", options_completed+int(percentage_done*9))
    options_completed += 9
    yield("Writing logs...", options_completed)
    if self.randomize_items:
      if not self.options.get("do_not_generate_spoiler_log"):
        self.write_spoiler_log()
      self.write_non_spoiler_log()
    yield("Done", -1)
  def apply_necessary_tweaks(self):
    """Apply the base ASM patches and game tweaks that every seed needs, regardless of options.

    Called once from randomize() when not doing a dry run. The patches are
    applied in a fixed order; commented-out calls are intentionally disabled.
    """
    patcher.apply_patch(self, "custom_funcs")
    patcher.apply_patch(self, "make_game_nonlinear")
    patcher.apply_patch(self, "remove_cutscenes")
    patcher.apply_patch(self, "flexible_item_locations")
    patcher.apply_patch(self, "fix_vanilla_bugs")
    patcher.apply_patch(self, "misc_rando_features")
    tweaks.add_custom_actor_rels(self)
    tweaks.skip_wakeup_intro_and_start_at_dock(self)
    tweaks.start_ship_at_outset(self)
    tweaks.fix_deku_leaf_model(self)
    tweaks.allow_all_items_to_be_field_items(self)
    tweaks.remove_shop_item_forced_uniqueness_bit(self)
    tweaks.remove_forsaken_fortress_2_cutscenes(self)
    tweaks.make_items_progressive(self)
    tweaks.add_ganons_tower_warp_to_ff2(self)
    tweaks.add_chest_in_place_medli_grappling_hook_gift(self)
    tweaks.add_chest_in_place_queen_fairy_cutscene(self)
    #tweaks.add_cube_to_earth_temple_first_room(self)
    tweaks.add_more_magic_jars(self)
    tweaks.modify_title_screen_logo(self)
    tweaks.update_game_name_icon_and_banners(self)
    tweaks.allow_dungeon_items_to_appear_anywhere(self)
    #tweaks.remove_ballad_of_gales_warp_in_cutscene(self)
    tweaks.fix_shop_item_y_offsets(self)
    tweaks.shorten_zephos_event(self)
    tweaks.update_korl_dialogue(self)
    tweaks.set_num_starting_triforce_shards(self)
    tweaks.set_starting_health(self)
    tweaks.add_pirate_ship_to_windfall(self)
    tweaks.remove_makar_kidnapping_event(self)
    tweaks.increase_player_movement_speeds(self)
    tweaks.add_chart_number_to_item_get_messages(self)
    tweaks.increase_grapple_animation_speed(self)
    tweaks.increase_block_moving_animation(self)
    tweaks.increase_misc_animations(self)
    tweaks.shorten_auction_intro_event(self)
    tweaks.disable_invisible_walls(self)
    tweaks.add_hint_signs(self)
    tweaks.prevent_door_boulder_softlocks(self)
    tweaks.update_tingle_statue_item_get_funcs(self)
    patcher.apply_patch(self, "tingle_chests_without_tuner")
    tweaks.make_tingle_statue_reward_rupee_rainbow_colored(self)
    tweaks.show_seed_hash_on_name_entry_screen(self)
    tweaks.fix_ghost_ship_chest_crash(self)
    tweaks.implement_key_bag(self)
    tweaks.add_chest_in_place_of_jabun_cutscene(self)
    tweaks.add_chest_in_place_of_master_sword(self)
    tweaks.update_beedle_spoil_selling_text(self)
    tweaks.fix_totg_warp_out_spawn_pos(self)
    tweaks.remove_phantom_ganon_requirement_from_eye_reefs(self)
    tweaks.fix_forsaken_fortress_door_softlock(self)
    tweaks.add_new_bog_warp(self)
    tweaks.make_rat_holes_visible_from_behind(self)
    tweaks.add_failsafe_id_0_spawns(self)
    tweaks.remove_minor_panning_cutscenes(self)
    tweaks.fix_message_closing_sound_on_quest_status_screen(self)
    tweaks.fix_stone_head_bugs(self)
    # Player-model customization happens last.
    customizer.replace_link_model(self)
    tweaks.change_starting_clothes(self)
    tweaks.check_hide_ship_sail(self)
    customizer.change_player_clothes_color(self)
  def apply_necessary_post_randomization_tweaks(self):
    """Apply tweaks that depend on the randomized item placement, so they must run after item randomization."""
    if self.randomize_items:
      # These rewrite in-game text to reflect the randomized items.
      tweaks.update_shop_item_descriptions(self)
      tweaks.update_auction_item_names(self)
      tweaks.update_battlesquid_item_names(self)
      tweaks.update_item_names_in_letter_advertising_rock_spire_shop(self)
      tweaks.update_savage_labyrinth_hint_tablet(self)
    tweaks.show_quest_markers_on_sea_chart_for_dungeons(self, dungeon_names=self.race_mode_required_dungeons)
    tweaks.prevent_fire_mountain_lava_softlock(self)
def verify_supported_version(self, clean_iso_path):
with open(clean_iso_path, "rb") as f:
game_id = try_read_str(f, 0, 6)
if game_id != "GZLE01":
if game_id and game_id.startswith("GZL"):
raise InvalidCleanISOError("Invalid version of Wind Waker. Only the USA version is supported by this randomizer.")
else:
raise InvalidCleanISOError("Invalid game given as the clean ISO. You must specify a Wind Waker ISO (USA version).")
def verify_correct_clean_iso_md5(self, clean_iso_path):
md5 = hashlib.md5()
with open(clean_iso_path, "rb") as f:
while True:
chunk = f.read(1024*1024)
if not chunk:
break
md5.update(chunk)
integer_md5 = int(md5.hexdigest(), 16)
if integer_md5 != CLEAN_WIND_WAKER_ISO_MD5:
raise InvalidCleanISOError("Invalid clean Wind Waker ISO. Your ISO may be corrupted.\n\nCorrect ISO MD5 hash: %x\nYour ISO's MD5 hash: %x" % (CLEAN_WIND_WAKER_ISO_MD5, integer_md5))
def read_text_file_lists(self):
# Get item names.
self.item_names = {}
self.item_name_to_id = {}
with open(os.path.join(DATA_PATH, "item_names.txt"), "r") as f:
matches = re.findall(r"^([0-9a-f]{2}) - (.+)$", f.read(), re.IGNORECASE | re.MULTILINE)
for item_id, item_name in matches:
if item_name:
item_id = int(item_id, 16)
self.item_names[item_id] = item_name
if item_name in self.item_name_to_id:
raise Exception("Duplicate item name: " + item_name)
self.item_name_to_id[item_name] = item_id
# Get stage and island names for debug purposes.
self.stage_names = {}
with open(os.path.join(DATA_PATH, "stage_names.txt"), "r") as f:
while True:
stage_folder = f.readline()
if not stage_folder:
break
stage_name = f.readline()
self.stage_names[stage_folder.strip()] = stage_name.strip()
self.island_names = {}
self.island_number_to_name = {}
self.island_name_to_number = {}
with open(os.path.join(DATA_PATH, "island_names.txt"), "r") as f:
while True:
room_arc_name = f.readline()
if not room_arc_name:
break
island_name = f.readline().strip()
self.island_names[room_arc_name.strip()] = island_name
island_number = int(re.search(r"Room(\d+)", room_arc_name).group(1))
self.island_number_to_name[island_number] = island_name
self.island_name_to_number[island_name] = island_number
self.item_ids_without_a_field_model = []
with open(os.path.join(DATA_PATH, "items_without_field_models.txt"), "r") as f:
matches = re.findall(r"^([0-9a-f]{2}) ", f.read(), re.IGNORECASE | re.MULTILINE)
for item_id in matches:
if item_name:
item_id = int(item_id, 16)
self.item_ids_without_a_field_model.append(item_id)
self.arc_name_pointers = {}
with open(os.path.join(DATA_PATH, "item_resource_arc_name_pointers.txt"), "r") as f:
matches = re.findall(r"^([0-9a-f]{2}) ([0-9a-f]{8}) ", f.read(), re.IGNORECASE | re.MULTILINE)
for item_id, arc_name_pointer in matches:
item_id = int(item_id, 16)
arc_name_pointer = int(arc_name_pointer, 16)
self.arc_name_pointers[item_id] = arc_name_pointer
self.icon_name_pointer = {}
with open(os.path.join(DATA_PATH, "item_resource_icon_name_pointers.txt"), "r") as f:
matches = re.findall(r"^([0-9a-f]{2}) ([0-9a-f]{8}) ", f.read(), re.IGNORECASE | re.MULTILINE)
for item_id, icon_name_pointer in matches:
item_id = int(item_id, 16)
icon_name_pointer = int(icon_name_pointer, 16)
self.icon_name_pointer[item_id] = icon_name_pointer
with open(os.path.join(ASM_PATH, "custom_symbols.txt"), "r") as f:
self.custom_symbols = yaml.safe_load(f)
self.main_custom_symbols = self.custom_symbols["sys/main.dol"]
with open(os.path.join(ASM_PATH, "free_space_start_offsets.txt"), "r") as f:
self.free_space_start_offsets = yaml.safe_load(f)
with open(os.path.join(DATA_PATH, "progress_item_hints.txt"), "r") as f:
self.progress_item_hints = yaml.safe_load(f)
with open(os.path.join(DATA_PATH, "island_name_hints.txt"), "r") as f:
self.island_name_hints = yaml.safe_load(f)
with open(os.path.join(DATA_PATH, "enemy_types.txt"), "r") as f:
self.enemy_types = yaml.safe_load(f)
with open(os.path.join(DATA_PATH, "palette_randomizable_files.txt"), "r") as f:
self.palette_randomizable_files = yaml.safe_load(f)
def get_arc(self, arc_path):
arc_path = arc_path.replace("\\", "/")
if arc_path in self.arcs_by_path:
return self.arcs_by_path[arc_path]
else:
data = self.gcm.read_file_data(arc_path)
arc = RARC()
arc.read(data)
self.arcs_by_path[arc_path] = arc
return arc
def get_jpc(self, jpc_path):
jpc_path = jpc_path.replace("\\", "/")
if jpc_path in self.jpcs_by_path:
return self.jpcs_by_path[jpc_path]
else:
data = self.gcm.read_file_data(jpc_path)
jpc = JPC(data)
self.jpcs_by_path[jpc_path] = jpc
return jpc
def get_rel(self, rel_path):
rel_path = rel_path.replace("\\", "/")
if rel_path in self.rels_by_path:
return self.rels_by_path[rel_path]
else:
if not rel_path.startswith("files/rels/"):
raise Exception("Invalid REL path: %s" % rel_path)
rel_name = os.path.basename(rel_path)
rels_arc = self.get_arc("files/RELS.arc")
rel_file_entry = rels_arc.get_file_entry(rel_name)
if rel_file_entry:
rel_file_entry.decompress_data_if_necessary()
data = rel_file_entry.data
else:
data = self.gcm.read_file_data(rel_path)
rel = REL()
rel.read(data)
self.rels_by_path[rel_path] = rel
return rel
def get_symbol_map(self, map_path):
map_path = map_path.replace("\\", "/")
if map_path in self.symbol_maps_by_path:
return self.symbol_maps_by_path[map_path]
else:
data = self.gcm.read_file_data(map_path)
map_text = read_all_bytes(data).decode("ascii")
if map_path == "files/maps/framework.map":
addr_to_name_map = disassemble.get_main_symbols(map_text)
else:
rel_name = os.path.splitext(os.path.basename(map_path))[0]
rel = self.get_rel("files/rels/%s.rel" % rel_name)
addr_to_name_map = disassemble.get_rel_symbols(rel, map_text)
symbol_map = {}
for address, name in addr_to_name_map.items():
symbol_map[name] = address
self.symbol_maps_by_path[map_path] = symbol_map
return symbol_map
def get_raw_file(self, file_path):
file_path = file_path.replace("\\", "/")
if file_path in self.raw_files_by_path:
return self.raw_files_by_path[file_path]
else:
if file_path.startswith("files/rels/"):
raise Exception("Cannot read a REL as a raw file.")
elif file_path == "sys/main.dol":
raise Exception("Cannot read the DOL as a raw file.")
data = self.gcm.read_file_data(file_path)
if try_read_str(data, 0, 4) == "Yaz0":
data = Yaz0.decompress(data)
self.raw_files_by_path[file_path] = data
return data
def replace_arc(self, arc_path, new_data):
if arc_path not in self.gcm.files_by_path:
raise Exception("Cannot replace RARC that doesn't exist: " + arc_path)
arc = RARC()
arc.read(new_data)
self.arcs_by_path[arc_path] = arc
def replace_raw_file(self, file_path, new_data):
if file_path not in self.gcm.files_by_path:
raise Exception("Cannot replace file that doesn't exist: " + file_path)
self.raw_files_by_path[file_path] = new_data
def add_new_raw_file(self, file_path, new_data):
if file_path.lower() in self.gcm.files_by_path_lowercase:
raise Exception("Cannot add a new file that has the same path and name as an existing one: " + file_path)
self.gcm.add_new_file(file_path, new_data)
self.raw_files_by_path[file_path] = new_data
  def add_new_rel(self, rel_path, new_rel, section_index_of_actor_profile, offset_of_actor_profile):
    """Add a brand new actor REL to the game's filesystem and register its profile.

    section_index_of_actor_profile and offset_of_actor_profile locate the actor
    profile struct within new_rel; its actor ID (read from offset +8 of the
    profile) must not collide with any vanilla or previously added actor ID.
    Raises an Exception on a bad path, name collision, actor ID collision, or
    REL ID collision.
    """
    if not rel_path.startswith("files/rels/"):
      raise Exception("Cannot add a new REL to a folder besides files/rels/: " + rel_path)
    if rel_path.lower() in self.gcm.files_by_path_lowercase:
      raise Exception("Cannot add a new REL that has the same name as an existing one: " + rel_path)
    # Read the actor ID out of the actor profile.
    section_data_actor_profile = new_rel.sections[section_index_of_actor_profile].data
    new_actor_id = read_u16(section_data_actor_profile, offset_of_actor_profile+8)
    if new_actor_id in self.used_actor_ids:
      raise Exception("Cannot add a new REL with an actor ID that is already used:\nActor ID: %03X\nNew REL path: %s" % (new_actor_id, rel_path))
    # We need to add the new REL to the profile list.
    profile_list = self.get_rel("files/rels/f_pc_profile_lst.rel")
    rel_relocation = RELRelocation()
    rel_relocation.relocation_type = RELRelocationType.R_PPC_ADDR32
    rel_relocation.curr_section_num = 4 # List section
    rel_relocation.relocation_offset = new_actor_id*4 # Offset in the list
    # Write a null placeholder for the pointer to the profile that will be relocated.
    list_data = profile_list.sections[rel_relocation.curr_section_num].data
    write_u32(list_data, new_actor_id*4, 0)
    # For some reason, there's an extra four 0x00 bytes after the last entry in the list, so we put that there just to be safe.
    write_u32(list_data, new_actor_id*4+4, 0)
    rel_relocation.section_num_to_relocate_against = section_index_of_actor_profile
    rel_relocation.symbol_address = offset_of_actor_profile
    if new_rel.id in profile_list.relocation_entries_for_module:
      raise Exception("Cannot add a new REL with a unique ID that is already present in the profile list:\nREL ID: %03X\nNew REL path: %s" % (new_rel.id, rel_path))
    profile_list.relocation_entries_for_module[new_rel.id] = [rel_relocation]
    # Then add the REL to the game's filesystem.
    self.gcm.add_new_file(rel_path)
    self.rels_by_path[rel_path] = new_rel
    # Don't allow this actor ID to be used again by any more custom RELs we add.
    self.used_actor_ids.append(new_actor_id)
  def save_randomized_iso(self):
    """Serialize every modified file back into the GCM and export the disc as a generator.

    Yields (progress_text, files_done) tuples from the GCM export, ending with
    ("Done", -1). Exports to a folder or an .iso depending on
    self.export_disc_to_folder.
    """
    self.bmg.save_changes()
    # Raw file replacements go straight into the GCM's changed-files map.
    for file_path, data in self.raw_files_by_path.items():
      self.gcm.changed_files[file_path] = data
    self.dol.save_changes()
    self.gcm.changed_files["sys/main.dol"] = self.dol.data
    for rel_path, rel in self.rels_by_path.items():
      rel.save_changes(preserve_section_data_offsets=True)
      rel_name = os.path.basename(rel_path)
      rels_arc = self.get_arc("files/RELS.arc")
      rel_file_entry = rels_arc.get_file_entry(rel_name)
      if rel_file_entry:
        # The REL already wrote to the same BytesIO object as the file entry uses, so no need to do anything more here.
        assert rel_file_entry.data == rel.data
      else:
        self.gcm.changed_files[rel_path] = rel.data
    for arc_path, arc in self.arcs_by_path.items():
      # event_list.dat files are instantiated objects with their own save step.
      for file_name, instantiated_file in arc.instantiated_object_files.items():
        if file_name == "event_list.dat":
          instantiated_file.save_changes()
      arc.save_changes()
      self.gcm.changed_files[arc_path] = arc.data
    for jpc_path, jpc in self.jpcs_by_path.items():
      jpc.save_changes()
      self.gcm.changed_files[jpc_path] = jpc.data
    if self.export_disc_to_folder:
      output_folder_path = os.path.join(self.randomized_output_folder, "WW Random %s" % self.seed)
      generator = self.gcm.export_disc_to_folder_with_changed_files(output_folder_path)
    else:
      output_file_path = os.path.join(self.randomized_output_folder, "WW Random %s.iso" % self.seed)
      generator = self.gcm.export_disc_to_iso_with_changed_files(output_file_path)
    while True:
      # Need to use a while loop to go through the generator instead of a for loop, as a for loop would silently exit if a StopIteration error ever happened for any reason.
      next_progress_text, files_done = next(generator)
      if files_done == -1:
        break
      yield(next_progress_text, files_done)
    yield("Done", -1)
def convert_string_to_integer_md5(self, string):
return int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16)
def get_new_rng(self):
rng = Random()
rng.seed(self.integer_seed)
if self.options.get("do_not_generate_spoiler_log"):
for i in range(1, 100):
rng.getrandbits(i)
return rng
  def reset_rng(self):
    # Re-seed self.rng to its initial deterministic state, regardless of how many
    # random values earlier randomization stages have consumed.
    self.rng = self.get_new_rng()
  def calculate_playthrough_progression_spheres(self):
    """Simulate a playthrough and group progress items into progression spheres.

    Returns a list of OrderedDicts, one per sphere, each mapping location name
    -> progress item name; beating the game appears as the pseudo-entry
    ("Ganon's Tower - Rooftop", "Defeat Ganondorf"). Raises an Exception if no
    new locations become accessible in an iteration (logic failure).
    """
    progression_spheres = []
    # Use a fresh Logic so the simulation doesn't disturb self.logic's state.
    logic = Logic(self)
    previously_accessible_locations = []
    game_beatable = False
    while logic.unplaced_progress_items:
      progress_items_in_this_sphere = OrderedDict()
      accessible_locations = logic.get_accessible_remaining_locations()
      locations_in_this_sphere = [
        loc for loc in accessible_locations
        if loc not in previously_accessible_locations
      ]
      if not locations_in_this_sphere:
        raise Exception("Failed to calculate progression spheres")
      if not self.options.get("keylunacy"):
        # If the player gained access to any small keys, we need to give them the keys without counting that as a new sphere.
        newly_accessible_predetermined_item_locations = [
          loc for loc in locations_in_this_sphere
          if loc in self.logic.prerandomization_item_locations
        ]
        newly_accessible_small_key_locations = [
          loc for loc in newly_accessible_predetermined_item_locations
          if self.logic.prerandomization_item_locations[loc].endswith(" Small Key")
        ]
        if newly_accessible_small_key_locations:
          for small_key_location_name in newly_accessible_small_key_locations:
            item_name = self.logic.prerandomization_item_locations[small_key_location_name]
            assert item_name.endswith(" Small Key")
            logic.add_owned_item(item_name)
          previously_accessible_locations += newly_accessible_small_key_locations
          continue # Redo this loop iteration with the small key locations no longer being considered 'remaining'.
      # Record which of the newly reachable locations hold progress items.
      for location_name in locations_in_this_sphere:
        item_name = self.logic.done_item_locations[location_name]
        if item_name in logic.all_progress_items:
          progress_items_in_this_sphere[location_name] = item_name
      if not game_beatable:
        game_beatable = logic.check_requirement_met("Can Reach and Defeat Ganondorf")
        if game_beatable:
          progress_items_in_this_sphere["Ganon's Tower - Rooftop"] = "Defeat Ganondorf"
      progression_spheres.append(progress_items_in_this_sphere)
      # Grant this sphere's items before computing the next sphere.
      for location_name, item_name in progress_items_in_this_sphere.items():
        if item_name == "Defeat Ganondorf":
          continue
        logic.add_owned_item(item_name)
      # Grouped progress items only count as placed once the whole group is owned.
      for group_name, item_names in logic.progress_item_groups.items():
        entire_group_is_owned = all(item_name in logic.currently_owned_items for item_name in item_names)
        if entire_group_is_owned and group_name in logic.unplaced_progress_items:
          logic.unplaced_progress_items.remove(group_name)
      previously_accessible_locations = accessible_locations
    if not game_beatable:
      # If the game wasn't already beatable on a previous progression sphere but it is now we add one final one just for this.
      game_beatable = logic.check_requirement_met("Can Reach and Defeat Ganondorf")
      if game_beatable:
        final_progression_sphere = OrderedDict([
          ("Ganon's Tower - Rooftop", "Defeat Ganondorf"),
        ])
        progression_spheres.append(final_progression_sphere)
    return progression_spheres
def get_log_header(self):
    """Return the header text shared by every log file.

    Includes the randomizer version, the permalink (if any), the seed, and
    a comma-separated summary of all options that differ from their
    disabled/empty defaults.
    """
    header = "Wind Waker Randomizer Version %s\n" % VERSION
    if self.permalink:
        header += "Permalink: %s\n" % self.permalink
    header += "Seed: %s\n" % self.seed
    header += "Options selected:\n "

    # Skip options left at a falsy/empty value, plus randomized_gear
    # (it just takes up space in the log).
    shown_option_names = [
        opt for opt in self.options
        if self.options[opt] not in [False, [], {}, OrderedDict()]
        and opt != "randomized_gear"
    ]

    option_strings = []
    for opt in shown_option_names:
        value = self.options[opt]
        if isinstance(value, bool):
            # Booleans that survived the filter are True; the name alone suffices.
            option_strings.append(opt)
            continue
        if opt == "custom_colors":
            # Only show colors that were changed from their defaults.
            default_colors = customizer.get_default_colors(self)
            changed_colors = OrderedDict(
                (color_name, color_value)
                for color_name, color_value in value.items()
                if color_value != default_colors[color_name]
            )
            if changed_colors == OrderedDict():
                # No colors changed from default, don't show it at all.
                continue
            value = changed_colors
        option_strings.append("%s: %s" % (opt, value))

    header += ", ".join(option_strings)
    header += "\n\n\n"
    return header
def get_zones_and_max_location_name_len(self, locations):
    """Group location names by zone.

    Returns a tuple ``(zones, max_len)``: ``zones`` maps each zone name to
    a list of ``(full_location_name, specific_location_name)`` tuples (in
    first-seen order), and ``max_len`` is the length of the longest
    specific location name, used to align log columns.
    """
    zones = OrderedDict()
    max_len = 0
    for full_name in locations:
        zone_name, specific_name = self.logic.split_location_name_by_zone(full_name)
        # setdefault keeps insertion order, so zones appear as first encountered.
        zones.setdefault(zone_name, []).append((full_name, specific_name))
        max_len = max(max_len, len(specific_name))
    return (zones, max_len)
def write_non_spoiler_log(self):
if self.no_logs:
return
log_str = self.get_log_header()
progress_locations, nonprogress_locations = self.logic.get_progress_and_non_progress_locations()
zones, max_location_name_length = self.get_zones_and_max_location_name_len(self.logic.done_item_locations)
format_string = " %s\n"
# Write progress item locations.
log_str += "### Locations that may or may not have progress items in them on this run:\n"
for zone_name, locations_in_zone in zones.items():
if not any(loc for (loc, _) in locations_in_zone if loc in progress_locations):
# No progress locations for this zone.
continue
log_str += zone_name + ":\n"
for (location_name, specific_location_name) in locations_in_zone:
if location_name in progress_locations:
item_name = self.logic.done_item_locations[location_name]
log_str += format_string % specific_location_name
log_str += "\n\n"
# Write nonprogress item locations.
log_str += "### Locations that cannot have progress items in them on this run:\n"
for zone_name, locations_in_zone in zones.items():
if not any(loc for (loc, _) in locations_in_zone if loc in nonprogress_locations):
# No nonprogress locations for this zone.
continue
log_str += zone_name + ":\n"
for (location_name, specific_location_name) in locations_in_zone:
if location_name in nonprogress_locations:
item_name = self.logic.done_item_locations[location_name]
log_str += format_string % specific_location_name
nonspoiler_log_output_path = os.path.join(self.randomized_output_folder, "WW Random %s - Non-Spoiler Log.txt" % self.seed)
with open(nonspoiler_log_output_path, "w") as f:
f.write(log_str)
def write_spoiler_log(self):
    """Write the full spoiler log.

    Contents: the playthrough progression spheres, every item location,
    the starting island, dungeon/cave entrance connections, and the island
    each treasure/triforce chart leads to.
    """
    if self.no_logs:
        # We still calculate progression spheres even if we're not going to write them anywhere to catch more errors in testing.
        self.calculate_playthrough_progression_spheres()
        return
    spoiler_log = self.get_log_header()
    # Write progression spheres.
    spoiler_log += "Playthrough:\n"
    progression_spheres = self.calculate_playthrough_progression_spheres()
    all_progression_sphere_locations = [loc for locs in progression_spheres for loc in locs]
    zones, max_location_name_length = self.get_zones_and_max_location_name_len(all_progression_sphere_locations)
    # Left-pad the location-name column so the item names line up.
    format_string = " %-" + str(max_location_name_length+1) + "s %s\n"
    for i, progression_sphere in enumerate(progression_spheres):
        spoiler_log += "%d:\n" % (i+1)
        for zone_name, locations_in_zone in zones.items():
            if not any(loc for (loc, _) in locations_in_zone if loc in progression_sphere):
                # No locations in this zone are used in this sphere.
                continue
            spoiler_log += " %s:\n" % zone_name
            for (location_name, specific_location_name) in locations_in_zone:
                if location_name in progression_sphere:
                    if location_name == "Ganon's Tower - Rooftop":
                        # Pseudo-location representing beating the game, not an item.
                        item_name = "Defeat Ganondorf"
                    else:
                        item_name = self.logic.done_item_locations[location_name]
                    spoiler_log += format_string % (specific_location_name + ":", item_name)
    spoiler_log += "\n\n\n"
    # Write item locations.
    spoiler_log += "All item locations:\n"
    zones, max_location_name_length = self.get_zones_and_max_location_name_len(self.logic.done_item_locations)
    format_string = " %-" + str(max_location_name_length+1) + "s %s\n"
    for zone_name, locations_in_zone in zones.items():
        spoiler_log += zone_name + ":\n"
        for (location_name, specific_location_name) in locations_in_zone:
            item_name = self.logic.done_item_locations[location_name]
            spoiler_log += format_string % (specific_location_name + ":", item_name)
    spoiler_log += "\n\n\n"
    # Write starting island.
    spoiler_log += "Starting island: "
    spoiler_log += self.island_number_to_name[self.starting_island_index]
    spoiler_log += "\n"
    spoiler_log += "\n\n\n"
    # Write dungeon/secret cave entrances.
    spoiler_log += "Entrances:\n"
    for entrance_name, dungeon_or_cave_name in self.entrance_connections.items():
        spoiler_log += " %-48s %s\n" % (entrance_name+":", dungeon_or_cave_name)
    spoiler_log += "\n\n\n"
    # Write treasure charts.
    spoiler_log += "Charts:\n"
    # Invert the "Chart for Island N" macros so we can look up the island
    # each chart leads to by chart name.
    chart_name_to_island_number = {}
    for island_number in range(1, 49+1):
        chart_name = self.logic.macros["Chart for Island %d" % island_number][0]
        chart_name_to_island_number[chart_name] = island_number
    for chart_number in range(1, 49+1):
        # Charts 1-8 are Triforce Charts; 9-49 are Treasure Charts 1-41.
        if chart_number <= 8:
            chart_name = "Triforce Chart %d" % chart_number
        else:
            chart_name = "Treasure Chart %d" % (chart_number-8)
        island_number = chart_name_to_island_number[chart_name]
        island_name = self.island_number_to_name[island_number]
        spoiler_log += " %-18s %s\n" % (chart_name+":", island_name)
    spoiler_log_output_path = os.path.join(self.randomized_output_folder, "WW Random %s - Spoiler Log.txt" % self.seed)
    with open(spoiler_log_output_path, "w") as f:
        f.write(spoiler_log)
def write_error_log(self, error_message):
    """Dump an error message (preceded by the usual log header, when it can
    be built) to an error-log file. Does nothing if logging is disabled."""
    if self.no_logs:
        return
    contents = ""
    try:
        contents += self.get_log_header()
    except Exception as e:
        # Building the header can itself fail; report that but still write
        # the original error out.
        print("Error getting log header for error log: " + str(e))
    contents += error_message
    output_path = os.path.join(self.randomized_output_folder, "WW Random %s - Error Log.txt" % self.seed)
    with open(output_path, "w") as f:
        f.write(contents)
def disassemble_all_code(self):
    # Thin delegate: the disassemble module produces assembly listings of the
    # game's code using this randomizer instance's state.
    disassemble.disassemble_all_code(self)
|
{"/wwlib/rarc.py": ["/wwlib/bmg.py", "/wwlib/bdl.py"], "/randomizer.py": ["/wwlib/rarc.py", "/wwlib/gcm.py", "/customizer.py"]}
|
39,105,054
|
sahilsid/EWSNet
|
refs/heads/main
|
/convert_js.py
|
import os
import sys
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Conv1D, BatchNormalization, GlobalAveragePooling1D, Permute, Dropout, Flatten
from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Activation, GRU, SimpleRNN
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
import tensorflow as tf
import pickle
from random import randint
import tensorflowjs as tfjs
def generate_dynamic_lstmfcn(NB_CLASS, NUM_CELLS=128):
    """Build an LSTM-FCN classifier over a variable-length 1D series.

    Two parallel branches read the (1, None)-shaped input: an LSTM branch
    (NUM_CELLS units, 20% dropout) and a fully-convolutional branch of three
    Conv1D/BatchNorm/ReLU stages followed by global average pooling. Their
    features are concatenated and fed through an L2-regularized dense layer
    into an NB_CLASS-way softmax.
    """
    series_input = Input(shape=(1, None))

    # LSTM branch: move time onto axis 1 before the recurrent layer.
    lstm_branch = Permute((2, 1))(series_input)
    lstm_branch = LSTM(NUM_CELLS)(lstm_branch)
    lstm_branch = Dropout(0.2)(lstm_branch)

    # Convolutional branch: three conv blocks with shrinking kernel sizes.
    conv_branch = Permute((2, 1))(series_input)
    for n_filters, kernel_size in ((128, 8), (256, 5), (128, 3)):
        conv_branch = Conv1D(n_filters, kernel_size, padding='same', kernel_initializer='he_uniform')(conv_branch)
        conv_branch = BatchNormalization()(conv_branch)
        conv_branch = Activation('relu')(conv_branch)
    conv_branch = GlobalAveragePooling1D()(conv_branch)

    merged = concatenate([lstm_branch, conv_branch])
    merged = Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.01))(merged)
    output = Dense(NB_CLASS, activation='softmax', kernel_regularizer=regularizers.l2(0.001))(merged)
    return Model(series_input, output)
# --- Experiment configuration ---
epochs = 25
LR = 5e-5
batch_size = 512
N_TRIALS = 25
TRAINING = True

# For each trial, rebuild the model, load the trained weights, and re-export
# them both as a Keras .h5 file and as a TensorFlow.js model.
for trial_no in range(1,N_TRIALS+1):
    # BUGFIX: random.randint requires integer bounds; the original passed the
    # float 1e3, which randrange rejects with ValueError on modern Python.
    seed = randint(0, 1000)
    tf.random.set_seed(seed)
    dataset_map = [('T11-NOISE/TRIAL-{}'.format(trial_no),0),('T12-GAUSSIAN/TRIAL-{}'.format(trial_no),1)]
    base_log_name = '%s_%d_cells_new_datasets.csv'
    base_weights_dir = '%s_%d_cells_weights/'
    normalize_dataset = False
    MODELS = [('dynamic_lstmfcn',generate_dynamic_lstmfcn),]
    CELLS = [128]
    for model_id, (MODEL_NAME, model_fn) in enumerate(MODELS):
        for cell in CELLS:
            for dname, did in dataset_map:
                NB_CLASS = 3
                # Drop the previous Keras graph so memory doesn't grow across trials.
                K.clear_session()
                weights_dir = base_weights_dir % (MODEL_NAME, cell)
                os.makedirs('weights/' + weights_dir, exist_ok=True)
                dataset_name_ = weights_dir + dname
                model = model_fn(NB_CLASS, cell)
                optm = tf.keras.optimizers.Adam()
                model.compile(optimizer=optm, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
                model.load_weights("./weights/%s_weights.h5" % dataset_name_)
                # Ensure output folders exist for both the Keras (.h5) and the
                # TensorFlow.js exports of each dataset variant.
                os.makedirs("weights/Pretrained/tfjs/Dataset-W/", exist_ok=True)
                os.makedirs("weights/Pretrained/tfjs/Dataset-C/", exist_ok=True)
                os.makedirs("weights/Pretrained/Dataset-W/", exist_ok=True)
                os.makedirs("weights/Pretrained/Dataset-C/", exist_ok=True)
                # GAUSSIAN trials are exported as Dataset-W; NOISE trials as Dataset-C.
                if("GAUSSIAN" in dname):
                    model.save("weights/Pretrained/Dataset-W/{}.h5".format(trial_no))
                    # Round-trip through load_model so the tfjs converter sees a
                    # full saved model rather than just loaded weights.
                    model = tf.keras.models.load_model("weights/Pretrained/Dataset-W/{}.h5".format(trial_no))
                    tfjs.converters.save_keras_model(model, "weights/Pretrained/tfjs/Dataset-W/{}".format(trial_no))
                else:
                    model.save("weights/Pretrained/Dataset-C/{}.h5".format(trial_no))
                    model = tf.keras.models.load_model("weights/Pretrained/Dataset-C/{}.h5".format(trial_no))
                    tfjs.converters.save_keras_model(model, "weights/Pretrained/tfjs/Dataset-C/{}".format(trial_no))
|
{"/src/model_training/exp_utils.py": ["/src/utils/generic_utils.py"]}
|
39,105,055
|
sahilsid/EWSNet
|
refs/heads/main
|
/ewsnet.py
|
import os
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Conv1D, BatchNormalization, GlobalAveragePooling1D, Permute, Dropout, Flatten
from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Activation, GRU, SimpleRNN
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
import numpy as np
class EWSNet():
    """Ensemble of LSTM-FCN models for early-warning-signal classification.

    A time series is classified as "No Transition", "Smooth Transition" or
    "Critical Transition" by averaging the softmax outputs of `ensemble`
    independently trained models.
    """

    def __init__(self, ensemble=1, weight_dir=None, prefix="", suffix=".h5"):
        # ensemble:   number of models whose predictions are averaged.
        # weight_dir: directory holding pretrained models named
        #             "<prefix><i><suffix>" for i in 1..ensemble; if None,
        #             the models stay randomly initialized.
        self.ensemble = ensemble
        self.model = [self.build_model() for _ in range(self.ensemble)]
        if weight_dir is not None:
            self.load_model(weight_dir, prefix, suffix)
        self.labels = ["No Transition", "Smooth Transition", "Critical Transition"]

    def build_model(self):
        """Build one (untrained) LSTM-FCN classifier over a variable-length series."""
        ip = Input(shape=(1, None))
        # LSTM branch.
        x = Permute((2, 1))(ip)
        x = LSTM(128)(x)
        x = Dropout(0.2)(x)
        # Fully-convolutional branch.
        y = Permute((2, 1))(ip)
        y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = GlobalAveragePooling1D()(y)
        x = concatenate([x, y])
        x = Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.01))(x)
        out = Dense(3, activation='softmax', kernel_regularizer=regularizers.l2(0.001))(x)
        return Model(ip, out)

    def load_model(self, weight_dir, prefix, suffix):
        """Load pretrained models for every ensemble member.

        Raises NameError if any expected model file is missing (type kept
        for backward compatibility; now includes the offending path).
        """
        print("==" * 30)
        for i in range(self.ensemble):
            path = "{}/{}{}{}".format(weight_dir, prefix, i + 1, suffix)
            print("Loading Model : {}".format(path))
            if os.path.exists(path):
                self.model[i] = tf.keras.models.load_model(path)
            else:
                raise NameError("Missing model file: {}".format(path))
        print("==" * 30)

    def predict(self, x):
        """Classify a 1D series; returns (label, per-class probability dict)."""
        x = np.array(x)
        x = np.reshape(x, (1, 1, x.shape[0]))
        avg_probs = np.array([self.model[i](x)[0] for i in range(self.ensemble)])
        avg_probs = np.mean(avg_probs, axis=0)
        # BUGFIX: the original called np.argmax on the *dict* below. A dict
        # becomes a 0-d object array, so argmax always returned 0 and the
        # predicted label was always "No Transition". Take the argmax of the
        # averaged probability vector instead.
        predicted_label = self.labels[int(np.argmax(avg_probs))]
        probabilities = {
            "No Transition": avg_probs[0],
            "Smooth Transition": avg_probs[1],
            "Critical Transition": avg_probs[2],
        }
        return predicted_label, probabilities
if __name__ == '__main__':
    # Example usage: load the pretrained 25-model ensemble for Dataset-W and
    # classify a short random series.
    weight_dir = "./weights/Pretrained"
    # Which pretrained dataset variant to load ("W" or "C").
    dataset = "W"
    # Model files inside the dataset folder are named "<prefix><i><suffix>".
    prefix = ""
    suffix = ".h5"
    # Number of ensemble members to load and average.
    ensemble = 25
    ewsnet = EWSNet(ensemble=ensemble, weight_dir=os.path.join(weight_dir,"Dataset-{}".format(dataset)), prefix=prefix,suffix=suffix)
    x = np.random.randint(1,2,(20,))
    print(ewsnet.predict(x))
|
{"/src/model_training/exp_utils.py": ["/src/utils/generic_utils.py"]}
|
39,110,314
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/transforms/_transform.py
|
import collections.abc
import inspect
import re
from typing import Any, Callable, Dict, Optional, Type, Union, cast, Set, Collection
import torch
from torch import nn
from torchvision.prototype import features
from torchvision.prototype.utils._internal import add_suggestion
class Transform(nn.Module):
    """Base class for transforms.

    A transform operates on a full sample at once, which might be a nested container of elements to transform. The
    non-container elements of the sample will be dispatched to feature transforms based on their type in case it is
    supported by the transform. Each transform needs to define at least one feature transform, which is canonically
    done as a static method:

    .. code-block::

        class ImageIdentity(Transform):
            @staticmethod
            def image(input):
                return input

    To achieve correct results for a complete sample, each transform should implement feature transforms for every
    :class:`Feature` it can handle:

    .. code-block::

        class Identity(Transform):
            @staticmethod
            def image(input):
                return input

            @staticmethod
            def bounding_box(input):
                return input

            ...

    If the name of a static method in camel-case matches the name of a :class:`Feature`, the feature transform is
    auto-registered. Supported pairs are:

    +----------------+----------------+
    | method name    | `Feature`      |
    +================+================+
    | `image`        | `Image`        |
    +----------------+----------------+
    | `bounding_box` | `BoundingBox`  |
    +----------------+----------------+
    | `label`        | `Label`        |
    +----------------+----------------+

    If you don't want to stick to this scheme, you can disable the auto-registration and perform it manually:

    .. code-block::

        def my_image_transform(input):
            ...

        class MyTransform(Transform, auto_register=False):
            def __init__(self):
                super().__init__()
                self.register_feature_transform(Image, my_image_transform)
                self.register_feature_transform(BoundingBox, self.my_bounding_box_transform)

            @staticmethod
            def my_bounding_box_transform(input):
                ...

    In any case, the registration will assert that the feature transform can be invoked with
    ``feature_transform(input, **params)``.

    .. warning::

        Feature transforms are **registered on the class and not on the instance**. This means you cannot have two
        instances of the same :class:`Transform` with different feature transforms.

    If the feature transform needs additional parameters, you need to
    overwrite the :meth:`~Transform.get_params` method. It needs to return the parameter dictionary that will be
    unpacked and its contents passed to each feature transform:

    .. code-block::

        class Rotate(Transform):
            def __init__(self, degrees):
                super().__init__()
                self.degrees = degrees

            def get_params(self, sample):
                return dict(degrees=self.degrees)

            @staticmethod
            def image(input, *, degrees):
                ...

    The :meth:`~Transform.get_params` method will be invoked once per sample. Thus, in case of randomly sampled
    parameters they will be the same for all features of the whole sample.

    .. code-block::

        class RandomRotate(Transform)
            def __init__(self, range):
                super().__init__()
                self._dist = torch.distributions.Uniform(range)

            def get_params(self, sample):
                return dict(degrees=self._dist.sample().item())

            @staticmethod
            def image(input, *, degrees):
                ...

    In case the sampling depends on one or more features at runtime, the complete ``sample`` gets passed to the
    :meth:`Transform.get_params` method. Derivative transforms that only change the parameter sampling, but whose
    feature transformations are identical, can simply wrap the transform they dispatch to:

    .. code-block::

        class RandomRotate(Transform, wraps=Rotate):
            def get_params(self, sample):
                return dict(degrees=float(torch.rand(())) * 30.0)

    To transform a sample, you simply call an instance of the transform with it:

    .. code-block::

        transform = MyTransform()
        sample = dict(input=Image(torch.tensor(...)), target=BoundingBox(torch.tensor(...)), ...)
        transformed_sample = transform(sample)

    .. note::

        To use a :class:`Transform` with a dataset, simply use it as map:

        .. code-block::

            torchvision.datasets.load(...).map(MyTransform())
    """

    _BUILTIN_FEATURE_TYPES = (
        features.BoundingBox,
        features.Image,
        features.Label,
    )
    # Maps snake_case method names ("bounding_box") to their feature types
    # (BoundingBox) for auto-registration.
    _FEATURE_NAME_MAP = {
        "_".join([part.lower() for part in re.findall("[A-Z][^A-Z]*", feature_type.__name__)]): feature_type
        for feature_type in _BUILTIN_FEATURE_TYPES
    }
    _feature_transforms: Dict[Type[features.Feature], Callable]
    NO_OP_FEATURE_TYPES: Collection[Type[features.Feature]] = ()

    def __init_subclass__(
        cls, *, wraps: Optional[Type["Transform"]] = None, auto_register: bool = True, verbose: bool = False
    ) -> None:
        # Each subclass gets its own registry (optionally seeded from the
        # transform it wraps) so registrations don't leak between classes.
        cls._feature_transforms = {} if wraps is None else wraps._feature_transforms.copy()
        if wraps:
            cls.NO_OP_FEATURE_TYPES = wraps.NO_OP_FEATURE_TYPES
        if auto_register:
            cls._auto_register(verbose=verbose)

    @staticmethod
    def _has_allowed_signature(feature_transform: Callable) -> bool:
        """Checks if ``feature_transform`` can be invoked with ``feature_transform(input, **params)``"""
        parameters = tuple(inspect.signature(feature_transform).parameters.values())
        if not parameters:
            return False
        elif len(parameters) == 1:
            # Single parameter: it must be able to receive the positional input.
            return parameters[0].kind != inspect.Parameter.KEYWORD_ONLY
        else:
            # Further parameters must be passable as keywords (the params dict).
            return parameters[1].kind != inspect.Parameter.POSITIONAL_ONLY

    @classmethod
    def register_feature_transform(cls, feature_type: Type[features.Feature], transform: Callable) -> None:
        """Registers a transform for given feature on the class.

        If a transform object is called or :meth:`Transform.apply` is invoked, inputs are dispatched to the registered
        transforms based on their type.

        Args:
            feature_type: Feature type the transformation is registered for.
            transform: Feature transformation.

        Raises:
            TypeError: If ``transform`` cannot be invoked with ``transform(input, **params)``.
        """
        if not cls._has_allowed_signature(transform):
            raise TypeError("Feature transform cannot be invoked with transform(input, **params)")
        cls._feature_transforms[feature_type] = transform

    @classmethod
    def _auto_register(cls, *, verbose: bool = False) -> None:
        """Auto-registers methods on the class as feature transforms if they meet the following criteria:

        1. They are static.
        2. They can be invoked with `cls.feature_transform(input, **params)`.
        3. They are public.
        4. Their name in camel case matches the name of a builtin feature, e.g. 'bounding_box' and 'BoundingBox'.

        The name from 4. determines for which feature the method is registered.

        .. note::

            The ``auto_register`` and ``verbose`` flags need to be passed as keyword arguments to the class:

            .. code-block::

                class MyTransform(Transform, auto_register=True, verbose=True):
                    ...

        Args:
            verbose: If ``True``, prints to STDOUT which methods were registered or why a method was not registered
        """
        for name, value in inspect.getmembers(cls):
            # check if attribute is a static method and was defined in the subclass
            # TODO: this needs to be revisited to allow subclassing of custom transforms
            if not (name in cls.__dict__ and inspect.isfunction(value)):
                continue

            not_registered_prefix = f"{cls.__name__}.{name}() was not registered as feature transform, because"

            if not cls._has_allowed_signature(value):
                if verbose:
                    print(f"{not_registered_prefix} it cannot be invoked with {name}(input, **params).")
                continue

            if name.startswith("_"):
                if verbose:
                    print(f"{not_registered_prefix} it is private.")
                continue

            try:
                feature_type = cls._FEATURE_NAME_MAP[name]
            except KeyError:
                if verbose:
                    print(
                        add_suggestion(
                            f"{not_registered_prefix} its name doesn't match any known feature type.",
                            word=name,
                            possibilities=cls._FEATURE_NAME_MAP.keys(),
                            close_match_hint=lambda close_match: (
                                f"Did you mean to name it '{close_match}' "
                                f"to be registered for type '{cls._FEATURE_NAME_MAP[close_match]}'?"
                            ),
                        )
                    )
                continue

            cls.register_feature_transform(feature_type, value)
            if verbose:
                print(
                    f"{cls.__name__}.{name}() was registered as feature transform for type '{feature_type.__name__}'."
                )

    @classmethod
    def from_callable(
        cls,
        feature_transform: Union[Callable, Dict[Type[features.Feature], Callable]],
        *,
        name: str = "FromCallable",
        get_params: Optional[Union[Dict[str, Any], Callable[[Any], Dict[str, Any]]]] = None,
    ) -> "Transform":
        """Creates a new transform from a callable.

        Args:
            feature_transform: Feature transform that will be registered to handle :class:`Image`'s. Can be passed as
                dictionary in which case each key-value-pair needs to consist of a ``Feature`` type and the
                corresponding transform.
            name: Name of the transform.
            get_params: Parameter dictionary ``params`` that will be passed to ``feature_transform(input, **params)``.
                Can be passed as callable in which case it will be called with the transform instance (``self``) and
                the input of the transform.

        Raises:
            TypeError: If ``feature_transform`` cannot be invoked with ``feature_transform(input, **params)``.
        """
        if get_params is None:
            get_params = dict()
        attributes = dict(
            get_params=get_params if callable(get_params) else lambda self, sample: get_params,  # type: ignore[misc]
        )
        # Dynamically create a subclass so the registry lives on a fresh class.
        transform_cls = cast(Type[Transform], type(name, (cls,), attributes))

        if callable(feature_transform):
            feature_transform = {features.Image: feature_transform}
        for feature_type, transform in feature_transform.items():
            transform_cls.register_feature_transform(feature_type, transform)

        return transform_cls()

    @classmethod
    def supported_feature_types(cls) -> Set[Type[features.Feature]]:
        return set(cls._feature_transforms.keys())

    @classmethod
    def supports(cls, obj: Any) -> bool:
        """Checks if object or type is supported.

        Args:
            obj: Object or type.
        """
        # TODO: should this handle containers?
        feature_type = obj if isinstance(obj, type) else type(obj)
        return feature_type is torch.Tensor or feature_type in cls.supported_feature_types()

    @classmethod
    def transform(cls, input: Union[torch.Tensor, features.Feature], **params: Any) -> torch.Tensor:
        """Applies the registered feature transform to the input based on its type.

        This can be used as feature type generic functional interface:

        .. code-block::

            transform = Rotate.transform
            transformed_image = transform(Image(torch.tensor(...)), degrees=30.0)
            transformed_bbox = transform(BoundingBox(torch.tensor(...)), degrees=-10.0)

        Args:
            input: ``input`` in ``feature_transform(input, **params)``
            **params: Parameter dictionary ``params`` in ``feature_transform(input, **params)``.

        Returns:
            Transformed input.
        """
        feature_type = type(input)
        if not cls.supports(feature_type):
            raise TypeError(f"{cls.__name__}() is not able to handle inputs of type {feature_type}.")

        if feature_type is torch.Tensor:
            # To keep BC, we treat all regular torch.Tensor's as images
            feature_type = features.Image
            input = feature_type(input)

        feature_type = cast(Type[features.Feature], feature_type)
        feature_transform = cls._feature_transforms[feature_type]
        output = feature_transform(input, **params)

        if type(output) is torch.Tensor:
            # Re-wrap plain tensor outputs in the input's feature type, carrying
            # over the input's metadata.
            output = feature_type(output, like=input)

        return output

    def _transform_recursively(self, sample: Any, *, params: Dict[str, Any]) -> Any:
        """Recurses through a sample and invokes :meth:`Transform.transform` on non-container elements.

        If an element is not supported by the transform, it is returned untransformed.

        Args:
            sample: Sample.
            params: Parameter dictionary ``params`` that will be passed to ``feature_transform(input, **params)``.
        """
        # We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop:
        # "a" == "a"[0][0]...
        if isinstance(sample, collections.abc.Sequence) and not isinstance(sample, str):
            return [self._transform_recursively(item, params=params) for item in sample]
        elif isinstance(sample, collections.abc.Mapping):
            return {name: self._transform_recursively(item, params=params) for name, item in sample.items()}
        else:
            feature_type = type(sample)
            if not self.supports(feature_type):
                if (
                    not issubclass(feature_type, features.Feature)
                    # issubclass is not a strict check, but also allows the type checked against. Thus, we need to
                    # check it separately
                    or feature_type is features.Feature
                    or feature_type in self.NO_OP_FEATURE_TYPES
                ):
                    return sample

                raise TypeError(
                    f"{type(self).__name__}() is not able to handle inputs of type {feature_type}. "
                    f"If you want it to be a no-op, add the feature type to {type(self).__name__}.NO_OP_FEATURE_TYPES."
                )

            return self.transform(cast(Union[torch.Tensor, features.Feature], sample), **params)

    def get_params(self, sample: Any) -> Dict[str, Any]:
        """Returns the parameter dictionary used to transform the current sample.

        .. note::

            Since ``sample`` might be a nested container, it is recommended to use the
            :class:`torchvision.datasets.utils.Query` class if you need to extract information from it.

        Args:
            sample: Current sample.

        Returns:
            Parameter dictionary ``params`` in ``feature_transform(input, **params)``.
        """
        return dict()

    def forward(
        self,
        *inputs: Any,
        params: Optional[Dict[str, Any]] = None,
    ) -> Any:
        if not self._feature_transforms:
            raise RuntimeError(f"{type(self).__name__}() has no registered feature transform.")

        # Multiple positional arguments are treated as one tuple-sample.
        sample = inputs if len(inputs) > 1 else inputs[0]
        if params is None:
            # Sample the parameters once so they are shared by every feature in the sample.
            params = self.get_params(sample)
        return self._transform_recursively(sample, params=params)
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,315
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/datasets/_builtin/imagenet.py
|
import functools
import io
import pathlib
import re
from typing import Any, Callable, Dict, List, Optional, Tuple, cast
import torch
from torchdata.datapipes.iter import IterDataPipe, LineReader, IterKeyZipper, Mapper, TarArchiveReader, Filter
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
OnlineResource,
ManualDownloadResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
BUILTIN_DIR,
path_comparator,
Enumerator,
getitem,
read_mat,
hint_sharding,
hint_shuffling,
)
from torchvision.prototype.features import Label
from torchvision.prototype.utils._internal import FrozenMapping
class ImageNetResource(ManualDownloadResource):
    """Resource that must be downloaded manually from image-net.org.

    ImageNet does not permit automated downloads, so every resource carries
    the same manual-download instruction for the user.
    """

    def __init__(self, **kwargs: Any) -> None:
        super().__init__("Register on https://image-net.org/ and follow the instructions there.", **kwargs)
class ImageNet(Dataset):
    """ILSVRC 2012 ImageNet classification dataset (train/val/test splits).

    The image and devkit archives cannot be downloaded automatically; see
    ``ImageNetResource`` for the manual-download instructions.
    """

    def _make_info(self) -> DatasetInfo:
        """Build the static dataset description, including wnid<->category maps."""
        name = "imagenet"
        categories, wnids = zip(*DatasetInfo.read_categories_file(BUILTIN_DIR / f"{name}.categories"))
        return DatasetInfo(
            name,
            type=DatasetType.IMAGE,
            dependencies=("scipy",),
            categories=categories,
            homepage="https://www.image-net.org/",
            valid_options=dict(split=("train", "val", "test")),
            extra=dict(
                wnid_to_category=FrozenMapping(zip(wnids, categories)),
                category_to_wnid=FrozenMapping(zip(categories, wnids)),
                # Number of samples per split.
                sizes=FrozenMapping(
                    [
                        (DatasetConfig(split="train"), 1_281_167),
                        (DatasetConfig(split="val"), 50_000),
                        (DatasetConfig(split="test"), 100_000),
                    ]
                ),
            ),
        )

    def supports_sharded(self) -> bool:
        return True

    @property
    def category_to_wnid(self) -> Dict[str, str]:
        return cast(Dict[str, str], self.info.extra.category_to_wnid)

    @property
    def wnid_to_category(self) -> Dict[str, str]:
        return cast(Dict[str, str], self.info.extra.wnid_to_category)

    # SHA256 checksums of the image archives, keyed by the archive's split name.
    _IMAGES_CHECKSUMS = {
        "train": "b08200a27a8e34218a0e58fde36b0fe8f73bc377f4acea2d91602057c3ca45bb",
        "val": "c7e06a6c0baccf06d8dbeb6577d71efff84673a5dbdd50633ab44f8ea0456ae0",
        "test_v10102019": "9cf7f8249639510f17d3d8a0deb47cd22a435886ba8e29e2b3223e65a4079eb4",
    }

    def resources(self, config: DatasetConfig) -> List[OnlineResource]:
        """Return the (images, devkit) resources for the configured split."""
        # The test split's archive carries a version suffix in its file name.
        name = "test_v10102019" if config.split == "test" else config.split
        images = ImageNetResource(file_name=f"ILSVRC2012_img_{name}.tar", sha256=self._IMAGES_CHECKSUMS[name])
        devkit = ImageNetResource(
            file_name="ILSVRC2012_devkit_t12.tar.gz",
            sha256="b59243268c0d266621fd587d2018f69e906fb22875aca0e295b48cafaa927953",
        )
        return [images, devkit]

    # Train image names look like 'n01440764_10026.JPEG'; the prefix is the WordNet ID.
    _TRAIN_IMAGE_NAME_PATTERN = re.compile(r"(?P<wnid>n\d{8})_\d+[.]JPEG")

    def _collate_train_data(self, data: Tuple[str, io.IOBase]) -> Tuple[Tuple[Label, str, str], Tuple[str, io.IOBase]]:
        """Derive (label, category, wnid) for a train image from its file name."""
        path = pathlib.Path(data[0])
        wnid = self._TRAIN_IMAGE_NAME_PATTERN.match(path.name).group("wnid")  # type: ignore[union-attr]
        category = self.wnid_to_category[wnid]
        label_data = (Label(self.categories.index(category)), category, wnid)
        return label_data, data

    _VAL_TEST_IMAGE_NAME_PATTERN = re.compile(r"ILSVRC2012_(val|test)_(?P<id>\d{8})[.]JPEG")

    def _val_test_image_key(self, data: Tuple[str, Any]) -> int:
        """Extract the numeric image id embedded in a val/test image file name."""
        path = pathlib.Path(data[0])
        return int(self._VAL_TEST_IMAGE_NAME_PATTERN.match(path.name).group("id"))  # type: ignore[union-attr]

    def _collate_val_data(
        self, data: Tuple[Tuple[int, int], Tuple[str, io.IOBase]]
    ) -> Tuple[Tuple[Label, str, str], Tuple[str, io.IOBase]]:
        """Pair a validation image with the label read from the devkit ground truth."""
        label_data, image_data = data
        _, label = label_data
        category = self.categories[label]
        wnid = self.category_to_wnid[category]
        return (Label(label), category, wnid), image_data

    def _collate_test_data(self, data: Tuple[str, io.IOBase]) -> Tuple[None, Tuple[str, io.IOBase]]:
        # The test split is unlabeled.
        return None, data

    def _collate_and_decode_sample(
        self,
        data: Tuple[Optional[Tuple[Label, str, str]], Tuple[str, io.IOBase]],
        *,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> Dict[str, Any]:
        """Turn a (label data, image data) pair into a sample dict; decode if requested."""
        label_data, (path, buffer) = data
        sample = dict(
            path=path,
            image=decoder(buffer) if decoder else buffer,
        )
        # label_data is None for the test split; then only path/image are emitted.
        if label_data:
            sample.update(dict(zip(("label", "category", "wnid"), label_data)))
        return sample

    def _make_datapipe(
        self,
        resource_dps: List[IterDataPipe],
        *,
        config: DatasetConfig,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> IterDataPipe[Dict[str, Any]]:
        """Assemble the datapipe graph for the requested split."""
        images_dp, devkit_dp = resource_dps
        if config.split == "train":
            # the train archive is a tar of tars
            dp = TarArchiveReader(images_dp)
            dp = hint_sharding(dp)
            dp = hint_shuffling(dp)
            dp = Mapper(dp, self._collate_train_data)
        elif config.split == "val":
            # One integer label per line of the ground-truth file; enumerate
            # from 1 so the index matches the id embedded in the image names.
            devkit_dp = Filter(devkit_dp, path_comparator("name", "ILSVRC2012_validation_ground_truth.txt"))
            devkit_dp = LineReader(devkit_dp, return_path=False)
            devkit_dp = Mapper(devkit_dp, int)
            devkit_dp = Enumerator(devkit_dp, 1)
            devkit_dp = hint_sharding(devkit_dp)
            devkit_dp = hint_shuffling(devkit_dp)
            dp = IterKeyZipper(
                devkit_dp,
                images_dp,
                key_fn=getitem(0),
                ref_key_fn=self._val_test_image_key,
                buffer_size=INFINITE_BUFFER_SIZE,
            )
            dp = Mapper(dp, self._collate_val_data)
        else:  # config.split == "test"
            dp = hint_sharding(images_dp)
            dp = hint_shuffling(dp)
            dp = Mapper(dp, self._collate_test_data)
        return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))

    # Although the WordNet IDs (wnids) are unique, the corresponding categories are not. For example, both n02012849
    # and n03126707 are labeled 'crane' while the first means the bird and the latter means the construction equipment
    _WNID_MAP = {
        "n03126707": "construction crane",
        "n03710721": "tank suit",
    }

    def _generate_categories(self, root: pathlib.Path) -> List[Tuple[str, ...]]:
        """Regenerate the (category, wnid) pairs from the devkit's meta.mat file."""
        resources = self.resources(self.default_config)
        devkit_dp = resources[1].load(root / self.name)
        devkit_dp = Filter(devkit_dp, path_comparator("name", "meta.mat"))
        meta = next(iter(devkit_dp))[1]
        synsets = read_mat(meta, squeeze_me=True)["synsets"]
        categories_and_wnids = cast(
            List[Tuple[str, ...]],
            [
                # Ambiguous category names are disambiguated via _WNID_MAP;
                # otherwise only the first comma-separated synonym is kept.
                (self._WNID_MAP.get(wnid, category.split(",", 1)[0]), wnid)
                for _, wnid, category, _, num_children, *_ in synsets
                # if num_children > 0, we are looking at a superclass that has no direct instance
                if num_children == 0
            ],
        )
        categories_and_wnids.sort(key=lambda category_and_wnid: category_and_wnid[1])
        return categories_and_wnids
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,316
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/transforms/_container.py
|
from typing import Any, List
import torch
from torch import nn
from torchvision.prototype.transforms import Transform
class ContainerTransform(nn.Module):
    """Base class for transforms that wrap or combine other transforms.

    Subclasses must implement :meth:`supports` and :meth:`forward`.
    """

    def supports(self, obj: Any) -> bool:
        """Return whether this container (and its children) can handle ``obj``."""
        raise NotImplementedError()

    def forward(self, *inputs: Any) -> Any:
        raise NotImplementedError()

    def _make_repr(self, lines: List[str]) -> str:
        """Assemble a multi-line ``repr`` of the form ``Name(\\n <line>\\n ... )``.

        A non-empty ``extra_repr()`` is prepended as the first body line.
        """
        extra_repr = self.extra_repr()
        if extra_repr:
            # Reuse the value computed above instead of calling extra_repr() twice.
            lines = [extra_repr, *lines]
        head = f"{type(self).__name__}("
        tail = ")"
        body = [f" {line.rstrip()}" for line in lines]
        return "\n".join([head, *body, tail])
class WrapperTransform(ContainerTransform):
    """Container that holds exactly one wrapped transform."""

    def __init__(self, transform: Transform):
        super().__init__()
        self._transform = transform

    def supports(self, obj: Any) -> bool:
        # Delegate the support check to the wrapped transform.
        return self._transform.supports(obj)

    def __repr__(self) -> str:
        inner_lines = repr(self._transform).splitlines()
        return self._make_repr(inner_lines)
class MultiTransform(ContainerTransform):
    """Container that holds an ordered collection of transforms."""

    def __init__(self, *transforms: Transform) -> None:
        super().__init__()
        self._transforms = transforms

    def supports(self, obj: Any) -> bool:
        # Supported only if no contained transform rejects the object.
        for transform in self._transforms:
            if not transform.supports(obj):
                return False
        return True

    def __repr__(self) -> str:
        lines = []
        for idx, transform in enumerate(self._transforms):
            partial = repr(transform).splitlines()
            # Prefix the first line of each child repr with its index.
            lines.append(f"({idx:d}): {partial[0]}")
            lines.extend(partial[1:])
        return self._make_repr(lines)
class Compose(MultiTransform):
    """Apply the contained transforms one after another."""

    def forward(self, *inputs: Any) -> Any:
        # Multiple positional inputs are treated as one tuple-valued sample.
        if len(inputs) > 1:
            sample = inputs
        else:
            sample = inputs[0]
        for transform in self._transforms:
            sample = transform(sample)
        return sample
class RandomApply(WrapperTransform):
    """Apply the wrapped transform with probability ``p``; otherwise pass through."""

    def __init__(self, transform: Transform, *, p: float = 0.5) -> None:
        super().__init__(transform)
        self._p = p

    def forward(self, *inputs: Any) -> Any:
        sample = inputs if len(inputs) > 1 else inputs[0]
        # Bug fix: ``p`` is the probability of *applying* the transform. The
        # previous comparison (`rand < p -> return sample`) skipped the
        # transform with probability ``p``, inverting the parameter's meaning
        # relative to ``torchvision.transforms.RandomApply``.
        if float(torch.rand(())) >= self._p:
            return sample
        return self._transform(sample)

    def extra_repr(self) -> str:
        return f"p={self._p}"
class RandomChoice(MultiTransform):
    """Apply a single transform picked uniformly at random from the collection."""

    def forward(self, *inputs: Any) -> Any:
        pick = int(torch.randint(len(self._transforms), size=()))
        chosen = self._transforms[pick]
        return chosen(*inputs)
class RandomOrder(MultiTransform):
    """Apply all contained transforms in a uniformly random order."""

    def forward(self, *inputs: Any) -> Any:
        # Collapse multiple positional inputs into one sample, mirroring the
        # convention used by the other container transforms (see Compose).
        sample = inputs if len(inputs) > 1 else inputs[0]
        for idx in torch.randperm(len(self._transforms)):
            transform = self._transforms[idx]
            # Bug fix: the previous implementation re-unpacked each transform's
            # output with ``transform(*inputs)``, which breaks as soon as a
            # transform returns a single (non-tuple) sample such as a tensor.
            sample = transform(sample)
        return sample
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,317
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/datasets/utils/__init__.py
|
from . import _internal
from ._dataset import DatasetType, DatasetConfig, DatasetInfo, Dataset
from ._query import SampleQuery
from ._resource import OnlineResource, HttpResource, GDriveResource, ManualDownloadResource
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,318
|
Hsuxu/vision
|
refs/heads/master
|
/test/test_prototype_features.py
|
import functools
import itertools
import pytest
import torch
from torch.testing import make_tensor as _make_tensor, assert_close
from torchvision.prototype import features
from torchvision.prototype.utils._internal import sequence_to_str
# All test tensors live on CPU and default to float32.
make_tensor = functools.partial(_make_tensor, device="cpu", dtype=torch.float32)


def make_image(**kwargs):
    """Create a random 3-channel image feature with side lengths in [16, 32]."""
    height, width = torch.randint(16, 33, (2,)).tolist()
    data = make_tensor((3, height, width))
    return features.Image(data, **kwargs)
def make_bounding_box(*, format="xyxy", image_size=(10, 10)):
    """Create a random bounding box of the given format within ``image_size``.

    ``format`` may be a string name or a ``features.BoundingBoxFormat`` member;
    ``image_size`` is ``(height, width)``.
    """
    if isinstance(format, str):
        format = features.BoundingBoxFormat[format]
    height, width = image_size
    if format == features.BoundingBoxFormat.XYXY:
        x1 = torch.randint(0, width // 2, ())
        y1 = torch.randint(0, height // 2, ())
        # NOTE(review): the second corner is sampled relative to the first so
        # that x2 > x1 and y2 > y1; presumably the bounds are meant to keep the
        # box inside the image -- confirm for small image_size values.
        x2 = torch.randint(int(x1) + 1, width - int(x1), ()) + x1
        y2 = torch.randint(int(y1) + 1, height - int(y1), ()) + y1
        parts = (x1, y1, x2, y2)
    elif format == features.BoundingBoxFormat.XYWH:
        x = torch.randint(0, width // 2, ())
        y = torch.randint(0, height // 2, ())
        w = torch.randint(1, width - int(x), ())
        h = torch.randint(1, height - int(y), ())
        parts = (x, y, w, h)
    elif format == features.BoundingBoxFormat.CXCYWH:
        cx = torch.randint(1, width - 1, ())
        cy = torch.randint(1, height - 1, ())
        # Width/height are bounded so the box stays within the image borders.
        w = torch.randint(1, min(int(cx), width - int(cx)), ())
        h = torch.randint(1, min(int(cy), height - int(cy)), ())
        parts = (cx, cy, w, h)
    else:  # format == features.BoundingBoxFormat._SENTINEL:
        # Unknown/sentinel format: four arbitrary scalar values suffice.
        parts = make_tensor((4,)).unbind()
    return features.BoundingBox.from_parts(*parts, format=format, image_size=image_size)
# Feature types with dedicated random-data factories.
MAKE_DATA_MAP = {
    features.Image: make_image,
    features.BoundingBox: make_bounding_box,
}


def make_feature(feature_type, **meta_data):
    """Instantiate ``feature_type`` with random data, using a dedicated maker if one exists."""

    def _default_maker(**meta_data):
        # Fallback: wrap a random scalar tensor in the feature type.
        return feature_type(make_tensor(()), **meta_data)

    maker = MAKE_DATA_MAP.get(feature_type, _default_maker)
    return maker(**meta_data)
class TestCommon:
    """Generic checks that every feature type has to pass."""

    # Feature types under test, each paired with meta data that differs from
    # the type's defaults (so attribute-access tests are meaningful).
    FEATURE_TYPES, NON_DEFAULT_META_DATA = zip(
        *(
            (features.Image, dict(color_space=features.ColorSpace._SENTINEL)),
            (features.Label, dict(category="category")),
            (features.BoundingBox, dict(format=features.BoundingBoxFormat._SENTINEL, image_size=(-1, -1))),
        )
    )
    feature_types = pytest.mark.parametrize(
        "feature_type", FEATURE_TYPES, ids=lambda feature_type: feature_type.__name__
    )
    # NOTE: this class attribute shadows the module-level ``features`` import
    # inside the class *body* only; the decorators below refer to this
    # parametrization, while methods still see the module.
    features = pytest.mark.parametrize(
        "feature",
        [
            pytest.param(make_feature(feature_type, **meta_data), id=feature_type.__name__)
            for feature_type, meta_data in zip(FEATURE_TYPES, NON_DEFAULT_META_DATA)
        ],
    )

    def test_consistency(self):
        """Every public Feature subclass exposed by the module must be listed in FEATURE_TYPES."""
        builtin_feature_types = {
            name
            for name, feature_type in features.__dict__.items()
            if not name.startswith("_")
            and isinstance(feature_type, type)
            and issubclass(feature_type, features.Feature)
            and feature_type is not features.Feature
        }
        untested_feature_types = builtin_feature_types - {feature_type.__name__ for feature_type in self.FEATURE_TYPES}
        if untested_feature_types:
            raise AssertionError(
                f"The feature(s) {sequence_to_str(sorted(untested_feature_types), separate_last='and ')} "
                f"is/are exposed at `torchvision.prototype.features`, but is/are not tested by `TestCommon`. "
                f"Please add it/them to `TestCommon.FEATURE_TYPES`."
            )

    @features
    def test_meta_data_attribute_access(self, feature):
        # Every meta data entry must be reachable as an attribute of the feature.
        for name, value in feature._meta_data.items():
            assert getattr(feature, name) == feature._meta_data[name]

    @feature_types
    def test_torch_function(self, feature_type):
        input = make_feature(feature_type)
        # This can be any Tensor operation besides clone
        output = input + 1
        # Regular tensor ops are expected to return a plain Tensor, not the subclass.
        assert type(output) is torch.Tensor
        assert_close(output, input + 1)

    @feature_types
    def test_clone(self, feature_type):
        input = make_feature(feature_type)
        output = input.clone()
        # clone() must preserve both the subclass and the meta data.
        assert type(output) is feature_type
        assert_close(output, input)
        assert output._meta_data == input._meta_data

    @features
    def test_serialization(self, tmpdir, feature):
        """torch.save/torch.load must round-trip the feature, including meta data."""
        file = tmpdir / "test_serialization.pt"
        torch.save(feature, str(file))
        loaded_feature = torch.load(str(file))
        assert isinstance(loaded_feature, type(feature))
        assert_close(loaded_feature, feature)
        assert loaded_feature._meta_data == feature._meta_data

    @features
    def test_repr(self, feature):
        # The repr must mention the concrete feature type's name.
        assert type(feature).__name__ in repr(feature)
class TestBoundingBox:
    @pytest.mark.parametrize(("format", "intermediate_format"), itertools.permutations(("xyxy", "xywh"), 2))
    def test_cycle_consistency(self, format, intermediate_format):
        """Converting to another format and back must be lossless."""
        original = make_bounding_box(format=format)
        round_tripped = original.convert(intermediate_format).convert(format)
        assert_close(original, round_tripped)
# For now, tensor subclasses with additional meta data do not work with torchscript.
# See https://github.com/pytorch/vision/pull/4721#discussion_r741676037.
@pytest.mark.xfail
class TestJit:
    def test_bounding_box(self):
        """Trace a small bounding-box pipeline and compare it against eager execution."""

        def resize(input: features.BoundingBox, size: torch.Tensor) -> features.BoundingBox:
            # Scale the box corners by the ratio of new to old image size.
            old_height, old_width = input.image_size
            new_height, new_width = size
            height_scale = new_height / old_height
            width_scale = new_width / old_width
            old_x1, old_y1, old_x2, old_y2 = input.convert("xyxy").to_parts()
            new_x1 = old_x1 * width_scale
            new_y1 = old_y1 * height_scale
            new_x2 = old_x2 * width_scale
            new_y2 = old_y2 * height_scale
            return features.BoundingBox.from_parts(
                new_x1, new_y1, new_x2, new_y2, like=input, format="xyxy", image_size=tuple(size.tolist())
            )

        def horizontal_flip(input: features.BoundingBox) -> features.BoundingBox:
            # Mirror the box around the vertical center line of the image.
            x, y, w, h = input.convert("xywh").to_parts()
            x = input.image_size[1] - (x + w)
            return features.BoundingBox.from_parts(x, y, w, h, like=input, format="xywh")

        def compose(input: features.BoundingBox, size: torch.Tensor) -> features.BoundingBox:
            return horizontal_flip(resize(input, size)).convert("xyxy")

        image_size = (8, 6)
        input = features.BoundingBox([2, 4, 2, 4], format="cxcywh", image_size=image_size)
        size = torch.tensor((4, 12))
        expected = features.BoundingBox([6, 1, 10, 3], format="xyxy", image_size=image_size)
        actual_eager = compose(input, size)
        assert_close(actual_eager, expected)
        # check_trace=False: the traced graph is validated explicitly against
        # `expected` below instead of by torch.jit.trace's internal re-run.
        sample_inputs = (features.BoundingBox(torch.zeros((4,)), image_size=(10, 10)), torch.tensor((20, 5)))
        actual_jit = torch.jit.trace(compose, sample_inputs, check_trace=False)(input, size)
        assert_close(actual_jit, expected)
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,319
|
Hsuxu/vision
|
refs/heads/master
|
/test/builtin_dataset_mocks.py
|
import functools
import gzip
import json
import lzma
import pathlib
import pickle
import tempfile
from collections import defaultdict
from typing import Any, Dict, Tuple
import numpy as np
import PIL.Image
import pytest
import torch
from datasets_utils import create_image_folder, make_tar, make_zip
from torch.testing import make_tensor as _make_tensor
from torchdata.datapipes.iter import IterDataPipe
from torchvision.prototype import datasets
from torchvision.prototype.datasets._api import DEFAULT_DECODER_MAP, DEFAULT_DECODER
from torchvision.prototype.datasets._api import find
from torchvision.prototype.utils._internal import add_suggestion
make_tensor = functools.partial(_make_tensor, device="cpu")
make_scalar = functools.partial(make_tensor, ())
__all__ = ["load"]
DEFAULT_TEST_DECODER = object()
class TestResource(datasets.utils.OnlineResource):
    """Stand-in resource that must never hit the network.

    If a mock data function failed to create the expected file, loading falls
    through to ``_download``, which raises a descriptive usage error instead
    of attempting a real download.
    """

    def __init__(self, *, dataset_name, dataset_config, **kwargs):
        super().__init__(**kwargs)
        self.dataset_name, self.dataset_config = dataset_name, dataset_config

    def _download(self, _):
        # Reaching this point means the mock file was never generated.
        raise pytest.UsageError(
            f"Dataset '{self.dataset_name}' requires the file '{self.file_name}' for {self.dataset_config}, "
            f"but this file does not exist."
        )
class DatasetMocks:
    """Registry of mock-data generators, one per built-in dataset.

    Generated files live in a temporary home directory and are cached per
    ``(dataset name, config)`` so each combination is only created once.
    """

    def __init__(self):
        # dataset name -> function that writes fake files and returns mock info
        self._mock_data_fns = {}
        # stand-in for the torchvision home dir; one subdirectory per dataset
        self._tmp_home = pathlib.Path(tempfile.mkdtemp())
        # (name, config) -> (mock resources, mock info)
        self._cache = {}

    def register_mock_data_fn(self, mock_data_fn):
        """Decorator that registers *mock_data_fn* under its ``__name__``.

        The name must match a built-in dataset; otherwise a usage error with a
        spelling suggestion is raised.
        """
        name = mock_data_fn.__name__
        if name not in datasets.list():
            raise pytest.UsageError(
                add_suggestion(
                    f"The name of the mock data function '{name}' has no corresponding dataset.",
                    word=name,
                    possibilities=datasets.list(),
                    close_match_hint=lambda close_match: f"Did you mean to name it '{close_match}'?",
                    alternative_hint=lambda _: "",
                )
            )
        self._mock_data_fns[name] = mock_data_fn
        return mock_data_fn

    def _parse_mock_info(self, mock_info, *, name):
        """Normalize a mock function's return value into a dict with a 'num_samples' key."""
        if mock_info is None:
            raise pytest.UsageError(
                f"The mock data function for dataset '{name}' returned nothing. It needs to at least return an integer "
                f"indicating the number of samples for the current `config`."
            )
        elif isinstance(mock_info, int):
            # A bare integer is shorthand for the sample count.
            mock_info = dict(num_samples=mock_info)
        elif not isinstance(mock_info, dict):
            raise pytest.UsageError(
                f"The mock data function for dataset '{name}' returned a {type(mock_info)}. The returned object should "
                f"be a dictionary containing at least the number of samples for the current `config` for the key "
                f"`'num_samples'`. If no additional information is required for specific tests, the number of samples "
                f"can also be returned as an integer."
            )
        elif "num_samples" not in mock_info:
            raise pytest.UsageError(
                f"The dictionary returned by the mock data function for dataset '{name}' must contain a `'num_samples'` "
                f"entry indicating the number of samples for the current `config`."
            )
        return mock_info

    def _get(self, dataset, config, root):
        """Return (resources, mock info) for *dataset*/*config*, generating mock data on first use."""
        name = dataset.info.name
        resources_and_mock_info = self._cache.get((name, config))
        if resources_and_mock_info:
            return resources_and_mock_info
        try:
            fakedata_fn = self._mock_data_fns[name]
        except KeyError:
            raise pytest.UsageError(
                f"No mock data available for dataset '{name}'. "
                f"Did you add a new dataset, but forget to provide mock data for it? "
                f"Did you register the mock data function with `@DatasetMocks.register_mock_data_fn`?"
            )
        # Mirror the dataset's real resources so loading fails loudly on a missing file.
        mock_resources = [
            TestResource(dataset_name=name, dataset_config=config, file_name=resource.file_name)
            for resource in dataset.resources(config)
        ]
        mock_info = self._parse_mock_info(fakedata_fn(dataset.info, root, config), name=name)
        self._cache[(name, config)] = mock_resources, mock_info
        return mock_resources, mock_info

    def load(
        self, name: str, decoder=DEFAULT_DECODER, split="train", **options: Any
    ) -> Tuple[IterDataPipe, Dict[str, Any]]:
        """Build the datapipe for dataset *name* over mock data; returns it with the mock info."""
        dataset = find(name)
        config = dataset.info.make_config(split=split, **options)
        root = self._tmp_home / name
        root.mkdir(exist_ok=True)
        resources, mock_info = self._get(dataset, config, root)
        datapipe = dataset._make_datapipe(
            [resource.load(root) for resource in resources],
            config=config,
            decoder=DEFAULT_DECODER_MAP.get(dataset.info.type) if decoder is DEFAULT_DECODER else decoder,
        )
        return datapipe, mock_info
# Module-level singleton registry; `load` is the entry point the tests use.
dataset_mocks = DatasetMocks()
load = dataset_mocks.load
class MNISTFakedata:
    """Writes fake MNIST-style IDX files (images and labels) for the MNIST dataset family."""

    # IDX data-type codes from the MNIST file format.
    _DTYPES_ID = {
        torch.uint8: 8,
        torch.int8: 9,
        torch.int16: 11,
        torch.int32: 12,
        torch.float32: 13,
        torch.float64: 14,
    }

    @classmethod
    def _magic(cls, dtype, ndim):
        # IDX magic number: dtype code in the third byte, dimension count in the fourth.
        # The +1 accounts for the leading sample dimension, which is not part of *shape*.
        return cls._DTYPES_ID[dtype] * 256 + ndim + 1

    @staticmethod
    def _encode(t):
        # Serialize a scalar as a big-endian int32 by reversing the raw bytes.
        # NOTE(review): assumes a little-endian host — TODO confirm.
        return torch.tensor(t, dtype=torch.int32).numpy().tobytes()[::-1]

    @staticmethod
    def _big_endian_dtype(dtype):
        # Map a torch dtype to the matching big-endian numpy dtype (IDX payloads are big-endian).
        np_dtype = getattr(np, str(dtype).replace("torch.", ""))().dtype
        return np.dtype(f">{np_dtype.kind}{np_dtype.itemsize}")

    @classmethod
    def _create_binary_file(cls, root, filename, *, num_samples, shape, dtype, compressor, low=0, high):
        # Header (magic, num_samples, per-sample shape) followed by the big-endian payload.
        with compressor(root / filename, "wb") as fh:
            for meta in (cls._magic(dtype, len(shape)), num_samples, *shape):
                fh.write(cls._encode(meta))
            data = make_tensor((num_samples, *shape), dtype=dtype, low=low, high=high)
            fh.write(data.numpy().astype(cls._big_endian_dtype(dtype)).tobytes())

    @classmethod
    def generate(
        cls,
        root,
        *,
        num_categories,
        num_samples=None,
        images_file,
        labels_file,
        image_size=(28, 28),
        image_dtype=torch.uint8,
        label_size=(),
        label_dtype=torch.uint8,
        compressor=None,
    ):
        """Create an images file and a labels file under *root*; returns the number of samples."""
        if num_samples is None:
            num_samples = num_categories
        if compressor is None:
            compressor = gzip.open
        cls._create_binary_file(
            root,
            images_file,
            num_samples=num_samples,
            shape=image_size,
            dtype=image_dtype,
            compressor=compressor,
            high=float("inf"),
        )
        cls._create_binary_file(
            root,
            labels_file,
            num_samples=num_samples,
            shape=label_size,
            dtype=label_dtype,
            compressor=compressor,
            # Labels must be valid category indices, hence the exclusive upper bound.
            high=num_categories,
        )
        return num_samples
@dataset_mocks.register_mock_data_fn
def mnist(info, root, config):
    """Write fake MNIST IDX archives into *root*; returns the number of samples."""
    prefix = "train" if config.split == "train" else "t10k"
    return MNISTFakedata.generate(
        root,
        num_categories=len(info.categories),
        images_file=f"{prefix}-images-idx3-ubyte.gz",
        labels_file=f"{prefix}-labels-idx1-ubyte.gz",
    )
@dataset_mocks.register_mock_data_fn
def fashionmnist(info, root, config):
    """Write fake FashionMNIST IDX archives into *root*; returns the number of samples."""
    prefix = "train" if config.split == "train" else "t10k"
    return MNISTFakedata.generate(
        root,
        num_categories=len(info.categories),
        images_file=f"{prefix}-images-idx3-ubyte.gz",
        labels_file=f"{prefix}-labels-idx1-ubyte.gz",
    )
@dataset_mocks.register_mock_data_fn
def kmnist(info, root, config):
    """Write fake KMNIST IDX archives into *root*; returns the number of samples."""
    prefix = "train" if config.split == "train" else "t10k"
    return MNISTFakedata.generate(
        root,
        num_categories=len(info.categories),
        images_file=f"{prefix}-images-idx3-ubyte.gz",
        labels_file=f"{prefix}-labels-idx1-ubyte.gz",
    )
@dataset_mocks.register_mock_data_fn
def emnist(info, root, config):
    """Fake EMNIST data: IDX files for every image-set/split, bundled in one zip."""
    # The image sets that merge some lower case letters in their respective upper case variant, still use dense
    # labels in the data files. Thus, num_categories != len(categories) there.
    num_categories = defaultdict(
        lambda: len(info.categories), **{image_set: 47 for image_set in ("Balanced", "By_Merge")}
    )
    num_samples = {}
    file_names = set()
    # Generate files for *every* configuration, since the real dataset ships a single archive.
    for _config in info._configs:
        prefix = f"emnist-{_config.image_set.replace('_', '').lower()}-{_config.split}"
        images_file = f"{prefix}-images-idx3-ubyte.gz"
        labels_file = f"{prefix}-labels-idx1-ubyte.gz"
        file_names.update({images_file, labels_file})
        num_samples[_config.image_set] = MNISTFakedata.generate(
            root,
            num_categories=num_categories[_config.image_set],
            images_file=images_file,
            labels_file=labels_file,
        )
    make_zip(root, "emnist-gzip.zip", *file_names)
    return num_samples[config.image_set]
@dataset_mocks.register_mock_data_fn
def qmnist(info, root, config):
    """Fake QMNIST data; the 'nist' split uses xz compression and the 'xnist' file prefix."""
    num_categories = len(info.categories)
    if config.split == "train":
        num_samples = num_samples_gen = num_categories + 2
        prefix = "qmnist-train"
        suffix = ".gz"
        compressor = gzip.open
    elif config.split.startswith("test"):
        # The split 'test50k' is defined as the last 50k images beginning at index 10000. Thus, we need to create more
        # than 10000 images for the dataset to not be empty.
        num_samples = num_samples_gen = 10001
        if config.split == "test10k":
            num_samples = min(num_samples, 10000)
        if config.split == "test50k":
            num_samples -= 10000
        prefix = "qmnist-test"
        suffix = ".gz"
        compressor = gzip.open
    else:  # config.split == "nist"
        num_samples = num_samples_gen = num_categories + 3
        prefix = "xnist"
        suffix = ".xz"
        compressor = lzma.open
    # num_samples_gen entries are written to disk; num_samples is what the loader should yield.
    MNISTFakedata.generate(
        root,
        num_categories=num_categories,
        num_samples=num_samples_gen,
        images_file=f"{prefix}-images-idx3-ubyte{suffix}",
        labels_file=f"{prefix}-labels-idx2-int{suffix}",
        # Each QMNIST label row is 8 int32 values rather than a single class index.
        label_size=(8,),
        label_dtype=torch.int32,
        compressor=compressor,
    )
    return num_samples
class CIFARFakedata:
    """Creates fake CIFAR-style pickled batch files and tars them into an archive."""

    # 32x32 RGB images, flattened row-wise as in the real CIFAR batches.
    NUM_PIXELS = 32 * 32 * 3

    @classmethod
    def _create_batch_file(cls, root, name, *, num_categories, labels_key, num_samples=1):
        """Pickle one batch dict with random image rows and matching random labels."""
        batch = {
            "data": make_tensor((num_samples, cls.NUM_PIXELS), dtype=torch.uint8).numpy(),
            labels_key: torch.randint(0, num_categories, size=(num_samples,)).tolist(),
        }
        with open(pathlib.Path(root) / name, "wb") as fh:
            pickle.dump(batch, fh)

    @classmethod
    def generate(
        cls,
        root,
        name,
        *,
        folder,
        train_files,
        test_files,
        num_categories,
        labels_key,
    ):
        """Create every train/test batch file under *folder* and gzip-tar it into *name*."""
        batch_folder = root / folder
        batch_folder.mkdir()
        for file in (*train_files, *test_files):
            cls._create_batch_file(
                batch_folder,
                file,
                num_categories=num_categories,
                labels_key=labels_key,
            )
        make_tar(root, name, batch_folder, compression="gz")
@dataset_mocks.register_mock_data_fn
def cifar10(info, root, config):
    """Fake CIFAR-10 archive; each batch file holds a single sample."""
    train_files = [f"data_batch_{idx}" for idx in range(1, 6)]
    test_files = ["test_batch"]
    CIFARFakedata.generate(
        root=root,
        name="cifar-10-python.tar.gz",
        folder=pathlib.Path("cifar-10-batches-py"),
        train_files=train_files,
        test_files=test_files,
        num_categories=10,
        labels_key="labels",
    )
    files = train_files if config.split == "train" else test_files
    return len(files)
@dataset_mocks.register_mock_data_fn
def cifar100(info, root, config):
    """Fake CIFAR-100 archive; a single 'train' and a single 'test' batch file."""
    train_files = ["train"]
    test_files = ["test"]
    CIFARFakedata.generate(
        root=root,
        name="cifar-100-python.tar.gz",
        folder=pathlib.Path("cifar-100-python"),
        train_files=train_files,
        test_files=test_files,
        num_categories=100,
        labels_key="fine_labels",
    )
    files = train_files if config.split == "train" else test_files
    return len(files)
@dataset_mocks.register_mock_data_fn
def caltech101(info, root, config):
    """Fake Caltech101: an image archive plus a matching annotation archive.

    Returns the total number of images (two per category).
    """

    def create_ann_file(root, name):
        # One .mat annotation with a random bounding box and object contour.
        import scipy.io

        box_coord = make_tensor((1, 4), dtype=torch.int32, low=0).numpy().astype(np.uint16)
        obj_contour = make_tensor((2, int(torch.randint(3, 6, size=()))), dtype=torch.float64, low=0).numpy()
        scipy.io.savemat(str(pathlib.Path(root) / name), dict(box_coord=box_coord, obj_contour=obj_contour))

    def create_ann_folder(root, name, file_name_fn, num_examples):
        # One annotation folder per category, mirroring the image folder layout.
        root = pathlib.Path(root) / name
        root.mkdir(parents=True)
        for idx in range(num_examples):
            create_ann_file(root, file_name_fn(idx))

    images_root = root / "101_ObjectCategories"
    anns_root = root / "Annotations"
    # Some annotation folders are named differently from their image counterparts.
    ann_category_map = {
        "Faces_2": "Faces",
        "Faces_3": "Faces_easy",
        "Motorbikes_16": "Motorbikes",
        "Airplanes_Side_2": "airplanes",
    }
    num_images_per_category = 2
    for category in info.categories:
        create_image_folder(
            root=images_root,
            name=category,
            file_name_fn=lambda idx: f"image_{idx + 1:04d}.jpg",
            num_examples=num_images_per_category,
        )
        create_ann_folder(
            root=anns_root,
            name=ann_category_map.get(category, category),
            file_name_fn=lambda idx: f"annotation_{idx + 1:04d}.mat",
            num_examples=num_images_per_category,
        )
    # The real archive contains this (here empty) background folder, which loaders skip.
    # Fixed: was misspelled "BACKGROUND_Goodle", which would not match the skip logic
    # and would be treated as an (empty) regular category.
    (images_root / "BACKGROUND_Google").mkdir()
    make_tar(root, f"{images_root.name}.tar.gz", images_root, compression="gz")
    make_tar(root, f"{anns_root.name}.tar", anns_root)
    return num_images_per_category * len(info.categories)
@dataset_mocks.register_mock_data_fn
def caltech256(info, root, config):
    """Fake Caltech256: numbered category folders inside a single tar archive."""
    # Renamed from `dir` to avoid shadowing the builtin.
    archive_folder = root / "256_ObjectCategories"
    num_images_per_category = 2
    for idx, category in enumerate(info.categories, 1):
        files = create_image_folder(
            archive_folder,
            name=f"{idx:03d}.{category}",
            file_name_fn=lambda image_idx: f"{idx:03d}_{image_idx + 1:04d}.jpg",
            num_examples=num_images_per_category,
        )
        if category == "spider":
            # The real archive contains this stray file in the spider folder.
            open(files[0].parent / "RENAME2", "w").close()
    make_tar(root, f"{archive_folder.name}.tar", archive_folder)
    return num_images_per_category * len(info.categories)
@dataset_mocks.register_mock_data_fn
def imagenet(info, root, config):
    """Fake ImageNet: a split-specific image archive plus the devkit archive.

    Returns the number of samples for the requested split.
    """
    wnids = tuple(info.extra.wnid_to_category.keys())
    if config.split == "train":
        images_root = root / "ILSVRC2012_img_train"
        num_samples = len(wnids)
        # The train archive is a tar of per-wnid tars, one image each.
        for wnid in wnids:
            files = create_image_folder(
                root=images_root,
                name=wnid,
                file_name_fn=lambda image_idx: f"{wnid}_{image_idx:04d}.JPEG",
                num_examples=1,
            )
            make_tar(images_root, f"{wnid}.tar", files[0].parent)
    elif config.split == "val":
        num_samples = 3
        files = create_image_folder(
            root=root,
            name="ILSVRC2012_img_val",
            file_name_fn=lambda image_idx: f"ILSVRC2012_val_{image_idx + 1:08d}.JPEG",
            num_examples=num_samples,
        )
        images_root = files[0].parent
    else:  # config.split == "test"
        images_root = root / "ILSVRC2012_img_test_v10102019"
        num_samples = 3
        create_image_folder(
            root=images_root,
            name="test",
            file_name_fn=lambda image_idx: f"ILSVRC2012_test_{image_idx + 1:08d}.JPEG",
            num_examples=num_samples,
        )
    make_tar(root, f"{images_root.name}.tar", images_root)
    # Devkit with the validation ground-truth labels.
    devkit_root = root / "ILSVRC2012_devkit_t12"
    devkit_root.mkdir()
    data_root = devkit_root / "data"
    data_root.mkdir()
    with open(data_root / "ILSVRC2012_validation_ground_truth.txt", "w") as file:
        for label in torch.randint(0, len(wnids), (num_samples,)).tolist():
            file.write(f"{label}\n")
    # Fixed: use only the directory *name* for the archive file name. The previous
    # f"{devkit_root}.tar.gz" embedded the full path; it landed in the right place
    # only because pathlib's `/` discards the left side for absolute right operands.
    make_tar(root, f"{devkit_root.name}.tar.gz", devkit_root, compression="gz")
    return num_samples
class CocoMockData:
    """Creates fake COCO image archives plus matching instances/captions annotation files."""

    @classmethod
    def _make_images_archive(cls, root, name, *, num_samples):
        """Create the image folder, zip it, and return the per-image metadata dicts."""
        image_paths = create_image_folder(
            root, name, file_name_fn=lambda idx: f"{idx:012d}.jpg", num_examples=num_samples
        )
        images_meta = []
        for path in image_paths:
            with PIL.Image.open(path) as image:
                width, height = image.size
            # The numeric file stem doubles as the COCO image id.
            images_meta.append(dict(file_name=path.name, id=int(path.stem), width=width, height=height))
        # NOTE(review): no explicit files are passed — presumably make_zip defaults to
        # archiving the folder matching the archive stem; confirm against datasets_utils.
        make_zip(root, f"{name}.zip")
        return images_meta

    @classmethod
    def _make_annotations_json(
        cls,
        root,
        name,
        *,
        images_meta,
        fn,
    ):
        """Write one annotation JSON; *fn* supplies the per-annotation payload.

        Returns a tensor with the number of annotations created per image.
        """
        num_anns_per_image = torch.randint(1, 5, (len(images_meta),))
        num_anns_total = int(num_anns_per_image.sum())
        # Hand out unique annotation ids in random order.
        ann_ids_iter = iter(torch.arange(num_anns_total)[torch.randperm(num_anns_total)])
        anns_meta = []
        for image_meta, num_anns in zip(images_meta, num_anns_per_image):
            for _ in range(num_anns):
                ann_id = int(next(ann_ids_iter))
                anns_meta.append(dict(fn(ann_id, image_meta), id=ann_id, image_id=image_meta["id"]))
        anns_meta.sort(key=lambda ann: ann["id"])
        with open(root / name, "w") as file:
            json.dump(dict(images=images_meta, annotations=anns_meta), file)
        return num_anns_per_image

    @staticmethod
    def _make_instances_data(ann_id, image_meta):
        """Payload for an 'instances' annotation with an RLE segmentation covering the image."""

        def make_rle_segmentation():
            height, width = image_meta["height"], image_meta["width"]
            numel = height * width
            counts = []
            # Append random run lengths until they cover the image, then trim the overshoot.
            while sum(counts) <= numel:
                counts.append(int(torch.randint(5, 8, ())))
            if sum(counts) > numel:
                counts[-1] -= sum(counts) - numel
            return dict(counts=counts, size=[height, width])

        return dict(
            segmentation=make_rle_segmentation(),
            bbox=make_tensor((4,), dtype=torch.float32, low=0).tolist(),
            iscrowd=True,
            area=float(make_scalar(dtype=torch.float32)),
            category_id=int(make_scalar(dtype=torch.int64)),
        )

    @staticmethod
    def _make_captions_data(ann_id, image_meta):
        """Payload for a 'captions' annotation."""
        return dict(caption=f"Caption {ann_id} describing image {image_meta['id']}.")

    @classmethod
    def _make_annotations(cls, root, name, *, images_meta):
        """Write both annotation variants; returns the total number of annotations."""
        num_anns_per_image = torch.zeros((len(images_meta),), dtype=torch.int64)
        for annotations, fn in (
            ("instances", cls._make_instances_data),
            ("captions", cls._make_captions_data),
        ):
            num_anns_per_image += cls._make_annotations_json(
                root, f"{annotations}_{name}.json", images_meta=images_meta, fn=fn
            )
        return int(num_anns_per_image.sum())

    @classmethod
    def generate(
        cls,
        root,
        *,
        year,
        num_samples,
    ):
        """Create the train/val image archives plus the annotations zip for *year*."""
        annotations_dir = root / "annotations"
        annotations_dir.mkdir()
        for split in ("train", "val"):
            config_name = f"{split}{year}"
            images_meta = cls._make_images_archive(root, config_name, num_samples=num_samples)
            cls._make_annotations(
                annotations_dir,
                config_name,
                images_meta=images_meta,
            )
        make_zip(root, f"annotations_trainval{year}.zip", annotations_dir)
        return num_samples
@dataset_mocks.register_mock_data_fn
def coco(info, root, config):
    """Fake COCO data for the year selected by *config*; five samples per split."""
    samples_per_split = 5
    return CocoMockData.generate(root, year=config.year, num_samples=samples_per_split)
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,320
|
Hsuxu/vision
|
refs/heads/master
|
/test/test_prototype_transforms.py
|
import pytest
from torchvision.prototype import transforms, features
from torchvision.prototype.utils._internal import sequence_to_str
# All public feature classes exposed by torchvision.prototype.features,
# excluding the abstract Feature base itself.
FEATURE_TYPES = {
    feature_type
    for name, feature_type in features.__dict__.items()
    if not name.startswith("_")
    and isinstance(feature_type, type)
    and issubclass(feature_type, features.Feature)
    and feature_type is not features.Feature
}
# All public transform classes exposed by torchvision.prototype.transforms,
# excluding the abstract Transform base itself.
TRANSFORM_TYPES = tuple(
    transform_type
    for name, transform_type in transforms.__dict__.items()
    if not name.startswith("_")
    and isinstance(transform_type, type)
    and issubclass(transform_type, transforms.Transform)
    and transform_type is not transforms.Transform
)
def test_feature_type_support():
    """Every public feature type must appear in Transform._BUILTIN_FEATURE_TYPES."""
    missing = FEATURE_TYPES - set(transforms.Transform._BUILTIN_FEATURE_TYPES)
    if not missing:
        return
    names = sorted(feature_type.__name__ for feature_type in missing)
    raise AssertionError(
        f"The feature(s) {sequence_to_str(names, separate_last='and ')} is/are exposed at "
        f"`torchvision.prototype.features`, but are missing in Transform._BUILTIN_FEATURE_TYPES. "
        f"Please add it/them to the collection."
    )
@pytest.mark.parametrize(
    "transform_type",
    [transform_type for transform_type in TRANSFORM_TYPES if transform_type is not transforms.Identity],
    ids=lambda transform_type: transform_type.__name__,
)
def test_feature_no_op_coverage(transform_type):
    """Each feature type must be either supported by, or declared a no-op for, each transform."""
    unsupported_features = (
        FEATURE_TYPES - transform_type.supported_feature_types() - set(transform_type.NO_OP_FEATURE_TYPES)
    )
    if unsupported_features:
        names = sorted([feature_type.__name__ for feature_type in unsupported_features])
        # Fixed duplicated word in the message: "to the the" -> "to the".
        raise AssertionError(
            f"The feature(s) {sequence_to_str(names, separate_last='and ')} are neither supported nor declared as "
            f"no-op for transform `{transform_type.__name__}`. Please either implement a feature transform for them, "
            f"or add them to the `{transform_type.__name__}.NO_OP_FEATURE_TYPES` collection."
        )
def test_non_feature_no_op():
    """A Transform must pass plain builtin values through unchanged."""

    class TestTransform(transforms.Transform):
        @staticmethod
        def image(input):
            return input

    sample = dict(int=0, float=0.0, bool=False, str="str")
    assert TestTransform()(sample) == sample
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,321
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/utils/_internal.py
|
import collections.abc
import difflib
import enum
import functools
import inspect
import os
import os.path
import textwrap
import warnings
from typing import Collection, Sequence, Callable, Any, Iterator, NoReturn, Mapping, TypeVar, Iterable, Tuple, cast
__all__ = [
"StrEnum",
"sequence_to_str",
"add_suggestion",
"FrozenMapping",
"make_repr",
"FrozenBunch",
"kwonly_to_pos_or_kw",
]
class StrEnumMeta(enum.EnumMeta):
    """Enum metaclass that makes member lookup case-insensitive for string keys."""

    def __getitem__(self, item):
        # Normalize string keys to upper case so Foo["bar"] resolves Foo.BAR;
        # non-string keys are passed through untouched.
        if isinstance(item, str):
            item = item.upper()
        return super().__getitem__(item)
class StrEnum(enum.Enum, metaclass=StrEnumMeta):
    """Enum base whose members can be looked up case-insensitively by name."""

    pass
def sequence_to_str(seq: Sequence, separate_last: str = "") -> str:
    """Format *seq* as a quoted, comma-separated enumeration.

    *separate_last* is inserted verbatim before the final item, e.g.
    ``separate_last="or "`` yields ``"'a', or 'b'"``.
    """
    # Fixed: an empty sequence previously crashed with an IndexError on seq[-1];
    # return an empty string instead.
    if not seq:
        return ""
    if len(seq) == 1:
        return f"'{seq[0]}'"
    return f"""'{"', '".join([str(item) for item in seq[:-1]])}', {separate_last}'{seq[-1]}'"""
def add_suggestion(
    msg: str,
    *,
    word: str,
    possibilities: Collection[str],
    close_match_hint: Callable[[str], str] = lambda close_match: f"Did you mean '{close_match}'?",
    alternative_hint: Callable[
        [Sequence[str]], str
    ] = lambda possibilities: f"Can be {sequence_to_str(possibilities, separate_last='or ')}.",
) -> str:
    """Append a spelling suggestion for *word* (drawn from *possibilities*) to *msg*.

    When a close match exists, *close_match_hint* supplies the suffix; otherwise
    *alternative_hint* does. An empty hint leaves *msg* unchanged.
    """
    # Sort unordered collections so the alternative hint is deterministic.
    if not isinstance(possibilities, collections.abc.Sequence):
        possibilities = sorted(possibilities)
    matches = difflib.get_close_matches(word, possibilities, 1)
    if matches:
        hint = close_match_hint(matches[0])
    else:
        hint = alternative_hint(possibilities)
    return f"{msg.strip()} {hint}" if hint else msg
K = TypeVar("K")  # key type
D = TypeVar("D")  # value type


class FrozenMapping(Mapping[K, D]):
    """An immutable, hashable mapping.

    The payload dict and its precomputed hash are stashed directly in
    ``__dict__`` so subclasses may freely block attribute assignment.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        data = dict(*args, **kwargs)
        self.__dict__["__data__"] = data
        # The hash is fixed at construction time and doubles as the equality key.
        self.__dict__["__final_hash__"] = hash(tuple(data.items()))

    def __getitem__(self, item: K) -> D:
        data = cast(Mapping[K, D], self.__dict__["__data__"])
        return data[item]

    def __iter__(self) -> Iterator[K]:
        return iter(self.__dict__["__data__"].keys())

    def __len__(self) -> int:
        return len(self.__dict__["__data__"])

    def __immutable__(self) -> NoReturn:
        # Central guard shared by every mutating dunder.
        raise RuntimeError(f"'{type(self).__name__}' object is immutable")

    def __setitem__(self, key: K, value: Any) -> NoReturn:
        self.__immutable__()

    def __delitem__(self, key: K) -> NoReturn:
        self.__immutable__()

    def __hash__(self) -> int:
        return cast(int, self.__dict__["__final_hash__"])

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, FrozenMapping):
            return NotImplemented
        # NOTE(review): equality delegates to the precomputed hashes, mirroring __hash__.
        return hash(self) == hash(other)

    def __repr__(self) -> str:
        return repr(self.__dict__["__data__"])
def make_repr(name: str, items: Iterable[Tuple[str, Any]]) -> str:
    """Build a ``name(key=value, ...)`` repr string.

    Falls back to a multi-line layout when the single-line form exceeds the
    terminal width (``COLUMNS`` env var, default 80) or a value spans lines.
    """
    # Fixed: materialize once. *items* may be a one-shot iterator, and it can be
    # formatted a second time below — the original consumed it twice, producing
    # an empty multi-line body for generator inputs.
    items = list(items)

    def to_str(sep: str) -> str:
        return sep.join([f"{key}={value}" for key, value in items])

    prefix = f"{name}("
    postfix = ")"
    body = to_str(", ")
    line_length = int(os.environ.get("COLUMNS", 80))
    body_too_long = (len(prefix) + len(body) + len(postfix)) > line_length
    multiline_body = len(str(body).splitlines()) > 1
    if not (body_too_long or multiline_body):
        return prefix + body + postfix
    body = textwrap.indent(to_str(",\n"), " " * 2)
    return f"{prefix}\n{body}\n{postfix}"
class FrozenBunch(FrozenMapping):
    """A :class:`FrozenMapping` whose entries are also reachable as attributes."""

    def __getattr__(self, name: str) -> Any:
        """Fall back to item lookup for unknown attributes."""
        try:
            return self[name]
        except KeyError as exc:
            message = f"'{type(self).__name__}' object has no attribute '{name}'"
            raise AttributeError(message) from exc

    def __setattr__(self, key: Any, value: Any) -> NoReturn:
        # Attribute assignment is blocked just like item assignment.
        self.__immutable__()

    def __delattr__(self, item: Any) -> NoReturn:
        # Attribute deletion is blocked just like item deletion.
        self.__immutable__()

    def __repr__(self) -> str:
        return make_repr(type(self).__name__, self.items())
def kwonly_to_pos_or_kw(fn: Callable[..., D]) -> Callable[..., D]:
    """Decorates a function that uses keyword only parameters to also allow them being passed as positionals.

    For example, consider the use case of changing the signature of ``old_fn`` into the one from ``new_fn``:

    .. code::

        def old_fn(foo, bar, baz=None):
            ...

        def new_fn(foo, *, bar, baz=None):
            ...

    Calling ``old_fn("foo", "bar", "baz")`` was valid, but the same call is no longer valid with ``new_fn``. To keep BC
    and at the same time warn the user of the deprecation, this decorator can be used:

    .. code::

        @kwonly_to_pos_or_kw
        def new_fn(foo, *, bar, baz=None):
            ...

        new_fn("foo", "bar", "baz")

    Raises:
        TypeError: If ``fn`` has no keyword-only parameters.
    """
    params = inspect.signature(fn).parameters

    try:
        keyword_only_start_idx = next(
            idx for idx, param in enumerate(params.values()) if param.kind == param.KEYWORD_ONLY
        )
    except StopIteration:
        raise TypeError(f"Found no keyword-only parameter on function '{fn.__name__}'") from None

    # Reuse the signature computed above instead of inspecting ``fn`` a second time.
    keyword_only_params = tuple(params)[keyword_only_start_idx:]

    @functools.wraps(fn)
    def wrapper(*args: Any, **kwargs: Any) -> D:
        # Split off the positionals that overflow into the keyword-only section...
        args, keyword_only_args = args[:keyword_only_start_idx], args[keyword_only_start_idx:]
        if keyword_only_args:
            # ...and forward them as keywords, warning about the deprecated call style.
            keyword_only_kwargs = dict(zip(keyword_only_params, keyword_only_args))
            warnings.warn(
                f"Using {sequence_to_str(tuple(keyword_only_kwargs.keys()), separate_last='and ')} as positional "
                f"parameter(s) is deprecated. Please use keyword parameter(s) instead."
            )
            kwargs.update(keyword_only_kwargs)

        return fn(*args, **kwargs)

    return wrapper
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,322
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/features/__init__.py
|
from ._bounding_box import BoundingBoxFormat, BoundingBox
from ._feature import Feature, DEFAULT
from ._image import Image, ColorSpace
from ._label import Label
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,323
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/models/segmentation/segmentation.py
|
import warnings
# Import all methods/classes for BC:
from . import * # noqa: F401, F403
# Importing this module is itself deprecated: warn once at import time and point
# callers at the parent package, whose public names are re-exported above.
warnings.warn(
    "The 'torchvision.models.segmentation.segmentation' module is deprecated. Please use directly the parent module "
    "instead."
)
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,324
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/transforms/__init__.py
|
from ._transform import Transform
from ._container import Compose, RandomApply, RandomChoice, RandomOrder # usort: skip
from ._geometry import Resize, RandomResize, HorizontalFlip, Crop, CenterCrop, RandomCrop
from ._misc import Identity, Normalize
from ._presets import CocoEval, ImageNetEval, VocEval, Kinect400Eval, RaftEval
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,325
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/datasets/_builtin/semeion.py
|
import functools
import io
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
CSVParser,
)
from torchvision.prototype.datasets.decoder import raw
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
HttpResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import image_buffer_from_array, hint_sharding, hint_shuffling
class SEMEION(Dataset):
    """Prototype dataset for the UCI SEMEION handwritten digit data set.

    Each record in the source file is a whitespace-separated row: 256 pixel
    values (a 16x16 image) followed by a one-hot encoded label over the 10
    digit classes.
    """
    def _make_info(self) -> DatasetInfo:
        # Static dataset metadata; samples are served as raw (undecoded) tensors.
        return DatasetInfo(
            "semeion",
            type=DatasetType.RAW,
            categories=10,
            homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
        )
    def resources(self, config: DatasetConfig) -> List[OnlineResource]:
        # A single data file; the sha256 pins the exact upstream content.
        data = HttpResource(
            "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
            sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
        )
        return [data]
    def _collate_and_decode_sample(
        self,
        data: Tuple[str, ...],
        *,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> Dict[str, Any]:
        """Turn one parsed CSV row into a sample dict with image, label and category."""
        # First 256 fields are the pixels of a 16x16 image.
        # NOTE(review): values are parsed as floats but stored as uint8 -- this
        # assumes the source pixels are integral (0/1); confirm against the data.
        image_data = torch.tensor([float(pixel) for pixel in data[:256]], dtype=torch.uint8).reshape(16, 16)
        # Remaining fields are the one-hot label vector; empty trailing fields are dropped.
        label_data = [int(label) for label in data[256:] if label]
        if decoder is raw:
            # Raw mode: add a leading channel dimension, skip re-encoding.
            image = image_data.unsqueeze(0)
        else:
            # Otherwise re-encode as an image buffer and (optionally) decode it.
            image_buffer = image_buffer_from_array(image_data.numpy())
            image = decoder(image_buffer) if decoder else image_buffer  # type: ignore[assignment]
        # Label is the index of the first non-zero entry of the one-hot vector.
        label = next((idx for idx, one_hot_label in enumerate(label_data) if one_hot_label))
        category = self.info.categories[label]
        return dict(image=image, label=label, category=category)
    def _make_datapipe(
        self,
        resource_dps: List[IterDataPipe],
        *,
        config: DatasetConfig,
        decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
    ) -> IterDataPipe[Dict[str, Any]]:
        """Build the datapipe: parse rows, apply shard/shuffle hints, then collate."""
        dp = resource_dps[0]
        dp = CSVParser(dp, delimiter=" ")
        dp = hint_sharding(dp)
        dp = hint_shuffling(dp)
        dp = Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
        return dp
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,326
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/features/_bounding_box.py
|
import enum
import functools
from typing import Callable, Union, Tuple, Dict, Any, Optional, cast
import torch
from torchvision.prototype.utils._internal import StrEnum
from ._feature import Feature, DEFAULT
class BoundingBoxFormat(StrEnum):
    """Supported coordinate layouts for bounding box tensors."""
    # this is just for test purposes
    _SENTINEL = -1
    XYXY = enum.auto()  # top-left and bottom-right corners
    XYWH = enum.auto()  # top-left corner plus width/height
    CXCYWH = enum.auto()  # center point plus width/height
def to_parts(input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Split a ``(..., 4)`` coordinate tensor into its four components."""
    return input.unbind(dim=-1)  # type: ignore[return-value]
def from_parts(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, d: torch.Tensor) -> torch.Tensor:
    """Inverse of :func:`to_parts`: stack four components along a new last dim."""
    return torch.stack((a, b, c, d), dim=-1)
def format_converter_wrapper(
    part_converter: Callable[
        [torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
    ]
) -> Callable[[torch.Tensor], torch.Tensor]:
    """Lift a converter operating on four coordinate parts into one that
    operates on whole ``(..., 4)`` coordinate tensors.
    """
    # functools.wraps preserves the converter's __name__/__doc__ on the
    # returned wrapper; the original implementation dropped that metadata.
    @functools.wraps(part_converter)
    def wrapper(input: torch.Tensor) -> torch.Tensor:
        return from_parts(*part_converter(*to_parts(input)))
    return wrapper
@format_converter_wrapper
def xywh_to_xyxy(
    x: torch.Tensor, y: torch.Tensor, w: torch.Tensor, h: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert top-left/size parts into corner parts."""
    # bottom-right corner is the top-left corner offset by the box size
    return x, y, x + w, y + h
@format_converter_wrapper
def xyxy_to_xywh(
    x1: torch.Tensor, y1: torch.Tensor, x2: torch.Tensor, y2: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert corner parts into top-left/size parts."""
    width = x2 - x1
    height = y2 - y1
    return x1, y1, width, height
@format_converter_wrapper
def cxcywh_to_xyxy(
    cx: torch.Tensor, cy: torch.Tensor, w: torch.Tensor, h: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert center/size parts into corner parts."""
    half_w = 0.5 * w
    half_h = 0.5 * h
    # corners lie half a box-size away from the center on each side
    return cx - half_w, cy - half_h, cx + half_w, cy + half_h
@format_converter_wrapper
def xyxy_to_cxcywh(
    x1: torch.Tensor, y1: torch.Tensor, x2: torch.Tensor, y2: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert corner parts into center/size parts."""
    width = x2 - x1
    height = y2 - y1
    # the center is the midpoint of the two corners
    return (x1 + x2) / 2, (y1 + y2) / 2, width, height
class BoundingBox(Feature):
    """Bounding box tensor feature carrying ``format`` and ``image_size`` meta data.

    NOTE(review): how the meta data and the DEFAULT sentinel are resolved is
    defined by the unseen ``Feature`` base class — confirm against it.
    """
    formats = BoundingBoxFormat
    # meta-data attributes populated by the Feature machinery
    format: BoundingBoxFormat
    image_size: Tuple[int, int]
    @classmethod
    def _parse_meta_data(
        cls,
        format: Union[str, BoundingBoxFormat] = DEFAULT,  # type: ignore[assignment]
        image_size: Optional[Tuple[int, int]] = DEFAULT,  # type: ignore[assignment]
    ) -> Dict[str, Tuple[Any, Any]]:
        """Return meta data as ``name -> (explicit value, fallback)`` pairs.

        ``format`` may be given as a string and is normalized to the enum;
        the ``image_size`` fallback guesses from the data assuming XYXY.
        """
        if isinstance(format, str):
            format = BoundingBoxFormat[format]
        format_fallback = BoundingBoxFormat.XYXY
        return dict(
            format=(format, format_fallback),
            image_size=(image_size, functools.partial(cls.guess_image_size, format=format_fallback)),
        )
    # converters routing each non-XYXY format to/from XYXY
    _TO_XYXY_MAP = {
        BoundingBoxFormat.XYWH: xywh_to_xyxy,
        BoundingBoxFormat.CXCYWH: cxcywh_to_xyxy,
    }
    _FROM_XYXY_MAP = {
        BoundingBoxFormat.XYWH: xyxy_to_xywh,
        BoundingBoxFormat.CXCYWH: xyxy_to_cxcywh,
    }
    @classmethod
    def guess_image_size(cls, data: torch.Tensor, *, format: BoundingBoxFormat) -> Tuple[int, int]:
        """Guess ``(height, width)`` from the boxes' maximal width/height parts.

        NOTE(review): this uses the max of the w/h parts, not the max extent
        (e.g. ``x + w``) — the guessed size can undershoot for offset boxes.
        """
        # XYWH and CXCYWH already store width/height as the last two parts;
        # anything else is converted via XYXY into XYWH first.
        if format not in (BoundingBoxFormat.XYWH, BoundingBoxFormat.CXCYWH):
            if format != BoundingBoxFormat.XYXY:
                data = cls._TO_XYXY_MAP[format](data)
            data = cls._FROM_XYXY_MAP[BoundingBoxFormat.XYWH](data)
        *_, w, h = to_parts(data)
        if data.dtype.is_floating_point:
            # round fractional sizes up so boxes fit in the guessed image
            w = w.ceil()
            h = h.ceil()
        return int(h.max()), int(w.max())
    @classmethod
    def from_parts(
        cls,
        a,
        b,
        c,
        d,
        *,
        like: Optional["BoundingBox"] = None,
        format: Union[str, BoundingBoxFormat] = DEFAULT,  # type: ignore[assignment]
        image_size: Optional[Tuple[int, int]] = DEFAULT,  # type: ignore[assignment]
    ) -> "BoundingBox":
        """Build a box from four coordinate parts; their meaning depends on ``format``."""
        return cls(from_parts(a, b, c, d), like=like, image_size=image_size, format=format)
    def to_parts(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Split this box tensor into its four coordinate parts."""
        return to_parts(self)
    def convert(self, format: Union[str, BoundingBoxFormat]) -> "BoundingBox":
        """Return this box converted to ``format`` (a clone if already there)."""
        if isinstance(format, str):
            format = BoundingBoxFormat[format]
        if format == self.format:
            return cast(BoundingBox, self.clone())
        # every conversion is routed through the XYXY format
        data = self
        if self.format != BoundingBoxFormat.XYXY:
            data = self._TO_XYXY_MAP[self.format](data)
        if format != BoundingBoxFormat.XYXY:
            data = self._FROM_XYXY_MAP[format](data)
        return BoundingBox(data, like=self, format=format)
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,327
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/features/_image.py
|
from typing import Dict, Any, Union, Tuple
import torch
from torchvision.prototype.utils._internal import StrEnum
from ._feature import Feature, DEFAULT
class ColorSpace(StrEnum):
    """Known color spaces; GRAYSCALE/RGB values mirror their channel counts."""
    # this is just for test purposes
    _SENTINEL = -1
    OTHER = 0
    GRAYSCALE = 1  # one channel
    RGB = 3  # three channels
class Image(Feature):
    """Image feature tensor carrying a ``color_space`` meta datum."""
    color_spaces = ColorSpace
    color_space: ColorSpace
    @classmethod
    def _to_tensor(cls, data, *, dtype, device):
        """Coerce ``data`` to a 3D image tensor, promoting 2D input with a channel dim."""
        image = torch.as_tensor(data, dtype=dtype, device=device)
        if image.ndim == 2:
            # single-channel image without an explicit channel dimension
            return image.unsqueeze(0)
        if image.ndim == 3:
            return image
        raise ValueError("Only single images with 2 or 3 dimensions are allowed.")
    @classmethod
    def _parse_meta_data(
        cls,
        color_space: Union[str, ColorSpace] = DEFAULT,  # type: ignore[assignment]
    ) -> Dict[str, Tuple[Any, Any]]:
        """Return meta data as ``name -> (explicit value, fallback)`` pairs."""
        if isinstance(color_space, str):
            color_space = ColorSpace[color_space]
        return {"color_space": (color_space, cls.guess_color_space)}
    @staticmethod
    def guess_color_space(data: torch.Tensor) -> ColorSpace:
        """Guess the color space from the channel dimension of ``data``."""
        rank = data.ndim
        if rank < 2:
            return ColorSpace.OTHER
        if rank == 2:
            # no channel dimension at all -> single channel
            return ColorSpace.GRAYSCALE
        channels = data.shape[-3]
        if channels == 1:
            return ColorSpace.GRAYSCALE
        if channels == 3:
            return ColorSpace.RGB
        return ColorSpace.OTHER
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,328
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/datasets/decoder.py
|
import io
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms.functional import pil_to_tensor
__all__ = ["raw", "pil"]
def raw(buffer: io.IOBase) -> torch.Tensor:
    """Sentinel decoder; it must never actually be called."""
    raise RuntimeError("This is just a sentinel and should never be called.")
def pil(buffer: io.IOBase) -> features.Image:
    """Decode an encoded image ``buffer`` into a ``features.Image`` tensor."""
    decoded = PIL.Image.open(buffer)
    return features.Image(pil_to_tensor(decoded))
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,329
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/transforms/_geometry.py
|
from typing import Any, Dict, Tuple, Union
import torch
from torch.nn.functional import interpolate
from torchvision.prototype.datasets.utils import SampleQuery
from torchvision.prototype.features import BoundingBox, Image, Label
from torchvision.prototype.transforms import Transform
class HorizontalFlip(Transform):
    """Mirror images and bounding boxes along the horizontal axis."""
    NO_OP_FEATURE_TYPES = {Label}
    @staticmethod
    def image(input: Image) -> Image:
        """Flip the image along its last (width) dimension."""
        mirrored = input.flip((-1,))
        return Image(mirrored, like=input)
    @staticmethod
    def bounding_box(input: BoundingBox) -> BoundingBox:
        """Re-anchor the box: the new left edge is measured from the right border."""
        x, y, w, h = input.convert("xywh").to_parts()
        mirrored_x = input.image_size[1] - (x + w)
        flipped = BoundingBox.from_parts(mirrored_x, y, w, h, like=input, format="xywh")
        return flipped.convert(input.format)
class Resize(Transform):
    """Resize images to a fixed size and rescale bounding boxes to match."""
    NO_OP_FEATURE_TYPES = {Label}
    def __init__(
        self,
        size: Union[int, Tuple[int, int]],
        *,
        interpolation_mode: str = "nearest",
    ) -> None:
        """
        Args:
            size: target ``(height, width)``; an int is used for both.
            interpolation_mode: mode forwarded to ``torch.nn.functional.interpolate``.
        """
        super().__init__()
        self.size = (size, size) if isinstance(size, int) else size
        self.interpolation_mode = interpolation_mode
    def get_params(self, sample: Any) -> Dict[str, Any]:
        return dict(size=self.size, interpolation_mode=self.interpolation_mode)
    @staticmethod
    def image(input: Image, *, size: Tuple[int, int], interpolation_mode: str = "nearest") -> Image:
        # interpolate() expects a batch dimension, hence the unsqueeze/squeeze pair
        return Image(interpolate(input.unsqueeze(0), size, mode=interpolation_mode).squeeze(0), like=input)
    @staticmethod
    def bounding_box(input: BoundingBox, *, size: Tuple[int, int], **_: Any) -> BoundingBox:
        """Scale box corners by the ratio of the new to the old image size."""
        old_height, old_width = input.image_size
        new_height, new_width = size
        height_scale = new_height / old_height
        width_scale = new_width / old_width
        old_x1, old_y1, old_x2, old_y2 = input.convert("xyxy").to_parts()
        new_x1 = old_x1 * width_scale
        new_y1 = old_y1 * height_scale
        new_x2 = old_x2 * width_scale
        new_y2 = old_y2 * height_scale
        return BoundingBox.from_parts(
            new_x1, new_y1, new_x2, new_y2, like=input, format="xyxy", image_size=size
        ).convert(input.format)
    def extra_repr(self) -> str:
        extra_repr = f"size={self.size}"
        # BUGFIX: compare against the constructor default ("nearest"). The old
        # check against "bilinear" printed the mode for default instances and
        # hid a non-default interpolation_mode="bilinear".
        if self.interpolation_mode != "nearest":
            extra_repr += f", interpolation_mode={self.interpolation_mode}"
        return extra_repr
class RandomResize(Transform, wraps=Resize):
    """Resize to a size drawn uniformly between ``min_size`` and ``max_size``."""
    def __init__(self, min_size: Union[int, Tuple[int, int]], max_size: Union[int, Tuple[int, int]]) -> None:
        super().__init__()
        self.min_size = (min_size, min_size) if isinstance(min_size, int) else min_size
        self.max_size = (max_size, max_size) if isinstance(max_size, int) else max_size
    def get_params(self, sample: Any) -> Dict[str, Any]:
        """Sample a target ``(height, width)`` within the configured bounds (inclusive)."""
        lo_h, lo_w = self.min_size
        hi_h, hi_w = self.max_size
        # height is sampled before width, matching the original RNG call order
        sampled_height = int(torch.randint(lo_h, hi_h + 1, size=()))
        sampled_width = int(torch.randint(lo_w, hi_w + 1, size=()))
        return dict(size=(sampled_height, sampled_width))
    def extra_repr(self) -> str:
        return f"min_size={self.min_size}, max_size={self.max_size}"
class Crop(Transform):
    """Crop images to a fixed bounding box."""
    NO_OP_FEATURE_TYPES = {BoundingBox, Label}
    def __init__(self, crop_box: BoundingBox) -> None:
        super().__init__()
        # normalize to corner format once so image() can slice directly
        self.crop_box = crop_box.convert("xyxy")
    def get_params(self, sample: Any) -> Dict[str, Any]:
        return dict(crop_box=self.crop_box)
    @staticmethod
    def image(input: Image, *, crop_box: BoundingBox) -> Image:
        # FIXME: pad input in case it is smaller than crop_box
        x1, y1, x2, y2 = crop_box.convert("xyxy").to_parts()
        cropped = input[..., y1 : y2 + 1, x1 : x2 + 1]  # type: ignore[misc]
        return Image(cropped, like=input)
class CenterCrop(Transform, wraps=Crop):
    """Crop a fixed-size region centered in the image."""
    def __init__(self, crop_size: Union[int, Tuple[int, int]]) -> None:
        super().__init__()
        self.crop_size = (crop_size, crop_size) if isinstance(crop_size, int) else crop_size
    def get_params(self, sample: Any) -> Dict[str, Any]:
        """Build a crop box centered on the sample's image."""
        image_size = SampleQuery(sample).image_size()
        image_height, image_width = image_size
        height, width = self.crop_size
        center_x = image_width // 2
        center_y = image_height // 2
        crop_box = BoundingBox.from_parts(center_x, center_y, width, height, image_size=image_size, format="cxcywh")
        return dict(crop_box=crop_box)
    def extra_repr(self) -> str:
        return f"crop_size={self.crop_size}"
class RandomCrop(Transform, wraps=Crop):
    """Crop a fixed-size region at a uniformly random position inside the image."""
    def __init__(self, crop_size: Union[int, Tuple[int, int]]) -> None:
        super().__init__()
        self.crop_size = (crop_size, crop_size) if isinstance(crop_size, int) else crop_size
    def get_params(self, sample: Any) -> Dict[str, Any]:
        """Sample a top-left corner so the crop stays inside the image."""
        image_size = SampleQuery(sample).image_size()
        image_height, image_width = image_size
        crop_height, crop_width = self.crop_size
        # x is sampled before y, matching the original RNG call order; a crop
        # at least as large as the image pins the corresponding offset to 0
        if crop_width < image_width:
            left = torch.randint(0, image_width - crop_width + 1, size=())
        else:
            left = 0
        if crop_height < image_height:
            top = torch.randint(0, image_height - crop_height + 1, size=())
        else:
            top = 0
        crop_box = BoundingBox.from_parts(left, top, crop_width, crop_height, image_size=image_size, format="xywh")
        return dict(crop_box=crop_box)
    def extra_repr(self) -> str:
        return f"crop_size={self.crop_size}"
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,330
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/features/_label.py
|
from typing import Dict, Any, Optional, Tuple
from ._feature import Feature, DEFAULT
class Label(Feature):
    """Label feature tensor with an optional human-readable ``category``."""
    category: Optional[str]
    @classmethod
    def _parse_meta_data(
        cls,
        category: Optional[str] = DEFAULT,  # type: ignore[assignment]
    ) -> Dict[str, Tuple[Any, Any]]:
        """Return meta data as ``name -> (explicit value, fallback)`` pairs."""
        return {"category": (category, None)}
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,331
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/features/_feature.py
|
from typing import Tuple, cast, TypeVar, Set, Dict, Any, Callable, Optional, Mapping, Type, Sequence
import torch
from torch._C import _TensorBase, DisableTorchFunction
from torchvision.prototype.utils._internal import add_suggestion
# Type variable bound to Feature so helpers can be typed to return the concrete subclass.
F = TypeVar("F", bound="Feature")
# Sentinel marking a meta data attribute as "not explicitly passed" (None is a valid value).
DEFAULT = object()
class Feature(torch.Tensor):
    """Base class for tensor subclasses that carry extra meta data.

    Subclasses declare meta data attributes as class annotations (see
    ``__init_subclass__``). Values are stored per instance in ``_meta_data``
    and exposed as read-only properties.
    """

    # Names of all meta data attributes, accumulated over the subclass hierarchy.
    _META_ATTRS: Set[str] = set()
    # Per-instance storage for meta data values; populated in __new__.
    _meta_data: Dict[str, Any]

    def __init_subclass__(cls):
        # In order to help static type checkers, we require subclasses of `Feature` add the meta data attributes
        # as static class annotations:
        #
        # >>> class Foo(Feature):
        # ...     bar: str
        # ...     baz: Optional[str]
        #
        # Internally, this information is used twofold:
        #
        # 1. A class annotation is contained in `cls.__annotations__` but not in `cls.__dict__`. We use this difference
        #    to automatically detect the meta data attributes and expose them as `@property`'s for convenient runtime
        #    access. This happens in this method.
        # 2. The information extracted in 1. is also used at creation (`__new__`) to perform an input parsing for
        #    unknown arguments.
        meta_attrs = {attr for attr in cls.__annotations__.keys() - cls.__dict__.keys() if not attr.startswith("_")}
        for super_cls in cls.__mro__[1:]:
            # Collect inherited meta attrs, stopping once the walk reaches Feature itself.
            if super_cls is Feature:
                break

            meta_attrs.update(super_cls._META_ATTRS)

        cls._META_ATTRS = meta_attrs
        for attr in meta_attrs:
            # attr is bound as a default argument so each property closes over its own name
            # (avoids the classic late-binding-closure pitfall).
            setattr(cls, attr, property(lambda self, attr=attr: self._meta_data[attr]))

    def __new__(cls, data, *, dtype=None, device=None, like=None, **kwargs):
        # Reject unknown meta data kwargs with a "did you mean ...?" style error message.
        unknown_meta_attrs = kwargs.keys() - cls._META_ATTRS
        if unknown_meta_attrs:
            unknown_meta_attr = sorted(unknown_meta_attrs)[0]
            raise TypeError(
                add_suggestion(
                    f"{cls.__name__}() got unexpected keyword '{unknown_meta_attr}'.",
                    word=unknown_meta_attr,
                    possibilities=sorted(cls._META_ATTRS),
                )
            )

        if like is not None:
            # `like` acts as a template: its dtype/device are used unless given explicitly.
            dtype = dtype or like.dtype
            device = device or like.device
        data = cls._to_tensor(data, dtype=dtype, device=device)
        requires_grad = False
        self = torch.Tensor._make_subclass(cast(_TensorBase, cls), data, requires_grad)

        meta_data = dict()
        for attr, (explicit, fallback) in cls._parse_meta_data(**kwargs).items():
            if explicit is not DEFAULT:
                value = explicit
            elif like is not None:
                # Inherit the meta data value from the template feature.
                value = getattr(like, attr)
            else:
                # fallback may be a plain default value or a callable computed from the data.
                value = fallback(data) if callable(fallback) else fallback
            meta_data[attr] = value
        self._meta_data = meta_data

        return self

    @classmethod
    def _to_tensor(cls, data, *, dtype, device):
        # Hook for subclasses that need a custom data-to-tensor conversion.
        return torch.as_tensor(data, dtype=dtype, device=device)

    @classmethod
    def _parse_meta_data(cls) -> Dict[str, Tuple[Any, Any]]:
        # Maps each meta data attribute to an (explicit value, fallback) pair; see __new__.
        return dict()

    @classmethod
    def __torch_function__(
        cls,
        func: Callable[..., torch.Tensor],
        types: Tuple[Type[torch.Tensor], ...],
        args: Sequence[Any] = (),
        kwargs: Optional[Mapping[str, Any]] = None,
    ) -> torch.Tensor:
        # Execute the op with the torch function protocol disabled so it returns a plain tensor.
        with DisableTorchFunction():
            output = func(*args, **(kwargs or dict()))

        if func is not torch.Tensor.clone:
            # Every op except clone() intentionally loses the Feature type and its meta data.
            return output

        return cls(output, like=args[0])

    def __repr__(self):
        # Reuse Tensor's repr but show the subclass name instead of "tensor".
        return torch.Tensor.__repr__(self).replace("tensor", type(self).__name__)
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,332
|
Hsuxu/vision
|
refs/heads/master
|
/test/test_prototype_builtin_datasets.py
|
import io
import builtin_dataset_mocks
import pytest
import torch
from torch.utils.data.datapipes.iter.grouping import ShardingFilterIterDataPipe as ShardingFilter
from torch.utils.data.graph import traverse
from torchdata.datapipes.iter import IterDataPipe, Shuffler
from torchvision.prototype import datasets, transforms
from torchvision.prototype.datasets._api import DEFAULT_DECODER
from torchvision.prototype.utils._internal import sequence_to_str
def to_bytes(file):
    """Default decoder: drain the file-like object and return its raw contents."""
    contents = file.read()
    return contents
def config_id(name, config):
    """Build a readable pytest id: the dataset name plus one token per config option."""
    tokens = [name]
    for option, value in config.items():
        if isinstance(value, bool):
            # Booleans render as the option name, prefixed with "no_" when disabled.
            tokens.append(option if value else f"no_{option}")
        else:
            tokens.append(str(value))
    return "-".join(tokens)
def dataset_parametrization(*names, decoder=to_bytes):
    """Decorator factory: parametrize a test over (dataset, mock_info) for every config of each dataset."""
    if not names:
        # TODO: Replace this with torchvision.prototype.datasets.list() as soon as all builtin datasets are supported
        names = (
            "mnist",
            "fashionmnist",
            "kmnist",
            "emnist",
            "qmnist",
            "cifar10",
            "cifar100",
            "caltech256",
            "caltech101",
            "imagenet",
            "coco",
        )

    params = []
    for name in names:
        for config in datasets.info(name)._configs:
            # load() returns the (dataset, mock_info, ...) tuple that fills the parametrized arguments.
            loaded = builtin_dataset_mocks.load(name, decoder=decoder, **config)
            params.append(pytest.param(*loaded, id=config_id(name, config)))
    return pytest.mark.parametrize(("dataset", "mock_info"), params)
class TestCommon:
    """Checks that every builtin dataset must pass, parametrized over all dataset configs."""

    @dataset_parametrization()
    def test_smoke(self, dataset, mock_info):
        # Loader contract: datasets are torchdata IterDataPipes.
        if not isinstance(dataset, IterDataPipe):
            raise AssertionError(f"Loading the dataset should return an IterDataPipe, but got {type(dataset)} instead.")

    @dataset_parametrization()
    def test_sample(self, dataset, mock_info):
        # Drawing a single sample must succeed and yield a non-empty dict.
        try:
            sample = next(iter(dataset))
        except Exception as error:
            raise AssertionError("Drawing a sample raised the error above.") from error

        if not isinstance(sample, dict):
            raise AssertionError(f"Samples should be dictionaries, but got {type(sample)} instead.")

        if not sample:
            raise AssertionError("Sample dictionary is empty.")

    @dataset_parametrization()
    def test_num_samples(self, dataset, mock_info):
        # Exhaust the datapipe and compare against the size promised by the mock.
        num_samples = 0
        for _ in dataset:
            num_samples += 1

        assert num_samples == mock_info["num_samples"]

    @dataset_parametrization()
    def test_decoding(self, dataset, mock_info):
        # With the to_bytes decoder, no raw file handles may survive in a sample.
        undecoded_features = {key for key, value in next(iter(dataset)).items() if isinstance(value, io.IOBase)}
        if undecoded_features:
            raise AssertionError(
                f"The values of key(s) "
                f"{sequence_to_str(sorted(undecoded_features), separate_last='and ')} were not decoded."
            )

    @dataset_parametrization(decoder=DEFAULT_DECODER)
    def test_no_vanilla_tensors(self, dataset, mock_info):
        # Exact type match on purpose: torch.Tensor subclasses (features) are fine,
        # only plain torch.Tensor values are flagged.
        vanilla_tensors = {key for key, value in next(iter(dataset)).items() if type(value) is torch.Tensor}
        if vanilla_tensors:
            raise AssertionError(
                f"The values of key(s) "
                f"{sequence_to_str(sorted(vanilla_tensors), separate_last='and ')} contained vanilla tensors."
            )

    @dataset_parametrization()
    def test_transformable(self, dataset, mock_info):
        # Samples must survive being mapped through a prototype transform.
        next(iter(dataset.map(transforms.Identity())))

    @dataset_parametrization()
    def test_traversable(self, dataset, mock_info):
        # The datapipe graph must be traversable without error.
        traverse(dataset)

    @dataset_parametrization()
    @pytest.mark.parametrize("annotation_dp_type", (Shuffler, ShardingFilter), ids=lambda type: type.__name__)
    def test_has_annotations(self, dataset, mock_info, annotation_dp_type):
        def scan(graph):
            # Depth-first walk over the nested graph dict returned by traverse().
            for node, sub_graph in graph.items():
                yield node
                yield from scan(sub_graph)

        for dp in scan(traverse(dataset)):
            if type(dp) is annotation_dp_type:
                break
        else:
            raise AssertionError(f"The dataset doesn't comprise a {annotation_dp_type.__name__}() datapipe.")
class TestQMNIST:
    """QMNIST-specific checks on top of the common dataset tests."""

    @pytest.mark.parametrize(
        "dataset",
        [
            pytest.param(builtin_dataset_mocks.load("qmnist", split=split)[0], id=split)
            for split in ("train", "test", "test10k", "test50k", "nist")
        ],
    )
    def test_extra_label(self, dataset):
        """Every split must expose the extra per-digit annotation fields with the right types."""
        sample = next(iter(dataset))
        # Renamed loop variable: the original shadowed the builtin `type`.
        for key, expected_type in (
            ("nist_hsf_series", int),
            ("nist_writer_id", int),
            ("digit_index", int),
            ("nist_label", int),
            ("global_digit_index", int),
            ("duplicate", bool),
            ("unused", bool),
        ):
            # Separate asserts so a failure pinpoints whether the key is missing or mistyped.
            assert key in sample
            assert isinstance(sample[key], expected_type)
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,333
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/transforms/_misc.py
|
from typing import Any, Dict, Sequence
import torch
from torchvision.prototype.features import Image, BoundingBox, Label
from torchvision.prototype.transforms import Transform
class Identity(Transform):
    """Identity transform that supports all built-in :class:`~torchvision.prototype.features.Feature`'s."""

    def __init__(self):
        super().__init__()

        def _passthrough(input, **params):
            # No-op feature transform: return the input unchanged.
            return input

        for feature_type in self._BUILTIN_FEATURE_TYPES:
            self.register_feature_transform(feature_type, _passthrough)
class Normalize(Transform):
    """Channel-wise image normalization: ``(input - mean) / std``."""

    # Feature types this transform leaves untouched.
    NO_OP_FEATURE_TYPES = {BoundingBox, Label}

    def __init__(self, mean: Sequence[float], std: Sequence[float]):
        super().__init__()
        self.mean = mean
        self.std = std

    def get_params(self, sample: Any) -> Dict[str, Any]:
        # Parameters are static; the sample is not inspected.
        return dict(mean=self.mean, std=self.std)

    @staticmethod
    def _channel_stats_to_tensor(stats: Sequence[float], *, like: torch.Tensor) -> torch.Tensor:
        # Shape (-1, 1, 1) so the per-channel stats broadcast over the spatial dimensions.
        return torch.as_tensor(stats, device=like.device, dtype=like.dtype).view(-1, 1, 1)

    @staticmethod
    def image(input: Image, *, mean: Sequence[float], std: Sequence[float]) -> Image:
        to_tensor = Normalize._channel_stats_to_tensor
        normalized = (input - to_tensor(mean, like=input)) / to_tensor(std, like=input)
        return Image(normalized, like=input)

    def extra_repr(self) -> str:
        return f"mean={tuple(self.mean)}, std={tuple(self.std)}"
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,110,334
|
Hsuxu/vision
|
refs/heads/master
|
/torchvision/prototype/datasets/_builtin/celeba.py
|
import csv
import functools
import io
from typing import Any, Callable, Dict, List, Optional, Tuple, Iterator, Sequence
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Filter,
Zipper,
IterKeyZipper,
)
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
GDriveResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import (
INFINITE_BUFFER_SIZE,
getitem,
path_accessor,
hint_sharding,
hint_shuffling,
)
# CelebA annotation files are space-separated with runs of padding spaces between columns,
# hence skipinitialspace to swallow the extra delimiters.
csv.register_dialect("celeba", delimiter=" ", skipinitialspace=True)
class CelebACSVParser(IterDataPipe[Tuple[str, Dict[str, str]]]):
    """Datapipe that parses CelebA annotation files into ``(image_id, row_dict)`` pairs.

    If ``fieldnames`` is not given, the header is read from the file itself:
    the first line (sample count) is skipped and the second line supplies the
    column names.
    """

    def __init__(
        self,
        datapipe: IterDataPipe[Tuple[Any, io.IOBase]],
        *,
        fieldnames: Optional[Sequence[str]] = None,
    ) -> None:
        # NOTE(review): no super().__init__() call here — confirm IterDataPipe does not require one.
        self.datapipe = datapipe
        self.fieldnames = fieldnames

    def __iter__(self) -> Iterator[Tuple[str, Dict[str, str]]]:
        for _, file in self.datapipe:
            # Decode lazily, line by line; subsequent next(file) calls consume header lines in order.
            file = (line.decode() for line in file)

            if self.fieldnames:
                fieldnames = self.fieldnames
            else:
                # The first row is skipped, because it only contains the number of samples
                next(file)

                # Empty field names are filtered out, because some files have an extra white space after the header
                # line, which is recognized as extra column
                fieldnames = [name for name in next(csv.reader([next(file)], dialect="celeba")) if name]
                # Some files do not include a label for the image ID column
                if fieldnames[0] != "image_id":
                    fieldnames.insert(0, "image_id")

            for line in csv.DictReader(file, fieldnames=fieldnames, dialect="celeba"):
                # Pop the id so the remaining dict holds only the annotation columns.
                yield line.pop("image_id"), line
class CelebA(Dataset):
def _make_info(self) -> DatasetInfo:
    """Build the static metadata record for the CelebA dataset."""
    info = DatasetInfo(
        "celeba",
        type=DatasetType.IMAGE,
        homepage="https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html",
    )
    return info
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
splits = GDriveResource(
"0B7EVK8r0v71pY0NSMzRuSXJEVkk",
sha256="fc955bcb3ef8fbdf7d5640d9a8693a8431b5f2ee291a5c1449a1549e7e073fe7",
file_name="list_eval_partition.txt",
)
images = GDriveResource(
"0B7EVK8r0v71pZjFTYXZWM3FlRnM",
sha256="46fb89443c578308acf364d7d379fe1b9efb793042c0af734b6112e4fd3a8c74",
file_name="img_align_celeba.zip",
)
identities = GDriveResource(
"1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS",
sha256="c6143857c3e2630ac2da9f782e9c1232e5e59be993a9d44e8a7916c78a6158c0",
file_name="identity_CelebA.txt",
)
attributes = GDriveResource(
"0B7EVK8r0v71pblRyaVFSWGxPY0U",
sha256="f0e5da289d5ccf75ffe8811132694922b60f2af59256ed362afa03fefba324d0",
file_name="list_attr_celeba.txt",
)
bboxes = GDriveResource(
"0B7EVK8r0v71pbThiMVRxWXZ4dU0",
sha256="7487a82e57c4bb956c5445ae2df4a91ffa717e903c5fa22874ede0820c8ec41b",
file_name="list_bbox_celeba.txt",
)
landmarks = GDriveResource(
"0B7EVK8r0v71pd0FJY3Blby1HUTQ",
sha256="6c02a87569907f6db2ba99019085697596730e8129f67a3d61659f198c48d43b",
file_name="list_landmarks_align_celeba.txt",
)
return [splits, images, identities, attributes, bboxes, landmarks]
_SPLIT_ID_TO_NAME = {
"0": "train",
"1": "valid",
"2": "test",
}
def _filter_split(self, data: Tuple[str, Dict[str, str]], *, split: str) -> bool:
return self._SPLIT_ID_TO_NAME[data[1]["split_id"]] == split
def _collate_anns(self, data: Tuple[Tuple[str, Dict[str, str]], ...]) -> Tuple[str, Dict[str, Dict[str, str]]]:
(image_id, identity), (_, attributes), (_, bbox), (_, landmarks) = data
return image_id, dict(identity=identity, attributes=attributes, bbox=bbox, landmarks=landmarks)
def _collate_and_decode_sample(
self,
data: Tuple[Tuple[str, Tuple[str, List[str]], Tuple[str, io.IOBase]], Tuple[str, Dict[str, Any]]],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, _, image_data = split_and_image_data
path, buffer = image_data
_, ann = ann_data
image = decoder(buffer) if decoder else buffer
identity = int(ann["identity"]["identity"])
attributes = {attr: value == "1" for attr, value in ann["attributes"].items()}
bbox = torch.tensor([int(ann["bbox"][key]) for key in ("x_1", "y_1", "width", "height")])
landmarks = {
landmark: torch.tensor((int(ann["landmarks"][f"{landmark}_x"]), int(ann["landmarks"][f"{landmark}_y"])))
for landmark in {key[:-2] for key in ann["landmarks"].keys()}
}
return dict(
path=path,
image=image,
identity=identity,
attributes=attributes,
bbox=bbox,
landmarks=landmarks,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
splits_dp, images_dp, identities_dp, attributes_dp, bboxes_dp, landmarks_dp = resource_dps
splits_dp = CelebACSVParser(splits_dp, fieldnames=("image_id", "split_id"))
splits_dp = Filter(splits_dp, functools.partial(self._filter_split, split=config.split))
splits_dp = hint_sharding(splits_dp)
splits_dp = hint_shuffling(splits_dp)
anns_dp = Zipper(
*[
CelebACSVParser(dp, fieldnames=fieldnames)
for dp, fieldnames in (
(identities_dp, ("image_id", "identity")),
(attributes_dp, None),
(bboxes_dp, None),
(landmarks_dp, None),
)
]
)
anns_dp = Mapper(anns_dp, self._collate_anns)
dp = IterKeyZipper(
splits_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=path_accessor("name"),
buffer_size=INFINITE_BUFFER_SIZE,
keep_key=True,
)
dp = IterKeyZipper(dp, anns_dp, key_fn=getitem(0), buffer_size=INFINITE_BUFFER_SIZE)
return Mapper(dp, functools.partial(self._collate_and_decode_sample, decoder=decoder))
|
{"/torchvision/prototype/transforms/_transform.py": ["/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/_meta.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_deprecated.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py"], "/test/test_prototype_transforms.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/_mask.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/transforms/_augment.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], "/torchvision/prototype/transforms/_geometry.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/features/_image.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/transforms/functional/_temporal.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/__init__.py": ["/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_presets.py", "/torchvision/prototype/transforms/_augment.py", 
"/torchvision/prototype/transforms/_color.py", "/torchvision/prototype/transforms/_container.py", "/torchvision/prototype/transforms/_geometry.py", "/torchvision/prototype/transforms/_meta.py", "/torchvision/prototype/transforms/_misc.py", "/torchvision/prototype/transforms/_temporal.py", "/torchvision/prototype/transforms/_type_conversion.py", "/torchvision/prototype/transforms/_deprecated.py"], "/torchvision/prototype/features/_bounding_box.py": ["/torchvision/prototype/features/_feature.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/features/__init__.py": ["/torchvision/prototype/features/_bounding_box.py", "/torchvision/prototype/features/_feature.py", "/torchvision/prototype/features/_image.py", "/torchvision/prototype/features/_label.py", "/torchvision/prototype/features/_mask.py"], "/torchvision/prototype/transforms/functional/_augment.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_container.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_temporal.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/test/prototype_common_utils.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/functional/_type_conversion.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/transforms/_type_conversion.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_misc.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_utils.py", "/torchvision/prototype/features/__init__.py"], "/torchvision/prototype/transforms/_color.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/transforms/__init__.py", "/torchvision/prototype/transforms/_transform.py", "/torchvision/prototype/transforms/_utils.py"], 
"/torchvision/prototype/transforms/_presets.py": ["/torchvision/prototype/transforms/__init__.py"], "/torchvision/prototype/transforms/_utils.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/datasets/_builtin/imagenet.py": ["/torchvision/prototype/datasets/utils/__init__.py", "/torchvision/prototype/features/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_features.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/test/builtin_dataset_mocks.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/semeion.py": ["/torchvision/prototype/datasets/decoder.py", "/torchvision/prototype/datasets/utils/__init__.py"], "/torchvision/prototype/datasets/decoder.py": ["/torchvision/prototype/__init__.py"], "/torchvision/prototype/features/_label.py": ["/torchvision/prototype/features/_feature.py"], "/torchvision/prototype/features/_feature.py": ["/torchvision/prototype/utils/_internal.py"], "/test/test_prototype_builtin_datasets.py": ["/torchvision/prototype/__init__.py", "/torchvision/prototype/utils/_internal.py"], "/torchvision/prototype/datasets/_builtin/celeba.py": ["/torchvision/prototype/datasets/utils/__init__.py"]}
|
39,118,944
|
crandersn/temp_probe_pi_code
|
refs/heads/master
|
/ConnectedTemperatureSensor.py
|
from threading import Timer
import pyrebase
from TempReader import TempReader
from datetime import datetime
import RPi.GPIO as GPIO
import board
import digitalio
import adafruit_character_lcd.character_lcd as characterlcd
import time
import datetime
import boto3
import os
class ConnectedTempSensor:
    """Temperature probe controller for a Raspberry Pi.

    Wires together a 1-wire temperature probe (read via ``TempReader``), a
    16x2 character LCD, a physical button and power switch on the GPIO header,
    and a Firebase realtime database that mirrors the most recent readings and
    exposes a remote "virtual button" that lights the display.

    Fixes applied to the original:
      * ``Timer.isAlive()`` was removed in Python 3.9 — use ``is_alive()``.
      * After a power-off/power-on cycle the restarted timer was never added
        to ``timerQue``, so the next power-off could not cancel it.
      * ``timerQue`` grew without bound; the author's commented-out pruning is
        now enabled (only the newest timer can still be pending).
    """

    def __init__(self):
        # --- LCD wiring (pins via the board/digitalio objects) ---
        self.lcd_columns = 16
        self.lcd_rows = 2
        self.lcd_rs = digitalio.DigitalInOut(board.D22)
        self.lcd_en = digitalio.DigitalInOut(board.D17)
        self.lcd_d4 = digitalio.DigitalInOut(board.D25)
        self.lcd_d5 = digitalio.DigitalInOut(board.D24)
        self.lcd_d6 = digitalio.DigitalInOut(board.D23)
        self.lcd_d7 = digitalio.DigitalInOut(board.D18)
        self.backlight_d8 = digitalio.DigitalInOut(board.D8)
        self.lcd = characterlcd.Character_LCD_Mono(self.lcd_rs, self.lcd_en, self.lcd_d4, self.lcd_d5, self.lcd_d6, self.lcd_d7, self.lcd_columns, self.lcd_rows, self.backlight_d8)
        # Timers created by the periodic poll; tracked so they can be
        # cancelled cleanly when the power switch is turned off.
        self.timerQue = []
        # configure system to read temperature probe (load 1-wire kernel modules)
        os.system('modprobe w1-gpio')
        os.system('modprobe w1-therm')
        # set up button and switch (active-low with internal pull-ups)
        self.button = 16
        self.powerSwitch = 26
        GPIO.setup(self.button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.powerSwitch, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        # set up firebase
        # SECURITY NOTE: this API key is committed to source control; it should
        # be rotated and loaded from configuration instead.
        self.firebaseConfig = {
            "apiKey": "AIzaSyA1XkaqnfFO8pm-yuTK5ggZpAsY27eOwn8",
            "authDomain": "connected-temp-sensor.firebaseapp.com",
            "databaseURL": "https://connected-temp-sensor-default-rtdb.firebaseio.com",
            "projectId": "connected-temp-sensor",
            "storageBucket": "connected-temp-sensor.appspot.com",
            "messagingSenderId": "363436241273",
            "appId": "1:363436241273:web:3404021c99f76d3ed2bfa8",
            "measurementId": "G-CD08HW864M"
        }
        # instantiate variables to keep track of program state
        self.firebaseDatabase = pyrebase.initialize_app(self.firebaseConfig).database()
        self.last300 = []  # rolling window of the most recent readings
        self.virtualButtonPressed = False
        self.lastTempReading = None
        # Firebase stream: calls self.virtualButton whenever the remote value changes.
        self.firebaseDatabase.child("virtual_button_pressed").stream(self.virtualButton)
        self.lcd.backlight = True
        self.lcd.message = "Powering on :)"
        time.sleep(5.0)

    def updateTempReading(self):
        """Poll the probe once, publish the rolling window, and re-arm the timer.

        "US" is TempReader's sentinel value for an unplugged sensor.
        """
        temp = TempReader.getTemp()
        self.lastTempReading = temp
        if len(self.last300) < 300:
            self.last300.append(self.lastTempReading)
        else:
            # Window is full: drop the oldest reading before appending.
            self.last300.pop(0)
            self.last300.append(self.lastTempReading)
        self.firebaseDatabase.update({"last_300_seconds": self.last300})
        print("last T reading = " + temp)
        if self.lastTempReading == "US":
            # Back off while the sensor is unplugged.
            time.sleep(0.85)
        self.t = Timer(0.1, self.updateTempReading)
        self.t.start()
        self.timerQue.append(self.t)
        # Fix: prune finished timers so the queue does not grow without bound;
        # only the newest timer can still be pending, so dropping old entries is safe.
        if len(self.timerQue) > 10:
            self.timerQue.pop(0)

    # firebase listener function
    def virtualButton(self, event):
        """Firebase stream callback: mirror the remote virtual-button state locally."""
        self.virtualButtonPressed = event["data"]
        print(event["data"])

    # main control loop
    def run(self):
        """Run the display/power-switch loop forever (never returns)."""
        # begin reading temperature values
        self.t = Timer(0.1, self.updateTempReading)
        self.t.start()
        self.timerQue.append(self.t)
        while True:
            self.buttonState = GPIO.input(self.button)
            self.powerSwitchState = GPIO.input(self.powerSwitch)
            if self.powerSwitchState != False:
                # Powered on: show the reading while the physical or virtual
                # button is held, otherwise keep the display dark.
                if self.buttonState == False or self.virtualButtonPressed == "True":
                    self.lcd.backlight = True
                    if self.lastTempReading != "US":
                        self.lcd.message = TempReader.convertToDisplayFormat(self.lastTempReading)
                    else:
                        self.lcd.message = "Sensor Unplugged"
                else:
                    self.lcd.backlight = False
                    self.lcd.clear()
                    self.updatedSinceLastTempReading = True
                time.sleep(0.05)
            else:
                # Power switch off: stop all pending timers and park until the
                # switch is turned back on.
                print(*self.timerQue, sep=", ")
                for timer in self.timerQue:
                    # Fix: Timer.isAlive() was removed in Python 3.9; use is_alive().
                    if timer.is_alive():
                        timer.cancel()
                        timer.join()
                self.timerQue.clear()
                self.lcd.backlight = True
                self.lcd.message = "Powering off :("
                time.sleep(2)
                self.lcd.backlight = False
                self.lcd.clear()
                while self.powerSwitchState == False:
                    time.sleep(2)
                    self.powerSwitchState = GPIO.input(self.powerSwitch)
                self.lcd.backlight = True
                self.lcd.message = "Powering on :)"
                time.sleep(2.5)
                self.lcd.clear()
                self.lcd.backlight = False
                self.t = Timer(0.1, self.updateTempReading)
                self.t.start()
                # Fix: track the restarted timer so the next power-off can cancel it.
                self.timerQue.append(self.t)
|
{"/ConnectedTemperatureSensor.py": ["/TempReader.py"], "/RunTempSensor.py": ["/ConnectedTemperatureSensor.py"]}
|
39,118,945
|
crandersn/temp_probe_pi_code
|
refs/heads/master
|
/AwsTimeStream.py
|
import boto3
import time
class TimeStream:
    """Minimal writer that pushes single temperature readings into AWS Timestream."""

    def __init__(self, dbName, tableName, region):
        """Remember the target Timestream database, table and AWS region.

        :param dbName: Timestream database name.
        :param tableName: Timestream table name.
        :param region: AWS region hosting the database (e.g. "us-east-2").
        """
        self.dbName = dbName
        self.tableName = tableName
        self.region = region

    def write(self, temp):
        """Write one temperature measurement (type DOUBLE) stamped with the current time.

        :param temp: temperature value, passed through as Timestream's MeasureValue.
        """
        import os  # local import keeps the module's import footprint unchanged

        # Timestream expects the record time in milliseconds since the epoch.
        current_time = str(int(time.time() * 1000))
        # SECURITY FIX: prefer credentials from the environment. The literal
        # fallbacks below were committed to source control and must be rotated;
        # they are kept only for backward compatibility.
        client = boto3.client(
            'timestream-write',
            region_name=self.region,
            aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID", "AKIA45ZVKLZMNHMGSQG3"),
            aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY", "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"),
        )
        # Dimension value kept byte-identical ("raspbery_pi", sic) so existing
        # Timestream queries keep matching.
        dimension = [{'Name': 'data_origin', 'Value': 'raspbery_pi'}]
        record = {
            'Time': current_time,
            'Dimensions': dimension,
            'MeasureName': 'Temperature',
            'MeasureValue': temp,
            'MeasureValueType': 'DOUBLE'
        }
        client.write_records(DatabaseName=self.dbName, TableName=self.tableName, Records=[record])
        print("wrote to timestream")
|
{"/ConnectedTemperatureSensor.py": ["/TempReader.py"], "/RunTempSensor.py": ["/ConnectedTemperatureSensor.py"]}
|
39,118,946
|
crandersn/temp_probe_pi_code
|
refs/heads/master
|
/TempReader.py
|
import glob
class TempReader:
    """Read a 1-wire temperature probe via the sysfs w1 interface.

    Fixes applied to the original:
      * The class body crashed with IndexError at import time when no sensor
        directory matched ``28*``; ``device_file`` is now ``None`` in that case
        and reads report the "US" (unplugged sensor) sentinel instead.
      * The bare ``except:`` is narrowed and the file is closed via ``with``.
      * The display suffix typo "Celcius" is corrected to "Celsius".
    """

    base_dir = '/sys/bus/w1/devices/'
    # DS18B20-family sensors enumerate under a "28*" directory. Guard against
    # no sensor being attached: glob may return an empty list.
    _device_folders = glob.glob(base_dir + '28*')
    device_folder = _device_folders[0] if _device_folders else None
    device_file = device_folder + '/w1_slave' if device_folder is not None else None

    @staticmethod
    def readTempFile():
        """Return the raw temperature string (e.g. "23500"), or "US" if unavailable.

        "US" ("unplugged sensor") is returned when no device was found, the
        sysfs file cannot be read, or the expected "t=" marker is missing.
        """
        if TempReader.device_file is None:
            return "US"
        try:
            with open(TempReader.device_file, 'r') as file:
                lines = file.readlines()
            marker = lines[1].find('t=')  # second line carries "... t=<millidegrees>"
            if marker != -1:
                return lines[1][marker + 2:].strip()
            return "US"
        except (OSError, IndexError):
            # OSError: file vanished / read error; IndexError: short/empty file.
            return "US"

    @staticmethod
    def convertToDisplayFormat(inputTemp):
        """Format a raw reading like "23500" as "23.500 Celsius" for the LCD.

        NOTE(review): assumes a two-digit integer part (10-99 degrees C) —
        confirm the sensor range before relying on this for other values.
        """
        return inputTemp[:2] + "." + inputTemp[2:].strip() + " Celsius"

    @staticmethod
    def getTemp():
        """Public entry point: read the probe once."""
        return TempReader.readTempFile()
|
{"/ConnectedTemperatureSensor.py": ["/TempReader.py"], "/RunTempSensor.py": ["/ConnectedTemperatureSensor.py"]}
|
39,118,947
|
crandersn/temp_probe_pi_code
|
refs/heads/master
|
/RunTempSensor.py
|
from ConnectedTemperatureSensor import ConnectedTempSensor


def main():
    """Entry point: build the sensor controller and run its main loop forever."""
    tempSensor = ConnectedTempSensor()
    tempSensor.run()


if __name__ == "__main__":
    # Guard so importing this module does not touch the GPIO/LCD hardware.
    main()
|
{"/ConnectedTemperatureSensor.py": ["/TempReader.py"], "/RunTempSensor.py": ["/ConnectedTemperatureSensor.py"]}
|
39,220,242
|
Venko15/MLT
|
refs/heads/master
|
/bot/cogs/autorole.py
|
from re import L
from types import coroutine
from discord import client
import pymongo
import discord
import asyncio
from discord.ext import commands
from discord import Client
from discord import channel, guild
from discord.ext.commands.core import command
import os, sys
import typing as t
class AutoRbot(commands.Cog):
    """Level/role management cog backed by MongoDB, plus the custom help command.

    Fixes applied to the original:
      * ``Collection.update()`` was removed in PyMongo 4 — ``update_one()`` is used.
      * ``help`` with an unrecognized category left ``embed`` unbound and crashed
        with UnboundLocalError; unknown categories now fall back to general help.
      * The Memes help embed title wrongly said "Weather".
      * Unused walrus bindings and the unused ``_check`` helper are removed.
      * ``display_roles`` builds a local list instead of mutating shared state.
    """

    def __init__(self, bot):
        self.bot = bot
        # SECURITY NOTE: connection string with credentials is committed to
        # source control; rotate it and load from configuration instead.
        self.client = pymongo.MongoClient(
            "mongodb+srv://MLT:Venkoto%4015@mlt.kinqt.mongodb.net/MLT?retryWrites=true&w=majority")
        self.db = self.client["MLT"]       # per-guild member level collections
        self.rl = self.client["MLTROLES"]  # per-guild role -> level collections
        self.roles = []  # kept for backward compatibility; no longer used by display_roles

    @commands.command(name="level")
    async def level(self, ctx):
        """Tell the invoking member their current level, if they are tracked."""
        col = self.db[str(ctx.guild.id)]
        query = {"name": str(ctx.author.id)}
        if (member := col.find_one(query)):
            await ctx.send(f'<@{int(member["name"])}>you are level {member["lvl"]}')

    @commands.command(name="setrole")
    @commands.has_permissions(manage_roles=True)
    async def setrole(self, ctx, level, *rolename):
        """Create the Discord role if missing and map it to a level in the database."""
        role = " ".join(rolename)
        col = self.rl[str(ctx.guild.id)]
        if discord.utils.get(ctx.guild.roles, name=role) is None:
            await ctx.guild.create_role(name=role)
        query = {"name": role}
        if col.find_one(query) is None:
            col.insert_one({"name": role, "lvl": int(level)})
        else:
            msg = await ctx.send("This role already exists in the database")
            await asyncio.sleep(4)
            await msg.delete()

    @commands.command(name="remrole")
    @commands.has_permissions(manage_roles=True)
    async def remrole(self, ctx, *rolename):
        """Delete a role both from the database and from the Discord guild."""
        role = " ".join(rolename)
        col = self.rl[str(ctx.guild.id)]
        query = {"name": role}
        if col.find_one(query):
            col.delete_one(query)
            role_object = discord.utils.get(ctx.message.guild.roles, name=role)
            await role_object.delete()
        else:
            msg = await ctx.send(f'There isnt a role named {role}')
            await asyncio.sleep(4)
            await msg.delete()

    @commands.command(name="editrole")
    @commands.has_permissions(manage_roles=True)
    async def editrole(self, ctx, *rolename):
        """Change the level mapped to an existing role (prompts the author for it)."""
        role = " ".join(rolename)
        col = self.rl[str(ctx.guild.id)]
        query = {"name": role}
        if col.find_one(query):
            await ctx.send("Waiting for level input")
            level = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author)
            newval = {"$set": {"lvl": int(level.content)}}
            # Fix: Collection.update() was removed in PyMongo 4.
            col.update_one(query, newval)

    @commands.command(name="display_roles")
    @commands.has_permissions(manage_roles=True)
    async def display_roles(self, ctx):
        """Send all configured roles with their levels, ordered by level."""
        rlcol = self.rl[str(ctx.guild.id)]
        # Fix: use a local list; the original accumulated into shared self.roles,
        # which could leak stale entries across interleaved invocations.
        entries = list(rlcol.find().sort("lvl"))
        output = ""
        for entry in entries:
            output += f'{entry["name"]} - {str(entry["lvl"])}\n'
        await ctx.send(output)

    @commands.command()
    async def help(self, ctx, intention=None):
        """Send the help embed for the requested category (defaults to general)."""
        # Fix: unknown categories previously left `embed` unbound; fall back
        # to the general help page instead of crashing.
        if intention not in ("Music", "Autorole", "Weather", "Memes"):
            intention = "general"
        if intention == "general":
            embed = discord.Embed(title="This is General Help", colour=discord.Colour(0xa94ab1), description="This is a multifunctional bot")
            embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.set_author(name="Yeah thats me >__MLT ze bot__<", icon_url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.add_field(name="🎷Music", value="```yaml\nFor help with the music commmand type - 1help Music```")
            embed.add_field(name="🏋Autorole", value="```yaml\nFor help with the autorole commmand type - 1help Autorole```")
            embed.add_field(name="☁️Weather", value="```yaml\nFor help with the weather commmand type - 1help Weather```")
            embed.add_field(name="🤣Memes", value="```yaml\nFor help with the memes commmand type - 1help Memes```")
        elif intention == "Music":
            embed = discord.Embed(title="This is Music Help", colour=discord.Colour(0xa94ab1), description="This is a multifunctional bot")
            embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.set_author(name="Yeah thats me >__MLT ze bot__<", icon_url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.add_field(name="Play music on demand", value="```yaml\nUse the 1play/1p/1pl to play a song, a playlist or even music from a link(spotify/youtube): 1pl/1play <query>```")
            embed.add_field(name="Skip a song", value="```yaml\nUse 1sk/1skip to skip to the next song : 1sk/1skip```")
            embed.add_field(name="View the queue", value="```yaml\nUse 1queue/1q to view all the songs you have queued up : 1q```")
            embed.add_field(name="Or even loop the queue", value="```yaml\nUse 1loop to enable/disable the loop : 1loop```")
        elif intention == "Autorole":
            embed = discord.Embed(title="This is Autorole Help", colour=discord.Colour(0xa94ab1), description="This is a multifunctional bot")
            embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.set_author(name="Yeah thats me >__MLT ze bot__<", icon_url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.add_field(name="Set role to a certain level", value="```yaml\nUse the 1setrole to set a role to a certain level : 1setrole <level> <role_name>```")
            embed.add_field(name="Edit role level", value="```yaml\nUse 1sk/1skip to skip to the next song : 1editrole <role_name>```")
            embed.add_field(name="Remove role by name query", value="```yaml\nUse 1remrole to view all the songs you have queued up : 1remrole <level> <role_name>```")
            embed.add_field(name="Display all roles", value="```yaml\nUse 1display_roles to show all roles : 1display_roles ```")
        elif intention == "Weather":
            embed = discord.Embed(title="This is Weather Help", colour=discord.Colour(0xa94ab1), description="This is a multifunctional bot")
            embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.set_author(name="Yeah thats me >__MLT ze bot__<", icon_url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.add_field(name="Set role to a certain level", value="```yaml\nUse the 1setrole to set a role to a certain level : 1weather <City/Country>```")
            embed.add_field(name="Check the forecast for certain amount of days ahead", value="```yaml\nUse 1forecast : 1forecast <days> <City/Country>```")
        else:  # "Memes"
            # Fix: the title wrongly said "Weather" in the original.
            # NOTE(review): the field texts below still duplicate the Weather
            # page in the original source — confirm the intended Memes commands.
            embed = discord.Embed(title="This is Memes Help", colour=discord.Colour(0xa94ab1), description="This is a multifunctional bot")
            embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.set_author(name="Yeah thats me >__MLT ze bot__<", icon_url="https://cdn.discordapp.com/attachments/827580959322144768/850770402796109854/unknown.png")
            embed.add_field(name="Set role to a certain level", value="```yaml\nUse the 1setrole to set a role to a certain level : 1weather <City/Country>```")
            embed.add_field(name="Check the forecast for certain amount of days ahead", value="```yaml\nUse 1forecast : 1forecast <days> <City/Country>```")
        await ctx.send(embed=embed)
def setup(bot):
    # Extension entry point used by discord.py's load_extension().
    bot.add_cog(AutoRbot(bot))
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,243
|
Venko15/MLT
|
refs/heads/master
|
/env/Lib/site-packages/httpcore/_sync/connection.py
|
from ssl import SSLContext
from typing import List, Optional, Tuple, cast
from .._backends.sync import SyncBackend, SyncLock, SyncSocketStream, SyncBackend
from .._exceptions import ConnectError, ConnectTimeout
from .._types import URL, Headers, Origin, TimeoutDict
from .._utils import exponential_backoff, get_logger, url_to_origin
from .base import (
SyncByteStream,
SyncHTTPTransport,
ConnectionState,
NewConnectionRequired,
)
from .http import SyncBaseHTTPConnection
from .http11 import SyncHTTP11Connection
logger = get_logger(__name__)
RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc.
class SyncHTTPConnection(SyncHTTPTransport):
    """A single connection to one origin, opened lazily on the first request.

    The socket is established (with optional retries/backoff) when the first
    request arrives, after which requests are delegated to an HTTP/1.1 or
    HTTP/2 implementation chosen from the negotiated ALPN protocol.
    """

    def __init__(
        self,
        origin: Origin,
        http1: bool = True,
        http2: bool = False,
        uds: str = None,
        ssl_context: SSLContext = None,
        socket: SyncSocketStream = None,
        local_address: str = None,
        retries: int = 0,
        backend: SyncBackend = None,
    ):
        self.origin = origin
        self.http1 = http1
        self.http2 = http2
        self.uds = uds
        self.ssl_context = SSLContext() if ssl_context is None else ssl_context
        self.socket = socket
        self.local_address = local_address
        self.retries = retries
        # Advertise only the enabled protocols during the TLS handshake.
        alpn_protocols: List[str] = []
        if http1:
            alpn_protocols.append("http/1.1")
        if http2:
            alpn_protocols.append("h2")
        self.ssl_context.set_alpn_protocols(alpn_protocols)
        # Populated by _create_connection once the socket is open.
        self.connection: Optional[SyncBaseHTTPConnection] = None
        self.is_http11 = False
        self.is_http2 = False
        self.connect_failed = False
        self.expires_at: Optional[float] = None
        self.backend = SyncBackend() if backend is None else backend

    def __repr__(self) -> str:
        http_version = "UNKNOWN"
        if self.is_http11:
            http_version = "HTTP/1.1"
        elif self.is_http2:
            http_version = "HTTP/2"
        return f"<SyncHTTPConnection http_version={http_version} state={self.state}>"

    def info(self) -> str:
        """Return a short human-readable description of the connection state."""
        if self.connection is None:
            return "Not connected"
        elif self.state == ConnectionState.PENDING:
            return "Connecting"
        return self.connection.info()

    @property
    def request_lock(self) -> SyncLock:
        # We do this lazily, to make sure backend autodetection always
        # runs within an async context.
        if not hasattr(self, "_request_lock"):
            self._request_lock = self.backend.create_lock()
        return self._request_lock

    def handle_request(
        self,
        method: bytes,
        url: URL,
        headers: Headers,
        stream: SyncByteStream,
        extensions: dict,
    ) -> Tuple[int, Headers, SyncByteStream, dict]:
        """Send one request, opening the underlying socket first if needed.

        Raises NewConnectionRequired when this connection cannot take another
        request in its current state (e.g. an active HTTP/1.1 exchange).
        """
        assert url_to_origin(url) == self.origin
        timeout = cast(TimeoutDict, extensions.get("timeout", {}))
        with self.request_lock:
            if self.state == ConnectionState.PENDING:
                if not self.socket:
                    logger.trace(
                        "open_socket origin=%r timeout=%r", self.origin, timeout
                    )
                    self.socket = self._open_socket(timeout)
                self._create_connection(self.socket)
            elif self.state in (ConnectionState.READY, ConnectionState.IDLE):
                pass
            elif self.state == ConnectionState.ACTIVE and self.is_http2:
                # HTTP/2 supports multiplexing; an active connection can
                # still accept further requests.
                pass
            else:
                raise NewConnectionRequired()
        assert self.connection is not None
        logger.trace(
            "connection.handle_request method=%r url=%r headers=%r",
            method,
            url,
            headers,
        )
        return self.connection.handle_request(
            method, url, headers, stream, extensions
        )

    def _open_socket(self, timeout: TimeoutDict = None) -> SyncSocketStream:
        """Open a TCP (or UDS) stream to the origin, retrying with backoff.

        Connect errors consume the retry budget; any other exception marks the
        connection as failed and propagates immediately.
        """
        scheme, hostname, port = self.origin
        timeout = {} if timeout is None else timeout
        ssl_context = self.ssl_context if scheme == b"https" else None
        retries_left = self.retries
        delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR)
        while True:
            try:
                if self.uds is None:
                    return self.backend.open_tcp_stream(
                        hostname,
                        port,
                        ssl_context,
                        timeout,
                        local_address=self.local_address,
                    )
                else:
                    return self.backend.open_uds_stream(
                        self.uds, hostname, ssl_context, timeout
                    )
            except (ConnectError, ConnectTimeout):
                if retries_left <= 0:
                    self.connect_failed = True
                    raise
                retries_left -= 1
                delay = next(delays)
                self.backend.sleep(delay)
            except Exception:  # noqa: PIE786
                self.connect_failed = True
                raise

    def _create_connection(self, socket: SyncSocketStream) -> None:
        """Pick the HTTP/1.1 or HTTP/2 implementation for an opened socket."""
        http_version = socket.get_http_version()
        logger.trace(
            "create_connection socket=%r http_version=%r", socket, http_version
        )
        # HTTP/2 is chosen when negotiated via ALPN, or when it is the only
        # protocol this connection was configured to speak.
        if http_version == "HTTP/2" or (self.http2 and not self.http1):
            from .http2 import SyncHTTP2Connection
            self.is_http2 = True
            self.connection = SyncHTTP2Connection(
                socket=socket, backend=self.backend, ssl_context=self.ssl_context
            )
        else:
            self.is_http11 = True
            self.connection = SyncHTTP11Connection(
                socket=socket, ssl_context=self.ssl_context
            )

    @property
    def state(self) -> ConnectionState:
        # A failed connect dominates; otherwise PENDING until the protocol
        # implementation exists, then defer to it.
        if self.connect_failed:
            return ConnectionState.CLOSED
        elif self.connection is None:
            return ConnectionState.PENDING
        return self.connection.get_state()

    def is_socket_readable(self) -> bool:
        return self.connection is not None and self.connection.is_socket_readable()

    def mark_as_ready(self) -> None:
        if self.connection is not None:
            self.connection.mark_as_ready()

    def start_tls(self, hostname: bytes, timeout: TimeoutDict = None) -> None:
        """Upgrade the underlying stream to TLS (used for tunnelled proxies)."""
        if self.connection is not None:
            logger.trace("start_tls hostname=%r timeout=%r", hostname, timeout)
            self.socket = self.connection.start_tls(hostname, timeout)
            logger.trace("start_tls complete hostname=%r timeout=%r", hostname, timeout)

    def close(self) -> None:
        # Take the request lock so we never close mid-request.
        with self.request_lock:
            if self.connection is not None:
                self.connection.close()
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,244
|
Venko15/MLT
|
refs/heads/master
|
/env/Lib/site-packages/youtubesearchpython/handlers/componenthandler.py
|
from typing import List, Union
from youtubesearchpython.internal.constants import *
class ComponentHandler:
    """Extract normalized dictionaries from raw YouTube search-result elements.

    Each ``_get*Component`` method receives one raw renderer element (a dict
    keyed by the ``*ElementKey`` constants) and returns a flat dict with the
    fields this library exposes. ``_getValue`` walks a mixed key/index path,
    returning ``None`` as soon as any step is missing.

    Fix applied to the original: ``_getValue`` indexed lists after only
    checking ``len(value) != 0``, so an out-of-range index raised IndexError;
    it also assumed every intermediate value was a dict/list. Both cases now
    yield ``None`` like any other missing path.
    """

    def _getVideoComponent(self, element: dict, shelfTitle: Union[str, None] = None) -> dict:
        video = element[videoElementKey]
        component = {
            'type': 'video',
            'id': self._getValue(video, ['videoId']),
            'title': self._getValue(video, ['title', 'runs', 0, 'text']),
            'publishedTime': self._getValue(video, ['publishedTimeText', 'simpleText']),
            'duration': self._getValue(video, ['lengthText', 'simpleText']),
            'viewCount': {
                'text': self._getValue(video, ['viewCountText', 'simpleText']),
                'short': self._getValue(video, ['shortViewCountText', 'simpleText']),
            },
            'thumbnails': self._getValue(video, ['thumbnail', 'thumbnails']),
            'richThumbnail': self._getValue(video, ['richThumbnail', 'movingThumbnailRenderer', 'movingThumbnailDetails', 'thumbnails', 0]),
            'descriptionSnippet': self._getValue(video, ['detailedMetadataSnippets', 0, 'snippetText', 'runs']),
            'channel': {
                'name': self._getValue(video, ['ownerText', 'runs', 0, 'text']),
                'id': self._getValue(video, ['ownerText', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId']),
                'thumbnails': self._getValue(video, ['channelThumbnailSupportedRenderers', 'channelThumbnailWithLinkRenderer', 'thumbnail', 'thumbnails']),
            },
            'accessibility': {
                'title': self._getValue(video, ['title', 'accessibility', 'accessibilityData', 'label']),
                'duration': self._getValue(video, ['lengthText', 'accessibility', 'accessibilityData', 'label']),
            },
        }
        # NOTE(review): if 'videoId'/'browseId' are missing these concatenations
        # raise TypeError (str + None) — confirm upstream always provides them.
        component['link'] = 'https://www.youtube.com/watch?v=' + component['id']
        component['channel']['link'] = 'https://www.youtube.com/channel/' + component['channel']['id']
        component['shelfTitle'] = shelfTitle
        return component

    def _getChannelComponent(self, element: dict) -> dict:
        channel = element[channelElementKey]
        component = {
            'type': 'channel',
            'id': self._getValue(channel, ['channelId']),
            'title': self._getValue(channel, ['title', 'simpleText']),
            'thumbnails': self._getValue(channel, ['thumbnail', 'thumbnails']),
            'videoCount': self._getValue(channel, ['videoCountText', 'runs', 0, 'text']),
            'descriptionSnippet': self._getValue(channel, ['descriptionSnippet', 'runs']),
            'subscribers': self._getValue(channel, ['subscriberCountText', 'simpleText']),
        }
        component['link'] = 'https://www.youtube.com/channel/' + component['id']
        return component

    def _getPlaylistComponent(self, element: dict) -> dict:
        playlist = element[playlistElementKey]
        component = {
            'type': 'playlist',
            'id': self._getValue(playlist, ['playlistId']),
            'title': self._getValue(playlist, ['title', 'simpleText']),
            'videoCount': self._getValue(playlist, ['videoCount']),
            'channel': {
                'name': self._getValue(playlist, ['shortBylineText', 'runs', 0, 'text']),
                'id': self._getValue(playlist, ['shortBylineText', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId']),
            },
            'thumbnails': self._getValue(playlist, ['thumbnailRenderer', 'playlistVideoThumbnailRenderer', 'thumbnail', 'thumbnails']),
        }
        component['link'] = 'https://www.youtube.com/playlist?list=' + component['id']
        component['channel']['link'] = 'https://www.youtube.com/channel/' + component['channel']['id']
        return component

    def _getShelfComponent(self, element: dict) -> dict:
        shelf = element[shelfElementKey]
        return {
            'title': self._getValue(shelf, ['title', 'simpleText']),
            'elements': self._getValue(shelf, ['content', 'verticalListRenderer', 'items']),
        }

    def _getValue(self, source: dict, path: List[Union[str, int]]) -> Union[str, int, dict, None]:
        """Follow ``path`` (dict keys and list indices) into ``source``.

        Returns the value found, or ``None`` if any step along the path is
        missing, out of range, or applied to an incompatible container.
        """
        value = source
        for key in path:
            if isinstance(key, str):
                if isinstance(value, dict) and key in value:
                    value = value[key]
                else:
                    value = None
                    break
            elif isinstance(key, int):
                # Fix: also reject out-of-range indices (the original only
                # checked for a non-empty sequence and could raise IndexError).
                if isinstance(value, (list, tuple)) and -len(value) <= key < len(value):
                    value = value[key]
                else:
                    value = None
                    break
        return value
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,245
|
Venko15/MLT
|
refs/heads/master
|
/bot/glbs.py
|
queue = {}
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,246
|
Venko15/MLT
|
refs/heads/master
|
/env/Lib/site-packages/prawcore/auth.py
|
"""Provides Authentication and Authorization classes."""
import time
from requests import Request
from requests.status_codes import codes
from . import const
from .exceptions import InvalidInvocation, OAuthException, ResponseException
class BaseAuthenticator(object):
    """Provide the base authenticator object that stores OAuth2 credentials."""

    def __init__(self, requestor, client_id, redirect_uri=None):
        """Represent a single authentication to Reddit's API.

        :param requestor: An instance of :class:`Requestor`.
        :param client_id: The OAuth2 client ID to use with the session.
        :param redirect_uri: (optional) The redirect URI exactly as specified in your
            OAuth application settings on Reddit. This parameter is required if you want
            to use the ``authorize_url`` method, or the ``authorize`` method of the
            ``Authorizer`` class.
        """
        self._requestor = requestor
        self.client_id = client_id
        self.redirect_uri = redirect_uri

    def _post(self, url, success_status=codes["ok"], **data):
        # POST ``data`` to ``url`` using this authenticator's credentials
        # (``_auth`` is supplied by subclasses).  ``data`` is sent sorted so
        # the request body is deterministic.
        response = self._requestor.request(
            "post",
            url,
            auth=self._auth(),
            data=sorted(data.items()),
            headers={"Connection": "close"},
        )
        # Any status other than the expected one is treated as a failure.
        if response.status_code != success_status:
            raise ResponseException(response)
        return response

    def authorize_url(self, duration, scopes, state, implicit=False):
        """Return the URL used out-of-band to grant access to your application.

        :param duration: Either ``permanent`` or ``temporary``. ``temporary``
            authorizations generate access tokens that last only 1 hour. ``permanent``
            authorizations additionally generate a refresh token that can be
            indefinitely used to generate new hour-long access tokens. Only
            ``temporary`` can be specified if ``implicit`` is set to ``True``.
        :param scopes: A list of OAuth scopes to request authorization for.
        :param state: A string that will be reflected in the callback to
            ``redirect_uri``. Elements must be printable ASCII characters in the range
            0x20 through 0x7E inclusive. This value should be temporarily unique to the
            client for whom the URL was generated.
        :param implicit: (optional) Use the implicit grant flow (default: False). This
            flow is only available for UntrustedAuthenticators.

        :raises InvalidInvocation: when no redirect URI is configured, or when
            the implicit flow is requested with an incompatible authenticator
            or duration.
        """
        if self.redirect_uri is None:
            raise InvalidInvocation("redirect URI not provided")
        # The implicit grant is restricted to installed (untrusted) apps and
        # temporary tokens, per Reddit's OAuth2 rules.
        if implicit and not isinstance(self, UntrustedAuthenticator):
            raise InvalidInvocation(
                "Only UntrustedAuthenticator instances can "
                "use the implicit grant flow."
            )
        if implicit and duration != "temporary":
            raise InvalidInvocation(
                "The implicit grant flow only supports "
                "temporary access tokens."
            )
        params = {
            "client_id": self.client_id,
            "duration": duration,
            "redirect_uri": self.redirect_uri,
            "response_type": "token" if implicit else "code",
            "scope": " ".join(scopes),
            "state": state,
        }
        url = self._requestor.reddit_url + const.AUTHORIZATION_PATH
        # Build the fully encoded URL without issuing the request.
        request = Request("GET", url, params=params)
        return request.prepare().url

    def revoke_token(self, token, token_type=None):
        """Ask Reddit to revoke the provided token.

        :param token: The access or refresh token to revoke.
        :param token_type: (Optional) When provided, hint to Reddit what the token type
            is for a possible efficiency gain. The value can be either ``access_token``
            or ``refresh_token``.
        """
        data = {"token": token}
        if token_type is not None:
            data["token_type_hint"] = token_type
        url = self._requestor.reddit_url + const.REVOKE_TOKEN_PATH
        # Revocation returns 204 No Content on success.
        self._post(url, success_status=codes["no_content"], **data)
class TrustedAuthenticator(BaseAuthenticator):
    """Store OAuth2 authentication credentials for web, or script type apps."""

    RESPONSE_TYPE = "code"

    def __init__(self, requestor, client_id, client_secret, redirect_uri=None):
        """Represent a single authentication to Reddit's API.

        :param requestor: An instance of :class:`Requestor`.
        :param client_id: The OAuth2 client ID to use with the session.
        :param client_secret: The OAuth2 client secret to use with the session.
        :param redirect_uri: (optional) The redirect URI exactly as specified in
            your OAuth application settings on Reddit. Required for
            ``authorize_url`` or ``Authorizer.authorize``.
        """
        super().__init__(requestor, client_id, redirect_uri)
        self.client_secret = client_secret

    def _auth(self):
        # Trusted apps authenticate with (client_id, client_secret).
        return self.client_id, self.client_secret
class UntrustedAuthenticator(BaseAuthenticator):
    """Store OAuth2 authentication credentials for installed applications."""

    def _auth(self):
        # Installed (untrusted) apps have no client secret; Reddit expects
        # an empty string in its place.
        return (self.client_id, "")
class BaseAuthorizer(object):
    """Superclass for OAuth2 authorization tokens and scopes."""

    def __init__(self, authenticator):
        """Represent a single authorization to Reddit's API.

        :param authenticator: An instance of :class:`BaseAuthenticator`.

        :raises InvalidInvocation: when ``authenticator`` is not an instance
            of the subclass's ``AUTHENTICATOR_CLASS``.
        """
        self._authenticator = authenticator
        self._clear_access_token()
        # Subclasses define AUTHENTICATOR_CLASS; fail fast on a mismatch.
        self._validate_authenticator()

    def _clear_access_token(self):
        # Reset token state so ``is_valid`` returns False.
        self._expiration_timestamp = None
        self.access_token = None
        self.scopes = None

    def _request_token(self, **data):
        # Exchange ``data`` (grant parameters) for an access token, storing
        # the token, its scopes, and its expiration timestamp.
        url = (
            self._authenticator._requestor.reddit_url + const.ACCESS_TOKEN_PATH
        )
        pre_request_time = time.time()
        response = self._authenticator._post(url, **data)
        payload = response.json()
        if "error" in payload:  # Why are these OKAY responses?
            raise OAuthException(
                response, payload["error"], payload.get("error_description")
            )
        # Expire 10 seconds early, measured from *before* the request, so a
        # token is never used right at its server-side expiry.
        self._expiration_timestamp = (
            pre_request_time - 10 + payload["expires_in"]
        )
        self.access_token = payload["access_token"]
        if "refresh_token" in payload:
            self.refresh_token = payload["refresh_token"]
        self.scopes = set(payload["scope"].split(" "))

    def _validate_authenticator(self):
        # ``AUTHENTICATOR_CLASS`` is expected to be set by subclasses.
        if not isinstance(self._authenticator, self.AUTHENTICATOR_CLASS):
            raise InvalidInvocation(
                "Must use a authenticator of type"
                f" {self.AUTHENTICATOR_CLASS.__name__}."
            )

    def is_valid(self):
        """Return whether or not the Authorizer is ready to authorize requests.

        A ``True`` return value does not guarantee that the access_token is actually
        valid on the server side.
        """
        return (
            self.access_token is not None
            and time.time() < self._expiration_timestamp
        )

    def revoke(self):
        """Revoke the current Authorization.

        :raises InvalidInvocation: when there is no token to revoke.
        """
        if self.access_token is None:
            raise InvalidInvocation("no token available to revoke")
        self._authenticator.revoke_token(self.access_token, "access_token")
        self._clear_access_token()
class Authorizer(BaseAuthorizer):
    """Manages OAuth2 authorization tokens and scopes."""

    AUTHENTICATOR_CLASS = BaseAuthenticator

    def __init__(
        self,
        authenticator,
        *,
        post_refresh_callback=None,
        pre_refresh_callback=None,
        refresh_token=None,
    ):
        """Represent a single authorization to Reddit's API.

        :param authenticator: An instance of a subclass of :class:`BaseAuthenticator`.
        :param post_refresh_callback: (Optional) When a single-argument
            function is passed, the function will be called after refreshing
            the access and refresh tokens. The argument to the callback is the
            :class:`Authorizer` instance. This callback can be used to inspect
            and modify the attributes of the :class:`Authorizer`.
        :param pre_refresh_callback: (Optional) When a single-argument function
            is passed, the function will be called prior to refreshing the
            access and refresh tokens. The argument to the callback is the
            :class:`Authorizer` instance. This callback can be used to inspect
            and modify the attributes of the :class:`Authorizer`.
        :param refresh_token: (Optional) Enables the ability to refresh the
            authorization.

        Note: the original docstring had the pre/post callback descriptions
        swapped; ``refresh`` below calls ``pre`` before and ``post`` after
        the token request.
        """
        super(Authorizer, self).__init__(authenticator)
        self._post_refresh_callback = post_refresh_callback
        self._pre_refresh_callback = pre_refresh_callback
        self.refresh_token = refresh_token

    def authorize(self, code):
        """Obtain and set authorization tokens based on ``code``.

        :param code: The code obtained by an out-of-band authorization request to
            Reddit.

        :raises InvalidInvocation: when the authenticator has no redirect URI.
        """
        if self._authenticator.redirect_uri is None:
            raise InvalidInvocation("redirect URI not provided")
        self._request_token(
            code=code,
            grant_type="authorization_code",
            redirect_uri=self._authenticator.redirect_uri,
        )

    def refresh(self):
        """Obtain a new access token from the refresh_token.

        :raises InvalidInvocation: when no refresh token is set.
        """
        # Pre-callback runs before the token request, post-callback after.
        if self._pre_refresh_callback:
            self._pre_refresh_callback(self)
        if self.refresh_token is None:
            raise InvalidInvocation("refresh token not provided")
        self._request_token(
            grant_type="refresh_token", refresh_token=self.refresh_token
        )
        if self._post_refresh_callback:
            self._post_refresh_callback(self)

    def revoke(self, only_access=False):
        """Revoke the current Authorization.

        :param only_access: (Optional) When explicitly set to True, do not evict the
            refresh token if one is set.

        Revoking a refresh token will in-turn revoke all access tokens associated with
        that authorization.
        """
        if only_access or self.refresh_token is None:
            super(Authorizer, self).revoke()
        else:
            # Revoking the refresh token revokes the access token too.
            self._authenticator.revoke_token(
                self.refresh_token, "refresh_token"
            )
            self._clear_access_token()
            self.refresh_token = None
class DeviceIDAuthorizer(BaseAuthorizer):
    """Manages app-only OAuth2 for 'installed' applications.

    While the '*' scope will be available, some endpoints simply will not
    work due to the lack of an associated Reddit account.
    """

    AUTHENTICATOR_CLASS = UntrustedAuthenticator

    def __init__(self, authenticator, device_id="DO_NOT_TRACK_THIS_DEVICE"):
        """Represent an app-only OAuth2 authorization for 'installed' apps.

        :param authenticator: An instance of :class:`UntrustedAuthenticator`.
        :param device_id: (optional) A unique ID (20-30 character ASCII
            string, default ``DO_NOT_TRACK_THIS_DEVICE``). For more
            information about this parameter, see:
            https://github.com/reddit/reddit/wiki/OAuth2#application-only-oauth
        """
        super().__init__(authenticator)
        self._device_id = device_id

    def refresh(self):
        """Obtain a new access token."""
        # Reddit's application-only grant for installed clients.
        self._request_token(
            grant_type="https://oauth.reddit.com/grants/installed_client",
            device_id=self._device_id,
        )
class ImplicitAuthorizer(BaseAuthorizer):
    """Manages implicit installed-app type authorizations."""

    AUTHENTICATOR_CLASS = UntrustedAuthenticator

    def __init__(self, authenticator, access_token, expires_in, scope):
        """Represent a single implicit authorization to Reddit's API.

        :param authenticator: An instance of :class:`UntrustedAuthenticator`.
        :param access_token: The access_token obtained from Reddit via callback to the
            authenticator's redirect_uri.
        :param expires_in: The number of seconds the ``access_token`` is valid for. The
            origin of this value was returned from Reddit via callback to the
            authenticator's redirect uri. Note, you may need to subtract an offset
            before passing in this number to account for a delay between when Reddit
            prepared the response, and when you make this function call.
        :param scope: A space-delimited string of Reddit OAuth2 scope names as returned
            from Reddit in the callback to the authenticator's redirect uri.
        """
        super().__init__(authenticator)
        # The base constructor cleared the token state; install the
        # externally obtained token directly.
        self.access_token = access_token
        self.scopes = set(scope.split(" "))
        self._expiration_timestamp = time.time() + expires_in
class ReadOnlyAuthorizer(Authorizer):
    """Manages authorizations that are not associated with a Reddit account.

    While the '*' scope will be available, some endpoints simply will not
    work due to the lack of an associated Reddit account.
    """

    AUTHENTICATOR_CLASS = TrustedAuthenticator

    def refresh(self):
        """Obtain a new ReadOnly access token."""
        # ``client_credentials`` yields an application-only token.
        self._request_token(grant_type="client_credentials")
class ScriptAuthorizer(Authorizer):
    """Manages personal-use script type authorizations.

    Only users who are listed as developers for the application will be
    granted access tokens.
    """

    AUTHENTICATOR_CLASS = TrustedAuthenticator

    def __init__(self, authenticator, username, password):
        """Represent a single personal-use authorization to Reddit's API.

        :param authenticator: An instance of :class:`TrustedAuthenticator`.
        :param username: The Reddit username of one of the application's developers.
        :param password: The password associated with ``username``.
        """
        super().__init__(authenticator)
        self._username = username
        self._password = password

    def refresh(self):
        """Obtain a new personal-use script type access token."""
        # The resource-owner password grant, permitted only for script apps.
        self._request_token(
            grant_type="password",
            username=self._username,
            password=self._password,
        )
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,247
|
Venko15/MLT
|
refs/heads/master
|
/bot/cogs/memes.py
|
import random
from discord import embeds
import datetime as dt
from discord.ext import commands
import discord
import requests
class Meme(commands.Cog):
    """Cog that posts a random meme fetched from meme-api."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name='memepls', aliases=["meme"])
    async def memepls(self, ctx):
        """Fetch one random meme and send it as an embed."""
        payload = requests.get("https://meme-api.herokuapp.com/gimme").json()
        meme_embed = discord.Embed()
        meme_embed.set_author(name=payload["title"])
        meme_embed.set_image(url=payload["url"])
        await ctx.send(embed=meme_embed)
def setup(bot):
    """Register the Meme cog on ``bot``."""
    bot.add_cog(Meme(bot))
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,248
|
Venko15/MLT
|
refs/heads/master
|
/bot/cogs/weatherbot.py
|
from re import I
import discord
from discord.ext import commands
from discord.ext.commands.core import command
import requests
import json
import datetime
import typing as t
import unittest
# Raised when the weather command is invoked without a city name.
class NoCityName(commands.CommandError):
    pass


# Raised when the weather API does not recognise the requested city.
class NoCityFound(commands.CommandError):
    pass
class WeatherBot(commands.Cog):
    """Cog exposing current-weather and forecast commands backed by the
    OpenWeatherMap HTTP API.

    NOTE(review): the API key is hard-coded below; it should be moved to
    configuration / an environment variable.
    """

    def __init__(self, bot):
        self.bot = bot

    def build_url(self, base_url, city, api_key):
        """Return ``base_url`` with ``q`` and ``appid`` query params appended."""
        return base_url + "q=" + city + "&appid=" + api_key

    @commands.command()
    async def weather(self, ctx, *, city_name):
        """Send current temperature, humidity and description for a city.

        :raises NoCityName: when no city name was supplied.
        :raises NoCityFound: when the API does not know the city.
        """
        if not len(city_name):
            raise NoCityName
        url = self.build_url("http://api.openweathermap.org/data/2.5/weather?",
                             city_name, "2888fecacf9ba5007679e4fd079a7388")
        x = requests.get(url).json()
        if x["cod"] == "401" or x["cod"] == "404":
            raise NoCityFound
        info = x["main"]
        curr_temp = info["temp"] - 273.15  # Kelvin -> Celsius
        curr_humidity = info["humidity"]
        weather_description = x["weather"][0]["description"]
        await ctx.send(f"`` Temperature = {int(curr_temp)} C\n humidity (in percentage) = {str(curr_humidity)} \n description = {str(weather_description)}``")

    @commands.command()
    async def forecast(self, ctx, city_name, days=3):
        """Send a ``days``-entry forecast (1-15) for ``city_name``.

        :raises NoCityFound: when the API does not know the city.
        """
        if days > 15 or days < 1:
            # Bug fixes: previously this sent the message and fell through
            # (``pass`` instead of ``return``), and the message claimed the
            # limit was 30 while the check uses 15.  The check now also runs
            # before the network request.
            await ctx.send("aight mate, cant be over 15 neither lower than one, so imma jus say 'you dumb, dumb'")
            return
        url = self.build_url("http://api.openweathermap.org/data/2.5/forecast?",
                             city_name, "2888fecacf9ba5007679e4fd079a7388")
        x = requests.get(url).json()
        if x["cod"] == "401" or x["cod"] == "404":
            raise NoCityFound
        forecast = []
        for i in range(days):
            info = x["list"][i]["main"]
            # Bug fix: max is stored first to match the "Max/Min" labels
            # below (the original stored min first but printed it as Max).
            forecast.append([
                int(info["temp_max"] - 273.15),
                int(info["temp_min"] - 273.15),
                int(info["humidity"]),
                x["list"][i]["weather"][0]["description"],
            ])
        today = datetime.datetime.today()
        date_list = [today + datetime.timedelta(days=d) for d in range(days)]
        msg = ""
        for i in range(days):
            msg += f'``Forecast for {str(date_list[i].day)} - {str(date_list[i].strftime("%B"))} - {str(date_list[i].year)}``\n'
            msg += f'``` Max Temperature = {forecast[i][0]} C\n Min Temperature = {forecast[i][1]} C\n humidity (in percentage) = {forecast[i][2]} \n description = {forecast[i][3]}```\n'
        await ctx.send(msg)

    @weather.error
    async def weather_exc(self, ctx, exc):
        if isinstance(exc, NoCityName):
            await ctx.send("You must enter a city name after the command")
        elif isinstance(exc, NoCityFound):
            await ctx.send("I couldn't find a city with this name")

    @forecast.error
    async def forecast_exc(self, ctx, exc):
        # Bug fix: this was also named ``weather_exc``, shadowing the
        # handler above in the class namespace.
        if isinstance(exc, NoCityName):
            await ctx.send("You must enter a city name after the command")
        elif isinstance(exc, NoCityFound):
            await ctx.send("I couldn't find a city with this name")

    @commands.command()
    async def test_w(self, ctx):
        """Run the inline Test suite and report success."""
        t = Test()
        if not await t.test_weather():
            await ctx.send("tests passed")
class Test(unittest.TestCase):
    # NOTE(review): invoked manually via the ``test_w`` command rather than a
    # unittest runner, and ``test_weather`` is a coroutine -- a standard
    # runner would not await it.
    async def test_weather(self):
        # Checks that build_url assembles the OpenWeatherMap query string.
        m = WeatherBot(discord.Client())
        self.assertEqual(m.build_url("http://api.openweathermap.org/data/2.5/forecast?","Sofia","2888fecacf9ba5007679e4fd079a7388"),"http://api.openweathermap.org/data/2.5/forecast?q=Sofia&appid=2888fecacf9ba5007679e4fd079a7388")
def setup(bot):
    """Register the WeatherBot cog on ``bot``."""
    bot.add_cog(WeatherBot(bot))
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,249
|
Venko15/MLT
|
refs/heads/master
|
/launcher.py
|
from bot import MLT
def main():
    """Create and start the MLT bot."""
    MLT().run()


if __name__ == "__main__":
    main()
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,250
|
Venko15/MLT
|
refs/heads/master
|
/bot/__init__.py
|
from .dsBot import MLT
from . import glbs
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,251
|
Venko15/MLT
|
refs/heads/master
|
/env/Lib/site-packages/youtubesearchpython/__future__/internal/streamurlfetcher.py
|
# Tracks whether PyTube could be imported; checked in the fetcher's
# constructor below.
isPyTubeInstalled = False
import asyncio
import httpx
try:
    from youtubesearchpython.__future__.internal.json import loads
    from pytube.extract import apply_descrambler, apply_signature
    from pytube import YouTube, extract
    from urllib.parse import parse_qs
    isPyTubeInstalled = True
except:
    # PyTube is optional; provide a stub base class so this module still
    # imports without it.
    class YouTube:
        def __init__(self):
            pass
# Module-level cache of the last fetched player JavaScript URL.
js_url = None
class StreamURLFetcherInternal(YouTube):
    """Fetch deciphered stream URLs using PyTube's extraction helpers.

    Overrides the parent constructor: no network calls are made at
    construction time, and PyTube's presence is validated up front.
    """

    def __init__(self):
        self._js_url = None  # URL of the player JavaScript.
        self._js = None      # Cached player JavaScript source.
        if not isPyTubeInstalled:
            raise Exception('ERROR: PyTube is not installed. To use this functionality of youtube-search-python, PyTube must be installed.')

    async def getJavaScript(self) -> None:
        """Fetch the player JavaScript from YouTube; avoid calling more than once.

        Derived from ``YouTube.prefetch``: hits the /watch endpoint, extracts
        the player JS URL, and downloads the JS only when that URL differs
        from the module-global ``js_url`` cached on a previous fetch.
        """
        try:
            global js_url
            async with httpx.AsyncClient() as client:
                response = await client.get('https://www.youtube.com/watch', timeout = None)
                watchHTML = response.text
            loop = asyncio.get_running_loop()
            # extract.js_url is CPU-bound parsing; keep it off the event loop.
            self._js_url = await loop.run_in_executor(None, extract.js_url, watchHTML)
            if js_url != self._js_url:
                async with httpx.AsyncClient() as client:
                    response = await client.get(self._js_url, timeout = None)
                    self._js = response.text
        except Exception:
            # Narrowed from a bare ``except`` so cancellation is not swallowed.
            raise Exception('ERROR: Could not make request.')

    async def _getDecipheredURLs(self, videoFormats: dict) -> None:
        """Store ``videoFormats`` and decipher its stream URLs in place.

        ``videoFormats`` is wrapped under the ``player_response`` key -- the
        shape PyTube's ``apply_descrambler`` / ``apply_signature`` expect.
        """
        self._player_response = {'player_response': videoFormats}
        if not videoFormats['streamingData']:
            # Age-restricted videos need a get_video_info call
            # (derived from extract.video_info_url_age_restricted).
            try:
                async with httpx.AsyncClient() as client:
                    response = await client.post(
                        'https://youtube.com/get_video_info',
                        params = {
                            'video_id': videoFormats['id'],
                            'eurl': f'https://youtube.googleapis.com/v/{videoFormats["id"]}',
                            'sts': None,
                        },
                        timeout = None,
                    )
                # Google returns content as a query string instead of a JSON.
                self._player_response['player_response'] = await loads(parse_qs(response.text)["player_response"][0])
            except Exception:
                raise Exception('ERROR: Could not make request.')
        self.video_id = videoFormats["id"]
        await self._decipher()

    async def _decipher(self, retry: bool = False):
        """Apply PyTube's descrambler/signature to the stored player response.

        The player JavaScript is fetched lazily; on a cipher failure it is
        re-fetched exactly once with ``retry=True``.
        """
        if not self._js or retry:
            await self.getJavaScript()
        try:
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(None, apply_descrambler, self._player_response, 'url_encoded_fmt_stream_map')
            await loop.run_in_executor(None, apply_signature, self._player_response, 'url_encoded_fmt_stream_map', self._js)
        except Exception:
            # Bug fix: the original recursed with ``retry=False``, which never
            # refreshed the player JS and recursed indefinitely on a stale
            # cipher.  Retry once with fresh JS, then give up.
            if retry:
                raise
            await self._decipher(retry = True)
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,252
|
Venko15/MLT
|
refs/heads/master
|
/bot/cogs/musicv3.py
|
from cache_decorator.cache import cache
from discord.activity import Spotify
from discord.player import FFmpegAudio
from youtube_dl import YoutubeDL
import discord
import asyncio
from discord import FFmpegPCMAudio
from discord.ext.commands.cog import Cog
from discord.ext.commands.core import command
from discord.ext import commands
from youtube_dl.postprocessor import ffmpeg
from youtubesearchpython import VideosSearch
import re
from random import shuffle
from youtubesearchpython import *
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import unittest
# Options passed to FFmpeg when streaming audio into the voice client
# (auto-reconnect on dropped streams, audio only).
ffmpeg_opts = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
               'options': '-vn'}
# Patterns used by the ``play`` command to classify its argument.
Youtube_URL = r'((http|https):\/\/|)(www\.|)youtube\.com/playlist'
Spotify_URL = r'\w+:\/\/open.spotify.com\/playlist\/.*'
Spotify_song_url = r'https://open.spotify.com/track/'
# NOTE(review): SpotifyClientCredentials() is called with no arguments --
# presumably credentials come from the environment; verify deployment config.
auth_manager = SpotifyClientCredentials()
import bot.glbs  # holds the per-guild Queue registry (populated in dsBot).
sp = spotipy.Spotify(auth_manager=auth_manager)
# Raised when the invoking user is not connected to a voice channel.
class NotVoiceChannnel(commands.CommandError):
    pass


# Raised when an operation requires a non-empty queue.
class EmptyQueue(commands.CommandError):
    pass


# Raised when an unknown event name is registered (see Queue.on_event).
class TempExc(commands.CommandError):
    pass


# Raised when no playable track could be resolved for the query.
class TrackNotFound(commands.CommandError):
    pass


# Reaction emoji -> scroll direction for the paginated queue view.
SCROLL = {
    "⬆️": -1,
    "⬇️": 1,
}
class Queue:
    """Per-guild playback queue; fires registered callbacks on state changes."""

    def __init__(self):
        self._queue = []          # list of {"song_link": ..., "song_title": ...}
        self.track_names = []     # NOTE(review): never populated in this file
        self.pos = 0              # index of the track currently playing
        self.queuePOS = 0         # top index of the paginated queue view
        self.mode = False         # True -> loop back to start when exhausted
        self.event_listeners = {}
        self.vc = None            # discord voice client, assigned by the cog
        self.listeners_set = False

    @property
    def is_empty(self):
        return not self._queue

    def curr_poss(self):
        """Return the index of the current track."""
        return self.pos

    @property
    def get_curr_track(self):
        """Return the link of the current track.

        :raises EmptyQueue: when the queue holds no tracks.
        """
        if not self._queue:
            raise EmptyQueue
        if self.pos <= len(self._queue) - 1:
            print(self._queue[self.pos])
            return self._queue[self.pos]["song_link"]
        # NOTE(review): implicitly returns None when pos is past the end.

    def add_to_queue(self, *args):
        """Append track dicts; fires "on_not_empty" when the queue was empty."""
        prev_len = len(self._queue)
        self._queue.extend(args)
        if not prev_len:
            self.event_listeners["on_not_empty"](self)

    def get_next_track(self, error):
        """Advance to the next position and notify "on_pos_change" listeners.

        Also used as the voice client's ``after`` callback, hence the unused
        ``error`` parameter.

        :raises EmptyQueue: when the queue holds no tracks.
        """
        if not self._queue:
            raise EmptyQueue
        self.pos += 1
        self.queuePOS += 1
        if self.pos < 0:
            return None
        elif self.pos > len(self._queue) - 1:
            if self.mode:
                # Loop mode: wrap around to the first track.
                self.pos = 0
                self.queuePOS = 0
            else:
                self.clear_q()
        # NOTE(review): this also fires after clear_q() above, and the
        # registered callback calls get_curr_track, which then raises
        # EmptyQueue -- confirm whether that is intended.
        self.event_listeners["on_pos_change"](self)

    def progress_queue(self, option):
        """Move the paginated view by 10 entries; clamps at both ends."""
        if self.queuePOS + 10*option > len(self._queue) - 1 or self.queuePOS + 10*option < 0:
            if self.queuePOS + 10*option < 0:
                self.queuePOS = 0
                return self.queuePOS
            return self.queuePOS
        else:
            self.queuePOS += 10*option
            return self.queuePOS

    @property
    def mix(self):
        # Shuffle everything after the current track, keeping played tracks
        # (and the current one) in place.  NOTE(review): side-effecting
        # property -- callers invoke it as ``queue.mix`` with no call.
        firstel = []
        firstel = [i for i in self._queue[:self.pos+1]]
        temp = self.pos + 1
        self._queue = self._queue[temp:]
        shuffle(self._queue)
        firstel.extend(self._queue)
        self._queue = firstel

    def clear_q(self):
        """Drop all tracks and reset both cursors."""
        self._queue.clear()
        self.pos = 0
        self.queuePOS = 0

    @property
    def do_loop(self):
        # Toggle loop mode.  NOTE(review): side-effecting property, like mix.
        if not self.mode:
            self.mode = True
        else:
            self.mode = False

    def on_event(self, event, clb):
        """Register ``clb`` for ``event`` ("on_pos_change" or "on_not_empty").

        :raises TempExc: for any other event name.
        """
        events = ["on_pos_change", "on_not_empty"]
        if event not in events:
            raise TempExc
        self.event_listeners[event] = clb
class MusicB(commands.Cog):
    """Music cog: resolves YouTube/Spotify queries to streams and manages the
    per-guild playback Queue stored in ``bot.glbs.queue``."""

    def __init__(self, bot):
        self.bot = bot
        self.queue = None  # re-bound to the invoking guild's Queue per command

    @staticmethod
    def download_inf(url):
        """Resolve a YouTube watch URL to a raw audio stream URL via youtube_dl."""
        ytdl = YoutubeDL()
        mp3 = ytdl.extract_info(url, download=False)
        return mp3["formats"][0]["url"]

    @commands.Cog.listener()
    async def on_voice_state_update(self, member, before, after):
        """Disconnect when the last human member leaves the voice channel."""
        if not member.bot and after.channel is None:
            if not [m for m in before.channel.members if not m.bot]:
                await member.guild.voice_client.disconnect()

    async def disconn(self, error):
        # NOTE(review): ``self.voice_client`` is never set on this cog; this
        # looks like an unused/broken ``after`` callback -- confirm before use.
        await self.voice_client.disconnect()

    @commands.command(name="leave", aliases=["l", "quit"])
    async def leave_comm(self, ctx):
        """Clear the queue and disconnect from voice.

        Bug fixes: the original called the ``clear`` command object without
        awaiting it, and used ``self.ctx`` plus a ``diconnect`` typo.
        """
        queue = bot.glbs.queue[ctx.guild.id]
        queue.clear_q()
        await ctx.guild.voice_client.disconnect()

    async def get_song_name(self, query):
        """Return the title of the top YouTube result for ``query``."""
        video_output = VideosSearch(query, limit=1)
        return video_output.result()["result"][0]["title"] if video_output is not None else None

    async def search_song(self, query):
        """Return the link of the top YouTube result for ``query``."""
        video_output = VideosSearch(query, limit=1)
        return video_output.result()["result"][0]["link"] if video_output is not None else None

    async def spotify_parse(self, ctx, url):
        """Return "track - artist" strings for each item of a Spotify playlist."""
        playlist_data = sp.playlist(url)
        # Iterate the returned items directly instead of the original
        # unbounded index loop that relied on IndexError to terminate.
        return [
            item['track']["name"] + " - " + item['track']["album"]["artists"][0]["name"]
            for item in playlist_data['tracks']["items"]
        ]

    async def spotify_song(self, ctx, url):
        """Return "artist - track" for a single Spotify track URL."""
        song = sp.track(url)
        return str(song["album"]["artists"][0]["name"]) + " - " + str(song["name"])

    @commands.command(name="play", aliases=["pl", "p"])
    async def play(self, ctx, *args):
        """Resolve ``args`` (URL or search query) and enqueue the result(s).

        Handles YouTube playlists, Spotify playlists, Spotify tracks, and
        free-text queries, in that order.

        :raises NotVoiceChannnel: when the author is not in a voice channel.
        :raises TrackNotFound: when no playable track could be resolved.
        """
        self.queue = bot.glbs.queue[ctx.guild.id]
        if not self.queue.listeners_set:
            set_listeners(self.queue)
            self.queue.listeners_set = True
        url = " ".join(args)
        if self.queue.vc is None:
            if ctx.author.voice is None:
                raise NotVoiceChannnel
            self.queue.vc = await ctx.author.voice.channel.connect()
        track = None
        if re.match(Youtube_URL, url):
            try:
                playlist = Playlist(url)
                songs = [{"song_link": v["link"], "song_title": v["title"]}
                         for v in playlist.videos]
                self.queue.add_to_queue(*songs)
                # Bug fix: return after a successful playlist add; the
                # original fell through and raised TrackNotFound.
                return
            except Exception:
                # Bug fix: the original set Track = url here but never built
                # ``dc``, causing a NameError at add_to_queue below.
                track = url
                entry = {"song_link": track, "song_title": url}
        elif re.match(Spotify_URL, str(url)):
            entries = []
            for name in await self.spotify_parse(ctx, url):
                if (song := await self.search_song(name)):
                    entries.append({"song_link": song, "song_title": name})
            self.queue.add_to_queue(*entries)
            return
        elif re.match(Spotify_song_url, str(url)):
            # Resolve the Spotify track once (the original called the
            # Spotify API twice for the same URL).
            query = await self.spotify_song(ctx, url)
            track = await self.search_song(query)
            entry = {"song_link": track, "song_title": await self.get_song_name(query)}
        else:
            track = await self.search_song(url)
            entry = {"song_link": track, "song_title": await self.get_song_name(url)}
        if track is None:
            raise TrackNotFound
        self.queue.add_to_queue(entry)

    @commands.command(name="skip", aliases=["sk"])
    async def skip(self, ctx):
        """Stop the current track and advance the queue."""
        self.queue = bot.glbs.queue[ctx.guild.id]
        self.queue.vc.stop()
        self.queue.get_next_track(None)

    @commands.command()
    async def mix(self, ctx):
        """Shuffle the not-yet-played part of the queue."""
        self.queue = bot.glbs.queue[ctx.guild.id]
        self.queue.mix

    @commands.command()
    async def loop(self, ctx):
        """Toggle queue loop mode."""
        self.queue = bot.glbs.queue[ctx.guild.id]
        self.queue.do_loop

    @commands.command(name="q", aliases=["queue"])
    async def queue_com(self, ctx):
        """Show a 10-entry page of the queue with reaction-based scrolling.

        :raises EmptyQueue: when the queue holds no tracks.
        """
        self.queue = bot.glbs.queue[ctx.guild.id]

        def _check(r, u):
            # Accept scroll reactions on our message from any human member.
            return (
                r.emoji in SCROLL.keys()
                and u in [m for m in ctx.guild.members if not m.bot]
                and r.message.id == msg.id
            )

        if self.queue.is_empty:
            raise EmptyQueue
        q = ""
        for i in range(self.queue.queuePOS, self.queue.queuePOS + 10)[:len(self.queue._queue)]:
            if i <= len(self.queue._queue) - 1:
                q += f'{i+1} - ' + str(self.queue._queue[i]["song_title"])
                q += "\n"
        msg = await ctx.send(f'```yaml\n{str(q)}```')
        for option in SCROLL.keys():
            await msg.add_reaction(option)
        reaction, _ = await self.bot.wait_for("reaction_add", check=_check)
        if not q:
            return
        await msg.delete()
        self.queue.progress_queue(SCROLL[reaction.emoji])
        await self.queue_com(ctx)

    @queue_com.error
    async def queue_exc(self, ctx, exc):
        if isinstance(exc, EmptyQueue):
            msg = await ctx.send("```yaml\nThe queue is empty. Play a song/playlist by typing 1play <URL>/<query> ;D```")

    @play.error
    async def play_exc(self, ctx, exc):
        # Bug fix: this was also named ``queue_exc``, shadowing the handler
        # above in the class namespace.
        if isinstance(exc, TrackNotFound):
            msg = await ctx.send("```yaml\nDidnt find the track :(```")

    @commands.command()
    async def test_m(self, ctx):
        """Run the inline Test suite and report success."""
        t = Test()
        if not await t.test_music():
            await ctx.send("tests passed")

    @commands.command()
    async def clear(self, ctx):
        """Empty this guild's queue."""
        self.queue = bot.glbs.queue[ctx.guild.id]
        self.queue.clear_q()
class Test(unittest.TestCase):
    # NOTE(review): run manually via the ``test_m`` command; ``test_music`` is
    # a coroutine, and the expected values depend on live YouTube/Spotify
    # responses, so these assertions are inherently flaky.
    async def test_music(self):
        m = MusicB(discord.Client())
        self.assertEqual(await m.search_song("azis"), "https://www.youtube.com/watch?v=j2IVk3SG7l8")
        self.assertEqual(await m.spotify_parse(ctx=None, url="https://open.spotify.com/playlist/3gm3JRpm2bQvnBPn7EEn5a?si=c1469ed9eb8547f4"), ["Immortal - 21 Savage"])
def set_listeners(queue):
    """Wire playback callbacks onto ``queue`` so that whenever it becomes
    non-empty or advances, the current track starts playing."""
    def _play_current(q):
        stream = MusicB.download_inf(q.get_curr_track)
        source = FFmpegPCMAudio(stream, **ffmpeg_opts)
        q.vc.play(source, after=q.get_next_track)

    for event in ("on_not_empty", "on_pos_change"):
        queue.on_event(event, _play_current)
def setup(bot):
    """Register the MusicB cog on ``bot``.

    Bug fix: removed a stray debug ``print()`` left after registration.
    """
    bot.add_cog(MusicB(bot))
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,253
|
Venko15/MLT
|
refs/heads/master
|
/bot/dsBot.py
|
from pathlib import Path
from discord import channel, guild
from discord.ext.commands.core import command
import pymongo
import discord
from discord.ext import commands
import asyncio
import datetime
import random
from discord import Client
from dotenv import load_dotenv
import os
import dns
import unittest
load_dotenv()
import bot.glbs
from .cogs.musicv3 import Queue
class MLT(commands.Bot):
    """Discord levelling bot backed by MongoDB (per-guild XP, levels, level roles).

    Indentation was reconstructed from a flattened source dump — nesting of
    add_role/on_message should be verified against the original repository.
    """

    def __init__(self):
        # Cog module names discovered on disk; loaded later in setup().
        self._cogs = [p.stem for p in Path(".").glob("./bot/cogs/*.py")]
        # SECURITY(review): database credentials are hard-coded in source;
        # they should be moved to environment variables / a secrets store.
        self.client = pymongo.MongoClient(
            "mongodb+srv://MLT:Venkoto%4015@mlt.kinqt.mongodb.net/MLT?retryWrites=true&w=majority")
        self.db = self.client["MLT"]        # per-guild XP collections
        self.channelsend = "level-up"       # channel name for level-up announcements
        self.Token = os.environ["Token"]    # Discord bot token from the environment
        self.rl = self.client["MLTROLES"]   # per-guild level-role collections
        self.prefix = "1"
        self.roles = []                     # scratch list reused across add_role calls
        super().__init__(command_prefix=self.prefix,
                         case_insensitive=True, intents=discord.Intents.all())
        self.remove_command("help")

    def setup(self):
        """Load every cog module found under bot/cogs."""
        print("Running setup...")
        for cog in self._cogs:
            self.load_extension(f"bot.cogs.{cog}")
            print(f" Loaded `{cog}` cog.")

    async def on_ready(self):
        # One music queue per guild, shared through the bot.glbs module.
        bot.glbs.queue = {x.id:Queue() for x in self.guilds}
        print(bot.glbs.queue)
        print("MLT logged in ")

    async def prefix(self, bot, msg):
        # NOTE(review): this method shadows the `self.prefix` string attribute
        # set in __init__ — confirm which one discord.py actually uses.
        return commands.when_mentioned_or(str(self.prefix))(bot, msg)

    def run(self):
        """Load cogs, then start the bot with the configured token."""
        self.setup()
        super().run(self.Token, reconnect=True)

    async def on_connect(self):
        print(f" Connected to Discord (latency: {self.latency*1000:,.0f} ms).")

    async def process_commands(self, msg):
        # Only invoke recognized commands; silently ignore everything else.
        ctx = await self.get_context(msg, cls=commands.Context)
        if ctx.command is not None:
            await self.invoke(ctx)

    async def is_level_up(self, msg, member):
        """True when the member's XP has reached the level threshold."""
        if member["xp"] >= member["threshold"]:
            return True
        return False

    async def add_role(self, msg,member):
        """Grant the highest configured level role the member qualifies for.

        NOTE(review): nesting reconstructed from flattened source — verify.
        """
        rlcol=self.rl[str(msg.guild.id)]
        myres = rlcol.find().sort("lvl")
        for r in myres:
            if r not in self.roles:
                self.roles.append(r)
        for i in range(len(self.roles)-1):
            try:
                if member["lvl"] >= self.roles[i]["lvl"]:
                    if i == len(self.roles)-1:
                        print(self.roles[i]["lvl"])
                        role = discord.utils.get(msg.guild.roles, name=str(self.roles[i]["name"]))
                        await msg.author.add_roles(role)
                    elif member["lvl"] >= self.roles[i+1]["lvl"]:
                        # The next (higher) role also qualifies; keep scanning.
                        continue
                    else:
                        role = discord.utils.get(msg.guild.roles, name=str(self.roles[i]["name"]))
                        try:
                            await msg.author.add_roles(role)
                            self.roles.clear()
                        except AttributeError:
                            # Role name not found in the guild; reset and move on.
                            self.roles.clear()
                            pass
            except IndexError:
                break

    async def on_message(self, msg):
        """Award XP per message (rate-limited), announce level-ups, run commands."""
        col = self.db[str(msg.guild.id)]
        # Ensure the announcement channel exists.
        if (ch := discord.utils.get(msg.guild.text_channels, name=self.channelsend)) is None:
            guild = msg.guild
            # NOTE(review): `ch` stays None after creation, so a level-up inside
            # this same event would fail at ch.send(...) — confirm.
            await guild.create_text_channel(self.channelsend)
        if not msg.author.bot:
            if msg.content == "1test_m":
                t = Test()
                if not await t.test_main():
                    await msg.channel.send("tests passed")
            a = datetime.datetime.today()
            query = {"name": str(msg.author.id)}
            if (member := col.find_one(query)) is None:
                # First message from this member: create their XP record.
                ins = {"name": str(msg.author.id), "lvl": 0,
                       "xp": 0, "lastmsg": a, "threshold": 10}
                col.insert_one(ins)
            else:
                # Only grant XP when the last counted message is > 1.75 min old.
                b = datetime.timedelta(minutes=1.75)
                if a - member["lastmsg"] > b:
                    member["lastmsg"] = a
                    member["xp"] += random.randint(5,15)
                    newval = {"$set": {"xp": int(member["xp"]), "lastmsg": a}}
                    await self.add_role(msg,member)
                    if await self.is_level_up(msg, member):
                        await ch.send(f'<@{str(msg.author.id)}> just leveled up. Reached level {int(member["lvl"])+1}')
                        newval = {"$set": {"lvl": int(member["lvl"]+1),
                                           "xp": int(member["xp"] - member["threshold"]), "lastmsg": a, "threshold": abs((member["lvl"]-1 + member["lvl"])*15)}}
                    # NOTE(review): Collection.update() is deprecated in pymongo;
                    # update_one() is the modern equivalent.
                    col.update(query, newval)
            await self.process_commands(msg)
class Test(unittest.TestCase):
    """Ad-hoc in-bot self test, invoked from MLT.on_message via the "1test_m" message."""

    async def test_main(self):
        # NOTE(review): instantiating MLT opens a Mongo client and reads
        # os.environ["Token"]; consider injecting a stub for a real unit test.
        m = MLT()
        # BUG fix: the second positional argument of assertTrue is the failure
        # *message*; the old call passed a meaningless literal True there.
        self.assertTrue(await m.is_level_up(None, {"xp": 20, "threshold": 15}))
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,254
|
Venko15/MLT
|
refs/heads/master
|
/env/Lib/site-packages/youtubesearchpython/__init__.py
|
from youtubesearchpython.search import Search, VideosSearch, ChannelsSearch, PlaylistsSearch, CustomSearch
from youtubesearchpython.extras import Video, Playlist, Suggestions
from youtubesearchpython.streamurlfetcher import StreamURLFetcher
from youtubesearchpython.internal.constants import *

# Package identity metadata.
__title__ = 'youtube-search-python'
__version__ = '1.4.5'
__author__ = 'alexmercerind'
__license__ = 'MIT'

''' Deprecated. Present for legacy support. '''
# Legacy aliases kept so pre-1.x imports keep working.
from youtubesearchpython.legacy import SearchVideos, SearchPlaylists
from youtubesearchpython.legacy import SearchVideos as searchYoutube
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,220,255
|
Venko15/MLT
|
refs/heads/master
|
/env/Lib/site-packages/httpcore/_async/http11.py
|
from ssl import SSLContext
from typing import AsyncIterator, List, Tuple, Union, cast
import h11
from .._backends.auto import AsyncSocketStream
from .._bytestreams import AsyncIteratorByteStream
from .._exceptions import LocalProtocolError, RemoteProtocolError, map_exceptions
from .._types import URL, Headers, TimeoutDict
from .._utils import get_logger
from .base import AsyncByteStream, ConnectionState
from .http import AsyncBaseHTTPConnection
# Union of every h11 event type this connection can send or receive.
H11Event = Union[
    h11.Request,
    h11.Response,
    h11.InformationalResponse,
    h11.Data,
    h11.EndOfMessage,
    h11.ConnectionClosed,
]

# Module-level logger; `.trace(...)` calls below indicate it is httpcore's
# trace-level-capable wrapper from .._utils.
logger = get_logger(__name__)
class AsyncHTTP11Connection(AsyncBaseHTTPConnection):
    """A single HTTP/1.1 client connection driven by the h11 sans-IO state machine.

    (Indentation reconstructed from a flattened source dump; matches the
    upstream httpcore layout.)
    """

    # Socket read chunk size used whenever h11 reports NEED_DATA.
    READ_NUM_BYTES = 64 * 1024

    def __init__(self, socket: AsyncSocketStream, ssl_context: SSLContext = None):
        self.socket = socket
        self.ssl_context = SSLContext() if ssl_context is None else ssl_context
        self.h11_state = h11.Connection(our_role=h11.CLIENT)
        self.state = ConnectionState.ACTIVE

    def __repr__(self) -> str:
        return f"<AsyncHTTP11Connection state={self.state}>"

    def info(self) -> str:
        return f"HTTP/1.1, {self.state.name}"

    def get_state(self) -> ConnectionState:
        return self.state

    def mark_as_ready(self) -> None:
        # Promote an idle keep-alive connection so the pool can reuse it.
        if self.state == ConnectionState.IDLE:
            self.state = ConnectionState.READY

    async def handle_async_request(
        self,
        method: bytes,
        url: URL,
        headers: Headers,
        stream: AsyncByteStream,
        extensions: dict,
    ) -> Tuple[int, Headers, AsyncByteStream, dict]:
        """Send one request and return (status, headers, body stream, extensions)."""
        timeout = cast(TimeoutDict, extensions.get("timeout", {}))

        self.state = ConnectionState.ACTIVE

        await self._send_request(method, url, headers, timeout)
        await self._send_request_body(stream, timeout)
        (
            http_version,
            status_code,
            reason_phrase,
            headers,
        ) = await self._receive_response(timeout)
        # Body is streamed lazily; _response_closed runs when the caller closes it.
        response_stream = AsyncIteratorByteStream(
            aiterator=self._receive_response_data(timeout),
            aclose_func=self._response_closed,
        )
        extensions = {
            "http_version": http_version,
            "reason_phrase": reason_phrase,
        }
        return (status_code, headers, response_stream, extensions)

    async def start_tls(
        self, hostname: bytes, timeout: TimeoutDict = None
    ) -> AsyncSocketStream:
        # Upgrade the underlying socket to TLS (used for tunnelled HTTPS).
        timeout = {} if timeout is None else timeout
        self.socket = await self.socket.start_tls(hostname, self.ssl_context, timeout)
        return self.socket

    async def _send_request(
        self, method: bytes, url: URL, headers: Headers, timeout: TimeoutDict
    ) -> None:
        """
        Send the request line and headers.
        """
        logger.trace("send_request method=%r url=%r headers=%s", method, url, headers)
        _scheme, _host, _port, target = url
        with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
            event = h11.Request(method=method, target=target, headers=headers)
        await self._send_event(event, timeout)

    async def _send_request_body(
        self, stream: AsyncByteStream, timeout: TimeoutDict
    ) -> None:
        """
        Send the request body.
        """
        # Send the request body.
        async for chunk in stream:
            logger.trace("send_data=Data(<%d bytes>)", len(chunk))
            event = h11.Data(data=chunk)
            await self._send_event(event, timeout)

        # Finalize sending the request.
        event = h11.EndOfMessage()
        await self._send_event(event, timeout)

    async def _send_event(self, event: H11Event, timeout: TimeoutDict) -> None:
        """
        Send a single `h11` event to the network, waiting for the data to
        drain before returning.
        """
        bytes_to_send = self.h11_state.send(event)
        await self.socket.write(bytes_to_send, timeout)

    async def _receive_response(
        self, timeout: TimeoutDict
    ) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]:
        """
        Read the response status and headers from the network.
        """
        while True:
            event = await self._receive_event(timeout)
            if isinstance(event, h11.Response):
                break

        http_version = b"HTTP/" + event.http_version

        # h11 version 0.11+ supports a `raw_items` interface to get the
        # raw header casing, rather than the enforced lowercase headers.
        headers = event.headers.raw_items()

        return http_version, event.status_code, event.reason, headers

    async def _receive_response_data(
        self, timeout: TimeoutDict
    ) -> AsyncIterator[bytes]:
        """
        Read the response data from the network.
        """
        while True:
            event = await self._receive_event(timeout)
            if isinstance(event, h11.Data):
                logger.trace("receive_event=Data(<%d bytes>)", len(event.data))
                yield bytes(event.data)
            elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):
                logger.trace("receive_event=%r", event)
                break

    async def _receive_event(self, timeout: TimeoutDict) -> H11Event:
        """
        Read a single `h11` event, reading more data from the network if needed.
        """
        while True:
            with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):
                event = self.h11_state.next_event()

            if event is h11.NEED_DATA:
                data = await self.socket.read(self.READ_NUM_BYTES, timeout)

                # If we feed this case through h11 we'll raise an exception like:
                #
                #     httpcore.RemoteProtocolError: can't handle event type
                #     ConnectionClosed when role=SERVER and state=SEND_RESPONSE
                #
                # Which is accurate, but not very informative from an end-user
                # perspective. Instead we handle this case distinctly and treat
                # it as a ConnectError.
                if data == b"" and self.h11_state.their_state == h11.SEND_RESPONSE:
                    msg = "Server disconnected without sending a response."
                    raise RemoteProtocolError(msg)

                self.h11_state.receive_data(data)
            else:
                assert event is not h11.NEED_DATA
                break
        return event

    async def _response_closed(self) -> None:
        logger.trace(
            "response_closed our_state=%r their_state=%r",
            self.h11_state.our_state,
            self.h11_state.their_state,
        )
        if (
            self.h11_state.our_state is h11.DONE
            and self.h11_state.their_state is h11.DONE
        ):
            # Both sides completed the cycle cleanly: recycle for keep-alive.
            self.h11_state.start_next_cycle()
            self.state = ConnectionState.IDLE
        else:
            await self.aclose()

    async def aclose(self) -> None:
        if self.state != ConnectionState.CLOSED:
            self.state = ConnectionState.CLOSED

            if self.h11_state.our_state is h11.MUST_CLOSE:
                event = h11.ConnectionClosed()
                self.h11_state.send(event)

            await self.socket.aclose()

    def is_socket_readable(self) -> bool:
        return self.socket.is_readable()
|
{"/dsBot.py": ["/music.py", "/memes.py"], "/launcher.py": ["/bot/__init__.py"], "/bot/__init__.py": ["/bot/dsBot.py"], "/bot/cogs/musicv3.py": ["/bot/glbs.py"], "/bot/dsBot.py": ["/bot/glbs.py", "/bot/cogs/musicv3.py"]}
|
39,254,963
|
Muhammad-Elgendi/Athena
|
refs/heads/production
|
/assistant/views.py
|
from .models import Question,Choice
from django.http import JsonResponse
from django.template import loader
from django.http import HttpResponse, HttpResponseRedirect , Http404
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.views.decorators.csrf import csrf_exempt
import os, sys
from os import listdir
from os.path import isfile, join,split
from .modules.FaceRecognizer import FaceRecognizer
from .modules.Chatbot import Chatbot
from .modules.TextRecognizer import TextRecognizer
import json
import urllib.request
import shutil
from urllib.parse import urlparse
# from django.core.cache import cache
# import _pickle as cPickle
# Create your views here.
def index(request):
    """Plain-text health-check landing view."""
    return HttpResponse("Hello, world. My name is Athena.")
@csrf_exempt
def add_person(request):
    """Download a face image from a client-supplied URL into the dataset and retrain.

    Dataset files follow the convention ``<name>.<id>.<serial>.<ext>``.
    """
    # SECURITY(review): hard-coded shared auth key in source; move to settings/env.
    if request.method == 'POST' and 'imgUrl' in request.POST and 'name' in request.POST and 'auth' in request.POST and request.POST['auth'] == 'GxsQXvHY5XMo@4%':
        imgUrl = request.POST['imgUrl']
        urlParser = urlparse(imgUrl)
        fileName = os.path.basename(urlParser.path)
        name = request.POST['name']
        # Scan the existing dataset to learn each person's id and highest serial.
        imagePaths = [join(settings.BASE_DIR+'/media/dataset',f) for f in listdir(settings.BASE_DIR+'/media/dataset')]
        Persons = {}
        serials = {}
        for imagePath in imagePaths:
            Id = int(split(imagePath)[-1].split(".")[1])
            nameOfPerson = split(imagePath)[-1].split(".")[0]
            Persons[nameOfPerson] = Id
            serial = int(split(imagePath)[-1].split(".")[2])
            serials[nameOfPerson] = max(1, serials.get(nameOfPerson,1) ,serial)
        if name not in Persons.keys():
            # New person: allocate the next free id, serial starts at 1.
            maxId = max(Persons.values()) if Persons else 0
            imgName = name+'.'+str(maxId+1)+'.1.'+split(fileName)[-1].split(".")[1]
        else:
            # Known person: keep their id, bump the serial.
            imgName = name+'.'+str(Persons[name])+'.'+str(serials[name]+1)+'.'+split(fileName)[-1].split(".")[1]
        # SECURITY(review): fetching an arbitrary client-supplied URL (SSRF risk).
        with urllib.request.urlopen(imgUrl) as response, open(settings.BASE_DIR+'/media/dataset/'+imgName, 'wb+') as out_file:
            shutil.copyfileobj(response, out_file)
        uploaded_file_url = settings.BASE_DIR+'/media/dataset/'+imgName
        # Retrain the recognizer with the enlarged dataset.
        faceRecognizer = FaceRecognizer(settings.BASE_DIR+'/assistant/modules')
        faceRecognizer.train_and_save(settings.BASE_DIR+'/media/dataset')
        return JsonResponse({'uploaded_file_url': uploaded_file_url , 'name' : name ,'status' : 'success'})
    else:
        return JsonResponse({'Error': "Please specify a name and the url of image and enter your auth key" ,'status' : 'fail'})
@csrf_exempt
def recognize(request):
    """Download an image from a client-supplied URL and run face recognition on it."""
    # SECURITY(review): hard-coded shared auth key; client-controlled URL fetch
    # and file name written to disk (SSRF / path concerns).
    if request.method == 'POST' and 'imgUrl' in request.POST and 'auth' in request.POST and request.POST['auth'] == 'GxsQXvHY5XMo@4%':
        imgUrl = request.POST['imgUrl']
        urlParser = urlparse(imgUrl)
        fileName = os.path.basename(urlParser.path)
        # Download the file from `url` and save it locally under `file_name`:
        with urllib.request.urlopen(imgUrl) as response, open(settings.BASE_DIR+'/media/uploads/'+fileName, 'wb+') as out_file:
            shutil.copyfileobj(response, out_file)
        uploaded_file_url = settings.BASE_DIR+'/media/uploads/'+fileName
        # Run the LBPH recognizer against the current dataset.
        faceRecognizer = FaceRecognizer(settings.BASE_DIR+'/assistant/modules')
        faces = faceRecognizer.recognize_faces(join(settings.BASE_DIR+'/media/uploads',fileName),settings.BASE_DIR+'/media/dataset')
        # NOTE(review): double-encoded — `faces` becomes a JSON *string* inside
        # the JSON response; confirm clients expect that.
        faces = json.dumps(faces)
        return JsonResponse({'uploaded_file_url': uploaded_file_url , 'faces' : faces ,'status' : 'success'})
    else:
        return JsonResponse({'Error': "Please specify the Url of image and your auth key" ,'status' : 'fail'})
@csrf_exempt
def chat(request):
    """Return the chatbot reply and detected sentiment for a user message."""
    # SECURITY(review): hard-coded shared auth key in source.
    if request.method == 'POST' and 'msg' in request.POST and 'auth' in request.POST and request.POST['auth'] == 'GxsQXvHY5XMo@4%':
        user_msg = request.POST['msg']
        # NOTE(review): the Chatbot (corpus read + TF-IDF state) is rebuilt on
        # every request; the commented-out cache code suggests this was known.
        chatbot = Chatbot(settings.BASE_DIR+'/assistant/modules')
        reply , sentiment = chatbot.generate_reply(user_msg)
        return JsonResponse({'Reply': reply , 'sentiment' : sentiment ,'status' : 'success'})
    else:
        return JsonResponse({'Error': "Please specify the msg of user and your auth key" ,'status' : 'fail'})
@csrf_exempt
def ocr(request):
    """Download an image and return the text recognized in it (per *lang*)."""
    # SECURITY(review): hard-coded shared auth key; client-controlled URL fetch.
    if request.method == 'POST' and 'img' in request.POST and 'lang' in request.POST and 'auth' in request.POST and request.POST['auth'] == 'GxsQXvHY5XMo@4%':
        img_url = request.POST['img']
        lang = request.POST['lang']
        urlParser = urlparse(img_url)
        fileName = os.path.basename(urlParser.path)
        # Download the file from `url` and save it locally under `file_name`:
        with urllib.request.urlopen(img_url) as response, open(settings.BASE_DIR+'/media/uploads/'+fileName, 'wb+') as out_file:
            shutil.copyfileobj(response, out_file)
        recognizer = TextRecognizer(settings.BASE_DIR+'/assistant/modules')
        results = recognizer.recognize(settings.BASE_DIR+'/media/uploads/'+fileName,lang = lang)
        # Keep only the recognized strings; bounding boxes are discarded.
        texts = []
        for ((startX, startY, endX, endY), text) in results:
            texts.append(text)
        return JsonResponse({'Text': texts ,'status' : 'success'})
    else:
        return JsonResponse({'Error': "Please specify the img_url of the image with the , lang parameter and your auth key" ,'status' : 'fail'})
    pass
|
{"/assistant/views.py": ["/assistant/modules/FaceRecognizer.py", "/assistant/modules/Chatbot.py"], "/assistant/modules/Chatbot.py": ["/assistant/modules/EmotionAnalyser.py"]}
|
39,254,964
|
Muhammad-Elgendi/Athena
|
refs/heads/production
|
/assistant/modules/EmotionAnalyser.py
|
# -*- coding: utf-8 -*-
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
import nltk
from nltk.corpus import stopwords
from nltk import ngrams
from nltk.tokenize import TweetTokenizer
import csv
import _pickle as cPickle
class EmotionAnalyser:
    """Singleton NLTK Naive-Bayes emotion classifier over tweet-style text.

    Obtain the shared instance via ``EmotionAnalyser.getInstance(base_path)``;
    constructing a second instance directly raises.
    """

    __instance = None

    @staticmethod
    def getInstance(base_path):
        """Static access method returning the one shared instance."""
        if EmotionAnalyser.__instance is None:  # idiom fix: `is None`, not `== None`
            EmotionAnalyser(base_path)
        return EmotionAnalyser.__instance

    def __init__(self, base_path):
        if EmotionAnalyser.__instance is not None:
            raise Exception("This class is a singleton!")
        EmotionAnalyser.__instance = self
        self.base_path = base_path
        self.stoplist = set(stopwords.words("english"))
        self.punctuation = ['.',',','\'','\"',':',';','...','-','–','—','(',')','[',']','«','»']
        self.tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)

    def extract_features(self, statement):
        """Bag-of-words feature dict for *statement*, minus stop words/punctuation."""
        word_list = [word for word in self.tokenizer.tokenize(statement)
                     if word not in self.stoplist and word not in self.punctuation]
        return dict([(word, True) for word in word_list])

    def train(self):
        """Train a NaiveBayes classifier from text_emotion.csv and pickle it.

        CSV rows are (emotion_label, text); the first row is a header.
        """
        # BUG fix: the CSV and pickle file handles were never closed; use `with`.
        with open(self.base_path + '/text_emotion.csv') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            trainDataset = {}
            for index, row in enumerate(csv_reader):
                if index != 0:  # skip the header row
                    # Both original branches appended; setdefault is equivalent.
                    trainDataset.setdefault(row[0], []).append(row[1])
        # 80/20 train/test split per emotion class.
        features = {}
        thresholds = {}
        spilt_factor = 0.8
        features_train = []
        features_test = []
        for emotion in trainDataset:
            features[emotion] = [(self.extract_features(statement), emotion)
                                 for statement in trainDataset[emotion]]
            thresholds[emotion] = int(spilt_factor * len(features[emotion]))
            features_train.extend(features[emotion][:thresholds[emotion]])
            features_test.extend(features[emotion][thresholds[emotion]:])
        # Diagnostics only when this module is run directly as a script.
        if __name__ == "__main__":
            print("Number of training records:", len(features_train))
            print("Number of test records:", len(features_test))
        classifier = NaiveBayesClassifier.train(features_train)
        if __name__ == "__main__":
            print("Accuracy of the classifier:", nltk.classify.util.accuracy(classifier, features_test))
        with open(self.base_path + '/classifier.pickle', 'wb') as f:
            cPickle.dump(classifier, f)

    def classify(self, statement, classifier=None):
        """Return ``(sentiment_label, probability)`` for *statement*.

        If *classifier* is None, the pickled model produced by train() is loaded.
        """
        if classifier is None:
            with open(self.base_path + '/classifier.pickle', 'rb') as f:
                classifier = cPickle.load(f)
        probdist = classifier.prob_classify(self.extract_features(statement))
        predected_sentiment = probdist.max()
        probability = round(probdist.prob(predected_sentiment), 2)
        return predected_sentiment, probability
if __name__ == "__main__":
    # Manual smoke-test left disabled; the module is normally imported.
    # analyser = EmotionAnalyser.getInstance(".")
    # # analyser.train()
    # sentiment , confidence = analyser.classify("I like being with you")
    # print(sentiment , "with confidence :" , confidence)
    pass
|
{"/assistant/views.py": ["/assistant/modules/FaceRecognizer.py", "/assistant/modules/Chatbot.py"], "/assistant/modules/Chatbot.py": ["/assistant/modules/EmotionAnalyser.py"]}
|
39,254,965
|
Muhammad-Elgendi/Athena
|
refs/heads/production
|
/assistant/urls.py
|
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL namespace used for reversing, e.g. reverse('assistant:chat').
app_name = 'assistant'

urlpatterns = [
    # ex: /assistant/
    path('', views.index, name='index'),
    # POST endpoints backing the assistant API (auth-key protected in the views).
    path('add',views.add_person, name='add_person'),
    path('recognize',views.recognize, name='recognize'),
    path('chat',views.chat, name='chat'),
    path('ocr',views.ocr, name='ocr'),
]
|
{"/assistant/views.py": ["/assistant/modules/FaceRecognizer.py", "/assistant/modules/Chatbot.py"], "/assistant/modules/Chatbot.py": ["/assistant/modules/EmotionAnalyser.py"]}
|
39,254,966
|
Muhammad-Elgendi/Athena
|
refs/heads/production
|
/assistant/modules/Chatbot.py
|
import nltk
from nltk.corpus import stopwords
import numpy as np
import random
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from .EmotionAnalyser import EmotionAnalyser
# first-time use only
# nltk.download('punkt')
# nltk.download('wordnet')
# nltk.download('stopwords')
class Chatbot:
    """TF-IDF retrieval chatbot over Corpus.txt with emotion-aware fallbacks."""

    def __init__(self, path):
        self.path = path
        # BUG fix: the corpus file handle was opened and never closed.
        with open(self.path+'/Corpus.txt','r', encoding='utf-8') as corpusFile:
            corpus = corpusFile.read()
        # converts to lowercase
        self.corpus = corpus.lower()
        # NOTE(review): sentence tokens are built from the *original-case* text;
        # LemNormalize lowercases later so matching still works — confirm intended.
        self.sent_tokens = nltk.sent_tokenize(corpus)
        # lemmatization
        self.lemmer = nltk.stem.WordNetLemmatizer()
        # translate table mapping every punctuation ordinal to None (strips it)
        self.remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)

    def LemTokens(self,tokens):
        """Lemmatize each token."""
        return [self.lemmer.lemmatize(token) for token in tokens]

    def LemNormalize(self,text):
        """Lowercase, strip punctuation and stop words, then tokenize+lemmatize."""
        stopEnglish = set(stopwords.words('english'))
        return self.LemTokens([token for token in nltk.word_tokenize(text.lower().translate(self.remove_punct_dict)) if token not in stopEnglish])

    def greeting(self,sentence):
        """Return a random canned greeting if *sentence* contains one, else None."""
        GREETING_INPUTS = ("welcome","hello", "hi", "greetings", "sup", "what's up","hey",)
        GREETING_RESPONSES = ["Welcome","Hi", "Hey", "*nods*", "Hi there", "Hello", "I am glad! You are talking to me"]
        for word in sentence.split():
            if word.lower() in GREETING_INPUTS:
                return random.choice(GREETING_RESPONSES)

    def handleEmotion(self,user_response,classfier = None):
        """Classify the user's emotion and return (empathetic reply, sentiment)."""
        analyser = EmotionAnalyser.getInstance(self.path)
        sentiment , confidence = analyser.classify(user_response,classfier)
        possitive = ['enthusiasm','fun','happiness','love','surprise','relief']
        negative = ['anger','boredom','hate','sadness','worry']
        nutral = ['empty','neutral']
        if sentiment in nutral:
            return "I am sorry! I don't understand you" , sentiment
        if sentiment in possitive:
            return random.choice(["I am happy for your "+sentiment,"Keep up your good feelings :)","Hooray!","Good for you","I am happy for you"]) , sentiment
        if sentiment in negative:
            return random.choice(["You aren't alone","Cheer up","I am sad for your "+sentiment,"It's ganna be okay","Sorry to hear that :(","It will be alright","It's bad for you to feel "+sentiment]) , sentiment

    def response(self,user_response):
        """Return the corpus sentence most similar to *user_response* (TF-IDF cosine)."""
        # Temporarily add the query so it is vectorized with the corpus;
        # generate_reply removes it afterwards (not thread-safe — confirm
        # single-threaded use per instance).
        self.sent_tokens.append(user_response)
        TfidfVec = TfidfVectorizer(tokenizer=self.LemNormalize)
        tfidf = TfidfVec.fit_transform(self.sent_tokens)
        vals = cosine_similarity(tfidf[-1], tfidf)
        idx=vals.argsort()[0][-2]
        flat = vals.flatten()
        flat.sort()
        req_tfidf = flat[-2]
        if(req_tfidf==0):
            return "I am sorry! I don't understand you"
        else:
            return self.sent_tokens[idx]

    def generate_reply(self,user_response,classfier = None):
        """Top-level dialogue policy: bye / thanks / greeting / corpus / emotion."""
        user_response=user_response.lower()
        if(user_response!='bye'):
            if(user_response=='thanks' or user_response=='thank you' ):
                return "You are welcome.." , "relief"
            else:
                greeting = self.greeting(user_response)
                if(greeting != None):
                    return greeting , "happiness"
                else:
                    result = self.response(user_response)
                    self.sent_tokens.remove(user_response)
                    if(result == "I am sorry! I don't understand you"):
                        return self.handleEmotion(user_response,classfier)
                    else :
                        return result , "neutral"
        else:
            return "Bye! take care.." , "relief"
|
{"/assistant/views.py": ["/assistant/modules/FaceRecognizer.py", "/assistant/modules/Chatbot.py"], "/assistant/modules/Chatbot.py": ["/assistant/modules/EmotionAnalyser.py"]}
|
39,254,967
|
Muhammad-Elgendi/Athena
|
refs/heads/production
|
/assistant/modules/EmotionAnalyser_scikit_learn.py
|
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split

# Scratch file: abandoned scikit-learn port of EmotionAnalyser.
# class EmotionAnalyser:
#     pass

# NOTE(review): instantiated but never used anywhere in the package.
count_vect = CountVectorizer()
{"/assistant/views.py": ["/assistant/modules/FaceRecognizer.py", "/assistant/modules/Chatbot.py"], "/assistant/modules/Chatbot.py": ["/assistant/modules/EmotionAnalyser.py"]}
|
39,254,968
|
Muhammad-Elgendi/Athena
|
refs/heads/production
|
/assistant/modules/FaceRecognizer.py
|
import cv2 as cv2
import numpy as np
import glob
from os import listdir
from os.path import isfile, join,split
class FaceRecognizer():
    """LBPH face trainer/recognizer using an OpenCV haar-cascade detector.

    Dataset file names follow the convention ``<name>.<id>.<serial>.<ext>``.
    """

    def __init__(self,base_path):
        # Accumulated training data: cropped face images and their numeric ids.
        self.faceSamples = []
        self.ids = []
        # Maps numeric id -> person name, parsed from dataset file names.
        self.persons = {}
        self.detector = cv2.CascadeClassifier(base_path+'/haarcascade_frontalface_default.xml')
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.base_path = base_path

    def get_faces(self,img):
        """Return the cropped face regions detected in *img*."""
        detections = self.detector.detectMultiScale(img, 1.3, 5)
        return [img[y:y + h, x:x + w] for (x, y, w, h) in detections]

    def getImagesAndLabels(self,path):
        """Scan *path*, collecting face crops, ids and the id->name mapping."""
        for image_path in (join(path, entry) for entry in listdir(path)):
            gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
            parts = split(image_path)[-1].split(".")
            person_id = int(parts[1])
            self.persons[person_id] = parts[0]
            # Every detected face in the sample contributes a training crop.
            for (x, y, w, h) in self.detector.detectMultiScale(gray, 1.3, 5):
                self.faceSamples.append(gray[y:y + h, x:x + w])
                self.ids.append(person_id)
        return self.faceSamples,self.ids,self.persons

    def setPersonsIds(self,path):
        """Populate self.persons from the dataset file names; return the mapping."""
        for entry in listdir(path):
            parts = split(join(path, entry))[-1].split(".")
            self.persons[int(parts[1])] = parts[0]
        return self.persons

    def train_and_save(self,path):
        """Train the LBPH model on the dataset under *path* and persist it."""
        self.getImagesAndLabels(path)
        self.recognizer.train(self.faceSamples, np.array(self.ids))
        self.recognizer.write(self.base_path+"/trained_recognizer.xml")

    def recognize_faces(self,img_path,dataset_path):
        """Return ``[index, name, confidence]`` for each face found in *img_path*."""
        self.recognizer.read(self.base_path+"/trained_recognizer.xml")
        gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        self.setPersonsIds(dataset_path)
        output = []
        for counter, face in enumerate(self.get_faces(gray), start=1):
            label, confidence = self.recognizer.predict(face)
            output.append([counter, self.persons[label], confidence])
        return output
|
{"/assistant/views.py": ["/assistant/modules/FaceRecognizer.py", "/assistant/modules/Chatbot.py"], "/assistant/modules/Chatbot.py": ["/assistant/modules/EmotionAnalyser.py"]}
|
39,265,170
|
LeResKP/pyramid_sqladmin
|
refs/heads/master
|
/setup.py
|
from setuptools import setup, find_packages
import sys, os
# Hack to prevent TypeError: 'NoneType' object is not callable error
# on exit of python setup.py test
try:
import multiprocessing
except ImportError:
pass
version = '0.1'

setup(
    # BUG fix: the distribution name contained a trailing slash
    # ('pyramid_sqladmin/'), which is not a valid project name.
    name='pyramid_sqladmin',
    version=version,
    description="Simple way to edit your SQLAlchemy objects in pyramid",
    long_description="""\
""",
    classifiers=[],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    keywords='',
    # Fix mojibake: the author name was UTF-8 bytes mis-decoded as Latin-1
    # ('Aur\xc3\xa9lien' rendered as 'AurÃ©lien').
    author='Aurélien Matouillot',
    author_email='a.matouillot@gmail.com',
    url='https://github.com/LeResKP/pyramid_sqladmin',
    license='MIT',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'sqla_declarative',
        'pyramid_mako',
        'pyramid',
        'SQLAlchemy',
        'zope.sqlalchemy',
        'tw2.core',
        'tw2.sqla',
        'mako',
    ],
    test_suite='nose.collector',
    tests_require=[
        'nose',
        'WebTest',
        'FormEncode',
        'BeautifulSoup',
        'strainer',
        'sieve',
    ],
    entry_points="""
# -*- Entry points: -*-
""",
)
|
{"/tests/test_init.py": ["/pyramid_sqladmin/__init__.py"]}
|
39,292,865
|
juliarobles/graffitisWeb
|
refs/heads/master
|
/graffitiApp/views.py
|
from django.shortcuts import render
from rest_framework import serializers
from rest_framework_mongoengine import viewsets
from .serializers import PublicacionSerializer, UsuarioSerializer, GraffitiSerializer
from .models import Publicacion, Usuario, Graffiti
from django.http import HttpResponse
from bson import ObjectId
# Create your views here.
def index(request):
    """Plain-text smoke-test endpoint."""
    return HttpResponse('Success')
class PublicacionViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for Publicacion documents, addressed by `id`."""
    lookup_field = 'id'
    serializer_class = PublicacionSerializer

    def get_queryset(self):
        # Evaluated per request so newly created documents are always visible.
        return Publicacion.objects.all()
class UsuarioViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for Usuario documents, addressed by `id`."""
    lookup_field = 'id'
    serializer_class = UsuarioSerializer

    def get_queryset(self):
        # Evaluated per request so newly created documents are always visible.
        return Usuario.objects.all()
class GraffitiViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for Graffiti documents, addressed by `id`."""
    lookup_field = 'id'
    serializer_class = GraffitiSerializer

    def get_queryset(self):
        # Evaluated per request so newly created documents are always visible.
        return Graffiti.objects.all()
def base_view(request):
    """Render the static graffiti listing shell (no context data)."""
    return render(request, 'graffiti_list.html')
def list_publicaciones_views(request):
    """Render every Publicacion in the listing template."""
    context = {"publicaciones": Publicacion.objects.all()}
    return render(request, 'publicaciones_list.html', context=context)
def publicaciones_detail_view(request, pk):
    """Render one Publicacion addressed by its Mongo ObjectId string *pk*."""
    publicacion = Publicacion.objects.get(pk=ObjectId(pk))
    context = {
        "publicacion": publicacion,
        # Pre-computed like count for the template.
        "meGusta": len(publicacion.meGusta),
    }
    return render(request, 'publicacion_detail.html', context=context)
|
{"/graffitiApp/admin.py": ["/graffitiApp/models.py"], "/graffitiApp/Apiviews/ComentarioAPIView.py": ["/graffitiApp/models.py", "/graffitiApp/Apiviews/PublicacionAPIView.py", "/graffitiApp/serializers.py"], "/graffitiApp/urls.py": ["/graffitiApp/views.py", "/graffitiApp/Apiviews/UserAPIView.py", "/graffitiApp/Apiviews/PublicacionAPIView.py", "/graffitiApp/Apiviews/GraffitiAPIView.py", "/graffitiApp/Apiviews/ComentarioAPIView.py"], "/ayuntamientoApp/urls.py": ["/ayuntamientoApp/apiviews.py"], "/graffitiApp/serializers.py": ["/graffitiApp/models.py"], "/clienteApp/urls.py": ["/clienteApp/views/views.py", "/clienteApp/views/loginViews.py"], "/graffitiApp/views.py": ["/graffitiApp/serializers.py", "/graffitiApp/models.py"], "/graffitiApp/Apiviews/PublicacionAPIView.py": ["/graffitiApp/models.py", "/graffitiApp/serializers.py", "/graffitiApp/Apiviews/UserAPIView.py"], "/graffitiApp/Apiviews/UserAPIView.py": ["/graffitiApp/models.py", "/graffitiApp/serializers.py"], "/graffitiApp/Apiviews/GraffitiAPIView.py": ["/graffitiApp/models.py", "/graffitiApp/Apiviews/PublicacionAPIView.py", "/graffitiApp/serializers.py"]}
|
39,297,862
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/pil_scratch.py
|
from PIL import Image
import PIL.ImageTk

# Scratch snippet: load a previously captured screenshot frame.
# NOTE(review): hard-coded Windows-style relative path; breaks on other machines.
capture_image = PIL.Image.open('Android_Web\\flask\\examples\\tutorial\\flaskr\\images\\2021_03_26___13_06_45.png')
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,863
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/OCR.py
|
import numpy as np
import cv2
import string
import sys
import os
import pytesseract
from pytesseract import Output
from threading import Thread, Event
from time import sleep
import struct
# import redis
import time
import preprocess
class OCR():
    """Continuously OCRs frames from an attached image source and publishes digits.

    The recognized digit string is exposed as ``self.txt`` and also written
    to ``textout.txt`` for the GUI to poll.
    """

    def __init__(self, threshold=None):
        # ``threshold`` is currently unused.  DD is a translation table that
        # keeps only digit characters (see the nested Del class).
        self.DD = self.Del()

    class Del:
        # str.translate() helper: maps the ordinals of characters in ``keep``
        # to themselves; every other ordinal resolves to None via dict.get,
        # which deletes the character.
        def __init__(self, keep=string.digits):
            self.comp = dict((ord(c), c) for c in keep)

        def __getitem__(self, k):
            return self.comp.get(k)

    def run(self, txt):
        """OCR loop: read ``self.pro_img.img``, extract digits, publish.

        Runs forever — ``key`` is never reassigned, so ``key != 27`` always
        holds.  Intended to execute on the thread created by start();
        requires start(pro_img) to have set ``self.pro_img`` first.
        """
        # Redis connection
        # r = redis.Redis(host='localhost', port=6379, db=0)
        self.txt = txt
        key = 0
        start = time.time()
        while key != 27:
            # img = self.fromRedis(r, 'processed_image')
            # img = cv2.imread('processed_image.jpg')
            img = self.pro_img.img
            # print(img.shape)
            try:
                # --psm 7: treat the image as a single line of text.
                found_text = pytesseract.image_to_string(img, config='--psm 7')
            except:
                found_text = "None"
            found_digits = found_text.translate(self.DD)  # strip non-digits
            end = time.time()
            print(found_digits, 'elapsed time:', end - start)
            self.txt = found_digits
            with open('textout.txt', 'w') as fout:
                fout.write(found_digits)
            start = time.time()
            try:
                cv2.imwrite('OCR_image.jpg',img)
            except Exception as e:
                print(e)

    def start(self, pro_img):
        """Attach an image source (object with ``.img``) and launch run() on a thread."""
        self.pro_img = pro_img
        # processedimage = preprocess.process(capture.img)
        self.txt = None
        t = Thread(target=self.run, args=(self.txt, ))
        t.start()
def main():
    """Ad-hoc entry point: run the OCR loop in the foreground.

    Fixed: OCR.run() takes the initial text value as a required positional
    argument — the original call ``ocr.run()`` raised TypeError.  Note that
    run() also reads ``self.pro_img``, which is only set by start(pro_img);
    without an attached image source the loop cannot produce output.
    """
    ocr = OCR()
    ocr.run(None)

if __name__ == '__main__':
    main()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,864
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/mold_No_button.py
|
#! python3
import pyautogui, sys, keyboard, time, json, os, win32clipboard
from tkinter import *
import win_loc
# (module-level ``global`` is a no-op; kept as-is)
global wormhole_active, CBdata, prev_x, prev_y, config
# Prime the keyboard hook; the return value is intentionally discarded.
keyboard.is_pressed('F8')
configPath = 'config.json'
configExists = os.path.exists(configPath)
# First run (or F8 held at launch): interactively capture the overlay
# rectangle with the mouse and persist it to config.json.
if keyboard.is_pressed('F8') or not configExists:
    input("Move cursor to top left corner of region to be blocked and press enter.")
    topLeft = pyautogui.position()
    print(topLeft)
    input("Move cursor to lower right corner of region to be blocked and press enter.")
    bottomRight = pyautogui.position()
    print(bottomRight)
    right_pos = topLeft[0]
    down_pos = topLeft[1]
    win_width = bottomRight[0] - topLeft[0]
    win_height = bottomRight[1] - topLeft[1]
    config = { "right_pos" : right_pos,"down_pos" : down_pos, "win_width" : win_width, "win_height" : win_height}
    with open(configPath,'w') as fout:
        json.dump(config,fout)
else:
    # Subsequent runs: reuse the stored size but re-anchor the overlay
    # relative to the current "PSMII Bluesheet" window position.
    with open(configPath,'r') as fin:
        config = json.load(fin)
    bs_pos = win_loc.main()
    right_pos = bs_pos[0] + bs_pos[2] - config["win_width"] -20  # offsets look hand-tuned — TODO confirm
    down_pos = bs_pos[1] + 265
    win_width = config["win_width"]
    win_height = config["win_height"]
prev_x, prev_y = 0,0
# Pre-build the pieces of the tkinter geometry string "WxH+X+Y".
win_width_s = str(win_width) + 'x'
win_height_s = str(win_height) + '+'
right_pos_s = str(right_pos) + '+'
down_pos_s = str(down_pos)
wormhole_active = True
def openNewWindow():
    """Overlay-button callback: toggle the global ``wormhole_active`` flag."""
    global wormhole_active
    wormhole_active = not wormhole_active
    print('clicked')
def getCBdata():
    """Return the clipboard contents as CF_TEXT, or "" if the clipboard
    cannot be read.

    Fixed: narrowed the original bare ``except`` to ``except Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        win32clipboard.OpenClipboard()
        data = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
        win32clipboard.CloseClipboard()
    except Exception:
        data = ""
    return data
# Snapshot the clipboard at startup (declared global in task(); presumably
# compared against later clipboard reads — TODO confirm usage).
CBdata = getCBdata()
def enter_field(moldNo):
    """Type ``moldNo`` into the configured region of the Bluesheet window.

    Hides the overlay, clicks the centre of the configured rectangle,
    types the number followed by Tab, then restores the overlay and the
    original mouse position.
    """
    curPos = pyautogui.position()
    right_pos = config["right_pos"]
    down_pos = config["down_pos"]
    win_width = config["win_width"]
    win_height = config["win_height"]
    root.withdraw()  # hide the overlay so the click reaches the app below
    windowCenterX = right_pos + win_width / 2
    windowCenterY = down_pos + win_height / 2
    pyautogui.moveTo(windowCenterX, windowCenterY)
    pyautogui.click()
    try:
        moldNo = moldNo.decode("utf-8")  # clipboard data may arrive as bytes
    except:
        moldNo = moldNo
    print(moldNo, right_pos)
    pyautogui.write(moldNo,.02)  # 20 ms between keystrokes
    pyautogui.press('tab')
    time.sleep(1)
    pyautogui.moveTo(curPos[0], curPos[1])  # restore the mouse position
    root.update()
    root.deiconify()  # show the overlay again
# Build the semi-transparent, borderless, always-on-top blue overlay that
# covers the protected region of the Bluesheet window.
root = Tk()
root.configure(bg='blue')
root.overrideredirect(1)  # no title bar / window decorations
root.geometry(win_width_s + win_height_s + right_pos_s + down_pos_s)  # "WxH+X+Y"
root.attributes('-alpha', 0.3)  # 30% opaque
root.lift()
root.attributes('-topmost', True)
btn = Button(root,
    text ="", # Hardest button to button
    command = openNewWindow,
    background='blue')
btn.pack(fill=BOTH, expand=1)
def task():
    """Poll the mouse position on the tkinter event loop (reschedules itself
    every 1 ms).

    NOTE(review): xcheck/ycheck compute whether the cursor is inside the
    overlay but their results are unused here — looks like leftover from a
    click-through feature; confirm before removing.
    """
    global wormhole_active, prev_x, prev_y, CBdata
    x, y = pyautogui.position()
    positionStr = 'X: ' + str(x).rjust(4) + ' Y: ' + str(y).rjust(4) + '\n'
    #### Uncomment to view position
    ## print(positionStr, end='')
    xcheck = x >= right_pos and x <= right_pos + win_width
    ycheck = y >= down_pos and y <= down_pos + win_height
    prev_x, prev_y = x, y
    root.after(1, task)
# Kick off the polling loop, then hand control to tkinter.
root.after(1, task)
root.mainloop()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,865
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/key_poller.py
|
from pynput.keyboard import Key, Listener
import time
from threading import Thread
class Key_listener():
    """Detects barcode-scanner input by inter-keystroke timing.

    A scanner types digits far faster than a human: when two digit key
    presses arrive less than 5 ms apart, ``self.scanned`` is set True.
    The pynput listener runs on a helper thread started from __init__.
    """
    def __init__(self):
        # Sliding window of inter-key intervals (seconds); seeded with large
        # values so it cannot trigger before real input arrives.
        self.queue = [2,1,1,1,1,1,1,1]
        self.itime = time.time()
        self.scanned = False
        start_thread = Thread(target=self.start)
        start_thread.start()
    def on_press(self,key):
        try:
            if key.char in '0123456789':
                # print('key char',key.char)
                self.queue.append(time.time() - self.itime)
                self.queue.pop(0)
                # print(self.queue)
                self.itime = time.time()
                if self.queue[1] < .005:  # two digits < 5 ms apart => scanner
                    print("scan detected")
                    self.queue = [2,1,1,1,1,1,1,1]
                    self.scanned = True
        except:
            # Non-character keys (shift, ctrl, ...) have no .char attribute.
            pass
    def start(self):
        """Block on the pynput listener (runs on the helper thread)."""
        with Listener(on_press=self.on_press) as listener:
            listener.join()
    def reset(self):
        # Clear the scan flag once the consumer has handled it.
        self.scanned = False
def main():
    """Demo loop: print the scan flag once a second, resetting when set."""
    kl = Key_listener()
    # kl.start()
    while True:
        print(kl.scanned)
        if kl.scanned:
            kl.reset()
        time.sleep(1)

if __name__ == '__main__':
    main()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,866
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/win_loc.py
|
import win32gui
def callback(hwnd, pos):
    """EnumWindows callback: record the Bluesheet window's geometry.

    Appends x, y, width, height to ``pos`` when the window's title contains
    'PSMII Bluesheet'; otherwise leaves ``pos`` untouched.
    """
    left, top, right, bottom = win32gui.GetWindowRect(hwnd)
    if 'PSMII Bluesheet' in win32gui.GetWindowText(hwnd):
        # print("Window %s:" % win32gui.GetWindowText(hwnd))
        # print("\tLocation: (%d, %d)" % (left, top))
        pos.extend((left, top, right - left, bottom - top))
def main():
    """Return [x, y, w, h] of the Bluesheet window, or [] if not found."""
    pos = []
    win32gui.EnumWindows(callback, pos)
    # print(pos)
    return pos

if __name__ == '__main__':
    main()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,867
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/gui.py
|
from tkinter import *
from PIL import Image
from PIL import ImageTk
import cv2, time
import numpy as np
from datetime import datetime
import subprocess, os, sys
from multiprocessing import Process
import capture, preprocess, OCR
root = Tk()
panelA = None  # left panel: frame with the ROI rectangle drawn
panelB = None  # right panel: the processed ROI
# Placeholder images shown until the pipeline produces real frames.
image = Image.open('redis_not_started.jpg')
ROI = Image.open('redis_not_started.jpg')
def submitCallBack():
    """Save the current processed frame under log/ with a timestamped name
    that embeds the mold number currently shown in the GUI label."""
    # img = fromRedis(r,'processed_image')
    img = cv2.imread('processed_image.jpg')  # NOTE(review): read but unused; the write below uses root.proc.img
    # img = panelB.image
    text = moldNo.cget("text")
    now = datetime.now()
    ts = now.strftime("%Y_%m_%d___%H_%M_%S")
    fname = ts + '__mold-' + text + '.png'
    # img.write('log/' + fname)
    cv2.imwrite( 'log/' + fname,root.proc.img)
    print(fname)
def show_log():
    """Open the log/ folder in Windows Explorer."""
    # NOTE(review): appending C:\python34\Lib to sys.path looks like a stale
    # workaround unrelated to opening Explorer — confirm it is still needed.
    path = r'C:\python34\Lib'
    sys.path.append(path)
    cwd = os.getcwd()
    path = os.path.join(cwd,'log')
    FILEBROWSER_PATH = os.path.join(os.getenv('WINDIR'), 'explorer.exe')
    subprocess.run([FILEBROWSER_PATH, path])
# convert the images to PIL format...
image = ImageTk.PhotoImage(image=image)
ROI = ImageTk.PhotoImage(image=ROI)
# the first panel will store our original image
panelA = Label(image= image)
panelA.image = image  # keep a reference so tkinter doesn't garbage-collect the photo
panelA.pack(side="left", padx=10, pady=10)
# while the second panel will store the edge map
ROI_frame = Frame(root)
ROI_frame.pack(side="right", padx=10, pady=10)
panelB = Label(ROI_frame, image= ROI)
panelB.image = ROI
panelB.pack()
moldNo = Label(ROI_frame, text= 'Hello there')  # displays the OCR'd digits
moldNo.pack()
submit_button = Button ( ROI_frame, text="Submit", command = submitCallBack )
submit_button.pack(fill=X)
root.title("Mold number check")
# File menu with a shortcut to the saved-image log folder.
menubar = Menu(root)
root.config(menu=menubar)
fileMenu = Menu(menubar)
fileMenu.add_command(label="Show Log", command=show_log)
# fileMenu.add_command(label="Start Redis Server", command=start_redis)
menubar.add_cascade(label="File", menu=fileMenu)
def update_images():
    """Refresh both image panels and the mold-number label; reschedules
    itself every 100 ms on the tkinter event loop."""
    # image = fromRedis(r,'rect')
    # ROI = fromRedis(r,'processed_image')
    image = root.proc.rect #cap.img #cv2.imread('raw.jpg')
    ROI = root.proc.img #cv2.imread('processed_image.jpg')
    # print(image.shape,image[1][1])
    # The OCR thread writes its latest result to this file.
    with open('textout.txt','r') as fin:
        found_digits = fin.readline()
    moldNo.configure(text = found_digits)
    # convert the images to PIL format...
    image = ImageTk.PhotoImage(Image.fromarray(image))
    ROI = ImageTk.PhotoImage(Image.fromarray(ROI))
    # the first panel will store our original image
    # panelA = Label(image=image)
    # panelA.image = image
    panelA.configure(image=image)
    panelA.image = image  # keep a reference alive
    # while the second panel will store the edge map
    panelB.configure(image=ROI)
    panelB.image = ROI
    root.after(100, update_images)
# Schedule the first GUI refresh once the mainloop starts.
root.after(1, update_images)
def startPreprocess():
    """Run the preprocessing loop in the current process.

    Fixed: the preprocess module defines ``Processor`` (not ``Preprocess``),
    and its run() requires the initial image as a positional argument
    (start() passes None) — the original raised AttributeError/TypeError.
    """
    prepro = preprocess.Processor()
    prepro.run(None)
def startCapture():
    """Run the camera-capture loop in the current process.

    Fixed: Capture.run() takes the initial image as a required positional
    argument (start() passes None); the original ``cap.run()`` raised
    TypeError.
    """
    cap = capture.Capture(1)
    cap.run(None)
def startOCR():
    """Run the OCR loop in the current process.

    Fixed: OCR.run() takes the initial text value as a required positional
    argument (start() passes None); the original ``ocr.run()`` raised
    TypeError.
    """
    ocr = OCR.OCR()
    ocr.run(None)
def startGUI():
    """Spin up the capture → preprocess → OCR threads, then run the GUI."""
    cap = capture.Capture(0)
    print('before run')
    cap.start()
    t = time.time()
    # Block until each pipeline stage produces its first output.
    while cap.img is None:
        print('waiting for camera to connect',time.time()-t)
        time.sleep(.25)
    proc = preprocess.Processor()
    proc.start(cap)
    while proc.img is None:
        print('waiting for processed image',time.time()-t)
        time.sleep(.25)
    ocr = OCR.OCR()
    ocr.start(proc)
    while ocr.txt is None:
        print('waiting for OCR',time.time()-t)
        time.sleep(.25)
    # Expose the pipeline objects to the GUI callbacks via the root widget.
    root.cap = cap
    root.proc = proc
    root.mainloop()
def main():
    """Launch the GUI (and its pipeline threads) in a separate process."""
    # print('starting Capture')
    # Process(target=startCapture).start()
    # print('starting Preprocess')
    # Process(target=startPreprocess).start()
    # print('starting OCR')
    # Process(target=startOCR).start()
    print('starting GUI')
    # NOTE(review): tkinter objects created at import time are used from a
    # child process here — confirm this behaves on the target platform.
    Process(target=startGUI).start()
    print('gui started')

if __name__ == "__main__":
    main()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,868
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/zmqimage_server_test.py
|
import zmqimage, cv2
# Display server: receive named arrays over ZMQ and show each with cv2.
zmq = zmqimage.zmqImageShowServer(open_port="tcp://*:5555")
print("Starting zmqImageShow Server...")
print(" press Ctrl-C to stop")
i = 0  # frame counter, printed for progress
while True: # Until Ctrl-C is pressed, will repeatedly
    arrayname, image = zmq.getArray()
    cv2.imshow(arrayname, image)
    cv2.waitKey(1)  # give the HighGUI event loop a chance to draw
    print(i)
    i += 1
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,869
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/capture.py
|
import cv2, time, json
import struct
##import redis
import numpy as np
import ctypes
import argparse
from random import randrange
import pickle as pkl
# from multiprocessing import Process, Value, Array
from threading import Thread, Event
def parseCmdLineArgs (args=None):
    """Parse command-line options.

    Generalized to accept an explicit argument list (``args``); with the
    default of None, argparse falls back to sys.argv[1:], preserving the
    original behaviour for existing callers.

    Returns the parsed argparse.Namespace (attribute: ``verbosity``).
    """
    # parse the command line
    parser = argparse.ArgumentParser ()
    # add optional argument to turn off the broker
    parser.add_argument ("-v","--verbosity",default='', help="Be Verbose")
    # parse the args
    return parser.parse_args (args)
class Capture():
    """Camera grabber: exposes the latest RGB frame as ``self.img``."""
    def __init__(self, camera):
        # ``camera`` is the cv2.VideoCapture device index.
        self.camera = camera
        self.img = None  # None until the first frame is grabbed
        print('starting Capture')
        # multiprocessing.Process(target=self.run).start()
    # def toRedis(self,r,a,n):
    #     """Store given Numpy array 'a' in Redis under key 'n'"""
    #     h, w = a.shape[:2]
    #     shape = struct.pack('>II',h,w)
    #     encoded = shape + a.tobytes()
    #     # Store encoded data in Redis
    #     r.set(n,encoded)
    #     return
    # def toZMQ(self,a, arrayname="NoName",flags=0, copy=True, track=False):
    #     # self.zmq.imshow("Raw capture", a)
    #     md = dict(
    #         arrayname = arrayname,
    #         dtype = str(a.dtype),
    #         shape = a.shape,
    #         data = a.tolist()
    #     )
    #     print( 'sending', md['dtype'])
    #     self.socket.send_string("raw %s" % (json.dumps(md)))
    #     # self.socket.send_string('hello') #_json(md, 0) #flags|zmq.SNDMORE)
    #     # return self.socket.send(a, flags, copy=copy, track=track)
    def run(self,img):
        """Grab frames forever, converting BGR→RGB; reopens the device on error."""
        # Redis connection
        self.img = img
        ## r = redis.Redis(host='localhost', port=6379, db=0)
        cam = cv2.VideoCapture(self.camera)
        key = 0
        while key != 27:  # key never changes; loop runs until the process exits
            try:
                ret, self.img = cam.read()
                self.img = cv2.cvtColor(self.img,cv2.COLOR_BGR2RGB)
            except Exception as e:
                # Read/convert failed (e.g. camera unplugged): reconnect.
                cam = cv2.VideoCapture(self.camera)
                print('reconnecting 2',e)
    def start(self):
        """Launch run() on a background thread."""
        self.img = None
        t = Thread(target=self.run, args=(self.img, ))
        t.start()
def main():
    """Smoke test: open camera 0 and display frames until killed."""
    args = parseCmdLineArgs ()
    cap = Capture(0)
    print('before run')
    cap.start()
    t = time.time()
    # Wait for the first frame before opening the preview window.
    while cap.img is None:
        print('waiting for camera to connect',time.time()-t)
        time.sleep(.25)
    # t = time.time()
    while True:
        cv2.imshow('this',cap.img)
        cv2.waitKey(1)
        # print('Running',time.time()-t)
        # t = time.time()

if __name__ == '__main__':
    main()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,870
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/sub_img.py
|
from ctypes import create_string_buffer
import sys, json, pickle
import zmq
import numpy as np
import time, cv2
# Subscriber demo: receive JSON-encoded frames on the 'raw' topic and show them.
# Socket to talk to server
context = zmq.Context()
socket = context.socket(zmq.SUB)
print("Collecting updates from weather server...")
socket.connect("tcp://localhost:5555")
zip_filter = 'raw'  # topic prefix to subscribe to
socket.setsockopt_string(zmq.SUBSCRIBE, zip_filter)
t = time.time()
while True:
    string = socket.recv_string()
    # data = socket.recv()
    # Message layout is "<topic> <json>"; topic length includes the space.
    topiclen = len(string.split(' ')[0]) + 1
    topic = string[0:topiclen]
    msg = string[topiclen:]
    data = json.loads(msg)
    # pickle.load(
    img = np.array(data['data'], dtype=np.uint8)
    print(topic, data['dtype'], img.shape, time.time()-t)
    t = time.time()
    cv2.imshow('raw',img)
    key = cv2.waitKey(1) & 0xFF
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,871
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/preprocess.py
|
import numpy as np
import cv2, string, sys, os, json, time
import pytesseract
from pytesseract import Output
from multiprocessing import Process
from time import sleep
import struct
import argparse
import pickle as pkl
from threading import Thread
import capture
# Runtime-tunable settings (crop padding factors, scale); re-read every frame.
configPath = 'config.json'
class Processor():
    """Turns raw camera frames into a cropped, scaled ROI for OCR.

    Outputs: ``self.rect`` (the crop with the ROI rectangle drawn, for the
    GUI preview) and ``self.img`` (the final resized ROI).
    """
    def __init__(self):
        pass
    def run(self, img):
        """Process frames from ``self.capture`` forever.

        config.json is re-read on every iteration so padding/scale can be
        tuned live while the loop runs.  Requires start(capture) first.
        """
        # ii = 0
        while True:
            img = self.capture.img
            # print('looping like cra cra', ii)
            # ii += 1
            try:
                with open(configPath,'rb') as fin:
                    config = json.load(fin)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                img = cv2.medianBlur(img,5)
                threshold = None
                # NOTE(review): threshold is hard-coded to None, so the eval
                # branch below is currently dead; if ever re-enabled, eval on
                # a string is a code-injection risk — replace with a lookup.
                if not threshold is None:
                    img = eval ('cv2.threshold(img,0,255,' + threshold +' )[1]' ) # cv2.THRESH_BINARY + cv2.THRESH_OTSU
                img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
                img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
                scale_percent = 100 #config['scale_percent']
                img_w = int(img.shape[1] * scale_percent / 100)
                img_h = int(img.shape[0] * scale_percent / 100)
                h_padding_factor = config['crop_h_padding_factor']
                v_padding_factor = config['crop_v_padding_factor']
                h_padding = int(h_padding_factor * scale_percent / 100)
                v_padding = int(v_padding_factor * scale_percent / 100)
                dsize = (img_w, img_h)
                img = cv2.resize(img, dsize)
                # First crop: trim fixed padding off every side.
                crop_img = img[ v_padding:img_h-v_padding, h_padding:img_w-h_padding]
                scale_percent = 100
                crop_w = crop_img.shape[1]
                crop_h = crop_img.shape[0]
                rect_img_w = int(crop_w * scale_percent / 100)
                rect_img_h = int(crop_h * scale_percent / 100)
                rect_h_padding_factor = config['rect_h_padding_factor']
                rect_v_padding_factor = config['rect_v_padding_factor']
                rect_h_padding = int(rect_img_w * rect_h_padding_factor / 100)
                rect_v_padding = int(rect_img_h * rect_v_padding_factor / 100)
                # Draw the ROI rectangle on the crop for the GUI preview.
                crop_img = cv2.rectangle(crop_img,(rect_h_padding,rect_v_padding),(crop_w-rect_h_padding,crop_h-rect_v_padding),(255,0,0),3)
                self.rect = crop_img
                cv2.imwrite('rect.jpg',crop_img)
                # Second crop: keep only the interior of the rectangle.
                ROI = crop_img[rect_v_padding:crop_h-rect_v_padding,rect_h_padding:crop_w-rect_h_padding]
                scale_percent = config['scale_percent']
                ROI_w = int(ROI.shape[1] * scale_percent / 100)
                ROI_h = int(ROI.shape[0] * scale_percent / 100)
                dsize = (ROI_w, ROI_h)
                ROI = cv2.resize(ROI, dsize)
                self.img = ROI
            except Exception as e:
                # Report where it failed but keep the loop alive.
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print(exc_type, fname, exc_tb.tb_lineno)
                print(e)
    def start(self,capture):
        """Attach a Capture-like source (object with ``.img``) and launch run() on a thread."""
        self.capture = capture
        self.img = None
        t = Thread(target=self.run, args=(self.img, ))
        t.start()
def main():
    """Smoke test: camera → Processor, preview the ROI until killed."""
    cap = capture.Capture(0)
    print('before run')
    cap.start()
    t = time.time()
    while cap.img is None:
        print('waiting for camera to connect',time.time()-t)
        time.sleep(.25)
    proc = Processor()
    proc.start(cap)
    while proc.img is None:
        print('waiting for processed image',time.time()-t)
        time.sleep(.25)
    ii = 0  # frame counter, printed for progress
    while True:
        processed = proc.img
        print(processed.shape, ii)
        ii += 1
        cv2.imshow('this',processed)
        cv2.waitKey(1)

if __name__ == '__main__':
    main()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,872
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/zmqimage_client_test.py
|
import numpy as np
import cv2
import zmqimage
# Client demo: ramp a gray image from 0 to 254 and push each frame (plus a
# rectangular mask) to the remote display server.
print("Connecting to zmqShowImage Server ... ")
zmq = zmqimage.zmqConnect(connect_to="tcp://127.0.0.1:5555")
for i in range(255):
    image = np.ones((500, 500), dtype="uint8")*i
    zmq.imshow("Zero Image 500 x 500", image)
    # build a rectangular mask & display it
    mask = np.zeros(image.shape[:2], dtype="uint8")
    cv2.rectangle(mask, (0, 90), (300, 450), 255, -1)
    zmq.imshow("Rectangular Mask", mask)
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,873
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/thread_play.py
|
from threading import Thread
from queue import Queue
import time
def thread1(threadname, q):
    """Consume values from ``q`` and print them until a None sentinel arrives."""
    while True:
        item = q.get()
        if item is None:
            return  # poison pill: producer is done
        print(item)
def thread2(threadname, q):
    """Produce the integers 1..10 onto ``q`` (one per second), then a None sentinel."""
    counter = 0
    while counter < 10:
        counter += 1
        q.put(counter)
        time.sleep(1)
    q.put(None) # Poison pill
queue = Queue()
# NOTE: these assignments shadow the function names above with Thread objects.
thread1 = Thread( target=thread1, args=("Thread-1", queue) )
thread2 = Thread( target=thread2, args=("Thread-2", queue) )
thread1.start()
thread2.start()
print('after starts')
# Race: this loop and the consumer thread both read from the same queue;
# once the queue stays empty, this get() blocks forever.
while True:
    print('after starts', queue.get())
thread1.join()  # unreachable: the loop above never exits
thread2.join()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,874
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/Live_phone.py
|
import os
import pathlib
import cv2
from threading import Thread, Event
class Live_phone:
    """Continuously reloads the latest phone frame written by the Flask app.

    The most recent frame is exposed as ``self.img`` (None until the first
    successful read; cv2.imread also yields None while the file is absent).
    """
    def __init__(self):
        self.img = None

    def get(self, img):
        """Poll the live.png dropped by the web app, forever.

        Fixed: the failure counter used ``i =+ 1`` (assignment of +1), which
        reset it to 1 on every failure instead of incrementing.
        """
        # self.img = img
        i = 0
        while True:
            try:
                path = 'Android_Web\\flask\\examples\\tutorial\\flaskr\\images\\live.png'
                self.img = cv2.imread(path)
                # os.remove(path)
            except Exception as e:
                i += 1  # count consecutive read failures
                # print(e , i)

    def start_getting(self):
        """Run get() on a background thread."""
        t = Thread(target=self.get, args=(self.img, ))
        t.start()
def main():
    """Preview loop: show the latest phone frame; skip frames not yet ready."""
    lv = Live_phone()
    lv.start_getting()
    while True:
        try:
            cv2.imshow('this',lv.img)
            cv2.waitKey(1)
        except:
            pass  # lv.img is still None before the first successful read
    # lv.live()

if __name__ == '__main__':
    main()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,875
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/zmqimage.py
|
# zmqimage.py -- classes to send, receive and display cv2 images via zmq
# based on serialization in pyzmq docs and pyzmq/examples/serialization
'''
PURPOSE:
These classes allow a headless (no display) computer running OpenCV code
to display OpenCV images on another computer with a display.
For example, a headless Raspberry Pi with no display can run OpenCV code
and can display OpenCV images on a Mac with a display.
USAGE:
First, start this "display server" program on the computer with a display:
# imageShowServer.py
import zmqimage
zmq = zmqimage.zmqImageShowServer()
print "Starting zmqImageShow Server..."
print " press Ctrl-C to stop"
while True: # Until Ctrl-C is pressed, will repeatedly
zmq.imshow() # display images sent from the headless computer
Run the above program by:
python imageShowServer.py
Leave the above program running in its own terminal window.
Then, run a program like the one below on the headless computer.
In most cases, it will be run using ssh into the headless computer
from another terminal window on the computer with a display.
The python lines below represent a program fragment as an example.
Use zmq.imshow("Image Display Name", image) instead of
cv2.imshow("Image Display Name", image) and the images will
display on the computer running the program above:
import numpy as np
import cv2
import zmqimage
print "Connecting to zmqShowImage Server ... "
zmq = zmqimage.zmqConnect()
image = np.zeros((500, 500), dtype="uint8")
zmq.imshow("Zero Image 500 x 500", image)
# build a rectangular mask & display it
mask = np.zeros(image.shape[:2], dtype="uint8")
cv2.rectangle(mask, (0, 90), (300, 450), 255, -1)
zmq.imshow("Rectangular Mask", mask)
CAVEATS:
There is no error checking and only Ctrl-C stops the display server.
While zmq.imshow() works like cv2.imshow(), no other
cv2 display functions are implemented.
Uses tcp style sockets; sockets and tcp addresses have
defaults in the classes but may be overridden.
AUTHOR:
Jeff Bass, https://github.com/jeffbass, jeff@yin-yang-ranch.com
'''
import zmq
import numpy as np
import cv2
class SerializingSocket(zmq.Socket):
    """zmq.Socket subclass that ships named numpy arrays.

    send_array transmits a JSON metadata frame (arrayname, dtype, shape)
    followed by the raw array bytes; recv_array reverses the process and
    reconstructs the array with its original dtype, shape and name.
    """

    def send_array(self, A, arrayname="NoName",flags=0, copy=True, track=False):
        """Send array ``A`` preceded by the metadata needed to rebuild it."""
        metadata = {
            'arrayname': arrayname,
            'dtype': str(A.dtype),
            'shape': A.shape,
        }
        # SNDMORE marks the metadata frame as part of a multi-part message.
        self.send_json(metadata, flags | zmq.SNDMORE)
        return self.send(A, flags, copy=copy, track=track)

    def recv_array(self, flags=0, copy=True, track=False):
        """Receive (arrayname, array), restoring dtype and shape from metadata."""
        metadata = self.recv_json(flags=flags)
        payload = self.recv(flags=flags, copy=copy, track=track)
        flat = np.frombuffer(payload, dtype=metadata['dtype'])
        return (metadata['arrayname'], flat.reshape(metadata['shape']))
class SerializingContext(zmq.Context):
    # Context whose socket() returns SerializingSocket instances.
    _socket_class = SerializingSocket
class zmqConnect():
    '''A class that opens a zmq REQ socket on the headless computer
    '''
    def __init__(self, connect_to="tcp://jeff-mac:5555"):
        '''initialize zmq socket for sending images to display on remote computer'''
        '''connect_to is the tcp address:port of the display computer'''
        print('connecting to', connect_to)
        self.zmq_context = SerializingContext()
        self.zmq_socket = self.zmq_context.socket(zmq.REQ)
        self.zmq_socket.connect(connect_to)
    def imshow(self, arrayname, array):
        '''send image to display on remote server'''
        if array.flags['C_CONTIGUOUS']:
            # if array is already contiguous in memory just send it
            self.zmq_socket.send_array(array, arrayname, copy=False)
        else:
            # else make it contiguous before sending
            array = np.ascontiguousarray(array)
            self.zmq_socket.send_array(array, arrayname, copy=False)
        # REQ/REP pattern: wait for the server's reply before returning.
        message = self.zmq_socket.recv()
class zmqImageShowServer():
    '''A class that opens a zmq REP socket on the display computer to receive images
    '''
    def __init__(self, open_port="tcp://*:5555"):
        '''initialize zmq socket on viewing computer that will display images'''
        self.zmq_context = SerializingContext()
        self.zmq_socket = self.zmq_context.socket(zmq.REP)
        self.zmq_socket.bind(open_port)
    def imshow(self, copy=False):
        '''receive and show image on viewing computer display'''
        arrayname, image = self.zmq_socket.recv_array(copy=False)
        # print "Received Array Named: ", arrayname
        # print "Array size: ", image.shape
        cv2.imshow(arrayname, image)
        cv2.waitKey(1)
        # REP socket must reply before it can receive again.
        self.zmq_socket.send(b"OK")
    def getArray(self, copy=False):
        '''receive an image and return (arrayname, image) instead of displaying it'''
        arrayname, image = self.zmq_socket.recv_array(copy=False)
        self.zmq_socket.send(b"OK")
        return arrayname, image
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,876
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/gui2.py
|
from io import RawIOBase
import capture, preprocess, OCR
import time, cv2
# Console pipeline demo: capture → preprocess → OCR with a cv2 preview window.
cap = capture.Capture(1)
print('before run')
cap.start()
t = time.time()
# Block until each pipeline stage produces its first output.
while cap.img is None:
    print('waiting for camera to connect',time.time()-t)
    time.sleep(.25)
proc = preprocess.Processor()
proc.start(cap)
while proc.img is None:
    print('waiting for processed image',time.time()-t)
    time.sleep(.25)
ocr = OCR.OCR()
ocr.start(proc)
while ocr.txt is None:
    print('waiting for OCR',time.time()-t)
    time.sleep(.25)
t = time.time()
while True:
    raw = cap.img
    processed = proc.img
    # cv2.imshow('this',raw)
    cv2.imshow('this',processed)
    cv2.waitKey(1)
    print(ocr.txt,'Running',time.time()-t)
    t = time.time()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,877
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/badges.py
|
import pandas as pd
import tkinter, datetime, pyautogui, time
class Badge:
    """Badge database backed by badges.csv, with tkinter prompt dialogs.

    scan() opens a modal window waiting for a badge scan; unknown badge
    codes trigger addName(), which asks for "First Last" and appends the
    new record to the CSV.
    """
    def __init__(self, master):
        self.master = master
        self.path = 'badges.csv'
        # Columns: code, firstname, lastname, date_added (index col: 'index').
        self.df = pd.read_csv(self.path,index_col='index') # ,header=None , comment = '#',names=["code", "firstname", "lastname", "date_added"]
    def prt(self):
        # Debug helper: dump the whole badge table.
        print(self.df)
    def lookupName(self,scan_input):
        """Return (firstname, lastname) for a badge code; prompt to add it if unknown."""
        self.scan_input = scan_input
        fisrtnames = self.df.loc[self.df['code'] == scan_input]['firstname']
        lasttnames = self.df.loc[self.df['code'] == scan_input]['lastname']
        if len(fisrtnames) > 0:
            return fisrtnames.iloc[0], lasttnames.iloc[0]
        else:
            print('badge not found')
            return self.addName()
    def addName(self):
        """Modal dialog asking for "First Last"; persists the record and returns the pair."""
        # master = tkinter.Tk()
        self.nameRequestWindow = tkinter.Toplevel(self.master)
        self.nameRequestWindow.title("New Badge. Please type name.")
        self.nameRequestWindow.lift()
        self.nameRequestWindow.focus()
        self.nameRequestWindow.grab_set() #for disable main window
        self.nameRequestWindow.attributes('-topmost',True) #for focus on toplevel
        self.nameRequestWindow.focus_force()
        e = tkinter.Entry(self.nameRequestWindow)
        e.pack(padx=120, pady=40)
        e.focus_set()
        def callback(event = None):
            self.nameInput = e.get() # This is the text you may want to use later
            self.nameRequestWindow.destroy()
        b = tkinter.Button(self.nameRequestWindow, text = "OK", width = 10, command = callback)
        b.pack()
        self.nameRequestWindow.bind('<Return>', callback)
        self.master.wait_window(self.nameRequestWindow)
        self.now = datetime.datetime.now()
        self.ts = self.now.strftime("%Y_%m_%d___%H_%M_%S")
        # NOTE(review): assumes the typed name contains a space; single-word
        # input raises IndexError on the lastname split.
        self.firstnameInput = self.nameInput.split(' ')[0]
        self.lastnameInput = self.nameInput.split(' ')[1]
        self.df.loc[len(self.df.index)+1] = [ self.scan_input,self.firstnameInput,self.lastnameInput ,self.ts]
        print(self.df)
        self.df.to_csv(self.path,index=True)
        return self.firstnameInput, self.lastnameInput
    def scan(self):
        """Modal badge-scan dialog; result lands in self.lookedUpName (returns None)."""
        self.badgeScanWindow = tkinter.Toplevel(self.master)
        self.badgeScanWindow.lift()
        self.badgeScanWindow.focus()
        self.badgeScanWindow.grab_set() #for disable main window
        self.badgeScanWindow.attributes('-topmost',True) #for focus on toplevel
        self.badgeScanWindow.focus_force()
        self.badgeScanWindow.title("Scan your badge.")
        self.badgeScanText = tkinter.StringVar(self.badgeScanWindow, value="Scan badge")
        self.badgeScan = tkinter.Entry(self.badgeScanWindow, textvariable = self.badgeScanText)
        self.badgeScan.pack(padx=100, pady=50)
        # Synthesize a Tab shortly after opening — presumably to move focus
        # past the pre-filled text; confirm against the scanner's behaviour.
        self.badgeScanWindow.after(1000, self.sendTab)
        self.badgeScanWindow.bind('<Return>', self.badgeAccepted)
        self.master.wait_window(self.badgeScanWindow)
    def badgeAccepted(self, e):
        # <Return> handler: resolve the scanned code, then close the dialog.
        print('badgeScanWindow destroyed')
        self.badgeNo = self.badgeScan.get()
        self.lookedUpName = self.lookupName(self.badgeNo)
        print('self.lookedUpName',self.lookedUpName)
        self.badgeScanWindow.destroy()
    def sendTab(self):
        pyautogui.press('tab')
        print('sent tab')
def main():
    """Open the scan dialog and print the result.

    NOTE(review): Badge.scan() has no return statement, so the printed value
    is always None; the resolved name lives on badges.lookedUpName.
    """
    root = tkinter.Tk()
    badge_db = Badge(root)
    scanned = badge_db.scan()
    print('badgeName', scanned)
if __name__ == '__main__':
    main()
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,297,878
|
roblee357/Clarios_mold_OCR
|
refs/heads/master
|
/multiprocess_test.py
|
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Value, Array
from ctypes import Structure, c_double
import numpy as np
import ctypes
class Point(Structure):
    # ctypes record of two C doubles; element type of the shared Array built
    # in the __main__ block below.
    _fields_ = [('x', c_double), ('y', c_double)]
    # _fields_ = np.array([[1.875,-6.25], [-5.75,2.0], [2.375,9.5]])
def modify(n, x, s, A):
    """Mutate the shared objects in place: square the numeric values, upper-
    case the shared string, and square each point's coordinates."""
    n.value = n.value ** 2
    x.value = x.value ** 2
    s.value = s.value.upper()
    for point in A:
        point.x = point.x ** 2
        point.y = point.y ** 2
if __name__ == '__main__':
    lock = Lock()
    # Shared scalars: 'i' = C int; x is deliberately created without a lock.
    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', b'hello world', lock=lock)
    # NOTE(review): the next two assignments are immediately overwritten —
    # only the tuple-of-tuples version of arr survives.
    arr = np.array([[1.875,-6.25], [-5.75,2.0], [2.375,9.5]])
    arr = ((1.875,-6.25), (-5.75,2.0), (2.375,9.5))
    arr = np.array(arr)
    print(arr.shape)
    # Array(Point, ...) wants plain tuples, not numpy rows.
    tup = tuple(map(tuple, arr))
    A = Array(Point, tup, lock=lock)
    # Run modify() in a child process; the shared ctypes objects are mutated
    # in place and the changes are visible here after join().
    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()
    print(n.value)
    print(x.value)
    print(s.value)
    print([(a.x, a.y) for a in A])
# # import multiprocessing
# import time
# from multiprocessing import Process, Value, Array
# class A():
# def __init__(self):
# pass
# def f(self,n, a):
# for bb in range(100):
# n.value = 3.1415927 + bb
# time.sleep(.1)
# for i in range(len(a)):
# a[i] = -a[i]
# def run(self):
# self.num = Value('d', 0.0)
# self.arr = Array('i', range(10))
# self.p = Process(target=self.f, args=(self.num, self.arr))
# self.p.start()
# if __name__ == '__main__':
# myA = A()
# myA.run()
# for i in range(10):
# print(myA.num.value)
# print(myA.arr[:])
# time.sleep(1)
# def worker(procnum, return_dict):
# """worker function"""
# time.sleep(procnum)
# print(str(procnum) + " represent!")
# return_dict[procnum] = procnum
# if __name__ == "__main__":
# manager = multiprocessing.Manager()
# return_dict = manager.dict()
# jobs = []
# for i in range(5):
# p = multiprocessing.Process(target=worker, args=(i, return_dict))
# jobs.append(p)
# p.start()
# for proc in jobs:
# proc.join()
# print(return_dict.values())
# # time.sleep(1)
#
# class A(object):
# def __init__(self, *args, **kwargs):
# # do other stuff
# self.starttime = time.time()
# self.time = self.starttime
# self.manager = multiprocessing.Manager()
# self.return_dict = self.manager.dict()
# def do_something(self, i,thetime):
# for i in range(1000):
# thetime = time.time() - self.starttime
# time.sleep(0.01)
# # print('%s * %s = %s' % (i, i, i*i))
# def run(self):
# processes = []
# # for i in range(10):
# thetime = 0
# p = multiprocessing.Process(target=self.do_something, args=(5,thetime))
# p.start()
# # processes.append(p)
# # [x.start() for x in processes]
# if __name__ == '__main__':
# a = A()
# a.run()
# print('post run')
# for i in range(10):
# print(a.time)
# time.sleep(1)
|
{"/gui_web.py": ["/capture.py", "/preprocess.py", "/OCR.py", "/win_loc.py", "/badges.py", "/Live_phone.py"], "/OCR.py": ["/preprocess.py"], "/mold_No_button.py": ["/win_loc.py"], "/gui.py": ["/capture.py", "/preprocess.py", "/OCR.py"], "/zmqimage_server_test.py": ["/zmqimage.py"], "/preprocess.py": ["/capture.py"], "/zmqimage_client_test.py": ["/zmqimage.py"], "/gui2.py": ["/capture.py", "/preprocess.py", "/OCR.py"]}
|
39,356,916
|
kalizhaankyzy/STUDGUIDE-WebDevProject
|
refs/heads/master
|
/api/urls.py
|
from django.urls import path
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework.urlpatterns import format_suffix_patterns
from .views import author_detail, author_list, category_list, category_detail, category_news, course_detail, course_list, level_detail, level_list, news_detail, news_list
# API routes. Class-based views are wired with .as_view(); the remaining
# entries are plain function views. JWT login is provided by
# rest_framework_jwt's obtain_jwt_token view.
urlpatterns = [
    path('authors/', author_list.as_view()),
    path('authors/<int:author_id>/', author_detail.as_view()),
    path('categories/', category_list),
    path('categories/<int:category_id>/', category_detail),
    path('categories/<int:category_id>/news/', category_news),
    path('news/', news_list.as_view()),
    path('news/<int:news_id>', news_detail.as_view()),
    path('login/', obtain_jwt_token),
    path('course_levels/', level_list),
    # Fixed: '<intLlevel_id>' is not valid path-converter syntax and makes
    # Django raise ImproperlyConfigured at import time; the intended form is
    # '<int:level_id>'.
    path('course_levels/<int:level_id>/', level_detail),
    path('courses/', course_list),
    # TODO confirm the parameter name course_detail expects (level_id vs
    # course_id) — the original was the same malformed '<intLlevel_id>'.
    path('courses/<int:level_id>/', course_detail),
]
|
{"/sg_back/api/serializers.py": ["/sg_back/api/models.py"], "/sg_back/api/urls.py": ["/sg_back/api/views.py"], "/sg_back/api/views.py": ["/sg_back/api/models.py", "/sg_back/api/serializers.py"]}
|
39,484,224
|
amanmyrats/taksi
|
refs/heads/master
|
/api/urls.py
|
from django.urls import path
from .views import saher_ara_view, saher_ici_view, etrap_obalary_view
# Read-only JSON endpoints for the three ride categories, all backed by
# function views in api/views.py.
urlpatterns = [
    path('api/saherara/', saher_ara_view),
    path('api/saherici/', saher_ici_view),
    path('api/etrapobalary/', etrap_obalary_view)
]
|
{"/taksist/urls.py": ["/taksist/views.py"], "/yolagcy/views.py": ["/yolagcy/filters.py", "/api/views.py", "/taksist/models.py"], "/api/urls.py": ["/api/views.py"], "/api/views.py": ["/api/serializers.py", "/taksist/models.py"], "/yolagcy/filters.py": ["/taksist/models.py"], "/taksist/migrations of sqlite3/0001_initial.py": ["/taksist/utils.py"], "/taksist/models.py": ["/taksist/utils.py"], "/taksist/forms.py": ["/taksist/models.py"], "/taksist/views.py": ["/taksist/models.py", "/taksist/forms.py"], "/taksist/admin.py": ["/taksist/models.py"], "/api/serializers.py": ["/taksist/models.py"]}
|
39,484,225
|
amanmyrats/taksi
|
refs/heads/master
|
/api/views.py
|
from django.http import JsonResponse
from .serializers import TaxiSerializer
from taksist.models import TaxiProfile
def saher_ara_view(request):
    """Return every taxi profile as a JSON array (GET only; other methods
    fall through and return None, matching the original behavior)."""
    if request.method == 'GET':
        profiles = TaxiProfile.objects.all()
        payload = TaxiSerializer(profiles, many=True).data
        return JsonResponse(payload, safe=False)
def saher_ici_view(request):
    """Return every taxi profile as a JSON array (GET only; other methods
    fall through and return None, matching the original behavior)."""
    if request.method == 'GET':
        profiles = TaxiProfile.objects.all()
        payload = TaxiSerializer(profiles, many=True).data
        return JsonResponse(payload, safe=False)
def etrap_obalary_view(request):
    """Return every taxi profile as a JSON array (GET only; other methods
    fall through and return None, matching the original behavior)."""
    if request.method == 'GET':
        profiles = TaxiProfile.objects.all()
        payload = TaxiSerializer(profiles, many=True).data
        return JsonResponse(payload, safe=False)
|
{"/taksist/urls.py": ["/taksist/views.py"], "/yolagcy/views.py": ["/yolagcy/filters.py", "/api/views.py", "/taksist/models.py"], "/api/urls.py": ["/api/views.py"], "/api/views.py": ["/api/serializers.py", "/taksist/models.py"], "/yolagcy/filters.py": ["/taksist/models.py"], "/taksist/migrations of sqlite3/0001_initial.py": ["/taksist/utils.py"], "/taksist/models.py": ["/taksist/utils.py"], "/taksist/forms.py": ["/taksist/models.py"], "/taksist/views.py": ["/taksist/models.py", "/taksist/forms.py"], "/taksist/admin.py": ["/taksist/models.py"], "/api/serializers.py": ["/taksist/models.py"]}
|
39,484,226
|
amanmyrats/taksi
|
refs/heads/master
|
/api/serializers.py
|
from rest_framework import serializers
from taksist.models import TaxiProfile, User, TaxiStatus, Category, Status
class TaxiSerializer(serializers.ModelSerializer):
    """Serializes TaxiProfile rows plus the owning user's first name and
    string renderings of the category/status relations."""
    # Computed field, resolved by get_taksist_name() below.
    username = serializers.SerializerMethodField('get_taksist_name')
    # xstatus = serializers.SerializerMethodField('get_taksist_status')
    # xcategory = serializers.SerializerMethodField('get_taksist_category')
    # Rendered through the related models' __str__.
    cat_id = serializers.StringRelatedField()
    status_id = serializers.StringRelatedField()
    class Meta:
        model = TaxiProfile
        fields = ['user_id', 'mobile', 'username', 'cat_id', 'status_id']
    def get_taksist_name(self, taksist):
        # Reverse lookup from the profile back to its User; presumably a
        # one-to-one relation — TODO confirm against taksist.models.
        user = User.objects.get(taxiprofile = taksist)
        return user.first_name
    # def get_taksist_status(self, taksist):
    # try:
    # xstatus = TaxiStatus.objects.get(user_id=taksist.user_id)
    # if xstatus:
    # return xstatus.status
    # except:
    # return 'no status'
    # def get_taksist_category(self, taksist):
    # return TaxiProfile.get_category(taksist)
|
{"/taksist/urls.py": ["/taksist/views.py"], "/yolagcy/views.py": ["/yolagcy/filters.py", "/api/views.py", "/taksist/models.py"], "/api/urls.py": ["/api/views.py"], "/api/views.py": ["/api/serializers.py", "/taksist/models.py"], "/yolagcy/filters.py": ["/taksist/models.py"], "/taksist/migrations of sqlite3/0001_initial.py": ["/taksist/utils.py"], "/taksist/models.py": ["/taksist/utils.py"], "/taksist/forms.py": ["/taksist/models.py"], "/taksist/views.py": ["/taksist/models.py", "/taksist/forms.py"], "/taksist/admin.py": ["/taksist/models.py"], "/api/serializers.py": ["/taksist/models.py"]}
|
39,484,227
|
amanmyrats/taksi
|
refs/heads/master
|
/test.py
|
# import os
# from twilio.rest import Client
# account_sid=os.environ['ACa0b0646156f095b60fc6a159cad21464']
# auth_token=os.environ['1ba680297b4b5ec2f695a961e88e8985']
# client=Client(account_sid, auth_token)
# message=client.messages.create(body="Join Earth's mightiest heroes. Like Kevin Bacon.",from_='+12057360756',to='+99365555833')
# print(message.sid)
# import os
# from twilio.rest import Client
import os
from twilio.rest import Client
import time
# Your Account Sid and Auth Token from twilio.com/console
# and set the environment variables. See http://twil.io/secure
# Fixed: the original passed the secret *values* as os.environ keys
# (os.environ['ACa0...']), which always raises KeyError unless an env var is
# literally named after the SID. The standard variable names from the Twilio
# docs are used instead.
account_sid = os.environ['TWILIO_ACCOUNT_SID']
time.sleep(5)
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
time.sleep(5)
# Send a single SMS; message.sid identifies it in the Twilio console.
message = client.messages \
    .create(
        body="Join Earth's mightiest heroes. Like Kevin Bacon.",
        from_='+12057360756',
        to='+99365555833'
    )
print(message.sid)
time.sleep(5)
|
{"/taksist/urls.py": ["/taksist/views.py"], "/yolagcy/views.py": ["/yolagcy/filters.py", "/api/views.py", "/taksist/models.py"], "/api/urls.py": ["/api/views.py"], "/api/views.py": ["/api/serializers.py", "/taksist/models.py"], "/yolagcy/filters.py": ["/taksist/models.py"], "/taksist/migrations of sqlite3/0001_initial.py": ["/taksist/utils.py"], "/taksist/models.py": ["/taksist/utils.py"], "/taksist/forms.py": ["/taksist/models.py"], "/taksist/views.py": ["/taksist/models.py", "/taksist/forms.py"], "/taksist/admin.py": ["/taksist/models.py"], "/api/serializers.py": ["/taksist/models.py"]}
|
39,484,228
|
amanmyrats/taksi
|
refs/heads/master
|
/taksist/views.py
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.models import User
from django.http import JsonResponse
from .models import TaxiProfile , TaxiStatus, TaxiCategory
from .forms import TaxiProfileModelForm ,TaxiCategoryModelForm, TaxiStatusModelForm, UserModelForm
def home_view(request):
    """Render the landing page with a greeting message."""
    context = {'msg': 'Hello'}
    return render(request, 'home.html', context)
def signup_view(request):
    """Register a new user, log them in, and redirect home.

    Already-authenticated visitors are simply shown the home page.
    """
    if request.user.is_authenticated:
        print('You are already in.')
        print(request.user)
        return render(request, 'home.html', {})
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            # Re-authenticate with the raw password so login() receives a
            # backend-annotated user object.
            new_user = authenticate(
                username=form.cleaned_data.get('username'),
                password=form.cleaned_data.get('password1'),
            )
            login(request, new_user)
            return redirect('home')
    else:
        form = UserCreationForm()
    # Invalid POSTs fall through here with the bound form and its errors.
    return render(request, 'signup.html', {'form': form})
def login_view(request):
    """Authenticate via POSTed username/password.

    Successful logins render the profile page. NOTE(review): failed logins
    also render the profile page (with a placeholder 'msg2'), and the
    localized error message built into 'context' is never shown — the final
    login.html render is only reached on non-POST requests.
    """
    context={}
    if request.user.is_authenticated:
        return redirect('home')
    else:
        print('user is not authenticated')
        if request.method=='POST':
            print('request method is POST')
            username=request.POST.get('username')
            raw_password=request.POST.get('password')
            user=authenticate(username=username, password=raw_password)
            print('this is result of user: ', user)
            if user is not None:
                # redirect ('profile.html')
                print('before login')
                login(request, user)
                print('user authenticated')
                print('this is result', user.is_authenticated)
                return render(request, 'profile.html', {'profile_data':request.user})
            else:
                # This context is dead: the return below ignores it.
                msg='Agza adyňyz ýada parolyňyz ýalňyş, täzeden synanşyň.'
                context={'msg':msg}
                return render(request, 'profile.html', {'msg2':'hello asdg'})
    return render(request, 'login.html', context)
def logout_view(request):
    """End the current session and send the visitor to the login page."""
    logout(request)
    return redirect('login')
def settings_view(request, *args, **kwargs):
    """Profile-settings page: on POST, update-or-create the driver's
    TaxiProfile and TaxiCategory and update the User's name; on GET, render
    the forms pre-filled from the current records.

    NOTE(review): the bare 'except:' clauses below implement a get-or-create
    flow (DoesNotExist -> create) but also swallow every other error;
    narrowing them to the models' DoesNotExist would be safer.
    """
    print('settings started working')
    if request.user.is_authenticated:
        print('user is authenticated')
        if request.method=='POST':
            print('method is POST')
            form_profile=TaxiProfileModelForm(request.POST, request.FILES)
            form_category=TaxiCategoryModelForm(request.POST)
            # form_status=TaxiStatusModelForm(request.POST)
            form_user=UserModelForm(request.POST)
            print('before checkin is_valid')
            if form_profile.is_valid() and form_category.is_valid() and form_user.is_valid():
                user=request.user
                print('before profile')
                # Here check TaxiProfile model, if exists update, if not create new taxe profile
                try:
                    check_profile=TaxiProfile.objects.get(user_id=user.id)
                    # check_profile.user_id=user
                    print('photo none or not: ', form_profile.cleaned_data['user_photo'])
                    # Only overwrite photos when the form supplied something
                    # other than the defaults.
                    if form_profile.cleaned_data['user_photo']!='user_photo/default_taksist.png':
                        check_profile.user_photo=form_profile.cleaned_data['user_photo']
                    if form_profile.cleaned_data['car_photo']!='car_photo/default_car.png':
                        check_profile.car_photo=form_profile.cleaned_data['car_photo']
                    check_profile.mobile=form_profile.cleaned_data['mobile']
                    check_profile.save()
                    print('saved profile')
                except:
                    # No existing profile: save the form without committing,
                    # attach the user, then persist.
                    profile=form_profile.save(False)
                    profile.user_id=user
                    profile.save()
                    print('saved profile with exception')
                # Here check TaxiCategory model, if exists update, if not create new taxe category
                try:
                    check_category=TaxiCategory.objects.get(user_id=user.id)
                    # check_category.user_id=user
                    check_category.name=form_category.cleaned_data['name']
                    check_category.save()
                    print('saved category')
                except:
                    category=form_category.save(False)
                    category.user_id=user
                    category.save()
                    print('saved category with exception')
                # Here check TaxiStatus model, if exists update, if not create new taxe status
                # try:
                # check_status=TaxiStatus.objects.get(user_id=user.id)
                # # check_status.user_id=user
                # check_status.status=form_status.cleaned_data['status']
                # check_status.save()
                # except:
                # status=form_status.save(False)
                # status.user_id=user
                # status.save()
                # Here update the User model data
                try:
                    current_user=User.objects.get(pk=request.user.id)
                    print('user first name before: ', current_user.first_name)
                    current_user.first_name=form_user.cleaned_data['first_name']
                    current_user.last_name=form_user.cleaned_data['last_name']
                    current_user.save()
                    print('user first name after: ', current_user.first_name)
                    print(form_user.cleaned_data['first_name'])
                    print('current user: ', current_user)
                except:
                    print('Exception occured when editing user model.')
                msg='Successfully saved.'
                context={'msg':msg}
                print('return render')
                # return render(request, 'profile.html', context)
                return redirect('profile')
            else:
                msg='Forms are not valid'
                context={'msg':msg}
        else:
            # GET: pre-fill forms. NOTE(review): looks up profile/category by
            # pk=request.user.id, which presumes the records share the user's
            # primary key and raises DoesNotExist when none exist yet —
            # TODO confirm.
            current_profile=TaxiProfile.objects.get(pk=request.user.id)
            current_user=User.objects.get(pk=request.user.id)
            current_category=TaxiCategory.objects.get(pk=request.user.id)
            form_profile=TaxiProfileModelForm(instance=current_profile)
            form_category=TaxiCategoryModelForm(instance=current_category)
            # form_status=TaxiStatusModelForm()
            form_user=UserModelForm(instance=current_user)
            context={'form_profile': form_profile, 'form_category': form_category , 'form_user': form_user }
    else:
        return redirect('home')
    return render(request, 'settings.html', context)
def profile_view(request):
    """Render the profile page; 'msg' reports whether the visitor is
    logged in ('Successful') or not ('Failed')."""
    status = 'Successful' if request.user.is_authenticated else 'Failed'
    return render(request, 'profile.html', {'msg': status})
def awtomenzil_view(request):
    """Render the awtomenzil page with a greeting message."""
    context = {'msg': 'Hello'}
    return render(request, 'awtomenzil.html', context)
def statuschange_view(request):
    """AJAX endpoint: store the 'status' GET parameter on the current
    driver's TaxiStatus row and echo it back as JSON.

    Non-AJAX or non-GET requests now get an explicit 400 response instead of
    the implicit None the old code returned (which made Django raise).
    """
    # request.is_ajax() was deprecated in Django 3.1 and removed in 4.0;
    # test the X-Requested-With header it used to wrap.
    is_ajax = request.headers.get('x-requested-with') == 'XMLHttpRequest'
    if is_ajax and request.method == 'GET':
        print("inside statuschange_view")
        new_status = request.GET.get('status', None)
        user_status = TaxiStatus.objects.get(pk=request.user.id)
        user_status.status = new_status
        user_status.save()
        return JsonResponse({"new_status": new_status}, status=200)
    return JsonResponse({"error": "bad request"}, status=400)
|
{"/taksist/urls.py": ["/taksist/views.py"], "/yolagcy/views.py": ["/yolagcy/filters.py", "/api/views.py", "/taksist/models.py"], "/api/urls.py": ["/api/views.py"], "/api/views.py": ["/api/serializers.py", "/taksist/models.py"], "/yolagcy/filters.py": ["/taksist/models.py"], "/taksist/migrations of sqlite3/0001_initial.py": ["/taksist/utils.py"], "/taksist/models.py": ["/taksist/utils.py"], "/taksist/forms.py": ["/taksist/models.py"], "/taksist/views.py": ["/taksist/models.py", "/taksist/forms.py"], "/taksist/admin.py": ["/taksist/models.py"], "/api/serializers.py": ["/taksist/models.py"]}
|
39,484,229
|
amanmyrats/taksi
|
refs/heads/master
|
/taksist/admin.py
|
from django.contrib import admin
from .models import TaxiProfile , TaxiCategory, TaxiStatus
# Expose the taxi models in the Django admin site.
for model in (TaxiProfile, TaxiCategory, TaxiStatus):
    admin.site.register(model)
|
{"/taksist/urls.py": ["/taksist/views.py"], "/yolagcy/views.py": ["/yolagcy/filters.py", "/api/views.py", "/taksist/models.py"], "/api/urls.py": ["/api/views.py"], "/api/views.py": ["/api/serializers.py", "/taksist/models.py"], "/yolagcy/filters.py": ["/taksist/models.py"], "/taksist/migrations of sqlite3/0001_initial.py": ["/taksist/utils.py"], "/taksist/models.py": ["/taksist/utils.py"], "/taksist/forms.py": ["/taksist/models.py"], "/taksist/views.py": ["/taksist/models.py", "/taksist/forms.py"], "/taksist/admin.py": ["/taksist/models.py"], "/api/serializers.py": ["/taksist/models.py"]}
|
39,484,230
|
amanmyrats/taksi
|
refs/heads/master
|
/taksist/forms.py
|
from django import forms
from .models import TaxiProfile , TaxiStatus, TaxiCategory
from django.contrib.auth.models import User
class TaxiProfileModelForm(forms.ModelForm):
    """Edit form for a driver's car/user photos and phone number."""
    class Meta:
        model=TaxiProfile
        fields=['car_photo', 'user_photo', 'mobile']
class TaxiCategoryModelForm(forms.ModelForm):
    """Edit form for the driver's ride-category name."""
    class Meta:
        model=TaxiCategory
        fields=['name']
class TaxiStatusModelForm(forms.ModelForm):
    """Edit form for the driver's availability status."""
    class Meta:
        model=TaxiStatus
        fields=['status']
class UserModelForm(forms.ModelForm):
    """Edit form for the auth User's first and last name."""
    class Meta:
        model=User
        fields=['first_name', 'last_name']
|
{"/taksist/urls.py": ["/taksist/views.py"], "/yolagcy/views.py": ["/yolagcy/filters.py", "/api/views.py", "/taksist/models.py"], "/api/urls.py": ["/api/views.py"], "/api/views.py": ["/api/serializers.py", "/taksist/models.py"], "/yolagcy/filters.py": ["/taksist/models.py"], "/taksist/migrations of sqlite3/0001_initial.py": ["/taksist/utils.py"], "/taksist/models.py": ["/taksist/utils.py"], "/taksist/forms.py": ["/taksist/models.py"], "/taksist/views.py": ["/taksist/models.py", "/taksist/forms.py"], "/taksist/admin.py": ["/taksist/models.py"], "/api/serializers.py": ["/taksist/models.py"]}
|
39,484,231
|
amanmyrats/taksi
|
refs/heads/master
|
/yolagcy/views.py
|
from django.shortcuts import render
from django.contrib.auth.models import User
def search_view(request):
    """Render the search page listing every registered user as a driver."""
    drivers = User.objects.all()
    return render(request, 'search.html', {'taxi_drivers': drivers})
|
{"/taksist/urls.py": ["/taksist/views.py"], "/yolagcy/views.py": ["/yolagcy/filters.py", "/api/views.py", "/taksist/models.py"], "/api/urls.py": ["/api/views.py"], "/api/views.py": ["/api/serializers.py", "/taksist/models.py"], "/yolagcy/filters.py": ["/taksist/models.py"], "/taksist/migrations of sqlite3/0001_initial.py": ["/taksist/utils.py"], "/taksist/models.py": ["/taksist/utils.py"], "/taksist/forms.py": ["/taksist/models.py"], "/taksist/views.py": ["/taksist/models.py", "/taksist/forms.py"], "/taksist/admin.py": ["/taksist/models.py"], "/api/serializers.py": ["/taksist/models.py"]}
|
39,486,895
|
conskourris/fyp
|
refs/heads/master
|
/patterns_final.py
|
import numpy as np
from definitions.doji import doji_60
from definitions.close_near_high import c_high60
from definitions.close_near_low import c_low122
def no_definition(df, index):
    """Stand-in predicate used when a trend/shape filter is disabled;
    accepts every candle."""
    return True
# Filter configuration for the detectors below. Trend and tall-candle checks
# are currently disabled (always True); doji and close-near-high/low use the
# imported definitions (doji_60, c_high60, c_low122).
downtrend = no_definition
uptrend = no_definition
is_tall = no_definition
is_doji = doji_60
close_near_high = c_high60
close_near_low = c_low122
def two_black_gapping(df={}, get_length=False):
    """Two Black Gapping (bearish continuation, 3 candles): a down gap then
    two declining black candles. Returns (start indexes, pattern length)."""
    length = 3
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        l1 = df['Low'].iloc[i]
        o2, h2, c2 = df['Open'].iloc[i + 1], df['High'].iloc[i + 1], df['Close'].iloc[i + 1]
        o3, c3 = df['Open'].iloc[i + 2], df['Close'].iloc[i + 2]
        if l1 > h2 and o2 > o3 and o3 > c2 and c2 > c3 and downtrend(df, i):
            hits.append(i)
    return hits, length
def above_the_stomach(df={}, get_length=False):
    """Above the Stomach (bullish reversal, 2 candles): after a black candle,
    a white candle opening above the black body's midpoint."""
    length = 2
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        body_mid = (c1 + o1) / 2
        if c1 < o1 and c2 > o2 and o2 > body_mid and downtrend(df, i):
            hits.append(i)
    return hits, length
def belt_hold_bearish(df={}, get_length=False):
    """Bearish Belt Hold (1 candle): a tall black candle opening exactly at
    its high and closing near its low, in an uptrend."""
    length = 1
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, h1, c1 = df['Open'].iloc[i], df['High'].iloc[i], df['Close'].iloc[i]
        if (c1 < o1 and o1 == h1 and uptrend(df, i)
                and close_near_low(df, i) and is_tall(df, i)):
            hits.append(i)
    return hits, length
def belt_hold_bullish(df={}, get_length=False):
    """Bullish Belt Hold (1 candle): a tall white candle opening exactly at
    its low and closing near its high, in a downtrend."""
    length = 1
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, l1, c1 = df['Open'].iloc[i], df['Low'].iloc[i], df['Close'].iloc[i]
        if (c1 > o1 and o1 == l1 and downtrend(df, i)
                and close_near_high(df, i) and is_tall(df, i)):
            hits.append(i)
    return hits, length
def doji_star(df={}, get_length=False):
    """Doji Star (bearish reversal, 2 candles): a tall white candle followed
    by a smaller doji gapping entirely above it, in an uptrend."""
    length = 2
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, h1 = df['Open'].iloc[i], df['High'].iloc[i]
        l1, c1 = df['Low'].iloc[i], df['Close'].iloc[i]
        h2, l2 = df['High'].iloc[i + 1], df['Low'].iloc[i + 1]
        if (c1 > o1 and l2 > h1 and (h1 - l1) > (h2 - l2)
                and uptrend(df, i) and is_tall(df, i) and is_doji(df, i + 1)):
            hits.append(i)
    return hits, length
def engulfing_bearish(df={}, get_length=False):
    """Bearish Engulfing (2 candles): a black body that fully engulfs the
    prior white body, in an uptrend."""
    length = 2
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        if c1 > o1 and c2 < o2 and o2 > c1 and c2 < o1 and uptrend(df, i):
            hits.append(i)
    return hits, length
def last_engulfing_bottom(df={}, get_length=False):
    """Last Engulfing Bottom (2 candles): same body-engulfing geometry as the
    bearish engulfing, but occurring in a downtrend."""
    length = 2
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        if c1 > o1 and c2 < o2 and o2 > c1 and c2 < o1 and downtrend(df, i):
            hits.append(i)
    return hits, length
def last_engulfing_top(df={}, get_length=False):
    """Last Engulfing Top (2 candles): a white body that fully engulfs the
    prior black body, in an uptrend."""
    length = 2
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        if c1 < o1 and c2 > o2 and c2 > o1 and o2 < c1 and uptrend(df, i):
            hits.append(i)
    return hits, length
def three_outside_down(df={}, get_length=False):
    """Three Outside Down (bearish reversal, 3 candles): a bearish engulfing
    pair confirmed by a third black candle closing lower."""
    length = 3
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        o3, c3 = df['Open'].iloc[i + 2], df['Close'].iloc[i + 2]
        if (c1 > o1 and c2 < o2 and c3 < o3
                and o2 > c1 and c2 < o1
                and c3 < c2
                and uptrend(df, i)):
            hits.append(i)
    return hits, length
def three_outside_up(df={}, get_length=False):
    """Three Outside Up (bullish reversal, 3 candles): black, then white,
    then a white candle closing above the second.

    NOTE(review): unlike three_outside_down, there is no explicit engulfing
    check on the second candle here — confirm whether that is intentional.
    """
    length = 3
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        o3, c3 = df['Open'].iloc[i + 2], df['Close'].iloc[i + 2]
        if (c1 < o1 and c2 > o2 and c3 > o3
                and c3 > c2
                and downtrend(df, i)):
            hits.append(i)
    return hits, length
def falling_window(df={}, get_length=False):
    """Falling Window (bearish continuation, 2 candles): two black candles
    where the second's high gaps below the first's low."""
    length = 2
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, l1, c1 = df['Open'].iloc[i], df['Low'].iloc[i], df['Close'].iloc[i]
        o2, h2, c2 = df['Open'].iloc[i + 1], df['High'].iloc[i + 1], df['Close'].iloc[i + 1]
        if c1 < o1 and c2 < o2 and h2 < l1 and downtrend(df, i):
            hits.append(i)
    return hits, length
def rising_window(df={}, get_length=False):
    """Rising Window (bullish continuation, 2 candles): two white candles
    where the second's low gaps above the first's high."""
    length = 2
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, h1, c1 = df['Open'].iloc[i], df['High'].iloc[i], df['Close'].iloc[i]
        o2, l2, c2 = df['Open'].iloc[i + 1], df['Low'].iloc[i + 1], df['Close'].iloc[i + 1]
        if c1 > o1 and c2 > o2 and l2 > h1 and uptrend(df, i):
            hits.append(i)
    return hits, length
def three_line_strike_bearish(df={}, get_length=False):
    """Bearish Three-Line Strike (4 candles): three declining black candles
    followed by a white candle opening below the third close and closing
    above the first open."""
    length = 4
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        o3, c3 = df['Open'].iloc[i + 2], df['Close'].iloc[i + 2]
        o4, c4 = df['Open'].iloc[i + 3], df['Close'].iloc[i + 3]
        if (c1 < o1 and c2 < o2 and c3 < o3 and c4 > o4
                and c1 > c2 and c2 > c3
                and o4 < c3 and c4 > o1
                and downtrend(df, i)):
            hits.append(i)
    return hits, length
def three_line_strike_bullish(df={}, get_length=False):
    """Bullish Three-Line Strike (4 candles): three advancing white candles
    followed by a black candle opening above the third close and closing
    below the first open."""
    length = 4
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        o3, c3 = df['Open'].iloc[i + 2], df['Close'].iloc[i + 2]
        o4, c4 = df['Open'].iloc[i + 3], df['Close'].iloc[i + 3]
        if (c1 > o1 and c2 > o2 and c3 > o3 and c4 < o4
                and c1 < c2 and c2 < c3
                and o4 > c3 and c4 < o1
                and uptrend(df, i)):
            hits.append(i)
    return hits, length
def three_black_crows(df={}, get_length=False):
    """Three Black Crows (bearish reversal, 3 candles): three black candles,
    each opening within the prior body and closing near its own low."""
    length = 3
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        o3, c3 = df['Open'].iloc[i + 2], df['Close'].iloc[i + 2]
        if (c1 < o1 and c2 < o2 and c3 < o3
                and c1 <= o2 and o2 <= o1
                and c2 <= o3 and o3 <= o2
                and uptrend(df, i)
                and close_near_low(df, i + 1)
                and close_near_low(df, i + 2)):
            hits.append(i)
    return hits, length
def evening_star(df={}, get_length=False):
    """Evening Star (bearish reversal, 3 candles): tall white candle, a body
    gapping above it, then a tall black candle closing below the first body's
    midpoint, in an uptrend."""
    length = 3
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, c1 = df['Open'].iloc[i], df['Close'].iloc[i]
        o2, c2 = df['Open'].iloc[i + 1], df['Close'].iloc[i + 1]
        o3, c3 = df['Open'].iloc[i + 2], df['Close'].iloc[i + 2]
        body1_mid = (c1 + o1) / 2
        if (c1 > o1 and c3 < o3
                and c2 > c1 and o2 > c1 and c2 > c3 and o2 > c3
                and c3 < body1_mid
                and uptrend(df, i)
                and is_tall(df, i) and is_tall(df, i + 2)):
            hits.append(i)
    return hits, length
def abandoned_baby_bullish(df={}, get_length=False):
    """Bullish Abandoned Baby (3 candles): a black candle, a doji gapping
    entirely below both neighbors, then a white candle, in a downtrend."""
    length = 3
    if get_length is True:
        return [], length
    hits = []
    for i in range(df.shape[0] - (length - 1)):
        o1, l1, c1 = df['Open'].iloc[i], df['Low'].iloc[i], df['Close'].iloc[i]
        h2 = df['High'].iloc[i + 1]
        o3, l3, c3 = df['Open'].iloc[i + 2], df['Low'].iloc[i + 2], df['Close'].iloc[i + 2]
        if (c1 < o1 and c3 > o3
                and h2 < l1 and h2 < l3
                and downtrend(df, i)
                and is_doji(df, i + 1)):
            hits.append(i)
    return hits, length
def dark_cloud_cover(df={}, get_length=False) :
    """Scan for the two-candle Dark Cloud Cover bearish-reversal pattern.

    Returns (match_start_indexes, pattern_length); with get_length=True the
    scan is skipped and ([], pattern_length) is returned.
    """
    # bear R
    pattern_length = 2
    if get_length is True :
        return [], pattern_length
    hits = []
    for pos in range(df.shape[0] - (pattern_length - 1)) :
        o_a, h_a, l_a, c_a = df['Open'].iloc[pos], df['High'].iloc[pos], df['Low'].iloc[pos], df['Close'].iloc[pos]
        o_b, h_b, l_b, c_b = df['Open'].iloc[pos+1], df['High'].iloc[pos+1], df['Low'].iloc[pos+1], df['Close'].iloc[pos+1]
        # Tall white candle followed by a black candle that opens above the
        # prior close and closes below the midpoint of the prior body.
        # NOTE(review): the textbook Dark Cloud Cover follows an uptrend, but
        # this implementation (per the author's "# down" marker) filters on
        # downtrend(df, pos) -- confirm that this is intentional.
        white_then_black = c_a > o_a and c_b < o_b
        pierces_body = o_b > c_a and c_b < (o_a + c_a) / 2
        if white_then_black and pierces_body :
            if downtrend(df, pos) and is_tall(df, pos) :
                hits.append(pos)
    return hits, pattern_length
def select_at_random(df={}, get_length=False):
    """Pick roughly 1% of rows uniformly at random.

    Serves as a control "pattern" for benchmarking real detectors; returns
    (chosen_indexes, 0), or ([], 0) when get_length=True.
    """
    pattern_length = 0
    if get_length is True :
        return [], pattern_length
    threshold = 0.01
    # One uniform draw per row, kept when it falls under the 1% threshold.
    chosen = [row for row in range(df.shape[0]) if np.random.uniform() <= threshold]
    return chosen, pattern_length
# Registry of every candlestick-pattern detector in this module, each with
# the common signature f(df, get_length=False) -> (indexes, pattern_length).
# NOTE(review): downstream tooling looks detectors up by __name__ (and
# presumably maps them to on-disk index folders) -- verify before renaming
# any entry.
all_patterns_final = [
    two_black_gapping,
    above_the_stomach,
    belt_hold_bearish,
    belt_hold_bullish,
    doji_star,
    engulfing_bearish,
    last_engulfing_bottom,
    last_engulfing_top,
    three_outside_down,
    three_outside_up,
    falling_window,
    rising_window,
    three_line_strike_bearish,
    three_line_strike_bullish,
    three_black_crows,
    evening_star,
    abandoned_baby_bullish,
    dark_cloud_cover,
    select_at_random  # random baseline for benchmarking, not a real pattern
]
|
{"/main.py": ["/patterns_final.py", "/tools.py", "/evaluate_findings.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/definitions/positive_fit.py", "/definitions/positive_mav4.py", "/definitions/positive_mav5.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py", "/definitions/tall_candle.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py", "/trading_strategies/best_strategies.py", "/trading_strategies/exit_after.py", "/trading_strategies/limit_exit0.py", "/trading_strategies/limit_exit1.py", "/trading_strategies/limit_exit2.py", "/trading_strategies/limit_exit3.py", "/trading_strategies/limit_exit5.py", "/trading_strategies/limit_exit6.py", "/trading_strategies/limit_exit7.py", "/trading_strategies/limit_exit8.py", "/trading_strategies/limit_exit9.py", "/trading_strategies.py"], "/trading_strategies/limit_exit0.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit5.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/catchup.py": ["/trading_strategies.py", "/patterns.py", "/tools.py", "/evaluation_tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit3.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/patterns_final.py": ["/definitions/positive_fit.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py"], "/tools.py": ["/patterns_final.py"], "/ml.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/ml_tools.py", "/definitions/positive_fit.py", "/definitions/positive_mav4.py", "/definitions/positive_mav5.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", 
"/definitions/negative_mav4.py", "/definitions/tall_candle.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py", "/trading_strategies/exit_after.py", "/trading_strategies/limit_exit0.py", "/trading_strategies/limit_exit1.py", "/trading_strategies/limit_exit2.py", "/trading_strategies/limit_exit3.py", "/trading_strategies/limit_exit5.py", "/trading_strategies/limit_exit6.py", "/trading_strategies/limit_exit7.py", "/trading_strategies/limit_exit8.py", "/trading_strategies/limit_exit9.py", "/trading_strategies/best_strategies.py"], "/ml_tools.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/trading_strategies/best_strategies.py"], "/trading_strategies/best_strategies.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit6.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit2.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_tools.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_strategies.py"], "/evaluation_tools.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit9.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit8.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/evaluate_findings.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit7.py": ["/trading_strategies.py", 
"/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit1.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/exit_after.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"]}
|
39,486,896
|
conskourris/fyp
|
refs/heads/master
|
/tools.py
|
import os
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib
import numpy as np
from math import floor
from scipy.stats import skew
import mplfinance as mpf
import pandas as pd
import pandas_datareader as web
import random
import pickle
import json
import statistics
def run_snp(pattern, trading, days_forward):
    """Run a pattern detector plus trading strategy over every S&P-100 ticker.

    pattern      -- detector f(df) -> (indexes, pattern_length)
    trading      -- strategy f(df, indexes, pattern_length, days_forward)
    days_forward -- holding horizon forwarded to the strategy

    Returns {ticker: strategy_returns}. Tickers whose historical CSV cannot
    be read are reported and skipped.
    """
    result = {}
    with open('sp100tickers.pickle', 'rb') as f:
        tickers = pickle.load(f)
    for ticker in tickers:
        try:
            df = pd.read_csv(f'historical/{ticker}.csv')
        except Exception as e:  # was a bare except that hid the real error
            print(f'problem opening {ticker}: {e}')
            continue
        indexes, pattern_length = pattern(df)
        returns = trading(df, indexes, pattern_length, days_forward)
        result[ticker] = returns
    return result
def run_ticker(ticker, pattern, strategy, days_forward):
    """Run one detector/strategy pair on a single ticker's historical CSV.

    Returns {ticker: strategy_returns} so the shape matches run_snp's output.
    """
    # The previous version also loaded sp100tickers.pickle here but never
    # used the result; the dead read (and its file dependency) is removed.
    df = pd.read_csv(f'historical/{ticker}.csv')
    indexes, pattern_length = pattern(df)
    returns = strategy(df, indexes, pattern_length, days_forward)
    return {ticker: returns}
def save_pattern_indexes(pattern) :
    """Detect `pattern` on every S&P-100 ticker and persist the match
    indexes to patterns_indexes/<pattern.__name__>/<ticker>.npy.

    Unreadable ticker CSVs are reported and skipped.
    """
    with open('sp100tickers.pickle', 'rb') as f:
        tickers = pickle.load(f)
    pattern_str = pattern.__name__  # loop-invariant, hoisted out of the loop
    out_dir = f'patterns_indexes/{pattern_str}'
    os.makedirs(out_dir, exist_ok=True)  # np.save does not create directories
    for ticker in tickers :
        try :
            df = pd.read_csv(f'historical/{ticker}.csv')
        except Exception as e :  # was a bare except that hid the real error
            print(f'problem opening {ticker}: {e}')
            continue
        indexes, _ = pattern(df)
        np.save(f'{out_dir}/{ticker}.npy', indexes)
        print(f'Saved {ticker} of {pattern_str}')
def save_pattern_final_indexes(pattern) :
    """Detect `pattern` on every S&P-100 ticker and persist the match
    indexes to patterns_final_indexes/<pattern.__name__>/<ticker>.npy.

    Unreadable ticker CSVs are reported and skipped.
    """
    with open('sp100tickers.pickle', 'rb') as f:
        tickers = pickle.load(f)
    pattern_str = pattern.__name__  # loop-invariant, hoisted out of the loop
    out_dir = f'patterns_final_indexes/{pattern_str}'
    os.makedirs(out_dir, exist_ok=True)  # np.save does not create directories
    for ticker in tickers :
        try :
            df = pd.read_csv(f'historical/{ticker}.csv')
        except Exception as e :  # was a bare except that hid the real error
            print(f'problem opening {ticker}: {e}')
            continue
        indexes, _ = pattern(df)
        np.save(f'{out_dir}/{ticker}.npy', indexes)
        print(f'Saved {ticker} of {pattern_str}')
def run_strategy_on_pattern(strategy, pattern, days_forward=0) :
    """Apply `strategy` to the pre-computed saved indexes of `pattern` for
    every S&P-100 ticker.

    Returns {ticker: strategy_returns}; unreadable ticker CSVs are skipped.
    """
    result = {}
    with open('sp100tickers.pickle', 'rb') as f:
        tickers = pickle.load(f)
    # Both values are loop-invariant; previously re-computed per ticker.
    _, pattern_length = pattern(None, get_length=True)
    pattern_str = pattern.__name__
    for ticker in tickers :
        try :
            df = pd.read_csv(f'historical/{ticker}.csv')
        except Exception as e :  # was a bare except that hid the real error
            print(f'problem opening {ticker}: {e}')
            continue
        indexes = np.load(f'patterns_indexes/{pattern_str}/{ticker}.npy')
        returns = strategy(df, indexes, pattern_length, days_forward)
        result[ticker] = returns
    return result
def get_pattern_indexes(pattern) :
    """Load the saved per-ticker match indexes of `pattern`.

    Tickers without a saved .npy file are silently omitted from the result.
    """
    folder = f'patterns_indexes/{pattern.__name__}'
    with open(f'sp100tickers.pickle', 'rb') as f:
        tickers = pickle.load(f)
    loaded = {}
    for ticker in tickers :
        try :
            loaded[ticker] = np.load(f'{folder}/{ticker}.npy')
        except FileNotFoundError:
            pass
    return loaded
def get_pattern_final_indexes(pattern) :
    """Load the saved per-ticker final match indexes of `pattern`.

    Tickers without a saved .npy file are silently omitted from the result.
    """
    folder = f'patterns_final_indexes/{pattern.__name__}'
    with open(f'sp100tickers.pickle', 'rb') as f:
        tickers = pickle.load(f)
    loaded = {}
    for ticker in tickers :
        try :
            loaded[ticker] = np.load(f'{folder}/{ticker}.npy')
        except FileNotFoundError:
            pass
    return loaded
def get_pattern_occurances(pattern) :
    """Count the total saved occurrences of `pattern` across all tickers."""
    per_ticker = get_pattern_indexes(pattern)
    return sum(len(hits) for hits in per_ticker.values())
def get_pattern_final_occurances(pattern) :
    """Count the total saved final occurrences of `pattern` across all tickers."""
    per_ticker = get_pattern_final_indexes(pattern)
    return sum(len(hits) for hits in per_ticker.values())
def filter_pattern_trend(indexes, trend, delay=0) :
    """Keep only the pattern indexes whose candle at index+delay satisfies
    `trend`.

    indexes -- {ticker: iterable of row indexes}; mutated in place and returned
    trend   -- predicate f(df, i) that must return exactly True to keep i
    delay   -- offset added to each index before the trend test
    """
    with open('sp100tickers.pickle', 'rb') as f:
        tickers = pickle.load(f)
    for ticker in tickers :
        try :
            df = pd.read_csv(f'historical/{ticker}.csv')
        except Exception as e :  # was a bare except that hid the real error
            print(f'problem opening {ticker}: {e}')
            continue
        # `is True` kept deliberately: only a genuine bool True passes,
        # matching the original filter semantics.
        indexes[ticker] = [i for i in indexes[ticker] if trend(df, i + delay) is True]
    return indexes
def average_close_high() :
    """Mean of |High - Close| / Close over every candle of every ticker.

    Unreadable ticker CSVs are reported and skipped.
    """
    with open('sp100tickers.pickle', 'rb') as f:
        tickers = pickle.load(f)
    ratios = []
    for ticker in tickers :
        try :
            df = pd.read_csv(f'historical/{ticker}.csv')
        except Exception as e :  # was a bare except that hid the real error
            print(f'problem opening {ticker}: {e}')
            continue
        # Vectorised: the old per-row .iloc loop made O(rows) Python calls
        # per ticker; this computes the same ratios in one pandas expression.
        ratios.extend(((df['High'] - df['Close']).abs() / df['Close']).tolist())
    return statistics.mean(ratios)
def average_close_low() :
    """Mean of |Low - Close| / Close over every candle of every ticker.

    Unreadable ticker CSVs are reported and skipped.
    """
    with open('sp100tickers.pickle', 'rb') as f:
        tickers = pickle.load(f)
    ratios = []
    for ticker in tickers :
        try :
            df = pd.read_csv(f'historical/{ticker}.csv')
        except Exception as e :  # was a bare except that hid the real error
            print(f'problem opening {ticker}: {e}')
            continue
        # Vectorised: the old per-row .iloc loop made O(rows) Python calls
        # per ticker; this computes the same ratios in one pandas expression.
        ratios.extend(((df['Low'] - df['Close']).abs() / df['Close']).tolist())
    return statistics.mean(ratios)
|
{"/main.py": ["/patterns_final.py", "/tools.py", "/evaluate_findings.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/definitions/positive_fit.py", "/definitions/positive_mav4.py", "/definitions/positive_mav5.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py", "/definitions/tall_candle.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py", "/trading_strategies/best_strategies.py", "/trading_strategies/exit_after.py", "/trading_strategies/limit_exit0.py", "/trading_strategies/limit_exit1.py", "/trading_strategies/limit_exit2.py", "/trading_strategies/limit_exit3.py", "/trading_strategies/limit_exit5.py", "/trading_strategies/limit_exit6.py", "/trading_strategies/limit_exit7.py", "/trading_strategies/limit_exit8.py", "/trading_strategies/limit_exit9.py", "/trading_strategies.py"], "/trading_strategies/limit_exit0.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit5.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/catchup.py": ["/trading_strategies.py", "/patterns.py", "/tools.py", "/evaluation_tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit3.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/patterns_final.py": ["/definitions/positive_fit.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py"], "/tools.py": ["/patterns_final.py"], "/ml.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/ml_tools.py", "/definitions/positive_fit.py", "/definitions/positive_mav4.py", "/definitions/positive_mav5.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", 
"/definitions/negative_mav4.py", "/definitions/tall_candle.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py", "/trading_strategies/exit_after.py", "/trading_strategies/limit_exit0.py", "/trading_strategies/limit_exit1.py", "/trading_strategies/limit_exit2.py", "/trading_strategies/limit_exit3.py", "/trading_strategies/limit_exit5.py", "/trading_strategies/limit_exit6.py", "/trading_strategies/limit_exit7.py", "/trading_strategies/limit_exit8.py", "/trading_strategies/limit_exit9.py", "/trading_strategies/best_strategies.py"], "/ml_tools.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/trading_strategies/best_strategies.py"], "/trading_strategies/best_strategies.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit6.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit2.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_tools.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_strategies.py"], "/evaluation_tools.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit9.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit8.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/evaluate_findings.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit7.py": ["/trading_strategies.py", 
"/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit1.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/exit_after.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"]}
|
39,486,897
|
conskourris/fyp
|
refs/heads/master
|
/trading_strategies.py
|
from trading_strategies import *
from patterns_final import *
from tools import *
from evaluation_tools import *
from plotting_tools import *
def sell_after_1(pattern) :
indexes = get_pattern_indexes(pattern)
for ticker in indexes :
for index in indexes[ticker] :
|
{"/main.py": ["/patterns_final.py", "/tools.py", "/evaluate_findings.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/definitions/positive_fit.py", "/definitions/positive_mav4.py", "/definitions/positive_mav5.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py", "/definitions/tall_candle.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py", "/trading_strategies/best_strategies.py", "/trading_strategies/exit_after.py", "/trading_strategies/limit_exit0.py", "/trading_strategies/limit_exit1.py", "/trading_strategies/limit_exit2.py", "/trading_strategies/limit_exit3.py", "/trading_strategies/limit_exit5.py", "/trading_strategies/limit_exit6.py", "/trading_strategies/limit_exit7.py", "/trading_strategies/limit_exit8.py", "/trading_strategies/limit_exit9.py", "/trading_strategies.py"], "/trading_strategies/limit_exit0.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit5.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/catchup.py": ["/trading_strategies.py", "/patterns.py", "/tools.py", "/evaluation_tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit3.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/patterns_final.py": ["/definitions/positive_fit.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py"], "/tools.py": ["/patterns_final.py"], "/ml.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/ml_tools.py", "/definitions/positive_fit.py", "/definitions/positive_mav4.py", "/definitions/positive_mav5.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", 
"/definitions/negative_mav4.py", "/definitions/tall_candle.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py", "/trading_strategies/exit_after.py", "/trading_strategies/limit_exit0.py", "/trading_strategies/limit_exit1.py", "/trading_strategies/limit_exit2.py", "/trading_strategies/limit_exit3.py", "/trading_strategies/limit_exit5.py", "/trading_strategies/limit_exit6.py", "/trading_strategies/limit_exit7.py", "/trading_strategies/limit_exit8.py", "/trading_strategies/limit_exit9.py", "/trading_strategies/best_strategies.py"], "/ml_tools.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/trading_strategies/best_strategies.py"], "/trading_strategies/best_strategies.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit6.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit2.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_tools.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_strategies.py"], "/evaluation_tools.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit9.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit8.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/evaluate_findings.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit7.py": ["/trading_strategies.py", 
"/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit1.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/exit_after.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"]}
|
39,486,898
|
conskourris/fyp
|
refs/heads/master
|
/main.py
|
import os
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib
import numpy as np
from math import floor
from scipy.stats import skew
import mplfinance as mpf
import pandas as pd
import pandas_datareader as web
import random
import pickle
import json
from trading_strategies import *
from patterns_final import *
from tools import *
from evaluation_tools import *
from plotting_tools import *
from definitions.positive_fit import *
from definitions.positive_mav3 import *
from definitions.positive_mav4 import *
from definitions.positive_mav5 import *
from definitions.negative_fit import *
from definitions.negative_mav3 import *
from definitions.negative_mav4 import *
from definitions.negative_mav5 import *
from definitions.tall_candle import *
from definitions.doji import *
from definitions.close_near_high import *
from definitions.close_near_low import *
# One-off driver: plot the saved return/risk profile of a single pattern.
# To regenerate the underlying .npz data first, uncomment the two lines below:
# rets, stds, occs = method_on_pattern(positive_trends, three_line_strike_bullish, True, 1)
# np.savez('definition_results/three_line_strike_bullish.npz', rets, stds, occs)
# Plot the saved return-risk profile twice, into figures f=1 and f=2;
# use_occ presumably toggles occurrence-based weighting -- confirm in
# plotting_tools before relying on that reading.
plot_result_data('three_line_strike_bullish', positive_trends, use_occ=False, best_fit=False, f=1)
plot_result_data('three_line_strike_bullish', positive_trends, use_occ=True, best_fit=False, f=2)
plt.show()
|
{"/main.py": ["/patterns_final.py", "/tools.py", "/evaluate_findings.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/definitions/positive_fit.py", "/definitions/positive_mav4.py", "/definitions/positive_mav5.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py", "/definitions/tall_candle.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py", "/trading_strategies/best_strategies.py", "/trading_strategies/exit_after.py", "/trading_strategies/limit_exit0.py", "/trading_strategies/limit_exit1.py", "/trading_strategies/limit_exit2.py", "/trading_strategies/limit_exit3.py", "/trading_strategies/limit_exit5.py", "/trading_strategies/limit_exit6.py", "/trading_strategies/limit_exit7.py", "/trading_strategies/limit_exit8.py", "/trading_strategies/limit_exit9.py", "/trading_strategies.py"], "/trading_strategies/limit_exit0.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit5.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/catchup.py": ["/trading_strategies.py", "/patterns.py", "/tools.py", "/evaluation_tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit3.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/patterns_final.py": ["/definitions/positive_fit.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py"], "/tools.py": ["/patterns_final.py"], "/ml.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/ml_tools.py", "/definitions/positive_fit.py", "/definitions/positive_mav4.py", "/definitions/positive_mav5.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", 
"/definitions/negative_mav4.py", "/definitions/tall_candle.py", "/definitions/doji.py", "/definitions/close_near_high.py", "/definitions/close_near_low.py", "/trading_strategies/exit_after.py", "/trading_strategies/limit_exit0.py", "/trading_strategies/limit_exit1.py", "/trading_strategies/limit_exit2.py", "/trading_strategies/limit_exit3.py", "/trading_strategies/limit_exit5.py", "/trading_strategies/limit_exit6.py", "/trading_strategies/limit_exit7.py", "/trading_strategies/limit_exit8.py", "/trading_strategies/limit_exit9.py", "/trading_strategies/best_strategies.py"], "/ml_tools.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_tools.py", "/trading_strategies/best_strategies.py"], "/trading_strategies/best_strategies.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit6.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit2.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_tools.py": ["/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py", "/trading_strategies.py"], "/evaluation_tools.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit9.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit8.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/evaluate_findings.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/definitions/negative_fit.py", "/definitions/negative_mav3.py", "/definitions/negative_mav4.py"], "/trading_strategies/limit_exit7.py": ["/trading_strategies.py", 
"/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/limit_exit1.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"], "/trading_strategies/exit_after.py": ["/trading_strategies.py", "/patterns_final.py", "/tools.py", "/evaluation_tools.py", "/plotting_tools.py"]}
|
39,495,702
|
xdiabetes/xproject_api
|
refs/heads/master
|
/xapp/api_v1/consumers/DispatchConsumer.py
|
import asyncio
from django.http.response import Http404
from rest_framework.exceptions import APIException
from xapp.api_v1.consumers.BasicConsumer import BasicConsumer
from xapp.api_v1.consumers.helpers import SocketException, ConsumerException
from .ConsumerView import Path, ConsumerRequest, WatcherList, ConsumerResponse
from .paths import paths
# load all consumer paths
# noinspection PyStatementEffect
from ..helpers import APIBadRequest
paths
class DispatchConsumer(BasicConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.watchers = []
@staticmethod
async def execute_watcher(gp_name):
# notify the watchers
gp_name = Path.flatten(gp_name)
members = WatcherList.members(gp_name).copy()
for member in members:
# create a result generator to be used in a dispatcher consumer
# to generate consistent responses
async def _result_generator(request, **kwargs):
return await member.execute()
await DispatchConsumer.send_result(dispatcher=member.request.dispatcher, source=gp_name,
request=member.request,
result_generator=_result_generator)
@staticmethod
async def notify_watcher_endpoint(endpoint_path):
"""flatten Endpoint path is it's watcher gp name"""
gp_name = Path.flatten(endpoint_path)
await DispatchConsumer.execute_watcher(gp_name)
@staticmethod
async def send_result(dispatcher, source, result_generator, request, **kwargs):
"""calls a result generator and catches api bad request exceptions and sends
appropriate response
this function is used to create consistent response objects when results
are generated in different places
"""
try:
result = await result_generator(request, **kwargs)
await dispatcher.view_response(result, source=source)
except APIBadRequest as e:
await dispatcher.view_response(ConsumerResponse({}, e.status_code),
source=source)
async def receive(self, text_data=None, bytes_data=None):
try:
raw_request = await self.get_request(text_data)
view, kwargs = Path.match(raw_request.get('endpoint'))
authorization = raw_request.get('headers').get('Authorization')
# get the user
if not authorization:
user = None
else:
user = await self.authenticate(authorization)
# create ConsumerRequest object
request = ConsumerRequest(data=raw_request.get('data'), method=raw_request.get('method'),
user=user, scope=self.scope, channel_name=self.channel_name,
channel_layer=self.channel_layer, dispatcher=self,
request_endpoint=raw_request.get('endpoint'))
method_function, affected_consumer_apis, view_obj = await view(request, **kwargs)
# report if APIBadRequest happens
# result = await method_function(request, **kwargs)
await self.send_result(dispatcher=self,
source=view_obj.get_watch_gp_name(),
request=request,
result_generator=method_function,
**kwargs)
# notify the watchers
for affected_consumer_api in affected_consumer_apis:
await self.notify_watcher_endpoint(affected_consumer_api)
except (ConsumerException, Http404) as e:
await self.send_back_response(str(e), 400)
except APIException as e:
await self.send_back_response(str(e), e.status_code)
except SocketException:
"""
Socket exceptions are meat to stop the code
proper error handling must be done before throwing this exception
"""
pass
async def connect(self):
# accept any incoming connections
self.groups = []
self.watchers = []
await self.accept()
async def disconnect(self, code):
    """On socket close, deregister every watcher owned by this channel."""
    for registered_watcher in self.watchers:
        WatcherList.remove(registered_watcher)
|
{"/user_profile/models.py": ["/location/models.py", "/user_profile/helpers.py"], "/diabo/tests.py": ["/diabo/models.py", "/job/models.py", "/user_profile/serializers.py", "/user_profile/tests.py"], "/job/tests.py": ["/job/models.py", "/user_profile/tests.py"], "/diabo/views.py": ["/diabo/models.py", "/job/models.py", "/diabo/serializers.py", "/job/serializers.py", "/user_profile/permissions.py"], "/diabo/urls.py": ["/diabo/views.py"], "/walking_tracker/admin.py": ["/walking_tracker/models.py"], "/location/serializers.py": ["/location/models.py"], "/diabo/serializers.py": ["/diabo/models.py", "/job/serializers.py", "/user_profile/serializers.py"], "/user_profile/migrations/0001_initial.py": ["/user_profile/helpers.py"], "/user_profile/admin.py": ["/user_profile/models.py"], "/location/admin.py": ["/location/models.py"], "/diabo/models.py": ["/job/models.py", "/user_profile/models.py"], "/diabetes_therapy/helpers.py": ["/diabetes_therapy/models.py", "/diabetes_therapy/serializes.py"], "/user_profile/tests.py": ["/location/models.py", "/user_profile/models.py"], "/diabetes_therapy/tests.py": ["/diabetes_therapy/models.py", "/user_profile/models.py", "/user_profile/tests.py"], "/user_profile/urls.py": ["/user_profile/views.py"], "/user_profile/views.py": ["/user_profile/models.py", "/user_profile/permissions.py", "/user_profile/serializers.py"], "/job/admin.py": ["/job/models.py"], "/diabetes_therapy/serializes.py": ["/diabetes_therapy/models.py"], "/diabetes_therapy/urls.py": ["/diabetes_therapy/views.py"], "/diabetes_therapy/views.py": ["/diabetes_therapy/helpers.py", "/diabetes_therapy/models.py", "/diabetes_therapy/serializes.py", "/user_profile/permissions.py"], "/user_profile/serializers.py": ["/location/serializers.py", "/user_profile/helpers.py", "/user_profile/models.py"], "/walking_tracker/models.py": ["/user_profile/models.py"], "/job/serializers.py": ["/job/models.py"], "/walking_tracker/serializers.py": ["/walking_tracker/models.py"], 
"/walking_tracker/views.py": ["/walking_tracker/models.py", "/walking_tracker/serializers.py"], "/location/views.py": ["/location/models.py", "/location/serializers.py"], "/diabetes_therapy/admin.py": ["/diabetes_therapy/models.py"], "/location/urls.py": ["/location/views.py"], "/location/tests.py": ["/location/models.py", "/location/serializers.py", "/user_profile/models.py"], "/walking_tracker/urls.py": ["/walking_tracker/views.py"], "/job/urls.py": ["/diabo/views.py"], "/xapp/api_v1/consumers/DispatchConsumer.py": ["/xapp/api_v1/consumers/BasicConsumer.py", "/xapp/api_v1/consumers/helpers.py", "/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/paths.py", "/xapp/api_v1/helpers.py"], "/xapp/api_v1/consumers/ConsumerView.py": ["/xapp/api_v1/consumers/helpers.py"], "/xapp/api_v1/modules/payment/common.py": ["/xapp/api_v1/consts.py"], "/xapp/api_v1/views/UserProfileViews.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/permissions.py", "/xapp/api_v1/serializers/UserProfileSerializrs.py", "/xapp/models.py"], "/xapp/api_v1/serializers/UserProfileSerializrs.py": ["/xapp/api_v1/consts.py", "/xapp/models.py"], "/xapp/api_v1/modules/payment/vandar.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/modules/payment/common.py", "/xapp/models.py", "/xapp/api_v1/modules/payment/index.py"], "/xapp/api_v1/urls.py": ["/xapp/api_v1/views/UserProfileViews.py"], "/xapp/api_v1/modules/payment/pep.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/helpers.py", "/xapp/api_v1/modules/payment/common.py", "/xapp/api_v1/modules/payment/index.py"], "/xapp/api_v1/consumers/paths.py": ["/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/consumer_views.py"], "/xapp/api_v1/consumers/consumer_views.py": ["/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/helpers.py", "/xapp/api_v1/helpers.py", "/xapp/api_v1/permissions.py", "/xapp/models.py"], "/xapp/migrations/0001_initial.py": ["/xapp/api_v1/helpers.py"], "/xapp/api_v1/modules/payment/index.py": 
["/xapp/api_v1/modules/payment/pep.py", "/xapp/api_v1/modules/payment/vandar.py"], "/xapp/models.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/helpers.py"], "/xapp/api_v1/helpers.py": ["/xapp/api_v1/consts.py", "/xapp/models.py", "/xapp/api_v1/consumers/DispatchConsumer.py"], "/xapp/api_v1/permissions.py": ["/xapp/models.py", "/xapp/api_v1/consts.py"], "/xapp/api_v1/consumers/BasicConsumer.py": ["/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/helpers.py"]}
|
39,495,703
|
xdiabetes/xproject_api
|
refs/heads/master
|
/sms/migrations/0007_auto_20191010_1300.py
|
# Generated by Django 2.2.4 on 2019-10-10 13:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``sms`` app (2019-10-10).

    Adds Meta verbose names to Message, and help_text / verbose_name
    metadata to the Message and Operator char/int fields. No column
    types or constraints change beyond the declared max_lengths.
    """

    dependencies = [
        ('sms', '0006_auto_20190623_1410'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='message',
            options={'verbose_name': 'Message', 'verbose_name_plural': 'Messages'},
        ),
        migrations.AlterField(
            model_name='message',
            name='message',
            field=models.CharField(help_text='Message Text', max_length=500, verbose_name='Message Text'),
        ),
        migrations.AlterField(
            model_name='message',
            name='to',
            field=models.CharField(help_text='Recipient of the message', max_length=15, verbose_name='Recipient of the message'),
        ),
        migrations.AlterField(
            model_name='operator',
            name='api_endpoint',
            field=models.CharField(help_text='API Endpoint', max_length=500, verbose_name='API Endpoint'),
        ),
        migrations.AlterField(
            model_name='operator',
            name='name',
            field=models.CharField(help_text='A name for the operator', max_length=255, verbose_name='Operator Name'),
        ),
        migrations.AlterField(
            model_name='operator',
            name='password',
            field=models.CharField(help_text='Password given by operator', max_length=255, verbose_name='Password'),
        ),
        migrations.AlterField(
            model_name='operator',
            name='retry_gap_time',
            field=models.IntegerField(help_text='Time in minutes before you can try to send a message again', verbose_name='Retry Gap Time'),
        ),
        migrations.AlterField(
            model_name='operator',
            name='sender',
            field=models.CharField(help_text='The operator phone number', max_length=15, verbose_name='Sender Phone Number'),
        ),
        migrations.AlterField(
            model_name='operator',
            name='username',
            field=models.CharField(help_text='User name given by operator', max_length=255, verbose_name='Username'),
        ),
    ]
|
{"/user_profile/models.py": ["/location/models.py", "/user_profile/helpers.py"], "/diabo/tests.py": ["/diabo/models.py", "/job/models.py", "/user_profile/serializers.py", "/user_profile/tests.py"], "/job/tests.py": ["/job/models.py", "/user_profile/tests.py"], "/diabo/views.py": ["/diabo/models.py", "/job/models.py", "/diabo/serializers.py", "/job/serializers.py", "/user_profile/permissions.py"], "/diabo/urls.py": ["/diabo/views.py"], "/walking_tracker/admin.py": ["/walking_tracker/models.py"], "/location/serializers.py": ["/location/models.py"], "/diabo/serializers.py": ["/diabo/models.py", "/job/serializers.py", "/user_profile/serializers.py"], "/user_profile/migrations/0001_initial.py": ["/user_profile/helpers.py"], "/user_profile/admin.py": ["/user_profile/models.py"], "/location/admin.py": ["/location/models.py"], "/diabo/models.py": ["/job/models.py", "/user_profile/models.py"], "/diabetes_therapy/helpers.py": ["/diabetes_therapy/models.py", "/diabetes_therapy/serializes.py"], "/user_profile/tests.py": ["/location/models.py", "/user_profile/models.py"], "/diabetes_therapy/tests.py": ["/diabetes_therapy/models.py", "/user_profile/models.py", "/user_profile/tests.py"], "/user_profile/urls.py": ["/user_profile/views.py"], "/user_profile/views.py": ["/user_profile/models.py", "/user_profile/permissions.py", "/user_profile/serializers.py"], "/job/admin.py": ["/job/models.py"], "/diabetes_therapy/serializes.py": ["/diabetes_therapy/models.py"], "/diabetes_therapy/urls.py": ["/diabetes_therapy/views.py"], "/diabetes_therapy/views.py": ["/diabetes_therapy/helpers.py", "/diabetes_therapy/models.py", "/diabetes_therapy/serializes.py", "/user_profile/permissions.py"], "/user_profile/serializers.py": ["/location/serializers.py", "/user_profile/helpers.py", "/user_profile/models.py"], "/walking_tracker/models.py": ["/user_profile/models.py"], "/job/serializers.py": ["/job/models.py"], "/walking_tracker/serializers.py": ["/walking_tracker/models.py"], 
"/walking_tracker/views.py": ["/walking_tracker/models.py", "/walking_tracker/serializers.py"], "/location/views.py": ["/location/models.py", "/location/serializers.py"], "/diabetes_therapy/admin.py": ["/diabetes_therapy/models.py"], "/location/urls.py": ["/location/views.py"], "/location/tests.py": ["/location/models.py", "/location/serializers.py", "/user_profile/models.py"], "/walking_tracker/urls.py": ["/walking_tracker/views.py"], "/job/urls.py": ["/diabo/views.py"], "/xapp/api_v1/consumers/DispatchConsumer.py": ["/xapp/api_v1/consumers/BasicConsumer.py", "/xapp/api_v1/consumers/helpers.py", "/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/paths.py", "/xapp/api_v1/helpers.py"], "/xapp/api_v1/consumers/ConsumerView.py": ["/xapp/api_v1/consumers/helpers.py"], "/xapp/api_v1/modules/payment/common.py": ["/xapp/api_v1/consts.py"], "/xapp/api_v1/views/UserProfileViews.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/permissions.py", "/xapp/api_v1/serializers/UserProfileSerializrs.py", "/xapp/models.py"], "/xapp/api_v1/serializers/UserProfileSerializrs.py": ["/xapp/api_v1/consts.py", "/xapp/models.py"], "/xapp/api_v1/modules/payment/vandar.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/modules/payment/common.py", "/xapp/models.py", "/xapp/api_v1/modules/payment/index.py"], "/xapp/api_v1/urls.py": ["/xapp/api_v1/views/UserProfileViews.py"], "/xapp/api_v1/modules/payment/pep.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/helpers.py", "/xapp/api_v1/modules/payment/common.py", "/xapp/api_v1/modules/payment/index.py"], "/xapp/api_v1/consumers/paths.py": ["/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/consumer_views.py"], "/xapp/api_v1/consumers/consumer_views.py": ["/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/helpers.py", "/xapp/api_v1/helpers.py", "/xapp/api_v1/permissions.py", "/xapp/models.py"], "/xapp/migrations/0001_initial.py": ["/xapp/api_v1/helpers.py"], "/xapp/api_v1/modules/payment/index.py": 
["/xapp/api_v1/modules/payment/pep.py", "/xapp/api_v1/modules/payment/vandar.py"], "/xapp/models.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/helpers.py"], "/xapp/api_v1/helpers.py": ["/xapp/api_v1/consts.py", "/xapp/models.py", "/xapp/api_v1/consumers/DispatchConsumer.py"], "/xapp/api_v1/permissions.py": ["/xapp/models.py", "/xapp/api_v1/consts.py"], "/xapp/api_v1/consumers/BasicConsumer.py": ["/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/helpers.py"]}
|
39,495,704
|
xdiabetes/xproject_api
|
refs/heads/master
|
/xapp/api_v1/consumers/ConsumerView.py
|
from abc import ABC
from django.utils.translation import gettext as _
from rest_framework import serializers, status
from rest_framework.permissions import BasePermission
from xapp.api_v1.consumers.helpers import ConsumerException, MODIFY_METHODS, WATCH, get_object_or_404
class NotImplementedConsumer(ConsumerException):
    """Raised when a consumer view verb (get/post/put/delete) or a required
    attribute (queryset, serializer_class) has not been provided by a subclass."""
    pass
class Path:
    """
    A route: maps a slash-separated pattern to a consumer view function.

    A pattern term wrapped in angle brackets (e.g. ``<pk>``) captures the
    corresponding target segment as a keyword argument. All registered
    paths live in the class-level ``paths_list``.
    """
    paths_list = []

    def __init__(self, pattern, view, name=None):
        self.pattern = pattern
        self.view = view
        self.name = name

    @staticmethod
    def arg(term):
        """
        Check if the given pattern term is an argument placeholder.

        :param term: one slash-separated term of a pattern
        :return: the argument name if term is ``<name>``, False otherwise
        """
        if len(term) < 3:
            return False
        if term[0] == "<" and term[-1] == ">":
            return term.replace("<", "").replace(">", "")
        # Explicit False instead of an implicit None so callers get a
        # consistent falsy sentinel.
        return False

    def get_arg_names(self):
        """
        :return: list of argument names appearing in this path's pattern
        """
        args = []
        for term in self.pattern.split("/"):
            the_arg = Path.arg(term)
            if the_arg:
                args += [the_arg]
        return args

    @staticmethod
    def reverse(name, kwargs=None):
        """
        Return the concrete URL for the path registered under *name*.

        :param name: the ``name`` a path was registered with
        :param kwargs: mapping of argument name -> value to substitute
        :raises Exception: if no path with that name exists
        """
        # None default instead of a shared mutable {} default argument.
        kwargs = kwargs or {}
        the_path = None
        for consumer_path in Path.paths_list:
            if consumer_path.name == name:
                the_path = consumer_path
                break
        if not the_path:
            raise Exception(_("Path name not found"))
        url = the_path.pattern
        for arg in the_path.get_arg_names():
            # Replace the whole "<arg>" token, not the bare name, so a
            # literal segment that happens to equal an argument name
            # (e.g. pattern "pk/<pk>") is left intact.
            url = url.replace("<%s>" % arg, str(kwargs[arg]))
        return url.replace('<', '').replace('>', '')

    @staticmethod
    def match(target):
        """
        Match the given target path against the registered consumer paths.

        :param target: endpoint string from an incoming frame
        :return: (view, kwargs) for the first pattern that matches
        :raises ConsumerException: when no registered pattern matches
        """
        kwargs = {}
        target_terms = target.split("/")
        for consumer_path in Path.paths_list:
            path_terms = consumer_path.pattern.split("/")
            if len(path_terms) != len(target_terms):
                continue
            found = True
            for i in range(len(path_terms)):
                if Path.arg(path_terms[i]):
                    # Placeholder term: capture the target segment.
                    kwargs[Path.arg(path_terms[i])] = target_terms[i]
                elif path_terms[i] != target_terms[i]:
                    # Literal mismatch: discard captures and try the next path.
                    kwargs = {}
                    found = False
                    break
            if found:
                return consumer_path.view, kwargs
        raise ConsumerException(_("Endpoint not found: %s" % target))

    @staticmethod
    def flatten(absolute_path):
        """Turn a slash path into a dotted group name (channels-safe)."""
        return absolute_path.replace("/", ".")

    @staticmethod
    def normalize(flatten_path):
        """Inverse of flatten: dotted group name back to a slash path."""
        return flatten_path.replace('.', "/")
def path(pattern, view, name=None):
    """
    Register a consumer path in the global route table.

    :param pattern: slash-separated pattern; ``<name>`` terms capture kwargs
    :param view: the consumer view callable to dispatch to
    :param name: optional name for reverse lookup
    :return: None (the path is appended to ``Path.paths_list``)
    """
    Path.paths_list.append(Path(pattern, view, name))
class ConsumerRequest:
    """
    Request object handed to consumer views — the websocket analogue of an
    HTTP request (payload, method, user, channel plumbing).
    """

    def __init__(self, data, method, user,
                 scope, channel_name, channel_layer,
                 request_endpoint, dispatcher):
        self.data = data
        self.method = method
        self.user = user
        self.scope = scope
        self.channel_name = channel_name
        self.channel_layer = channel_layer
        self.request_endpoint = request_endpoint
        self.dispatcher = dispatcher

    def build_absolute_uri(self, location=None):
        """Best-effort absolute URI for *location*, derived from the scope.

        Uses the 'server' (host, port) pair when present, otherwise falls
        back to the third header entry's value with an https:// scheme.
        """
        server = self.scope.get('server')
        if server:
            host = "%s:%s" % (str(server[0]), str(server[1])) + "//"
            return host + location
        host = self.scope.get('headers')[2][1].decode('utf-8') + '//'
        return 'https://' + host + location
class ConsumerResponse:
    """
    Consumer response structure: a serializable payload plus an
    HTTP-like status code.
    """

    def __init__(self, data=None, status=200):
        """
        :param data: serializable payload; defaults to a fresh empty dict
        :param status: HTTP-like status code
        """
        self.status = status
        # A fresh dict per instance: the previous mutable default ({})
        # was shared by every response created without an explicit payload.
        self.data = {} if data is None else data
class BaseConsumerView(ABC):
    """
    Abstract Consumer View.

    Channels-side analogue of a DRF APIView: subclasses configure
    queryset / serializer_class / permission_classes and override the
    async verb handlers (get/post/put/delete) they support.
    """
    # Subclass configuration (mirrors DRF generic views).
    queryset = None
    serializer_class = None
    permission_classes = None
    lookup_field = 'pk'
    lookup_url_kwarg = None
    # Iterable of MethodPathEffect instances: which other endpoints this
    # view's methods affect (their watchers are re-notified after a call).
    affected_method_paths = None

    def __init__(self):
        self.kwargs = {}
        self.request = None

    @classmethod
    def as_view(cls):
        # The bound coroutine used as the routed entry point (see path()).
        return cls()._as_view

    def _init_(self, request: ConsumerRequest, **kwargs):
        """
        Check permissions, dispatch the proper view function based on method
        :param request:
        :param kwargs:
        :return: view function
        """
        self.kwargs = kwargs
        self.request = request
        # check permission
        for permission in self.get_permissions():
            if not permission.has_permission(request, self):
                raise ConsumerException(_("Permission Denied"))
        # get affected consumer views
        paths = []
        affected_paths = self.get_affected_method_paths()
        if affected_paths:
            for affected_path in affected_paths:
                assert type(affected_path) == \
                    MethodPathEffect, "TypeError affect consumer classes" \
                                      " must be an instance of MethodAffectConsumer"
                if request.method in affected_path.methods:
                    paths += [affected_path.absolute_path]
        # if request is watch, add the current channel to the endpoint group
        if request.method == WATCH:
            watcher = WatcherList(view=self, request=request, kwargs=kwargs)
            WatcherList.add(watcher)
        return self.dispatch(request.method), paths, self

    async def _as_view(self, request: ConsumerRequest, bypass_permissions=False, **kwargs):
        # NOTE(review): bypass_permissions is accepted but never used —
        # permission checks in _init_ always run; confirm intent.
        assert type(request) == ConsumerRequest, \
            "request type must be ConsumerRequest not %s" % type(request)
        # create a new view instance (per-request state isolation)
        _view = self.__class__()._init_(request, **kwargs)
        return _view

    def get_affected_method_paths(self):
        """
        :return: Classes that the execution of the this view affects
        """
        return self.affected_method_paths

    def get_watch_gp_name(self):
        # Group name that watchers of this endpoint are registered under.
        return Path.flatten(self.request.request_endpoint)

    def get_permissions(self):
        """
        Instantiates and returns the list of permissions that this view requires.
        """
        permissions = []
        if not self.permission_classes:
            return []
        for permission_class in self.permission_classes:
            assert issubclass(permission_class, BasePermission), \
                "permission must be of type BasePermission not %s" % type(permission_class)
            permissions += [permission_class()]
        return permissions

    def dispatch(self, method):
        """
        :param method:
        :return: proper view function based on method
        """
        # Note: 'watch' is served by the same handler as 'get'.
        _dispatch = {
            'get': self.get, 'post': self.post, 'watch': self.get,
            'put': self.put, 'delete': self.delete
        }
        return _dispatch[method.lower()]

    def get_queryset(self):
        if not self.queryset:
            raise NotImplementedConsumer(_("You should either set queryset or override get_queryset"))
        return self.queryset

    def get_serializer_class(self):
        if not self.serializer_class:
            raise NotImplementedConsumer(_("You should either set serializer_class or override get_serializer_class"))
        return self.serializer_class

    def get_serializer_context(self):
        # Extra context made available to the serializer (DRF convention).
        return {
            'request': self.request,
            'view': self
        }

    def get_serializer(self, *args, **kwargs):
        serializer_class = self.get_serializer_class()
        kwargs['context'] = self.get_serializer_context()
        return serializer_class(*args, **kwargs)

    async def get(self, request, *args, **kwargs):
        raise NotImplementedConsumer(_("GET method not implemented"))

    async def post(self, request, *args, **kwargs):
        raise NotImplementedConsumer(_("POST method not implemented"))

    async def put(self, request, *args, **kwargs):
        raise NotImplementedConsumer(_("PUT method not implemented"))

    async def delete(self, request, *args, **kwargs):
        raise NotImplementedConsumer(_("DELETE method not implemented"))
class MethodPathEffect:
    """
    Declares which request methods, when executed on a view, affect the
    consumer endpoint mounted at ``absolute_path`` (so that endpoint's
    watchers are re-notified after such a call).
    """

    def __init__(self, absolute_path, methods=MODIFY_METHODS):
        self.absolute_path = absolute_path
        self.methods = methods
class WatcherList:
    """
    Registry of "watch" subscriptions.

    Maps a flattened endpoint group name to the set of watchers
    (view + request + kwargs) to re-run and notify whenever that
    endpoint's data changes.
    """
    watcher_list_gp_name_based = {}
    # NOTE(review): declared but never populated in this file — confirm
    # whether channel-name-based lookup was ever wired up.
    watcher_list_channel_based = {}

    def __init__(self, view, request: ConsumerRequest, kwargs):
        self.view = view
        self.request = request
        self.kwargs = kwargs

    @staticmethod
    def add(watcher):
        """Register *watcher* under its view's group name, and remember it
        on the owning dispatcher so disconnect() can clean it up."""
        gp_name = watcher.view.get_watch_gp_name()
        if gp_name not in WatcherList.watcher_list_gp_name_based.keys():
            WatcherList.watcher_list_gp_name_based[gp_name] = set()
        WatcherList.watcher_list_gp_name_based[gp_name].add(watcher)
        watcher.request.dispatcher.groups += [gp_name, ]
        watcher.request.dispatcher.watchers += [watcher, ]

    @staticmethod
    def remove(watcher):
        """Drop *watcher* from its group; a no-op when the group or the
        watcher is already gone (best-effort, like the group check)."""
        gp_name = watcher.view.get_watch_gp_name()
        if gp_name not in WatcherList.watcher_list_gp_name_based.keys():
            return
        # discard() instead of remove(): a second removal (e.g. repeated
        # disconnect handling) previously raised KeyError.
        WatcherList.watcher_list_gp_name_based[gp_name].discard(watcher)

    @staticmethod
    def remove_by_channel_name(channel_name):
        # TODO: not implemented — would need watcher_list_channel_based
        # (or a scan of watcher_list_gp_name_based) to be useful.
        pass

    @staticmethod
    def members(gp_name):
        """:return: the set of watchers registered for *gp_name* (empty if none)."""
        return WatcherList.watcher_list_gp_name_based.get(gp_name, set())

    async def execute(self):
        """Re-run the watched view's GET handler and return its result.

        NOTE(review): kwargs is passed as a single positional argument,
        not expanded (**self.kwargs) — confirm the get() implementations
        expect the dict inside *args.
        """
        result = await self.view.get(self.request, self.kwargs)
        # Removed an unreachable `await self.request.dispatcher.view_response(...)`
        # statement that followed this return and could never execute.
        return result
class ConsumerCreateAPIView(BaseConsumerView):
    """Consumer view that creates an object from a POST payload."""

    def create(self, request, *args, **kwargs):
        """Validate request.data with the configured serializer, persist it,
        and return a 201 response. Serializer validation errors are
        re-raised as ConsumerException for the dispatcher to report."""
        try:
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            self.perform_create(serializer)
            return ConsumerResponse(serializer.data, status=status.HTTP_201_CREATED)
        except serializers.ValidationError as validation_error:
            raise ConsumerException(validation_error)

    def perform_create(self, serializer):
        """Persistence hook — override to customize how the object is saved."""
        serializer.save()

    async def post(self, request, *args, **kwargs):
        return self.create(request, *args, **kwargs)
class ConsumerListAPIView(BaseConsumerView):
    """Consumer view that returns the whole queryset, serialized, on GET."""

    def list(self, request, *args, **kwargs):
        serializer = self.get_serializer(self.get_queryset(), many=True)
        return ConsumerResponse(serializer.data)

    async def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)
class ConsumerRetrieveAPIView(BaseConsumerView):
    """Consumer view that returns one serialized object on GET,
    looked up by ``lookup_field`` from the URL kwargs."""

    def get_object(self):
        """
        Returns the object the view is displaying.
        You may want to override this if you need to provide non-standard
        queryset lookups. Eg if objects are referenced using multiple
        keyword arguments in the url conf.
        """
        # Perform the lookup filtering.
        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        queryset = self.get_queryset()
        assert lookup_url_kwarg in self.kwargs, (
            'Expected view %s to be called with a URL keyword argument '
            'named "%s". Fix your URL conf, or set the `.lookup_field` '
            'attribute on the view correctly.' %
            (self.__class__.__name__, lookup_url_kwarg)
        )
        filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
        # Raises a consumer-level 404 when no matching object exists.
        obj = get_object_or_404(queryset, **filter_kwargs)
        return obj

    def retrieve(self, request, *args, **kwargs):
        # Serialize the single looked-up instance.
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        return ConsumerResponse(serializer.data)

    async def get(self, request, *args, **kwargs):
        return self.retrieve(request, *args, **kwargs)
|
{"/user_profile/models.py": ["/location/models.py", "/user_profile/helpers.py"], "/diabo/tests.py": ["/diabo/models.py", "/job/models.py", "/user_profile/serializers.py", "/user_profile/tests.py"], "/job/tests.py": ["/job/models.py", "/user_profile/tests.py"], "/diabo/views.py": ["/diabo/models.py", "/job/models.py", "/diabo/serializers.py", "/job/serializers.py", "/user_profile/permissions.py"], "/diabo/urls.py": ["/diabo/views.py"], "/walking_tracker/admin.py": ["/walking_tracker/models.py"], "/location/serializers.py": ["/location/models.py"], "/diabo/serializers.py": ["/diabo/models.py", "/job/serializers.py", "/user_profile/serializers.py"], "/user_profile/migrations/0001_initial.py": ["/user_profile/helpers.py"], "/user_profile/admin.py": ["/user_profile/models.py"], "/location/admin.py": ["/location/models.py"], "/diabo/models.py": ["/job/models.py", "/user_profile/models.py"], "/diabetes_therapy/helpers.py": ["/diabetes_therapy/models.py", "/diabetes_therapy/serializes.py"], "/user_profile/tests.py": ["/location/models.py", "/user_profile/models.py"], "/diabetes_therapy/tests.py": ["/diabetes_therapy/models.py", "/user_profile/models.py", "/user_profile/tests.py"], "/user_profile/urls.py": ["/user_profile/views.py"], "/user_profile/views.py": ["/user_profile/models.py", "/user_profile/permissions.py", "/user_profile/serializers.py"], "/job/admin.py": ["/job/models.py"], "/diabetes_therapy/serializes.py": ["/diabetes_therapy/models.py"], "/diabetes_therapy/urls.py": ["/diabetes_therapy/views.py"], "/diabetes_therapy/views.py": ["/diabetes_therapy/helpers.py", "/diabetes_therapy/models.py", "/diabetes_therapy/serializes.py", "/user_profile/permissions.py"], "/user_profile/serializers.py": ["/location/serializers.py", "/user_profile/helpers.py", "/user_profile/models.py"], "/walking_tracker/models.py": ["/user_profile/models.py"], "/job/serializers.py": ["/job/models.py"], "/walking_tracker/serializers.py": ["/walking_tracker/models.py"], 
"/walking_tracker/views.py": ["/walking_tracker/models.py", "/walking_tracker/serializers.py"], "/location/views.py": ["/location/models.py", "/location/serializers.py"], "/diabetes_therapy/admin.py": ["/diabetes_therapy/models.py"], "/location/urls.py": ["/location/views.py"], "/location/tests.py": ["/location/models.py", "/location/serializers.py", "/user_profile/models.py"], "/walking_tracker/urls.py": ["/walking_tracker/views.py"], "/job/urls.py": ["/diabo/views.py"], "/xapp/api_v1/consumers/DispatchConsumer.py": ["/xapp/api_v1/consumers/BasicConsumer.py", "/xapp/api_v1/consumers/helpers.py", "/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/paths.py", "/xapp/api_v1/helpers.py"], "/xapp/api_v1/consumers/ConsumerView.py": ["/xapp/api_v1/consumers/helpers.py"], "/xapp/api_v1/modules/payment/common.py": ["/xapp/api_v1/consts.py"], "/xapp/api_v1/views/UserProfileViews.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/permissions.py", "/xapp/api_v1/serializers/UserProfileSerializrs.py", "/xapp/models.py"], "/xapp/api_v1/serializers/UserProfileSerializrs.py": ["/xapp/api_v1/consts.py", "/xapp/models.py"], "/xapp/api_v1/modules/payment/vandar.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/modules/payment/common.py", "/xapp/models.py", "/xapp/api_v1/modules/payment/index.py"], "/xapp/api_v1/urls.py": ["/xapp/api_v1/views/UserProfileViews.py"], "/xapp/api_v1/modules/payment/pep.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/helpers.py", "/xapp/api_v1/modules/payment/common.py", "/xapp/api_v1/modules/payment/index.py"], "/xapp/api_v1/consumers/paths.py": ["/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/consumer_views.py"], "/xapp/api_v1/consumers/consumer_views.py": ["/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/helpers.py", "/xapp/api_v1/helpers.py", "/xapp/api_v1/permissions.py", "/xapp/models.py"], "/xapp/migrations/0001_initial.py": ["/xapp/api_v1/helpers.py"], "/xapp/api_v1/modules/payment/index.py": 
["/xapp/api_v1/modules/payment/pep.py", "/xapp/api_v1/modules/payment/vandar.py"], "/xapp/models.py": ["/xapp/api_v1/consts.py", "/xapp/api_v1/helpers.py"], "/xapp/api_v1/helpers.py": ["/xapp/api_v1/consts.py", "/xapp/models.py", "/xapp/api_v1/consumers/DispatchConsumer.py"], "/xapp/api_v1/permissions.py": ["/xapp/models.py", "/xapp/api_v1/consts.py"], "/xapp/api_v1/consumers/BasicConsumer.py": ["/xapp/api_v1/consumers/ConsumerView.py", "/xapp/api_v1/consumers/helpers.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.