| index (int64) | repo_name (string) | branch_name (string) | path (string) | content (string) | import_graph (string) |
|---|---|---|---|---|---|
24,978
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/animationfunctions.py
|
# led-control WS2812B LED Controller Server
# Copyright 2022 jackw01. Released under the MIT License (see LICENSE for details).
from random import random
from enum import Enum
import ledcontrol.driver as driver
import ledcontrol.utils as utils
ColorMode = Enum('ColorMode', ['hsv', 'rgb'])
def blank(t, dt, x, y, z, prev_state):
return (0, 0, 0), ColorMode.hsv
static_function_ids = [0, 1, 2, 3] # pattern IDs that display a solid color
default = {
0: {
'name': 'Static Color',
'primary_speed': 0.0,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(0), hsv
'''
},
1: {
'name': 'Static White',
'primary_speed': 0.0,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (0, 0, 1), hsv
'''
},
2: {
'name': 'Static Gradient 1D',
'primary_speed': 0.0,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(x), hsv
'''
},
3: {
'name': 'Static Gradient Mirrored 1D',
'primary_speed': 0.0,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(x), hsv
'''
},
6: {
'name': 'Twinkle Gradient 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = prev_state[2] - dt
if v <= 0:
c = palette(x)
return (c[0], c[1], random.random()), hsv
elif v > 0:
return (prev_state[0], prev_state[1], v), hsv
else:
return (0, 0, 0), hsv
'''
},
7: {
'name': 'Twinkle White 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = prev_state[2] - dt
if v <= 0:
return (0, 0, random.random()), hsv
elif v > 0:
return (prev_state[0], prev_state[1], v), hsv
else:
return (0, 0, 0), hsv
'''
},
100: {
'name': 'Palette Cycle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(t + x), hsv
'''
},
110: {
'name': 'Palette Cycle Mirrored 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(t + x), hsv
'''
},
112: {
'name': 'Palette Cycle Wipe 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
c = palette_mirrored(t + x)
return (c[0], c[1], ((t + x) % 1 > 0.5) * 1.0), hsv
'''
},
114: {
'name': 'Palette Cycle Wipe From Center 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
c = palette_mirrored(t + x)
if x < 0.5:
return (c[0], c[1], ((t + x) % 1 < 0.5) * 1.0), hsv
else:
return (c[0], c[1], ((t - x) % 1 < 0.5) * 1.0), hsv
'''
},
120: {
'name': 'Palette Cycle Quantized 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
t = (t + x) % 1
return palette(t - (t % (1 / 6))), hsv
'''
},
130: {
'name': 'Palette Cycle Random 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
t = t + x
i = (t - (t % 0.2)) / 0.2
return palette(i * 0.618034), hsv
'''
},
140: {
'name': 'Palette Scan Mirrored 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(wave_triangle(t) + x), hsv
'''
},
141: {
'name': 'Palette Bounce Mirrored 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(wave_sine(t) + x), hsv
'''
},
150: { # Performance isn't as good as it could be
'name': 'Palette Waves 1D',
'primary_speed': 0.05,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
h = (x + t) * 0.1 + x + wave_sine(t)
c = palette(wave_triangle(h))
return (c[0], c[1], wave_sine(h + t)), hsv
'''
},
160: {
'name': 'Palette Ripples 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
wave1 = wave_sine(t / 4 + x)
wave2 = wave_sine(t / 8 - x)
wave3 = wave_sine(x + wave1 + wave2)
c = palette(wave3 % 0.15 + t)
return (c[0], c[1], wave1 + wave3), hsv
'''
},
161: {
'name': 'Palette Ripples (Fast Cycle) 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
wave1 = wave_sine(t / 4 + x)
wave2 = wave_sine(t / 8 - x)
wave3 = wave_sine(x + wave1 + wave2)
c = palette(wave3 % 0.8 + t)
return (c[0], c[1], wave1 + wave3), hsv
'''
},
170: {
'name': 'Palette Plasma 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines(x, y, t, 1.0, 0.5, 0.5, 1.0)
return palette(wave_triangle(v)), hsv
'''
},
180: {
'name': 'Palette Fractal Plasma 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines_octave(x, y, t, 7, 2.0, 0.5)
return palette(wave_triangle(v)), hsv
'''
},
190: {
'name': 'Palette Twinkle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = prev_state[2] - dt
if v <= 0:
c = palette(t + x)
return (c[0], c[1], random.random()), hsv
elif v > 0:
return (prev_state[0], prev_state[1], v), hsv
else:
return (0, 0, 0), hsv
'''
},
200: {
'name': 'Palette Perlin Noise 2D',
'primary_speed': 0.3,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(perlin_noise_3d(x, y, t)), hsv
'''
},
210: {
'name': 'Palette fBm Noise 2D',
'primary_speed': 0.3,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = fbm_noise_3d(x, y, t * 0.5, 7, 2.0, 0.5)
return palette(wave_triangle(v * 4)), hsv
'''
},
310: {
'name': 'Hue Cycle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (t + x, 1, 1), hsv
'''
},
320: {
'name': 'Hue Cycle Quantized 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
hue = (t + x) % 1
return (hue - (hue % 0.1666), 1, 1), hsv
'''
},
330: {
'name': 'Hue Scan 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_triangle(t) + x, 1, 1), hsv
'''
},
331: {
'name': 'Hue Bounce 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_sine(t) + x, 1, 1), hsv
'''
},
340: {
'name': 'Hue Waves 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
h = (x + t) * 0.5 + x + wave_sine(t)
return (h, 1, wave_sine(h + t)), hsv
'''
},
350: {
'name': 'Hue Ripples 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
wave1 = wave_sine(t / 4 + x)
wave2 = wave_sine(t / 8 - x)
wave3 = wave_sine(x + wave1 + wave2)
return (wave3 % 0.15 + t, 1, wave1 + wave3), hsv
'''
},
400: {
'name': 'RGB Sines 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_sine(t + x),
wave_sine((t + x) * 1.2),
wave_sine((t + x) * 1.4)), rgb
'''
},
410: {
'name': 'RGB Cubics 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_cubic(t + x),
wave_cubic((t + x) * 1.2),
wave_cubic((t + x) * 1.4)), rgb
'''
},
420: {
'name': 'RGB Ripples 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v0 = x + (wave_sine(t)) + wave_sine(x + 0.666 * t)
v1 = x + (wave_sine(t + 0.05)) + wave_sine(x + 0.666 * t + 0.05)
v2 = x + (wave_sine(t + 0.1)) + wave_sine(x + 0.666 * t + 0.1)
return (0.01 / (wave_triangle(v0) + 0.01), 0.01 / (wave_triangle(v1) + 0.01), 0.01 / (wave_triangle(v2) + 0.01)), rgb
'''
},
430: {
'name': 'RGB Plasma (Spectrum Sines) 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines(x, y, t, 1.0, 0.5, 0.5, 1.0)
return (wave_sine(v),
wave_sine(v + 0.333),
wave_sine(v + 0.666)), rgb
'''
},
440: {
'name': 'RGB Plasma (Fire Sines) 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines(x, y, t, 1.0, 0.5, 0.5, 1.0)
return (0.9 - wave_sine(v),
wave_sine(v + 0.333) - 0.1,
0.9 - wave_sine(v + 0.666)), rgb
'''
},
450: {
'name': 'RGB Fractal Plasma (Fire Sines) 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'default': True,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines_octave(x, y, t, 7, 2.0, 0.5)
return (1.0 - wave_sine(v),
wave_sine(v + 0.333),
1.0 - wave_sine(v + 0.666)), rgb
'''
},
}
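# Usage sketch: each entry's 'source' string is compiled with RestrictedPython
# by AnimationController.set_pattern_function(), and the resulting pattern()
# is called once per LED per frame with helpers like palette() and the hsv/rgb
# color modes injected into its globals. Roughly:
#
#   errors, warnings = controller.set_pattern_function(0, default[0]['source'])
#   color, mode = controller._functions[0](0.0, 0.02, 0.5, 0.0, 0.0, (0, 0, 0))
#   # -> color is an (h, s, v) tuple, mode is ColorMode.hsv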
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,979
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/app.py
|
# led-control WS2812B LED Controller Server
# Copyright 2022 jackw01. Released under the MIT License (see LICENSE for details).
import json
import atexit
import shutil
import traceback
from threading import Timer
from pathlib import Path
from flask import Flask, render_template, request, jsonify
from ledcontrol.animationcontroller import AnimationController
from ledcontrol.ledcontroller import LEDController
from ledcontrol.homekit import homekit_start
import ledcontrol.pixelmappings as pixelmappings
import ledcontrol.animationfunctions as animfunctions
import ledcontrol.colorpalettes as colorpalettes
import ledcontrol.utils as utils
def create_app(led_count,
config_file,
pixel_mapping_file,
refresh_rate,
led_pin,
led_data_rate,
led_dma_channel,
led_pixel_order,
led_brightness_limit,
save_interval,
enable_sacn,
enable_hap,
no_timer_reset,
dev,
serial_port):
app = Flask(__name__)
# Create pixel mapping function
if pixel_mapping_file is not None:
pixel_mapping = json.load(pixel_mapping_file)
pixel_mapping_file.close()
led_count = len(pixel_mapping)
print(f'Using pixel mapping from file ({led_count} LEDs)')
mapping_func = pixelmappings.from_array(pixel_mapping)
else:
print(f'Using default linear pixel mapping ({led_count} LEDs)')
mapping_func = pixelmappings.line(led_count)
leds = LEDController(led_count,
led_pin,
led_data_rate,
led_dma_channel,
led_pixel_order,
serial_port)
controller = AnimationController(leds,
refresh_rate,
led_count,
mapping_func,
enable_sacn,
no_timer_reset,
led_brightness_limit)
presets = {}
functions = dict(animfunctions.default)
# Create file if it doesn't exist already
if config_file is not None:
filename = Path(config_file)
else:
filename = Path('/etc') / 'ledcontrol.json'
filename.touch(exist_ok=True)
# Init controller params and custom animations from settings file
with filename.open('r') as data_file:
try:
settings_str = data_file.read()
# Apply updates to old versions of settings file
settings_str = settings_str.replace('master_', '')
settings_str = settings_str.replace('pattern(t, dt, x, y, prev_state)',
'pattern(t, dt, x, y, z, prev_state)')
settings = json.loads(settings_str)
if 'save_version' not in settings:
print(f'Detected an old save file version at {filename}. Making a backup to {filename}.bak.')
shutil.copyfile(filename, filename.with_suffix('.json.bak'))
# Rename 'params' and recreate as 'settings'
params = settings.pop('params')
settings['settings'] = {
'global_brightness': params['brightness'],
'global_color_temp': params['color_temp'],
'global_color_r': 1.0,
'global_color_g': 1.0,
'global_color_b': 1.0,
'global_saturation': params['saturation'],
'groups': {
'main': {
'range_start': 0,
'range_end': 100000,
'render_mode': 'local',
'render_target': '',
'mapping': [],
'name': 'main',
'brightness': 1.0,
'color_temp': 6500,
'saturation': 1.0,
'function': 0,
'speed': params['primary_speed'],
'scale': params['primary_scale'],
'palette': 0,
}
}
}
# Add default flag to animation patterns
for k in settings['patterns']:
if 'source' in settings['patterns'][k]:
settings['patterns'][k]['default'] = False
else:
settings['patterns'][k]['default'] = True
# Rename 'patterns'
settings['functions'] = settings.pop('patterns')
# Add default flag to palettes
for k in settings['palettes']:
settings['palettes'][k]['default'] = False
print('Successfully upgraded save file.')
# Enforce calibration off when starting up
settings['settings']['calibration'] = 0
# Set controller settings, (automatically) recalculate things that depend on them
controller.update_settings(settings['settings'])
# Read presets
if 'presets' in settings:
presets.update(settings['presets'])
# Read custom animations and changed params for default animations
for k, v in settings['functions'].items():
if v['default'] == False:
functions[int(k)] = v
controller.set_pattern_function(int(k), v['source'])
elif int(k) in functions:
functions[int(k)].update(v)
# Read color palettes
for k, v in settings['palettes'].items():
controller.set_palette(int(k), v)
controller.calculate_palette_tables()
print(f'Loaded saved settings from {filename}')
except Exception as e:
if settings_str == '':
print(f'Creating new settings file at {filename}.')
else:
print(f'Some saved settings at {filename} are out of date or invalid. Making a backup of the old file to {filename}.error and creating a new one with default settings.')
shutil.copyfile(filename, filename.with_suffix('.json.error'))
@app.before_request
def before_request():
'Log post request json for testing'
if dev and request.method == 'POST':
print(request.endpoint)
print(request.json)
@app.route('/')
def index():
'Returns web app page'
return app.send_static_file('index.html')
@app.get('/getsettings')
def get_settings():
'Get settings'
return jsonify(controller.get_settings())
@app.post('/updatesettings')
def update_settings():
'Update settings'
new_settings = request.json
controller.update_settings(new_settings)
return jsonify(result='')
@app.get('/getpresets')
def get_presets():
'Get presets'
return jsonify(presets)
@app.post('/updatepreset')
def update_preset():
'Update a preset'
presets[request.json['key']] = request.json['value']
return jsonify(result='')
@app.post('/removepreset')
def remove_preset():
'Remove a preset'
del presets[request.json['key']]
return jsonify(result='')
@app.post('/removegroup')
def remove_group():
'Remove a group'
controller.delete_group(request.json['key'])
return jsonify(result='')
@app.get('/getfunctions')
def get_functions():
'Get functions'
return jsonify(functions)
@app.post('/compilefunction')
def compile_function():
'Compiles a function, returns errors and warnings in JSON array form'
key = request.json['key']
errors, warnings = controller.set_pattern_function(key, functions[key]['source'])
return jsonify(errors=errors, warnings=warnings)
@app.post('/updatefunction')
def update_function():
'Update a function'
functions[request.json['key']] = request.json['value']
return jsonify(result='')
@app.post('/removefunction')
def remove_function():
'Remove a function'
del functions[request.json['key']]
return jsonify(result='')
@app.get('/getpalettes')
def get_palettes():
'Get palettes'
return jsonify(controller.get_palettes())
@app.post('/updatepalette')
def update_palette():
'Update a palette'
controller.set_palette(request.json['key'], request.json['value'])
controller.calculate_palette_table(request.json['key'])
return jsonify(result='')
@app.post('/removepalette')
def remove_palette():
'Remove a palette'
controller.delete_palette(request.json['key'])
return jsonify(result='')
@app.get('/getfps')
def get_fps():
'Returns latest animation frames per second'
return jsonify(fps=controller.get_frame_rate())
@app.get('/resettimer')
def reset_timer():
'Resets animation timer'
controller.reset_timer()
return jsonify(result='')
def save_settings():
'Save controller settings, patterns, and palettes'
functions_2 = {}
for k, v in functions.items():
if not v['default']:
functions_2[str(k)] = v
else:
functions_2[str(k)] = {n: v[n] for n in ('default', 'primary_speed', 'primary_scale')}
palettes_2 = {str(k): v for (k, v) in controller.get_palettes().items() if not v['default']}
data = {
'save_version': 2,
'settings': controller.get_settings(),
'presets': presets,
'functions': functions_2,
'palettes': palettes_2,
}
with filename.open('w') as data_file:
try:
json.dump(data, data_file, sort_keys=True, indent=4)
print(f'Saved settings to {filename}')
except Exception as e:
traceback.print_exc()
print(f'Could not save settings to {filename}')
def auto_save_settings():
'Timer for automatically saving settings'
save_settings()
t = Timer(save_interval, auto_save_settings)
t.daemon = True
t.start()
controller.begin_animation_thread()
atexit.register(save_settings)
atexit.register(controller.clear_leds)
atexit.register(controller.end_animation)
auto_save_settings()
if enable_hap:
def setter_callback(char_values):
new_settings = {}
if 'On' in char_values:
new_settings['on'] = char_values['On']
if 'Brightness' in char_values:
new_settings['global_brightness'] = char_values['Brightness'] / 100.0
if 'Saturation' in char_values:
new_settings['global_saturation'] = char_values['Saturation'] / 100.0
controller.update_settings(new_settings)
hap_accessory = homekit_start(setter_callback)
hap_accessory.on.set_value(controller.get_settings()['on'])
hap_accessory.brightness.set_value(controller.get_settings()['global_brightness'] * 100.0)
hap_accessory.saturation.set_value(controller.get_settings()['global_saturation'] * 100.0)
return app
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,980
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/driver/driver_non_raspberry_pi.py
|
# led-control WS2812B LED Controller Server
# Copyright 2023 jackw01. Released under the MIT License (see LICENSE for details).
import math
import pyfastnoisesimd as fns
noise_coords = fns.empty_coords(3)
noise = fns.Noise()
def float_to_int_1000(t):
return int(t * 999.9) % 1000
def float_to_int_1000_mirror(t):
return abs(int(t * 1998.9) % 1999 - 999)
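# These map a float position to a palette lookup table index (table size 1000,
# set in AnimationController). Examples: float_to_int_1000(0.5) -> 499;
# float_to_int_1000_mirror() folds the range so t=0 and t=1 both map to the
# end of the table and t=0.5 maps to index 0, giving a seamless mirrored palette.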
def wave_pulse(t, duty_cycle):
return math.ceil(duty_cycle - math.fmod(t, 1.0))
def wave_triangle(t):
ramp = math.fmod((2.0 * t), 2.0)
return math.fabs((ramp + 2.0 if ramp < 0 else ramp) - 1.0)
def wave_sine(t):
return math.cos(6.283 * t) / 2.0 + 0.5
def wave_cubic(t):
ramp = math.fmod((2.0 * t), 2.0)
tri = math.fabs((ramp + 2.0 if ramp < 0 else ramp) - 1.0)
if tri > 0.5:
t2 = 1.0 - tri
return 1.0 - 4.0 * t2 * t2 * t2
else:
return 4.0 * tri * tri * tri
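# All wave_* helpers take a normalized time/position t and return a value in
# [0, 1] with period 1. For example: wave_triangle(0.0) == 1.0,
# wave_triangle(0.5) == 0.0; wave_sine(0.0) == 1.0, wave_sine(0.5) ~= 0.0;
# wave_pulse(0.2, 0.5) == 1, wave_pulse(0.7, 0.5) == 0.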
def plasma_sines(x, y, t, coeff_x, coeff_y, coeff_x_y, coeff_dist_x_y):
v = 0
v += math.sin((x + t) * coeff_x)
v += math.sin((y + t) * coeff_y)
v += math.sin((x + y + t) * coeff_x_y)
v += math.sin((math.sqrt(x * x + y * y) + t) * coeff_dist_x_y)
return v
def plasma_sines_octave(x, y, t, octaves, lacunarity, persistence):
vx = x
vy = y
freq = 1.0
amplitude = 1.0
for i in range(octaves):
vx1 = vx
vx += math.cos(vy * freq + t * freq) * amplitude
vy += math.sin(vx1 * freq + t * freq) * amplitude
freq *= lacunarity
amplitude *= persistence
return vx / 2.0
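# Note: plasma_sines() returns a sum of four sines in roughly [-4, 4], and
# plasma_sines_octave() returns a domain-warped coordinate that can exceed
# [0, 1]; the built-in patterns wrap both with wave_triangle() to fold the
# value back into [0, 1] before palette lookup.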
def perlin_noise_3d(x, y, z):
noise_coords[0,:] = x
noise_coords[1,:] = y
noise_coords[2,:] = z
return noise.genFromCoords(noise_coords)[0]
def fbm_noise_3d(x, y, z, octaves, lacunarity, persistence):
v = 0
freq = 1.0
amplitude = 1.0
for i in range(octaves):
v += amplitude * perlin_noise_3d(freq * x, freq * y, freq * z)
freq *= lacunarity
amplitude *= persistence
return v / 2.0
def clamp(x, min, max):
if x < min:
return min
elif x > max:
return max
else:
return x
def blackbody_to_rgb(kelvin):
tmp_internal = kelvin / 100.0
r_out = 0
g_out = 0
b_out = 0
if tmp_internal <= 66:
xg = tmp_internal - 2.0
r_out = 1.0
g_out = clamp((-155.255 - 0.446 * xg + 104.492 * math.log(xg)) / 255.0, 0, 1)
else:
xr = tmp_internal - 55.0
xg = tmp_internal - 50.0
r_out = clamp((351.977 + 0.114 * xr - 40.254 * math.log(xr)) / 255.0, 0, 1)
g_out = clamp((325.449 + 0.079 * xg - 28.085 * math.log(xg)) / 255.0, 0, 1)
if tmp_internal >= 66:
b_out = 1.0
elif tmp_internal <= 19:
b_out = 0.0
else:
xb = tmp_internal - 10.0
b_out = clamp((-254.769 + 0.827 * xb + 115.680 * math.log(xb)) / 255.0, 0, 1)
return [r_out, g_out, b_out]
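# Example values (approximate, from the curve fits above):
# blackbody_to_rgb(6500) is close to [1.0, 0.98, 1.0] (near white), while
# blackbody_to_rgb(2000) is roughly [1.0, 0.54, 0.08], a strongly warm orange.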
def blackbody_correction_rgb(rgb, kelvin):
bb = blackbody_to_rgb(kelvin)
return [rgb[0] * bb[0], rgb[1] * bb[1], rgb[2] * bb[2]]
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,981
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/driver/__init__.py
|
# led-control WS2812B LED Controller Server
# Copyright 2023 jackw01. Released under the MIT License (see LICENSE for details).
import io
def is_raspberrypi():
try:
with io.open('/sys/firmware/devicetree/base/model', 'r') as m:
if 'raspberry pi' in m.read().lower():
return True
except Exception:
pass
return False
if is_raspberrypi():
# Import the extension module
from _ledcontrol_rpi_ws281x_driver import *
else:
from . driver_non_raspberry_pi import *
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,982
|
jackw01/led-control
|
refs/heads/master
|
/renderpreviews.py
|
from PIL import Image
from colorsys import hsv_to_rgb
from ledcontrol.animationcontroller import AnimationController
import ledcontrol.animationfunctions as animfunctions
import ledcontrol.colorpalettes as colorpalettes
import ledcontrol.pixelmappings as pixelmappings
import ledcontrol.driver as driver
import ledcontrol.utils as utils
controller = AnimationController(None, 0, 256, pixelmappings.line(256), False, True, 1.0)
controller._current_palette_table = controller._palette_tables[0]
s = 100 # LED strip length
t = 400 # Time units
gif_t = 300 # Animated gif duration
f = open('animations.md', 'w')
f.write('## Built-In Animation Patterns\n\n')
for k, pattern_dict in animfunctions.default.items():
errors, warnings = controller.set_pattern_function(k, pattern_dict['source'])
pattern = controller._functions[k]
img = Image.new('RGB', (t, s), 'black')
pixels = img.load()
print(pattern_dict["name"])
f.write(f'## {pattern_dict["name"]}\n')
prev = [(0, 0, 0) for i in range(s)]
frames = []
for i in range(img.size[0]):
frame = Image.new('RGB', (s, 1), 'black')
frame_pixels = frame.load()
for j in range(img.size[1]):
p = pattern((pattern_dict['primary_speed'] / 0.2) * i / s, 1.0 / s, j / s, 0, 0, prev[j])
prev[j] = p[0]
if p[1] == animfunctions.ColorMode.hsv:
c = tuple([int(x * 255) for x in hsv_to_rgb(*p[0])])
pixels[i, j] = c
frame_pixels[j, 0] = c
else:
c = tuple([int(x * 255) for x in p[0]])
pixels[i, j] = c
frame_pixels[j, 0] = c
if i < gif_t:
frames.append(frame)
#img_name = f'img/{pattern_dict["name"]}.png'.replace(' ', '-')
gif_name = f'img/{pattern_dict["name"]}.gif'.replace(' ', '-')
#img.save(img_name)
frames[0].save(gif_name, save_all=True, append_images=frames[1:], duration=100, loop=0)
f.write(f'<img src="{gif_name}" width="800"/>\n\n')
#f.write(f'<img src="{img_name}" width="800"/>\n\n')
f.close()
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,983
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/animationcontroller.py
|
# led-control WS2812B LED Controller Server
# Copyright 2022 jackw01. Released under the MIT License (see LICENSE for details).
import math
import random
import time
import traceback
import RestrictedPython
import sacn
import collections
from itertools import zip_longest
from ledcontrol.intervaltimer import IntervalTimer
import ledcontrol.animationfunctions as animfunctions
import ledcontrol.colorpalettes as colorpalettes
import ledcontrol.driver as driver
import ledcontrol.utils as utils
class AnimationController:
def __init__(self,
led_controller,
refresh_rate,
led_count,
mapping_func,
enable_sacn,
no_timer_reset,
global_brightness_limit):
self._led_controller = led_controller
self._refresh_rate = refresh_rate
self._led_count = led_count
self._mapping_func = mapping_func
self._enable_sacn = enable_sacn
self._no_timer_reset = no_timer_reset
self._global_brightness_limit = global_brightness_limit
# Initialize prev state array
self._prev_state = [(0, 0, 0) for i in range(self._led_count)]
# Map led indices to normalized position vectors
self._mapped = [self._mapping_func(i) for i in range(self._led_count)]
# Create dictionary of lists used to cache current mappings
self._mappings = {}
# All user-editable animation settings stored here
self._settings = {
'on': 1,
'global_brightness': 0.15,
'global_brightness_limit': global_brightness_limit,
'global_color_temp': 6500,
'global_color_r': 255,
'global_color_g': 190,
'global_color_b': 170,
'global_saturation': 1.0,
'sacn': 0,
'calibration': 0,
'groups': {
'main': {
'range_start': 0,
'range_end': 100000,
'render_mode': 'local',
'render_target': '',
'name': 'main',
'mapping': [],
'brightness': 1.0,
'color_temp': 6500,
'saturation': 1.0,
'function': 0,
'speed': 0.2,
'scale': 1.0,
'palette': 0,
}
}
}
# Dictionary for pattern functions
self._functions = {}
# Initialize default pattern functions
for k, v in animfunctions.default.items():
self.set_pattern_function(k, v['source'])
# Color palette used for animations
self._palette_table_size = 1000
self._palettes = dict(colorpalettes.default)
self._palette_tables = {}
self._current_palette_table = []
self.calculate_palette_tables()
# Set default color temp
self.calculate_color_correction()
# Set default mapping
self.calculate_mapping()
# Prepare to start
self.reset_timer()
self._time = 0
self._update_needed = True # Is the LED state going to change this frame?
# Initialize sACN / E1.31
if enable_sacn:
self._last_sacn_time = 0
self._sacn_perf_avg = 0
self._sacn_count = 0
# Computing cached values
def calculate_palette_table(self, key):
'Calculate and store the palette lookup table for one palette'
palette = self._palettes[key]
palette_table = []
sector_size = 1.0 / (len(palette['colors']) - 1)
for i in range(self._palette_table_size):
f = i / self._palette_table_size
sector = math.floor(f / sector_size)
f = f % sector_size / sector_size
c1, c2 = palette['colors'][sector], palette['colors'][sector + 1]
# Allow full spectrum if extremes are 0 and 1 in any order
# otherwise pick shortest path between colors
h1 = c2[0] - c1[0]
if abs(h1) != 1:
if h1 < -0.5:
h1 += 1
if h1 > 0.5:
h1 -= 1
palette_table.append((
f * h1 + c1[0],
f * (c2[1] - c1[1]) + c1[1],
f * (c2[2] - c1[2]) + c1[2],
))
self._palette_tables[key] = palette_table
self._update_needed = True
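# Worked example of the shortest-path hue logic above: interpolating from
# (0.9, 1, 1) to (0.1, 1, 1) gives h1 = -0.8, adjusted to +0.2, so the hue
# sweeps 0.9 -> 1.1 (wrapping through red) instead of crossing the middle of
# the spectrum through 0.5.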
def calculate_palette_tables(self):
'Calculate and store the palette lookup tables for all palettes'
for key in self._palettes:
self.calculate_palette_table(key)
def calculate_color_correction(self):
'Calculate and store color temperature correction'
rgb = driver.blackbody_to_rgb(self._settings['global_color_temp'])
c = [self._settings['global_color_r'] * int(rgb[0] * 255) // 255,
self._settings['global_color_g'] * int(rgb[1] * 255) // 255,
self._settings['global_color_b'] * int(rgb[2] * 255) // 255]
self._correction = (c[0] << 16) | (c[1] << 8) | c[2]
def calculate_mapping(self):
'Calculate and store spatial mapping values with current scale'
for group in self._settings['groups']:
scale = self._settings['groups'][group]['scale']
if scale != 0:
# Calculate scale components to determine animation position
# scale component = position / scale (pattern length in units)
# One cycle is a normalized input value's transition from 0 to 1
self._mappings[group] = [(
(self._mapped[i][0] / scale) % 1,
(self._mapped[i][1] / scale) % 1,
(self._mapped[i][2] / scale) % 1
) for i in range(self._led_count)]
else:
self._mappings[group] = [(0, 0, 0) for i in range(self._led_count)]
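# Example: with scale = 0.5, an LED at normalized position x = 0.75 gets
# mapping component (0.75 / 0.5) % 1 = 0.5, so one full pattern cycle spans
# half the strip and then repeats.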
# Settings frontend
def get_settings(self):
'Get settings dict'
return self._settings
def update_settings(self, new_settings):
'Update settings dict with new values'
self._flag_correction = False
self._flag_mapping = False
self._flag_clear = False
def recursive_update(d1, d2):
for k, v in d2.items():
if isinstance(v, collections.abc.Mapping):
d1[k] = recursive_update(d1.get(k, {}), v)
else:
d1[k] = v
# Perform checks for things that need to be recalculated
if k in ['global_color_temp', 'global_color_r', 'global_color_g', 'global_color_b', 'color_temp']:
self._flag_correction = True
elif k == 'global_brightness':
if d1[k] != 0:
self._settings['on'] = 1 # force homekit 'on' when brightness is changed
d1[k] = min(d1[k], self._global_brightness_limit)
elif k == 'scale':
self._flag_mapping = True
elif k == 'function':
if v not in self._functions: # for uncompiled functions
self._functions[v] = animfunctions.blank
self._check_reset_animation_state()
elif k in ['range_start', 'range_end']:
self._flag_clear = True # clear LEDs to make range selection less ambiguous
elif k in ['render_mode', 'render_target']:
# todo
self._flag_clear = True
elif k == 'sacn' and self._enable_sacn:
if v:
self._receiver = sacn.sACNreceiver()
self._receiver.listen_on('universe', universe=1)(self._sacn_callback)
self._receiver.start()
elif hasattr(self, '_receiver'):
self._receiver.stop()
return d1
recursive_update(self._settings, new_settings)
if self._flag_correction:
self.calculate_color_correction()
if self._flag_mapping:
self.calculate_mapping()
if self._flag_clear:
self.clear_leds()
self._update_needed = True
def delete_group(self, key):
'Delete a group'
if key != 'main':
del self._settings['groups'][key]
# Functions frontend
def set_pattern_function(self, key, source):
'Update and recompile a pattern function'
def getitem(obj, index):
if obj is not None and type(obj) in (list, tuple, dict):
return obj[index]
raise Exception()
def getiter(obj):
return obj
restricted_globals = {
'__builtins__': RestrictedPython.Guards.safe_builtins,
'_print_': RestrictedPython.PrintCollector,
'_getattr_': RestrictedPython.Guards.safer_getattr,
'_getitem_': getitem,
'_getiter_': getiter,
'_write_': RestrictedPython.Guards.full_write_guard,
'math': math,
'random': random,
'palette': self._get_palette_color,
'palette_mirrored': self._get_palette_color_mirrored,
'hsv': animfunctions.ColorMode.hsv,
'rgb': animfunctions.ColorMode.rgb,
'clamp': utils.clamp,
'wave_pulse': driver.wave_pulse,
'wave_triangle': driver.wave_triangle,
'wave_sine': driver.wave_sine,
'wave_cubic': driver.wave_cubic,
'plasma_sines': driver.plasma_sines,
'plasma_sines_octave': driver.plasma_sines_octave,
'perlin_noise_3d': driver.perlin_noise_3d,
'fbm_noise_3d': driver.fbm_noise_3d,
'impulse_exp': utils.impulse_exp,
'fract': utils.fract,
'blackbody_to_rgb': driver.blackbody_to_rgb,
'blackbody_correction_rgb': driver.blackbody_correction_rgb,
}
restricted_locals = {}
arg_names = ['t', 'dt', 'x', 'y', 'z', 'prev_state']
result = RestrictedPython.compile_restricted_exec(source)
warnings = list(result.warnings)
for name in result.used_names:
if name not in restricted_globals and name not in arg_names:
warnings.append(f'NameError: name \'{name}\' is not defined')
if result.code:
exec(result.code, restricted_globals, restricted_locals)
if len(result.errors) == 0 and 'pattern' in restricted_locals:
self._functions[key] = restricted_locals['pattern']
self._check_reset_animation_state()
self._update_needed = True
return result.errors, warnings
# Palettes frontend
def get_palettes(self):
'Get palettes dict'
return self._palettes
def set_palette(self, key, value):
'Update palette'
self._palettes[key] = value
def delete_palette(self, key):
'Delete palette'
del self._palettes[key]
# Palettes backend
def _get_palette_color(self, t):
'Get color from current palette corresponding to index between 0 and 1'
# This gives a surprising performance improvement over doing the math in python
# If the palette size is ever changed here, it needs to be changed in animation_utils.h
return self._current_palette_table[driver.float_to_int_1000(t)]
def _get_palette_color_mirrored(self, t):
'Version of get_palette_color that samples a mirrored version of the palette'
return self._current_palette_table[driver.float_to_int_1000_mirror(t)]
# Animation and timer
def begin_animation_thread(self):
'Start animating'
self._timer = IntervalTimer(1.0 / self._refresh_rate, self.update_leds)
self._timer.start()
def get_frame_rate(self):
'Get frame rate'
return self._timer.get_rate()
def _check_reset_animation_state(self):
'Reset animation timer if allowed by configuration flag'
if not self._no_timer_reset:
self.reset_timer()
def reset_timer(self):
'Reset animation timer'
self._start = time.perf_counter()
def update_leds(self):
'Determine time, render frame, and display'
last_t = self._time
self._time = self._timer.last_start - self._start
delta_t = self._time - last_t
if self._timer.get_count() % 100 == 0:
print(f'Execution time: {self._timer.get_perf_avg():0.5f}s, {self._timer.get_rate():05.1f} FPS')
if self._settings['calibration'] == 1:
self._led_controller.show_calibration_color(self._led_count,
self._correction,
self._settings['global_brightness'] / 2)
return
if self._update_needed and self._settings['sacn'] == 0:
self._update_needed = False
# Store dict keys as list in case they are changed during iteration
for group, settings in list(self._settings['groups'].items()):
try:
mapping = self._mappings[group]
range_start = settings['range_start']
range_end = min(self._led_count, settings['range_end'])
# Begin render
self._current_palette_table = self._palette_tables[settings['palette']]
computed_brightness = self._settings['on'] * self._settings['global_brightness'] * settings['brightness']
computed_saturation = self._settings['global_saturation'] * settings['saturation']
function_1 = self._functions[settings['function']]
# Calculate times
# Reset time every week to prevent strange math issues
time_fix = self._time % 604800
# time component = time (s) * speed (cycle/s)
time_1 = time_fix * settings['speed']
delta_t_1 = delta_t * settings['speed']
except KeyError as e: # Ignore if settings haven't been calculated yet
continue
try:
# Determine current pattern mode
c, mode = function_1(0, 0.1, 0, 0, 0, (0, 0, 0))
# Run pattern to determine color
state = [function_1(time_1,
delta_t_1,
mapping[i][0],
mapping[i][1],
mapping[i][2],
self._prev_state[i])[0]
for i in range(range_start, range_end)]
self._prev_state[range_start:range_end] = state
self._led_controller.set_range(state, range_start, range_end,
self._correction,
computed_saturation,
computed_brightness,
mode)
except Exception as e:
msg = traceback.format_exception(type(e), e, e.__traceback__)
print(f'Animation execution: {msg}')
r = 0.1 * driver.wave_pulse(time_fix, 0.5)
self._led_controller.set_range([(r, 0, 0) for i in range(self._led_count)],
0, self._led_count,
self._correction,
1.0,
1.0,
animfunctions.ColorMode.rgb)
self._led_controller.render()
return
# If displaying a static pattern, brightness is 0, or speed is 0:
# no update is needed the next frame
if (not self._update_needed
and settings['function'] not in animfunctions.static_function_ids
and settings['speed'] != 0
and computed_brightness > 0):
self._update_needed = True
self._led_controller.render()
def _sacn_callback(self, packet):
'Callback for sACN / E1.31 client'
sacn_time = time.perf_counter()
self._sacn_perf_avg += (sacn_time - self._last_sacn_time)
self._last_sacn_time = sacn_time
self._sacn_count += 1
if self._sacn_count % 100 == 0:
print('Average sACN rate (packets/s): {}'.format(1 / (self._sacn_perf_avg / 100)))
self._sacn_perf_avg = 0
data = [x / 255.0 for x in packet.dmxData[:self._led_count * 3]]
self._led_controller.set_range(list(zip_longest(*(iter(data),) * 3)),
0, self._led_count,
self._correction,
1.0,
self._settings['global_brightness'],
animfunctions.ColorMode.rgb)
self._led_controller.render()
def clear_leds(self):
'Turn all LEDs off'
self._led_controller.set_range([(0, 0, 0) for i in range(self._led_count)],
0, self._led_count,
self._correction,
1.0,
1.0,
animfunctions.ColorMode.rgb)
self._led_controller.render()
def end_animation(self):
'Stop rendering in the animation thread and stop sACN receiver'
self._timer.stop()
try:
if self._enable_sacn and self._receiver:
self._receiver.stop()
except:
pass
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,984
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/intervaltimer.py
|
# led-control WS2812B LED Controller Server
# Copyright 2022 jackw01. Released under the MIT License (see LICENSE for details).
import time
from threading import Event, Thread
class IntervalTimer:
'Repeat function call at a regular interval'
def __init__(self, interval, function, *args, **kwargs):
self._interval = interval
self._function = function
self._args = args
self._kwargs = kwargs
self._count = 0
self._last_perf_avg_count = -1
self._wait_time = 0
self.last_start = time.perf_counter()
self._last_measurement_c = 0
self._last_measurement_t = 0
self._perf_avg = 0
self._event = Event()
self._thread = Thread(target=self.target, daemon=True)
def start(self):
'Starts the timer thread'
self._thread.start()
def target(self):
'Waits until ready and executes target function'
while not self._event.wait(self._wait_time):
current_start = time.perf_counter()
self._function(*self._args, **self._kwargs)
self._count += 1
cycle_time = time.perf_counter() - current_start
self._perf_avg += cycle_time
# Calculate wait for next iteration
self._wait_time = self._interval - (current_start - self.last_start)
self.last_start = current_start
if (self._wait_time < 0):
self._wait_time = 0
def get_count(self):
'Returns cycle count'
return self._count
def get_perf_avg(self):
'Returns average function execution time and clears accumulator'
average = self._perf_avg / (self._count - self._last_perf_avg_count)
self._perf_avg = 0
self._last_perf_avg_count = self._count
return average
def get_rate(self):
'Returns current rate in cycles per second'
result = ((self._count - self._last_measurement_c) /
(self.last_start - self._last_measurement_t))
self._last_measurement_c = self._count
self._last_measurement_t = self.last_start
return result
def stop(self):
'Stops the timer thread'
self._event.set()
self._thread.join()
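# Usage sketch (render_frame is a stand-in callback, not part of this file):
#
#   timer = IntervalTimer(1.0 / 60, render_frame)  # ~60 Hz
#   timer.start()
#   ...
#   timer.stop()
#
# target() subtracts the time elapsed since the previous start from the
# interval, so the callback rate stays close to 1 / interval as long as the
# function finishes within the interval.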
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,985
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/utils.py
|
# led-control WS2812B LED Controller Server
# Copyright 2021 jackw01. Released under the MIT License (see LICENSE for details).
import re
import math
import colorsys
# Constrain value
def clamp(x, min, max):
if x < min:
return min
elif x > max:
return max
else:
return x
# Title generation
def camel_to_title(text):
return re.sub(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))', r' \1', text)
def snake_to_title(text):
return text.replace('_', ' ').title()
# Misc shaping functions
# Exponential asymmetric impulse function - peaks at t=1
# See http://www.iquilezles.org/www/articles/functions/functions.htm
def impulse_exp(t):
return t * math.exp(1 - t)
# Equivalent to GLSL fract - returns the floating point component of a number
def fract(x):
return x - math.floor(x)
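# Examples: fract(1.25) == 0.25 and fract(-0.25) == 0.75 (always in [0, 1));
# impulse_exp(1.0) == 1.0 is the peak of the impulse, decaying toward 0 as t grows.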
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,986
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/__init__.py
|
# led-control WS2812B LED Controller Server
# Copyright 2022 jackw01. Released under the MIT License (see LICENSE for details).
import argparse
from ledcontrol.app import create_app
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=80,
help='Port to use for web interface. Default: 80')
parser.add_argument('--host', default='0.0.0.0',
help='Hostname to use for web interface. Default: 0.0.0.0')
parser.add_argument('--led_count', type=int, default=0,
help='Number of LEDs')
parser.add_argument('--config_file',
help='Location of config file. Default: /etc/ledcontrol.json')
parser.add_argument('--pixel_mapping_json', type=argparse.FileType('r'),
help='JSON file containing pixel mapping (see README)')
parser.add_argument('--fps', type=int, default=60,
help='Refresh rate limit for LEDs, in FPS. Default: 60')
parser.add_argument('--led_pin', type=int, default=18,
help='Pin for LEDs (see https://github.com/jgarff/rpi_ws281x). Default: 18')
parser.add_argument('--led_data_rate', type=int, default=800000,
help='Data rate for LEDs. Default: 800000 Hz')
parser.add_argument('--led_dma_channel', type=int, default=10,
help='DMA channel for LEDs. DO NOT USE CHANNEL 5 ON Pi 3 B. Default: 10')
parser.add_argument('--led_pixel_order', default='GRB',
help='LED color channel order. Any combination of RGB with or without a W at the end. Default: GRB, try GRBW for SK6812')
parser.add_argument('--led_brightness_limit', type=float, default=1.0,
help='LED maximum brightness limit for the web UI. Float from 0.0-1.0. Default: 1.0')
parser.add_argument('--save_interval', type=int, default=60,
help='Interval for automatically saving settings in seconds. Default: 60')
parser.add_argument('--sacn', action='store_true',
help='Enable sACN / E1.31 support. Default: False')
parser.add_argument('--hap', action='store_true',
help='Enable HomeKit Accessory Protocol support. Default: False')
parser.add_argument('--no_timer_reset', action='store_true',
help='Do not reset the animation timer when patterns are changed. Default: False')
parser.add_argument('--dev', action='store_true',
help='Development flag. Default: False')
parser.add_argument('--serial_port',
help='Serial port for external LED driver.')
args = parser.parse_args()
app = create_app(args.led_count,
args.config_file,
args.pixel_mapping_json,
args.fps,
args.led_pin,
args.led_data_rate,
args.led_dma_channel,
args.led_pixel_order.upper(),
args.led_brightness_limit,
args.save_interval,
args.sacn,
args.hap,
args.no_timer_reset,
args.dev,
args.serial_port)
if args.dev:
app.run(host=args.host, port=args.port)
else:
import bjoern
bjoern.run(app, host=args.host, port=args.port)
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,987
|
jackw01/led-control
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import io  # used by is_raspberrypi() below
from setuptools import find_packages, setup, Extension
from setuptools.command.develop import develop
from setuptools.command.install import install
from subprocess import check_call
def pre_install():
print('preinstall')
if is_raspberrypi():
check_call('scons', cwd='ledcontrol/driver/rpi_ws281x/')
class PreDevelopCommand(develop):
def run(self):
pre_install()
develop.run(self)
class PreInstallCommand(install):
def run(self):
pre_install()
install.run(self)
def is_raspberrypi():
try:
with io.open('/sys/firmware/devicetree/base/model', 'r') as m:
if 'raspberry pi' in m.read().lower():
return True
except Exception:
pass
return False
requirements = [
'Flask==2.2.2',
'RestrictedPython>=5.2',
'sacn>=1.8.1',
'HAP-python==4.4.0',
'pyopenssl==22.1.0',
'numpy>=1.21.0',
'pyfastnoisesimd>=0.4.2',
] + (['bjoern>=3.2.1'] if sys.platform.startswith('linux') else [])
extensions = [
Extension('_ledcontrol_rpi_ws281x_driver',
sources=['ledcontrol/driver/ledcontrol_rpi_ws281x_driver_wrap.c'],
include_dirs=['ledcontrol/driver'],
library_dirs=['ledcontrol/driver/rpi_ws281x/'],
libraries=['ws2811'])
]
setup(
name='led-control',
version='2.0.0',
description='WS2812 LED strip controller with web interface for Raspberry Pi',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='jackw01',
python_requires='>=3.7.0',
url='https://github.com/jackw01/led-control',
packages=find_packages(),
zip_safe=False,
install_requires=requirements,
setup_requires=requirements,
ext_modules=extensions if is_raspberrypi() else [],
include_package_data=True,
entry_points={
'console_scripts': [
'ledcontrol=ledcontrol:main'
]
},
cmdclass={
'develop': PreDevelopCommand,
'install': PreInstallCommand,
},
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
]
)
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,988
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/homekit.py
|
import threading
import signal
from pyhap.accessory import Accessory
from pyhap.accessory_driver import AccessoryDriver
from pyhap.const import CATEGORY_LIGHTBULB
class LEDControlHomeKitAccessory(Accessory):
category = CATEGORY_LIGHTBULB # This is for the icon in the iOS Home app.
def __init__(self, *args, **kwargs):
# If overriding this method, be sure to call the super's implementation first.
super().__init__(*args, **kwargs)
# Add the services that this Accessory will support with add_preload_service here
self._serv_light = self.add_preload_service('Lightbulb', chars=['On', 'Brightness', 'Saturation'])
self.on = self._serv_light.configure_char('On', value=1)
self.brightness = self._serv_light.configure_char('Brightness', value=100)
self.saturation = self._serv_light.configure_char('Saturation', value=100)
def set_setter_callback(self, callback):
self._serv_light.setter_callback = callback
def homekit_start(setter_callback):
# Start the accessory on port 51826
driver = AccessoryDriver(port=51826)
accessory = LEDControlHomeKitAccessory(driver, 'LEDControl')
accessory.set_setter_callback(setter_callback)
driver.add_accessory(accessory=accessory)
# We want SIGTERM (terminate) to be handled by the driver itself,
# so that it can gracefully stop the accessory, server and advertising.
signal.signal(signal.SIGTERM, driver.signal_handler)
# Start it!
thread = threading.Thread(target=driver.start)
thread.start()
return accessory
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
24,989
|
jackw01/led-control
|
refs/heads/master
|
/ledcontrol/pixelmappings.py
|
# led-control WS2812B LED Controller Server
# Copyright 2021 jackw01. Released under the MIT License (see LICENSE for details).
import collections
Point = collections.namedtuple('Point', ['x', 'y', 'z'])
def line(count):
return lambda i: Point(i / count, 0, 0)
def from_array(mapping):
min_v = min([min(pt) for pt in mapping])
v_range = max([max(pt) for pt in mapping]) - min_v
mapping_normalized = [[(v - min_v) / v_range * 0.999 for v in pt] for pt in mapping]
return lambda i: Point(mapping_normalized[i][0],
mapping_normalized[i][1],
mapping_normalized[i][2])
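# Example: a 2x2 grid mapping normalized into [0, 0.999]:
#   f = from_array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]])
#   f(3)  # -> Point(x=0.999, y=0.999, z=0.0)
# Normalization uses the global min/max across all axes; the 0.999 factor
# keeps values just below 1, presumably so downstream (value % 1) math never
# wraps a position back to 0.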
|
{"/ledcontrol/ledcontroller.py": ["/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationfunctions.py": ["/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/app.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/ledcontroller.py", "/ledcontrol/homekit.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/utils.py"], "/ledcontrol/driver/__init__.py": ["/ledcontrol/driver/driver_non_raspberry_pi.py"], "/renderpreviews.py": ["/ledcontrol/animationcontroller.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/pixelmappings.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/animationcontroller.py": ["/ledcontrol/intervaltimer.py", "/ledcontrol/animationfunctions.py", "/ledcontrol/driver/__init__.py", "/ledcontrol/utils.py"], "/ledcontrol/__init__.py": ["/ledcontrol/app.py"]}
|
25,012
|
MuhammedBuyukkinaci/TensorFlow-Multiclass-Image-Classification-using-CNN-s
|
refs/heads/master
|
/main.py
|
import os # dealing with directories
import matplotlib.pyplot as plt # for visualizations
import numpy as np # arrays
import pandas as pd # for manipulating data
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from sklearn.metrics import roc_auc_score
from torch.utils.data import DataLoader, Dataset
from utils.helpers import (create_transform, prepare_train_valid_test,
unzip_input_file)
# HYPERPARAMETERS
# our photos are of size (80, 80, 3)
IMG_SIZE = 80
IMG_SIZE_ALEXNET = 227
SHOWN_IMAGE_COUNT = 64
columns = 8
rows = 8
# hyperparameters
hidden_size = 100
num_epochs = 50
batch_size = 32
learning_rate = 3e-5
UNZIP = False
BASE_DIR = os.getcwd()
# Current working directory
# Our dataset class
class CustomDataset(Dataset):
def __init__(self, arr, transform=None) -> None:
self.x = [Image.fromarray(i[0], "RGB") for i in arr]
self.y = np.array([i[1].argmax() for i in arr])
self.transform = transform
self.n_samples = len(self.x)
    def __getitem__(self, index):
        y_label = self.y[index]
        img = self.x[index]
        if self.transform:
            img = self.transform(img)
        return img, y_label
def __len__(self):
return self.n_samples
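# Illustrative: each element of `arr` is assumed to be an (image_array, one_hot_label)
# pair, so CustomDataset(train, transform)[0] yields (transformed_image, class_index).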
# Declaring model
class AlexNet(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=3, out_channels=96, kernel_size=(11, 11), stride=4
)
self.relu = nn.ReLU()
self.pool1 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
self.conv2 = nn.Conv2d(
in_channels=96, out_channels=256, kernel_size=(5, 5), stride=1, padding=2
)
self.pool2 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
self.conv3 = nn.Conv2d(
in_channels=256, out_channels=384, kernel_size=(3, 3), stride=1, padding=1
)
self.conv4 = nn.Conv2d(
in_channels=384, out_channels=384, kernel_size=(3, 3), stride=1, padding=1
)
self.conv5 = nn.Conv2d(
in_channels=384, out_channels=256, kernel_size=(3, 3), stride=1, padding=1
)
self.pool3 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
self.dropout = nn.Dropout(p=0.5)
self.fc1 = nn.Linear(in_features=256 * 6 * 6, out_features=4096)
self.fc2 = nn.Linear(in_features=4096, out_features=4096)
self.fc3 = nn.Linear(in_features=4096, out_features=4)
def forward(self, x):
x = self.pool1(self.relu(self.conv1(x)))
x = self.pool2(self.relu(self.conv2(x)))
x = self.relu(self.conv3(x))
x = self.relu(self.conv4(x))
x = self.pool3(self.relu(self.conv5(x)))
x = self.dropout(x)
x = x.reshape(-1, 256 * 6 * 6)
x = self.relu(self.fc1(x))
x = self.dropout(x)
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
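# Shape sanity check (illustrative): with 227x227 RGB inputs the conv/pool stack
# reduces to 256x6x6 before fc1, and the network emits 4 class logits:
# assert AlexNet()(torch.randn(1, 3, 227, 227)).shape == torch.Size([1, 4])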
def training(
train_loader,
device,
model,
optimizer,
epoch,
criterion,
training_loss_list,
num_steps,
):
model.train()
for i, (images, labels) in enumerate(train_loader):
# moving input and output to device
images = images.to(device)
labels = labels.to(device)
# forward
outputs = model(images)
loss = criterion(outputs, labels)
training_loss_list.append(loss.item())
# set gradients to 0 first
optimizer.zero_grad()
        # backpropagate gradients
loss.backward()
# update weights via learning rate and gradients
optimizer.step()
if (i + 1) % num_steps == 0:
print(f"train; epoch={epoch+1}, training loss = {np.round(loss.item(),4)}")
def evaluation(model, device, loader, criterion, validation_loss_list, epoch=None, validation=True):
model.eval()
with torch.no_grad():
n_correct = 0
n_samples = 0
loss_all = 0
for images, labels in loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
validation_loss_list.append(loss.item())
loss_all += loss.item()
# value, index
_, predictions = torch.max(outputs, 1)
n_samples += labels.shape[0]
n_correct += (predictions == labels).sum().item()
acc = 100.0 * n_correct / n_samples
avg_loss = loss_all / len(loader)
if validation:
print(
f"valid; epoch={epoch+1}, valid loss = {round(float(avg_loss),4)}, accuracy = {np.round(acc,4)} \n"
)
else:
print(
f"test scores, valid loss = {round(float(avg_loss),4)}, accuracy = {np.round(acc,4)} \n"
)
def plot_loss_train_valid(training_loss_list, validation_loss_list):
f, ax = plt.subplots(1, 2, figsize=(12, 3))
    pd.Series(training_loss_list).rolling(50).mean().plot(kind="line", ax=ax[0])
    pd.Series(validation_loss_list).rolling(50).mean().plot(kind="line", ax=ax[1])
plt.subplots_adjust(wspace=0.8)
ax[0].set_title("Loss on train data")
ax[1].set_title("Loss on CV data")
plt.show()
def get_test_preds(model, loader, device):
model.eval()
with torch.no_grad():
test_classes = []
test_preds = []
for images, labels in loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predictions = torch.max(outputs, 1)
test_classes.append(labels)
test_preds.append(predictions)
test_classes = np.hstack([x.cpu().numpy() for x in test_classes])
test_preds = np.hstack([x.cpu().numpy() for x in test_preds])
return test_preds
def plot_some_preds(SHOWN_IMAGE_COUNT, columns, rows, test_preds, test_data):
pred_labels = []
for i in range(SHOWN_IMAGE_COUNT):
r = test_preds[i]
if r == 0:
pred_labels.append("chair")
elif r == 1:
pred_labels.append("kitchen")
elif r == 2:
pred_labels.append("knife")
elif r == 3:
pred_labels.append("saucepan")
# First 64 images
shown_images = [x[0] for x in test_data[:SHOWN_IMAGE_COUNT]]
fig = plt.figure(figsize=(20, 20))
for m in range(1, columns * rows + 1):
img = shown_images[m - 1].reshape([IMG_SIZE_ALEXNET, IMG_SIZE_ALEXNET, 3])
fig.add_subplot(rows, columns, m)
plt.imshow(img)
plt.title("Pred: " + pred_labels[m - 1])
plt.axis("off")
plt.show()
def main():
# Unzipping file
if UNZIP:
unzip_input_file("datasets.zip")
# prepare data
train, cv, test_data = prepare_train_valid_test(
BASE_DIR,
"datasets",
"train_data_mc.npy",
"test_data_mc.npy",
IMG_SIZE_ALEXNET,
train_size=4800,
)
transform = create_transform()
train_dataset = CustomDataset(train, transform)
valid_dataset = CustomDataset(cv, transform)
test_dataset = CustomDataset(test_data, transform)
train_loader = DataLoader(
dataset=train_dataset, batch_size=batch_size, shuffle=True
)
valid_loader = DataLoader(
dataset=valid_dataset, batch_size=batch_size, shuffle=False
)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
# setting device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# move model to cuda
model = AlexNet().to(device)
# define loss function
criterion = nn.CrossEntropyLoss()
# define optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# setting step count
num_steps = len(train_loader)
    # empty lists to monitor losses during training and validation
training_loss_list = []
validation_loss_list = []
# training loop
for epoch in range(num_epochs):
# training phase
training(
train_loader,
device,
model,
optimizer,
epoch,
criterion,
training_loss_list,
num_steps,
)
# evaluation on valid
evaluation(
model=model,
device=device,
loader=valid_loader,
criterion=criterion,
validation_loss_list=validation_loss_list,
epoch=epoch,
validation=True,
)
# evaluation on test
# evaluation(
# model=model, device=device, loader=test_loader, epoch=None, validation=False
# )
plot_loss_train_valid(training_loss_list, validation_loss_list)
# convert list to numpy array
test_preds = get_test_preds(model=model, loader=test_loader, device=device)
plot_some_preds(SHOWN_IMAGE_COUNT, columns, rows, test_preds, test_data)
if __name__ == "__main__":
main()
|
{"/main.py": ["/utils/helpers.py"]}
|
25,013
|
MuhammedBuyukkinaci/TensorFlow-Multiclass-Image-Classification-using-CNN-s
|
refs/heads/master
|
/utils/helpers.py
|
import os
import zipfile
import cv2
import numpy as np
import torchvision.transforms as transforms
def unzip_input_file(file_name) -> None:
with zipfile.ZipFile(file_name, "r") as zip_ref:
zip_ref.extractall()
# Reading .npy files
def load_data(file_path: str):
return np.load(file_path, allow_pickle=True)
def resize_np_array(arr, resize_shape):
for i in range(len(arr)):
arr[i][0] = cv2.resize(arr[i][0], (resize_shape, resize_shape))
return arr
def fix_test_data(test_data):
for i in range(len(test_data)):
test_data[i][1] = np.array(test_data[i][1])
return test_data
def prepare_train_valid_test(
BASE_DIR, folder_name, train_name, test_name, IMG_SIZE_ALEXNET, train_size=4800
):
train_data = load_data(os.path.join(BASE_DIR, folder_name, train_name))
test_data = load_data(os.path.join(BASE_DIR, folder_name, test_name))
# fix the test_data by converting list to numpy array
test_data = fix_test_data(test_data)
# resize all_images in npy files
# In order to implement ALEXNET, we are resizing them to (227,227,3)
train_data = resize_np_array(train_data, IMG_SIZE_ALEXNET)
test_data = resize_np_array(test_data, IMG_SIZE_ALEXNET)
train = train_data[:train_size]
cv = train_data[train_size:]
return train, cv, test_data
def create_transform():
# transform for input image
transform = transforms.Compose(
transforms=[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
return transform
|
{"/main.py": ["/utils/helpers.py"]}
|
25,017
|
lferran/pyflipt
|
refs/heads/main
|
/pyflipt/__init__.py
|
from .client import * # noqa
from .models import * # noqa
|
{"/pyflipt/__init__.py": ["/pyflipt/client.py", "/pyflipt/models.py"], "/example.py": ["/pyflipt/__init__.py"], "/pyflipt/client.py": ["/pyflipt/__init__.py"]}
|
25,018
|
lferran/pyflipt
|
refs/heads/main
|
/pyflipt/models.py
|
from enum import Enum
from typing import Dict, Optional
from pydantic import BaseModel
class FliptBasicUnit(BaseModel):
...
class Flag(FliptBasicUnit):
key: str
name: str
description: Optional[str]
enabled: bool
class Rule(FliptBasicUnit):
flag_key: str
segment_key: str
rank: int
id: Optional[str]
class MatchType(str, Enum):
ALL = "ALL_MATCH_TYPE"
ANY = "ANY_MATCH_TYPE"
class Segment(FliptBasicUnit):
key: str
name: str
description: Optional[str]
    match_type: MatchType = MatchType.ALL
class ComparisonType(str, Enum):
UNKNOWN = "UNKNOWN_COMPARISON_TYPE"
STRING = "STRING_COMPARISON_TYPE"
NUMBER = "NUMBER_COMPARISON_TYPE"
BOOLEAN = "BOOLEAN_COMPARISON_TYPE"
class OperatorType(str, Enum):
EQ = "=="
NEQ = "!="
IS_EMPTY = "IS EMPTY"
IS_NOT_EMPTY = "IS NOT EMPTY"
HAS_SUFFIX = "HAS SUFFIX"
HAS_PREFIX = "HAS PREFIX"
class Constraint(FliptBasicUnit):
segment_key: str
    type: ComparisonType = ComparisonType.UNKNOWN
property: str
operator: OperatorType
value: Optional[str]
class EvaluateResponse(BaseModel):
request_context: Dict[str, str]
match: bool
flag_key: str
segment_key: str
value: str
|
{"/pyflipt/__init__.py": ["/pyflipt/client.py", "/pyflipt/models.py"], "/example.py": ["/pyflipt/__init__.py"], "/pyflipt/client.py": ["/pyflipt/__init__.py"]}
|
25,019
|
lferran/pyflipt
|
refs/heads/main
|
/example.py
|
import asyncio
from pyflipt import (
ComparisonType,
Constraint,
Flag,
MatchType,
OperatorType,
Rule,
Segment,
get_client,
)
FLIPT_API_BASE_URL = "http://localhost:8083/api/v1"
async def main():
pf = get_client(FLIPT_API_BASE_URL)
flag = Flag(key="myflag", name="My Flag", enabled=True)
await pf.create(flag)
segments = [
Segment(key="user", name="Selected users", match_type=MatchType.ANY.value),
Segment(
key="account", name="Selected accounts", match_type=MatchType.ANY.value
),
]
for segment in segments:
await pf.create(segment)
constraints = [
Constraint(
segment_key=segments[0].key,
type=ComparisonType.STRING.value,
property="user",
operator=OperatorType.EQ,
value="user@mailbox.org",
),
Constraint(
segment_key=segments[1].key,
type=ComparisonType.STRING.value,
property="account",
operator=OperatorType.EQ,
value="some-client-account",
),
]
for constraint in constraints:
await pf.create(constraint)
rules = []
for rank, segment in enumerate(segments):
rules.append(Rule(flag_key=flag.key, segment_key=segment.key, rank=rank + 1))
for rule in rules:
await pf.create(rule)
await pf.close()
if __name__ == "__main__":
asyncio.run(main())
|
{"/pyflipt/__init__.py": ["/pyflipt/client.py", "/pyflipt/models.py"], "/example.py": ["/pyflipt/__init__.py"], "/pyflipt/client.py": ["/pyflipt/__init__.py"]}
|
25,020
|
lferran/pyflipt
|
refs/heads/main
|
/pyflipt/client.py
|
import json
from typing import List
from aiohttp import ClientSession
from pyflipt import models
__all__ = ["get_client", "FliptClient", "FliptError"]
def safe_path_join(*url_parts) -> str:
parts = []
n_parts = len(url_parts)
for index, part in enumerate(url_parts):
if index == 0:
parts.append(part.rstrip("/"))
elif index == n_parts - 1:
parts.append(part.lstrip("/"))
else:
            parts.append(part.strip("/"))
return "/".join(parts)
CONFLICT_CODE = 3
class FliptError(Exception):
def __init__(self, resp_json):
self.resp_json = resp_json
def __repr__(self):
return f"FliptError({self.resp_json})"
class FliptClient:
def __init__(self, base_url):
self.base_url = base_url
self.session = ClientSession()
def get_url(self, unit: models.FliptBasicUnit) -> str:
if isinstance(unit, models.Flag):
url = safe_path_join(self.base_url, "/flags")
elif isinstance(unit, models.Segment):
url = safe_path_join(self.base_url, "/segments")
elif isinstance(unit, models.Constraint):
url = safe_path_join(
self.base_url, f"/segments/{unit.segment_key}/constraints"
)
elif isinstance(unit, models.Rule):
url = safe_path_join(self.base_url, f"/flags/{unit.flag_key}/rules")
else:
raise ValueError(f"Not supported yet {unit}")
return url
async def create(self, unit: models.FliptBasicUnit):
url = self.get_url(unit)
async with self.session.post(url, data=unit.json()) as resp:
resp_json = await resp.json()
if resp.status == 200:
if isinstance(unit, models.Rule):
unit.id = resp_json["id"]
return resp_json
if resp.status == 400:
if resp_json.get("code") == CONFLICT_CODE:
# Already there
return unit.dict()
raise FliptError(resp_json)
async def delete(self, unit: models.FliptBasicUnit):
url = self.get_url(unit)
async with self.session.delete(url) as resp:
if resp.status == 404 or resp.status == 200:
return
else:
resp_json = await resp.json()
raise FliptError(resp_json)
async def order_rules(self, flag_key: str, rule_ids: List[str]):
url = safe_path_join(self.base_url, f"/flags/{flag_key}/rules/order")
async with self.session.put(
url, data=json.dumps({"flag_key": flag_key, "rule_ids": rule_ids})
) as resp:
if resp.status != 200:
resp_json = await resp.json()
raise FliptError(resp_json)
async def close(self):
if not self.session.closed:
await self.session.close()
def get_client(base_url) -> FliptClient:
return FliptClient(base_url)
|
{"/pyflipt/__init__.py": ["/pyflipt/client.py", "/pyflipt/models.py"], "/example.py": ["/pyflipt/__init__.py"], "/pyflipt/client.py": ["/pyflipt/__init__.py"]}
|
25,023
|
ego-alt/blobchain
|
refs/heads/master
|
/blobchain/peer.py
|
import blobchain.blockchain as blockchain
import asyncio
import socket
import ast
# Hard-coded nodes in the network provide a contact point for finding other peers
defaulthost = '127.0.0.1'
sisters = [8888, 8877, 8866, 8855]
peerlist = []
def extract_data(data):
# Processes the raw data received into a tuple of the form (message type, message)
decoded = data.decode('utf-8')
msgtype, message = ast.literal_eval(decoded)
return msgtype, message
def process_message(msgtype, message):
# Converts the tuple (message type, message) into a string which can be encoded and sent
packet = str((msgtype, message))
encoded = packet.encode('utf-8')
return encoded
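# Illustrative round trip over the wire format used above:
# process_message('PING', ('127.0.0.1', 8888)) -> b"('PING', ('127.0.0.1', 8888))"
# extract_data(b"('PING', ('127.0.0.1', 8888))") -> ('PING', ('127.0.0.1', 8888))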
class BlobNode:
def __init__(self, PORT, HOST=None):
"""Initialises a fully functioning peer node which can handle and send requests"""
self.blo = blockchain.Blobchain()
self.maxpeers = 100
self.port = PORT
if not HOST:
name = socket.gethostname()
self.host = socket.gethostbyname(name)
else:
self.host = HOST
self.address = None
async def main(self):
server = await asyncio.start_server(self.handle_echo, self.host, self.port)
self.address = server.sockets[0].getsockname()
print(f'Serving on {self.address}')
try:
await asyncio.gather(*(self.build_peers(defaulthost, peerport) for peerport in sisters))
except OSError:
pass
async with server:
await server.serve_forever()
async def routine(self, PEERHOST, PEERPORT, msgtype, message):
"""Creates the routine of sending messages, receiving replies, and correctly using the information"""
newpeer = (PEERHOST, PEERPORT)
try:
replytype, reply = await self.send_echo(PEERHOST, PEERPORT, msgtype, message)
await self.handle_reply(newpeer, replytype, reply)
return True
except OSError:
return False
async def broadcast(self, msgtype, message):
for peer in peerlist:
peerhost, peerport = peer
await self.routine(peerhost, peerport, msgtype, message)
async def send_echo(self, PEERHOST, PEERPORT, msgtype, message):
"""Sends and receives messages"""
reader, writer = await asyncio.open_connection(PEERHOST, PEERPORT)
newpeer = (PEERHOST, PEERPORT)
print(f'Sending message {msgtype}: {message!r} to {newpeer!r}...')
packet = process_message(msgtype, message)
writer.write(packet)
data = await reader.read(1024)
replytype, reply = extract_data(data)
await writer.drain()
writer.close()
return replytype, reply
async def handle_reply(self, newpeer, replytype, reply):
"""Interprets replytype in order to decide what to do with reply
:param newpeer: Name of the client node
:param replytype: In the format REPL-{original request} so that the purpose of the reply is known
:param reply: Information satisfying the original request"""
print(f'Received reply {replytype}: {reply!r} from {newpeer!r}')
response = ReplyHandler()
condition, element = await response.reply_data(replytype, reply, self.blo.chain)
if condition == 'UPDATE':
await self.update_blockchain(element)
print(self.blo.chain)
async def handle_echo(self, reader, writer):
"""Receives incoming messages and returns an appropriate reply"""
data = await reader.read(1024)
msgtype, message = extract_data(data)
print(f'Received message {msgtype}: {message!r}')
response = Handler(self.maxpeers)
anunctype, announcement = await response.handle_data(data, self.blo)
if anunctype:
await self.broadcast(anunctype, announcement)
writer.write(response.packet)
        print('Sending reply...')
await writer.drain()
writer.close()
async def update_blockchain(self, other_chain):
self.blo.chain = other_chain
print('The blobchain has been updated with the longest chain')
async def build_peers(self, host, port):
try:
newpeer = (host, port)
if (host != self.host or port != self.port) and (newpeer not in peerlist):
if len(peerlist) < self.maxpeers and await self.routine(host, port, 'PING', self.address):
peerlist.append(newpeer)
print(f'{host!r}:{port!r} added to peer list')
            print('Building peers...')
_, reply = await self.send_echo(host, port, 'LIST', None)
reply = ast.literal_eval(str(reply))
for peeraddr in reply:
peerhost, peerport = peeraddr
if peerhost != self.host or peerport != self.port:
try:
await self.build_peers(peerhost, peerport)
except OSError:
pass
except OSError:
pass
class Handler:
def __init__(self, maxpeers, PEERHOST=None, PEERPORT=None):
"""The object Handler takes incoming requests and decides how to reply
PING: adds the sender to the peer list if the maximum has not been reached
LIST: shares a copy of the full peer list to the sender
CASH: puts in proof of work to verify the sent transaction
BLOB: shares a copy of the full blockchain to the sender"""
self.maxpeers = maxpeers
self.peerhost = PEERHOST
self.peerport = PEERPORT
self.handlers = {'PING': self.ping_check,
'LIST': self.list_peers,
'BLOB': self.request_blobchain,
'BLOC': self.fresh_block}
# value_handlers have to return values to the BlobNode for further handling
self.value_handlers = {'CASH': self.transaction}
self.packet = None
async def handle_data(self, data, blo):
msgtype, message = extract_data(data)
message = str(message)
if msgtype in self.handlers:
await self.handlers[msgtype](message, blo)
return None, None
elif msgtype in self.value_handlers:
anunctype, announcement = await self.value_handlers[msgtype](message, blo)
return anunctype, announcement
else:
return None, None
async def ping_check(self, message, *args):
"""PING is sent to a peer contact which was not initially in the peer list
PING includes its message type 'PING' and the sender's contact details, i.e. host and port
In response, the receiver checks whether the sender is in the peer list, and if not, adds them"""
self.peerhost, self.peerport = ast.literal_eval(message)
newpeer = (self.peerhost, self.peerport)
if newpeer not in peerlist and len(peerlist) < self.maxpeers:
peerlist.append(newpeer)
print(f'{self.peerhost!r}:{self.peerport!r} added to peer list')
replytype, reply = 'REPL-PING', None
self.packet = process_message(replytype, reply)
elif newpeer in peerlist:
print(f'ALERT: {self.peerhost!r}:{self.peerport!r} is already a peer')
replytype, reply = 'ERRO', 'Request to add was declined, because you are already listed'
self.packet = process_message(replytype, reply)
        else:
print(f'ALERT: peer list has reached its maximum capacity of {self.maxpeers!r}')
replytype, reply = 'ERRO', 'Request to add was declined, because maximum number of peers has been reached'
self.packet = process_message(replytype, reply)
async def list_peers(self, _, *args):
"""Upon receiving LIST, shares the full peer list to the node which made the request"""
replytype, reply = 'REPL-LIST', peerlist
self.packet = process_message(replytype, reply)
async def request_blobchain(self, _, blo):
blobchain = []
for blob in blo.chain:
blobchain.append(vars(blob))
replytype, reply = 'REPL-BLOB', blobchain
self.packet = process_message(replytype, reply)
async def fresh_block(self, message, blo):
"""On receiving BLOC, the node updates its own chain and broadcasts the new block to its own peers
This should result in a network of broadcasts, in order to announce the existence of the new transaction"""
transaction = ast.literal_eval(message)
recipient = transaction["recipient"]
sender = transaction["sender"]
amount = transaction["amount"]
blo.new_block(recipient, sender, amount)
async def transaction(self, message, blo):
"""The receiver verifies the transaction and mines a new block"""
transaction = ast.literal_eval(message)
recipient = transaction["recipient"]
sender = transaction["sender"]
amount = transaction["amount"]
blo.new_block(recipient, sender, amount)
print(f'{sender} successfully transferred {amount} blobcoin to {recipient}')
replytype, reply = 'REPL-CASH', None
self.packet = process_message(replytype, reply)
return 'BLOC', transaction
class ReplyHandler:
"""The object ReplyHandler receives replies from its previous requests and decides how to use the information
REPL-LIST: adds new peers to the peer list
REPL-CASH: reports the first instance of a verification of the transaction
    REPL-BLOB: compares the highest index in the chain received with its own blobchain"""
def __init__(self):
self.handlers = {'REPL-LIST': self.reply_list,
'REPL-CASH': self.reply_cash,
'REPL-BLOB': self.reply_blob}
async def reply_data(self, command, reply, blockchain):
reply = ast.literal_eval(str(reply))
if command == 'REPL-BLOB':
return await self.reply_blob(reply, blockchain)
        elif command in self.handlers:
return await self.handlers[command](reply)
else:
return False, None
async def reply_list(self, reply):
newpeers = []
for peeraddr in reply:
            if peeraddr not in peerlist:
                newpeers.append(peeraddr)
peerlist.extend(newpeers)
counter = len(newpeers)
print(f'{counter} new peers added to peer list')
return True, None
async def reply_cash(self, reply):
return True, None
async def reply_blob(self, reply, blockchain):
other_chain = reply
block_num = len(other_chain)
return ('UPDATE', other_chain) if len(blockchain) < block_num else (False, None)
|
{"/blobchain/peer.py": ["/blobchain/blockchain.py"], "/blobchain/miner.py": ["/blobchain/peer.py"], "/blobchain/key_generator.py": ["/blobchain/primes.py"], "/blobchain/client.py": ["/blobchain/peer.py"]}
|
25,024
|
ego-alt/blobchain
|
refs/heads/master
|
/blobchain/miner.py
|
from blobchain.peer import BlobNode
import asyncio
import sys
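# Usage (assumed entry point): python -m blobchain.miner <port> [host]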
port = int(sys.argv[1])
if len(sys.argv) == 2:
host = None
else:
host = str(sys.argv[2])
asyncio.run(BlobNode(port, host).main())
|
{"/blobchain/peer.py": ["/blobchain/blockchain.py"], "/blobchain/miner.py": ["/blobchain/peer.py"], "/blobchain/key_generator.py": ["/blobchain/primes.py"], "/blobchain/client.py": ["/blobchain/peer.py"]}
|
25,025
|
ego-alt/blobchain
|
refs/heads/master
|
/blobchain/key_generator.py
|
from random import randint
from hashlib import sha256
import blobchain.primes as primes
"""Adapted the Digital Signature Algorithm as documented by the U.S. Department of Commerce
Section B.1.1: Key Pair Generation Using Extra Random Bits
Create public and private keys"""
def bits_to_integer(bits, N):
    # bits[0] is treated as the most significant of the N bits
    integer = 0
    for n in range(N):
        two_power = 2 ** (N - 1 - n)
        integer += two_power * int(bits[n])
    return integer
def concatenate_binary(variables):
f_string = ""
for n in variables:
n = bin(n)[2:]
f_string = f"{f_string}{n}"
return f_string
def find_inverse(z, a):
if 0 < z < a:
i, j = a, z
y2, y1 = 0, 1
while j > 0:
quotient = i // j
remainder = i - j * quotient
y = y2 - y1 * quotient
i, j = j, remainder
y2, y1 = y1, y
if i != 1:
print("ERROR")
else:
return y2 % a
else:
print(f"{z}, {a} are INVALID")
class PairKey:
def __init__(self, L, N, seedlen):
""":param L: <int> Bit length of p
:param N: <int> Bit length of q
:param input_seed: <int> Any random number"""
self.seedlen = seedlen
first_seed = self.find_seed(N)
find_q = primes.ST_random_prime(N, first_seed)
(q_status, self.q, q_seed, q_counter) = find_q.find_prime()
p_0 = primes.ST_random_prime(L // 2 + 1, q_seed)
(p0_status, p0, seed, gen_counter) = p_0.find_prime()
(p_status, self.p, p_seed, pgen_counter) = p_0.find_p(self.q, p0, L, seed, gen_counter)
dp_seed = concatenate_binary([first_seed, p_seed, q_seed])
self.domain_parameter_seed = bits_to_integer(dp_seed, len(dp_seed))
validate = Validate()
(self.L, self.N) = validate.pq(self.p, self.q, L, N)
validate.LN(self.L, self.N)
self.g = self.find_g(self.p, self.q, 1)
self.c = self.find_c()
self.x = (self.c % (self.q - 1)) + 1
self.y = pow(self.g, self.x, self.p)
validate.xy(self.x, self.y, self.p, self.q)
def find_seed(self, N):
first_seed = 0
if self.seedlen < N:
print("seedlen is INVALID")
while first_seed < 2 ** (N - 1):
first_seed = [randint(0, 1) for _ in range(self.seedlen)]
first_seed = bits_to_integer(first_seed, self.seedlen)
return first_seed
def find_g(self, p, q, index):
""":param p: <int> Prime modulus
:param q: <int> Prime divisor of (p - 1)
:param index: <str> Bit string of length 8
:return g: <int> Generator of a subgroup of order q in the multiplicative group GF(p)"""
e = (p - 1) // q
count = 0
g = 0
while g < 2:
count += 1
ggen = 0x6767656E
U = f"{self.domain_parameter_seed}{concatenate_binary([ggen, index, count])}"
W = int(sha256(U.encode()).hexdigest(), 16)
g = pow(W, e, p)
return g
def find_c(self):
c = [randint(0, 1) for _ in range(self.N + 64)]
c = bits_to_integer(c, self.N + 64)
return c
def find_k(self):
c = [randint(0, 1) for _ in range(self.N + 64)]
c = bits_to_integer(c, self.N + 64)
k = c % (self.q - 1) + 1
k_inv = find_inverse(k, self.q)
return k, k_inv
    def gen_signature(self, M):
        """:param M: <str> Transaction details
        Draws a fresh secret number k (with its mod q inverse) until r and s are nonzero"""
        M = int(sha256(M.encode()).hexdigest(), 16)
        z = bin(M)[2:][:min(self.N, 256)]
        z = bits_to_integer(z, len(z))
        r, s = 0, 0
        while r == 0 or s == 0:
            (k, k_inv) = self.find_k()
            r = pow(self.g, k, self.p) % self.q
            s = (k_inv * (z + self.x * r)) % self.q
        return r, s
    # Verification relies on three-argument pow for fast modular exponentiation
def verify_signature(self, M, r, s):
"""Prior to verifying the signature, the domain parameters and public key should be available to the verifier
:param M: <str> Received version of M (M')
:param r: <int> Received version of r (r')
:param s: <int> Received version of s (s')
"""
        if not (0 < r < self.q and 0 < s < self.q):
            return False
        w = find_inverse(s, self.q)
        M = int(sha256(M.encode()).hexdigest(), 16)
        z = bin(M)[2:][:min(self.N, 256)]
        z = bits_to_integer(z, len(z))
        u1 = (z * w) % self.q
        u2 = (r * w) % self.q
        v = (pow(self.g, u1, self.p) * pow(self.y, u2, self.p)) % self.p % self.q
        return v == r
class Validate:
def __init__(self):
pass
def LN(self, L, N):
min_L, min_N = 1024, 160
if L < min_L or N < min_N:
raise Exception("(L, N) pair is INVALID")
def pq(self, p, q, L, N):
if 2 ** L <= p or 2 ** N <= q:
print("FAILURE")
if (p - 1) % q != 0:
print("q is INVALID")
return L, N
    def xy(self, x, y, p, q):
        if not 1 <= x <= (q - 1):
            print("x is INVALID")
        elif not 1 <= y <= (p - 1):
            print("y is INVALID")
        else:
            print("SUCCESS")
pair = PairKey(1024, 160, 927)
(r, s) = pair.gen_signature("hello")
print(pair.verify_signature("hello", r, s))
|
{"/blobchain/peer.py": ["/blobchain/blockchain.py"], "/blobchain/miner.py": ["/blobchain/peer.py"], "/blobchain/key_generator.py": ["/blobchain/primes.py"], "/blobchain/client.py": ["/blobchain/peer.py"]}
|
25,026
|
ego-alt/blobchain
|
refs/heads/master
|
/web/server.py
|
import os
from abc import ABC
from tornado.web import Application, RequestHandler
from tornado.options import define, options, parse_command_line
from tornado.ioloop import IOLoop
define('port', default=5000, help='Port to listen on')
STATIC_DIRNAME = "assets"
settings = {
"static_path": os.path.join(os.path.dirname(__file__), STATIC_DIRNAME),
"static_url_prefix": "/assets/",
}
class InfoView(RequestHandler, ABC):
SUPPORTED_METHODS = ["GET", "POST"]
def get(self):
self.render('index.html')
def post(self):
if self.get_argument("send", None) is not None:
sender = self.get_body_argument("sender")
recipient = self.get_body_argument("recipient")
amount = self.get_body_argument("amount")
transaction = {'sender': sender, 'recipient': recipient, 'amount': amount}
if self.get_argument("check", None) is not None:
key = self.get_body_argument("key")
if self.get_argument("download", None) is not None:
pass
def main():
parse_command_line()
app = Application([
(r"/", InfoView),
], debug=True, **settings)
app.listen(options.port)
IOLoop.current().start()
if __name__ == "__main__":
main()
|
{"/blobchain/peer.py": ["/blobchain/blockchain.py"], "/blobchain/miner.py": ["/blobchain/peer.py"], "/blobchain/key_generator.py": ["/blobchain/primes.py"], "/blobchain/client.py": ["/blobchain/peer.py"]}
|
25,027
|
ego-alt/blobchain
|
refs/heads/master
|
/blobchain/blockchain.py
|
from hashlib import sha256
from time import time
class Blobchain:
def __init__(self):
self.chain = []
self.genesis_block()
def genesis_block(self):
# Initialises the blockchain with the genesis block
        self.chain.append(Blob(0, "1", None, None, None))
def new_block(self, recipient, sender, amount):
"""Creates a new Block
:param recipient: <str> Address of the Recipient
:param sender: <str> Address of the Sender
:param amount: <int> Amount of money transferred"""
index = len(self.chain) + 1
previous_hash = self.chain[-1].own_hash
fresh_block = Blob(index, previous_hash, recipient, sender, amount)
self.add_block(fresh_block)
return fresh_block
def add_block(self, block):
"""Adds a new Block to the Blockchain given the Proof of Work"""
if self.proof_of_work(block):
self.chain.append(block)
def proof_of_work(self, block):
"""Verifies whether the Nonce generates a hash with 3 leading zeroes
:param block: <class> Block
:return: <bool>"""
guess_hash = sha256(f"{block.index - 1}{block.nonce}{block.previous_hash}".encode()).hexdigest()
return guess_hash[:3] == "000"
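# Rough difficulty estimate (illustrative): three leading hex zeroes means a
# valid nonce turns up about once per 16**3 = 4096 guesses on average.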
class Blob:
def __init__(self, index, previous_hash, recipient, sender, amount, nonce=0):
"""Initialises a Block
:param index: <int> Index of a Block in the Blockchain
:param previous_hash: <str> SHA256 hash of the preceding Block in the Blockchain
:param nonce: <int> Guess number for Proof of Work
"""
        self.index = index
        self.timestamp = time()
        self.previous_hash = previous_hash
        self.nonce = self.work(nonce)
        self.transaction = self.build_transaction(recipient, sender, amount)
        self.own_hash = self.create_hash()
def create_hash(self):
"""Generates a SHA256 hash for the new Block
:return: <str>"""
data = f"{self.index}{self.timestamp}{self.previous_hash}{self.transaction}"
new_hash = sha256(data.encode()).hexdigest()
return new_hash
def work(self, nonce):
"""Solves for Nonce
:return: <int> nonce"""
while sha256(f"{self.index - 1}{nonce}{self.previous_hash}".encode()).hexdigest()[:3] != "000":
nonce += 1
return nonce
    def build_transaction(self, recipient, sender, amount):
"""Adds the new transaction to the Block waiting to be mined"""
transaction = {
"recipient": recipient,
"sender": sender,
"amount": amount
}
return transaction
|
{"/blobchain/peer.py": ["/blobchain/blockchain.py"], "/blobchain/miner.py": ["/blobchain/peer.py"], "/blobchain/key_generator.py": ["/blobchain/primes.py"], "/blobchain/client.py": ["/blobchain/peer.py"]}
|
25,028
|
ego-alt/blobchain
|
refs/heads/master
|
/blobchain/primes.py
|
from hashlib import sha1
from math import gcd
from sympy import isprime
"""Generation of probable primes p and q using the Shawe-Taylor method
Used as parameters for securely generating keys and signatures"""
def hash_int(digit):
# Converts SHA1 hash to integer
digit = str(digit)
new_hash = sha1(digit.encode("utf-8")).hexdigest()
return int(new_hash, 16)
class ST_random_prime:
def __init__(self, length, input_seed):
self.length = length
self.prime_seed = input_seed
self.prime = 0
self.prime_gen_counter = 0
self.outlen = 160
def find_prime(self):
if self.length < 2:
return 'FAILURE', 0, 0, 0
elif 2 <= self.length < 33:
return self.find_c()
else:
return self.find_c2()
def find_c(self):
# Generates a pseudorandom integer c of length bits
c = hash_int(self.prime_seed) + hash_int(self.prime_seed + 1)
# Sets prime to the least odd integer greater than or equal to c
while self.prime_gen_counter <= (4 * self.length) and self.prime == 0:
c = 2 ** (self.length - 1) + c % 2 ** (self.length - 1)
            c = 2 * (c // 2) + 1
self.prime_gen_counter = self.prime_gen_counter + 1
self.prime_seed = self.prime_seed + 2
if isprime(c):
self.prime = c
return "SUCCESS", c, self.prime_seed, self.prime_gen_counter
if self.prime_gen_counter > (4 * self.length):
return 'FAILURE', 0, 0, 0
def find_c2(self):
# Generates a pseudorandom integer x in the interval [2 ** (self.length - 1), 2 ** (self.length)]
class_object = ST_random_prime((self.length // 2 + 1), self.prime_seed)
(status, c0, self.prime_seed, self.prime_gen_counter) = class_object.find_prime()
if status == "FAILURE":
return 'FAILURE', 0, 0, 0
iterations = (self.length // self.outlen) - 1
old_counter = self.prime_gen_counter
x = 0
for i in range(iterations):
x = x + hash_int(self.prime_seed + i) * (2 ** (i * self.outlen))
self.prime_seed = self.prime_seed + iterations + 1
x = 2 ** (self.length - 1) + x % 2 ** (self.length - 1)
# Generates a candidate prime c in the interval [2 ** (self.length - 1), 2 ** (self.length)]
t = x // (2 * c0)
while self.prime_gen_counter <= (4 * self.length + old_counter):
if 2 * t * c0 + 1 > 2 ** self.length:
t = 2 ** (self.length - 1) // (2 * c0)
c = 2 * t * c0 + 1
self.prime_gen_counter = self.prime_gen_counter + 1
a = 0
for j in range(iterations):
a = a + (hash_int(self.prime_seed + j) * 2 ** (j * self.outlen))
self.prime_seed = self.prime_seed + iterations + 1
a = 2 + a % (c - 3)
z = pow(a, (2 * t), c)
if 1 == gcd(z - 1, c) and 1 == pow(z, c0, c):
self.prime = c
return 'SUCCESS', c, self.prime_seed, self.prime_gen_counter
t += 1
if self.prime_gen_counter > (4 * self.length + old_counter):
return 'FAILURE', 0, 0, 0
def find_p(self, q, p0, L, p_seed, pgen_counter):
iterations = L // self.outlen - 1
old_counter = pgen_counter
x = 0
for i in range(iterations):
x = x + hash_int(p_seed + i) * (2 ** (i * self.outlen))
p_seed = p_seed + iterations + 1
x = 2 ** (L - 1) + x % 2 ** (L - 1)
c0 = q * p0
t = x // (2 * c0)
while pgen_counter <= (4 * L + old_counter):
if 2 * t * c0 + 1 > 2 ** L:
t = 2 ** (L - 1) // (2 * c0)
p = 2 * t * c0 + 1
pgen_counter = pgen_counter + 1
a = 0
for j in range(iterations):
a = a + (hash_int(p_seed + j) * 2 ** (j * self.outlen))
p_seed = p_seed + iterations + 1
a = 2 + a % (p - 3)
z = pow(a, (2 * t * q), p)
if 1 == gcd(z - 1, p) and 1 == pow(z, p0, p):
self.prime = p
return 'SUCCESS', p, p_seed, pgen_counter
t += 1
        if pgen_counter > (4 * L + old_counter):
return 'FAILURE', 0, 0, 0
|
{"/blobchain/peer.py": ["/blobchain/blockchain.py"], "/blobchain/miner.py": ["/blobchain/peer.py"], "/blobchain/key_generator.py": ["/blobchain/primes.py"], "/blobchain/client.py": ["/blobchain/peer.py"]}
|
25,029
|
ego-alt/blobchain
|
refs/heads/master
|
/blobchain/client.py
|
from flask import Flask, request
import blobchain.peer as peer
import asyncio
app = Flask(__name__)
defaulthost = '127.0.0.1'
sisters = [8888, 8877, 8866, 8855]
port = 5000
@app.route('/transaction', methods=['POST'])
def make_transaction():
    transaction = request.get_json(force=True)
    print(f'Data received: {transaction}')
    client = peer.BlobNode(port)
    try:
        for peerport in sisters:
            asyncio.run(client.send_echo(defaulthost, peerport, 'CASH', transaction))
        print('Your transaction has been broadcast')
    except OSError:
        pass
    return 'Your transaction has been broadcast\n'
if __name__ == '__main__':
    app.run()
|
{"/blobchain/peer.py": ["/blobchain/blockchain.py"], "/blobchain/miner.py": ["/blobchain/peer.py"], "/blobchain/key_generator.py": ["/blobchain/primes.py"], "/blobchain/client.py": ["/blobchain/peer.py"]}
|
25,041
|
guma44/pyCLIP
|
refs/heads/master
|
/WiggleReader.py
|
from HTSeq import GenomicInterval, FileOrSequence
class WiggleReader(FileOrSequence):
"""Parse a Wiggle file"""
def __init__(self, filename_or_sequence, strand):
"""@todo: to be defined1.
:param filename_or_sequence: @todo
:param strand: @todo
"""
FileOrSequence.__init__(self, filename_or_sequence)
self.strand = strand
def __iter__(self):
for line in FileOrSequence.__iter__(self):
if line.startswith("track"):
continue
chrom, start, end, score = line.rstrip().split("\t")
iv = GenomicInterval(chrom, int(start), int(end), self.strand)
yield iv, float(score)
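# Illustrative use (file name is hypothetical):
# for iv, score in WiggleReader("signal.plus.wig", "+"):
#     print(iv.chrom, iv.start, iv.end, score)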
|
{"/test/test_ShortRead.py": ["/ShortRead.py"]}
|
25,042
|
guma44/pyCLIP
|
refs/heads/master
|
/test/test_ShortRead.py
|
import os
import sys
import unittest
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(script_dir, ".."))
from ShortRead import ShortRead, IncorrectCigarException, CLIPzCigar
class CLIPzCigarTest(unittest.TestCase):
def setUp(self):
# plus strand
self.cigar1 = CLIPzCigar(cigar="30", strand="+", chrom="chr1", position=1000)
self.cigar2 = CLIPzCigar(cigar="20MTC5IG1MTG11DA9", strand="+", chrom="chr1", position=1000)
self.cigar3 = CLIPzCigar(cigar="19MTC4DA15MTG3IT1", strand="+", chrom="chr1", position=1000)
self.cigar4 = CLIPzCigar(cigar="5DADCDGDTITITIADC10", strand="+", chrom="chr1", position=1000)
self.cigar5 = CLIPzCigar(cigar="MTCMTCITITMTCDADAITDA10", strand="+", chrom="chr1", position=1000)
# minus strand
self.cigar1_minus = CLIPzCigar(cigar="30", strand="-", chrom="chr1", position=1000)
self.cigar2_minus = CLIPzCigar(cigar="20MTC5IG1MTG11DA9", strand="-", chrom="chr1", position=1000)
self.cigar3_minus = CLIPzCigar(cigar="19MTC4DA15MTG3IT1", strand="-", chrom="chr1", position=1000)
self.cigar4_minus = CLIPzCigar(cigar="5DADCDGDTITITIADC10", strand="-", chrom="chr1", position=1000)
self.cigar5_minus = CLIPzCigar(cigar="MTCMTCITITMTCDADAITDA10", strand="-", chrom="chr1", position=1000)
self.cigar6_minus = CLIPzCigar(cigar="MCTMCT10MAGMAG", strand="-", chrom="chr1", position=1000)
def test_cigar_properly_split_the_string(self):
self.assertEqual(self.cigar1.cigar_list, ['30'])
self.assertEqual(self.cigar2.cigar_list, ['20', 'MTC', '5', 'IG', '1', 'MTG', '11', 'DA', '9'])
self.assertEqual(self.cigar3.cigar_list, ['19', 'MTC', '4', 'DA', '15', 'MTG', '3', 'IT', '1'])
self.assertEqual(self.cigar4.cigar_list, ['5', 'DA', 'DC', 'DG', 'DT', 'IT', 'IT', 'IA','DC', '10'])
self.assertEqual(self.cigar5.cigar_list, ["MTC", "MTC", "IT", "IT", "MTC",'DA', 'DA', 'IT', 'DA', "10"])
# the same should be for minus strand
self.assertEqual(self.cigar1_minus.cigar_list, ['30'])
self.assertEqual(self.cigar2_minus.cigar_list, ['20', 'MTC', '5', 'IG', '1', 'MTG', '11', 'DA', '9'])
self.assertEqual(self.cigar3_minus.cigar_list, ['19', 'MTC', '4', 'DA', '15', 'MTG', '3', 'IT', '1'])
self.assertEqual(self.cigar4_minus.cigar_list, ['5', 'DA', 'DC', 'DG', 'DT', 'IT', 'IT', 'IA','DC', '10'])
self.assertEqual(self.cigar5_minus.cigar_list, ["MTC", "MTC", "IT", "IT", "MTC",'DA', 'DA', 'IT', 'DA', "10"])
self.assertEqual(self.cigar6_minus.cigar_list, ["MCT", "MCT", "10", "MAG", "MAG"])
def test_cigar_parsed_properly_for_plus_strand(self):
# cigar2
self.assertEqual(len(self.cigar2.features['MTC']), 1)
self.assertEqual(len(self.cigar2.features['IG']), 1)
self.assertEqual(len(self.cigar2.features['MTG']), 1)
self.assertEqual(len(self.cigar2.features['DA']), 1)
# cigar3
self.assertEqual(len(self.cigar3.features['MTC']), 1)
self.assertEqual(len(self.cigar3.features['DA']), 1)
self.assertEqual(len(self.cigar3.features['MTG']), 1)
self.assertEqual(len(self.cigar3.features['IT']), 1)
# cigar4
self.assertEqual(len(self.cigar4.features['DA']), 1)
self.assertEqual(len(self.cigar4.features['DC']), 2)
self.assertEqual(len(self.cigar4.features['DG']), 1)
self.assertEqual(len(self.cigar4.features['DT']), 1)
self.assertEqual(len(self.cigar4.features['IT']), 2)
self.assertEqual(len(self.cigar4.features['IA']), 1)
# cigar5
self.assertEqual(len(self.cigar5.features['MTC']), 3)
self.assertEqual(len(self.cigar5.features['IT']), 3)
self.assertEqual(len(self.cigar5.features['DA']), 3)
def test_cigar_parsed_properly_for_minus_strand(self):
# cigar2_minus
self.assertEqual(len(self.cigar2_minus.features['MAG']), 1)
self.assertEqual(len(self.cigar2_minus.features['IC']), 1)
self.assertEqual(len(self.cigar2_minus.features['MAG']), 1)
self.assertEqual(len(self.cigar2_minus.features['DT']), 1)
# cigar3_minus
self.assertEqual(len(self.cigar3_minus.features['MAG']), 1)
self.assertEqual(len(self.cigar3_minus.features['DT']), 1)
self.assertEqual(len(self.cigar3_minus.features['MAC']), 1)
self.assertEqual(len(self.cigar3_minus.features['IA']), 1)
# cigar4_minus
self.assertEqual(len(self.cigar4_minus.features['DT']), 1)
self.assertEqual(len(self.cigar4_minus.features['DG']), 2)
self.assertEqual(len(self.cigar4_minus.features['DC']), 1)
self.assertEqual(len(self.cigar4_minus.features['DA']), 1)
self.assertEqual(len(self.cigar4_minus.features['IA']), 2)
self.assertEqual(len(self.cigar4_minus.features['IT']), 1)
# cigar5_minus
self.assertEqual(len(self.cigar5_minus.features['MAG']), 3)
self.assertEqual(len(self.cigar5_minus.features['IA']), 3)
self.assertEqual(len(self.cigar5_minus.features['DT']), 3)
# cigar6_minus
self.assertEqual(len(self.cigar6_minus.features['MTC']), 2)
self.assertEqual(len(self.cigar6_minus.features['MGA']), 2)
def test_features_on_plus_strand_have_correct_position(self):
# cigar2
self.assertEqual(self.cigar2.features['MTC'][0].iv.start, 1020)
self.assertEqual(self.cigar2.features['IG'][0].iv.start, 1025)
self.assertEqual(self.cigar2.features['MTG'][0].iv.start, 1027)
self.assertEqual(self.cigar2.features['DA'][0].iv.start, 1039)
# cigar3
self.assertEqual(self.cigar3.features['MTC'][0].iv.start, 1019)
self.assertEqual(self.cigar3.features['DA'][0].iv.start, 1024)
self.assertEqual(self.cigar3.features['MTG'][0].iv.start, 1040)
self.assertEqual(self.cigar3.features['IT'][0].iv.start, 1043)
# cigar4
self.assertEqual(self.cigar4.features['DA'][0].iv.start, 1005)
self.assertEqual(self.cigar4.features['DC'][0].iv.start, 1006)
self.assertEqual(self.cigar4.features['DG'][0].iv.start, 1007)
self.assertEqual(self.cigar4.features['DT'][0].iv.start, 1008)
self.assertEqual(self.cigar4.features['IT'][0].iv.start, 1008)
self.assertEqual(self.cigar4.features['IT'][1].iv.start, 1008)
self.assertEqual(self.cigar4.features['IA'][0].iv.start, 1008)
self.assertEqual(self.cigar4.features['DC'][1].iv.start, 1009)
# cigar5
self.assertEqual(self.cigar5.features['MTC'][0].iv.start, 1000)
self.assertEqual(self.cigar5.features['MTC'][1].iv.start, 1001)
self.assertEqual(self.cigar5.features['IT'][0].iv.start, 1001)
self.assertEqual(self.cigar5.features['IT'][1].iv.start, 1001)
self.assertEqual(self.cigar5.features['MTC'][2].iv.start, 1002)
self.assertEqual(self.cigar5.features['DA'][0].iv.start, 1003)
self.assertEqual(self.cigar5.features['DA'][1].iv.start, 1004)
self.assertEqual(self.cigar5.features['IT'][2].iv.start, 1004)
self.assertEqual(self.cigar5.features['DA'][2].iv.start, 1005)
def test_features_on_minus_strand_have_correct_position(self):
# cigar2_minus
self.assertEqual(self.cigar2_minus.features['MAG'][0].iv.start, 1020)
self.assertEqual(self.cigar2_minus.features['IC'][0].iv.start, 1025)
self.assertEqual(self.cigar2_minus.features['MAC'][0].iv.start, 1027)
self.assertEqual(self.cigar2_minus.features['DT'][0].iv.start, 1039)
# cigar3_minus
self.assertEqual(self.cigar3_minus.features['MAG'][0].iv.start, 1019)
self.assertEqual(self.cigar3_minus.features['DT'][0].iv.start, 1024)
self.assertEqual(self.cigar3_minus.features['MAC'][0].iv.start, 1040)
self.assertEqual(self.cigar3_minus.features['IA'][0].iv.start, 1043)
# cigar4_minus
self.assertEqual(self.cigar4_minus.features['DT'][0].iv.start, 1005)
self.assertEqual(self.cigar4_minus.features['DG'][0].iv.start, 1006)
self.assertEqual(self.cigar4_minus.features['DC'][0].iv.start, 1007)
self.assertEqual(self.cigar4_minus.features['DA'][0].iv.start, 1008)
self.assertEqual(self.cigar4_minus.features['IA'][0].iv.start, 1008)
self.assertEqual(self.cigar4_minus.features['IA'][1].iv.start, 1008)
self.assertEqual(self.cigar4_minus.features['IT'][0].iv.start, 1008)
self.assertEqual(self.cigar4_minus.features['DG'][1].iv.start, 1009)
# cigar5_minus
self.assertEqual(self.cigar5_minus.features['MAG'][0].iv.start, 1000)
self.assertEqual(self.cigar5_minus.features['MAG'][1].iv.start, 1001)
self.assertEqual(self.cigar5_minus.features['IA'][0].iv.start, 1001)
self.assertEqual(self.cigar5_minus.features['IA'][1].iv.start, 1001)
self.assertEqual(self.cigar5_minus.features['MAG'][2].iv.start, 1002)
self.assertEqual(self.cigar5_minus.features['DT'][0].iv.start, 1003)
self.assertEqual(self.cigar5_minus.features['DT'][1].iv.start, 1004)
self.assertEqual(self.cigar5_minus.features['IA'][2].iv.start, 1004)
self.assertEqual(self.cigar5_minus.features['DT'][2].iv.start, 1005)
# cigar6_minus
self.assertEqual(self.cigar6_minus.features['MTC'][0].iv.start, 1012)
self.assertEqual(self.cigar6_minus.features['MTC'][1].iv.start, 1013)
self.assertEqual(self.cigar6_minus.features['MGA'][0].iv.start, 1000)
self.assertEqual(self.cigar6_minus.features['MGA'][1].iv.start, 1001)
def test_cigar_length_is_correct(self):
self.assertEqual(self.cigar1.length, 30)
self.assertEqual(self.cigar2.length, 49)
self.assertEqual(self.cigar3.length, 45)
self.assertEqual(self.cigar4.length, 20)
self.assertEqual(self.cigar5.length, 16)
# minus should be the same
self.assertEqual(self.cigar1_minus.length, 30)
self.assertEqual(self.cigar2_minus.length, 49)
self.assertEqual(self.cigar3_minus.length, 45)
self.assertEqual(self.cigar4_minus.length, 20)
self.assertEqual(self.cigar5_minus.length, 16)
self.assertEqual(self.cigar6_minus.length, 14)
class ShortReadTest(unittest.TestCase):
def setUp(self):
# plus strand
self.sr1 = ShortRead(chrom="chr1", start=1000, end=1030, strand="+", clipz_cigar="30")
self.sr2 = ShortRead(chrom="chr5", start=1000, end=1049, strand="+", clipz_cigar="20MTC5IG1MTG11DA9")
self.sr3 = ShortRead(chrom="chr1", start=1000, end=1045, strand="+", clipz_cigar="19MTC4DA15MTG3IT1")
# minus strand
self.sr1_minus = ShortRead(chrom="chr1", start=1000, end=1030, strand="-", clipz_cigar="30")
self.sr4_minus = ShortRead(chrom="chr22", start=1000, end=1049, strand="-", clipz_cigar="3MTG2DT4IC1MTC36")
def test_raises_error_when_incorrect_cigar(self):
with self.assertRaises(IncorrectCigarException):
ShortRead(chrom="chr22", start=1000, end=1048, strand="-", clipz_cigar="3MTG2DT4IT")
def test_returns_correct_truncation_position(self):
self.assertEqual(self.sr1.get_truncation_position(), 1000)
self.assertEqual(self.sr2.get_truncation_position(), 1000)
self.assertEqual(self.sr3.get_truncation_position(), 1000)
self.assertEqual(self.sr1_minus.get_truncation_position(), 1029)
self.assertEqual(self.sr4_minus.get_truncation_position(), 1048)
if __name__ == '__main__':
unittest.main()
|
{"/test/test_ShortRead.py": ["/ShortRead.py"]}
|
25,043
|
guma44/pyCLIP
|
refs/heads/master
|
/ShortRead.py
|
import re
from collections import defaultdict
from HTSeq import GenomicInterval, GenomicFeature
mutations_mapping = {'MAC': 'MTG',
'MAT': 'MTA',
'MAG': 'MTC',
'MAN': 'MTN',
'MCA': 'MGT',
'MCT': 'MGA',
'MCG': 'MGC',
'MCN': 'MGN',
'MTA': 'MAT',
'MTC': 'MAG',
'MTG': 'MAC',
'MTN': 'MAN',
'MGA': 'MCT',
'MGC': 'MCG',
'MGT': 'MCA',
'MGN': 'MCN',
'MNA': 'MNT',
'MNC': 'MNG',
'MNT': 'MNA',
'MNG': 'MNC',
'DA': 'DT',
'DC': 'DG',
'DT': 'DA',
'DG': 'DC',
'DN': 'DN',
'IA': 'IT',
'IC': 'IG',
'IT': 'IA',
'IG': 'IC',
'IN': 'IN'}
features_types = {'M': "mutation",
'D': 'deletion',
'I': 'insertion'}
class IncorrectCigarException(Exception):
pass
class ReadFeature(GenomicFeature):
"""Read Feature class"""
def __init__(self, name, type_, interval, beg_in_read):
super(ReadFeature, self).__init__(name=name, type_=type_, interval=interval)
self.beg_in_read = beg_in_read
class ShortRead(GenomicInterval):
"""This class implements short read from CLIPz"""
def __init__(self, chrom, start, end, strand, seq=None, clipz_cigar=None, name=None):
super(ShortRead, self).__init__(chrom=chrom, start=int(start), end=int(end), strand=strand)
if clipz_cigar:
self.clipz_cigar = CLIPzCigar(clipz_cigar, strand, start, chrom)
if self.clipz_cigar.length != self.length:
raise IncorrectCigarException("Cigar length does not match interval length: %i vs %i" % (self.clipz_cigar.length, self.length))
else:
self.clipz_cigar = clipz_cigar
self.seq = seq
self.name = name
def get_truncation_position(self):
"""Reeturn position of the truncation"""
return self.start_d
def get_end_position(self):
"Get end of the read in 0-based coordinates"
return self.end_d - 1
def get_bed_string(self):
bed_str = "%s\t%s\t%s\t%s\t%s\t%s\n" % (self.chrom,
self.start,
self.end,
str(self.name),
"1",
self.strand)
return bed_str
def get_truncation_bed_string(self):
"""TODO: test this function"""
bed_str = "%s\t%s\t%s\t%s\t%s\t%s\n" % (self.chrom,
self.get_truncation_position(),
self.get_truncation_position() + 1,
str(self.name),
"1",
self.strand)
return bed_str
def get_gff_ensembl_string(self):
chrom = self.chrom
attr = 'gene_id "None"; gene_version "None"; gene_name "None"; gene_source "CLIPz"; gene_biotype "None";'
gff_str = "%s\tCLIPz\tshort_read\t%i\t%i\t.\t%s\t.\t%s\n" % (chrom[3:] if chrom.startswith("chr") else chrom,
self.start + 1, # one based start
self.end,
self.strand,
attr)
return gff_str
class CLIPzCigar:
def __init__(self, cigar, strand, position, chrom=None):
self.cigar = cigar
self.strand = strand
self.position = position
self.chrom = chrom
self.features = defaultdict(list)
self.cigar_list = self.parse_cigar()
self.length = self.get_length()
self.parse_cigar_into_features()
def parse_cigar(self):
"""Split CLIPz cigar into list"""
        return list(filter(None,
                           re.split('([0-9]+|M[ACGTN]{2}|D[ACGTN]|I[ACGTN])',
                                    self.cigar)))
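    # Illustrative: parsing "20MTC5IG1MTG11DA9" yields
    # ['20', 'MTC', '5', 'IG', '1', 'MTG', '11', 'DA', '9']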
def get_length(self):
length = 0
for feature in self.cigar_list:
if re.match("[MD]", feature):
length += 1
elif feature.startswith("I"):
pass
else:
length += int(feature)
return length
def is_string_integer(self, value):
"""Return True if string can be converted to integer"""
try:
int(value)
return True
except ValueError:
return False
def parse_cigar_into_features(self):
"""Parse CLIPz cigar string into dict of features of the read"""
i = 0
insertions = 0
for feat in self.cigar_list:
if self.is_string_integer(feat):
i += int(feat)
else:
if feat.startswith('I'):
insertions += 1
if self.strand == '+':
self.features[feat].append(ReadFeature(feat,
type_= features_types[feat[0]],
interval = GenomicInterval(self.chrom,
self.position + i - insertions,
self.position + i + 1 - insertions,
self.strand),
beg_in_read = i + insertions))
elif self.strand == "-":
self.features[mutations_mapping[feat]].append(ReadFeature(mutations_mapping[feat],
type_= features_types[feat[0]],
interval = GenomicInterval(self.chrom,
self.position + i - insertions,
self.position + i + 1 - insertions,
self.strand),
beg_in_read = self.length - i - 1 + insertions))
else:
raise Exception("Strand must be + or -")
i += 1
|
{"/test/test_ShortRead.py": ["/ShortRead.py"]}
|
25,044
|
guma44/pyCLIP
|
refs/heads/master
|
/test/test_pyCLIP.py
|
import os
import sys
from Bio import SeqIO
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(script_dir, ".."))
import ShortRead
for rec in SeqIO.parse(os.path.join(script_dir, 'test.fa'), 'fasta'):
actual = rec.id.split('|')[0].split(':')
read = ShortRead.ShortRead(chrom='chrN',
start=0,
end=len(actual[2]),
strand=actual[3],
seq=actual[2],
clipz_cigar=actual[1],
name=actual[0])
print "*" * 80
print
print actual[0], actual[1], actual[2]
print 'Features positions: ', read.clipz_cigar.features
    i = 0
    j = 0
    feat_count = 0
    # index features by their column in the read so each alignment column
    # can be looked up directly by position
    feats = {}
    for feat_name, feat_list in read.clipz_cigar.features.items():
        for feature in feat_list:
            feats[feature.beg_in_read] = feat_name
    seq1 = []
    seq2 = []
    align = []
    while len(seq1) < len(actual[2]):
        if feat_count not in feats:
seq1.append(actual[2][i])
seq2.append(rec.seq[j])
align.append('|')
i += 1
j += 1
feat_count += 1
else:
if feats[feat_count].startswith('M'):
seq1.append(actual[2][i])
seq2.append(rec.seq[j])
align.append(' ')
i += 1
j += 1
feat_count += 1
elif feats[feat_count].startswith('D'):
seq1.append('-')
seq2.append(rec.seq[j])
align.append(' ')
j += 1
feat_count += 1
elif feats[feat_count].startswith('I'):
seq1.append(actual[2][i])
seq2.append('-')
align.append(' ')
i += 1
feat_count += 1
print "".join(seq1)
print "".join(align)
print "".join(seq2)
print
print "*" * 80
|
{"/test/test_ShortRead.py": ["/ShortRead.py"]}
|
25,045
|
mmjimenez1/restaurant_finder
|
refs/heads/master
|
/Yelp.py
|
import requests
import pandas as pd
import sqlalchemy
import os
from sqlalchemy import create_engine
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
# Collect input from user and return it.
def collect_state_input():
state = input("Enter a state: ")
while(not state.isalpha()):
state = input("Enter a state: ")
return state
# Collect zip_code input from user and return it.
def collect_zip_input():
    zip_code = input("Enter a ZipCode: ")
    while not (zip_code.isdigit() and len(zip_code) == 5):
        print("Invalid, try again.")
        zip_code = input("Enter a ZipCode: ")
    return zip_code
# Return collected user food-preference input.
def collect_liked_input():
food_type_liked = input(
"Enter the type of food liked (ex; Italian, Mexican,etc): ")
while(not food_type_liked.isalpha()):
food_type_liked = input(
"Enter the type of food liked (ex; Italian, Mexican,etc ): ")
return food_type_liked
# SQL, save database, save to file, load file, load database.
def saveSQLtoFile(database_name, file_name):
os.system('mysqldump -u root -pcodio ' + database_name + ' > ' + file_name)
# Load a SQL database from a file.
def loadSQLfromFile(database_name, file_name):
os.system('mysql -u root -pcodio -e "CREATE DATABASE IF NOT EXISTS '
+ database_name + ';"')
os.system("mysql -u root -pcodio " + database_name + " < " + file_name)
# Save the dataset.
def saveDatasetToFile(database_name, file_name, dataframe, table_name):
engine = create_engine('mysql://root:codio@localhost/' +
database_name + '?charset=utf8', encoding='utf-8')
dataframe.to_sql(table_name, con=engine, if_exists='replace', index=False)
saveSQLtoFile(database_name, file_name)
# Load dataset from file.
def loadDataset(database_name, table_name, file_name):
loadSQLfromFile(database_name, file_name)
engine = create_engine('mysql://root:codio@localhost/' +
database_name + '?charset=utf8', encoding='utf-8')
    df = pd.read_sql_table(table_name, con=engine)
return df
# Make a scatter plot from dataframe and save it as html file.
def makeScatterchart(df, xcol, ycol):
data = df
fig = px.scatter(data, x=xcol, y=ycol, color="Name",
size="Rating")
fig.write_html('scatter.html')
# Make a bar chart from dataframe and save it as an html file.
def makeBarchart(df, xcol, ycol):
data = df
fig = px.bar(data, x=xcol, y=ycol, color="Rating",
title="Restaurant Search Results")
fig.write_html('barchart.html')
# Create and return the search url.
def create_search_url(base_url, zip_code, state, term1, term2='None'):
location = str(state + zip_code)
search_url = base_url + 'location=' + location + '&' + 'term=' + term1
return search_url
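# Illustrative: create_search_url(base_url, '10001', 'NY', 'pizza')
# -> 'https://api.yelp.com/v3/businesses/search?location=NY10001&term=pizza'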
# Get information from the API and return it as JSON.
def api_to_json(search_url, headers):
r = requests.get(search_url, headers=headers)
print(r.status_code)
response_json = r.json()
return response_json
# Parse information and return as a dataframe.
def add_all_info_df(col_names, main_dictionary):
df = pd.DataFrame(columns=col_names)
for business in main_dictionary:
df.loc[len(df.index)] = [business['name'], business['rating'],
business['location']['address1'],
business['location']['zip_code'],
business['categories'][0]['alias']]
df.sort_values(by=['Rating'], inplace=True, ascending=False)
return df
# Main code
# Collect user input
base_url = 'https://api.yelp.com/v3/businesses/search?'
zip_code = collect_zip_input()
state = collect_state_input()
preference = collect_liked_input()
# Get API information and access.
search_url = create_search_url(base_url, zip_code, state, preference)
key1 = 'eUJ17k9QVRaQVpchITGQszLJGOhhyE52aMFr1o1AO4v52d3_-La_yyI1gj-'
key2 = 'FiGOEKFG62RrVMqh5rN7Ab12hw60MC6euNHD7mYV5sARgZS4GNYFc-'
key3 = 'g_FQH4c7c7jYHYx'
api_key = key1 + key2 + key3
headers = {
'Authorization': 'Bearer %s' % api_key,
}
restaurantList = api_to_json(search_url, headers)
main = restaurantList['businesses']
# Turn API info into dataframe.
col_names = ['Name', 'Rating', 'Location', 'ZipCode', 'Food Type']
df = add_all_info_df(col_names, main)
df2 = df.iloc[:3]
print(df2)
# make database
database_name = 'restaurants'
file_name = 'test_file.sql'
table_name = 'restaurant_information'
os.system('mysql -u root -pcodio -e "CREATE DATABASE IF NOT EXISTS ' +
database_name + ';"')
engine = create_engine('mysql://root:codio@localhost/restaurants' +
'?charset=utf8', encoding='utf-8')
df2.to_sql('restaurant_information', con=engine,
if_exists='replace', index=False)
makeBarchart(df2, "Name", "Rating")
# saveSQLtoFile(database_name, file_name)
# loadSQLfromFile(database_name, file_name)
# makeScatterchart(df2, "Name", "Rating")
|
{"/test.py": ["/Yelp.py"]}
|
25,046
|
mmjimenez1/restaurant_finder
|
refs/heads/master
|
/test.py
|
import unittest
import pandas as pd
from Yelp import collect_state_input, collect_zip_input
from Yelp import collect_liked_input, makeScatterchart
from Yelp import makeBarchart, create_search_url
from Yelp import api_to_json, add_all_info_df
class TestFileName(unittest.TestCase):
    # The state name must be at least two characters (abbreviation length).
    def test_collect_state_input(self):
        length = len(collect_state_input())
if(length>=2):
isvalid = True
else:
isvalid = False
self.assertTrue(isvalid)
print()
    # All zip codes must be exactly 5 digits long.
    def test_collect_zip_input(self):
        length = len(collect_zip_input())
if(length==5):
isvalid = True
else:
isvalid = False
self.assertTrue(isvalid)
print()
    # Input must be longer than 3 characters.
    def test_collect_liked_input(self):
        length = len(collect_liked_input())
if(length>3):
isvalid = True
else:
isvalid = False
self.assertTrue(isvalid)
print()
# Check no errors are happening when you put a valid data frame
def test_makeScatterchart(self):
test_data = [['restaurant_1', 1], ['restaurant_2', 2], ['restaurant_3', 4]]
xcol ="Name"
ycol = "Rating"
df = pd.DataFrame(test_data, columns= ['Name', 'Rating'])
isworking = True
try:
            makeScatterchart(df, xcol, ycol)
        except Exception:
            isworking = False
self.assertTrue(isworking)
print()
# Test that there are no errors when trying to make a chart, using a
# Valid dataframe.
def test_makeBarchart(self):
test_data = [['restaurant_1', 1], ['restaurant_2', 2], ['restaurant_3', 4]]
xcol = "Name"
ycol = "Rating"
        df = pd.DataFrame(test_data, columns=['Name', 'Rating'])
        isworking = True
        try:
            makeBarchart(df, xcol, ycol)
        except Exception:
            isworking = False
self.assertTrue(isworking)
print()
# Check that a string is created
def test_create_search_url(self):
base_url = 'testurl.com'
zip_code = 'test'
state = 'test'
term1 = 'test'
        url = create_search_url(base_url, zip_code, state, term1)
self.assertEqual(type(url), str)
print()
# Check that returned dictionary is not empty and that there is no error
def test_api_to_json(self):
search_url='https://api.yelp.com/v3/businesses/search?location=illinois&term=italian'
api_key ='eUJ17k9QVRaQVpchITGQszLJGOhhyE52aMFr1o1AO4v52d3_-La_yyI1gj-FiGOEKFG62RrVMqh5rN7Ab12hw60MC6euNHD7mYV5sARgZS4GNYFc-g_FQH4c7c7jYHYx'
headers = {
'Authorization': 'Bearer %s' % api_key,
}
        test1 = {}
        try:
            dictionary = api_to_json(search_url, headers)
        except Exception:
            dictionary = 'error'
self.assertNotEqual(dictionary, test1)
self.assertNotEqual(dictionary, 'error')
print()
if __name__ == '__main__':
unittest.main()
|
{"/test.py": ["/Yelp.py"]}
|
25,048
|
Nyagah-Tech/blog
|
refs/heads/master
|
/app/auth/forms.py
|
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField
from wtforms.validators import Required,Email,EqualTo
from ..models import User
from wtforms import ValidationError
class LoginForm(FlaskForm):
username = StringField('your username',validators = [Required()])
password = PasswordField('Your password...',validators = [Required()])
remember = BooleanField('Remember me')
submit = SubmitField('Submit')
class RegForm(FlaskForm):
email = StringField('your email',validators = [Email(),Required()])
username = StringField('your username', validators = [Required()])
    password = PasswordField('your password',validators =[Required(),EqualTo('password_confirm',message = 'Passwords must match')])
password_confirm = PasswordField('Confirm password',validators = [Required()])
submit = SubmitField('Submit')
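    # WTForms automatically calls methods named validate_<fieldname> as extra
    # inline validators for the matching field, so the two methods below run
    # during form.validate_on_submit().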
def validate_email(self,data_field):
if User.query.filter_by(email = data_field.data).first():
raise ValidationError('There is an account registered with that email')
def validate_username(self,data_field):
if User.query.filter_by(username = data_field.data).first():
            raise ValidationError('That username already exists')
|
{"/app/auth/forms.py": ["/app/models.py"], "/app/main/views.py": ["/app/models.py", "/app/main/forms.py"]}
|
25,049
|
Nyagah-Tech/blog
|
refs/heads/master
|
/app/main/forms.py
|
from flask_wtf import FlaskForm
from wtforms import StringField,BooleanField,TextAreaField,SubmitField
from wtforms.validators import Required,Email
class New_blog_form(FlaskForm):
title = StringField('Blog title',validators = [Required()])
body = TextAreaField('Write your blog here ', validators = [Required()])
submit = SubmitField('Submit')
class CommentForm(FlaskForm):
body = TextAreaField('write your comment', validators = [Required()])
submit = SubmitField('Submit')
class UpdateProfileForm(FlaskForm):
bio = StringField('write your bio',validators = [Required()])
submit = SubmitField('Submit')
class UpdateBlogForm(FlaskForm):
title = StringField('Blog title', validators = [Required()])
body = TextAreaField('write your blog here..',validators = [Required()])
submit = SubmitField('Submit')
class Subscribe_Form(FlaskForm):
email = StringField('your email',validators = [Required(),Email()])
username = StringField('your username',validators = [Required()])
submit = SubmitField('Submit')
|
{"/app/auth/forms.py": ["/app/models.py"], "/app/main/views.py": ["/app/models.py", "/app/main/forms.py"]}
|
25,050
|
Nyagah-Tech/blog
|
refs/heads/master
|
/app/models.py
|
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(30),index = True)
email = db.Column(db.String(),unique = True,index = True)
bio = db.Column(db.String)
profile_path = db.Column(db.String)
pass_code = db.Column(db.String(255))
@property
def password(self):
        raise AttributeError('Access denied: password is not a readable attribute')
@password.setter
def password(self, password):
self.pass_code = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.pass_code,password)
def __repr__(self):
return f'User {self.username}'
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
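# A minimal sketch of the write-only password property above (hypothetical
# session, assuming the app context and database are set up):
#   user = User(username='dan')
#   user.password = 'secret'          # stores only a hash in pass_code
#   user.verify_password('secret')    # -> True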
class Blog(db.Model):
__tablename__ = 'blogs'
id = db.Column(db.Integer,primary_key = True)
user = db.Column(db.String)
blog_title = db.Column(db.String)
blog_body = db.Column(db.String)
posted = db.Column(db.DateTime,default = datetime.utcnow)
def save_blog(self):
db.session.add(self)
db.session.commit()
def delete_blog(self):
db.session.delete(self)
db.session.commit()
@classmethod
def get_user_blog(cls,name):
blog = Blog.query.filter_by(user = name).all()
return blog
@classmethod
def get_all_blog(cls):
blog_list = Blog.query.all()
return blog_list
class Comment(db.Model):
__tablename__ = 'comment'
id = db.Column(db.Integer,primary_key = True)
blog_id = db.Column(db.String)
user = db.Column(db.String)
blog_comment = db.Column(db.String)
    posted = db.Column(db.DateTime,default = datetime.utcnow)
def delete_comment(self):
db.session.delete(self)
db.session.commit()
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_comment(cls,id):
comments = Comment.query.filter_by(blog_id = id).all()
return comments
class Subscribe(db.Model):
__tablename__ = 'subscribe'
    id = db.Column(db.Integer,primary_key = True)
email = db.Column(db.String)
username = db.Column(db.String)
def save_subscribers(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_all_email(cls):
emails = Subscribe.query.all()
return emails
class Quote:
def __init__(self,author,quote):
self.author = author
self.quote = quote
|
{"/app/auth/forms.py": ["/app/models.py"], "/app/main/views.py": ["/app/models.py", "/app/main/forms.py"]}
|
25,051
|
Nyagah-Tech/blog
|
refs/heads/master
|
/config.py
|
class Config:
SECRET_KEY ='dm/01254'
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://dan:12345@localhost/blog'
UPLOADED_PHOTOS_DEST = 'app/static/photos'
QOUTE_API_URL = 'http://quotes.stormconsultancy.co.uk/random.json'
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = 'danmuv12@gmail.com'
MAIL_PASSWORD = 'Dm17/01254'
class ProdConfig(Config):
pass
class DevConfig(Config):
DEBUG = True
config_options = {
'development':DevConfig,
'production':ProdConfig
}
|
{"/app/auth/forms.py": ["/app/models.py"], "/app/main/views.py": ["/app/models.py", "/app/main/forms.py"]}
|
25,052
|
Nyagah-Tech/blog
|
refs/heads/master
|
/app/main/views.py
|
from flask import render_template,redirect,url_for,abort,request
from . import main
from flask_login import login_required,current_user
from ..models import User,Blog,Comment,Subscribe
from .forms import New_blog_form,CommentForm,UpdateProfileForm,UpdateBlogForm,Subscribe_Form
from .. import db,photos
from ..email import mail_message
from ..requests import get_quotes
@main.route('/')
def index():
title ="home"
quotes = get_quotes()
blogs = Blog.get_all_blog()
return render_template('index.html',title=title,quotes =quotes,blogs = blogs)
@main.route('/blog/new/<uname>', methods = ['GET','POST'])
@login_required
def new_blog(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = New_blog_form()
if form.validate_on_submit():
title = form.title.data
body = form.body.data
new_blog = Blog(blog_title = title,blog_body = body,user = current_user.username)
new_blog.save_blog()
email_list = Subscribe.get_all_email()
for emails in email_list:
mail_message("Your daily newsfeed","email/subscription_user",emails.email,emails = emails)
return redirect(url_for('.profile', uname = current_user.username))
    title = 'new blog'
return render_template('new_blog.html',title = title, newBlog_form = form)
@main.route('/blog/user/profile/<uname>')
@login_required
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
bloglist = Blog.get_user_blog(user.username)
return render_template("profile/profile.html",user = user, bloglist = bloglist)
@main.route('/blog/profile/comment/new/<int:id>', methods=['GET', 'POST'])
@login_required
def comment(id):
form = CommentForm()
blog = Blog.query.filter_by(id = id).first()
if form.validate_on_submit():
comment = form.body.data
new_comment = Comment(user = current_user.username,blog_comment = comment,blog_id = blog.id)
new_comment.save_comment()
return redirect(url_for('.blog',id = blog.id))
title = 'comments'
return render_template('comment.html',title = title,comment_form =form,blog = blog)
@main.route('/blog/<id>')
@login_required
def blog(id):
blog = Blog.query.filter_by(id = id).first()
comment = Comment.get_comment(id)
return render_template('blog.html',blog = blog, comment = comment)
@main.route('/profile/update/<uname>', methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfileForm()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile', uname = user.username))
return render_template('profile/updateProf.html',updateForm = form,)
@main.route('/profile/<uname>/update/profile/pic/', methods = ['POST'])
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_path = path
db.session.commit()
return redirect(url_for('main.profile',uname = user.username))
@main.route('/blog/delete/<int:id>', methods = ['GET','POST'])
@login_required
def delete_blog(id):
blog = Blog.query.filter_by(id = id).first()
    blog.delete_blog()
return redirect(url_for('main.profile', uname = current_user.username))
@main.route('/blog/update/<int:id>',methods = ['GET','POST'])
@login_required
def update_blog(id):
blog = Blog.query.filter_by(id = id).first()
form = UpdateBlogForm()
if form.validate_on_submit():
blog.blog_title = form.title.data
blog.blog_body = form.body.data
db.session.add(blog)
db.session.commit()
return redirect(url_for('main.profile', uname = current_user.username))
return render_template('profile/updateBlog.html',updateblogform = form)
@main.route('/user/subcribe/<uname>', methods = ['GET','POST'])
@login_required
def subcribe(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = Subscribe_Form()
if form.validate_on_submit():
email = form.email.data
username= form.username.data
new_sub = Subscribe(username = username,email = email)
db.session.add(new_sub)
db.session.commit()
return redirect(url_for('.index'))
return render_template('subscribe/subscribe.html',subcribeForm = form,user = user)
@main.route('/comment/delete/<int:id>')
@login_required
def delete_comment(id):
    comment = Comment.query.filter_by(id = id).first()
    blog_id = comment.blog_id
    comment.delete_comment()
    return redirect(url_for('.blog', id = blog_id))
|
{"/app/auth/forms.py": ["/app/models.py"], "/app/main/views.py": ["/app/models.py", "/app/main/forms.py"]}
|
25,081
|
TSejal/Python-Education_mgmt
|
refs/heads/master
|
/school_mgmt/urls.py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from education_mgmt import settings
|
{"/school_mgmt/admin.py": ["/school_mgmt/models.py"]}
|
25,082
|
TSejal/Python-Education_mgmt
|
refs/heads/master
|
/school_mgmt/models.py
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.conf import settings
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
class University(models.Model):
name = models.CharField(max_length=50)
logo = models.ImageField(upload_to='school_img/')
website = models.CharField(max_length=50,null=True,blank=True)
created_at = models.DateTimeField(default=timezone.now)
modified_at = models.DateTimeField()
is_active = models.BooleanField(default=True)
def __str__(self):
return self.name
class School(models.Model):
owner = models.ForeignKey(User)
university = models.ForeignKey(University)
name = models.CharField(max_length=50)
logo = models.ImageField(upload_to='school_img/')
website = models.CharField(max_length=50,null=True,blank=True)
created_at = models.DateTimeField(default=timezone.now, null=True,blank=True)
modified_at = models.DateTimeField(default=timezone.now, null=True,blank=True)
is_active = models.BooleanField(default=True)
def __str__(self):
return self.name
class Address(models.Model):
    country_names = (
        ('IND','India'),
        ('AUS','Australia'),
        ('FND','Finland'),
        ('UK','United Kingdom'),
        ('GM','Germany'),
    )
street_1 = models.CharField(max_length=30)
street_2 = models.CharField(max_length=30)
city = models.CharField(max_length=30)
state = models.CharField(max_length=30)
    country = models.CharField(max_length=10, choices=country_names)
zip_code = models.IntegerField(null=True,blank=True)
mobile_no = models.IntegerField(null=True,blank=True)
def __str__(self):
return self.city
class Student(models.Model):
school = models.ForeignKey(School)
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
    roll_no = models.IntegerField()
email = models.EmailField()
date_of_birth = models.CharField(max_length=20)
address = models.ManyToManyField(Address,null=True,blank=True)
created_at = models.DateTimeField(default=timezone.now)
modified_at = models.DateTimeField()
is_active = models.BooleanField(default=True)
def __str__(self):
return self.first_name
|
{"/school_mgmt/admin.py": ["/school_mgmt/models.py"]}
|
25,083
|
TSejal/Python-Education_mgmt
|
refs/heads/master
|
/school_mgmt/serializers.py
|
from rest_framework import serializers
from school_mgmt.models import *
from rest_framework.validators import UniqueValidator
from django.contrib.auth.models import User
from random import randint
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'password')
class UserRegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id','first_name','last_name','username', 'email', 'password')
#add
class UniversityCreateSerializer(serializers.ModelSerializer):
class Meta:
model = University
        exclude = ('created_at',)
class SchoolCreateSerializer(serializers.ModelSerializer):
class Meta:
model = School
exclude = ('created_at','modified_at')
class StudentCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Student
#exclude = ('created_at','modified_at')
#list
class SchoolListSerializer(serializers.ModelSerializer):
class Meta:
model = School
class SchoolListforUniversitySerializer(serializers.ModelSerializer):
class Meta:
model = School
fields = ('id','name','university')
class UniversityListSerializer(serializers.ModelSerializer):
# school = SchoolListSerializer()
# school_count = serializers.SerializerMethodField()
class Meta:
model = University
# def get_school_count(self,University):
# return School.objects.filter(University=university).count()
class UniversityWithSchoolsListSerializer(serializers.ModelSerializer):
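    # SerializerMethodField is resolved by calling get_<field_name>(obj)
    # (get_school_name below) for each serialized University.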
school_name = serializers.SerializerMethodField()
class Meta:
model = University
fields = ('id','name','school_name')
def get_school_name(self, obj):
print obj.id
return SchoolListforUniversitySerializer(School.objects.filter(university__id=obj.id),many=True).data
|
{"/school_mgmt/admin.py": ["/school_mgmt/models.py"]}
|
25,084
|
TSejal/Python-Education_mgmt
|
refs/heads/master
|
/school_mgmt/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('street_1', models.CharField(max_length=30)),
('street_2', models.CharField(max_length=30)),
('city', models.CharField(max_length=30)),
('state', models.CharField(max_length=30)),
                ('country', models.CharField(max_length=10, choices=[(b'IND', b'India'), (b'AUS', b'Australia'), (b'FND', b'Finland'), (b'UK', b'United Kingdom'), (b'GM', b'Germany')])),
('zip_code', models.IntegerField(null=True, blank=True)),
('mobile_no', models.IntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='School',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('logo', models.ImageField(upload_to=b'school_img/')),
('website', models.CharField(max_length=50, null=True, blank=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('modified_at', models.DateTimeField()),
('is_active', models.BooleanField(default=True)),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('smart_number', models.CharField(max_length=18)),
('first_name', models.CharField(max_length=20)),
('last_name', models.CharField(max_length=20)),
('roll_no', models.IntegerField(max_length=20)),
('email', models.EmailField(max_length=75)),
('date_of_birth', models.CharField(max_length=20)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('modified_at', models.DateTimeField()),
('is_active', models.BooleanField(default=True)),
('address', models.ManyToManyField(to='school_mgmt.Address', null=True, blank=True)),
('school', models.ForeignKey(to='school_mgmt.School')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='University',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('logo', models.ImageField(upload_to=b'school_img/')),
('website', models.CharField(max_length=50, null=True, blank=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('modified_at', models.DateTimeField()),
('is_active', models.BooleanField(default=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='school',
name='university',
field=models.ForeignKey(to='school_mgmt.University'),
preserve_default=True,
),
]
|
{"/school_mgmt/admin.py": ["/school_mgmt/models.py"]}
|
25,085
|
TSejal/Python-Education_mgmt
|
refs/heads/master
|
/school_mgmt/admin.py
|
from django.contrib import admin
from school_mgmt.models import *
# Register your models here.
class UniversityAdmin(admin.ModelAdmin):
fieldsets = (
('University data', {'fields': ('name', 'logo', 'website')}),
('Date', {'fields': ('created_at','modified_at')}),
('Permission', {'fields': ('is_active', )}),
)
class SchoolAdmin(admin.ModelAdmin):
fieldsets=(
('School data',{'fields':('owner','university','name','logo','website')}),
('Date',{'fields':('created_at','modified_at')}),
('Permission', {'fields': ('is_active', )}),
)
class AddressAdmin(admin.ModelAdmin):
fieldsets=(
('School data',{'fields':('street_1','street_2','city','state','country','zip_code','mobile_no')}),
)
class StudentAdmin(admin.ModelAdmin):
fieldsets=(
('School data',{'fields':('school','first_name','last_name','roll_no','email','date_of_birth','address')}),
('Date',{'fields':('created_at','modified_at')}),
('Permission', {'fields': ('is_active', )}),
)
admin.site.register(University,UniversityAdmin)
admin.site.register(School,SchoolAdmin)
admin.site.register(Address,AddressAdmin)
admin.site.register(Student,StudentAdmin)
|
{"/school_mgmt/admin.py": ["/school_mgmt/models.py"]}
|
25,086
|
TSejal/Python-Education_mgmt
|
refs/heads/master
|
/api/urls.py
|
from django.conf.urls import patterns, url
# from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from api.views import *
from rest_framework.authtoken import views as tokenView
print "in api url"
urlpatterns = patterns('',
url(r'^login/$', tokenView.obtain_auth_token),
url(r'^register/$', user_register, name='user-register'),
url(r'^student/add/$', add_student, name='add-student'),
url(r'^university/add/$', add_university, name='add-university'),
url(r'^university/list/$', university_list, name='university-list'),
url(r'^university/list1/$', universitywithschools_list, name='university-school-list'),
url(r'^university/delete/(?P<pk>[0-9]+)/$', university_delete, name='university-delete'),
url(r'^school/add/$', add_school, name='add-school'),
url(r'^school/list/$', school_list, name='school-list'),
url(r'^school/details/(?P<pk>[0-9]+)/$', school_details, name='school-detail'),
url(r'^school/update/(?P<pk>[0-9]+)/$', school_update, name='school-update'),
url(r'^school/delete/(?P<pk>[0-9]+)/$', school_delete, name='school-delete'),
)
|
{"/school_mgmt/admin.py": ["/school_mgmt/models.py"]}
|
25,087
|
TSejal/Python-Education_mgmt
|
refs/heads/master
|
/api/views.py
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes
from rest_framework.response import Response
from school_mgmt.serializers import *
from school_mgmt.models import *
from rest_framework.authentication import TokenAuthentication
from random import randint
@api_view(['POST'])
def user_login(request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def user_register(request):
    serializer = UserRegisterSerializer(data=request.data)
    psw = request.data['password']
    re_psw = request.data['Password_confirmation']
    if psw != re_psw:
        return Response({'error': 'Passwords do not match'}, status=status.HTTP_400_BAD_REQUEST)
    if serializer.is_valid():
        obj = serializer.save()
        obj.set_password(psw)
        obj.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#lists
@api_view(['GET'])
def university_list(request):
    universities = University.objects.all()
    serializer = UniversityListSerializer(universities, many=True)
    school_count = School.objects.count()  # total schools across all universities
    return Response({"School_count": school_count, "data": serializer.data}, status=status.HTTP_200_OK)
@api_view(['GET'])
def universitywithschools_list(request):
print "in list"
print request.data
universities = University.objects.all()
serializer = UniversityWithSchoolsListSerializer(universities, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@api_view(['GET'])
@authentication_classes((TokenAuthentication,))
def school_list(request):
print "in list"
print request.data
schools = School.objects.all()
serializer = SchoolListSerializer(schools, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
#create
@api_view(['POST'])
def add_university(request):
data = request.data
serializer = UniversityCreateSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def add_student(request):
data = request.data
#1st 4digits
    one = randint(10**3, 10**4 - 1)
    print "Random number... one ", one
#2nd 2digits
date = data['date_of_birth']
two = date[:2]
print "bdate... two ", date[:2]
#3rd 3characters
school_id = data['school']
school_name = School.objects.get(id=school_id)
university_name = school_name.university.name
three = university_name[:3]
print "univesity.... three ", university_name[:3]
#4th 3characters
school_data = data['school']
school_obj_name = School.objects.get(id=school_data)
school_obj_name = school_obj_name.name
four = school_obj_name[:3]
print "school..... four ", school_obj_name[:3]
#5th 2digits
five = date[3:5]
print "birth month.... five ", date[3:5]
#6th 4digits
six = date[6:]
print "birth year.... six", date[6:]
sn = "%s%s-%s%s-%s%s" % (one, two, three, four, five, six)
print sn
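    # Apparent smart number layout, assuming date_of_birth is 'dd-mm-yyyy':
    # <4 random digits><birth day>-<university prefix><school prefix>-<birth month><birth year>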
# s = Student()
# s.smart_number = sn
# s.save()
serializer = StudentCreateSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response({"Smart_number" : sn,"Data" : serializer.data}, status=status.HTTP_201_CREATED)
#return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@authentication_classes((TokenAuthentication,))
def add_school(request):
data = request.data
serializer = SchoolCreateSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#details
@api_view(['GET'])
@authentication_classes((TokenAuthentication,))
def school_details(request, pk):
if request.user.is_authenticated():
        try:
            school = School.objects.get(id=pk, owner=request.user)
        except School.DoesNotExist:
            return Response({'error': 'School id not found'}, status=status.HTTP_400_BAD_REQUEST)
serializer = SchoolListSerializer(school, many=False)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
        return Response({'error': 'You are not authenticated'}, status=status.HTTP_401_UNAUTHORIZED)
#update
@api_view(['PUT'])
@authentication_classes((TokenAuthentication,))
def school_update(request, pk):
    try:
        school = School.objects.get(id=pk)
    except School.DoesNotExist:
        return Response({'error': 'School id not found'}, status=status.HTTP_400_BAD_REQUEST)
serializer = SchoolListSerializer(school, data=request.data, many=False)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#delete
@api_view(['DELETE'])
@authentication_classes((TokenAuthentication,))
def school_delete(request, pk):
    try:
        school = School.objects.get(id=pk)
    except School.DoesNotExist:
        return Response({'error': 'School id not found'}, status=status.HTTP_400_BAD_REQUEST)
school.delete()
return Response({'success': 'School deleted successfully'}, status=status.HTTP_200_OK)
@api_view(['DELETE'])
def university_delete(request, pk):
    try:
        university = University.objects.get(id=pk)
    except University.DoesNotExist:
        return Response({'error': 'University id not found'}, status=status.HTTP_400_BAD_REQUEST)
university.delete()
return Response({'success': 'University deleted successfully'}, status=status.HTTP_200_OK)
|
{"/school_mgmt/admin.py": ["/school_mgmt/models.py"]}
|
25,095
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/util/visualizer.py
|
"""This script defines the visualizer for Deep3DFaceRecon_pytorch
"""
import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
from torch.utils.tensorboard import SummaryWriter
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
image_name = '%s/%s.png' % (label, name)
os.makedirs(os.path.join(image_dir, label), exist_ok=True)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
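# A minimal usage sketch (hypothetical names): 'webpage' is an html.HTML
# instance and 'visuals' an OrderedDict mapping labels to image tensors.
#   save_images(webpage, visuals, ['datasets/examples/000001.jpg'])
#   webpage.save()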
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
    It uses torch.utils.tensorboard for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: create a tensorboard writer
        Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.use_html = opt.isTrain and not opt.no_html
self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, 'logs', opt.name))
self.win_size = opt.display_winsize
self.name = opt.name
self.saved = False
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
"""Reset the self.saved status"""
self.saved = False
def display_current_results(self, visuals, total_iters, epoch, save_result):
"""Display current results on tensorboad; save current results to an HTML file.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
total_iters (int) -- total iterations
epoch (int) - - the current epoch
            save_result (bool) - - whether to save the current results to an HTML file
"""
for label, image in visuals.items():
self.writer.add_image(label, util.tensor2im(image), total_iters, dataformats='HWC')
if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
self.saved = True
# save images to the disk
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims, txts, links = [], [], []
for label, image_numpy in visuals.items():
                    image_numpy = util.tensor2im(image_numpy)
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
def plot_current_losses(self, total_iters, losses):
# G_loss_collection = {}
# D_loss_collection = {}
# for name, value in losses.items():
# if 'G' in name or 'NCE' in name or 'idt' in name:
# G_loss_collection[name] = value
# else:
# D_loss_collection[name] = value
# self.writer.add_scalars('G_collec', G_loss_collection, total_iters)
# self.writer.add_scalars('D_collec', D_loss_collection, total_iters)
for name, value in losses.items():
self.writer.add_scalar(name, value, total_iters)
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
class MyVisualizer:
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: create a tensorboard writer
        Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
        self.opt = opt # cache the option
self.name = opt.name
self.img_dir = os.path.join(opt.checkpoints_dir, opt.name, 'results')
if opt.phase != 'test':
self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, opt.name, 'logs'))
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def display_current_results(self, visuals, total_iters, epoch, dataset='train', save_results=False, count=0, name=None,
add_image=True):
"""Display current results on tensorboad; save current results to an HTML file.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
total_iters (int) -- total iterations
epoch (int) - - the current epoch
dataset (str) - - 'train' or 'val' or 'test'
"""
# if (not add_image) and (not save_results): return
for label, image in visuals.items():
for i in range(image.shape[0]):
image_numpy = util.tensor2im(image[i])
if add_image:
self.writer.add_image(label + '%s_%02d'%(dataset, i + count),
image_numpy, total_iters, dataformats='HWC')
if save_results:
save_path = os.path.join(self.img_dir, dataset, 'epoch_%s_%06d'%(epoch, total_iters))
if not os.path.isdir(save_path):
os.makedirs(save_path)
if name is not None:
img_path = os.path.join(save_path, '%s.png' % name)
else:
img_path = os.path.join(save_path, '%s_%03d.png' % (label, i + count))
util.save_image(image_numpy, img_path)
def plot_current_losses(self, total_iters, losses, dataset='train'):
for name, value in losses.items():
self.writer.add_scalar(name + '/%s'%dataset, value, total_iters)
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data, dataset='train'):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(dataset: %s, epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (
dataset, epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,096
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/util/detect_lm68.py
|
import os
import cv2
import numpy as np
from scipy.io import loadmat
import tensorflow as tf
from util.preprocess import align_for_lm
from shutil import move
mean_face = np.loadtxt('util/test_mean_face.txt')
mean_face = mean_face.reshape([68, 2])
def save_label(labels, save_path):
np.savetxt(save_path, labels)
def draw_landmarks(img, landmark, save_name):
lm_img = np.zeros([img.shape[0], img.shape[1], 3])
lm_img[:] = img.astype(np.float32)
landmark = np.round(landmark).astype(np.int32)
for i in range(len(landmark)):
for j in range(-1, 1):
for k in range(-1, 1):
if img.shape[0] - 1 - landmark[i, 1]+j > 0 and \
img.shape[0] - 1 - landmark[i, 1]+j < img.shape[0] and \
landmark[i, 0]+k > 0 and \
landmark[i, 0]+k < img.shape[1]:
lm_img[img.shape[0] - 1 - landmark[i, 1]+j, landmark[i, 0]+k,
:] = np.array([0, 0, 255])
lm_img = lm_img.astype(np.uint8)
cv2.imwrite(save_name, lm_img)
def load_data(img_name, txt_name):
return cv2.imread(img_name), np.loadtxt(txt_name)
# create tensorflow graph for landmark detector
def load_lm_graph(graph_filename):
with tf.gfile.GFile(graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name='net')
img_224 = graph.get_tensor_by_name('net/input_imgs:0')
output_lm = graph.get_tensor_by_name('net/lm:0')
lm_sess = tf.Session(graph=graph)
return lm_sess,img_224,output_lm
# landmark detection
def detect_68p(img_path,sess,input_op,output_op):
print('detecting landmarks......')
names = [i for i in sorted(os.listdir(
img_path)) if 'jpg' in i or 'png' in i or 'jpeg' in i or 'PNG' in i]
vis_path = os.path.join(img_path, 'vis')
remove_path = os.path.join(img_path, 'remove')
save_path = os.path.join(img_path, 'landmarks')
if not os.path.isdir(vis_path):
os.makedirs(vis_path)
if not os.path.isdir(remove_path):
os.makedirs(remove_path)
if not os.path.isdir(save_path):
os.makedirs(save_path)
for i in range(0, len(names)):
name = names[i]
print('%05d' % (i), ' ', name)
full_image_name = os.path.join(img_path, name)
txt_name = '.'.join(name.split('.')[:-1]) + '.txt'
full_txt_name = os.path.join(img_path, 'detections', txt_name) # 5 facial landmark path for each image
# if an image does not have detected 5 facial landmarks, remove it from the training list
if not os.path.isfile(full_txt_name):
move(full_image_name, os.path.join(remove_path, name))
continue
# load data
img, five_points = load_data(full_image_name, full_txt_name)
input_img, scale, bbox = align_for_lm(img, five_points) # align for 68 landmark detection
# if the alignment fails, remove corresponding image from the training list
if scale == 0:
move(full_txt_name, os.path.join(
remove_path, txt_name))
move(full_image_name, os.path.join(remove_path, name))
continue
# detect landmarks
input_img = np.reshape(
input_img, [1, 224, 224, 3]).astype(np.float32)
landmark = sess.run(
output_op, feed_dict={input_op: input_img})
# transform back to original image coordinate
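        # the detector works in a 224x224 crop with a flipped y-axis: undo the
        # flip, the scale, and the bbox offset, then flip y back to image coords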
landmark = landmark.reshape([68, 2]) + mean_face
landmark[:, 1] = 223 - landmark[:, 1]
landmark = landmark / scale
landmark[:, 0] = landmark[:, 0] + bbox[0]
landmark[:, 1] = landmark[:, 1] + bbox[1]
landmark[:, 1] = img.shape[0] - 1 - landmark[:, 1]
if i % 100 == 0:
draw_landmarks(img, landmark, os.path.join(vis_path, name))
save_label(landmark, os.path.join(save_path, txt_name))
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,097
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/options/train_options.py
|
"""This script contains the training options for Deep3DFaceRecon_pytorch
"""
from .base_options import BaseOptions
from util import util
class TrainOptions(BaseOptions):
"""This class includes training options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
# dataset parameters
# for train
parser.add_argument('--data_root', type=str, default='./', help='dataset root')
parser.add_argument('--flist', type=str, default='datalist/train/masks.txt', help='list of mask names of training set')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--dataset_mode', type=str, default='flist', help='chooses how datasets are loaded. [None | flist]')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='shift_scale_rot_flip', help='scaling and cropping of images at load time [shift_scale_rot_flip | shift_scale | shift | shift_rot_flip ]')
parser.add_argument('--use_aug', type=util.str2bool, nargs='?', const=True, default=True, help='whether use data augmentation')
# for val
parser.add_argument('--flist_val', type=str, default='datalist/val/masks.txt', help='list of mask names of val set')
parser.add_argument('--batch_size_val', type=int, default=32)
# visualization parameters
parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results on screen')
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
# network saving and loading parameters
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--evaluation_freq', type=int, default=5000, help='evaluation freq')
parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
parser.add_argument('--pretrained_name', type=str, default=None, help='resume training from another checkpoint')
# training parameters
parser.add_argument('--n_epochs', type=int, default=20, help='number of epochs with the initial learning rate')
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
parser.add_argument('--lr_policy', type=str, default='step', help='learning rate policy. [linear | step | plateau | cosine]')
        parser.add_argument('--lr_decay_epochs', type=int, default=10, help='multiply by a gamma every lr_decay_epochs epochs')
self.isTrain = True
return parser
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,098
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/data/flist_dataset.py
|
"""This script defines the custom dataset for Deep3DFaceRecon_pytorch
"""
import os.path
from data.base_dataset import BaseDataset, get_transform, get_affine_mat, apply_img_affine, apply_lm_affine
from data.image_folder import make_dataset
from PIL import Image
import random
import util.util as util
import numpy as np
import json
import torch
from scipy.io import loadmat, savemat
import pickle
from util.preprocess import align_img, estimate_norm
from util.load_mats import load_lm3d
def default_flist_reader(flist):
"""
flist format: impath label\nimpath label\n ...(same to caffe's filelist)
"""
imlist = []
with open(flist, 'r') as rf:
for line in rf.readlines():
impath = line.strip()
imlist.append(impath)
return imlist
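# e.g. a masks flist is just one relative path per line (hypothetical layout):
#   images1/mask/000001.png
#   images1/mask/000002.png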
def jason_flist_reader(flist):
with open(flist, 'r') as fp:
info = json.load(fp)
return info
def parse_label(label):
return torch.tensor(np.array(label).astype(np.float32))
class FlistDataset(BaseDataset):
"""
    It requires one directory to host training images '/path/to/data/train'
You can train the model with the dataset flag '--dataroot /path/to/data'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.lm3d_std = load_lm3d(opt.bfm_folder)
msk_names = default_flist_reader(opt.flist)
self.msk_paths = [os.path.join(opt.data_root, i) for i in msk_names]
self.size = len(self.msk_paths)
self.opt = opt
self.name = 'train' if opt.isTrain else 'val'
if '_' in opt.flist:
self.name += '_' + opt.flist.split(os.sep)[-1].split('_')[0]
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
        Returns a dictionary that contains imgs, lms, msks, M, im_paths, aug_flag and dataset
img (tensor) -- an image in the input domain
msk (tensor) -- its corresponding attention mask
lm (tensor) -- its corresponding 3d landmarks
im_paths (str) -- image paths
            aug_flag (bool) -- a flag used to tell whether it is raw or augmented
"""
        msk_path = self.msk_paths[index % self.size] # make sure index is within the range
img_path = msk_path.replace('mask/', '')
lm_path = '.'.join(msk_path.replace('mask', 'landmarks').split('.')[:-1]) + '.txt'
raw_img = Image.open(img_path).convert('RGB')
raw_msk = Image.open(msk_path).convert('RGB')
raw_lm = np.loadtxt(lm_path).astype(np.float32)
_, img, lm, msk = align_img(raw_img, raw_lm, self.lm3d_std, raw_msk)
aug_flag = self.opt.use_aug and self.opt.isTrain
if aug_flag:
img, lm, msk = self._augmentation(img, lm, self.opt, msk)
_, H = img.size
M = estimate_norm(lm, H)
transform = get_transform()
img_tensor = transform(img)
msk_tensor = transform(msk)[:1, ...]
lm_tensor = parse_label(lm)
M_tensor = parse_label(M)
return {'imgs': img_tensor,
'lms': lm_tensor,
'msks': msk_tensor,
'M': M_tensor,
'im_paths': img_path,
'aug_flag': aug_flag,
'dataset': self.name}
def _augmentation(self, img, lm, opt, msk=None):
affine, affine_inv, flip = get_affine_mat(opt, img.size)
img = apply_img_affine(img, affine_inv)
lm = apply_lm_affine(lm, affine, flip, img.size)
if msk is not None:
msk = apply_img_affine(msk, affine_inv, method=Image.BILINEAR)
return img, lm, msk
def __len__(self):
"""Return the total number of images in the dataset.
"""
return self.size
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,099
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/data_preparation.py
|
"""This script is the data preparation script for Deep3DFaceRecon_pytorch
"""
import os
import numpy as np
import argparse
from util.detect_lm68 import detect_68p,load_lm_graph
from util.skin_mask import get_skin_mask
from util.generate_list import check_list, write_list
import warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='datasets', help='root directory for training data')
parser.add_argument('--img_folder', nargs="+", required=True, help='folders of training images')
parser.add_argument('--mode', type=str, default='train', help='train or val')
opt = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def data_prepare(folder_list,mode):
lm_sess,input_op,output_op = load_lm_graph('./checkpoints/lm_model/68lm_detector.pb') # load a tensorflow version 68-landmark detector
for img_folder in folder_list:
detect_68p(img_folder,lm_sess,input_op,output_op) # detect landmarks for images
get_skin_mask(img_folder) # generate skin attention mask for images
# create files that record path to all training data
msks_list = []
for img_folder in folder_list:
path = os.path.join(img_folder, 'mask')
msks_list += ['/'.join([img_folder, 'mask', i]) for i in sorted(os.listdir(path)) if 'jpg' in i or
'png' in i or 'jpeg' in i or 'PNG' in i]
imgs_list = [i.replace('mask/', '') for i in msks_list]
lms_list = [i.replace('mask', 'landmarks') for i in msks_list]
lms_list = ['.'.join(i.split('.')[:-1]) + '.txt' for i in lms_list]
lms_list_final, imgs_list_final, msks_list_final = check_list(lms_list, imgs_list, msks_list) # check if the path is valid
write_list(lms_list_final, imgs_list_final, msks_list_final, mode=mode) # save files
if __name__ == '__main__':
print('Datasets:',opt.img_folder)
data_prepare([os.path.join(opt.data_root,folder) for folder in opt.img_folder],opt.mode)
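# Example invocation (hypothetical folder layout under --data_root):
#   python data_preparation.py --img_folder images1 images2 --mode train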
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,100
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/options/test_options.py
|
"""This script contains the test options for Deep3DFaceRecon_pytorch
"""
from .base_options import BaseOptions
class TestOptions(BaseOptions):
"""This class includes test options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser) # define shared options
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
parser.add_argument('--dataset_mode', type=str, default=None, help='chooses how datasets are loaded. [None | flist]')
parser.add_argument('--img_folder', type=str, default='examples', help='folder for test images.')
        # Dropout and Batchnorm have different behavior during training and test.
self.isTrain = False
return parser
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,101
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/train.py
|
"""This script is the training script for Deep3DFaceRecon_pytorch
"""
import os
import time
import numpy as np
import torch
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import MyVisualizer
from util.util import genvalconf
import torch.multiprocessing as mp
import torch.distributed as dist
def setup(rank, world_size, port):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = port
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=world_size)
def cleanup():
dist.destroy_process_group()
def main(rank, world_size, train_opt):
val_opt = genvalconf(train_opt, isTrain=False)
device = torch.device(rank)
torch.cuda.set_device(device)
use_ddp = train_opt.use_ddp
if use_ddp:
setup(rank, world_size, train_opt.ddp_port)
train_dataset, val_dataset = create_dataset(train_opt, rank=rank), create_dataset(val_opt, rank=rank)
train_dataset_batches, val_dataset_batches = \
len(train_dataset) // train_opt.batch_size, len(val_dataset) // val_opt.batch_size
model = create_model(train_opt) # create a model given train_opt.model and other options
model.setup(train_opt)
model.device = device
model.parallelize()
if rank == 0:
print('The batch number of training images = %d,\nthe batch number of validation images = %d'
      % (train_dataset_batches, val_dataset_batches))
model.print_networks(train_opt.verbose)
visualizer = MyVisualizer(train_opt) # create a visualizer that display/save images and plots
total_iters = train_dataset_batches * (train_opt.epoch_count - 1) # the total number of training iterations
t_data = 0
t_val = 0
optimize_time = 0.1
batch_size = 1 if train_opt.display_per_batch else train_opt.batch_size
if use_ddp:
dist.barrier()
times = []
for epoch in range(train_opt.epoch_count, train_opt.n_epochs + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for train_data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
train_dataset.set_epoch(epoch)
for i, train_data in enumerate(train_dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % train_opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_iters += batch_size
epoch_iter += batch_size
torch.cuda.synchronize()
optimize_start_time = time.time()
model.set_input(train_data) # unpack train_data from dataset and apply preprocessing
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
torch.cuda.synchronize()
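# exponential moving average of the per-sample optimization time (smoothing factor 0.995)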
optimize_time = (time.time() - optimize_start_time) / batch_size * 0.005 + 0.995 * optimize_time
if use_ddp:
dist.barrier()
if rank == 0 and (total_iters == batch_size or total_iters % train_opt.display_freq == 0): # display images on visdom and save images to a HTML file
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), total_iters, epoch,
save_results=True,
add_image=train_opt.add_image)
# (total_iters == batch_size or total_iters % train_opt.evaluation_freq == 0)
if rank == 0 and (total_iters == batch_size or total_iters % train_opt.print_freq == 0): # print training losses and save logging information to the disk
losses = model.get_current_losses()
visualizer.print_current_losses(epoch, epoch_iter, losses, optimize_time, t_data)
visualizer.plot_current_losses(total_iters, losses)
if total_iters == batch_size or total_iters % train_opt.evaluation_freq == 0:
with torch.no_grad():
torch.cuda.synchronize()
val_start_time = time.time()
losses_avg = {}
model.eval()
for j, val_data in enumerate(val_dataset):
model.set_input(val_data)
model.optimize_parameters(isTrain=False)
if rank == 0 and j < train_opt.vis_batch_nums:
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), total_iters, epoch,
dataset='val', save_results=True, count=j * val_opt.batch_size,
add_image=train_opt.add_image)
if j < train_opt.eval_batch_nums:
losses = model.get_current_losses()
for key, value in losses.items():
losses_avg[key] = losses_avg.get(key, 0) + value
for key, value in losses_avg.items():
losses_avg[key] = value / min(train_opt.eval_batch_nums, val_dataset_batches)
torch.cuda.synchronize()
eval_time = time.time() - val_start_time
if rank == 0:
visualizer.print_current_losses(epoch, epoch_iter, losses_avg, eval_time, t_data, dataset='val') # print validation losses
visualizer.plot_current_losses(total_iters, losses_avg, dataset='val')
model.train()
if use_ddp:
dist.barrier()
if rank == 0 and (total_iters == batch_size or total_iters % train_opt.save_latest_freq == 0): # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
print(train_opt.name) # it's useful to occasionally show the experiment name on console
save_suffix = 'iter_%d' % total_iters if train_opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
if use_ddp:
dist.barrier()
iter_data_time = time.time()
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, train_opt.n_epochs, time.time() - epoch_start_time))
model.update_learning_rate() # update learning rates at the end of every epoch.
if rank == 0 and epoch % train_opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
model.save_networks(epoch)
if use_ddp:
dist.barrier()
if __name__ == '__main__':
import warnings
warnings.filterwarnings("ignore")
train_opt = TrainOptions().parse() # get training options
world_size = train_opt.world_size
if train_opt.use_ddp:
mp.spawn(main, args=(world_size, train_opt), nprocs=world_size, join=True)
else:
main(0, world_size, train_opt)
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,102
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/util/util.py
|
"""This script contains basic utilities for Deep3DFaceRecon_pytorch
"""
from __future__ import print_function
import numpy as np
import torch
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def copyconf(default_opt, **kwargs):
conf = Namespace(**vars(default_opt))
for key in kwargs:
setattr(conf, key, kwargs[key])
return conf
def genvalconf(train_opt, **kwargs):
conf = Namespace(**vars(train_opt))
attr_dict = train_opt.__dict__
for key, value in attr_dict.items():
if 'val' in key and key.split('_')[0] in attr_dict:
setattr(conf, key.split('_')[0], value)
for key in kwargs:
setattr(conf, key, kwargs[key])
return conf
def find_class_in_module(target_cls_name, module):
target_cls_name = target_cls_name.replace('_', '').lower()
clslib = importlib.import_module(module)
cls = None
for name, clsobj in clslib.__dict__.items():
if name.lower() == target_cls_name:
cls = clsobj
assert cls is not None, "In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name)
return cls
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array, range(0, 1)
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor.clamp(0.0, 1.0).cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio is None:
pass
elif aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
elif aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
def correct_resize_label(t, size):
device = t.device
t = t.detach().cpu()
resized = []
for i in range(t.size(0)):
one_t = t[i, :1]
one_np = np.transpose(one_t.numpy().astype(np.uint8), (1, 2, 0))
one_np = one_np[:, :, 0]
one_image = Image.fromarray(one_np).resize(size, Image.NEAREST)
resized_t = torch.from_numpy(np.array(one_image)).long()
resized.append(resized_t)
return torch.stack(resized, dim=0).to(device)
def correct_resize(t, size, mode=Image.BICUBIC):
device = t.device
t = t.detach().cpu()
resized = []
for i in range(t.size(0)):
one_t = t[i:i + 1]
one_image = Image.fromarray(tensor2im(one_t)).resize(size, mode) # honor the requested resample mode
resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0
resized.append(resized_t)
return torch.stack(resized, dim=0).to(device)
def draw_landmarks(img, landmark, color='r', step=2):
"""
Return:
img -- numpy.array, (B, H, W, 3) img with landmark, RGB order, range (0, 255)
Parameters:
img -- numpy.array, (B, H, W, 3), RGB order, range (0, 255)
landmark -- numpy.array, (B, 68, 2), y direction is opposite to v direction
color -- str, 'r' or 'b' (red or blue)
"""
if color =='r':
c = np.array([255., 0, 0])
else:
c = np.array([0, 0, 255.])
_, H, W, _ = img.shape
img, landmark = img.copy(), landmark.copy()
landmark[..., 1] = H - 1 - landmark[..., 1]
landmark = np.round(landmark).astype(np.int32)
for i in range(landmark.shape[1]):
x, y = landmark[:, i, 0], landmark[:, i, 1]
for j in range(-step, step):
for k in range(-step, step):
u = np.clip(x + j, 0, W - 1)
v = np.clip(y + k, 0, H - 1)
for m in range(landmark.shape[0]):
img[m, v[m], u[m]] = c
return img
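if __name__ == '__main__':
    # Hedged self-test sketch (illustrative, not part of the original module):
    # exercises a few pure helpers above with synthetic inputs.
    assert str2bool('yes') is True and str2bool('0') is False
    base = Namespace(gamma=1.0, gamma_val=0.5, isTrain=True)
    val = genvalconf(base, isTrain=False)  # attrs named '<base>_val' override their '<base>' counterparts
    assert val.gamma == 0.5 and val.isTrain is False
    print_numpy(np.random.rand(4, 4), val=True, shp=True)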
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,103
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/models/facerecon_model.py
|
"""This script defines the face reconstruction model for Deep3DFaceRecon_pytorch
"""
import numpy as np
import torch
from .base_model import BaseModel
from . import networks
from .bfm import ParametricFaceModel
from .losses import perceptual_loss, photo_loss, reg_loss, reflectance_loss, landmark_loss
from util import util
from util.nvdiffrast import MeshRenderer
from util.preprocess import estimate_norm_torch
import trimesh
from scipy.io import savemat
class FaceReconModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
""" Configures options specific for CUT model
"""
# net structure and parameters
parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure')
parser.add_argument('--init_path', type=str, default='checkpoints/init_model/resnet50-0676ba61.pth')
parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc')
parser.add_argument('--bfm_folder', type=str, default='BFM')
parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model')
# renderer parameters
parser.add_argument('--focal', type=float, default=1015.)
parser.add_argument('--center', type=float, default=112.)
parser.add_argument('--camera_d', type=float, default=10.)
parser.add_argument('--z_near', type=float, default=5.)
parser.add_argument('--z_far', type=float, default=15.)
if is_train:
# training parameters
parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r43', 'r50'], help='face recog network structure')
parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth')
parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss')
parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face')
# augmentation parameters
parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels')
parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor')
parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree')
# loss weights
parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss')
parser.add_argument('--w_color', type=float, default=1.92, help='weight for color loss')
parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss')
parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss')
parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss')
parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss')
parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss')
parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss')
parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss')
opt, _ = parser.parse_known_args()
parser.set_defaults(
focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15.
)
if is_train:
parser.set_defaults(
use_crop_face=True, use_predef_M=False
)
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
self.visual_names = ['output_vis']
self.model_names = ['net_recon']
self.parallel_names = self.model_names + ['renderer']
self.net_recon = networks.define_net_recon(
net_recon=opt.net_recon, use_last_fc=opt.use_last_fc, init_path=opt.init_path
)
self.facemodel = ParametricFaceModel(
bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal, center=opt.center,
is_train=self.isTrain, default_name=opt.bfm_model
)
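# full field of view (in degrees) implied by the pinhole intrinsics: fov = 2 * atan(center / focal)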
fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi
self.renderer = MeshRenderer(
rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center)
)
if self.isTrain:
self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc']
self.net_recog = networks.define_net_recog(
net_recog=opt.net_recog, pretrained_path=opt.net_recog_path
)
# loss func name: (compute_%s_loss) % loss_name
self.compute_feat_loss = perceptual_loss
self.compute_color_loss = photo_loss
self.compute_lm_loss = landmark_loss
self.compute_reg_loss = reg_loss
self.compute_reflc_loss = reflectance_loss
self.optimizer = torch.optim.Adam(self.net_recon.parameters(), lr=opt.lr)
self.optimizers = [self.optimizer]
self.parallel_names += ['net_recog']
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
self.input_img = input['imgs'].to(self.device)
self.atten_mask = input['msks'].to(self.device) if 'msks' in input else None
self.gt_lm = input['lms'].to(self.device) if 'lms' in input else None
self.trans_m = input['M'].to(self.device) if 'M' in input else None
self.image_paths = input['im_paths'] if 'im_paths' in input else None
def forward(self):
output_coeff = self.net_recon(self.input_img)
self.facemodel.to(self.device)
self.pred_vertex, self.pred_tex, self.pred_color, self.pred_lm = \
self.facemodel.compute_for_render(output_coeff)
self.pred_mask, _, self.pred_face = self.renderer(
self.pred_vertex, self.facemodel.face_buf, feat=self.pred_color)
self.pred_coeffs_dict = self.facemodel.split_coeff(output_coeff)
def compute_losses(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
assert not self.net_recog.training
trans_m = self.trans_m
if not self.opt.use_predef_M:
trans_m = estimate_norm_torch(self.pred_lm, self.input_img.shape[-2])
pred_feat = self.net_recog(self.pred_face, trans_m)
gt_feat = self.net_recog(self.input_img, self.trans_m)
self.loss_feat = self.opt.w_feat * self.compute_feat_loss(pred_feat, gt_feat)
face_mask = self.pred_mask
if self.opt.use_crop_face:
face_mask, _, _ = self.renderer(self.pred_vertex, self.facemodel.front_face_buf)
face_mask = face_mask.detach()
self.loss_color = self.opt.w_color * self.compute_color_loss(
    self.pred_face, self.input_img, self.atten_mask * face_mask)
loss_reg, loss_gamma = self.compute_reg_loss(self.pred_coeffs_dict, self.opt)
self.loss_reg = self.opt.w_reg * loss_reg
self.loss_gamma = self.opt.w_gamma * loss_gamma
self.loss_lm = self.opt.w_lm * self.compute_lm_loss(self.pred_lm, self.gt_lm)
self.loss_reflc = self.opt.w_reflc * self.compute_reflc_loss(self.pred_tex, self.facemodel.skin_mask)
self.loss_all = self.loss_feat + self.loss_color + self.loss_reg + self.loss_gamma \
+ self.loss_lm + self.loss_reflc
def optimize_parameters(self, isTrain=True):
    """Update network weights; it will be called in every training iteration."""
    self.forward()
    self.compute_losses()
if isTrain:
self.optimizer.zero_grad()
self.loss_all.backward()
self.optimizer.step()
def compute_visuals(self):
with torch.no_grad():
input_img_numpy = 255. * self.input_img.detach().cpu().permute(0, 2, 3, 1).numpy()
output_vis = self.pred_face * self.pred_mask + (1 - self.pred_mask) * self.input_img
output_vis_numpy_raw = 255. * output_vis.detach().cpu().permute(0, 2, 3, 1).numpy()
if self.gt_lm is not None:
gt_lm_numpy = self.gt_lm.cpu().numpy()
pred_lm_numpy = self.pred_lm.detach().cpu().numpy()
output_vis_numpy = util.draw_landmarks(output_vis_numpy_raw, gt_lm_numpy, 'b')
output_vis_numpy = util.draw_landmarks(output_vis_numpy, pred_lm_numpy, 'r')
output_vis_numpy = np.concatenate((input_img_numpy,
output_vis_numpy_raw, output_vis_numpy), axis=-2)
else:
output_vis_numpy = np.concatenate((input_img_numpy,
output_vis_numpy_raw), axis=-2)
self.output_vis = torch.tensor(
output_vis_numpy / 255., dtype=torch.float32
).permute(0, 3, 1, 2).to(self.device)
def save_mesh(self, name):
recon_shape = self.pred_vertex # get reconstructed shape
recon_shape[..., -1] = 10 - recon_shape[..., -1] # from camera space to world space
recon_shape = recon_shape.cpu().numpy()[0]
recon_color = self.pred_color
recon_color = recon_color.cpu().numpy()[0]
tri = self.facemodel.face_buf.cpu().numpy()
mesh = trimesh.Trimesh(vertices=recon_shape, faces=tri, vertex_colors=np.clip(255. * recon_color, 0, 255).astype(np.uint8))
mesh.export(name)
def save_coeff(self,name):
pred_coeffs = {key:self.pred_coeffs_dict[key].cpu().numpy() for key in self.pred_coeffs_dict}
pred_lm = self.pred_lm.cpu().numpy()
pred_lm = np.stack([pred_lm[:,:,0],self.input_img.shape[2]-1-pred_lm[:,:,1]],axis=2) # transfer to image coordinate
pred_coeffs['lm68'] = pred_lm
savemat(name,pred_coeffs)
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,104
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/models/losses.py
|
import numpy as np
import torch
import torch.nn as nn
from kornia.geometry import warp_affine
import torch.nn.functional as F
def resize_n_crop(image, M, dsize=112):
# image: (b, c, h, w)
# M : (b, 2, 3)
return warp_affine(image, M, dsize=(dsize, dsize))
### perceptual level loss
class PerceptualLoss(nn.Module):
def __init__(self, recog_net, input_size=112):
super(PerceptualLoss, self).__init__()
self.recog_net = recog_net
self.preprocess = lambda x: 2 * x - 1
self.input_size=input_size
def forward(self, imageA, imageB, M):
"""
1 - cosine distance
Parameters:
imageA --torch.tensor (B, 3, H, W), range (0, 1) , RGB order
imageB --same as imageA
"""
imageA = self.preprocess(resize_n_crop(imageA, M, self.input_size))
imageB = self.preprocess(resize_n_crop(imageB, M, self.input_size))
# freeze bn
self.recog_net.eval()
id_featureA = F.normalize(self.recog_net(imageA), dim=-1, p=2)
id_featureB = F.normalize(self.recog_net(imageB), dim=-1, p=2)
cosine_d = torch.sum(id_featureA * id_featureB, dim=-1)
# assert torch.sum((cosine_d > 1).float()) == 0
return torch.sum(1 - cosine_d) / cosine_d.shape[0]
def perceptual_loss(id_featureA, id_featureB):
cosine_d = torch.sum(id_featureA * id_featureB, dim=-1)
# assert torch.sum((cosine_d > 1).float()) == 0
return torch.sum(1 - cosine_d) / cosine_d.shape[0]
### image level loss
def photo_loss(imageA, imageB, mask, eps=1e-6):
"""
l2 norm (with sqrt; eps keeps the backward pass stable, otherwise NaN may occur)
Parameters:
imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order
imageB --same as imageA
"""
loss = torch.sqrt(eps + torch.sum((imageA - imageB) ** 2, dim=1, keepdims=True)) * mask
loss = torch.sum(loss) / torch.max(torch.sum(mask), torch.tensor(1.0).to(mask.device))
return loss
def landmark_loss(predict_lm, gt_lm, weight=None):
"""
weighted mse loss
Parameters:
predict_lm --torch.tensor (B, 68, 2)
gt_lm --torch.tensor (B, 68, 2)
weight --numpy.array (1, 68)
"""
if weight is None:
weight = np.ones([68])
weight[28:31] = 20
weight[-8:] = 20
weight = np.expand_dims(weight, 0)
weight = torch.tensor(weight).to(predict_lm.device)
loss = torch.sum((predict_lm - gt_lm)**2, dim=-1) * weight
loss = torch.sum(loss) / (predict_lm.shape[0] * predict_lm.shape[1])
return loss
### regularization
def reg_loss(coeffs_dict, opt=None):
"""
l2 norm without the sqrt, from yu's implementation (mse)
tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss
Parameters:
coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans
"""
# coefficient regularization to ensure plausible 3d faces
if opt:
w_id, w_exp, w_tex = opt.w_id, opt.w_exp, opt.w_tex
else:
w_id, w_exp, w_tex = 1, 1, 1
creg_loss = w_id * torch.sum(coeffs_dict['id'] ** 2) + \
w_exp * torch.sum(coeffs_dict['exp'] ** 2) + \
w_tex * torch.sum(coeffs_dict['tex'] ** 2)
creg_loss = creg_loss / coeffs_dict['id'].shape[0]
# gamma regularization to ensure a nearly-monochromatic light
gamma = coeffs_dict['gamma'].reshape([-1, 3, 9])
gamma_mean = torch.mean(gamma, dim=1, keepdims=True)
gamma_loss = torch.mean((gamma - gamma_mean) ** 2)
return creg_loss, gamma_loss
def reflectance_loss(texture, mask):
"""
minimize texture variance (mse); albedo regularization to ensure a uniform skin albedo
Parameters:
texture --torch.tensor, (B, N, 3)
mask --torch.tensor, (N), 1 or 0
"""
mask = mask.reshape([1, mask.shape[0], 1])
texture_mean = torch.sum(mask * texture, dim=1, keepdims=True) / torch.sum(mask)
loss = torch.sum(((texture - texture_mean) * mask)**2) / (texture.shape[0] * torch.sum(mask))
return loss
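if __name__ == '__main__':
    # Hedged smoke test (illustrative only, not part of the original module):
    # run the functional losses above on random tensors to check shapes/values.
    imageA = torch.rand(2, 3, 16, 16)
    imageB = torch.rand(2, 3, 16, 16)
    mask = torch.ones(2, 1, 16, 16)
    print('photo_loss:', photo_loss(imageA, imageB, mask).item())
    print('landmark_loss:', landmark_loss(torch.rand(2, 68, 2), torch.rand(2, 68, 2)).item())
    coeffs = {'id': torch.rand(2, 80), 'exp': torch.rand(2, 64),
              'tex': torch.rand(2, 80), 'gamma': torch.rand(2, 27)}
    creg, gamma_reg = reg_loss(coeffs)
    print('reg_loss:', creg.item(), 'gamma_loss:', gamma_reg.item())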
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,105
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/util/generate_list.py
|
"""This script is to generate training list files for Deep3DFaceRecon_pytorch
"""
import os
# save path to training data
def write_list(lms_list, imgs_list, msks_list, mode='train',save_folder='datalist', save_name=''):
save_path = os.path.join(save_folder, mode)
if not os.path.isdir(save_path):
os.makedirs(save_path)
with open(os.path.join(save_path, save_name + 'landmarks.txt'), 'w') as fd:
fd.writelines([i + '\n' for i in lms_list])
with open(os.path.join(save_path, save_name + 'images.txt'), 'w') as fd:
fd.writelines([i + '\n' for i in imgs_list])
with open(os.path.join(save_path, save_name + 'masks.txt'), 'w') as fd:
fd.writelines([i + '\n' for i in msks_list])
# check if the path is valid
def check_list(rlms_list, rimgs_list, rmsks_list):
lms_list, imgs_list, msks_list = [], [], []
for i in range(len(rlms_list)):
flag = 'false'
lm_path = rlms_list[i]
im_path = rimgs_list[i]
msk_path = rmsks_list[i]
if os.path.isfile(lm_path) and os.path.isfile(im_path) and os.path.isfile(msk_path):
flag = 'true'
lms_list.append(rlms_list[i])
imgs_list.append(rimgs_list[i])
msks_list.append(rmsks_list[i])
print(i, rlms_list[i], flag)
return lms_list, imgs_list, msks_list
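if __name__ == '__main__':
    # Hedged smoke test (illustrative only): entries whose files do not exist
    # on disk are filtered out by check_list.
    lms, imgs, msks = check_list(['missing_lm.txt'], ['missing_img.png'], ['missing_msk.png'])
    assert lms == [] and imgs == [] and msks == []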
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,106
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/util/preprocess.py
|
"""This script contains the image preprocessing code for Deep3DFaceRecon_pytorch
"""
import numpy as np
from scipy.io import loadmat
from PIL import Image
import cv2
import os
from skimage import transform as trans
import torch
import warnings
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
# solve the least-squares problem used for image alignment
def POS(xp, x):
npts = xp.shape[1]
A = np.zeros([2*npts, 8])
A[0:2*npts-1:2, 0:3] = x.transpose()
A[0:2*npts-1:2, 3] = 1
A[1:2*npts:2, 4:7] = x.transpose()
A[1:2*npts:2, 7] = 1
b = np.reshape(xp.transpose(), [2*npts, 1])
k, _, _, _ = np.linalg.lstsq(A, b)
R1 = k[0:3]
R2 = k[4:7]
sTx = k[3]
sTy = k[7]
s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2
t = np.stack([sTx, sTy], axis=0)
return t, s
# bounding box for 68 landmark detection
def BBRegression(points, params):
w1 = params['W1']
b1 = params['B1']
w2 = params['W2']
b2 = params['B2']
data = points.copy()
data = data.reshape([5, 2])
data_mean = np.mean(data, axis=0)
x_mean = data_mean[0]
y_mean = data_mean[1]
data[:, 0] = data[:, 0] - x_mean
data[:, 1] = data[:, 1] - y_mean
rms = np.sqrt(np.sum(data ** 2)/5)
data = data / rms
data = data.reshape([1, 10])
data = np.transpose(data)
inputs = np.matmul(w1, data) + b1
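# tanh activation written out explicitly: tanh(x) = 2 / (1 + exp(-2x)) - 1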
inputs = 2 / (1 + np.exp(-2 * inputs)) - 1
inputs = np.matmul(w2, inputs) + b2
inputs = np.transpose(inputs)
x = inputs[:, 0] * rms + x_mean
y = inputs[:, 1] * rms + y_mean
w = 224/inputs[:, 2] * rms
rects = [x, y, w, w]
return np.array(rects).reshape([4])
# utils for landmark detection
def img_padding(img, box):
success = True
bbox = box.copy()
res = np.zeros([2*img.shape[0], 2*img.shape[1], 3])
res[img.shape[0] // 2: img.shape[0] + img.shape[0] // 2,
    img.shape[1] // 2: img.shape[1] + img.shape[1] // 2] = img
bbox[0] = bbox[0] + img.shape[1] // 2
bbox[1] = bbox[1] + img.shape[0] // 2
if bbox[0] < 0 or bbox[1] < 0:
success = False
return res, bbox, success
# utils for landmark detection
def crop(img, bbox):
padded_img, padded_bbox, flag = img_padding(img, bbox)
if flag:
crop_img = padded_img[padded_bbox[1]: padded_bbox[1] +
padded_bbox[3], padded_bbox[0]: padded_bbox[0] + padded_bbox[2]]
crop_img = cv2.resize(crop_img.astype(np.uint8),
(224, 224), interpolation=cv2.INTER_CUBIC)
scale = 224 / padded_bbox[3]
return crop_img, scale
else:
return padded_img, 0
# utils for landmark detection
def scale_trans(img, lm, t, s):
imgw = img.shape[1]
imgh = img.shape[0]
M_s = np.array([[1, 0, -t[0] + imgw//2 + 0.5], [0, 1, -imgh//2 + t[1]]],
dtype=np.float32)
img = cv2.warpAffine(img, M_s, (imgw, imgh))
w = int(imgw / s * 100)
h = int(imgh / s * 100)
img = cv2.resize(img, (w, h))
lm = np.stack([lm[:, 0] - t[0] + imgw // 2, lm[:, 1] -
t[1] + imgh // 2], axis=1) / s * 100
left = w//2 - 112
up = h//2 - 112
bbox = [left, up, 224, 224]
cropped_img, scale2 = crop(img, bbox)
assert scale2 != 0
t1 = np.array([bbox[0], bbox[1]])
# back to raw img s * crop + s * t1 + t2
t1 = np.array([w//2 - 112, h//2 - 112])
scale = s / 100
t2 = np.array([t[0] - imgw/2, t[1] - imgh / 2])
inv = (scale/scale2, scale * t1 + t2.reshape([2]))
return cropped_img, inv
# utils for landmark detection
def align_for_lm(img, five_points):
five_points = np.array(five_points).reshape([1, 10])
params = loadmat('util/BBRegressorParam_r.mat')
bbox = BBRegression(five_points, params)
assert bbox[2] != 0
bbox = np.round(bbox).astype(np.int32)
crop_img, scale = crop(img, bbox)
return crop_img, scale, bbox
# resize and crop images for face reconstruction
def resize_n_crop_img(img, lm, t, s, target_size=224., mask=None):
w0, h0 = img.size
w = (w0*s).astype(np.int32)
h = (h0*s).astype(np.int32)
left = (w/2 - target_size/2 + float((t[0] - w0/2)*s)).astype(np.int32)
right = left + target_size
up = (h/2 - target_size/2 + float((h0/2 - t[1])*s)).astype(np.int32)
below = up + target_size
img = img.resize((w, h), resample=Image.BICUBIC)
img = img.crop((left, up, right, below))
if mask is not None:
mask = mask.resize((w, h), resample=Image.BICUBIC)
mask = mask.crop((left, up, right, below))
lm = np.stack([lm[:, 0] - t[0] + w0/2, lm[:, 1] -
t[1] + h0/2], axis=1)*s
lm = lm - np.reshape(
np.array([(w/2 - target_size/2), (h/2-target_size/2)]), [1, 2])
return img, lm, mask
# utils for face reconstruction
def extract_5p(lm):
lm_idx = np.array([31, 37, 40, 43, 46, 49, 55]) - 1
lm5p = np.stack([lm[lm_idx[0], :], np.mean(lm[lm_idx[[1, 2]], :], 0), np.mean(
lm[lm_idx[[3, 4]], :], 0), lm[lm_idx[5], :], lm[lm_idx[6], :]], axis=0)
lm5p = lm5p[[1, 2, 0, 3, 4], :]
return lm5p
# utils for face reconstruction
def align_img(img, lm, lm3D, mask=None, target_size=224., rescale_factor=102.):
"""
Return:
transparams --numpy.array (raw_W, raw_H, scale, tx, ty)
img_new --PIL.Image (target_size, target_size, 3)
lm_new --numpy.array (68, 2), y direction is opposite to v direction
mask_new --PIL.Image (target_size, target_size)
Parameters:
img --PIL.Image (raw_H, raw_W, 3)
lm --numpy.array (68, 2), y direction is opposite to v direction
lm3D --numpy.array (5, 3)
mask --PIL.Image (raw_H, raw_W, 3)
"""
w0, h0 = img.size
if lm.shape[0] != 5:
lm5p = extract_5p(lm)
else:
lm5p = lm
# calculate translation and scale factors using 5 facial landmarks and standard landmarks of a 3D face
t, s = POS(lm5p.transpose(), lm3D.transpose())
s = rescale_factor/s
# processing the image
img_new, lm_new, mask_new = resize_n_crop_img(img, lm, t, s, target_size=target_size, mask=mask)
trans_params = np.array([w0, h0, s, t[0], t[1]])
return trans_params, img_new, lm_new, mask_new
# utils for face recognition model
def estimate_norm(lm_68p, H):
# from https://github.com/deepinsight/insightface/blob/c61d3cd208a603dfa4a338bd743b320ce3e94730/recognition/common/face_align.py#L68
"""
Return:
trans_m --numpy.array (2, 3)
Parameters:
lm_68p --numpy.array (68, 2), y direction is opposite to v direction
H --int/float , image height
"""
lm = extract_5p(lm_68p)
lm[:, -1] = H - 1 - lm[:, -1]
tform = trans.SimilarityTransform()
src = np.array(
[[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
[41.5493, 92.3655], [70.7299, 92.2041]],
dtype=np.float32)
tform.estimate(lm, src)
M = tform.params
if np.linalg.det(M) == 0:
M = np.eye(3)
return M[0:2, :]
def estimate_norm_torch(lm_68p, H):
lm_68p_ = lm_68p.detach().cpu().numpy()
M = []
for i in range(lm_68p_.shape[0]):
M.append(estimate_norm(lm_68p_[i], H))
M = torch.tensor(np.array(M), dtype=torch.float32).to(lm_68p.device)
return M
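if __name__ == '__main__':
    # Hedged sanity check (illustrative only): synthesize 2D projections of 5
    # random 3D landmarks with a known scale and translation, then confirm
    # that POS recovers both from the least-squares fit.
    x3d = np.random.rand(3, 5)
    s_true, t_true = 2.0, np.array([[10.], [20.]])
    xp = s_true * x3d[:2] + t_true
    t, s = POS(xp, x3d)
    print('scale: %.3f (expected %.3f)' % (s, s_true))
    print('translation:', t.ravel(), '(expected', t_true.ravel(), ')')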
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,107
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/test.py
|
"""This script is the test script for Deep3DFaceRecon_pytorch
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import MyVisualizer
from util.preprocess import align_img
from PIL import Image
import numpy as np
from util.load_mats import load_lm3d
import torch
from data.flist_dataset import default_flist_reader
from scipy.io import loadmat, savemat
def get_data_path(root='examples'):
im_path = [os.path.join(root, i) for i in sorted(os.listdir(root)) if i.endswith('png') or i.endswith('jpg')]
lm_path = [i.replace('png', 'txt').replace('jpg', 'txt') for i in im_path]
lm_path = [os.path.join(i.replace(i.split(os.path.sep)[-1],''),'detections',i.split(os.path.sep)[-1]) for i in lm_path]
return im_path, lm_path
def read_data(im_path, lm_path, lm3d_std, to_tensor=True):
# to RGB
im = Image.open(im_path).convert('RGB')
W,H = im.size
lm = np.loadtxt(lm_path).astype(np.float32)
lm = lm.reshape([-1, 2])
lm[:, -1] = H - 1 - lm[:, -1]
_, im, lm, _ = align_img(im, lm, lm3d_std)
if to_tensor:
im = torch.tensor(np.array(im)/255., dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
lm = torch.tensor(lm).unsqueeze(0)
return im, lm
def main(rank, opt, name='examples'):
device = torch.device(rank)
torch.cuda.set_device(device)
model = create_model(opt)
model.setup(opt)
model.device = device
model.parallelize()
model.eval()
visualizer = MyVisualizer(opt)
im_path, lm_path = get_data_path(name)
lm3d_std = load_lm3d(opt.bfm_folder)
for i in range(len(im_path)):
print(i, im_path[i])
img_name = im_path[i].split(os.path.sep)[-1].replace('.png','').replace('.jpg','')
if not os.path.isfile(lm_path[i]):
continue
im_tensor, lm_tensor = read_data(im_path[i], lm_path[i], lm3d_std)
data = {
'imgs': im_tensor,
'lms': lm_tensor
}
model.set_input(data) # unpack data from data loader
model.test() # run inference
visuals = model.get_current_visuals() # get image results
visualizer.display_current_results(visuals, 0, opt.epoch, dataset=name.split(os.path.sep)[-1],
save_results=True, count=i, name=img_name, add_image=False)
model.save_mesh(os.path.join(visualizer.img_dir, name.split(os.path.sep)[-1], 'epoch_%s_%06d'%(opt.epoch, 0),img_name+'.obj')) # save reconstruction meshes
model.save_coeff(os.path.join(visualizer.img_dir, name.split(os.path.sep)[-1], 'epoch_%s_%06d'%(opt.epoch, 0),img_name+'.mat')) # save predicted coefficients
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
main(0, opt,opt.img_folder)
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,108
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/data/base_dataset.py
|
"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform), which can later be used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
class BaseDataset(data.Dataset, ABC):
"""This class is an abstract base class (ABC) for datasets.
To create a subclass, you need to implement the following four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
self.opt = opt
# self.root = opt.dataroot
self.current_epoch = 0
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def __len__(self):
"""Return the total number of images in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
"""
pass
def get_transform(grayscale=False):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
transform_list += [transforms.ToTensor()]
return transforms.Compose(transform_list)
def get_affine_mat(opt, size):
shift_x, shift_y, scale, rot_angle, flip = 0., 0., 1., 0., False
w, h = size
if 'shift' in opt.preprocess:
shift_pixs = int(opt.shift_pixs)
shift_x = random.randint(-shift_pixs, shift_pixs)
shift_y = random.randint(-shift_pixs, shift_pixs)
if 'scale' in opt.preprocess:
scale = 1 + opt.scale_delta * (2 * random.random() - 1)
if 'rot' in opt.preprocess:
    rot_angle = opt.rot_angle * (2 * random.random() - 1)
rot_rad = -rot_angle * np.pi / 180  # defined even when rotation is disabled (rot_angle stays 0.)
if 'flip' in opt.preprocess:
flip = random.random() > 0.5
shift_to_origin = np.array([1, 0, -w//2, 0, 1, -h//2, 0, 0, 1]).reshape([3, 3])
flip_mat = np.array([-1 if flip else 1, 0, 0, 0, 1, 0, 0, 0, 1]).reshape([3, 3])
shift_mat = np.array([1, 0, shift_x, 0, 1, shift_y, 0, 0, 1]).reshape([3, 3])
rot_mat = np.array([np.cos(rot_rad), np.sin(rot_rad), 0, -np.sin(rot_rad), np.cos(rot_rad), 0, 0, 0, 1]).reshape([3, 3])
scale_mat = np.array([scale, 0, 0, 0, scale, 0, 0, 0, 1]).reshape([3, 3])
shift_to_center = np.array([1, 0, w//2, 0, 1, h//2, 0, 0, 1]).reshape([3, 3])
affine = shift_to_center @ scale_mat @ rot_mat @ shift_mat @ flip_mat @ shift_to_origin
affine_inv = np.linalg.inv(affine)
return affine, affine_inv, flip
def apply_img_affine(img, affine_inv, method=Image.BICUBIC):
return img.transform(img.size, Image.AFFINE, data=affine_inv.flatten()[:6], resample=method)
def apply_lm_affine(landmark, affine, flip, size):
_, h = size
lm = landmark.copy()
lm[:, 1] = h - 1 - lm[:, 1]
lm = np.concatenate((lm, np.ones([lm.shape[0], 1])), -1)
lm = lm @ np.transpose(affine)
lm[:, :2] = lm[:, :2] / lm[:, 2:]
lm = lm[:, :2]
lm[:, 1] = h - 1 - lm[:, 1]
if flip:
lm_ = lm.copy()
lm_[:17] = lm[16::-1]
lm_[17:22] = lm[26:21:-1]
lm_[22:27] = lm[21:16:-1]
lm_[31:36] = lm[35:30:-1]
lm_[36:40] = lm[45:41:-1]
lm_[40:42] = lm[47:45:-1]
lm_[42:46] = lm[39:35:-1]
lm_[46:48] = lm[41:39:-1]
lm_[48:55] = lm[54:47:-1]
lm_[55:60] = lm[59:54:-1]
lm_[60:65] = lm[64:59:-1]
lm_[65:68] = lm[67:64:-1]
lm = lm_
return lm
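if __name__ == '__main__':
    # Hedged sanity check (illustrative only): with no augmentations requested,
    # the composed affine matrix reduces to the identity and flip stays off.
    from argparse import Namespace
    opt = Namespace(preprocess='', shift_pixs=10, scale_delta=0.1, rot_angle=10.)
    affine, affine_inv, flip = get_affine_mat(opt, (224, 224))
    assert np.allclose(affine, np.eye(3)) and not flip
    print('identity affine OK')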
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,109
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/models/bfm.py
|
"""This script defines the parametric 3d face model for Deep3DFaceRecon_pytorch
"""
import numpy as np
import torch
import torch.nn.functional as F
from scipy.io import loadmat
from util.load_mats import transferBFM09
import os
def perspective_projection(focal, center):
# return p.T (N, 3) @ (3, 3)
return np.array([
focal, 0, center,
0, focal, center,
0, 0, 1
]).reshape([3, 3]).astype(np.float32).transpose()
class SH:
def __init__(self):
self.a = [np.pi, 2 * np.pi / np.sqrt(3.), 2 * np.pi / np.sqrt(8.)]
self.c = [1/np.sqrt(4 * np.pi), np.sqrt(3.) / np.sqrt(4 * np.pi), 3 * np.sqrt(5.) / np.sqrt(12 * np.pi)]
class ParametricFaceModel:
def __init__(self,
bfm_folder='./BFM',
recenter=True,
camera_distance=10.,
init_lit=np.array([
0.8, 0, 0, 0, 0, 0, 0, 0, 0
]),
focal=1015.,
center=112.,
is_train=True,
default_name='BFM_model_front.mat'):
if not os.path.isfile(os.path.join(bfm_folder, default_name)):
transferBFM09(bfm_folder)
model = loadmat(os.path.join(bfm_folder, default_name))
# mean face shape. [3*N,1]
self.mean_shape = model['meanshape'].astype(np.float32)
# identity basis. [3*N,80]
self.id_base = model['idBase'].astype(np.float32)
# expression basis. [3*N,64]
self.exp_base = model['exBase'].astype(np.float32)
# mean face texture. [3*N,1] (0-255)
self.mean_tex = model['meantex'].astype(np.float32)
# texture basis. [3*N,80]
self.tex_base = model['texBase'].astype(np.float32)
# face indices for each vertex that lies in. starts from 0. [N,8]
self.point_buf = model['point_buf'].astype(np.int64) - 1
# vertex indices for each face. starts from 0. [F,3]
self.face_buf = model['tri'].astype(np.int64) - 1
# vertex indices for 68 landmarks. starts from 0. [68,1]
self.keypoints = np.squeeze(model['keypoints']).astype(np.int64) - 1
if is_train:
# vertex indices for small face region to compute photometric error. starts from 0.
self.front_mask = np.squeeze(model['frontmask2_idx']).astype(np.int64) - 1
# vertex indices for each face from small face region. starts from 0. [f,3]
self.front_face_buf = model['tri_mask2'].astype(np.int64) - 1
# vertex indices for pre-defined skin region to compute reflectance loss
self.skin_mask = np.squeeze(model['skinmask'])
if recenter:
mean_shape = self.mean_shape.reshape([-1, 3])
mean_shape = mean_shape - np.mean(mean_shape, axis=0, keepdims=True)
self.mean_shape = mean_shape.reshape([-1, 1])
self.persc_proj = perspective_projection(focal, center)
self.device = 'cpu'
self.camera_distance = camera_distance
self.SH = SH()
self.init_lit = init_lit.reshape([1, 1, -1]).astype(np.float32)
def to(self, device):
self.device = device
for key, value in self.__dict__.items():
if type(value).__module__ == np.__name__:
setattr(self, key, torch.tensor(value).to(device))
def compute_shape(self, id_coeff, exp_coeff):
"""
Return:
face_shape -- torch.tensor, size (B, N, 3)
Parameters:
id_coeff -- torch.tensor, size (B, 80), identity coeffs
exp_coeff -- torch.tensor, size (B, 64), expression coeffs
"""
batch_size = id_coeff.shape[0]
id_part = torch.einsum('ij,aj->ai', self.id_base, id_coeff)
exp_part = torch.einsum('ij,aj->ai', self.exp_base, exp_coeff)
face_shape = id_part + exp_part + self.mean_shape.reshape([1, -1])
return face_shape.reshape([batch_size, -1, 3])
def compute_texture(self, tex_coeff, normalize=True):
"""
Return:
face_texture -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.)
Parameters:
tex_coeff -- torch.tensor, size (B, 80)
"""
batch_size = tex_coeff.shape[0]
face_texture = torch.einsum('ij,aj->ai', self.tex_base, tex_coeff) + self.mean_tex
if normalize:
face_texture = face_texture / 255.
return face_texture.reshape([batch_size, -1, 3])
def compute_norm(self, face_shape):
"""
Return:
vertex_norm -- torch.tensor, size (B, N, 3)
Parameters:
face_shape -- torch.tensor, size (B, N, 3)
"""
v1 = face_shape[:, self.face_buf[:, 0]]
v2 = face_shape[:, self.face_buf[:, 1]]
v3 = face_shape[:, self.face_buf[:, 2]]
e1 = v1 - v2
e2 = v2 - v3
face_norm = torch.cross(e1, e2, dim=-1)
face_norm = F.normalize(face_norm, dim=-1, p=2)
face_norm = torch.cat([face_norm, torch.zeros(face_norm.shape[0], 1, 3).to(self.device)], dim=1)
vertex_norm = torch.sum(face_norm[:, self.point_buf], dim=2)
vertex_norm = F.normalize(vertex_norm, dim=-1, p=2)
return vertex_norm
def compute_color(self, face_texture, face_norm, gamma):
"""
Return:
face_color -- torch.tensor, size (B, N, 3), range (0, 1.)
Parameters:
face_texture -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.)
face_norm -- torch.tensor, size (B, N, 3), rotated face normal
gamma -- torch.tensor, size (B, 27), SH coeffs
"""
batch_size = gamma.shape[0]
v_num = face_texture.shape[1]
a, c = self.SH.a, self.SH.c
gamma = gamma.reshape([batch_size, 3, 9])
gamma = gamma + self.init_lit
gamma = gamma.permute(0, 2, 1)
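# Y stacks the first 9 real spherical-harmonic basis functions (bands 0-2,
# up to sign conventions), evaluated at the per-vertex normals.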
Y = torch.cat([
a[0] * c[0] * torch.ones_like(face_norm[..., :1]).to(self.device),
-a[1] * c[1] * face_norm[..., 1:2],
a[1] * c[1] * face_norm[..., 2:],
-a[1] * c[1] * face_norm[..., :1],
a[2] * c[2] * face_norm[..., :1] * face_norm[..., 1:2],
-a[2] * c[2] * face_norm[..., 1:2] * face_norm[..., 2:],
0.5 * a[2] * c[2] / np.sqrt(3.) * (3 * face_norm[..., 2:] ** 2 - 1),
-a[2] * c[2] * face_norm[..., :1] * face_norm[..., 2:],
0.5 * a[2] * c[2] * (face_norm[..., :1] ** 2 - face_norm[..., 1:2] ** 2)
], dim=-1)
r = Y @ gamma[..., :1]
g = Y @ gamma[..., 1:2]
b = Y @ gamma[..., 2:]
face_color = torch.cat([r, g, b], dim=-1) * face_texture
return face_color
def compute_rotation(self, angles):
"""
Return:
rot -- torch.tensor, size (B, 3, 3) pts @ trans_mat
Parameters:
angles -- torch.tensor, size (B, 3), radian
"""
batch_size = angles.shape[0]
ones = torch.ones([batch_size, 1]).to(self.device)
zeros = torch.zeros([batch_size, 1]).to(self.device)
x, y, z = angles[:, :1], angles[:, 1:2], angles[:, 2:]
rot_x = torch.cat([
ones, zeros, zeros,
zeros, torch.cos(x), -torch.sin(x),
zeros, torch.sin(x), torch.cos(x)
], dim=1).reshape([batch_size, 3, 3])
rot_y = torch.cat([
torch.cos(y), zeros, torch.sin(y),
zeros, ones, zeros,
-torch.sin(y), zeros, torch.cos(y)
], dim=1).reshape([batch_size, 3, 3])
rot_z = torch.cat([
torch.cos(z), -torch.sin(z), zeros,
torch.sin(z), torch.cos(z), zeros,
zeros, zeros, ones
], dim=1).reshape([batch_size, 3, 3])
rot = rot_z @ rot_y @ rot_x
return rot.permute(0, 2, 1)
def to_camera(self, face_shape):
face_shape[..., -1] = self.camera_distance - face_shape[..., -1]
return face_shape
def to_image(self, face_shape):
"""
Return:
face_proj -- torch.tensor, size (B, N, 2), y direction is opposite to v direction
Parameters:
face_shape -- torch.tensor, size (B, N, 3)
"""
# to image_plane
face_proj = face_shape @ self.persc_proj
face_proj = face_proj[..., :2] / face_proj[..., 2:]
return face_proj
def transform(self, face_shape, rot, trans):
"""
Return:
face_shape -- torch.tensor, size (B, N, 3) pts @ rot + trans
Parameters:
face_shape -- torch.tensor, size (B, N, 3)
rot -- torch.tensor, size (B, 3, 3)
trans -- torch.tensor, size (B, 3)
"""
return face_shape @ rot + trans.unsqueeze(1)
def get_landmarks(self, face_proj):
"""
Return:
face_lms -- torch.tensor, size (B, 68, 2)
Parameters:
face_proj -- torch.tensor, size (B, N, 2)
"""
return face_proj[:, self.keypoints]
def split_coeff(self, coeffs):
"""
Return:
coeffs_dict -- a dict of torch.tensors
Parameters:
coeffs -- torch.tensor, size (B, 257)
"""
id_coeffs = coeffs[:, :80]
exp_coeffs = coeffs[:, 80: 144]
tex_coeffs = coeffs[:, 144: 224]
angles = coeffs[:, 224: 227]
gammas = coeffs[:, 227: 254]
translations = coeffs[:, 254:]
return {
'id': id_coeffs,
'exp': exp_coeffs,
'tex': tex_coeffs,
'angle': angles,
'gamma': gammas,
'trans': translations
}
def compute_for_render(self, coeffs):
"""
Return:
face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate
face_color -- torch.tensor, size (B, N, 3), in RGB order
landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction
Parameters:
coeffs -- torch.tensor, size (B, 257)
"""
coef_dict = self.split_coeff(coeffs)
face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp'])
rotation = self.compute_rotation(coef_dict['angle'])
face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans'])
face_vertex = self.to_camera(face_shape_transformed)
face_proj = self.to_image(face_vertex)
landmark = self.get_landmarks(face_proj)
face_texture = self.compute_texture(coef_dict['tex'])
face_norm = self.compute_norm(face_shape)
face_norm_roted = face_norm @ rotation
face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma'])
return face_vertex, face_texture, face_color, landmark
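if __name__ == '__main__':
    # Hedged sanity check (illustrative only, no BFM data files needed): a point
    # on the optical axis must project to the principal point (center, center).
    P = perspective_projection(focal=1015., center=112.)
    p = np.array([[0., 0., 10.]], dtype=np.float32) @ P
    uv = p[:, :2] / p[:, 2:]
    assert np.allclose(uv, [[112., 112.]])
    print('projected principal point:', uv)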
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,110
|
matajoh/Deep3DFaceRecon_pytorch
|
refs/heads/master
|
/util/nvdiffrast.py
|
"""This script is the differentiable renderer for Deep3DFaceRecon_pytorch
Note: the antialiasing step is missing in the current version.
"""
import torch
import torch.nn.functional as F
import kornia
from kornia.geometry.camera import pixel2cam
import numpy as np
from typing import List
import nvdiffrast.torch as dr
from scipy.io import loadmat
from torch import nn
def ndc_projection(x=0.1, n=1.0, f=50.0):
return np.array([[n/x, 0, 0, 0],
[ 0, n/-x, 0, 0],
[ 0, 0, -(f+n)/(f-n), -(2*f*n)/(f-n)],
[ 0, 0, -1, 0]]).astype(np.float32)
class MeshRenderer(nn.Module):
def __init__(self,
rasterize_fov,
znear=0.1,
zfar=10,
rasterize_size=224):
super(MeshRenderer, self).__init__()
x = np.tan(np.deg2rad(rasterize_fov * 0.5)) * znear
self.ndc_proj = torch.tensor(ndc_projection(x=x, n=znear, f=zfar)).matmul(
torch.diag(torch.tensor([1., -1, -1, 1])))
self.rasterize_size = rasterize_size
self.glctx = None
def forward(self, vertex, tri, feat=None):
"""
Return:
mask -- torch.tensor, size (B, 1, H, W)
depth -- torch.tensor, size (B, 1, H, W)
features(optional) -- torch.tensor, size (B, C, H, W) if feat is not None
Parameters:
vertex -- torch.tensor, size (B, N, 3)
tri -- torch.tensor, size (B, M, 3) or (M, 3), triangles
feat(optional) -- torch.tensor, size (B, C), features
"""
device = vertex.device
rsize = int(self.rasterize_size)
ndc_proj = self.ndc_proj.to(device)
# transform to homogeneous coordinates of the 3d vertices; the direction of y is the same as v
if vertex.shape[-1] == 3:
vertex = torch.cat([vertex, torch.ones([*vertex.shape[:2], 1]).to(device)], dim=-1)
vertex[..., 1] = -vertex[..., 1]
vertex_ndc = vertex @ ndc_proj.t()
if self.glctx is None:
self.glctx = dr.RasterizeGLContext(device=device)
print("create glctx on device cuda:%d"%device.index)
ranges = None
if isinstance(tri, List) or len(tri.shape) == 3:
vum = vertex_ndc.shape[1]
fnum = torch.tensor([f.shape[0] for f in tri]).unsqueeze(1).to(device)
fstartidx = torch.cumsum(fnum, dim=0) - fnum
ranges = torch.cat([fstartidx, fnum], axis=1).type(torch.int32).cpu()
for i in range(len(tri)):  # len() works for both a list of face tensors and a batched tensor
tri[i] = tri[i] + i*vum
vertex_ndc = torch.cat(vertex_ndc, dim=0)
tri = torch.cat(tri, dim=0)
# for range_mode vertex: [B*N, 4], tri: [B*M, 3]; for instance_mode vertex: [B, N, 4], tri: [M, 3]
tri = tri.type(torch.int32).contiguous()
rast_out, _ = dr.rasterize(self.glctx, vertex_ndc.contiguous(), tri, resolution=[rsize, rsize], ranges=ranges)
depth, _ = dr.interpolate(vertex.reshape([-1,4])[...,2].unsqueeze(1).contiguous(), rast_out, tri)
depth = depth.permute(0, 3, 1, 2)
mask = (rast_out[..., 3] > 0).float().unsqueeze(1)
depth = mask * depth
image = None
if feat is not None:
image, _ = dr.interpolate(feat, rast_out, tri)
image = image.permute(0, 3, 1, 2)
image = mask * image
return mask, depth, image
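if __name__ == '__main__':
    # Hedged sanity check (illustrative only, no GPU required): camera-space
    # points on the near/far clipping planes should map to NDC z/w = -1 / +1.
    n, f = 1.0, 50.0
    P = ndc_projection(x=0.1, n=n, f=f)
    for z in (n, f):
        p = P @ np.array([0., 0., -z, 1.], dtype=np.float32)
        print('z = %.1f -> ndc z/w = %+.3f' % (z, p[2] / p[3]))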
|
{"/util/detect_lm68.py": ["/util/preprocess.py"], "/data/flist_dataset.py": ["/data/base_dataset.py", "/util/util.py", "/util/preprocess.py"], "/data_preparation.py": ["/util/detect_lm68.py", "/util/generate_list.py"], "/train.py": ["/options/train_options.py", "/util/visualizer.py", "/util/util.py"], "/models/facerecon_model.py": ["/models/bfm.py", "/models/losses.py", "/util/nvdiffrast.py", "/util/preprocess.py"], "/test.py": ["/options/test_options.py", "/util/visualizer.py", "/util/preprocess.py", "/data/flist_dataset.py"]}
|
25,130
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/loader.py
|
# coding: utf-8
"""# `loader`
the loading machinery for notebooks style documents, and less.
notebooks combine code, markdown, and raw cells to create a complete document.
the importnb loader provides an interface for transforming these objects to valid python.
"""
import ast
import inspect
import re
import shlex
import sys
import textwrap
from contextlib import contextmanager
from dataclasses import asdict, dataclass, field
from functools import partial
from importlib import _bootstrap as bootstrap
from importlib import reload
from importlib._bootstrap import _init_module_attrs, _requires_builtin
from importlib._bootstrap_external import FileFinder, decode_source
from importlib.machinery import ModuleSpec, SourceFileLoader
from importlib.util import LazyLoader, find_spec
from pathlib import Path
from types import ModuleType
from . import get_ipython
from .decoder import LineCacheNotebookDecoder, quote
from .docstrings import update_docstring
from .finder import FileModuleSpec, FuzzyFinder, get_loader_details, get_loader_index
__all__ = "Notebook", "reload"
VERSION = sys.version_info.major, sys.version_info.minor
MAGIC = re.compile(r"^\s*%{2}", re.MULTILINE)
ALLOW_TOP_LEVEL_AWAIT = getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
def _get_co_flags_set(co_flags):
"""return a deconstructed set of code flags from a code object."""
flags = set()
for i in range(12):
flag = 1 << i
if co_flags & flag:
flags.add(flag)
co_flags ^= flag
if not co_flags:
break
else:
flags.intersection_update(flags)
return flags
class SourceModule(ModuleType):
def __fspath__(self):
return self.__file__
@dataclass
class Interface:
"""a configuration python importing interface"""
name: str = None
path: str = None
lazy: bool = False
extensions: tuple = field(default_factory=[".ipy", ".ipynb"].copy)
include_fuzzy_finder: bool = True
include_markdown_docstring: bool = True
include_non_defs: bool = True
include_await: bool = True
module_type: ModuleType = field(default=SourceModule)
no_magic: bool = False
_loader_hook_position: int = field(default=0, repr=False)
def __new__(cls, name=None, path=None, **kwargs):
kwargs.update(name=name, path=path)
self = super().__new__(cls)
self.__init__(**kwargs)
return self
class Loader(Interface, SourceFileLoader):
"""The simplest implementation of a Notebook Source File Loader.
This class breaks down the loading process into finer steps."""
extensions: tuple = field(default_factory=[".py"].copy)
@property
def loader(self):
"""generate a new loader based on the state of an existing loader."""
loader = type(self)
if self.lazy:
loader = LazyLoader.factory(loader)
# drop the per-module fields; name and path are supplied when the loader is reused
params = asdict(self)
params.pop("name")
params.pop("path")
return partial(loader, **params)
@property
def finder(self):
"""generate a new finder based on the state of an existing loader"""
return self.include_fuzzy_finder and FuzzyFinder or FileFinder
def raw_to_source(self, source):
"""transform a string from a raw file to python source."""
if self.path and self.path.endswith(".ipynb"):
# when we encounter notebooks we apply different transformers to the diff cell types
return LineCacheNotebookDecoder(
code=self.code, raw=self.raw, markdown=self.markdown
).decode(source, self.path)
# for a normal file we just apply the code transformer.
return self.code(source)
def source_to_nodes(self, source, path="<unknown>", *, _optimize=-1):
"""parse source string as python ast"""
flags = ast.PyCF_ONLY_AST
return bootstrap._call_with_frames_removed(
compile, source, path, "exec", flags=flags, dont_inherit=True, optimize=_optimize
)
def nodes_to_code(self, nodes, path="<unknown>", *, _optimize=-1):
"""compile ast nodes to python code object"""
flags = ALLOW_TOP_LEVEL_AWAIT
return bootstrap._call_with_frames_removed(
compile, nodes, path, "exec", flags=flags, dont_inherit=True, optimize=_optimize
)
def source_to_code(self, source, path="<unknown>", *, _optimize=-1):
"""tangle python source to compiled code by:
1. parsing the source as ast nodes
2. compiling the ast nodes as python code"""
nodes = self.source_to_nodes(source, path, _optimize=_optimize)
return self.nodes_to_code(nodes, path, _optimize=_optimize)
def get_data(self, path):
"""get_data injects an input transformation before the raw text.
this method allows notebook json to be transformed line for line into vertically sparse python code."""
return self.raw_to_source(decode_source(super().get_data(self.path)))
def create_module(self, spec):
"""an overloaded create_module method injecting fuzzy finder setup up logic."""
module = self.module_type(str(spec.name))
_init_module_attrs(spec, module)
if self.name:
module.__name__ = self.name
if module.__file__.endswith((".ipynb", ".ipy")):
module.get_ipython = get_ipython
if getattr(spec, "alias", None):
# put a fuzzy spec on the modules to avoid re importing it.
# there is a funky trick you do with the fuzzy finder where you
# load multiple versions with different finders.
sys.modules[spec.alias] = module
return module
def exec_module(self, module):
"""Execute the module."""
# importlib uses module.__name__, but when running modules as __main__ name will change.
# this approach uses the original name on the spec.
try:
code = self.get_code(module.__spec__.name)
# from importlib
if code is None:
raise ImportError(
f"cannot load module {module.__name__!r} when " "get_code() returns None"
)
if inspect.CO_COROUTINE not in _get_co_flags_set(code.co_flags):
# if there isn't any async nonsense then we proceed with convention.
bootstrap._call_with_frames_removed(exec, code, module.__dict__)
else:
self.aexec_module_sync(module)
except BaseException as e:
alias = getattr(module.__spec__, "alias", None)
if alias:
sys.modules.pop(alias, None)
raise e
def aexec_module_sync(self, module):
if "anyio" in sys.modules:
import anyio
__import__("anyio").run(self.aexec_module, module)
else:
from asyncio import get_event_loop
get_event_loop().run_until_complete(self.aexec_module(module))
async def aexec_module(self, module):
"""an async exec_module method permitting top-level await."""
# there is some redundancy in this approach, but it starts getting asynchier.
nodes = self.source_to_nodes(self.get_data(self.path))
# iterate through the nodes and compile individual statements
for node in nodes.body:
co = bootstrap._call_with_frames_removed(
compile,
ast.Module([node], []),
module.__file__,
"exec",
flags=ALLOW_TOP_LEVEL_AWAIT,
)
if inspect.CO_COROUTINE in _get_co_flags_set(co.co_flags):
# when something async is encountered we compile it with the single flag
# this lets us use eval to retrieve our coroutine.
co = bootstrap._call_with_frames_removed(
compile,
ast.Interactive([node]),
module.__file__,
"single",
flags=ALLOW_TOP_LEVEL_AWAIT,
)
await bootstrap._call_with_frames_removed(
eval, co, module.__dict__, module.__dict__
)
else:
bootstrap._call_with_frames_removed(exec, co, module.__dict__, module.__dict__)
def code(self, str):
return dedent(str)
@classmethod
@_requires_builtin
def is_package(cls, fullname):
"""Return False as built-in modules are never packages."""
if "." not in fullname:
return True
return super().is_package(fullname)
def __enter__(self):
path_id, loader_id, details = get_loader_index(".py")
for _, e in details:
if all(map(e.__contains__, self.extensions)):
self._loader_hook_position = None
return self
else:
self._loader_hook_position = loader_id + 1
details.insert(self._loader_hook_position, (self.loader, self.extensions))
sys.path_hooks[path_id] = self.finder.path_hook(*details)
sys.path_importer_cache.clear()
return self
def __exit__(self, *excepts):
if self._loader_hook_position is not None:
path_id, details = get_loader_details()
details.pop(self._loader_hook_position)
sys.path_hooks[path_id] = self.finder.path_hook(*details)
sys.path_importer_cache.clear()
@classmethod
def load_file(cls, filename, main=True, **kwargs):
"""Import a notebook as a module from a filename.
dir: The directory to load the file from.
main: Load the module in the __main__ context.
>>> assert Notebook.load_file('foo.ipynb')
"""
name = main and "__main__" or filename
loader = cls(name, str(filename), **kwargs)
spec = FileModuleSpec(name, loader, origin=loader.path)
module = loader.create_module(spec)
loader.exec_module(module)
return module
@classmethod
def load_module(cls, module, main=False, **kwargs):
"""Import a notebook as a module.
main: Load the module in the __main__ context.
>>> assert Notebook.load_module('foo')
"""
from runpy import _run_module_as_main, run_module
with cls() as loader:
spec = find_spec(module)
module = spec.loader.create_module(spec)
if main:
sys.modules["__main__"] = module
module.__name__ = "__main__"
spec.loader.exec_module(module)
return module
@classmethod
def load_argv(cls, argv=None, *, parser=None):
"""load a module based on python arguments
load a notebook from its file name
>>> Notebook.load_argv("foo.ipynb --arg abc")
load the same notebook from a module alias.
>>> Notebook.load_argv("-m foo --arg abc")
"""
if parser is None:
parser = cls.get_argparser()
if argv is None:
from sys import argv
argv = argv[1:]
if isinstance(argv, str):
argv = shlex.split(argv)
module = cls.load_ns(parser.parse_args(argv))
if module is None:
return parser.print_help()
return module
@classmethod
def load_ns(cls, ns):
"""load a module from a namespace, used when loading module from sys.argv parameters."""
if ns.tasks:
# i don't quite know why we need to do this here, but we do. so don't move it
from doit.cmd_base import ModuleTaskLoader
from doit.doit_cmd import DoitMain
if ns.code:
with main_argv(sys.argv[0], ns.args):
result = cls.load_code(ns.code)
elif ns.module:
if ns.dir:
if ns.dir not in sys.path:
sys.path.insert(0, ns.dir)
elif "" in sys.path:
pass
else:
sys.path.insert(0, "")
with main_argv(ns.module, ns.args):
result = cls.load_module(ns.module, main=True)
elif ns.file:
where = Path(ns.dir, ns.file) if ns.dir else Path(ns.file)
with main_argv(str(where), ns.args):
result = cls.load_file(ns.file)
else:
return
if ns.tasks:
DoitMain(ModuleTaskLoader(result)).run(ns.args or ["help"])
return result
@classmethod
def load_code(cls, code, argv=None, mod_name=None, script_name=None, main=False):
"""load a module from raw source code"""
from runpy import _run_module_code
self = cls()
name = main and "__main__" or mod_name or "<raw code>"
return _dict_module(
_run_module_code(self.raw_to_source(code), mod_name=name, script_name=script_name)
)
@staticmethod
def get_argparser(parser=None):
from argparse import REMAINDER, ArgumentParser
if parser is None:
parser = ArgumentParser("importnb", description="run notebooks as python code")
parser.add_argument("file", nargs="?", help="run a file")
parser.add_argument("args", nargs=REMAINDER, help="arguments to pass to script")
parser.add_argument("-m", "--module", help="run a module")
parser.add_argument("-c", "--code", help="run raw code")
parser.add_argument("-d", "--dir", help="path to run script in")
parser.add_argument("-t", "--tasks", action="store_true", help="run doit tasks")
return parser
def comment(str):
return textwrap.indent(str, "# ")
class DefsOnly(ast.NodeTransformer):
INCLUDE = ast.Import, ast.ImportFrom, ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef
def visit_Module(self, node):
args = ([x for x in node.body if isinstance(x, self.INCLUDE)],)
if VERSION >= (3, 8):
args += (node.type_ignores,)
return ast.Module(*args)
class Notebook(Loader):
"""Notebook is a user friendly file finder and module loader for notebook source code.
> Remember, restart and run all or it didn't happen.
Notebook provides several useful options.
* Lazy module loading. A module is executed the first time it is used in a script.
"""
def markdown(self, str):
return quote(str)
def raw(self, str):
return comment(str)
def visit(self, nodes):
if self.include_non_defs:
return nodes
return DefsOnly().visit(nodes)
def code(self, str):
if self.no_magic:
if MAGIC.match(str):
return comment(str)
return super().code(str)
def source_to_nodes(self, source, path="<unknown>", *, _optimize=-1):
nodes = super().source_to_nodes(source, path)
if self.include_markdown_docstring:
nodes = update_docstring(nodes)
nodes = self.visit(nodes)
return ast.fix_missing_locations(nodes)
def raw_to_source(self, source):
"""transform a string from a raw file to python source."""
if self.path and self.path.endswith(".ipynb"):
# when we encounter notebooks we apply different transformers to the diff cell types
return LineCacheNotebookDecoder(
code=self.code, raw=self.raw, markdown=self.markdown
).decode(source, self.path)
# for a normal file we just apply the code transformer.
return self.code(source)
def _dict_module(ns):
m = ModuleType(ns.get("__name__"), ns.get("__doc__"))
m.__dict__.update(ns)
return m
@contextmanager
def main_argv(prog, args=None):
if args is not None:
args = [prog] + list(args)
prior, sys.argv = sys.argv, args
yield
if args is not None:
sys.argv = prior
try:
import IPython
from IPython.core.inputsplitter import IPythonInputSplitter
dedent = IPythonInputSplitter(
line_input_checker=False,
physical_line_transforms=[
IPython.core.inputsplitter.leading_indent(),
IPython.core.inputsplitter.ipy_prompt(),
IPython.core.inputsplitter.cellmagic(end_on_blank_line=False),
],
).transform_cell
except ModuleNotFoundError:
def dedent(body):
from textwrap import dedent, indent
if MAGIC.match(body):
return indent(body, "# ")
return dedent(body)
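# a minimal sketch of the public entry points, assuming a notebook named
# Untitled42.ipynb is importable from the current path:
#     from importnb import Notebook
#     with Notebook():
#         import Untitled42  # imported through the path hook
#     module = Notebook.load_file("Untitled42.ipynb")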
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,131
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/utils/pytest_importnb.py
|
# coding: utf-8
"""A `pytest` plugin for importing notebooks as modules and using standard test discovered.
The `AlternativeModule` is reusable. See `pidgin` for an example.
"""
from pathlib import Path
import pytest
from importnb import Notebook
def get_file_patterns(cls, parent):
for pat in parent.config.getini("python_files"):
for e in cls.loader().extensions:
yield "*" + pat.rstrip(".py") + e
class AlternativeModule(pytest.Module):
def _getobj(self):
return self.loader.load_file(str(self.path), False)
@classmethod
def pytest_collect_file(cls, parent, path):
if not parent.session.isinitpath(path):
for pat in get_file_patterns(cls, parent):
if path.fnmatch(pat):
break
else:
return
if hasattr(cls, "from_parent"):
return cls.from_parent(parent, path=Path(path))
return cls(path, parent)
class NotebookModule(AlternativeModule):
loader = Notebook
pytest_collect_file = NotebookModule.pytest_collect_file
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,132
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/decoder.py
|
import json
import linecache
import textwrap
from functools import partial
def quote(object, *, quotes="'''"):
if quotes in object:
quotes = '"""'
return quotes + object + "\n" + quotes
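# e.g. quote("a markdown cell") == "'''a markdown cell\n'''"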
from ._json_parser import Lark_StandAlone, Transformer, Tree
class Transformer(Transformer):
def __init__(
self,
markdown=quote,
code=textwrap.dedent,
raw=partial(textwrap.indent, prefix="# "),
**kwargs,
):
super().__init__(**kwargs)
for key in ("markdown", "code", "raw"):
setattr(self, "transform_" + key, locals().get(key))
def string(self, s):
return s[0].line, json.loads(s[0])
def item(self, s):
key = s[0][-1]
if key == "cells":
if not isinstance(s[-1], Tree):
return self.render(list(map(dict, s[-1])))
elif key in {"source", "text"}:
return key, s[-1]
elif key == "cell_type":
if isinstance(s[-1], tuple):
return key, s[-1][-1]
def array(self, s):
if s:
return s
return []
def object(self, s):
return [x for x in s if x is not None]
def render_one(self, kind, lines):
s = "".join(lines)
if not s.endswith(("\n",)):
s += "\n"
return getattr(self, f"transform_{kind}")(s)
def render(self, x):
body = []
for token in x:
t = token.get("cell_type")
try:
s = token["source"]
except KeyError:
s = token.get("text")
if s:
if not isinstance(s, list):
s = [s]
l, lines = s[0][0], [x[1] for x in s]
body.extend([""] * (l - len(body)))
lines = self.render_one(t, lines)
body.extend(lines.splitlines())
return "\n".join(body + [""])
class LineCacheNotebookDecoder(Transformer):
def __init__(
self,
markdown=quote,
code=textwrap.dedent,
raw=partial(textwrap.indent, prefix="# "),
**kwargs,
):
super().__init__(**kwargs)
for key in ("markdown", "code", "raw"):
setattr(self, "transform_" + key, locals().get(key))
def source_from_json_grammar(self, object):
return Lark_StandAlone(transformer=self).parse(object)
def decode(self, object, filename):
s = self.source_from_json_grammar(object)
if s:
source = s[0]
linecache.updatecache(filename)
if filename in linecache.cache:
linecache.cache[filename] = (
linecache.cache[filename][0],
linecache.cache[filename][1],
source.splitlines(True),
filename,
)
return source
return ""
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,133
|
deathbeds/importnb
|
refs/heads/main
|
/hatch_build.py
|
import os
import shlex
import sys
from functools import partial
from io import StringIO
from pathlib import Path
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class LarkStandAloneBuildHook(BuildHookInterface):
PLUGIN_NAME = "lark_standalone"
def initialize(self, version, build_data):
L = get_logger()
WIN = os.name == "nt"
L.info("converting json grammar to python")
python_parser = Path(self.root, "src/importnb/_json_parser.py")
if not python_parser.exists():
py = get_standalone()
python_parser.write_text(py)
build_data["artifacts"].append(
"/src/importnb/_json_parser.py"
) # it's really important to remember the preceding /
def get_logger():
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
return logger
def get_lark():
from lark.tools.standalone import build_lalr, lalr_argparser
return build_lalr(lalr_argparser.parse_args(["--propagate_positions", "src/json.g"]))[0]
def write(buffer, *lines):
buffer.writelines(map(str, lines or ["\n"]))
def get_standalone():
from lark.tools.standalone import gen_standalone
lark = get_lark()
python = StringIO()
gen_standalone(lark, partial(print, file=python))
return python.getvalue()
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,134
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/__init__.py
|
# coding: utf-8
__all__ = "Notebook", "reload", "imports"
def is_ipython():
from sys import modules
return "IPython" in modules
def get_ipython(force=True):
if force or is_ipython():
try:
from IPython import get_ipython
except ModuleNotFoundError:
return
shell = get_ipython()
if shell is None:
from IPython import InteractiveShell
shell = InteractiveShell.instance()
return shell
return None
import builtins
from ._version import __version__
from .loader import Notebook, reload
from .entry_points import imports
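# map json's literal names onto python so raw json pasted into notebook cells still evaluates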
builtins.true, builtins.false, builtins.null = True, False, None
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,135
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/finder.py
|
# coding: utf-8
"""# `sys.path_hook` modifiers
Many suggestions for importing notebooks use `sys.meta_paths`, but `importnb` relies on the `sys.path_hooks` to load any notebook in the path. `PathHooksContext` is a base class for the `importnb.Notebook` `SourceFileLoader`.
"""
import inspect
import sys
from importlib._bootstrap_external import FileFinder
from importlib.machinery import ModuleSpec
from pathlib import Path
class FileModuleSpec(ModuleSpec):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._set_fileattr = True
class FuzzySpec(FileModuleSpec):
def __init__(
self, name, loader, *, alias=None, origin=None, loader_state=None, is_package=None
):
super().__init__(
name,
loader,
origin=origin,
loader_state=loader_state,
is_package=is_package,
)
self.alias = alias
def fuzzy_query(str):
new = ""
for chr in str:
new += (not new.endswith("__") or chr != "_") and chr or ""
return new.replace("__", "*").replace("_", "?")
def fuzzy_file_search(path, fullname):
results = []
id, details = get_loader_details()
for ext in sum((list(object[1]) for object in details), []):
results.extend(Path(path).glob(fullname + ext))
"_" in fullname and results.extend(Path(path).glob(fuzzy_query(fullname) + ext))
return results
class FuzzyFinder(FileFinder):
"""Adds the ability to open file names with special characters using underscores."""
def find_spec(self, fullname, target=None):
"""Try to finder the spec and if it cannot be found, use the underscore starring syntax
to identify potential matches.
"""
spec = super().find_spec(fullname, target=target)
raw = fullname
if spec is None:
original = fullname
if "." in fullname:
original, fullname = fullname.rsplit(".", 1)
else:
original, fullname = "", original
if "_" in fullname:
# find any files using the fuzzy convention
files = fuzzy_file_search(self.path, fullname)
if files:
# sort by modification time and build the dotted name of the newest match
file = sorted(files, key=lambda x: x.stat().st_mtime, reverse=True)[0]
name = (original + "." + file.stem).lstrip(".")
spec = super().find_spec(name, target=target)
spec = spec and FuzzySpec(
spec.name,
spec.loader,
origin=spec.origin,
loader_state=spec.loader_state,
alias=raw,
is_package=bool(spec.submodule_search_locations),
)
return spec
def get_loader_details():
for id, path_hook in enumerate(sys.path_hooks):
try:
return (
id,
list(inspect.getclosurevars(path_hook).nonlocals["loader_details"]),
)
except (TypeError, KeyError):
continue
def get_loader_index(ext):
path_id, details = get_loader_details()
for i, (loader, exts) in enumerate(details):
if ext in exts:
return path_id, i, details
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,136
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/__main__.py
|
from . import Notebook
def main(argv=None):
"""a convenience function for running importnb as an application"""
Notebook.load_argv(argv)
return
if __name__ == "__main__":
main()
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,137
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/loaders.py
|
from .loader import Loader, SourceModule
from dataclasses import dataclass, field
from types import ModuleType
class DataModule(SourceModule):
def _repr_json_(self):
return self.data, dict(root=repr(self), expanded=False)
@dataclass
class DataStreamLoader(Loader):
"""an import loader for data streams"""
module_type: ModuleType = field(default=DataModule)
def exec_module(self, module):
with open(module.__file__, "rb") as file:
module.data = self.get_data_loader()(file)
return module
def get_data_loader(self):
raise NotImplementedError("load_data not implemented.")
@dataclass
class Json(DataStreamLoader):
"""an import loader for json files"""
extensions: tuple = field(default_factory=[".json"].copy)
def get_data_loader(self):
from json import load
return load
@dataclass
class Yaml(DataStreamLoader):
"""an import loader for yml and yaml"""
extensions: tuple = field(default_factory=[".yml", ".yaml"].copy)
def get_data_loader(self):
try:
from ruamel.yaml import safe_load
except ModuleNotFoundError:
from yaml import safe_load
# probably want an error message about how to fix this if we cant find yamls
return safe_load
@dataclass
class Toml(DataStreamLoader):
"""an import loader for toml"""
extensions: tuple = field(default_factory=[".toml"].copy)
def get_data_loader(self):
try:
from tomllib import load
except ModuleNotFoundError:
from tomli import load
return load
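# a hypothetical loader sketched in the same style to illustrate the pattern;
# csv support is an assumption for illustration, not part of the original module
@dataclass
class Csv(DataStreamLoader):
    """an import loader for csv files (illustrative sketch)"""
    extensions: tuple = field(default_factory=[".csv"].copy)
    def get_data_loader(self):
        # exec_module opens the file in binary mode, so wrap it for the csv reader
        import csv
        import io
        return lambda file: list(csv.reader(io.TextIOWrapper(file)))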
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,138
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/docstrings.py
|
# coding: utf-8
"""# Special handling of markdown cells as docstrings.
Modify the Python `ast` to assign docstrings to functions when they are preceded by a Markdown cell.
"""
import ast
"""# Modifying the `ast`
>>> assert isinstance(create_test, ast.Assign)
>>> assert isinstance(test_update, ast.Attribute)
"""
create_test = ast.parse("""__test__ = globals().get('__test__', {})""", mode="single").body[0]
test_update = ast.parse("""__test__.update""", mode="single").body[0].value
str_nodes = (ast.Str,)
"""`TestStrings` is an `ast.NodeTransformer` that captures `str_nodes` in the `TestStrings.strings` object.
```ipython
>>> assert isinstance(ast.parse(TestStrings().visit(ast.parse('"Test me"'))), ast.Module)
```
"""
class TestStrings(ast.NodeTransformer):
strings = None
def visit_Module(self, module):
"""`TestStrings.visit_Module` initializes the capture. After all the nodes are visit we append `create_test and test_update`
to populate the `"__test__"` attribute.
"""
self.strings = []
module = self.visit_body(module)
module.body += (
[create_test]
+ [
ast.copy_location(
ast.Expr(
ast.Call(
func=test_update,
args=[
ast.Dict(
keys=[ast.Str("string-{}".format(node.lineno))],
values=[node],
)
],
keywords=[],
)
),
node,
)
for node in self.strings
]
if self.strings
else []
)
return module
def visit_body(self, node):
"""`TestStrings.visit_body` visits nodes with a `"body"` attibute and extracts potential string tests."""
body = []
if (
node.body
and isinstance(node.body[0], ast.Expr)
and isinstance(node.body[0].value, str_nodes)
):
body.append(node.body.pop(0))
node.body = body + [
(self.visit_body if hasattr(object, "body") else self.visit)(object)
for object in node.body
]
return node
def visit_Expr(self, node):
"""`TestStrings.visit_Expr` append the `str_nodes` to `TestStrings.strings` to append to the `ast.Module`."""
if isinstance(node.value, str_nodes):
self.strings.append(
ast.copy_location(ast.Str(node.value.s.replace("\n```", "\n")), node)
)
return node
def update_docstring(module):
from functools import reduce
module.body = reduce(markdown_docstring, module.body, [])
return TestStrings().visit(module)
docstring_ast_types = ast.ClassDef, ast.FunctionDef
try:
docstring_ast_types += (ast.AsyncFunctionDef,)
except AttributeError:
...
def markdown_docstring(nodes, node):
if (
len(nodes) > 1
and str_expr(nodes[-1])
and isinstance(node, docstring_ast_types)
and not str_expr(node.body[0])
):
node.body.insert(0, nodes.pop())
return nodes.append(node) or nodes
def str_expr(node):
return isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)
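# a small sketch of the transform, assuming a module in which a string
# expression immediately precedes a function definition:
#     src = "import ast\n'a markdown docstring'\ndef f(): pass"
#     tree = update_docstring(ast.parse(src))
#     # the string is moved into f's body, becoming its docstring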
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,139
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/utils/ipython.py
|
# coding: utf-8
import ast
import json
import os
import sys
from pathlib import Path
from IPython import get_ipython, paths
from IPython.core import profiledir
def get_config(profile="default"):
profile_dir = profiledir.ProfileDir()
try:
profile = profile_dir.find_profile_dir_by_name(paths.get_ipython_dir(), profile)
except profiledir.ProfileDirError:
os.makedirs(paths.get_ipython_dir(), exist_ok=True)
profile = profile_dir.create_profile_dir_by_name(paths.get_ipython_dir(), profile)
return Path(profile.location, "ipython_config.json")
def load_config():
location = get_config()
try:
with location.open() as file:
config = json.load(file)
except (FileNotFoundError, getattr(json, "JSONDecodeError", ValueError)):
config = {}
if "InteractiveShellApp" not in config:
config["InteractiveShellApp"] = {}
if "extensions" not in config["InteractiveShellApp"]:
config["InteractiveShellApp"]["extensions"] = []
return config, location
def install(project="importnb"):
"""install the importnb extension"""
config, location = load_config()
projects = [project]
if not installed(project):
config["InteractiveShellApp"]["extensions"].extend(projects)
with location.open("w") as file:
json.dump(config, file)
print("""✅ {}""".format(projects))
def installed(project):
config, location = load_config()
return project in config.get("InteractiveShellApp", {}).get("extensions", [])
def uninstall(project="importnb"):
"""uninstall the importnb extension"""
config, location = load_config()
projects = [project]
config["InteractiveShellApp"]["extensions"] = [
ext for ext in config["InteractiveShellApp"]["extensions"] if ext not in projects
]
with location.open("w") as file:
json.dump(config, file)
print("""❌ {}.""".format(projects))
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,140
|
deathbeds/importnb
|
refs/heads/main
|
/docs/test_cli.py
|
from pathlib import Path
from subprocess import check_call
from sys import executable, path, version_info
from pytest import importorskip
from importnb import Notebook
GTE10 = version_info.major == 3 and version_info.minor >= 10
HERE = Path(__file__).parent
path.insert(0, str(HERE))
UNTITLED = HERE / "Untitled42.ipynb"
ref = Notebook.load_file(UNTITLED)
REF = Path(ref.__file__)
def get_prepared_string(x):
if GTE10:
x = x.replace("optional arguments:", "options:")
return x.replace("\r", "")
def cli_test(command):
def delay(f):
def wrapper(tmp_path):
from shlex import split
path = tmp_path / "tmp"
with path.open("w") as file:
check_call([executable] + split(command), stderr=file, stdout=file)
out = path.read_text()
match = get_prepared_string(
f.__doc__.format(UNTITLED=UNTITLED.as_posix(), SLUG=ref.magic_slug)
)
if "UserWarning: Attempting to work in a virtualenv." in out:
out = "".join(out.splitlines(True)[2:])
assert out == match
return wrapper
return delay
@cli_test("-m importnb")
def test_usage():
"""\
usage: importnb [-h] [-m MODULE] [-c CODE] [-d DIR] [-t] [file] ...
run notebooks as python code
positional arguments:
file run a file
args arguments to pass to script
optional arguments:
-h, --help show this help message and exit
-m MODULE, --module MODULE
run a module
-c CODE, --code CODE run raw code
-d DIR, --dir DIR path to run script in
-t, --tasks run doit tasks
"""
@cli_test(rf"-m importnb -d {UNTITLED.parent.as_posix()} {UNTITLED.as_posix()}")
def test_file():
"""\
i was printed from {UNTITLED} and my name is __main__
{SLUG}
the parser namespace is Namespace(args=None)
"""
@cli_test(rf"-m importnb -d {UNTITLED.parent.as_posix()} -m {UNTITLED.stem}")
def test_module():
"""\
i was printed from {UNTITLED} and my name is __main__
{SLUG}
the parser namespace is Namespace(args=None)
"""
@cli_test("-m importnb -c '{}'")
def test_empty_code():
""""""
@cli_test(rf"-m importnb -d {UNTITLED.parent.as_posix()} -t {UNTITLED.as_posix()} list")
def test_doit():
"""\
i was printed from {UNTITLED} and my name is __main__
{SLUG}
echo this the docstring for the `echo` task that echos hello.
"""
importorskip("doit")
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,141
|
deathbeds/importnb
|
refs/heads/main
|
/src/importnb/entry_points.py
|
from .loader import Loader
from dataclasses import dataclass, field
from types import MethodType
from contextlib import contextmanager, ExitStack
def _get_importnb_entry_points():
try:
from importlib.metadata import entry_points
yield from entry_points()["importnb"]
except ModuleNotFoundError:
from importlib_metadata import entry_points
yield from entry_points(group="importnb")
__all__ = ("imports",)
ENTRY_POINTS = dict()
def get_importnb_entry_points():
"""discover the known importnb entry points"""
global ENTRY_POINTS
for ep in _get_importnb_entry_points():
ENTRY_POINTS[ep.name] = ep.value
return ENTRY_POINTS
def loader_from_alias(alias):
"""load an attribute from a module using the entry points value specificaiton"""
from importlib import import_module
from operator import attrgetter
module, _, member = alias.rpartition(":")
module = import_module(module)
return attrgetter(member)(module)
def loader_from_ep(alias):
"""discover a loader for an importnb alias or vaue"""
if ":" in alias:
return loader_from_alias(alias)
if not ENTRY_POINTS:
get_importnb_entry_points()
if alias in ENTRY_POINTS:
return loader_from_alias(ENTRY_POINTS[alias])
raise ValueError(f"{alias} is not a valid loader alias.")
@contextmanager
def imports(*names):
"""a shortcut to importnb loaders through entrypoints"""
types = set()
with ExitStack() as stack:
for name in names:
t = loader_from_ep(name)
if t not in types:
stack.enter_context(t())
types.add(t)
yield stack
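# e.g. enter several loaders at once through their entry point aliases:
#     with imports("json", "yaml", "toml"):
#         import my_settings  # hypothetical module, resolved by file suffix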
def list_aliases():
"""list the entry points associated with importnb"""
if not ENTRY_POINTS:
get_importnb_entry_points()
return list(ENTRY_POINTS)
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,142
|
deathbeds/importnb
|
refs/heads/main
|
/docs/test_importnb.py
|
import ast
import inspect
import json
import linecache
import sys
from importlib import reload
from importlib.util import find_spec
from pathlib import Path
from shutil import copyfile, rmtree
from types import FunctionType
from pytest import fixture, mark, raises, skip
import importnb
from importnb import Notebook, get_ipython, imports
from importnb.loader import VERSION
CLOBBER = ("Untitled42", "my_package", "__42", "__ed42", "__d42")
HERE = locals().get("__file__", None)
HERE = (Path(HERE).parent if HERE else Path()).absolute()
sys.path.insert(0, str(HERE))
IPY = bool(get_ipython())
print(88, IPY)
ipy = mark.skipif(not IPY, reason="""Not IPython.""")
@fixture(scope="session")
def ref():
return Notebook.load_file(HERE / "Untitled42.ipynb")
@fixture
def clean():
yield
unimport(CLOBBER)
@fixture
def package(ref):
package = HERE / "my_package"
package.mkdir(parents=True, exist_ok=True)
target = package / "my_module.ipynb"
copyfile(ref.__file__, package / target)
yield package
target.unlink()
rmtree(package)
@fixture
def minified(ref):
minified = Path(HERE / "minified.ipynb")
with open(ref.__file__) as f, open(minified, "w") as o:
json.dump(json.load(f), o, separators=(",", ":"))
yield
minified.unlink()
@fixture
def untitled_py(ref):
py = Path(ref.__file__).with_suffix(".py")
py.touch()
yield
py.unlink()
def cant_reload(m):
with raises(ImportError):
reload(m)
def unimport(ns):
"""unimport a module namespace"""
from sys import modules, path_importer_cache
for module in [x for x in modules if x.startswith(ns)]:
del modules[module]
path_importer_cache.clear()
def test_version():
assert importnb.__version__
def test_ref(ref):
assert ref.__file__.endswith(".ipynb")
def test_finder():
assert not find_spec("Untitled42")
with Notebook():
assert find_spec("Untitled42")
def test_basic(clean, ref):
with Notebook():
import Untitled42
assert ref is not Untitled42
assert Untitled42.__file__ == ref.__file__
assert isinstance(Untitled42.__loader__, Notebook)
with Notebook():
assert reload(Untitled42)
def test_load_module(clean, ref):
m = Notebook.load_module("Untitled42")
assert m.__file__ == ref.__file__
cant_reload(m)
def test_load_module_package(clean, package):
m = Notebook.load_module("my_package.my_module")
def test_load_file(clean, ref):
m = Notebook.load_file("docs/Untitled42.ipynb")
assert ref.__file__.endswith(str(Path(m.__file__)))
cant_reload(m)
def test_load_code(clean):
assert Notebook.load_code(""), "can't load an empty notebook"
body = Path("docs/Untitled42.ipynb").read_text()
m = Notebook.load_code(body)
cant_reload(m)
def test_package(clean, package):
from shutil import copyfile
with Notebook():
import my_package.my_module
assert hasattr(my_package, "__path__")
with raises(ModuleNotFoundError):
# we can't find a spec for a notebook without the notebook loader context
reload(my_package.my_module)
with Notebook():
reload(my_package.my_module)
@mark.parametrize("magic", [True, False])
def test_no_magic(capsys, clean, magic, ref):
with Notebook(no_magic=not magic):
import Untitled42
stdout = capsys.readouterr()[0]
if IPY:
if magic:
assert ref.magic_slug.rstrip() in stdout
else:
assert ref.magic_slug.rstrip() not in stdout
@mark.parametrize("defs", [True, False])
def test_defs_only(defs, ref):
known_defs = [
k for k, v in vars(ref).items() if not k[0] == "_" and isinstance(v, (type, FunctionType))
]
not_defs = [k for k, v in vars(ref).items() if not k[0] == "_" and isinstance(v, (str,))]
with Notebook(include_non_defs=not defs):
import Untitled42
assert all(hasattr(Untitled42, k) for k in known_defs)
if defs:
assert not any(hasattr(Untitled42, k) for k in not_defs)
def test_fuzzy_finder(clean, ref, capsys):
outs = []
with Notebook():
import __ed42
outs.append(capsys.readouterr())
import __d42
outs.append(capsys.readouterr())
import __42
outs.append(capsys.readouterr())
import __42
outs.append(capsys.readouterr())
import __42 as nb
outs.append(capsys.readouterr())
assert outs[0] == outs[1] == outs[2]
assert not any([outs[3].out, outs[3].err] + [outs[4].out, outs[4].err])
def test_fuzzy_finder_conflict(clean, ref):
try:
with Notebook():
spec = find_spec("__d42")
assert find_spec("__d42")
new = HERE / "d42.ipynb"
new.write_text("{}")
spec2 = find_spec("__d42")
assert spec.loader.path != spec2.loader.path
finally:
with Notebook():
new.unlink()
spec3 = find_spec("__d42")
assert spec.loader.path == spec3.loader.path
def test_minified_json(ref, minified):
with Notebook():
import minified as minned
example_source = inspect.getsource(minned.function_with_a_markdown_docstring)
assert example_source
def test_docstrings(clean, ref):
with Notebook():
import Untitled42 as nb
assert nb.function_with_a_markdown_docstring.__doc__
assert nb.class_with_a_python_docstring.__doc__
assert nb.function_with_a_markdown_docstring.__doc__
assert nb.__doc__ == ref.__doc__
assert (
nb.function_with_a_markdown_docstring.__doc__
== ref.function_with_a_markdown_docstring.__doc__
)
assert nb.class_with_a_python_docstring.__doc__ == ref.class_with_a_python_docstring.__doc__
assert nb.class_with_a_markdown_docstring.__doc__ == ref.class_with_a_markdown_docstring.__doc__
assert ast.parse(
inspect.getsource(nb.function_with_a_markdown_docstring)
), """The source is invalid"""
# the line cache isnt json, it is python
with raises(getattr(json, "JSONDecodeError", ValueError)):
json.loads("".join(linecache.cache[nb.__file__][2]))
assert inspect.getsource(nb).strip() == "".join(linecache.cache[nb.__file__][2]).strip()
def test_python_file_takes_precedent(clean, ref, untitled_py):
with Notebook():
import Untitled42
assert Untitled42.__file__.endswith(".py")
def test_lazy(capsys, clean):
"""Use stdout to test this depsite there probably being a better way"""
with Notebook(lazy=True):
import Untitled42 as module
assert not capsys.readouterr()[0], capsys.readouterr()[0]
module.slug, "The function gets executed here"
assert capsys.readouterr()[0]
@ipy
def test_import_ipy():
"""import ipy scripts, this won't really work without ipython."""
with Notebook():
import ascript
assert ascript.msg
@ipy
def test_cli(clean):
with Notebook():
import Untitled42 as module
__import__("subprocess").check_call(
"ipython -m {}".format(module.__name__).split(), cwd=str(Path(module.__file__).parent)
)
__import__("subprocess").check_call(
"ipython -m importnb -- {}".format(module.__file__).split(),
cwd=str(Path(module.__file__).parent),
)
@mark.skipif(VERSION < (3, 8), reason="async not supported in 3.7")
def test_top_level_async():
with Notebook():
import async_cells
def test_data_loaders(pytester):
some_random_data = {"top": [{}]}
import json, ruamel.yaml as yaml, tomli_w, io
sys.path.insert(0, str(pytester._path))
pytester.makefile(".json", json_data=json.dumps(some_random_data))
pytester.makefile(".toml", toml_data=tomli_w.dumps(some_random_data))
y = io.StringIO()
yaml.safe_dump(some_random_data, y)
pytester.makefile(".yaml", yaml_data=y.getvalue())
with imports("json", "yaml", "toml"):
import json_data, yaml_data, toml_data
assert json_data.__file__.endswith(".json")
assert toml_data.__file__.endswith(".toml")
assert yaml_data.__file__.endswith(".yaml")
|
{"/src/importnb/loader.py": ["/src/importnb/__init__.py", "/src/importnb/decoder.py", "/src/importnb/docstrings.py", "/src/importnb/finder.py"], "/src/importnb/__init__.py": ["/src/importnb/loader.py", "/src/importnb/entry_points.py"], "/src/importnb/__main__.py": ["/src/importnb/__init__.py"], "/src/importnb/loaders.py": ["/src/importnb/loader.py"], "/src/importnb/entry_points.py": ["/src/importnb/loader.py"]}
|
25,150
|
j-f-st/bondlog
|
refs/heads/master
|
/boards/models.py
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from markdownx.models import MarkdownxField
from markdownx.utils import markdownify
# for displaying tweets
class Post(models.Model):
# id = AutoField(primary_key=True) # added automatically, so no explicit definition is needed
tweet_id = models.CharField(max_length=25,primary_key=True) #primary_key=True,null blank
tweet_words = models.TextField(max_length=140)
tweet_date = models.DateTimeField(default=timezone.now)
tweet_origin = models.URLField(max_length=200,null=True)
blog_published_date = models.DateTimeField(blank=True,null=True)
blog_update_date = models.DateTimeField(blank=True,null=True)
blog_content = MarkdownxField('content', help_text='markdown format',max_length=1000,null=True) # microblog body
blog_category = models.CharField(max_length=25,null=True,blank=True) # blog category
blog_tag = models.CharField(max_length=25,null=True,blank=True) # blog tag
def publish(self):
self.blog_published_date = timezone.now()
self.save()
def __str__(self):
return self.tweet_words
def blog_content_to_markdown(self):
return markdownify(self.blog_content)
|
{"/boards/admin.py": ["/boards/models.py"], "/boards/views.py": ["/boards/models.py", "/boards/forms.py"]}
|
25,151
|
j-f-st/bondlog
|
refs/heads/master
|
/boards/forms.py
|
from django import forms
class SearchForm(forms.Form):
keyword = forms.CharField(label='search', max_length=40)
|
{"/boards/admin.py": ["/boards/models.py"], "/boards/views.py": ["/boards/models.py", "/boards/forms.py"]}
|
25,152
|
j-f-st/bondlog
|
refs/heads/master
|
/boards/config.py
|
CONSUMER_KEY = ""
CONSUMER_SECRET =""
ACCESS_TOKEN =""
ACCESS_TOKEN_SECRET =""
TWITTER_USERNAME = ""
SECRET_KEY = "karikarikari"
|
{"/boards/admin.py": ["/boards/models.py"], "/boards/views.py": ["/boards/models.py", "/boards/forms.py"]}
|
25,153
|
j-f-st/bondlog
|
refs/heads/master
|
/boards/admin.py
|
from django.contrib import admin
# use the models module
from .models import Post
admin.site.index_title = ''
class PostAdmin(admin.ModelAdmin):
list_display = ('tweet_id', 'tweet_words', 'tweet_date')
search_fields = ['tweet_words']
list_filter = ('tweet_date', 'blog_category', 'blog_tag')
list_per_page = 30
ordering = ['-tweet_date']
admin.site.register(Post,PostAdmin)
|
{"/boards/admin.py": ["/boards/models.py"], "/boards/views.py": ["/boards/models.py", "/boards/forms.py"]}
|
25,154
|
j-f-st/bondlog
|
refs/heads/master
|
/bondlog/urls.py
|
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# import views so the URL patterns below can use them
from boards import views
urlpatterns = [
path('admin/', admin.site.urls),
# run the twitter import (to be moved under admin later)
path('gettweet', views.gettweet, name='gettweet'),
# home page
path('', views.home, name='home'),
path('detail/<int:key>/', views.detail, name='detail'),
#markdownx
path('markdownx/', include('markdownx.urls')),
path('search', views.search),
]
urlpatterns += static(
settings.STATIC_URL,
document_root=settings.STATIC_ROOT
)
urlpatterns += static(
settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT
)
|
{"/boards/admin.py": ["/boards/models.py"], "/boards/views.py": ["/boards/models.py", "/boards/forms.py"]}
|
25,155
|
j-f-st/bondlog
|
refs/heads/master
|
/boards/views.py
|
from django.shortcuts import render, redirect, get_object_or_404
#twitter
from requests_oauthlib import OAuth1Session
import time, calendar
import datetime
import json
import re
import os
import requests
import sys, codecs
## debug flag ##
DEBUG = False
## ---------- ##
try:
from . import config
except ImportError:
pass
## -----------##
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.buffer)
from django.http.response import HttpResponse
from .models import Post
# refresh tweets (only the most recent ~200 can be fetched, as of 2019-05; more is possible with extra work) -> redirect to the admin screen
def gettweet(request):
# C_KEY = os.environ["CONSUMER_KEY"]
C_KEY = config.CONSUMER_KEY
# C_SECRET = os.environ["CONSUMER_SECRET"]
C_SECRET = config.CONSUMER_SECRET
# A_KEY = os.environ["ACCESS_TOKEN"]
A_KEY = config.ACCESS_TOKEN
# A_SECRET = os.environ["ACCESS_TOKEN_SECRET"]
A_SECRET = config.ACCESS_TOKEN_SECRET
# T_UN = os.environ["TWITTER_USERNAME"]
T_UN = config.TWITTER_USERNAME
###===================================================###
# C_KEY C_SECRET A_KEY A_SECRET T_UN  above: heroku / below: local #
###===================================================###
twitter = OAuth1Session(C_KEY,C_SECRET,A_KEY,A_SECRET) # authenticate
url = "https://api.twitter.com/1.1/statuses/user_timeline.json" #タイムライン取得エンドポイント
params = {'count': 300}
req = twitter.get(url, params = params)
if req.status_code == 200:
timelines = json.loads(req.text) # the fetched timeline, as a list
limit = req.headers['x-rate-limit-remaining']
# update section
postchecks = set(Post.objects.values_list('tweet_id', flat=True))
# a flat set of existing tweet ids, so the membership check below works
for tweet in timelines:
# register only tweets whose id is not already in the DB
if tweet['id_str'] not in postchecks:
data = Post()
data.tweet_words = tweet['text']
data.tweet_date = YmdHMS(tweet['created_at'])
data.tweet_id = tweet['id_str']
# set the URL linking back to the original tweet
data.tweet_origin = 'https://twitter.com/' + T_UN + '/status/' + tweet['id_str']
data.save()
return redirect('admin/')
# on error
else:
return redirect('admin/')
# helper to convert twitter's datetime format into one usable by the DB
def YmdHMS(created_at):
time_utc = time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y')
unix_time = calendar.timegm(time_utc)
time_local = time.localtime(unix_time)
return time.strftime('%Y-%m-%d %H:%M:%S', time_local)
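# e.g. YmdHMS('Wed May 01 12:34:56 +0000 2019') returns the equivalent local
# time, '2019-05-01 21:34:56' on a JST (UTC+9) machine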
# home page view
def home(request):
posts = Post.objects.all()
# search box
searchForm = SearchForm()
return render(request, 'home.html', {'posts': posts ,'searchform': searchForm }) # 2nd arg: template; 3rd: context dict whose keys are the template variables
# individual article
def detail(request ,key):
# fetch the requested article created in admin
detail = get_object_or_404(Post, tweet_id=key)
# search box
searchForm = SearchForm()
return render(request, 'detail.html',{'detail': detail,'searchform': searchForm})
# search handler
from .forms import SearchForm
def search(request):
searchForm = SearchForm(request.POST)
message = ''
if searchForm.is_valid():
keyword = searchForm.cleaned_data['keyword']
posts = Post.objects.filter(blog_content__icontains=keyword)
if len(posts) == 0:
message = 'No search results (back to the article list)'
else:
searchForm = SearchForm()
posts = Post.objects.all()
message = 'Please check your input (back to the article list)'
reaction = {
'message': message,
'posts': posts, # post data
'searchform': searchForm, # search form
}
return render(request, 'home.html', reaction)
|
{"/boards/admin.py": ["/boards/models.py"], "/boards/views.py": ["/boards/models.py", "/boards/forms.py"]}
|
25,191
|
dnelson27/NetworkMapper
|
refs/heads/master
|
/network_mapper.py
|
from datamodels import NetworkHop
from scapy.layers.inet import UDP, traceroute
import argparse
from sys import argv
from re import match
def mapRoute(dest: str):
result, err = traceroute(target=dest)
print(result, err)
def initializeArgparser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
usage="%(prog)s [options] ip_or_dns_host",
description="Trace a route and create a graph of the hops!"
)
parser.add_argument("-t", "--type")
parser.add_argument("-d", "--domain-name")
parser.add_argument("-i" ,"--ip-address")
return parser
if __name__ == "__main__":
parser = initializeArgparser()
args = parser.parse_args()
if not args.type:
print("Please choose an action type")
if args.type == "map":
if args.domain_name:
targetDomain = args.domain_name
print(f"Mapping route to domain {targetDomain}")
elif args.ip_address:
targetIp = args.ip_address
if match("^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$", targetIp):
print(f"Mapping route to IPv4 address {targetIp}")
mapRoute(targetIp)
else:
print(f"Your entered IP did not pass verification, please enter a valid IPv4 address.")
|
{"/network_mapper.py": ["/datamodels.py"]}
|
25,192
|
dnelson27/NetworkMapper
|
refs/heads/master
|
/datamodels.py
|
from neomodel import StructuredNode, RelationshipTo
from neomodel.properties import StringProperty, ArrayProperty, IntegerProperty, uuid
class Ipv4Address:
def __init__(self, ipAddress: str) -> None:
self.address = ipAddress.split("/")[0]
self.cidr = ipAddress.split("/")[1]
self.mask = self.getSubnetMask(self.cidr)
self.addressBits = list(self.binaryConvert(int(i)) for i in self.address.split("."))
self.subnetMaskBits = list(self.binaryConvert(int(i)) for i in self.mask.split("."))
# self.networkId = self.getNetworkId()
def binaryConvert(self, num):
binaryNumbers = [128, 64, 32, 16, 8, 4, 2, 1]
resultNumber = ""
for i in binaryNumbers:
if num >= i:
resultNumber += "1"
num -= i
else:
resultNumber += "0"
return resultNumber
def getSubnetMask(self, cidr):
bits = [0, 128, 192, 224, 240, 248, 252, 254, 255]
cidr = int(cidr)
mask = ""
if cidr >= 8 and cidr < 16:
cidr -= 8
mask = "255." + str(bits[cidr]) + ".0.0"
elif cidr >= 16 and cidr < 24:
cidr -= 16
mask = "255.255." + str(bits[cidr]) + ".0"
elif cidr >= 24 and cidr <= 32:
cidr -= 24
mask = "255.255.255." + str(bits[cidr])
else:
print("Invalid Cidr")
return mask
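# e.g. Ipv4Address("192.168.1.10/24") yields mask "255.255.255.0" and
# addressBits ["11000000", "10101000", "00000001", "00001010"]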
class NetworkHop(StructuredNode):
sysId = StringProperty(unique_index=True, default=uuid)
ipv4Address = StringProperty()
dnsName = StringProperty()
|
{"/network_mapper.py": ["/datamodels.py"]}
|
25,204
|
sebastiangarren/blogz
|
refs/heads/master
|
/main.py
|
from flask import Flask, request, redirect, render_template, flash, session, make_response
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import cgi
from app import app, db
from models import Author, Blog_post
#Initialize Classes
#Global functions
#require login
@app.before_request
def require_login():
allowed_routes = ['login', 'signup', 'blog']
if request.endpoint not in allowed_routes and 'username' not in session:
return redirect ('/login')
#return all posts indiscriminately and with extreme prejudice
def get_blog_posts():
posts = Blog_post.query.all()
return posts
#return all users indiscriminately and with extreme prejudice
def get_users():
users = Author.query.all()
return users
#display username if logged in
def get_session_name():
if session:
session_name = session['username']
return session_name
@app.route('/blog', methods=['POST', 'GET'])
def blog():
#1) The action on blogz routes to '/new_post'
#2) blogs queries database
#3) and returns all the Blog_post.name and Blog_post.body
if request.method == 'POST':
post_name = request.form['new_post_name']
post_body = request.form['new_post_body']
username = session['username']
author = Author.query.filter_by(username=username).first()
pub_date = datetime.utcnow()
new_post = Blog_post(post_name, post_body, author.id, pub_date)
if new_post.body == '' or new_post.title == '':
flash("Every post must have text in the title and the body.", "error")
return render_template("/new_post.html", new_post_name=post_name, new_post_body=post_body, session_name=get_session_name())
else:
db.session.add(new_post)
db.session.commit()
return redirect('/blog?id=' + str(new_post.id))
#for each type of args I need to render a different type of page
if request.args.get('id'):
post_number = request.args['id']
post = Blog_post.query.filter_by(id=post_number).first()
user_id = post.author_id
user = Author.query.filter_by(id=user_id).first()
return render_template('post_page.html', post=post, user=user)
if request.args.get('user'):
user_number = request.args['user']
posts = Blog_post.query.filter_by(author_id=user_number).all()
user = Author.query.filter_by(id=user_number).first()
return render_template('SingleUser.html', posts=posts, user=user, session_name=get_session_name())
return render_template('blogz.html', posts=get_blog_posts(), users=get_users(), session_name=get_session_name())
@app.route('/new_post', methods=['POST', 'GET'])
def new_post():
#new_post function only needs to return new_post.html
return render_template("new_post.html", session_name=get_session_name())
@app.route('/signup', methods=['POST', 'GET'])
def signup():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
verify = request.form['verify']
existing_user = Author.query.filter_by(username=username).first()
if not existing_user and password == verify and len(password) >= 3:
new_user = Author(username, password)
db.session.add(new_user)
db.session.commit()
session['username'] = username
return redirect('/new_post')
elif existing_user:
flash('That username already exists. Try a different one.', 'error')
if len(password) < 3:
flash('Your password must be three characters or longer.', 'error')
if password != verify:
flash('Passwords do not match.', 'error')
return render_template('signup.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
user = Author.query.filter_by(username=username).first()
#check validity
if user and user.password == password:
session['username'] = username
return redirect("/new_post")
elif not user:
flash("Invalid username.", "error")
else:
if password != user.password:
flash("Username and password do not match.", "error")
return render_template('login.html')
return render_template('login.html')
#Render a list of users for index.html, click user name to see user's posts.
@app.route('/')
def index():
return render_template('index.html', users=get_users(), session_name=get_session_name())
#logout function handles POST to /logout, delete username from session, redirect to blog
@app.route('/logout')
def logout():
del session['username']
return redirect('/blog')
if __name__ == '__main__':
app.run()
#signup.html is a signup page
#login.html is a standard route
#index.html
#singleUser.html
#
#new route handlers signup, login, index
#
#For /login page:
# User enters a username that is stored in the database with the correct password
# and is redirected to the /newpost page with their username being stored in a session.
# User enters a username that is stored in the database
# with an incorrect password and is redirected to the /login page with a message that their password is incorrect.
# User tries to login with a username that is not stored in the database
# and is redirected to the /login page with a message that this username does not exist.
# User does not have an account and clicks "Create Account" and is directed to the /signup page.
# For /signup page:
# User enters new, valid username, a valid password, and
# verifies password correctly and is redirected to the
# '/newpost' page with their username being stored in a session.
# User leaves any of the username, password, or verify
# fields blank and gets an error message that one or more fields are invalid.
# User enters a username that already exists and gets an error message that username already exists.
# User enters different strings into the password and
# verify fields and gets an error message that the passwords do not match.
# User enters a password or username less than
# 3 characters long and gets either an invalid username or an invalid password message.
|
{"/main.py": ["/models.py"]}
|
25,205
|
sebastiangarren/blogz
|
refs/heads/master
|
/models.py
|
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from app import app, db
class Author(db.Model):
__tablename__ = 'author'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(120))
password = db.Column(db.String(120))
def __init__(self,username,password):
self.username = username
self.password = password
class Blog_post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120))
body = db.Column(db.Text)
datetime = db.Column(db.DateTime)
author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
def __init__(self, title, body, author_id, post_datetime=None):
    self.title = title
    self.body = body
    self.author_id = author_id
    # Avoid shadowing the imported datetime class with the parameter name;
    # otherwise datetime.utcnow() is unreachable when None is passed.
    if post_datetime is None:
        post_datetime = datetime.utcnow()
    self.datetime = post_datetime
def __repr__(self):
return '<Blog_post %r>' % self.title
|
{"/main.py": ["/models.py"]}
|
25,283
|
Malbec21/Social-Network
|
refs/heads/master
|
/socialnetwork/posts/management/commands/activate_bot.py
|
from django.core.management import BaseCommand
from ..bot.run import SocialNetworkBot
class Command(BaseCommand):
help = 'Runs a bot that demonstrates the system according to the rules defined in settings.'
def handle(self, *args, **options):
    try:
        social_network_bot = SocialNetworkBot()
        social_network_bot.run_bot()
        self.stdout.write(
            self.style.SUCCESS('Bot finished its activity! Check the results at http://localhost:8000/')
        )
    except Exception as e:
        # Report failures on the command's error stream so callers can detect them.
        self.stderr.write(self.style.ERROR(str(e)))
|
{"/socialnetwork/posts/management/commands/activate_bot.py": ["/socialnetwork/posts/management/bot/run.py"], "/socialnetwork/posts/serializers.py": ["/socialnetwork/posts/models.py"], "/socialnetwork/posts/api.py": ["/socialnetwork/posts/models.py", "/socialnetwork/posts/serializers.py"], "/socialnetwork/posts/urls.py": ["/socialnetwork/posts/api.py"]}
|
25,284
|
Malbec21/Social-Network
|
refs/heads/master
|
/socialnetwork/posts/management/bot/run.py
|
from random import randint
from uuid import uuid4
from posts.models import Post, User
from socialnetwork.settings import NUMBER_OF_USERS, MAX_POST_PER_USER, MAX_LIKES_PER_USER
class SocialNetworkBot:
def __init__(self):
self.number_of_users = NUMBER_OF_USERS
self.max_post_per_user = MAX_POST_PER_USER
self.max_likes_per_user = MAX_LIKES_PER_USER
self.user_list = []
self.post_list = []
def __create_users(self):
    for _ in range(self.number_of_users):
        user_obj = User(username=str(uuid4())[:6])
        user_obj.save()
        self.user_list.append(user_obj)
def __create_posts(self):
    for user in self.user_list:
        for _ in range(self.max_post_per_user):
            post_obj = Post(
                title='Test',
                content='test test test',
                author=user,
                author_name=user.username,
            )
            post_obj.save()
            self.post_list.append(post_obj)
def __like_posts(self):
    # At most one like per (post, user) pair: repeated like() calls for the
    # same user would inflate like_count without changing users_liked, so
    # the inner repeat loop is reduced to a single random decision.
    for post in self.post_list:
        for user in self.user_list:
            if randint(0, self.max_likes_per_user) > 0:
                post.like(user)
def run_bot(self):
self.__create_users()
self.__create_posts()
self.__like_posts()
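# Bulk-insert sketch (not used by run_bot above): bulk_create issues a
# single INSERT for all users instead of one query per user, at the cost
# of skipping per-object save() signals.
def create_users_bulk(bot: SocialNetworkBot) -> None:
    users = [User(username=str(uuid4())[:6]) for _ in range(bot.number_of_users)]
    bot.user_list = list(User.objects.bulk_create(users))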
|
{"/socialnetwork/posts/management/commands/activate_bot.py": ["/socialnetwork/posts/management/bot/run.py"], "/socialnetwork/posts/serializers.py": ["/socialnetwork/posts/models.py"], "/socialnetwork/posts/api.py": ["/socialnetwork/posts/models.py", "/socialnetwork/posts/serializers.py"], "/socialnetwork/posts/urls.py": ["/socialnetwork/posts/api.py"]}
|
25,285
|
Malbec21/Social-Network
|
refs/heads/master
|
/socialnetwork/posts/migrations/0001_initial.py
|
# Generated by Django 3.1.7 on 2021-02-23 13:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('image', models.ImageField(default='posts_images/default.png', upload_to='posts_images')),
('like_count', models.IntegerField(default=0)),
('author_name', models.CharField(max_length=150, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
('users_liked', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
],
),
]
|
{"/socialnetwork/posts/management/commands/activate_bot.py": ["/socialnetwork/posts/management/bot/run.py"], "/socialnetwork/posts/serializers.py": ["/socialnetwork/posts/models.py"], "/socialnetwork/posts/api.py": ["/socialnetwork/posts/models.py", "/socialnetwork/posts/serializers.py"], "/socialnetwork/posts/urls.py": ["/socialnetwork/posts/api.py"]}
|
25,286
|
Malbec21/Social-Network
|
refs/heads/master
|
/socialnetwork/posts/serializers.py
|
from rest_framework import serializers
from .models import Post
# Post Serializer
class PostSerializer(serializers.ModelSerializer):
image = serializers.ImageField(required=False)
class Meta:
model = Post
fields = "__all__"
@staticmethod
def prepare_posts_response(serializer_data, user_id):
    # Annotate each serialized post with whether the requesting user liked it.
    for post in serializer_data:
        post['is_liked'] = user_id in post['users_liked']
|
{"/socialnetwork/posts/management/commands/activate_bot.py": ["/socialnetwork/posts/management/bot/run.py"], "/socialnetwork/posts/serializers.py": ["/socialnetwork/posts/models.py"], "/socialnetwork/posts/api.py": ["/socialnetwork/posts/models.py", "/socialnetwork/posts/serializers.py"], "/socialnetwork/posts/urls.py": ["/socialnetwork/posts/api.py"]}
|
25,287
|
Malbec21/Social-Network
|
refs/heads/master
|
/socialnetwork/posts/api.py
|
from rest_framework import viewsets, permissions
from rest_framework.decorators import action
from rest_framework.response import Response
from .models import Post
from .serializers import PostSerializer
# Post ViewSet
class PostViewSet(viewsets.ModelViewSet):
queryset = Post.objects.all()
permission_classes = [
permissions.IsAuthenticated
]
serializer_class = PostSerializer
def list(self, request, *args, **kwargs):
serializer = PostSerializer(self.get_queryset(), many=True)
self.get_serializer_class().prepare_posts_response(serializer.data, self.request.user.id)
return Response(serializer.data)
@action(detail=False, methods=['get'], url_path='self')
def get_user_posts(self, request):
serializer = PostSerializer(request.user.posts.all(), many=True)
self.get_serializer_class().prepare_posts_response(serializer.data, self.request.user.id)
return Response(serializer.data)
@action(detail=True, methods=['post'], url_path='like')
def like_post(self, request, pk=None):
    # get_object() returns a 404 for a missing pk instead of raising DoesNotExist.
    post = self.get_object()
    if request.user not in post.users_liked.all():
        post.like(request.user)
        return Response({"status": "liked"})
    return Response({"status": "user has already liked this post"})
@action(detail=True, methods=['delete'], url_path='dislike')
def dislike_post(self, request, pk=None):
    post = self.get_object()
    if request.user in post.users_liked.all():
        post.dislike(request.user)
        return Response({"status": "disliked"})
    return Response({"status": "user has not liked this post"})
def perform_create(self, serializer):
    # One save sets both fields; saving twice would write the row a second
    # time just to set author_name.
    serializer.save(author=self.request.user, author_name=self.request.user.username)
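# Smoke-test sketch for the like endpoint (a sketch, assuming the
# DefaultRouter in urls.py mounts this viewset at /api/posts/; `user` and
# `post` are existing model instances supplied by the caller):
def smoke_test_like_endpoint(user, post):
    from rest_framework.test import APIClient
    client = APIClient()
    client.force_authenticate(user=user)
    # Hits POST /api/posts/<pk>/like/ as the given user.
    return client.post('/api/posts/%d/like/' % post.pk)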
|
{"/socialnetwork/posts/management/commands/activate_bot.py": ["/socialnetwork/posts/management/bot/run.py"], "/socialnetwork/posts/serializers.py": ["/socialnetwork/posts/models.py"], "/socialnetwork/posts/api.py": ["/socialnetwork/posts/models.py", "/socialnetwork/posts/serializers.py"], "/socialnetwork/posts/urls.py": ["/socialnetwork/posts/api.py"]}
|
25,288
|
Malbec21/Social-Network
|
refs/heads/master
|
/socialnetwork/posts/models.py
|
from django.contrib.auth.models import User
from django.db import models
class Post(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
image = models.ImageField(upload_to='posts_images', default='posts_images/default.png')
like_count = models.IntegerField(default=0)
users_liked = models.ManyToManyField(User, blank=True)
author = models.ForeignKey(User, related_name="posts", on_delete=models.CASCADE, null=True)
author_name = models.CharField(max_length=150, null=True)
created_at = models.DateTimeField(auto_now_add=True)
def like(self, user):
self.like_count += 1
self.users_liked.add(user)
self.save()
def dislike(self, user):
self.like_count -= 1
self.users_liked.remove(user)
self.save()
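def recount_likes(post: Post) -> None:
    # Maintenance sketch, not part of the original app: like_count is stored
    # redundantly and can drift from users_liked if like()/dislike() are
    # called out of balance; this resyncs the counter from the relation.
    post.like_count = post.users_liked.count()
    post.save(update_fields=['like_count'])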
|
{"/socialnetwork/posts/management/commands/activate_bot.py": ["/socialnetwork/posts/management/bot/run.py"], "/socialnetwork/posts/serializers.py": ["/socialnetwork/posts/models.py"], "/socialnetwork/posts/api.py": ["/socialnetwork/posts/models.py", "/socialnetwork/posts/serializers.py"], "/socialnetwork/posts/urls.py": ["/socialnetwork/posts/api.py"]}
|
25,289
|
Malbec21/Social-Network
|
refs/heads/master
|
/socialnetwork/posts/urls.py
|
from django.urls import path, include
from rest_framework import routers
from .api import PostViewSet
router = routers.DefaultRouter()
router.register('api/posts', PostViewSet, 'posts')
urlpatterns = [
path('', include(router.urls)),
]
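# For reference, this registration yields the standard DRF routes
# /api/posts/ (list, create) and /api/posts/<pk>/ (retrieve, update,
# delete), plus the custom actions defined in api.py: /api/posts/self/,
# /api/posts/<pk>/like/, and /api/posts/<pk>/dislike/.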
|
{"/socialnetwork/posts/management/commands/activate_bot.py": ["/socialnetwork/posts/management/bot/run.py"], "/socialnetwork/posts/serializers.py": ["/socialnetwork/posts/models.py"], "/socialnetwork/posts/api.py": ["/socialnetwork/posts/models.py", "/socialnetwork/posts/serializers.py"], "/socialnetwork/posts/urls.py": ["/socialnetwork/posts/api.py"]}
|
25,297
|
mksmtn/test_job
|
refs/heads/master
|
/src/Math.py
|
from decimal import Decimal, ROUND_DOWN, ROUND_UP
from typing import Iterable, List, Tuple
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
from src.Data import Data
# Type aliases
Value = float
Coordinates = Tuple[int, int]
Distance = Tuple[Value, Coordinates]
# End Type aliases
class EuclideanDistance:
"""Tools for working with Euclidean distances
After instantiation stores all Euclidean distances in a priority queue
(min heap).
"""
@staticmethod
def calc_euclidean_distance(vector_0: Iterable[float], vector_1: Iterable[float]) -> float:
array_0 = np.array(vector_0)
array_1 = np.array(vector_1)
return np.linalg.norm(array_0 - array_1)
def __init__(self, data: Data):
self._data = data
self._max = (float('-inf'), (0, 0))
self._min = (float('inf'), (0, 0))
self._find_min_max()
@property
def max(self) -> Distance:
"""Return maximum Euclidean distance
:return: biggest distance between vectors
"""
return self._max
@property
def min(self) -> Distance:
"""Return minimum Euclidean distance
:return: smallest distance between vectors
"""
return self._min
def histogram(self, file: str, step: float = 0.1):
"""Draw a histogram to file
:param file: file name where the result should be saved
:param step: X-axis step
"""
xlabels = self._get_xlabels(Decimal(str(step)))
yvalues = self._get_yvalues(xlabels)
plt.bar(xlabels, yvalues)
plt.xlabel('Distance')
plt.ylabel('Count')
plt.xticks(rotation=45)
plt.gca().xaxis.set_major_locator(mticker.MultipleLocator(2))
plt.savefig(file)
def _distances_generator(self):
outer_counter = -1
for row_tuple_0 in self._data.data_frame.itertuples(index=False):
inner_counter = -1
outer_counter += 1
for row_tuple_1 in self._data.data_frame.itertuples(index=False):
inner_counter += 1
same_vector = outer_counter == inner_counter
already_calculated = outer_counter > inner_counter
are_pair_good = not same_vector and not already_calculated
if are_pair_good:
value = EuclideanDistance.calc_euclidean_distance(row_tuple_0, row_tuple_1)
coordinates = (outer_counter, inner_counter)
distance = (value, coordinates)
yield distance
def _find_min_max(self):
for distance in self._distances_generator():
if distance[0] < self._min[0]:
self._min = distance
if distance[0] > self._max[0]:
self._max = distance
def _get_xlabels(self, step: Decimal) -> List[str]:
"""Return X-axis labels for histogram"""
xlabels = []
first_label = Decimal(str(self._min[0])).quantize(step, rounding=ROUND_DOWN)
last_label = Decimal(str(self._max[0])).quantize(step, rounding=ROUND_UP)
current_label = first_label
while current_label <= last_label:
xlabels.append(str(current_label))
current_label += step
assert xlabels[-1] == str(last_label)
return xlabels
def _get_yvalues(self, xlabels: List[str]) -> List[float]:
"""Return Y-axis values for histogram"""
yvalues = [0 for _ in xlabels]
length = len(xlabels)
for distance in self._distances_generator():
for i in range(length - 1):
min_border = float(xlabels[i])
max_border = float(xlabels[i+1])
if min_border <= distance[0] < max_border:
yvalues[i] += 1
return yvalues
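# The nested counters in _distances_generator above enumerate each
# unordered row pair exactly once; itertools.combinations expresses the
# same traversal directly. A behavior-equivalent sketch, not used by the
# class (it materializes the rows, trading memory for a single pass):
from itertools import combinations

def iter_pair_distances(data: Data):
    rows = list(data.data_frame.itertuples(index=False))
    for (i, row_0), (j, row_1) in combinations(enumerate(rows), 2):
        yield EuclideanDistance.calc_euclidean_distance(row_0, row_1), (i, j)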
|
{"/src/Math.py": ["/src/Data.py"], "/tests/test_data.py": ["/src/Config.py", "/src/Data.py"], "/tests/test_math.py": ["/src/Data.py", "/src/Math.py"], "/src/Data.py": ["/src/Config.py"], "/main.py": ["/src/Config.py", "/src/Data.py", "/src/Math.py"]}
|
25,298
|
mksmtn/test_job
|
refs/heads/master
|
/tests/test_data.py
|
import os
import unittest
from random import randint
from src.Config import Config
from src.Data import Data
class TestData(unittest.TestCase):
r = randint(0, 99999999)
file = str(r) + '_test.csv'
nrows = 999
ncols = 49
min_border = -1.0
max_border = 1.0
config = Config(
file=file,
nrows=nrows,
ncols=ncols,
)
@classmethod
def setUpClass(cls) -> None:
Data(TestData.config)
@classmethod
def tearDownClass(cls) -> None:
os.remove(TestData.file)
def test_if_file_is_created(self):
self.assertTrue(
os.path.isfile(TestData.file)
)
def test_if_numbers_are_valid(self):
data = Data(TestData.config)
mn = data.data_frame[data.data_frame.columns].min().min()
mx = data.data_frame[data.data_frame.columns].max().max()
self.assertTrue(mn > TestData.min_border and mx < TestData.max_border)
if __name__ == '__main__':
unittest.main()
|
{"/src/Math.py": ["/src/Data.py"], "/tests/test_data.py": ["/src/Config.py", "/src/Data.py"], "/tests/test_math.py": ["/src/Data.py", "/src/Math.py"], "/src/Data.py": ["/src/Config.py"], "/main.py": ["/src/Config.py", "/src/Data.py", "/src/Math.py"]}
|
25,299
|
mksmtn/test_job
|
refs/heads/master
|
/src/Config.py
|
class Config:
"""Holds some project-related variables"""
def __init__(self, file: str, nrows: int, ncols: int):
"""
:param file: Name of CSV file to write numbers
:param nrows: Number of rows in the generated CSV file
:param ncols: Number of columns in the generated CSV file
"""
self._file = file
self._nrows = nrows
self._ncols = ncols
@property
def file(self) -> str:
return self._file
@property
def nrows(self) -> int:
return self._nrows
@property
def ncols(self) -> int:
return self._ncols
|
{"/src/Math.py": ["/src/Data.py"], "/tests/test_data.py": ["/src/Config.py", "/src/Data.py"], "/tests/test_math.py": ["/src/Data.py", "/src/Math.py"], "/src/Data.py": ["/src/Config.py"], "/main.py": ["/src/Config.py", "/src/Data.py", "/src/Math.py"]}
|
25,300
|
mksmtn/test_job
|
refs/heads/master
|
/tests/test_math.py
|
from decimal import Decimal
from typing import List
import math
import unittest
from src.Data import Data
from src.Math import EuclideanDistance
class TestEuclideanDistance(unittest.TestCase):
def setUp(self) -> None:
self._data = Data(file='tests/test_vectors.csv')
self._euclidean_distance = EuclideanDistance(self._data)
def test_euclidean_distance_calculation(self):
tuple_0 = (1, 1, 1)
tuple_1 = (1, 1, 0)
returned_dist = EuclideanDistance.calc_euclidean_distance(tuple_0, tuple_1)
expected_dist = 1
self.assertEqual(returned_dist, expected_dist)
tuple_0 = (-3, 4, 0)
tuple_1 = (0, 8, 0)
returned_dist = EuclideanDistance.calc_euclidean_distance(tuple_0, tuple_1)
expected_dist = 5
self.assertEqual(returned_dist, expected_dist)
tuple_0 = (.45, -0.3, .34)
tuple_1 = (.43, .09, -0.9)
returned_dist = EuclideanDistance.calc_euclidean_distance(tuple_0, tuple_1)
expected_dist = 1.3000384609695206
self.assertAlmostEqual(returned_dist, expected_dist)
def test_min_distance(self):
returned_min_dist = self._euclidean_distance.min
expected_min_dist = (1, (0, 1))
self.assertEqual(returned_min_dist, expected_min_dist)
def test_max_distance(self):
returned_max_dist = self._euclidean_distance.max
expected_max_dist = (math.sqrt(51), (0, 3))
self.assertEqual(returned_max_dist, expected_max_dist)
def test_number_of_calculated_distances(self):
returned_number = len(list(
self._euclidean_distance._distances_generator()
))
expected_number = 6
self.assertEqual(returned_number, expected_number)
def test_xlabels(self):
expected_labels = ['1', '2', '3', '4', '5', '6', '7', '8']
returned_labels = self._get_xlabels()
self.assertListEqual(returned_labels, expected_labels)
def test_yvalues(self):
xlabels = self._get_xlabels()
expected_values = [1, 0, 0, 0, 3, 0, 2, 0]
returned_values = self._euclidean_distance._get_yvalues(xlabels)
self.assertListEqual(returned_values, expected_values)
def _get_xlabels(self) -> List[str]:
return self._euclidean_distance._get_xlabels(Decimal('1'))
if __name__ == '__main__':
unittest.main()
|
{"/src/Math.py": ["/src/Data.py"], "/tests/test_data.py": ["/src/Config.py", "/src/Data.py"], "/tests/test_math.py": ["/src/Data.py", "/src/Math.py"], "/src/Data.py": ["/src/Config.py"], "/main.py": ["/src/Config.py", "/src/Data.py", "/src/Math.py"]}
|
25,301
|
mksmtn/test_job
|
refs/heads/master
|
/src/Data.py
|
import numpy as np
import pandas as pd
from src.Config import Config
class Data:
"""Vectors' data from CSV file"""
_bothProvidedMsg = 'Either `config` or `file` argument should be provided, but not both'
_noneProvidedMsg = 'Neither `config` nor `file` argument is provided'
@staticmethod
def generate_csv(config: Config) -> pd.DataFrame:
    """Generate a CSV file of random numbers uniform on (-1, 1)"""
    # Sampling uniformly from (-1, 1) is equivalent to applying a random
    # sign to each rand() value, without the Python-level double loop.
    array = np.random.uniform(-1.0, 1.0, size=(config.nrows, config.ncols))
    df = pd.DataFrame(array)
    df.to_csv(config.file, header=False, index=False)
    return df
def __init__(self, config: Config = None, file: str = None):
"""Create or load CSV data
If `config` is provided, a new CSV file is created.
If `file` is provided, data gets loaded from that file.
If both are provided, `TypeError` is thrown.
:raises TypeError: if given both `config` and `file` arguments, or neither
"""
if config and file:
raise TypeError(Data._bothProvidedMsg)
if not config and not file:
raise TypeError(Data._noneProvidedMsg)
if file:
self._df = pd.read_csv(file, header=None, index_col=False)
elif config:
self._df = Data.generate_csv(config)
@property
def data_frame(self) -> pd.DataFrame:
return self._df
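if __name__ == '__main__':
    # Usage sketch (the file name below is illustrative, not part of the
    # project): providing `config` generates a fresh CSV, providing `file`
    # loads it back, and the two frames should agree in shape.
    demo_config = Config(file='demo.csv', nrows=10, ncols=3)
    generated = Data(config=demo_config)
    loaded = Data(file='demo.csv')
    assert generated.data_frame.shape == loaded.data_frame.shape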
|
{"/src/Math.py": ["/src/Data.py"], "/tests/test_data.py": ["/src/Config.py", "/src/Data.py"], "/tests/test_math.py": ["/src/Data.py", "/src/Math.py"], "/src/Data.py": ["/src/Config.py"], "/main.py": ["/src/Config.py", "/src/Data.py", "/src/Math.py"]}
|
25,302
|
mksmtn/test_job
|
refs/heads/master
|
/main.py
|
from sys import argv
from decimal import Decimal
from src.Config import Config
from src.Data import Data
from src.Math import EuclideanDistance
def main(args):
    file = 'examples/vectors.csv'
    # Guard against running with no mode argument.
    if len(args) < 2:
        print('usage: main.py g [nrows ncols] | main.py a')
        return
    if args[1] == 'g':
if len(args) == 4:
nrows = int(args[2])
ncols = int(args[3])
else:
nrows = 501
ncols = 11
config = Config(
file=file,
nrows=nrows,
ncols=ncols,
)
Data(config)
elif args[1] == 'a':
data = Data(file=file)
ed = EuclideanDistance(data)
with open('examples/summary.txt', 'w+') as handle:
handle.write('min:' + str(ed.min) + '\n')
handle.write('max:' + str(ed.max) + '\n')
handle.write('xlabels: ' + str(ed._get_xlabels(Decimal('0.1'))) + '\n')
ed.histogram('examples/hist.png')
if __name__ == '__main__':
main(argv)
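# Usage sketch, inferred from the branches above (not documented upstream):
#   python main.py g [nrows ncols]  -> generate examples/vectors.csv
#   python main.py a                -> write examples/summary.txt and examples/hist.png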
|
{"/src/Math.py": ["/src/Data.py"], "/tests/test_data.py": ["/src/Config.py", "/src/Data.py"], "/tests/test_math.py": ["/src/Data.py", "/src/Math.py"], "/src/Data.py": ["/src/Config.py"], "/main.py": ["/src/Config.py", "/src/Data.py", "/src/Math.py"]}
|
25,306
|
tweenty247/CompleteWebside
|
refs/heads/master
|
/Web_Laundry/migrations/0002_remove_formnames_subject.py
|
# Generated by Django 3.1.2 on 2020-10-03 07:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Web_Laundry', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='formnames',
name='subject',
),
]
|
{"/Web_Laundry/admin.py": ["/Web_Laundry/models.py"], "/Web_Laundry/views.py": ["/Web_Laundry/forms.py"], "/Web_Laundry/forms.py": ["/Web_Laundry/models.py"]}
|
25,307
|
tweenty247/CompleteWebside
|
refs/heads/master
|
/Web_Laundry/admin.py
|
from django.contrib import admin
from .models import FormNames, AppointmentSection, SubscribeForm
admin.site.register(FormNames)
admin.site.register(AppointmentSection)
admin.site.register(SubscribeForm)
|
{"/Web_Laundry/admin.py": ["/Web_Laundry/models.py"], "/Web_Laundry/views.py": ["/Web_Laundry/forms.py"], "/Web_Laundry/forms.py": ["/Web_Laundry/models.py"]}
|
25,308
|
tweenty247/CompleteWebside
|
refs/heads/master
|
/Web_Laundry/migrations/0005_auto_20201004_0024.py
|
# Generated by Django 3.1.2 on 2020-10-03 23:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Web_Laundry', '0004_subscribtionmodel'),
]
operations = [
migrations.CreateModel(
name='AppointmentSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=222)),
('numb', models.IntegerField()),
('services', models.CharField(max_length=222)),
('serviceman', models.CharField(max_length=222)),
],
),
migrations.RenameField(
model_name='subscribtionmodel',
old_name='Subscribe',
new_name='subscribe',
),
]
|
{"/Web_Laundry/admin.py": ["/Web_Laundry/models.py"], "/Web_Laundry/views.py": ["/Web_Laundry/forms.py"], "/Web_Laundry/forms.py": ["/Web_Laundry/models.py"]}
|
25,309
|
tweenty247/CompleteWebside
|
refs/heads/master
|
/Web_Laundry/views.py
|
from django.shortcuts import render, redirect
from .forms import ModelFormNames, AppointmentSectionFormNames, ModalSubscribeForm
from django.contrib import messages
def index(request):
    if request.method == 'POST':
        form = AppointmentSectionFormNames(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Your form was submitted successfully.')
            return redirect('index')
        messages.info(request, 'The form was not submitted; there was an error, please try again.')
    return render(request, 'index.html', {})
def contact_page(request):
    if request.method == 'POST':
        form = ModelFormNames(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('name')
            messages.success(request, 'Thank you, ' + username + ', for contacting us. We will get back to you.')
            return render(request, 'contact.html', {})
        form3 = ModalSubscribeForm(request.POST)
        if form3.is_valid():
            form3.save()
            messages.info(request, 'Your email is saved. We will keep you updated with the latest content about our site.')
    # Always return a response; a view that returns None on GET requests
    # makes Django raise ValueError.
    return render(request, 'contact.html', {})
def about(request):
return render(request, 'about.html', {})
def service(request):
return render(request, 'services.html', {})
def pricing(request):
return render(request, 'pricing.html', {})
def blog(request):
return render(request, 'blog.html', {})
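# Sketch for disambiguating the two forms contact_page accepts in a single
# POST: a hidden input in each template (hypothetical name 'form_type')
# would let the view validate only the form that was actually submitted:
#   kind = request.POST.get('form_type')
#   if kind == 'contact':
#       form = ModelFormNames(request.POST)
#   elif kind == 'subscribe':
#       form = ModalSubscribeForm(request.POST)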
|
{"/Web_Laundry/admin.py": ["/Web_Laundry/models.py"], "/Web_Laundry/views.py": ["/Web_Laundry/forms.py"], "/Web_Laundry/forms.py": ["/Web_Laundry/models.py"]}
|
25,310
|
tweenty247/CompleteWebside
|
refs/heads/master
|
/Web_Laundry/models.py
|
from django.db import models
class FormNames(models.Model):
name = models.CharField(max_length=200)
email = models.EmailField()
number = models.IntegerField(default=1234567)
message = models.TextField()
def __str__(self):
return self.name
class AppointmentSection(models.Model):
name = models.CharField(max_length=222)
numb = models.IntegerField()
services = models.CharField(max_length=222)
serviceman = models.CharField(max_length=222)
def __str__(self):
return self.name
class SubscribeForm(models.Model):
subscribe = models.EmailField()
def __str__(self):
return self.subscribe
|
{"/Web_Laundry/admin.py": ["/Web_Laundry/models.py"], "/Web_Laundry/views.py": ["/Web_Laundry/forms.py"], "/Web_Laundry/forms.py": ["/Web_Laundry/models.py"]}
|