hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
990bb4f11a86c646dac3a761a312f8d1164528ff | 10,032 | py | Python | Blender_CamGen/create.py | tswallen/Plenoptic-Simulation | 6fe2b694cfe0ca454ab2a3f5657b919e857290dc | [
"MIT"
] | null | null | null | Blender_CamGen/create.py | tswallen/Plenoptic-Simulation | 6fe2b694cfe0ca454ab2a3f5657b919e857290dc | [
"MIT"
] | null | null | null | Blender_CamGen/create.py | tswallen/Plenoptic-Simulation | 6fe2b694cfe0ca454ab2a3f5657b919e857290dc | [
"MIT"
] | null | null | null | import bpy
import math
from . import data
# create a flat lens surface
def flat_surface(half_lens_height, ior, position, name):
    bpy.ops.mesh.primitive_circle_add(vertices = 64, radius = half_lens_height, fill_type = 'TRIFAN', calc_uvs = False, location=(0,0,0), rotation = (0, -math.pi/2.0, 0))
bpy.ops.object.transform_apply()
bpy.context.active_object.location[0] = position
# rename object and move it to 'Objective' empty
bpy.context.active_object.name = name
bpy.context.active_object.parent = bpy.data.objects['Objective']
# add glass material
glass_material = bpy.data.materials['Glass Material'].copy()
glass_material.name = "Glass Material "+name
glass_material.node_tree.nodes['IOR'].outputs['Value'].default_value = ior
glass_material.node_tree.links.remove(glass_material.node_tree.nodes['Vector Transform.002'].outputs[0].links[0]) #delete normal recalculation for flat surface
bpy.context.active_object.data.materials.append(glass_material)
# get outer vertex for housing creation
bpy.ops.object.mode_set(mode="OBJECT")
outer_vertex = bpy.context.active_object.data.vertices[0]
for vertex in bpy.context.active_object.data.vertices:
if vertex.co.z > outer_vertex.co.z:
outer_vertex = vertex
return [outer_vertex.co.x, outer_vertex.co.y, outer_vertex.co.z]
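# Geometry note for lens_surface below: for a spherical cap of radius R cut at
# half-height h (h < R), the sagitta (cap depth along the optical axis) is
# s = R - sqrt(R^2 - h^2); for h >= R the surface degenerates to a full
# hemisphere and s = R.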
# create a spherical lens surface
def lens_surface(vertex_count_height, vertex_count_radial, surface_radius, half_lens_height, ior, position, name):
flip = False
if surface_radius < 0.0:
flip = True
surface_radius = -1.0 * surface_radius
# calculate sagitta
sagitta = 0.0
if(half_lens_height < surface_radius):
sagitta = surface_radius - math.sqrt(surface_radius*surface_radius - half_lens_height*half_lens_height)
else:
sagitta = surface_radius
# calculate number of vertices needed to get vertex_count_height vertices
ratio = math.asin(half_lens_height/surface_radius) / 3.1415926536
num_vertices = 2 * int(vertex_count_height/ratio+0.5)
# create circle
bpy.ops.mesh.primitive_circle_add(vertices = num_vertices, radius = surface_radius, location = (0,0,0))
bpy.ops.object.transform_apply()
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode="OBJECT")
bpy.context.active_object.data.vertices[0].co.x = 0.0
# select all vertices that should be deleted
for vertex in bpy.context.active_object.data.vertices:
if (vertex.co.y < surface_radius - sagitta) or (vertex.co.x > 0.0):
vertex.select = True
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.delete(type='VERT')
#select all remaining vertices to create a rotational surface
bpy.ops.mesh.select_all(action='SELECT')
# use the spin operator to create the rotational surface
bpy.ops.mesh.spin(steps = vertex_count_radial, angle = 2.0*math.pi, axis = (0,1,0))
# remove double vertices resulting from the spinning
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.remove_doubles(threshold=0.0)
# flip normals for a convex surface
if not flip:
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.flip_normals()
bpy.ops.object.mode_set(mode = "OBJECT")
# move to correct position
bpy.context.active_object.rotation_euler[0] = math.pi/2.0
if flip:
bpy.context.active_object.rotation_euler[1] = math.pi/2.0
else:
bpy.context.active_object.rotation_euler[1] = -math.pi/2.0
bpy.ops.object.transform_apply()
bpy.context.active_object.location[0] = position
# rename object and move it to 'Objective' empty
bpy.context.active_object.name = name
bpy.context.active_object.parent = bpy.data.objects['Objective']
# add glass material
glass_material = bpy.data.materials['Glass Material'].copy()
glass_material.name = "Glass Material "+name
glass_material.node_tree.nodes['IOR'].outputs['Value'].default_value = ior
bpy.context.active_object.data.materials.append(glass_material)
#return the outer vertex for housing creation
bpy.ops.object.mode_set(mode="OBJECT")
outer_vertex = bpy.context.active_object.data.vertices[0]
for vertex in bpy.context.active_object.data.vertices:
if vertex.co.z > outer_vertex.co.z:
outer_vertex = vertex
return [outer_vertex.co.x, outer_vertex.co.y, outer_vertex.co.z]
# create camera and objective housing as a rotational surface using the outer vertices of the lenses
def housing(outer_vertices, outer_lens_index, vertex_count_radial):
bpy.data.meshes['Housing Mesh'].vertices.add(len(outer_vertices)+3)
# add outer lens vertices to mesh
for i in range(0, len(outer_vertices)):
bpy.data.meshes['Housing Mesh'].vertices[i].co.x = outer_vertices[i][0] + data.objective[outer_lens_index[i]]['position']
bpy.data.meshes['Housing Mesh'].vertices[i].co.y = outer_vertices[i][1]
bpy.data.meshes['Housing Mesh'].vertices[i].co.z = outer_vertices[i][2]
# add camera housing vertices to mesh
bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)].co.x = bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)-1].co.x
bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)].co.y = bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)-1].co.y
bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)].co.z = 1.5 * bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)-1].co.z
bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)+1].co.x = bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)].co.x + max(3.0 * data.objective[len(data.objective)-1]['thickness'], bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)-1].co.x-bpy.data.meshes['Housing Mesh'].vertices[0].co.x)
bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)+1].co.y = bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)].co.y
bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)+1].co.z = bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)].co.z
bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)+2].co.x = bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)+1].co.x
bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)+2].co.y = 0.0
bpy.data.meshes['Housing Mesh'].vertices[len(outer_vertices)+2].co.z = 0.0
# connect vertices
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action='DESELECT')
bpy.context.view_layer.objects.active = bpy.data.objects['Objective Housing']
for i in range(0, len(outer_vertices)+2):
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode="OBJECT")
# select two vertices
bpy.data.objects['Objective Housing'].data.vertices[i].select = True
bpy.data.objects['Objective Housing'].data.vertices[i+1].select = True
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.edge_face_add()
# select all vertices to create a rotational surface
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.select_all(action='SELECT')
# use the spin operator to create the rotational surface
    bpy.ops.mesh.spin(steps = vertex_count_radial, angle = 2.0*math.pi, axis = (1,0,0), center = (0,0,0))
# remove double vertices resulting from the spinning
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.remove_doubles(threshold=0.0, use_unselected=True)
bpy.ops.object.mode_set(mode="OBJECT")
bpy.data.objects['Objective Housing'].display_type='WIRE'
# create aperture by using the difference modifier on a plane
def aperture():
# check if old opening exists and delete it
for current_object in bpy.data.objects:
current_object.select_set(False)
if current_object.name == 'Opening':
bpy.data.objects['Opening'].hide_viewport = False
bpy.data.objects['Opening'].hide_render = False
bpy.context.active_object.select_set(False)
current_object.select_set(True)
bpy.ops.object.delete()
# create circle
num_of_blades = bpy.data.scenes[0].camera_generator.prop_aperture_blades
bpy.ops.mesh.primitive_circle_add(vertices=num_of_blades,radius=0.5, location=(0,0,0))
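    # the opening is an n-gon whose vertex count equals the blade count, so the
    # cut-out approximates the polygonal shape of a real iris diaphragm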
# rename
bpy.context.active_object.name="Opening"
# rotate
    bpy.context.active_object.rotation_euler[0] = math.pi/2.0
    bpy.context.active_object.rotation_euler[2] = math.pi/2.0
# switch to edit mode, add face and extrude object
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.edge_face_add()
bpy.ops.mesh.extrude_edges_move()
bpy.ops.transform.translate(value=(0.01, 0, 0))
bpy.ops.mesh.edge_face_add()
    bpy.ops.mesh.select_all(action='SELECT')  # default action is 'TOGGLE'; select explicitly
bpy.ops.mesh.normals_make_consistent(inside=False)
# switch back to object mode and reset position
bpy.ops.object.mode_set(mode="OBJECT")
bpy.context.active_object.location[0] = -0.005
bpy.ops.object.transform_apply()
# move object to aperture empty
bpy.context.active_object.parent=bpy.data.objects['Aperture']
# set difference modifier of aperture plane to use new shape
bpy.data.objects['Aperture Plane'].modifiers['Difference'].object=bpy.data.objects['Opening']
bpy.data.objects['Opening'].hide_viewport=True
bpy.data.objects['Opening'].hide_render = True
# rescale opening according to currently set scaling
bpy.data.objects['Opening'].scale[1] = bpy.data.scenes[0].camera_generator.prop_aperture_size/1000.0
bpy.data.objects['Opening'].scale[2] = bpy.data.scenes[0].camera_generator.prop_aperture_size/1000.0
# rotate opening according to currently set angle
bpy.data.objects['Opening'].rotation_euler[0] = bpy.data.scenes[0].camera_generator.prop_aperture_angle/180.0*math.pi | 53.935484 | 324 | 0.718501 | 1,512 | 10,032 | 4.62963 | 0.128968 | 0.045 | 0.032857 | 0.072286 | 0.698 | 0.643571 | 0.610429 | 0.577 | 0.489857 | 0.468571 | 0 | 0.023114 | 0.150419 | 10,032 | 186 | 325 | 53.935484 | 0.798193 | 0.145734 | 0 | 0.414815 | 0 | 0 | 0.083324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02963 | false | 0 | 0.022222 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
990efde977aade2108c67c75a84ae6c564a508e6 | 3,321 | py | Python | recaptcha.py | m3ngineer/hospital-lawsuits | 1f71e4c7cdf0512592aa1f4ac5f03c7809149280 | [
"MIT"
] | null | null | null | recaptcha.py | m3ngineer/hospital-lawsuits | 1f71e4c7cdf0512592aa1f4ac5f03c7809149280 | [
"MIT"
] | null | null | null | recaptcha.py | m3ngineer/hospital-lawsuits | 1f71e4c7cdf0512592aa1f4ac5f03c7809149280 | [
"MIT"
] | null | null | null |
from python_anticaptcha import AnticaptchaClient, NoCaptchaTaskProxylessTask
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
import config
api_key = config.keys['api_key']
site_key = config.keys['site_key'] # grab from site
# url = 'https://www.google.com/recaptcha/api2/demo'
url = 'https://iapps.courts.state.ny.us/webcivilLocal/captcha'
# Launch Chrome
options = webdriver.ChromeOptions()
driver = webdriver.Chrome(options=options, executable_path=r'/Users/mattheweng/bin/chromedriver')
driver.get("https://iapps.courts.state.ny.us/webcivilLocal/LCSearch?param=I")
url = driver.current_url
sleep(30)
print(1, url)
# Perform AntiCaptcha task
client = AnticaptchaClient(api_key)
task = NoCaptchaTaskProxylessTask(url, site_key)
job = client.createTask(task)
print('Getting solution...')
job.join()
# Receive response
response = job.get_solution_response()
print("Received solution", response)
# Inject response in webpage
# driver.execute_script('document.getElementById("g-recaptcha-response").innerHTML = "%s"' % response)
driver.execute_script(
"arguments[0].style.display='inline'",
driver.find_element_by_xpath(
'//*[@id="g-recaptcha-response"]'
),
)
driver.execute_script(
'document.getElementById("g-recaptcha-response").innerHTML = "%s"'
% response
)
# Wait a moment to execute the script (just in case).
sleep(10)
# Press submit button
print('Submitting solution...')
# driver.find_element_by_id('captcha_form').submit()
WebDriverWait(driver, 10).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR,"iframe[name^='a-'][src^='https://www.google.com/recaptcha/api2/anchor?']")))
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//span[@class='recaptcha-checkbox goog-inline-block recaptcha-checkbox-unchecked rc-anchor-checkbox']/div[@class='recaptcha-checkbox-checkmark']"))).click()
'''
options = webdriver.ChromeOptions()
# options.add_argument("start-maximized")
# options.add_experimental_option("excludeSwitches", ["enable-automation"])
# options.add_experimental_option('useAutomationExtension', False)
options.add_argument("--user-data-dir=/Users/mattheweng/Library/Application Support/Google/Chrome/Default")
driver = webdriver.Chrome(options=options, executable_path=r'/Users/mattheweng/bin/chromedriver')
print(0)
driver.get("https://iapps.courts.state.ny.us/webcivilLocal/LCSearch?param=I")
print(1)
driver.implicitly_wait(10)
WebDriverWait(driver, 100).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR,"iframe[src^='https://www.google.com/recaptcha/api2/anchor?']")))
print(1.5)
driver.implicitly_wait(10)
WebDriverWait(driver, 100).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "span.recaptcha-checkbox.goog-inline-block.recaptcha-checkbox-unchecked.rc-anchor-checkbox"))).click()
print(1.7)
driver.switch_to_default_content()
# WebDriverWait(driver, 10).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR,"iframe[name^='a-'][src^='https://www.google.com/recaptcha/api2/anchor?']")))
print(2)
# WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//span[@id='recaptcha-anchor']"))).click()
'''
| 41.5125 | 227 | 0.776272 | 444 | 3,321 | 5.653153 | 0.337838 | 0.045418 | 0.022311 | 0.027092 | 0.492829 | 0.492829 | 0.480876 | 0.456972 | 0.456972 | 0.399602 | 0 | 0.011309 | 0.068052 | 3,321 | 79 | 228 | 42.037975 | 0.799677 | 0.112014 | 0 | 0.054054 | 0 | 0.054054 | 0.34358 | 0.169379 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.189189 | 0 | 0.189189 | 0.108108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99149757f665b2f4b37b4dcf179960c645b6306b | 2,437 | py | Python | demo/demo_dataset.py | lyuyangh/Cross-Attention-VizWiz-VQA | 853bfe480dac5bd1363f60c6b17e25134acdc2fa | [
"MIT"
] | 10 | 2021-07-25T12:44:34.000Z | 2022-03-23T04:07:12.000Z | demo/demo_dataset.py | lyuyangh/Cross-Attention-VizWiz-VQA | 853bfe480dac5bd1363f60c6b17e25134acdc2fa | [
"MIT"
] | null | null | null | demo/demo_dataset.py | lyuyangh/Cross-Attention-VizWiz-VQA | 853bfe480dac5bd1363f60c6b17e25134acdc2fa | [
"MIT"
] | 5 | 2021-07-25T12:44:35.000Z | 2022-03-26T16:51:44.000Z | import os
import sys
import h5py
import _pickle as cPickle
import numpy as np
import requests
import torch
from torch.utils.data import Dataset
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.dataset import Dictionary
MAX_QUES_SEQ_LEN = 26
NO_OBJECTS = 36
URL_FEATURE_SERVER = "http://127.0.0.1:6000/GetFeature"
class VQAFeatureDataset(Dataset):
def __init__(self, dataroot="data"):
super(VQAFeatureDataset, self).__init__()
self.dictionary = Dictionary.load_from_file(
os.path.join(dataroot, "glove", "dictionary.pkl")
)
ans2label_path = os.path.join(
dataroot,
"cache",
"trainval_ans2label.pkl",
)
label2ans_path = os.path.join(
dataroot,
"cache",
"trainval_label2ans.pkl",
)
self.ans2label = cPickle.load(open(ans2label_path, "rb"))
self.label2ans = cPickle.load(open(label2ans_path, "rb"))
self.num_ans_candidates = len(self.ans2label)
name = "demo"
self.img_id2idx = cPickle.load(
open(
os.path.join(
dataroot,
"imgids/%s%s_imgid2idx.pkl" % (name, 36),
),
"rb",
)
)
h5_dataroot = dataroot + "/Bottom-up-features-fixed"
h5_path = os.path.join(h5_dataroot, "%s%s.hdf5" % (name, "36"))
print("loading features from h5 file %s" % h5_path)
hf_file = h5py.File(h5_path, "r")
self.features = hf_file.get("image_features")
self.bboxes = hf_file.get("image_bb")
self.question = None
self.image_id = None
def set_input(self, image_id, question):
tokens = self.dictionary.tokenize(question, False)
tokens = tokens[:MAX_QUES_SEQ_LEN]
if len(tokens) < MAX_QUES_SEQ_LEN:
padding = [self.dictionary.padding_idx] * (
MAX_QUES_SEQ_LEN - len(tokens)
)
tokens = tokens + padding
self.question = torch.from_numpy(np.array(tokens))
self.image_id = image_id
def __getitem__(self, index):
return (
torch.from_numpy(self.features[self.img_id2idx[self.image_id]]),
torch.from_numpy(self.bboxes[self.img_id2idx[self.image_id]]),
self.question,
)
def __len__(self):
return 1
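# Minimal usage sketch (hypothetical image id and question; assumes the pickle,
# hdf5 and glove files referenced above exist under "data/"):
#
#   dataset = VQAFeatureDataset(dataroot="data")
#   dataset.set_input(image_id=123, question="What color is the cup?")
#   features, bboxes, question = dataset[0]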
| 30.08642 | 76 | 0.591301 | 291 | 2,437 | 4.707904 | 0.323024 | 0.035037 | 0.036496 | 0.037956 | 0.137226 | 0.086131 | 0.051095 | 0 | 0 | 0 | 0 | 0.023879 | 0.295445 | 2,437 | 80 | 77 | 30.4625 | 0.774024 | 0 | 0 | 0.073529 | 0 | 0 | 0.09643 | 0.038572 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.132353 | 0.029412 | 0.235294 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9915026ad17aa054b3b1dcf4b6564ca6416fe1c6 | 1,963 | py | Python | setup.py | the01/paps-realtime | 94fc40e196a46eab0ce1b8626dadca5f720f9995 | [
"MIT"
] | null | null | null | setup.py | the01/paps-realtime | 94fc40e196a46eab0ce1b8626dadca5f720f9995 | [
"MIT"
] | null | null | null | setup.py | the01/paps-realtime | 94fc40e196a46eab0ce1b8626dadca5f720f9995 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
__author__ = "d01 <Florian Jung>"
__email__ = "jungflor@gmail.com"
__copyright__ = "Copyright (C) 2015-16, Florian JUNG"
__license__ = "MIT"
__version__ = "0.1.2"
__date__ = "2016-03-31"
# Created: 2015-09-20 05:30
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
import os
import re
if sys.argv[-1] == "build":
os.system("python setup.py clean sdist bdist bdist_egg bdist_wheel")
def get_version():
"""
Parse the version information from the init file
"""
version_file = os.path.join("paps_realtime", "__init__.py")
initfile_lines = open(version_file, "rt").readlines()
version_reg = r"^__version__ = ['\"]([^'\"]*)['\"]"
for line in initfile_lines:
mo = re.search(version_reg, line, re.M)
if mo:
return mo.group(1)
raise RuntimeError(
u"Unable to find version string in {}".format(version_file)
)
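# get_version matches a line of the following form in paps_realtime/__init__.py
# (the value is shown here only as an example):
# __version__ = "0.1.2"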
version = get_version()
requirements = open("requirements.txt", "r").read().split("\n")
setup(
name="paps-realtime",
version=version,
description="Realtime browser display plugin for paps",
long_description="",
author=__author__,
author_email=__email__,
url="https://github.com/the01/paps-realtime",
packages=[
"paps_realtime"
],
install_requires=requirements,
include_package_data=True,
license=__license__,
keywords="paps audience participation display browser",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7"
]
)
| 27.263889 | 72 | 0.665818 | 236 | 1,963 | 5.194915 | 0.576271 | 0.032626 | 0.052202 | 0.042414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025657 | 0.205807 | 1,963 | 71 | 73 | 27.647887 | 0.760744 | 0.07998 | 0 | 0 | 0 | 0 | 0.337997 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018182 | false | 0 | 0.163636 | 0 | 0.2 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99175e305e91f74cfd9b97e4031f7f7524add878 | 6,639 | py | Python | nonlinear_data_fitting/demo_nonlinear_data_fitting.py | almostdutch/numerical-optimization-algorithms | cd6c1306cb04eccce62a74420323bda83058c1d6 | [
"MIT"
] | null | null | null | nonlinear_data_fitting/demo_nonlinear_data_fitting.py | almostdutch/numerical-optimization-algorithms | cd6c1306cb04eccce62a74420323bda83058c1d6 | [
"MIT"
] | 1 | 2021-06-02T10:07:26.000Z | 2021-06-03T10:23:46.000Z | nonlinear_data_fitting/demo_nonlinear_data_fitting.py | almostdutch/numerical-optimization-algorithms | cd6c1306cb04eccce62a74420323bda83058c1d6 | [
"MIT"
] | null | null | null | """
demo_nonlinear_data_fitting.py
Fit the model f(X, t) = A * sin(W * t + phi) to m data points (ti, yi) to find A, W and phi.
Solve the system of nonlinear equations f(X, ti) - yi = 0, i = 1, ..., m:
x1 * sin(x2 * t + x3) - y = 0, where X = [x1 = A, x2 = W, x3 = phi].T, t = [t1, t2, ..., tm].T and y = [y1, y2, ..., ym].T
Minimize the following objective function: (f(X, t) - y).T @ (f(X, t) - y)
Levenberg - Marquardt algorithm
General algorithm for any f(X) function. Requires residuals and jacobian.
X0 converges to X* for any X0.
Naive random walk algorithm
General algorithm for any f(X) function. Requires only residuals.
X0 converges to X* for any X0.
Simulated annealing algorithm
General algorithm for any f(X) function. Requires only residuals.
X0 converges to X* for any X0.
Particle swarm optimization algorithm
General algorithm for any f(X) function. Requires only residuals.
X0 converges to X* for any X0.
"""
import numpy as np
import matplotlib.pyplot as plt
from levenberg_marquardt_algorithm import levenberg_marquardt_algorithm
from naive_random_search_algorithm import naive_random_search_algorithm
from simulated_annealing_algorithm import simulated_annealing_algorithm
from particle_swam_optimization_algorithm import particle_swam_optimization_algorithm
from print_report import print_report
from plot_progress_y import plot_progress_y
import time
X = np.array([[0.75], [2.3], [-3]]);
t = np.arange(0, 20, 0.1);
# Nonlinear model to fit
func = lambda X : X[0] * np.sin(X[1] * t + X[2]);
# Plot the curve
fig = plt.figure();
y = func(X);
plt.plot(t, y)
plt.show()
func_residual = lambda X : (X[0] * np.sin(X[1] * t + X[2]) - y).reshape((t.size, 1));
func_jacobian = lambda X : np.array([[-np.sin(X[1] * t + X[2])], [-t * X[0] * np.cos(X[1] * t + X[2])], [-X[0] * np.cos(X[1] * t + X[2])]]).reshape((t.size, X.size));
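# Derivation of the Jacobian above: the residual for data point t_i is
# r_i(X) = x1*sin(x2*t_i + x3) - y_i, with partial derivatives
#   dr_i/dx1 = sin(x2*t_i + x3)
#   dr_i/dx2 = t_i*x1*cos(x2*t_i + x3)
#   dr_i/dx3 = x1*cos(x2*t_i + x3)
# func_jacobian stores the negated entries, i.e. the Jacobian of y - f(X, t);
# presumably levenberg_marquardt_algorithm pairs it with that residual
# convention, so the signs are consistent.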
# Objective function for naive random walk and simulated annealing algorithms
func_error = lambda X : np.linalg.norm((X[0] * np.sin(X[1] * t + X[2]) - y).reshape((t.size, 1)), axis = 0) ** 2;
# Objective function for particle swarm optimization algorithm (particles along row dimension, axis = 0)
func_error_ps = lambda X : np.linalg.norm(X[:, [0]] * np.sin(X[:, [1]] * t + X[:, [2]]) - y, axis = 1).reshape((-1, 1)) ** 2;
# Levenberg-Marquardt algorithm
print('***********************************************************************');
print('Levenberg - Marquardt algorithm');
N_iter_max = 1000;
tolerance_x = 10e-6;
tolerance_y = 10e-8;
X_lower = np.array([[0], [0], [-5]]); # X lower bound
X_upper = np.array([[2], [5], [0]]); # X upper bound
options = {'tolerance_x' : tolerance_x, 'tolerance_y' : tolerance_y, 'N_iter_max' : N_iter_max, 'x_lower' : X_lower,
'x_upper' : X_upper};
X0 = np.array([[0.1], [1], [-2]]);
start = time.time();
X, report = levenberg_marquardt_algorithm(X0, func_residual, func_jacobian, options);
end = time.time();
print_report(func_error, report);
# Plot path to X* for Y
algorithm_name = 'Levenberg - Marquardt algorithm';
plot_progress_y(algorithm_name, report);
print('Elapsed time [s]: %0.5f' % (end - start));
print('***********************************************************************\n');
# Naive random walk algorithm
print('***********************************************************************');
print('Naive random walk algorithm');
N_iter_max = 1000;
tolerance_x = 10e-8;
tolerance_y = 10e-8;
X_lower = np.array([[0], [0], [-5]]); # X lower bound
X_upper = np.array([[2], [5], [0]]); # X upper bound
alpha = 0.5; # step size
options = {'tolerance_x' : tolerance_x, 'tolerance_y' : tolerance_y, 'N_iter_max' : N_iter_max, 'x_lower' : X_lower,
'x_upper' : X_upper, 'alpha' : alpha};
X0 = np.array([[0.1], [1], [-2]]); # X0 = X_lower + (X_upper - X_lower) * np.random.rand(X_lower.size, 1);
start = time.time();
X, report = naive_random_search_algorithm(X0, func_error, options);
end = time.time();
print_report(func_error, report);
# Plot path to X* for Y
algorithm_name = 'Naive random walk algorithm';
plot_progress_y(algorithm_name, report);
print('Elapsed time [s]: %0.5f' % (end - start));
print('***********************************************************************\n');
# Simulated annealing algorithm
print('***********************************************************************');
print('Simulated annealing algorithm');
N_iter_max = 1000;
tolerance_x = 10e-8;
tolerance_y = 10e-8;
X_lower = np.array([[0], [0], [-5]]); # X lower bound
X_upper = np.array([[2], [5], [0]]); # X upper bound
alpha = 1.0; # step size
gamma = 1.5; # controls temperature decay, gamma > 0
options = {'tolerance_x' : tolerance_x, 'tolerance_y' : tolerance_y, 'N_iter_max' : N_iter_max, 'x_lower' : X_lower,
'x_upper' : X_upper, 'alpha' : alpha, 'gamma' : gamma};
X0 = np.array([[0.1], [1], [-2]]); # X0 = X_lower + (X_upper - X_lower) * np.random.rand(X_lower.size, 1);
start = time.time();
X, report = simulated_annealing_algorithm(X0, func_error, options);
end = time.time();
print_report(func_error, report);
# Plot path to X* for Y
algorithm_name = 'Simulated annealing algorithm';
plot_progress_y(algorithm_name, report);
print('Elapsed time [s]: %0.5f' % (end - start));
print('***********************************************************************\n');
# Particle swarm optimization algorithm
print('***********************************************************************');
print('Particle swarm optimization algorithm');
N_iter_max = 1000;
tolerance_x = 10e-8;
tolerance_y = 10e-8;
X_lower = np.array([[0], [0], [-5]]); # X lower bound
X_upper = np.array([[2], [5], [0]]); # X upper bound
d_lower = -1; # direction (aka velocity) lower bound
d_upper = 1; # direction (aka velocity) upper bound
N_ps = 10000; # number of particles
w = 0.9; # inertial constant, w < 1
c1 = 1.5; # cognitive/independent component, c1 ~ 2
c2 = 1.5; # social component, c2 ~ 2
alpha = 1; # step size
options = {'tolerance_x' : tolerance_x, 'tolerance_y' : tolerance_y, 'N_iter_max' : N_iter_max, 'x_lower' : X_lower,
'x_upper' : X_upper, 'alpha' : alpha, 'd_lower' : d_lower, 'd_upper' : d_upper, 'N_ps' : N_ps, 'w' : w, 'c1' : c1, 'c2' : c2};
start = time.time();
X, report = particle_swam_optimization_algorithm(func_error_ps, options);
end = time.time();
print_report(func_error, report);
# Plot path to X* for Y
algorithm_name = 'Particle swarm optimization algorithm';
plot_progress_y(algorithm_name, report);
print('Elapsed time [s]: %0.5f' % (end - start));
print('***********************************************************************\n'); | 45.163265 | 166 | 0.613044 | 991 | 6,639 | 3.942482 | 0.149344 | 0.033786 | 0.024571 | 0.007167 | 0.546199 | 0.535961 | 0.535961 | 0.524443 | 0.515741 | 0.497057 | 0 | 0.032944 | 0.149571 | 6,639 | 147 | 167 | 45.163265 | 0.659051 | 0.289501 | 0 | 0.534653 | 0 | 0 | 0.244283 | 0.123103 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.089109 | 0 | 0.089109 | 0.207921 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99192fe19d128cae27dbb4d9d6db56cd2b6e1efe | 2,885 | py | Python | libneko/checks.py | Natsurii/b00t | 09fac50434fd6692d6f1a07e8c8f4a5df20ce9d4 | [
"MIT"
] | 1 | 2018-09-22T23:58:55.000Z | 2018-09-22T23:58:55.000Z | libneko/checks.py | Natsurii/b00t | 09fac50434fd6692d6f1a07e8c8f4a5df20ce9d4 | [
"MIT"
] | null | null | null | libneko/checks.py | Natsurii/b00t | 09fac50434fd6692d6f1a07e8c8f4a5df20ce9d4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018-2019 Nekoka.tt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Custom checks you can use with commands.
Note:
This is designed to be used as a replacement for any checks in
:mod:`discord.ext.commands.core`.
Author:
Espy/Neko404NotFound
"""
__all__ = (
"NotGuildOwner",
"is_guild_owner",
"check",
"is_owner",
"bot_has_role",
"bot_has_any_role",
"bot_has_permissions",
"guild_only",
"is_nsfw",
"cooldown",
"has_any_role",
"has_role",
"has_permissions",
"BucketType",
)
from discord.ext import commands as _commands
from discord.ext.commands import (
bot_has_role,
guild_only,
cooldown,
has_any_role,
has_role,
)
from discord.ext.commands import (
check,
is_owner,
is_nsfw,
bot_has_any_role,
bot_has_permissions,
)
from discord.ext.commands import has_permissions, BucketType
class NotGuildOwner(_commands.CheckFailure):
"""
Raised if a command decorated with the ``@is_guild_owner()`` check is invoked by
someone other than the guild owner.
"""
def __init__(self):
super().__init__(
"You are not the server owner, so you cannot run this command."
)
def is_guild_owner():
"""
A check returning true if the guild owner invoked the command, and we are in a guild.
If we are not in a guild, we return False to fail the check. If we are not the guild
owner, and one exists, we raise ``NotGuildOwner`` to show a custom error message.
"""
def decorator(ctx):
if not ctx.guild:
return False
elif not (
ctx.guild.owner.id == ctx.author.id or ctx.author.id == ctx.bot.owner_id
):
raise NotGuildOwner
return True
return check(decorator)
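# Usage sketch (hypothetical command; assumes a ``commands.Bot`` instance named
# ``bot``):
#
#   @bot.command()
#   @is_guild_owner()
#   async def settings(ctx):
#       await ctx.send("Only the server owner can run this.")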
| 28.564356 | 89 | 0.693934 | 414 | 2,885 | 4.717391 | 0.405797 | 0.045059 | 0.036866 | 0.033794 | 0.118792 | 0.056324 | 0.030722 | 0 | 0 | 0 | 0 | 0.005822 | 0.225997 | 2,885 | 100 | 90 | 28.85 | 0.868786 | 0.577816 | 0 | 0.042553 | 0 | 0 | 0.190227 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.085106 | 0 | 0.234043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
991a025602e05e7286a371a273b532009c3af8bd | 3,379 | py | Python | python/nano/src/bigdl/nano/automl/tf/objective.py | pinggao187/BigDL | 3d673458f267746b54dfd0146bdb022b3acb2d89 | [
"Apache-2.0"
] | null | null | null | python/nano/src/bigdl/nano/automl/tf/objective.py | pinggao187/BigDL | 3d673458f267746b54dfd0146bdb022b3acb2d89 | [
"Apache-2.0"
] | null | null | null | python/nano/src/bigdl/nano/automl/tf/objective.py | pinggao187/BigDL | 3d673458f267746b54dfd0146bdb022b3acb2d89 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.keras.backend import clear_session
from tensorflow.keras.models import clone_model
import tensorflow as tf
import inspect
import copy
from optuna.integration import TFKerasPruningCallback
def is_creator(model):
return inspect.ismethod(model) or inspect.isfunction(model)
class Objective(object):
"""The Tuning objective for Optuna"""
def __init__(self,
model=None,
target_metric=None,
pruning=False,
**kwargs,
):
"""Init the objective.
Args:
model (keras model or function): a model instance or creator function. Defaults to None.
model_compiler (function, optional): the compiler function. Defaults to None.
target_metric (str, optional): target metric to optimize. Defaults to None.
Raises:
ValueError: _description_
"""
if not is_creator(model) and not isinstance(model, tf.keras.Model):
raise ValueError("You should either pass a Tensorflo Keras model, or \
a model_creator to the Tuning objective.")
self.model_ = model
self.target_metric_ = target_metric
self.pruning = pruning
self.kwargs = kwargs
@property
def target_metric(self):
return self.target_metric_
def prepare_fit_args(self, trial):
# only do shallow copy and process/duplicate
# specific args TODO: may need to handle more cases
new_kwargs = copy.copy(self.kwargs)
new_kwargs['verbose'] = 2
callbacks = new_kwargs.get('callbacks', None)
callbacks = callbacks() if inspect.isfunction(callbacks) else callbacks
if self.pruning:
callbacks = callbacks or []
prune_callback = TFKerasPruningCallback(trial, self.target_metric)
callbacks.append(prune_callback)
new_kwargs['callbacks'] = callbacks
return new_kwargs
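    # Usage sketch (hypothetical names; assumes a model creator taking an
    # Optuna trial, plus keyword fit arguments such as x/y/epochs):
    #
    #   import optuna
    #   objective = Objective(model=model_creator,
    #                         target_metric="val_accuracy", pruning=True,
    #                         x=x_train, y=y_train, validation_split=0.2,
    #                         epochs=10)
    #   study = optuna.create_study(direction="maximize")
    #   study.optimize(objective, n_trials=20)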
def __call__(self, trial):
# Clear clutter from previous Keras session graphs.
clear_session()
# TODO may add data creator here, e.g. refresh data, reset generators, etc.
# create model
if is_creator(self.model_):
model = self.model_(trial)
else:
# copy model so that the original model is not changed
# Need tests to check this path
model = clone_model(self.model_)
# fit
new_kwargs = self.prepare_fit_args(trial)
hist = model.fit(**new_kwargs)
score = hist.history.get(self.target_metric, None)
if score is not None:
if isinstance(score, list):
# score = score[-1]
score = max(score)
return score
| 33.127451 | 100 | 0.643682 | 411 | 3,379 | 5.180049 | 0.391727 | 0.050728 | 0.030061 | 0.015031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004137 | 0.2847 | 3,379 | 101 | 101 | 33.455446 | 0.876707 | 0.368156 | 0 | 0 | 0 | 0 | 0.012231 | 0 | 0 | 0 | 0 | 0.009901 | 0 | 1 | 0.102041 | false | 0.020408 | 0.122449 | 0.040816 | 0.326531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
991a268e1607c44fc5fec2b754da258201620b92 | 4,247 | py | Python | tensorflow_toolkit/image_retrieval/image_retrieval/image_retrieval.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 256 | 2020-09-09T03:27:57.000Z | 2022-03-30T10:06:06.000Z | tensorflow_toolkit/image_retrieval/image_retrieval/image_retrieval.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 604 | 2020-09-08T12:29:49.000Z | 2022-03-31T21:51:08.000Z | tensorflow_toolkit/image_retrieval/image_retrieval/image_retrieval.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 160 | 2020-09-09T14:06:07.000Z | 2022-03-30T14:50:48.000Z | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tqdm import tqdm
import numpy as np
import cv2
from sklearn.metrics.pairwise import cosine_distances
from image_retrieval.common import from_list, preproces_image
def nothing(image):
return image
class ImageRetrieval:
def __init__(self, model_path, model_backend, model, gallery_path, input_size, cpu_extensions,
multiple_images_per_label=False):
self.impaths, self.gallery_classes, _, self.text_label_to_class_id = from_list(
gallery_path, multiple_images_per_label)
self.input_size = input_size
self.preprocess = preproces_image
if model is None or isinstance(model, str):
if model_backend == 'tf':
import tensorflow as tf
from image_retrieval.model import keras_applications_mobilenetv2, \
keras_applications_resnet50
if model == 'resnet50':
self.model = keras_applications_resnet50(
tf.keras.layers.Input(shape=(input_size, input_size, 3)))
if model == 'mobilenet_v2':
self.model = keras_applications_mobilenetv2(
tf.keras.layers.Input(shape=(input_size, input_size, 3)))
self.model.load_weights(model_path)
else:
from openvino.inference_engine import IENetwork, IECore
class IEModel():
def __init__(self, model_path):
ie = IECore()
if cpu_extensions:
ie.add_extension(cpu_extensions, 'CPU')
path = '.'.join(model_path.split('.')[:-1])
self.net = IENetwork(model=path + '.xml', weights=path + '.bin')
self.exec_net = ie.load_network(network=self.net, device_name='CPU')
def predict(self, image):
assert len(image.shape) == 4
image = np.transpose(image, (0, 3, 1, 2))
out = self.exec_net.infer(inputs={'Placeholder': image})[
'model/tf_op_layer_mul/mul/Normalize']
return out
self.model = IEModel(model_path)
self.preprocess = nothing
else:
self.model = model
self.embeddings = self.compute_gallery_embeddings()
def compute_embedding(self, image):
image = cv2.resize(image, (self.input_size, self.input_size))
image = self.preprocess(image)
image = np.expand_dims(image, axis=0)
embedding = self.model.predict(image)
return embedding
def search_in_gallery(self, embedding):
distances = cosine_distances(embedding, self.embeddings).reshape([-1])
sorted_indexes = np.argsort(distances)
return sorted_indexes, distances
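    # note: argsort orders by ascending cosine distance, so sorted_indexes[0]
    # is the gallery image most similar to the query embedding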
def compute_gallery_embeddings(self):
images = []
for full_path in tqdm(self.impaths, desc='Reading gallery images.'):
image = cv2.imread(full_path)
            if image is None:
                # fail fast: cv2.resize below would otherwise crash on None
                raise FileNotFoundError(
                    "cannot find image, full_path = " + full_path)
image = cv2.resize(image, (self.input_size, self.input_size))
image = self.preprocess(image)
image = np.expand_dims(image, axis=0)
images.append(image)
embeddings = [None for _ in self.impaths]
index = 0
for image in tqdm(images, desc='Computing embeddings of gallery images.'):
embeddings[index] = self.model.predict(image).reshape([-1])
index += 1
return embeddings
| 36.612069 | 98 | 0.607252 | 492 | 4,247 | 5.067073 | 0.361789 | 0.039711 | 0.026073 | 0.021661 | 0.126755 | 0.11071 | 0.11071 | 0.11071 | 0.11071 | 0.11071 | 0 | 0.011905 | 0.307747 | 4,247 | 115 | 99 | 36.930435 | 0.836054 | 0.132329 | 0 | 0.136986 | 0 | 0 | 0.049905 | 0.009545 | 0 | 0 | 0 | 0 | 0.013699 | 1 | 0.09589 | false | 0 | 0.109589 | 0.013699 | 0.30137 | 0.013699 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
991d664b4a7ff9ae7464e55cf36f369519e4ab24 | 2,319 | py | Python | release/scripts/addons/oscurart_tools/object/distribute.py | noorbeast/BlenderSource | 65ebecc5108388965678b04b43463b85f6c69c1d | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3 | 2019-09-16T10:29:19.000Z | 2022-02-11T14:43:18.000Z | engine/2.80/scripts/addons/oscurart_tools/object/distribute.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | engine/2.80/scripts/addons/oscurart_tools/object/distribute.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
import os
from bpy.types import Operator
from bpy.props import BoolProperty
def ObjectDistributeOscurart(self, X, Y, Z):
if len(bpy.selection_osc[:]) > 1:
# VARIABLES
dif = bpy.selection_osc[-1].location - bpy.selection_osc[0].location
chunkglobal = dif / (len(bpy.selection_osc[:]) - 1)
chunkx = 0
chunky = 0
chunkz = 0
deltafst = bpy.selection_osc[0].location
        # ARRANGE (distribute the objects)
for OBJECT in bpy.selection_osc[:]:
if X:
OBJECT.location.x = deltafst[0] + chunkx
if Y:
OBJECT.location[1] = deltafst[1] + chunky
if Z:
OBJECT.location.z = deltafst[2] + chunkz
chunkx += chunkglobal[0]
chunky += chunkglobal[1]
chunkz += chunkglobal[2]
else:
self.report({'INFO'}, "Needs at least two selected objects")
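# Worked example: with 5 selected objects whose first and last sit at x = 0 and
# x = 8, chunkglobal.x = 8 / (5 - 1) = 2, so distributing along X places the
# objects at x = 0, 2, 4, 6, 8.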
class DistributeOsc(Operator):
"""Distribute evenly the selected objects in x y z"""
bl_idname = "object.distribute_osc"
bl_label = "Distribute Objects"
Boolx : BoolProperty(name="X")
Booly : BoolProperty(name="Y")
Boolz : BoolProperty(name="Z")
def execute(self, context):
ObjectDistributeOscurart(self, self.Boolx, self.Booly, self.Boolz)
return {'FINISHED'}
def invoke(self, context, event):
self.Boolx = True
self.Booly = True
self.Boolz = True
return context.window_manager.invoke_props_dialog(self)
| 32.661972 | 76 | 0.645106 | 301 | 2,319 | 4.930233 | 0.458472 | 0.048518 | 0.060647 | 0.03841 | 0.113208 | 0.037736 | 0 | 0 | 0 | 0 | 0 | 0.01627 | 0.25787 | 2,319 | 70 | 77 | 33.128571 | 0.84602 | 0.354463 | 0 | 0 | 0 | 0 | 0.061337 | 0.014473 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.105263 | 0 | 0.394737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
991f03bbfaaa813ef12bac646842c1f1126bf936 | 15,782 | py | Python | py/tests/test45SpatAdaptiveUP.py | valentjn/thesis | 65a0eb7d5f7488aac93882959e81ac6b115a9ea8 | [
"CC0-1.0"
] | 4 | 2022-01-15T19:50:36.000Z | 2022-01-15T20:16:10.000Z | py/tests/test45SpatAdaptiveUP.py | valentjn/thesis | 65a0eb7d5f7488aac93882959e81ac6b115a9ea8 | [
"CC0-1.0"
] | null | null | null | py/tests/test45SpatAdaptiveUP.py | valentjn/thesis | 65a0eb7d5f7488aac93882959e81ac6b115a9ea8 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/python3
import functools
import multiprocessing
import random
import unittest
import numpy as np
import scipy.special
import helper.basis
import helper.grid
import tests.misc
class Test45SpatAdaptiveUP(tests.misc.CustomTestCase):
@staticmethod
def createDataHermiteHierarchization(p):
n, d, b = 4, 1, 0
bases = [helper.basis.HierarchicalWeaklyFundamentalSpline(p, nu=nu)
for nu in range((p+1)//2)]
grid = helper.grid.RegularSparseBoundary(n, d, b)
X, L, I = grid.generate()
X, L, I = X.flatten(), L.flatten(), I.flatten()
K = np.column_stack((L, I))
f = (lambda X: 0.3 + np.sin(2.3*np.pi*(X-0.2)))
fX = f(X)
return bases, n, X, L, I, K, fX
def hermiteHierarchizationCallback(self, fl, y, l, K, bases):
p = bases[0].p
nodalIl = helper.grid.getNodalIndices(l)
Kl = np.array([self.findLevelIndex(K, l, i) for i in nodalIl])
Xl = helper.grid.getCoordinates(l, nodalIl)
for q in range((p+1)//2):
Yl = np.zeros_like(Xl)
for lp in range(l+1):
hierIlp = helper.grid.getHierarchicalIndices(lp)
for ip in hierIlp:
Yl += (y[self.findLevelIndex(K, lp, ip)] *
bases[q].evaluate(lp, ip, Xl))
self.assertAlmostEqual(Yl, fl[l][Kl,q])
@staticmethod
def findLevelIndex(K, l, i):
lp, ip = helper.grid.convertNodalToHierarchical(l, i)
return (np.where((K == (lp, ip)).all(axis=1))[0][0])
@staticmethod
def dividedDifference(data):
# data in the form
# [(a, f(a), df(a), ...), (b, f(b), df(b), ...), ...]
if len(data) == 1:
return data[0][-1] / scipy.special.factorial(len(data[0]) - 2)
else:
dataLeft = list(data)
if len(dataLeft[-1]) > 2: dataLeft[-1] = dataLeft[-1][:-1]
else: del dataLeft[-1]
dataRight = list(data)
if len(dataRight[0]) > 2: dataRight[0] = dataRight[0][:-1]
else: del dataRight[0]
return ((Test45SpatAdaptiveUP.dividedDifference(dataRight) -
Test45SpatAdaptiveUP.dividedDifference(dataLeft)) /
(data[-1][0] - data[0][0]))
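    # Worked example with a repeated node (Hermite data): for
    # data = [(0, 1, 0), (1, 0)], i.e. f(0) = 1, f'(0) = 0, f(1) = 0,
    # the divided differences are f[0] = 1, f[0,0] = f'(0)/1! = 0,
    # f[0,1] = (0 - 1)/(1 - 0) = -1 and f[0,0,1] = (-1 - 0)/(1 - 0) = -1,
    # giving the Newton-form interpolant p(x) = 1 + 0*x - x^2 = 1 - x^2.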
@staticmethod
def hermiteInterpolation1D(xx, data, nu=0):
# data in the form
# [(a, f(a), df(a), ...), (b, f(b), df(b), ...), ...]
yy = np.zeros((len(xx), nu+1))
xProduct = [1] + (nu * [0])
curXData = []
curData = []
for dataPoint in data:
x = dataPoint[0]
curData.append([x])
for k in range(1, len(dataPoint)):
curData[-1].append(dataPoint[k])
coeff = Test45SpatAdaptiveUP.dividedDifference(curData)
for q in range(nu, -1, -1):
yy[:,q] += coeff * xProduct[q]
xProduct[q] = (xProduct[q] * (xx - x) +
q * (xProduct[q-1] if q > 0 else 0))
return yy
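    # hermiteHierarchization1D below proceeds level by level: on each new level
    # l it first extends the stored values/derivatives fl from level l-1 to the
    # new (odd-index) grid points by local Hermite interpolation, then solves a
    # small linear system for the hierarchical surpluses of level l, and
    # finally updates fl with the contribution of the new basis functions.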
@staticmethod
def hermiteHierarchization1D(u, n, K, bases, testCallback=None):
N = u.shape[0]
p = bases[0].p
y = np.zeros((N,))
fl = np.zeros((n+1, N, (p+1)//2))
k0 = Test45SpatAdaptiveUP.findLevelIndex(K, 0, 0)
k1 = Test45SpatAdaptiveUP.findLevelIndex(K, 0, 1)
for i in range(2):
k = (k0 if i == 0 else k1)
y[k] = u[k]
fl[0][k][0] = u[k]
if p > 1: fl[0][k][1] = (u[k1] - u[k0])
for l in range(1, n+1):
nodalIl = helper.grid.getNodalIndices(l)
Kl = np.array([Test45SpatAdaptiveUP.findLevelIndex(K, l, i)
for i in nodalIl])
Xl = helper.grid.getCoordinates(l, nodalIl)
hierIl = np.array(helper.grid.getHierarchicalIndices(l))
flm1 = np.zeros((len(nodalIl), (p+1)//2))
evenIl = [i for i in nodalIl if i not in hierIl]
flm1[evenIl] = fl[l-1][Kl[evenIl]]
for i in hierIl:
data = [np.hstack((Xl[i-1], flm1[i-1])),
np.hstack((Xl[i+1], flm1[i+1]))]
flm1[i] = Test45SpatAdaptiveUP.hermiteInterpolation1D(
[Xl[i]], data, nu=(p-1)//2)
rl = np.zeros_like(nodalIl, dtype=float)
rl[hierIl] = u[Kl[hierIl]] - flm1[hierIl][:,0]
A = np.zeros((len(hierIl), len(hierIl)))
for i in hierIl: A[:,(i-1)//2] = bases[0].evaluate(l, i, Xl[hierIl])
b = rl[hierIl]
yl = np.linalg.solve(A, b)
y[Kl[hierIl]] = yl
for q in range((p+1)//2):
rl = np.zeros_like(nodalIl, dtype=float)
for i in hierIl: rl += y[Kl[i]] * bases[q].evaluate(l, i, Xl)
for i in nodalIl: fl[l][Kl[i]][q] = flm1[i][q] + rl[i]
if testCallback is not None: testCallback(fl, y, l, K, bases)
return y
@staticmethod
def iterativeRefinement(u, y0, Linv, Lp):
r = u - Linv(y0)
y = np.array(y0)
for m in range(1000):
if np.max(np.abs(r)) < 1e-10: break
y += Lp(r)
r -= Linv(Lp(r))
return y, r
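    # The iteration above is y_{m+1} = y_m + B (u - A y_m), where A is the
    # matrix applied by Linv and B the one applied by Lp; it converges for
    # every starting vector y0 iff the spectral radius of I - B A (equivalently
    # I - A B) is less than 1, which is exactly what the matrix-power check in
    # testPropIterativeRefinementSufficient approximates.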
@staticmethod
def getChain(l1, i1, l2, i2, T):
chain = [(np.array(l1), np.array(i1))]
for t in T:
lNext, iNext = chain[-1]
lNext, iNext = np.array(lNext), np.array(iNext)
lNext[t], iNext[t] = l2[t], i2[t]
chain.append((lNext, iNext))
if np.all(chain[-1][0] == l2) and np.all(chain[-1][1] == i2):
return chain
else:
return None
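    # Example: for d = 2 and T = [0, 1], the chain from (l1, i1) to (l2, i2) is
    # (l1, i1) -> ((l2[0], l1[1]), (i2[0], i1[1])) -> (l2, i2); coordinates are
    # swapped one dimension at a time in the order given by T, and None is
    # returned if T does not cover every coordinate in which the points differ.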
def testLemmaIterativeRefinementEquivalent(self):
# tested in testPropIterativeRefinementSufficient
pass
def testPropIterativeRefinementSufficient(self):
tol = {"rtol" : 1e-3, "atol" : 1e-8}
for p in [1, 3, 5, 7]:
basisLin1D = helper.basis.HierarchicalBSpline(1)
basis1D = helper.basis.HierarchicalBSpline(p)
for d in range(1, 5):
f = tests.misc.getObjectiveFunction(d)
basisLin = (helper.basis.TensorProduct(basisLin1D, d) if d > 1 else
basisLin1D)
basis = (helper.basis.TensorProduct(basis1D, d) if d > 1 else
basis1D)
with self.subTest(p=p, d=d):
X, L, I = tests.misc.generateSpatiallyAdaptiveSparseGrid(
d, 500)
fX = f(X)
A = tests.misc.computeInterpolationMatrix(basis, X, L, I)
aX = np.linalg.solve(A, fX)
ALin = tests.misc.computeInterpolationMatrix(basisLin, X, L, I)
ALinInv = np.linalg.inv(ALin)
Linv = (lambda x: np.dot(A, x))
Lp = (lambda x: np.dot(ALinInv, x))
u = fX
y0 = 2 * np.random.random((X.shape[0],)) - 1
y, r = self.iterativeRefinement(u, y0, Linv, Lp)
if np.max(np.abs(r)) < 1e-10:
self.assertAlmostEqual(y, aX, **tol)
else:
self.assertNotAlmostEqual(y, aX, **tol)
N = X.shape[0]
m = 100
power = np.linalg.matrix_power(np.eye(N) - np.dot(A, ALinInv), m)
powerNormRoot = np.power(np.linalg.norm(power), 1/m)
if powerNormRoot < 1:
self.assertAlmostEqual(y, aX, **tol)
def testLemmaDualityUnidirectionalPrinciple(self):
n, b = 4, 0
hierarchical = True
bases = tests.misc.getExampleHierarchicalBases()
for basisName, d, basis in bases:
f = tests.misc.getObjectiveFunction(d)
modified = ("Modified" in basisName)
if "ClenshawCurtis" in basisName: distribution = "clenshawCurtis"
else: distribution = "uniform"
with self.subTest(basis=basisName, d=d):
#X, L, I = tests.misc.generateSpatiallyAdaptiveSparseGrid(
# d, 500)
grid = (helper.grid.RegularSparse(n, d) if modified else
helper.grid.RegularSparseBoundary(n, d, b))
X, L, I = grid.generate()
if distribution != "uniform":
X = helper.grid.getCoordinates(L, I, distribution=distribution)
fX = f(X)
u = np.array(fX)
K = tests.misc.convertToContinuous(L, I)
T = np.arange(d)
np.random.shuffle(T)
L1D = functools.partial(tests.misc.hierarchize1D,
basis, distribution, hierarchical)
bases1D = (basis.basis1D if d > 1 else [basis])
y = tests.misc.unidirectionalPrinciple(u, K, T, L1D)
TRev = T[::-1]
LInv1D = functools.partial(tests.misc.hierarchize1D,
basis, distribution, hierarchical,
mode="dehierarchize")
u2 = tests.misc.unidirectionalPrinciple(y, K, TRev, LInv1D)
if d == 1: X, L, I = X.flatten(), L.flatten(), I.flatten()
A = tests.misc.computeInterpolationMatrix(basis, X, L, I)
aX = np.linalg.solve(A, fX)
fX2 = np.dot(A, y)
try:
self.assertAlmostEqual(y, aX, atol=1e-10)
upCorrect = True
except AssertionError:
upCorrect = False
try:
self.assertAlmostEqual(u2, fX2, atol=1e-10)
upInvCorrect = True
except AssertionError:
upInvCorrect = False
if upCorrect: self.assertTrue(upInvCorrect)
else: self.assertFalse(upInvCorrect)
N = X.shape[0]
LI = np.column_stack((L, I))
containsAllChains = True
if d == 1:
X = np.reshape(X, (N,1))
L = np.reshape(L, (N,1))
I = np.reshape(I, (N,1))
for k1 in range(N):
if not containsAllChains: break
for k2 in range(N):
if not containsAllChains: break
if np.abs(A[k2,k1]) > 1e-12:
chain = self.getChain(L[k1], I[k1], L[k2], I[k2], TRev)
for l, i in chain:
li = np.hstack((l, i))
if not np.any(np.all(LI == li, axis=1)):
containsAllChains = False
break
if upCorrect: self.assertTrue(containsAllChains)
else: self.assertFalse(containsAllChains)
@staticmethod
def calculateA1D(getL1D, data):
t, KPole = data
K1D = np.array([k[t] for k in KPole])
A1D = getL1D(t, K1D)
return A1D
def testLemmaChainExistenceSufficient(self):
p = 3
hierarchical = True
basis1D = helper.basis.HierarchicalBSpline(p)
for d in range(1, 5):
basisName = "{}({})".format(type(basis1D).__name__, p)
modified = ("Modified" in basisName)
if "ClenshawCurtis" in basisName: distribution = "clenshawCurtis"
else: distribution = "uniform"
basis = (helper.basis.TensorProduct(basis1D, d) if d > 1 else
basis1D)
with self.subTest(d=d):
X, L, I = tests.misc.generateSpatiallyAdaptiveSparseGrid(
d, 200)
#grid = (helper.grid.RegularSparse(n, d) if modified else
# helper.grid.RegularSparseBoundary(n, d, b))
#X, L, I = grid.generate()
if distribution != "uniform":
X = helper.grid.getCoordinates(L, I, distribution=distribution)
K = tests.misc.convertToContinuous(L, I)
T = np.arange(d)
np.random.shuffle(T)
getL1D = functools.partial(tests.misc.hierarchize1D,
basis, distribution, hierarchical, None, mode="matrix")
N = X.shape[0]
As = [np.eye(N)]
KTuples = [tuple(k) for k in K]
allKPoles = []
for t in T:
isOnSamePole = functools.partial(tests.misc.isOnSamePole,
t, d)
KPoles = helper.misc.getEquivalenceClasses(KTuples, isOnSamePole)
allKPoles.extend([(t, tuple(sorted(KPole))) for KPole in KPoles])
with multiprocessing.Pool() as pool:
A1Ds = pool.map(functools.partial(self.calculateA1D, getL1D),
allKPoles)
A1Ds = dict(zip(allKPoles, A1Ds))
for t in T:
isOnSamePole = functools.partial(tests.misc.isOnSamePole,
t, d)
KPoles = helper.misc.getEquivalenceClasses(K, isOnSamePole)
At = np.zeros((N, N))
for KPole in KPoles:
KPoleTuple = tuple(sorted([tuple(k.tolist()) for k in KPole]))
A1D = A1Ds[(t, KPoleTuple)]
N1D = A1D.shape[0]
Q = [np.where(np.all(K == k, axis=1))[0][0] for k in KPole]
for r1 in range(N1D):
for r2 in range(N1D):
At[Q[r2],Q[r1]] = A1D[r2,r1]
As.append(np.dot(At, As[-1]))
LI = np.column_stack((L, I))
for j in range(d+1):
for k1 in range(N):
for k2 in range(N):
chain = self.getChain(L[k1,:], I[k1,:],
L[k2,:], I[k2,:], T[:j])
if chain is not None:
chain = tests.misc.convertToContinuous(
np.array([x[0] for x in chain]),
np.array([x[1] for x in chain]))
containsChain = all([np.any(np.all(K == k, axis=1))
for k in chain])
else:
containsChain = False
if np.abs(As[j][k2,k1]) > 1e-10:
self.assertTrue(containsChain)
if containsChain:
rhs = 1
for t, kj in zip(T[:j], chain[1:]):
isOnSamePole = functools.partial(tests.misc.isOnSamePole,
t, d)
KPole = [k for k in K if isOnSamePole(k, kj)]
KPoleTuple = tuple(sorted([tuple(k.tolist())
for k in KPole]))
A1D = A1Ds[(t, KPoleTuple)]
K1D = np.array([k[t] for k in KPole])
r1 = np.where(K1D == K[k1,t])[0][0]
r2 = np.where(K1D == K[k2,t])[0][0]
rhs *= A1D[r2,r1]
lhs = As[j][k2,k1]
self.assertAlmostEqual(lhs, rhs)
def testLemmaChainExistenceNecessary(self):
# tested in testLemmaChainExistenceSufficient
pass
def testPropCorrectnessUPCharacterization(self):
# tested in testLemmaDualityUnidirectionalPrinciple
pass
def testCorEquivalentCorrectnessUPHierarchization(self):
# tested in testLemmaDualityUnidirectionalPrinciple
pass
def testLemmaHermiteInterpolation(self):
a = random.uniform(0, 3)
b = a + random.uniform(2, 4)
for p in [1, 3, 5, 7]:
data = [[a] + [random.gauss(0, 2) for x in range((p+1)//2)],
[b] + [random.gauss(0, 2) for x in range((p+1)//2)]]
for nu in range((p+1)//2):
y = self.hermiteInterpolation1D(np.array([a, b]), data, nu=nu)
y2 = np.row_stack((data[0][1:nu+2], data[1][1:nu+2]))
self.assertAlmostEqual(y, y2)
def testPropInvariantHermiteHierarchization(self):
for p in [1, 3, 5, 7]:
bases, n, X, L, I, K, fX = self.createDataHermiteHierarchization(p)
callback = self.hermiteHierarchizationCallback
self.hermiteHierarchization1D(fX, n, K, bases, testCallback=callback)
def testCorAlgHermiteHierarchizationCorrectness(self):
for p in [1, 3, 5, 7]:
bases, n, X, L, I, K, fX = self.createDataHermiteHierarchization(p)
aX = self.hermiteHierarchization1D(fX, n, K, bases)
Y = np.zeros_like(X)
for k in range(X.shape[0]): Y += aX[k] * bases[0].evaluate(L[k], I[k], X)
self.assertAlmostEqual(Y, fX)
if __name__ == "__main__":
unittest.main()
# ===== configs/config_FILES.py (Haupti/tudatalibAPI, Apache-2.0) =====
'''
For a flawless upload of many files to all the desired items, the following
variable has to be set:
    upload_list: a list of 2-element lists, each 2-element list containing
    1. the item id and 2. the folder from which all files will be uploaded
    to the item on TUdatalib.
Replace <directory_path> with the WHOLE path to the folder you want to
upload files from.
Note that you have to give the path to a folder, not a file, and that
EVERYTHING from that folder will be uploaded to the item!
'''
upload_list = None #python list of list of strings
'''
Please stick to this format (number of elements variable):
upload_list = [
["<item_id>","<directory_path>"],
["<item_id>","<directory_path>"]
]
'''
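# A filled-in example (the item id and path below are made up):
# upload_list = [
#     ["1234-abcd", "/home/user/upload_me"]
# ]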
# ===== unify/tool/create-application.py (unify/unify, Apache-2.0 / MIT) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess, os, sys, optparse
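# Thin wrapper: forwards all command-line arguments to qooxdoo's
# create-application.py, pre-selecting the bundled "unify" application skeleton.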
fullpath = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
capath = os.path.abspath(
os.path.join(fullpath, "..", "..", "qooxdoo", "qooxdoo", "tool", "bin", "create-application.py")
)
skeletonpath = os.path.abspath(
os.path.join(fullpath, "..", "application", "skeleton")
)
subprocess.call(["python", capath, "-p", skeletonpath, "-t", "unify"] + sys.argv[1:])
# ===== MedTARSQI/src/main/resources/ttk/testing/create_slinket_cases.py (CDCgov/DCPC, Apache-2.0) =====
"""create_slinket_cases.py
Code to create Slinket unit test cases. Runs by taking all SLINKs from a
Timebank parse and putting them in files, one for each SLINK relType, as potential
test cases. Files are named slink-cases-RELTYPE.txt, where RELTYPE stands for
one of the relation types.
The output files have lines like
('MODAL', 'ABC19980120.1830.0957.xml', 'toClause3', (13,20), (34,38),
"Fidel Castro invited John Paul to come for a reason.")
which can be inserted directly as unit tests in SlinketTest.
Running this script will actually report a lot of errors and warnings, but
useful output is created and fixing the errors is low priority.
"""
import os, sys, codecs
from xml.dom.minidom import parse, parseString
TIMEBANK_DIR = '../data/out/timebank'
TTK_FILE = '../out-slinket.xml'
SLINK_CASES = {}
def parse_directory(dname):
#for (counter, fname) in os.listdir(dname):
for fname in os.listdir(dname):
# if counter > 10: break
sys.stderr.write("%s\n" % fname)
try:
parse_file(os.path.join(dname, fname))
except:
sys.stderr.write("ERROR on %s\n" % fname)
def parse_file(fname):
dom = parse(fname)
text = dom.getElementsByTagName('text')[0]
source_tags = dom.getElementsByTagName('source_tags')[0]
try:
tarsqi_tags = dom.getElementsByTagName('tarsqi_tags')[0]
except IndexError:
# some older parsed files still have ttk_tags, allow for that
tarsqi_tags = dom.getElementsByTagName('ttk_tags')[0]
sentences = tarsqi_tags.getElementsByTagName('s')
events = tarsqi_tags.getElementsByTagName('EVENT')
slinks = tarsqi_tags.getElementsByTagName('SLINK')
source_text = text.firstChild.data
parse_slinks(os.path.basename(fname), slinks, events, sentences, source_text)
def parse_slinks(fname, slinks, events, sentences, source_text):
event_dict = {}
for event in events:
eiid = event.getAttribute('eiid')
event_dict[eiid] = event
for slink in slinks:
parse_slink(fname, slink, event_dict, sentences, source_text)
for reltype in SLINK_CASES.keys():
fh = codecs.open("slink-cases-%s.txt" % reltype, 'w')
for case in SLINK_CASES[reltype]:
fh.write(case)
def parse_slink(fname, slink, event_dict, sentences, source_text):
rel = slink.getAttribute('relType')
rule = slink.getAttribute('syntax')
e1 = slink.getAttribute('eventInstanceID')
e2 = slink.getAttribute('subordinatedEventInstance')
e1p1 = int(event_dict[e1].getAttribute('begin'))
e1p2 = int(event_dict[e1].getAttribute('end'))
e2p1 = int(event_dict[e2].getAttribute('begin'))
e2p2 = int(event_dict[e2].getAttribute('end'))
event_text1 = source_text[e1p1:e1p2]
event_text2 = source_text[e2p1:e2p2]
enclosing_sentence = None
for s in sentences:
if int(s.getAttribute('begin')) <= e1p1 and int(s.getAttribute('end')) >= e1p1:
enclosing_sentence = s
s_p1 = int(enclosing_sentence.getAttribute('begin'))
s_p2 = int(enclosing_sentence.getAttribute('end'))
sentence_text = source_text[s_p1:s_p2]
sentence_text = ' '.join(sentence_text.split())
(e1p1, e1p2) = get_local_offset(sentence_text, event_text1)
(e2p1, e2p2) = get_local_offset(sentence_text, event_text2)
if e1p1 < 0:
sys.stderr.write("WARNING: did not find '%s' in '%s'\n" % (event_text1, sentence_text))
elif e2p1 < 0:
sys.stderr.write("WARNING: did not find '%s' in '%s'\n" % (event_text2, sentence_text))
else:
case = "('%s', '%s', '%s', (%s,%s), (%s,%s),\n \"%s\")\n" \
% (rel, fname, rule, e1p1, e1p2, e2p1, e2p2, sentence_text)
SLINK_CASES.setdefault(rel,[]).append(case)
def get_local_offset(sentence, text):
idx1 = sentence.find(text)
idx2 = idx1 + len(text)
return (idx1, idx2)
if __name__ == '__main__':
#parse_file(TTK_FILE)
parse_directory(TIMEBANK_DIR)
# ===== tools/ops/azure/container-host/chart/deploy_chart.py (anthonybgale/cloud-custodian, Apache-2.0) =====
# Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import tempfile
import click
import yaml
from c7n.resources import load_resources
from c7n.utils import local_session
from c7n_azure.constants import ENV_CONTAINER_EVENT_QUEUE_NAME, ENV_SUB_ID
from c7n_azure.session import Session
logger = logging.getLogger("c7n_azure.container-host.deploy")
MANAGEMENT_GROUP_TYPE = '/providers/Microsoft.Management/managementGroups'
SUBSCRIPTION_TYPE = '/subscriptions'
class Deployment(object):
def __init__(self, ctx, default_environment=None):
logging.basicConfig(level=logging.INFO, format='%(message)s')
self.dry_run = ctx.parent.params.get('dry_run')
self.deployment_name = ctx.parent.params.get('deployment_name')
self.deployment_namespace = ctx.parent.params.get('deployment_namespace')
self.image_repository = ctx.parent.params.get('image_repository')
self.image_tag = ctx.parent.params.get('image_tag')
self.image_pull_policy = ctx.parent.params.get('image_pull_policy')
self.default_environment = default_environment
self.subscription_hosts = []
def run(self):
values = self.build_values_dict()
values_file_path = Deployment.write_values_to_file(values)
logger.info("Created values file at {}\n".format(values_file_path))
values_yaml = yaml.dump(values)
logger.info(values_yaml)
        # Currently deploys the helm chart through a system command; this assumes
        # helm is installed and configured for the target cluster.
logger.info("Deploying with helm")
helm_command = Deployment.build_helm_command(
self.deployment_name, values_file_path, namespace=self.deployment_namespace,
dry_run=self.dry_run)
logger.info(helm_command)
os.system(helm_command)
def build_values_dict(self):
values = {}
# custom image fields
self._set_image_field(values, 'repository', self.image_repository)
self._set_image_field(values, 'tag', self.image_tag)
self._set_image_field(values, 'pullPolicy', self.image_pull_policy)
# default environment variables for each host
if self.default_environment:
values['defaultEnvironment'] = self.default_environment
# A list of configurations for individual hosts
values['subscriptionHosts'] = self.subscription_hosts
return values
def _set_image_field(self, values, key, value):
if value:
values.setdefault('image', {})[key] = value
def add_subscription_host(self, name='', environment={}):
self.subscription_hosts.append({
'name': name,
'environment': environment,
})
@staticmethod
def write_values_to_file(values):
values_file_path = tempfile.mktemp(suffix='.yaml')
with open(values_file_path, 'w') as values_file:
yaml.dump(values, stream=values_file)
return values_file_path
@staticmethod
def build_helm_command(deployment_name, values_file_path, namespace=None, dry_run=False):
command = 'helm upgrade --install --debug'
if dry_run:
command += ' --dry-run'
if namespace:
command += ' --namespace {}'.format(namespace)
command += ' --values {}'.format(values_file_path)
chart_path = os.path.dirname(__file__)
command += ' {} {}'.format(deployment_name, chart_path)
return command
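        # For reference, a command produced here typically looks like (values
        # are illustrative):
        #   helm upgrade --install --debug --namespace cloud-custodian \
        #       --values /tmp/tmpabc123.yaml cloud-custodian /path/to/chart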
class SubscriptionDeployment(Deployment):
def __init__(self, ctx, name='', env=[]):
super(SubscriptionDeployment, self).__init__(ctx)
self.name = name
self.environment = {e[0]: e[1] for e in env}
self.run()
def build_values_dict(self):
self.add_subscription_host(self.name, self.environment)
return super(SubscriptionDeployment, self).build_values_dict()
class ManagementGroupDeployment(Deployment):
def __init__(self, ctx, management_group_id, env=[]):
super(ManagementGroupDeployment, self).__init__(ctx,
default_environment={e[0]: e[1] for e in env})
self.management_group_id = management_group_id
load_resources()
self.session = local_session(Session)
self.run()
def build_values_dict(self):
self._add_subscription_hosts()
return super(ManagementGroupDeployment, self).build_values_dict()
def _add_subscription_hosts(self):
client = self.session.client('azure.mgmt.managementgroups.ManagementGroupsAPI')
info = client.management_groups.get(
self.management_group_id, expand='children', recurse=True)
self._add_subscription_hosts_from_info(info)
def _add_subscription_hosts_from_info(self, info):
if info.type == SUBSCRIPTION_TYPE:
sub_id = info.name # The 'name' field of child info is the subscription id
self.add_subscription_host(
ManagementGroupDeployment.sub_name_to_deployment_name(info.display_name),
{
ENV_SUB_ID: sub_id,
ENV_CONTAINER_EVENT_QUEUE_NAME: 'c7n-{}'.format(info.name[-4:])
},
)
elif info.type == MANAGEMENT_GROUP_TYPE and info.children:
for child in info.children:
self._add_subscription_hosts_from_info(child)
@staticmethod
def sub_name_to_deployment_name(sub_name):
# Deployment names must use only lower case alpha numeric characters, -, _, and .
# They must also start/end with an alpha numeric character
return re.sub(r'[^A-Za-z0-9-\._]+', '-', sub_name).strip('-_.').lower()
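        # e.g. 'My Subscription (Dev)!' -> 'my-subscription-dev'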
@click.group()
@click.option('--deployment-name', '-d', default='cloud-custodian')
@click.option('--deployment-namespace', '-s', default='cloud-custodian')
@click.option('--image-repository')
@click.option('--image-tag')
@click.option('--image-pull-policy')
@click.option('--dry-run/--no-dry-run', default=False)
def cli(deployment_name, deployment_namespace, image_repository='', image_tag='',
image_pull_policy='', dry_run=False):
pass
@cli.command('subscription')
@click.option('--name', '-n', required=True)
@click.option('--env', '-e', type=click.Tuple([str, str]), multiple=True)
@click.pass_context
class SubscriptionDeploymentCommand(SubscriptionDeployment):
pass
@cli.command('management_group')
@click.pass_context
@click.option('--management-group-id', '-m', required=True)
@click.option('--env', '-e', type=click.Tuple([str, str]), multiple=True)
class ManagementGroupDeploymentCommand(ManagementGroupDeployment):
pass
if __name__ == '__main__':
cli()
# ===== xframes/traced_object.py (cchayden/xframes, Apache-2.0 / BSD-3-Clause) =====
"""
Base class for objects that support entry and exit tracing.
"""
import inspect
from sys import stderr
class TracedObject(object):
entry_trace = False
perf_count = None
@classmethod
def _print_stack(cls, stack, args, levels=6):
print >>stderr, 'Enter:', stack[1][3], stack[1][1], stack[1][2]
# print a few frames
print >>stderr, ' ', stack[2][3], stack[2][1], stack[2][2], args
for i in range(3, levels):
if stack[i][3] == '<module>':
break
print >>stderr, ' ', stack[i][3], stack[i][1], stack[i][2]
stderr.flush()
@classmethod
def _print_trace(cls, **kwargs):
""" Explicitly call this to trace a specific function. """
stack = inspect.stack()
cls._print_stack(stack, kwargs, 8)
@classmethod
def _entry(cls, **kwargs):
""" Trace function entry. """
if not cls.entry_trace and not cls.perf_count:
return
stack = inspect.stack()
cls._print_stack(stack, kwargs)
if cls.perf_count is not None:
caller = stack[1]
my_fun = caller[3]
if my_fun not in cls.perf_count:
cls.perf_count[my_fun] = 0
cls.perf_count[my_fun] += 1
@classmethod
def set_trace(cls, entry_trace=None):
cls.entry_trace = cls.entry_trace if entry_trace is None else entry_trace
@classmethod
def set_perf_count(cls, enable=True):
if enable:
cls.perf_count = {}
else:
cls.perf_count = None
@classmethod
def get_perf_count(cls):
return cls.perf_count
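

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module; the Example class
    # and demo() below are hypothetical). A subclass calls cls._entry() at the
    # top of each method it wants traced and counted.
    class Example(TracedObject):
        def work(self, x):
            self._entry(x=x)
            return 2 * x

    def demo():
        TracedObject.set_trace(entry_trace=True)
        TracedObject.set_perf_count(True)
        Example().work(21)
        print >>stderr, TracedObject.get_perf_count()

    demo()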
# ===== hubconf.py (qibaoyuan/fairseq, BSD-3-Clause) =====
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import functools
from fairseq.models import MODEL_REGISTRY
dependencies = [
'regex',
'requests',
'sacremoses',
'sentencepiece',
'subword_nmt',
'torch',
]
for model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name_or_path=model_name,
)
# to simplify the interface we only expose named models
#globals()[model_type] = _cls.from_pretrained
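# Usage sketch via torch.hub (the available entry names depend on what each
# model class registers through hub_models(); the name below is illustrative):
#   import torch
#   model = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de')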
# ===== BiasSVD-kNN based Netease Music Recommender System.py (Coalin/Business-Analytics-Projects, MIT) =====
# -*- coding:utf-8 -*-
from __future__ import (absolute_import, division, print_function, unicode_literals)
import os
import surprise
from surprise import KNNBaseline, Reader
from surprise import Dataset
from surprise import evaluate, print_perf
import csv
from surprise import SVD,SVDpp
from surprise import GridSearch
from surprise import NMF
from pandas import Series
import pandas as pd
from matplotlib import pyplot as plt
if __name__ == '__main__':
csv_reader = csv.reader(open('neteasy_playlist_id_to_name_data.csv',encoding='utf-8'))
id_name_dict = {}
name_id_dict = {}
for row in csv_reader:
id_name_dict[row[0]] = row[1]
name_id_dict[row[1]] = row[0]
csv_reader = csv.reader(open('neteasy_song_id_to_name_data.csv',encoding='utf-8'))
song_id_name_dict = {}
song_name_id_dict = {}
for row in csv_reader:
song_id_name_dict[row[0]] = row[1]
song_name_id_dict[row[1]] = row[0]
file_path = os.path.expanduser('neteasy_playlist_recommend_data.csv')
reader = Reader(line_format='user item rating timestamp', sep=',')
music_data = Dataset.load_from_file(file_path, reader=reader)
music_data.split(n_folds=5)
    print('Building the dataset')
trainset = music_data.build_full_trainset()
param_grid = { 'n_factors':range(10,30,2), 'n_epochs': [10,15,20], 'lr_all': [0.002, 0.005, 0.1],'reg_all': [0.4, 0.6, 0.8]}
param_grid = { 'n_factors':range(2,22,2), 'n_epochs': [10], 'lr_all': [0.1],'reg_all': [0.4]}
param_grid = { 'n_factors':[2], 'n_epochs':range(11), 'lr_all': [0.1],'reg_all': [0.4]}
grid_search = GridSearch(SVDpp, param_grid, measures=['RMSE', 'MAE'])
grid_search.evaluate(music_data)
print(grid_search.best_params['RMSE'])
print(grid_search.best_params['MAE'])
    # start training the model
    print('Start training the model...')
#algo = KNNBaseline()
algo = SVDpp(n_factors=grid_search.best_params['RMSE']['n_factors'],n_epochs=grid_search.best_params['RMSE']['n_epochs'],lr_all=grid_search.best_params['RMSE']['lr_all'],reg_all=grid_search.best_params['RMSE']['reg_all'],verbose=2)
algo=SVDpp()
#algo=SVD()
#algo=SVDpp()
perf = evaluate(algo, music_data, measures=['RMSE', 'MAE'],verbose=1)
print_perf(perf)
#print()
    #print('Predicting for a playlist:')
    #current_playlist_name = list(name_id_dict.keys())[3]
    #print('Playlist name', current_playlist_name)
    #playlist_rid = name_id_dict[current_playlist_name]
    #print('Playlist rid', playlist_rid)
    #playlist_inner_id = algo.trainset.to_inner_uid(playlist_rid)
    #print('Playlist inner id', playlist_inner_id)
    #algo.compute_similarities()
    #playlist_neighbors_inner_ids = algo.get_neighbors(playlist_inner_id, k=10)
    #playlist_neighbors_rids = (algo.trainset.to_raw_uid(inner_id) for inner_id in playlist_neighbors_inner_ids)
    #playlist_neighbors_names = (id_name_dict[rid] for rid in playlist_neighbors_rids)
    #print()
    #print('The 10 playlists closest to playlist "', current_playlist_name, '" are: \n')
    #for playlist_name in playlist_neighbors_names:
    #    print(playlist_name, algo.trainset.to_inner_uid(name_id_dict[playlist_name]))
print()
    print('Predicting for a user:')
    user_inner_id = 300
    print('User inner id', user_inner_id)
    user_rating = trainset.ur[user_inner_id]
    print('Number of songs the user has rated', len(user_rating))
items = map(lambda x:x[0], user_rating)
real_song_id=[]
real_song_name=[]
for song in items:
real_song_id.append(algo.trainset.to_raw_iid(song))
real_song_name.append(song_id_name_dict[algo.trainset.to_raw_iid(song)])
t_l=10
song_list1=list(song_id_name_dict.keys())
rank=[]
for song in song_list1:
rank.append(algo.predict(str(user_inner_id), str(song))[3])
rank=Series(rank)
rank1=rank.sort_values(ascending=False)
predict_song_id=[]
predict_song_name=[]
for i in range(t_l):
predict_song_id.append(song_list1[list(rank1.index)[i]])
predict_song_name.append(song_id_name_dict[song_list1[list(rank1.index)[i]]])
#from pandas import Series
a=Series(real_song_name)
b=Series(predict_song_name)
c=pd.DataFrame({'real':a,'predict':b})
    #t_l=20  # length of the top list
#if len(user_rating)<=t_l:
# pre_song=list(rank1.index[range(t_l)])
# real_song=
# correct=
surprise.dump.dump('./knn_baseline.model', algo=algo)
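    # The dumped model can be reloaded later; surprise.dump.load returns a
    # (predictions, algo) tuple:
    #   _, algo2 = surprise.dump.load('./knn_baseline.model')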
MAE=[]
RMSE=[]
for i in range(10):
MAE.append( grid_search.cv_results['scores'][i]['MAE'])
RMSE.append( grid_search.cv_results['scores'][i]['RMSE'])
x=range(2,22,2)
plt.plot(x,MAE,label='MAE')
plt.plot(x,RMSE,label='RMSE')
plt.legend()
plt.axis([0,22,0.5,1])
x=range(2,22,2)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x,MAE)
ax1.set_ylabel('MAE')
ax1.set_title("MAE & RMSE")
ax1.set_xticks(x)
ax1.set_xlabel('n_factors')
ax1.legend()
ax2 = ax1.twinx() # this is the important function
ax2.plot(x, RMSE, 'r')
ax2.set_ylabel('RMSE')
plt.show()
MAE=[]
RMSE=[]
for i in range(11):
MAE.append( grid_search.cv_results['scores'][i]['MAE'])
RMSE.append( grid_search.cv_results['scores'][i]['RMSE'])
x=range(1,12,1)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x,MAE)
ax1.set_ylabel('MAE')
ax1.set_title("MAE & RMSE")
ax1.set_xticks(x)
ax1.set_xlabel('epoch')
ax1.legend()
ax2 = ax1.twinx() # this is the important function
ax2.plot(x, RMSE, 'r')
ax2.set_ylabel('RMSE')
plt.show()
# ===== pywizlight/tests/test_bulb_socket.py (mikemakaroff/pywizlight, MIT) =====
"""Tests for the Bulb API with a socket."""
from typing import AsyncGenerator
import pytest
from pywizlight import wizlight
from pywizlight.bulblibrary import BulbClass, BulbType, Features, KelvinRange
from pywizlight.tests.fake_bulb import startup_bulb
@pytest.fixture()
async def socket() -> AsyncGenerator[wizlight, None]:
shutdown, port = await startup_bulb(
module_name="ESP10_SOCKET_06", firmware_version="1.25.0"
)
bulb = wizlight(ip="127.0.0.1", port=port)
yield bulb
await bulb.async_close()
shutdown()
@pytest.mark.asyncio
async def test_model_description_socket(socket: wizlight) -> None:
"""Test fetching the model description of a socket is None."""
bulb_type = await socket.get_bulbtype()
assert bulb_type == BulbType(
features=Features(
color=False,
color_tmp=False,
effect=False,
brightness=False,
dual_head=False,
),
name="ESP10_SOCKET_06",
kelvin_range=KelvinRange(max=2700, min=2700),
bulb_type=BulbClass.SOCKET,
fw_version="1.25.0",
white_channels=2,
white_to_color_ratio=20,
)
@pytest.mark.asyncio
async def test_diagnostics(socket: wizlight) -> None:
"""Test fetching diagnostics."""
await socket.get_bulbtype()
diagnostics = socket.diagnostics
assert diagnostics["bulb_type"]["bulb_type"] == "SOCKET"
assert diagnostics["history"]["last_error"] is None
assert diagnostics["push_running"] is False
@pytest.mark.asyncio
async def test_supported_scenes(socket: wizlight) -> None:
"""Test supported scenes."""
assert await socket.getSupportedScenes() == []
# ===== servidor/utils/podcasts/podcasts.py (UNIZAR-30226-2020-01/backend_django, MIT) =====
# from __future__ import print_function
# import sys
# import getpass
import os
import requests
import json
# from set_credentials import the_secret_function # borrar esta linea, es solo para el hello world
# Class needed to return the trending-podcasts data through the REST API
class TrendingPodcasts(object):
def __init__(self, **kwargs):
for field in ('id', 'title', 'publisher', 'image', 'total_episodes', 'description', 'rss', 'language'):
setattr(self, field, kwargs.get(field, None))
#---------------------------------------------------#
#----- Everything below belongs to the listennotes api ------#
# To be able to use it we must:
#  -Show the ListenAPI logo whenever the podcast search is used
#  -Not store any podcast in the DB
#  -It allows 10000 requests per month
class Podcasts_api:
def __init__(self, url = 'https://listen-api.listennotes.com/api/v2', key = 'COMPLETAME_PORFA'):
self.url = url
self.key = os.getenv('LISTENNOTES_KEY')
self.headers = {
'X-ListenAPI-Key' : self.key
}
    # There are many parameters; for now I think the most important ones are the following
    # -query: podcast name (required)
    # -genres: list of listennotes genre ids
    # -type: episode, podcast, curated (default: episode)
    # -language: podcast language (default: all languages)
    # -sort_by_date: whether to show the podcasts sorted by date (0 = NO, sorted by relevance)
    def search(self, query, genres=None, type='podcast', language='Spanish', sort_by_date=0):
        # Contains the parameters for the podcast search
querystring = { 'q': query, 'genre_ids': genres,'type': type, 'language': language,
'sort_by_date': sort_by_date
}
        # /search must be appended so that the url is correct
response = requests.get(self.url + '/search', headers=self.headers, params=querystring)
#print(response.headers['X-ListenAPI-Usage'])
if response.status_code != 200:
return 'ERROR'
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
        return response.json()  # TODO: can be finished once the API KEY is available
    # Returns the best podcasts according to the parameters
    # -genre_id: genre of the podcasts. More info: get_genres()
    # -region: region of the podcast. More info: get_regions()
def get_bestpodcast(self, genre_id = None , region='es' ):
querystring = {
'genre_id': genre_id, 'region': region
}
response = requests.get(self.url + '/best_podcasts', headers=self.headers, params=querystring)
        # Show the remaining requests
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
if response.status_code != 200:
return 'ERROR'
return response.json()['podcasts']
    # Given an id, returns ALL the information about a podcast, in JSON format
    # It can only return 10 episodes
    # -id: Id of the podcast to look up
    # -sort: recent_first || oldest_first (default: recent_first)
def get_detailedInfo_podcast(self, id, sort='recent_first'):
        querystring = {
            'id': id,
            'sort': sort
        }
response = requests.get(self.url + '/podcasts/'+id, headers=self.headers, params=querystring)
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
if response.status_code != 200:
return 'ERROR'
return response.json()
    # Returns ALL the episodes of a podcast
    # -id: Id of the podcast to look up
    # -sort: recent_first || oldest_first (default: recent_first)
def get_allEpisodes(self, id, sort='oldest_first'):
querystring = {
'id': id,
'sort': sort
}
response = requests.get(self.url + '/podcasts/'+id, headers=self.headers, params=querystring)
if response.status_code != 200:
return 'ERROR', 'ERROR'
podcast = response.json()
result = episodes = response.json()["episodes"]
veces = 0
        while 1 <= len(episodes) <= 10 and veces < 3:
veces+=1
pub_date = response.json()["next_episode_pub_date"]
querystring = {
'id': id,
'next_episode_pub_date': pub_date,
'sort': sort
}
response = requests.get(self.url + '/podcasts/'+id, headers=self.headers, params=querystring)
episodes = response.json()["episodes"]
result += episodes
# print(episodes)
# print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
return podcast, result
    # Given an id, returns ALL the information about an episode, in JSON format
def get_detailedInfo_episode(self, id):
querystring = {
'id': id
}
response = requests.get(self.url + '/episodes/'+id, headers=self.headers, params=querystring)
if response.status_code != 200:
return 'ERROR'
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
return response.json()
    # Returns all the genres a podcast can belong to
def get_genres(self):
response = requests.get(self.url + '/genres', headers=self.headers)
if response.status_code != 200:
return 'ERROR'
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
return response.json()['genres']
    # Returns the possible podcast regions as json
def get_regions(self):
response = requests.get(self.url + '/regions', headers=self.headers)
if response.status_code != 200:
return 'ERROR'
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
return response.json()
    # Returns an episode from a random podcast. Needs no parameters
def get_randomEpisode(self):
response = requests.get(self.url + '/just_listen', headers=self.headers)
if response.status_code != 200:
return 'ERROR'
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
return response.json()
    # Suggests search terms, podcast genres and podcasts
    # -query: search term, e.g. star wars. If quoted ("star wars"), the search is literal
    # -show_podcasts: 1 to auto-suggest podcasts (with minimal information). 0 to not suggest.
    # -show_genres: 1 to suggest genres matching 'query'. 0 to not suggest.
def get_suggested_podcasts(self, query, show_podcasts=0, show_genres=0):
querystring = {
'q': query,
'show_podcasts': show_podcasts,
'show_genres' : show_genres
}
response = requests.get(self.url + '/typeahead', headers=self.headers, params=querystring)
if response.status_code != 200:
return 'ERROR'
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
return response.json()
    # Returns the matching podcast and the list of its episodes
def get_by_name(self,name):
result = self.search(query=name, type='podcast', sort_by_date=1)
        # 'results' is the list of podcasts returned in the JSON
if result != 'ERROR':
pod_id = result["results"][0]["id"]
podcast = self.get_detailedInfo_podcast(id=pod_id)
episodes = podcast["episodes"]
#lista_final = [(l["title"], l["audio"]) for l in episodes]
return podcast , episodes
else:
print('ERROR: Podcast not found.')
return None, None
    # Returns a batch of episodes, given a string of comma-separated ids.
    # Avoids making several calls to the api
def get_many_episodes(self, ids):
querystring = {
'ids': ids
}
response = requests.post(self.url + '/episodes', headers=self.headers, data=querystring)
if response.status_code != 200:
return 'ERROR'
return response.json()["episodes"]
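        # Usage sketch (hypothetical ids):
        #   episodes = api.get_many_episodes('id1,id2,id3')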
def get_podcast_recommendation(self, id):
# querystring = {
# 'id': id
# }
response = requests.get(self.url + '/podcasts/' + id + '/recommendations', headers=self.headers)
if response.status_code != 200:
return 'ERROR'
print('DEBUG ----- Usage: ', response.headers['X-ListenAPI-Usage'])
return response.json()["recommendations"]
    # In the end I put this in models as a property
    # converts the listennotes uri of an audio into the uri of the real audio it redirects to with a 302
    # # it will be used in the serializer; the final address may change over time
    # def get_real_uri(self, uri):
    #     return "WORK IN PROGRESS IN PODCASTS.PY"
# To check that it works
if __name__ == '__main__':
the_secret_function()
pd = Podcasts_api()
    # This is a usage example in which:
    # -A podcast is searched for with the search method and the first result is taken (its channel is shown)
    # -Detailed information about that podcast is fetched
    # -Detailed information about each of that podcast's episodes is fetched
result = pd.search(query="La vida moderna", type='podcast', sort_by_date=1)
    # 'results' is the list of podcasts returned in the JSON
pod_id = result["results"][0]["id"]
result = pd.get_detailedInfo_podcast(id=pod_id)
print("Nombre de poscast: " + result["title"])
print("Canal del podcast: " + result["publisher"])
episodes = result["episodes"]
lista_final = [(l["title"], l["audio"]) for l in episodes]
result = json.dumps(lista_final)
parsed = json.loads(result)
    # To access the mp3 file:
    # curl -L -s -w %{url_effective} --request GET <audio_listennotes>
print(json.dumps(parsed, indent=2, sort_keys=True))
# ===== tokyo/cmp_year.py (sken10/covid19, MIT) =====
"""Append dates with the year filled in (for Tokyo Metropolitan Government / status of infected persons in Tokyo).
The Tokyo data carries no year information in its date fields, so this script fills it in.
A Y/M/D-formatted release date, onset date and confirmation date are appended to the end of each record.

Usage
------
To build data/0104_c.csv from data/0104.csv (appends _c to the file name, adds Y/M/D timestamps):
$ cmp_year.py data/0104.csv

Record layout
------------
0: 'リリース日' (release date)
1: '居住地' (residence)
2: '年代' (age group)
3: '性別' (sex)
4: '属性(職業等)' (attributes, e.g. occupation)
5: '渡航歴' (travel history)
6: '接触歴' (contact history)
7: '発症日' (onset date)
8: '確定日' (confirmation date)
9: '重症' (severe)
10: '退院等' (discharged etc.)
--- appended below ---
11: 'リリース日YMD' (release date, Y/M/D)
12: '発症日YMD' (onset date, Y/M/D)
13: '確定日YMD' (confirmation date, Y/M/D)

NOTES
-----
Release dates are assumed to be in ascending order.
It is unclear how the dates change when the table is updated for a reinfection,
so the correct way to handle that case is also unknown.
"""
import sys
import os
import re
from datetime import datetime, timedelta
import csv
def get_month_day(x):
m = re.match(r'(\d+)月(\d+)日', x)
if m:
month = int(m.group(1))
day = int(m.group(2))
else:
month = 0
day = 0
return month, day
def date_str(x):
return x.strftime('%Y/%m/%d')
def complement_year(date_ref, m, d):
""" 年を補完して日付を作る
年が不明の月日をのうち基準日に近いものを選ぶ。
探す範囲は、基準日の年を Y として Y±1 年。
Parameters
----------
date_ref, datetime : 規準日
m, int : 月
d, int : 日
Returns
-------
date, datetime : 年を補完した日付
Notes
-----
基準日として公表日を選ぶと、発症や診断は必ず過去になるので仕様は緩め。
再発のケースの扱いなど不明な点も多いので、とりあえず差が小さいものを
選ぶことにした。年の ambiguity を解決する方法として、基準日の月と目的
の月の大小関係で場合分けすることが多いと思うが、ここでは、候補をいく
つか計算したうえで、一番近いものを選んでいる。仕様との意味的な対応も
よく、if の条件式の設定で悩む事もない。加えて、(見やすさはともかくと
して、)その気になれば一つの式で表現できるという素敵な性質を有している。
"""
    yy = [date_ref.year + a for a in (-1, 0, 1)]  # year candidates
    dd = [datetime(a, m, d) for a in yy]  # corresponding dates
    err_date = [(abs((a - date_ref).days), a) for a in dd]  # (days from the reference date, date)
    err_, date = sorted(err_date)[0]  # head of the ascending sort (smallest difference)
if abs((date - date_ref).days) > 150:
sys.stderr.write('WARNING : date %s - date_ref %s > 150 day \n' % (date, date_ref))
return date
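# Worked example: with date_ref = 2021-01-05 and (m, d) = (12, 28), the candidates
# are 2020-12-28, 2021-12-28 and 2022-12-28; 2020-12-28 is chosen (8 days away
# versus 357 or more).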
def _ymd(date_rel, x):
""" ○月×日 ==> YYYY/MM/DD (YYYY は、date_rel 使って補完)
Parameters
----------
date_rel, datetime : リリース日
x, str: 日付 '○月×日'
Returns
-------
ym, str : 年月日を表す文字列 'YYYY/MM/DD'
"""
m, d = get_month_day(x)
if m != 0:
date = complement_year(date_rel, m, d)
ymd = date_str(date)
else:
ymd = ''
return ymd
def main():
""" *.csv と同じフォルダに *_c.csv を作る。
ファイル名に _c を付加。
各レコードの終端に、Y/M/D 形式の日付情報を追加。
"""
path_src = sys.argv[1]
with open(path_src, encoding='sjis') as csv_source:
buf = [a for a in csv.reader(csv_source)]
    buf[0] = buf[0] + ['リリース日YMD', '発症日YMD', '確定日YMD']  # append the new header columns
    y_rel = 2020  # initial year
    m_prv = None  # month of the previous record
for x in buf[1:]: # skip header
        # release date (detect the new year)
        #
        m_rel, d_rel = get_month_day(x[0])  # month, day of the release date
if m_prv and (m_rel < m_prv):
y_rel = y_rel + 1
print('detect new year %d %d %d ' % (y_rel, m_rel, d_rel))
m_prv = m_rel
date_rel = datetime(y_rel, m_rel, d_rel)
        # appended information
        x.append(date_str(date_rel))    # x[11] release date YMD
        x.append(_ymd(date_rel, x[7]))  # x[12] onset date YMD
        x.append(_ymd(date_rel, x[8]))  # x[13] confirmation date YMD
fn_dir, fn_fn = os.path.split(path_src)
fn_base, fn_ext = os.path.splitext(fn_fn)
path_dst = os.path.join(fn_dir, '%s_c.csv' % (fn_base))
with open(path_dst, 'w', newline='', encoding='sjis') as csv_file:
wr = csv.writer(csv_file)
wr.writerows(buf)
if __name__ == '__main__':
main()
# ===== old_files/PyGEM_postprocess_Analysis_Anna.py (tusharkh/PyGEM-Clone, MIT) =====
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import netCDF4 as nc
from scipy.stats import linregress
import cartopy.crs as ccrs
import cartopy as car
#========== IMPORT INPUT AND FUNCTIONS FROM MODULES ===================================================================
import pygem_input as input
import pygemfxns_postprocessing as post
#%% DATA EXTRACTION
regionO1_number = str(input.rgi_regionsO1[0])
NETfullfilename=(input.output_filepath + ('PyGEM_R'+regionO1_number+ '_ERA-Interim_'+ str((input.startyear)) + '_' +str(input.endyear) + '_1' ) )
# non-elevation-dependent temperature
glac_temp1=pd.read_csv(input.main_directory + '/../ERAInt_Sim_Selection/' + 'RGI_0'
+ str(input.rgi_regionsO1[0]) + '_ERA_Int_Glacier_Temp.csv')
glac_prec1=pd.read_csv(input.main_directory + '/../ERAInt_Sim_Selection/' + 'RGI_0'
+ str(input.rgi_regionsO1[0]) + '_ERA_Int_Glacier_PPT.csv')
output = nc.Dataset(NETfullfilename +'.nc')
# year range
year= range((input.startyear-1),input.endyear)
## Select relevant data
glacier_data = pd.DataFrame(output['glacier_table'][:])
glacier_data_columns = output['glacier_table_header'][:]
lats = glacier_data[2].values.astype(float)
lons = glacier_data[1].values.astype(float)
massbal_monthly = output['massbaltotal_glac_monthly'][:]
volume=output['volume_glac_annual'][:]
RGI=output['glac_idx'][:]
temp=output['temp_glac_monthly'][:]
prec=output['prec_glac_monthly'][:]
glac_elev=glacier_data[17][:].astype(float).values
glac_area=glacier_data[5][:].astype(float).values
#%% Mass balance total over time period, mass balance averaged/yr, rate of change: for rate of change,
# do a yearly average plot for the entire glacier
# do area-weighted mass balance for each region (how to do that?)
# for future SIM: do time slices and do total mass balance/yearly average + everything above for each
# slice
# do temp and ppt plots: for both the temp and ppt over that period as well as the glacier temp and ppt
# that is adjusted for hypsometry to show the difference
# volume change.... area weighted
#%% MASS BALANCE TOTAL AND AVERAGE OVER OBSERVATION PERIOD: MAPS
# total mass balance of each glacier for period of observation (mwe)
massbal_total= massbal_monthly.sum(axis=1)/(massbal_monthly.shape[1]/12)
# total mass balance of each glacier averaged over years of observation (mwea)
massbal_averaged=massbal_total/len(year)
#land definition for plotting
land_50m = car.feature.NaturalEarthFeature('physical', 'land', '50m',
edgecolor='k',
facecolor='none')
# lat/long definition for plot
east = int(round(lons.min())) - 1
west = int(round(lons.max())) + 1
south = int(round(lats.min())) - 1
north = int(round(lats.max())) + 1
xtick = 1
ytick = 1
# define title
title=('R'+str(regionO1_number) + ' ' + str(input.startyear) +'-' + str(input.endyear))
proj_crs = ccrs.PlateCarree()
projection = ccrs.RotatedPole(pole_longitude=40, pole_latitude=37.5)
geo_axes = plt.axes(projection=projection)
# total mass balance map
plt.figure(1)
plt.figure(figsize=(10,10))
ax = plt.axes(projection=projection)
ax.add_feature(land_50m)
scat=ax.scatter(lons, lats, c=massbal_total,transform=proj_crs, cmap='seismic_r', vmin=-4, vmax=4, edgecolors='black')
cbar=plt.colorbar(scat, fraction=0.02, pad=0.04)
plt.axes(projection=projection)
cbar.set_label('Mass Balance mwe')
plt.title('Total Mass Balance '+ title)
# annual mass balance average map
plt.figure(2)
plt.figure(figsize=(10,10))
ax = plt.axes(projection=projection)
ax.add_feature(land_50m)
scat=ax.scatter(lons, lats, c=massbal_averaged,transform=proj_crs, cmap='seismic_r', vmin=-0.15, vmax=0.15)  # edgecolors='black' ?
cbar=plt.colorbar(scat, fraction=0.02, pad=0.04)
plt.axes(projection=projection)
cbar.set_label('Mass Balance mwea')
plt.title('Average Mass Balance '+ title)
#should i make plots of JUST positive and JUST negative? would need edge colors then
#%% LINEAR REGRESSION/SLOPE AND YEARLY PLOTS
# annual mass balance for each glacier for period of observation
massbal_annual=(np.sum(massbal_monthly.reshape(-1,12),axis=1)).reshape(len(massbal_total),len(year))
# annual mass balance for entire region
massbal_annual_total=np.sum(massbal_annual, axis=0)
# linear regression trends for entire period
linreg_info=[('RGIId','slope','incercept','r_val','p_val','std_err')]
slopes=[]
# removes slopes whose significance is below 95%
for i in range(0,len(massbal_total)):
slope, intercept, r_value, p_value, std_err = linregress(year, massbal_annual[i])
RGI=glacier_data[0][i]
if p_value > 0.05:
slope=float('nan')
if glacier_data[13][i] != 0:
appends=(RGI, 'nan','nan','nan','nan','nan')
else:
appends=(RGI, slope, intercept,r_value, p_value, std_err)
linreg_info.append(appends)
slopes.append(slope)
# plot glacier wide mass balance over time period
plt.figure(3)
plt.plot(year,massbal_annual_total)
plt.xlabel('Year')
plt.ylabel('Mass Balance mwe')
plt.title('Region Total Mass Balance' + title)
# plot glacier slopes map for entire period
plt.figure(4)
plt.figure(figsize=(10,10))
ax = plt.axes(projection=projection)
ax.add_feature(land_50m)
ax.scatter(lons,lats,c=[0.8,0.8,0.8], transform=proj_crs) #,edgecolors='black')
scat=ax.scatter(lons, lats, c=slopes,transform=proj_crs, cmap='seismic_r', vmin=-0.2,vmax=0.2)#, edgecolors='black')
cbar=plt.colorbar(scat, fraction=0.02, pad=0.04)
plt.axes(projection=projection)
cbar.set_label('Mass Balance mwea')
plt.title('Rate of Mass Balance Change '+ title)
#%% TEMP & PREC FOR THE REGION, ELEVATION ADJUSTED AND NOT
#convert to float
glac_temp=glac_temp1.iloc[:,1:].astype(float).values
glac_prec=glac_prec1.iloc[:,1:].astype(float).values
#get average temp/prec for region
annual_temp=(np.sum(glac_temp.reshape(-1,12),axis=1).reshape(len(lats),len(year)))/12
annual_prec=np.sum(glac_prec.reshape(-1,12),axis=1).reshape(len(lats),len(year)) #total prec
average_temp=(annual_temp.sum(axis=0))/(len(lats))
average_prec=(annual_prec.sum(axis=0))/(len(lats))
# average temp/prec adjusted for glacier elevation
glac_annual_temp=(np.sum(temp.reshape(-1,12),axis=1).reshape(len(lats),len(year)))/12
glac_annual_prec=np.sum(prec.reshape(-1,12),axis=1).reshape(len(lats),len(year)) #total prec
glac_average_temp=(glac_annual_temp.sum(axis=0))/(len(lats))
glac_average_prec=(glac_annual_prec.sum(axis=0))/(len(lats))
# check offset/diference
temp_diff=average_temp-glac_average_temp
ppt_diff=average_prec-glac_average_prec
# check if offset is appropriate (approx 3.5 degC per 1000 m and a 24.5% ppt decrease)
elev_ave=np.sum(glac_elev)/len(lats)
temp_offset=(elev_ave/1000)*3.5
ppt_offset=(((elev_ave/1000)*24.5)/100)*(np.sum(average_prec, axis=0)/len(average_prec))
plt.figure(5)
plt.plot(year,average_temp)
plt.plot(year, glac_average_temp)
plt.plot(year, temp_diff, color=[0.5,0.5,0.5])
plt.xlabel('Year')
plt.ylabel('Tempearture (C)')
plt.title('Region-Average Temperature ' + title)
plt.legend(['Not Adjusted','Adjusted', 'Difference'])
plt.figure(6)
plt.plot(year,average_prec)
plt.plot(year, glac_average_prec)
plt.plot(year, ppt_diff, color=[0.5,0.5,0.5])
plt.xlabel('Year')
plt.ylabel('Precipitation (m)')
plt.title('Region-Average Annual Precipitation ' + title)
plt.legend(['Not Adjusted','Adjusted','Difference'])
print('expected temp offset =' + str(temp_offset) + 'C')
print('expected ppt offset= ' + str(ppt_offset) + 'm')
#this will probably change with future stuff because elevation will not be constant
#%% AREA WEIGHTED AVERAGE
# total glacier area
area_total=np.sum(glac_area, axis=0)
# area percentage for each glacier
area_prcnt=[x/area_total for x in glac_area]
# area weighted glacier average
glac_area_w=np.sum(massbal_total*area_prcnt, axis=0)
# not weighted glacier average
glac_area_nw=(np.sum(massbal_total, axis=0))/len(lats)
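# e.g. two glaciers with areas 2 and 8 km2 and total balances -1 and +0.5 mwe give
# area_prcnt = [0.2, 0.8] and an area-weighted total of -1*0.2 + 0.5*0.8 = +0.2 mwe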
year_weight=[]
# area weighted annual values
for y in range(0,len(year)):
yeararea=pd.Series(massbal_annual[:,y]*area_prcnt)
year_weight.append(yeararea)
df_area=(pd.DataFrame(year_weight))
areaweight_annual=df_area.T
# final area weighted annual values
areaweight_total=np.sum(areaweight_annual, axis=0)
plt.figure(7)
plt.plot(year, areaweight_total)
plt.xlabel('Year')
plt.ylabel('Mass Balance (mwe)')
plt.title('Area Weighted Mass Balance ' + title)
#%% VOLUME CHANGE ANALYSIS, INCLUDING AREA-WEIGHTED
#years needed for volume
year2= range((input.startyear-2),input.endyear)
# total volume change
volume_total=volume.sum(axis=0)
volume_perglac=volume_total/len(lats)
# volume percentage change
volume_first=volume_total[0]
volume_prcnt=volume_total/volume_first
# area weighted volume change
vol_weight=[]
for y in range(0,(len(year)+1)):
volarea=pd.Series(volume[:,y]*area_prcnt)
vol_weight.append(volarea)
df_vol=(pd.DataFrame(vol_weight))
volweight_annual=df_vol.T
volweight_total=np.sum(volweight_annual, axis=0)
# flag years where the relative volume has been stable for five consecutive
# annual steps (each change <= 0.0001)
for i in range(5, len(year2)):
    volchanges = [abs(volume_prcnt[i - k] - volume_prcnt[i - k + 1]) for k in range(1, 6)]
    if all(change <= 0.0001 for change in volchanges):
        print(year[i])
volume_change=[]
for x in range(0,len(year2)-1):
yr=x+1
change=(volume_total[yr]-volume_total[x])
volume_change.append(change)
plt.figure(8)
plt.plot(year2,volume_total)
plt.title('Total Glacier Volume ' + title)
plt.xlabel('Year')
plt.ylabel('Volume km3')
plt.figure(9)
plt.plot(year2,volume_prcnt)
plt.title('% Glacier Volume Change ' + title)
plt.xlabel('Year')
plt.ylabel('Percent (%)')
plt.figure(10)
plt.plot(year,volume_change)
plt.title('Total Volume Change ' + title)
plt.xlabel('Year')
plt.ylabel('Volume km3')
plt.figure(11)
plt.plot(year2, volweight_total)
plt.title('Area Weighted Volume ' + title)
plt.xlabel('Year')
plt.ylabel('Volume km3')
#%% FUTURE SIMULATION SLICE ANALYSIS (total, annual average, slope and area-weighted reg.total)
#28 year slices if 2017-2100: 2017-2044, 2045-2072, 2073-2100 (inclusive)
# create three slices
yearslice1=range(2017,2045)
yearslice2=range(2045,2073)
yearslice3=range(2073,2101)
rangeslice1=range(0,len(yearslice1))
rangeslice2=range(len(yearslice1),(len(yearslice1)+len(yearslice2)))
rangeslice3=range((len(yearslice1)+len(yearslice2)),(len(yearslice1)+len(yearslice2)+ len(yearslice3)))
# annual glacier mass balance for each time slice
massbal_annual1=massbal_annual[:,rangeslice1]
massbal_annual2=massbal_annual[:,rangeslice2]
massbal_annual3=massbal_annual[:,rangeslice3]
# total glacier mass balance for each glacier for each time slice
massbal_total1=np.sum(massbal_annual1, axis=1)
massbal_total2=np.sum(massbal_annual2, axis=1)
massbal_total3=np.sum(massbal_annual3, axis=1)
title2= ('R'+str(regionO1_number) + ' ')
# area weighted total mass balance
glac_area_w1=np.sum(massbal_total1*area_prcnt, axis=0)
glac_area_w2=np.sum(massbal_total2*area_prcnt, axis=0)
glac_area_w3=np.sum(massbal_total3*area_prcnt, axis=0)
# area weighted annual mass balance for each glacier
year_weight1=[]
year_weight2=[]
year_weight3=[]
# area weighted annual values
for y in range(0,len(yearslice1)):
yeararea1=pd.Series(massbal_annual1[:,y]*area_prcnt)
year_weight1.append(yeararea1)
yeararea2=pd.Series(massbal_annual2[:,y]*area_prcnt)
year_weight2.append(yeararea2)
yeararea3=pd.Series(massbal_annual3[:,y]*area_prcnt)
year_weight3.append(yeararea3)
df_area1=(pd.DataFrame(year_weight1))
areaweight_annual1=df_area1.T
df_area2=(pd.DataFrame(year_weight2))
areaweight_annual2=df_area2.T
df_area3=(pd.DataFrame(year_weight3))
areaweight_annual3=df_area3.T
# final area weighted annual values for each time slice
areaweight_total1 = np.sum(areaweight_annual1, axis=0)
areaweight_total2 = np.sum(areaweight_annual2, axis=0)
areaweight_total3 = np.sum(areaweight_annual3, axis=0)
plt.figure(12, figsize=(10,10))
ax = plt.axes(projection=projection)
ax.add_feature(land_50m)
scat=ax.scatter(lons, lats, c=massbal_total1,transform=proj_crs, cmap='seismic_r', vmin=-4, vmax=4, edgecolors='black')
cbar=plt.colorbar(scat, fraction=0.02, pad=0.04)
plt.axes(projection=projection)
cbar.set_label('Mass Balance mwe')
plt.title('Total Mass Balance '+ title2 + '2017-2044')
plt.figure(13, figsize=(10,10))
ax = plt.axes(projection=projection)
ax.add_feature(land_50m)
scat=ax.scatter(lons, lats, c=massbal_total2,transform=proj_crs, cmap='seismic_r', vmin=-4, vmax=4, edgecolors='black')
cbar=plt.colorbar(scat, fraction=0.02, pad=0.04)
plt.axes(projection=projection)
cbar.set_label('Mass Balance mwe')
plt.title('Total Mass Balance '+ title2 + '2045-2072')
plt.figure(14, figsize=(10,10))
ax = plt.axes(projection=projection)
ax.add_feature(land_50m)
scat=ax.scatter(lons, lats, c=massbal_total3,transform=proj_crs, cmap='seismic_r', vmin=-4, vmax=4, edgecolors='black')
cbar=plt.colorbar(scat, fraction=0.02, pad=0.04)
plt.axes(projection=projection)
cbar.set_label('Mass Balance mwe')
plt.title('Total Mass Balance '+ title2 + '2073-2100')
#do total mass balance and annual average mass balance for each glacier
# and total area weighted
#%% OLD CODE
#%%Linear regressions for total dataset
#want linear regressions(with scipy) and then remove slopes based on high p values
year2=list(range(2015,2100))
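# reshape(-1, 12) groups the monthly series into rows of 12 months; summing over
# axis=1 then gives one annual total, reshaped to (glacier, year)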
massbal_annual=(np.sum(massbal_total.reshape(-1,12),axis=1)).reshape(len(lats),len(year))
linreg_info=[('RGIId','slope','intercept','r_val','p_val','std_err')]
slopes=[]
# removes slopes that have a statsig below 95%
for i in range(0,len(lats)):
slope, intercept, r_value, p_value, std_err = linregress(year2, massbal_annual[i])
RGI=glacier_data[0][i]
if p_value > 0.05:
slope=float('nan')
if glacier_data[13][i] != 0:
appends=(RGI, 'nan','nan','nan','nan','nan')
else:
appends=(RGI, slope, intercept,r_value, p_value, std_err)
linreg_info.append(appends)
slopes.append(slope)
#%% slope plotting
east = int(round(lons.min())) - 1
west = int(round(lons.max())) + 1
south = int(round(lats.min())) - 1
north = int(round(lats.max())) + 1
xtick = 1
ytick = 1
g=plt.figure(1)
# Plot regional maps
post.plot_latlonvar(lons, lats, slopes,-4.5, 1.5, 'modelled linear rates of SMB Change', 'longitude [deg]',
'latitude [deg]', 'jet_r', east, west, south, north, xtick, ytick)
land_50m = car.feature.NaturalEarthFeature('physical', 'land', '50m',
edgecolor='k',
facecolor='none')
#pp=plt.figure(1)
plt.figure(figsize=(10,10))
ax = plt.axes(projection=car.crs.PlateCarree())
#ax.set_global()
#ax.coastlines()
ax.add_feature(land_50m)
scat1=ax.scatter(lons,lats, c='none', edgecolors='black')
scat=ax.scatter(lons, lats, c=slopes, cmap='winter_r', edgecolors='black')
#scat.set_clim(-2.5,2.5)
cbar=plt.colorbar(scat, fraction=0.02, pad=0.04)
cbar.set_label('Rate of Change mwea')
plt.title('Mass Change/Yr, 1985-2015')
# NEED TO CONSIDER WHAT COLORPLOTS TO USE; DO I WANT TO DISTINGUISH BETWEEN LOW CHANGE AND NAN?
#%%
east = int(round(lons.min())) - 1
west = int(round(lons.max())) + 1
south = int(round(lats.min())) - 1
north = int(round(lats.max())) + 1
xtick = 1
ytick = 1
g=plt.figure(1)
# Plot regional maps
land_50m = car.feature.NaturalEarthFeature('physical', 'land', '50m',
edgecolor='k',
facecolor='none')
#pp=plt.figure(1)
plt.figure(figsize=(10,10))
ax = plt.axes(projection=car.crs.PlateCarree())
#ax.set_global()
#ax.coastlines()
ax.add_feature(land_50m)
scat1=ax.scatter(lons,lats, c='none', edgecolors='black')
scat=ax.scatter(lons, lats, c=massbal_total_mwea, cmap='jet_r', edgecolors='black')
#scat.set_clim(-2.5,2.5)
cbar=plt.colorbar(scat, fraction=0.02, pad=0.04)
plt.title('Total Mass Balance Change 1985-2015')
cbar.set_label('Mass change mwe')
#%% Analysis of volume data
volume_total=volume.sum(axis=0)
volume_first=volume_total[0]
volume_prcnt=volume_total/volume_first
plt.plot(year,volume_total)
plt.title('Total Volume 2016-2100')
plt.xlabel('Year')
plt.ylabel('Volume m3')
plt.plot(year,volume_prcnt)
plt.title('Volume % Change 2016-2100')
plt.xlabel('Year')
plt.ylabel('%')
for i in range(5,85): #this code really needs to be improved... need to incorporate another for loop
test=i-1
test2=i-2
test3=i-3
test4=i-4
test5=i-5
volchange1=abs(volume_prcnt[test]-volume_prcnt[i])
volchange2=abs(volume_prcnt[test2]-volume_prcnt[test])
volchange3=abs(volume_prcnt[test3]-volume_prcnt[test2])
volchange4=abs(volume_prcnt[test4]-volume_prcnt[test3])
volchange5=abs(volume_prcnt[test5]-volume_prcnt[test4])
    # stop condition: all five most recent annual changes are below the tolerance
    if max(volchange1, volchange2, volchange3, volchange4, volchange5) <= 0.0001:
        print(year[i])
volume_change=[]
for x in range(0,85):
yr=x+1
change=(volume_total[yr]-volume_total[x])
volume_change.append(change)
plt.plot(year2,volume_change)
plt.title('Total Volume Change 2016-2100')
plt.xlabel('Year')
plt.ylabel('Volume m3')
#%%Analysis of climate data
temp_annual=((np.sum(temp.reshape(-1,12),axis=1)).reshape(568,85))/12
temp_ave=((temp_annual.sum(axis=0)))/568
plt.plot(year2,temp_ave)
plt.title('Era-Int Temp Simulation 2016-2100 [Years:1990-2000]')
plt.xlabel('Year')
plt.ylabel('T(degC)')
prec_annual=((np.sum(prec.reshape(-1,12),axis=1)).reshape(568,85))/12
prec_ave=((prec_annual.sum(axis=0)))/568
plt.plot(year2, prec_ave)
plt.title('Era-Int Prec Simulation 2016-2100 [Years:1990-2000]')
plt.xlabel('Year')
plt.ylabel('Prec(mm)')
#%% For Plotting
# Set extent
east = int(round(lons.min())) - 1
west = int(round(lons.max())) + 1
south = int(round(lats.min())) - 1
north = int(round(lats.max())) + 1
xtick = 1
ytick = 1
g=plt.figure(1)
# Plot regional maps
post.plot_latlonvar(lons, lats, massbal_total_mwea, -4.5, 1.5, 'Modeled mass balance [mwea]', 'longitude [deg]',
'latitude [deg]', 'jet_r', east, west, south, north, xtick, ytick)
#plt.savefig(input.output_filepath+'/../../CH_1/iceland_2', dpi=100)
# --- source: bvvarun1992/Behavioral-Cloning :: model.py @ fe69a0f1 (MIT) ---
import csv
import numpy as np
import cv2
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
# Reading image paths and steering angles from the CSV driving log
samples = []
with open('/opt/carnd_p3/data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
## Skipping first line to avoid reading headers ##
next(reader)
##################################################
for line in reader:
samples.append(line)
# Splitting data for training and validating
train_samples, validation_samples = train_test_split(samples, test_size=0.15)
# Function to yield a generator for training and validating the neural network
def generator(samples, batch_size = 32):
while True:
shuffle(samples)
for offset in range(0, len(samples), batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for sample in batch_samples:
for i in range(3):
file_name = '/opt/carnd_p3/data/IMG/'+sample[i].split('/')[-1]
image = cv2.cvtColor(cv2.imread(file_name), cv2.COLOR_BGR2RGB)
images.append(image)
flip_image = np.fliplr(image)
images.append(flip_image)
if (i==0):
angle = float(sample[3])
flip_angle = -1.0*float(sample[3])
elif (i==1):
angle = float(sample[3]) + 0.2
flip_angle = -1.0*(float(sample[3]) + 0.2)
elif (i==2):
angle = float(sample[3]) - 0.2
flip_angle = -1.0*(float(sample[3]) - 0.2)
angles.append(angle)
angles.append(flip_angle)
x_train = np.array(images)
y_train = np.array(angles)
yield x_train, y_train
# Setting batch size
batch_size = 32
# Creating generators for training and validating
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
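# Optional sanity check (a sketch, commented out so training is unaffected): each
# csv row yields six images from the generator (center/left/right cameras, each
# plus a horizontal flip), so one batch of 32 rows produces 192 images of shape
# (160, 320, 3).
# x_batch, y_batch = next(train_generator)
# assert x_batch.shape[1:] == (160, 320, 3) and len(x_batch) == len(y_batch)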
# Building neural network model
from keras.models import Sequential
from keras.layers import Flatten, Dense, Activation
from keras.layers import Lambda, Cropping2D
from keras.layers import Convolution2D
from keras.layers import Dropout
model = Sequential()
# Normalizing
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))
# Cropping top portion of the images
model.add(Cropping2D(cropping=((70,25), (0,0))))
# 5x5 layer convolutions
model.add(Convolution2D(24,5,5,subsample=(2,2)))
model.add(Activation('elu'))
model.add(Convolution2D(24,5,5,subsample=(2,2)))
model.add(Activation('elu'))
model.add(Convolution2D(48,5,5,subsample=(2,2)))
model.add(Activation('elu'))
# 3x3 layer convolutions
model.add(Convolution2D(64,3,3))
model.add(Activation('elu'))
model.add(Convolution2D(64,3,3))
model.add(Activation('elu'))
# Fully connected layers
model.add(Flatten())
model.add(Dense(100, activation = 'elu'))
model.add(Dropout(0.3))
model.add(Dense(50, activation = 'elu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation = 'elu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator,
                    steps_per_epoch = int(np.ceil(len(train_samples)/batch_size)),
                    validation_data = validation_generator,
                    validation_steps = int(np.ceil(len(validation_samples)/batch_size)),
                    shuffle = True,
                    epochs=5, verbose=1)
model.save('Model_run2.h5')
print('Model saved')
model.summary()
| 31.577236 | 82 | 0.595005 | 475 | 3,884 | 4.757895 | 0.309474 | 0.067257 | 0.042478 | 0.055752 | 0.265487 | 0.202655 | 0.202655 | 0.192478 | 0.192478 | 0.177434 | 0 | 0.040487 | 0.281411 | 3,884 | 122 | 83 | 31.836066 | 0.769258 | 0.111998 | 0 | 0.141026 | 0 | 0 | 0.033402 | 0.016849 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012821 | false | 0 | 0.128205 | 0 | 0.141026 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
994230f6b4ebf07a0d7cc91b97f4dc1767bdae63 | 714 | py | Python | setup.py | teriyakichild/python-zcli | 43538a8e02a18d3e415d98b2cb1114d074e44a4f | [
"Apache-2.0"
] | null | null | null | setup.py | teriyakichild/python-zcli | 43538a8e02a18d3e415d98b2cb1114d074e44a4f | [
"Apache-2.0"
] | null | null | null | setup.py | teriyakichild/python-zcli | 43538a8e02a18d3e415d98b2cb1114d074e44a4f | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
from sys import path
path.insert(0, '.')
NAME = "zcli"
if __name__ == "__main__":
setup(
name = NAME,
version = "0.1.0",
author = "Tony Rogers",
author_email = "tony.rogers@rackspace.com",
url = "https://github.com/teriyakichild/python-zcli",
license = 'internal use',
packages = [NAME],
package_dir = {NAME: NAME},
description = "Zabbix CLI.",
install_requires = ['requests',
'argparse',
'pyzabbix',
'ConfigParser'],
entry_points={
'console_scripts': [ 'zcli = zcli:cli' ],
}
)
# --- source: jlconlin/PhDThesis :: Code/branches/Pre-Prospectus/python/SourceFiles/Geometry.py @ 8e704613 (MIT) ---
__id__ = "$Id: Geometry.py 51 2007-04-25 20:43:07Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 51 $"
__date__ = "$Date: 2007-04-25 14:43:07 -0600 (Wed, 25 Apr 2007) $"
import scipy
import Errors
class Geometry(object):
"""
Geometry is a class to hold information about the geometry of the problem.
"""
def __init__(self, bins, range):
"""
        bins: A tuple; each entry is the number of spatial bins in that
            dimension (up to 3). For a 1-D problem a plain int is accepted.
        range: A list of [min, max] pairs; the limits of the spatial geometry in
            each dimension.
"""
try:
self.dimension = len(bins)
except TypeError:
self.dimension = 1
if self.dimension != 1:
raise Errors.GeometryError(
"Geometry currently only suppors 1-D geometry")
elif self.dimension != len(range):
raise Errors.GeometryError(
"Bins and Range must have same degree")
else:
self.bins = bins
self.range = range
self.edges = scipy.zeros(self.bins+1)
self.centers = scipy.zeros(self.bins) # Bin centers
width = self.max - self.min
for i in xrange(self.bins+1):
edge = self.min + i*(width/float(self.bins))
self.edges[i] = edge
for i in xrange(len(self.centers)):
self.centers[i] = self.edges[i] + (self.edges[i+1] - self.edges[i])/2.0
def __repr__(self):
"""
"""
return "bins: %s, range: %s" %(self.bins, self.range)
def _getMinX(self):
return min(self.range[0])
def _getMaxX(self):
return max(self.range[0])
min = property(fget=_getMinX)
max = property(fget=_getMaxX)
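# A minimal usage sketch (hypothetical values): a 1-D geometry with 4 bins over
# [0, 1] yields edges [0.0, 0.25, 0.5, 0.75, 1.0] and bin centers
# [0.125, 0.375, 0.625, 0.875].
#   g = Geometry(4, [[0.0, 1.0]])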
# --- source: ehossack/django-backblaze-b2 :: django_backblaze_b2/storage.py @ 556777a7 (BSD-2-Clause) ---
from datetime import datetime
from hashlib import sha3_224
from logging import getLogger
from typing import IO, Any, Callable, Dict, List, Optional, Tuple, cast
from b2sdk.account_info import InMemoryAccountInfo
from b2sdk.account_info.abstract import AbstractAccountInfo
from b2sdk.account_info.sqlite_account_info import SqliteAccountInfo
from b2sdk.api import B2Api, Bucket
from b2sdk.cache import AuthInfoCache
from b2sdk.exception import FileOrBucketNotFound, NonExistentBucket
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import File
from django.core.files.storage import Storage
from django.utils.deconstruct import deconstructible
from typing_extensions import TypedDict
from django_backblaze_b2.b2_file import B2File
from django_backblaze_b2.cache_account_info import DjangoCacheAccountInfo
from django_backblaze_b2.options import (
BackblazeB2StorageOptions,
DjangoCacheAccountInfoConfig,
SqliteAccountInfoConfig,
getDefaultB2StorageOptions,
)
logger = getLogger("django-backblaze-b2")
class _BaseFileInfoDict(TypedDict):
fileId: str
fileName: str
fileInfo: dict
class _FileInfoDict(_BaseFileInfoDict, total=False):
size: int
uploadTimestamp: int
contentType: str
class B2FileInformationNotAvailableException(Exception):
...
@deconstructible
class BackblazeB2Storage(Storage):
"""Storage class which fulfills the Django Storage contract through b2 apis"""
def __init__(self, **kwargs):
opts = self._getDjangoSettingsOptions(kwargs.get("opts", {}))
if "opts" in kwargs:
self._validateOptions(kwargs.get("opts"))
_merge(opts, kwargs.get("opts", {}))
logOpts = opts.copy()
logOpts.update({"application_key_id": "<redacted>", "application_key": "<redacted>"})
logger.debug(f"Initializing {self.__class__.__name__} with options {logOpts}")
self._bucketName = opts["bucket"]
self._defaultFileMetadata = opts["defaultFileInfo"]
self._forbidFilePropertyCaching = opts["forbidFilePropertyCaching"]
self._authInfo = dict(
[(k, v) for k, v in opts.items() if k in ["realm", "application_key_id", "application_key"]]
)
self._allowFileOverwrites = opts["allowFileOverwrites"]
self._getAccountInfo = self._createAccountInfoCallable(opts)
logger.info(f"{self.__class__.__name__} instantiated to use bucket {self._bucketName}")
if opts["authorizeOnInit"]:
logger.debug(f"{self.__class__.__name__} authorizing")
self.b2Api
if opts["validateOnInit"]:
self._getOrCreateBucket(opts["nonExistentBucketDetails"])
def _getDjangoSettingsOptions(self, kwargOpts: Dict) -> BackblazeB2StorageOptions:
"""Setting terminology taken from:
https://b2-sdk-python.readthedocs.io/en/master/glossary.html#term-application-key-ID
kwargOpts available for subclasses
"""
from django.conf import settings
if not hasattr(settings, "BACKBLAZE_CONFIG"):
raise ImproperlyConfigured("add BACKBLAZE_CONFIG dict to django settings")
if "application_key_id" not in settings.BACKBLAZE_CONFIG or "application_key" not in settings.BACKBLAZE_CONFIG:
raise ImproperlyConfigured(
"At minimum BACKBLAZE_CONFIG must contain auth 'application_key' and 'application_key_id'"
f"\nfound: {settings.BACKBLAZE_CONFIG}"
)
self._validateOptions(settings.BACKBLAZE_CONFIG)
opts = getDefaultB2StorageOptions()
opts.update(settings.BACKBLAZE_CONFIG) # type: ignore
return opts
def _validateOptions(self, options: Dict) -> None:
unrecognizedOptions = [k for k in options.keys() if k not in getDefaultB2StorageOptions().keys()]
if unrecognizedOptions:
raise ImproperlyConfigured(f"Unrecognized options: {unrecognizedOptions}")
def _createAccountInfoCallable(self, opts: BackblazeB2StorageOptions) -> Callable[[], AbstractAccountInfo]:
if (
not isinstance(opts["accountInfo"], dict)
or "type" not in opts["accountInfo"]
or opts["accountInfo"]["type"] not in ["memory", "sqlite", "django-cache"]
):
raise ImproperlyConfigured(
(f"accountInfo property must be a dict with type found in options.py, was {opts['accountInfo']}")
)
if opts["accountInfo"]["type"] == "django-cache":
logger.debug(f"{self.__class__.__name__} will use {DjangoCacheAccountInfo.__name__}")
return lambda: DjangoCacheAccountInfo(
cacheName=cast(DjangoCacheAccountInfoConfig, opts["accountInfo"]).get("cache", "django-backblaze-b2")
)
elif opts["accountInfo"]["type"] == "memory":
logger.debug(f"{self.__class__.__name__} will use {InMemoryAccountInfo.__name__}")
return lambda: InMemoryAccountInfo()
elif opts["accountInfo"]["type"] == "sqlite":
logger.debug(f"{self.__class__.__name__} will use {SqliteAccountInfo.__name__}")
return lambda: SqliteAccountInfo(
file_name=cast(SqliteAccountInfoConfig, opts["accountInfo"])["databasePath"]
)
raise ImproperlyConfigured()
@property
def b2Api(self) -> B2Api:
if not hasattr(self, "_b2Api"):
self._accountInfo = self._getAccountInfo()
self._b2Api = B2Api(account_info=self._accountInfo, cache=AuthInfoCache(self._accountInfo))
self._b2Api.authorize_account(**self._authInfo)
return self._b2Api
@property
def bucket(self) -> Bucket:
if not hasattr(self, "_bucket"):
self._getOrCreateBucket()
return self._bucket
def _getOrCreateBucket(self, newBucketDetails=None) -> None:
try:
self._bucket = self.b2Api.get_bucket_by_name(self._bucketName)
except NonExistentBucket as e:
if newBucketDetails is not None:
logger.debug(f"Bucket {self._bucketName} not found. Creating with details: {newBucketDetails}")
if "bucket_type" not in newBucketDetails:
newBucketDetails["bucket_type"] = "allPrivate"
self._bucket = self.b2Api.create_bucket(name=self._bucketName, **newBucketDetails)
else:
raise e
logger.debug(f"Connected to bucket {self._bucket.as_dict()}")
def _refreshBucket(self) -> None:
self.b2Api.session.cache.clear()
self._getOrCreateBucket()
def _open(self, name: str, mode: str) -> File:
return B2File(
name=name, bucket=self.bucket, fileMetadata=self._defaultFileMetadata, mode=mode, sizeProvider=self.size,
)
def _save(self, name: str, content: IO[Any]) -> str:
"""
Save and retrieve the filename.
If the file exists it will make another version of that file.
"""
return B2File(
name=name, bucket=self.bucket, fileMetadata=self._defaultFileMetadata, mode="w", sizeProvider=self.size,
).saveAndRetrieveFile(content)
def path(self, name: str) -> str:
return name
def delete(self, name: str) -> None:
fileInfo = self._fileInfo(name)
if fileInfo:
logger.debug(f"Deleting file {name} id=({fileInfo['fileId']})")
self.b2Api.delete_file_version(file_id=fileInfo["fileId"], file_name=name)
if self._cache:
self._cache.delete(self._fileCacheKey(name))
else:
logger.debug("Not found")
def _fileInfo(self, name: str) -> Optional[_FileInfoDict]:
try:
if self._cache:
cacheKey = self._fileCacheKey(name)
timeoutInSeconds = 60
def loadInfo():
logger.debug(f"file info cache miss for {name}")
return self.bucket.get_file_info_by_name(name).as_dict()
return self._cache.get_or_set(key=cacheKey, default=loadInfo, timeout=timeoutInSeconds)
return self.bucket.get_file_info_by_name(name).as_dict()
except FileOrBucketNotFound:
return None
def _fileCacheKey(self, name: str) -> str:
        return sha3_224(f"{self.bucket.name}__{name}".encode()).hexdigest()
@property
def _cache(self) -> Optional[BaseCache]:
if (
not self._forbidFilePropertyCaching
and self.b2Api # force init
and self._accountInfo
and isinstance(self._accountInfo, DjangoCacheAccountInfo)
):
return self._accountInfo.cache
return None
def exists(self, name: str) -> bool:
return bool(self._fileInfo(name))
def size(self, name: str) -> int:
fileInfo = self._fileInfo(name)
return fileInfo.get("size", 0) if fileInfo else 0
def url(self, name: Optional[str]) -> str:
if not name:
raise Exception("Name must be defined")
return self._getFileUrl(name)
def _getFileUrl(self, name: str) -> str:
return self.getBackblazeUrl(name)
def getBackblazeUrl(self, filename: str) -> str:
return self.b2Api.get_download_url_for_file_name(bucket_name=self._bucketName, file_name=filename)
def get_available_name(self, name: str, max_length: Optional[int] = None) -> str:
if self._allowFileOverwrites:
return name
return super().get_available_name(name, max_length)
def listdir(self, path: str) -> Tuple[List[str], List[str]]:
"""
List the contents of the specified path. Return a 2-tuple of lists:
the first item being directories, the second item being files.
"""
raise NotImplementedError("subclasses of Storage must provide a listdir() method")
def get_accessed_time(self, name: str) -> datetime:
"""
Return the last accessed time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError("subclasses of Storage must provide a get_accessed_time() method")
def get_created_time(self, name: str) -> datetime:
"""
Return the creation time (as a datetime) of the file specified by name.
The datetime will be timezone-aware if USE_TZ=True.
"""
from datetime import timezone
from django.conf import settings
fileInfo = self._fileInfo(name)
try:
if fileInfo and float(fileInfo.get("uploadTimestamp", 0)) > 0:
timestamp = float(fileInfo["uploadTimestamp"]) / 1000.0
if settings.USE_TZ:
# Safe to use .replace() because UTC doesn't have DST
return datetime.utcfromtimestamp(timestamp).replace(tzinfo=timezone.utc)
return datetime.fromtimestamp(timestamp)
except ValueError as e:
raise B2FileInformationNotAvailableException(f"'uploadTimestamp' from API not valid for {name}: {e}")
raise B2FileInformationNotAvailableException(f"'uploadTimestamp' not available for {name}")
def get_modified_time(self, name: str) -> datetime:
"""
Return the last modified time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
return self.get_created_time(name)
def _merge(target: Dict, source: Dict, path=None) -> Dict:
"""merges b into a
https://stackoverflow.com/a/7205107/11076240
"""
if path is None:
path = []
for key in source:
if key in target:
printablePath = ".".join(path + [str(key)])
if isinstance(target[key], dict) and isinstance(source[key], dict):
_merge(target[key], source[key], path + [str(key)])
elif target[key] != source[key]:
logger.debug(f"Overriding setting {printablePath} with value {source[key]}")
target[key] = source[key]
else:
target[key] = source[key]
return target
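# A quick illustration of the deep-merge behaviour above (hypothetical values):
#   _merge({"accountInfo": {"type": "memory"}}, {"accountInfo": {"cache": "b2"}})
#   -> {"accountInfo": {"type": "memory", "cache": "b2"}}
# Nested dicts are merged key-by-key; a conflicting scalar is overridden by
# `source`, and each override is logged at debug level.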
# --- source: daizutabi/scratch :: docs/nnabla/p10_Python_API_Tutorials/s02_python_api.py @ 4c56fad4 (MIT) ---
# # NNabla Python API Demonstration Tutorial
# # (https://nnabla.readthedocs.io/en/latest/python/tutorial/python_api.html)
import matplotlib.pyplot as plt
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import numpy as np
from ivory.utils.path import cache_file
# ## NdArray
a = nn.NdArray((2, 3, 4))
print(a.data)
# -
print("[Substituting random values]")
a.data = np.random.randn(*a.shape)
print(a.data)
print("[Slicing]")
a.data[0, :, ::2] = 0
print(a.data)
# -
a.fill(1) # Filling all values with one.
print(a.data)
# -
b = nn.NdArray.from_numpy_array(np.ones(a.shape))
print(b.data)
# ## Variable
x = nn.Variable([2, 3, 4], need_grad=True)
print("x.data:", x.data)
print("x.grad:", x.grad)
# -
x.shape
# -
print("x.data")
print(x.d)
x.d = 1.2345 # To avoid NaN
assert np.all(x.d == x.data.data), "d: {} != {}".format(x.d, x.data.data)
print("x.grad")
print(x.g)
x.g = 1.2345 # To avoid NaN
assert np.all(x.g == x.grad.data), "g: {} != {}".format(x.g, x.grad.data)
# !Zeroing grad values
x.grad.zero()
print("x.grad (after `.zero()`)")
print(x.g)
# -
x2 = nn.Variable.from_numpy_array(np.ones((3,)), need_grad=True)
print(x2)
print(x2.d)
x3 = nn.Variable.from_numpy_array(np.ones((3,)), np.zeros((3,)), need_grad=True)
print(x3)
print(x3.d)
print(x3.g)
# -
print(x.parent)
# ## Function
sigmoid_output = F.sigmoid(x)
sum_output = F.reduce_sum(sigmoid_output)
print(sigmoid_output)
print(sum_output)
# -
print("sigmoid_output.parent.name:", sigmoid_output.parent.name)
print("x:", x)
print("sigmoid_output.parent.inputs refers to x:", sigmoid_output.parent.inputs)
# -
print("sum_output.parent.name:", sum_output.parent.name)
print("sigmoid_output:", sigmoid_output)
print("sum_output.parent.inputs refers to sigmoid_output:", sum_output.parent.inputs)
# -
sum_output.forward()
print("CG output:", sum_output.d)
print("Reference:", np.sum(1.0 / (1.0 + np.exp(-x.d))))
# -
x.grad.zero()
sum_output.backward()
print("d sum_o / d sigmoid_o:")
print(sigmoid_output.g)
print("d sum_o / d x:")
print(x.g)
x.d
# -
x = nn.Variable([5, 2]) # Input
w = nn.Variable([2, 3], need_grad=True) # Weights
b = nn.Variable([3], need_grad=True) # Biases
affine_out = F.affine(x, w, b) # Create a graph including only affine
affine_out
# -
# !Set random input and parameters
x.d = np.random.randn(*x.shape)
w.d = np.random.randn(*w.shape)
b.d = np.random.randn(*b.shape)
# !Initialize grad
x.grad.zero() # Just for showing gradients are not computed when need_grad=False.
w.grad.zero()
b.grad.zero()
# !Forward and backward
affine_out.forward()
affine_out.backward()
# -
print("F.affine")
print(affine_out.d)
print("Reference")
print(np.dot(x.d, w.d) + b.d)
print("dw")
print(w.g)
print("db")
print(b.g)
# -
print(x.g)
# ## Parametric Function
with nn.parameter_scope("affine1"):
c1 = PF.affine(x, 3)
# -
nn.get_parameters()
# -
c1 = PF.affine(x, 3, name="affine1")
nn.get_parameters()
# -
c1.shape
# -
with nn.parameter_scope("foo"):
h = PF.affine(x, 3)
with nn.parameter_scope("bar"):
h = PF.affine(h, 4)
with nn.parameter_scope("foo"):
params = nn.get_parameters()
params
# -
with nn.parameter_scope("foo"):
nn.clear_parameters()
nn.get_parameters()
# ## MLP Example For Explanation
nn.clear_parameters()
batchsize = 16
x = nn.Variable([batchsize, 2])
with nn.parameter_scope("fc1"):
h = F.tanh(PF.affine(x, 512))
with nn.parameter_scope("fc2"):
y = PF.affine(h, 1)
print("Shapes:", h.shape, y.shape)
# -
nn.get_parameters()
# -
x.d = np.random.randn(*x.shape) # Set random input
y.forward()
y.d
# -
# !Variable for label
label = nn.Variable([batchsize, 1])
# !Set loss
loss = F.reduce_mean(F.squared_error(y, label))
# !Execute forward pass.
label.d = np.random.randn(*label.shape) # Randomly generate labels
loss.forward()
print(loss.d)
# -
# !Collect all parameter variables and init grad.
for name, param in nn.get_parameters().items():
param.grad.zero()
# Gradients are accumulated to grad of params.
loss.backward()
# ## Imperative Mode
for name, param in nn.get_parameters().items():
param.data -= param.grad * 0.001 # 0.001 as learning rate
# -
# !A simple example of imperative mode.
xi = nn.NdArray.from_numpy_array(np.arange(4).reshape(2, 2))
yi = F.relu(xi - 1)
xi.data
# -
yi.data
# -
id(xi)
# -
xi = xi + 1
id(xi)
# -
xi -= 1
id(xi)
# -
# !The following doesn't perform substitution but assigns a new NdArray object to `xi`.
# !xi = xi + 1
# !The following copies the result of `xi + 1` to `xi`.
xi.copy_from(xi + 1)
assert np.all(xi.data == (np.arange(4).reshape(2, 2) + 1))
# Inplace operations like `+=`, `*=` can also be used (more efficient).
xi += 1
assert np.all(xi.data == (np.arange(4).reshape(2, 2) + 2))
# ## Solver
solver = S.Sgd(lr=0.00001)
solver.set_parameters(nn.get_parameters())
# -
# !Set random data
x.d = np.random.randn(*x.shape)
label.d = np.random.randn(*label.shape)
# !Forward
loss.forward()
# -
solver.zero_grad()
loss.backward()
solver.update()
# ## Toy Problem To Demonstrate Training
def vector2length(x):
# x : [B, 2] where B is number of samples.
return np.sqrt(np.sum(x ** 2, axis=1, keepdims=True))
# Example
vector2length(np.array([[3, 4], [5, 12]]))
# -
# !Data for plotting contour on a grid data.
xs = np.linspace(-1, 1, 100)
ys = np.linspace(-1, 1, 100)
grid = np.meshgrid(xs, ys)
X = grid[0].flatten()
Y = grid[1].flatten()
def plot_true():
"""Plotting contour of true mapping from a grid data created above."""
plt.contourf(
xs, ys, vector2length(np.hstack([X[:, None], Y[:, None]])).reshape(100, 100)
)
plt.axis("equal")
plt.colorbar()
plot_true()
# -
def length_mlp(x):
h = x
for i, hnum in enumerate([4, 8, 4, 2]):
h = F.tanh(PF.affine(h, hnum, name="fc{}".format(i)))
y = PF.affine(h, 1, name="fc")
return y
# -
nn.clear_parameters()
batchsize = 100
x = nn.Variable([batchsize, 2])
y = length_mlp(x)
label = nn.Variable([batchsize, 1])
loss = F.reduce_mean(F.squared_error(y, label))
# -
def predict(inp):
ret = []
for i in range(0, inp.shape[0], x.shape[0]):
xx = inp[i : i + x.shape[0]]
# Imperative execution
xi = nn.NdArray.from_numpy_array(xx)
yi = length_mlp(xi)
ret.append(yi.data.copy())
return np.vstack(ret)
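# Note: length_mlp() fetches its parameters by name from the current parameter
# scope, so these imperative calls reuse the weights trained above instead of
# creating a fresh network.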
def plot_prediction():
plt.contourf(xs, ys, predict(np.hstack([X[:, None], Y[:, None]])).reshape(100, 100))
plt.colorbar()
plt.axis("equal")
# -
solver = S.Adam(alpha=0.01)
solver.set_parameters(nn.get_parameters())
# -
def random_data_provider(n):
x = np.random.uniform(-1, 1, size=(n, 2))
y = vector2length(x)
return x, y
# -
num_iter = 2000
for i in range(num_iter):
# Sample data and set them to input variables of training.
xx, ll = random_data_provider(batchsize)
x.d = xx
label.d = ll
# Forward propagation given inputs.
loss.forward(clear_no_need_grad=True)
# Parameter gradients initialization and gradients computation by backprop.
solver.zero_grad()
loss.backward(clear_buffer=True)
# Apply weight decay and update by Adam rule.
solver.weight_decay(1e-6)
solver.update()
# Just print progress.
if i % 100 == 0 or i == num_iter - 1:
print("Loss@{:4d}: {}".format(i, loss.d))
# -
loss.forward(clear_buffer=True)
print("The prediction `y` is cleared because it's an intermediate variable.")
print(y.d.flatten()[:4]) # to save space show only 4 values
y.persistent = True
loss.forward(clear_buffer=True)
print("The prediction `y` is kept by the persistent flag.")
print(y.d.flatten()[:4]) # to save space show only 4 value
# -
plt.subplot(121)
plt.title("Ground truth")
plot_true()
plt.subplot(122)
plt.title("Prediction")
plot_prediction()
# -
path_param = cache_file('nnabla/tutorial/python_api/param-vector2length.h5')
nn.save_parameters(path_param)
# !Remove all once
nn.clear_parameters()
nn.get_parameters()
# -
# !Load again
nn.load_parameters(path_param)
print('\n'.join(map(str, nn.get_parameters().items())))
# -
with nn.parameter_scope('foo'):
nn.load_parameters(path_param)
print('\n'.join(map(str, nn.get_parameters().items())))
# --- source: kriszhengs/kouzhao :: main.py @ f0de3e99 (MIT) ---
from datetime import datetime
import logging
import requests
from hashlib import md5
from time import sleep
from apscheduler.schedulers.blocking import BlockingScheduler
import kzconfig
import json
logging.basicConfig(
handlers=[logging.FileHandler('log.log', 'a', 'utf-8')],
level=logging.INFO,format='%(asctime)s %(levelname)s - %(message)s'
)
logger = logging.getLogger("kouzhao")
def token() -> str:
now = datetime.now()
now_str = now.strftime("%Y*%m-%d")+ "_Qwe"
m = md5()
m.update(now_str.encode("utf-8"))
url_md = m.hexdigest()[7:15]
return url_md
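# Illustrative example (derived from the code above, not an observed value): on
# 2020-03-05 the seed string is "2020*03-05_Qwe", and token() returns characters
# 7..14 of its md5 hexdigest -- a URL suffix that rotates daily.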
headers = {
"User-Agent":"Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/82.0.4078.0 Mobile Safari/537.36"
}
def miaosha_kz():
form_data = kzconfig.form_data
# form_data = {}
sleep(1)
url = "https://kzapi.****.gov.cn/kouzhao/sq/miaosha/"+token()
logger.info("today url is %s"%url)
for i in range(1,kzconfig.MAX_TRY_TIME+1):
logger.info("开始第 %d 此尝试 "%(i))
res = requests.post(url=url,data=form_data,headers=headers,timeout=10)
json_data = res.json()
logger.info(json.dumps(json_data,ensure_ascii=False))
shop_res = json_data.get("responseFlag","0") == "1"
if shop_res:
logger.info("第 %d 抢购成功"%i)
break
elif json_data.get("responseMessage","") == "您好,当前时间段口罩已经约完,建议关注后续的预约活动" :
logger.info("当前时间段口罩已经约完")
break
elif json_data.get("status",200) == 404:
break
logger.info("第 %d 抢购失败 %f 秒后再次尝试" % (i,kzconfig.SLEEP_TIME))
sleep(kzconfig.SLEEP_TIME)
logger.info("抢购结束")
def main():
scheduler = BlockingScheduler()
scheduler.add_job(miaosha_kz, 'cron'
, day="*/7", hour='19',minute="0",second='1'
,timezone =kzconfig.cst_tz)
scheduler.start()
if __name__ == '__main__':
main() | 29.323529 | 166 | 0.627382 | 269 | 1,994 | 4.527881 | 0.513011 | 0.057471 | 0.027094 | 0.019704 | 0.032841 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037869 | 0.218656 | 1,994 | 68 | 167 | 29.323529 | 0.743902 | 0.007021 | 0 | 0.056604 | 0 | 0.018868 | 0.215766 | 0.024255 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.150943 | 0 | 0.226415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
994d23acaf6906fc4bf97467e6053a890c952369 | 15,807 | py | Python | corehq/apps/locations/views.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | corehq/apps/locations/views.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/locations/views.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | import copy
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.utils.safestring import mark_safe
from django.views.decorators.http import require_POST
from corehq.apps.commtrack.views import BaseCommTrackManageView
from corehq.apps.domain.decorators import domain_admin_required, login_and_domain_required
from corehq.apps.hqwebapp.utils import get_bulk_upload_form
from corehq.apps.locations.models import Location
from corehq.apps.locations.forms import LocationForm
from corehq.apps.locations.util import load_locs_json, location_hierarchy_config, dump_locations
from corehq.apps.commtrack.models import LocationType, Product, SupplyPointCase
from corehq.apps.commtrack.util import unicode_slug
from corehq.apps.facilities.models import FacilityRegistry
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.contrib import messages
from couchdbkit import ResourceNotFound
import urllib
import json
from django.utils.translation import ugettext as _, ugettext_noop
from dimagi.utils.decorators.memoized import memoized
from custom.openlmis.tasks import bootstrap_domain_task
from soil.util import expose_download, get_download_context
from corehq.apps.commtrack.tasks import import_locations_async
from couchexport.models import Format
from corehq.apps.consumption.shortcuts import get_default_monthly_consumption
@domain_admin_required
def default(request, domain):
return HttpResponseRedirect(reverse(LocationsListView.urlname, args=[domain]))
class BaseLocationView(BaseCommTrackManageView):
@property
def main_context(self):
context = super(BaseLocationView, self).main_context
context.update({
'hierarchy': location_hierarchy_config(self.domain),
'api_root': reverse('api_dispatch_list', kwargs={'domain': self.domain,
'resource_name': 'location',
'api_name': 'v0.3'}),
})
return context
class LocationsListView(BaseLocationView):
urlname = 'manage_locations'
page_title = ugettext_noop("Locations")
template_name = 'locations/manage/locations.html'
@property
def page_context(self):
selected_id = self.request.GET.get('selected')
return {
'selected_id': selected_id,
'locations': load_locs_json(self.domain, selected_id),
}
class LocationSettingsView(BaseCommTrackManageView):
urlname = 'location_settings'
page_title = ugettext_noop("Location Types")
template_name = 'locations/settings.html'
@property
def page_context(self):
return {
'settings': self.settings_context,
}
@property
def settings_context(self):
return {
'loc_types': [self._get_loctype_info(l) for l in self.domain_object.commtrack_settings.location_types],
}
def _get_loctype_info(self, loctype):
return {
'name': loctype.name,
'code': loctype.code,
'allowed_parents': [p or None for p in loctype.allowed_parents],
'administrative': loctype.administrative,
}
def post(self, request, *args, **kwargs):
payload = json.loads(request.POST.get('json'))
def mk_loctype(loctype):
loctype['allowed_parents'] = [p or '' for p in loctype['allowed_parents']]
cleaned_code = unicode_slug(loctype['code'])
if cleaned_code != loctype['code']:
err = _(
'Location type code "{code}" is invalid. No spaces or special characters are allowed. '
'It has been replaced with "{new_code}".'
)
messages.warning(request, err.format(code=loctype['code'], new_code=cleaned_code))
loctype['code'] = cleaned_code
return LocationType(**loctype)
#TODO add server-side input validation here (currently validated on client)
self.domain_object.commtrack_settings.location_types = [mk_loctype(l) for l in payload['loc_types']]
self.domain_object.commtrack_settings.save()
return self.get(request, *args, **kwargs)
class NewLocationView(BaseLocationView):
urlname = 'create_location'
page_title = ugettext_noop("New Location")
template_name = 'locations/manage/location.html'
@property
def parent_pages(self):
return [{
'title': LocationsListView.page_title,
'url': reverse(LocationsListView.urlname, args=[self.domain]),
}]
@property
def parent_id(self):
return self.request.GET.get('parent')
@property
@memoized
def location(self):
return Location(domain=self.domain, parent=self.parent_id)
@property
def consumption(self):
return None
@property
@memoized
def metadata(self):
return copy.copy(dict(self.location.metadata))
@property
@memoized
def location_form(self):
if self.request.method == 'POST':
return LocationForm(self.location, self.request.POST)
return LocationForm(self.location)
@property
def page_context(self):
return {
'form': self.location_form,
'location': self.location,
'consumption': self.consumption,
'metadata': self.metadata
}
def post(self, request, *args, **kwargs):
if self.location_form.is_valid():
self.location_form.save()
messages.success(request, _('Location saved!'))
return HttpResponseRedirect('%s?%s' % (
reverse(LocationsListView.urlname, args=[self.domain]),
urllib.urlencode({'selected': self.location_form.location._id})
))
return self.get(request, *args, **kwargs)
class EditLocationView(NewLocationView):
urlname = 'edit_location'
page_title = ugettext_noop("Edit Location")
@property
def location_id(self):
return self.kwargs['loc_id']
@property
@memoized
def location(self):
try:
return Location.get(self.location_id)
except ResourceNotFound:
raise Http404()
@property
@memoized
def supply_point(self):
return SupplyPointCase.get_by_location(self.location)
@property
def consumption(self):
consumptions = []
for product in Product.by_domain(self.domain):
consumption = get_default_monthly_consumption(
self.domain,
product._id,
self.location.location_type,
self.supply_point._id if self.supply_point else None,
)
if consumption:
consumptions.append((product.name, consumption))
return consumptions
@property
def page_name(self):
return mark_safe(_("Edit {name} <small>{type}</small>").format(
name=self.location.name, type=self.location.location_type
))
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain, self.location_id])
class BaseSyncView(BaseLocationView):
source = ""
sync_urlname = None
@property
def page_context(self):
return {
'settings': self.settings_context,
'source': self.source,
'sync_url': self.sync_urlname
}
@property
def settings_context(self):
key = "%s_config" % self.source
if hasattr(self.domain_object.commtrack_settings, key):
return {
"source_config": getattr(self.domain_object.commtrack_settings, key)._doc,
}
else:
return {}
def post(self, request, *args, **kwargs):
payload = json.loads(request.POST.get('json'))
#TODO add server-side input validation here (currently validated on client)
key = "%s_config" % self.source
if "source_config" in payload:
for item in payload['source_config']:
if hasattr(self.domain_object.commtrack_settings, key):
setattr(
getattr(self.domain_object.commtrack_settings, key),
item,
payload['source_config'][item]
)
self.domain_object.commtrack_settings.save()
return self.get(request, *args, **kwargs)
class FacilitySyncView(BaseSyncView):
urlname = 'sync_facilities'
sync_urlname = 'sync_openlmis'
page_title = ugettext_noop("OpenLMIS")
template_name = 'locations/facility_sync.html'
source = 'openlmis'
class EditLocationHierarchy(BaseLocationView):
urlname = 'location_hierarchy'
page_title = ugettext_noop("Location Hierarchy")
template_name = 'locations/location_hierarchy.html'
class LocationImportStatusView(BaseLocationView):
urlname = 'location_import_status'
page_title = ugettext_noop('Location Import Status')
template_name = 'hqwebapp/soil_status_full.html'
def get(self, request, *args, **kwargs):
context = super(LocationImportStatusView, self).main_context
context.update({
'domain': self.domain,
'download_id': kwargs['download_id'],
'poll_url': reverse('location_importer_job_poll', args=[self.domain, kwargs['download_id']]),
'title': _("Location Import Status"),
'progress_text': _("Importing your data. This may take some time..."),
'error_text': _("Problem importing data! Please try again or report an issue."),
})
return render(request, self.template_name, context)
def page_url(self):
return reverse(self.urlname, args=self.args, kwargs=self.kwargs)
class LocationImportView(BaseLocationView):
urlname = 'location_import'
page_title = ugettext_noop('Upload Locations from Excel')
template_name = 'locations/manage/import.html'
@property
def page_context(self):
context = {
'bulk_upload': {
"download_url": reverse(
"location_export", args=(self.domain,)),
"adjective": _("location"),
"plural_noun": _("locations"),
},
"manage_consumption": self.domain_object.commtrack_settings.individual_consumption_defaults,
}
context.update({
'bulk_upload_form': get_bulk_upload_form(context),
})
return context
def post(self, request, *args, **kwargs):
upload = request.FILES.get('bulk_upload_file')
if not upload:
messages.error(request, _('no file uploaded'))
return self.get(request, *args, **kwargs)
if not args:
messages.error(request, _('no domain specified'))
return self.get(request, *args, **kwargs)
domain = args[0]
# stash this in soil to make it easier to pass to celery
file_ref = expose_download(upload.read(),
expiry=1*60*60)
task = import_locations_async.delay(
domain,
file_ref.download_id,
)
file_ref.set_task(task)
return HttpResponseRedirect(
reverse(
LocationImportStatusView.urlname,
args=[domain, file_ref.download_id]
)
)
@login_and_domain_required
def location_importer_job_poll(request, domain, download_id, template="hqwebapp/partials/download_status.html"):
context = get_download_context(download_id, check_state=True)
context.update({
'on_complete_short': _('Import complete.'),
'on_complete_long': _('Location importing has finished'),
})
return render(request, template, context)
@login_and_domain_required
def location_export(request, domain):
include_consumption = request.GET.get('include_consumption') == 'true'
response = HttpResponse(mimetype=Format.from_format('xlsx').mimetype)
response['Content-Disposition'] = 'attachment; filename="locations.xlsx"'
dump_locations(response, domain, include_consumption)
return response
@domain_admin_required # TODO: will probably want less restrictive permission
def location_edit(request, domain, loc_id=None):
parent_id = request.GET.get('parent')
if loc_id:
try:
location = Location.get(loc_id)
except ResourceNotFound:
raise Http404()
else:
location = Location(domain=domain, parent=parent_id)
if request.method == "POST":
form = LocationForm(location, request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Location saved!')
return HttpResponseRedirect('%s?%s' % (
reverse('manage_locations', kwargs={'domain': domain}),
urllib.urlencode({'selected': form.location._id})
))
else:
form = LocationForm(location)
context = {
'domain': domain,
'api_root': reverse('api_dispatch_list', kwargs={'domain': domain,
'resource_name': 'location',
'api_name': 'v0.3'}),
'location': location,
'hierarchy': location_hierarchy_config(domain),
'form': form,
}
return render(request, 'locations/manage/location.html', context)
@domain_admin_required
@require_POST
def sync_facilities(request, domain):
commtrack_settings = request.project.commtrack_settings
# create Facility Registry and Facility LocationTypes if they don't exist
if not any(lt.name == 'Facility Registry'
for lt in commtrack_settings.location_types):
commtrack_settings.location_types.extend([
LocationType(name='Facility Registry', allowed_parents=['']),
LocationType(name='Facility', allowed_parents=['Facility Registry'])
])
commtrack_settings.save()
registry_locs = dict((l.external_id, l) for l in
Location.filter_by_type(domain, 'Facility Registry'))
# sync each registry and add/update Locations for each Facility
for registry in FacilityRegistry.by_domain(domain):
registry.sync_with_remote()
try:
registry_loc = registry_locs[registry.url]
except KeyError:
registry_loc = Location(
domain=domain, location_type='Facility Registry',
external_id=registry.url)
registry_loc.name = registry.name
registry_loc.save()
registry_loc._seen = True
facility_locs = dict((l.external_id, l) for l in
Location.filter_by_type(domain, 'Facility', registry_loc))
for facility in registry.get_facilities():
uuid = facility.data['uuid']
try:
facility_loc = facility_locs[uuid]
except KeyError:
facility_loc = Location(
domain=domain, location_type='Facility', external_id=uuid,
parent=registry_loc)
facility_loc.name = facility.data.get('name', 'Unnamed Facility')
facility_loc.save()
facility_loc._seen = True
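        # prune (mark-and-sweep): facilities and registries not marked `_seen`
        # during this sync no longer exist remotely, so their stale local
        # Locations are deleted below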
for id, f in facility_locs.iteritems():
if not hasattr(f, '_seen'):
f.delete()
for id, r in registry_locs.iteritems():
if not hasattr(r, '_seen'):
r.delete()
return HttpResponse('OK')
@domain_admin_required
@require_POST
def sync_openlmis(request, domain):
# todo: error handling, if we care.
bootstrap_domain_task.delay(domain)
return HttpResponse('OK')
# --- source: allspeak/api.allspeak.eu :: web/project/training_api/libs/utilities.py @ 0403c4ed (MIT) ---
# createSubjectTrainingMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip)
# createSubjectTestMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip, sentences_filename, sentence_counter)
# createFullMatrix(input_matrix_folder, data_name, label_name, output_matrix_path="")
import os
import shutil
import ntpath
import glob
import re
import json
import numpy as np
from numpy import genfromtxt
from datetime import datetime
from . import earray_wrapper
def moveFolderContent(indir, outdir):
for file in os.listdir(indir):
fileref = indir + '/' + file
if os.path.isdir(fileref) is False:
shutil.move(fileref, outdir + '/' + file)
def getFileName(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
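# NB: "remoteExtension" appears to be a typo for "removeExtension"; it strips the
# extension by returning the text before the first '.' in `path`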
def remoteExtension(path):
return path.split('.')[0]
# works when input files are : commandlabelNREP_scores.dat
# vocfilepath contains lines as follows:
# 1 cmdlab1
# 2 cmdlab7
# 3 cmlab8
# etc...
def renameSubjectFiles(subject_name, inrootpath, outrootpath, vocfilepath):
inpath = inrootpath + '/' + subject_name
outpath = outrootpath + '/' + subject_name
if os.path.isdir(outpath) is False:
os.mkdir(outpath)
with open(vocfilepath, "r") as f:
data = f.readlines()
for line in data:
            words = line.split()  # words[0] = number corresponding to the command, words[1] = sentence name
for infile in glob.glob(os.path.join(inpath, '*.*')):
file_name = os.path.basename(infile)
b = re.split('(\d+)', file_name)
if b[0] == words[1]:
shutil.copy2(infile, outpath + '/' + subject_name + "_" + words[0] + "_" + b[1] + ".dat")
# print subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat
# works when input files are : SUBJ_commandlabelNREP_scores.dat
# vocfilepath contains lines as follows:
# 1 cmdlab1
# 2 cmdlab7
# 3 cmlab8
# etc...
def renameSubjectsFiles(subjects_name, inrootpath, outrootpath, vocfilepath):
inpath = inrootpath + '/' + subjects_name
with open(vocfilepath, "r") as f:
data = f.readlines()
for line in data:
            words = line.split()  # words[0] = number corresponding to the command, words[1] = sentence name
for infile in glob.glob(os.path.join(inpath, '*.*')):
file_name = os.path.basename(infile)
id = file_name.index("_")
subjlabel = file_name[:id]
if os.path.isdir(outrootpath + '/' + subjlabel) is False:
os.mkdir(outrootpath + '/' + subjlabel)
file_name = file_name[(id+1):]
b = re.split('(\d+)', file_name)
if b[0] == words[1]:
shutil.copy2(infile,
outrootpath + '/' + subjlabel + '/' + subjlabel + "_" + words[0] + "_" + b[1] + ".dat")
# print subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat
# works when input files are : commandlabelNREP_scores.dat
# vocfilepath contains lines as follows:
# 1 cmdlab1
# 2 cmdlab7
# 3 cmlab8
# etc...
def renameSubjectFilesJSON(subject_name, inrootpath, outrootpath, jsonvocfilepath, ext=".dat"):
inpath = inrootpath + '/' + subject_name
outpath = outrootpath + '/' + subject_name
if os.path.isdir(outpath) is False:
os.mkdir(outpath)
vocabulary = getVocabularyFromJSON(jsonvocfilepath)
for sentence in vocabulary:
sentenceid = str(sentence["id"])
lab = remoteExtension(str(sentence["readablefilename"]))
#with open(vocfilepath, "r") as f:
# data = f.readlines()
# for line in data:
    # words = line.split() # words[0] = number corresponding to the command, words[1] = sentence name
for infile in glob.glob(os.path.join(inpath, '*.*')):
file_name = os.path.basename(infile)
b = re.split('(\d+)', file_name)
if b[0] == lab:
shutil.copy2(infile, outpath + '/' + subject_name + "_" + sentenceid + "_" + b[1] + ext)
# print subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat
# works when input files are : SUBJ_commandlabelNREP_scores.dat
# jsonvocfilepath contains lines as follows:
#{ "vocabulary_categories": [],
# "voicebank_vocabulary": [ { "title": "Sono felice", "id": 1101, "filename":"", "readablefilename" : "sono_felice.wav", "existwav": 0, "editable":false}, ...]
#}
def renameSubjectsFilesJSON(subjects_name, inrootpath, outrootpath, jsonvocfilepath, ext=".dat"):
inpath = inrootpath + '/' + subjects_name
vocabulary = getVocabularyFromJSON(jsonvocfilepath)
    # words = line.split() # words[0] = number corresponding to the command, words[1] = sentence name
for infile in glob.glob(os.path.join(inpath, '*.*')):
copied = False
file_name = os.path.basename(infile)
id = file_name.index("_")
subjlabel = file_name[:id]
if os.path.isdir(outrootpath + '/' + subjlabel) is False:
os.mkdir(outrootpath + '/' + subjlabel)
file_name = file_name[(id + 1):]
b = re.split('(\d+)', file_name)
for sentence in vocabulary:
sentenceid = str(sentence["id"])
lab = remoteExtension(str(sentence["readablefilename"]))
if b[0] == lab:
if os.path.isdir(outrootpath + '/' + subjlabel) is False:
os.mkdir(outrootpath + '/' + subjlabel)
shutil.copy2(infile, outrootpath + '/' + subjlabel + '/' + subjlabel + "_" + sentenceid + "_" + b[1] + ext)
shutil.copy2(infile, outrootpath + '/' + subjlabel + "_" + sentenceid + "_" + b[1] + ext)
copied = True
break
# print subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat
if copied is False:
print(infile)
# works when input files are : commandlabelNREP.dat.SUBJLABEL
def renameSubjectFilesOld(subject_name, inrootpath, outrootpath, vocfilepath):
    inpath = inrootpath + '/' + subject_name
    outpath = outrootpath + '/' + subject_name
    if not os.path.isdir(outpath):
        os.mkdir(outpath)
    with open(vocfilepath, "r") as f:
        data = f.readlines()
    for line in data:
        words = line.split()  # words[0] = command id, words[1] = sentence label
        for infile in glob.glob(os.path.join(inpath, '*.*')):
            file_name = os.path.basename(infile)
            a = os.path.splitext(file_name)[0]
            b = re.split(r'(\d+)', a)
            if b[0] == words[1]:
                # e.g. produces te_0_0.dat
                shutil.copy2(inpath + '/' + a + '.' + subject_name,
                             outpath + '/' + subject_name + "_" + words[0] + "_" + b[1] + ".dat")
def getVocabularyFromJSON(json_inputfile):
    with open(json_inputfile, encoding='utf-8') as data_file:
        data = json.load(data_file)
    return data["voicebank_vocabulary"]


def createVocabularySentence(list_ids, json_inputfile, txt_outputfile):
    vocabulary = getVocabularyFromJSON(json_inputfile)
    with open(txt_outputfile, 'w+') as out_file:
        for sentence_id in list_ids:
            for sentence in vocabulary:
                sentenceid = sentence["id"]
                if sentence_id == sentenceid:
                    title = sentence["title"]
                    # '\n' instead of os.linesep: a text-mode stream already
                    # translates the newline; os.linesep would double the CR on Windows
                    out_file.write(title + "\n")
                    break
def createVocabularyJson(list_ids, model, sessiondata, training_sessionid, json_globalvocabulary, json_outputfile):
    # get commands list from json_globalvocabulary
    vocabulary = getVocabularyFromJSON(json_globalvocabulary)
    commands = []
    for sentence_id in list_ids:
        for sentence in vocabulary:
            sentenceid = sentence["id"]
            if sentence_id == sentenceid:
                commands.append({'title': sentence["title"], 'id': sentenceid})
                break
    lencmds = len(commands)
    nw = datetime.now()
    # sModelFilePath is written by the App
    res = {
        'sLabel': sessiondata['sLabel'],
        'nModelClass': sessiondata['nModelClass'],
        'nModelType': sessiondata['nModelType'],
        'nInputParams': model['nInputParams'],
        'nContextFrames': model['nContextFrames'],
        'nItems2Recognize': lencmds,
        'sModelFilePath': "",
        'sModelFileName': model['sModelFileName'],
        'saInputNodeName': model['saInputNodeName'],
        'sOutputNodeName': model['sOutputNodeName'],
        'nProcessingScheme': sessiondata['nProcessingScheme'],
        'fRecognitionThreshold': model['fRecognitionThreshold'],
        'sCreationTime': nw.strftime('%Y/%m/%d %H:%M:%S'),
        'sLocalFolder': sessiondata['sLocalFolder'],
        'sessionid': str(training_sessionid),
        'commands': commands
    }
    with open(json_outputfile, 'w', encoding='utf-8') as data_file:
        json.dump(res, data_file)
# ===========================================================================================================================
# aims   : This script creates the training matrix for a single subject (ctx_*.dat ==> SUBJ_train_data.npy [earray h5])
#
# input  : subj: subject folder name
#          in_orig_subj_path: path to the subject's cepstra with context
#          output_net_path: path to the output folder
#          arr_commands: IDs of the selected commands
#          arr_rip: range from 0 to Nripetitions
#
# return : output_matrices_path: path to the output folder (e.g. output/train/ANALYSISNAME/matrices)
# ===========================================================================================================================
def createSubjectTrainingMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip, file_prefix='ctx'):
    mat_compl = []
    mat_lab = []
    totalsize = 0
    output_matrices_path = os.path.join(output_net_path, 'matrices')
    write_every_nfiles = 1  # every N (e.g. 10) files read, append them to disk and clear arrays
    if not os.path.isdir(output_matrices_path):
        os.mkdir(output_matrices_path)
    if subj != '':
        subj = subj + "_"
    output_data_matrix_path = output_matrices_path + '/' + subj + 'train_data.npy'
    output_labels_matrix_path = output_matrices_path + '/' + subj + 'train_labels.npy'
    if os.path.exists(output_data_matrix_path):
        os.remove(output_data_matrix_path)
    if os.path.exists(output_labels_matrix_path):
        os.remove(output_labels_matrix_path)
    try:
        cnt = 0
        for ctxfile in glob.glob(in_orig_subj_path + '/' + file_prefix + '*'):
            spl = re.split(r'[_ .]', ctxfile)  # e.g. ctx_SUBJ_CMD_REP => spl[2] command id, spl[3] repetition id
            id_cmd = int(spl[2])
            id_rep = int(spl[3])
            if id_cmd in arr_commands and id_rep in arr_rip:
                with open(ctxfile, 'r') as f:
                    lines = f.readlines()
                count_lines = len(lines)
                # for every line of the contexted file, write len(arr_commands) one-hot label columns
                lb = [[1 if i == id_cmd else 0 for i in arr_commands] for _ in range(count_lines)]
                ctx = genfromtxt(ctxfile)  # load the cepstra
                if len(mat_compl) == 0 and len(mat_lab) == 0:
                    mat_compl = ctx
                    mat_lab = lb
                else:
                    mat_compl = np.vstack((mat_compl, ctx))
                    mat_lab = np.vstack((mat_lab, lb))
                cnt = cnt + 1
                # check whether to write to disk
                if cnt == write_every_nfiles:
                    cnt = 0
                    earray_wrapper.appendArray2File(mat_compl, output_data_matrix_path)
                    earray_wrapper.appendArray2File(mat_lab, output_labels_matrix_path)
                    totalsize += mat_compl.size
                    mat_compl = []
                    mat_lab = []
    except Exception as e:
        print(str(e))
    # save the remaining data in output/train/ANALYSISNAME/matrices
    if len(mat_compl):
        earray_wrapper.appendArray2File(mat_compl, output_data_matrix_path)
        earray_wrapper.appendArray2File(mat_lab, output_labels_matrix_path)
    print("createSubjectTrainingMatrix ended: " + str(totalsize))
    return {'data_matrices_path': output_data_matrix_path, 'labels_matrices_path': output_labels_matrix_path}
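# Usage sketch (hypothetical paths and ranges, for illustration only):
# paths = createSubjectTrainingMatrix('te', 'input/ctx/te', 'output/train/demo',
#                                     arr_commands=[1101, 1102], arr_rip=range(0, 10))
# np.load(paths['data_matrices_path']) then holds one cepstra row per context frame,
# and np.load(paths['labels_matrices_path']) the matching one-hot command labels.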
# -----------------------------------------------------------------------------------------------------------------------
# does NOT create matrix files; it just reads and returns the data & labels arrays
def getSubjectTrainingMatrix(in_orig_subj_path, arr_commands, arr_rip, file_prefix='ctx'):
    mat_compl = []
    mat_lab = []
    try:
        for ctxfile in glob.glob(in_orig_subj_path + '/' + file_prefix + '*'):
            filename = os.path.basename(ctxfile)
            spl = os.path.splitext(filename)[0].split('_')
            # e.g. ctx_SUBJ_CMD_REP => spl[2] command id, spl[3] repetition id
            id_cmd = int(spl[2])
            id_rep = int(spl[3])
            if id_cmd in arr_commands and id_rep in arr_rip:
                with open(ctxfile, 'r') as f:
                    lines = f.readlines()
                count_lines = len(lines)
                # for every line of the contexted file, write len(arr_commands) one-hot label columns
                lb = [[1 if i == id_cmd else 0 for i in arr_commands] for _ in range(count_lines)]
                ctx = genfromtxt(ctxfile)  # load the cepstra
                if len(mat_compl) == 0 and len(mat_lab) == 0:
                    mat_compl = ctx
                    mat_lab = lb
                else:
                    mat_compl = np.vstack((mat_compl, ctx))
                    mat_lab = np.vstack((mat_lab, lb))
    except Exception as e:
        print(str(e))
    rows = len(mat_compl)
    cols = len(mat_compl[0])
    print("getSubjectTrainingMatrix ended, rows: " + str(rows) + ", cols: " + str(cols))
    return mat_compl, mat_lab
# ===========================================================================================================================
# aims   : This script creates the testing matrix for a single subject
#
# input  : subj: subject folder name
#          in_orig_subj_path: path to the subject's cepstra with context
#          output_net_path: path to the output folder
#          arr_commands: range from 1 to Ncommands
#          arr_rip: range from 0 to Nripetitions
#          sentences_filename: name of the output file
#          sentence_counter: keeps track of how many rows are occupied by each command and of the command_id
#
# return : output_matrices_path: path to the output folder
#          sentence_counter: text file which keeps track of how many rows are occupied by each command and the command_id
# ===========================================================================================================================
def createSubjectTestMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip, sentences_filename, sentence_counter):
    mat_compl = []
    mat_lab = []
    totalsize = 0
    output_matrices_path = os.path.join(output_net_path, 'matrices')
    write_every_nfiles = 10  # every N (e.g. 10) files read, append them to disk and clear arrays
    if not os.path.isdir(output_matrices_path):
        os.mkdir(output_matrices_path)
    # the sentences file lives under output_net_path; check and remove it there,
    # at the same path that is written below
    sentences_filepath = os.path.join(output_net_path, sentences_filename)
    if os.path.isfile(sentences_filepath):
        os.remove(sentences_filepath)
    output_data_matrix = output_matrices_path + '/' + subj + '_test_data.npy'
    output_labels_matrix = output_matrices_path + '/' + subj + '_test_labels.npy'
    if os.path.exists(output_data_matrix):
        os.remove(output_data_matrix)
    if os.path.exists(output_labels_matrix):
        os.remove(output_labels_matrix)
    try:
        cnt = 0
        for ctxfile in glob.glob(in_orig_subj_path + '/ctx*'):
            spl = re.split(r'[_ .]', ctxfile)  # e.g. ctx_SUBJ_CMD_REP => spl[2] command id, spl[3] repetition id
            id_cmd = int(spl[2])
            id_rep = int(spl[3])
            if id_cmd in arr_commands and id_rep in arr_rip:
                with open(ctxfile, 'r') as f:
                    lines = f.readlines()
                count_lines = len(lines)
                sentence_counter = sentence_counter + 1
                sc = [[sentence_counter, id_cmd] for _ in range(count_lines)]
                with open(sentences_filepath, 'ab') as f_handle:
                    np.savetxt(f_handle, sc, fmt='%.0f')
                lb = [[1 if i == id_cmd else 0 for i in arr_commands] for _ in range(count_lines)]
                ctx = genfromtxt(ctxfile)  # load the cepstra
                if len(mat_compl) == 0 and len(mat_lab) == 0:
                    mat_compl = ctx
                    mat_lab = lb
                else:
                    mat_compl = np.vstack((mat_compl, ctx))
                    mat_lab = np.vstack((mat_lab, lb))
                cnt = cnt + 1
                # check whether to write to disk
                if cnt == write_every_nfiles:
                    cnt = 0
                    earray_wrapper.appendArray2File(mat_compl, output_data_matrix)
                    earray_wrapper.appendArray2File(mat_lab, output_labels_matrix)
                    totalsize += mat_compl.size
                    mat_compl = []
                    mat_lab = []
    except Exception as e:
        print(str(e))
    # save the remaining data in output/test/ANALYSISNAME/matrices
    if len(mat_compl):
        earray_wrapper.appendArray2File(mat_compl, output_data_matrix)
        earray_wrapper.appendArray2File(mat_lab, output_labels_matrix)
    return {'data_matrices_path': output_data_matrix, 'labels_matrices_path': output_labels_matrix, 'sentence_counter': sentence_counter}
# ===========================================================================================================================
# aims   : This script creates the full matrix over all the pre-established subjects
#
# input  : input_matrix_folder: path to the folder containing the per-subject testing/training matrices (cepstra or labels)
#          data_name: name of the testing or training matrices with cepstra
#          label_name: name of the testing or training matrices with labels
#          output_matrix_path: path to the output folder. If it is not specified, data will be stored in the input folder
#
# return : data_matrix_path: path to the output data matrix
#          label_matrix_path: path to the output label matrix
# ===========================================================================================================================
def createFullMatrix(subjects_list, input_net_folder, data_name, label_name, output_net_folder=""):
    input_matrix_folder = os.path.join(input_net_folder, 'matrices')
    if not os.path.isdir(input_matrix_folder):
        os.mkdir(input_matrix_folder)
    if len(output_net_folder):
        output_matrix_folder = os.path.join(output_net_folder, 'matrices')
        if not os.path.isdir(output_matrix_folder):
            os.mkdir(output_matrix_folder)
        data_matrix_path = output_matrix_folder + '/full_' + data_name + '.npy'
        label_matrix_path = output_matrix_folder + '/full_' + label_name + '.npy'
    else:
        data_matrix_path = input_matrix_folder + '/full_' + data_name + '.npy'
        label_matrix_path = input_matrix_folder + '/full_' + label_name + '.npy'
    for matrix_file in glob.glob(input_matrix_folder + '/*' + data_name + '.npy'):
        file_name = os.path.basename(matrix_file)
        spl = re.split(r'[_ .]', file_name)  # spl[0] = subject, spl[1] = the word 'train'/'test'
        for subj in subjects_list:
            if subj == spl[0]:
                print("createFullMatrix: " + matrix_file)
                file_train = np.load(matrix_file)
                file_labels = np.load(input_matrix_folder + '/' + spl[0] + '_' + label_name + '.npy')
                earray_wrapper.appendArray2File(file_train, data_matrix_path)
                earray_wrapper.appendArray2File(file_labels, label_matrix_path)
    return {'data_matrix_path': data_matrix_path, 'label_matrix_path': label_matrix_path}
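# Usage sketch (hypothetical folder layout, for illustration only):
# full = createFullMatrix(['te', 'an'], 'output/train/demo', 'train_data', 'train_labels')
# stacks output/train/demo/matrices/te_train_data.npy and an_train_data.npy
# (and the matching label files) into full_train_data.npy / full_train_labels.npy.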
def getNodeBySubstring(graph, nomesubstring, allnodes=None):
    if allnodes is None:
        allnodes = [n.name for n in graph.as_graph_def().node]
    node_str = [s for s in allnodes if nomesubstring in s and 'read' not in s]
    if len(node_str) == 1:
        return graph.get_tensor_by_name(node_str[0] + ":0")
    else:
        return None
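# Usage sketch (assumes a TensorFlow 1.x-style graph is already loaded; the
# node name below is illustrative only):
# output_tensor = getNodeBySubstring(graph, "softmax_output")
# returns the ":0" tensor of the single node whose name contains the substring,
# or None when the match is missing or ambiguous.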
| 41.163743 | 162 | 0.580764 | 2,456 | 21,117 | 4.785016 | 0.120928 | 0.016338 | 0.010892 | 0.012168 | 0.679374 | 0.649932 | 0.594452 | 0.536504 | 0.527315 | 0.498724 | 0 | 0.010162 | 0.273003 | 21,117 | 512 | 163 | 41.244141 | 0.755341 | 0.261211 | 0 | 0.556604 | 0 | 0 | 0.063609 | 0.006 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050314 | false | 0 | 0.031447 | 0.003145 | 0.110063 | 0.022013 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99548a01e83a179a6bb2c82d30f37deca9cc74b6 | 5,206 | py | Python | Note11_Learn Python_Staticmethods&Exceptions.py | stanreport/Python-Tutorials | 7aff8ff7c21d4face1afb218ab9679f3d1160e27 | ["Apache-2.0"] | null | null | null | Note11_Learn Python_Staticmethods&Exceptions.py | stanreport/Python-Tutorials | 7aff8ff7c21d4face1afb218ab9679f3d1160e27 | ["Apache-2.0"] | 1 | 2018-04-14T19:35:14.000Z | 2018-04-14T19:35:14.000Z | Note11_Learn Python_Staticmethods&Exceptions.py | stanreport/Python-Tutorials | 7aff8ff7c21d4face1afb218ab9679f3d1160e27 | ["Apache-2.0"] | null | null | null |
# ---------- STATIC METHODS ----------
# Static methods allow access without the need to initialize
# a class. They should be used as utility methods, or when
# a method is needed but it doesn't make sense for the real
# world object to be able to perform that task


class Sum:
    # You use the @staticmethod decorator to define that a
    # method is static
    @staticmethod
    def getSum(*args):
        total = 0
        for i in args:
            total += i
        return total


def main():
    # Call a static method by preceding it with its class
    # name
    print("Sum :", Sum.getSum(1, 2, 3, 4, 5))


main()

# ---------- STATIC VARIABLES ----------
# Fields declared in a class, but outside of any method,
# are static variables. Their value is shared by every
# object of that class


class Dog:
    # This is a static variable
    num_of_dogs = 0

    def __init__(self, name="Unknown"):
        self.name = name
        # You reference the static variable by preceding
        # it with the class name
        Dog.num_of_dogs += 1

    @staticmethod
    def getNumOfDogs():
        print("There are currently {} dogs".format(Dog.num_of_dogs))


def main():
    spot = Dog("Spot")
    doug = Dog("Doug")
    spot.getNumOfDogs()


main()

# ---------- MODULES ----------
# Your Python programs will contain a main program that
# includes your main function. Then you will create many
# modules in separate files. Modules also end with .py,
# just like any other Python file

# ————— sum.py —————
def getSum(*args):
    total = 0
    for i in args:
        total += i
    return total
# ————— End of sum.py —————

# You can import by listing the file name minus the .py extension
import sum

# Get access to functions by preceding them with the file
# name and then the function you want
print("Sum :", sum.getSum(1, 2, 3, 4, 5))

# ---------- FROM ----------
# You can use from to copy specific functions from a module
# You can use "from sum import *" to import all functions
# You can import multiple functions by listing them after
# import, separated by commas
from sum import getSum

# You don't have to reference the module name now
print("Sum :", getSum(1, 2, 3, 4, 5))
# ---------- EXCEPTION HANDLING ----------
# Exceptions are triggered either when an error occurs
# or when you want them to be.
# Exceptions are used to handle errors, to execute
# specific code when code generates something out of
# the ordinary, and to always execute code when something
# happens (e.g. close a file that was opened).
# When an error occurs you stop executing code and jump
# to execute other code that responds to that error

# Let's handle an IndexError exception that is
# triggered when you try to access an index in a list
# that doesn't exist

# Surround a potential exception with try
try:
    aList = [1, 2, 3]
    print(aList[3])
# Catch the exception with except followed by the
# exception you want to catch
# You can catch multiple exceptions by separating them
# with commas inside parentheses
# except (IndexError, NameError):
except IndexError:
    print("Sorry that index doesn't exist")
# If the exception wasn't caught above this will
# catch all others
except:
    print("An unknown error occurred")

# ---------- CUSTOM EXCEPTIONS ----------
# Let's trigger an exception if the user enters a
# name that contains a number
# Although you won't commonly create your own exceptions
# this is how you do it

# Create a class that inherits from Exception
class DogNameError(Exception):
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)


try:
    dogName = input("What is your dogs name : ")
    if any(char.isdigit() for char in dogName):
        # Raise your own exception
        # You can raise the built-in exceptions as well
        raise DogNameError
except DogNameError:
    print("Your dogs name can't contain a number")
# ---------- FINALLY & ELSE ----------
# finally is used when you always want certain code to
# execute whether an exception is raised or not
num1, num2 = input("Enter two values to divide : ").split()
try:
    quotient = int(num1) / int(num2)
    print("{} / {} = {}".format(num1, num2, quotient))
except ZeroDivisionError:
    print("You can't divide by zero")
# else is only executed if no exception was raised
else:
    print("You didn't raise an exception")
finally:
    print("I execute no matter what")

# ---------- PROBLEM EXCEPTIONS & FILES ----------
# 1. Create a file named mydata2.txt and put data in it
# 2. Using what you learned in part 8 and Google to find
#    out how to open a file without with, try to open the
#    file in a try block
# 3. Catch the FileNotFoundError exception
# 4. In else print the file contents
# 5. In finally close the file
# 6. Try to open the nonexistent file mydata3.txt and
#    test to see if you caught the exception
try:
    myFile = open("mydata2.txt", encoding="utf-8")
# We can use as to access data and methods in the
# exception class
except FileNotFoundError as ex:
    print("That file was not found")
    # Print out further data on the exception
    print(ex.args)
else:
    print("File :", myFile.read())
    myFile.close()
finally:
print("Finished Working with File") | 24.909091 | 68 | 0.671725 | 792 | 5,206 | 4.417929 | 0.323232 | 0.012003 | 0.00343 | 0.009431 | 0.03944 | 0.03944 | 0.03944 | 0.035439 | 0.035439 | 0.022864 | 0 | 0.009886 | 0.22282 | 5,206 | 209 | 69 | 24.909091 | 0.849975 | 0.610834 | 0 | 0.352941 | 0 | 0 | 0.185736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102941 | false | 0 | 0.029412 | 0 | 0.220588 | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
995771bb5ce39f771d0087436d5379344c7c7a93 | 17,632 | py | Python | (3)TopTitanic1.py | statpng/KaggleTranscript | b110482a2adcf0390fac0d54c890c95894f98dea | ["Apache-2.0"] | null | null | null | (3)TopTitanic1.py | statpng/KaggleTranscript | b110482a2adcf0390fac0d54c890c95894f98dea | ["Apache-2.0"] | null | null | null | (3)TopTitanic1.py | statpng/KaggleTranscript | b110482a2adcf0390fac0d54c890c95894f98dea | ["Apache-2.0"] | null | null | null |
# https://www.kaggle.com/yassineghouzam/titanic-top-4-with-ensemble-modeling
# Feature analysis
# Feature engineering
# Modeling
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from collections import Counter
from sklearn.ensemble import RandomForestClassifier, \
    AdaBoostClassifier, \
    GradientBoostingClassifier, \
    ExtraTreesClassifier, \
    VotingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
sns.set(style="white", context="notebook", palette="deep")
# Load and check data
# Load data
train = pd.read_csv("./titanic/train.csv")
test = pd.read_csv("./titanic/test.csv")
IDtest = test["PassengerId"]
def detect_outliers(df, n, features):
    """
    Takes a dataframe df of features and returns a list of the indices corresponding to the observations containing more than n outliers according to the Tukey method.
    :param df: dataframe
    :param n: maximum number of outlying features an observation may have before it is flagged
    :param features: names of the features to be investigated
    :return: outlier_indices
    """
    outlier_indices = []
    # iterate over features (columns)
    for col in features:
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list(k for k, v in outlier_indices.items() if v > n)
    return multiple_outliers
Outliers_to_drop = detect_outliers(train, 2, ["Age", "SibSp", "Parch", "Fare"])
train.loc[Outliers_to_drop]
train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True)
train_len = len(train)
dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True)
dataset = dataset.fillna(np.nan)
dataset.isnull().sum()
train.info()
train.isnull().sum()
train.head()
train.dtypes
train.describe()
# Feature Analysis
g = sns.heatmap(train[["Survived", "SibSp", "Parch", "Age", "Fare"]].corr(), annot=True, fmt=".2f", cmap="coolwarm")
g = sns.catplot(x="SibSp", y="Survived", kind="bar", data=train, size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
# Parch
g = sns.catplot(x="Parch", y="Survived", data=train, kind="bar", size=6, palette = "muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
# Age
g = sns.FacetGrid(train, col = "Survived")
g = g.map(sns.distplot, "Age")
g = sns.kdeplot(train["Age"][(train["Survived"]==0) & (train["Age"].notnull())], color = "Red", shade = True )
g = sns.kdeplot(train["Age"][(train["Survived"]==1) & (train["Age"].notnull())], color = "Blue", shade = True )
g.set_xlabel("Age")
g.set_ylabel("Frequency")
g = g.legend(["Not Survived", "Survived"])
# Fare
dataset["Fare"].isnull().sum()
dataset["Fare"] = dataset["Fare"].fillna(dataset["Fare"].median())
g = sns.distplot(dataset["Fare"], color="m", label="Skewness : %.2f"%(dataset["Fare"].skew()))
g = g.legend(loc = "best")
dataset["Fare"] = dataset["Fare"].map(lambda i: np.log(i) if i > 0 else 0)
g = sns.distplot(dataset["Fare"], color="b", label="Skewness : %.2f"%(dataset["Fare"].skew()))
g = g.legend(loc="best")
# Categorical values
# Sex
g = sns.barplot(x="Sex", y="Survived", data=train)
g = g.set_ylabel("Survival Probability")
train[["Sex", "Survived"]].groupby("Sex").mean()
# Pclass
g = sns.catplot(x="Pclass", y="Survived", data = train, kind="bar", size = 6, palette = "muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
g = sns.catplot(x="Pclass", y="Survived", hue="Sex", kind="bar", data=dataset, size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
# Embarked
dataset["Embarked"].isnull().sum()
dataset["Embarked"] = dataset["Embarked"].fillna("S")
g = sns.catplot(x="Embarked", y="Survived", kind="bar", data=train, size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
g = sns.catplot("Pclass", col="Embarked", kind="count", data=train, size=6, palette="muted")
# Filling missing values
# Age
g = sns.catplot(x = "Sex", y = "Age", kind="box", data=dataset)
g = sns.catplot(x = "Sex", y = "Age", hue="Pclass", kind="box", data=dataset)
g = sns.catplot(x = "Parch", y = "Age", kind="box", data=dataset)
g = sns.catplot(x = "SibSp", y = "Age", kind="box", data=dataset)
dataset["Sex"] = dataset["Sex"].map({"male":0, "female":1}) # male --> 0; female --> 1
g = sns.heatmap(dataset[["Age", "Sex", "SibSp", "Parch", "Pclass"]].corr(), cmap="BrBG", annot=True)
# Filling missing value of Age
index_NaN_age = list( dataset["Age"][dataset["Age"].isnull()].index )
for i in index_NaN_age:
    age_med = dataset["Age"].median()
    age_pred = dataset["Age"][((dataset["SibSp"] == dataset.iloc[i]["SibSp"]) &
                               (dataset["Parch"] == dataset.iloc[i]["Parch"]) &
                               (dataset["Pclass"] == dataset.iloc[i]["Pclass"])
                               )].median()
    # the index was reset above, so label-based .loc is safe here and avoids
    # the chained-assignment pitfall of dataset["Age"].iloc[i] = ...
    if not np.isnan(age_pred):
        dataset.loc[i, "Age"] = age_pred
    else:
        dataset.loc[i, "Age"] = age_med
g = sns.catplot(x="Survived", y="Age", kind="box", data=train)
g = sns.catplot(x="Survived", y="Age", data=train, kind="violin")
# Feature Engineering
dataset["Name"].head()
dataset_title = [i.split(",")[1].split(".")[0].strip() for i in dataset["Name"]]
dataset["Title"] = pd.Series(dataset_title)
dataset["Title"].head()
g = sns.countplot(x="Title", data=dataset)
g = plt.setp(g.get_xticklabels(), rotation=45)
dataset["Title"] = dataset["Title"].replace(["Lady", "the Countess", "Countess", "Capt", "Col", "Don", "Dr", "Major", "Rev", "Sir", "Jonkheer", "Dona"], "Rare")
dataset["Title"] = dataset["Title"].map({"Master":0,
"Miss":1,
"Ms":1,
"Mme":1,
"Mlle":1,
"Mrs":1,
"Mr":2,
"Rare":3 })
dataset["Title"] = dataset["Title"].astype(int)
dataset["Title"].value_counts()
g = sns.countplot(dataset["Title"])
g = g.set_xticklabels(["Master", "Miss/Ms/Mme/Mlle/Mrs", "Mr", "Rare"])
g = sns.catplot(x="Title", y="Survived", kind="bar", data=dataset)
g = g.set_xticklabels(["Master", "Miss-Mrs", "Mr", "Rare"])
g = g.set_ylabels("survival probability")
dataset.drop(labels = ["Name"], axis=1, inplace=True)
dataset["Fsize"] = dataset["SibSp"] + dataset["Parch"] + 1
g = sns.catplot(x="Fsize", y="Survived", kind="point", data=dataset)
g = g.set_ylabels("Survival probability")
dataset["Single"] = dataset["Fsize"].map(lambda s: 1 if s == 1 else 0)
dataset["SmallF"] = dataset["Fsize"].map(lambda s: 1 if s == 2 else 0)
dataset["MedF"] = dataset["Fsize"].map(lambda s: 1 if 3 <= s <= 4 else 0)
dataset["LargeF"] = dataset["Fsize"].map(lambda s: 1 if s >= 5 else 0)
dataset[["Single", "SmallF", "MedF", "LargeF"]].apply(lambda x: x.value_counts(), axis=0)
fig, ax=plt.subplots(2,2,figsize=(10,10))
sns.barplot(x = "Single", y="Survived", data=dataset, ax=ax[0,0])
ax[0,0].set_ylabel("Survival probability")
g = sns.barplot(x = "SmallF", y="Survived", data=dataset, ax=ax[0,1])
ax[0,1].set_ylabel("Survival probability")
g = sns.barplot(x = "MedF", y="Survived", data=dataset, ax=ax[1,0])
ax[1,0].set_ylabel("Survival probability")
g = sns.barplot(x = "LargeF", y="Survived", data=dataset, ax=ax[1,1])
ax[1,1].set_ylabel("Survival probability")
dataset = pd.get_dummies(dataset, columns = ["Title"])
dataset = pd.get_dummies(dataset, columns = ["Embarked"], prefix = "Em")
dataset.head(4)
# Cabin
dataset["Cabin"].head()
dataset["Cabin"].describe()
dataset["Cabin"].isnull().sum()
dataset["Cabin"][dataset["Cabin"].notnull()].head()
dataset["Cabin"] = pd.Series( [i[0] if not pd.isnull(i) else "X" for i in dataset["Cabin"] ])
cabin_order = ["A", "B", "C", "D", "E", "F", "G", "T", "X"]  # renamed: `ord` shadows the built-in
g = sns.countplot(dataset["Cabin"], order=cabin_order)
g = sns.catplot(x="Cabin", y="Survived", kind="bar", data=dataset, order=cabin_order)
g = g.set_ylabels("Survival Probability")
dataset = pd.get_dummies(dataset, prefix = "Cabin", columns=["Cabin"])
dataset["Ticket"].head()
Ticket = []
for i in list(dataset.Ticket):
    if not i.isdigit():
        Ticket.append(i.replace(".", "").replace("/", "").strip().split(" ")[0])
    else:
        Ticket.append("X")
dataset["Ticket"] = Ticket
dataset["Ticket"].head()
dataset = pd.get_dummies(dataset, columns = ["Ticket"], prefix = "T")
dataset["Pclass"] = dataset["Pclass"].astype("category")
dataset = pd.get_dummies(dataset, columns=["Pclass"], prefix="Pc")
dataset.drop(labels = ["PassengerId"], axis=1, inplace=True)
dataset.head()
# Modeling
train = dataset[:train_len].copy()  # .copy() avoids SettingWithCopy warnings on the lines below
test = dataset[train_len:].copy()
test.drop(labels=["Survived"], axis=1, inplace=True)
train["Survived"] = train["Survived"].astype(int)
Y_train = train["Survived"]
X_train = train.drop(labels = ["Survived"], axis=1)
# Simple modeling
kfold = StratifiedKFold(n_splits=10)
random_state = 2
classifiers = []
classifiers.append( SVC(random_state = random_state) )
classifiers.append( DecisionTreeClassifier(random_state = random_state) )
classifiers.append( AdaBoostClassifier(DecisionTreeClassifier(random_state = random_state), random_state = random_state, learning_rate = 0.1))
classifiers.append( RandomForestClassifier(random_state=random_state) )
classifiers.append( ExtraTreesClassifier(random_state=random_state) )
classifiers.append( GradientBoostingClassifier(random_state=random_state) )
classifiers.append( MLPClassifier(random_state=random_state) )
classifiers.append( KNeighborsClassifier() )
classifiers.append( LogisticRegression(random_state=random_state) )
classifiers.append( LinearDiscriminantAnalysis() )
cv_results = []
for classifier in classifiers:
    cv_results.append(cross_val_score(classifier, X=X_train, y=Y_train, scoring="accuracy", cv=kfold))
cv_means = []
cv_std = []
for cv_result in cv_results:
    cv_means.append(cv_result.mean())
    cv_std.append(cv_result.std())
algorithms = [i.__str__().split("(")[0].replace("Classifier", "").replace("Regression", "").replace("Analysis", "") for i in classifiers]
cv_res = pd.DataFrame({
    "CrossValMeans": cv_means,
    "CrossValerrors": cv_std,
    "Algorithm": algorithms
})
g = sns.barplot("CrossValMeans", "Algorithm", data=cv_res, palette = "Set3", orient = "h", **{"xerr":cv_std})
g.set_xlabel("Mean Accuracy")
g = g.set_title("Cross validation scores")
DTC = DecisionTreeClassifier()
adaDTC = AdaBoostClassifier(DTC, random_state=7)
ada_param_grid = {"base_estimator__criterion": ["gini", "entropy"],
                  "base_estimator__splitter": ["best", "random"],
                  "algorithm": ["SAMME", "SAMME.R"],
                  "n_estimators": [1, 2],
                  "learning_rate": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 1.5]}
gsadaDTC = GridSearchCV(adaDTC, param_grid = ada_param_grid, cv=kfold, scoring="accuracy", verbose=1)
gsadaDTC.fit(X_train, Y_train)
ada_best = gsadaDTC.best_estimator_
gsadaDTC.best_score_
# ExtraTrees
ExtC = ExtraTreesClassifier()
ex_param_grid = {
    "max_depth": [None],
    "max_features": [1, 3, 10],
    "min_samples_split": [2, 3, 10],
    "min_samples_leaf": [1, 3, 10],
    "bootstrap": [False],
    "n_estimators": [100, 300],
    "criterion": ["gini"]}
gsExtC = GridSearchCV(ExtC, param_grid = ex_param_grid, cv=kfold, scoring="accuracy", verbose=1)
gsExtC.fit(X_train, Y_train)
ExtC_best = gsExtC.best_estimator_
gsExtC.best_score_
# Random Forest
RFC = RandomForestClassifier()
rf_param_grid = {"max_depth": [None],
                 "max_features": [1, 3, 10],
                 "min_samples_split": [2, 3, 10],
                 "min_samples_leaf": [1, 3, 10],
                 "bootstrap": [False],
                 "n_estimators": [100, 300],
                 "criterion": ["gini"]}
gsRFC = GridSearchCV( RFC, param_grid = rf_param_grid, cv=kfold, scoring="accuracy", verbose=1)
gsRFC.fit(X_train, Y_train)
RFC_best = gsRFC.best_estimator_
gsRFC.best_score_
# Gradient Boosting
GBC = GradientBoostingClassifier()
gb_param_grid = {
    "loss": ["deviance"],
    "n_estimators": [100, 200, 300],
    "learning_rate": [0.1, 0.05, 0.01],
    "max_depth": [4, 8],
    "min_samples_leaf": [100, 150],
    "max_features": [0.3, 0.1]}
gsGBC = GridSearchCV(GBC, param_grid = gb_param_grid, cv=kfold, scoring="accuracy", verbose = 1)
gsGBC.fit(X_train, Y_train)
GBC_best = gsGBC.best_estimator_
gsGBC.best_score_
# SVM
SVMC = SVC(probability=True)
svc_param_grid = {
    "kernel": ["rbf"],
    "gamma": [0.001, 0.01, 0.1, 1],
    "C": [1, 10, 50, 100, 200, 300, 1000]}
gsSVMC = GridSearchCV(SVMC, param_grid=svc_param_grid, cv=kfold, scoring="accuracy", verbose=1)
gsSVMC.fit(X_train, Y_train)
SVMC_best = gsSVMC.best_estimator_
gsSVMC.best_score_
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.
    :param estimator: sklearn estimator to evaluate
    :param title: title of the plot
    :param X: training data
    :param y: training labels
    :param ylim: optional (min, max) limits for the y axis
    :param cv: cross-validation splitting strategy
    :param n_jobs: number of jobs to run in parallel
    :param train_sizes: relative sizes of the training subsets
    :return: the matplotlib.pyplot module, for chaining
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(train_sizes,
                     train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std,
                     alpha=0.1, color="r")
    plt.fill_between(train_sizes,
                     test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std,
                     alpha=0.1, color="g")  # green band, matching the cross-validation line below
    plt.plot(train_sizes, train_scores_mean, "o-", color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score")
    plt.legend(loc="best")
    return plt
g1 = plot_learning_curve(gsRFC.best_estimator_, "RF learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
g2 = plot_learning_curve(gsExtC.best_estimator_, "ExtraTrees learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
g3 = plot_learning_curve(gsSVMC.best_estimator_, "SVC learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
g4 = plot_learning_curve(gsadaDTC.best_estimator_, "AdaBoost learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
g5 = plot_learning_curve(gsGBC.best_estimator_, "GradientBoosting learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
nrows = ncols = 2
fig, axes = plt.subplots(nrows = nrows, ncols = ncols, sharex="all", figsize=(15,15))
names_classifiers = [("AdaBoosting", ada_best),
                     ("ExtraTrees", ExtC_best),
                     ("RandomForest", RFC_best),
                     ("GradientBoosting", GBC_best)]
nclassifier = 0
for row in range(nrows):
    for col in range(ncols):
        name = names_classifiers[nclassifier][0]
        classifier = names_classifiers[nclassifier][1]
        indices = np.argsort(classifier.feature_importances_)[::-1][:40]
        g = sns.barplot(y=X_train.columns[indices][:40], x=classifier.feature_importances_[indices][:40], orient="h", ax=axes[row][col])
        g.set_xlabel("Relative Importance", fontsize=12)
        g.set_ylabel("Feature", fontsize=12)
        g.tick_params(labelsize=9)
        g.set_title(name + " feature importance")
        nclassifier += 1
test_Survived_RFC = pd.Series(RFC_best.predict(test), name="RFC")
test_Survived_ExtC = pd.Series(ExtC_best.predict(test), name="ExtC")
test_Survived_SVMC = pd.Series(SVMC_best.predict(test), name="SVMC")
test_Survived_AdaC = pd.Series(ada_best.predict(test), name="AdaC")
test_Survived_GBC = pd.Series(GBC_best.predict(test), name="GBC")
ensemble_results = pd.concat( [test_Survived_RFC, test_Survived_ExtC, test_Survived_AdaC, test_Survived_GBC, test_Survived_SVMC], axis=1 )
g = sns.heatmap(ensemble_results.corr(), annot=True)
votingC = VotingClassifier(estimators=[("rfc", RFC_best),
                                       ("extc", ExtC_best),
                                       ("adac", ada_best),
                                       ("gbc", GBC_best)],
                           voting="soft")
votingC = votingC.fit(X_train, Y_train)
test_Survived = pd.Series(votingC.predict(test), name="Survived")
results = pd.concat([IDtest, test_Survived], axis=1)
# results.to_csv("ensemble_python_voting.csv", index=False)
| 35.193613 | 165 | 0.650522 | 2,382 | 17,632 | 4.667926 | 0.175063 | 0.011512 | 0.014839 | 0.015109 | 0.303984 | 0.262973 | 0.187877 | 0.145517 | 0.108013 | 0.100639 | 0 | 0.019216 | 0.179503 | 17,632 | 500 | 166 | 35.264 | 0.749361 | 0.054957 | 0 | 0.102167 | 0 | 0 | 0.142702 | 0.00296 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006192 | false | 0.006192 | 0.052632 | 0 | 0.065015 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9958fb1fe550f9459cfe99043f36afca01044db6 | 1,049 | py | Python | demo/person/tests/project/domain/person/repository/test_physical_person_model.py | giovannifarlley/ms--fastapi-template | 5bbd6903305db07cc18330ec86fb04ca518e9dab | ["MIT"] | 24 | 2021-03-07T13:00:35.000Z | 2022-02-11T03:41:51.000Z | demo/person/tests/project/domain/person/repository/test_physical_person_model.py | giovannifarlley/ms--fastapi-template | 5bbd6903305db07cc18330ec86fb04ca518e9dab | ["MIT"] | 2 | 2021-05-15T01:05:17.000Z | 2021-08-13T13:53:57.000Z | demo/person/tests/project/domain/person/repository/test_physical_person_model.py | giovannifarlley/ms--fastapi-template | 5bbd6903305db07cc18330ec86fb04ca518e9dab | ["MIT"] | 4 | 2021-04-27T12:18:33.000Z | 2021-10-03T23:43:23.000Z |
from datetime import datetime
from bson.objectid import ObjectId
import pytest
from project.domain.person.repository.physical_person import PhysicalPerson
def test_instance_physical_person():
    input_data = {
        "_id": ObjectId(),
        "status": "active",
        "name": "teste",
        "last_name": "teste",
        "age": 12,
        "birthdate": datetime.now(),
        "gender": "",
        "personal_document_id": "11122233344",
        "email": "teste@teste.com",
        "phone": "+5534988887777",
    }

    physical_person = PhysicalPerson(**input_data)

    assert input_data["_id"] == physical_person.dict()["id"]


def test_instance_physical_person_errors():
    with pytest.raises(ValueError):
        input_data = {
            "status": "",
            "name": "",
            "last_name": "",
            "age": -1,
            "birthdate": datetime.now(),
            "gender": "",
            "personal_document_id": "",
            "email": "",
            "phone": "",
        }
        PhysicalPerson(**input_data)
| 26.897436 | 75 | 0.551954 | 95 | 1,049 | 5.852632 | 0.442105 | 0.125899 | 0.053957 | 0.082734 | 0.26259 | 0.158273 | 0.158273 | 0 | 0 | 0 | 0 | 0.036585 | 0.296473 | 1,049 | 38 | 76 | 27.605263 | 0.716802 | 0 | 0 | 0.181818 | 0 | 0 | 0.188751 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 1 | 0.060606 | false | 0 | 0.121212 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
995a18380107d2a42827b6340d3c5bca73c8436d | 2,202 | py | Python | tests/api/v2/test_queries.py | droessmj/python-sdk | 42ea2366d08ef5e4d1fa45029480b800352ab765 | ["MIT"] | 2 | 2020-09-08T20:42:05.000Z | 2020-09-09T14:27:55.000Z | tests/api/v2/test_queries.py | droessmj/python-sdk | 42ea2366d08ef5e4d1fa45029480b800352ab765 | ["MIT"] | null | null | null | tests/api/v2/test_queries.py | droessmj/python-sdk | 42ea2366d08ef5e4d1fa45029480b800352ab765 | ["MIT"] | 1 | 2021-05-03T18:48:01.000Z | 2021-05-03T18:48:01.000Z |
# -*- coding: utf-8 -*-
"""
Test suite for the community-developed Python SDK for interacting with Lacework APIs.
"""
import random
import pytest
from laceworksdk.api.v2.queries import QueriesAPI
from tests.api.test_crud_endpoint import CrudEndpoint
# Tests
@pytest.fixture(scope="module")
def api_object(api):
return api.queries
@pytest.fixture(scope="module")
def api_object_create_body(random_text):
return {
"query_id": random_text,
"query_text": f"""{random_text} {{
source {{CloudTrailRawEvents e}}
filter {{EVENT_SOURCE = 'iam.amazonaws.com' AND EVENT:userIdentity.name::String NOT LIKE '%{random_text}'}}
return distinct {{EVENT_NAME, EVENT}}
}}"""
}
@pytest.fixture(scope="module")
def api_object_update_body(random_text):
return {
"query_text": f"""{random_text} {{
source {{CloudTrailRawEvents e}}
filter {{EVENT_SOURCE = 'iam.amazonaws.com' AND EVENT:userIdentity.name::String NOT LIKE '%{random_text}_updated'}}
return distinct {{EVENT_NAME, EVENT}}
}}"""
}
@pytest.fixture(scope="module")
def query(api):
queries = api.queries.get()
queries = list(filter(lambda elem: elem["owner"] == "Lacework" and "LW_Global_AWS_CTA" in elem["queryId"], queries["data"]))
query = random.choice(queries)
return query
class TestQueries(CrudEndpoint):
OBJECT_ID_NAME = "queryId"
OBJECT_TYPE = QueriesAPI
def test_api_get_by_id(self, api_object):
self._get_object_classifier_test(api_object, "id", self.OBJECT_ID_NAME)
def test_queries_api_execute_by_id(self, api_object, query):
start_time, end_time = self._get_start_end_times()
response = api_object.execute_by_id(
query_id=query["queryId"],
arguments={
"StartTimeRange": start_time,
"EndTimeRange": end_time,
}
)
assert "data" in response.keys()
def test_queries_api_validate(self, api_object, query):
response = api_object.validate(query_text=query["queryText"])
assert "data" in response.keys()
def test_api_search(self):
pass
| 28.597403 | 128 | 0.656676 | 268 | 2,202 | 5.141791 | 0.339552 | 0.058781 | 0.05225 | 0.069666 | 0.422351 | 0.365022 | 0.365022 | 0.261248 | 0.261248 | 0.261248 | 0 | 0.001168 | 0.222071 | 2,202 | 76 | 129 | 28.973684 | 0.803269 | 0.051771 | 0 | 0.307692 | 0 | 0.038462 | 0.309764 | 0.06253 | 0 | 0 | 0 | 0 | 0.038462 | 1 | 0.153846 | false | 0.019231 | 0.076923 | 0.057692 | 0.403846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
995ce8cb055163c1151d7b483d731dd014f5c38e | 9,058 | py | Python | dataloader.py | AriaPs/TransparentDepth | c053b273be856cc9433fd5598a56b96d44ae910e | ["MIT"] | 1 | 2021-05-16T19:40:58.000Z | 2021-05-16T19:40:58.000Z | dataloader.py | AriaPs/TransparentDepth | c053b273be856cc9433fd5598a56b96d44ae910e | ["MIT"] | null | null | null | dataloader.py | AriaPs/TransparentDepth | c053b273be856cc9433fd5598a56b96d44ae910e | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import os
import glob
import sys
from PIL import Image
import Imath
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torchvision import transforms
from imgaug import augmenters as iaa
import imgaug as ia
import imageio
import cv2
from utils.utils import exr_loader, depthTensor2rgbTensor, depth2rgb
class ClearGraspsDataset(Dataset):
    """
    Dataset class for training the model.

    Args:
        input_dir (str): Path to folder containing the input images (.png format).
        depth_dir (str): Path to folder containing the ground-truth depth maps (.exr format).
        transform (imgaug transforms): imgaug transforms to be applied to the imgs.
        input_only (list of str): Names of the augmenters to apply to the input image only.
    """

    def __init__(
            self,
            input_dir,
            depth_dir='',
            transform=None,
            input_only=None,
            outputImgWidth=256,
            outputImgHeight=256,
    ):
        super().__init__()
        self.images_dir = input_dir
        self.depth_dir = depth_dir
        self.transform = transform
        self.input_only = input_only
        # Create list of filenames
        self._datalist_input = []  # Variable containing list of all input images filenames in dataset
        self._datalist_depth = []
        self._extension_input = ['-rgb.jpg']  # The file extension of input images
        self._extension_depth = ['-depth-rectified.exr']
        self._create_lists_filenames(self.images_dir, self.depth_dir)
        self.outputImgWidth = outputImgWidth
        self.outputImgHeight = outputImgHeight
    def __len__(self):
        return len(self._datalist_input)

    def __getitem__(self, index):
        '''Returns an item from the dataset at the given index. If no depths directory has been specified,
        then a tensor of zeroes will be returned as the depth.

        Args:
            index (int): index of the item required from dataset.

        Returns:
            torch.Tensor: Tensor of input image
            torch.Tensor: Tensor of depth (tensor of zeroes if depth_dir is "" or None)
        '''
        # Open input img
        image_path = self._datalist_input[index]
        _img = Image.open(image_path).convert('RGB')
        _img = np.array(_img)

        # Open depth
        if self.depth_dir:
            depth_path = self._datalist_depth[index]
            _depth = exr_loader(depth_path, ndim=1)
            # _depth = cv2.resize(_depth, (self.outputImgWidth, self.outputImgHeight), interpolation=cv2.INTER_NEAREST)
            _depth[np.isnan(_depth)] = 0
            _depth[np.isinf(_depth)] = 0
            _depth = np.expand_dims(_depth, axis=0)

        # Apply image augmentations and convert to Tensor
        if self.transform:
            det_tf = self.transform.to_deterministic()
            _img = det_tf.augment_image(_img.copy())
            if self.depth_dir:
                # Making all values of invalid pixels marked as -1.0 to 0.
                # In raw data, invalid pixels are marked as (-1, -1, -1) so that on conversion to RGB they appear black.
                mask = np.all(_depth == -1.0, axis=0)
                _depth[:, mask] = 0.0
                _depth = _depth.transpose((1, 2, 0))  # To Shape: (H, W, 1)
                _depth = det_tf.augment_image(_depth, hooks=ia.HooksImages(activator=self._activator_masks))
                _depth = _depth.transpose((2, 0, 1))  # To Shape: (1, H, W)

        # Return Tensors
        _img_tensor = transforms.ToTensor()(_img.copy())
        if self.depth_dir:
            _depth_tensor = torch.from_numpy(_depth.copy())
            # _depth_tensor = nn.functional.normalize(_depth_tensor, p=2, dim=0)
        else:
            _depth_tensor = torch.zeros((3, _img_tensor.shape[1], _img_tensor.shape[2]), dtype=torch.float32)
        return _img_tensor, _depth_tensor
    def _create_lists_filenames(self, images_dir, depth_dir):
        '''Creates a list of the filenames of images and depths in the dataset.
        The depth at index N will match the image at index N.

        Args:
            images_dir (str): Path to the dir where images are stored
            depth_dir (str): Path to the dir where depths are stored

        Raises:
            ValueError: If the given directories are invalid
            ValueError: No images were found in given directory
            ValueError: Number of images and depths do not match
        '''
        assert os.path.isdir(images_dir), 'Dataloader given images directory that does not exist: "%s"' % (images_dir)
        for ext in self._extension_input:
            imageSearchStr = os.path.join(images_dir, '*' + ext)
            imagepaths = sorted(glob.glob(imageSearchStr))
            self._datalist_input = self._datalist_input + imagepaths
        numImages = len(self._datalist_input)
        if numImages == 0:
            raise ValueError('No images found in given directory. Searched in dir: {} '.format(images_dir))
        if depth_dir:
            assert os.path.isdir(depth_dir), ('Dataloader given depths directory that does not exist: "%s"' %
                                              (depth_dir))
            for ext in self._extension_depth:
                depthSearchStr = os.path.join(depth_dir, '*' + ext)
                depthpaths = sorted(glob.glob(depthSearchStr))
                self._datalist_depth = self._datalist_depth + depthpaths
            numdepths = len(self._datalist_depth)
            if numdepths == 0:
                # report the depth search pattern, not the image one
                raise ValueError('No depths found in given directory. Searched for {}'.format(depthSearchStr))
            if numImages != numdepths:
                raise ValueError('The number of images and depths do not match. Please check data,' +
                                 'found {} images and {} depths in dirs:\n'.format(numImages, numdepths) +
                                 'images: {}\ndepths: {}\n'.format(images_dir, depth_dir))
    def _activator_masks(self, images, augmenter, parents, default):
        '''Used with imgaug to help only apply some augmentations to images and not depths.
        Eg: Blur is applied to input only, not depth. However, resize is applied to both.
        '''
        if self.input_only and augmenter.name in self.input_only:
            return False
        else:
            return default
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from torch.utils.data import DataLoader
    from torchvision import transforms
    import torchvision
    import imageio

    # Example Augmentations using imgaug
    imsize = 512
    augs_train = iaa.Sequential([
        # Geometric Augs
        iaa.Scale((imsize, imsize), 0),  # Resize image
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Rot90((0, 4)),
        # Blur and Noise
        iaa.Sometimes(0.2, iaa.GaussianBlur(sigma=(0, 1.5), name="gaus-blur")),
        iaa.Sometimes(0.1, iaa.Grayscale(alpha=(0.0, 1.0), from_colorspace="RGB", name="grayscale")),
        iaa.Sometimes(0.2, iaa.AdditiveLaplaceNoise(scale=(0, 0.1 * 255), per_channel=True, name="gaus-noise")),
        # Color, Contrast, etc.
        iaa.Sometimes(0.2, iaa.Multiply((0.75, 1.25), per_channel=0.1, name="brightness")),
        iaa.Sometimes(0.2, iaa.GammaContrast((0.7, 1.3), per_channel=0.1, name="contrast")),
        iaa.Sometimes(0.2, iaa.AddToHueAndSaturation((-20, 20), name="hue-sat")),
        iaa.Sometimes(0.3, iaa.Add((-20, 20), per_channel=0.5, name="color-jitter")),
    ])
    # augs_test = iaa.Sequential([
    #     # Geometric Augs
    #     iaa.Scale((imsize, imsize), 0),
    # ])
    augs = augs_train
    input_only = ["gaus-blur", "grayscale", "gaus-noise", "brightness", "contrast", "hue-sat", "color-jitter"]

    db_test = ClearGraspsDataset(input_dir='./data/train/rgb-imgs',
                                 depth_dir='./data/train/depth-imgs-rectified',
                                 transform=augs,
                                 input_only=input_only)

    batch_size = 4
    testloader = DataLoader(db_test, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True)

    # Show 1 shuffled batch of images
    for ii, batch in enumerate(testloader):
        # Get batch
        img, depth = batch
        print('image shape, type: ', img.shape, img.dtype)
        print('depth shape, type: ', depth.shape, depth.dtype)

        # Show batch
        im_vis1 = torchvision.utils.make_grid(img, nrow=batch_size // 4, padding=2, normalize=True, scale_each=True)
        plt.imshow(im_vis1.numpy().transpose(1, 2, 0))
        plt.show()

        im_vis2 = torchvision.utils.make_grid(depthTensor2rgbTensor(depth), nrow=batch_size // 4, padding=2, normalize=True, scale_each=True)
        plt.imshow(im_vis2.numpy().transpose(1, 2, 0))
        plt.show()

        break
| 40.4375 | 142 | 0.598256 | 1,109 | 9,058 | 4.721371 | 0.25789 | 0.024446 | 0.01738 | 0.013369 | 0.157945 | 0.110772 | 0.071047 | 0.053094 | 0.040489 | 0.022536 | 0 | 0.020592 | 0.303047 | 9,058 | 223 | 143 | 40.618834 | 0.808807 | 0.220137 | 0 | 0.082707 | 0 | 0 | 0.093505 | 0.008157 | 0 | 0 | 0 | 0.004484 | 0.015038 | 1 | 0.037594 | false | 0 | 0.150376 | 0.007519 | 0.225564 | 0.015038 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9962f81178525ce273dd05b72036d4af806539c0 | 2,122 | py | Python | graph_plots/fwidgets/f_icon_label.py | DanShai/kivy-graph | 6537901d521247a13e186aaa8ecbaffdffdaf7ea | ["MIT"] | 3 | 2018-11-28T13:35:35.000Z | 2021-09-12T15:54:28.000Z | graph_plots/fwidgets/f_icon_label.py | DanShai/kivy-graph | 6537901d521247a13e186aaa8ecbaffdffdaf7ea | ["MIT"] | null | null | null | graph_plots/fwidgets/f_icon_label.py | DanShai/kivy-graph | 6537901d521247a13e186aaa8ecbaffdffdaf7ea | ["MIT"] | 1 | 2021-05-03T18:48:01.000Z | 2021-05-03T18:48:01.000Z |
'''
@author: dan
'''
from f_widget import FWidget
from kivy.uix.label import Label
from kivy.properties import ListProperty, NumericProperty, StringProperty, BooleanProperty, ObjectProperty
from kivy.uix.button import Button
from kivy.lang import Builder
from f_button import FButton
from utils import get_icon_char, get_rgba_color
from f_scalable import ScalableBehaviour
Builder.load_string('''
<FIconLabel>:
    Label:
        id: licon
        font_name: './graph_plots/fwidgets/data/font/fontawesome-webfont.ttf'
        pos: root.pos
        size: root.size
        font_size: root.font_size
        text: root.get_icon(root.icon) if root.icon else ''
        color: root.get_color(root.txt_color)
''')
class FIconLabel(Button, FWidget, ScalableBehaviour):
    icon = StringProperty('')
    get_icon = ObjectProperty(get_icon_char)
    txt_color = ListProperty(['Orange', '100'])
    n_txt_color = ListProperty(['Orange', '100'])
    d_txt_color = ListProperty(['Orange', '400'])

    def __init__(self, **kwargs):
        super(FIconLabel, self).__init__(**kwargs)
        self.get_icon = get_icon_char
        self.background_color = (1, 1, 1, 0)
        self.markup = True
        self.halign = 'center'
        self.valign = 'middle'
        self.color = self.get_color(self.txt_color)
        self.size_hint = 1, 1
        self.font_size = self.height * .8
        self.p_width = 0
        self.txt_color = self.n_txt_color

    def on_txt_color(self, widget, txt_color):
        widget.color = self.get_color(txt_color)
        widget.ids.licon.color = self.get_color(txt_color)

    def on_size(self, widget, size):
        self.size = size
        self.font_size = self.height * .8

    def on_touch_down(self, touch):
        if self.collide_point(touch.x, touch.y):
            self.txt_color = self.d_txt_color
        return super(FIconLabel, self).on_touch_down(touch)

    def on_touch_up(self, touch):
        if self.collide_point(touch.x, touch.y):
            self.txt_color = self.n_txt_color
        return super(FIconLabel, self).on_touch_up(touch)
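# Usage sketch (illustrative only; assumes the surrounding fwidgets package is
# importable and the icon name is known to get_icon_char):
# label = FIconLabel(icon='home', txt_color=['Blue', '300'])
# The widget switches to d_txt_color while pressed and restores n_txt_color on release.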
| 27.921053 | 106 | 0.65787 | 285 | 2,122 | 4.659649 | 0.294737 | 0.090361 | 0.045181 | 0.048193 | 0.278614 | 0.23494 | 0.162651 | 0.143072 | 0.082831 | 0.082831 | 0 | 0.011091 | 0.235156 | 2,122 | 75 | 107 | 28.293333 | 0.807147 | 0.005655 | 0 | 0.117647 | 0 | 0 | 0.182381 | 0.053333 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098039 | false | 0 | 0.156863 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9967cec318291035a6b99a56b195699b1cec987a | 4,766 | py | Python | holybible.py | DPS0340/holybible.py | ee6b4d6da7b21f44a6d3e7fc8973cf186f7c1109 | ["MIT"] | null | null | null | holybible.py | DPS0340/holybible.py | ee6b4d6da7b21f44a6d3e7fc8973cf186f7c1109 | ["MIT"] | null | null | null | holybible.py | DPS0340/holybible.py | ee6b4d6da7b21f44a6d3e7fc8973cf186f7c1109 | ["MIT"] | 1 | 2021-05-03T18:48:01.000Z | 2021-05-03T18:48:01.000Z |
# Written by Lee Ji-ho #
# The copyright of the Common Translation (공동번역) Bible belongs entirely to its copyright holders. #
import sys
import re
import random
end = "끝났습니다."
error = "오류입니다."
def run():
    # English abbreviations used as search keys in 공동번역.txt; the entries
    # missing from the original lists (Lam, Hos, Joe, Mal, Mat, and 2 Timothy's
    # Korean title below) are filled in so both lists line up index by index
    # across all 66 books.
    short = ['Gen', 'Exo', 'Lev', 'Num', 'Deu', 'Jos', 'Jdg', 'Rth', '1Sa', '2Sa', '1Ki', '2Ki', '1Ch', '2Ch', 'Ezr',
             'Neh', 'Est', 'Job', 'Psa', 'Pro', 'Ecc', 'Sol', 'Isa', 'Jer', 'Lam', 'Eze', 'Dan', 'Hos', 'Joe', 'Amo',
             'Oba', 'Jon', 'Mic', 'Nah', 'Hab', 'Zep', 'Hag', 'Zec', 'Mal', 'Mat', 'Mar', 'Luk', 'Joh', 'Act', 'Rom',
             '1Co', '2Co', 'Gal', 'Eph', 'Phi', 'Col', '1Th', '2Th', '1Ti', '2Ti', 'Tit', 'Phm', 'Heb', 'Jam', '1Pe',
             '2Pe', '1Jo', '2Jo', '3Jo', 'Jod', 'Rev']
    bookname = ['창세기', '출애굽기', '레위기', '민수기', '신명기', '여호수아', '판관기', '룻기', '사무엘상', '사무엘하',
                '열왕기상', '열왕기하', '역대기상', '역대기하', '에즈라', '느헤미야', '에스델', '욥기', '시편', '잠언',
                '전도서', '아가', '이사야', '예레미야', '애가', '에제키엘', '다니엘', '호세아', '요엘', '아모스',
                '오바디야', '요나', '미가', '나훔', '하바꾹', '스바니야', '하깨', '즈가리야', '말라기',
                '마태오의 복음서', '마르코의 복음서', '루가의 복음서', '요한의 복음서', '사도행전', '로마인에게 보낸 편지',
                '고린토인에게 보낸 첫째 편지', '고린토인에게 보낸 둘째 편지', '갈라디아인에게 보낸 편지', '에페소인에게 보낸 편지',
                '필립비인들에게 보낸 편지', '골로사이인들에게 보낸 편지', '데살로니카인들에게 보낸 첫째 편지',
                '데살로니카인들에게 보낸 둘째 편지', '디모테오에게 보낸 첫째 편지', '디모테오에게 보낸 둘째 편지',
                '디도에게 보낸 편지', '필레몬에게 보낸 편지', '히브리인들에게 보낸 편지', '야고보의 편지', '베드로의 첫째 편지',
                '베드로의 둘째 편지', '요한의 첫째 편지', '요한의 둘째 편지', '요한의 셋째 편지', '유다의 편지', '요한의 묵시록']
    global selectbookname
    global k
    global line
    global number
    for i in range(len(short)):
        book = bookname[i]
        say = ("[%d] " % (i + 1))
        print(say + book, end=" ")
        if i % 5 == 0:
            print('''
''')
        if i == (len(short) - 1) and (i % 5) != 0:
            for p in range((len(short) - 1) % 5):
                print(say + book, end=" ")
    print("선택하실 책 번호를 선택하세요.")
    number = int(input())
    selectbookname = short[int(number - 1)]
    print(selectbookname)
    print('''
[1] 성경 scrapper
[2] 장 선택해서 읽기
[3] 줄 선택해서 읽기
[4] 성경 리더
[5] 랜덤 줄 출력(모든 경전)
무엇을 선택하시겠습니까?''')
    choice = int(input())
    if choice == 1:  # bible scrapper
        lines = ''
        # e.g. matches "Gen 1:1 ..." for the selected book
        anypnl = re.compile(r'%s \d+:\d+' % re.escape(selectbookname))
        # open the file once and scan every line; the original re-opened the
        # file inside the loop (re-reading the first line forever) and tested
        # `line is False`, but readline() returns '' at EOF, never False
        with open('공동번역.txt', 'r') as a:
            for line in a:
                if anypnl.search(line) is not None:
                    lines += line
        with open('result.txt', 'w') as b:
            b.write(lines)
    if choice == 2:  # chapter reader
        page = ''
        print('''몇 장입니까?''')
        k = int(input())
        with open('공동번역.txt', 'r') as a:
            while True:
                line = a.readline()
                checker = line.find('%s %d' % (selectbookname, k))
                closer = line.find('%s %d' % (selectbookname, k + 1))
                if checker != -1:
                    page += '%s\n' % line
                if closer != -1:
                    break
                if not line:
                    break
        print("\n" * 5)
        print(page)
    if choice == 3:  # line reader
        print('''몇 장 입니까?''')
        page = input()
        print('''몇 줄 입니까?''')
        line = input()
        with open('공동번역.txt', 'r') as a:
            while True:
                linesearcher = a.readline()
                linechecker = linesearcher.find("%s %s:%s" % (selectbookname, page, line))
                if linechecker != -1:
                    break
                if not linesearcher:  # readline() returns '' at EOF, never False
                    break
        print(linesearcher)
    if choice == 4:  # bible reader
        page = ''
        print('''몇 장부터 보시겠습니까?''')
        k = int(input())
        with open('공동번역.txt', 'r') as a:
            while True:
                line = a.readline()
                checker = line.find('%s %d' % (selectbookname, k))
                closer = line.find('%s %d' % (selectbookname, k + 1))
                if checker != -1:
                    page += '%s\n' % line
                if closer != -1:
                    print(page)
                    print('''다음 장을 보려면 엔터를 눌러주세요.
다른 값을 입력하시면 종료됩니다.''')
                    k += 1
                    select = input()
                    if select == '':
                        continue
                    else:
                        break
                if not line:  # stop at EOF instead of looping forever
                    break
    if choice == 5:  # random line (from the whole corpus)
        with open('공동번역.txt', 'r') as a:
            alllines = a.readlines()
        print(random.choice(alllines))
    if choice not in [1, 2, 3, 4, 5]:
        print(error)
        sys.exit()


run()
| 36.381679 | 118 | 0.402854 | 550 | 4,766 | 3.490909 | 0.434545 | 0.016667 | 0.03125 | 0.039063 | 0.201042 | 0.201042 | 0.201042 | 0.166146 | 0.166146 | 0.166146 | 0 | 0.018378 | 0.417751 | 4,766 | 130 | 119 | 36.661538 | 0.673514 | 0.014897 | 0 | 0.333333 | 0 | 0 | 0.206632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008333 | false | 0 | 0.025 | 0 | 0.033333 | 0.141667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
996b9f6d14e4feb9f7a3b2d58454376d40004276 | 513 | py | Python | progs/mean.py | Breccia/s-py | 4fc5fcd0efbfcaa6574a81ee922c1083ed0ef57d | [
"MIT"
] | null | null | null | progs/mean.py | Breccia/s-py | 4fc5fcd0efbfcaa6574a81ee922c1083ed0ef57d | [
"MIT"
] | null | null | null | progs/mean.py | Breccia/s-py | 4fc5fcd0efbfcaa6574a81ee922c1083ed0ef57d | [
"MIT"
] | null | null | null | #!/usr/local/anaconda3/bin/python
import sys
sys.path.insert(0, "../libs/")
from spy_mean import compute_mean
if __name__ == "__main__":
print("Program to compute mean")
count = input("Enter total number of samples: ")
idx = 0
data = []
for idx in range(0, int(count)):
        val = input("Enter data {0}: ".format(idx + 1))
        data.append(float(val))  # input() returns str; compute_mean presumably expects numbers
mean = compute_mean(data)
print("You entered: {0} vals, mean = {1}".format(count, mean))
| 24.428571 | 66 | 0.623782 | 74 | 513 | 4.148649 | 0.554054 | 0.143322 | 0.09772 | 0.123779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01995 | 0.218324 | 513 | 20 | 67 | 25.65 | 0.745636 | 0.128655 | 0 | 0 | 0 | 0 | 0.268623 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
996c038f0063123980ac86217bb77ad88b247eae | 896 | py | Python | wxalarmlib/utils/time_util.py | sanderiana/wxAlarm | 6abc4a8851ce83fa7d3ee30d89a773d9952f87ed | [
"MIT"
] | null | null | null | wxalarmlib/utils/time_util.py | sanderiana/wxAlarm | 6abc4a8851ce83fa7d3ee30d89a773d9952f87ed | [
"MIT"
] | null | null | null | wxalarmlib/utils/time_util.py | sanderiana/wxAlarm | 6abc4a8851ce83fa7d3ee30d89a773d9952f87ed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------
# wxalarm.py
#
# Copyright (c) 2019 sanderiana https://github.com/sanderiana
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# ---------------------------------------------------------------
# Icon made by Freepik from www.flaticon.com
# ---------------------------------------------------------------
import datetime
def change_time(hour_min):
date = datetime.datetime.now()
year = date.year
month = date.month
day = date.day
time = hour_min.split(":")
hour = int(time[0])
min = int(time[1])
return datetime.datetime(year, month, day, hour, min, 0)
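# Usage sketch (illustrative values): change_time("07:30") returns a datetime for today
# at 07:30, and change_delta(datetime.timedelta(hours=2, minutes=30)) below formats
# that span as "02:30".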
def change_delta(delta_time):
sec = delta_time.total_seconds()
hour = sec // 3600
min = (sec - (hour * 3600)) // 60
return "%02d:%02d" % (hour, min) | 28 | 65 | 0.506696 | 100 | 896 | 4.47 | 0.56 | 0.06264 | 0.049217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029412 | 0.165179 | 896 | 32 | 66 | 28 | 0.568182 | 0.472098 | 0 | 0 | 0 | 0 | 0.021645 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
996ea645ce42f819744d7c8848ee5604d942ae67 | 3,380 | py | Python | ship.py | kcwikizh/kancolle-shinkai-db | 73808a91b5f59d158374f016e2d514225f1ca6bd | [
"MIT"
] | 1 | 2019-02-11T08:57:07.000Z | 2019-02-11T08:57:07.000Z | ship.py | kcwikizh/kancolle-shinkai-db | 73808a91b5f59d158374f016e2d514225f1ca6bd | [
"MIT"
] | null | null | null | ship.py | kcwikizh/kancolle-shinkai-db | 73808a91b5f59d158374f016e2d514225f1ca6bd | [
"MIT"
] | null | null | null | """Convert shinkai ship Json to KcWiki Lua """
__all__ = ['main']
import json
from collections import OrderedDict
from utils import python_data_to_lua_table
SHIPS_HR_JSON = 'json/ships_human_readable.json'
SHIPS_LUA = 'lua/ships.lua'
def shinkai_parse_ship(ships):
"""Get shinkai ships stored by python OrderedDict"""
ships_dict = OrderedDict()
for ship_id in ships:
ship = ships[ship_id]
ship_dict = OrderedDict()
ship_dict['日文名'] = ship['name']['fullname_ja_jp']
ship_dict['中文名'] = ship['name']['fullname_zh_cn']
ship_dict['kcwiki分类'] = ship['kcwiki_class']
attributes_dict = OrderedDict()
attributes_dict['耐久'] = ship['stats']['taik']
attributes_dict['火力'] = [
ship['stats']['houg'],
ship['stats']['houg2']
]
attributes_dict['雷装'] = [
ship['stats']['raig'],
ship['stats']['raig2']
]
attributes_dict['对空'] = ship['stats']['tyku']
attributes_dict['对潜'] = ship['stats']['tais']
attributes_dict['回避'] = ship['stats']['houk']
attributes_dict['索敌'] = ship['stats']['saku']
attributes_dict['速力'] = ship['stats']['soku']
attributes_dict['装甲'] = ship['stats']['souk']
attributes_dict['运'] = ship['stats']['luck']
attributes_dict['射程'] = ship['stats']['leng']
ship_dict['属性'] = attributes_dict
equip_dict = OrderedDict()
equip_dict['格数'] = len(ship['slots'])
equip_dict['搭载'] = ship['slots']
equip_dict['装备'] = ship['equips']
ship_dict['装备'] = equip_dict
appears_list = []
for appear in ship.get('appears', []):
appear_dict = OrderedDict()
appear_dict['map'] = OrderedDict()
appear_dict['map']['限定海域'] = appear['map']['is_event']
appear_dict['map']['年'] = appear['map']['year']
appear_dict['map']['季节'] = [
None, '冬', '春', '夏', '秋'][appear['map']['season']]
appear_dict['map']['海域'] = 'E-' + str(appear['map']['event_id'])
appear_dict['map']['Boss'] = appear['map']['is_boss']
if 'is_final_battle' in appear:
appear_dict['最终战'] = appear['is_final_battle']
if 'selected_rank' in appear:
appear_dict['选择难度'] = [
'无', '丙', '乙', '甲'][appear['selected_rank']]
appears_list.append(appear_dict)
if appears_list:
ship_dict['出现海域'] = appears_list
ships_dict[ship_id] = ship_dict
return ships_dict
def shinkai_generate_ship_lua(ships):
"""Generate KcWiki shinkai ship Lua table"""
ships_dict = shinkai_parse_ship(ships)
data, _ = python_data_to_lua_table(ships_dict, level=1)
with open(SHIPS_LUA, 'w', encoding='utf8') as lua_fp:
lua_fp.write('local d = {}\n\n'
+ 'd.shipDataTable = {\n')
lua_fp.write(data)
lua_fp.write('\n}\n\nreturn d\n')
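    # Resulting file shape (entry bodies come from python_data_to_lua_table):
    #   local d = {}
    #   d.shipDataTable = {
    #       ["<ship_id>"] = { ... },   -- one table per ship, keys as parsed above
    #   }
    #   return d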
def load_ships_json(json_file):
"""Load and decode json"""
print('Load json file: {}'.format(json_file))
with open(json_file, 'r', encoding='utf8') as file:
ships = json.load(file)
return ships
def main():
"""Main process"""
ships = load_ships_json(SHIPS_HR_JSON)
shinkai_generate_ship_lua(ships)
if __name__ == '__main__':
main()
| 33.465347 | 76 | 0.57071 | 411 | 3,380 | 4.420925 | 0.321168 | 0.100165 | 0.042928 | 0.016511 | 0.057237 | 0.027518 | 0 | 0 | 0 | 0 | 0 | 0.001995 | 0.25858 | 3,380 | 100 | 77 | 33.8 | 0.723065 | 0.047041 | 0 | 0 | 0 | 0 | 0.16531 | 0.009393 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.039474 | 0 | 0.118421 | 0.013158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9971ddcf2919c00539af25050648ccbd84f39ca4 | 6,547 | py | Python | models/FCOSInference.py | meet-minimalist/FCOS-Pytorch-Implementation | e8ac1c6230174902732dbe8bcff3a87034f99517 | [
"MIT"
] | null | null | null | models/FCOSInference.py | meet-minimalist/FCOS-Pytorch-Implementation | e8ac1c6230174902732dbe8bcff3a87034f99517 | [
"MIT"
] | null | null | null | models/FCOSInference.py | meet-minimalist/FCOS-Pytorch-Implementation | e8ac1c6230174902732dbe8bcff3a87034f99517 | [
"MIT"
] | null | null | null |
import os
import sys
sys.path.append("../")
# TODO : Remove this append line
import numpy as np
import torch
import torch.nn as nn
from models.FCOS import FCOS
from models.PostProcessor import PostProcessor
import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from torchvision import transforms
from utils.transforms.to_tensor import ToTensorOwn
from utils.transforms.normalize import Normalize
from utils.transforms.center_crop import CenterCrop
cuda = torch.device('cuda:0')
cpu = torch.device("cpu:0")
class FCOSInference(nn.Module):
def __init__(self, backbone_model='resnet50', freeze_backend=[False, False, False, False], \
fpn_features=256, num_classes=81, use_det_head_group_norm=True, \
centerness_on_regression=True, use_gradient_checkpointing=False, \
strides=[8, 16, 32, 64, 128], use_cuda=False, \
add_centerness_in_cls_prob=True, max_detection_boxes_num=1000, \
cls_score_threshold=0.05, nms_iou_threshold=0.60):
super(FCOSInference, self).__init__()
self.strides = strides
self.max_detection_boxes_num = max_detection_boxes_num
self.model = FCOS(backbone_model, freeze_backend, fpn_features, num_classes, \
use_det_head_group_norm, centerness_on_regression, use_gradient_checkpointing)
self.post_process = PostProcessor(use_cuda, add_centerness_in_cls_prob, \
max_detection_boxes_num, cls_score_threshold, nms_iou_threshold, num_classes)
if use_cuda:
self.model = self.model.to(cuda, non_blocking=True)
self.post_process = self.post_process.to(cuda, non_blocking=True)
def forward(self, preprocesed_image):
# image : [B x 3 x img_h x img_w]
cls_probs, cnt_logits, reg_values = self.model(preprocesed_image)
# cls_probs, cnt_logit, reg_values each will have a list of features having shape as below.
# cls_probs : [[B x 81 x H x W], [B x 81 x H x W], ....]
# cnt_logits: [[B x 1 x H x W], [B x 1 x H x W], ....]
# reg_values: [[B x 4 x H x W], [B x 4 x H x W], ....]
predictions = self.post_process([cls_probs, cnt_logits, reg_values], self.strides)
# predictions : List of [N x 6] tensor for each element in batch
# : [x1, y1, x2, y2, cls_prob, cls_id]
B = preprocesed_image.shape[0]
num_bboxes = torch.zeros(size=[B])
for i, res_img in enumerate(preprocesed_image):
img_h, img_w = res_img.shape[1:]
predictions[i][:, 0] = torch.clip(predictions[i][:, 0], 0, img_w)
predictions[i][:, 1] = torch.clip(predictions[i][:, 1], 0, img_h)
predictions[i][:, 2] = torch.clip(predictions[i][:, 2], 0, img_w)
predictions[i][:, 3] = torch.clip(predictions[i][:, 3], 0, img_h)
num_bboxes[i] = len(predictions[i])
final_prediction = torch.zeros(size=[B, self.max_detection_boxes_num, 6], dtype=torch.float32)
for i, pred in enumerate(predictions):
final_prediction[i, :len(pred)] = pred
return final_prediction, num_bboxes
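    # Note on the contract above: final_prediction is zero-padded so every image yields
    # exactly max_detection_boxes_num rows, and num_bboxes[i] records how many of those
    # rows are real detections for image i.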
if __name__ == "__main__":
import cv2
import config_converter as config
complete_model = FCOSInference(backbone_model=config.converter_backbone, freeze_backend=[False, False, False, False], \
fpn_features=config.converter_fpn_features, num_classes=config.converter_num_classes, \
use_det_head_group_norm=config.converter_use_det_head_group_norm, \
centerness_on_regression=config.converter_centerness_on_regression, \
use_gradient_checkpointing=False, strides=config.converter_strides, use_cuda=False, \
add_centerness_in_cls_prob=config.add_centerness_in_cls_prob, \
max_detection_boxes_num=config.max_detection_boxes_num, \
cls_score_threshold=config.cls_score_threshold, \
nms_iou_threshold=config.nms_iou_threshold)
ckpt_path = "../summaries/2021_07_26_00_01_29/ckpt/fcos_resnet50_eps_26_test_loss_2.5426.pth"
ckpt = torch.load(ckpt_path)['model']
complete_model.model.load_state_dict(ckpt, strict=True) # Restore FCOS architecture part only
complete_model.model.eval() # TODO : Skipping this intentionally
complete_model.eval()
# Image loading and preprocessing
img_path = "../sample_imgs/000026.jpg"
# img_path = "../sample_imgs/000012.jpg"
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
transforms = transforms.Compose([
CenterCrop(),
ToTensorOwn(), # Custom ToTensor transform, converts to CHW from HWC only
Normalize(config.converter_normalization_type),
])
empty_bb = BoundingBoxesOnImage([BoundingBox(0, 0, 100, 100, label=0)], \
shape=(*config.input_size, 3))
sample = {'image' : img, 'bbox' : empty_bb}
preprocessed_tensor = transforms([sample, config.input_size])
resized_img = preprocessed_tensor['image']
resized_img = torch.unsqueeze(resized_img, dim=0)
# Model Inference
final_predictions, num_bboxes = complete_model(resized_img)
final_predictions = final_predictions.detach().numpy()
num_bboxes = num_bboxes.detach().numpy()
resized_img = resized_img.detach().numpy()
for pred, num_bb, img in zip(final_predictions, num_bboxes, resized_img):
pred = pred[:int(num_bb)]
        # The remaining rows are zero padding (added so predictions batch evenly), so drop them
img[0:1, :, :] = img[0:1, :, :] * 0.229 + 0.485
img[1:2, :, :] = img[1:2, :, :] * 0.224 + 0.456
img[2:3, :, :] = img[2:3, :, :] * 0.225 + 0.406
img = np.uint8(np.transpose(img, (1, 2, 0)) * 255)
for bb in pred:
x1, y1, x2, y2 = [int(c) for c in bb[:4]]
cls_prob, cls_id = bb[4:]
cls_name = config.converter_label_dict[int(cls_id)]
print(f"X1: {x1}, Y1: {y1}, X2: {x2}, Y2: {y2}, Cls_id: {int(cls_id)}, Cls_name: {cls_name}, Cls_prob: {cls_prob:.4f}")
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
op_path = os.path.splitext(img_path)[0] + "_res.jpg"
cv2.imwrite(op_path, img)
| 45.465278 | 131 | 0.64625 | 895 | 6,547 | 4.463687 | 0.268156 | 0.027034 | 0.029787 | 0.035044 | 0.219524 | 0.171715 | 0.129912 | 0.080601 | 0.021026 | 0 | 0 | 0.037231 | 0.241026 | 6,547 | 143 | 132 | 45.783217 | 0.766754 | 0.111807 | 0 | 0 | 0 | 0.01 | 0.046568 | 0.017937 | 0 | 0 | 0 | 0.006993 | 0 | 1 | 0.02 | false | 0 | 0.16 | 0 | 0.2 | 0.01 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
997f4b97d545477757e8bda91a697ce9e6990088 | 11,259 | py | Python | kcclient/slidingmetrics.py | sanjeevm0/kcluster-client | 5dda3f2a4ebc5811ec176aab70f48d9be5f6a731 | [
"MIT"
] | null | null | null | kcclient/slidingmetrics.py | sanjeevm0/kcluster-client | 5dda3f2a4ebc5811ec176aab70f48d9be5f6a731 | [
"MIT"
] | null | null | null | kcclient/slidingmetrics.py | sanjeevm0/kcluster-client | 5dda3f2a4ebc5811ec176aab70f48d9be5f6a731 | [
"MIT"
] | 1 | 2020-09-22T23:40:37.000Z | 2020-09-22T23:40:37.000Z | import math
import sys
import os
import copy
thisPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(thisPath)
from enum import Enum
from mlock import MLock
import utils
# Input.Cumulative means cumulative value is being input (e.g. total bytes)
# Input.Average means time average is being input (e.g. bytes/sec)
# Input Value means value is being given (e.g. bytes)
Input = Enum('Input', 'Value Counter NBitCounter')
def noneMax(x, y):
if x is None:
return y
elif y is None:
return x
else:
return max(x, y)
def noneMin(x, y):
if x is None:
return y
elif y is None:
return x
else:
return min(x, y)
class SlidingMetrics():
# if input is avg, it is something like bytes/sec, etc., otherwise unit is bytes or seconds of latency, etc.
def __init__(self, minWindow, maxWindow, inputType, bits=32):
self.minWindow = minWindow
self.maxWindow = maxWindow
self.subWindow = maxWindow - minWindow
self.maxWindows = math.ceil(minWindow / self.subWindow) + 1
self.inputType = inputType
self.winIndex = 0
self.lock = MLock()
# window statistics
self.NWin = []
self.ts1Win = []
self.tsNWin = []
self.minValWin = []
self.maxValWin = []
self.cumu1Win = []
self.cumuNWin = []
self.N = 0 # num samples (1 to N)
        self.ts0 = None  # ts just below the oldest retained sample (needed for the left-edge average)
self.ts1 = None
self.tsN = None
self.minVal = None
self.maxVal = None
self.cumu0 = 0
self.cumu1 = None
self.cumuNMinus1 = None
self.cumuN = 0
self.startTs = None
self.prevTs = None
self.prevData = 0 # raw data
self.bits = bits
def __dump__(self):
o = copy.deepcopy(self.__dict__)
o.pop("lock", None)
o["inputType"] = str(o["inputType"])
return o
@staticmethod
def __load__(o):
x = SlidingMetrics(10, 20, Input.Value, bits=32)
o = utils.smartLoad(o, True)
for key, val in o.items():
setattr(x, key, val)
x.inputType = eval(x.inputType) # convert back
return x
def __serialize__(self, seenVals):
o = {}
for k, v in sorted(self.__dict__.items()):
if k not in ["lock", "inputType"]:
o[k] = utils.serialize(v, seenVals)
o["inputType"] = str(self.inputType)
return o
def __deserialize__(self, o, toDict, seenVals):
for k, v in sorted(o.items()):
if k not in ["lock", "inputType"]:
setattr(self, k, utils.deserialize(v, toDict, seenVals))
self.inputType = eval(o["inputType"])
def _resetCumu(self):
amtToSub = self.cumu0
if amtToSub==0:
return
for i in range(len(self.cumu1Win)):
self.cumu1Win[i] -= amtToSub
self.cumuNWin[i] -= amtToSub
self.cumu0 -= amtToSub
self.cumu1 -= amtToSub
self.cumuNMinus1 -= amtToSub
self.cumuN -= amtToSub
# returns cumulative and value
def _setCumuVal(self, data):
if self.inputType==Input.NBitCounter:
if data < self.prevData: # counter overflow
data += (1 << self.bits)
val = data - self.prevData
elif self.inputType==Input.Counter:
val = data - self.prevData
elif self.inputType==Input.Value:
val = data
self.prevData = data
cumu = self.cumuN + val
return val, cumu
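    # Worked example for Input.NBitCounter with bits=8: a previous reading of 250
    # followed by a reading of 5 means the counter wrapped, so data becomes
    # 5 + (1 << 8) = 261 and the delta val = 261 - 250 = 11.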
def popWindow(self):
N0 = self.NWin.pop(0)
self.N -= N0
self.ts1Win.pop(0)
self.ts1 = self.ts1Win[0]
self.ts0 = self.tsNWin.pop(0)
self.minValWin.pop(0)
self.minVal = min(self.minValWin)
self.maxValWin.pop(0)
self.maxVal = max(self.maxValWin)
self.cumu1Win.pop(0)
self.cumu1 = self.cumu1Win[0]
self.cumu0 = self.cumuNWin.pop(0)
def _addHelper(self, ts, data):
if self.startTs is None:
self.startTs = ts
ts = ts - self.startTs # normalize to start at zero
if self.prevTs is not None and ts < self.prevTs:
return False
self.prevTs = ts
val, cumu = self._setCumuVal(data)
if ts >= self.winIndex*self.subWindow:
# new window
self.winIndex += 1
self.NWin.append(1)
self.ts1Win.append(ts)
self.tsNWin.append(ts)
self.minValWin.append(val)
self.maxValWin.append(val)
self.cumu1Win.append(cumu)
self.cumuNWin.append(cumu)
else:
# add to current window (last window)
self.NWin[-1] += 1
self.tsNWin[-1] = ts
self.minValWin[-1] = min(self.minValWin[-1], val)
self.maxValWin[-1] = max(self.maxValWin[-1], val)
self.cumuNWin[-1] = cumu
# pop oldest window as new window is added
bWindowMoved = False
if len(self.ts1Win) > self.maxWindows:
self.popWindow()
bWindowMoved = True
if self.ts1 is None:
self.ts1 = ts
while ts-self.ts1 >= self.maxWindow:
self.popWindow()
bWindowMoved = True
self.N += 1
self.tsN = ts
self.minVal = noneMin(self.minVal, val)
self.maxVal = noneMax(self.maxVal, val)
if self.cumu1 is None:
self.cumu1 = cumu
self.cumuNMinus1 = self.cumuN
self.cumuN = cumu
#print("Cumu: {0} {1} {2} {3}".format(self.cumu0, self.cumu1, self.cumuNMinus1, self.cumuN))
if bWindowMoved:
self._resetCumu()
return True
def add(self, ts, data):
with self.lock:
return self._addHelper(ts, data)
def lockTryNan(self, fn):
with self.lock:
try:
return fn()
except ZeroDivisionError:
return 0.0
except Exception:
return float('nan')
# given data points have timestamp which is left end of interval
def avgL(self):
with self.lock:
if (self.N-1)==0 or (self.tsN == self.ts1):
return 0.0
else:
# N-1 points, N-1 intervals
return (self.cumuNMinus1 - self.cumu0) / (self.tsN - self.ts1)
def avgR(self):
with self.lock:
if self.ts0 is None:
if (self.N-1)<=0 or (self.tsN == self.ts1):
return 0.0
else:
# N-1 points, N-1 intervals
return (self.cumuN - self.cumu1) / (self.tsN - self.ts1)
else:
if self.N==0 or (self.tsN == self.ts0):
return 0.0
else:
# N points, N intervals
return (self.cumuN - self.cumu0) / (self.tsN - self.ts0)
def avgN(self):
with self.lock:
if self.N==0:
return 0.0
else:
return (self.cumuN - self.cumu0) / self.N
# number of measurements
def avgNumL(self):
with self.lock:
if (self.N-1)==0 or (self.tsN == self.ts1):
return 0.0
else:
                return (self.N - 1) / (self.tsN - self.ts1)
def avgNumR(self):
with self.lock:
if self.ts0 is None:
if (self.N-1)<=0 or (self.tsN == self.ts1):
return 0.0
else:
                    return (self.N - 1) / (self.tsN - self.ts1)
else:
if self.N==0 or (self.tsN == self.ts0):
return 0.0
else:
                    return self.N / (self.tsN - self.ts0)
def windowL(self):
return self.lockTryNan(lambda : self.tsN - self.ts1)
def windowR(self):
with self.lock:
if self.ts0 is None:
return self.tsN - self.ts1
else:
return self.tsN - self.ts0
utils.registerEval('SlidingMetrics', SlidingMetrics)
utils.registerCreate("SlidingMetrics", lambda : SlidingMetrics(10, 20, Input.Value, bits=32))
# ============================
# Testing
from numpy import random
if __name__ == "__main__":
window = []
N = 100000
s = SlidingMetrics(9.0, 10.0, Input.Value)
subWindow = 1.0
numWindows = 10
ts = 0
r = random.RandomState(4532312)
lastPopped = None
valMin = 0
valMax = 20
tsDelta = 0.2
tsDeltaRand = 0.03
for i in range(N):
val = r.uniform(valMin, valMax)
s.add(ts, val)
window.append((ts, val))
# remove from window
curWindow = math.floor(ts/subWindow)
firstWindow = max(0, curWindow - numWindows + 1)
firstTs = firstWindow * subWindow
while len(window) > 0:
(t, v) = window[0]
if t < firstTs:
lastPopped = window.pop(0)
else:
break
#print("{0} {1}".format(len(window), window))
# compare
if lastPopped is not None:
t0 = lastPopped[0]
rStart = 0
else:
t0 = None
rStart = 1
sumL = 0
for j in range(0, len(window)-1):
sumL += window[j][1]
if len(window)<=1:
avgWinL = 0.0
else:
avgWinL = sumL / (window[-1][0] - window[0][0])
sumR = 0
for j in range(rStart, len(window)):
sumR += window[j][1]
if len(window)<=1:
avgWinR = 0.0
elif rStart==0:
avgWinR = sumR / (window[-1][0] - t0)
else:
avgWinR = sumR / (window[-1][0] - window[0][0])
avgWinN = (sumL + window[-1][1]) / len(window)
maxWin = max(window, key=lambda x: x[1])[1]
minWin = min(window, key=lambda x: x[1])[1]
if False:
print("T: {0} W: {1} V: {2}".format(ts, curWindow, val))
print("L: {0} {1}".format(avgWinL, s.avgL()))
print("R: {0} {1}".format(avgWinR, s.avgR()))
print("N: {0} {1}".format(avgWinN, s.avgN()))
error = abs(avgWinL-s.avgL()) + abs(avgWinR-s.avgR()) + abs(avgWinN-s.avgN())
errorMinMax = abs(maxWin-s.maxVal) + abs(minWin-s.minVal)
print("ERROR: {0:20.15f}\t ERRORMINMAX: {1:20.15f}".format(error, errorMinMax), end='\r')
if (abs(avgWinL-s.avgL()) > abs(avgWinL)*0.0000001 or
abs(avgWinR-s.avgR()) > abs(avgWinR)*0.0000001 or
abs(avgWinN-s.avgN()) > abs(avgWinN)*0.0000001 or
maxWin != s.maxVal or
minWin != s.minVal):
print("ERROR====")
print("T: {0} W: {1} V: {2}".format(ts, curWindow, val))
print("L: {0} {1}".format(avgWinL, s.avgL()))
print("R: {0} {1}".format(avgWinR, s.avgR()))
print("N: {0} {1}".format(avgWinN, s.avgN()))
print("M: {0} {1}".format(maxWin, s.maxVal))
print("m: {0} {1}".format(minWin, s.minVal))
ts += r.uniform(tsDelta - tsDeltaRand, tsDelta + tsDeltaRand)
| 31.362117 | 112 | 0.518963 | 1,393 | 11,259 | 4.161522 | 0.168701 | 0.020528 | 0.028463 | 0.02415 | 0.267897 | 0.203036 | 0.186648 | 0.148525 | 0.13438 | 0.129032 | 0 | 0.036782 | 0.355271 | 11,259 | 358 | 113 | 31.449721 | 0.761813 | 0.081624 | 0 | 0.248299 | 0 | 0 | 0.029968 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068027 | false | 0 | 0.027211 | 0.003401 | 0.214286 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
997fb873a286d232b8c4f66af54539b644cf21c9 | 9,025 | py | Python | oscar/lib/python2.7/site-packages/whoosh/analysis/ngrams.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/whoosh/analysis/ngrams.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/whoosh/analysis/ngrams.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.compat import text_type
from whoosh.compat import xrange
from whoosh.analysis.acore import Token
from whoosh.analysis.filters import Filter, LowercaseFilter
from whoosh.analysis.tokenizers import Tokenizer, RegexTokenizer
# Tokenizer
class NgramTokenizer(Tokenizer):
"""Splits input text into N-grams instead of words.
>>> ngt = NgramTokenizer(4)
>>> [token.text for token in ngt("hi there")]
["hi t", "i th", " the", "ther", "here"]
Note that this tokenizer does NOT use a regular expression to extract
words, so the grams emitted by it will contain whitespace, punctuation,
etc. You may want to massage the input or add a custom filter to this
tokenizer's output.
Alternatively, if you only want sub-word grams without whitespace, you
could combine a RegexTokenizer with NgramFilter instead.
"""
__inittypes__ = dict(minsize=int, maxsize=int)
def __init__(self, minsize, maxsize=None):
"""
:param minsize: The minimum size of the N-grams.
:param maxsize: The maximum size of the N-grams. If you omit
this parameter, maxsize == minsize.
"""
self.min = minsize
self.max = maxsize or minsize
def __eq__(self, other):
if self.__class__ is other.__class__:
if self.min == other.min and self.max == other.max:
return True
return False
def __call__(self, value, positions=False, chars=False, keeporiginal=False,
removestops=True, start_pos=0, start_char=0, mode='',
**kwargs):
assert isinstance(value, text_type), "%r is not unicode" % value
inlen = len(value)
t = Token(positions, chars, removestops=removestops, mode=mode)
pos = start_pos
if mode == "query":
size = min(self.max, inlen)
for start in xrange(0, inlen - size + 1):
end = start + size
if end > inlen:
continue
t.text = value[start:end]
if keeporiginal:
t.original = t.text
t.stopped = False
if positions:
t.pos = pos
if chars:
t.startchar = start_char + start
t.endchar = start_char + end
yield t
pos += 1
else:
for start in xrange(0, inlen - self.min + 1):
for size in xrange(self.min, self.max + 1):
end = start + size
if end > inlen:
continue
t.text = value[start:end]
if keeporiginal:
t.original = t.text
t.stopped = False
if positions:
t.pos = pos
if chars:
t.startchar = start_char + start
t.endchar = start_char + end
yield t
pos += 1
# Filter
class NgramFilter(Filter):
"""Splits token text into N-grams.
>>> rext = RegexTokenizer()
>>> stream = rext("hello there")
>>> ngf = NgramFilter(4)
>>> [token.text for token in ngf(stream)]
["hell", "ello", "ther", "here"]
"""
__inittypes__ = dict(minsize=int, maxsize=int)
def __init__(self, minsize, maxsize=None, at=None):
"""
:param minsize: The minimum size of the N-grams.
:param maxsize: The maximum size of the N-grams. If you omit this
parameter, maxsize == minsize.
:param at: If 'start', only take N-grams from the start of each word.
if 'end', only take N-grams from the end of each word. Otherwise,
take all N-grams from the word (the default).
"""
self.min = minsize
self.max = maxsize or minsize
self.at = 0
if at == "start":
self.at = -1
elif at == "end":
self.at = 1
def __eq__(self, other):
return other and self.__class__ is other.__class__\
and self.min == other.min and self.max == other.max
def __call__(self, tokens):
assert hasattr(tokens, "__iter__")
at = self.at
for t in tokens:
text = t.text
if len(text) < self.min:
continue
chars = t.chars
if chars:
startchar = t.startchar
# Token positions don't mean much for N-grams,
# so we'll leave the token's original position
# untouched.
if t.mode == "query":
size = min(self.max, len(t.text))
if at == -1:
t.text = text[:size]
if chars:
t.endchar = startchar + size
yield t
elif at == 1:
t.text = text[0 - size:]
if chars:
t.startchar = t.endchar - size
yield t
else:
for start in xrange(0, len(text) - size + 1):
t.text = text[start:start + size]
if chars:
t.startchar = startchar + start
t.endchar = startchar + start + size
yield t
else:
if at == -1:
limit = min(self.max, len(text))
for size in xrange(self.min, limit + 1):
t.text = text[:size]
if chars:
t.endchar = startchar + size
yield t
elif at == 1:
if chars:
original_startchar = t.startchar
start = max(0, len(text) - self.max)
for i in xrange(start, len(text) - self.min + 1):
t.text = text[i:]
if chars:
t.startchar = original_startchar + i
yield t
else:
for start in xrange(0, len(text) - self.min + 1):
for size in xrange(self.min, self.max + 1):
end = start + size
if end > len(text):
continue
t.text = text[start:end]
if chars:
t.startchar = startchar + start
t.endchar = startchar + end
yield t
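# Illustrative behavior of the `at` argument (token values assumed): NgramFilter(3, 5,
# at="start") on the token "hello" yields "hel", "hell", "hello", while at="end"
# yields "hello", "ello", "llo".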
# Analyzers
def NgramAnalyzer(minsize, maxsize=None):
"""Composes an NgramTokenizer and a LowercaseFilter.
>>> ana = NgramAnalyzer(4)
>>> [token.text for token in ana("hi there")]
["hi t", "i th", " the", "ther", "here"]
"""
return NgramTokenizer(minsize, maxsize=maxsize) | LowercaseFilter()
def NgramWordAnalyzer(minsize, maxsize=None, tokenizer=None, at=None):
if not tokenizer:
tokenizer = RegexTokenizer()
return tokenizer | LowercaseFilter() | NgramFilter(minsize, maxsize, at=at)
| 37.920168 | 80 | 0.525651 | 1,018 | 9,025 | 4.598232 | 0.252456 | 0.012818 | 0.013672 | 0.02179 | 0.370647 | 0.340526 | 0.293954 | 0.293954 | 0.278146 | 0.233283 | 0 | 0.006229 | 0.395235 | 9,025 | 237 | 81 | 38.080169 | 0.851411 | 0.330305 | 0 | 0.522727 | 0 | 0 | 0.007713 | 0 | 0 | 0 | 0 | 0 | 0.015152 | 1 | 0.060606 | false | 0 | 0.037879 | 0.007576 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9982e59aaaa75c68bd3e08786c5defa1efb2e162 | 244 | py | Python | demo/config.py | SDchao/nonebot | 145d1787143584895375231210e30fdd3003d5bf | [
"MIT"
] | 1 | 2021-01-19T03:57:23.000Z | 2021-01-19T03:57:23.000Z | demo/config.py | coffiasd/nonebot | c02b9a4ccf61126aa81e3f86b06b44685461af09 | [
"MIT"
] | null | null | null | demo/config.py | coffiasd/nonebot | c02b9a4ccf61126aa81e3f86b06b44685461af09 | [
"MIT"
] | null | null | null | import re
from nonebot.default_config import *
HOST = '0.0.0.0'
SECRET = 'abc'
SUPERUSERS = {1002647525}
NICKNAME = {'奶茶', '小奶茶'}
COMMAND_START = {'', '/', '!', '/', '!', re.compile(r'^>+\s*')}
COMMAND_SEP = {'/', '.', re.compile(r'#|::?')}
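# Sketch of how these are used (assumed NoneBot semantics): a message such as ">> echo hi"
# matches the regex starter above and is treated as a command invocation of "echo hi";
# the empty string '' lets messages with no prefix at all be parsed as commands too.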
| 20.333333 | 63 | 0.54918 | 30 | 244 | 4.366667 | 0.7 | 0.045802 | 0.045802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066986 | 0.143443 | 244 | 11 | 64 | 22.181818 | 0.559809 | 0 | 0 | 0 | 0 | 0 | 0.131148 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99843f08508767bdb980b0e376ab8912b933a55a | 1,235 | py | Python | sa_analysis.py | CarryChang/-Customer_satisfaction_Analysis | 1d0edc9035302f826909fd462eab92e2a15dcfd9 | [
"Apache-2.0"
] | 341 | 2018-12-21T08:00:52.000Z | 2022-03-31T00:31:31.000Z | sa_analysis.py | CarryChang/-Customer_satisfaction_Analysis | 1d0edc9035302f826909fd462eab92e2a15dcfd9 | [
"Apache-2.0"
] | 5 | 2019-03-20T05:36:54.000Z | 2020-08-27T03:00:47.000Z | sa_analysis.py | CarryChang/-Customer_satisfaction_Analysis | 1d0edc9035302f826909fd462eab92e2a15dcfd9 | [
"Apache-2.0"
] | 111 | 2019-01-22T13:50:42.000Z | 2022-03-12T12:34:53.000Z | # -*- coding: utf-8 -*-
from litNlp.predict import SA_Model_Predict
import matplotlib.pyplot as plt
from setting import *
import numpy as np
import os
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
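# SimHei is a CJK font: the two rcParams lines above let matplotlib render the Chinese
# plot labels used below and stop minus signs from showing as missing-glyph boxes.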
def topic_sa_analysis():
sa_model = SA_Model_Predict(tokenize_path, sa_model_path_m, max_len=100)
if not os.path.exists(topic_emotion_pic):
os.mkdir(topic_emotion_pic)
        print(topic_emotion_pic + '文件夹已经建立,请查看当前文件路径')  # "folder created; check the current file path"
for key_word in topic_words_list.keys():
sa_analysis_(key_word, sa_model)
def sa_analysis_(key_word, sa_model):
print('{} 正在执行...'.format(key_word))
key_txt = open('{}/{}.txt'.format(topic_path, key_word), 'r', encoding='utf-8').readlines()
sentiments_score_predict = sa_model.predict(key_txt)
    # sentiment polarity output: keep the positive-class probability (column 1)
sentiments_score_list = [i[1] for i in sentiments_score_predict]
plt.hist(sentiments_score_list, bins=np.arange(0, 1, 0.01))
plt.xlabel("情感值")
plt.ylabel("评论数目")
plt.title(key_word+'-情感极性分布图')
plt.savefig('{}/{}.png'.format(topic_emotion_pic, key_word))
plt.show()
plt.close()
print('{} 情感极性图完成'.format(key_word))
# if __name__ == '__main__':
# #     add multithreading to speed up prediction
# topic_sa_analysis()
| 34.305556 | 95 | 0.697166 | 181 | 1,235 | 4.430939 | 0.453039 | 0.069825 | 0.074813 | 0.042394 | 0.05985 | 0.05985 | 0 | 0 | 0 | 0 | 0 | 0.010547 | 0.155466 | 1,235 | 35 | 96 | 35.285714 | 0.758389 | 0.077733 | 0 | 0 | 0 | 0 | 0.10159 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.185185 | 0 | 0.259259 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9985ca862cfcc11f8348e0629d58913ccb5353c8 | 22,570 | py | Python | codalab_competition_bundle/AutoDL_starting_kit/AutoDL_simple_baseline_models/3dcnn_pytorch/model.py | NehzUx/autodl | c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9 | [
"Apache-2.0"
] | 25 | 2018-09-26T14:07:11.000Z | 2021-12-02T15:19:08.000Z | codalab_competition_bundle/AutoDL_starting_kit/AutoDL_simple_baseline_models/3dcnn_pytorch/model.py | NehzUx/autodl | c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9 | [
"Apache-2.0"
] | 8 | 2018-11-23T15:35:28.000Z | 2020-02-27T14:55:11.000Z | codalab_competition_bundle/AutoDL_starting_kit/AutoDL_simple_baseline_models/3dcnn_pytorch/model.py | NehzUx/autodl | c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9 | [
"Apache-2.0"
] | 5 | 2019-03-05T11:05:59.000Z | 2020-01-08T13:05:35.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by: Shangeth Rajaa, Zhengying Liu, Isabelle Guyon
"""An example of code submission for the AutoDL challenge.
It implements 3 compulsory methods ('__init__', 'train' and 'test') and
an attribute 'done_training' for indicating if the model will not proceed more
training due to convergence or limited time budget.
To create a valid submission, zip model.py together with other necessary files
such as Python modules/packages, pre-trained weights, etc. The final zip file
should not exceed 300MB.
"""
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.autograd import Variable
import datetime
import logging
import numpy as np
import os
import sys
import time
import torch.utils.data as data_utils
import torch
import torch.nn as nn
import torchvision
import tensorflow as tf
# seeding randomness for reproducibility
np.random.seed(42)
torch.manual_seed(1)
# PyTorch Model class
class TorchModel(nn.Module):
def __init__(self, input_shape, output_dim):
''' 3D CNN Model with no of CNN layers depending on the input size'''
super(TorchModel, self).__init__()
self.conv = torch.nn.Sequential()
cnn_ch = 16
if input_shape[1] == 1: # if num_channels = 1
self.conv.add_module('cnn1', nn.Conv3d(input_shape[0], cnn_ch, (1,3,3)))
else:
self.conv.add_module('cnn1', nn.Conv3d(input_shape[0], cnn_ch, 3))
self.conv.add_module('pool1', nn.MaxPool3d(2,2))
i = 2
while True:
self.conv.add_module('cnn{}'.format(i),
nn.Conv3d(cnn_ch * (i-1), cnn_ch * i, (1,3,3)))
self.conv.add_module('pool{}'.format(i), nn.MaxPool3d(2,2))
i += 1
n_size, out_len = self.get_fc_size(input_shape)
# no more CNN layers if Linear layers get input size < 1000
if n_size < 1000 or out_len[3] < 3 or out_len[3] < 3:
break
fc_size, _ = self.get_fc_size(input_shape)
self.fc = nn.Linear(fc_size, output_dim)
def forward_cnn(self, x):
x = self.conv(x)
return x
def get_fc_size(self, input_shape):
''' function to get the size for Linear layers
with given number of CNN layers
'''
sample_input = Variable(torch.rand(1, *input_shape))
output_feat = self.forward_cnn(sample_input)
out_shape = output_feat.shape
n_size = output_feat.data.view(1, -1).size(1)
return n_size, out_shape
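    # Probe sketch (illustrative shape): for input_shape = (3, 15, 112, 112) a dummy
    # batch torch.rand(1, 3, 15, 112, 112) is pushed through the conv stack, and the
    # flattened size n_size becomes the in_features of the final nn.Linear, so the
    # fully-connected layer is sized without hand-computing conv output shapes.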
def forward(self, x):
x = self.forward_cnn(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
# PyTorch Dataset to get data from tensorflow Dataset.
class TFDataset(torch.utils.data.Dataset):
def __init__(self, dataset, session, num_samples):
super(TFDataset, self).__init__()
self.dataset = dataset
self.session = session
self.num_samples = num_samples
self.next_element = None
self.reset()
def reset(self):
dataset = self.dataset
iterator = dataset.make_one_shot_iterator()
self.next_element = iterator.get_next()
return self
def __len__(self):
return self.num_samples
def __getitem__(self, index):
session = self.session if self.session is not None else tf.Session()
try:
example, label = session.run(self.next_element)
except tf.errors.OutOfRangeError:
self.reset()
example, label = session.run(self.next_element)
return example.transpose(3,0,1,2), label
class Model():
def __init__(self, metadata):
"""
Args:
metadata: an AutoDLMetadata object. Its definition can be found in
AutoDL_ingestion_program/dataset.py
"""
# Attribute necessary for ingestion program to stop evaluation process
self.done_training = False
self.metadata_ = metadata
# Getting details of the data from meta data
self.output_dim = self.metadata_.get_output_size()
self.num_examples_train = self.metadata_.size()
row_count, col_count = self.metadata_.get_matrix_size(0)
channel = self.metadata_.get_num_channels(0)
sequence_size = self.metadata_.get_sequence_size()
self.num_train = self.metadata_.size()
test_metadata_filename = self.metadata_.get_dataset_name()\
.replace('train', 'test') + '/metadata.textproto'
self.num_test = [int(line.split(':')[1]) for line
in open(test_metadata_filename, 'r').readlines()
if 'sample_count' in line][0]
# Getting the device available
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Device Found = ', self.device,
'\nMoving Model and Data into the device...')
# Attributes for preprocessing
self.default_image_size = (112,112)
self.default_num_frames = 15
self.default_shuffle_buffer = 100
if row_count == -1 or col_count == -1 :
row_count = self.default_image_size[0]
col_count = self.default_image_size[1]
if sequence_size == -1: sequence_size = self.default_num_frames
self.input_shape = (channel, sequence_size, row_count, col_count)
print('\n\nINPUT SHAPE = ', self.input_shape)
# getting an object for the PyTorch Model class for Model Class
# use CUDA if available
self.pytorchmodel = TorchModel(self.input_shape, self.output_dim)
print('\nPyModel Defined\n')
print(self.pytorchmodel)
self.pytorchmodel.to(self.device)
# PyTorch Optimizer and Criterion
self.criterion = nn.BCEWithLogitsLoss()
self.optimizer = torch.optim.Adam(self.pytorchmodel.parameters(), lr=1e-2)
# Attributes for managing time budget
# Cumulated number of training steps
self.birthday = time.time()
self.total_train_time = 0
self.cumulated_num_steps = 0
self.estimated_time_per_step = None
self.total_test_time = 0
self.cumulated_num_tests = 0
self.estimated_time_test = None
self.trained = False
# PYTORCH
# Critical number for early stopping
self.num_epochs_we_want_to_train = 100
# no of examples at each step/batch
self.train_batch_size = 30
self.test_batch_size = 30
# Tensorflow sessions to get the data from TFDataset
self.train_session = tf.Session()
self.test_session = tf.Session()
def train(self, dataset, remaining_time_budget=None):
"""Train this algorithm on the tensorflow |dataset|.
This method will be called REPEATEDLY during the whole training/predicting
process. So your `train` method should be able to handle repeated calls and
hopefully improve your model performance after each call.
****************************************************************************
****************************************************************************
IMPORTANT: the loop of calling `train` and `test` will only run if
self.done_training = False
(the corresponding code can be found in ingestion.py, search
'M.done_training')
Otherwise, the loop will go on until the time budget is used up. Please
pay attention to set self.done_training = True when you think the model is
converged or when there is not enough time for next round of training.
****************************************************************************
****************************************************************************
Args:
dataset: a `tf.data.Dataset` object. Each of its examples is of the form
(example, labels)
where `example` is a dense 4-D Tensor of shape
(sequence_size, row_count, col_count, num_channels)
and `labels` is a 1-D Tensor of shape
(output_dim,).
Here `output_dim` represents number of classes of this
multilabel classification task.
IMPORTANT: some of the dimensions of `example` might be `None`,
which means the shape on this dimension might be variable. In this
case, some preprocessing technique should be applied in order to
feed the training of a neural network. For example, if an image
dataset has `example` of shape
(1, None, None, 3)
then the images in this datasets may have different sizes. On could
apply resizing, cropping or padding in order to have a fixed size
input tensor.
remaining_time_budget: time remaining to execute train(). The method
should keep track of its execution time to avoid exceeding its time
budget. If remaining_time_budget is None, no time budget is imposed.
"""
steps_to_train = self.get_steps_to_train(remaining_time_budget)
if steps_to_train <= 0:
logger.info("Not enough time remaining for training. " +
"Estimated time for training per step: {:.2f}, "\
.format(self.estimated_time_per_step) +
"but remaining time budget is: {:.2f}. "\
.format(remaining_time_budget) +
"Skipping...")
self.done_training = True
else:
msg_est = ""
if self.estimated_time_per_step:
msg_est = "estimated time for this: " +\
"{:.2f} sec.".format(steps_to_train * self.estimated_time_per_step)
logger.info("Begin training for another {} steps...{}".format(steps_to_train, msg_est))
      # If the PyTorch dataloader for the training set doesn't already exist, create it
if not hasattr(self, 'trainloader'):
self.trainloader = self.get_dataloader(dataset, self.num_train, batch_size=self.train_batch_size)
train_start = time.time()
# Training loop
self.trainloop(self.criterion, self.optimizer, steps=steps_to_train)
train_end = time.time()
# Update for time budget managing
train_duration = train_end - train_start
self.total_train_time += train_duration
self.cumulated_num_steps += steps_to_train
self.estimated_time_per_step = self.total_train_time / self.cumulated_num_steps
logger.info("{} steps trained. {:.2f} sec used. ".format(steps_to_train, train_duration) +\
"Now total steps trained: {}. ".format(self.cumulated_num_steps) +\
"Total time used for training: {:.2f} sec. ".format(self.total_train_time) +\
"Current estimated time per step: {:.2e} sec.".format(self.estimated_time_per_step))
def test(self, dataset, remaining_time_budget=None):
"""Test this algorithm on the tensorflow |dataset|.
Args:
Same as that of `train` method, except that the `labels` will be empty.
Returns:
predictions: A `numpy.ndarray` matrix of shape (sample_count, output_dim).
here `sample_count` is the number of examples in this dataset as test
set and `output_dim` is the number of labels to be predicted. The
values should be binary or in the interval [0,1].
"""
if self.done_training:
return None
if self.choose_to_stop_early():
logger.info("Oops! Choose to stop early for next call!")
self.done_training = True
test_begin = time.time()
if remaining_time_budget and self.estimated_time_test and\
self.estimated_time_test > remaining_time_budget:
logger.info("Not enough time for test. " +\
"Estimated time for test: {:.2e}, ".format(self.estimated_time_test) +\
"But remaining time budget is: {:.2f}. ".format(remaining_time_budget) +\
"Stop train/predict process by returning None.")
return None
msg_est = ""
if self.estimated_time_test:
msg_est = "estimated time: {:.2e} sec.".format(self.estimated_time_test)
logger.info("Begin testing..." + msg_est)
    # If the PyTorch dataloader for the test set doesn't already exist, create it
if not hasattr(self, 'testloader'):
self.testloader = self.get_dataloader_test(dataset, self.num_test,
self.test_batch_size)
# get predictions from the test loop
predictions = self.testloop(self.testloader)
test_end = time.time()
# Update some variables for time management
test_duration = test_end - test_begin
self.total_test_time += test_duration
self.cumulated_num_tests += 1
self.estimated_time_test = self.total_test_time / self.cumulated_num_tests
logger.info("[+] Successfully made one prediction. {:.2f} sec used. ".format(test_duration) +\
"Total time used for testing: {:.2f} sec. ".format(self.total_test_time) +\
"Current estimated time for test: {:.2e} sec.".format(self.estimated_time_test))
return predictions
##############################################################################
#### Above 3 methods (__init__, train, test) should always be implemented ####
##############################################################################
def preprocess_tensor_4d(self, tensor_4d):
"""Preprocess a 4-D tensor (only when some dimensions are `None`, i.e.
    non-fixed). The output tensor will have a fixed, known shape.
Args:
tensor_4d: A Tensor of shape
[sequence_size, row_count, col_count, num_channels]
where some dimensions might be `None`.
Returns:
A 4-D Tensor with fixed, known shape.
"""
tensor_4d_shape = tensor_4d.shape
logger.info("Tensor shape before preprocessing: {}".format(tensor_4d_shape))
if tensor_4d_shape[0] > 0 and tensor_4d_shape[0] < 10:
num_frames = tensor_4d_shape[0]
else:
num_frames = self.default_num_frames
if tensor_4d_shape[1] > 0:
new_row_count = tensor_4d_shape[1]
else:
new_row_count=self.default_image_size[0]
if tensor_4d_shape[2] > 0:
new_col_count = tensor_4d_shape[2]
else:
new_col_count=self.default_image_size[1]
if not tensor_4d_shape[0] > 0:
logger.info("Detected that examples have variable sequence_size, will " +
"randomly crop a sequence with num_frames = " +
"{}".format(num_frames))
tensor_4d = crop_time_axis(tensor_4d, num_frames=num_frames)
if not tensor_4d_shape[1] > 0 or not tensor_4d_shape[2] > 0:
logger.info("Detected that examples have variable space size, will " +
"resize space axes to (new_row_count, new_col_count) = " +
"{}".format((new_row_count, new_col_count)))
tensor_4d = resize_space_axes(tensor_4d,
new_row_count=new_row_count,
new_col_count=new_col_count)
logger.info("Tensor shape after preprocessing: {}".format(tensor_4d.shape))
return tensor_4d
def get_dataloader(self, tf_dataset, num_images, batch_size):
''' Get the training PyTorch dataloader
Args:
tf_dataset: Tensorflow Dataset which is given in train function
num_images : number of examples in train data
batch_size : batch_size for training set
Return:
dataloader: PyTorch Training Dataloader
'''
tf_dataset = tf_dataset.map(lambda *x: (self.preprocess_tensor_4d(x[0]), x[1]))
train_dataset = TFDataset(tf_dataset, self.train_session, num_images)
dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=self.train_batch_size,
shuffle=True,
drop_last=False
)
return dataloader
def get_dataloader_test(self, tf_dataset, num_images, batch_size):
''' Get the test PyTorch dataloader
Args:
tf_dataset: Tensorflow Dataset which is given in test function
num_images : number of examples in test data
batch_size : batch_size for test set
Return:
dataloader: PyTorch Test Dataloader
'''
tf_dataset = tf_dataset.map(lambda *x: (self.preprocess_tensor_4d(x[0]), x[1]))
dataset = TFDataset(tf_dataset, self.test_session, num_images)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
return dataloader
def trainloop(self, criterion, optimizer, steps):
''' Training loop with no of given steps
Args:
criterion: PyTorch Loss function
Optimizer: PyTorch optimizer for training
steps: No of steps to train the model
Return:
None, updates the model parameters
'''
self.pytorchmodel.train()
data_iterator = iter(self.trainloader)
for i in range(steps):
try:
images, labels = next(data_iterator)
except StopIteration:
data_iterator = iter(self.trainloader)
images, labels = next(data_iterator)
images = images.float().to(self.device)
labels = labels.float().to(self.device)
optimizer.zero_grad()
log_ps = self.pytorchmodel(images)
loss = criterion(log_ps, labels)
if hasattr(self, 'scheduler'):
self.scheduler.step(loss)
loss.backward()
optimizer.step()
def get_steps_to_train(self, remaining_time_budget):
"""Get number of steps for training according to `remaining_time_budget`.
The strategy is:
1. If no training is done before, train for 10 steps (ten batches);
2. Otherwise, estimate training time per step and time needed for test,
then compare to remaining time budget to compute a potential maximum
number of steps (max_steps) that can be trained within time budget;
3. Choose a number (steps_to_train) between 0 and max_steps and train for
this many steps. Double it each time.
"""
if not remaining_time_budget: # This is never true in the competition anyway
remaining_time_budget = 1200 # if no time limit is given, set to 20min
if not self.estimated_time_per_step:
steps_to_train = 10
else:
if self.estimated_time_test:
tentative_estimated_time_test = self.estimated_time_test
else:
tentative_estimated_time_test = 50 # conservative estimation for test
max_steps = int((remaining_time_budget - tentative_estimated_time_test) / self.estimated_time_per_step)
max_steps = max(max_steps, 1)
if self.cumulated_num_tests < np.log(max_steps) / np.log(2):
steps_to_train = int(2 ** self.cumulated_num_tests) # Double steps_to_train after each test
else:
steps_to_train = 0
return steps_to_train
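    # Worked schedule (illustrative): the first call trains 10 steps; each later call
    # trains 2 ** cumulated_num_tests steps (1, 2, 4, 8, ...) until that doubling would
    # pass max_steps, at which point 0 steps are requested and training winds down.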
def testloop(self, dataloader):
'''
Args:
dataloader: PyTorch test dataloader
Return:
preds: Predictions of the model as Numpy Array.
'''
preds = []
with torch.no_grad():
self.pytorchmodel.eval()
for images, _ in dataloader:
if torch.cuda.is_available():
images = images.float().cuda()
else:
images = images.float()
log_ps = self.pytorchmodel(images)
pred = torch.sigmoid(log_ps).data > 0.5
preds.append(pred.cpu().numpy())
preds = np.vstack(preds)
return preds
def choose_to_stop_early(self):
"""The criterion to stop further training (thus finish train/predict
process).
"""
# return self.cumulated_num_tests > 10 # Limit to make 10 predictions
# return np.random.rand() < self.early_stop_proba
batch_size = self.train_batch_size
num_examples = self.metadata_.size()
num_epochs = self.cumulated_num_steps * batch_size / num_examples
logger.info("Model already trained for {} epochs.".format(num_epochs))
return num_epochs > self.num_epochs_we_want_to_train # Train for at least certain number of epochs then stop
#### Other helper functions
def crop_time_axis(tensor_4d, num_frames, begin_index=None):
"""Given a 4-D tensor, take a slice of length `num_frames` on its time axis.
Args:
tensor_4d: A Tensor of shape
[sequence_size, row_count, col_count, num_channels]
num_frames: An integer representing the resulted chunk (sequence) length
begin_index: The index of the beginning of the chunk. If `None`, chosen
randomly.
Returns:
A Tensor of sequence length `num_frames`, which is a chunk of `tensor_4d`.
"""
# pad sequence if not long enough
pad_size = tf.maximum(num_frames - tf.shape(tensor_4d)[0], 0)
padded_tensor = tf.pad(tensor_4d, ((0, pad_size), (0, 0), (0, 0), (0, 0)))
# If not given, randomly choose the beginning index of frames
if not begin_index:
maxval = tf.shape(padded_tensor)[0] - num_frames + 1
begin_index = tf.random.uniform([1],
minval=0,
maxval=maxval,
dtype=tf.int32)
begin_index = tf.stack([begin_index[0], 0, 0, 0], name='begin_index')
sliced_tensor = tf.slice(padded_tensor,
begin=begin_index,
size=[num_frames, -1, -1, -1])
return sliced_tensor
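# Worked example (illustrative): an 8-frame clip with num_frames=15 gets pad_size =
# max(15 - 8, 0) = 7 blank frames appended, after which a length-15 window starting
# at a random valid index is sliced out along the time axis.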
def resize_space_axes(tensor_4d, new_row_count, new_col_count):
"""Given a 4-D tensor, resize space axes to have target size.
Args:
tensor_4d: A Tensor of shape
[sequence_size, row_count, col_count, num_channels].
new_row_count: An integer indicating the target row count.
new_col_count: An integer indicating the target column count.
Returns:
A Tensor of shape [sequence_size, target_row_count, target_col_count].
"""
resized_images = tf.image.resize_images(tensor_4d,
size=(new_row_count, new_col_count))
return resized_images
def get_logger(verbosity_level):
"""Set logging format to something like:
2019-04-25 12:52:51,924 INFO model.py: <message>
"""
logger = logging.getLogger(__file__)
logging_level = getattr(logging, verbosity_level)
logger.setLevel(logging_level)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)s %(filename)s: %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging_level)
stdout_handler.setFormatter(formatter)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
stderr_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
logger.addHandler(stderr_handler)
logger.propagate = False
return logger
logger = get_logger('INFO')
| 39.946903 | 112 | 0.667036 | 3,082 | 22,570 | 4.687541 | 0.182349 | 0.01772 | 0.021181 | 0.014536 | 0.234443 | 0.165294 | 0.120786 | 0.088807 | 0.066657 | 0.056413 | 0 | 0.014067 | 0.225166 | 22,570 | 564 | 113 | 40.017731 | 0.812043 | 0.349269 | 0 | 0.111821 | 0 | 0 | 0.094032 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067093 | false | 0 | 0.041534 | 0.003195 | 0.175719 | 0.01278 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99879b528d7993063b417f1a859d11a6963e5268 | 1,726 | py | Python | 2.linked-list/single-linked-list/remove-nth-from-end/test.py | tienduy-nguyen/coderust | d0884d7b3ced0d01e24b210284b9370432964274 | [
"MIT"
] | null | null | null | 2.linked-list/single-linked-list/remove-nth-from-end/test.py | tienduy-nguyen/coderust | d0884d7b3ced0d01e24b210284b9370432964274 | [
"MIT"
] | null | null | null | 2.linked-list/single-linked-list/remove-nth-from-end/test.py | tienduy-nguyen/coderust | d0884d7b3ced0d01e24b210284b9370432964274 | [
"MIT"
] | null | null | null | class ListNode:
def __init__(self, val, next = None):
self.val = val
self.next = next
class LinkedList:
def __init__(self):
self.head = None
    def removeNthFromEnd(self, head, n):
        fast = slow = head
        for _ in range(n):
            if not fast:  # n exceeds the list length: leave the list unchanged
                self.printNode(head)
                return head
            fast = fast.next
        if fast is None:  # n equals the list length, so the head itself is removed
            return head.next
        while fast.next:
            fast = fast.next
            slow = slow.next
        slow.next = slow.next.next
        return head
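    # Worked trace (illustrative list): for 1 -> 2 -> 3 -> 4 -> 5 with n = 2, fast stops
    # on the last node while slow sits on 3, so slow.next = slow.next.next unlinks 4,
    # the second node from the end.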
    def remove_nth_from_end2(self, head, n):
        def remove(head, n):
            if head is None: return head, 0
            node, count = remove(head.next, n)
            count += 1
            head.next = node
            if count == n:  # this node is the nth from the end, so splice it out
                head = head.next
            return head, count
        return remove(head, n)[0]  # remove() returns (new_head, size); keep the head
def count(self,head):
def count_size(head):
if head is None: return 0
count = count_size(head.next) + 1
return count
return count_size(head)
def shift(self, val):
new_node = ListNode(val)
new_node.next = self.head
self.head = new_node
def printNode(self,head):
result = []
if head is None:
print("List node null")
while(head):
result.append(head.val)
head=head.next
print(result)
if __name__ == '__main__':
lk = LinkedList()
lk.shift(5)
lk.shift(8)
lk.shift(6)
lk.shift(4)
lk.shift(3)
lk.shift(2)
lk.shift(1)
lk.shift(0)
lk.printNode(lk.head)
ans = lk.remove_nth_from_end2(lk.head, 2)
lk.printNode(ans)
# while ans is not None:
# print(ans.val)
# ans = ans.next | 22.710526 | 73 | 0.586906 | 252 | 1,726 | 3.904762 | 0.206349 | 0.056911 | 0.02439 | 0.036585 | 0.036585 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013104 | 0.292584 | 1,726 | 76 | 74 | 22.710526 | 0.792793 | 0.049247 | 0 | 0.096774 | 0 | 0 | 0.032396 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145161 | false | 0 | 0 | 0 | 0.274194 | 0.129032 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
998cbf13c8435563780f89976a96d9d655cabb2a | 14,049 | py | Python | examples/notebooks/generating_yaml.py | wtgee/huntsman-pocs | c47976b1e52c5676a8237f6ee889555ede26d0e0 | [
"MIT"
] | null | null | null | examples/notebooks/generating_yaml.py | wtgee/huntsman-pocs | c47976b1e52c5676a8237f6ee889555ede26d0e0 | [
"MIT"
] | null | null | null | examples/notebooks/generating_yaml.py | wtgee/huntsman-pocs | c47976b1e52c5676a8237f6ee889555ede26d0e0 | [
"MIT"
] | null | null | null | import yaml
import os
import datetime
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual
from IPython.display import display
import sys
class POCS_devices_database(object):
"""
This class manages serial numbers and other information of multiple devices being used with POCS.
It can be used to display ipython widgets to select the device information, and then create a .yaml
config file that can be read and implemented by POCS.
"""
def __init__(self,
device_info_master_directory='/var/huntsman-pocs/conf_files/',
device_info_master_file='device_info_master.yaml',
local_directory='/var/huntsman-pocs/conf_files/',
archive_directory='/var/huntsman-pocs/conf_files/archive/',
output_yaml_filename='huntsman.yaml'):
"""
Sets up the location to save all files, loads information off previous files, and gets the current
datetime info for the archive filename.
Args:
device_info_master_directory : the file path of where the .yaml file that all the device info is in
local_directory : the dir where the config file needs to be saved to be used by POCS
archive_directory : the dir where the archive/version control of the config files are kept
output_yaml_filename : the chosen filename of the local config file used by POCS
"""
self.local_directory = local_directory
self.archive_directory = archive_directory
self.device_info_master_directory = device_info_master_directory
self.output_yaml_filename = output_yaml_filename
self.device_info_master_file = device_info_master_file
device_info_file = os.path.join(
self.device_info_master_directory, self.device_info_master_file)
try:
with open(device_info_file, 'r') as file:
                # safe_load avoids the unsafe default loader (yaml.load without
                # an explicit Loader is deprecated in PyYAML >= 5.1)
                self.data = yaml.safe_load(file)
except FileNotFoundError:
sys.exit("Cannot find device information master file")
date_info = datetime.datetime.today()
datetime_str = date_info.strftime('%Y_%m_%d_%H_%M')
self.archive_filename = '{}_{}.{}'.format('huntsman', datetime_str, 'yaml')
previous_file = os.path.join(self.local_directory, self.output_yaml_filename)
# loading general data from the previous .yaml file used
try:
with open(previous_file, 'r') as file:
                self.data_dict = yaml.safe_load(file)
if self.data_dict is not None and 'cameras' in self.data_dict:
del self.data_dict['cameras']
except FileNotFoundError:
self.data_dict = {}
self.data_dict.update(
{'cameras': {'hdr_mode': True, 'auto_detect': False, 'devices': [None]}})
def add_device_widget(self, dummy_variable_for_widget):
"""Function to add the details selected using the drop-down menu widgets to the 'data_dict'
dictionary.
The function is called by a widget in start_interface() and is then run when the user clicks
on the widget button.
        Args:
            dummy_variable_for_widget : Button.on_click passes the clicked Button instance to its
                callback, so the handler must accept one extra positional argument
Output:
Appends the data_dict dict with the information chosen from the device information widgets.
"""
additional_device = {'model': self.camera_type_chosen,
'port': self.camera_sn_chosen,
'filter_type': self.filter_ID_chosen,
'focuser': {'model': 'birger',
'port': self.birger_sn_chosen
},
'lens': {'model': 'canon',
'port': self.lens_sn_chosen,
'name': self.lens_name_chosen,
                                      'image_stabalisation': self.lens_image_stabalisation_chosen},
'USB_hub_serial_number': self.USB_hub_SN_chosen,
'camera_into_serial_adaptor_port': self.camera_to_serial_port_chosen,
'serial_adaptor_into_USBhub_port': self.serial_to_USBhub_port_chosen,
'camera_into_USBhub_port': self.camera_to_USBhub_port_chosen
}
if self.data_dict['cameras']['devices'] == [None]:
self.data_dict['cameras']['devices'] = [additional_device]
else:
self.data_dict['cameras']['devices'].append(additional_device)
return self.data_dict
def save_file(self, dummy_variable_for_widget):
"""This function writes the 'data_dict' dictionary to a .yaml text file.
The function is called by a widget in start_interface() and is run when the user clicks on the
widget button.
        Args:
            dummy_variable_for_widget : Button.on_click passes the clicked Button instance to its
                callback, so the handler must accept one extra positional argument
Output:
Writes the information in the dict into a .yaml file in two locations, as determined by the
assign_local_dir() and assign_archive_dir methods.
The default locations are:
'/var/huntsman-pocs/conf_files/huntsman.yaml'
for the local config file to be used by POCS
and
'/var/huntsman-pocs/conf_files/huntsman_archive/huntsman_YYYY_mm_dd_hh_MM.yaml'
for the archive of all version of the config files, with the date it was created in
the filename
"""
        strOutFile1 = os.path.join(self.local_directory, self.output_yaml_filename)
        with open(strOutFile1, "w") as objFile1:
            yaml.dump(self.data_dict, objFile1, default_flow_style=False, indent=4)
        strOutFile = os.path.join(self.archive_directory, self.archive_filename)
        with open(strOutFile, "w") as objFile:
            yaml.dump(self.data_dict, objFile, default_flow_style=False, indent=4)
def start_interface(self):
"""This function runs all the code to generate the .yaml config files for the Huntsman-POCS system.
It displays the Jupyter widgets which the user can interact with to write and save the config files.
Files are saved in two locations, one for the local file that POCS will access,
and the other is an archive of all previous config files which acts as a version control.
By default, these locations are: (but can be changed using the arguments in the __init__ method)
'/var/huntsman-pocs/conf_files/huntsman.yaml' for the local file.
'/var/huntsman-pocs/conf_files/huntsman_archive/huntsman_YYYY_mm_dd_hh_MM.yaml' for the archive file.
Steps for the user to follow:
Select from the dropdown menus the information for one device set.
Click 'Add new device set'.
Select from the dropdown menus the information for the next device set.
Click 'Add new device set'.
Repeat until all device sets have been added.
Click 'Save File' to write the .yaml file.
Displays:
Jupyter widgets of drop-down menus to select the device sets.
These widgets are used to generate and save the .yaml config files.
Output:
A .yaml config file for Huntsman
"""
print(self.start_interface.__doc__)
birger_sn = self.data['birger_SN']
self.birger_serial_number = interactive(
birger_sn_widget, birger_serial_number_displayed=birger_sn)
camera_sn = self.data['camera_SN']
self.camera_serial_number = interactive(
camera_sn_widget, camera_serial_number_displayed=camera_sn)
lens_sn = self.data['lens_SN']
self.lens_serial_number = interactive(lens_sn_widget, lens_serial_number_displayed=lens_sn)
filter_ID = self.data['filter_ID']
self.filter_ID_code = interactive(filter_ID_widget, filter_ID_code_displayed=filter_ID)
serial_into_USBhub = self.data['serial_into_USBhub_port']
self.serial_into_USBhub_port = interactive(
serial_to_usb_widget, serial_into_USBhub_port_displayed=serial_into_USBhub)
camera_into_serial = self.data['camera_into_serial_port']
self.camera_into_serial_port = interactive(
camera_to_serial_widget, camera_into_serial_port_displayed=camera_into_serial)
USBhub = self.data['USBhub_SN']
self.USBhub_SN = interactive(usbhub_sn_widget, USBhub_SN_displayed=USBhub)
camera_into_USBhub = self.data['camera_into_USBhub_port']
self.camera_into_USBhub_port = interactive(
camera_to_usb_widget, camera_into_USBhub_port_displayed=camera_into_USBhub)
display(self.birger_serial_number)
display(self.camera_serial_number)
display(self.lens_serial_number)
display(self.filter_ID_code)
display(self.serial_into_USBhub_port)
display(self.camera_into_serial_port)
display(self.USBhub_SN)
display(self.camera_into_USBhub_port)
self.birger_sn_chosen = self.birger_serial_number.result
self.camera_sn_chosen = self.camera_serial_number.result
self.lens_sn_chosen = self.lens_serial_number.result
self.filter_ID_chosen = self.filter_ID_code.result
self.serial_to_USBhub_port_chosen = self.serial_into_USBhub_port.result
self.camera_to_serial_port_chosen = self.camera_into_serial_port.result
self.USB_hub_SN_chosen = self.USBhub_SN.result
self.camera_to_USBhub_port_chosen = self.camera_into_USBhub_port.result
self.camera_type_chosen = self.data['camera_type'][self.camera_sn_chosen]
self.lens_name_chosen = self.data['lens_name'][self.lens_sn_chosen]
self.lens_image_stabalisation_chosen = self.data['lens_image_stabalisation'][self.lens_sn_chosen]
button1 = widgets.Button(description="Add new device set")
display(button1)
button1.on_click(self.add_device_widget)
button = widgets.Button(description="Save File")
display(button)
button.on_click(self.save_file)
def birger_sn_widget(birger_serial_number_displayed):
"""Function used to create Jupyter widget.
It takes the parameter chosen from the widget and returns it such that it can be used as a variable.
Args:
birger_serial_number (str) : the serial number of the birger device as selected from the widget.
Returns:
The result of the widget; the chosen focuser serial number
"""
return birger_serial_number_displayed
def camera_sn_widget(camera_serial_number_displayed):
"""Function used to create Jupyter widget.
It takes the parameter chosen from the widget and returns it such that it can be used as a variable.
Args:
camera_serial_number (str) : the serial number of the camera device as selected from the widget.
Returns:
The result of the widget; the chosen camera serial number
"""
return camera_serial_number_displayed
def lens_sn_widget(lens_serial_number_displayed):
"""Function used to create Jupyter widget.
It takes the parameter chosen from the widget and returns it such that it can be used as a variable.
Args:
lens_serial_number (str) : the serial number of the lens device as selected from the widget.
Returns:
The result of the widget; the chosen lens serial number
"""
return lens_serial_number_displayed
def filter_ID_widget(filter_ID_code_displayed):
"""Function used to create Jupyter widget.
It takes the parameter chosen from the widget and returns it such that it can be used as a variable.
Args:
filter_ID_code (str) : the ID number of the lens as selected from the widget.
Returns:
The result of the widget; the chosen filter ID number
"""
return filter_ID_code_displayed
def serial_to_usb_widget(serial_into_USBhub_port_displayed):
"""Function used to create Jupyter widget.
It takes the parameter chosen from the widget and returns it such that it can be used as a variable.
Args:
serial_into_USBhub_port (str) : the port number of the USB Hub that the Serial Adaptor is plugged
into as selected from the widget.
Returns:
The result of the widget; the chosen USB port number
"""
return serial_into_USBhub_port_displayed
def camera_to_serial_widget(camera_into_serial_port_displayed):
"""Function used to create Jupyter widget.
It takes the parameter chosen from the widget and returns it such that it can be used as a variable.
Args:
camera_into_serial_port (str) : the port number of the Serial Adaptor that the camera is plugged
into as selected from the widget.
Returns:
The result of the widget; the chosen serial port number
"""
return camera_into_serial_port_displayed
def usbhub_sn_widget(USBhub_SN_displayed):
"""Function used to create Jupyter widget.
It takes the parameter chosen from the widget and returns it such that it can be used as a variable.
Args:
USBhub_SN (str) : the serial number of the USB Hub as selected from the widget.
Returns:
The result of the widget; the chosen USB Hub serial number
"""
return USBhub_SN_displayed
def camera_to_usb_widget(camera_into_USBhub_port_displayed):
"""Function used to create Jupyter widget.
It takes the parameter chosen from the widget and returns it such that it can be used as a variable.
Args:
camera_into_USBhub_port (str) : the port number of the USB Hub that the camera is plugged into as
selected from the widget.
Returns:
The result of the widget; the chosen USB port number
"""
return camera_into_USBhub_port_displayed
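if __name__ == '__main__':
    # hedged usage sketch (an illustrative addition): assumes a Jupyter
    # kernel, since the widgets rely on IPython.display, and assumes the
    # default master device file exists at the path configured in __init__
    db = POCS_devices_database()
    db.start_interface()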
| 42.702128 | 113 | 0.678838 | 1,895 | 14,049 | 4.788391 | 0.126121 | 0.039674 | 0.027772 | 0.01091 | 0.529755 | 0.429689 | 0.366321 | 0.314415 | 0.293366 | 0.26879 | 0 | 0.000962 | 0.260019 | 14,049 | 328 | 114 | 42.832317 | 0.871874 | 0.41583 | 0 | 0.030075 | 0 | 0 | 0.090193 | 0.042695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090226 | false | 0 | 0.052632 | 0 | 0.218045 | 0.007519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
999192f10fb8b2831dc1f5ac84ee5ab0849ed0de | 4,231 | py | Python | synapse/resources/directories-plugin/directories.py | comodit/synapse-agent | ee3c6c2ec07ba34e821529f3e097123326b8b9c5 | [
"MIT"
] | 5 | 2015-11-05T05:44:08.000Z | 2021-02-09T06:00:21.000Z | synapse/resources/directories-plugin/directories.py | comodit/synapse-agent | ee3c6c2ec07ba34e821529f3e097123326b8b9c5 | [
"MIT"
] | 2 | 2017-08-13T09:36:41.000Z | 2017-08-13T09:36:58.000Z | synapse/resources/directories-plugin/directories.py | comodit/synapse-agent | ee3c6c2ec07ba34e821529f3e097123326b8b9c5 | [
"MIT"
] | 3 | 2015-09-30T20:08:19.000Z | 2020-08-19T19:24:04.000Z | import getpass
from datetime import datetime
from synapse.resources.resources import ResourcesController
from synapse.logger import logger
from synapse.synapse_exceptions import ResourceException
@logger
class DirectoriesController(ResourcesController):
__resource__ = "directories"
def read(self, res_id=None, attributes={}):
status = {}
self.check_mandatory(res_id)
present = self.module.is_dir(res_id)
status['present'] = present
if present:
status['owner'] = self.module.owner(res_id)
status['group'] = self.module.group(res_id)
status['mode'] = self.module.mode(res_id)
status['mod_time'] = self.module.mod_time(res_id)
status['c_time'] = self.module.c_time(res_id)
return status
def create(self, res_id=None, attributes={}):
self.check_mandatory(res_id)
monitor = attributes.get('monitor')
owner = self._get_owner(res_id, attributes)
group = self._get_group(res_id, attributes)
mode = self._get_mode(res_id, attributes)
state = {
'owner': owner,
'group': group,
'mode': mode,
'mod_time': str(datetime.now()),
'c_time': str(datetime.now()),
'present': True
}
self.save_state(res_id, state, monitor=monitor)
self.module.create_folders(res_id)
# Update meta of given file
self.module.update_meta(res_id, owner, group, mode)
return self.read(res_id=res_id)
def update(self, res_id=None, attributes={}):
return self.create(res_id=res_id, attributes=attributes)
def delete(self, res_id=None, attributes={}):
self.check_mandatory(res_id)
monitor = attributes.get('monitor')
state = {'present': False}
self.save_state(res_id, state, monitor=monitor)
previous_state = self.read(res_id=res_id)
self.module.delete_folder(res_id)
if not self.module.exists(res_id):
previous_state['present'] = False
self.response = previous_state
return self.read(res_id)
def is_compliant(self, persisted_state, current_state):
compliant = True
        # First, compare the 'present' flag. If it differs, there's a
        # compliance issue and no need to check anything else.
        if persisted_state.get("present") != current_state.get("present"):
            compliant = False
            return compliant
# Secondly, compare path attributes
for attr in ("name", "owner", "group", "mode"):
if persisted_state.get(attr) != current_state.get(attr):
compliant = False
break
return compliant
def _get_owner(self, path, attributes):
# Default, get the current user. getpass is portable Unix/Windows
owner = getpass.getuser()
# If path exists, get path owner
if self.module.exists(path):
owner = self.module.owner(path)
# Overwrite if owner name is provided
if attributes.get('owner'):
owner = attributes['owner']
return owner
def _get_group(self, path, attributes):
# Default, get the current user's group.
# getpass is portable Unix/Windows
group = getpass.getuser()
# If path exists, get path group
if self.module.exists(path):
group = self.module.group(path)
# Overwrite if group name is provided
if attributes.get('group'):
group = attributes['group']
return group
def _get_mode(self, path, attributes):
# Default, get default mode according to current umask
mode = self.module.get_default_mode(path)
# If path exists, get current mode
if self.module.exists(path):
mode = self.module.mode(path)
# If mode is provided, return its octal value as string
if attributes.get('mode'):
try:
mode = oct(int(attributes['mode'], 8))
except ValueError as err:
raise ResourceException("Error with path mode (%s)" % err)
return mode
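# Worked example of the octal-mode normalization in _get_mode (illustrative
# addition): int('755', 8) == 493 and oct(493) == '0o755', so a mode supplied
# as the string '755' is returned in canonical octal form.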
| 31.81203 | 74 | 0.610967 | 508 | 4,231 | 4.944882 | 0.214567 | 0.057723 | 0.021895 | 0.020701 | 0.281847 | 0.184713 | 0.147293 | 0.121019 | 0.058121 | 0.058121 | 0 | 0.000332 | 0.288584 | 4,231 | 132 | 75 | 32.05303 | 0.834219 | 0.140392 | 0 | 0.164706 | 0 | 0 | 0.053576 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094118 | false | 0.035294 | 0.058824 | 0.011765 | 0.282353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9992b282a524485f8001963cb892f9c2c4eb3263 | 3,532 | py | Python | biothings/web/handlers/_flask.py | newgene/biothings.api | e3278695ac15a55fe420aa49c464946f81ec019d | [
"Apache-2.0"
] | 30 | 2017-07-23T14:50:29.000Z | 2022-02-08T08:08:16.000Z | biothings/web/handlers/_flask.py | kevinxin90/biothings.api | 8ff3bbaecd72d04db4933ff944898ee7b7c0e04a | [
"Apache-2.0"
] | 163 | 2017-10-24T18:45:40.000Z | 2022-03-28T03:46:26.000Z | biothings/web/handlers/_flask.py | newgene/biothings.api | e3278695ac15a55fe420aa49c464946f81ec019d | [
"Apache-2.0"
] | 22 | 2017-06-12T18:30:15.000Z | 2022-03-01T18:10:47.000Z | from functools import wraps
from types import CoroutineType
import flask
from biothings.web import templates
from biothings.web.options import OptionError
from biothings.web.query.pipeline import (QueryPipelineException,
QueryPipelineInterrupt)
from tornado.template import Loader
routes = []
def route(pattern, methods=("GET", "POST")):
def A(f):
async def B(*args, **kwargs):
biothings = flask.current_app.biothings
optionsets = biothings.optionsets
optionset = optionsets.get(f.__name__)
if optionset:
try:
_args = optionset.parse(flask.request.method, (
(tuple(kwargs.values()), {}),
flask.request.args,
flask.request.form,
                    flask.request.get_json(silent=True)  # None rather than an error for non-JSON bodies
))
except OptionError as err:
return err.info, 400
else:
_args = {}
result = f(biothings, _args)
if isinstance(result, CoroutineType):
return await result
return result
B.pattern = pattern
B.methods = methods
B.name = f.__name__
routes.append(B)
return B
return A
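# Hedged registration sketch (illustrative; `app` and the concrete `{ver}` /
# `{typ}` substitutions are assumptions, not part of this module). Each
# decorated handler above carries `pattern`, `methods` and `name`:
#
#     app = flask.Flask(__name__)
#     for handler in routes:
#         patterns = [handler.pattern] if isinstance(handler.pattern, str) \
#             else handler.pattern
#         for p in patterns:
#             app.add_url_rule(p.format(ver='v1', typ='gene'),
#                              endpoint='{}:{}'.format(handler.name, p),
#                              view_func=handler,
#                              methods=handler.methods)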
@route("/")
def homepage(biothings, args):
loader = Loader(templates.__path__[0])
template = loader.load("home.html")
return template.generate(
alert='Front Page Not Configured.',
title='Biothings API',
contents=biothings.handlers.keys(),
support=biothings.metadata.types,
url='http://biothings.io/'
)
def handle_es_conn(f):
@wraps(f)
async def _(biothings, *args, **kwargs):
client = biothings.elasticsearch.async_client
# because of the flask execution model
# each time the async function is executed
# it is executed on a different event loop
# reset the connections to use the active loop
del client.transport.connection_pool
await client.transport._async_init()
try:
response = await f(biothings, *args, **kwargs)
except QueryPipelineInterrupt as itr:
return itr.details
except QueryPipelineException as exc:
kwargs = exc.details if isinstance(exc.details, dict) else {}
kwargs["success"] = False
kwargs["status"] = exc.code
kwargs["reason"] = exc.summary
return kwargs, exc.code
finally:
await client.close()
return response
return _
@route("/{ver}/query")
@handle_es_conn
async def query(biothings, args):
return await biothings.pipeline.search(**args)
@route([
"/{ver}/{typ}/",
"/{ver}/{typ}/<id>"])
@handle_es_conn
async def annotation(biothings, args):
# could be a list, in which case we need jsonify.
return flask.jsonify(await biothings.pipeline.fetch(**args))
@route("/{ver}/metadata")
@handle_es_conn
async def metadata(biothings, args):
await biothings.metadata.refresh(None)
return biothings.metadata.get_metadata(None)
@route("/{ver}/metadata/fields")
@handle_es_conn
async def fields(biothings, args):
await biothings.metadata.refresh(None)
mappings = biothings.metadata.get_mappings(None)
return biothings.pipeline.formatter.transform_mapping(mappings)
@route("/status")
@handle_es_conn
async def status(biothings, args):
return await biothings.health.async_check()
| 32.40367 | 73 | 0.615515 | 384 | 3,532 | 5.559896 | 0.380208 | 0.054801 | 0.033724 | 0.039813 | 0.120843 | 0.043091 | 0.043091 | 0 | 0 | 0 | 0 | 0.001582 | 0.284258 | 3,532 | 108 | 74 | 32.703704 | 0.842959 | 0.05974 | 0 | 0.096774 | 0 | 0 | 0.0546 | 0.006637 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043011 | false | 0 | 0.075269 | 0 | 0.27957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99949b7499fb18d01405577641cf1fb6c9a87917 | 258 | py | Python | tests/abs/test_product.py | powerpenguincat/practice-atcoder | 6c656d0ebe3fc12d7df50112af2ef5c946bbaf46 | [
"MIT"
] | null | null | null | tests/abs/test_product.py | powerpenguincat/practice-atcoder | 6c656d0ebe3fc12d7df50112af2ef5c946bbaf46 | [
"MIT"
] | null | null | null | tests/abs/test_product.py | powerpenguincat/practice-atcoder | 6c656d0ebe3fc12d7df50112af2ef5c946bbaf46 | [
"MIT"
] | null | null | null | import pytest
from practice_atcoder.abs.product import question
class Test(object):
@pytest.mark.parametrize("ab,expect", [
("3 4", "Even"),
("1 21", "Odd"),
])
def test(self, ab, expect):
assert question(ab) == expect
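# run just this parametrized test with: pytest tests/abs/test_product.py -v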
| 19.846154 | 49 | 0.593023 | 32 | 258 | 4.75 | 0.75 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025773 | 0.248062 | 258 | 12 | 50 | 21.5 | 0.757732 | 0 | 0 | 0 | 0 | 0 | 0.089147 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
999a7897f8cea7a46091c8b50a7b40974c139967 | 32,457 | py | Python | networks/meta/past_grads_v2.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | [
"MIT"
] | null | null | null | networks/meta/past_grads_v2.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | [
"MIT"
] | null | null | null | networks/meta/past_grads_v2.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | [
"MIT"
] | null | null | null | """Meta network using past gradients."""
import tensorflow as tf
class DualRNN(tf.keras.layers.Layer):
"""
Pretty similar to LayerCompetition, except:
1) Optionally aggregate features across batch before feeding into
the RNN. Doing this because if the RNN states were to
represent training state of the underlying network, the whole
batch is used for the underlying network and not just one
instance from the batch. Doing this also means that we'd be
training the meta-network RNN with batch_size = 1.
2) Extend the backward masking to be more similar to forward
masking - the past masked gradients are passed into the RNN,
while the current unmasked gradient is passed in through a
separate branch. (This needs the inner network to pass in
the masked gradient instead)
Because the RNN states could potentially not have the batch
dimension, we need to also pass in the current gradient at
the end to get the mask output as (B, N)
3) Added a few more options to experiment with different network
design.
    Parameters
    ----------
    rnn_type : str
        One of 'simplernn', 'gru' or 'lstm'.
    rnn_units : int
        Number of hidden units in the recurrent cell.
    input_mlp, fwd_output_mlp, bwd_output_mlp
        MLP sub-networks for the shared input branch and for the forward /
        backward mask heads.
    mask_thresh : float
        Mask values below this threshold are zeroed to avoid gradient
        underflow in the inner network.
    """
def __init__(
self,
rnn_type,
rnn_units,
input_mlp,
fwd_output_mlp,
bwd_output_mlp,
mask_thresh=0.1,
dist_fn='none',
use_bwd_mask=False,
normalize_grads=False,
normalize_acts=False,
random_grads_stddev=None,
use_nearest_grads=False,
use_node_set=True,
node_set_version='v3',
use_batch_set=False,
use_batch_summary=True,
cur_reuse_branch=False,
bwd_return_grads=False,
):
super(DualRNN, self).__init__()
assert rnn_type in ['simplernn', 'gru', 'lstm']
if rnn_type == 'simplernn':
self._rnn = tf.keras.layers.SimpleRNN(rnn_units)
elif rnn_type == 'gru':
self._rnn = tf.keras.layers.GRU(rnn_units)
elif rnn_type == 'lstm':
self._rnn = tf.keras.layers.LSTM(rnn_units)
self._input_mlp = input_mlp
self._fwd_output_mlp = fwd_output_mlp
self._bwd_output_mlp = bwd_output_mlp
self._mask_thresh = mask_thresh
self._rnn_units = rnn_units
self._dist_fn = dist_fn
self._use_bwd_mask = use_bwd_mask
self._normalize_grads = normalize_grads
self._normalize_acts = normalize_acts
self._use_node_set = use_node_set
self._node_set_version = node_set_version
self._use_batch_set = use_batch_set
self._use_batch_summary = use_batch_summary
self._random_grads_stddev = random_grads_stddev
self._use_nearest_grads = use_nearest_grads
self._cur_reuse_branch = cur_reuse_branch
self._bwd_return_grads = bwd_return_grads
self._last_input_mlp_input = None
if self._fwd_output_mlp._last_layer_act_fn_str == 'linear':
self._fwd_apply_sigmoid = True
elif self._fwd_output_mlp._last_layer_act_fn_str == 'sigmoid':
self._fwd_apply_sigmoid = False
else:
raise ValueError()
if self._use_bwd_mask is False:
assert self._bwd_output_mlp is None
else:
if self._bwd_output_mlp._last_layer_act_fn_str == 'linear':
self._bwd_apply_sigmoid = True
elif self._bwd_output_mlp._last_layer_act_fn_str == 'sigmoid':
self._bwd_apply_sigmoid = False
else:
raise ValueError()
def warm_start(self):
batch = {
'past_grads': tf.zeros((1, 1, 1)),
'past_acts': tf.zeros((1, 1, 1)),
'cur_acts': tf.zeros((1, 1)),
'cur_grads': tf.zeros((1, 1)),
}
self.forward(batch, training=False)
if self._use_bwd_mask:
self.backward(batch, training=False)
def first_forward(self, batch, training=None):
"""
batch : (B, N)
current activations.
"""
        # here `batch` is the current activations tensor
        cur_acts = batch
        B = tf.shape(cur_acts)[0]
        N = tf.shape(cur_acts)[1]
# currently initial state is zeros
h = tf.zeros((B * N, self._rnn_units), dtype=tf.float32)
# prepare the branch from cur_acts
if self._cur_reuse_branch:
default_grads = self._get_default_grads(
past_grads=None,
past_acts=None,
cur_acts=cur_acts,
)
# (B, 1, N, cur_F)
cur_act_input, cur_F = self._prepare_input_mlp_input(
past_acts=cur_acts[:, tf.newaxis],
past_grads=default_grads
)
# (B*N, cur_F)
cur_act_input = tf.reshape(cur_act_input, (B * N, cur_F))
# (B*N, F)
cur_act_feats = self._input_mlp.call(
cur_act_input,
training=training
)
F = self._input_mlp._filters[-1]
cur_act_feats = tf.reshape(cur_act_feats, (B, 1, N, F))
# also run set features on cur_acts
# (B, 1, N, F')
            cur_act_feats, F_p = self.get_set_feature(
                cur_act_feats, F
            )
else:
if self._normalize_acts:
# (B, N), (B, 1)
nacts, norm = _safe_normalize(batch, axis=-1)
norm = tf.tile(norm, [1, N])
# (B, N, 2)
cur_act_feats = tf.stack([nacts, norm], axis=-1)
F_p = 2
else:
cur_act_feats = cur_acts
F_p = 1
# concat with current activation to feed into output_mlp
# (B*N, U+F')
feat = tf.concat([
h, tf.reshape(cur_act_feats, (B * N, F_p))
], axis=-1)
out = self._fwd_output_mlp(feat, training=training)
# (B, N)
out = tf.reshape(out, (B, N))
if self._fwd_apply_sigmoid:
mask = tf.nn.sigmoid(out)
else:
mask = out
# to avoid gradient underflow in the inner net, make mask
# smaller than `mask_thresh` 0s
mask = tf.where(
mask < self._mask_thresh,
tf.zeros_like(mask),
mask,
)
return mask
def forward(self, batch, training=None):
"""Returns the mask for forward inner network
Parameters
----------
batch : dict
"past_grads" : (B, T, N)
"past_acts" : (B, T, N)
"cur_acts" : (B, N)
"""
past_grads = batch['past_grads']
past_acts = batch['past_acts']
cur_acts = batch['cur_acts']
B = tf.shape(cur_acts)[0]
N = tf.shape(cur_acts)[1]
T = tf.shape(past_grads)[1]
# (B, T, N, Fin)
feat, Fin = self._prepare_input_mlp_input(
past_grads=past_grads,
past_acts=past_acts,
)
feat = tf.reshape(feat, (-1, Fin))
#print("fwd Fin: {}".format(Fin))
# (B * T * N, F)
feat = self._input_mlp.call(feat, training=training)
F = self._input_mlp._filters[-1]
feat = tf.reshape(feat, (B, T, N, F))
# (B, T, N, F')
all_feats, F_p = self.get_set_feature(feat, F)
#print("fwd Fp: {}".format(F_p))
if self._use_batch_summary:
# (T, N, F')
all_feats, F_p = self._get_batch_summary(all_feats, F_p)
# (N, T, F')
seq = tf.transpose(all_feats, (1, 0, 2))
# (N, U)
last_h = self._rnn(seq, training=training)
# (B, N, U)
last_h = tf.tile(last_h[tf.newaxis], [B, 1, 1])
last_h = tf.reshape(last_h, (B * N, self._rnn_units))
else:
# (B, N, T, F')
seq = tf.transpose(all_feats, (0, 2, 1, 3))
seq = tf.reshape(seq, (B * N, T, F_p))
# (B*N, U)
last_h = self._rnn(seq, training=training)
# prepare the branch from cur_acts
if self._cur_reuse_branch:
default_grads = self._get_default_grads(
past_grads=past_grads,
past_acts=past_acts,
cur_acts=cur_acts,
)
# (B, 1, N, cur_F)
cur_act_input, cur_F = self._prepare_input_mlp_input(
past_acts=cur_acts[:, tf.newaxis],
past_grads=default_grads
)
# (B*N, cur_F)
cur_act_input = tf.reshape(cur_act_input, (-1, cur_F))
# (B*N, F)
cur_act_feats = self._input_mlp.call(
cur_act_input,
training=training
)
F = self._input_mlp._filters[-1]
cur_act_feats = tf.reshape(cur_act_feats, (B, 1, N, F))
# also run set features on cur_acts
# (B, 1, N, F')
            cur_act_feats, F_p = self.get_set_feature(
                cur_act_feats, F
            )
else:
if self._normalize_acts:
# (B, N), (B, 1)
nacts, norm = _safe_normalize(cur_acts, axis=-1)
norm = tf.tile(norm, [1, N])
cur_act_feats = tf.stack([nacts, norm], axis=-1)
F_p = 2
else:
cur_act_feats = cur_acts
F_p = 1
# prepare inputs for output_mlp
# (B*N, U + F')
feat = tf.concat([
last_h,
tf.reshape(cur_act_feats, (B * N, F_p))
], axis=-1)
out = self._fwd_output_mlp(feat, training=training)
# (B, N)
out = tf.reshape(out, (B, N))
if self._fwd_apply_sigmoid:
mask = tf.nn.sigmoid(out)
else:
mask = out
# to avoid gradient underflow in the inner net, make mask
# smaller than `mask_thresh` 0s
# TODO: not sure if this is needed
mask = tf.where(
mask < self._mask_thresh,
tf.zeros_like(mask),
mask,
)
return mask
def first_backward(self, batch, training=None):
"""Returns the mask for backward gradient masking
Parameters
----------
batch : dict
"cur_acts" : (B, N)
"cur_grads" : (B, N)
"""
cur_acts = batch['cur_acts']
cur_grads = batch['cur_grads']
B = tf.shape(cur_acts)[0]
N = tf.shape(cur_acts)[1]
# currently initial state is zeros
h = tf.zeros((B * N, self._rnn_units), dtype=tf.float32)
# prepare the branch from cur_acts
if self._cur_reuse_branch:
# (B, 1, N, cur_F)
cur_input, cur_F = self._prepare_input_mlp_input(
past_acts=cur_acts[:, tf.newaxis],
past_grads=cur_grads[:, tf.newaxis],
)
# (B*N, cur_F)
cur_input = tf.reshape(cur_input, (B * N, cur_F))
# (B*N, F)
cur_feats = self._input_mlp.call(
cur_input,
training=training
)
F = self._input_mlp._filters[-1]
cur_feats = tf.reshape(cur_feats, (B, 1, N, F))
# also run set features on cur_feats
# (B, 1, N, F')
            cur_feats, F_p = self.get_set_feature(
                cur_feats, F
            )
else:
if self._normalize_acts:
# (B, N), (B, 1)
nacts, norm = _safe_normalize(cur_acts, axis=-1)
norm = tf.tile(norm, [1, N])
# (B, N, 2)
cur_feats = tf.stack([nacts, norm], axis=-1)
F_p = 2
else:
cur_feats = cur_acts
F_p = 1
if self._normalize_grads:
ngrads, norm = _safe_normalize(cur_grads, axis=-1)
norm = tf.tile(norm, [1, N])
cur_feats = tf.concat([
cur_feats,
ngrads[..., tf.newaxis],
norm[..., tf.newaxis]
], axis=-1)
F_p += 2
else:
cur_feats = tf.concat([
cur_feats, cur_grads[..., tf.newaxis]
], axis=-1)
F_p += 1
# concat with current activation to feed into output_mlp
# (B*N, U+F')
feat = tf.concat([
h, tf.reshape(cur_feats, (B * N, F_p))
], axis=-1)
out = self._bwd_output_mlp(feat, training=training)
        if self._bwd_return_grads:
            # the grads head emits 4 weights per node, so reshape to
            # (B, N, 4) before the plain (B, N) mask reshape below
            weights = tf.nn.softmax(
                tf.reshape(out, (B, N, 4)), axis=-1
            )
            grads = self._bwd_weighted_grads(
                cur_grads=cur_grads,
                weights=weights,
            )
            return grads
        # (B, N)
        out = tf.reshape(out, (B, N))
if self._bwd_apply_sigmoid:
mask = tf.nn.sigmoid(out)
else:
mask = out
# to avoid gradient underflow in the inner net, make mask
# smaller than `mask_thresh` 0s
mask = tf.where(
mask < self._mask_thresh,
tf.zeros_like(mask),
mask,
)
return mask
def backward(self, batch, training=None):
"""Returns the mask for backward gradient masking
Parameters
----------
batch : dict
"past_grads" : (B, T, N)
"past_acts" : (B, T, N)
"cur_acts" : (B, N)
"cur_grads" : (B, N)
"""
past_grads = batch['past_grads']
past_acts = batch['past_acts']
cur_acts = batch['cur_acts']
cur_grads = batch['cur_grads']
B = tf.shape(cur_acts)[0]
N = tf.shape(cur_acts)[1]
T = tf.shape(past_grads)[1]
# (B, T, N, Fin)
feat, Fin = self._prepare_input_mlp_input(
past_grads=past_grads,
past_acts=past_acts,
)
feat = tf.reshape(feat, (-1, Fin))
#print("bwd Fin: {}".format(Fin))
# (B * T * N, F)
feat = self._input_mlp.call(feat, training=training)
F = self._input_mlp._filters[-1]
feat = tf.reshape(feat, (B, T, N, F))
# (B, T, N, F')
all_feats, F_p = self.get_set_feature(feat, F)
#print("bwd Fp: {}".format(F_p))
if self._use_batch_summary:
# (T, N, F')
all_feats, F_p = self._get_batch_summary(all_feats, F_p)
# (N, T, F')
seq = tf.transpose(all_feats, (1, 0, 2))
# (N, U)
last_h = self._rnn(seq, training=training)
# (B, N, U)
last_h = tf.tile(last_h[tf.newaxis], [B, 1, 1])
last_h = tf.reshape(last_h, (B * N, self._rnn_units))
else:
# (B, N, T, F')
seq = tf.transpose(all_feats, (0, 2, 1, 3))
seq = tf.reshape(seq, (B * N, T, F_p))
# (B*N, U)
last_h = self._rnn(seq, training=training)
# prepare the branch from cur_acts
if self._cur_reuse_branch:
# (B, 1, N, cur_F)
cur_input, cur_F = self._prepare_input_mlp_input(
past_acts=cur_acts[:, tf.newaxis],
past_grads=cur_grads[:, tf.newaxis],
)
# (B*N, cur_F)
cur_input = tf.reshape(cur_input, (-1, cur_F))
# (B*N, F)
cur_feats = self._input_mlp.call(
cur_input,
training=training
)
F = self._input_mlp._filters[-1]
cur_feats = tf.reshape(cur_feats, (B, 1, N, F))
# also run set features on cur_acts
# (B, 1, N, F')
            cur_feats, F_p = self.get_set_feature(
                cur_feats, F
            )
else:
if self._normalize_acts:
# (B, N), (B, 1)
nacts, norm = _safe_normalize(cur_acts, axis=-1)
norm = tf.tile(norm, [1, N])
cur_feats = tf.stack([nacts, norm], axis=-1)
F_p = 2
else:
cur_feats = cur_acts
F_p = 1
if self._normalize_grads:
ngrads, norm = _safe_normalize(cur_grads, axis=-1)
norm = tf.tile(norm, [1, N])
cur_feats = tf.concat([
cur_feats,
ngrads[..., tf.newaxis],
norm[..., tf.newaxis]
], axis=-1)
F_p += 2
else:
cur_feats = tf.concat([
cur_feats, cur_grads[..., tf.newaxis]
], axis=-1)
F_p += 1
# prepare inputs for output_mlp
# (B*N, U + F')
feat = tf.concat([
last_h,
tf.reshape(cur_feats, (B * N, F_p))
], axis=-1)
out = self._bwd_output_mlp(feat, training=training)
if self._bwd_return_grads:
weights = tf.nn.softmax(
tf.reshape(out, (B, N, 4)), axis=-1
)
grads = self._bwd_weighted_grads(
cur_grads=cur_grads,
weights=weights,
)
return grads
# (B, N)
out = tf.reshape(out, (B, N))
if self._bwd_apply_sigmoid:
mask = tf.nn.sigmoid(out)
else:
mask = out
# to avoid gradient underflow in the inner net, make mask
# smaller than `mask_thresh` 0s
# TODO: not sure if this is needed
mask = tf.where(
mask < self._mask_thresh,
tf.zeros_like(mask),
mask,
)
return mask
def _prepare_input_mlp_input(self, past_grads, past_acts):
if self._normalize_acts:
# (B, T, N), (B, T, 1)
nacts, norm = _safe_normalize(past_acts, axis=2)
N = tf.shape(nacts)[-1]
# (B, T, N)
norm = tf.tile(norm, [1, 1, N])
# (B, T, N, 2)
feat = tf.stack([nacts, norm], axis=-1)
F = 2
else:
# (B, T, N, 1)
feat = past_acts[..., tf.newaxis]
F = 1
if self._normalize_grads:
# (B, T, N), (B, T, 1)
ngrads, norm = _safe_normalize(past_grads, axis=2)
N = tf.shape(ngrads)[-1]
# (B, T, N)
norm = tf.tile(norm, [1, 1, N])
# (B, T, N, F+2)
feat = tf.concat([
feat, ngrads[..., tf.newaxis], norm[..., tf.newaxis]
], axis=-1)
F = F + 2
else:
            feat = tf.concat([feat, past_grads[..., tf.newaxis]], axis=-1)
F = F + 1
return feat, F
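    # channel bookkeeping for the input branch: F starts at 1 (raw
    # activations) or 2 (normalized value + norm), and the gradient part
    # adds 1 or 2 more, so the MLP input width is between 2 and 4
    # depending on the two normalization flags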
def get_set_feature(self, feat, F):
"""Returns the features extracted based on sets.
Parameters
----------
feat : tf.Tensor, shape (B, T, N, F)
`N` is the dimension for the set
F : int
The number of channels for the input feature
Returns
-------
set_feat : tf.Tensor, shape (B, T, N, F')
F' : int
The number of channels of the output feature
"""
if not self._use_node_set and not self._use_batch_set:
# if coordinate-wise, use original features
return feat, F
if self._use_node_set:
if self._node_set_version == 'v1':
feat, F = self._get_node_set_feature(feat, F)
elif self._node_set_version == 'v2':
feat, F = self._get_node_set_feature_v2(feat, F)
elif self._node_set_version == 'v3':
# (B, T, N, Fn)
feat, F = self._get_node_set_feature_v3(feat, F)
else:
raise ValueError()
if self._use_batch_set:
feat_b, Fb = self._get_batch_set_feature(feat, F)
if self._use_node_set:
feat = tf.concat([feat, feat_b], axis=-1)
F = F + Fb
else:
feat = feat_b
F = Fb
return feat, F
def _get_node_set_feature(self, feat, F):
"""Returns the features extracted based on sets.
Parameters
----------
feat : tf.Tensor, shape (B, T, N, F)
`N` is the dimension for the set
F : int
The number of channels for the input feature
Returns
-------
set_feat : tf.Tensor, shape (B, T, N, F')
F' : int
The number of channels of the output feature
"""
B = tf.shape(feat)[0]
T = tf.shape(feat)[1]
# (BT, N, F)
feat = tf.reshape(feat, (B * T, -1, F))
# obtain pair-wise feats for nodes
# (BT, N, 1, F)
src_feat = feat[:, :, tf.newaxis, :]
# (BT, 1, N, F)
dst_feat = feat[:, tf.newaxis, :, :]
N = tf.shape(feat)[1]
BT = B * T
if self._dist_fn == 'diff':
# (BT, N, N, F)
dist = dst_feat - src_feat
self_feat = feat
elif self._dist_fn == 'dot':
# (BT, N, N, F)
dist = dst_feat * src_feat
self_feat = feat
elif self._dist_fn == 'norm_dot':
n_dst_feat, _ = _safe_normalize(dst_feat, axis=-1)
n_src_feat, _ = _safe_normalize(src_feat, axis=-1)
dist = tf.reduce_sum(
n_dst_feat * n_src_feat, axis=-1, keepdims=True
)
self_feat = tf.ones([BT, N, 1])
F = 1
elif self._dist_fn == 'concat':
# (BT, N, N, F*2)
dist = tf.concat([
tf.tile(src_feat, [1, 1, N, 1]),
tf.tile(dst_feat, [1, N, 1, 1])
], axis=-1)
# (BT, N, F*2)
self_feat = tf.concat([feat, feat], axis=-1)
F = F * 2
elif self._dist_fn == 'none':
# need to tile the first `N` dimension and not the 2nd
# (BT, N, N, F)
dist = tf.tile(dst_feat, [1, N, 1, 1])
# (BT, N, F)
self_feat = feat
else:
raise ValueError()
# (N, N, B*T, F)
dist = tf.transpose(dist, (1, 2, 0, 3))
# Aggregate over node features
# Create an "other" mask
mask = tf.ones((N, N)) - tf.eye(N)
# (N * (N-1), 2)
to_take = tf.where(mask > 0.5)
# (N * (N-1), BT, F)
gathered = tf.gather_nd(dist, to_take)
# (N, N-1, BT, F)
other_feat = tf.reshape(gathered, (N, N - 1, BT, F))
        # So, what are some options after here?
        # I have NxN pairwise distances, and eventually I want to
        # reduce to N; the RNN will share weights among the N nodes.
        # It'd be quite intuitive to apply attention of some form to
        # see which other nodes a node should pay attention to. The
        # feature for the RNN input would then be
        # concat(self_feat, att(other_feat)).
        # I don't want to directly aggregate from NxN -> N without
        # distinguishing self-vs-other because, well, that seems like a
        # useful distinction.
        # But perhaps I'll start with some hard-coded aggregation.
        # (BT, N, N-1, F)
other_feat = tf.transpose(other_feat, (2, 0, 1, 3))
# (BT, N, F)
other_mean = tf.reduce_mean(other_feat, axis=2)
other_min = tf.reduce_min(other_feat, axis=2)
other_max = tf.reduce_max(other_feat, axis=2)
# put them together
agg_feats = [self_feat, other_mean, other_min, other_max]
n_agg_feats = len(agg_feats)
# (BT, N, F*n_agg_feats)
all_feats = tf.concat(agg_feats, axis=-1)
all_feats = tf.reshape(all_feats, (B, T, N, F * n_agg_feats))
return all_feats, F * n_agg_feats
def _get_node_set_feature_v2(self, feat, F):
"""Returns the features extracted based on sets.
Skip the pairwise distance as in v1 as it takes too much
memory. Start looking at aggregation stats directly.
Parameters
----------
feat : tf.Tensor, shape (B, T, N, F)
`N` is the dimension for the set
F : int
The number of channels for the input feature
Returns
-------
set_feat : tf.Tensor, shape (B, T, N, F')
F' : int
The number of channels of the output feature
"""
B = tf.shape(feat)[0]
T = tf.shape(feat)[1]
N = tf.shape(feat)[2]
def _other_stats(self_idx):
# (N,)
self_idx_one_hot = tf.one_hot(self_idx, depth=N)
# (N-1, B, T, F)
other_feat = tf.gather(
tf.transpose(feat, (2, 0, 1, 3)), # (N, B, T, F)
tf.where(self_idx_one_hot < 0.5)[:, 0], # (N-1, 1)
)
# (B, T, N-1, F)
other_feat = tf.transpose(other_feat, (1, 2, 0, 3))
# (B, T, F)
other_min = tf.reduce_min(other_feat, axis=2)
other_max = tf.reduce_max(other_feat, axis=2)
other_mean = tf.reduce_mean(other_feat, axis=2)
# (B, T, F * 3)
return tf.concat(
[other_min, other_max, other_mean], axis=-1
)
self_idxs = tf.range(N)
# (N, B, T, F * 3)
other_feats = tf.map_fn(
fn=_other_stats,
elems=self_idxs,
fn_output_signature=tf.float32,
)
# (B, T, N, F*3)
other_feats = tf.transpose(other_feats, (1, 2, 0, 3))
# (B, T, N, F*4)
all_feats = tf.concat([feat, other_feats], axis=-1)
return all_feats, F * 4
def _get_node_set_feature_v3(self, feat, F):
"""Returns the features extracted based on sets.
Skip the pairwise distance as in v1 as it takes too much
memory. Start looking at aggregation stats directly.
Skip self vs other and just use self vs all.
Parameters
----------
feat : tf.Tensor, shape (B, T, N, F)
`N` is the dimension for the set
F : int
The number of channels for the input feature
Returns
-------
set_feat : tf.Tensor, shape (B, T, N, F')
F' : int
The number of channels of the output feature
"""
B = tf.shape(feat)[0]
T = tf.shape(feat)[1]
N = tf.shape(feat)[2]
# (B, T, 1, F)
all_min = tf.reduce_min(feat, axis=2, keepdims=True)
all_max = tf.reduce_max(feat, axis=2, keepdims=True)
all_mean = tf.reduce_mean(feat, axis=2, keepdims=True)
# (B, T, 1, F*3)
all_feats = tf.concat([all_min, all_max, all_mean], axis=-1)
# (B, T, N, F*3)
all_feats = tf.tile(all_feats, [1, 1, N, 1])
# (B, T, N, F*4)
all_feats = tf.concat([feat, all_feats], axis=-1)
return all_feats, F * 4
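    # unlike v1, which materializes an (N, N) pairwise tensor, v3 keeps only
    # per-set min/max/mean statistics, so memory grows linearly in N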
def _get_batch_summary(self, feat, F):
"""Returns some summary of the current batch.
Reduces over the batch dimension
Parameters
----------
feat : tf.Tensor, shape (B, ..., F)
F : int
Returns
-------
summary : tf.Tensor, shape (..., F')
F' : int
"""
bmean = tf.reduce_mean(feat, axis=0)
bmin = tf.reduce_min(feat, axis=0)
bmax = tf.reduce_max(feat, axis=0)
feat = tf.concat([bmean, bmin, bmax], axis=-1)
F = F * 3
return feat, F
def _bwd_weighted_grads(self, cur_grads, weights):
# cur_grads: (B, N)
# weights: (B, N, 4)
# (B, 1, N, 4)
set_grads, F_p = self._get_node_set_feature_v2(
cur_grads[:, tf.newaxis, :, tf.newaxis],
F=1,
)
# (B, N)
weighted_grads = tf.reduce_sum(
weights * set_grads[:, 0], axis=-1
)
return weighted_grads
def _get_batch_set_feature(self, feat, F):
"""Returns the features extracted based on sets.
Parameters
----------
feat : tf.Tensor, shape (B, T, N, F)
`N` is the dimension for the set
F : int
The number of channels for the input feature
Returns
-------
set_feat : tf.Tensor, shape (B, T, N, F')
F' : int
The number of channels of the output feature
"""
B = tf.shape(feat)[0]
T = tf.shape(feat)[1]
N = tf.shape(feat)[2]
# (B, TN, F)
feat = tf.reshape(feat, (B, -1, F))
# obtain pair-wise feats for nodes
# (B, 1, TN, F)
src_feat = feat[:, tf.newaxis, :, :]
# (1, B, TN, F)
dst_feat = feat[tf.newaxis, :, :, :]
TN = T * N
if self._dist_fn == 'diff':
# (B, B, TN, F)
dist = dst_feat - src_feat
self_feat = feat
elif self._dist_fn == 'dot':
# (B, B, TN, F)
dist = dst_feat * src_feat
self_feat = feat
elif self._dist_fn == 'norm_dot':
n_dst_feat, _ = _safe_normalize(dst_feat, axis=-1)
n_src_feat, _ = _safe_normalize(src_feat, axis=-1)
dist = tf.reduce_sum(
n_dst_feat * n_src_feat, axis=-1, keepdims=True
)
self_feat = tf.ones([B, TN, 1])
F = 1
elif self._dist_fn == 'concat':
# (B, B, TN, F*2)
dist = tf.concat([
tf.tile(src_feat, [1, B, 1, 1]),
tf.tile(dst_feat, [B, 1, 1, 1]),
], axis=-1)
# (B, TN, F*2)
self_feat = tf.concat([feat, feat], axis=-1)
F = F * 2
elif self._dist_fn == 'none':
# (B, B, TN, F)
dist = tf.tile(dst_feat, [B, 1, 1, 1])
# (B, TN, F)
self_feat = feat
else:
raise ValueError()
# Aggregate over node features
# Create an "other" mask
mask = tf.ones((B, B)) - tf.eye(B) # here
# (B * (B-1), 2)
to_take = tf.where(mask > 0.5)
# (B * (B-1), TN, F)
gathered = tf.gather_nd(dist, to_take)
# (B, B-1, TN, F)
other_feat = tf.reshape(gathered, (B, B - 1, TN, F))
# (B, TN, F)
other_mean = tf.reduce_mean(other_feat, axis=1)
other_min = tf.reduce_min(other_feat, axis=1)
other_max = tf.reduce_max(other_feat, axis=1)
# put them together
agg_feats = [self_feat, other_mean, other_min, other_max]
n_agg_feats = len(agg_feats)
# (B, TN, F*n_agg_feats)
all_feats = tf.concat(agg_feats, axis=-1)
all_feats = tf.reshape(all_feats, (B, T, N, F * n_agg_feats))
return all_feats, F * n_agg_feats
    def _get_default_grads(self, past_grads, past_acts, cur_acts):
        # derive shapes locally so this helper is self-contained
        B = tf.shape(cur_acts)[0]
        N = tf.shape(cur_acts)[1]
        if self._random_grads_stddev is not None:
            default_grads = tf.random.normal(
                shape=(B, 1, N),
                stddev=self._random_grads_stddev
            )
        elif self._use_nearest_grads:
            T = tf.shape(past_grads)[1]
            # TODO: can look at other batch instances too
            # which would create a (B, B, T, N) diff
            # TODO: can limit the time window that we look back
            # (B, T, N)
diff = tf.math.abs(cur_acts[:, tf.newaxis] - past_acts)
# (B, N)
closest_idx = tf.math.argmin(diff, axis=1)
# (B * N, 1)
closest_idx = tf.reshape(closest_idx, (B * N, 1))
idx = tf.range(B * N)
# (B * N, 2)
closest_idx = tf.concat(
[closest_idx, idx[..., tf.newaxis]], axis=-1
)
# (T, B, N)
pg = tf.transpose(past_grads, (1, 0, 2))
# (T, B * N)
pg = tf.reshape(pg, (T, B * N))
# (B * N)
closest_grads = tf.gather_nd(pg, closest_idx)
default_grads = tf.reshape(closest_grads, (B, 1, N))
else:
default_grads = tf.zeros((B, 1, N), dtype=tf.float32)
return default_grads
def train_callback(self):
self._input_mlp.train_callback()
if self._fwd_output_mlp is not None:
self._fwd_output_mlp.train_callback()
if self._bwd_output_mlp is not None:
self._bwd_output_mlp.train_callback()
def _safe_normalize(tensor, axis, eps=1e-8):
tensor, norm = tf.linalg.normalize(tensor + eps, axis=axis)
return tensor, norm
| 32.23138 | 74 | 0.500909 | 4,328 | 32,457 | 3.527957 | 0.077172 | 0.008514 | 0.007663 | 0.006025 | 0.722051 | 0.662388 | 0.631738 | 0.61471 | 0.58943 | 0.561661 | 0 | 0.015437 | 0.381305 | 32,457 | 1,006 | 75 | 32.263419 | 0.744933 | 0.216594 | 0 | 0.58669 | 0 | 0 | 0.00971 | 0 | 0 | 0 | 0 | 0.002982 | 0.003503 | 1 | 0.031524 | false | 0 | 0.001751 | 0 | 0.06655 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
999b30e3b541222c64dd017084efe4cadab334f9 | 5,193 | py | Python | imessage_extractor/src/helpers/utils.py | tsouchlarakis/imessage-extractor | e77bee947e19ac3f30ffd60faf7d444ded336b3b | [
"MIT"
] | 1 | 2021-12-17T05:41:49.000Z | 2021-12-17T05:41:49.000Z | imessage_extractor/src/helpers/utils.py | tsouchlarakis/imessage-extractor | e77bee947e19ac3f30ffd60faf7d444ded336b3b | [
"MIT"
] | 2 | 2021-08-22T02:15:40.000Z | 2022-01-16T23:15:01.000Z | imessage_extractor/src/helpers/utils.py | tsouchlarakis/imessage-extractor | e77bee947e19ac3f30ffd60faf7d444ded336b3b | [
"MIT"
] | null | null | null | import os
import pathlib
import re
import typing
def fmt_seconds(time_in_sec: int, units: str='auto', round_digits: int=4) -> dict:
"""
Format time in seconds to a custom string. `units` parameter can be
one of 'auto', 'seconds', 'minutes', 'hours' or 'days'.
"""
if units == 'auto':
if time_in_sec < 60:
time_diff = round(time_in_sec, round_digits)
time_measure = 'seconds'
elif time_in_sec >= 60 and time_in_sec < 3600:
time_diff = round(time_in_sec/60, round_digits)
time_measure = 'minutes'
elif time_in_sec >= 3600 and time_in_sec < 86400:
time_diff = round(time_in_sec/3600, round_digits)
time_measure = 'hours'
else:
time_diff = round(time_in_sec/86400, round_digits)
time_measure = 'days'
elif units in ['seconds', 'minutes', 'hours', 'days']:
time_measure = units
if units == 'seconds':
time_diff = round(time_in_sec, round_digits)
elif units == 'minutes':
time_diff = round(time_in_sec/60, round_digits)
elif units == 'hours':
time_diff = round(time_in_sec/3600, round_digits)
else:
# Days
            time_diff = round(time_in_sec/86400, round_digits)
    else:
        raise ValueError("units must be 'auto', 'seconds', 'minutes', 'hours' or 'days'")
return dict(zip(['units', 'value'], [time_measure, time_diff]))
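# e.g. fmt_seconds(90) -> {'units': 'minutes', 'value': 1.5}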
def human_filesize(nbytes: int) -> str:
"""
Convert number of bytes to human-readable filesize string.
Source: https://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
"""
base = 1
for unit in ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']:
n = nbytes / base
if n < 9.95 and unit != 'B':
# Less than 10 then keep 1 decimal place
value = '{:.1f} {}'.format(n, unit)
return value
if round(n) < 1000:
# Less than 4 digits so use this
value = f'{round(n)} {unit}'
return value
base *= 1024
value = f'{round(n)} {unit}'
return value
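# e.g. human_filesize(2048) -> '2.0 KB', human_filesize(999) -> '999 B'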
def strip_ws(string: str) -> str:
"""
Strip whitespace off a string and replace all instances of >1 space with a single space.
"""
return re.sub(r'\s+', ' ', string.strip())
def ensurelist(val: typing.Any) -> list:
"""
Accept a string or list and ensure that it is formatted as a list. If `val` is not a list,
return [val]. If `val` is already a list, return as is.
"""
return [val] if not isinstance(val, list) else val
def listfiles(path: typing.Union[str, pathlib.Path]='.',
ext=None,
pattern=None,
ignore_case=True,
full_names=False,
recursive=False,
include_hidden=True) -> list:
"""
List files in a given directory.
path (str): absolute path to search for files in
ext (str): optional file extension or list of extensions to filter resulting files by
    pattern (str): optionally filter the resulting files by a matching regex pattern
    ignore_case (bool): ignore case when filtering with the `pattern` parameter
full_names (bool): return absolute filepaths
recursive (bool): search recursively down the directory tree
include_hidden (bool): include hidden files in resulting file list
"""
owd = os.getcwd()
os.chdir(path)
if recursive:
fpaths = []
for root, dpaths, filenames in os.walk('.'):
for f in filenames:
fpaths.append(os.path.join(root, f).replace('./', ''))
else:
fpaths = [f for f in os.listdir() if os.path.isfile(f)]
if not include_hidden:
fpaths = [f for f in fpaths if not os.path.basename(f).startswith('.')]
if pattern is not None:
if ignore_case:
fpaths = [f for f in fpaths if re.search(pattern, f, re.IGNORECASE)]
else:
fpaths = [f for f in fpaths if re.search(pattern, f)]
if ext:
ext = [x.lower() for x in ensurelist(ext)]
ext = ['.' + x if not x.startswith('.') else x for x in ext]
fpaths = [x for x in fpaths if os.path.splitext(x)[1].lower() in ext]
if full_names:
path_expand = os.getcwd() if path == '.' else path
fpaths = [os.path.join(path_expand, f) for f in fpaths]
os.chdir(owd)
return fpaths
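# e.g. listfiles('/tmp', ext=['py', 'txt'], recursive=True, full_names=True)
# returns absolute paths of all .py/.txt files under /tmp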
def duplicated(lst: list) -> list:
"""
Return list of boolean values indicating whether each item in a list is a duplicate of
a previous item in the list. Order matters!
"""
dup_ind = []
for i, item in enumerate(lst):
tmplist = lst.copy()
del tmplist[i]
if item in tmplist:
# Test if this is the first occurrence of this item in the list. If so, do not
# count as duplicate, as the first item in a set of identical items should not
# be counted as a duplicate
first_idx = min(
[i for i, x in enumerate(tmplist) if x == item])
if i != first_idx:
dup_ind.append(True)
else:
dup_ind.append(False)
else:
dup_ind.append(False)
return dup_ind | 32.254658 | 98 | 0.586366 | 726 | 5,193 | 4.093664 | 0.285124 | 0.030283 | 0.042396 | 0.04576 | 0.171938 | 0.153432 | 0.146366 | 0.121131 | 0.098923 | 0.024899 | 0 | 0.018283 | 0.304833 | 5,193 | 161 | 99 | 32.254658 | 0.804986 | 0.269209 | 0 | 0.225806 | 0 | 0 | 0.04267 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.043011 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
999c1286569e2835ef7654c27f554b9341e671ee | 590 | py | Python | tools/formats parser/match_parser.py | TheUberCatman/pastebin_rust_api | 11441311ca26c9f81539ec7302ddda49528e62a0 | [
"Apache-2.0"
] | 1 | 2017-05-30T07:33:56.000Z | 2017-05-30T07:33:56.000Z | tools/formats parser/match_parser.py | Catman155/pastebin_rust_api | 11441311ca26c9f81539ec7302ddda49528e62a0 | [
"Apache-2.0"
] | 1 | 2018-03-09T19:11:38.000Z | 2018-03-09T19:11:38.000Z | tools/formats parser/match_parser.py | Catman155/pastebin_rust_api | 11441311ca26c9f81539ec7302ddda49528e62a0 | [
"Apache-2.0"
] | null | null | null | # Source of values.txt: 'https://pastebin.com/api/'
values = []
with open('values.txt', 'r') as myfile:
data = myfile.read()
data = data.split("\n")
for d in data:
result = d.split(" = ")
values.append(result[0].replace(" ", ""))
# rust_formats.txt lists the Enum variants present in src/paster/format.rs
with open('rust_formats.txt', 'r') as myfile:
    data = myfile.read()
    data = data.replace("\n", "").replace(" ", "")
    data = data.split(",")
    # emit one Rust match arm per (enum variant, API value) pair
    for i, d in enumerate(data):
        print("&Format::" + d + " => \"" + values[i] + "\",")
| 29.5 | 74 | 0.538983 | 82 | 590 | 3.853659 | 0.463415 | 0.075949 | 0.037975 | 0.075949 | 0.21519 | 0.21519 | 0.21519 | 0.21519 | 0.21519 | 0 | 0 | 0.006757 | 0.247458 | 590 | 19 | 75 | 31.052632 | 0.704955 | 0.20678 | 0 | 0.266667 | 0 | 0 | 0.146237 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
999c8e70e080ff7ed7117aa1db45dd0b42791638 | 2,545 | py | Python | Decrypt.py | momma-regen/P-C_Gif_Ripper | f6d4b8d84144113953abc3969544b5117adb2a12 | [
"Unlicense"
] | null | null | null | Decrypt.py | momma-regen/P-C_Gif_Ripper | f6d4b8d84144113953abc3969544b5117adb2a12 | [
"Unlicense"
] | null | null | null | Decrypt.py | momma-regen/P-C_Gif_Ripper | f6d4b8d84144113953abc3969544b5117adb2a12 | [
"Unlicense"
] | null | null | null | import regex as re
from math import ceil
from typing import List
from ByteReader import Reader, SeekOrigin as so
from DataTypes import int_32
class rpg_file:
offset: int_32 = int_32(0)
size: int_32 = int_32(0)
key: int_32 = int_32(0)
name: str
def decrypt_name(data: bytes, key: int|int_32) -> str:
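    # XOR each byte of the encrypted name with the repeating 4-byte key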
    if not isinstance(key, int_32): key = int_32(key)
key.to_unsigned()
decrypted_name: bytes = b""
key_bytes = key.to_bytes()
j = 0
for i in range(len(data)):
if j == 4:
j = 0
decrypted_name += int_32(data[i] ^ (key_bytes[j] if j < len(key_bytes) else 0)).to_bytes()
j += 1
return decrypted_name.decode("utf-8")
def read_archive(file_path: str, match_str: str = None) -> List[rpg_file]:
reader: Reader = Reader(file_path)
reader.seek(8, so.Begin)
key = reader.read_int32().to_unsigned()
key *= 9
key += 3
files: List[rpg_file] = []
    while True:
file = rpg_file()
file.offset = int_32(reader.read_int32() ^ key)
file.size = int_32(reader.read_int32() ^ key)
file.key = int_32(reader.read_int32() ^ key).to_unsigned()
length = int_32(reader.read_int32() ^ key)
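        # stop once the entry table runs past the end of the file data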
if file.offset < 0 or reader._p + length >= len(reader._data): break
try:
file.name = decrypt_name(reader.read_bytes(length), key).replace("\\", "/")
if match_str is not None and not re.match(match_str, file.name, flags=re.IGNORECASE): continue
files.append(file)
except Exception as e:
print('skipping: ' + str(e))
break
return files
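# hedged usage sketch (the archive and output paths are placeholders, not
# from the original tool):
#   files = read_archive('Game.rgssad', match_str=r'Graphics/.*')
#   decrypt(files, 'Game.rgssad', 'extracted/')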
def decrypt(files: List[rpg_file], file_location: str, save_location: str) -> None:
reader = Reader(file_location)
for file in files:
file_name = file.name.split("/")[-1:][0]
reader.seek(file.offset, so.Begin)
data: bytes = reader.read_bytes(file.size)
decrypted_file = b""
key = file.key.to_unsigned()
key_bytes = key.to_bytes() + b'\x00\x00\x00\x00'
j = 0
for i in range(len(data)):
if j == 4:
j = 0
key *= 7
key += 3
key_bytes = key.to_bytes() + b'\x00\x00\x00\x00'
result = data[i] ^ key_bytes[j]
decrypted_file += result.to_bytes(1, 'little')
j += 1
open(f"{save_location.rstrip('/')}/{file_name}", "wb").write(decrypted_file) | 31.036585 | 107 | 0.559528 | 359 | 2,545 | 3.788301 | 0.250696 | 0.055147 | 0.055147 | 0.044118 | 0.215441 | 0.157353 | 0.123529 | 0.083824 | 0.083824 | 0.083824 | 0 | 0.045066 | 0.311198 | 2,545 | 82 | 108 | 31.036585 | 0.730747 | 0 | 0 | 0.21875 | 0 | 0 | 0.039757 | 0.015822 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046875 | false | 0 | 0.078125 | 0 | 0.234375 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99a126cb801c61ecd90be2fe3d5f2ec97ac26d6d | 1,308 | py | Python | Process_Threads/mul_threading.py | CrazyBBer/Python-Learn-Sample | 3bd0694327db6c662c6cc3bdf91c6261daa4b6cf | [
"MIT"
] | 2 | 2020-05-02T11:24:37.000Z | 2020-05-02T13:49:18.000Z | Process_Threads/mul_threading.py | crazybber/pythontrip | 062ba71dfe6729ecc606eff7260b1c39497b6456 | [
"MIT"
] | null | null | null | Process_Threads/mul_threading.py | crazybber/pythontrip | 062ba71dfe6729ecc606eff7260b1c39497b6456 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""threading / multithreading demos"""
__Author__ = 'eamon'
import time,threading
def loop():
print('thread %s is running ...' % threading.current_thread().name)
n=0
while n <5:
n+=1
print('thread %s >> %s ' %(threading.current_thread().name,n))
time.sleep(1)
print('thread %s ended.' % threading.current_thread().name)
def testThread():
print('thread %s is running..' % threading.current_thread().name)
t=threading.Thread(target=loop,name='LoopThread')
t.start()
t.join()
    print('thread %s ended.' % threading.current_thread().name)
# testThread()
balance = 0
def change_it(n):
    # shared state: add n then subtract n, so the net change should be zero --
    # but without a lock, two threads can interleave these statements
    global balance
    balance = balance + n
    balance = balance - n
lock = threading.Lock()
def run_thread(n):
for i in range(100000):
lock.acquire()
try:
change_it(n)
finally:
lock.release()
def testMultiThreadDanger():
t1= threading.Thread(target=run_thread,args=(5,))
t2= threading.Thread(target=run_thread,args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
# testMultiThreadDanger()
import multiprocessing
def busy_loop():
    # renamed from ``loop``: redefining loop() here would silently shadow the
    # loop() defined above (threading is already imported at the top)
    x = 0
    while True:
        x = x ^ 1
def testRunfullCPU():
    # starts one busy thread per core; in CPython the GIL still limits the
    # process to roughly one core's worth of CPU
    print('cpu num:', multiprocessing.cpu_count())
    for i in range(multiprocessing.cpu_count()):
        t = threading.Thread(target=busy_loop)
        t.start()
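# Illustrative addition (not in the original file): the same workload as
# run_thread(), but without the lock, to show what the lock above prevents.
# With enough iterations the printed balance is often nonzero, because the
# read-modify-write inside change_it() gets interleaved between threads.
def run_thread_unsafe(n):
    for i in range(100000):
        change_it(n)  # unsynchronized access to the shared ``balance``
def testMultiThreadUnsafe():
    t1 = threading.Thread(target=run_thread_unsafe, args=(5,))
    t2 = threading.Thread(target=run_thread_unsafe, args=(8,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print(balance)
# testMultiThreadUnsafe()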
| 17.917808 | 68 | 0.682722 | 181 | 1,308 | 4.845304 | 0.337017 | 0.062714 | 0.068415 | 0.148233 | 0.374002 | 0.282782 | 0.205245 | 0.205245 | 0.107184 | 0 | 0 | 0.020739 | 0.152141 | 1,308 | 72 | 69 | 18.166667 | 0.770063 | 0.060398 | 0 | 0.125 | 0 | 0 | 0.116735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145833 | false | 0 | 0.041667 | 0 | 0.1875 | 0.145833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99a1822f416a16569175d3648ee1cf50474498cd | 941 | py | Python | challenges/merge_sort/merge_sort.py | nastinsk/python-data-structures-and-algorithms | 505b26a70fb846f6e9d0681bbe4f77e3797acf2d | [
"MIT"
] | null | null | null | challenges/merge_sort/merge_sort.py | nastinsk/python-data-structures-and-algorithms | 505b26a70fb846f6e9d0681bbe4f77e3797acf2d | [
"MIT"
] | null | null | null | challenges/merge_sort/merge_sort.py | nastinsk/python-data-structures-and-algorithms | 505b26a70fb846f6e9d0681bbe4f77e3797acf2d | [
"MIT"
] | 3 | 2020-05-31T03:25:49.000Z | 2020-12-05T21:03:13.000Z |
def merge_sort(lst):
"""function to prvide a merge sort on the given list, calles recursively """
n = len(lst)
if n > 1:
mid = n//2
left = lst[: mid]
right = lst[mid:]
# sort the left side
merge_sort(left)
# sort the right side
merge_sort(right)
# merge the sorted left and right sides together
merge(left, right, lst)
def merge(left, right, lst):
"""function to merge left sublist and rightsublist to the list in proper order"""
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] <= right[j]:
lst[k] = left[i]
i += 1
else:
lst[k] = right[j]
j += 1
k += 1
if i == len(left):
for el in right[j:]:
lst[k] = el
k += 1
else:
for el in left[i:]:
lst[k] = el
            k += 1
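# Small usage demo (added for illustration; not part of the original module):
if __name__ == "__main__":
    data = [38, 27, 43, 3, 9, 82, 10]
    merge_sort(data)
    print(data)  # expected output: [3, 9, 10, 27, 38, 43, 82]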
| 18.82 | 86 | 0.4644 | 134 | 941 | 3.238806 | 0.298507 | 0.082949 | 0.059908 | 0.078341 | 0.036866 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018416 | 0.422954 | 941 | 49 | 87 | 19.204082 | 0.780847 | 0.248672 | 0 | 0.241379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0 | 0 | 0.068966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99a4c98326e7361f9a182bd46371aea1ad73b400 | 4,401 | py | Python | array_str_problems/zero_matrix.py | UPstartDeveloper/Problem_Solving_Practice | bd61333b3b056e82a94297e02bc05a17552e3496 | [
"MIT"
] | null | null | null | array_str_problems/zero_matrix.py | UPstartDeveloper/Problem_Solving_Practice | bd61333b3b056e82a94297e02bc05a17552e3496 | [
"MIT"
] | null | null | null | array_str_problems/zero_matrix.py | UPstartDeveloper/Problem_Solving_Practice | bd61333b3b056e82a94297e02bc05a17552e3496 | [
"MIT"
] | null | null | null | """
Zero Matrix:
Write an algorithm such that
if an element in an MxN matrix is 0,
    its entire row and column are set to 0.
Clarifying Questions and Assumptions:
- so we have a rectangular matrix? yes
- just integers? yes
- and what are the inputs to the function?
        - are we given the indices of a single element ---> use a helper function
- or are we given the entire matrix, and expected to do this
over the whole matrix? yes
- is the input mutable? no ---> otherwise it'll be ambiguous about
- which rows and cols to "zeroify" as the function goes on
- are we guaranteed to have at least 1 row with at least 1 element? no
- are we allowed to use NumPy? no, you don't really need to
- what is the return value --> a matrix?
Intuition:
- traverse the 2D matrix
Approach Ideas:
test input =
[
[0, 5 ,6, 7, 3, 1, -5],
[8, 8, 0, 6, 0, 2, 4],
[5, 0, 3, 6, 7, 3, -3]
]
====>
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
]
---------------------------
[
[0, 5 ,6, 7, 3, 1, -5],
[8, 8, 0, 6, 0, 2, 4],
[5, 5, 3, 6, 7, 3, -3]
]
rows = 0, 1
cols = 0, 2, 3
====>
[
[0, 5 ,6, 7, 3, 1, -5],
[8, 8, 0, 6, 0, 2, 4],
[5, 5, 3, 6, 7, 3, -3]
]
zeroes = [
(0, 0), (1, 2), (1, 4)
]
1. Brute Force - Start with 0's, Try to Keep Elements
- make a MxN matrix of all zeroes
- check rows
- if the corresponding row in the input contains a 0,
leave the output as is
- otherwise, copy over the row
2. Brute Force idea 2 --> can be in-place or out of place
- record locations of all the 0s
- iterate back over the array
- if we hit one of those locations from before,
"zeroify" that row and column
- return the output
Edge Cases:
- empty array (check for that)
"""
from typing import List
def find_zeroes(matrix):
rows, cols = set(), set()
for row_ndx, row in enumerate(matrix):
for col_ndx, element in enumerate(row):
            # record the row and column of every zero; sets deduplicate
            # entries, so explicit membership checks are unnecessary
            if element == 0:
                rows.add(row_ndx)
                cols.add(col_ndx)
return rows, cols
def zeroify_row(matrix, zero_row_ndx):
# zeroify the matrix row
for col_ndx in range(len(matrix[zero_row_ndx])):
matrix[zero_row_ndx][col_ndx] = 0
def zeroify_col(matrix, zero_col_ndx):
# zeroify the matrix column
for row_ndx in range(len(matrix)):
matrix[row_ndx][zero_col_ndx] = 0
def zero_matrix(matrix: List[List[int]]) -> List[List[int]]:
"""
Input:
[
0 1 2 3 4 5 6
> 0 [0, 0 ,0, 0, 0, 0, 0],
> 1 [0, 8, 0, 6, 0, 2, 4],
> 2 [0, 5, 3, 6, 7, 3, -3]
]
locations = [
(0, 0), (1, 2), (1, 4),
]
ROW_LENGTH = 7
rndx row cndx e
0 [0, 5 ,6, 7, 3, 1, -5], 0 0
1 5
2 6
3 7
4 3
5 1
6 -5
zrndx cndx zcndx
0 0 0
1
2
3
4
5
6
Big O:
Time: O(MxN)
Space: O(M + N)
Improvements:
- remember the rows and cols we've already marked for zeroifying:
TODO: ---> IN PLACE -->
1. First pass: edit the cols in top row to be zero, if they contain zero
edit the row vals in left col to be zero, if their rows
contain zero
2. Second pass: just check the top row and left col
top row: zeroify the col
left col: zeroify the row
"""
# - record locations of all the rows and cols to zeroify
rows, cols = find_zeroes(matrix) # MxN iterations
# "zeroify" the rows
for row_ndx in rows: # M
zeroify_row(matrix, row_ndx) # N
# "zeroify" the columns
if len(cols) < len(matrix):
for col_ndx in cols: # N
zeroify_col(matrix, col_ndx) # M
# - return the output
return matrix
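# Hedged sketch (added; not from the original file) of the in-place variant the
# docstring's TODO describes: reuse the first row and first column as marker
# storage instead of the ``rows``/``cols`` sets, cutting extra space to O(1).
def zero_matrix_in_place(matrix: List[List[int]]) -> List[List[int]]:
    """O(1)-extra-space variant: first row/column double as marker storage."""
    if not matrix or not matrix[0]:
        return matrix
    first_row_has_zero = any(v == 0 for v in matrix[0])
    first_col_has_zero = any(row[0] == 0 for row in matrix)
    # first pass: project every zero onto the first row and first column
    for r in range(1, len(matrix)):
        for c in range(1, len(matrix[0])):
            if matrix[r][c] == 0:
                matrix[r][0] = 0
                matrix[0][c] = 0
    # second pass: zeroify rows/columns flagged by the markers
    for r in range(1, len(matrix)):
        if matrix[r][0] == 0:
            zeroify_row(matrix, r)
    for c in range(1, len(matrix[0])):
        if matrix[0][c] == 0:
            zeroify_col(matrix, c)
    # finally handle the first row and column themselves
    if first_row_has_zero:
        zeroify_row(matrix, 0)
    if first_col_has_zero:
        zeroify_col(matrix, 0)
    return matrix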
| 25.736842 | 80 | 0.498523 | 670 | 4,401 | 3.226866 | 0.258209 | 0.031452 | 0.037465 | 0.044403 | 0.120722 | 0.061055 | 0.049954 | 0.043478 | 0.036078 | 0.036078 | 0 | 0.067598 | 0.398319 | 4,401 | 170 | 81 | 25.888235 | 0.748867 | 0.735287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005882 | 0 | 1 | 0.16 | false | 0 | 0.04 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99a63044e63f7d7aad2a8fc043b98abc40e94cd5 | 2,316 | py | Python | arc/utility_functions/batch_generator.py | stalhabukhari/ARC | a5efc44c3af0714e07a60204cc7c3a8ca19ef20e | [
"MIT"
] | null | null | null | arc/utility_functions/batch_generator.py | stalhabukhari/ARC | a5efc44c3af0714e07a60204cc7c3a8ca19ef20e | [
"MIT"
] | null | null | null | arc/utility_functions/batch_generator.py | stalhabukhari/ARC | a5efc44c3af0714e07a60204cc7c3a8ca19ef20e | [
"MIT"
] | 1 | 2022-03-18T10:55:57.000Z | 2022-03-18T10:55:57.000Z | """
batch_generator.py
"""
import os, random
import numpy as np
from PIL import Image
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.utils import to_categorical as tocat_fn
ImageFile.LOAD_TRUNCATED_IMAGES = True  # the flag lives on PIL.ImageFile, not PIL.Image
class BatchGenerator(tf.keras.utils.Sequence):
def __init__(self, data_list, label_list, batch_size, image_size=(150, 150), aug_flag=False):
self.data_list = data_list
self.label_list = label_list
self.batch_size = batch_size
self.image_size = image_size
self.aug_flag = aug_flag
self.total_images = len(self.data_list)
self.indices = np.arange(self.total_images)
self.num_batches = int(np.ceil(self.total_images/self.batch_size))
#self.on_epoch_end()
def __len__(self):
""" iterations per epoch """
return self.num_batches
def on_epoch_end(self):
random.shuffle(self.indices)
def __getitem__(self, index):
""" return batch of (data, label) pairs """
batch_x, batch_y = [], []
batch_indices = self.indices[index*self.batch_size:min((index+1)*self.batch_size, self.total_images)]
for loop in batch_indices:
loaded_image = img_to_array((load_img(os.path.join(
self.data_list[loop]))).resize(self.image_size, Image.ANTIALIAS))
loaded_label = tocat_fn(self.label_list[loop], 100)
            if self.aug_flag:
                # note: _random_augment() (rotate or brightness distortion)
                # is defined below but unused here
                loaded_image = self._random_rotate(loaded_image)
batch_x.append(loaded_image)
batch_y.append(loaded_label)
return (np.asarray(batch_x, dtype=np.float32),
np.asarray(batch_y, dtype=np.uint8))
def _random_augment(self, image):
if np.random.uniform(-1, 1) > 0:
return self._random_rotate(image)
else:
return self._random_brightness_distort(image)
@staticmethod
def _random_rotate(image):
angle_multiplier = np.random.randint(3)
return np.rot90(image, angle_multiplier)
@staticmethod
def _random_brightness_distort(image):
noise_shift = np.random.normal(0., .05, image.shape)
noise_scale = np.random.normal(1., .01, image.shape)
return (image + noise_shift) * noise_scale
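# Hedged, self-contained usage sketch (added; not from the original repo).
# It writes two tiny dummy PNGs to a temp directory purely to exercise the
# generator API; note the class itself relies on Image.ANTIALIAS, which only
# exists in Pillow < 10.
if __name__ == "__main__":
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    paths = []
    for idx in range(2):
        path = os.path.join(tmp_dir, "sample_%d.png" % idx)
        Image.fromarray(np.zeros((8, 8, 3), dtype=np.uint8)).save(path)
        paths.append(path)
    gen = BatchGenerator(paths, [3, 7], batch_size=2, image_size=(16, 16))
    batch_x, batch_y = gen[0]
    print(batch_x.shape, batch_y.shape)  # -> (2, 16, 16, 3) (2, 100)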
| 33.085714 | 109 | 0.664076 | 312 | 2,316 | 4.637821 | 0.317308 | 0.037319 | 0.033172 | 0.01935 | 0.023497 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014077 | 0.233161 | 2,316 | 69 | 110 | 33.565217 | 0.800676 | 0.041883 | 0 | 0.041667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145833 | false | 0 | 0.125 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99a8aebcdbc0b31659e37d72e454787e67305614 | 663 | py | Python | names.py | EggSquishIt/mcserver | f9e98f100f7d1e4b9d4fc306ca33255619d5504f | [
"MIT"
] | 3 | 2020-08-29T13:33:30.000Z | 2020-10-03T15:40:30.000Z | names.py | EggSquishIt/mcserver | f9e98f100f7d1e4b9d4fc306ca33255619d5504f | [
"MIT"
] | 3 | 2020-10-10T17:06:19.000Z | 2020-11-14T15:21:26.000Z | names.py | EggSquishIt/mcserver | f9e98f100f7d1e4b9d4fc306ca33255619d5504f | [
"MIT"
] | 1 | 2020-10-10T13:09:27.000Z | 2020-10-10T13:09:27.000Z | import random
vowels = [
"a",
"au",
"o",
"e",
"i",
"u",
]
prefixes = [
"b",
"c",
"d",
"f",
"g",
"gh",
"h",
"k",
"l",
"m",
"n",
"p",
"qu",
"r",
"s",
"t",
"v",
"w",
"x",
"y",
"z"
]
suffixes = [
"b",
"c",
"cc",
"ck",
"d",
"dd",
"f",
"g",
"gh",
"h",
"i",
"k",
"l",
"ll",
"m",
"n",
"p",
"r",
"rr",
"s",
"t",
"tt",
"v",
"w",
"x",
"y",
"z"
]
def generate_name():
result = ""
length = random.randint(3, 15)
while len(result) < length:
result = result + random.choice(prefixes) + random.choice(vowels) + random.choice(suffixes)
return result
def proper_case(string):
return string[:1].upper() + string[1:]
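# Small demo (added for illustration; not part of the original module):
if __name__ == "__main__":
    for _ in range(5):
        print(proper_case(generate_name()))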
| 8.84 | 93 | 0.435897 | 95 | 663 | 3.021053 | 0.578947 | 0.125436 | 0.027875 | 0.034843 | 0.034843 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01002 | 0.24736 | 663 | 74 | 94 | 8.959459 | 0.56513 | 0 | 0 | 0.608696 | 0 | 0 | 0.096531 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.014493 | 0.014493 | 0.072464 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99a903a06e260b2c7198a42a0a29f263e277358e | 245 | py | Python | attempt.py | hoshen20-meet/meet2018y1lab6 | 68e70de443eba980b1de8b865eea8337aa82e6d3 | [
"MIT"
] | null | null | null | attempt.py | hoshen20-meet/meet2018y1lab6 | 68e70de443eba980b1de8b865eea8337aa82e6d3 | [
"MIT"
] | null | null | null | attempt.py | hoshen20-meet/meet2018y1lab6 | 68e70de443eba980b1de8b865eea8337aa82e6d3 | [
"MIT"
] | null | null | null | import turtle
colors = ['green','blue','orange', 'red']
turtle.speed(0)  # 0 = fastest; values above 10 (like the original 900) are clamped to 0
turtle.bgcolor('black')  # set the background once, outside the loop
for i in range(99999999):
    turtle.pencolor(colors[i % 4])
    turtle.forward(i)
    turtle.right(70)  # turtle.degrees() was a no-op here and has been dropped
| 13.611111 | 41 | 0.608163 | 31 | 245 | 4.806452 | 0.709677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074074 | 0.228571 | 245 | 17 | 42 | 14.411765 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0.094262 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99a9c7e2da4c504b1d30c8fa7fb339aa5d8ceae5 | 4,444 | py | Python | nr_common/image_utils/image_utils_caffe.py | nitred/nr-common | f251e76fe10cb46f609583922d485013f5cba92b | [
"MIT"
] | null | null | null | nr_common/image_utils/image_utils_caffe.py | nitred/nr-common | f251e76fe10cb46f609583922d485013f5cba92b | [
"MIT"
] | 1 | 2018-01-07T19:03:35.000Z | 2018-01-07T19:03:35.000Z | nr_common/image_utils/image_utils_caffe.py | nitred/nr-common | f251e76fe10cb46f609583922d485013f5cba92b | [
"MIT"
] | 1 | 2018-09-20T02:31:18.000Z | 2018-09-20T02:31:18.000Z | """Utility functions."""
import numpy as np
def caffe_load_image(image_filename):
"""Load image using caffe.io.load_image.
This is to maintain shape expectation across the caffe library.
Args:
image_filename (str): String filename.
Returns:
numpy.ndarray: an image with the following properties:
shape: [Height, Width, Channels]
channel_order: RGB
scale: [0, 1]
dtype: np.float32
"""
import caffe
return caffe.io.load_image(image_filename, color=True)
def caffe_load_image_batch(image_filenames, batch_size=None):
"""Load image using caffe.io.load_image.
This is to maintain shape expectation across the caffe library.
Args:
image_filename (list of str): List of string filenames.
batch_size (int): If batch_size is None, then all filenames are read.
Otherwise only the first `batch_size` number of filenames are read.
Returns:
numpy.ndarray: an image with the following properties:
shape: [batch_size, Height, Width, Channels]
channel_order: RGB
scale: [0, 1]
dtype: np.float32
"""
if batch_size is None:
batch_size = len(image_filenames)
image_batch = [caffe_load_image(image_filename) for image_filename in image_filenames[:batch_size]]
image_batch = np.array(image_batch) # converting list into numpy array
return image_batch
# TODO (nitred): LRU cache
def get_caffe_transformer(net_input_shape, mean_bgr_255=None):
"""Transform a batch of images which were loaded by caffe.io.load_image.
Transformations:
- mean subtraction (if mean provided)
- transposes data to become [Channels x Height x Width]
- swaps channels to convert RGB to BGR
- scales the data to [0., 255.]
Args:
net_input_shape (numpy.ndarray): The expected 4-dimensional shape of the network.
The first dimension i.e. the batch_size doesn't really matter.
Usually the expected shape is [BATCH_SIZE, Height, Width, Channels]
mean_bgr_255 (numpy.ndarray): 1-dimensional array of means.
Channel order should be BGR and scale should be [0., 255.]
Returns:
caffe.io.Transformer: With all standard transformations set.
"""
import caffe
transformer = caffe.io.Transformer({'data': net_input_shape})
if mean_bgr_255 is not None:
transformer.set_mean('data', mean_bgr_255)
transformer.set_transpose('data', (2, 0, 1))
transformer.set_channel_swap('data', (2, 1, 0))
transformer.set_raw_scale('data', 255.0)
return transformer
def caffe_transform_batch(X, net_input_shape, mean_bgr_255=None):
"""Transform a batch of images which were loaded by caffe.io.load_image.
Transformations:
- mean subtraction (if mean provided)
- transposes data to become [Channels x Height x Width]
- swaps channels to convert RGB to BGR
- scales the data to [0., 255.]
Args:
X (numpy.ndarray): A batch of images of shape [BATCH_SIZE, Height, Width, RGB-Channels].
Can be obtained by using `caffe_utils.caffe_load_image`.
"""
transformer = get_caffe_transformer(net_input_shape, mean_bgr_255)
transformed_batch = np.array([transformer.preprocess('data', image) for image in X])
return transformed_batch
def caffe_load_network_with_input_batch(net, X, mean_bgr_255=None, net_input_blob_name='data'):
"""Load the network with the input batch `inplace`.
Args:
net (caffe.Network): The network to load the input batch.
X (numpy.ndarray): A batch of images of shape [BATCH_SIZE, Height, Width, RGB-Channels].
Can be obtained by using `caffe_utils.caffe_load_image`.
mean_bgr_255 (numpy.ndarray): 1-dimensional array of means.
Channel order should be BGR and scale should be [0., 255.]
net_input_blob_name (str): The input blob name of the network. Default blob name is "data".
Returns:
net: The network is loaded with input inplace but it's returned anyway.
"""
net_input_shape = net.blobs[net_input_blob_name].data.shape
transformed_batch = caffe_transform_batch(X, net_input_shape, mean_bgr_255)
net.blobs[net_input_blob_name].reshape(*transformed_batch.shape)
net.blobs[net_input_blob_name].data[...] = transformed_batch
return net
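# Hedged end-to-end sketch (added; the file names below are hypothetical
# placeholders and caffe must be installed, so it is left commented out):
#
#   import caffe
#   net = caffe.Net("deploy.prototxt", "weights.caffemodel", caffe.TEST)
#   X = caffe_load_image_batch(["img1.jpg", "img2.jpg"])
#   net = caffe_load_network_with_input_batch(net, X, mean_bgr_255=None)
#   out = net.forward()  # prediction blobs, keyed by output blob name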
| 37.982906 | 103 | 0.685419 | 624 | 4,444 | 4.695513 | 0.214744 | 0.03686 | 0.030717 | 0.027304 | 0.554608 | 0.505119 | 0.496928 | 0.496928 | 0.474403 | 0.453584 | 0 | 0.018741 | 0.231548 | 4,444 | 116 | 104 | 38.310345 | 0.839239 | 0.60126 | 0 | 0.068966 | 0 | 0 | 0.018482 | 0 | 0 | 0 | 0 | 0.008621 | 0 | 1 | 0.172414 | false | 0 | 0.103448 | 0 | 0.448276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99aa9d14b3d5ad7bbef547b6bdc0baea743dd41e | 1,183 | py | Python | ENV/lib/python3.5/site-packages/pyrogram/session/internals/msg_id.py | block1o1/CryptoPredicted | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | [
"MIT"
] | 4 | 2021-10-14T21:22:25.000Z | 2022-03-12T19:58:48.000Z | ENV/lib/python3.5/site-packages/pyrogram/session/internals/msg_id.py | inevolin/CryptoPredicted | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | [
"MIT"
] | null | null | null | ENV/lib/python3.5/site-packages/pyrogram/session/internals/msg_id.py | inevolin/CryptoPredicted | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | [
"MIT"
] | 1 | 2022-03-15T22:52:53.000Z | 2022-03-15T22:52:53.000Z | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from threading import Lock
from time import time
class MsgId:
last_time = 0
offset = 0
lock = Lock()
def __new__(cls) -> int:
with cls.lock:
now = time()
cls.offset = cls.offset + 4 if now == cls.last_time else 0
msg_id = int(now * 2 ** 32) + cls.offset
cls.last_time = now
return msg_id
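# Quick illustrative check (added; not part of Pyrogram): IDs are derived from
# unix time scaled by 2**32, so they must be strictly increasing and unique
# even when several are generated within the same clock tick.
if __name__ == "__main__":
    ids = [MsgId() for _ in range(5)]
    assert ids == sorted(ids) and len(set(ids)) == 5
    print(ids)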
| 32.861111 | 74 | 0.690617 | 181 | 1,183 | 4.464088 | 0.585635 | 0.018564 | 0.044554 | 0.070545 | 0.123762 | 0.123762 | 0.084158 | 0 | 0 | 0 | 0 | 0.017738 | 0.237532 | 1,183 | 35 | 75 | 33.8 | 0.878049 | 0.651733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.615385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41d445f8f3d6e55aedb38945121914b577aa660c | 1,967 | py | Python | CNN using tensorflow.py | Highcourtdurai/Deep-learning | b9aed4f0973709ce407006311cef28a7a183787f | [
"Apache-2.0"
] | null | null | null | CNN using tensorflow.py | Highcourtdurai/Deep-learning | b9aed4f0973709ce407006311cef28a7a183787f | [
"Apache-2.0"
] | null | null | null | CNN using tensorflow.py | Highcourtdurai/Deep-learning | b9aed4f0973709ce407006311cef28a7a183787f | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Fashion-MNIST: a dataset of clothing items like boots, dresses, bags, etc.
fashion_mnist=tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# scale pixel values from [0, 255] to [0, 1] so the optimizer behaves well
train_images, test_images = train_images / 255.0, test_images / 255.0
print(train_images.shape)
print(train_labels.shape)
print(test_images.shape)
print(test_labels.shape)
plt.imshow(train_images[4])
plt.show()
model=tf.keras.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(28,28)))
model.add(tf.keras.layers.Dense(units=120,activation="relu"))
model.add(tf.keras.layers.Dense(units=10,activation="softmax"))
# model.compile(optimizer=tf.keras.optimizers.Adam(0.01),loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),metrics=["accuracy"])
# model.fit(train_images,train_labels,epochs=20,batch_size=500)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
def cross_entropy(y_pred, y_true):
    # SparseCategoricalCrossentropy is a class: instantiate it once, then call
    # it with (y_true, y_pred); the original passed labels to the constructor
    return loss_object(y_true, y_pred)
def accuracy(y_pred,y_true):
correct_prediction=tf.equal(tf.cast(y_pred,tf.int64),tf.cast(y_true,tf.int64))
return tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
optimizer=tf.optimizers.Adam()
def train_step(x, y):
    with tf.GradientTape() as tape:
        # call the model directly (differentiable) instead of model.predict()
        # + argmax, which breaks the gradient flow back to the weights
        probs = model(x, training=True)
        loss = cross_entropy(probs, y)
    trainable_variables = model.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
    pred = tf.argmax(probs, axis=1)
    return pred, loss
train_data=tf.data.Dataset.from_tensor_slices((train_images,train_labels))
train_data=train_data.repeat().shuffle(100).batch(32).prefetch(1)
for epoch in range(20):
for step,(batch_x,batch_y) in enumerate(train_data.take(train_images.shape[0]//32),1):
pred,loss=train_step(batch_x,batch_y)
acc=accuracy(pred,batch_y)
print(acc,loss)
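# Hedged follow-up sketch (added; not in the original script): evaluate the
# trained model on the held-out test set.
test_pred = tf.argmax(model(test_images, training=False), axis=1)
print("test accuracy:", float(accuracy(test_pred, test_labels)))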
| 27.704225 | 142 | 0.734621 | 289 | 1,967 | 4.816609 | 0.384083 | 0.04023 | 0.034483 | 0.047414 | 0.111351 | 0.04454 | 0.04454 | 0 | 0 | 0 | 0 | 0.021829 | 0.138282 | 1,967 | 70 | 143 | 28.1 | 0.79941 | 0.132689 | 0 | 0 | 0 | 0 | 0.006778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0.027778 | 0.25 | 0.138889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41d44b79dc2869fa41ba2410af3f958c1f765b2a | 1,489 | py | Python | Assignment-2/visualization.py | LuciFR1809/DAA-Assignments | 0f2faaf2f545cb81da8c86bdd370646694c2c756 | [
"BSD-3-Clause"
] | null | null | null | Assignment-2/visualization.py | LuciFR1809/DAA-Assignments | 0f2faaf2f545cb81da8c86bdd370646694c2c756 | [
"BSD-3-Clause"
] | null | null | null | Assignment-2/visualization.py | LuciFR1809/DAA-Assignments | 0f2faaf2f545cb81da8c86bdd370646694c2c756 | [
"BSD-3-Clause"
] | null | null | null | ##
# @file visualization.py
# @brief Python file for visualization of the testcase.
# Contains the driver code for reading the file and plotting it.
#
# @authors Kumar Pranjal 2018A7PS0163H
# @authors Ashna Swaika 2018A7PS0027H
# @authors Abhishek Bapna 2018A7PS0184H
# @authors Ashish Verma 2018A7PS0009H
# Importing required modules
from sys import argv
import matplotlib.pyplot as plt
import numpy as np
# Program starts here
if __name__ == '__main__':
fname = f'autotestcase/{argv[1]}'
fname2 = f'{fname}_line.txt'
X = []
Y = []
# Plotting the points
with open(fname, 'r', encoding='utf8') as f:
lines = f.readlines()
for line in lines:
x, y = list(map(float, line.split()))
X.append(x)
Y.append(y)
plt.plot(X, Y, '--.', color='red', linewidth=0.5)
# Plotting the partitions
with open(fname2, 'r', encoding='utf8') as f:
lines = f.readlines()
err_tot = 0
for line in lines:
cost, err, m, c, xmin, xmax = list(map(float, line.split()))
if m == float('inf'):
continue
x = np.linspace(xmin, xmax, 1000)
plt.plot(x, m*x+c, c=np.random.rand(3,), linewidth=3,label='y = %.3f x + %.3f' % (m, c))
err_tot += err
plt.legend()
# Displaying and saving the plot
plt.title('Cost : %.3f Error : %.3f'%(cost,err_tot))
plt.savefig(f'{fname}.png')
plt.show()
exit(0)
| 30.387755 | 100 | 0.584285 | 203 | 1,489 | 4.226601 | 0.507389 | 0.009324 | 0.030303 | 0.034965 | 0.121212 | 0.072261 | 0.072261 | 0.072261 | 0 | 0 | 0 | 0.051258 | 0.279382 | 1,489 | 48 | 101 | 31.020833 | 0.748369 | 0.274681 | 0 | 0.133333 | 0 | 0 | 0.111842 | 0.020677 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41d697a0f4888c1996ea9e1ef9843309eaf50ff0 | 2,744 | py | Python | tremana/analysis/transformations.py | s-weigand/tremana | 98a8a546c79ce4f248b3955da21374edfdd61dee | [
"Apache-2.0"
] | 1 | 2022-03-07T02:52:25.000Z | 2022-03-07T02:52:25.000Z | tremana/analysis/transformations.py | s-weigand/tremana | 98a8a546c79ce4f248b3955da21374edfdd61dee | [
"Apache-2.0"
] | 9 | 2021-04-26T07:08:27.000Z | 2022-03-28T07:23:31.000Z | tremana/analysis/transformations.py | s-weigand/tremana | 98a8a546c79ce4f248b3955da21374edfdd61dee | [
"Apache-2.0"
] | null | null | null | """Transformations to be used on tremor accelerometry data (e.g.: FFT)."""
from __future__ import annotations
from typing import Iterable
import numpy as np
import pandas as pd
from scipy.signal import periodogram
def fft_spectra(
input_dataframe: pd.DataFrame,
columns: Iterable[str] | None = None,
sampling_rate: int | float = 128,
norm=False,
):
"""Calculate the FFT of accelerometry data.
Parameters
----------
input_dataframe : pd.DataFrame
Dataframe containing accelerometry data.
columns : Iterable[str], optional
        Columns to calculate the FFT for,
        by default None, which results in all columns being used
sampling_rate : int, optional
Number of sample per second, by default 128
norm : bool, optional
        Whether to normalize the data to 1 or not, by default False
Returns
-------
pd.DataFrame
FFT spectra of the accelerometry data.
"""
n_samples = input_dataframe.shape[0]
freq = np.fft.fftfreq(n_samples, d=1 / sampling_rate)
fft_results = {}
if columns is None:
columns = input_dataframe.columns
for column in columns:
fft_vals = 2 / n_samples * np.abs(np.fft.fft(input_dataframe[column]))
if norm:
fft_vals /= fft_vals.max()
fft_results[column] = fft_vals
fft_df = pd.DataFrame(fft_results, index=freq)
return fft_df.iloc[freq >= 0, :]
def power_density_spectra(
input_dataframe: pd.DataFrame,
columns: Iterable[str] | None = None,
sampling_rate: int | float = 128,
norm=False,
):
"""Calculate the power density spectra of accelerometry data.
Compared to the FFT the resulting values are FFT[-freq]*FFT[freq] with freq>=0.
Parameters
----------
input_dataframe : pd.DataFrame
Dataframe containing accelerometry data.
columns : Iterable[str], optional
        Columns to calculate the FFT for,
        by default None, which results in all columns being used
sampling_rate : int, optional
Number of sample per second, by default 128
norm : bool, optional
        Whether to normalize the data to 1 or not, by default False
Returns
-------
pd.DataFrame
Power density spectra accelerometry data.
"""
pds_results = {}
if columns is None:
columns = input_dataframe.columns
for column in columns:
frequency, power_density = periodogram(input_dataframe[column], sampling_rate)
if norm:
power_density /= power_density.max()
else:
power_density *= 2 / (input_dataframe.shape[0] / sampling_rate)
pds_results[column] = power_density
return pd.DataFrame(pds_results, index=frequency)
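# Hedged usage sketch (added; synthetic data, not from the tremana test suite):
if __name__ == "__main__":
    fs = 128
    t = np.arange(10 * fs) / fs
    signal_df = pd.DataFrame({"x": np.sin(2 * np.pi * 5.0 * t)})  # pure 5 Hz tone
    spectrum = fft_spectra(signal_df, sampling_rate=fs, norm=True)
    print(spectrum["x"].idxmax())  # expected to peak at 5.0 Hz, the tone frequency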
| 31.181818 | 86 | 0.662901 | 354 | 2,744 | 5.014124 | 0.259887 | 0.078873 | 0.036056 | 0.056338 | 0.541972 | 0.541972 | 0.541972 | 0.541972 | 0.541972 | 0.541972 | 0 | 0.010254 | 0.253644 | 2,744 | 87 | 87 | 31.54023 | 0.856445 | 0.43586 | 0 | 0.45 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.125 | 0 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41d69abf160b8ce1e4074dd51d9496b0510b87af | 12,633 | py | Python | Prod_CV_NLP_API/flask/app.py | micintron/computer_vission_OCR | 1fdd521b334f6e5958958ccf816341531b783a21 | [
"CNRI-Python"
] | 1 | 2021-02-25T09:52:46.000Z | 2021-02-25T09:52:46.000Z | Prod_CV_NLP_API/flask/app.py | micintron/computer_vission_OCR | 1fdd521b334f6e5958958ccf816341531b783a21 | [
"CNRI-Python"
] | null | null | null | Prod_CV_NLP_API/flask/app.py | micintron/computer_vission_OCR | 1fdd521b334f6e5958958ccf816341531b783a21 | [
"CNRI-Python"
] | null | null | null | """ API to grab text content from images ID's and pdf's.
Endpoints
---------
* GET /: root: shows API info to new users on startup
* POST /pdf: convert_pdf_to_image: converts a PDF document to images and extracts their text
* POST /passport: passport: extracts target text-based information from a passport
* POST /image: image: extracts target text-based information from a JPG or PNG image
USAGE
-----
Run local:
    run app.py in a virtual env after installing the requirements files
    You should then be able to navigate to localhost:5000; a welcome message indicates the API is operational
"""
import os
import json
import logging
from flask import Flask, request, make_response, jsonify
from werkzeug.utils import secure_filename
from passporteye.mrz.image import MRZPipeline
from passporteye import read_mrz
from pdfUtil import pdf_to_png
try:
from PIL import Image
except ImportError:
import Image
import pytesseract
import cv2
import numpy as np
import re
from random import *
from flask_cors import CORS# CORS allows cross origin requests from web browsers
from extract_image_data import *
from nlp_ops import sentiment_analysis_score
from nlpbot import NLPBot
from scanner import scan_barcode_image
# new additions: easyocr for driver's license OCR (install via: pip install easyocr)
import easyocr
reader = easyocr.Reader(['es', 'en'], gpu=False)
# for running locally
#UPLOAD_FOLDER = 'uploads'
#EDIT_FOLDER = 'edit'
# for docker build
UPLOAD_FOLDER = '/uploads'
EDIT_FOLDER = '/edit'
MAXIMUM_IMAGE_ROTATIONS = 3
app = Flask(__name__)
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
#Endpoint Routes
@app.route('/')
def root():
"""Get and return root text response from API
Parameters
----------
None
Returns
-------
    str : welcome message listing the available endpoints
"""
    return 'Welcome! The endpoints for images are <b>/passport</b>, <b>/image</b> and <b>/barcode</b> (file key: imagefile); the endpoint for PDFs is <b>/pdf</b> (file key: pdf)'
@app.route('/pdf', methods=['POST'])
def convert_pdf_to_image():
"""Post a pdf file for conversion to image format for data extraction
Parameters
----------
None
Returns
-------
    json list of text strings extracted from the pages of the original pdf
"""
# Get PDF file from request and save to local directory
pdfFile = request.files.get('pdf', None)
if not pdfFile:
return make_response("Missing file parameter", 400)
filename = secure_filename(pdfFile.filename)
full_path = os.path.join(UPLOAD_FOLDER, filename)
pdfFile.save(full_path)
# Convert PDF file to image
png_path_array = pdf_to_png(full_path)
# Convert image to text
text_array = []
for png_path in png_path_array:
converted_text = image_to_string(png_path)
text_array.append(converted_text)
return jsonify(text_array)
@app.route('/passport', methods=['POST'])
def passport():
"""Post a passport image file for text data to be extracted
Parameters
----------
None
Returns
-------
json format - text data feilds extracted from the passport
"""
imagefile = request.files.get('imagefile', None)
if not imagefile:
return make_response("Missing file parameter", 400)
mrz, full_content = get_image_content(imagefile)
if mrz is None:
return make_response("Can not read image", 400)
mrz_data = mrz.to_dict()
all_infos = {}
all_infos['last_name'] = mrz_data['surname'].upper()
all_infos['first_name'] = mrz_data['names'].upper()
all_infos['country_code'] = mrz_data['country']
all_infos['country'] = get_country_name(all_infos['country_code'])
all_infos['nationality'] = get_country_name(mrz_data['nationality'])
all_infos['number'] = mrz_data['number']
all_infos['sex'] = mrz_data['sex']
# all_infos['full_text'] = full_content
valid_score = mrz_data['valid_score']
# Trying to extract full name
if all_infos['last_name'] in full_content:
splitted_fulltext = full_content.split("\n")
for w in splitted_fulltext:
if all_infos['last_name'] in w:
all_infos['last_name'] = w
continue
splitted_firstname = all_infos['first_name'].split(" ")
if splitted_firstname[0] in full_content:
splitted_fulltext = full_content.split("\n")
for w in splitted_fulltext:
if splitted_firstname[0] in w:
all_infos['first_name'] = clean_name(w)
continue
    # strip MRZ filler and OCR artifact characters from the surname
all_infos['last_name'] = all_infos['last_name'].replace('>','')
all_infos['last_name'] = all_infos['last_name'].replace('<','')
all_infos['last_name'] = all_infos['last_name'].replace('$','')
    # if the sex field could not be read as M/F, fall back to a random guess
s = all_infos['sex'].upper()
s = s.strip()
if(s != 'M' and s !='F'):
i = randint(0, 1)
if(i ==0):
s ='M'
else:
s='F'
all_infos['sex'] = s
return jsonify(all_infos)
@app.route('/image', methods=['POST'])
def image():
"""Post an image file for text data to be extracted
Parameters
----------
None
Returns
-------
json format - text data extracted from the image png or jpg
"""
imagefile = request.files.get('imagefile', None)
if not imagefile:
return make_response("Missing file parameter", 400)
filename = secure_filename(imagefile.filename)
full_path = os.path.join(UPLOAD_FOLDER, filename)
imagefile.save(full_path)
text = ''
try:
# Convert image to text
im = cv2.imread(full_path)
imC = clean_image(im)
text = pytesseract.image_to_string(imC, lang ='eng')
if text == "":
text = pytesseract.image_to_string(im, lang ='eng')
# logging.info('full image content = %s' %(full_content))
except:
text = 'Error : Can Not Read the current Image'
return jsonify(text)
@app.route('/nlpbot', methods=['POST'])
def nlpbot():
"""Post a pdf, text, vtt or other file and get a summary back
Parameters
----------
None
Returns
-------
json format - text data extracted from the image png or jpg
"""
# Get PDF file from request and save to local directory
pdfFile = request.files.get('pdf', None)
if not pdfFile:
return make_response("Missing file parameter", 400)
filename = secure_filename(pdfFile.filename)
full_path = os.path.join(UPLOAD_FOLDER, filename)
pdfFile.save(full_path)
nlpbot = NLPBot(infile_path=full_path)
nlpbot.summarize()
result = {"original_text": nlpbot.text, "summary_text": nlpbot.final_text}
return jsonify(result)
@app.route('/nlp_sa', methods=['POST'])
def nlp_sa():
"""Post a list of text and get sentiment analysis reports back on the data
Parameters
----------
None
Returns
-------
json format - text data and response report scores
"""
    # extract from json request body - {"words": ["list of words"]}
data = request.json
words = data["words"]
result = sentiment_analysis_score(words)
return jsonify(result)
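# Hedged client sketch for the endpoint above (added; URL is a placeholder):
#
#   requests.post("http://localhost:5000/nlp_sa",
#                 json={"words": ["great service", "terrible delay"]})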
@app.route('/barcode', methods=['POST'])
def barcode():
"""Post a barcode image file for text data to be extracted
Parameters
----------
imagefile
Returns
-------
json format - text data extracted from the image png or jpg
"""
imagefile = request.files.get('imagefile', None)
if not imagefile:
return make_response("Missing file parameter", 400)
filename = secure_filename(imagefile.filename)
full_path = os.path.join(UPLOAD_FOLDER, filename)
imagefile.save(full_path)
text = ''
try:
# Convert image to text
text = scan_barcode_image(full_path)
except:
return make_response("Error processing image", 500)
return jsonify(text)
@app.route('/drivers_license', methods=['POST'])
def drivers_license():
"""Post an image file for text data to be extracted
Parameters
----------
None
Returns
-------
json format - text data extracted from the image png or jpg
example -
{"name":"JANICE ANN","address":"123 MAIN STREET, AARRISBURG, PA 17101-0000","state":"Pennsylvana",
"class":"A","sex":"F","height":"5'-06\"","eyes":"BRO","dob":"08/04/1975","exp":"08/05/2023"}
"""
imagefile = request.files.get('imagefile', None)
text = ''
if not imagefile:
return make_response("Missing file parameter", 400)
try:
# Convert DL to text
img = adjust_image(imagefile)
text = reader.readtext(img, detail=0)
parcetext={}
other_info =[]
        # parse the OCR lines into labeled fields
i = -1
for x in text:
try:
x = str(x).upper()
x = str(x).replace('$','S')
i+=1
s = x.split(":")
if(len(s)>1):
s=s[1]
else:
s=x
if 'DL' in x:
parcetext['DLN']=s
continue
if 'CLASS' in x:
parcetext['CLASS']=s
continue
if 'SEX' in x:
parcetext['SEX']=s
continue
if 'HGT' in x:
parcetext['HGT']=s
continue
if 'WGT' in x:
parcetext['WGT']=s
continue
if 'EXP' in x:
parcetext['EXP']=s
continue
if 'EYE' in x:
parcetext['EYES']=s
continue
if 'ISS' in x:
parcetext['ISS']=s
if len(x)<7:
parcetext['ISS']=s+" "+ text[i-1]
continue
                if 'DOB' in x or 'D0B' in x:
parcetext['DOB']=s
continue
                if 'DD' in x or '00:' in x:
parcetext['DD']=s
continue
if 'DUPS' in x:
parcetext['DUPS']=s
continue
if(len(x)>0):
other_info.append(x)
except:
continue
parcetext['personal_info'] =other_info
except:
parcetext = 'Error : Can Not Read the current Image'
return jsonify(parcetext)
@app.route('/drivers_license_raw', methods=['POST'])
def drivers_license_raw():
"""Post an image file for text data to be extracted
Parameters
----------
None
Returns
-------
json format - text data extracted from the image png or jpg
"""
imagefile = request.files.get('imagefile', None)
text = ''
if not imagefile:
return make_response("Missing file parameter", 400)
try:
# Convert DL to text
img = adjust_image(imagefile)
text = reader.readtext(img, detail=0)
except:
text = 'Error : Can Not Read the current Image'
return jsonify(text)
@app.route('/simple_summary', methods=['POST'])
def simple_summary():
"""Post a list of text and get sentiment analysis reports back on the data
Parameters
----------
None
Returns
-------
json format - text data and response report scores
"""
#extract from json response - {"text": "Text to be summarized"}
data = request.json
nlpbot = NLPBot(text=data["text"])
nlpbot.summarize()
result = {"original_text": nlpbot.text, "summary_text": nlpbot.final_text}
return jsonify(result)
@app.route('/ner', methods=['POST'])
def ner():
"""Post a list of text and get sentiment analysis reports back on the data
Parameters
----------
None
Returns
-------
json format - text data and response report scores
"""
#extract from json response - {"text": "Text for Named Entity Recognition"}
data = request.json
nlpbot = NLPBot(text=data["text"])
nlpbot.ner()
result = {"original_text": nlpbot.text, "ner_text": nlpbot.tags}
return jsonify(result)
if __name__ == "__main__":
CORS(app)
app.run(host="0.0.0.0", debug=True)
| 27.887417 | 181 | 0.576269 | 1,539 | 12,633 | 4.607537 | 0.191033 | 0.027077 | 0.018615 | 0.022564 | 0.483571 | 0.447187 | 0.422084 | 0.422084 | 0.422084 | 0.39811 | 0 | 0.009807 | 0.305866 | 12,633 | 452 | 182 | 27.949115 | 0.798837 | 0.256155 | 0 | 0.382979 | 0 | 0.004255 | 0.135544 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046809 | false | 0.021277 | 0.093617 | 0 | 0.225532 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41d72f12694e72053874661915e1331274883431 | 5,540 | py | Python | py/umpire/server/service/multicast_unittest.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 3 | 2022-01-06T16:52:52.000Z | 2022-03-07T11:30:47.000Z | py/umpire/server/service/multicast_unittest.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | null | null | null | py/umpire/server/service/multicast_unittest.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 1 | 2021-10-24T01:47:22.000Z | 2021-10-24T01:47:22.000Z | #!/usr/bin/env python3
#
# Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from unittest import mock
from cros.factory.umpire.server.service import multicast
from cros.factory.utils import json_utils
DEFAULT_PORT = 8080
TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
def TestData(filename):
return os.path.join(TESTDATA_DIR, filename)
class GenerateConfigTest(unittest.TestCase):
def setUp(self):
self.payload = json_utils.LoadFile(
os.path.join(TESTDATA_DIR, 'example_payload.json'))
def testEnableAll(self):
_SERVICE_CONFIG_ENABLE_ALL = {
'mgroup': '224.1.2.3',
'server_ip': '192.168.1.1',
'required_components': {
"release_image": True,
"test_image": True,
"toolkit": True
}
}
generated_config = multicast.MulticastService.GenerateConfig(
_SERVICE_CONFIG_ENABLE_ALL, self.payload, DEFAULT_PORT)
expected_config = json_utils.LoadFile(
TestData('mcast_config_enable_all.json'))
self.assertEqual(generated_config, expected_config)
def testEnableToolkit(self):
_SERVICE_CONFIG_ENABLE_TOOLKIT = {
'mgroup': '224.1.2.3',
'server_ip': '192.168.1.1',
'required_components': {
"release_image": False,
"test_image": False,
"toolkit": True
}
}
generated_config = multicast.MulticastService.GenerateConfig(
_SERVICE_CONFIG_ENABLE_TOOLKIT, self.payload, DEFAULT_PORT)
expected_config = json_utils.LoadFile(
TestData('mcast_config_enable_toolkit.json'))
self.assertEqual(generated_config, expected_config)
def testDefaultValues(self):
# Enable one component here to test default mgroup value.
_SERVICE_CONFIG_DEFAULT_VALUES = {
'required_components': {
"test_image": True
}
}
generated_config = multicast.MulticastService.GenerateConfig(
_SERVICE_CONFIG_DEFAULT_VALUES, self.payload, DEFAULT_PORT)
expected_config = json_utils.LoadFile(
TestData('mcast_config_default_values.json'))
self.assertEqual(generated_config, expected_config)
def testNoServerIp(self):
"""Test when `server_ip` is assigned, but `mgroup` is not given."""
_SERVICE_CONFIG_NO_SERVER_IP = {
'mgroup': '224.1.2.3',
'required_components': {
"test_image": True
}
}
generated_config = multicast.MulticastService.GenerateConfig(
_SERVICE_CONFIG_NO_SERVER_IP, self.payload, DEFAULT_PORT)
expected_config = json_utils.LoadFile(
TestData('mcast_config_no_server_ip.json'))
self.assertEqual(generated_config, expected_config)
def testAutoAssignMgroup(self):
"""Test auto assigning `mgroup` from server_ip."""
_SERVICE_CONFIG_AUTO_ASSIGN_MGROUP = {
'server_ip': '192.168.12.34',
'required_components': {
"test_image": True
}
}
generated_config = multicast.MulticastService.GenerateConfig(
_SERVICE_CONFIG_AUTO_ASSIGN_MGROUP, self.payload, DEFAULT_PORT)
expected_config = json_utils.LoadFile(
TestData('mcast_config_auto_assign_mgroup.json'))
self.assertEqual(generated_config, expected_config)
def testBadMgroup(self):
_SERVICE_CONFIG_BAD_MGROUP = {
'mgroup': '123456',
'required_components': {
"test_image": True
}
}
with self.assertRaises(AssertionError):
multicast.MulticastService.GenerateConfig(_SERVICE_CONFIG_BAD_MGROUP,
self.payload, DEFAULT_PORT)
def testAutoAssignMgroupWithBadServerIp(self):
_SERVICE_CONFIG_BAD_SERVER_IP = {
'server_ip': '123456',
'required_components': {
"test_image": True
}
}
# Raised by the `.group()` call from a None object returned by `re.search`.
with self.assertRaises(AttributeError):
multicast.MulticastService.GenerateConfig(_SERVICE_CONFIG_BAD_SERVER_IP,
self.payload, DEFAULT_PORT)
class MulticastServiceTest(unittest.TestCase):
_DUMMY_MCAST_CONFIG = {
'dummy_key': 'dummy_value'
}
_FAKE_UMPIRE_CONFIG = {
'services': {
'multicast': {}
}
}
_FAKE_UMPIRE_BASE_DIR = 'umpire_base_dir'
_FAKE_MCAST_RESOURCE_NAME = 'multicast.32d4f1f4ba53b174acc8aa0a68fb53bd.json'
@mock.patch('cros.factory.utils.file_utils.ForceSymlink')
@mock.patch(multicast.__name__ + '.MulticastService.GenerateConfig')
def testCreateProcesses(self, mock_generate_config, mock_force_sym_link):
mock_generate_config.return_value = self._DUMMY_MCAST_CONFIG
mock_env = mock.MagicMock()
mock_env.base_dir = self._FAKE_UMPIRE_BASE_DIR
mock_env.AddConfigFromBlob.return_value = self._FAKE_MCAST_RESOURCE_NAME
ret = multicast.MulticastService().CreateProcesses(self._FAKE_UMPIRE_CONFIG,
mock_env)
self.assertEqual(ret, [])
mock_env.AddConfigFromBlob.assert_called_once_with(
json_utils.DumpStr(self._DUMMY_MCAST_CONFIG, pretty=True),
'multicast_config')
mock_force_sym_link.assert_called_once_with(
os.path.join('resources', self._FAKE_MCAST_RESOURCE_NAME),
os.path.join(self._FAKE_UMPIRE_BASE_DIR, 'multicast_config.json'))
if __name__ == '__main__':
unittest.main()
| 34.197531 | 80 | 0.687545 | 609 | 5,540 | 5.881773 | 0.249589 | 0.05081 | 0.076214 | 0.089894 | 0.485483 | 0.398381 | 0.336404 | 0.336404 | 0.265215 | 0.265215 | 0 | 0.018672 | 0.216968 | 5,540 | 161 | 81 | 34.409938 | 0.807054 | 0.075271 | 0 | 0.283465 | 0 | 0 | 0.15472 | 0.058754 | 0 | 0 | 0 | 0 | 0.07874 | 1 | 0.07874 | false | 0 | 0.03937 | 0.007874 | 0.173228 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41dae6ca6afa36e98373f82982a75e2c50d8cc74 | 571 | py | Python | DigitRecognition/go-test.py | shifuture/kaggle-join | 8cc8fb6042982cba1d9a0eced1488c5a13557e80 | [
"MIT"
] | null | null | null | DigitRecognition/go-test.py | shifuture/kaggle-join | 8cc8fb6042982cba1d9a0eced1488c5a13557e80 | [
"MIT"
] | null | null | null | DigitRecognition/go-test.py | shifuture/kaggle-join | 8cc8fb6042982cba1d9a0eced1488c5a13557e80 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
import csv
import numpy as np
def loadTestData():
l=[]
    with open('./data/test.csv') as file:
        lines = csv.reader(file)
        for line in lines:
            # binarize: keep 0 for blank pixels, 1 for any ink
            l.append([0 if e == '0' else 1 for e in line])
    # drop the CSV header row
    l.remove(l[0])
    data = np.array(l, int)
data=data.reshape(28000,784)
return data
tests = loadTestData()
for i in range(len(tests)):
with open('./extract_tdata/line_%d.txt'%i, 'w') as file:
file.write("\n".join(str(e) for e in tests[i].reshape(28,28)))
| 24.826087 | 70 | 0.595447 | 96 | 571 | 3.520833 | 0.5625 | 0.047337 | 0.035503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036446 | 0.231173 | 571 | 22 | 71 | 25.954545 | 0.733485 | 0.103328 | 0 | 0 | 0 | 0 | 0.090373 | 0.053045 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41dc7c7b8b8afe1b3ff6333c9f01816fc91e0652 | 54,070 | py | Python | src/funcFit/TutorialExampleSanity.py | mirofedurco/PyAstronomy | b0e5806a18bde647654e6c9de323327803722864 | [
"MIT"
] | 98 | 2015-01-01T12:46:05.000Z | 2022-02-13T14:17:36.000Z | src/funcFit/TutorialExampleSanity.py | mirofedurco/PyAstronomy | b0e5806a18bde647654e6c9de323327803722864 | [
"MIT"
] | 46 | 2015-02-10T19:53:38.000Z | 2022-01-11T17:26:05.000Z | src/funcFit/TutorialExampleSanity.py | mirofedurco/PyAstronomy | b0e5806a18bde647654e6c9de323327803722864 | [
"MIT"
] | 38 | 2015-01-08T17:00:34.000Z | 2022-03-04T05:15:22.000Z | from __future__ import print_function, division
import unittest
import os
class ExampleSanity(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def sanity_firstExample(self):
# Import numpy and matplotlib
from numpy import arange, sqrt, exp, pi, random, ones
import matplotlib.pylab as plt
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
# Before we can start fitting, we need something to fit.
# So let us create some data...
# Creating a Gaussian with some noise
# Choose some parameters...
gPar = {"A":-5.0, "sig":10.0, "mu":10.0, "off":1.0, "lin":0.0}
# Calculate profile
x = arange(100) - 50.0
y = gPar["off"] + gPar["A"] / sqrt(2*pi*gPar["sig"]**2) \
* exp(-(x-gPar["mu"])**2/(2*gPar["sig"]**2))
# Add some noise
y += random.normal(0.0, 0.01, x.size)
# Let us see what we have done...
plt.plot(x, y, 'bp')
# Now we can start exploiting the funcFit functionality to
# fit a Gaussian to our data. In the following lines, we
# create a fitting object representing a Gaussian and set guess parameters.
# Now let us come to the fitting
# First, we create the Gauss1d fit object
gf = fuf.GaussFit1d()
# See what parameters are available
print("List of available parameters: ", gf.availableParameters())
# Set guess values for the parameters
gf["A"] = -10.0
gf["sig"] = 15.77
gf["off"] = 0.87
gf["mu"] = 7.5
# Let us see whether the assignment worked
print("Parameters and guess values: ")
print(" A : ", gf["A"])
print(" sig : ", gf["sig"])
print(" off : ", gf["off"])
print(" mu : ", gf["mu"])
print("")
# Now some of the strengths of funcFit are demonstrated; namely, the
# ability to consider some parameters as free and others as fixed.
# By default, all parameters of the GaussFit1d are frozen.
# Show values and names of frozen parameters
print("Names and values of FROZEN parameters: ", gf.frozenParameters())
# Which parameters shall be variable during the fit?
# 'Thaw' those (the order is irrelevant)
gf.thaw(["A", "sig", "off", "mu"])
# Let us assume that we know that the amplitude is negative, i.e.,
# no lower boundary (None) and 0.0 as upper limit.
gf.setRestriction({"A":[None,0.0]})
# Now start the fit
gf.fit(x, y, yerr=ones(x.size)*0.01)
# Write the result to the screen and plot the best fit model
gf.parameterSummary()
plt.plot(x, gf.model, 'r--')
# Show the data and the best fit model
# plt.show()
def sanity_CustomModel(self):
# Import numpy and matplotlib
from numpy import arange, random
import matplotlib.pylab as plt
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
class StraightLine(fuf.OneDFit):
"""
Implements a straight line of the form y = "off" + x * "lin".
"""
def __init__(self):
fuf.OneDFit.__init__(self, ["off", "lin"])
def evaluate(self, x):
"""
Calculates and returns model according to the \
current parameter values.
Parameters:
- `x` - Array specifying the positions at \
which to evaluate the model.
"""
y = self["off"] + (self["lin"] * x)
return y
# Generate some data and add noise
x = arange(100)
y = 10.0 + 2.0 * x + random.normal(0.0, 5.0, 100)
# Create fitting class instance and set initial guess
# Note that all parameters are frozen by default
lf = StraightLine()
lf["off"] = 20.0
lf["lin"] = 1.0
# Thaw parameters
lf.thaw(["off", "lin"])
# Start fitting
lf.fit(x, y)
# Investigate the result
lf.parameterSummary()
plt.plot(x, y, 'bp')
plt.plot(x, lf.model, 'r--')
# plt.show()
def sanity_Relations(self):
# import numpy and matplotlib
from numpy import arange, random
import matplotlib.pylab as plt
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
class StraightLine(fuf.OneDFit):
"""
Implements a straight line of the form y = "off" + x * "lin".
"""
def __init__(self):
fuf.OneDFit.__init__(self, ["off", "lin"])
def evaluate(self, x):
"""
Calculates and returns model according to the current parameter values.
Parameters:
- x - Array specifying the positions at which to evaluate the model.
"""
y = self["off"] + (self["lin"] * x)
return y
# Create a function, which defines the relation.
def getLinearRelation(factor):
def linOffRel(off):
"""
Function used to relate parameters "lin" and "off".
"""
return factor * off
return linOffRel
# Note, above we used a nested function (a closure) to define
# the relation. This approach is very flexible. If we were already
# sure about the value of ``factor'' (e.g., 10.0), we could
# simply have used:
#
# def linOffRel(off):
# return 10.0 * off
# Generate some data with noise
x = arange(100)
y = 100.0 + 2.0 * x + random.normal(0.0, 5.0, 100)
# Create fitting class instance and set initial guess
lf = StraightLine()
lf["off"] = 20.0
lf["lin"] = 1.0
# Thaw parameters
lf.thaw(["off", "lin"])
# Assume we know about a relation between 'lin' and 'off'
# In particular, lin = 9.0 * off. We use the function getLinearRelation
# to obtain a function object defining the relation.
lf.relate("lin", ["off"], getLinearRelation(9))
# Start fitting
lf.fit(x, y)
# Investigate the result
lf.parameterSummary()
plt.plot(x, y, 'bp')
plt.plot(x, lf.model, 'r--')
# plt.show()
def sanity_CombiningModels(self):
# Import numpy and matplotlib
from numpy import arange, sqrt, exp, pi, random, ones
import matplotlib.pylab as plt
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
# Creating Gaussians with some noise
# Choose some parameters...
gPar1 = {"A":-5.0, "sig":10.0, "mu":20.0, "off":1.0, "lin":0.0}
gPar2 = {"A":+10.0, "sig":10.0, "mu":-20.0, "off":0.0, "lin":0.0}
# Calculate profile
x = arange(100) - 50.0
y = gPar1["off"] + gPar1["A"] / sqrt(2*pi*gPar1["sig"]**2) \
* exp(-(x-gPar1["mu"])**2/(2*gPar1["sig"]**2))
y -= gPar2["off"] + gPar2["A"] / sqrt(2*pi*gPar2["sig"]**2) \
* exp(-(x-gPar2["mu"])**2/(2*gPar2["sig"]**2))
# Add some noise
y += random.normal(0.0, 0.01, x.size)
# Let us see what we have done...
plt.plot(x, y, 'bp')
# Now let us come to the fitting
# First, we create two Gauss1d fit objects
gf1 = fuf.GaussFit1d()
gf2 = fuf.GaussFit1d()
# Assign guess values for the parameters
gf1["A"] = -0.3
gf1["sig"] = 3.0
gf1["off"] = 0.0
gf1["mu"] = +5.0
gf2["A"] = 3.0
gf2["sig"] = 15.0
gf2["off"] = 1.0
gf2["mu"] = -10.0
# Which parameters shall be variable during the fit?
# 'Thaw' those (the order is irrelevant)
gf1.thaw(["A", "sig", "mu"])
gf2.thaw(["sig", "mu", "off"])
# Our actual model is the sum of both Gaussians
twoG = gf1 + gf2
# Show a description of the model depending on the
# names of the individual components
print()
print("Description of the model: ", twoG.description())
print()
# Note that now the parameter names changed!
# Each parameter is now named using the "property"
# (e.g., 'A' or 'sig') as the first part, the component
# "root name" (in this case 'Gaussian') and a component
# number in parenthesis.
print("New parameter names and values: ")
twoG.parameterSummary()
# We forgot to thaw the amplitude of the second Gaussian, but
# we can still do it, but we have to refer to the correct name:
# either by using the (new) variable name:
twoG.thaw("A_Gaussian(2)")
# or by specifying property name, root name, and component number
# separately (note that a tuple is used to encapsulate them):
twoG.thaw(("A", "Gaussian", 2))
# We decide to rather freeze the offset of the second
# Gaussian (we could have used a tuple here, too).
twoG.freeze("off_Gaussian(2)")
# Start fit as usual
twoG.fit(x,y,yerr=ones(x.size)*0.01)
# Write the result to the screen and plot the best fit model
print()
print("--------------------------------")
print("Parameters for the combined fit:")
print("--------------------------------")
twoG.parameterSummary()
# Show the data and the best fit model
plt.plot(x, twoG.model, 'r--')
# plt.show()
def sanity_CustomObjectiveFunctions(self):
# Import numpy and matplotlib
from numpy import arange, exp, random, ones, sum, abs
import matplotlib.pylab as plt
# Import funcFit
from PyAstronomy import funcFit as fuf
# Define parameters of faked data
A = 1.0
tau = 10.
off = 0.2
t0 = 40.
# Calculate fake data set
x = arange(100)
y = A*exp(-(x-t0)/tau) * (x>t0) + off
y += random.normal(0., 0.1, 100)
yerr = ones(100)*0.01
# Exponential decay model
edf = fuf.ExpDecayFit1d()
# Define free quantities
edf.thaw(["A", "tau", "off", "t0"])
# Let the amplitude be positive
edf.setRestriction({"A":[0.0,None]})
# Define initial guess
edf.assignValue({"A":1.0, "tau": 15., "off":0.2, "t0":50.})
# Do not use chi square, but the linear deviation from model
# to evaluate quality of fit.
# Use the "MiniFunc" decorator to define your custom objective
# function. This decorator takes the fitting object as an
# argument. The function has to accept two arguments: the
# fitting object and the list of free parameters.
@fuf.MiniFunc(edf)
def mini(edf, P):
m = sum(abs(edf.model - edf.y)/edf.yerr)
print("mini - current parameters: ", P, ", value is: ", m)
return m
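# (By the time `mini` is called, the MiniFunc decorator has already
# assigned the trial parameter vector P to `edf` and updated `edf.model`,
# so the deviation computed above refers to the current parameter set.)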
# Carry out fit WITH SELF-DEFINED OBJECTIVE FUNCTION
edf.fit(x, y, yerr=yerr, miniFunc=mini)
# Show parameter values and plot best-fit model.
edf.parameterSummary()
plt.errorbar(x,y,yerr)
plt.plot(x, edf.model, 'r-')
# plt.show()
def sanity_Overbinning(self):
# Import numpy and matplotlib
from numpy import arange, sqrt, exp, pi, random, ones
import matplotlib.pylab as plt
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
# Creating a Gaussian with some noise
# Choose some parameters...
gPar = {"A":-5.0, "sig":10.0, "mu":10.0, "off":1.0, "lin":0.0}
# Calculate profile
x = arange(20)/20.0 * 100.0 - 50.0
y = gPar["off"] + gPar["A"] / sqrt(2*pi*gPar["sig"]**2) \
* exp(-(x-gPar["mu"])**2/(2*gPar["sig"]**2))
# Add some noise
y += random.normal(0.0, 0.01, x.size)
# Let us see what we have done...
plt.plot(x, y, 'bp')
# First, we create a "GaussFit1d_Rebin" class object (note that the
# class object still has to be instantiated; the name is arbitrary).
GaussFit1d_Rebin = fuf.turnIntoRebin(fuf.GaussFit1d)
# Do the instantiation and specify how the overbinning should be
# carried out.
gf = GaussFit1d_Rebin()
gf.setRebinArray_Ndt(x, 10, x[1]-x[0])
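# (The arguments to setRebinArray_Ndt are the binned time axis, the
# number of subpoints evaluated per bin (10 here), and the bin width,
# taken as the spacing of the x axis.)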
# See what parameters are available
print("List of available parameters: ", gf.availableParameters())
# Set guess values for the parameters
gf["A"] = -10.0
gf["sig"] = 15.77
gf["off"] = 0.87
gf["mu"] = 7.5
# Let us see whether the assignment worked
print("Parameters and guess values: ")
print(" A : ", gf["A"])
print(" sig : ", gf["sig"])
print(" off : ", gf["off"])
print(" mu : ", gf["mu"])
print("")
# Now some of the strengths of funcFit are demonstrated; namely, the
# ability to consider some parameters as free and others as fixed.
# By default, all parameters of the GaussFit1d are frozen.
# Show values and names of frozen parameters
print("Names and values if FROZEN parameters: ", gf.frozenParameters())
# Which parameters shall be variable during the fit?
# 'Thaw' those (the order is irrelevant)
gf.thaw(["A", "sig", "off", "mu"])
# Let us assume that we know that the amplitude is negative, i.e.,
# no lower boundary (None) and 0.0 as upper limit.
gf.setRestriction({"A":[None,0.0]})
# Now start the fit
gf.fit(x, y, yerr=ones(x.size)*0.01)
# Write the result to the screen and plot the best fit model
gf.parameterSummary()
# Plot the final best-fit model
plt.plot(x, gf.model, 'rp--')
# Show the overbinned (=unbinned) model; indicate by color
# which points are averaged to obtain a point in the binned
# model.
for k, v in gf.rebinIdent.items():
c = "y"
if k % 2 == 0: c = "k"
plt.plot(gf.rebinTimes[v], gf.unbinnedModel[v], c+'.')
# Show the data and the best fit model
# plt.show()
def sanity_simultaneousFit(self):
from PyAstronomy import funcFit as fuf
import numpy
import matplotlib.pylab as plt
# Set up two different x axes.
x1 = numpy.arange(100.)/100. - 0.5
x2 = numpy.arange(150.)/150. - 0.25
# Getting the models ...
gauss = fuf.GaussFit1d()
calor = fuf.CauchyLorentz1d()
# and assign parameters.
gauss.assignValue({"A":0.02, "sig":0.1, "mu":0.0, "off":1.0, "lin":0.0})
calor.assignValue({"A":0.07, "g":0.1, "mu":0.2, "off":1.0, "lin":0.0})
# Create noisy data.
y1 = gauss.evaluate(x1) + numpy.random.normal(0., 0.01, 100)
y2 = calor.evaluate(x2) + numpy.random.normal(0., 0.01, 150)
# Plot the noisy data.
plt.subplot(2,1,1)
plt.errorbar(x1, y1, yerr=numpy.ones(100)*0.01)
plt.subplot(2,1,2)
plt.errorbar(x2, y2, yerr=numpy.ones(150)*0.01)
# Now, get ready to fit the data sets simultaneously.
sf = fuf.SyncFitContainer()
# Tell the class about the two components and save the
# component numbers assigned to them:
gaussCno = sf.addComponent(gauss)
calorCno = sf.addComponent(calor)
print("Component numbers in the syncFit container:")
print(" Gauss: ", gaussCno, ", Cauchy-Lorentz: ", calorCno)
print()
# See what happened to the parameters in the
# simultaneous fitting class.
# The variable names have changed.
sf.parameterSummary()
# Thaw all parameters (for later fit) ...
sf.thaw(list(sf.parameters()))
# but not the linear term.
sf.freeze(["lin_Gaussian[s1]", "lin_CauLor[s2]"])
# Tell the class about the identity of parameters,
# either by using the "property name" of the parameter:
sf.treatAsEqual("off")
# or by specifying the names explicitly.
sf.treatAsEqual(["g_CauLor[s2]", "sig_Gaussian[s1]"])
# See what happened to the parameters in the
# simultaneous fitting class.
print()
print("Parameters after 'treatAsEqual' has been applied:")
sf.parameterSummary()
# Randomize starting values.
for fp in sf.freeParamNames():
sf[fp] = sf[fp] + numpy.random.normal(0., 0.05)
# Set up the data appropriately.
data = {gaussCno:[x1, y1], calorCno:[x2, y2]}
yerr = {gaussCno: numpy.ones(100)*0.01, \
calorCno: numpy.ones(150)*0.01}
# Start the fit.
sf.fit(data, yerr=yerr)
# Show the best-fit values.
print()
print("Best-fit parameters:")
sf.parameterSummary()
# Plot the best-fit model(s).
plt.subplot(2,1,1)
plt.plot(x1, sf.models[gaussCno], 'r--')
plt.subplot(2,1,2)
plt.plot(x2, sf.models[calorCno], 'r--')
# plt.show()
def sanity_2dCircularFit(self):
import numpy as np
import matplotlib.pylab as plt
from PyAstronomy import funcFit as fuf
# Get the circular model and assign
# parameter values
c = fuf.Circle2d()
c["r"] = 1.0
c["t0"] = 0.0
c["per"] = 3.0
# Evaluate the model at a number of
# time stamps
t = np.linspace(0.0, 10.0, 20)
pos = c.evaluate(t)
# Add some error to the "measurement"
pos += np.reshape(np.random.normal(0.0, 0.2, pos.size), pos.shape)
err = np.reshape(np.ones(pos.size), pos.shape) * 0.2
# Define free parameters and fit the model
c.thaw(["r", "t0", "per"])
c.fit(t, pos, yerr=err)
c.parameterSummary()
# Evaluate the model at a larger number of
# points for plotting
tt = np.linspace(0.0, 10.0, 200)
model = c.evaluate(tt)
# Plot the result
plt.errorbar(pos[::,0], pos[::,1], yerr=err[::,1], \
xerr=err[::,0], fmt='bp')
plt.plot(model[::,0], model[::,1], 'r--')
# plt.show()
def sanity_2dGaussFit(self):
from PyAstronomy import funcFit as fuf
import numpy as np
import matplotlib.pylab as plt
# Constructing the individual coordinate axes
x = np.linspace(-2.,2.,50)
y = np.linspace(-2.,2.,50)
# Applying funcFit's "coordinateGrid" helper function
# to build an appropriate array-index -> coordinate mapping
# needed for nD fitting.
g = fuf.coordinateGrid(x, y)
# Create the 2d-Gaussian model and assign
# some model parameters.
gf = fuf.GaussFit2d()
gf["sigx"] = 0.75
gf["sigy"] = 0.4
gf["A"] = 1.0
gf["rho"] = 0.4
# Get the "data" by evaluating the model
# and adding some noise. Note that the coordinate
# mapping (array g) is passed to evaluate here.
im = gf.evaluate(g)
im += np.reshape(np.random.normal(0.0, 0.1, 2500), (50,50))
err = np.ones((50,50))*0.1
# Thaw parameters and fit
gf.thaw(["A", "rho"])
gf.fit(g, im, yerr=err)
# Show the resulting parameter values ...
gf.parameterSummary()
# ... and plot the result.
plt.title("Image data")
plt.imshow(np.transpose(im), origin="lower")
# plt.show()
plt.title("Residuals")
plt.imshow(np.transpose(im - gf.evaluate(g)), origin="lower")
# plt.show()
def sanity_2gGaussFitTupleExample(self):
from PyAstronomy import funcFit as fuf
import numpy as np
import matplotlib.pylab as plt
# Constructing the individual coordinate axes
x = np.linspace(-2.,2.,50)
y = np.linspace(-2.,2.,50)
# Create the 2d-Gaussian model and assign
# some model parameters.
gf = fuf.GaussFit2dTuple()
gf["sigx"] = 0.75
gf["sigy"] = 0.4
gf["A"] = 1.0
gf["rho"] = 0.4
# Get the "data" by evaluating the model
# and adding some noise. Note that the coordinate
# mapping (array g) is passed to evaluate here.
im = gf.evaluate((x,y))
im += np.reshape(np.random.normal(0.0, 0.1, 2500), (50,50))
err = np.ones((50,50))*0.1
# Thaw parameters and fit
gf.thaw(["A", "rho"])
gf.fit((x,y), im, yerr=err)
# Show the resulting parameter values ...
gf.parameterSummary()
# ... and plot the result.
plt.title("Image data")
plt.imshow(np.transpose(im), origin="lower")
# plt.show()
plt.title("Residuals")
plt.imshow(np.transpose(im - gf.evaluate((x,y))), origin="lower")
# plt.show()
def sanity_coordinateGridExample(self):
from PyAstronomy import funcFit as fuf
import numpy as np
# Constructing the two individual coordinate axes
x = np.linspace(-2.,2.,50)
y = np.linspace(-2.,2.,50)
# Applying funcFit's "coordinateGrid" helper function
# to build an appropriate array-index -> coordinate mapping
# needed for nD fitting.
g = fuf.coordinateGrid(x, y)
print("(x, y) coordinates at index (11, 28): ", g[11,28])
def sanity_CashStatisticsExample(self):
import numpy as np
import matplotlib.pylab as plt
from PyAstronomy import funcFit as fuf
# Get a Gaussian fitting object and
# set some parameters
g = fuf.GaussFit1d()
g["A"] = 5.1
g["sig"] = 0.5
g["mu"] = 3.94
# Generate some data with Poisson statistics
x = np.linspace(0.0, 7., 50)
y = np.zeros(len(x))
for i in range(len(x)):
y[i] = np.random.poisson(g.evaluate(x[i]))
# Choose free parameters and "disturb" the
# starting parameters for the fit a little.
g.thaw(["A", "sig", "mu"])
for par in g.freeParamNames():
g[par] += np.random.normal(0.0, g[par]*0.1)
# Fit using Cash statistic and print out
# result.
g.fit(x, y, miniFunc="cash79")
g.parameterSummary()
# Plot the result
plt.plot(x, y, 'bp')
plt.plot(x, g.evaluate(x), 'r--')
# plt.show()
def sanity_steppar1(self):
import numpy as np
import matplotlib.pylab as plt
from PyAstronomy import funcFit as fuf
# Set up a Gaussian model
# and create some "data"
x = np.linspace(0,2,100)
gf = fuf.GaussFit1d()
gf["A"] = 0.87
gf["mu"] = 1.0
gf["sig"] = 0.2
y = gf.evaluate(x)
y += np.random.normal(0.0, 0.1, len(x))
# Thaw parameters, which are to be fitted. Note
# that those parameters will also be fitted during
# the stepping; no further parameters will be thawed.
gf.thaw(["A", "mu", "sig"])
# ... and "disturb" starting values a little.
gf["A"] = gf["A"] + np.random.normal(0.0, 0.1)
gf["mu"] = gf["mu"] + np.random.normal(0.0, 0.1)
gf["sig"] = gf["sig"] + np.random.normal(0.0, 0.03)
# Find the best fit solution
gf.fit(x, y, yerr=np.ones(len(x))*0.1)
# Step the amplitude (area of the Gaussian) through
# the range 0.8 to 0.95 in 20 steps. Note that the
# last part of `ranges` ('lin') is optional. You may
# also use `log`; in this case, the stepping would be
# equidistant in the logarithm.
# In each step of `A`, "mu" and "sig" will be fitted,
# because they had been thawed earlier.
sp = gf.steppar("A", ranges={"A":[0.8, 0.95, 20, 'lin']})
# Extract the values for the Gaussian normalization
# (amplitude) ...
As = list(map(lambda x:x[0], sp))
# ... and chi square.
chis = list(map(lambda x:x[1], sp))
# Find minimum chi square
cmin = min(chis)
# Plot A vs. chi square
plt.title(r'A vs. $\chi^2$ with 68% and 90% confidence levels')
plt.xlabel("A")
plt.ylabel(r"$\chi^2$")
plt.plot(As, chis, 'bp-')
plt.plot(As, [cmin+1.0]*len(As), 'k--')
plt.plot(As, [cmin+2.706]*len(As), 'k:')
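# (Delta chi-square values of 1.0 and 2.706 above the minimum mark the
# 68.3% and 90% confidence levels for a single parameter of interest.)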
# plt.show()
def sanity_steppar2(self):
import numpy as np
import matplotlib.pylab as plt
from PyAstronomy import funcFit as fuf
# Set up a Gaussian model
# and create some "data"
x = np.linspace(0,2,100)
gf = fuf.GaussFit1d()
gf["A"] = 0.87
gf["mu"] = 1.0
gf["sig"] = 0.2
y = gf.evaluate(x)
y += np.random.normal(0.0, 0.1, len(x))
# Thaw parameters, which are to be fitted ...
gf.thaw(["A", "mu", "sig"])
# ... and "disturb" starting values a little.
gf["A"] = gf["A"] + np.random.normal(0.0, 0.1)
gf["mu"] = gf["mu"] + np.random.normal(0.0, 0.1)
gf["sig"] = gf["sig"] + np.random.normal(0.0, 0.03)
# Find the best fit solution
gf.fit(x, y, yerr=np.ones(len(x))*0.1)
# Step the amplitude (area of the Gaussian) and the
# center ("mu") of the Gaussian through the given
# ranges.
sp = gf.steppar(["A", "mu"], ranges={"A":[0.8, 0.95, 20], \
"mu":[0.96,1.05,15]})
# Get the values for `A`, `mu`, and chi-square
# from the output of steppar.
As = list(map(lambda x:x[0], sp))
mus = list(map(lambda x:x[1], sp))
chis = list(map(lambda x:x[2], sp))
# Create a chi-square array using the
# indices contained in the output.
z = np.zeros((20, 15))
for s in sp:
z[s[3]] = s[2]
# Find minimum chi-square and define levels
# for 68%, 90%, and 99% confidence intervals.
cm = min(chis)
levels = [cm+2.3, cm+4.61, cm+9.21]
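# (For two jointly estimated parameters, delta chi-square values of
# 2.30, 4.61, and 9.21 correspond to 68%, 90%, and 99% confidence.)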
# Plot the contours to explore the confidence
# interval and correlation.
plt.xlabel("mu")
plt.ylabel("A")
plt.contour(np.sort(np.unique(mus)), np.sort(np.unique(As)), z, \
levels=levels)
# Plot the input value
plt.plot([1.0], [0.87], 'k+', markersize=20)
# plt.show()
def sanity_errorConfInterval(self):
"""
Checking example of errorConfInterval
"""
import numpy as np
import matplotlib.pylab as plt
from PyAstronomy import funcFit as fuf
# Set up a Gaussian model
# and create some "data"
x = np.linspace(0,2,100)
gf = fuf.GaussFit1d()
gf["A"] = 0.87
gf["mu"] = 1.0
gf["sig"] = 0.2
y = gf.evaluate(x)
y += np.random.normal(0.0, 0.1, len(x))
# Thaw parameters, which are to be fitted. Note
# that those parameters will also be fitted during
# the stepping; no further parameters will be thawed.
gf.thaw(["A", "mu", "sig"])
# ... and "disturb" starting values a little.
gf["A"] = gf["A"] + np.random.normal(0.0, 0.1)
gf["mu"] = gf["mu"] + np.random.normal(0.0, 0.1)
gf["sig"] = gf["sig"] + np.random.normal(0.0, 0.03)
# Find the best fit solution
gf.fit(x, y, yerr=np.ones(len(x))*0.1)
# Step the amplitude (area of the Gaussian) through
# the range 0.8 to 0.95 in 20 steps. Note that the
# last part of `ranges` ('lin') is optional. You may
# also use `log`; in this case, the stepping would be
# equidistant in the logarithm.
# In each step of `A`, "mu" and "sig" will be fitted,
# because they had been thawed earlier.
sp = gf.steppar("A", ranges={"A":[0.8, 0.95, 20, 'lin']})
# Extract the values for the Gaussian normalization
# (amplitude) ...
As = [x[0] for x in sp]
# ... and chi square.
chis = [x[1] for x in sp]
# Calculate the confidence interval automatically
cfi90 = gf.errorConfInterval("A", dstat=2.706)
print("90% Confidence interval: ", cfi90["limits"])
print(" corresponding objective function values: ", cfi90["OFVals"])
print(" number of iterations needed: ", cfi90["iters"])
cfi68 = gf.errorConfInterval("A", dstat=1.0)
print("68% Confidence interval: ", cfi68["limits"])
print(" corresponding objective function values: ", cfi68["OFVals"])
print(" number of iterations needed: ", cfi68["iters"])
# Plot A vs. chi square
plt.title(r'A vs. $\chi^2$ 90% (black) and 68% (blue) confidence intervals')
plt.xlabel("A")
plt.ylabel(r"$\chi^2$")
plt.plot(As, chis, 'bp-')
# Indicate the confidence levels by horizontal lines
plt.plot(As, [cfi90["OFMin"]+1.0]*len(As), 'g:')
plt.plot(As, [cfi90["OFMin"]+2.706]*len(As), 'g:')
# Plot vertical lines to indicate the confidence intervals
plt.plot([cfi90["limits"][0]]*2, [min(chis), max(chis)], 'k--')
plt.plot([cfi90["limits"][1]]*2, [min(chis), max(chis)], 'k--')
plt.plot([cfi68["limits"][0]]*2, [min(chis), max(chis)], 'b--')
plt.plot([cfi68["limits"][1]]*2, [min(chis), max(chis)], 'b--')
# plt.show()
def sanity_conditionalRestrictions(self):
"""
Check the conditional restriction example.
"""
import numpy as np
import matplotlib.pylab as plt
from PyAstronomy import funcFit as fuf
# Get fitting object for a Gaussian ...
g = fuf.GaussFit1d()
# .. and define the parameters
g["A"] = 0.97
g["mu"] = 0.1
g["sig"] = 0.06
# Generate some "data" with noise included
x = np.linspace(-1.0,1.0,200)
y = g.evaluate(x) + np.random.normal(0.0, 0.1, len(x))
yerr = np.ones(len(x)) * 0.1
def myRestriction(A, sig):
"""
A conditional restriction.
Returns
-------
Penalty : float
A large value if condition is violated
and zero otherwise.
"""
if A > 10.0*sig:
return np.abs(A-10.0*sig + 1.0)*1e20
return 0.0
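# (The penalty returned here is added to the objective function during
# the fit, so parameter combinations with A > 10*sig are effectively
# excluded from the solution.)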
# Add the conditional restriction to the model and save
# the unique ID, which can be used to refer to that
# restriction.
uid = g.addConditionalRestriction(["A", "sig"], myRestriction)
print("Conditional restriction has been assigned the ID: ", uid)
print()
# Now see whether the restriction is really in place
g.showConditionalRestrictions()
# Define free parameters ...
g.thaw(["A", "mu", "sig"])
# ... and fit the model (restriction included)
g.fit(x, y, yerr=yerr)
# Save the resulting best-fit model
restrictedModel = g.model.copy()
# Remove the conditional restriction and re-fit
g.removeConditionalRestriction(uid)
g.fit(x, y, yerr=yerr)
# Save new model
unrestrictedModel = g.model.copy()
# Plot the result
# plt.errorbar(x, y, yerr=yerr, fmt='b.')
# plt.plot(x, restrictedModel, 'r--', label="Restricted")
# plt.plot(x, unrestrictedModel, 'g--', label="Unrestricted")
# plt.legend()
# plt.show()
class MCMCExampleSanity(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
try:
os.remove("mcmcExample.tmp")
except:
print("Could not remove file: mcmcExample.tmp")
try:
os.remove("mcmcTA.tmp")
except:
print("Could not remove file: mcmcTA.tmp")
try:
os.remove("mcmcSample.tmp")
except:
print("Could not remove file: mcmcSample.tmp")
try:
os.remove("chain.emcee")
except:
pass
try:
os.remove("gauss.emcee")
except:
print("Could not remove file: gauss.emcee")
try:
os.remove("musig.emcee")
except:
print("Could not remove file: musig.emcee")
def sanity_MCMCSampler(self):
# Import some required modules
from numpy import arange, sqrt, exp, pi, random, ones
import matplotlib.pylab as plt
import pymc
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
# Creating a Gaussian with some noise
# Choose some parameters...
gPar = {"A":-5.0, "sig":10.0, "mu":10.0, "off":1.0, "lin":0.0}
# Calculate profile
x = arange(100) - 50.0
y = gPar["off"] + gPar["A"] / sqrt(2*pi*gPar["sig"]**2) \
* exp(-(x-gPar["mu"])**2/(2*gPar["sig"]**2))
# Add some noise
y += random.normal(0.0, 0.01, x.size)
# Now let us come to the fitting
# First, we create the Gauss1d fit object
gf = fuf.GaussFit1d()
# See what parameters are available
print("List of available parameters: ", gf.availableParameters())
# Set guess values for the parameters
gf["A"] = -10.0
gf["sig"] = 15.77
gf["off"] = 0.87
gf["mu"] = 7.5
# Let us see whether the assignment worked
print("Parameters and guess values: ", gf.parameters())
# Which parameters shall be variable during the fit?
# 'Thaw' those (the order is irrelevant)
gf.thaw(["A", "sig", "off", "mu"])
# Now start a simplex fit
gf.fit(x,y,yerr=ones(x.size)*0.01)
# Obtain the best-fit values derived by the simplex fit.
# They are to be used as start values for the MCMC sampling.
# Note that 'A' is missing - we will introduce this later.
X0 = {"sig":gf["sig"], "off":gf["off"], "mu":gf["mu"]}
# Now we specify the limits within which the individual parameters
# can be varied (for those parameters listed in the 'X0' dictionary).
Lims = {"sig":[-20.,20.], "off":[0.,2.], "mu":[5.,15.]}
# For the parameters contained in 'X0', define the step widths, which
# are to be used by the MCMC sampler. The steps are specified using
# the same scale/units as the actual parameters.
steps = {"A":0.01, "sig":0.1, "off":0.1, "mu":0.1}
# In this example, we wish to define our "own" PyMC variable for the parameter
# 'A'. This can be useful if nonstandard behavior is desired. Note that this
# is an optional parameter and you could simply include the parameter 'A' in
# the framework of X0, Lims, and steps.
ppa = {}
ppa["A"] = pymc.Uniform("A", value=gf["A"], lower=-20., \
upper=10.0, doc="Amplitude")
# Start the sampling. The resulting Markov chain will be written
# to the file 'mcmcExample.tmp'. In the default configuration, pickle
# is used to write that file.
# To save the chain to a compressed 'hdf5'
# file, you have to specify the dbArgs keyword; e.g., use:
# dbArgs = {"db":"hdf5", "dbname":"mcmcExample.hdf5"}
gf.fitMCMC(x, y, X0, Lims, steps, yerr=ones(x.size)*0.01, \
pymcPars=ppa, iter=2500, burn=0, thin=1, \
dbfile="mcmcExample.tmp")
# Reload the database (here, this is actually not required, but it is
# if the Markov chain is to be analyzed later).
db = pymc.database.pickle.load('mcmcExample.tmp')
# Plot the trace of the amplitude, 'A'.
plt.hist(db.trace("A", 0)[:])
# plt.show()
def sanity_MCMCPriorExample(self):
from PyAstronomy import funcFit as fuf
import numpy as np
import matplotlib.pylab as plt
import pymc
# Create a Gauss-fit object
gf = fuf.GaussFit1d()
# Choose some parameters
gf["A"] = -0.65
gf["mu"] = 1.0
gf["lin"] = 0.0
gf["off"] = 1.1
gf["sig"] = 0.2
# Simulate data with noise
x = np.linspace(0., 2., 100)
y = gf.evaluate(x)
y += np.random.normal(0, 0.05, len(x))
gf.thaw(["A", "off", "mu", "sig"])
# Set up a normal prior for the offset parameter
# Note!---The name (first parameter) must correspond to that
# of the parameter.
# The expectation value is set to 0.9 while the width is given
# as 0.01 (tau = 1/sigma**2). The starting value is specified
# as 1.0.
offPar = pymc.Normal("off", mu=0.9, tau=(1./0.01)**2, value=1.0)
# Use a uniform prior for mu.
muPar = pymc.Uniform("mu", lower=0.95, upper=0.97, value=0.96)
# Collect the "extra"-variables in a dictionary using
# their names as keys
pymcPars = {"mu":muPar, "off":offPar}
# Specify starting values, X0, and limits, lims, for
# those parameter distributions not given specifically.
X0 = {"A":gf["A"], "sig":gf["sig"]}
lims = {"A":[-1.0,0.0], "sig":[0., 1.0]}
# Still, the steps dictionary has to contain all
# parameter distributions.
steps = {"A":0.02, "sig":0.02, "mu":0.01, "off":0.01}
# Carry out the MCMC sampling
gf.fitMCMC(x, y, X0, lims, steps, yerr=np.ones(len(x))*0.05, \
pymcPars=pymcPars, burn=1000, iter=3000)
# Setting parameters to mean values
for p in gf.freeParameters():
gf[p] = gf.MCMC.trace(p)[:].mean()
# Show the "data" and model in the upper panel
plt.subplot(2,1,1)
plt.title("Data and model")
plt.errorbar(x, y, yerr=np.ones(len(x))*0.05, fmt="bp")
# Plot lowest deviance solution
plt.plot(x, gf.evaluate(x), 'r--')
# Show the residuals in the lower panel
plt.subplot(2,1,2)
plt.title("Residuals")
plt.errorbar(x, y-gf.evaluate(x), yerr=np.ones(len(x))*0.05, fmt="bp")
plt.plot([min(x), max(x)], [0.0,0.0], 'r-')
#plt.show()
def sanity_autoMCMCExample1(self):
from PyAstronomy import funcFit as fuf
import numpy as np
import matplotlib.pylab as plt
x = np.linspace(0,30,1000)
gauss = fuf.GaussFit1d()
gauss["A"] = 1
gauss["mu"] = 23.
gauss["sig"] = 0.5
# Generate some "data" to fit
yerr = np.random.normal(0., 0.05, len(x))
y = gauss.evaluate(x) + yerr
# Thaw the parameters A, mu, and sig
gauss.thaw(["A","mu","sig"])
# Define the ranges, which are used to construct the
# uniform priors and step sizes.
# Note that for "mu", we give only a single value.
# In this case, the limits for the uniform prior will
# be constructed as [m0-1.5, m0+1.5], where m0 is the
# current (starting) value of the parameter ('mu' is
# 23. in this case) and 3 is the total interval width.
ranges = {"A":[0,10],"mu":3, "sig":[0.1,1.0]}
# Generate default input for X0, lims, and steps
X0, lims, steps = gauss.MCMCautoParameters(ranges)
# Show what happened...
print()
print("Auto-generated input parameters:")
print("X0: ", X0)
print("lims: ", lims)
print("steps: ", steps)
print()
# Call the usual sampler
gauss.fitMCMC(x, y, X0, lims, steps, yerr=yerr, iter=1000)
# and plot the results
plt.plot(x, y, 'k+')
plt.plot(x, gauss.evaluate(x), 'r--')
# plt.show()
def sanity_autoMCMCExample2(self):
from PyAstronomy import funcFit as fuf
import numpy as np
import matplotlib.pylab as plt
x = np.linspace(0,30,1000)
gauss = fuf.GaussFit1d()
gauss["A"] = 1
gauss["mu"] = 23.
gauss["sig"] = 0.5
# Generate some "data" to fit
yerr = np.random.normal(0., 0.05, len(x))
y = gauss.evaluate(x) + yerr
# Define the ranges, which are used to construct the
# uniform priors and step sizes.
# Note that for "mu", we give only a single value.
# In this case, the limits for the uniform prior will
# be constructed as [m0-1.5, m0+1.5], where m0 is the
# current (starting) value of the parameter ('mu' is
# 23. in this case) and 3 is the total interval width.
ranges = {"A":[0,10],"mu":3, "sig":[0.1,1.0]}
# Call the auto-sampler
# Note that we set picky to False here. In this case, the
# parameters specified in ranges will be thawed automatically.
# All parameters not mentioned there, will be frozen.
gauss.autoFitMCMC(x, y, ranges, yerr=yerr, picky=False, iter=1000)
# and plot the results
plt.plot(x, y, 'k+')
plt.plot(x, gauss.evaluate(x), 'r--')
# plt.show()
def sanity_TAtut_createTrace(self):
"""
TA tutorial, all examples
"""
import numpy as np
import matplotlib.pylab as plt
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
# Starting with a Voigt profile
vp = fuf.Voigt1d()
# Set some values to create a model
vp["A"] = -0.4
vp["al"] = 0.7
vp["mu"] = 5500.
vp["ad"] = 0.3
vp["off"] = 1.0
x = np.linspace(5490., 5510., 200)
# Create our data with some noise
yerr = np.ones(len(x))*0.01
y = vp.evaluate(x) + np.random.normal(0.0, 0.01, len(x))
# Say, we have a guess of the parameters, which is, however,
# not entirely correct
vp["A"] = -0.376
vp["al"] = 0.9
vp["mu"] = 5499.7
vp["ad"] = 0.4
vp["off"] = 1.0
# Plot the data and our guess
plt.errorbar(x, y, yerr=yerr, fmt='b.-')
plt.plot(x, vp.evaluate(x), 'r--')
# plt.show()
# Thaw the parameters, which we wish to vary
# during the sampling
vp.thaw(["A", "al", "mu", "ad"])
# Use current parameters as starting point for the sampling
X0 = vp.freeParameters()
print("Starting point for sampling: ", X0)
# Now we specify the limits within which the individual parameters
# can be varied. Actually, you specify the limits of uniform priors
# here.
lims = {"A":[-1.0,0.0], "al":[0.0,3.], "ad":[0.0,3.0], "mu":[5495., 5505.]}
# Provide a guess for the proposal step widths.
# Try to guess the scale of the problem in the individual
# parameters.
steps = {"A":0.02, "al":0.01, "ad":0.01, "mu":0.05}
# Start the sampling. The resulting Markov chain will be written
# to the file 'mcmcTA.tmp'. In the default configuration, pickle
# is used to write that file.
# To save the chain to a compressed 'hdf5'
# file, you have to specify the dbArgs keyword; e.g., use:
# dbArgs = {"db":"hdf5", "dbname":"mcmcExample.hdf5"}
vp.fitMCMC(x, y, X0, lims, steps, yerr=yerr, \
iter=2500, burn=0, thin=1, \
dbfile="mcmcTA.tmp")
######## Second example
from PyAstronomy import funcFit as fuf
# Create an instance of TraceAnalysis
# telling it which file to use
ta = fuf.TraceAnalysis("mcmcTA.tmp")
# Have a look at the deviance to check if and when
# the chains reached equilibrium.
ta.plotTrace("deviance")
# ta.show()
# Say, we are sure that after 500 iterations, the chain
# reached equilibrium. We use this as the burn-in phase
ta.setBurn(500)
# Have a second look at the deviance, this time considering
# the burn-in. Note that the first 500 iterations are not
# removed from the chain. They are just not considered any
# more.
ta.plotTrace("deviance")
# ta.show()
######## Third example
from PyAstronomy import funcFit as fuf
# Create an instance of TraceAnalysis
# telling it which file to use
ta = fuf.TraceAnalysis("mcmcTA.tmp")
# Use the burn-in from the previous example
ta.setBurn(500)
# See which model parameters have been sampled
print("Available parameters: ", ta.availableParameters())
# Access the traces of these parameters
print("Trace for A: ", ta["A"])
# Calculate mean, median, standard deviation, and
# credibility interval for the available parameters
for p in ta.availableParameters():
hpd = ta.hpd(p, cred=0.95)
print("Parameter %5s, mean = % g, median = % g, std = % g, 95%% HPD = % g - % g" \
% (p, ta.mean(p), ta.median(p), ta.std(p), hpd[0], hpd[1]))
######## Fourth example
from PyAstronomy import funcFit as fuf
# Create an instance of TraceAnalysis
# telling it which file to use
ta = fuf.TraceAnalysis("mcmcTA.tmp")
# Use the burn-in from the previous example
ta.setBurn(500)
# Have a look at the parameter correlations
ta.correlationTable()
# Calculate Pearson's and Spearman's r-coefficients
print("Pearson: ", ta.pearsonr("ad", "al"))
print("Spearman: ", ta.spearmanr("ad", "al"))
# Show a plot of the correlation
# Note that the plotCorrEnh method can also
# be used, which is useful in the case of long
# chains.
ta.plotCorr(parsList=["ad", "al"])
# ta.plotCorrEnh(parsList=["ad", "al"])
# ta.show()
######## Fifth example
from PyAstronomy import funcFit as fuf
import matplotlib.pylab as plt
import numpy as np
# Create an instance of TraceAnalysis
# telling it which file to use
ta = fuf.TraceAnalysis("mcmcTA.tmp")
# Use the burn-in from the previous example
ta.setBurn(500)
# Find sets of parameters
# First, the lowest deviance set
lds, index = ta.parameterSet(prescription="lowestDev")
print("Lowest deviance set: ", lds)
print(" at chain index: ", index)
means = ta.parameterSet(prescription="mean")
print("Set of mean values: ", means)
medians = ta.parameterSet(prescription="median")
print("Set of median values: ", means)
# Create Voigt model and plot the models belonging
# to the lowest deviance, mean, and median parameter
# set.
vp = fuf.Voigt1d()
# Generate the model wavelength axis
x = np.linspace(5490., 5510., 200)
# Calculate and plot the models
vp.assignValues(lds)
plt.plot(x, vp.evaluate(x), 'b.-')
vp.assignValues(means)
plt.plot(x, vp.evaluate(x), 'r.-')
vp.assignValues(medians)
plt.plot(x, vp.evaluate(x), 'g.-')
# plt.show()
######## Sixth example
from PyAstronomy import funcFit as fuf
# Create an instance of TraceAnalysis
# telling it which file to use
ta = fuf.TraceAnalysis("mcmcTA.tmp")
# Use the burn-in from the previous example
ta.setBurn(500)
# Investigate a trace
ta.plotTrace("mu")
# ta.show()
# and its distribution.
ta.plotHist("mu")
# ta.show()
# Combine trace and distribution
ta.plotTraceHist("mu")
# ta.show()
# Plot correlations
ta.plotCorr(parsList=["mu", "ad", "al"])
# ta.show()
def sanity_MCMCautoParameters(self):
"""
Checking sanity of MCMCautoParameters
"""
from PyAstronomy import funcFit as fuf
import numpy as np
import matplotlib.pylab as plt
x = np.linspace(0,30,1000)
gauss = fuf.GaussFit1d()
gauss["A"] = 1
gauss["mu"] = 23.
gauss["sig"] = 0.5
yerr = np.random.normal(0., 0.05, len(x))
y = gauss.evaluate(x) + yerr
# This step is not necessary if <picky>=False in MCMCautoParameters.
gauss.thaw(["A","mu","sig"])
X0, lims, steps = gauss.MCMCautoParameters({"A":[0,10],"mu":3, "sig":[0.1,1.0]})
gauss.fitMCMC(x, y, X0, lims, steps, yerr=yerr, iter=1000)
# plt.plot(x, y, 'k+')
# plt.plot(x, gauss.evaluate(x), 'r--')
# plt.show()
def sanity_EMCEEfirstexample(self):
# Import numpy and matplotlib
from numpy import arange, sqrt, exp, pi, random, ones
import matplotlib.pylab as plt
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
# Before we can start fitting, we need something to fit.
# So let us create some data...
# Choose some signal-to-noise ratio
snr = 25.0
# Creating a Gaussian with some noise
# Choose some parameters...
gf = fuf.GaussFit1d()
gf.assignValues({"A":-5.0, "sig":2.5, "mu":10.0, "off":1.0, "lin":0.0})
# Calculate profile
x = arange(100) - 50.0
y = gf.evaluate(x)
# Add some noise
y += random.normal(0.0, 1.0/snr, x.size)
# Define the free parameters
gf.thaw(["A", "sig", "mu", "off"])
# Start a fit (quite dispensable here)
gf.fit(x, y, yerr=ones(x.size)/snr)
# Say, we want 200 burn-in iterations and, thereafter,
# 1000 further iterations (per walker).
sampleArgs = {"iters":1000, "burn":200}
# Start the sampling (ps could be used to continue the sampling)
ps = gf.fitEMCEE(x, y, yerr=ones(x.size)/snr, sampleArgs=sampleArgs)
# Plot the distributions of the chains
# NOTE: the order of the parameters in the chain object is the same
# as the order of the parameters returned by freeParamNames()
for i, p in enumerate(gf.freeParamNames()):
plt.subplot(len(gf.freeParamNames()), 1, i+1)
plt.hist(gf.emceeSampler.flatchain[::,i], label=p)
plt.legend()
# plt.show()
def sanity_EMCEEpriorexample(self):
# Import numpy and matplotlib
from numpy import arange, sqrt, exp, pi, random, ones
import matplotlib.pylab as plt
# ... and now the funcFit package
from PyAstronomy import funcFit as fuf
import numpy as np
# Before we can start fitting, we need something to fit.
# So let us create some data...
# Choose some signal-to-noise ratio
snr = 25.0
# Choosing an arbitrary constant and ...
c = 10.0
# ... an equally arbitrary number of data points
npoint = 10
# Define 'data'
x = arange(npoint)
y = np.ones(len(x)) * c
# Add some noise
y += random.normal(0.0, 1.0/snr, x.size)
# A funcFit object representing a constant
pf = fuf.PolyFit1d(0)
pf["c0"] = c
# The only parameter shall be free
pf.thaw("c0")
# Say, we want 200 burn-in iterations and, thereafter,
# 2500 further iterations (per walker).
sampleArgs = {"iters":2500, "burn":200}
# Start the sampling (ps could be used to continue the sampling)
ps = pf.fitEMCEE(x, y, yerr=ones(x.size)/snr, sampleArgs=sampleArgs)
print()
# Plot the distributions of the chains
# NOTE: the order of the parameters in the chain object is the same
# as the order of the parameters returned by freeParamNames()
h = plt.hist(pf.emceeSampler.flatchain[::,0], label="c0", normed=True)
# Construct "data points" in the middle of the bins
xhist = (h[1][1:] + h[1][0:-1]) / 2.0
yhist = h[0]
# Fit the histogram using a Gaussian
gf = fuf.GaussFit1d()
gf.assignValues({"A":1.0, "mu":c, "sig":1.0/snr/np.sqrt(npoint)})
# First, fit only "mu"; this is more stable
gf.thaw("mu")
gf.fit(xhist, yhist)
gf.thaw(["A", "sig"])
gf.fit(xhist, yhist)
print()
print(" --- Sampling results ---")
print("Posterior estimate of constant: ", np.mean(pf.emceeSampler.flatchain[::,0]))
print("Nominal error of the mean: ", 1.0/snr/np.sqrt(npoint))
print("Estimate from Markov chain: ", np.std(pf.emceeSampler.flatchain[::,0]), end=' ')
print(" and from Gaussian fit to distribution: ", gf["sig"])
# Evaluate best-fit model ...
xmodel = np.linspace(c - 10.0/snr, c + 10.0/snr, 250)
ymodel = gf.evaluate(xmodel)
# ... and plot
plt.plot(xhist, yhist, 'rp')
plt.plot(xmodel, ymodel, 'r--')
plt.legend()
# plt.show()
# Defining a prior on c0. Prior knowledge tells us that its value
# is around 7. Let us choose the standard deviation of the prior so
# that the estimate will lie in the middle between 7 and 10. Here we
# exploit symmetry and make the prior information as strong as the
# information contained in the likelihood function.
priors = {"c0":fuf.FuFPrior("gaussian", sig=1.0/snr/np.sqrt(npoint), mu=7.0)}
# Start the sampling (ps could be used to continue the sampling)
ps = pf.fitEMCEE(x, y, yerr=ones(x.size)/snr, sampleArgs=sampleArgs, priors=priors)
print()
print(" --- Sampling results with strong prior information ---")
print("Posterior estimate of constant: ", np.mean(pf.emceeSampler.flatchain[::,0]), end=' ')
print(" +/-", np.std(pf.emceeSampler.flatchain[::,0]))
plt.hist(pf.emceeSampler.flatchain[::,0], label="c0", normed=True)
# plt.show()
def sanity_InstatiatePrior(self):
from PyAstronomy import funcFit as fuf
# Instantiate prior
gp = fuf.FuFPrior("gaussian", sig=0.1, mu=1.0)
# Current values (arbitrary)
cvals = {"a":1.4, "b":0.86, "c":1.1}
# Get log(prior) for parameter "b"
print(gp(cvals, "b"))
def sanity_sampleEMCEE_sampleFromGaussian(self):
"""
Checking first sampleEMCEE example (sample from Gaussian distribution)
"""
import numpy as np
from PyAstronomy import funcFit as fuf
import matplotlib.pylab as plt
def lfGauss(v, sigma, mu):
"""
Gaussian density
Parameters
----------
v : dictionary
Holds current values of "x"
mu, sigma : float
Mean and standard deviation of the Gaussian. Specified via
the `largs` argument.
Returns
-------
lp : float
Natural logarithm of the density.
"""
result = 0.0
# Log(density)
result += -0.5*np.log(2.*np.pi*sigma**2) - (v["x"] - mu)**2/(2.*sigma**2)
return result
# Sampling arguments
# burn: Number of burn-in steps per walker
# iters: Number of iterations per walker
sa = {"burn":1000, "iters":5000}
# Starting values
fv0 = {"x":0.5}
# Specify standard deviation and mean of Gaussian
la = {"mu":0.5, "sigma":0.25}
# Sample from distribution
ps = fuf.sampleEMCEE(["x"], fv0, lfGauss, largs=la, sampleArgs=sa, nwalker=4, dbfile="gauss.emcee")
print()
# Use TraceAnalysis to look at chains
ta = fuf.TraceAnalysis("gauss.emcee")
print("Available chains: ", ta.availableParameters())
print("Mean and STD of chain: ", np.mean(ta["x"]), np.std(ta["x"]))
# Check distribution of chain
# Plot histogram of chain
# plt.hist(ta["x"], 60, normed=True)
# Overplot Gaussian model
xx = np.linspace(la["mu"]-6*la["sigma"], la["mu"]+6*la["sigma"], 1000)
yy = 1./np.sqrt(2.*np.pi*la["sigma"]**2) * np.exp(-(xx - la["mu"])**2/(2.*la["sigma"]**2))
# plt.plot(xx, yy, 'r--')
# plt.show()
def sanity_sampleEMCEE_estimateMuSig(self):
"""
Checking sampleEMCEE example (estimate mu and sigma)
"""
import numpy as np
from PyAstronomy import funcFit as fuf
def lfGaussMS(v, x=None):
"""
Gaussian posterior with 1/sigma prior on sigma.
Parameters
----------
v : dictionary
Holds current values of "sigma" and "mu"
x : array
The 'data' observed. Will be specified by the `largs` keyword.
Returns
-------
lp : float
Natural logarithm of the density.
"""
if v["sigma"] < 0.:
# Penalize negative standard deviations
return -1e20*abs(v["sigma"])
result = 0.0
# Apply prior on sigma
result -= np.log(v["sigma"])
# Add log(likelihood)
result += np.sum(-0.5*np.log(2.*np.pi*v["sigma"]**2) - (x - v["mu"])**2/(2.*v["sigma"]**2))
return result
# Sampling arguments
# burn: Number of burn-in steps per walker
# iters: Number of iterations per walker
sa = {"burn":1000, "iters":5000}
# Starting values
fv0 = {"sigma":1., "mu":1.}
# 'Observed' data
la = {"x":np.random.normal(0.,1.,1000)}
print("Mean of 'data': ", np.mean(la["x"]))
print("Standard deviation of 'data': ", np.std(la["x"]))
# Scale width for distributing the walkers
s = {"mu":0.01, "sigma":0.5}
ps = fuf.sampleEMCEE(["mu", "sigma"], fv0, lfGaussMS, largs=la, sampleArgs=sa, nwalker=4, \
scales=s, dbfile="musig.emcee")
print()
# Use TraceAnalysis to look at chains
ta = fuf.TraceAnalysis("musig.emcee")
print("Available chains: ", ta.availableParameters())
# ta.plotTraceHist('mu')
# ta.show()
#
# ta.plotTraceHist('sigma')
# ta.show()
| 32.435513 | 103 | 0.597263 | 7,861 | 54,070 | 4.100369 | 0.111691 | 0.005522 | 0.014116 | 0.014767 | 0.546552 | 0.516148 | 0.481029 | 0.459901 | 0.448795 | 0.434275 | 0 | 0.036962 | 0.261457 | 54,070 | 1,666 | 104 | 32.454982 | 0.770215 | 0.389643 | 0 | 0.498018 | 0 | 0.002642 | 0.10606 | 0.002009 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054161 | false | 0.005284 | 0.11889 | 0 | 0.191546 | 0.110964 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41de05126656eb0665e6b6dd493d706236d85602 | 2,905 | py | Python | virtual_machines/update-matching-table.py | AmoVanB/chameleon-end-host | 573e1dccdaf4ca2bebedc96a7b902e622c50acab | [
"Apache-2.0"
] | null | null | null | virtual_machines/update-matching-table.py | AmoVanB/chameleon-end-host | 573e1dccdaf4ca2bebedc96a7b902e622c50acab | [
"Apache-2.0"
] | null | null | null | virtual_machines/update-matching-table.py | AmoVanB/chameleon-end-host | 573e1dccdaf4ca2bebedc96a7b902e622c50acab | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
"""
This script, to be used by VM 0, sends a configuration
message to the virtual switch to create a particular
tagging and shaping rule.
Author: Amaury Van Bemten <amaury.van-bemten@tum.de>
"""
from scapy.all import *
import sys
# import scapy config
from scapy.all import conf as scapyconf
# disable scapy promiscuous mode since it is already in this mode
scapyconf.sniff_promisc = 0
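# Layout of the configuration payload assembled below (as constructed
# in update_matching_rule): kni_id (1 byte), rule_id (1), protocol (1),
# 3 padding bytes, source/destination IP (4+4 bytes), source/destination
# port (2+2, big endian), rate and burst (8+8, little endian), n_tokens
# and timestamp (8+8, little endian), number of tags (2, little endian),
# then per tag: 0x8100 TPID + tag value (2+2, big endian). The frame is
# sent with EtherType 0xbebe.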
def update_matching_rule(kni_id, rule_id, protocol, source_ip, destination_ip, source_port, destination_port, tags, rate_bps, burst_bits):
    payload = list(kni_id.to_bytes(1, byteorder = 'big'))
    payload += list(rule_id.to_bytes(1, byteorder = 'big'))
    payload += list(protocol.to_bytes(1, byteorder = 'big'))
    payload += list(int(0).to_bytes(3, byteorder = 'big'))

    if len(source_ip) != 4 or len(destination_ip) != 4:
        print("Source and destination IPs should be arrays of size 4")
        sys.exit(-1)

    for ip_elem in list(source_ip) + list(destination_ip):
        payload += list(ip_elem.to_bytes(1, byteorder = 'big'))

    payload += list(source_port.to_bytes(2, byteorder = 'big'))
    payload += list(destination_port.to_bytes(2, byteorder = 'big'))
    payload += list(rate_bps.to_bytes(8, byteorder = 'little')) # rate, in bits per second
    payload += list(burst_bits.to_bytes(8, byteorder = 'little')) # burst, in bits

    n_tokens = burst_bits
    rte_timestamp = int(10000)
    payload += list(n_tokens.to_bytes(8, byteorder = 'little')) # n_tokens, should be initially the same as burst, but later on it is converted to burst*cpu_freq
    payload += list(rte_timestamp.to_bytes(8, byteorder = 'little')) # timestamp, it will be overwritten anyway

    payload += list(len(tags).to_bytes(2, byteorder = 'little'))
    for tag in tags:
        payload += list(0x8100.to_bytes(2, byteorder = 'big'))
        payload += list(tag.to_bytes(2, byteorder = 'big'))

    frame = Ether(type=0xbebe) / Raw(payload)
    frame.show()
    sendp(frame, iface="eth1")
def clean_table():
    for kni_id in range(0, 20):
        for rule_id in range(0, 5):
            # Reset the rule to an all-zero match with no tags and no
            # shaping (update_matching_rule also requires rate_bps and
            # burst_bits, both 0 for a cleared rule).
            update_matching_rule(kni_id, rule_id, 0, [0, 0, 0, 0], [0, 0, 0, 0], 0, 0, [0, 0, 0, 0, 0], 0, 0)
if len(sys.argv) < 11:
print("Need at least 10 parameters")
sys.exit(-1)
kni_id = int(sys.argv[1])
rule_id = int(sys.argv[2])
protocol = int(sys.argv[3])
source_ip = [int(elem) for elem in sys.argv[4].split(".")]
destination_ip = [int(elem) for elem in sys.argv[5].split(".")]
source_port = int(sys.argv[6])
destination_port = int(sys.argv[7])
tags = [int(elem) for elem in sys.argv[8].split(",")]
rate_bps = int(sys.argv[9])
burst_bits = int(sys.argv[10])
if(len(tags) > 10):
print("At most 10 tags are allowed in the current implementation")
sys.exit(-1)
update_matching_rule(kni_id, rule_id, protocol, source_ip, destination_ip, source_port, destination_port, tags, rate_bps, burst_bits)
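# Example invocation (hypothetical values): install rule 0 on KNI 3 for
# a UDP flow (protocol 17) from 10.0.0.1:5000 to 10.0.0.2:6000, pushing
# VLAN tags 100 and 200, shaped to 1 Mbit/s with a 10 kbit burst:
#
#   ./update-matching-table.py 3 0 17 10.0.0.1 10.0.0.2 5000 6000 100,200 1000000 10000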
| 40.915493 | 161 | 0.683649 | 469 | 2,905 | 4.08742 | 0.294243 | 0.015649 | 0.021909 | 0.027126 | 0.346896 | 0.288472 | 0.288472 | 0.212833 | 0.115806 | 0.115806 | 0 | 0.031733 | 0.175559 | 2,905 | 70 | 162 | 41.5 | 0.768685 | 0.161102 | 0 | 0.06 | 0 | 0 | 0.082955 | 0 | 0 | 0 | 0.004953 | 0 | 0 | 1 | 0.04 | false | 0 | 0.06 | 0 | 0.1 | 0.06 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41df73d109c0b036f4dc5a0fd33804ce5856c662 | 1,351 | py | Python | radSeqAmp/_versioninfo.py | msettles/radSeqAmp | a89d1aa12601dcd7aba0e83b2ae28fc3ff76989f | [
"Apache-2.0"
] | null | null | null | radSeqAmp/_versioninfo.py | msettles/radSeqAmp | a89d1aa12601dcd7aba0e83b2ae28fc3ff76989f | [
"Apache-2.0"
] | null | null | null | radSeqAmp/_versioninfo.py | msettles/radSeqAmp | a89d1aa12601dcd7aba0e83b2ae28fc3ff76989f | [
"Apache-2.0"
] | null | null | null | # _versioninfo.py
#
# gets the version number from the package info
# checks it agains the github version
import sys
from pkg_resources import get_distribution, parse_version
try:
    _dist = get_distribution('radSeqAmp')
    version_num = _dist.version
except:
    version_num = 'Please install this project with setup.py'
version_master = "https://raw.githubusercontent.com/msettles/radSeqAmp/master/VERSION"
repo_master = "https://github.com/msettles/radSeqAmp"
version_develop = "https://raw.githubusercontent.com/msettles/radSeqAmp/develop/VERSION"
repo_develop = "https://github.com/msettles/radSeqAmp/tree/develop"
try:
    import urllib2
    github_version_num = urllib2.urlopen(version_master).readline().strip()
    if parse_version(github_version_num) > _dist.parsed_version:
        sys.stderr.write("A newer version (%s) of radSeqAmp is available at %s\n" % (github_version_num, repo_master))
    elif parse_version(github_version_num) < _dist.parsed_version:
        github_version_num = urllib2.urlopen(version_develop).readline().strip()
        if parse_version(github_version_num) > _dist.parsed_version:
            sys.stderr.write("A newer version (%s) of radSeqAmp is available at %s\n" % (github_version_num, repo_develop))
except:
    sys.stderr.write("Error retrieving github version_number\n")
__version__ = version_num
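# Note: urllib2 is Python 2 only; under Python 3 the equivalent call
# would be urllib.request.urlopen.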
| 40.939394 | 123 | 0.763138 | 180 | 1,351 | 5.472222 | 0.327778 | 0.101523 | 0.113706 | 0.093401 | 0.547208 | 0.484264 | 0.317767 | 0.317767 | 0.272081 | 0.272081 | 0 | 0.002566 | 0.134715 | 1,351 | 32 | 124 | 42.21875 | 0.840034 | 0.071799 | 0 | 0.26087 | 0 | 0 | 0.336269 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41df8366d44990c149549ad5a6aecb5e9bc2fcdb | 5,835 | py | Python | weekly_degradation.py | rajeevratan84/LTE-KPI-Anomaly-Detection | b5d3ce261f75b94956867645fd3479c0b2eb0cd8 | [
"MIT"
] | null | null | null | weekly_degradation.py | rajeevratan84/LTE-KPI-Anomaly-Detection | b5d3ce261f75b94956867645fd3479c0b2eb0cd8 | [
"MIT"
] | null | null | null | weekly_degradation.py | rajeevratan84/LTE-KPI-Anomaly-Detection | b5d3ce261f75b94956867645fd3479c0b2eb0cd8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from configuration.settings import Conf
from database.sql_connect import SQLDatabase
from KPIForecaster.forecaster import KPIForecaster
from datetime import datetime
import pandas as pd
import numpy as np
import time
import sys
import os.path
def findDegradation(df, weeks = 3):
    df_prev = df.shift(1)['DEGRADED']
    df_next = df.shift(-1)['DEGRADED']
    df_next2 = df.shift(-2)['DEGRADED']
    df_next3 = df.shift(-3)['DEGRADED']
    df_next4 = df.shift(-4)['DEGRADED']

    if weeks == 3:
        df.loc[(df_prev != 1) & (df['DEGRADED'] == 1) & (df_next == 1) & (df_next2 == 1), 'FLAG'] = 1
        #df.loc[(df['Degrade'] != 0) & (df_next == 0) & (df_next3 == 0), 'end'] = 1
    else:
        df.loc[(df_prev != 1) & (df['DEGRADED'] == 1) & (df_next == 1) & (df_next2 == 1) & (df_next3 == 1), 'FLAG'] = 1
        #df.loc[(df['Degrade'] != 0) & (df_next == 0) & (df_next4 == 0), 'end'] = 1

    df.fillna(0, inplace=True)
    df['FLAG'] = df['FLAG'].astype(int)
    #df['end'] = df['end'].astype(int)
    return df
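# (FLAG marks the first week of a run of at least three consecutive
# degraded weeks - four if `weeks` is not 3 - where the run is not
# itself preceded by a degraded week; DEGRADED is set further below as
# a week-over-week throughput drop of 5% or more.)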
def getConsecutiveSequencesWeekly(df):
    i = 0
    ind = []
    for index, row in df.iterrows():
        if row['FLAG'] == 1:
            ind.append(i)
        i += 1
    for i in ind:
        for j in range(1, 7):
            s = i + j
            # Columns are addressed by position here: column 5 is
            # 'DEGRADED' and column 6 is 'FLAG' in the result frame
            # assembled below.
            if df.iloc[i + j, 5] == 1:
                df.iloc[s, 6] = 1
            else:
                break
    return df
def getSummaryReport(df):
    dates = df['START_DATE'].unique()
    dates = pd.to_datetime(dates)
    dates = dates.sort_values()
    dates = dates[-3:]
    dates = pd.DataFrame(dates)
    dates[0] = dates[0].dt.strftime('%Y-%m-%d')
    recent = list(dates[0])

    flagged_only_df = df[df['FLAG'] == 1]
    recent_df = flagged_only_df[flagged_only_df['START_DATE'].isin(recent)]
    recent_df = recent_df.groupby(['CELL_NAME']).mean().reset_index()
    recent_df = recent_df[['CELL_NAME', 'DL_USER_THROUGHPUT_MBPS_AVERAGE',
                           'DL_USER_THROUGHPUT_MBPS_PCT_CHANGE']]
    #recent_df.loc[recent_df.FLAG > 0, 'FLAG'] = 1
    return recent_df
path = sys.argv[0].rsplit("/", 1)[0]
# Create configuration and Database connection and our KPI Forecaster Object
try:
    conf = Conf(os.path.join(path, "config.json"))
except:
    conf = Conf("config.json")
sql = SQLDatabase(conf)
# Creating out KPI Forecaster Object
KPIForecaster = KPIForecaster(conf)
#df_train = pd.read_csv('FT_CELL_NOV.csv')
# Starting Timer for benchmarking
T_START = time.time()
#df_train = sql.getHourlyKPIReportDegradation()
df_train = sql.getHourlyKPIReportXDays()
t0 = time.time()
completion_time = t0-T_START
print(f'[INFO] Total Time to Download Report: {completion_time}')
print("[INFO] Report Loaded")
# Remove the '(UTC-04:00)' suffix from the timestamps
df_train['START_TIME'] = df_train['START_TIME'].str.replace(r'\(UTC-04:00\)', '', regex=True)
# Set KPI here
KPI = 'DL_USER_THROUGHPUT_MBPS'
cell_names = df_train.CELL_NAME.unique()
df_train['START_TIME'] = pd.to_datetime(df_train['START_TIME'])
df_train['Week_Number'] = df_train['START_TIME'].dt.isocalendar().week
df_train['Year'] = df_train['START_TIME'].dt.year
df_train['YEAR_WEEK'] = df_train['Year'].astype(str) + "_" + df_train['Week_Number'].astype(str)
df_train['START_DATE'] = df_train['START_TIME'].dt.date
df_train['START_DATE'] = df_train['START_DATE'].astype(str)
start_dates = df_train[['START_DATE', 'YEAR_WEEK']].copy()
start_dates = start_dates.drop_duplicates(subset=['YEAR_WEEK'], keep='first').reset_index()
del start_dates['index']
df = pd.DataFrame()
appended_data = []
number_of_cells = len(cell_names)
for (i,cell_name) in enumerate(cell_names):
    df = df_train[df_train["CELL_NAME"] == cell_name]
    df2 = df.groupby(['CELL_NAME','YEAR_WEEK']).mean().pct_change().reset_index()
    df2['KEY'] = df2['CELL_NAME'] + df2['YEAR_WEEK']

    df3 = df.groupby(['CELL_NAME','YEAR_WEEK']).mean().reset_index()
    df3['KEY'] = df3['CELL_NAME'] + df3['YEAR_WEEK']
    df3 = df3[['DL_USER_THROUGHPUT_MBPS', 'KEY']].copy()

    df4 = pd.merge(df2, df3, on='KEY')
    df2 = df4.rename({"DL_USER_THROUGHPUT_MBPS_x": "DL_USER_THROUGHPUT_MBPS_PCT_CHANGE",
                      "DL_USER_THROUGHPUT_MBPS_y": "DL_USER_THROUGHPUT_MBPS_AVERAGE"
                      }, axis='columns')
    df2 = df2[['CELL_NAME','YEAR_WEEK', 'DL_USER_THROUGHPUT_MBPS_PCT_CHANGE',
               'DL_USER_THROUGHPUT_MBPS_AVERAGE']]
    df2 = df2.fillna(0)
    df2['DEGRADED'] = df2['DL_USER_THROUGHPUT_MBPS_PCT_CHANGE'].apply(lambda x: 1 if x <= -0.05 else 0)
    df2 = findDegradation(df2, 3)
    appended_data.append(df2)

    print(f'[INFO] {i+1} of {number_of_cells} completed.')
    #if i == 100:
    #    break
appended_data = pd.concat(appended_data, axis=0)
name = KPI + "_PCT_CHANGE"
appended_data = appended_data.rename({KPI: name,}, axis='columns')
result = pd.merge(appended_data, start_dates, on='YEAR_WEEK')
result = result.sort_values(['CELL_NAME','YEAR_WEEK'])
result = result[['CELL_NAME', 'YEAR_WEEK','START_DATE','DL_USER_THROUGHPUT_MBPS_AVERAGE',
'DL_USER_THROUGHPUT_MBPS_PCT_CHANGE','DEGRADED','FLAG']]
# Adding Flag Sequences
result = getConsecutiveSequencesWeekly(result)
result = result.fillna(0)
# Saving and Uploading to DWH
#path = "./Reports/DEGRADATION/"
#KPIForecaster.makeDir(path)
#date = datetime.today().strftime('%Y_%m_%d')
#file_name = path + "WEEKLY_DEGRADATION_REPORT_" + KPI + "_" + str(date) + ".csv"
#result.to_csv(file_name)
print("[INFO] Uploading Report to DWH.")
result['DL_USER_THROUGHPUT_MBPS_PCT_CHANGE'].replace(np.inf, 0, inplace=True)
sql.dumpToDWH(result, "KPI_DEGRADATION_WEEKLY", if_exists = 'append')
summary = getSummaryReport(result)
sql.deleteTable("KPI_DEGRADATION_WEEKLY_SUMMARY")
sql.dumpToDWH(summary, "KPI_DEGRADATION_WEEKLY_SUMMARY") | 35.150602 | 119 | 0.656041 | 833 | 5,835 | 4.343337 | 0.234094 | 0.042565 | 0.061913 | 0.077391 | 0.192371 | 0.155611 | 0.126866 | 0.095357 | 0.095357 | 0.095357 | 0 | 0.020515 | 0.18132 | 5,835 | 166 | 120 | 35.150602 | 0.736864 | 0.136247 | 0 | 0.035714 | 0 | 0 | 0.241633 | 0.100797 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026786 | false | 0 | 0.080357 | 0 | 0.133929 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41e5525e7a720e9de54e83ab3802a2d8a16f8134 | 7,937 | py | Python | starthinker/tool/example.py | Ressmann/starthinker | 301c5cf17e382afee346871974ca2f4ae905a94a | [
"Apache-2.0"
] | 138 | 2018-11-28T21:42:44.000Z | 2022-03-30T17:26:35.000Z | starthinker/tool/example.py | Ressmann/starthinker | 301c5cf17e382afee346871974ca2f4ae905a94a | [
"Apache-2.0"
] | 36 | 2019-02-19T18:33:20.000Z | 2022-01-24T18:02:44.000Z | starthinker/tool/example.py | Ressmann/starthinker | 301c5cf17e382afee346871974ca2f4ae905a94a | [
"Apache-2.0"
] | 54 | 2018-12-06T05:47:32.000Z | 2022-02-21T22:01:01.000Z | ###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
"""StarThinker generator for python examples.
Includes both the command line and libraries used by UI.
See main for usage description.
"""
import argparse
import textwrap
from starthinker.util.configuration import commandline_parser
from starthinker.util.recipe import dict_to_python
from starthinker.util.recipe import get_recipe
from starthinker.util.recipe import json_get_fields
from starthinker.util.recipe import json_expand_queries
DISCLAIMER = '''###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
'''
def parameters_to_argparse(description, instructions, parameters):
  code = ' parser = argparse.ArgumentParser(\n'
  code += ' formatter_class=argparse.RawDescriptionHelpFormatter,\n'
  code += ' description=textwrap.dedent("""\n'

  if description:
    code += ' %s\n' % description

  if instructions:
    code += '\n'
    for step, instruction in enumerate(instructions, 1):
      code += ' %d. %s\n' % (step, instruction)

  code += ' """))\n\n'
  code += ' parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)\n'
  code += ' parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)\n'
  code += ' parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)\n'
  code += ' parser.add_argument("-user", help="Path to USER credentials json file.", default=None)\n'
  code += ' parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)\n'
  code += ' parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")\n'
  code += '\n'

  for parameter in parameters:
    code += ' parser.add_argument("-%s", help="%s", default=%s)\n' % (parameter['name'], parameter.get('description', ''), repr(parameter.get('default')))
  code += '\n'

  return code
def recipe_to_python(name, description, instructions, tasks, parameters={}, project=None, client_credentials=None, user_credentials=None, service_credentials=None):
""" Converts a JSON recipe into a python stand alone example.
Sets up multiple steps to execute recipe:
1. Install starthinker from repository
2. Get Cloud Project ID.
3. Get Client Credentials ( optional if User Credentials exist ).
4. Enter Recipe parameters if fields present.
5. Execute recipe tasks.
Args:
* name: (string) The name of the notebook.
* description: (string) A description fo the recipe.
* instructions: (string) Recipe manual instructions, for example connecting datastudios.
* tasks: (list) The task JSON to execute.
* parameters: (dict) Values for field parameters in tasks, optional.
* project: (string) The GCP project id.
* client_credentials: (string) The GCP Desktop Client Credentials in JSON string.
* user_credentials: (string) Not used, placeholder.
* service_credentials: (string) Not used, placeholder.
Returns:
* (string) Rendered example source code to be written to a py file.
"""
# Expand all queries
tasks = json_expand_queries(tasks)
# Add imports
code = DISCLAIMER
code += 'import argparse\n'
code += 'import textwrap\n\n'
code += 'from starthinker.util.configuration import Configuration\n'
imported = set()
for task in tasks:
script, task = next(iter(task.items()))
if script not in imported:
code += 'from starthinker.task.%s.run import %s\n' % (script, script)
imported.add(script)
code += '\n'
code += '\n'
# Create function for recipe
fields = json_get_fields(tasks)
if fields:
code += 'def recipe_%s(config, %s):\n' % (name, ', '.join([f['name'] for f in fields]))
else:
code += 'def recipe_%s(config):\n' % name
# Add docstring
if description or fields:
code += ' """' + textwrap.fill(
description,
width=80,
subsequent_indent=" "
) + '\n'
if fields:
code += '\n Args:\n'
for field in fields:
code += ' %s (%s) - %s\n' % (field['name'], field['kind'], field.get('description', 'NA'))
code += ' """\n\n'
# Add calls
for task in tasks:
script, task = next(iter(task.items()))
code += ' %s(config, %s)\n\n' % (script, dict_to_python(task, indent=1))
code += '\n'
code += '\n'
code += 'if __name__ == "__main__":\n'
# Add argparse for each field
code += parameters_to_argparse(description, instructions, fields)
code += '\n'
code += ' args = parser.parse_args()\n'
code += '\n'
code += ''' config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)'''
code += '\n\n'
if fields:
code += ' recipe_%s(config, %s)\n' % (name, ', '.join(['args.%s' % f['name'] for f in fields]))
else:
code += ' recipe_%s(config)\n' % name
return code
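# Minimal usage sketch (hypothetical recipe): given tasks such as
# [{'hello': {'say': 'Hi'}}], recipe_to_python('demo', 'Says hi.', [], tasks)
# returns source that imports starthinker.task.hello.run and wraps a
# generated recipe_demo(config) function in an argparse entry point.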
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
Command line to turn StarThinker Recipe into Python script.
Example:
python example.py [path to existing recipe.json] --fo [path to new python file.py]
"""))
parser.add_argument('json', help='Path to recipe json file to load.')
parser.add_argument(
'--file_out',
'-fo',
help='Path to recipe file to be written if replacing fields.',
default=None
)
# initialize project
parser = commandline_parser(parser, arguments=('-p', '-c', '-u', '-s'))
args = parser.parse_args()
# load json to get each task
recipe = get_recipe(args.json)
# create Python file
example = recipe_to_python(
name=(args.file_out or args.json).rsplit('/', 1)[-1].split('.')[0], # take filename without extension of destination or source
description=recipe['script'].get('description'),
instructions=recipe['script'].get('instructions'),
tasks=recipe['tasks'],
project=args.project,
client_credentials=args.client,
user_credentials=args.user,
service_credentials=args.service
)
  # write the generated example to the output file, or print it to stdout
  if args.file_out:
    print('Writing to:', args.file_out)
    with open(args.file_out, 'w') as f:
      f.write(example)
  else:
    print(example)
if __name__ == '__main__':
main()
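# Example invocation (hypothetical paths):
#   python example.py scripts/hello.json --file_out hello.py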
| 32.528689 | 164 | 0.643442 | 1,008 | 7,937 | 4.993056 | 0.228175 | 0.016889 | 0.030399 | 0.029207 | 0.347705 | 0.27578 | 0.261872 | 0.252732 | 0.242798 | 0.242798 | 0 | 0.004338 | 0.18672 | 7,937 | 243 | 165 | 32.662551 | 0.775368 | 0.240393 | 0 | 0.174825 | 0 | 0.013986 | 0.463461 | 0.12151 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020979 | false | 0 | 0.097902 | 0 | 0.132867 | 0.013986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41eda4e4dba365b6d5b2482768194356e609bc8f | 596 | py | Python | scraper/collect_image_stats/get_domains_and_urls.py | martinGalajdaSchool/object-detection | 2c72b643464a89b91daac520a862ebaad2b3f9f0 | [
"Apache-2.0"
] | 2 | 2019-12-11T05:50:39.000Z | 2021-12-06T12:28:40.000Z | scraper/collect_image_stats/get_domains_and_urls.py | martinGalajdaSchool/object-detection | 2c72b643464a89b91daac520a862ebaad2b3f9f0 | [
"Apache-2.0"
] | 19 | 2019-12-16T21:23:00.000Z | 2022-03-02T14:59:12.000Z | scraper/collect_image_stats/get_domains_and_urls.py | martin-galajda/object-detection | 2c72b643464a89b91daac520a862ebaad2b3f9f0 | [
"Apache-2.0"
] | null | null | null | import csv
def get_domains_and_urls():
domains = []
urls = []
with open('./scraper/foto-domains-2019-03.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
        # skip the header row, then collect one domain and URL per data row
        next(csvreader, None)
        for row in csvreader:
            domain, country = row[:2]
            domains.append(f'{domain}.{country}')
            urls.append(f'http://{domain}.{country}')
print(domains)
print(urls)
return {
'domains': domains,
'urls': urls,
}
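# Illustrative sketch (hypothetical CSV rows): a file containing
#   domain,country
#   example,com
# would return {'domains': ['example.com'], 'urls': ['http://example.com']}.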
| 22.923077 | 68 | 0.486577 | 65 | 596 | 4.353846 | 0.523077 | 0.084806 | 0.04947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029255 | 0.369128 | 596 | 25 | 69 | 23.84 | 0.723404 | 0 | 0 | 0.095238 | 0 | 0 | 0.151261 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.142857 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41efccef82f28d187c0597489e64c7649630dd85 | 864 | py | Python | TreeDFS/SumPathNumbers.py | Feez/Algo-Challenges | 6b5f919b4e2c9ba9ed9b7c5d7697fe73740c139e | [
"MIT"
] | 2 | 2019-12-03T05:29:35.000Z | 2020-01-19T19:22:11.000Z | TreeDFS/SumPathNumbers.py | Feez/Algo-Challenges | 6b5f919b4e2c9ba9ed9b7c5d7697fe73740c139e | [
"MIT"
] | null | null | null | TreeDFS/SumPathNumbers.py | Feez/Algo-Challenges | 6b5f919b4e2c9ba9ed9b7c5d7697fe73740c139e | [
"MIT"
] | null | null | null | class TreeNode:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def dfs(self, total=0):
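        # Shift the running total one decimal place and append this node's
        # digit, building the number along the root-to-leaf path.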
total = (total * 10) + self.val
if self.left is None and self.right is None:
return total
left = 0
right = 0
if self.left is not None:
left = self.left.dfs(total=total)
if self.right is not None:
right = self.right.dfs(total=total)
return left + right
def find_sum_of_path_numbers(root):
return root.dfs()
def main():
root = TreeNode(1)
root.left = TreeNode(0)
root.right = TreeNode(1)
root.left.left = TreeNode(1)
root.right.left = TreeNode(6)
root.right.right = TreeNode(5)
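    # Root-to-leaf numbers for this tree are 101, 116 and 115, so the call
    # below prints "Total Sum of Path Numbers: 332".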
print("Total Sum of Path Numbers: " + str(find_sum_of_path_numbers(root)))
if __name__ == '__main__':
    main()
| 22.153846 | 78 | 0.586806 | 124 | 864 | 3.991935 | 0.241935 | 0.064646 | 0.054545 | 0.09697 | 0.09697 | 0.09697 | 0 | 0 | 0 | 0 | 0 | 0.018272 | 0.303241 | 864 | 38 | 79 | 22.736842 | 0.803987 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0 | 0.037037 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f244af008573d038af8edc1801e70f08cd96ac | 1,730 | py | Python | src/src/modules/ZeroOptimizer.py | ychnlgy/LipoWithGradients | 4fe5228a3dae8bf5d457eef6191ba29314421f6b | [
"MIT"
] | null | null | null | src/src/modules/ZeroOptimizer.py | ychnlgy/LipoWithGradients | 4fe5228a3dae8bf5d457eef6191ba29314421f6b | [
"MIT"
] | null | null | null | src/src/modules/ZeroOptimizer.py | ychnlgy/LipoWithGradients | 4fe5228a3dae8bf5d457eef6191ba29314421f6b | [
"MIT"
] | null | null | null | import torch
EPS = 1e-32
class ZeroOptimizer(torch.optim.SGD):
    def step(self, closure=None):
lr = self.param_groups[0]["lr"]
with torch.no_grad():
for group in self.param_groups:
for p in group["params"]:
if p.grad is not None:
p.grad = calc_grad(lr, p, p.grad)
        super().step(closure)
def calc_grad(lr, W, J, eps=EPS):
Z = calc_z(W, eps)
G = gravitate_zero(lr, W)
return Z*G + (1-Z)*J
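# Per-weight blend: where Z is near 1 the update gravitates the weight toward
# zero; where Z is 0 the ordinary gradient J passes through unchanged.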
def gravitate_zero(lr, W):
G = torch.zeros_like(W)
A = W.abs()
I = A > 0
G[I] = lr/W[I]
J = (W-lr*G).abs() > A
G[J] = W[J]/lr
return G
def calc_p(x, eps=EPS):
x = x.abs()
return 1-x/(x.mean()+eps)
def calc_z(w, eps=EPS):
return torch.nn.functional.relu(calc_p(w, eps))
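# Illustrative sketch (values assumed): for w = [1e-6, 0.5, -0.3] the mean
# magnitude is ~0.267, so calc_z(w) is roughly [1, 0, 0] -- only weights well
# below the mean magnitude get pulled toward zero.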
if __name__ == "__main__":
torch.manual_seed(10)
w_1 = torch.ones(10)*100-50
w_r = torch.rand(10)*100-50
w_s = torch.Tensor([1e-4, -1e-4, 1e-4, -1e-4, 1e-5, -1e-5, 1e-6, -1e-6, 1e-8, -1e-8])
w_w = 10**torch.arange(0, -10, -1).float()
w_0 = torch.zeros(10)
w_z = torch.Tensor([1e-4]*3+ [1e-8] + [0]*6)
w_u = torch.Tensor([1e2] + [1e-10]*9)
w_v = torch.Tensor([1] + [1e-10])
w_o = torch.Tensor([1])
w_x = torch.Tensor([1]*1 + [1e-4]*1)
def print_wz(w, fmt=".2f"):
z = calc_z(w)
buf = "%{}\t%.2f".format(fmt)
for a, b in zip(w, z):
print(buf % (a.item(), b.item()))
input("===")
#'''
print_wz(w_1)
print_wz(w_r)
print_wz(w_s, fmt=".0E")
print_wz(w_w, fmt=".0E")
print_wz(w_0)
print_wz(w_z, fmt=".0E")
print_wz(w_u, fmt=".0E")
print_wz(w_v, fmt=".0E")
print_wz(w_o)
#'''
print_wz(w_x, ".0E")
| 23.69863 | 89 | 0.509249 | 317 | 1,730 | 2.611987 | 0.264984 | 0.092995 | 0.10628 | 0.072464 | 0.095411 | 0.016908 | 0.016908 | 0 | 0 | 0 | 0 | 0.069579 | 0.285549 | 1,730 | 72 | 90 | 24.027778 | 0.600324 | 0.003468 | 0 | 0 | 0 | 0 | 0.028455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.017857 | 0.017857 | 0.214286 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f5bd3c227b6e90d9957fe3e9834571a6c5a926 | 2,010 | py | Python | python_lesson_4/python_lesson_4_homework_lightplus.py | cubecloud/simple_python | 2bc4ee1720214293dabfa5dbe661a49246c38842 | [
"MIT"
] | null | null | null | python_lesson_4/python_lesson_4_homework_lightplus.py | cubecloud/simple_python | 2bc4ee1720214293dabfa5dbe661a49246c38842 | [
"MIT"
] | 1 | 2020-04-24T10:19:24.000Z | 2020-04-24T10:19:24.000Z | python_lesson_4/python_lesson_4_homework_lightplus.py | cubecloud/simple_python | 2bc4ee1720214293dabfa5dbe661a49246c38842 | [
"MIT"
] | null | null | null | # Task
# In the log file, find the date of the latest log entry (by its timestamp):
log_file_name = 'log'
# Option 1
# open and read the file
with open(log_file_name, 'r', encoding='utf-8') as text_file:
    max_date_str = ''
    # Compare the first 23 characters (the timestamp) and keep the latest
    for line in text_file:
        if line[:23] > max_date_str[:23]: max_date_str = line
# Print the date and time of the latest log entry
print("Option 1")
print(max_date_str)
# Option 2
# open and read the file
log_file_name = 'log'
# import the re module
import re
# Create a dictionary whose keys map to lists, one per log field
dict_data = {'Date_and_Time': [], 'Application': [], 'Type': [], 'Message': []}
with open(log_file_name, 'r', encoding='utf-8') as text_file:
    for line in text_file:
        # Split the line with a regular expression
        log_split = re.split(r'\s[-]\s|\n', line)
        i = 0
        # Fill the dictionary, one key per split field
        for key in dict_data.keys():
            dict_data[key].append(log_split[i])
            i += 1
# Get the list of dates by key
date_time_line = (dict_data['Date_and_Time'])
# Print the date and time of the latest log entry using max()
print("Option 2")
print(max(date_time_line))
print()
# Option 3
# import the pandas module
import pandas as pd
# parse the file into a DataFrame, one named column per log field
log_file = pd.read_csv(log_file_name, sep=' - ', names=['Date_and_Time', 'Application', 'Type', 'Message'],
                       engine='python')
print("Option 3")
print(log_file.sort_values('Date_and_Time', ascending=False).head(1))
print()
# Option 4
# import the datetime module as dt
import datetime as dt
log_dates = []
file = open(log_file_name, 'rb').readlines()
for line in file:
    # fill the list with datetimes parsed from each line's timestamp
    log_dates.append(dt.datetime.strptime(line.decode().split(' - ')[0], '%Y-%m-%d %H:%M:%S,%f'))
# Print the date and time of the latest log entry using max()
print("Option 4")
print(max([q for q in log_dates]))
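# Illustrative sketch (assumed log format): a line such as
#   2020-04-24 10:19:24,123 - app - INFO - started
# splits on ' - ', and its first field parses with '%Y-%m-%d %H:%M:%S,%f'.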
| 31.904762 | 107 | 0.678109 | 303 | 2,010 | 4.336634 | 0.386139 | 0.042618 | 0.050228 | 0.034247 | 0.277017 | 0.230594 | 0.156773 | 0.156773 | 0.156773 | 0.156773 | 0 | 0.011187 | 0.199502 | 2,010 | 62 | 108 | 32.419355 | 0.805469 | 0.343284 | 0 | 0.235294 | 0 | 0 | 0.149576 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088235 | 0 | 0.088235 | 0.294118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f650c872145facc783efbfb2b0dadcd4920f2a | 18,278 | py | Python | sappy/m4a.py | SomeShrug/SapPy | cee216bc5f89f0479748efdbeb75c4781d95b0f7 | [
"MIT"
] | 4 | 2018-04-21T15:43:50.000Z | 2018-07-10T17:11:31.000Z | sappy/m4a.py | SomeShrug/SapPy | cee216bc5f89f0479748efdbeb75c4781d95b0f7 | [
"MIT"
] | null | null | null | sappy/m4a.py | SomeShrug/SapPy | cee216bc5f89f0479748efdbeb75c4781d95b0f7 | [
"MIT"
] | 1 | 2018-04-08T03:00:06.000Z | 2018-04-08T03:00:06.000Z | # -*- coding: utf-8 -*-
"""Data-storage containers for internal use."""
import copy
import math
from collections import OrderedDict, deque
from enum import IntEnum
from random import random
from typing import Dict, List, NamedTuple, Union, Tuple, Deque
from .config import (BASE_FREQUENCY, PSG_SQUARE_FREQUENCY, PSG_SQUARE_VOLUME,
PSG_WAVEFORM_FREQUENCY, PSG_WAVEFORM_SIZE, SEMITONE_RATIO)
from .exceptions import InvalidArgument
from .fmod import (get_mute, set_frequency, set_mute, set_panning, set_volume)
from .inst_set import KeyArg, c_v, mxv
NOTES = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
class M4AVoiceMode(IntEnum):
DIRECTSOUND = 0x0
PSG_SQUARE1 = 0x1
PSG_SQUARE2 = 0x2
PSG_WAVE = 0x3
PSG_NOISE = 0x4
FIX_DSOUND = 0x8
KEY_ZONE = 0x40
PERCUSSION = 0x80
NULL = 0xFF
# region VOICE STRUCTS
class M4AVoice(object):
"""Voice base class."""
def __init__(self, mode: int, root: int, attack: int, decay: int,
sustain: int, release: int) -> None:
self._validate(mode, root)
self.mode: M4AVoiceMode = mode
self.root: KeyArg = root
self.envelope: SoundEnvelope = SoundEnvelope(attack, decay, sustain,
release)
self.fmod_handle = None
self.mode = M4AVoiceMode(self.mode)
self.root = KeyArg(self.root)
def __repr__(self):
return f'M4AVoice(mode=0x{self.mode:<X}, root={self.root}, ' \
f'envelope={self.envelope})'
@staticmethod
def _validate(mode, root) -> None:
try:
M4AVoiceMode(mode)
except ValueError:
raise InvalidArgument(mode, 'VOICE MODE')
try:
KeyArg(root)
except ValueError:
raise InvalidArgument(root, 'ROOT KEY')
class M4APSGVoice(M4AVoice):
"""PSG Voice base class."""
def __init__(self, mode: int, root: int, time_ctrl: int, attack: int,
decay: int, sustain: int, release: int) -> None:
attack = 255 - attack * 32
decay *= 32
sustain *= 16
release *= 32
super().__init__(mode, root, attack, decay, sustain, release)
self._validate(mode, root)
self.time_ctrl: int = time_ctrl
def __repr__(self):
return f'M4APSGVoice(mode=0x{self.mode:<X}, root={self.root}, ' \
f'time_ctrl={self.time_ctrl}, envelope={self.envelope})'
@staticmethod
def _validate(mode, root) -> None:
M4AVoice._validate(mode, root)
if mode in (0x0, 0x8):
raise InvalidArgument(mode, 'PSG MODE')
class M4ADirectSound(M4AVoice):
"""M4A DirectSound voice entry."""
def __init__(self, mode: int, root: int, panning: int, sample_ptr: int,
attack: int, decay: int, sustain: int, release: int) -> None:
super().__init__(mode, root, attack, decay, sustain, release)
self.fixed: bool = self.mode == M4AVoiceMode.FIX_DSOUND
self.panning: int = panning
self.sample_ptr: int = sample_ptr
class M4ASquare1(M4APSGVoice):
"""M4A PSG Square1 entry."""
def __init__(self, root: int, time_ctrl: int, sweep: int,
duty_cycle: int, attack: int, decay: int, sustain: int,
release: int) -> None:
super().__init__(M4AVoiceMode.PSG_SQUARE1, root, time_ctrl, attack,
decay, sustain, release)
self.sweep: int = sweep
self.duty_cycle: int = duty_cycle
self.sample_ptr: str = f'square{self.duty_cycle}'
def __repr__(self):
return f'M4ASquare1(root={self.root}, time_ctrl={self.time_ctrl}, ' \
f'sweep={self.sweep}, envelope={self.envelope})'
class M4ASquare2(M4APSGVoice):
"""M4A PSG Square2 entry."""
def __init__(self, root: int, time_ctrl: int, duty_cycle: int,
attack: int, decay: int, sustain: int, release: int) -> None:
super().__init__(M4AVoiceMode.PSG_SQUARE2, root, time_ctrl, attack,
decay, sustain, release)
self.duty_cycle: int = duty_cycle
self.sample_ptr: str = f'square{self.duty_cycle}'
class M4AWaveform(M4APSGVoice):
"""M4A PSG Waveform entry."""
def __init__(self, root: int, time_ctrl: int, sample_ptr: int, attack: int,
decay: int, sustain: int, release: int) -> None:
super().__init__(M4AVoiceMode.PSG_WAVE, root, time_ctrl, attack, decay,
sustain, release)
self.sample_ptr: int = sample_ptr
class M4ANoise(M4APSGVoice):
"""M4A PSG Noise entry."""
def __init__(self, root: int, time_ctrl: int, period: int, attack: int,
decay: int, sustain: int, release: int) -> None:
super().__init__(M4AVoiceMode.PSG_NOISE, root, time_ctrl, attack, decay,
sustain, release)
self.period: int = period
self.sample_ptr: str = f'noise{self.period}'
class M4ADrum(M4AVoice):
"""M4A Percussion voice entry."""
def __init__(self, voice_table: Dict) -> None:
"""Initialize every key-split instrument using track data."""
super().__init__(M4AVoiceMode.PERCUSSION, 0x0, 0x0, 0x0, 0x00, 0x0)
self.voice_table: Dict[int, M4AVoice] = voice_table
class M4AKeyZone(M4AVoice):
"""M4A Key-zone voice entry."""
def __init__(self, voice_table: Dict, keymap: Dict) -> None:
"""Initialize key-split instrument using track data."""
super().__init__(M4AVoiceMode.KEY_ZONE, 0x0, 0x0, 0x0, 0x00, 0x0)
self.voice_table: Dict[int, M4AVoice] = voice_table
self.keymap: Dict[int, int] = keymap
# endregion
# region SAMPLE STRUCTS
class M4ASample(object):
"""Sample base class."""
def __init__(self, looped: bool, frequency: int, loop_start: int,
sample_data: bytes) -> None:
self.looped = looped
self.frequency = frequency
self.loop_start = loop_start
self.sample_data = sample_data
self.fmod_handle = None
def __repr__(self):
return f'{self.__class__.__name__}(looped=0x{self.looped:X}, ' \
f'frequency=0x{self.frequency:X}, ' \
f'loop_start={self.loop_start}, size={self.size})'
@property
def size(self):
return len(self.sample_data)
class M4ADirectSoundSample(M4ASample):
"""PCM8 DirectSound sample."""
def __init__(self, looped: int, frequency: int, loop_start: int,
sample_data: bytes) -> None:
self._valid = self._is_valid(looped, loop_start, sample_data)
super().__init__(looped == 0x40, frequency // 1024, loop_start,
sample_data)
@staticmethod
def _is_valid(looped, loop_start, sample_data):
c_loop = looped in (0x0, 0x40)
c_loop_st = 0 <= loop_start <= len(sample_data)
return all((c_loop, c_loop_st))
def is_valid(self):
return self._valid
class M4ASquareSample(M4ASample):
"""PSG Square1/Square2 sample."""
VARIANCE = int(0x7F * PSG_SQUARE_VOLUME)
SQUARE_SIZE = 8
CYCLES = tuple(map(int, (SQUARE_SIZE * .125, SQUARE_SIZE * .25,
SQUARE_SIZE * .5, SQUARE_SIZE * .75)))
def __init__(self, duty_cycle: int):
self.duty_cycle = duty_cycle
data = self.square_wave(duty_cycle)
super().__init__(True, PSG_SQUARE_FREQUENCY, 0, data)
def __repr__(self):
return f'M4ASquareSample(duty_cycle={self.duty_cycle})'
@staticmethod
def square_wave(duty_cycle: int) -> bytes:
h_cycle = M4ASquareSample.CYCLES[duty_cycle]
l_cycle = M4ASquareSample.SQUARE_SIZE - h_cycle
high = h_cycle * [0x80 + M4ASquareSample.VARIANCE]
low = l_cycle * [0x80 - M4ASquareSample.VARIANCE]
wave = (high + low)
return bytes(wave)
class M4AWaveformSample(M4ASample):
"""PSG Programmable Waveform sample."""
def __init__(self, sample_data: bytes) -> None:
super().__init__(True, PSG_WAVEFORM_FREQUENCY, 0, sample_data)
@property
def is_looped(self) -> bool:
return True
@property
def size(self) -> int:
return PSG_WAVEFORM_SIZE
class M4ANoiseSample(M4ASample):
"""PSG Noise sample."""
VARIANCE = int(0x7F * PSG_SQUARE_VOLUME)
def __init__(self, period: int):
self.validate(period)
data = self.noise(period)
super().__init__(True, 7040, 0, data)
@staticmethod
def validate(period: int) -> None:
if not 0 <= period <= 1:
raise InvalidArgument(period, 'NOISE PERIOD')
@staticmethod
def noise(period: int) -> bytes:
"""Generate noise sample."""
if period == 0:
samples = 32767
elif period == 1:
samples = 127
else:
raise InvalidArgument(period, 'NOISE PERIOD')
high = 0x80 + M4ASquareSample.VARIANCE
low = 0x80 - M4ASquareSample.VARIANCE
noise_data = [high if random() > .5 else low for _ in range(samples)]
return bytes(noise_data)
# endregion
class SoundDriverMode(NamedTuple):
"""GBA SoundDriverMode call."""
reverb: int = 0
reverb_enabled: bool = False
polyphony: int = 8
volume_ind: int = 15
freq_ind: int = 4
dac_ind: int = 9
_DEFAULT = 0x0094F800
_FREQUENCY_TABLE = {
1: 5734,
2: 7884,
3: 10512,
4: 13379,
5: 15768,
6: 18157,
7: 21024,
8: 26758,
9: 31536,
10: 36314,
11: 40137,
12: 42048
}
_DAC_TABLE = {
8: 9,
9: 8,
10: 7,
11: 6
}
@property
def volume(self):
"""Return volume."""
return self.volume_ind * 17
@property
def frequency(self):
"""Return sample rate."""
return self._FREQUENCY_TABLE[self.freq_ind]
@property
def dac(self):
"""Return D/A converter bits."""
return self._DAC_TABLE[self.dac_ind]
class SoundEnvelope(object):
"""M4A ADSR sound envelope."""
ATTACK = 0
DECAY = 1
SUSTAIN = 2
RELEASE = 3
NOTE_OFF = 4
def __init__(self, attack: int, decay: int, sustain: int,
release: int) -> None:
"""Initialize envelope to M4AVoice ADSR settings."""
self.phase = self.ATTACK
self.attack = attack
self.decay = decay
self.sustain = sustain
self.release = release
self._rate = self.attack
self.env_pos = 0
def __repr__(self):
return f'SoundEnvelope({self.attack}, {self.decay}, {self.sustain}, ' \
f'{self.release})'
def note_off(self) -> None:
"""Switch to RELEASE phase on note-off."""
if self.phase >= self.RELEASE:
return
self.phase = self.RELEASE
self._rate = self.release / 256
def update(self) -> int:
"""Update sound envelope phase."""
if self.phase == self.ATTACK:
self.env_pos += self._rate
if self.env_pos >= 255:
self.phase = self.DECAY
self.env_pos = 255
self._rate = self.decay / 256
if self.phase == self.DECAY:
self.env_pos = int(self.env_pos * self._rate)
if self.env_pos <= self.sustain:
self.phase = self.SUSTAIN
self.env_pos = self.sustain
if self.phase == self.SUSTAIN:
pass
if self.phase == self.RELEASE:
self.env_pos = int(self.env_pos * self._rate)
if self.env_pos <= 0:
self.phase = self.NOTE_OFF
if self.phase == self.NOTE_OFF:
return -1
return self.env_pos
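# Illustrative sketch (parameter values assumed): SoundEnvelope(64, 192, 100, 128)
# rises by 64 per update() until it caps at 255, multiplies by 192/256 each
# tick down to the sustain level 100, and after note_off() multiplies by
# 128/256 per tick until update() returns -1.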
class MetaData(NamedTuple):
"""ROM/Track metadata."""
REGION = {
'J': 'JPN',
'E': 'USA',
'P': 'PAL',
'D': 'DEU',
'F': 'FRA',
'I': 'ITA',
'S': 'ESP'
}
rom_name: str = ...
rom_code: str = ...
tracks: int = ...
reverb: int = ...
priority: int = ...
main_ptr: int = ...
voice_ptr: int = ...
song_ptr: int = ...
unknown: int = ...
@property
def echo_enabled(self) -> bool:
"""Track reverb flag."""
        # bit 7 of the reverb byte is the enable flag
        return bool(self.reverb & 0x80)
@property
def code(self) -> str:
"""ROM production code."""
return f'AGB-{self.rom_code}-{self.region}'
@property
def region(self) -> str:
"""ROM region code."""
return self.REGION.get(self.rom_code[3], 'UNK')
class FMODNote(object):
"""FMOD note."""
def __init__(self, ticks: int, midi_note: int, velocity: int,
voice: int) -> None:
"""Initialize note from track data."""
self.note_off: bool = False
self.voice: int = voice
self.midi_note: int = midi_note
self.velocity: int = velocity
self.ticks: int = ticks
self.lfo_pos: float = 0.0
self.frequency: int = 0
self.envelope: SoundEnvelope = ...
self.fmod_handle: int = 0
def __repr__(self):
return f'Note({self.midi_note}, {self.velocity}, {self.ticks}, ' \
f'{self.voice})'
__str__ = __repr__
# region PROPERTIES
@property
def volume(self) -> float:
"""Return volume of note."""
return self.velocity / 0x7F * self.envelope.env_pos / 0xFF
@property
def muted(self) -> bool:
"""Return mute state in FMOD."""
return get_mute(self.fmod_handle)
# endregion
def reset_mixer(self, voice: M4AVoice) -> None:
"""Install new voice envelope."""
self.envelope = copy.copy(voice.envelope)
def release(self) -> None:
"""Change note state to note-off."""
self.envelope.note_off()
self.note_off = True
def update(self) -> None:
"""Update note state."""
if self.ticks > 0:
self.ticks -= 1
if self.ticks == 0:
self.release()
def update_envelope(self) -> None:
"""Update sound envelope for this note."""
pos = self.envelope.update()
if pos == -1:
self.set_mute(True)
# region FMOD FUNCTIONS
def set_panning(self, panning: int) -> None:
set_panning(self.fmod_handle, panning)
def set_volume(self, volume: int) -> None:
set_volume(self.fmod_handle, volume)
def set_frequency(self, frequency: int) -> None:
set_frequency(self.fmod_handle, frequency)
def set_mute(self, state: bool) -> None:
set_mute(self.fmod_handle, state)
# endregion
class M4ASong(NamedTuple):
"""M4A song."""
tracks: List['M4ATrack'] = []
voices: Dict[int, M4AVoice] = {}
samples: Dict[Union[int, str], M4ASample] = {}
meta_data: 'MetaData' = MetaData()
sdm: SoundDriverMode = None
class M4ATrack(object):
"""M4A Track."""
NO_VOICE = -1
TEMPO = 75
KEY_SHIFT = 0
def __init__(self, track_data: OrderedDict):
"""Initialize blank track."""
self.enabled: bool = True
self.track_data: OrderedDict = track_data
self.cmd_addresses: Tuple[int] = tuple(track_data.keys())
self.commands: Tuple = tuple(track_data.values())
self.voices: Tuple[int] = ()
self.notes: List[FMODNote] = []
self.note_queue: Deque[FMODNote] = deque()
self.call_stack: Deque[int] = deque(maxlen=3)
self.type: M4AVoiceMode = M4AVoiceMode.NULL
self.voice: int = M4ATrack.NO_VOICE
self.key_shift: int = 0
self._volume: int = mxv
self._panning: int = c_v
self.pitch_bend: int = c_v
self.pitch_range: int = 2
self.mod: int = 0
self.lfo_speed: int = 0
self.lfo_pos: int = 0
self.ticks: int = 0
self.program_ctr: int = 0
self.return_ctr: int = 0
self.base_ctr: int = 0
self.in_patt: bool = False
self.out_vol: int = 0
# region PROPERTIES
@property
def volume(self) -> float:
return self._volume / 0x7F
@volume.setter
def volume(self, volume: int) -> None:
self._volume = volume
@property
def panning(self) -> int:
return self._panning * 2
@panning.setter
def panning(self, panning: int) -> None:
self._panning = panning
@property
def frequency(self) -> float:
pitch = (self.pitch_bend - c_v) / c_v * self.pitch_range
return math.pow(SEMITONE_RATIO, pitch)
# endregion
def update(self) -> None:
"""Execute M4A track commands and decrement wait counter."""
if not self.enabled:
return
if self.ticks > 0:
self.ticks -= 1
if self.ticks == 0:
self.base_ctr = self.program_ctr
while self.ticks == 0 and self.enabled:
cmd = self.commands[self.program_ctr]
cmd(self)
for note in self.notes:
note.update()
def update_envelope(self):
self.out_vol = 0
for note in self.notes[::]:
note.update_envelope()
if note.muted:
continue
volume = round(self.volume * note.volume * 255)
if self.type in (M4AVoiceMode.PSG_SQUARE1, M4AVoiceMode.PSG_SQUARE2,
M4AVoiceMode.PSG_NOISE):
volume = 15 * round(volume / 15)
self.out_vol = volume
note.set_volume(volume)
def note_name(midi_note: int) -> str:
"""Retrieve the string name of a MIDI note from its byte representation."""
octave, note = divmod(midi_note, 12)
octave -= 2
return f'{NOTES[note]}{"M" if octave < 0 else ""}{abs(octave)}'
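# For example, note_name(60) returns 'C3', while note_name(12) returns 'CM1'
# (the 'M' marks a negative octave).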
def resample(midi_note: int, relative_c_freq: int = -1) -> int:
"""Retrieve the sound frequency in Hz of a MIDI note relative to C3."""
note = midi_note - KeyArg.Cn3
    if relative_c_freq < 0:
        base_freq = BASE_FREQUENCY // abs(relative_c_freq)
        relative_c_freq = base_freq * math.pow(SEMITONE_RATIO, 3)
freq = relative_c_freq * math.pow(SEMITONE_RATIO, note)
return int(freq)
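# For example, resample(KeyArg.Cn3, relative_c_freq=440) returns 440: the
# note offset is zero, so the reference frequency is left unscaled.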
| 28.875197 | 80 | 0.585239 | 2,210 | 18,278 | 4.638462 | 0.149774 | 0.011609 | 0.018242 | 0.013267 | 0.298605 | 0.230124 | 0.217052 | 0.185933 | 0.154131 | 0.099893 | 0 | 0.029489 | 0.294999 | 18,278 | 632 | 81 | 28.920886 | 0.766025 | 0.078236 | 0 | 0.185615 | 0 | 0 | 0.052055 | 0.030511 | 0 | 0 | 0.007522 | 0 | 0 | 1 | 0.141531 | false | 0.00232 | 0.023202 | 0.030162 | 0.392111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f7aa5d337d6a6a04c73fadceef0f5775c6ce5a | 4,076 | py | Python | examples/manifold/plot_swissroll.py | jlopezNEU/scikit-learn | 593495eebc3c2f2ffdb244036adf57fab707a47d | [
"BSD-3-Clause"
] | 50,961 | 2015-01-01T06:06:31.000Z | 2022-03-31T23:40:12.000Z | examples/manifold/plot_swissroll.py | ashutoshpatelofficial/scikit-learn | 2fc9187879424556726d9345a6656884fa9fbc20 | [
"BSD-3-Clause"
] | 17,065 | 2015-01-01T02:01:58.000Z | 2022-03-31T23:48:34.000Z | examples/manifold/plot_swissroll.py | ashutoshpatelofficial/scikit-learn | 2fc9187879424556726d9345a6656884fa9fbc20 | [
"BSD-3-Clause"
] | 26,886 | 2015-01-01T00:59:27.000Z | 2022-03-31T18:03:23.000Z | """
===================================
Swiss Roll And Swiss-Hole Reduction
===================================
This notebook seeks to compare two popular non-linear dimensionality
reduction techniques, T-distributed Stochastic Neighbor Embedding (t-SNE) and
Locally Linear Embedding (LLE), on the classic Swiss Roll dataset.
Then, we will explore how they both deal with the addition of a hole
in the data.
"""
# %%
# Swiss Roll
# ---------------------------------------------------
#
# We start by generating the Swiss Roll dataset.
import matplotlib.pyplot as plt
from sklearn import manifold, datasets
sr_points, sr_color = datasets.make_swiss_roll(n_samples=1500, random_state=0)
# %%
# Now, let's take a look at our data:
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
fig.add_axes(ax)
ax.scatter(
sr_points[:, 0], sr_points[:, 1], sr_points[:, 2], c=sr_color, s=50, alpha=0.8
)
ax.set_title("Swiss Roll in Ambient Space")
ax.view_init(azim=-66, elev=12)
_ = ax.text2D(0.8, 0.05, s="n_samples=1500", transform=ax.transAxes)
# %%
# Computing the LLE and t-SNE embeddings, we find that LLE seems to unroll the
# Swiss Roll pretty effectively. t-SNE, on the other hand, is able
# to preserve the general structure of the data, but it poorly represents the
# continuous nature of our original data. Instead, it seems to unnecessarily
# clump sections of points together.
sr_lle, sr_err = manifold.locally_linear_embedding(
sr_points, n_neighbors=12, n_components=2
)
sr_tsne = manifold.TSNE(
n_components=2, learning_rate="auto", perplexity=40, init="pca", random_state=0
).fit_transform(sr_points)
fig, axs = plt.subplots(figsize=(8, 8), nrows=2)
axs[0].scatter(sr_lle[:, 0], sr_lle[:, 1], c=sr_color)
axs[0].set_title("LLE Embedding of Swiss Roll")
axs[1].scatter(sr_tsne[:, 0], sr_tsne[:, 1], c=sr_color)
_ = axs[1].set_title("t-SNE Embedding of Swiss Roll")
# %%
# .. note::
#
# LLE seems to be stretching the points from the center (purple)
# of the swiss roll. However, we observe that this is simply a byproduct
# of how the data was generated. There is a higher density of points near the
# center of the roll, which ultimately affects how LLE reconstructs the
# data in a lower dimension.
# %%
# Swiss-Hole
# ---------------------------------------------------
#
# Now let's take a look at how both algorithms deal with us adding a hole to
# the data. First, we generate the Swiss-Hole dataset and plot it:
sh_points, sh_color = datasets.make_swiss_roll(
n_samples=1500, hole=True, random_state=0
)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
fig.add_axes(ax)
ax.scatter(
sh_points[:, 0], sh_points[:, 1], sh_points[:, 2], c=sh_color, s=50, alpha=0.8
)
ax.set_title("Swiss-Hole in Ambient Space")
ax.view_init(azim=-66, elev=12)
_ = ax.text2D(0.8, 0.05, s="n_samples=1500", transform=ax.transAxes)
# %%
# Computing the LLE and t-SNE embeddings, we obtain similar results to the
# Swiss Roll. LLE very capably unrolls the data and even preserves
# the hole. t-SNE again seems to clump sections of points together, but we
# note that it preserves the general topology of the original data.
sh_lle, sh_err = manifold.locally_linear_embedding(
sh_points, n_neighbors=12, n_components=2
)
sh_tsne = manifold.TSNE(
n_components=2, learning_rate="auto", perplexity=40, init="random", random_state=0
).fit_transform(sh_points)
fig, axs = plt.subplots(figsize=(8, 8), nrows=2)
axs[0].scatter(sh_lle[:, 0], sh_lle[:, 1], c=sh_color)
axs[0].set_title("LLE Embedding of Swiss-Hole")
axs[1].scatter(sh_tsne[:, 0], sh_tsne[:, 1], c=sh_color)
_ = axs[1].set_title("t-SNE Embedding of Swiss-Hole")
# %%
#
# Concluding remarks
# ------------------
#
# We note that t-SNE benefits from testing more combinations of parameters.
# Better results could probably have been obtained by better tuning these
# parameters.
#
# We observe that, as seen in the "Manifold learning on
# handwritten digits" example, t-SNE generally performs better than LLE
# on real world data.
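#
# A minimal sketch of such a sweep (perplexity values chosen arbitrarily):
#
#   for perplexity in (5, 30, 50, 100):
#       emb = manifold.TSNE(n_components=2, perplexity=perplexity,
#                           init="pca", random_state=0).fit_transform(sh_points)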
| 33.966667 | 86 | 0.692345 | 658 | 4,076 | 4.173252 | 0.31459 | 0.03933 | 0.01748 | 0.016023 | 0.422433 | 0.353969 | 0.353969 | 0.319009 | 0.291333 | 0.265113 | 0 | 0.02876 | 0.146958 | 4,076 | 119 | 87 | 34.252101 | 0.761001 | 0.501472 | 0 | 0.304348 | 0 | 0 | 0.108531 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f7f839e3be35c24720ab38c662be95d99e2886 | 2,044 | py | Python | INF101/TP/TP9/2.9.3.py | Marshellson/UGA_IMF | eb293deabcc5ef6e45617d8c5bb6268b63b34f21 | [
"MIT"
] | 1 | 2021-09-21T21:53:17.000Z | 2021-09-21T21:53:17.000Z | INF101/TP/TP9/2.9.3.py | Marshellson/UGA_INF | eb293deabcc5ef6e45617d8c5bb6268b63b34f21 | [
"MIT"
] | null | null | null | INF101/TP/TP9/2.9.3.py | Marshellson/UGA_INF | eb293deabcc5ef6e45617d8c5bb6268b63b34f21 | [
"MIT"
] | null | null | null | '''
Author: JIANG Yilun
Date: 2021-12-01 13:01:29
LastEditTime: 2021-12-01 13:30:06
LastEditors: JIANG Yilun
Description:
FilePath: /INF_101/INF101/TP/TP9/2.9.3.py
'''
import random
def initiale()->dict:
    nombre_de_personnes = int(input("Enter the number of players: "))
dict_personnes = {}
for i in range(nombre_de_personnes):
        nom = input("Enter the player's name: ")
dict_personnes[nom] = 0
return dict_personnes
def traduire(dictio: dict, mot: str) -> str:
for k, v in dictio.items():
if mot == k:
return v
def jouerUnMot(dictio: dict) -> bool:
list_mot_francais = []
for k, v in dictio.items():
list_mot_francais.append(k)
hasard_choisir = random.choice(list_mot_francais)
    mot_anglais_saissir = input("Translate '%s' into English: " % hasard_choisir)
if dictio[hasard_choisir] == mot_anglais_saissir:
print("Bravo!")
return True
else:
print("Dommage!")
return False
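# Example round (with the dictionary defined below): if 'pomme' is drawn,
# answering "apple" prints "Bravo!" and scores a point; anything else prints
# "Too bad!".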
dictionnaire = {'pomme': 'apple', 'orange': 'orange', 'banane': 'banana'}
nombre_round = int(input("Enter the number of rounds: "))
dict_personnes = initiale()
for i in range(nombre_round):
for nom in dict_personnes:
print("cest le %s round du %s" % (i+1, nom))
if jouerUnMot(dictionnaire):
dict_personnes[nom] += 1
dict_pourcentage = {}
list_nom_ranking = []
list_score_ranking = []
for nom, score in dict_personnes.items():
dict_pourcentage[nom] = (score / nombre_round) * 100
list_nom_ranking.append(nom)
list_score_ranking.append(score)
for i in range(len(list_nom_ranking)):
for j in range(i+1, len(list_nom_ranking)):
if list_score_ranking[i] < list_score_ranking[j]:
list_score_ranking[i], list_score_ranking[j] = list_score_ranking[j], list_score_ranking[i]
list_nom_ranking[i], list_nom_ranking[j] = list_nom_ranking[j], list_nom_ranking[i]
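# Print each player's success rate as a percentage, highest score first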
for i in range(len(list_nom_ranking)):
print("{}: {}" % (list_nom_ranking[i], dict_pourcentage[list_nom_ranking[i]]), "%") | 29.623188 | 103 | 0.666341 | 295 | 2,044 | 4.386441 | 0.294915 | 0.059505 | 0.119011 | 0.034003 | 0.303709 | 0.224111 | 0.159196 | 0.125193 | 0.081917 | 0.064915 | 0 | 0.027692 | 0.20499 | 2,044 | 69 | 104 | 29.623188 | 0.768615 | 0.078278 | 0 | 0.086957 | 0 | 0 | 0.10016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0 | 0.021739 | 0 | 0.173913 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f845926aa3ec217a14d9100d2e6f115eb277d1 | 322 | py | Python | HLTriggerOffline/Exotica/python/analyses/hltExoticaLowPtTrimuon_cff.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTriggerOffline/Exotica/python/analyses/hltExoticaLowPtTrimuon_cff.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTriggerOffline/Exotica/python/analyses/hltExoticaLowPtTrimuon_cff.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-23T09:25:45.000Z | 2021-11-23T09:25:45.000Z | import FWCore.ParameterSet.Config as cms
LowPtTrimuonPSet = cms.PSet(
hltPathsToCheck = cms.vstring(
),
recMuonLabel = cms.InputTag("muons"),
# -- Analysis specific cuts
minCandidates = cms.uint32(3),
# -- Analysis specific binnings
parametersDxy = cms.vdouble(50, -2.500, 2.500),
)
| 26.833333 | 56 | 0.65528 | 34 | 322 | 6.205882 | 0.735294 | 0.151659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052 | 0.223602 | 322 | 11 | 57 | 29.272727 | 0.792 | 0.170807 | 0 | 0 | 0 | 0 | 0.018939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41fa41d8007e097936f791c465cb628fb82b64ed | 3,021 | py | Python | see/test/hooks_manager_test.py | nethunterslabs/see | da9387950d5db7c30ad8a5d1ba12e884afe8b1bb | [
"Apache-2.0"
] | null | null | null | see/test/hooks_manager_test.py | nethunterslabs/see | da9387950d5db7c30ad8a5d1ba12e884afe8b1bb | [
"Apache-2.0"
] | null | null | null | see/test/hooks_manager_test.py | nethunterslabs/see | da9387950d5db7c30ad8a5d1ba12e884afe8b1bb | [
"Apache-2.0"
] | null | null | null | import copy
import mock
import unittest
from see import Hook
from see import hooks
CONFIG = {
"configuration": {"key": "value"},
"hooks": [
{
"name": "see.test.hooks_manager_test.TestHook",
"configuration": {"foo": "bar"},
},
{"name": "see.test.hooks_manager_test.TestHookCleanup"},
],
}
class TestHook(Hook):
def __init__(self, parameters):
super(TestHook, self).__init__(parameters)
self.cleaned = False
class TestHookCleanup(Hook):
def __init__(self, parameters):
super(TestHookCleanup, self).__init__(parameters)
self.cleaned = False
def cleanup(self):
self.cleaned = True
class HookManagerLoadTest(unittest.TestCase):
def setUp(self):
self.hook_manager = hooks.HookManager("foo", copy.deepcopy(CONFIG))
def test_load_hooks(self):
"""TestHook is loaded into HookManager."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
self.assertEqual(self.hook_manager.hooks[0].__class__.__name__, "TestHook")
def test_load_hooks_configuration(self):
"""Generic configuration are available in TestHook."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
self.assertTrue("key" in self.hook_manager.hooks[0].configuration)
def test_load_hooks_specific_configuration(self):
"""Specific configuration are available in TestHook."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
self.assertTrue("foo" in self.hook_manager.hooks[0].configuration)
def test_load_non_existing_hook(self):
"""Wrong Hooks are not loaded."""
context = mock.MagicMock()
config = copy.deepcopy(CONFIG)
config["hooks"][0]["name"] = "foo"
config["hooks"][1]["name"] = "bar"
hm = hooks.HookManager("foo", config)
hm.load_hooks(context)
self.assertEqual(len(hm.hooks), 0)
def test_load_missing_name(self):
"""Wrong Hooks are not loaded."""
context = mock.MagicMock()
config = copy.deepcopy(CONFIG)
del config["hooks"][0]["name"]
hm = hooks.HookManager("foo", config)
hm.load_hooks(context)
self.assertEqual(len(hm.hooks), 1)
class HooksManagerCleanupTest(unittest.TestCase):
def setUp(self):
self.hook_manager = hooks.HookManager("foo", copy.deepcopy(CONFIG))
def test_cleanup(self):
"""Cleanup is performed if specified."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
hook = self.hook_manager.hooks[1]
self.hook_manager.cleanup()
self.assertTrue(hook.cleaned)
def test_no_cleanup(self):
"""Cleanup is not performed if not specified."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
hook = self.hook_manager.hooks[0]
self.hook_manager.cleanup()
self.assertFalse(hook.cleaned)
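# Convenience entry point so the suite can be run directly with
# "python hooks_manager_test.py"; test runners discover the classes either way.
if __name__ == '__main__':
    unittest.main()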
| 31.14433 | 83 | 0.64482 | 345 | 3,021 | 5.449275 | 0.182609 | 0.059574 | 0.111702 | 0.074468 | 0.658511 | 0.613298 | 0.516489 | 0.516489 | 0.516489 | 0.516489 | 0 | 0.004297 | 0.229725 | 3,021 | 96 | 84 | 31.46875 | 0.80361 | 0.089043 | 0 | 0.4 | 0 | 0 | 0.070324 | 0.029087 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.171429 | false | 0 | 0.071429 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41fab0cdfc218549b2a3694e2626d8da1755a58e | 10,467 | py | Python | massloss_glacier2latlongrid.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | [
"MIT"
] | 25 | 2019-06-12T21:08:24.000Z | 2022-03-01T08:05:14.000Z | massloss_glacier2latlongrid.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | [
"MIT"
] | 2 | 2020-04-23T14:08:00.000Z | 2020-06-04T13:52:44.000Z | massloss_glacier2latlongrid.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | [
"MIT"
] | 24 | 2019-06-12T19:48:40.000Z | 2022-02-16T03:42:53.000Z | """ Aggregate modeled glacier mass change onto a lat/lon degree grid. """
# Built-in libraries
from collections import OrderedDict
import datetime
import glob
import os
import pickle
# External libraries
import cartopy
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.pyplot import MaxNLocator
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import EngFormatter
from matplotlib.ticker import StrMethodFormatter
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
import numpy as np
import pandas as pd
from scipy.stats import linregress
from scipy.ndimage import uniform_filter
import scipy
#from scipy import stats
#from scipy.stats.kde import gaussian_kde
#from scipy.stats import norm
#from scipy.stats import truncnorm
#from scipy.stats import uniform
#from scipy.stats import linregress
#from scipy.stats import lognorm
#from scipy.optimize import minimize
import xarray as xr
# Local libraries
import class_climate
import class_mbdata
import pygem.pygem_input as pygem_prms
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_massbalance as massbalance
import pygemfxns_modelsetup as modelsetup
import run_calibration as calibration
option_mass_bydeg = 1
#%% ===== Input data =====
netcdf_fp_cmip5 = '/Volumes/LaCie/HMA_PyGEM/2019_0914/'
regions = [13, 14, 15]
# GCMs and RCP scenarios
#gcm_names = ['bcc-csm1-1', 'CanESM2', 'CESM1-CAM5', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'FGOALS-g2', 'GFDL-CM3',
# 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-R', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR', 'MIROC-ESM',
# 'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M', 'NorESM1-ME']
rcps = ['rcp26', 'rcp45', 'rcp60', 'rcp85']
# Grouping
grouping = 'degree'
degree_size = 0.1
#%% ===== FUNCTIONS =====
def pickle_data(fn, data):
"""Pickle data
Parameters
----------
fn : str
filename including filepath
data : list, etc.
data to be pickled
Returns
-------
.pkl file
saves .pkl file of the data
"""
with open(fn, 'wb') as f:
pickle.dump(data, f)
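# Usage sketch (hypothetical path): pickle_data('/tmp/groups.pkl', [13, 14, 15])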
def select_groups(grouping, main_glac_rgi_all):
"""
Select groups based on grouping
"""
if grouping == 'degree':
groups = main_glac_rgi_all.deg_id.unique().tolist()
group_cn = 'deg_id'
try:
groups = sorted(groups, key=str.lower)
except:
groups = sorted(groups)
return groups, group_cn
def load_glacier_data(glac_no=None, rgi_regionsO1=None, rgi_regionsO2='all', rgi_glac_number='all',
load_caldata=0, startyear=2000, endyear=2018, option_wateryear=3):
"""
Load glacier data (main_glac_rgi, hyps, and ice thickness)
"""
# Load glaciers
main_glac_rgi_all = modelsetup.selectglaciersrgitable(
rgi_regionsO1=rgi_regionsO1, rgi_regionsO2 =rgi_regionsO2, rgi_glac_number=rgi_glac_number,
glac_no=glac_no)
# Glacier hypsometry [km**2], total area
main_glac_hyps_all = modelsetup.import_Husstable(main_glac_rgi_all, pygem_prms.hyps_filepath, pygem_prms.hyps_filedict,
pygem_prms.hyps_colsdrop)
# Ice thickness [m], average
main_glac_icethickness_all = modelsetup.import_Husstable(main_glac_rgi_all, pygem_prms.thickness_filepath,
pygem_prms.thickness_filedict, pygem_prms.thickness_colsdrop)
# Additional processing
main_glac_hyps_all[main_glac_icethickness_all == 0] = 0
main_glac_hyps_all = main_glac_hyps_all.fillna(0)
main_glac_icethickness_all = main_glac_icethickness_all.fillna(0)
# Add degree groups to main_glac_rgi_all
# Degrees
main_glac_rgi_all['CenLon_round'] = np.floor(main_glac_rgi_all.CenLon.values/degree_size) * degree_size
main_glac_rgi_all['CenLat_round'] = np.floor(main_glac_rgi_all.CenLat.values/degree_size) * degree_size
deg_groups = main_glac_rgi_all.groupby(['CenLon_round', 'CenLat_round']).size().index.values.tolist()
deg_dict = dict(zip(deg_groups, np.arange(0,len(deg_groups))))
main_glac_rgi_all.reset_index(drop=True, inplace=True)
cenlon_cenlat = [(main_glac_rgi_all.loc[x,'CenLon_round'], main_glac_rgi_all.loc[x,'CenLat_round'])
for x in range(len(main_glac_rgi_all))]
main_glac_rgi_all['CenLon_CenLat'] = cenlon_cenlat
main_glac_rgi_all['deg_id'] = main_glac_rgi_all.CenLon_CenLat.map(deg_dict)
if load_caldata == 1:
cal_datasets = ['shean']
startyear=2000
dates_table = modelsetup.datesmodelrun(startyear=startyear, endyear=endyear, spinupyears=0,
option_wateryear=option_wateryear)
# Calibration data
cal_data_all = pd.DataFrame()
for dataset in cal_datasets:
cal_subset = class_mbdata.MBData(name=dataset)
cal_subset_data = cal_subset.retrieve_mb(main_glac_rgi_all, main_glac_hyps_all, dates_table)
cal_data_all = cal_data_all.append(cal_subset_data, ignore_index=True)
cal_data_all = cal_data_all.sort_values(['glacno', 't1_idx'])
cal_data_all.reset_index(drop=True, inplace=True)
if load_caldata == 0:
return main_glac_rgi_all, main_glac_hyps_all, main_glac_icethickness_all
else:
return main_glac_rgi_all, main_glac_hyps_all, main_glac_icethickness_all, cal_data_all
#%%
# ===== Time series of glacier mass grouped by degree ======
if option_mass_bydeg == 1:
startyear = 2000
endyear = 2100
# Load glaciers
main_glac_rgi, main_glac_hyps, main_glac_icethickness = load_glacier_data(rgi_regionsO1=regions)
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi)
#%%
# Glacier and grouped annual specific mass balance and mass change
ds_multi = {}
for rcp in rcps:
# for rcp in ['rcp85']:
for region in regions:
# Load datasets
ds_fn = 'R' + str(region) + '_multimodel_' + rcp + '_c2_ba1_100sets_2000_2100.nc'
print(ds_fn)
ds = xr.open_dataset(netcdf_fp_cmip5 + ds_fn)
df = pd.DataFrame(ds.glacier_table.values, columns=ds.glac_attrs)
df['RGIId'] = ['RGI60-' + str(int(df.O1Region.values[x])) + '.' +
str(int(df.glacno.values[x])).zfill(5) for x in df.index.values]
# Extract time variable
time_values_annual = ds.coords['year_plus1'].values
time_values_monthly = ds.coords['time'].values
# Convert mass balance to monthly mass change
mb_monthly = ds['massbaltotal_glac_monthly'].values[:,:,0]
area_annual = ds.area_glac_annual[:,:,0].values
area_monthly = area_annual[:,0:-1].repeat(12,axis=1)
masschg_monthly_Gt_raw = mb_monthly / 1000 * area_monthly
masschg_annual_Gt_raw = (masschg_monthly_Gt_raw.reshape(-1,12).sum(1)
.reshape(masschg_monthly_Gt_raw.shape[0], int(masschg_monthly_Gt_raw.shape[1]/12)))
vol_annual_Gt = ds['volume_glac_annual'].values[:,:,0] * pygem_prms.density_ice / pygem_prms.density_water
volchg_annual_Gt = vol_annual_Gt[:,1:] - vol_annual_Gt[:,0:-1]
masschg_adjustment = masschg_annual_Gt_raw[:,0] / volchg_annual_Gt[:,0]
# Correction factor to ensure propagation of mean mass balance * area doesn't cause different annual volume
# change compared to the mean annual volume change
correction_factor_annual = np.zeros(volchg_annual_Gt.shape)
correction_factor_annual[np.nonzero(volchg_annual_Gt)] = (
volchg_annual_Gt[np.nonzero(volchg_annual_Gt)] / masschg_annual_Gt_raw[np.nonzero(volchg_annual_Gt)]
)
correction_factor_monthly = correction_factor_annual.repeat(12,axis=1)
masschg_monthly_Gt = masschg_monthly_Gt_raw * correction_factor_monthly
masschg_monthly_Gt_cumsum = np.cumsum(masschg_monthly_Gt, axis=1)
mass_monthly_Gt = vol_annual_Gt[:,0][:,np.newaxis] + masschg_monthly_Gt_cumsum
mass_monthly_Gt[mass_monthly_Gt < 0] = 0
if region == regions[0]:
ds_multi[rcp] = mass_monthly_Gt
df_all = df
else:
ds_multi[rcp] = np.concatenate((ds_multi[rcp], mass_monthly_Gt), axis=0)
df_all = pd.concat([df_all, df], axis=0)
ds.close()
# Remove RGIIds from main_glac_rgi that are not in the model runs
rgiid_df = list(df_all.RGIId.values)
rgiid_all = list(main_glac_rgi.RGIId.values)
rgi_idx = [rgiid_all.index(x) for x in rgiid_df]
main_glac_rgi = main_glac_rgi.loc[rgi_idx,:]
main_glac_rgi.reset_index(inplace=True, drop=True)
deg_dict = dict(zip(main_glac_rgi['deg_id'].values, main_glac_rgi['CenLon_CenLat']))
ds_deg = {}
for rcp in rcps:
# for rcp in ['rcp85']:
ds_deg[rcp] = {}
deg_groups_ordered = []
mass_deg_output = pd.DataFrame(np.zeros((len(deg_dict), mass_monthly_Gt.shape[1])),
columns=time_values_monthly)
for ngroup, group in enumerate(groups):
deg_group_rounded = (np.round(deg_dict[group][0],1), np.round(deg_dict[group][1],1))
deg_groups_ordered.append(deg_group_rounded)
if ngroup%500 == 0:
print(group, deg_group_rounded)
# Sum volume change for group
group_glac_indices = main_glac_rgi.loc[main_glac_rgi[group_cn] == group].index.values.tolist()
vn_group = ds_multi[rcp][group_glac_indices,:].sum(axis=0)
mass_deg_output.loc[ngroup, :] = vn_group
mass_deg_output.index = deg_groups_ordered
mass_deg_output_fn = (('mass_Gt_monthly_' + rcp + '_' + str(np.round(degree_size,2)) + 'deg').replace('.','p')
+ '.csv')
mass_deg_output.to_csv(pygem_prms.output_filepath + mass_deg_output_fn)
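        # With degree_size = 0.1 and rcp = 'rcp26', the file written above is
        # 'mass_Gt_monthly_rcp26_0p1deg.csv' (one row per occupied degree cell).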
| 41.868 | 123 | 0.651572 | 1,384 | 10,467 | 4.612717 | 0.24711 | 0.06015 | 0.056861 | 0.046053 | 0.218358 | 0.160244 | 0.091949 | 0.052475 | 0.034148 | 0.034148 | 0 | 0.020214 | 0.248495 | 10,467 | 249 | 124 | 42.036145 | 0.791381 | 0.166619 | 0 | 0.040541 | 0 | 0 | 0.040051 | 0.010246 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02027 | false | 0 | 0.202703 | 0 | 0.243243 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5100b17784b04cdb6c2f4076aaca2a9b3a839c93 | 56,140 | py | Python | entry/main.py | way864/BattleTracker | 7204d613165b1c461ee301e5078cd4e2b7a072c4 | [
"MIT"
] | null | null | null | entry/main.py | way864/BattleTracker | 7204d613165b1c461ee301e5078cd4e2b7a072c4 | [
"MIT"
] | null | null | null | entry/main.py | way864/BattleTracker | 7204d613165b1c461ee301e5078cd4e2b7a072c4 | [
"MIT"
] | null | null | null | import math
import random
import json
import copy
from tkinter.constants import COMMAND
from zipfile import ZipFile
import PIL.Image
from PIL import ImageTk
import tkinter as tk
from tkinter import ttk, font, messagebox
from ttkthemes import ThemedStyle
from tooltip import *
from event_manager import EventManager
from calc import Calculator
from stat_collector import StatCollector
from quotes import Quote
from target import Target
from condition_info import InfoClass
from dice import DiceRoller
from player_window import PlayerWin
from starter import *
from undo_redo import ActionStack
from light_menu import *
from object_builder import ObjectBuilder
map_win = tk.Tk()
map_win.overrideredirect(1)
map_win.withdraw()
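# The Tk root starts undecorated and hidden; BattleMap (below) presumably
# receives it as `root` and re-enables decorations in main_window().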
class BattleMap():
def __init__(self, root):
self.root = root
self.reg_font = ('Papyrus', '14')
self.small_font = ("Papyrus", "9")
self.big_font = ("Papyrus", "16")
self.start_win = StartWindow(self.root)
self.start_win.btn_new_file.config(command=lambda: self.start_up_seq('new'))
self.start_win.btn_open_existing.config(command=lambda: self.start_up_seq('open'))
self.start_win.win_start.protocol('WM_DELETE_WINDOW', lambda: self.root.destroy())
def start_up_seq(self, opt):
if opt == 'new':
self.start_win.new_file()
self.start_win.btn_start_game.config(command=lambda: self.new_game_btns('start'))
self.start_win.btn_cancel.config(command=lambda: self.new_game_btns('cancel'))
elif opt == 'open':
open_complete = self.start_win.open_file()
if open_complete:
self.start_win.win_start.destroy()
self.main_window()
def new_game_btns(self, btn):
if btn == 'start':
start_complete = self.start_win.start_new_battle()
if start_complete:
self.start_win.win_start.destroy()
self.main_window()
elif btn == 'cancel':
self.start_win.game_start_win.destroy()
def main_window(self):
self.root.overrideredirect(0)
game_title = self.root.game_name
if len(game_title) > 32:
game_title = game_title[0:32] + "..."
self.root.title(f"Battle Map | {game_title}")
style = ThemedStyle(self.root)
style.theme_use("equilux")
bg = style.lookup('TLabel', 'background')
fg = style.lookup('TLabel', 'foreground')
self.root.configure(bg=style.lookup('TLabel', 'background'))
# Window definition
with ZipFile(self.root.filename, 'r') as savefile:
battle_bytes = savefile.read('battle_info.json')
battle_obj = json.loads(battle_bytes.decode('utf-8'))
self.map_size = battle_obj['map_size']
self.round = battle_obj['round']
self.turn = battle_obj['turn']
self.top_frame = ttk.Frame(master=self.root, borderwidth=2, relief='ridge')
self.top_frame.pack(side='top', fill='both')
self.top_frame.columnconfigure(1, weight=1)
self.top_frame.rowconfigure(0, minsize=100, weight=1)
self.quote_frame = ttk.Frame(master=self.root)
self.quote_frame.pack(side='top', fill='x')
self.quote_frame.columnconfigure(0, minsize=20)
self.bottom_frame = ttk.Frame(master=self.root, borderwidth=2, relief='ridge')
self.bottom_frame.pack(side='top', fill='both', expand=True)
self.bottom_frame.columnconfigure(0, minsize=100)
self.bottom_frame.columnconfigure(1, weight=1, minsize=500)
self.bottom_frame.columnconfigure(2, minsize=150)
self.bottom_frame.columnconfigure(3, minsize=50)
self.bottom_frame.rowconfigure(0, weight=1, minsize=350)
self.controller_frame = ttk.Frame(master=self.root)
self.controller_frame.pack(side='top', fill='x')
self.em = EventManager(self.root)
self.calculator = Calculator(self.root)
self.quoter = Quote()
self.count_quotes = 0
self.target = Target(self.root)
self.info = InfoClass(self.root)
self.dice_roll = DiceRoller(self.root)
self.copy_win = PlayerWin(self.root, self.map_size, game_title)
self.go_back = ActionStack(self.root)
# Board Setup
lbl_map = ttk.Label(master=self.top_frame, text=game_title, font=('Papyrus', '16'))
lbl_map.grid(row=0, column=1)
btn_player_win = ttk.Button(master=self.top_frame, command=self.open_for_players, text="Player Window")
btn_player_win.grid(row=0, column=2, sticky='se')
btn_save = ttk.Button(master=self.top_frame, command=self.save_game, text="Save")
btn_save.grid(row=0, column=3, sticky='se')
btn_clear = ttk.Button(master=self.top_frame, command=self.clear_map, text="Clear Map")
btn_clear.grid(row=0, column=4, sticky='se')
btn_input = ttk.Button(master=self.top_frame, command=self.input_creature_window, text="New Creature")
btn_input.grid(row=0, column=5, sticky='se')
btn_obj = ttk.Button(master=self.top_frame, command=self.input_object_window, text="New Object")
btn_obj.grid(row=0, column=6, sticky='se')
btn_reset = ttk.Button(master=self.top_frame, command=lambda: self.refresh_map(reset=True), text="Reset Map")
btn_reset.grid(row=0, column=7, sticky='se')
btn_restart = ttk.Button(master=self.top_frame, command=self.full_reset, text="Reset Battle")
btn_restart.grid(row=0, column=8, sticky='se')
btn_close_all = ttk.Button(master=self.top_frame, command=self.root.destroy, text="Close All")
btn_close_all.grid(row=0, column=9, sticky='se')
self.lbl_quote = ttk.Label(master=self.quote_frame, text="", font=self.reg_font)
self.lbl_quote.grid(row=0, column=0, sticky='w', pady=5)
self.find_quote()
self.side_board = ttk.Frame(master=self.bottom_frame)
self.side_board.grid(row=0, column=0, padx=5, pady=10, sticky="nw")
self.side_count = 0
canvas_frame = ttk.Frame(master=self.bottom_frame, borderwidth=2, relief='ridge')
self.grid_canvas = tk.Canvas(master=canvas_frame, bg='gray28', bd=0, highlightthickness=0)
grid_scroll_vert = ttk.Scrollbar(master=canvas_frame, command=self.grid_canvas.yview)
grid_scroll_horz = ttk.Scrollbar(master=self.bottom_frame, orient='horizontal', command=self.grid_canvas.xview)
self.grid_frame = ttk.Frame(master=self.grid_canvas)
canvas_frame.grid(row=0, column=1, sticky="nsew")
self.grid_canvas.pack(side='left', fill='both', expand=True)
self.grid_canvas.config(yscrollcommand=grid_scroll_vert.set, xscrollcommand=grid_scroll_horz.set)
grid_scroll_vert.pack(side='right', fill='y')
grid_scroll_horz.grid(row=1, column=1, sticky='ew')
self.grid_canvas.create_window((4,4), window=self.grid_frame, anchor='nw', tags='self.grid_frame')
self.grid_frame.bind("<Configure>", self._on_config)
self.grid_canvas.bind('<Enter>', self._on_enter_canvas)
self.grid_canvas.bind('<Leave>', self._on_leave_canvas)
self.grid_frame.lower()
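        # Scrollable-frame idiom: grid_frame lives inside a canvas window
        # item; _on_config presumably resets the canvas scrollregion whenever
        # the frame is resized so the scrollbars track its contents.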
self.round_bar = ttk.Frame(master=self.bottom_frame)
self.tool_bar = ttk.Frame(master=self.bottom_frame)
self.round_bar.grid(row=0, column=2, padx=5, pady=10, sticky="nw")
self.tool_bar.grid(row=0, column=3, padx=5, pady=10, sticky="nw")
# Image paths
undo_icon_path = "entry\\bin\\red_undo.png"
undo_icon = ImageTk.PhotoImage(image=PIL.Image.open(undo_icon_path).resize((20,20)))
redo_icon_path = "entry\\bin\\red_redo.png"
redo_icon = ImageTk.PhotoImage(image=PIL.Image.open(redo_icon_path).resize((20,20)))
move_icon_path = "entry\\bin\\red_icons8-circled-down-left-32.png"
move_icon = ImageTk.PhotoImage(image=PIL.Image.open(move_icon_path).resize((20,20)))
trig_icon_path = "entry\\bin\\red_trig.png"
trig_icon = ImageTk.PhotoImage(image=PIL.Image.open(trig_icon_path).resize((20,20)))
target_icon_path = "entry\\bin\\red_target.png"
target_icon = ImageTk.PhotoImage(image=PIL.Image.open(target_icon_path).resize((20,20)))
cond_info_icon_path = "entry\\bin\\red_page_icon.png"
cond_info_icon = ImageTk.PhotoImage(image=PIL.Image.open(cond_info_icon_path).resize((20,20)))
turn_icon_path = "entry\\bin\\swords.png"
self.turn_icon = ImageTk.PhotoImage(image=PIL.Image.open(turn_icon_path).resize((20,20)))
d20_icon_path = "entry\\bin\\red_role-playing.png"
d20_icon = ImageTk.PhotoImage(image=PIL.Image.open(d20_icon_path).resize((20,20)))
highlight_path = "entry\\bin\\highlight.png"
highlight_img = ImageTk.PhotoImage(image=PIL.Image.open(highlight_path).resize((20,20)))
ally_path = "entry\\bin\\ally_token.png"
self.ally_img = ImageTk.PhotoImage(image=PIL.Image.open(ally_path).resize((27,27)))
enemy_path = "entry\\bin\\enemy_token.png"
self.enemy_img = ImageTk.PhotoImage(image=PIL.Image.open(enemy_path).resize((27,27)))
bystander_path = "entry\\bin\\bystander_token.png"
self.bystander_img = ImageTk.PhotoImage(image=PIL.Image.open(bystander_path).resize((27,27)))
dead_path = "entry\\bin\\dead_token.png"
self.dead_img = ImageTk.PhotoImage(image=PIL.Image.open(dead_path).resize((27,27)))
up_btn_path = "entry\\bin\\up_button.png"
down_btn_path = "entry\\bin\\down_button.png"
left_btn_path = "entry\\bin\\left_button.png"
right_btn_path = "entry\\bin\\right_button.png"
nw_btn_path = "entry\\bin\\nw_button.png"
ne_btn_path = "entry\\bin\\ne_button.png"
sw_btn_path = "entry\\bin\\sw_button.png"
se_btn_path = "entry\\bin\\se_button.png"
z_up_btn_path = "entry\\bin\\z_up.png"
undo_move_path = "entry\\bin\\undo_move.png"
z_down_btn_path = "entry\\bin\\z_down.png"
self.up_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(up_btn_path).resize((40,40)))
self.down_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(down_btn_path).resize((40,40)))
self.left_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(left_btn_path).resize((40,40)))
self.right_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(right_btn_path).resize((40,40)))
self.nw_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(nw_btn_path).resize((40,40)))
self.ne_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(ne_btn_path).resize((40,40)))
self.sw_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(sw_btn_path).resize((40,40)))
self.se_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(se_btn_path).resize((40,40)))
self.z_up_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(z_up_btn_path).resize((40,40)))
self.undo_move_img = ImageTk.PhotoImage(image=PIL.Image.open(undo_move_path).resize((40,40)))
self.z_down_btn_img = ImageTk.PhotoImage(image=PIL.Image.open(z_down_btn_path).resize((40,40)))
self.map_frames = []
self.root.token_list = []
self.root.obj_list = []
self.root.copy_win_open = False
self.root.light_params = {}
# Grid labels
for col_spot in range(self.map_size[1]):
lbl_grid_top = ttk.Label(master=self.grid_frame, text=col_spot+1, font=self.small_font)
lbl_grid_top.grid(row=0, column=col_spot+1)
self.grid_frame.columnconfigure(col_spot+1, minsize=33)#, weight=1)
for row_spot in range(self.map_size[0]):
lbl_grid_side = ttk.Label(master=self.grid_frame, text=row_spot+1, font=self.small_font)
lbl_grid_side.grid(row=row_spot+1, column=0)
self.grid_frame.rowconfigure(row_spot+1, minsize=33)#, weight=1)
self.grid_frame.columnconfigure(0, minsize=33)#, weight=1)
self.grid_frame.rowconfigure(0, minsize=33)#, weight=1)
# Space frames
self.token_labels = []
for i in range(self.map_size[0]):
self.map_frames.append([])
self.token_labels.append([])
for j in range(self.map_size[1]):
self.space = tk.Frame(master=self.grid_frame, relief=tk.RAISED, borderwidth=1, bg='gray28')
self.space.grid(row=i+1, column=j+1, sticky='nsew')
self.space.coord = (j, i)
CreateToolTip(self.space, text=f"{i+1}, {j+1}")
self.map_frames[i].append(self.space)
self.token_labels[i].append(None)
self.initialize()
go_back_frame = ttk.Frame(master=self.top_frame)
go_back_frame.grid(row=0, column=0, sticky='nw')
self.btn_undo = tk.Button(master=go_back_frame, command=lambda: self.time_travel(True), image=undo_icon, bd=0, bg='gray28', activebackground='gray28')
self.btn_undo.grid(row=0, column=0, padx=5, pady=5, sticky='nw')
self.btn_undo.image = undo_icon
self.btn_undo['state'] = 'disabled'
self.btn_redo = tk.Button(master=go_back_frame, command=lambda: self.time_travel(False), image=redo_icon, bd=0, bg='gray28', activebackground='gray28')
self.btn_redo.grid(row=0, column=1, padx=5, pady=5, sticky='nw')
self.btn_redo.image = redo_icon
self.btn_redo['state'] = 'disabled'
# Round bar
lbl_round_title = ttk.Label(master=self.round_bar, text="Round: ", font=self.big_font)
lbl_round_title.grid(row=0, column=0, sticky='e')
if self.round == 0:
tmp_round = "S"
else:
tmp_round = self.round
self.lbl_round = ttk.Label(master=self.round_bar, text=tmp_round, font=self.big_font, borderwidth=1, relief=tk.RAISED, width=3, anchor=tk.CENTER)
self.lbl_round.grid(row=0, column=1, sticky='w')
self.initiative_frame = ttk.Frame(master=self.round_bar)
self.initiative_frame.grid(row=1, column=0, columnspan=2, sticky='ew')
self.initiative_frame.columnconfigure([0,1], weight=1)
btn_next_turn = ttk.Button(master=self.round_bar, text="Turn Complete", command=self.next_turn, width=18)
btn_next_turn.grid(row=2, column=0, columnspan=2)
btn_next_round = ttk.Button(master=self.round_bar, text="Round Complete", command=self.next_round, width=18)
btn_next_round.grid(row=3, column=0, columnspan=2)
btn_reset_rounds = ttk.Button(master=self.round_bar, text="Reset Rounds", command=self.reset_round, width=18)
btn_reset_rounds.grid(row=4, column=0, columnspan=2)
# Tool bar Buttons
self.btn_move = ttk.Button(master=self.tool_bar, command=self.move_token, image=move_icon)
self.btn_move.grid(row=0, column=0, sticky="n")
self.btn_move.image = move_icon
CreateToolTip(self.btn_move, text="Move Token", left_disp=True)
self.btn_trig = ttk.Button(master=self.tool_bar, command=self.open_trig, image=trig_icon)
self.btn_trig.grid(row=1, column=0, sticky='n')
self.btn_trig.image = trig_icon
CreateToolTip(self.btn_trig, text="Distance", left_disp=True)
self.btn_target = ttk.Button(master=self.tool_bar, command=self.target_item,image=target_icon)
self.btn_target.grid(row=2, column=0, sticky='n')
self.btn_target.image = target_icon
CreateToolTip(self.btn_target, text="Target", left_disp=True)
self.btn_cond_info = ttk.Button(master=self.tool_bar, command=self.show_cond_info, image=cond_info_icon)
self.btn_cond_info.grid(row=3, column=0, sticky='n')
self.btn_cond_info.image = cond_info_icon
CreateToolTip(self.btn_cond_info, text="Condition Info", left_disp=True)
self.btn_dice_roller = ttk.Button(master=self.tool_bar, command=self.open_dice_roller, image=d20_icon)
self.btn_dice_roller.grid(row=4, column=0, sticky='n')
self.btn_dice_roller.image = d20_icon
CreateToolTip(self.btn_dice_roller, text="Dice Roller", left_disp=True)
self.btn_field_light = ttk.Button(master=self.tool_bar, command=self.field_light, image=highlight_img)
self.btn_field_light.grid(row=5, column=0, sticky='n')
self.btn_field_light.image = highlight_img
CreateToolTip(self.btn_field_light, text="Field Highlight", left_disp=True)
        # Controller Pane
self.controller_frame.columnconfigure(0, weight=1)
dpad_frame = ttk.Frame(master=self.controller_frame)
dpad_frame.grid(row=0, column=0, rowspan=4, sticky='w', padx=100)
btn_nw = tk.Button(master=dpad_frame, image=self.nw_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.dpad_move('nw'))
btn_nw.grid(row=0, column=0)
btn_nw.image = self.nw_btn_img
btn_nw.name = 'nw'
btn_up = tk.Button(master=dpad_frame, image=self.up_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.dpad_move('n'))
btn_up.grid(row=0, column=1)
btn_up.image = self.up_btn_img
btn_up.name = 'up'
btn_ne = tk.Button(master=dpad_frame, image=self.ne_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.dpad_move('ne'))
btn_ne.grid(row=0, column=2)
btn_ne.image = self.ne_btn_img
btn_ne.name = 'ne'
btn_left = tk.Button(master=dpad_frame, image=self.left_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.dpad_move('w'))
btn_left.grid(row=1, column=0)
btn_left.image = self.left_btn_img
btn_left.name = 'w'
btn_right = tk.Button(master=dpad_frame, image=self.right_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.dpad_move('e'))
btn_right.grid(row=1, column=2)
btn_right.image = self.right_btn_img
btn_right.name = 'e'
btn_sw = tk.Button(master=dpad_frame, image=self.sw_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.dpad_move('sw'))
btn_sw.grid(row=2, column=0)
btn_sw.image = self.sw_btn_img
btn_sw.name = 'sw'
btn_down = tk.Button(master=dpad_frame, image=self.down_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.dpad_move('s'))
btn_down.grid(row=2, column=1)
btn_down.image = self.down_btn_img
btn_down.name = 's'
btn_se = tk.Button(master=dpad_frame, image=self.se_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.dpad_move('se'))
btn_se.grid(row=2, column=2)
btn_se.image = self.se_btn_img
btn_se.name = 'se'
btn_z_up = tk.Button(master=dpad_frame, image=self.z_up_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.zpad('+'))
btn_z_up.grid(row=0, column=3, padx=10)
btn_z_up.image = self.z_up_btn_img
btn_undo_move = tk.Button(master=dpad_frame, image=self.undo_move_img, bg='gray28', bd=0, activebackground='gray28', command=self.undo_move)
btn_undo_move.grid(row=1, column=3, padx=10)
btn_undo_move.image = self.undo_move_img
btn_undo_move.name = 'undom'
btn_z_down = tk.Button(master=dpad_frame, image=self.z_down_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.zpad('-'))
btn_z_down.grid(row=2, column=3, padx=10)
btn_z_down.image = self.z_down_btn_img
self.z_frame = tk.Frame(master=dpad_frame, bg='gray28')
self.z_frame.grid(row=1, column=1, sticky='nsew')
self.z_frame.name = 'zf'
cont_btn_frame = ttk.Frame(master=self.controller_frame)
cont_btn_frame.grid(row=0, column=1, rowspan=4, sticky='e', padx=20)
btn_turn_complete = ttk.Button(master=cont_btn_frame, text="Turn Complete", command=self.next_turn, width=19)
btn_turn_complete.grid(row=0, column=0, columnspan=2)
self.cont_targets = ttk.Combobox(master=cont_btn_frame, width=18, state='readonly')
self.cont_targets.grid(row=1, column=0, columnspan=2)
self.cont_targets.bind("<<ComboboxSelected>>", self._on_select_target)
self.target_names = []
self.ent_target_delta = ttk.Entry(master=cont_btn_frame, width=20)
self.ent_target_delta.grid(row=2, column=0, columnspan=2, pady=5)
self.ent_target_delta.insert(0, '0')
self.ent_target_delta.bind("<FocusIn>", lambda e: self._on_delta_focus(event=e, typ='in'))
self.ent_target_delta.bind("<FocusOut>", lambda e: self._on_delta_focus(event=e, typ='out'))
btn_heal = ttk.Button(master=cont_btn_frame, text='Heal', command=lambda: self.target_hp('heal'), width=8)
btn_heal.grid(row=3, column=0, padx=5, pady=5)
btn_dmg = ttk.Button(master=cont_btn_frame, text='Damage', command=lambda: self.target_hp('dmg'), width=8)
btn_dmg.grid(row=3, column=1, padx=5, pady=5)
lbl_ac = ttk.Label(master=cont_btn_frame, text="AC: ", font=self.reg_font)
lbl_ac.grid(row=0, column=2, sticky='w', pady=5)
lbl_max_hp = ttk.Label(master=cont_btn_frame, text="Max HP: ", font=self.reg_font)
lbl_max_hp.grid(row=1, column=2, sticky='w', pady=5)
lbl_curr_hp = ttk.Label(master=cont_btn_frame, text="Current HP: ", font=self.reg_font)
lbl_curr_hp.grid(row=2, column=2, sticky='w', pady=5)
lbl_temp_hp = ttk.Label(master=cont_btn_frame, text="Temp HP: ", font=self.reg_font)
lbl_temp_hp.grid(row=3, column=2, sticky='w', pady=5)
self.lbl_target_ac = ttk.Label(master=cont_btn_frame, text="", font=self.reg_font)
self.lbl_target_ac.grid(row=0, column=3, sticky='w', padx=5, pady=5)
self.lbl_target_max_hp = ttk.Label(master=cont_btn_frame, text="", font=self.reg_font)
self.lbl_target_max_hp.grid(row=1, column=3, sticky='w', padx=5, pady=5)
self.lbl_target_hp = ttk.Label(master=cont_btn_frame, text="", font=self.reg_font)
self.lbl_target_hp.grid(row=2, column=3, sticky='w', padx=5, pady=5)
self.lbl_target_temp_hp = ttk.Label(master=cont_btn_frame, text="", font=self.reg_font)
self.lbl_target_temp_hp.grid(row=3, column=3, sticky='w', padx=5, pady=5)
lbl_title_turn = ttk.Label(master=self.controller_frame, text="Current Turn", font=self.big_font)
lbl_title_turn.grid(row=0, column=2, sticky='e', padx=20)
self.lbl_current_turn = tk.Label(master=self.controller_frame, text="", font=self.reg_font, bg='gray28')
self.lbl_current_turn.grid(row=1, column=2, sticky='e', padx=20)
lbl_title_pos = ttk.Label(master=self.controller_frame, text="Position", font=self.big_font)
lbl_title_pos.grid(row=2, column=2, sticky='e', padx=20)
self.lbl_position = ttk.Label(master=self.controller_frame, text="", font=self.reg_font)
self.lbl_position.grid(row=3, column=2, sticky='e', padx=20)
lbl_max_move_title = ttk.Label(master=self.controller_frame, text="Movement Speed", font=self.big_font)
lbl_max_move_title.grid(row=0, column=3, sticky='e', padx=20)
self.lbl_max_move = tk.Label(master=self.controller_frame, text="", font=self.reg_font, bg='gray28', fg='gray70')
self.lbl_max_move.grid(row=1, column=3, sticky='e', padx=20)
lbl_amount_move_title = ttk.Label(master=self.controller_frame, text="Feet Moved", font=self.big_font)
lbl_amount_move_title.grid(row=2, column=3, sticky='e', padx=20)
self.lbl_amount_moved = tk.Label(master=self.controller_frame, text="", font=self.reg_font, bg='gray28', fg='gray70')
self.lbl_amount_moved.grid(row=3, column=3, sticky='e', padx=20)
self.z_delta = 0
self.root.bind("<Key>", self._on_numpad_keys)
self.controller_frame.bind("<Button-1>", self._on_defocus)
self.place_tokens()
self.root.deiconify()
def _on_config(self, event):
self.grid_canvas.configure(scrollregion=self.grid_canvas.bbox('all'))
def _on_enter_canvas(self, event):
self.grid_canvas.bind_all('<MouseWheel>', self._on_mousewheel)
self.grid_canvas.bind_all('<Shift-MouseWheel>', self._on_shift_mousewheel)
def _on_leave_canvas(self, event):
self.grid_canvas.unbind_all('<MouseWheel>')
self.grid_canvas.unbind_all('<Shift-MouseWheel>')
def _on_mousewheel(self, event):
self.grid_canvas.yview_scroll(int(-1*(event.delta/120)), 'units')
def _on_shift_mousewheel(self, event):
self.grid_canvas.xview_scroll(int(-1*(event.delta/120)), 'units')
    def _on_select_target(self, event):
        # Show the selected combatant's stats; break once found so a missing
        # name cannot leave an unbound variable behind
        for being in self.root.token_list:
            if being['name'] == self.cont_targets.get():
                self.lbl_target_ac.config(text=being['ac'])
                self.lbl_target_max_hp.config(text=being['max_HP'])
                self.lbl_target_hp.config(text=being['current_HP'])
                self.lbl_target_temp_hp.config(text=being['temp_HP'])
                break
def _on_numpad_keys(self, event):
# Controller movements
if event.keysym == '0' or event.keysym == 'Insert':
self.undo_move()
elif event.keysym == '1' or event.keysym == 'End':
self.dpad_move('sw')
elif event.keysym == '2' or event.keysym == 'Down':
self.dpad_move('s')
elif event.keysym == '3' or event.keysym == 'Next':
self.dpad_move('se')
elif event.keysym == '4' or event.keysym == 'Left':
self.dpad_move('w')
elif event.keysym == '5' or event.keysym == 'Clear':
if self.z_delta != 0:
if self.z_delta == 1:
self.dpad_move('+')
elif self.z_delta == -1:
self.dpad_move('-')
elif event.keysym == '6' or event.keysym == 'Right':
self.dpad_move('e')
elif event.keysym == '7' or event.keysym == 'Home':
self.dpad_move('nw')
elif event.keysym == '8' or event.keysym == 'Up':
self.dpad_move('n')
elif event.keysym == '9' or event.keysym == 'Prior':
self.dpad_move('ne')
elif event.keysym == 'minus':
self.zpad('-')
elif event.keysym == 'plus':
self.zpad('+')
elif event.keysym == 'Return':
self.next_turn()
if self.z_delta == 0:
self.z_frame.config(bg='gray28')
self.root.unbind_all("<Button-1>")
def _on_delta_focus(self, event, typ):
if typ == 'in':
self.root.unbind("<Key>")
elif typ == 'out':
self.root.bind("<Key>", self._on_numpad_keys)
def _on_defocus(self, event):
event.widget.focus_set()
def initialize(self):
self.root.token_list = []
self.root.obj_list = []
with ZipFile(self.root.filename, "r") as savefile:
creat_bytes = savefile.read('creatures.json')
creat_str = creat_bytes.decode('utf-8')
creatures = json.loads(creat_str)
obj_bytes = savefile.read('objects.json')
obj_str = obj_bytes.decode('utf-8')
objects = json.loads(obj_str)
for being in creatures.values():
self.root.token_list.append(being)
for thing in objects.values():
self.root.obj_list.append(thing)
def place_tokens(self):
self.initiative_holder = {}
spaces_taken = []
self.target_names = []
for item in self.root.obj_list:
occupied = False
if item["coordinate"][0] != "" and item["coordinate"][1] != "":
row_pos = int(item["coordinate"][1])
col_pos = int(item["coordinate"][0])
self.target_names.append(item['name'])
for space_tuple in spaces_taken:
if space_tuple[0] == row_pos and space_tuple[1] == col_pos and space_tuple[2] == int(item["coordinate"][2]):
occupied = True
                if not occupied:
spaces_taken.append((row_pos, col_pos, int(item["coordinate"][2])))
o_length = item["length"]
o_width = item["width"]
f_len = 5 * round(o_length / 5)
if f_len < 5:
f_len = 5
f_wid = 5 * round(o_width / 5)
if f_wid < 5:
f_wid = 5
o_col = int(f_wid / 5)
o_row = int(f_len / 5)
for x in range(o_col):
col_pos = int(item["coordinate"][0]) + x
for y in range(o_row):
row_pos = int(item["coordinate"][1]) + y
obj_img = ImageTk.PhotoImage(image=PIL.Image.open(item["img_ref"]).resize((30,30)))
lbl_unit = tk.Label(master=self.map_frames[col_pos][row_pos], image=obj_img, bg="gray28", borderwidth=0)
lbl_unit.image = obj_img
lbl_unit.coord = (row_pos, col_pos)
lbl_unit.pack(fill='both', expand=True, padx=2, pady=2)
CreateToolTip(lbl_unit, text=f"{item['name']}: {row_pos}, {col_pos}", left_disp=True)
for being in self.root.token_list:
token_type = being["type"]
if token_type == "ally":
token_img = self.ally_img
elif token_type == "enemy":
token_img = self.enemy_img
elif token_type == "bystander":
token_img = self.bystander_img
elif token_type == "dead":
token_img = self.dead_img
else:
raise NameError("Token type not specified.")
occupied = False
if being["coordinate"][0] != "" and being["coordinate"][1] != "":
row_pos = int(being["coordinate"][1])
col_pos = int(being["coordinate"][0])
self.target_names.append(being['name'])
for space_tuple in spaces_taken:
if space_tuple[0] == row_pos and space_tuple[1] == col_pos and space_tuple[2] == int(being["coordinate"][2]):
occupied = True
                if not occupied:
spaces_taken.append((row_pos, col_pos, int(being["coordinate"][2])))
lbl_unit = tk.Label(master=self.map_frames[col_pos][row_pos], image=token_img, bg="gray28", borderwidth=0)
lbl_unit.image = token_img
lbl_unit.coord = (row_pos, col_pos)
lbl_unit.pack(fill='both', expand=True, padx=2, pady=2)
self.token_labels[col_pos][row_pos] = lbl_unit
CreateToolTip(lbl_unit, text="{0}, {1}".format(being["name"], being["coordinate"][2]), left_disp=True)
if being['initiative'] != math.inf:
self.initiative_holder[being['name']] = being
                    # Big creatures occupy a square footprint:
                    # large = 2x2, huge = 3x3, gargantuan = 4x4
                    if being["size"] in ("large", "huge", "gargantuan"):
if being["size"] == "large":
space_need = 4
elif being["size"] == "huge":
space_need = 9
else:
space_need = 16
row_offset = 0
col_offset = 0
go_to_next_row = math.sqrt(space_need)
for i in range(1, space_need):
if i < space_need:
col_offset += 1
if col_offset == go_to_next_row:
col_offset = 0
row_offset += 1
row_pos = int(being["coordinate"][1]) + row_offset
col_pos = int(being["coordinate"][0]) + col_offset
lbl_unit = tk.Label(master=self.map_frames[col_pos][row_pos], image=token_img, bg="gray28", borderwidth=0)
lbl_unit.image = token_img
lbl_unit.coord = (row_pos, col_pos)
lbl_unit.pack(fill='both', expand=True)
CreateToolTip(lbl_unit, text="{0}, {1}".format(being["name"], being["coordinate"][2]), left_disp=True)
else:
messagebox.showerror("Internal Error", "Restart program\nError 0x006")
return
else:
self.unused_tokens(being, token_img)
self.cont_targets.config(values=self.target_names)
self.refresh_initiatives()
def unused_tokens(self, creature, token_img):
next_row = int(self.side_count / 2)
next_col = self.side_count % 2
lbl_side_unit = tk.Label(master=self.side_board, image=token_img, bg="gray28", borderwidth=0)
lbl_side_unit.grid(row=next_row, column=next_col, padx=5, pady=5, sticky="ne")
#lbl_side_unit.bind("<Button-3>", self.em.right_click_menu)
lbl_side_unit.image = token_img
CreateToolTip(lbl_side_unit, text=creature["name"])
self.side_count += 1
def post_initiatives(self):
        # Sort combatants by initiative, highest first
        init_dict_in_order = {k: v for k, v in sorted(self.initiative_holder.items(), key=lambda item: item[1]['initiative'], reverse=True)}
order_count = 0
lbl_turn_img = tk.Label(master=self.initiative_frame, image=self.turn_icon, bg="gray28", borderwidth=0)
lbl_turn_img.grid(row=self.turn, column=0, sticky='w')
lbl_turn_img.image = self.turn_icon
self.move_path = []
for next_up in init_dict_in_order.items():
if next_up[1]['initiative'] != math.inf and next_up[1]['type'] != 'dead':
lbl_your_turn = ttk.Label(master=self.initiative_frame, text=f"{next_up[0]}: ", font=self.small_font)
lbl_your_turn.grid(row=order_count, column=1, sticky='w')
lbl_your_init = ttk.Label(master=self.initiative_frame, text=next_up[1]['initiative'], font=self.small_font)
lbl_your_init.grid(row=order_count, column=2, sticky='e')
if order_count == self.turn:
self.turn_obj = next_up[1]
curr_pos = (int(self.turn_obj['coordinate'][0]), int(self.turn_obj['coordinate'][1]), int(self.turn_obj['coordinate'][2]))
self.move_path.append(curr_pos)
self.lbl_current_turn.config(text=self.turn_obj['name'])
self.lbl_max_move.config(text=self.turn_obj['speed'])
self.lbl_position.config(text=f"{curr_pos[0]+1}: {curr_pos[1]+1}: {curr_pos[2]}")
self.lbl_amount_moved.config(text="0")
self.map_frames[curr_pos[0]][curr_pos[1]].config(bg='orange3')
if self.turn_obj['status'] == 'PC':
self.lbl_current_turn.config(fg='green3')
elif self.turn_obj['status'] == 'Monster':
self.lbl_current_turn.config(fg='orange3')
else:
self.lbl_current_turn.config(fg='DodgerBlue2')
if self.root.copy_win_open:
if self.turn_obj['status'] != 'PC':
self.copy_win.set_turn_lbl("X")
else:
self.copy_win.set_turn_lbl(self.turn_obj['name'])
order_count += 1
def refresh_initiatives(self):
init_frame_slaves = self.initiative_frame.grid_slaves()
if len(init_frame_slaves):
for item in init_frame_slaves:
item.destroy()
for i in range(len(self.map_frames)):
for frm in self.map_frames[i]:
frm.config(bg='gray28')
self.post_initiatives()
def next_turn(self, not_from_redo=True):
self.lbl_amount_moved.config(bg='gray28')
if not_from_redo:
self.log_action('turn button')
        # Drop combatants whose initiative was never set (stored as math.inf);
        # iterate over a copy of the keys so entries can be deleted safely
        on_board_inits = self.initiative_holder
        for key in list(on_board_inits.keys()):
            if on_board_inits[key]['initiative'] == math.inf:
                del on_board_inits[key]
self.turn += 1
if self.turn > len(self.initiative_holder) - 1:
self.next_round()
else:
for being in self.root.token_list:
if being['name'] == self.turn_obj['name']:
being['coordinate'] = [str(self.move_path[-1][0]), str(self.move_path[-1][1]), str(self.move_path[-1][2])]
if self.root.copy_win_open:
self.copy_win.gray_map()
self.refresh_map()
#self.refresh_initiatives()
def next_round(self, not_from_redo=True):
if not_from_redo:
self.log_action('round button', {'turn': self.turn})
self.round += 1
self.lbl_round.config(text=self.round)
self.turn = 0
for being in self.root.token_list:
if being['name'] == self.turn_obj['name']:
being['coordinate'] = [str(self.move_path[-1][0]), str(self.move_path[-1][1]), str(self.move_path[-1][2])]
if self.root.copy_win_open:
self.copy_win.gray_map()
self.refresh_map()
#self.refresh_initiatives()
def reset_round(self, not_from_redo=True):
if not_from_redo:
restore_round = {
'round': self.round,
'turn': self.turn
}
self.log_action('reset round', restore_round)
self.round = 0
self.lbl_round.config(text="S")
self.turn = 0
self.refresh_map()
#self.refresh_initiatives()
def refresh_map(self, reset=False):
for row in self.map_frames:
for col in row:
remove_tokens = col.pack_slaves()
if len(remove_tokens) > 0:
for token in remove_tokens:
token.destroy()
remove_side_list = self.side_board.grid_slaves()
if len(remove_side_list) > 0:
for side_token in remove_side_list:
side_token.destroy()
self.side_count = 0
if reset:
self.initialize()
self.place_tokens()
if self.root.copy_win_open:
self.copy_win.update_players()
self.refresh_initiatives()
def open_for_players(self):
self.copy_win.start_win()
self.refresh_map()
def save_game(self):
new_token_dict = {}
for being in self.root.token_list:
name = being["name"]
new_token_dict[name] = being
new_object_dict = {}
for obj in self.root.obj_list:
obj_name = obj["name"]
new_object_dict[obj_name] = obj
battle_dict = {
"map_size": self.map_size,
"round": self.round,
"turn": self.turn
}
battleJSON = json.dumps(battle_dict, indent=4)
with ZipFile(self.root.filename, "w") as savefile:
creatJSON = json.dumps(new_token_dict, indent=4)
objJSON = json.dumps(new_object_dict, indent=4)
savefile.writestr('battle_info.json', battleJSON)
savefile.writestr('creatures.json', creatJSON)
savefile.writestr('objects.json', objJSON)
self.go_back.clear_all()
def clear_map(self):
restore_tokens = copy.deepcopy(self.root.token_list)
self.log_action('list', restore_tokens)
for being in self.root.token_list:
being["coordinate"] = ['', '', '']
self.refresh_map()
def dpad_move(self, dir):
last_pos = copy.deepcopy(self.move_path[-1])
if dir == 'n':
curr_pos = (last_pos[0] - 1, last_pos[1], last_pos[2] + self.z_delta)
elif dir == 's':
curr_pos = (last_pos[0] + 1, last_pos[1], last_pos[2] + self.z_delta)
elif dir == 'w':
curr_pos = (last_pos[0], last_pos[1] - 1, last_pos[2] + self.z_delta)
elif dir == 'e':
curr_pos = (last_pos[0], last_pos[1] + 1, last_pos[2] + self.z_delta)
elif dir == 'ne':
curr_pos = (last_pos[0] - 1, last_pos[1] + 1, last_pos[2] + self.z_delta)
elif dir == 'se':
curr_pos = (last_pos[0] + 1, last_pos[1] + 1, last_pos[2] + self.z_delta)
elif dir == 'sw':
curr_pos = (last_pos[0] + 1, last_pos[1] - 1, last_pos[2] + self.z_delta)
elif dir == 'nw':
curr_pos = (last_pos[0] - 1, last_pos[1] - 1, last_pos[2] + self.z_delta)
else:
curr_pos = (last_pos[0], last_pos[1], last_pos[2] + self.z_delta)
if curr_pos[0] < 0 or curr_pos[0] > self.map_size[0] - 1 or curr_pos[1] < 0 or curr_pos[1] > self.map_size[1] - 1:
messagebox.showwarning("BattleTracker", "Cannot move off map.")
return
self.z_delta = 0
        # Footprint in grid squares: large = 2x2, huge = 3x3, gargantuan = 4x4, else 1x1
        space_need = {'large': 4, 'huge': 9, 'gargantuan': 16}.get(self.turn_obj['size'], 1)
next_row_num = math.sqrt(space_need)
row_offset = 0
col_offset = 0
if dir == '+':
for i in range(space_need):
self.map_frames[curr_pos[0] + col_offset][curr_pos[1] + row_offset].config(bg='orange1')
col_offset += 1
if col_offset == next_row_num:
col_offset = 0
row_offset += 1
elif dir == '-':
for i in range(space_need):
self.map_frames[curr_pos[0] + col_offset][curr_pos[1] + row_offset].config(bg='DarkOrange4')
col_offset += 1
if col_offset == next_row_num:
col_offset = 0
row_offset += 1
else:
for i in range(space_need):
self.map_frames[last_pos[0] + col_offset][last_pos[1] + row_offset].config(bg='orange4')
col_offset += 1
if col_offset == next_row_num:
col_offset = 0
row_offset += 1
col_offset = 0
row_offset = 0
for i in range(space_need):
self.map_frames[curr_pos[0] + col_offset][curr_pos[1] + row_offset].config(bg='orange3')
col_offset += 1
if col_offset == next_row_num:
col_offset = 0
row_offset += 1
self.move_path.append(curr_pos)
if self.root.copy_win_open:
self.copy_win.track_moves(self.move_path)
feet_moved = int(self.lbl_amount_moved.cget('text'))
feet_moved += 5
self.lbl_amount_moved.config(text=feet_moved)
self.lbl_position.config(text=f"{curr_pos[0]+1}: {curr_pos[1]+1}: {curr_pos[2]}")
if feet_moved > int(self.lbl_max_move.cget('text')):
self.lbl_amount_moved.config(bg='red4')
else:
self.lbl_amount_moved.config(bg='gray28')
def zpad(self, dir):
if dir == '+':
self.z_delta = 1
else:
self.z_delta = -1
self.z_frame.config(bg='green3')
self.root.bind_all("<Button-1>", self.green_handle)
def green_handle(self, event):
try:
name = event.widget.name
if name == 'zf':
if self.z_delta == 1:
self.dpad_move('+')
elif self.z_delta == -1:
self.dpad_move('-')
        except AttributeError:
            # Click landed on a widget without a .name attribute; ignore it
            pass
self.z_frame.config(bg='gray28')
self.root.unbind_all("<Button-1>")
def target_hp(self, type):
sel_target = self.cont_targets.get()
tgt_delta = self.ent_target_delta.get()
try:
tgt_delta = int(tgt_delta)
if type == 'dmg':
tgt_delta *= -1
except ValueError:
messagebox.showwarning("BattleTracker", "HP difference must be a whole number.")
return
        for being in self.root.token_list:
            if being['name'] == sel_target:
                if type == 'dmg':
                    # Temporary HP absorbs damage first
                    if abs(tgt_delta) > being['temp_HP']:
                        tgt_delta += being['temp_HP']
                        being['temp_HP'] = 0
                    else:
                        being['temp_HP'] += tgt_delta
                        tgt_delta = 0
                being['current_HP'] += tgt_delta
                if being['current_HP'] > being['max_HP']:
                    being['current_HP'] = being['max_HP']
                elif being['current_HP'] <= 0:
                    being['type'] = 'dead'
                break
self._on_select_target(None)
def input_creature_window(self):
self.in_win = StatCollector(self.root, self.map_size, self.round, self.turn)
self.in_win.btn_submit.configure(command=lambda arg=['in_win', 'submit']: self.change_token_list(arg))
def input_object_window(self):
self.obj_win = ObjectBuilder(self.root, self.map_size)
try:
self.obj_win.btn_submit.configure(command=lambda: self.change_obj_list())
except AttributeError:
self.root.destroy()
def change_obj_list(self):
change_complete = self.obj_win.submit()
if change_complete:
self.obj_win.obj_win.destroy()
self.refresh_map()
def change_token_list(self, arg):
origin = arg[0]
select_btn = arg[1]
if origin == 'move_win':
if select_btn == 'set':
old_copy = copy.deepcopy(self.root.token_list)
self.log_action('list', old_copy)
set_complete = self.em.set_new_coord()
if set_complete:
self.em.move_win.destroy()
self.refresh_map()
elif select_btn == 'remove':
old_copy = copy.deepcopy(self.root.token_list)
self.log_action('list', old_copy)
rem_complete = self.em.remove_token()
if rem_complete:
self.em.move_win.destroy()
self.refresh_map()
elif origin == 'target_win':
if select_btn == 'submit':
old_copy = copy.deepcopy(self.root.token_list)
self.log_action('list', old_copy)
submit_complete = self.target.on_submit()
if submit_complete:
self.target.target_win.destroy()
self.refresh_map()
elif select_btn == 'delete':
old_copy = copy.deepcopy(self.root.token_list)
self.log_action('list', old_copy)
delete_complete = self.target.delete_token()
if delete_complete:
self.target.target_win.destroy()
self.refresh_map()
elif origin == 'in_win':
if select_btn == 'submit':
old_copy = copy.deepcopy(self.root.token_list)
self.log_action('list', old_copy)
submit_complete = self.in_win.submit()
if submit_complete:
self.in_win.range_win.destroy()
self.refresh_map()
def move_token(self):
self.em.move_token(self.map_size)
self.em.btn_set.configure(command=lambda arg=['move_win', 'set']: self.change_token_list(arg))
self.em.btn_remove.configure(command=lambda arg=['move_win', 'remove']: self.change_token_list(arg))
#self.wait_destroy_move_win()
def open_trig(self):
self.calculator.trig_win()
def target_item(self):
self.target.target_window()
self.target.btn_submit.configure(command=lambda arg=['target_win', 'submit']: self.change_token_list(arg))
self.target.btn_delete_target.configure(command=lambda arg=['target_win', 'delete']: self.change_token_list(arg))
#self.target.target_win.protocol("WM_DELETE_WINDOW", lambda stuff=(self.target.token_list): self.refresh_map(tokens=stuff, origWin='target'))
def open_dice_roller(self):
self.dice_roll.dice_pane()
def field_light(self):
try:
self.lighter.escape()
except AttributeError:
pass
self.lighter = GenLightWin(self.root, self.btn_field_light)
self.lighter.open_light_win()
self.lighter.btn_confirm.config(command=self.get_offsets)
def get_offsets(self):
self.light_list, self.light_shape = self.lighter.collect()
self.lighter.escape()
if len(self.light_list) == 0:
return
for sect in range(len(self.map_frames)):
for item in self.map_frames[sect]:
item.bind("<Button-1>", self.on_light)
pieces = item.pack_slaves()
for piece in pieces:
piece.bind("<Button-1>", self.on_light)
    def on_light(self, event):
        start = list(event.widget.coord)
        if self.light_shape == 'Square':
            self.map_frames[start[1]][start[0]].config(bg='SpringGreen3')
        # Walk the highlight offsets; use a copy so the walk doesn't mutate start
        curr_pos = list(start)
        for i in range(len(self.light_list)):
            curr_pos[0] += self.light_list[i][0]
            curr_pos[1] += self.light_list[i][1]
            if 0 <= curr_pos[0] < self.map_size[1] and 0 <= curr_pos[1] < self.map_size[0]:
                self.map_frames[curr_pos[1]][curr_pos[0]].config(bg='SpringGreen3')
        # A 'Line' shape excludes its starting square
        if self.light_shape == 'Line':
            self.map_frames[start[1]][start[0]].config(bg='gray28')
        self.root.bind_all("<Escape>", self.clear_light)
        self.root.bind_all("<Button-3>", self.clear_light)
def clear_light(self, event):
for sect in range(len(self.map_frames)):
for item in self.map_frames[sect]:
item.config(bg='gray28')
item.unbind("<Button-1>")
pieces = item.pack_slaves()
for piece in pieces:
piece.unbind("<Button-1>")
self.root.unbind_all("<Escape>")
self.root.unbind_all("<Button-3>")
def full_reset(self):
empty_dict = {}
        make_sure = messagebox.askokcancel("Warning", "Delete ALL tokens and fully reset the map?\nThis action cannot be undone.")
if make_sure:
battle_dict = {
"map_size": self.map_size,
"round": 0,
"turn": 0
}
battleJSON = json.dumps(battle_dict, indent=4)
with ZipFile(self.root.filename, "w") as savefile:
creatJSON = json.dumps(empty_dict)
objJSON = json.dumps(empty_dict)
savefile.writestr('battle_info.json', battleJSON)
savefile.writestr('creatures.json', creatJSON)
savefile.writestr('objects.json', objJSON)
self.refresh_map(reset=True)
self.go_back.clear_all()
def find_quote(self):
last_index = len(self.quoter.quote_list) - 1
rand_index = random.randint(0, last_index)
random_quote = self.quoter.get_quote(rand_index)
self.lbl_quote.config(text=random_quote)
def show_cond_info(self):
self.info.explain_conditions()
def time_travel(self, do_undo):
if do_undo:
hist_action = self.go_back.undo()
if self.go_back.undo_empty():
self.btn_undo['state'] = 'disabled'
self.btn_redo['state'] = 'normal'
else:
hist_action = self.go_back.redo()
if self.go_back.redo_empty():
self.btn_redo['state'] = 'disabled'
self.btn_undo['state'] = 'normal'
action_name = hist_action['origin']
if action_name == 'turn button':
if do_undo:
self.turn -= 1
if self.turn < 0:
self.turn = len(self.initiative_holder) - 1
self.round -= 1
if self.round <= 0:
self.round = 0
self.lbl_round.config(text="S")
else:
self.lbl_round.config(text=self.round)
self.refresh_initiatives()
else:
self.next_turn(False)
elif action_name == 'round button':
if do_undo:
self.round -= 1
if self.round <= 0:
self.round = 0
self.lbl_round.config(text="S")
else:
self.lbl_round.config(text=self.round)
self.turn = hist_action['restore']['turn']
self.refresh_initiatives()
else:
self.next_round(False)
elif action_name == 'reset round':
if do_undo:
self.round = hist_action['restore']['round']
self.turn = hist_action['restore']['turn']
if self.round <= 0:
self.round = 0
self.lbl_round.config(text="S")
else:
self.lbl_round.config(text=self.round)
self.refresh_initiatives()
else:
self.reset_round(False)
elif action_name == 'list':
self.root.token_list = copy.deepcopy(hist_action['restore'])
self.refresh_map()
def undo_move(self):
if len(self.move_path) > 1:
last_move = self.move_path.pop()
else:
return
        # Footprint in grid squares, mirroring dpad_move()
        space_need = {'large': 4, 'huge': 9, 'gargantuan': 16}.get(self.turn_obj['size'], 1)
next_row_num = math.sqrt(space_need)
row_offset = 0
col_offset = 0
for i in range(space_need):
self.map_frames[last_move[0] + col_offset][last_move[1] + row_offset].config(bg='gray28')
col_offset += 1
if col_offset == next_row_num:
col_offset = 0
row_offset += 1
feet_moved = int(self.lbl_amount_moved.cget('text'))
feet_moved -= 5
self.lbl_amount_moved.config(text=feet_moved)
if feet_moved > int(self.lbl_max_move.cget('text')):
self.lbl_amount_moved.config(bg='red4')
else:
self.lbl_amount_moved.config(bg='gray28')
new_curr_move = self.move_path[-1]
self.lbl_position.config(text=f"{new_curr_move[0]+1}: {new_curr_move[1]+1}: {new_curr_move[2]}")
row_offset = 0
col_offset = 0
for i in range(space_need):
self.map_frames[new_curr_move[0] + col_offset][new_curr_move[1] + row_offset].config(bg='orange3')
col_offset += 1
if col_offset == next_row_num:
col_offset = 0
row_offset += 1
if self.root.copy_win_open:
self.copy_win.gray_map()
self.copy_win.track_moves(self.move_path)
def log_action(self, origin, restore_data=None):
if self.btn_undo['state'] == 'disabled':
self.btn_undo['state'] = 'normal'
self.go_back.add_undo(origin, restore_data)
        if not self.go_back.redo_empty():
self.go_back.clear_redo()
self.btn_redo['state'] = 'disabled'
battle = BattleMap(map_win)
if __name__ == '__main__':
map_win.mainloop() | 48.271711 | 159 | 0.59658 | 7,647 | 56,140 | 4.143586 | 0.067085 | 0.015906 | 0.008079 | 0.014139 | 0.570189 | 0.475383 | 0.388058 | 0.30332 | 0.244524 | 0.208988 | 0 | 0.02091 | 0.273352 | 56,140 | 1,163 | 160 | 48.271711 | 0.755822 | 0.008497 | 0 | 0.276555 | 0 | 0 | 0.069406 | 0.011843 | 0 | 0 | 0.00009 | 0 | 0 | 1 | 0.044976 | false | 0.001914 | 0.022967 | 0 | 0.073684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51033cdbbaedcb29f8ed65dc37e4cdc367f17763 | 1,411 | py | Python | qrtt/technical/rsi.py | leopoldsw/qrtt | 271f23888847f9a0a9a7da360be22c5000b058ab | [
"MIT"
] | null | null | null | qrtt/technical/rsi.py | leopoldsw/qrtt | 271f23888847f9a0a9a7da360be22c5000b058ab | [
"MIT"
] | null | null | null | qrtt/technical/rsi.py | leopoldsw/qrtt | 271f23888847f9a0a9a7da360be22c5000b058ab | [
"MIT"
] | null | null | null | """
RSI CALCULATION
The very first calculations for average gain and average loss are simple n-period averages:
First Average Gain = Sum of Gains over the past n periods / n.
First Average Loss = Sum of Losses over the past n periods / n
The second, and subsequent, calculations are based on the prior averages and the current gain loss:
Average Gain = [(previous Average Gain) x (n-1) + current Gain] / n.
Average Loss = [(previous Average Loss) x (n-1) + current Loss] / n.
RS = Average Gain / Average Loss
RSI = 100 - (100 / (1 + RS))
"""
import numpy as np
def rsi(ohlcv, period=14, ohlcv_series="close"):
_ohlcv = ohlcv[[ohlcv_series]].copy(deep=True)
_ohlcv["diff"] = _ohlcv[ohlcv_series].diff(periods=1)
_ohlcv["diff_up"] = np.where(_ohlcv["diff"] >= 0, _ohlcv["diff"], 0)
_ohlcv["diff_down"] = np.where(_ohlcv["diff"] < 0, _ohlcv["diff"], 0)
    # Wilder-style average gain / average loss, approximated here with an
    # exponentially weighted mean (alpha = 1/period)
_ohlcv[["rsi_u", "rsi_d"]] = _ohlcv[["diff_up", "diff_down"]].ewm(alpha=1 / period, min_periods=period).mean()
_ohlcv["rs"] = abs(_ohlcv["rsi_u"]) / abs(_ohlcv["rsi_d"])
indicator_values = 100 - (100 / (1 + _ohlcv["rs"]))
return indicator_values
def ADD_RSIs(ohlcv, periods=(10, 20, 30), ohlcv_series="close"):
for p in periods:
indicator_name = f'rsi_{p}_{ohlcv_series[0]}'
ohlcv[indicator_name] = rsi(ohlcv, p, ohlcv_series)
return ohlcv
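# Illustrative usage -- a minimal sketch, not from the original repo; the
# closing prices below are made up, and any DataFrame with a "close" column works.
if __name__ == "__main__":
    import pandas as pd
    demo = pd.DataFrame({"close": [44.3, 44.1, 44.2, 43.6, 44.3, 44.8,
                                   45.1, 45.4, 45.8, 46.1, 45.9, 46.0,
                                   45.6, 46.2, 46.2, 46.0, 46.0, 46.4]})
    # Adds an 'rsi_14_c' column; early rows are NaN until min_periods is reached
    print(ADD_RSIs(demo, periods=[14]).tail())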
| 34.414634 | 114 | 0.66832 | 215 | 1,411 | 4.213953 | 0.330233 | 0.07947 | 0.04415 | 0.049669 | 0.168874 | 0.103753 | 0.059603 | 0.059603 | 0 | 0 | 0 | 0.02698 | 0.185684 | 1,411 | 41 | 115 | 34.414634 | 0.761532 | 0.421687 | 0 | 0 | 0 | 0 | 0.137376 | 0.030941 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5106aa713d6626d3d954ada527f0fad7a1c15261 | 1,872 | py | Python | modules/aerodyn/ad_EllipticalWingInf_OLAF/Main_PostPro.py | OpenFAST/openfast-regression | 7892739f47f312ce014711192fd70253ea40c8e8 | [
"Apache-2.0"
] | null | null | null | modules/aerodyn/ad_EllipticalWingInf_OLAF/Main_PostPro.py | OpenFAST/openfast-regression | 7892739f47f312ce014711192fd70253ea40c8e8 | [
"Apache-2.0"
] | null | null | null | modules/aerodyn/ad_EllipticalWingInf_OLAF/Main_PostPro.py | OpenFAST/openfast-regression | 7892739f47f312ce014711192fd70253ea40c8e8 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Local
import weio
import welib.fast.fastlib as fastlib
# --- Reference simulations OmniVor / AWSM
ref20 = weio.read('AnalyticalResults/Elliptic_NumReference20.csv').toDataFrame()
ref40 = weio.read('AnalyticalResults/Elliptic_NumReference40.csv').toDataFrame()
ref80 = weio.read('AnalyticalResults/Elliptic_NumReference80.csv').toDataFrame()
# --- OLAF
# _,sim20 = fastlib.spanwisePostPro('Main_EllipticalWing20.fst',avgMethod='constantwindow',avgParam=0.1,out_ext='.outb')
_,sim40,_,_ = fastlib.spanwisePostPro('Main_EllipticalWingInf_OLAF.dvr',avgMethod='constantwindow',avgParam=0.1,out_ext='.outb')
# _,sim80,_,_ = fastlib.spanwisePostPro('Main_EllipticalWing.fst',avgMethod='constantwindow',avgParam=0.1,out_ext='.outb')
# --- Theory: lifting-line result for an elliptical wing,
#     CL = 2*pi*alpha / (1 + 2/AR), with AR = b^2/S and S = pi*b*c0/4
b = 5
c0 = 1.0
V = [1, 0.1]
U0 = np.sqrt(V[0]**2 + V[1]**2)
alpha_rad = np.arctan2(V[1], V[0])
AR = b*b/(np.pi*b*c0/4.)
CL_th = 2.*np.pi*alpha_rad/(1. + 2./AR)
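# Worked numbers for the setup above (an illustrative check, not from the
# original script): S = pi*b*c0/4 ~ 3.93, AR ~ 6.37, alpha = atan2(0.1, 1)
# ~ 0.0997 rad, so CL_th = 2*pi*0.0997/(1 + 2/6.37) ~ 0.477, which is why
# the plot below uses y-limits of [0.47, 0.48].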
# --- Plot
fig,ax = plt.subplots(1, 1, sharey=False, figsize=(6.4,4.8)) # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.90, top=0.88, bottom=0.11, hspace=0.20, wspace=0.20)
ax.plot([-1,1], [CL_th, CL_th], 'k-', label ='Theory', lw=2)
# ax.plot((ref20['r/R_[-]']-0.5)*2 , ref20['Cl_[-]'] , '-' , label ='n=20')
ax.plot((ref40['r/R_[-]']-0.5)*2 , ref40['Cl_[-]'] , '-' , label ='n=40 (ref)')
# ax.plot((ref80['r/R_[-]']-0.5)*2 , ref80['Cl_[-]'] , '-' , label ='n=80 (ref)')
# ax.plot((sim20['r/R_[-]']-0.5)*2 , sim20['B1Cl_[-]'].values, 'k:', label='OLAF')
ax.plot((sim40['r/R_[-]']-0.5)*2 , sim40['B1Cl_[-]'].values, 'k:')
# ax.plot((sim80['r/R_[-]']-0.5)*2 , sim80['B1Cl_[-]'].values, 'k:')
ax.set_xlabel('y/b [-]')
ax.set_ylabel(r'$C_l$ [-]')
ax.set_ylim([0.47,0.48])
# ax.set_xlim([-1,1])
ax.legend()
ax.tick_params(direction='in')
plt.show()
| 39 | 128 | 0.631944 | 302 | 1,872 | 3.781457 | 0.36755 | 0.036778 | 0.015762 | 0.021016 | 0.144483 | 0.118214 | 0.118214 | 0.118214 | 0.08056 | 0 | 0 | 0.081682 | 0.110577 | 1,872 | 47 | 129 | 39.829787 | 0.604204 | 0.347222 | 0 | 0 | 0 | 0 | 0.209611 | 0.137531 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.185185 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
510f0704162b83a55e2da583192211cbda73f8f2 | 2,987 | py | Python | Test13_talking_robot/Test13_preprocess.py | hooloong/My_TensorFlow | ef115989035b9ae14938dca47c0814b0d16dd6ba | [
"MIT"
] | 3 | 2018-07-29T17:31:58.000Z | 2019-06-27T10:36:34.000Z | Test13_talking_robot/Test13_preprocess.py | hooloong/My_TensorFlow | ef115989035b9ae14938dca47c0814b0d16dd6ba | [
"MIT"
] | null | null | null | Test13_talking_robot/Test13_preprocess.py | hooloong/My_TensorFlow | ef115989035b9ae14938dca47c0814b0d16dd6ba | [
"MIT"
] | 1 | 2019-02-18T02:27:39.000Z | 2019-02-18T02:27:39.000Z | # coding=utf-8
import os
import random
import sys
conv_path = 'dgk_shooter_min.conv'
if not os.path.exists(conv_path):
    print('Dataset not found')
    exit()
# Dataset format: lines starting with E separate conversations; M lines hold utterances with '/' between characters
"""
E
M 畹/华/吾/侄/
M 你/接/到/这/封/信/的/时/候/
M 不/知/道/大/伯/还/在/不/在/人/世/了/
E
M 咱/们/梅/家/从/你/爷/爷/起/
M 就/一/直/小/心/翼/翼/地/唱/戏/
M 侍/奉/宫/廷/侍/奉/百/姓/
M 从/来/不/曾/遭/此/大/祸/
M 太/后/的/万/寿/节/谁/敢/不/穿/红/
M 就/你/胆/儿/大/
M 唉/这/我/舅/母/出/殡/
M 我/不/敢/穿/红/啊/
M 唉/呦/唉/呦/爷/
M 您/打/得/好/我/该/打/
M 就/因/为/没/穿/红/让/人/赏/咱/一/纸/枷/锁/
M 爷/您/别/给/我/戴/这/纸/枷/锁/呀/
E
M 您/多/打/我/几/下/不/就/得/了/吗/
M 走/
M 这/是/哪/一/出/啊/…/ / /这/是/
M 撕/破/一/点/就/弄/死/你/
M 唉/
M 记/着/唱/戏/的/再/红/
M 还/是/让/人/瞧/不/起/
M 大/伯/不/想/让/你/挨/了/打/
M 还/得/跟/人/家/说/打/得/好/
M 大/伯/不/想/让/你/再/戴/上/那/纸/枷/锁/
M 畹/华/开/开/门/哪/
E
...
"""
# Convert dgk_shooter_min.conv to UTF-8 first (e.g. with a text editor such as Sublime); it saves a lot of encoding trouble
convs = []  # collection of conversations
with open(conv_path, "r", encoding='utf-8') as f:
    one_conv = []  # one complete conversation
cnt = 0
for line in f:
cnt += 1
# print(line)
# print(cnt)
line = line.strip('\n').replace('/', '')
if line == '':
continue
if line[0] == 'E':
if one_conv:
convs.append(one_conv)
one_conv = []
elif line[0] == 'M':
one_conv.append(line.split(' ')[1])
"""
print(convs[:3])  # personally, the dialogue dataset feels a bit underwhelming
[ ['畹华吾侄', '你接到这封信的时候', '不知道大伯还在不在人世了'],
['咱们梅家从你爷爷起', '就一直小心翼翼地唱戏', '侍奉宫廷侍奉百姓', '从来不曾遭此大祸', '太后的万寿节谁敢不穿红', '就你胆儿大', '唉这我舅母出殡', '我不敢穿红啊', '唉呦唉呦爷', '您打得好我该打', '就因为没穿红让人赏咱一纸枷锁', '爷您别给我戴这纸枷锁呀'],
['您多打我几下不就得了吗', '走', '这是哪一出啊 ', '撕破一点就弄死你', '唉', '记着唱戏的再红', '还是让人瞧不起', '大伯不想让你挨了打', '还得跟人家说打得好', '大伯不想让你再戴上那纸枷锁', '畹华开开门哪'], ....]
"""
# Split each conversation into alternating questions and answers
ask = []  # questions
response = []  # answers
for conv in convs:
if len(conv) == 1:
continue
    if len(conv) % 2 != 0:  # odd number of utterances: drop the last so they pair up
conv = conv[:-1]
for i in range(len(conv)):
if i % 2 == 0:
ask.append(conv[i])
else:
response.append(conv[i])
"""
print(len(ask), len(response))
print(ask[:3])
print(response[:3])
['畹华吾侄', '咱们梅家从你爷爷起', '侍奉宫廷侍奉百姓']
['你接到这封信的时候', '就一直小心翼翼地唱戏', '从来不曾遭此大祸']
"""
def convert_seq2seq_files(questions, answers, TESTSET_SIZE=8000):
    # Create the output files
    train_enc = open('train.enc', 'w', encoding="utf-8")  # questions
    train_dec = open('train.dec', 'w', encoding="utf-8")  # answers
    test_enc = open('test.enc', 'w', encoding="utf-8")  # questions
    test_dec = open('test.dec', 'w', encoding="utf-8")  # answers
    # Randomly pick TESTSET_SIZE samples for the test set
test_index = random.sample([i for i in range(len(questions))], TESTSET_SIZE)
for i in range(len(questions)):
if i in test_index:
test_enc.write(questions[i] + '\n')
test_dec.write(answers[i] + '\n')
else:
train_enc.write(questions[i] + '\n')
train_dec.write(answers[i] + '\n')
if i % 1000 == 0:
            print(len(questions), 'progress:', i)
train_enc.close()
train_dec.close()
test_enc.close()
test_dec.close()
convert_seq2seq_files(ask, response)
# The generated *.enc files hold the questions
# The generated *.dec files hold the answers
510fb73823084b9ff3de955296518a2eb7c922e4 | 540 | py | Python | wiktts/__init__.py | pettarin/wiktts | 37f9a865ec01604c36a3ab15325f62d8c26e4484 | [
"MIT"
] | 5 | 2016-06-02T04:52:11.000Z | 2018-08-01T20:05:37.000Z | wiktts/__init__.py | pettarin/wiktts | 37f9a865ec01604c36a3ab15325f62d8c26e4484 | [
"MIT"
] | null | null | null | wiktts/__init__.py | pettarin/wiktts | 37f9a865ec01604c36a3ab15325f62d8c26e4484 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
"""
TBW
"""
from __future__ import absolute_import
from __future__ import print_function
import io
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2016, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__email__ = "alberto@albertopettarin.it"
__version__ = "0.1.0"
__status__ = "Development"
def write_file(formatted_data, output_file_path):
with io.open(output_file_path, "w", encoding="utf-8") as output_file:
output_file.write(u"\n".join(formatted_data))
| 20.769231 | 75 | 0.748148 | 72 | 540 | 5.013889 | 0.638889 | 0.110803 | 0.088643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019149 | 0.12963 | 540 | 25 | 76 | 21.6 | 0.748936 | 0.068519 | 0 | 0 | 0 | 0 | 0.256619 | 0.101833 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.333333 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51106aa7bc640784a651d3a06d5663b7d7680ea4 | 2,834 | py | Python | addons/mixins.py | kilinger/marathon-rocketchat-hubot | 682454b90265eb2c66ea222cf0c970370816a9e1 | [
"BSD-3-Clause"
] | 1 | 2018-07-10T07:03:12.000Z | 2018-07-10T07:03:12.000Z | addons/mixins.py | kilinger/marathon-rocketchat-hubot | 682454b90265eb2c66ea222cf0c970370816a9e1 | [
"BSD-3-Clause"
] | null | null | null | addons/mixins.py | kilinger/marathon-rocketchat-hubot | 682454b90265eb2c66ea222cf0c970370816a9e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2015 by the xxxxx Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from hubot.utils.mesos import clean_container_path
class AddonsMixin(object):
addon_name = "addon"
addon_depend = None
addon_container_image = "library/busybox"
addon_default_version = "latest"
addon_config_vars = []
    def get_addon_slug(self):
        raise NotImplementedError
    def get_addon_id(self):
        raise NotImplementedError
    def get_color(self):
        raise NotImplementedError
def get_host(self, suffix=True):
if suffix:
return "{0}.weave.local".format(self.get_addon_id())
else:
return "{0}".format(self.get_addon_id())
def get_plugin_volume_name(self, path):
return "{0}-{1}".format(self.get_addon_slug(), clean_container_path(path))
def get_plugin_volumes(self):
paths = getattr(self, 'addon_container_paths', [])
return list("{0}:{1}".format(self.get_plugin_volume_name(path), path) for path in paths)
def get_docker_parameters(self):
parameters = [dict(key="label", value="weave_hostname={0}".format(self.get_host(suffix=False)))]
volumes = self.get_plugin_volumes()
if volumes:
parameters.append(dict(key="volume-driver", value="rexray"))
for volume in volumes:
parameters.append(dict(key="volume", value=volume))
return parameters
def get_config_vars(self):
return self.addon_config_vars
def get_config(self, primary=True, alias=None):
config = dict()
for var in self.get_config_vars():
var = var.upper()
if primary:
key = var
else:
parts = var.split('_')
parts.insert(-1, self.get_color().upper())
key = '_'.join(parts)
if alias:
key = alias.upper()
func = getattr(self, "get_config_{0}".format(var.lower()), None)
if func:
config[key] = func()
return config
def has_snapshot_support(self):
return bool(self.get_plugin_volumes())
def create_snapshot(self, description=None):
from addons.models import AddonSnapshot
snapshot = AddonSnapshot.objects.create(addon=self, description=description or '')
snapshot.create(description=description)
return snapshot
def destroy_snapshot(self, snapshot_short_id):
from addons.models import AddonSnapshot
try:
snapshot = AddonSnapshot.objects.get(addon=self, short_id=snapshot_short_id)
except AddonSnapshot.DoesNotExist:
pass
else:
snapshot.destroy()
| 30.473118 | 104 | 0.623853 | 334 | 2,834 | 5.086826 | 0.320359 | 0.041201 | 0.038258 | 0.047675 | 0.202472 | 0.042378 | 0 | 0 | 0 | 0 | 0 | 0.006747 | 0.267819 | 2,834 | 92 | 105 | 30.804348 | 0.812048 | 0.048342 | 0 | 0.123077 | 0 | 0 | 0.05318 | 0.00781 | 0 | 0 | 0 | 0 | 0 | 1 | 0.184615 | false | 0.015385 | 0.061538 | 0.092308 | 0.523077 | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
511493f52cb1eeb1e8430922732560c6965e53e7 | 5,069 | py | Python | flexmatcher/classify/nGramClassifier.py | austinkwillis/flexmatcher | c771cea696014f62bf919ecf678835d8c655d04f | [
"Apache-2.0"
] | 28 | 2017-07-19T19:02:56.000Z | 2022-01-11T10:40:06.000Z | flexmatcher/classify/nGramClassifier.py | austinkwillis/flexmatcher | c771cea696014f62bf919ecf678835d8c655d04f | [
"Apache-2.0"
] | 253 | 2018-02-10T22:22:16.000Z | 2022-03-27T18:43:17.000Z | flexmatcher/classify/nGramClassifier.py | austinkwillis/flexmatcher | c771cea696014f62bf919ecf678835d8c655d04f | [
"Apache-2.0"
] | 10 | 2018-02-21T06:41:30.000Z | 2022-02-20T12:18:46.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn import linear_model
from flexmatcher.classify import Classifier
import numpy as np
class NGramClassifier(Classifier):
"""Classify data-points using counts of n-gram sequence of words or chars.
The NGramClassifier uses n-grams of words or characters (based on user
preference) and extracts count features or binary features (based on user
preference) to train a classifier. It uses a LogisticRegression
classifier as its training model.
Attributes:
labels (ndarray): Vector storing the labels of each data-point.
features (ndarray): Matrix storing the extracting features.
vectorizer (object): Vectorizer for transforming text to features. It
will be either of type CountVectorizer or HashingVectorizer.
clf (LogisticRegression): The classifier instance.
num_classes (int): Number of classes/columns to match to
all_classes (ndarray): Sorted array of all possible classes
"""
def __init__(self, ngram_range=(1, 1), analyzer='word', count=True,
n_features=200):
"""Initializes the classifier.
Args:
ngram_range (tuple): Pair of ints specifying the range of ngrams.
analyzer (string): Determines what type of analyzer to be used.
Setting it to 'word' will consider each word as a unit of language
and 'char' will consider each character as a unit of language.
count (boolean): Determines if features are counts of n-grams
versus a binary value encoding if the n-gram is present or not.
n_features (int): Maximum number of features used.
"""
# checking what type of vectorizer to create
if count:
self.vectorizer = CountVectorizer(analyzer=analyzer,
ngram_range=ngram_range,
max_features=n_features)
else:
self.vectorizer = HashingVectorizer(analyzer=analyzer,
ngram_range=ngram_range,
n_features=n_features)
def fit(self, data):
"""
Args:
data (dataframe): Training data (values and their correct column).
"""
self.labels = np.array(data['class'])
self.num_classes = len(data['class'].unique())
self.all_classes = np.sort(np.unique(self.labels))
values = list(data['value'])
self.features = self.vectorizer.fit_transform(values).toarray()
# training the classifier
self.lrm = linear_model.LogisticRegression(class_weight='balanced')
self.lrm.fit(self.features, self.labels)
def predict_training(self, folds=5):
"""Do cross-validation and return probabilities for each data-point.
Args:
folds (int): Number of folds used for prediction on training data.
"""
partial_clf = linear_model.LogisticRegression(class_weight='balanced')
prediction = np.zeros((len(self.features), self.num_classes))
skf = StratifiedKFold(n_splits=folds)
for train_index, test_index in skf.split(self.features, self.labels):
# prepare the training and test data
training_features = self.features[train_index]
test_features = self.features[test_index]
training_labels = self.labels[train_index]
# fitting the model and predicting
partial_clf.fit(training_features, training_labels)
curr_pred = partial_clf.predict_proba(test_features)
prediction[test_index] = \
self.predict_proba_ordered(curr_pred, partial_clf.classes_)
return prediction
def predict_proba_ordered(self, probs, classes):
"""Fills out the probability matrix with classes that were missing.
Args:
probs (list): list of probabilities, output of predict_proba
classes_ (ndarray): list of classes from clf.classes_
all_classes (ndarray): list of all possible classes
"""
        # use the builtin float; np.float was removed in NumPy 1.24
        proba_ordered = np.zeros((probs.shape[0], self.all_classes.size),
                                 dtype=float)
sorter = np.argsort(self.all_classes)
idx = sorter[np.searchsorted(self.all_classes, classes, sorter=sorter)]
proba_ordered[:, idx] = probs
return proba_ordered
def predict(self, data):
"""Predict the class for a new given data.
Args:
data (dataframe): Dataframe of values to predict the column for.
"""
values = list(data['value'])
features = self.vectorizer.transform(values).toarray()
return self.lrm.predict_proba(features)
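if __name__ == '__main__':
    # Minimal smoke test -- an illustrative sketch, not part of the original
    # library; the toy 'date'/'name' columns below are assumptions.
    import pandas as pd
    train = pd.DataFrame({'value': ['2016-01-01', 'john smith',
                                    '2015-12-31', 'jane doe'],
                          'class': ['date', 'name', 'date', 'name']})
    clf = NGramClassifier(ngram_range=(1, 2), analyzer='char')
    clf.fit(train)
    test = pd.DataFrame({'value': ['2017-05-05', 'bob jones']})
    print(clf.predict(test))  # one row of class probabilities per value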
| 44.858407 | 79 | 0.64924 | 595 | 5,069 | 5.394958 | 0.310924 | 0.026168 | 0.017445 | 0.017445 | 0.086604 | 0.076012 | 0 | 0 | 0 | 0 | 0 | 0.001912 | 0.27757 | 5,069 | 112 | 80 | 45.258929 | 0.874659 | 0.395147 | 0 | 0.076923 | 0 | 0 | 0.014342 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096154 | false | 0 | 0.173077 | 0 | 0.346154 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5116e871b3a1ab4846b46b9fec5ed8c06b14c048 | 3,001 | py | Python | experiment/test/nngen.py | seonglae/commit-autosuggestions | 49c0ab65f20bda835b7537e042ffc9d338a0d482 | [
"Apache-2.0"
] | 303 | 2020-08-27T06:59:55.000Z | 2022-03-18T17:50:16.000Z | experiment/test/nngen.py | seonglae/commit-autosuggestions | 49c0ab65f20bda835b7537e042ffc9d338a0d482 | [
"Apache-2.0"
] | 4 | 2020-12-01T15:06:46.000Z | 2021-11-10T17:38:19.000Z | experiment/test/nngen.py | seonglae/commit-autosuggestions | 49c0ab65f20bda835b7537e042ffc9d338a0d482 | [
"Apache-2.0"
] | 11 | 2020-11-08T01:52:30.000Z | 2021-10-03T18:45:45.000Z | # encoding=utf-8
import os
import time
import fire
from typing import List
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.translate.bleu_score import sentence_bleu
def load_data(path):
"""load lines from a file"""
with open(path, 'r') as f:
lines = f.read().split('\n')[0:-1]
lines = [l.strip() for l in lines]
return lines
def find_mixed_nn(simi, diffs, test_diff, bleu_thre: int = 5) -> int:
    """Find the nearest neighbor using cosine similarity and BLEU score"""
candidates = simi.argsort()[-bleu_thre:][::-1]
max_score = 0
max_idx = 0
for j in candidates:
score = sentence_bleu([diffs[j].split()], test_diff.split())
if score > max_score:
max_score = score
max_idx = j
return max_idx
def find_nn(simi) -> int:
"""Find the nearest neighbor"""
max_idx = simi.argsort()[-1]
return max_idx
def nngen(train_diffs: List[str], train_msgs: List[str], test_diffs: List[str],
          type: "'mixed': cosine + bleu, 'cos': cosine only" = 'mixed',
          bleu_thre: "how many candidates to consider before calculating bleu_score" = 5) -> List[str]:
    """NNGen
    NOTE: currently, we haven't optimized this for large datasets. You may need to split a
    large training set into several chunks and then calculate the similarities between the
    train set and the test set to speed up the algorithm. You may also leverage a GPU through
    pytorch or other libraries.
    """
    if type not in ["mixed", "cos"]:
        raise ValueError('Wrong type for nngen.')
counter = CountVectorizer()
train_matrix = counter.fit_transform(train_diffs)
# print(len(counter.vocabulary_))
test_matrix = counter.transform(test_diffs)
similarities = cosine_similarity(test_matrix, train_matrix)
test_msgs = []
for idx, test_simi in enumerate(similarities):
if (idx + 1) % 100 == 0:
print(idx+1)
if type == 'mixed':
max_idx = find_mixed_nn(test_simi, train_diffs, test_diffs[idx], bleu_thre)
else:
max_idx = find_nn(test_simi)
test_msgs.append(train_msgs[max_idx])
return test_msgs
def main(train_diff_file: str, train_msg_file: str, test_diff_file: str):
    """Run NNGen on the given dataset using the default settings"""
    start_time = time.time()
    test_basename = os.path.basename(test_diff_file)
    out_file = "./nngen." + test_basename.replace('.diff', '.msg')
train_diffs = load_data(train_diff_file)
train_msgs = load_data(train_msg_file)
test_diffs = load_data(test_diff_file)
out_msgs = nngen(train_diffs, train_msgs, test_diffs)
with open(out_file, 'w') as out_f:
out_f.write("\n".join(out_msgs) + "\n")
    time_cost = time.time() - start_time
print("Done, cost {}s".format(time_cost))
if __name__ == "__main__":
fire.Fire({
'main':main
})
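# A minimal sketch of calling nngen() directly on toy data instead of going
# through the fire CLI; the diffs and messages below are made-up examples:
#
#   train_diffs = ["- import os\n+ import sys", "+ def foo(): pass"]
#   train_msgs = ["switch os import to sys", "add foo stub"]
#   print(nngen(train_diffs, train_msgs, ["+ def bar(): pass"], type='cos'))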
| 35.305882 | 96 | 0.671776 | 434 | 3,001 | 4.421659 | 0.33871 | 0.025013 | 0.025013 | 0.017718 | 0.026055 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006364 | 0.214595 | 3,001 | 84 | 97 | 35.72619 | 0.807807 | 0.167611 | 0 | 0.032787 | 0 | 0 | 0.078776 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0 | 0.114754 | 0 | 0.262295 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
511bd43634ea3f540136626b6213102cf02c3ef9 | 7,711 | py | Python | odrive/Firmware/fibre/python/fibre/utils.py | kirmani/doggo | f5aadba2a5b664f2d383bca0b35155d65363c498 | [
"MIT"
] | null | null | null | odrive/Firmware/fibre/python/fibre/utils.py | kirmani/doggo | f5aadba2a5b664f2d383bca0b35155d65363c498 | [
"MIT"
] | 3 | 2020-02-26T00:07:53.000Z | 2022-02-26T05:18:31.000Z | odrive/Firmware/fibre/python/fibre/utils.py | kirmani/doggo | f5aadba2a5b664f2d383bca0b35155d65363c498 | [
"MIT"
] | null | null | null |
import sys
import time
import threading
import platform
import subprocess
import os
try:
if platform.system() == 'Windows':
import win32console
        # TODO: we should use win32console here anyway so we could just omit colorama
import colorama
colorama.init()
except ModuleNotFoundError:
print("Could not init terminal features.")
sys.stdout.flush()
def get_serial_number_str(device):
if hasattr(device, 'serial_number'):
return format(device.serial_number, 'x').upper()
else:
return "[unknown serial number]"
## Threading utils ##
class Event():
"""
Alternative to threading.Event(), enhanced by the subscribe() function
that the original fails to provide.
    @param trigger: if supplied, the newly created event will be triggered
as soon as the trigger event becomes set
"""
def __init__(self, trigger=None):
self._evt = threading.Event()
self._subscribers = []
self._mutex = threading.Lock()
        if trigger is not None:
trigger.subscribe(lambda: self.set())
def is_set(self):
return self._evt.is_set()
def set(self):
"""
Sets the event and invokes all subscribers if the event was
not already set
"""
self._mutex.acquire()
try:
if not self._evt.is_set():
self._evt.set()
for s in self._subscribers:
s()
finally:
self._mutex.release()
def subscribe(self, handler):
"""
Invokes the specified handler exactly once as soon as the
specified event is set. If the event is already set, the
handler is invoked immediately.
Returns a function that can be invoked to unsubscribe.
"""
if handler is None:
raise TypeError
self._mutex.acquire()
try:
self._subscribers.append(handler)
if self._evt.is_set():
handler()
finally:
self._mutex.release()
return handler
def unsubscribe(self, handler):
self._mutex.acquire()
try:
self._subscribers.pop(self._subscribers.index(handler))
finally:
self._mutex.release()
def wait(self, timeout=None):
if not self._evt.wait(timeout=timeout):
raise TimeoutError()
def trigger_after(self, timeout):
"""
Triggers the event after the specified timeout.
This function returns immediately.
"""
        def delayed_trigger():
            try:
                # wait() raises TimeoutError when the timeout elapses and
                # returns None otherwise, so catch the exception to trigger
                self.wait(timeout=timeout)
            except TimeoutError:
                self.set()
        threading.Thread(target=delayed_trigger, daemon=True).start()
def wait_any(timeout=None, *events):
"""
Blocks until any of the specified events are triggered.
    Returns the index of the event that was triggered or raises
    a TimeoutError.
    @param timeout: a timeout in seconds
"""
or_event = threading.Event()
subscriptions = []
for event in events:
subscriptions.append((event, event.subscribe(lambda: or_event.set())))
or_event.wait(timeout=timeout)
for event, sub in subscriptions:
event.unsubscribe(sub)
for i in range(len(events)):
if events[i].is_set():
return i
raise TimeoutError()
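# A minimal usage sketch for Event and wait_any (timing values are
# illustrative):
#
#   done = Event()
#   cancelled = Event()
#   done.trigger_after(2.0)            # sets `done` after two seconds
#   idx = wait_any(5.0, done, cancelled)
#   assert idx == 0                    # `done` fired first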
## Log utils ##
class Logger():
"""
Logs messages to stdout
"""
COLOR_DEFAULT = 0
COLOR_GREEN = 1
COLOR_CYAN = 2
COLOR_YELLOW = 3
COLOR_RED = 4
_VT100Colors = {
COLOR_GREEN: '\x1b[92;1m',
COLOR_CYAN: '\x1b[96;1m',
COLOR_YELLOW: '\x1b[93;1m',
COLOR_RED: '\x1b[91;1m',
COLOR_DEFAULT: '\x1b[0m'
}
_Win32Colors = {
COLOR_GREEN: 0x0A,
COLOR_CYAN: 0x0B,
COLOR_YELLOW: 0x0E,
COLOR_RED: 0x0C,
COLOR_DEFAULT: 0x07
}
def __init__(self, verbose=True):
self._prefix = ''
self._skip_bottom_line = False # If true, messages are printed one line above the cursor
self._verbose = verbose
self._print_lock = threading.Lock()
if platform.system() == 'Windows':
self._stdout_buf = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
def indent(self, prefix=' '):
indented_logger = Logger()
indented_logger._prefix = self._prefix + prefix
return indented_logger
def print_on_second_last_line(self, text, color):
"""
Prints a text on the second last line.
This can be used to print a message above the command
prompt. If the command prompt spans multiple lines
there will be glitches.
If the printed text spans multiple lines there will also
be glitches (though this could be fixed).
"""
if platform.system() == 'Windows':
            # Windows <10 doesn't understand VT100 escape codes and colorama
            # also doesn't support the specific escape codes we need, so we use the
# native Win32 API.
info = self._stdout_buf.GetConsoleScreenBufferInfo()
cursor_pos = info['CursorPosition']
            scroll_rect = win32console.PySMALL_RECTType(
Left=0, Top=1,
Right=info['Window'].Right,
Bottom=cursor_pos.Y-1)
scroll_dest = win32console.PyCOORDType(scroll_rect.Left, scroll_rect.Top-1)
self._stdout_buf.ScrollConsoleScreenBuffer(
scroll_rect, scroll_rect, scroll_dest, # clipping rect is same as scroll rect
u' ', Logger._Win32Colors[color]) # fill with empty cells with the desired color attributes
line_start = win32console.PyCOORDType(0, cursor_pos.Y-1)
self._stdout_buf.WriteConsoleOutputCharacter(text, line_start)
else:
# Assume we're in a terminal that interprets VT100 escape codes.
# TODO: test on macOS
# Escape character sequence:
# ESC 7: store cursor position
# ESC 1A: move cursor up by one
# ESC 1S: scroll entire viewport by one
# ESC 1L: insert 1 line at cursor position
# (print text)
# ESC 8: restore old cursor position
self._print_lock.acquire()
sys.stdout.write('\x1b7\x1b[1A\x1b[1S\x1b[1L')
sys.stdout.write(Logger._VT100Colors[color] + text + Logger._VT100Colors[Logger.COLOR_DEFAULT])
sys.stdout.write('\x1b8')
sys.stdout.flush()
self._print_lock.release()
def print_colored(self, text, color):
if self._skip_bottom_line:
self.print_on_second_last_line(text, color)
else:
# On Windows, colorama does the job of interpreting the VT100 escape sequences
self._print_lock.acquire()
sys.stdout.write(Logger._VT100Colors[color] + text + Logger._VT100Colors[Logger.COLOR_DEFAULT] + '\n')
sys.stdout.flush()
self._print_lock.release()
def debug(self, text):
if self._verbose:
self.print_colored(self._prefix + text, Logger.COLOR_DEFAULT)
def success(self, text):
self.print_colored(self._prefix + text, Logger.COLOR_GREEN)
def info(self, text):
self.print_colored(self._prefix + text, Logger.COLOR_DEFAULT)
def notify(self, text):
self.print_colored(self._prefix + text, Logger.COLOR_CYAN)
def warn(self, text):
self.print_colored(self._prefix + text, Logger.COLOR_YELLOW)
def error(self, text):
# TODO: write to stderr
self.print_colored(self._prefix + text, Logger.COLOR_RED)
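# A minimal usage sketch for Logger (output strings are illustrative):
#
#   logger = Logger(verbose=True)
#   logger.info("starting up")
#   sub = logger.indent()
#   sub.warn("low disk space")       # printed yellow, with an indent prefix
#   sub.error("device unreachable")  # printed red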
| 33.672489 | 114 | 0.611983 | 921 | 7,711 | 4.970684 | 0.296417 | 0.023591 | 0.024465 | 0.026212 | 0.181083 | 0.140673 | 0.125819 | 0.114024 | 0.088903 | 0.088903 | 0 | 0.018878 | 0.299313 | 7,711 | 228 | 115 | 33.820175 | 0.828429 | 0.240954 | 0 | 0.202703 | 0 | 0 | 0.034736 | 0.004655 | 0 | 0 | 0.003581 | 0.008772 | 0 | 1 | 0.135135 | false | 0.006757 | 0.054054 | 0.006757 | 0.290541 | 0.101351 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51201b8f267ec7a2dece0fc4da0d42b14f47ffba | 2,720 | py | Python | 2021/day09/main.py | ingjrs01/adventofcode | c5e4f0158dac0efc2dbfc10167f2700693b41fea | [
"Apache-2.0"
] | null | null | null | 2021/day09/main.py | ingjrs01/adventofcode | c5e4f0158dac0efc2dbfc10167f2700693b41fea | [
"Apache-2.0"
] | null | null | null | 2021/day09/main.py | ingjrs01/adventofcode | c5e4f0158dac0efc2dbfc10167f2700693b41fea | [
"Apache-2.0"
] | null | null | null |
def search_low(matrix):
positions = []
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if (j > 0):
if (matrix[i][j] >= matrix[i][j-1]):
continue
if (j < len(matrix[i])-1):
if (matrix[i][j] >= matrix[i][j+1]):
continue
if (i > 0):
if (matrix[i][j] >= matrix[i-1][j]):
continue
if (i < len(matrix)-1):
if (matrix[i][j] >= matrix[i+1][j]):
continue
positions.append((i,j))
return positions
def f_in(p,m):
for e in m:
if (e[0] == p[0] and e[1] == p[1]):
return True
return False
def calc_basin(matrix,coords):
    # Given a coordinate, compute the size of its basin via flood fill
pendientes = []
finalizados = []
pendientes.append((coords[0],coords[1]))
while (len(pendientes)>0):
actual = pendientes.pop(0)
if (actual[1] > 0):
if (matrix[actual[0]][actual[1]-1] != 9):
if (f_in((actual[0],actual[1]-1),pendientes) == False and f_in((actual[0],actual[1]-1),finalizados)==False):
pendientes.append((actual[0],actual[1]-1))
if (actual[1] < len(matrix[actual[0]])-1):
if (matrix[actual[0]][actual[1]+1] != 9):
if (f_in((actual[0],actual[1]+1),pendientes)==False and f_in((actual[0],actual[1]+1),finalizados)==False):
pendientes.append((actual[0],actual[1]+1))
if (actual[0] > 0):
if (matrix[actual[0]-1][actual[1]] != 9):
if (f_in((actual[0]-1,actual[1]),pendientes)==False and f_in((actual[0]-1,actual[1]),finalizados)==False):
pendientes.append((actual[0]-1,actual[1]))
if (actual[0] < len(matrix)-1):
if (matrix[actual[0]+1][actual[1]] != 9):
                if (f_in((actual[0]+1,actual[1]),pendientes)==False and f_in((actual[0]+1,actual[1]),finalizados)==False):
pendientes.append((actual[0]+1,actual[1]))
if (f_in(actual,finalizados)==False):
finalizados.append(actual)
return (len(finalizados))
matrix = []
with open('real', 'r') as f:
    lines = f.readlines()
for line in lines:
row = []
l = line.strip()
for i in range(len(l)):
row.append(int(l[i]))
matrix.append(row)
positions = search_low(matrix)
total = 0
for p in positions:
valor = matrix[p[0]][p[1]] + 1
total += valor
print(total)
tams = []
print("Segunda parte")
for p in positions:
tams.append(calc_basin(matrix,p))
la = sorted(tams)
t = len(la)-1
resultado = la[t] * la[t-1] * la[t-2]
print(resultado)
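# A sketch of an equivalent flood fill using a set instead of the f_in()
# linear scans, dropping each membership check from O(n) to O(1); this is an
# illustrative alternative, not the solution used above:
#
#   def calc_basin_sets(matrix, coords):
#       seen = {coords}
#       stack = [coords]
#       while stack:
#           i, j = stack.pop()
#           for ni, nj in ((i-1, j), (i+1, j), (i, j-1), (i, j+1)):
#               if 0 <= ni < len(matrix) and 0 <= nj < len(matrix[ni]) \
#                       and matrix[ni][nj] != 9 and (ni, nj) not in seen:
#                   seen.add((ni, nj))
#                   stack.append((ni, nj))
#       return len(seen)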
| 30.561798 | 124 | 0.516176 | 386 | 2,720 | 3.601036 | 0.163212 | 0.095683 | 0.058273 | 0.080576 | 0.499281 | 0.463309 | 0.463309 | 0.460432 | 0.460432 | 0.421583 | 0 | 0.043864 | 0.295956 | 2,720 | 88 | 125 | 30.909091 | 0.681984 | 0.020221 | 0 | 0.085714 | 0 | 0 | 0.006762 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0 | 0 | 0.1 | 0.042857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51219e777435d891fa05113f0da030e18ce7d68a | 4,364 | py | Python | handwrite/sheettopng.py | sakshamarora1/handwrite | 628c53f9fbca0bf9731e0ebc7d6c8ca2525f1b29 | [
"MIT"
] | null | null | null | handwrite/sheettopng.py | sakshamarora1/handwrite | 628c53f9fbca0bf9731e0ebc7d6c8ca2525f1b29 | [
"MIT"
] | null | null | null | handwrite/sheettopng.py | sakshamarora1/handwrite | 628c53f9fbca0bf9731e0ebc7d6c8ca2525f1b29 | [
"MIT"
] | null | null | null | import os
import sys
import itertools
import cv2
# Seq: A-Z, a-z, 0-9, SPECIAL_CHARS
ALL_CHARS = list(
itertools.chain(
range(65, 91),
range(97, 123),
range(48, 58),
[ord(i) for i in ".,;:!?\"'-+=/%&()[]"],
)
)
class SheetToPNG:
def __init__(self):
pass
def convert(self, sheet, characters_dir, cols=8, rows=10, threshold_value=200):
# TODO If directory given instead of image file, read all images and wrtie the images
# (example) 0.png, 1.png, 2.png inside every character folder in characters/
# sheet_images = []
# for s in os.listdir(sheet_dir):
# sheet_images.append(cv2.imread(sheet_dir + "/" + s))
characters = self.detectCharacters(sheet, threshold_value, cols=cols, rows=rows)
self.createCharacterDirectory(characters, characters_dir)
def detectCharacters(self, sheet_image, threshold_value, cols=8, rows=10):
# TODO Raise errors and suggest where the problem might be
# Read the image and convert to grayscale
image = cv2.imread(sheet_image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Threshold and filter the image for better contour detection
ret, thresh = cv2.threshold(gray, threshold_value, 255, 1)
close_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, close_kernel, iterations=2)
# Search for contours.
contours, h = cv2.findContours(
close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
# Filter contours based on number of sides and then reverse sort by area.
contours = sorted(
filter(
lambda cnt: len(
cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
)
== 4,
contours,
),
key=cv2.contourArea,
reverse=True,
)
# Calculate the bounding of the first contour and approximate the height
# and width for final cropping.
x, y, w, h = cv2.boundingRect(contours[0])
space_h, space_w = 7 * h // 16, 7 * w // 16
# Since amongst all the contours, the expected case is that the 4 sided contours
# containing the characters should have the maximum area, so we loop through the first
# rows*colums contours and add them to final list after cropping.
characters = []
for i in range(rows * cols):
x, y, w, h = cv2.boundingRect(contours[i])
cx, cy = x + w // 2, y + h // 2
roi = image[cy - space_h : cy + space_h, cx - space_w : cx + space_w]
characters.append([roi, cx, cy])
# Now we have the characters but since they are all mixed up we need to position them.
# Sort characters based on 'y' coordinate and group them by number of rows at a time. Then
# sort each group based on the 'x' coordinate.
characters.sort(key=lambda x: x[2])
sorted_characters = []
for k in range(rows):
sorted_characters.extend(
sorted(characters[cols * k : cols * (k + 1)], key=lambda x: x[1])
)
return sorted_characters
def createCharacterDirectory(self, characters, characters_dir):
if not os.path.exists(characters_dir):
os.mkdir(characters_dir)
# Create directory for each character and save the png for the characters
# Structure: UserProvidedDir/ord(character)/ord(character).png
for k, images in enumerate(characters):
character = os.path.join(characters_dir, str(ALL_CHARS[k]))
if not os.path.exists(character):
os.mkdir(character)
cv2.imwrite(
os.path.join(character, str(ALL_CHARS[k]) + ".png"), images[0],
)
def main():
if len(sys.argv) > 1:
if len(sys.argv) == 3:
sys.argv.append(200)
a = SheetToPNG().convert(
sheet=sys.argv[1],
characters_dir=sys.argv[2],
cols=8,
rows=10,
threshold_value=int(sys.argv[3]),
)
else:
print(
"Usage: sheettopng [SHEET_PATH] [CHARACTER_DIRECTORY_PATH] [THRESHOLD_VALUE (Default: 200)]"
)
| 36.366667 | 104 | 0.592117 | 558 | 4,364 | 4.546595 | 0.354839 | 0.035869 | 0.010642 | 0.013007 | 0.054395 | 0.040993 | 0.021285 | 0 | 0 | 0 | 0 | 0.028458 | 0.307516 | 4,364 | 119 | 105 | 36.672269 | 0.811052 | 0.281852 | 0 | 0 | 0 | 0.025641 | 0.032455 | 0.008355 | 0 | 0 | 0 | 0.008403 | 0 | 1 | 0.064103 | false | 0.012821 | 0.051282 | 0 | 0.141026 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5121b7e379746aac2a670977c3fda4d01dade4a4 | 1,261 | py | Python | 2021/Day 7/solution.py | theleteron/advent-of-code | 45900a8c14a966e4ecbe699e6423072254d09d95 | [
"MIT"
] | 1 | 2021-12-02T18:28:28.000Z | 2021-12-02T18:28:28.000Z | 2021/Day 7/solution.py | theleteron/advent-of-code | 45900a8c14a966e4ecbe699e6423072254d09d95 | [
"MIT"
] | null | null | null | 2021/Day 7/solution.py | theleteron/advent-of-code | 45900a8c14a966e4ecbe699e6423072254d09d95 | [
"MIT"
] | null | null | null | class Day():
def __init__(self, data_path):
with open(data_path, "r") as file:
for line in file:
self.positions = [(int(position)) for position in line.strip().split(',')]
def part1(self):
fuel_cost = -1
for target in range(min(self.positions), max(self.positions)+1):
current_cost = 0
for number in self.positions:
current_cost += target - number if target > number else number - target
if current_cost < fuel_cost or fuel_cost == -1:
fuel_cost = current_cost
return fuel_cost
def cost(self, x):
return x * (x + 1) // 2
def part2(self):
fuel_cost = -1
for target in range(min(self.positions), max(self.positions)+1):
current_cost = 0
for number in self.positions:
current_cost += self.cost(target - number) if target > number else self.cost(number - target)
if current_cost < fuel_cost or fuel_cost == -1:
fuel_cost = current_cost
return fuel_cost
if __name__ == "__main__":
DATA_INPUT_LOCATION = "data.in"
day = Day(DATA_INPUT_LOCATION)
print(day.part1())
print(day.part2()) | 32.333333 | 109 | 0.569389 | 162 | 1,261 | 4.209877 | 0.265432 | 0.117302 | 0.052786 | 0.038123 | 0.639296 | 0.639296 | 0.639296 | 0.545455 | 0.545455 | 0.545455 | 0 | 0.016529 | 0.328311 | 1,261 | 39 | 110 | 32.333333 | 0.788666 | 0 | 0 | 0.466667 | 0 | 0 | 0.013471 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0.033333 | 0.266667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5121cc545c9a55cdbb8d25e3c6ef3ab3548b3342 | 849 | py | Python | dataflows/processors/deduplicate.py | cschloer/dataflows | 78a683b5d202512c06021ff6be8ac7f60ef1cd9b | [
"MIT"
] | 160 | 2018-06-13T23:16:26.000Z | 2022-03-11T21:26:44.000Z | dataflows/processors/deduplicate.py | cschloer/dataflows | 78a683b5d202512c06021ff6be8ac7f60ef1cd9b | [
"MIT"
] | 164 | 2018-07-08T13:05:30.000Z | 2021-09-30T08:54:59.000Z | dataflows/processors/deduplicate.py | cschloer/dataflows | 78a683b5d202512c06021ff6be8ac7f60ef1cd9b | [
"MIT"
] | 41 | 2018-08-07T08:05:30.000Z | 2021-12-18T04:34:06.000Z | from dataflows import PackageWrapper, ResourceWrapper
from ..helpers.resource_matcher import ResourceMatcher
def deduper(rows: ResourceWrapper):
pk = rows.res.descriptor['schema'].get('primaryKey', [])
if len(pk) == 0:
yield from rows
else:
keys = set()
for row in rows:
key = tuple(row[k] for k in pk)
if key in keys:
continue
keys.add(key)
yield row
def deduplicate(resources=None):
def func(package: PackageWrapper):
resource_matcher = ResourceMatcher(resources, package)
yield package.pkg
resource: ResourceWrapper
for resource in package:
if resource_matcher.match(resource.res.name):
yield deduper(resource)
else:
yield resource
return func
| 26.53125 | 62 | 0.599529 | 91 | 849 | 5.56044 | 0.461538 | 0.088933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001739 | 0.322733 | 849 | 31 | 63 | 27.387097 | 0.878261 | 0 | 0 | 0.08 | 0 | 0 | 0.018846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.08 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5127e82ffefa06ac56296824a5c55b26831611d6 | 3,679 | py | Python | examples/development/simulate_policy.py | iclavera/cassie | f2e253bf29fa0f872974188aed1fdfbe06efc37e | [
"MIT"
] | null | null | null | examples/development/simulate_policy.py | iclavera/cassie | f2e253bf29fa0f872974188aed1fdfbe06efc37e | [
"MIT"
] | 11 | 2020-01-28T22:32:20.000Z | 2022-03-11T23:37:57.000Z | examples/development/simulate_policy.py | iclavera/cassie | f2e253bf29fa0f872974188aed1fdfbe06efc37e | [
"MIT"
] | null | null | null | import argparse
from distutils.util import strtobool
import json
import os
import pickle
import tensorflow as tf
import numpy as np
from softlearning.policies.utils import get_policy_from_variant
from softlearning.samplers import rollouts
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('checkpoint_path',
type=str,
help='Path to the checkpoint.')
parser.add_argument('--max-path-length', '-l', type=int, default=1000)
parser.add_argument('--num-rollouts', '-n', type=int, default=10)
parser.add_argument('--render-mode', '-r',
type=str,
default='human',
choices=('human', 'rgb_array', None),
help="Mode to render the rollouts in.")
parser.add_argument('--deterministic', '-d',
type=strtobool,
nargs='?',
const=True,
default=True,
help="Evaluate policy deterministically.")
args = parser.parse_args()
return args
def simulate_policy(args):
session = tf.keras.backend.get_session()
checkpoint_path = args.checkpoint_path.rstrip('/')
experiment_path = os.path.dirname(checkpoint_path)
variant_path = os.path.join(experiment_path, 'params.json')
with open(variant_path, 'r') as f:
variant = json.load(f)
with session.as_default():
pickle_path = os.path.join(checkpoint_path, 'checkpoint.pkl')
with open(pickle_path, 'rb') as f:
pickleable = pickle.load(f)
env = pickleable['env']
policy = (
get_policy_from_variant(variant, env, Qs=[None]))
policy.set_weights(pickleable['policy_weights'])
if True: #hard coded
import numpy as np
import scipy.io as sio
ws = policy.get_weights()
w0, b0, w1, b1, w2, b2 = ws[0], ws[1], ws[2], ws[3], ws[4], ws[5]
savematpath = '/home/parsa/projects/cassie/cassie_ignasi3/policy_weights.mat' #hard coded
sio.savemat(savematpath, {'w0':w0, 'b0':b0, 'w1':w1, 'b1':b1, 'w2':w2, 'b2':b2})
# env.unwrapped.vis.start_recording()
with policy.set_deterministic(args.deterministic):
paths = rollouts(env,
policy,
path_length=args.max_path_length,
n_paths=args.num_rollouts,
render_mode=args.render_mode)
import matplotlib.pyplot as plt
real = [path['observations'][:, 0] for path in paths][0]
filtered = [path['observations'][:, 1] for path in paths][0]
fig, axarr = plt.subplots(2, 1)
axarr[0].plot(range(len(real)), real)
axarr[1].plot(range(len(filtered)), filtered)
# velocities_pelvis_filtered = [path['observations'][:, :3] for path in paths]
# velocities_pelvis = [path['observations'][:, -3:] for path in paths]
# fig, axarr = plt.subplots(3, 2)
# for i in range(3):
# for vel_path in velocities_pelvis:
# axarr[i, 0].plot(range(len(vel_path)), vel_path[:,i])
# for vel_path in velocities_pelvis_filtered:
# axarr[i, 1].plot(range(len(vel_path)), np.cumsum(vel_path[:,i]) * 10)
# plt.show()
if args.render_mode != 'human':
from pprint import pprint; import pdb; pdb.set_trace()
pass
# env.unwrapped.vis.stop_recording('./test_vid.mp4', speedup=1, frame_skip=20, timestep=env.unwrapped.dt)
return paths
if __name__ == '__main__':
args = parse_args()
simulate_policy(args)
| 34.383178 | 109 | 0.589562 | 452 | 3,679 | 4.646018 | 0.329646 | 0.017143 | 0.040476 | 0.026667 | 0.088571 | 0.05619 | 0.029524 | 0 | 0 | 0 | 0 | 0.019667 | 0.281326 | 3,679 | 106 | 110 | 34.707547 | 0.774584 | 0.162544 | 0 | 0.057143 | 0 | 0 | 0.112777 | 0.019883 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0.014286 | 0.185714 | 0 | 0.242857 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51297e9b178ac6da2a46669f12b122f74df2ecf7 | 415 | py | Python | settings/live.py | mhfowler/abridgedmaps | d0802bd6955714d174d208bea809191bff4615b3 | [
"MIT"
] | null | null | null | settings/live.py | mhfowler/abridgedmaps | d0802bd6955714d174d208bea809191bff4615b3 | [
"MIT"
] | null | null | null | settings/live.py | mhfowler/abridgedmaps | d0802bd6955714d174d208bea809191bff4615b3 | [
"MIT"
] | null | null | null | from settings.common import *
DEBUG=True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'mydatabase',
}
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
| 18.863636 | 62 | 0.684337 | 49 | 415 | 5.591837 | 0.836735 | 0.072993 | 0.109489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002924 | 0.175904 | 415 | 21 | 63 | 19.761905 | 0.798246 | 0.26506 | 0 | 0 | 0 | 0 | 0.333333 | 0.16 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
512cb58d8316e571507a93e75a64d19559f26b6b | 2,762 | py | Python | models/tag.py | noahkw/botw-bot | 8d8c9515a177c52270093fb64abf34d111535d16 | [
"MIT"
] | 1 | 2020-11-29T23:00:27.000Z | 2020-11-29T23:00:27.000Z | models/tag.py | noahkw/botw-bot | 8d8c9515a177c52270093fb64abf34d111535d16 | [
"MIT"
] | 18 | 2020-08-05T11:59:31.000Z | 2022-03-15T03:48:40.000Z | models/tag.py | noahkw/botw-bot | 8d8c9515a177c52270093fb64abf34d111535d16 | [
"MIT"
] | null | null | null | import re
import discord
from sqlalchemy import (
Column,
String,
BigInteger,
Integer,
Boolean,
update,
delete,
)
from sqlalchemy.ext.hybrid import hybrid_property
from models.base import Base, PendulumDateTime
from util import safe_mention
IMAGE_URL_REGEX = r"https?:\/\/.*\.(jpe?g|png|gif)"
class Tag(Base):
__tablename__ = "tags"
EDITABLE = frozenset(["trigger", "reaction", "in_msg"])
tag_id = Column(Integer, primary_key=True)
trigger = Column(String, nullable=False)
reaction = Column(String, nullable=False)
in_msg = Column(Boolean, default=False)
_creator = Column(BigInteger, nullable=False)
_guild = Column(BigInteger, nullable=False)
use_count = Column(Integer, default=0)
date = Column(PendulumDateTime, default=PendulumDateTime.now())
@hybrid_property
def creator(self):
return self.bot.get_user(self._creator)
@hybrid_property
def guild(self):
return self.bot.get_guild(self._guild)
def __eq__(self, other):
if not isinstance(other, Tag):
return NotImplemented
return (
str.lower(self.trigger) == str.lower(other.trigger)
and str.lower(self.reaction) == str.lower(other.reaction)
and self._guild == other._guild
)
def to_list_element(self, index):
return f"*{index + 1}*. `{self.tag_id}`: *{self.trigger}* by {self.creator}"
def info_embed(self):
embed = (
discord.Embed(title=f"Tag `{self.tag_id}`", timestamp=self.date)
.add_field(name="Trigger", value=self.trigger)
.add_field(name="Reaction", value=self.reaction)
.add_field(name="Creator", value=safe_mention(self.creator))
.add_field(name="Triggers in message", value=str(self.in_msg))
.add_field(name="Use Count", value=str(self.use_count))
.set_footer(text="Created")
)
if re.search(IMAGE_URL_REGEX, self.reaction):
embed.set_image(url=self.reaction)
return embed
async def increment_use_count(self, session):
self.use_count += 1
statement = (
update(Tag)
.where(Tag.tag_id == self.tag_id)
.values(use_count=self.use_count)
)
await session.execute(statement)
async def delete(self, session):
statement = delete(Tag).where(Tag.tag_id == self.tag_id)
await session.execute(statement)
async def update(self, session, key, value):
setattr(self, key, value)
statement = update(Tag).where(Tag.tag_id == self.tag_id).values({key: value})
await session.execute(statement)
@classmethod
def inject_bot(cls, bot):
cls.bot = bot
| 30.351648 | 85 | 0.633599 | 340 | 2,762 | 4.982353 | 0.302941 | 0.026564 | 0.026564 | 0.024793 | 0.135183 | 0.11157 | 0.069067 | 0.069067 | 0.054309 | 0.054309 | 0 | 0.001436 | 0.243664 | 2,762 | 90 | 86 | 30.688889 | 0.809478 | 0 | 0 | 0.068493 | 0 | 0.013699 | 0.071325 | 0.010862 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082192 | false | 0 | 0.082192 | 0.041096 | 0.39726 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51311102d9e729c7834446c830b3c543962cbf40 | 5,283 | py | Python | xray/train.py | kibernetika-ai/image_captioning | e0248758d293d7dabc0cfdbed4568de06a20d048 | [
"MIT"
] | null | null | null | xray/train.py | kibernetika-ai/image_captioning | e0248758d293d7dabc0cfdbed4568de06a20d048 | [
"MIT"
] | null | null | null | xray/train.py | kibernetika-ai/image_captioning | e0248758d293d7dabc0cfdbed4568de06a20d048 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import argparse
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
from xray import model
slim = tf.contrib.slim
tf.logging.set_verbosity(tf.logging.INFO)
log = tf.logging
def calc_max_length(tensor):
return max(len(t) for t in tensor)
def plot_attention(image, result, attention_plot):
temp_image = np.array(Image.open(image))
fig = plt.figure(figsize=(10, 10))
len_result = len(result)
for l in range(len_result):
temp_att = np.resize(attention_plot[l], (8, 8))
ax = fig.add_subplot(len_result, len_result, l + 1)
ax.set_title(result[l])
img = ax.imshow(temp_image)
ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent())
plt.tight_layout()
plt.show()
# captions on the validation set
# rid = np.random.randint(0, len(img_name_val))
# image = img_name_val[rid]
# real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
# ids, result, attention_plot = evaluate(image)
# print(cap_val[rid])
# print('Real Caption:', real_caption)
# for real in real_caption.split()[1:-1]:
# print(' %s' % label_map[real])
#
# print(ids)
# print('Pred Caption: ', ' '.join(result))
# for pred in result[:-1]:
# print(' %s' % label_map[pred])
#
# plot_attention(image, result, attention_plot)
# opening the image
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--annotations', type=str, default='./annotations.json', help='Annotations file path')
parser.add_argument('--data-dir', type=str, default='./data', help='Dataset directory')
parser.add_argument('--train-dir', type=str, default='training', help='Training dir')
parser.add_argument('--inception-path', type=str, default='./inception_v3.ckpt', help='Inception checkpoint')
parser.add_argument('--steps', type=int, default=1000, help='Training steps count')
parser.add_argument('--learning-rate', type=float, default=0.0001, help='Learning rate')
parser.add_argument('--log-step-count-steps', type=int, default=5, help='Log every N step')
parser.add_argument('--batch-size', type=int, default=8, help='Batch size')
parser.add_argument('--mode', default='train', choices=['train', 'export', 'eval'], help='Mode')
parser.add_argument('--eval', default=False, action='store_true', help='Run evaluation during train')
parser.add_argument('--export', default=False, action='store_true', help='Changes mode to export')
return parser.parse_args()
def export(xray, train_dir, params):
feature_placeholders = {
'images': tf.placeholder(tf.float32, [params['batch_size'], 299, 299, 3], name='images'),
}
receiver = tf.estimator.export.build_raw_serving_input_receiver_fn(
feature_placeholders,
default_batch_size=params['batch_size']
)
export_path = xray.export_savedmodel(
train_dir,
receiver,
)
export_path = export_path.decode("utf-8")
log.info('Exported to %s.' % export_path)
shutil.copy(
os.path.join(params['data_dir'], 'label_map.json'),
os.path.join(export_path, 'label_map.json'),
)
def main():
args = parse_args()
params = {
'batch_size': args.batch_size,
'buffer_size': 1000,
'embedding_size': 256,
'units': 512,
'limit_length': 10,
'grad_clip': 1.0,
'learning_rate': args.learning_rate,
'data_dir': args.data_dir,
'inception_path': args.inception_path,
'vocab_size': 0,
'attention_features_shape': 64,
'features_shape': 2048,
'log_step_count_steps': args.log_step_count_steps,
'keep_checkpoint_max': 5,
}
params['word_index'] = model.get_word_index(params)
params['max_length'] = params['limit_length']
vocab_size = len(params['word_index'])
params['vocab_size'] = vocab_size
conf = tf.estimator.RunConfig(
model_dir=args.train_dir,
save_summary_steps=100,
save_checkpoints_secs=120,
save_checkpoints_steps=None,
keep_checkpoint_max=params['keep_checkpoint_max'],
log_step_count_steps=params['log_step_count_steps'],
)
xray = model.Model(
params=params,
model_dir=args.train_dir,
config=conf,
)
mode = args.mode
if args.export:
mode = 'export'
if mode == 'train':
input_fn = model.input_fn(params, True)
if args.eval:
eval_input_fn = model.input_fn(params, False)
train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=args.steps)
eval_spec = tf.estimator.EvalSpec(
input_fn=eval_input_fn, steps=1, start_delay_secs=10, throttle_secs=10
)
tf.estimator.train_and_evaluate(xray, train_spec, eval_spec)
else:
xray.train(input_fn=input_fn, steps=args.steps)
elif mode == 'eval':
eval_input_fn = model.input_fn(params, False)
xray.evaluate(eval_input_fn, steps=1)
elif mode == 'export':
# export
export(xray, args.train_dir, params)
if __name__ == '__main__':
main()
| 33.226415 | 113 | 0.658906 | 715 | 5,283 | 4.634965 | 0.282517 | 0.027459 | 0.056427 | 0.025649 | 0.102897 | 0.071515 | 0.022933 | 0.022933 | 0.022933 | 0 | 0 | 0.016619 | 0.202726 | 5,283 | 158 | 114 | 33.436709 | 0.77018 | 0.106379 | 0 | 0.035088 | 0 | 0 | 0.168864 | 0.009783 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04386 | false | 0 | 0.078947 | 0.008772 | 0.140351 | 0.008772 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |