hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f6ea28bc3d46bfe5dfcf89ab752488b9203ba424 | 6,877 | py | Python | pyfiles.py | geoffmcnamara/pyfiles | d38f060ace02703b6fc0b55da12af0ef4ba4857d | [
"MIT"
] | null | null | null | pyfiles.py | geoffmcnamara/pyfiles | d38f060ace02703b6fc0b55da12af0ef4ba4857d | [
"MIT"
] | null | null | null | pyfiles.py | geoffmcnamara/pyfiles | d38f060ace02703b6fc0b55da12af0ef4ba4857d | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# vim: set syntax=none nospell:
import subprocess
import os
import fnmatch
import datetime
from bottle import default_app, route, run, template, response, redirect, debug, error, static_file
from wraphtml import WrapHtml
# for WSGI use:
application = default_app()
# config option #
debug(True)
# globals @
DLFILES_PATH = "/data/share/dlfiles"
application.config.setdefault('dlpath', "/data/share/dlfiles")
# application.config.setdefault('db_dir',"/data/share/db_dir")
# functions #
def run_cmd(cmd, ret_type="str"):
"""
run a command and return either a str or a list
"""
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # returns a class object
proc = proc.communicate()[0] # returns a string
if ret_type == "str":
return proc
if ret_type == "br":
# useful for html
br_proc = proc.replace('\n', "<br>\n")
return br_proc
list_proc = proc.split("\n")
return list_proc
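# Usage sketch: run_cmd("echo hi") -> "hi\n"; run_cmd("echo hi", "list")
# -> ["hi", ""] (the trailing "" comes from splitting on the final newline).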
# def _request(request, request_fallback=None):
# '''
# Extract request fields wherever they may come from: GET, POST, forms, fallback
# '''
# # Use lambdas to avoid evaluating bottle.request.* which may throw an Error
# all_dicts = [
# lambda: request.json,
# lambda: request.forms,
# lambda: request.query,
# lambda: request.files,
# #lambda: request.POST,
# lambda: request_fallback
# ]
# request_dict = dict()
# for req_dict_ in all_dicts:
# try:
# req_dict = req_dict_()
# except KeyError:
# continue
# if req_dict is not None and hasattr(req_dict, 'items'):
# for req_key, req_val in req_dict.items():
# request_dict[req_key] = req_val
# return request_dict
#
#
# def html_table(rows):
# '''
# input: data (list of lists)
# return: html_table
# '''
# table = "<table>"
# for row in rows:
# table += "<tr>"
# for cell in row:
# table += "<td>" + cell + "</td>"
# table += "</tr>"
# table += "</table>"
# return table
#
#
# def get_cols(db_conn,table):
# '''
# input: table
# return: cols (tuple?list)
# '''
# cur = db_conn.cursor()
# sql = "PRAGMA table_info(" + table + ")"
# cur.execute(sql)
# data = cur.fetchall()
# return data
#
# def doRows(cur, table=""):
# '''
# input: table
# get_cols
# get_pg_size
# truncateData (by rows and cols)
# addDERlinks (Details, Edit, Remove)
# return: htmltable
# '''
# # sql = "PRAGMA table_info(table)"
# # return = cur.execute(sql)
# # or .headers ON
# pass
#
#
#
# # EOFunctions #
# routes #
# @route('/static/<filepath:path>')
# def static(filepath):
# """
# docstring needed
# """
# return static_file(filepath, root='./static')
@error(404)
def err404(error):
return template('404.tpl', e=response.status_code)
# @route('/<s:path>/')
# def basename(s):
# print("basename :" + s)
# content = s
# html = WrapHtml(content)
# return html.wraphtml()
@route('/')
def home():
"""
Landing page: /index.html
WIP
"""
return redirect("/flist")
@route('/flist')
def flist():
"""
Purpose: to present "selected" files for download with a description for each
Requires: import os, import fnmatch
Selected files: !!! only files that have an associated description note file will get listed
The description note file must be in the same directory with the name of "." + filename + ".nts"
The first line of this file will get used as the file description. It has to be more than 4 characters...
"""
# mypath = "/data/share/dlfiles"
# application.config.setdefault('dlpath',mypath)
mypath = application.config['dlpath']
# import fnmatch
# import os
flist = os.listdir(mypath)
content = "<center>"
content += "<table>"
content += "<tr><th>Filename</th><th>Size</th><th>mTime</th><th>Description</th></tr>"
for fname in flist:
if fnmatch.fnmatch(fname, "*"):
note_file = mypath + "/." + fname + ".nts"
if os.path.isfile(note_file):
with open(note_file) as f:
first_line = f.readline()
if len(first_line) > 4:
size = os.path.getsize(mypath + "/" + fname)
# mtime = os.path.getmtime(mypath + "/" + fname)
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(mypath + "/" + fname))
# print(fname + " " + "{:,}".format(size) + " " + str(mtime))
content += '<tr><td><a href="/dl/' + fname + '">' + fname + '</a></td><td>' + "{:,}".format(size) + '</td><td>' + str(mtime) + '</td><td>' + first_line + '</td></tr>'
content += "</table>"
content += "</center>"
# proc = run_cmd("ls -ltra " + mypath,"br")
# print("type(proc): " + str(type(proc)))
# proc.replace("\n","<br>")
# proc.replace(",","<br>")
# content += "<hr>" + str(proc) + "<hr>"
# html = WrapHtml(content=content, title="Files for download",render_now=True,nav_d={"Home": "/"})
html = WrapHtml(content, nav_d={"Home": "/"})
html.nav_d = {"Home": "/"}
    html.title = 'Files for download'
return html.render()
@route('/dl/<filename:path>')
def download(filename):
mypath = application.config['dlpath']
note_file = mypath + "/." + filename + ".nts"
found = False
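    # The note file may carry a counter line such as "[cnt]: 3"; each download
    # rewrites it incremented, or appends "[cnt]: 1" the first time.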
# import fileinput
# for line in fileinput.input(note_file, inplace = 1):
with open(note_file, "r") as f:
flines = f.read().splitlines()
f.close()
new_flines = []
# print("flines: " + str(flines))
for line in flines:
if line.startswith("[cnt]:"):
found = True
words = line.split()
cnt = int(words[1]) + 1
print("[cnt]: " + str(cnt))
new_flines.append("[cnt]: " + str(cnt))
else:
print(line)
new_flines.append(line)
    if not found:
        f = open(note_file, "a")
        f.write("[cnt]: 1")
        f.close()
        print("Adding: [cnt] 1")
else:
print("write new_lines to note_file")
nf = open(note_file, "w+") # noqa:
for line in new_flines:
nf.write(line + "\n")
nf.close()
print("new_flines: " + str(new_flines))
# return static_file(filename, root='/path/to/static/files', download=filename)
return static_file(filename, root='/data/share/dlfiles', download=True) # download=True keeps the filename the same
# ## ####### ## #
if __name__ == '__main__':
# run(port=8080, debug=True, reloader=True)
run()
| 29.26383 | 190 | 0.558383 | 818 | 6,877 | 4.596577 | 0.310513 | 0.023404 | 0.017021 | 0.021543 | 0.0875 | 0.0375 | 0.026064 | 0 | 0 | 0 | 0 | 0.004453 | 0.281518 | 6,877 | 234 | 191 | 29.388889 | 0.756527 | 0.499927 | 0 | 0.04878 | 0 | 0.012195 | 0.136995 | 0.022524 | 0.012195 | 0 | 0 | 0 | 0 | 1 | 0.060976 | false | 0 | 0.073171 | 0.012195 | 0.219512 | 0.060976 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6ec649c38937c1fef3f220168731774d308f112 | 6,343 | py | Python | Matsuoka/tripple_pend_ex.py | JacobSal/Neuromechanical_Models | 548f395327276864f8d0ea2450d565e91155853a | [
"MIT"
] | null | null | null | Matsuoka/tripple_pend_ex.py | JacobSal/Neuromechanical_Models | 548f395327276864f8d0ea2450d565e91155853a | [
"MIT"
] | null | null | null | Matsuoka/tripple_pend_ex.py | JacobSal/Neuromechanical_Models | 548f395327276864f8d0ea2450d565e91155853a | [
"MIT"
] | 1 | 2021-03-02T17:46:15.000Z | 2021-03-02T17:46:15.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 11:25:26 2021
@author: jsalm
"""
import matplotlib.pyplot as plt
import numpy as np
from sympy import symbols
from sympy.physics import mechanics
from sympy import Dummy, lambdify
from scipy.integrate import odeint
#animation functions
from matplotlib import animation
from IPython.display import HTML
def integrate_pendulum(n, times, exforce=0,
initial_positions=135,
initial_velocities=0,
lengths=None, masses=1, dampening=0):
"""Integrate a multi-pendulum with `n` sections"""
#-------------------------------------------------
# Step 1: construct the pendulum model
# Generalized coordinates and velocities
# (in this case, angular positions & velocities of each mass)
q = mechanics.dynamicsymbols('q:{0}'.format(n))
u = mechanics.dynamicsymbols('u:{0}'.format(n))
# mass and length and dampening
m = symbols('m:{0}'.format(n))
l = symbols('l:{0}'.format(n))
k = symbols('k:{0}'.format(n))
# gravity and time symbols
g, t = symbols('g,t')
#force
f = mechanics.dynamicsymbols('f:{0}'.format(n))
#--------------------------------------------------
# Step 2: build the model using Kane's Method
# Create pivot point reference frame
A = mechanics.ReferenceFrame('A')
P = mechanics.Point('P')
P.set_vel(A, 0)
# lists to hold particles, forces, and kinetic ODEs
# for each pendulum in the chain
particles = []
forces = []
kinetic_odes = []
for i in range(n):
# Create a reference frame following the i^th mass
Ai = A.orientnew('A' + str(i), 'Axis', [q[i], A.z])
Ai.set_ang_vel(A, u[i] * A.z)
# Create a point in this reference frame
Pi = P.locatenew('P' + str(i), l[i] * Ai.x)
Pi.v2pt_theory(P, A, Ai)
# Create a new particle of mass m[i] at this point
Pai = mechanics.Particle('Pa' + str(i), Pi, m[i])
particles.append(Pai)
# Set forces & compute kinematic ODE
forces.append((Pi, f[i]*A.y+m[i]*g*A.x))
kinetic_odes.append(q[i].diff(t) - u[i])
P = Pi
# Generate equations of motion
KM = mechanics.KanesMethod(A, q_ind=q, u_ind=u,
kd_eqs=kinetic_odes)
fr, fr_star = KM.kanes_equations(particles, forces)
#-----------------------------------------------------
# Step 3: numerically evaluate equations and integrate
# initial positions and velocities – assumed to be given in degrees
y0 = np.deg2rad(np.concatenate([np.broadcast_to(initial_positions, n),
np.broadcast_to(initial_velocities, n)]))
# lengths and masses
if lengths is None:
lengths = np.ones(n) / n
lengths = np.broadcast_to(lengths, n)
masses = np.broadcast_to(masses, n)
damp = np.broadcast_to(dampening,n)
    # Fixed parameters: gravitational constant, lengths, and masses
parameters = [g] + list(l) + list(m) + list(k)
parameter_vals = [9.81] + list(lengths) + list(masses) + list(damp)
# define symbols for unknown parameters
dynamic = q + u + f
unknowns = [Dummy() for i in dynamic]
unknown_dict = dict(zip(dynamic, unknowns))
kds = KM.kindiffdict()
# substitute unknown symbols for qdot terms
mm_sym = KM.mass_matrix_full.subs(kds).subs(unknown_dict)
fo_sym = KM.forcing_full.subs(kds).subs(unknown_dict)
# create functions for numerical calculation
mm_func = lambdify(unknowns + parameters, mm_sym)
fo_func = lambdify(unknowns + parameters, fo_sym)
# function which computes the derivatives of parameters
def gradient(y, t, args):
        # Bug fix (assumed intent): mm_func/fo_func were lambdified over the
        # force symbols f as well, so bind the constant external forces here.
        vals = np.concatenate((y, np.broadcast_to(exforce, n), args))
sol = np.linalg.solve(mm_func(*vals), fo_func(*vals))
return np.array(sol).T[0]
# ODE integration
return odeint(gradient, y0, times, args=(parameter_vals,))
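# Note: the array returned by integrate_pendulum has shape (len(times), 2*n);
# columns 0..n-1 hold the joint angles q, columns n..2*n-1 the rates u.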
def get_xy_coords(p, lengths=None):
"""Get (x, y) coordinates from generalized coordinates p"""
p = np.atleast_2d(p)
n = p.shape[1] // 2
if lengths is None:
lengths = np.ones(n) / n
zeros = np.zeros(p.shape[0])[:, None]
x = np.hstack([zeros, lengths * np.sin(p[:, :n])])
y = np.hstack([zeros, -lengths * np.cos(p[:, :n])])
return np.cumsum(x, 1), np.cumsum(y, 1)
def get_angles(p, lengths = None):
pass
def plot_pendulum_trace(p):
x, y = get_xy_coords(p)
plt.figure("tripple Pendulum Trace")
plt.plot(x, y);
plt.xlabel("position (m)")
plt.ylabel("position (m)")
plt.show()
# plt.close()
return 0
def set_new_tpp(Xp,n_p):
n = Xp.shape[1] // 2
n_p_new = n_p.copy()
n_p_new[0] = list(Xp[-1,:n])
n_p_new[1] = list(Xp[-1,n:])
return n_p_new
def animate_pendulum(p,t):
x, y = get_xy_coords(p)
fig, ax = plt.subplots(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax.axis('off')
ax.set(xlim=(-1, 1), ylim=(-1, 1))
line, = ax.plot([], [], 'o-', lw=2)
def init():
line.set_data([], [])
return line,
def animate(i):
line.set_data(x[i], y[i])
return line,
anim = animation.FuncAnimation(fig, animate, frames=len(t),
interval=1000 * t.max() / len(t),
blit=True, init_func=init)
# plt.close(fig)
return anim
if __name__ == '__main__':
    Ttot = 1  # total time in seconds
f_s = 50 # sample frequency (samples/s)
t = np.arange(0,Ttot,1/f_s)
n = 3
jj = []
n_p = [[135,135,135],[0,0,0],[1,1,1],[1,1,1],1]
exforce = [0,0,0]
p = integrate_pendulum(n,t,exforce,n_p[0],n_p[1],n_p[2],n_p[3],n_p[4])
# for i in range(len(t)-1):
# t_s = [t[i],t[i+1]]
# print(n_p[0])
# p = integrate_pendulum(n,t_s,exforce,n_p[0],n_p[1],n_p[2],n_p[3],n_p[4])
# n_p = set_new_tpp(p,n_p)
# jj.append(p[-1,:])
# p = np.stack(jj)
x, y = get_xy_coords(p)
plt.figure("tripple Pendulum Trace")
plt.plot(x, y);
plt.waitforbuttonpress(5)
plt.close()
anim = animate_pendulum(p,t)
# HTML(anim.to_html5_video())
# HTML('<video controls loop src="http://jakevdp.github.io/videos/triple-pendulum.mp4" />') | 31.093137 | 95 | 0.578906 | 934 | 6,343 | 3.825482 | 0.300857 | 0.011755 | 0.013434 | 0.013434 | 0.104394 | 0.092079 | 0.059894 | 0.059894 | 0.059894 | 0.043101 | 0 | 0.023241 | 0.253823 | 6,343 | 204 | 95 | 31.093137 | 0.73146 | 0.267539 | 0 | 0.111111 | 0 | 0 | 0.027027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0.008547 | 0.068376 | 0 | 0.213675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6ec8c44996e7a72f62986aaf887de9edefb0ec5 | 722 | py | Python | hackerrank/captains_room.py | nityansuman/coding-python | b1569de95d1881a82fb32f394f24617cfdcdf4b7 | [
"Apache-2.0"
] | null | null | null | hackerrank/captains_room.py | nityansuman/coding-python | b1569de95d1881a82fb32f394f24617cfdcdf4b7 | [
"Apache-2.0"
] | null | null | null | hackerrank/captains_room.py | nityansuman/coding-python | b1569de95d1881a82fb32f394f24617cfdcdf4b7 | [
"Apache-2.0"
] | 2 | 2021-06-25T16:49:36.000Z | 2022-02-13T03:27:45.000Z | def find_captains_room(rooms, size):
all_guests = list(map(int, rooms))
unique_guests = set(all_guests)
sum_all_guests = sum(all_guests)
# Get the sum of all the unique room numbers
sum_unique_guests = sum(unique_guests)
# Get the difference, in the sum: the captains room will cause this difference
temp = (sum_unique_guests * size) - sum_all_guests
# Compute the captain's room number
return temp // (size - 1)
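# Worked example (assumed input): size = 3, rooms = "1 2 3 2 1 3 4 1 2 3".
# sum_all_guests = 22, the unique rooms {1, 2, 3, 4} sum to 10, so the
# captain's room causes a difference of 10 * 3 - 22 = 8, and 8 // (3 - 1) = 4.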
if __name__ == "__main__":
# Read input size from stdin
size = int(input().strip())
# Read room number list from stdin
rooms = input().strip().split(" ")
# Find captain's room
captains_room_number = find_captains_room(rooms, size)
print(captains_room_number)
| 26.740741 | 80 | 0.707756 | 107 | 722 | 4.504673 | 0.373832 | 0.124481 | 0.074689 | 0.087137 | 0.172199 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001724 | 0.196676 | 722 | 26 | 81 | 27.769231 | 0.82931 | 0.322715 | 0 | 0 | 0 | 0 | 0.019737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6f03810010754a86c0317dca23f8a7eaa2dccb4 | 2,050 | py | Python | receptor/connection/sock.py | RedHatOfficial/receptor | 0eb9f0e3bd3b25bce948f7a2f43562f181a630a1 | [
"Apache-2.0"
] | 6 | 2020-07-12T05:56:21.000Z | 2022-03-09T11:43:53.000Z | receptor/connection/sock.py | RedHatOfficial/receptor | 0eb9f0e3bd3b25bce948f7a2f43562f181a630a1 | [
"Apache-2.0"
] | 7 | 2020-07-06T15:51:06.000Z | 2021-08-18T18:55:26.000Z | receptor/connection/sock.py | RedHatOfficial/receptor | 0eb9f0e3bd3b25bce948f7a2f43562f181a630a1 | [
"Apache-2.0"
] | 3 | 2020-06-25T21:03:42.000Z | 2021-08-09T01:27:48.000Z | import asyncio
import logging
from .base import Transport, log_ssl_detail
logger = logging.getLogger(__name__)
class RawSocket(Transport):
def __init__(self, reader, writer, chunk_size=2 ** 16):
self.reader = reader
self.writer = writer
self._closed = False
self.chunk_size = chunk_size
async def __anext__(self):
bytes_ = await self.reader.read(self.chunk_size)
if not bytes_:
self.close()
return bytes_
@property
def closed(self):
return self._closed
def close(self):
self._closed = True
self.writer.close()
async def send(self, q):
async for chunk in q:
self.writer.write(chunk)
await self.writer.drain()
def _diagnostics(self):
t = self.writer._transport.get_extra_info
addr, port = t("peername", (None, None))
return {
"address": addr,
"port": port,
"compression": t("compression"),
"cipher": t("cipher"),
"peercert": t("peercert"),
"sslcontext": t("sslcontext"),
"closed": self.closed,
"chunk_size": self.chunk_size,
}
async def connect(host, port, factory, loop=None, ssl=None, reconnect=True):
if not loop:
loop = asyncio.get_event_loop()
worker = factory()
try:
r, w = await asyncio.open_connection(host, port, loop=loop, ssl=ssl)
log_ssl_detail(w._transport)
t = RawSocket(r, w)
await worker.client(t)
except Exception as ex:
logger.info(f"sock.connect: connection failed, {str(ex)}")
if not reconnect:
return False
finally:
if reconnect:
await asyncio.sleep(5)
logger.debug("sock.connect: reconnection")
loop.create_task(connect(host, port, factory, loop))
return True
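# Usage sketch (hypothetical names): dial out and keep retrying every 5 s,
# where Worker stands in for a protocol factory whose instances expose
# client()/server() coroutines:
#   loop = asyncio.get_event_loop()
#   loop.create_task(connect("peer.example.com", 8888, Worker))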
async def serve(reader, writer, factory):
log_ssl_detail(writer._transport)
t = RawSocket(reader, writer)
await factory().server(t)
| 26.973684 | 76 | 0.593171 | 242 | 2,050 | 4.859504 | 0.355372 | 0.045918 | 0.030612 | 0.028912 | 0.044218 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002774 | 0.296585 | 2,050 | 75 | 77 | 27.333333 | 0.81276 | 0 | 0 | 0 | 0 | 0 | 0.08439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0 | 0.04918 | 0.016393 | 0.213115 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6f1242f3dff4a13c09f9cca00fa6595b77c32d7 | 434 | py | Python | echo.py | devopsprosiva/python | 07311d7597c0895554efe8013b57f218a0f11bb5 | [
"MIT"
] | null | null | null | echo.py | devopsprosiva/python | 07311d7597c0895554efe8013b57f218a0f11bb5 | [
"MIT"
] | null | null | null | echo.py | devopsprosiva/python | 07311d7597c0895554efe8013b57f218a0f11bb5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# https://python-essentials.readthedocs.io/en/latest/echo.html
while True:
user_input = input("Enter some text: ")
try:
        int(user_input)  # raises ValueError if the text is not an integer
print("You entered an integer " + user_input)
continue
except ValueError:
if user_input == 'quit':
print("You entered quit. So quitting...")
break
else:
print("You entered the string: " + user_input)
| 22.842105 | 62 | 0.665899 | 60 | 434 | 4.65 | 0.666667 | 0.193548 | 0.16129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.211982 | 434 | 18 | 63 | 24.111111 | 0.815789 | 0.186636 | 0 | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6f15cefe9ad0d3236e47039b8b006b0931ac91a | 1,021 | py | Python | medium/560-subarray-sum-equals-k.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 2 | 2021-03-14T11:38:26.000Z | 2021-03-14T11:38:30.000Z | medium/560-subarray-sum-equals-k.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | null | null | null | medium/560-subarray-sum-equals-k.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 1 | 2022-01-17T19:33:23.000Z | 2022-01-17T19:33:23.000Z | '''
和为K的子数组
给定一个整数数组和一个整数 k,你需要找到该数组中和为 k 的连续的子数组的个数。
说明 :
数组的长度为 [1, 20,000]。
数组中元素的范围是 [-1000, 1000] ,且整数 k 的范围是 [-1e7, 1e7]。
'''
from typing import List
from collections import defaultdict
'''
Approach: prefix sum + hash map
Scan left to right maintaining the running prefix sum. Whenever presum == k,
the prefix itself is a qualifying subarray, so ans += 1.
If some earlier prefix sum equals presum - k, then since that prefix covers
0..x while the current one covers 0..i with i > x, the subarray x+1..i sums
to exactly k.
Earlier prefix sums are recorded in the hash map presums: key = prefix sum,
value = number of prefixes having that sum.
1074. [Number of Submatrices That Sum to Target](hard/1074-number-of-submatrices-that-sum-to-target.py)
is the 2-D upgrade of this problem.
Time complexity: O(n)
Space complexity: O(n)
'''
class Solution:
def subarraySum(self, nums: List[int], k: int) -> int:
ans = 0
        presum = 0  # running prefix sum
        presums = defaultdict(int)  # count of each prefix sum seen so far
        for num in nums:
            presum += num
            if presum == k:  # the whole prefix 0..i sums to k
                ans += 1
            if (presum - k) in presums:  # each earlier prefix with sum presum-k yields a subarray summing to k
ans += presums[presum - k]
presums[presum] += 1
return ans
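# Worked trace (example input): nums = [1, 1, 1], k = 2
#   i=0: presum=1, no match -> presums={1: 1}
#   i=1: presum=2 == k -> ans=1; presums={1: 1, 2: 1}
#   i=2: presum=3, presum-k=1 is in presums -> ans=2
# i.e. the two subarrays [1, 1] at indices (0, 1) and (1, 2).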
s = Solution()
print(s.subarraySum(nums=[1, 1, 1], k=2))
| 24.309524 | 84 | 0.63761 | 127 | 1,021 | 5.125984 | 0.606299 | 0.032258 | 0.02765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047497 | 0.237023 | 1,021 | 41 | 85 | 24.902439 | 0.78819 | 0.184133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.294118 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6f2ad180c13071a9741435f918fec8c6729ea8e | 5,139 | py | Python | xclim/testing/tests/test_sdba/diagnostics.py | Ouranosinc/hailstorm | 494c850164a9f553eeeba66c6cc90fe398eb2094 | [
"Apache-2.0"
] | 1 | 2018-08-20T16:36:40.000Z | 2018-08-20T16:36:40.000Z | xclim/testing/tests/test_sdba/diagnostics.py | Ouranosinc/hailstorm | 494c850164a9f553eeeba66c6cc90fe398eb2094 | [
"Apache-2.0"
] | 3 | 2018-08-23T13:25:47.000Z | 2018-08-23T15:59:45.000Z | xclim/testing/tests/test_sdba/diagnostics.py | Ouranosinc/hailstorm | 494c850164a9f553eeeba66c6cc90fe398eb2094 | [
"Apache-2.0"
] | null | null | null | # noqa: D205,D400
"""
SDBA Diagnostic Testing Module
==============================
This module is meant to compare results with those expected from papers, or create figures illustrating the
behavior of sdba methods and utilities.
"""
from __future__ import annotations
import numpy as np
from scipy.stats import gaussian_kde, scoreatpercentile
from xclim.sdba.adjustment import (
DetrendedQuantileMapping,
EmpiricalQuantileMapping,
QuantileDeltaMapping,
)
from xclim.sdba.processing import adapt_freq
from . import utils as tu
try:
from matplotlib import pyplot as plt
except ModuleNotFoundError:
plt = False
__all__ = ["synth_rainfall", "cannon_2015_figure_2", "adapt_freq_graph"]
def synth_rainfall(shape, scale=1, wet_freq=0.25, size=1):
r"""Return gamma distributed rainfall values for wet days.
Notes
-----
The probability density for the Gamma distribution is:
.. math::
p(x) = x^{k-1}\frac{e^{-x/\theta}}{\theta^k\Gamma(k)},
where :math:`k` is the shape and :math:`\theta` the scale, and :math:`\Gamma` is the Gamma function.
"""
is_wet = np.random.binomial(1, p=wet_freq, size=size)
wet_intensity = np.random.gamma(shape, scale, size)
return np.where(is_wet, wet_intensity, 0)
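# Usage sketch (hypothetical values): one year of daily totals with a 25%
# wet-day frequency and Gamma(2, 2)-distributed wet-day intensities:
#   pr = synth_rainfall(2, scale=2, wet_freq=0.25, size=365)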
def cannon_2015_figure_2():  # noqa: D103
n = 10000
ref, hist, sim = tu.cannon_2015_rvs(n, random=False)
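    # Each adjustment object below is trained on (ref, hist) and applied to
    # sim, mirroring the method comparison of Cannon et al. (2015), Figure 2.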
QM = EmpiricalQuantileMapping(kind="*", group="time", interp="linear")
QM.train(ref, hist)
sim_eqm = QM.predict(sim)
DQM = DetrendedQuantileMapping(kind="*", group="time", interp="linear")
DQM.train(ref, hist)
sim_dqm = DQM.predict(sim, degree=0)
QDM = QuantileDeltaMapping(kind="*", group="time", interp="linear")
QDM.train(ref, hist)
sim_qdm = QDM.predict(sim)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(11, 4))
x = np.linspace(0, 105, 50)
ax1.plot(x, gaussian_kde(ref)(x), color="r", label="Obs hist")
ax1.plot(x, gaussian_kde(hist)(x), color="k", label="GCM hist")
    ax1.plot(x, gaussian_kde(sim)(x), color="blue", label="GCM future")
ax1.plot(x, gaussian_kde(sim_qdm)(x), color="lime", label="QDM future")
ax1.plot(x, gaussian_kde(sim_eqm)(x), color="darkgreen", ls="--", label="QM future")
ax1.plot(x, gaussian_kde(sim_dqm)(x), color="lime", ls=":", label="DQM future")
ax1.legend(frameon=False)
ax1.set_xlabel("Value")
ax1.set_ylabel("Density")
tau = np.array([0.25, 0.5, 0.75, 0.95, 0.99]) * 100
bc_gcm = (
scoreatpercentile(sim, tau) - scoreatpercentile(hist, tau)
) / scoreatpercentile(hist, tau)
bc_qdm = (
scoreatpercentile(sim_qdm, tau) - scoreatpercentile(ref, tau)
) / scoreatpercentile(ref, tau)
bc_eqm = (
scoreatpercentile(sim_eqm, tau) - scoreatpercentile(ref, tau)
) / scoreatpercentile(ref, tau)
bc_dqm = (
scoreatpercentile(sim_dqm, tau) - scoreatpercentile(ref, tau)
) / scoreatpercentile(ref, tau)
ax2.plot([0, 1], [0, 1], ls=":", color="blue")
ax2.plot(bc_gcm, bc_gcm, "-", color="blue", label="GCM")
ax2.plot(bc_gcm, bc_qdm, marker="o", mfc="lime", label="QDM")
ax2.plot(
bc_gcm,
bc_eqm,
marker="o",
mfc="darkgreen",
ls=":",
color="darkgreen",
label="QM",
)
ax2.plot(
bc_gcm,
bc_dqm,
marker="s",
mec="lime",
mfc="w",
ls="--",
color="lime",
label="DQM",
)
for i, s in enumerate(tau / 100):
ax2.text(bc_gcm[i], bc_eqm[i], f"{s} ", ha="right", va="center", fontsize=9)
ax2.set_xlabel("GCM relative change")
ax2.set_ylabel("Bias adjusted relative change")
ax2.legend(loc="upper left", frameon=False)
ax2.set_aspect("equal")
plt.tight_layout()
return fig
def adapt_freq_graph():
"""Create a graphic with the additive adjustment factors estimated after applying the adapt_freq method."""
n = 10000
x = tu.series(synth_rainfall(2, 2, wet_freq=0.25, size=n), "pr") # sim
y = tu.series(synth_rainfall(2, 2, wet_freq=0.5, size=n), "pr") # ref
xp = adapt_freq(x, y, thresh=0).sim_ad
fig, (ax1, ax2) = plt.subplots(2, 1)
sx = x.sortby(x)
sy = y.sortby(y)
sxp = xp.sortby(xp)
# Original and corrected series
ax1.plot(sx.values, color="blue", lw=1.5, label="x : sim")
ax1.plot(sxp.values, color="pink", label="xp : sim corrected")
ax1.plot(sy.values, color="k", label="y : ref")
ax1.legend()
# Compute qm factors
qm_add = QuantileDeltaMapping(kind="+", group="time").train(y, x).ds
qm_mul = QuantileDeltaMapping(kind="*", group="time").train(y, x).ds
qm_add_p = QuantileDeltaMapping(kind="+", group="time").train(y, xp).ds
qm_mul_p = QuantileDeltaMapping(kind="*", group="time").train(y, xp).ds
qm_add.cf.plot(ax=ax2, color="cyan", ls="--", label="+: y-x")
qm_add_p.cf.plot(ax=ax2, color="cyan", label="+: y-xp")
qm_mul.cf.plot(ax=ax2, color="brown", ls="--", label="*: y/x")
qm_mul_p.cf.plot(ax=ax2, color="brown", label="*: y/xp")
ax2.legend(loc="upper left", frameon=False)
return fig
| 32.11875 | 111 | 0.628916 | 743 | 5,139 | 4.236878 | 0.277254 | 0.020013 | 0.028907 | 0.030496 | 0.286531 | 0.212834 | 0.163596 | 0.109276 | 0.076874 | 0.029225 | 0 | 0.030148 | 0.19965 | 5,139 | 159 | 112 | 32.320755 | 0.73523 | 0.134851 | 0 | 0.119266 | 0 | 0 | 0.099226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027523 | false | 0 | 0.06422 | 0 | 0.119266 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6f56cdb2070cf0bbc1d4d28c70e844d719585b1 | 4,973 | py | Python | models/general_modules.py | zlijingtao/DAC20_reconstruction | c928cda1c8e492c05110d6c219c1ed529924e127 | [
"Apache-2.0"
] | 2 | 2021-03-13T19:27:04.000Z | 2021-11-17T17:14:19.000Z | models/general_modules.py | zlijingtao/DAC20_reconstruction | c928cda1c8e492c05110d6c219c1ed529924e127 | [
"Apache-2.0"
] | null | null | null | models/general_modules.py | zlijingtao/DAC20_reconstruction | c928cda1c8e492c05110d6c219c1ed529924e127 | [
"Apache-2.0"
] | null | null | null | import torch
import pdb
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
from torch.autograd import Function
import numpy as np

# Module-level counters used by Unite.forward() when saving tensors.
# NOTE: assumed initialization -- the original source relies on `ti` and
# `num_res` being defined elsewhere; `ti` counts forward passes and
# `num_res` caps how many centroid/deviation snapshots get written.
ti = 0
num_res = 0
def get_centroid(input, grain_size, num_bits, M2D):
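    # Average the tensor over non-overlapping grains of `grain_size` via
    # avg_pool2d, quantize each grain mean, then tile the result back to the
    # original shape so every element carries its grain's centroid value.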
if len(input.size()) == 2:
print(input.size())
print(grain_size)
original_size = input.size()
reshaped_input = input.view(1, 1, original_size[0], original_size[1])
pooling_result = F.avg_pool2d(reshaped_input, grain_size, grain_size)
pooling_result = get_quantized(pooling_result, num_bits, M2D)
pooling_result = pooling_result.view(pooling_result.size()[2:])
print(pooling_result.size())
pooling_result = pooling_result.unsqueeze(1).repeat(1,grain_size[0], 1).view(-1,pooling_result.size()[1]).transpose(0,1)
output = pooling_result.repeat(1, grain_size[1]).view(-1,pooling_result.size()[1]).transpose(0,1)
if len(input.size()) == 4:
original_size = input.size()
reshaped_input = input.permute(1, 2, 3, 0).view(1, 1, -1, original_size[0])
pooling_result = F.avg_pool2d(reshaped_input, grain_size, grain_size)
pooling_result = get_quantized(pooling_result, num_bits, M2D)
pooling_result = pooling_result.view(pooling_result.size()[2:])
pooling_result = pooling_result.unsqueeze(1).repeat(1,grain_size[0], 1).view(-1,pooling_result.size()[1]).transpose(0,1)
pooling_result = pooling_result.repeat(1, grain_size[1]).view(-1,pooling_result.size()[1]).transpose(0,1)
output = pooling_result.view(original_size[1], original_size[2], original_size[3], original_size[0]).permute(3, 0, 1, 2)
return output
def get_quantized(input, num_bits, M2D):
output = input.clone()
if M2D != 0.0:
qmin = 0
qmax = qmin + 2.**num_bits - 1.
scale = 2 * M2D / (qmax - qmin)
output.div_(scale)
output.add_((qmax - qmin)/2)
output.clamp_(qmin, qmax).round_()
output.add_(-(qmax - qmin)/2)
output.mul_(scale)
else:
output = input.clone().zero_()
return output
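# Worked example (assumed settings): with num_bits=2 and M2D=1.0 we get
# qmin=0, qmax=3, scale=2/3, so inputs snap to the 4 levels {-1, -1/3, 1/3, 1};
# with M2D=0.0 the quantized output is all zeros.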
def get_clipped(input, range):
output = input.clone()
output.clamp_(-range, range)
return output
class Unite(torch.autograd.Function):
def __init__(self, grain_size, num_bits, M2D, save_path):
super(Unite,self).__init__()
self.grain_size = grain_size #grain size in tuple
self.M2D = M2D
self.num_bits = num_bits
self.save_path = save_path
def forward(self, input):
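        # Split the weight into a quantized per-grain centroid plus a
        # deviation clipped to magnitude W = 1 - M2D, then recombine them.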
self.save_for_backward(input)
self.centroid = get_centroid(input, self.grain_size, self.num_bits, self.M2D)
global ti
global num_res
ti += 1
input_d = (input - self.centroid)
output = input.clone().zero_()
self.W = 1-self.M2D
output = get_clipped(input_d, self.W)
if ti <=num_res:
torch.save(self.centroid, self.save_path + '/saved_tensors/centroid{}.pt'.format(ti))
torch.save(output, self.save_path + '/saved_tensors/deviation{}.pt'.format(ti))
output = output + self.centroid
return output
def backward(self, grad_output):
# saved tensors - tuple of tensors with one element
grad_input = grad_output.clone()
input, = self.saved_tensors
grad_input[input.ge(1)] = 0
grad_input[input.le(-1)] = 0
return grad_input
class UniteLinear(nn.Linear):
def __init__(self, infeatures, classes, grain_size, num_bits, M2D, save_path):
super(UniteLinear, self).__init__(in_features = infeatures, out_features = classes, bias=True)
self.grain_size = grain_size
self.num_bits = num_bits
self.M2D = M2D
self.save_path = save_path
print("FClayer: grain_size: %s, num_bits: %d, M2D ratio: %.4f"% (str(grain_size), num_bits, M2D))
def forward(self, input):
weight = Unite(grain_size = self.grain_size , num_bits = self.num_bits, M2D = self.M2D, save_path = self.save_path)(self.weight)
output = F.linear(input, weight, self.bias)
return output
class UniteConv2d(nn.Conv2d):
def __init__(self, inplanes, planes, kernel_size, stride, padding, bias, grain_size, num_bits, M2D, save_path):
super(UniteConv2d, self).__init__(in_channels = inplanes, out_channels = planes, kernel_size = kernel_size, stride = stride, padding = padding, bias = bias)
self.grain_size = grain_size
self.num_bits = num_bits
self.M2D = M2D
self.save_path = save_path
print("Convlayer: grain_size: %s, num_bits: %d, M2D ratio: %.4f"% (str(grain_size), num_bits, M2D))
def forward(self, input):
weight = Unite(grain_size = self.grain_size , num_bits = self.num_bits, M2D = self.M2D, save_path = self.save_path)(self.weight)
output = F.conv2d(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return output
| 42.87069 | 164 | 0.658958 | 702 | 4,973 | 4.428775 | 0.159544 | 0.08395 | 0.035381 | 0.041171 | 0.487617 | 0.436475 | 0.413959 | 0.388871 | 0.357993 | 0.357993 | 0 | 0.025424 | 0.216972 | 4,973 | 115 | 165 | 43.243478 | 0.772984 | 0.013875 | 0 | 0.37 | 0 | 0 | 0.034068 | 0.011628 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.08 | 0 | 0.28 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6f61c548dca85e9fb4e47a43a333094a78bec72 | 1,816 | py | Python | kws_streaming/layers/preemphasis_test.py | egonrian/google-research | 8177adbe9ca0d7e5a9463b54581fe6dd27be0974 | [
"Apache-2.0"
] | 3 | 2021-01-18T04:46:49.000Z | 2021-03-05T09:21:40.000Z | kws_streaming/layers/preemphasis_test.py | JustinDurham/google-research | 9049acf9246c1b75170f0c6757e62a8f619a9db6 | [
"Apache-2.0"
] | 25 | 2020-07-25T08:53:09.000Z | 2022-03-12T00:43:02.000Z | kws_streaming/layers/preemphasis_test.py | JustinDurham/google-research | 9049acf9246c1b75170f0c6757e62a8f619a9db6 | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kws_streaming.layers.preemphasis."""
import numpy as np
from kws_streaming.layers import preemphasis
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
import kws_streaming.layers.test_utils as tu
tf1.disable_eager_execution()
class PreemphasisTest(tu.FrameTestBase):
def test_derivative_calculation(self):
    # compare the TF implementation with a numpy reference
preemph = 0.97
preemphasis_layer = preemphasis.Preemphasis(preemph=preemph)
# it receives all data with size: data_size
input1 = tf.keras.layers.Input(
shape=(self.data_size,),
batch_size=self.inference_batch_size,
dtype=tf.float32)
output1 = preemphasis_layer(input1)
model = tf.keras.models.Model(input1, output1)
# generate frames for the whole signal (no streaming here)
output_tf = model.predict(self.signal)
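    # numpy reference: y[0] = (1 - p) * x[0] and y[i] = x[i] - p * x[i-1]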
output_np = []
output_np.append(self.signal[0][0] * (1 - preemph))
for i in range(1, self.data_size):
derivative = self.signal[0][i] - preemph * self.signal[0][i - 1]
output_np.append(derivative)
self.assertAllClose(np.asarray(output_np), output_tf[0])
if __name__ == "__main__":
tf.test.main()
| 31.859649 | 74 | 0.736233 | 261 | 1,816 | 5 | 0.498084 | 0.045977 | 0.068966 | 0.050575 | 0.052107 | 0.052107 | 0 | 0 | 0 | 0 | 0 | 0.019282 | 0.171806 | 1,816 | 56 | 75 | 32.428571 | 0.848404 | 0.417952 | 0 | 0 | 0 | 0 | 0.007729 | 0 | 0 | 0 | 0 | 0 | 0.04 | 1 | 0.04 | false | 0 | 0.2 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6f7ca029395167d19947b07a6906680c9c59669 | 1,430 | py | Python | pymobiledevice3/services/dtfetchsymbols.py | iOSForensics/pymobiledevice3 | 6b148f4e58cc51cb44c18935913a3e6cec5b60d5 | [
"MIT"
] | 1 | 2022-01-20T16:53:15.000Z | 2022-01-20T16:53:15.000Z | pymobiledevice3/services/dtfetchsymbols.py | iOSForensics/pymobiledevice3 | 6b148f4e58cc51cb44c18935913a3e6cec5b60d5 | [
"MIT"
] | null | null | null | pymobiledevice3/services/dtfetchsymbols.py | iOSForensics/pymobiledevice3 | 6b148f4e58cc51cb44c18935913a3e6cec5b60d5 | [
"MIT"
] | null | null | null | import logging
import struct
import typing
from pymobiledevice3.exceptions import PyMobileDevice3Exception
from pymobiledevice3.lockdown import LockdownClient
class DtFetchSymbols(object):
SERVICE_NAME = 'com.apple.dt.fetchsymbols'
MAX_CHUNK = 1024 * 1024 * 10 # 10MB
CMD_LIST_FILES_PLIST = struct.pack('>I', 0x30303030)
CMD_GET_FILE = struct.pack('>I', 1)
def __init__(self, lockdown: LockdownClient):
self.logger = logging.getLogger(__name__)
self.lockdown = lockdown
def list_files(self) -> bytes:
service = self._start_command(self.CMD_LIST_FILES_PLIST)
return service.recv_plist().get('files')
def get_file(self, fileno: int, stream: typing.IO):
service = self._start_command(self.CMD_GET_FILE)
service.sendall(struct.pack('>I', fileno))
size = struct.unpack('>Q', service.recvall(8))[0]
self.logger.debug(f'file size: {size}')
received = 0
while received < size:
buf = service.recv(min(size - received, self.MAX_CHUNK))
stream.write(buf)
received += len(buf)
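    # Usage sketch (assumed flow; requires an established LockdownClient):
    #   fetcher = DtFetchSymbols(lockdown)
    #   names = fetcher.list_files()
    #   with open("symbols.bin", "wb") as out:
    #       fetcher.get_file(0, out)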
def _start_command(self, cmd: bytes):
service = self.lockdown.start_developer_service(self.SERVICE_NAME)
service.sendall(cmd)
# receive same command as an ack
if cmd != service.recvall(len(cmd)):
raise PyMobileDevice3Exception('bad ack')
return service
| 31.777778 | 74 | 0.665734 | 173 | 1,430 | 5.312139 | 0.416185 | 0.047878 | 0.035909 | 0.062024 | 0.065288 | 0.065288 | 0 | 0 | 0 | 0 | 0 | 0.026268 | 0.227972 | 1,430 | 44 | 75 | 32.5 | 0.806159 | 0.024476 | 0 | 0 | 0 | 0 | 0.04454 | 0.01796 | 0 | 0 | 0.007184 | 0 | 0 | 1 | 0.125 | false | 0 | 0.15625 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6faf53f3fd09e7ab28a9081de0a8f0200d7ee62 | 22,106 | py | Python | Data/FrackFinder/PA/2005-2010/Transformations_and_QAQC/MoorFrog/bin/task2shp.py | SkyTruth/CrowdProjects | eede4c97ca5195d8ad39ce353c962f588e52c6ad | [
"BSD-3-Clause"
] | 2 | 2015-05-23T06:57:32.000Z | 2016-08-21T17:50:32.000Z | Data/FrackFinder/PA/2013/Transformations_and_QAQC/MoorFrog/bin/task2shp.py | SkyTruth/CrowdProjects | eede4c97ca5195d8ad39ce353c962f588e52c6ad | [
"BSD-3-Clause"
] | 25 | 2015-01-08T16:00:08.000Z | 2017-05-04T17:37:23.000Z | Data/FrackFinder/PA/2013/Transformations_and_QAQC/MoorFrog/bin/task2shp.py | SkyTruth/CrowdProjects | eede4c97ca5195d8ad39ce353c962f588e52c6ad | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# This document is part of CrowdProjects
# https://github.com/skytruth/CrowdProjects
# =========================================================================== #
#
# Copyright (c) 2014, SkyTruth
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# =========================================================================== #
"""
Convert a FrackFinder MoorFrog 2005-2010 JSON export to three layers:
bounding boxes, pond clicks, and well pad points
"""
import os
import sys
import json
from os import sep
from os.path import *
try:
from osgeo import ogr
from osgeo import osr
except ImportError:
import ogr
import osr
#/* ======================================================================= */#
#/* Build Information
#/* ======================================================================= */#
__author__ = 'Kevin Wurster'
__version__ = '0.1-dev'
__release__ = '2014-06-19'
__docname__ = basename(__file__)
__license__ = """
Copyright (c) 2014, SkyTruth
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
#/* ======================================================================= */#
#/* Define print_usage() function
#/* ======================================================================= */#
def print_usage():
"""
Command line usage information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Usage: %s [options] task.json task_run.json output/directory
Options:
--help-info -> Print out a list of help related flags
--overwrite -> Overwrite output files
--prefix=str -> Output filename prefix - defaults to 'MoorFrog-'
    --wellpad-file-name=str -> Defaults to 'wellpads.shp'
    --bbox-file-name=str    -> Defaults to 'bbox.shp'
    --clicks-file-name=str  -> Defaults to 'clicks.shp'
--no-bbox -> Don't generate bounding boxes file
--no-click -> Don't generate clicks file
--no-wellpad -> Don't generate wellpads file
--of=driver -> Output driver name/file type - default='ESRI Shapefile'
--epsg=int -> EPSG code for coordinates in task.json - default='4326'
""" % __docname__)
return 1
#/* ======================================================================= */#
#/* Define print_license() function
#/* ======================================================================= */#
def print_license():
"""
Print out license information
:return: 1 for exit code purposes
:rtype: int
"""
print(__license__)
return 1
#/* ======================================================================= */#
#/* Define print_help() function
#/* ======================================================================= */#
def print_help():
"""
Detailed help information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Help: {0}
------{1}
Input is task.json and task_run.json from MoorFrog
Output is a set of bounding boxes, well pad points,
and pond clicks.
""".format(__docname__, '-' * len(__docname__)))
return 1
#/* ======================================================================= */#
#/* Define print_help_info() function
#/* ======================================================================= */#
def print_help_info():
"""
Print a list of help related flags
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Help flags:
--help -> More detailed description of this utility
--usage -> Arguments, parameters, flags, options, etc.
--version -> Version and ownership information
--license -> License information
""")
return 1
#/* ======================================================================= */#
#/* Define print_version() function
#/* ======================================================================= */#
def print_version():
"""
Print script version information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
%s version %s - released %s
""" % (__docname__, __version__, __release__))
return 1
#/* ======================================================================= */#
#/* Define create_bboxes() function
#/* ======================================================================= */#
def create_bboxes(tasks, layer):
"""
Add bounding boxes to input layer
:param tasks: tasks from json.load(open('task.json'))
:type tasks: list
:param layer: OGR layer object
:type layer: <ogr.Layer class>
:return: True on success and False on failure
:rtype: bool
"""
# Update user
print("Creating bounding boxes")
# Define fields
print(" Defining bbox fields...")
fields_definitions = (('id', 10, ogr.OFTInteger),
('site_id', 254, ogr.OFTString),
('location', 254, ogr.OFTString),
('wms_url', 254, ogr.OFTString),
('county', 254, ogr.OFTString),
('year', 10, ogr.OFTInteger),
('qaqc', 254, ogr.OFTString))
# Create fields
for field_name, field_width, field_type in fields_definitions:
print(" " + field_name)
field_object = ogr.FieldDefn(field_name, field_type)
field_object.SetWidth(field_width)
layer.CreateField(field_object)
# Loop through tasks and create features
num_tasks = len(tasks)
i = 0
print(" Processing %s tasks..." % str(len(tasks)))
for task in tasks:
# Update user
i += 1
sys.stdout.write("\r\x1b[K" + " %s/%s" % (str(i), str(num_tasks)))
sys.stdout.flush()
# Get field content
location = str(task['info']['latitude']) + str(task['info']['longitude']) + '---' + str(task['info']['year'])
field_values = {'id': int(task['id']),
'site_id': str(task['info']['SiteID']),
'location': str(location),
'wms_url': str(task['info']['url']),
'county': str(task['info']['county']),
'year': int(task['info']['year'])}
# Get corner coordinates and assemble into a geometry
coordinates = task['info']['bbox'].split(',')
x_min = float(coordinates[2])
x_max = float(coordinates[0])
y_min = float(coordinates[1])
y_max = float(coordinates[3])
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(x_min, y_max)
ring.AddPoint(x_min, y_min)
ring.AddPoint(x_max, y_min)
ring.AddPoint(x_max, y_max)
ring.CloseRings()
# Create a new feature and assign geometry and field values
rectangle = ogr.Geometry(ogr.wkbPolygon)
rectangle.AddGeometry(ring)
feature = ogr.Feature(layer.GetLayerDefn())
feature.SetGeometry(rectangle)
for field, value in field_values.iteritems():
feature.SetField(field, value)
layer.CreateFeature(feature)
rectangle = None
feature = None
# Update user
print(" - Done")
return True
#/* ======================================================================= */#
#/* Define create_clicks() function
#/* ======================================================================= */#
def create_clicks(tasks, task_runs, layer):
"""
Add click points to layer
:param tasks: tasks from json.load(open('task.json'))
:type tasks: list
:param task_runs: tasks from json.load(open('task_run.json'))
:type task_runs: list
:param layer: OGR layer object
:type layer: <ogr.Layer class>
:return: True on success and False on failure
:rtype: bool
"""
# Update user
print("Creating clicks")
# Define fields
print(" Defining click fields...")
fields_definitions = (('id', 10, ogr.OFTInteger),
('task_id', 10, ogr.OFTInteger),
('year', 10, ogr.OFTInteger),
('qaqc', 254, ogr.OFTString))
# Create fields
for field_name, field_width, field_type in fields_definitions:
print(" " + field_name)
field_object = ogr.FieldDefn(field_name, field_type)
field_object.SetWidth(field_width)
layer.CreateField(field_object)
# Loop through tasks and create features
print(" Processing %s tasks..." % str(len(task_runs)))
i = 0
num_task_runs = len(task_runs)
for task_run in task_runs:
# Update user
i += 1
sys.stdout.write("\r\x1b[K" + " %s/%s" % (str(i), str(num_task_runs)))
sys.stdout.flush()
# Get field content
field_values = {'id': int(task_run['id']),
'task_id': int(task_run['task_id'])}
# Get year
for t in tasks:
if t['id'] == task_run['task_id']:
field_values['year'] = int(t['info']['year'])
break
# Get list of clicks
clicks = task_run['info']['positions']
for click in clicks:
feature = ogr.Feature(layer.GetLayerDefn())
# Set field attributes and geometry
point = ogr.CreateGeometryFromWkt("POINT(%f %f)" % (float(click['lon']), float(click['lat'])))
feature.SetGeometry(point)
for field, value in field_values.iteritems():
feature.SetField(field, value)
layer.CreateFeature(feature)
feature = None
# Update user
print(" Done")
return True
#/* ======================================================================= */#
#/* Define get_crowd_selection() function
#/* ======================================================================= */#
def create_wellpads(tasks, layer):
"""
Add click points to layer
:param tasks: tasks from json.load(open('task.json'))
:type tasks: list
:param layer: OGR layer object
:type layer: <ogr.Layer class>
:return: True on success and False on failure
:rtype: bool
"""
# Update user
print("Creating wellpads")
# Define fields
print(" Defining layer fields...")
fields_definitions = (('id', 10, ogr.OFTInteger),
('site_id', 254, ogr.OFTString),
('location', 254, ogr.OFTString),
('wms_url', 254, ogr.OFTString),
('county', 254, ogr.OFTString),
('year', 10, ogr.OFTInteger),
('qaqc', 254, ogr.OFTString))
# Create fields
for field_name, field_width, field_type in fields_definitions:
print(" " + field_name)
field_object = ogr.FieldDefn(field_name, field_type)
field_object.SetWidth(field_width)
layer.CreateField(field_object)
# Loop through tasks and create features
print(" Processing %s tasks..." % str(len(tasks)))
i = 0
num_tasks = len(tasks)
for task in tasks:
# Update user
i += 1
sys.stdout.write("\r\x1b[K" + " %s/%s" % (str(i), str(num_tasks)))
sys.stdout.flush()
# Get field content
location = str(task['info']['latitude']) + str(task['info']['longitude']) + '---' + str(task['info']['year'])
field_values = {'id': int(task['id']),
'site_id': str(task['info']['SiteID']),
'location': location,
'wms_url': str(task['info']['url']),
'county': str(task['info']['county']),
'year': int(task['info']['year'])}
# Define and create feature
feature = ogr.Feature(layer.GetLayerDefn())
wkt = "POINT(%f %f)" % (float(task['info']['longitude']), float(task['info']['latitude']))
point = ogr.CreateGeometryFromWkt(wkt)
feature.SetGeometry(point)
for field, value in field_values.iteritems():
feature.SetField(field, value)
layer.CreateFeature(feature)
feature = None
# Update user
print(" Done")
return True
#/* ======================================================================= */#
#/* Define main() function
#/* ======================================================================= */#
def main(args):

    """
    Main routine

    :param args: arguments from the commandline (sys.argv[1:] in order to drop the script name)
    :type args: list

    :return: 0 on success and 1 on error
    :rtype: int
    """

    # Containers
    task_file_path = None
    task_run_file_path = None
    output_directory = None
    output_prefix = 'MoorFrog-'

    # Defaults
    overwrite = False
    bbox_file_name = 'bbox.shp'
    wellpad_file_name = 'wellpads.shp'
    clicks_file_name = 'clicks.shp'
    epsg_code = 4326
    vector_driver = 'ESRI Shapefile'
    generate_bbox = True
    generate_clicks = True
    generate_wellpads = True

    # Parse arguments
    arg_error = False
    for arg in args:

        # Help arguments
        if arg in ('--help', '-help'):
            return print_help()
        elif arg in ('--help-info', '-help-info', '--helpinfo', '-helpinfo'):
            return print_help_info()
        elif arg in ('--license', '-license'):
            return print_license()
        elif arg in ('--version', '-version'):
            return print_version()

        # Configure output
        elif arg in ('--no-clicks', '--no-click'):
            generate_clicks = False
        elif arg in ('--no-bbox', '--no-bboxes'):
            generate_bbox = False
        elif arg in ('--no-wellpads', '--no-wellpad'):
            generate_wellpads = False

        # Configure file names
        elif '--prefix=' in arg:
            output_prefix = arg.split('=', 1)[1]
        elif '--bbox-file-name=' in arg:
            bbox_file_name = arg.split('=', 1)[1]
        elif '--wellpad-file-name=' in arg or '--well-pad-file-name=' in arg:
            wellpad_file_name = arg.split('=', 1)[1]
        elif '--clicks-file-name=' in arg:
            clicks_file_name = arg.split('=', 1)[1]

        # OGR output options
        elif '--epsg=' in arg:
            epsg_code = arg.split('=', 1)[1]
        elif '--of=' in arg:
            vector_driver = arg.split('=', 1)[1]

        # Additional options
        elif arg == '--overwrite':
            overwrite = True

        # Ignore empty arguments
        elif arg == '':
            pass

        # Positional arguments
        else:
            # Get task.json file
            if task_file_path is None:
                task_file_path = arg
            # Get task_run.json file
            elif task_run_file_path is None:
                task_run_file_path = arg
            # Get output directory
            elif output_directory is None:
                output_directory = arg
            # Argument is unrecognized - throw an error
            else:
                print("ERROR: Invalid argument: %s" % str(arg))
                arg_error = True

    # Define output file paths - fall back to an empty string when the output
    # directory is missing so the validation below reports the error instead
    # of sep.join() crashing on None here
    _out_dir = output_directory if output_directory is not None else ''
    clicks_file_path = sep.join([_out_dir, output_prefix + clicks_file_name])
    bbox_file_path = sep.join([_out_dir, output_prefix + bbox_file_name])
    wellpad_file_path = sep.join([_out_dir, output_prefix + wellpad_file_name])

    # Validate
    bail = False
    if arg_error:
        print("ERROR: Did not successfully parse arguments")
        bail = True
    if output_directory is None or not os.access(output_directory, os.W_OK):
        print("ERROR: Can't access output directory: %s" % output_directory)
        bail = True
    if task_file_path is None or not os.access(task_file_path, os.R_OK):
        print("ERROR: Can't access task file: %s" % task_file_path)
        bail = True
    if task_run_file_path is None or not os.access(task_run_file_path, os.R_OK):
        print("ERROR: Can't access task run file: %s" % task_run_file_path)
        bail = True
    if not overwrite:
        for filepath in [clicks_file_path, bbox_file_path, wellpad_file_path]:
            if isfile(filepath):
                print("ERROR: Output file exists: %s" % filepath)
                bail = True
    try:
        epsg_code = int(epsg_code)
    except ValueError:
        print("ERROR: EPSG code must be an int: %s" % str(epsg_code))
        bail = True
    if bail:
        return 1

    # Update user
    print("Task file: %s" % task_file_path)
    print("Task run file: %s" % task_run_file_path)
    print("Output directory: %s" % output_directory)

    # Convert files to json
    print("Extracting JSON...")
    with open(task_file_path, 'r') as f:
        task_json = json.load(f)
    with open(task_run_file_path, 'r') as f:
        task_run_json = json.load(f)
    print(" Num tasks: %s" % str(len(task_json)))
    print(" Num task runs: %s" % str(len(task_run_json)))

    # Get SRS and driver objects
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(epsg_code)
    driver = ogr.GetDriverByName(vector_driver)

    # Delete existing files if in overwrite mode
    if overwrite:
        print("Overwriting existing files...")
        for filepath in [clicks_file_path, bbox_file_path, wellpad_file_path]:
            if isfile(filepath):
                driver.DeleteDataSource(filepath)
                print(" Deleted %s" % filepath)

    # Create clicks file OGR object
    clicks_layer_name = clicks_file_name.split('.', 1)[0]
    print("Creating empty clicks outfile...")
    print(" Path: %s" % clicks_file_path)
    print(" Layer: %s" % clicks_layer_name)
    clicks_datasource = driver.CreateDataSource(clicks_file_path)
    clicks_layer = clicks_datasource.CreateLayer(clicks_layer_name, srs, ogr.wkbPoint)

    # Create bounding box OGR object
    bbox_layer_name = bbox_file_name.split('.', 1)[0]
    print("Creating empty bbox outfile...")
    print(" Path: %s" % bbox_file_path)
    print(" Layer: %s" % bbox_layer_name)
    bbox_datasource = driver.CreateDataSource(bbox_file_path)
    bbox_layer = bbox_datasource.CreateLayer(bbox_layer_name, srs, ogr.wkbPolygon)

    # Create wellpad OGR object
    wellpad_layer_name = wellpad_file_name.split('.', 1)[0]
    print("Creating empty wellpad outfile...")
    print(" Path: %s" % wellpad_file_path)
    print(" Layer: %s" % wellpad_layer_name)
    wellpad_datasource = driver.CreateDataSource(wellpad_file_path)
    wellpad_layer = wellpad_datasource.CreateLayer(wellpad_layer_name, srs, ogr.wkbPoint)

    # == Create Files == #
    if generate_bbox:
        if not create_bboxes(task_json, bbox_layer):
            print("ERROR: Problem creating bounding boxes")
    if generate_clicks:
        if not create_clicks(task_json, task_run_json, clicks_layer):
            print("ERROR: Problem creating clicks")
    if generate_wellpads:
        if not create_wellpads(task_json, wellpad_layer):
            print("ERROR: Problem creating wellpads")

    # Cleanup OGR data sources
    print("Cleaning up...")
    srs = None
    driver = None
    clicks_layer = None
    bbox_layer = None
    wellpad_layer = None
    clicks_datasource = None
    bbox_datasource = None
    wellpad_datasource = None

    # Success
    print("Done.")
    return 0
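
# Example invocation (a sketch - the script filename below is hypothetical):
#     python moorfrog_outputs.py task.json task_run.json ./out \
#         --overwrite --epsg=4326 --prefix=MoorFrog-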
#/* ======================================================================= */#
#/* Commandline Execution
#/* ======================================================================= */#

if __name__ == '__main__':

    # Not enough arguments - print usage
    # (use == rather than 'is' - identity comparison against an int is unreliable)
    if len(sys.argv) == 1:
        sys.exit(print_usage())
    # Got enough arguments - give all but the first to the main() function
    else:
        sys.exit(main(sys.argv[1:]))
| 32.79822 | 117 | 0.566453 | 2,510 | 22,106 | 4.846215 | 0.159363 | 0.020388 | 0.010852 | 0.009865 | 0.518908 | 0.462759 | 0.434725 | 0.422065 | 0.399457 | 0.390579 | 0 | 0.007897 | 0.24957 | 22,106 | 673 | 118 | 32.846954 | 0.72536 | 0.272958 | 0 | 0.305085 | 0 | 0 | 0.274899 | 0.004216 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025424 | false | 0.002825 | 0.031073 | 0 | 0.096045 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6ff6690850f8c8bea40662924c37c303210909d | 4,133 | py | Python | dnppy/raster/degree_days.py | NASA-DEVELOP/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 65 | 2015-09-10T12:59:56.000Z | 2022-02-27T22:09:03.000Z | dnppy/raster/degree_days.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 40 | 2015-04-08T19:23:30.000Z | 2015-08-04T15:53:11.000Z | dnppy/raster/degree_days.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 45 | 2015-08-14T19:09:38.000Z | 2022-02-15T18:53:16.000Z |
__all__ = ["degree_days"]
from to_numpy import to_numpy
from from_numpy import from_numpy
import numpy
def degree_days(T_base, Max, Min, NoData_Value, outpath = False, roof = False, floor = False):

    """
    Inputs rasters for maximum and minimum temperatures, calculates Growing Degree Days

    this function is built to perform the common degree day calculation on either a pair
    of raster filepaths or a pair of numpy arrays. It requires, at minimum, a maximum
    temperature value, a minimum temperature value, and a base temperature. This
    equation could also be used to calculate Chill hours or anything similar.

    The equation is ``[(Max+Min)/2 + T_base]``

    where values in Max which are greater than roof are set equal to roof
    where values in Min which are less than floor are set equal to floor

    consult [https://en.wikipedia.org/wiki/Growing_degree-day] for more information.

    :param T_base: base temperature to ADD, be mindful of sign convention.
    :param Max: filepath, numpy array, or list of maximum temperatures
    :param Min: filepath, numpy array, or list of minimum temperatures
    :param NoData_Value: values to ignore (must be int or float)
    :param outpath: filepath to which output should be saved. Only works if Max and Min inputs
                    are raster filepaths with spatial referencing.
    :param roof: roof value above which Max temps do not matter
    :param floor: floor value below which Min temps do not matter

    :return deg_days: a numpy array of the output degree_days
    """

    #FIXME: doesn't fit style guide. does not operate in batch and return list of output filepaths
    output_filelist = []

    # format numerical inputs as floating point values
    T_base = float(T_base)
    if roof:
        roof = float(roof)
    if floor:
        floor = float(floor)

    # Determine the type of input and convert to useful format for calculation
    # acceptable input formats are filepaths to rasters, numpy arrays, or lists.
    if type(Max) is list and type(Min) is list:
        # if the first entry in a list is a string, assume it is a filename that has
        # been placed into a list.
        if type(Max[0]) is str and type(Min[0]) is str:
            Max = Max[0]
            Min = Min[0]
            # load in the min and max files.
            highs, meta = to_numpy(Max)
            lows, meta = to_numpy(Min)
            print('Found spatially referenced image pair!')
        else:
            highs = numpy.array(Max)
            lows = numpy.array(Min)
    # if they are already numpy arrays
    elif type(Max) is numpy.ndarray:
        highs = Max
        lows = Min
    else:
        raise Exception("invalid inputs!")

    # Begin to perform the degree day calculations
    # apply roof and floor corrections if they have been specified
    if roof:
        highs[highs >= roof] = roof
    if floor:
        lows[lows <= floor] = floor

    # find the shapes of high and low arrays
    xsh, ysh = highs.shape
    xsl, ysl = lows.shape

    # only continue if min and max arrays have the same shape
    if xsh == xsl and ysh == ysl:
        # set empty degree day matrix
        deg_days = numpy.zeros((xsh, ysh))
        # perform the calculation
        for x in range(xsh):
            for y in range(ysh):
                if round(highs[x, y] / NoData_Value, 10) != 1 and round(lows[x, y] / NoData_Value, 10) != 1:
                    deg_days[x, y] = ((highs[x, y] + lows[x, y]) / 2) + T_base
                else:
                    deg_days[x, y] = NoData_Value
    # print error if the arrays are not the same size
    else:
        print('Images are not the same size!, Check inputs!')
        return False

    # if an output path was specified, save it with the spatial referencing information.
    if outpath and type(Max) is str and type(Min) is str:
        from_numpy(deg_days, meta, outpath)
        print('Output saved at : ' + outpath)

    return deg_days
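
# Minimal smoke test (a sketch, not part of the original module): calling
# degree_days() on small synthetic arrays exercises the numpy-array input
# path; the raster path additionally needs to_numpy/from_numpy and real files.
if __name__ == '__main__':
    fake_max = numpy.array([[30.0, 25.0], [28.0, -9999.0]])
    fake_min = numpy.array([[15.0, 10.0], [12.0, -9999.0]])
    # NoData cells (here -9999) propagate to the output unchanged
    print(degree_days(0.0, fake_max, fake_min, -9999.0))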
| 36.575221 | 102 | 0.625696 | 593 | 4,133 | 4.305228 | 0.325464 | 0.011751 | 0.010576 | 0.015276 | 0.057971 | 0.032902 | 0 | 0 | 0 | 0 | 0 | 0.004197 | 0.308251 | 4,133 | 112 | 103 | 36.901786 | 0.888772 | 0.527704 | 0 | 0.170213 | 0 | 0 | 0.068145 | 0 | 0 | 0 | 0 | 0.008929 | 0 | 1 | 0.021277 | false | 0 | 0.06383 | 0 | 0.12766 | 0.06383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6ffc84dd73dd6770247f6b84eac07007bd98522 | 3,277 | py | Python | code/Model/baselines/sentence-level-models/models/lstm.py | INK-USC/DS-RelationExtraction | eebcfa7fd2eda5bba92f3ef8158797cdf91e6981 | [
"MIT"
] | 156 | 2018-10-09T09:01:42.000Z | 2019-12-25T09:07:47.000Z | code/Model/baselines/sentence-level-models/models/lstm.py | pengdada98/USC-DS-RelationExtraction | eebcfa7fd2eda5bba92f3ef8158797cdf91e6981 | [
"MIT"
] | 10 | 2018-10-12T11:54:58.000Z | 2019-10-11T03:29:20.000Z | code/Model/baselines/sentence-level-models/models/lstm.py | pengdada98/USC-DS-RelationExtraction | eebcfa7fd2eda5bba92f3ef8158797cdf91e6981 | [
"MIT"
] | 64 | 2016-11-04T16:03:03.000Z | 2018-07-20T18:03:00.000Z | __author__ = 'Maosen'
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
from utils import pos2id, ner2id
import sys
from tqdm import tqdm
class LSTM(nn.Module):
    def __init__(self, args, rel2id, word_emb=None):
        super(LSTM, self).__init__()

        # arguments
        hidden, vocab_size, emb_dim, pos_dim, ner_dim, position_dim, attn_dim, num_layers, dropout = \
            args.hidden, args.vocab_size, args.emb_dim, args.pos_dim, args.ner_dim, \
            args.position_dim, args.attn_dim, args.num_layers, args.dropout

        # embeddings
        if word_emb is not None:
            assert (vocab_size, emb_dim) == word_emb.shape
            self.word_emb = nn.Embedding(vocab_size, emb_dim, padding_idx=utils.PAD_ID, _weight=torch.from_numpy(word_emb).float())
            # self.word_emb.weight.data.copy_(torch.from_numpy(word_emb))
            # self.word_emb.weight.requires_grad = False
        else:
            self.word_emb = nn.Embedding(vocab_size, emb_dim, padding_idx=utils.PAD_ID)
            self.word_emb.weight.data[1:, :].uniform_(-1.0, 1.0)

        self.pos_dim = pos_dim
        self.ner_dim = ner_dim
        self.hidden = hidden

        if pos_dim > 0:
            self.pos_emb = nn.Embedding(len(pos2id), pos_dim, padding_idx=utils.PAD_ID)
            self.pos_emb.weight.data[1:, :].uniform_(-1.0, 1.0)
        if ner_dim > 0:
            self.ner_emb = nn.Embedding(len(ner2id), ner_dim, padding_idx=utils.PAD_ID)
            self.ner_emb.weight.data[1:, :].uniform_(-1.0, 1.0)
        if position_dim > 0:
            self.position_emb = nn.Embedding(utils.MAXLEN * 2, position_dim)
            self.position_emb.weight.data.uniform_(-1.0, 1.0)

        # LSTM
        # input_size = emb_dim + pos_dim + ner_dim
        input_size = emb_dim + position_dim * 2
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden, num_layers=num_layers, batch_first=True,
                            dropout=dropout)
        self.dropout = nn.Dropout(dropout)

        # linear parameters of Position-aware attention
        feat_dim = hidden * 2 + position_dim * 2
        self.attn_dim = attn_dim
        self.feat_dim = feat_dim
        # self.wlinear = nn.Linear(feat_dim, attn_dim, bias=False)
        # self.vlinear = nn.Linear(attn_dim, 1, bias=False)
        self.flinear = nn.Linear(hidden, len(rel2id))
        # self.wlinear.weight.data.normal_(std=0.001)
        # self.vlinear.weight.data.zero_()
        self.flinear.weight.data.normal_(std=0.001)

    def forward(self, inputs):
        words, pos, ner, subj_pos, obj_pos = inputs
        # subj_pos and obj_pos are relative positions to subject/object
        batch, maxlen = words.size()
        masks = torch.eq(words, utils.PAD_ID)
        seq_lens = masks.eq(utils.PAD_ID).long().sum(1).squeeze().tolist()

        emb_words = self.word_emb(words)
        emb_pos = self.pos_emb(pos)
        emb_ner = self.ner_emb(ner)
        emb_subj_pos = self.position_emb(subj_pos + utils.MAXLEN)
        emb_obj_pos = self.position_emb(obj_pos + utils.MAXLEN)

        # rnn_input = torch.cat([emb_words, emb_pos, emb_ner], dim=2)
        rnn_input = torch.cat([emb_words, emb_subj_pos, emb_obj_pos], dim=2).contiguous()
        rnn_input = self.dropout(rnn_input)
        rnn_input = nn.utils.rnn.pack_padded_sequence(rnn_input, seq_lens, batch_first=True)
        output, (hn, cn) = self.lstm(rnn_input)  # default: zero state
        output, output_lens = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        # output = self.dropout(output)

        final_hidden = hn[-1]
        final_hidden = self.dropout(final_hidden)
        logits = self.flinear(final_hidden)
        return logits
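
# Usage sketch (an assumption, not part of the original file): given an args
# namespace carrying hidden/vocab_size/emb_dim/pos_dim/ner_dim/position_dim/
# attn_dim/num_layers/dropout and a rel2id mapping, forward() expects a
# 5-tuple of LongTensors, each shaped (batch, maxlen), sorted by true length
# (pack_padded_sequence requires descending lengths on older torch versions):
#
#     model = LSTM(args, rel2id)
#     logits = model((words, pos, ner, subj_pos, obj_pos))  # (batch, len(rel2id))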
| 33.10101 | 122 | 0.728715 | 541 | 3,277 | 4.15342 | 0.216266 | 0.034268 | 0.026702 | 0.026702 | 0.207833 | 0.17312 | 0.131286 | 0.085892 | 0.085892 | 0.074766 | 0 | 0.01602 | 0.142814 | 3,277 | 98 | 123 | 33.438776 | 0.783909 | 0.172414 | 0 | 0 | 0 | 0 | 0.00223 | 0 | 0 | 0 | 0 | 0 | 0.016667 | 1 | 0.033333 | false | 0 | 0.116667 | 0 | 0.183333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1000dfbbd035c312db7ff448296e00fba1a7cd5e | 898 | py | Python | backend/ocr_core/config.py | WestonLu/chinese-ocr | d27bf720a47b9bf3aae6f306c94bad0a36056e56 | [
"MIT"
] | 1 | 2022-02-23T09:22:39.000Z | 2022-02-23T09:22:39.000Z | backend/ocr_core/config.py | luxu1220/chinese-ocr | d27bf720a47b9bf3aae6f306c94bad0a36056e56 | [
"MIT"
] | null | null | null | backend/ocr_core/config.py | luxu1220/chinese-ocr | d27bf720a47b9bf3aae6f306c94bad0a36056e56 | [
"MIT"
] | null | null | null | import os
filt_path = os.path.abspath(__file__)
father_path = os.path.abspath(os.path.dirname(filt_path) + os.path.sep + ".")
GPU_ID = "cpu"
dbnet_short_size = 960
det_model_type = "dbnet"
pse_scale = 1
model_path = os.path.join(father_path, "models/dbnet.onnx")
# crnn相关
nh = 256
crnn_vertical_model_path = os.path.join(father_path,
"models/crnn_dw_lstm_vertical.pth")
LSTMFLAG = False
crnn_model_path = os.path.join(father_path, "models/crnn_lite_dense_dw.pth")
# angle_class相关
lable_map_dict = {0: "hengdao", 1: "hengzhen", 2: "shudao",
3: "shuzhen"} # hengdao: 文本行横向倒立 其他类似
rotae_map_dict = {"hengdao": 180, "hengzhen": 0, "shudao": 180,
"shuzhen": 0} # 文本行需要旋转的角度
angle_type = "shufflenetv2_05"
angle_model_path = os.path.join(father_path, "models/{}.pth".format(angle_type))
TIMEOUT = 30
version = 'api/v1'
| 28.967742 | 80 | 0.671492 | 129 | 898 | 4.372093 | 0.48062 | 0.085106 | 0.124113 | 0.106383 | 0.262411 | 0.262411 | 0.262411 | 0.262411 | 0.138298 | 0 | 0 | 0.034341 | 0.18931 | 898 | 30 | 81 | 29.933333 | 0.740385 | 0.05902 | 0 | 0 | 0 | 0 | 0.210714 | 0.072619 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1001fbdf815fe1e81a9f259621fa2deb092ac084 | 1,265 | py | Python | src/merge_csv/merge_files.py | JoaquimXG/csv-merge | 2d0430b6dfe5ecb69e9bc18ba58b45678515cc69 | [
"MIT"
] | null | null | null | src/merge_csv/merge_files.py | JoaquimXG/csv-merge | 2d0430b6dfe5ecb69e9bc18ba58b45678515cc69 | [
"MIT"
] | null | null | null | src/merge_csv/merge_files.py | JoaquimXG/csv-merge | 2d0430b6dfe5ecb69e9bc18ba58b45678515cc69 | [
"MIT"
] | null | null | null | import pandas as pd
import logging

from .validate_options import validate_options
from .merge_dataframes import merge_dataframes_multiple_columns, merge_dataframes_single_column


def merge_files(left_file: str, right_file: str, columns: list, keep: str = 'none', keep_missing: str = 'none') -> pd.DataFrame:
    """
    Merges two csv files

    Parameters:
        left_file (str): Path to first file
        right_file (str): Path to second file
        columns (list): Names of the columns to merge files on
        keep (str): Table to keep values from when no match is found. One of ['left', 'right', 'both', 'none']. Default is 'none'
        keep_missing (str): Table to keep values from when a row contains no value in a given column. One of ['left', 'right', 'both', 'none']. Default is 'none'

    Returns:
        (pd.DataFrame): Merged DataFrame
    """
    log = logging.getLogger(__name__)

    dfLeft = pd.read_csv(left_file)
    dfRight = pd.read_csv(right_file)

    validate_options(dfLeft, dfRight, columns, keep, keep_missing)

    log.info("Starting Merge")
    if len(columns) == 1:
        return merge_dataframes_single_column(dfLeft, dfRight, columns[0], keep, keep_missing)
    else:
        return merge_dataframes_multiple_columns(dfLeft, dfRight, columns, keep)
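
# Usage sketch (not part of the original module; because the imports above are
# relative, call this through the package rather than as a standalone script):
#
#     from merge_csv.merge_files import merge_files
#     df = merge_files('left.csv', 'right.csv', ['id'], keep='both')
#     df.to_csv('merged.csv', index=False)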
1003e56d44ba3bab8c9e2c86a4f9e739125ecbf8 | 1,231 | py | Python | dataset/file_table.py | hb-stone/FC-SOD | 1e084dde0d5bde4e90f633390ee74cbffdd67e76 | [
"MIT"
] | 12 | 2020-09-27T04:46:25.000Z | 2021-06-14T00:47:56.000Z | dataset/file_table.py | hb-stone/FC-SOD | 1e084dde0d5bde4e90f633390ee74cbffdd67e76 | [
"MIT"
] | 2 | 2021-06-15T11:03:31.000Z | 2021-09-17T01:02:00.000Z | dataset/file_table.py | hb-stone/FC-SOD | 1e084dde0d5bde4e90f633390ee74cbffdd67e76 | [
"MIT"
] | 1 | 2021-06-14T00:48:01.000Z | 2021-06-14T00:48:01.000Z | import os
from typing import Dict
from os.path import join as pathjoin
__ALL__ = ['get_dataset_path_by_name']
def get_dataset_path_by_name(dataset_name:str) -> Dict[str, str]:
root_dir = os.path.dirname(__file__)
if dataset_name not in "DUT-OMRON DUTS PASCAL-S SOD".split(" "):
raise NameError(f"the dataset {dataset_name} are not be supported")
train_dir_name = ''
test_dir_name = ''
train_lst_name = 'train.lst'
test_lst_name = 'test.lst'
if dataset_name == "DUTS":
train_dir_name = 'DUTS-TR'
test_dir_name = 'DUTS-TE'
train_dir_path = pathjoin(root_dir,dataset_name,train_dir_name)
test_dir_path = pathjoin(root_dir,dataset_name,test_dir_name)
train_lst_path = pathjoin(train_dir_path, train_lst_name)
test_lst_path = pathjoin(test_dir_path, test_lst_name)
return dict(
train_dir_path=train_dir_path,
train_lst_path=train_lst_path,
test_dir_path=test_dir_path,
test_lst_path=test_lst_path,
train_dir_name=train_dir_name,
test_dir_name=test_dir_name,
)
if __name__ == '__main__':
from pprint import pprint
pprint(get_dataset_path_by_name('DUTS'))
pprint(get_dataset_path_by_name('ECSSD'))
| 34.194444 | 75 | 0.718115 | 191 | 1,231 | 4.120419 | 0.240838 | 0.088945 | 0.076239 | 0.081321 | 0.448539 | 0.268107 | 0.083863 | 0 | 0 | 0 | 0 | 0 | 0.190902 | 1,231 | 35 | 76 | 35.171429 | 0.790161 | 0 | 0 | 0 | 0 | 0 | 0.122665 | 0.019496 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.129032 | 0 | 0.193548 | 0.096774 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1005b0ab01949ad9cd001c97ae464c7d77e2b32d | 1,579 | py | Python | catkin/src/distributed_robot_system/src/nodes/webcam_main.py | samuelwestlake/Multi-Tier-Robot-System | 93664413e68ac2080958527149729bd6b63429b5 | [
"MIT"
] | null | null | null | catkin/src/distributed_robot_system/src/nodes/webcam_main.py | samuelwestlake/Multi-Tier-Robot-System | 93664413e68ac2080958527149729bd6b63429b5 | [
"MIT"
] | null | null | null | catkin/src/distributed_robot_system/src/nodes/webcam_main.py | samuelwestlake/Multi-Tier-Robot-System | 93664413e68ac2080958527149729bd6b63429b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import cv2
import rospy
import numpy as np
from sensor_msgs.msg import CompressedImage


class CameraNode(object):

    def __init__(self, camera=0, nb=0, buggy_nb=0, node_name="camera_node"):
        self.vc = cv2.VideoCapture(camera)      # Initialise instance of video capture
        self.rate = 25                          # Maximum frequency
        topic_name = "buggy" + str(buggy_nb) + "/camera" + str(nb)
        rospy.init_node(node_name, anonymous=True)  # Initialise ros node
        self.publisher = rospy.Publisher(topic_name, CompressedImage, queue_size=1)  # Initialise publisher

    def main(self):
        message = CompressedImage()             # Ros compressed image
        r = rospy.Rate(self.rate)
        while True:
            _, frame = self.vc.read()           # Read frame
            message.format = "jpeg"             # Give image format to message
            message.data = np.array(cv2.imencode(".jpg", frame)[1]).tobytes()  # Encode captured image
            self.publisher.publish(message)     # Publish message
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            r.sleep()
        self.vc.release()                       # Release capture
        cv2.destroyAllWindows()                 # Destroy all windows


if __name__ == '__main__':
    cn = CameraNode()
    cn.main()
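
# Run sketch (an assumption): inside a sourced catkin workspace with a ROS
# master running,
#     rosrun distributed_robot_system webcam_main.py
# should publish ~25 Hz JPEG frames on /buggy0/camera0 (check with
# `rostopic hz /buggy0/camera0`).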
| 45.114286 | 118 | 0.50855 | 156 | 1,579 | 5 | 0.512821 | 0.023077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014831 | 0.402153 | 1,579 | 34 | 119 | 46.441176 | 0.811441 | 0.158961 | 0 | 0 | 0 | 0 | 0.030395 | 0 | 0 | 0 | 0.00304 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.148148 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1007a20dabb4322c12f531ddcead5c7ccd4b2801 | 680 | py | Python | src/image.py | adamsh25/RE | 99c631ba2049c0ba86357a3148a8442a5fd8ec0e | [
"MIT"
] | 105 | 2017-01-02T18:32:01.000Z | 2021-11-09T11:23:50.000Z | src/image.py | scvalencia/MNIST_ASCCI_challenge | 60f7880f2d5aebe2420b472a4af7c7f2e0ee9c45 | [
"MIT"
] | null | null | null | src/image.py | scvalencia/MNIST_ASCCI_challenge | 60f7880f2d5aebe2420b472a4af7c7f2e0ee9c45 | [
"MIT"
] | 24 | 2017-01-03T13:03:56.000Z | 2017-10-25T02:27:45.000Z | import cv2
import numpy


def write_MNIST_files():
    file_object = open('../data/data.csv', 'r')
    file_object.readline()  # skip the CSV header row
    counters = {_: 0 for _ in range(10)}
    folders = {
        0: 'zero', 1: 'one', 2: 'two', 3: 'three',
        4: 'four', 5: 'five', 6: 'six', 7: 'seven',
        8: 'eight', 9: 'nine'
    }
    for line in file_object:
        # use a list comprehension so indexing works on Python 3,
        # where map() returns a non-subscriptable iterator
        parsed = [int(x.strip()) for x in line.split(',')]
        label = int(parsed[0])
        image_array = numpy.array(parsed[1:])
        image_array = image_array.reshape(28, 28)
        imagefilename = "../img/data/" + folders[label] + "/file" + "_" + str(counters[label]) + ".png"
        cv2.imwrite(imagefilename, image_array)
        counters[label] = counters[label] + 1
    file_object.close()
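
# Invocation sketch (assumes ../data/data.csv exists and the
# ../img/data/<digit-name> folders have been created beforehand):
if __name__ == '__main__':
    write_MNIST_files()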
100a065e4d36bdc98eb6cfcabc84b98767d44b90 | 2,769 | py | Python | pytorchDL/tasks/image_segmentation/evaluator.py | Milogav/PytorchDL | 39d8e40cf430113003b2e03f81951d43118dc09a | [
"MIT"
] | null | null | null | pytorchDL/tasks/image_segmentation/evaluator.py | Milogav/PytorchDL | 39d8e40cf430113003b2e03f81951d43118dc09a | [
"MIT"
] | null | null | null | pytorchDL/tasks/image_segmentation/evaluator.py | Milogav/PytorchDL | 39d8e40cf430113003b2e03f81951d43118dc09a | [
"MIT"
] | null | null | null | import os
import json

import torch
from tqdm import tqdm

from pytorchDL.tasks.image_segmentation.predictor import Predictor
from pytorchDL.tasks.image_segmentation.data import Dataset
from pytorchDL.metrics import ConfusionMatrix


class Evaluator(Predictor):

    def __init__(self, test_data_dir, out_dir, ckpt_path, batch_size, device, num_proc, class_tags=None):
        super().__init__(ckpt_path, device, num_proc=num_proc)
        self.test_data_dir = test_data_dir
        self.out_dir = out_dir
        os.makedirs(out_dir, exist_ok=True)
        self.batch_size = batch_size
        self.class_tags = class_tags
        self.num_proc = num_proc

    def run_testing(self):
        test_dataset = Dataset(data_dir=self.test_data_dir, output_shape=self.cfg['input_size'])
        test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                      batch_size=self.batch_size,
                                                      num_workers=self.num_proc)

        cm = ConfusionMatrix(num_classes=self.cfg['num_out_classes'], tags=self.class_tags)
        test_steps = len(test_dataset) // self.batch_size
        test_results = {}
        with torch.no_grad():
            for batch_data in tqdm(test_dataloader, total=test_steps):
                x, y = batch_data
                x = x.to(self.device)

                pred_logits = self.model(x)
                pred_logits = torch.nn.functional.softmax(pred_logits, dim=1)
                _, pred_labels = pred_logits.max(dim=1)

                gt_labels = y.cpu().numpy().flatten()
                pred_labels = pred_labels.cpu().numpy().flatten()
                cm.update(gt_labels, pred_labels)

        out_file = os.path.join(self.out_dir, 'test_confusion_matrix.png')
        cm.plot(title='Conf. Matrix - Classification', normalized=True, to_file=out_file)

        test_results['norm_conf_mat'] = cm.get_normalized().tolist()
        test_results['class_tags'] = self.class_tags
        with open(os.path.join(self.out_dir, 'test_results.json'), 'w') as fp:
            json.dump(test_results, fp)


if __name__ == '__main__':
    test_dir = '/media/miguel/HDD/DeepLearning/Datasets/hand_landmark_detection/dataset_0/val'
    out_dir = '/home/miguel/prueba_hand_segmentation'
    ckpt_path = '/home/miguel/prueba_hand_segmentation/checkpoints/best_checkpoint.pth'
    class_tags = ['bckg', 'hand']

    evaluator = Evaluator(test_data_dir=test_dir,
                          out_dir=out_dir,
                          ckpt_path=ckpt_path,
                          batch_size=32,
                          device='gpu',
                          num_proc=0,
                          class_tags=class_tags)
    evaluator.run_testing()
| 39 | 105 | 0.625135 | 347 | 2,769 | 4.645533 | 0.334294 | 0.033499 | 0.034119 | 0.027916 | 0.133995 | 0.029777 | 0.029777 | 0 | 0 | 0 | 0 | 0.003 | 0.277718 | 2,769 | 70 | 106 | 39.557143 | 0.803 | 0 | 0 | 0 | 0 | 0 | 0.116287 | 0.075117 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.132075 | 0 | 0.188679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
100e1407ca4bf246b1686309281c924774e73b5d | 5,548 | py | Python | setup.py | johannesnicolaus/singlecell | 8b3f5719b236fb2b9783e4d2c3b419352bb3bf6f | [
"BSD-3-Clause"
] | null | null | null | setup.py | johannesnicolaus/singlecell | 8b3f5719b236fb2b9783e4d2c3b419352bb3bf6f | [
"BSD-3-Clause"
] | null | null | null | setup.py | johannesnicolaus/singlecell | 8b3f5719b236fb2b9783e4d2c3b419352bb3bf6f | [
"BSD-3-Clause"
] | null | null | null | # import sys
import os
import io
from setuptools import setup, find_packages, Command, Extension
from os import path
root = 'singlecell'
name = 'singlecell'
version = '0.1.0'
here = path.abspath(path.dirname(__file__))
description = ('SingleCell: A Python/Cython Package for Processing '
               'Single-Cell RNA-Seq Data.')

install_requires = [
    'genometools>=0.3.4, <1',
    'pysam>=0.11.1, <1',
    'jinja2>=2.9.5, <3',
    'pyyaml>=3.11, <4',
    'pandas>=0.20.2, <1',  # for SparseDataFrame support
    'cython>=0.25.2, <1',
    'HTSeq>=0.8.0, <1',
    'numpy>=1.7.0',
    'snakemake>=4.3.0, <5'
]
ext_modules = []
cmdclass = {}
try:
    import numpy as np
    from Cython.Distutils import build_ext
    from Cython.Compiler import Options as CythonOptions
except ImportError:
    pass
else:
    # no extra compile-time macros by default; Cython line tracing could be
    # enabled here (e.g. on CI) by appending the appropriate define macros
    macros = []

    # tell setuptools to build the Cython extensions
    ext_modules.append(
        Extension(root + '.indrop.reads', [root + '/indrop/reads.pyx'],
                  include_dirs=[np.get_include()],
                  define_macros=macros))

    ext_modules.append(
        Extension(root + '.indrop.barcodes_cython',
                  [root + '/indrop/barcodes_cython.pyx'],
                  include_dirs=[np.get_include()],
                  define_macros=macros))

    ext_modules.append(
        Extension(root + '.indrop.expression',
                  [root + '/indrop/expression.pyx'],
                  include_dirs=[np.get_include()],
                  define_macros=macros))

    cmdclass['build_ext'] = build_ext

# do not require installation if built by ReadTheDocs
# (we mock these modules in docs/source/conf.py)
if 'READTHEDOCS' not in os.environ or \
        os.environ['READTHEDOCS'] != 'True':
    install_requires.extend([
        #'six>=1.10.0, <2',
        #'scipy>=0.14, <1',
        #'plotly>=1.9.6, <3',
    ])
else:
    install_requires.extend([
        #'pandas>=0.13, <1',
    ])
# get long description from file
long_description = ''
with io.open(path.join(here, 'README.rst'), encoding='UTF-8') as fh:
    long_description = fh.read()


class CleanCommand(Command):
    """Removes files generated by setuptools.
    """
    # see https://github.com/trigger/trigger/blob/develop/setup.py
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        error_msg = 'You must run this command in the package root!'
        if not os.getcwd() == here:
            raise OSError(error_msg)
        else:
            os.system('rm -rf ./dist ./build ./*.egg-info ')


cmdclass['clean'] = CleanCommand
setup(
    name=name,
    version=version,

    description=description,
    long_description=long_description,

    # homepage
    url='https://github.com/flo-compbio/singlecell',

    author='Florian Wagner',
    author_email='florian.wagner@nyu.edu',

    license='proprietary',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'License :: Other/Proprietary License',
        'Programming Language :: Python :: 3.5',
    ],

    keywords='single-cell gene expression pipeline processing',

    # packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    packages=find_packages(exclude=['docs', 'tests*']),
    # packages=find_packages(root),

    # libraries = [],

    install_requires=install_requires,

    # tests_require=[],

    extras_require={
        'docs': [
            'sphinx',
            'sphinx-rtd-theme',
            'sphinx-argparse',
            'mock',
        ],
        'tests': [
            'pytest>=3.1.2, <4',
            'pytest-cov>=2.5.1, <3',
        ],
    },

    # data
    # package_data={'genometools': ['data/RdBu_r_colormap.tsv']},
    package_data={
        'singlecell': [
            'data/*/*',
            'data/templates/*/*',
            'indrop/reads.pyx',
        ]
    },

    # data outside the package
    # data_files=[('my_data', ['data/data_file'])],

    entry_points={
        'console_scripts': [
            # inDrop scripts
            ('indrop_generate_star_index.py = '
             'singlecell.indrop.cli.generate_star_index:main'),
            ('indrop_create_config_file.py = '
             'singlecell.indrop.cli.create_config_file:main'),
            ('indrop_pipeline.py = '
             'singlecell.indrop.cli.pipeline:main'),
            ('indrop_check_pipeline.py = '
             'singlecell.indrop.cli.check_pipeline:main'),
            #('indrop_process_reads.py = '
            # 'singlecell.indrop.cli.process_reads:main'),
            #('indrop_map_with_star.py = '
            # 'singlecell.indrop.cli.map_with_star:main'),
            #('indrop_count_barcodes_mapped.py = '
            # 'singlecell.indrop.cli.count_barcodes_mapped:main'),
            #('indrop_quantify_gene_expression.py ='
            # 'singlecell.indrop.cli.quantify_gene_expression:main'),
            #('indrop_quantify_transcript_expression.py ='
            # 'singlecell.indrop.cli.quantify_transcript_expression:main'),
            #('indrop_count_barcodes_transcriptomic.py = '
            # 'singlecell.indrop.cli.count_barcodes_transcriptomic:main'),
        ],
    },

    ext_modules=ext_modules,
    cmdclass=cmdclass,
)
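
# Build sketch: `python setup.py build_ext --inplace` compiles the Cython
# extensions declared above (when Cython and numpy are available), and
# `python setup.py clean` runs the CleanCommand registered in cmdclass.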
| 27.879397 | 75 | 0.593367 | 615 | 5,548 | 5.195122 | 0.38374 | 0.037559 | 0.056338 | 0.065728 | 0.156182 | 0.119875 | 0.063224 | 0.063224 | 0.063224 | 0.049452 | 0 | 0.017625 | 0.263699 | 5,548 | 198 | 76 | 28.020202 | 0.764504 | 0.244232 | 0 | 0.186992 | 0 | 0 | 0.316233 | 0.087669 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0.02439 | 0.065041 | 0 | 0.105691 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
100f590f093322ddd66390421e6b96818ed6e651 | 1,234 | py | Python | encode/tests/test_models.py | Ircam-Web/django-encode | 2c1c9d843865ec99fb5b45631d6f08a9c7cb86ce | [
"MIT"
] | 11 | 2015-03-11T20:48:13.000Z | 2021-12-14T14:17:39.000Z | encode/tests/test_models.py | Ircam-Web/django-encode | 2c1c9d843865ec99fb5b45631d6f08a9c7cb86ce | [
"MIT"
] | 2 | 2015-11-24T22:10:06.000Z | 2017-05-26T09:27:02.000Z | encode/tests/test_models.py | Ircam-Web/django-encode | 2c1c9d843865ec99fb5b45631d6f08a9c7cb86ce | [
"MIT"
] | 2 | 2019-08-09T17:29:41.000Z | 2020-08-31T16:47:27.000Z | # Copyright Collab 2014-2016
# See LICENSE for details.
"""
Tests for the :py:mod:`encode.models` module.
"""
from __future__ import unicode_literals
from django.core.files.base import ContentFile
from encode.models import Audio, Video, EncodingProfile
from encode.tests.helpers import WEBM_DATA, FileTestCase
class MediaBaseTestCase(FileTestCase):
    """
    Tests for the :py:class:`encode.models.MediaBase` model.
    """
    def test_get_media(self):
        """
        `get_media` returns an instance of the model.
        """
        afile = Audio.objects.create(title='Foo')
        self.assertEqual(repr(afile.get_media()), '<Audio: Foo>')

    def test_badProfileIds(self):
        """
        Passing non-existent encoding profile id's to `save()`
        raises an error.
        """
        title = 'test.webm'
        vfile = Video.objects.create(title='Foo')

        # attach file to model
        data = ContentFile(WEBM_DATA, title)

        # store file data but don't save related model until
        # the encoding profiles are saved as well
        getattr(vfile, 'input_file').save(title, data, save=False)

        self.assertRaises(EncodingProfile.DoesNotExist, vfile.save,
                          profiles=[18])
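
# Run sketch (an assumption - the settings module name is hypothetical):
#     DJANGO_SETTINGS_MODULE=encode.test_settings django-admin test encode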
| 27.422222 | 67 | 0.65316 | 150 | 1,234 | 5.286667 | 0.56 | 0.045397 | 0.027743 | 0.032787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010638 | 0.23825 | 1,234 | 44 | 68 | 28.045455 | 0.832979 | 0.311994 | 0 | 0 | 0 | 0 | 0.048177 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 1 | 0.133333 | false | 0 | 0.266667 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
100fd6b5ec1d603eb5c012f65ffed656ba164f09 | 5,589 | py | Python | apps/tokenlizer.py | qsyao/cudaBERT | c93cb5ff0ccd387294a7229a9bef969c1375d0d6 | [
"Apache-2.0"
] | 88 | 2019-07-19T10:55:16.000Z | 2021-12-25T09:42:59.000Z | apps/tokenlizer.py | qsyao/cudaBERT | c93cb5ff0ccd387294a7229a9bef969c1375d0d6 | [
"Apache-2.0"
] | 3 | 2019-08-01T12:47:43.000Z | 2021-12-07T03:16:50.000Z | apps/tokenlizer.py | qsyao/cudaBERT | c93cb5ff0ccd387294a7229a9bef969c1375d0d6 | [
"Apache-2.0"
] | 12 | 2019-07-19T17:41:29.000Z | 2021-11-10T02:59:53.000Z | from pytorch_pretrained_bert.tokenization import BertTokenizer
# module-level tokenizer instance, set by init_tokenlizer() below
# (the name matches the `global tokenizer` declaration used there)
tokenizer = None

'''
Convert an input line from input_file to a tuple:
    [index, line_raw_data, inputs_id, segments_id, mask]
record id_line
process inputs_id, segments_id, mask
record line_data (raw string in line from input_file to output)

There is an example to process ./data/example.tsv
process_line() will be called in engine.py
'''
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, num_line, line_data, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
                Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.num_line = num_line
        self.line_data = line_data
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
def init_tokenlizer(vocab_file, do_lower_case):
    global tokenizer
    tokenizer = BertTokenizer.from_pretrained(
        vocab_file, do_lower_case=do_lower_case)


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()
def convert_example_to_feature(example, max_seq_length):
    """Loads a data file into a list of `InputBatch`s."""
    tokens_a = tokenizer.tokenize(example.text_a)

    tokens_b = None
    if example.text_b:
        tokens_b = tokenizer.tokenize(example.text_b)

    if tokens_b:
        # Modifies `tokens_a` and `tokens_b` in place so that the total
        # length is less than the specified length.
        # Account for [CLS], [SEP], [SEP] with "- 3"
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    else:
        # Account for [CLS] and [SEP] with "- 2"
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[0:(max_seq_length - 2)]

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids: 0     0   0   0  0     0 0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambigiously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    if tokens_b:
        for token in tokens_b:
            tokens.append(token)
            segment_ids.append(1)
        tokens.append("[SEP]")
        segment_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    return (example.num_line,
            example.line_data,
            input_ids,
            input_mask,
            segment_ids)
def create_example(line, set_type, index):
    line = line.replace("\0", '').rstrip().split('\t')
    guid = "%s-%s" % (set_type, index)
    text_a = line[0]
    text_b = line[1]
    return InputExample(index, line_data='\t'.join(line),
                        guid=guid, text_a=text_a, text_b=text_b)


def tokenlizer_line(max_seq_length, line, index):
    example = create_example(line, "dev", index)
    eval_feature = convert_example_to_feature(
        example, max_seq_length)
    return eval_feature
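
# Usage sketch (an assumption, not from the original file): after pointing
# the module at a BERT vocab, each TSV line "text_a<TAB>text_b" becomes a
# feature tuple (num_line, raw_line, input_ids, input_mask, segment_ids):
#
#     init_tokenlizer('bert-base-uncased-vocab.txt', do_lower_case=True)
#     feature = tokenlizer_line(128, 'first sentence\tsecond sentence', 0)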
| 37.510067 | 84 | 0.632671 | 787 | 5,589 | 4.311309 | 0.261753 | 0.007663 | 0.009726 | 0.01061 | 0.191866 | 0.150604 | 0.053935 | 0.05364 | 0.010021 | 0.010021 | 0 | 0.010758 | 0.284845 | 5,589 | 148 | 85 | 37.763514 | 0.838129 | 0.382358 | 0 | 0.191781 | 0 | 0 | 0.010251 | 0 | 0 | 0 | 0 | 0 | 0.041096 | 1 | 0.082192 | false | 0 | 0.013699 | 0 | 0.150685 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
101197a60898fb4d5c822eb8c2635b94b789f285 | 3,653 | py | Python | spider/ipSpider.py | SonnySmart/M3u8Downloader | 663b61ece071e6734ab60f82bd17ac222d90218c | [
"MIT"
] | 10 | 2019-11-20T19:09:50.000Z | 2022-02-26T00:40:32.000Z | spider/ipSpider.py | SonnySmart/M3u8Downloader | 663b61ece071e6734ab60f82bd17ac222d90218c | [
"MIT"
] | 1 | 2021-06-01T23:51:29.000Z | 2021-06-01T23:51:29.000Z | spider/ipSpider.py | SonnySmart/M3u8Downloader | 663b61ece071e6734ab60f82bd17ac222d90218c | [
"MIT"
] | 2 | 2020-02-21T20:59:18.000Z | 2020-09-24T15:05:59.000Z | # -*- coding: UTF-8 -*-
import requests
import urllib3
import threading
import json
import os
from bs4 import BeautifulSoup
class IpSpider:
    url = 'http://www.xicidaili.com/nn/'
    page = 1
    maxPage = 10
    checkUrl = 'https://www.ip.cn/'
    needIpNum = 10
    ipNum = 0
    filePath = 'db/'
    fileName = 'ip_info.json'
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
    }

    def __init__(self):
        self.url = IpSpider.url
        self.page = IpSpider.page
        self.maxPage = IpSpider.maxPage
        self.checkUrl = IpSpider.checkUrl
        self.needIpNum = IpSpider.needIpNum
        self.ipNum = IpSpider.ipNum
        self.filePath = IpSpider.filePath
        self.fileName = IpSpider.fileName
        self.headers = IpSpider.headers
        self.ipInfoList = []

    def spider(self):
        # check whether cached ip info exists and already holds enough usable ips
        if os.access(self.filePath + self.fileName, os.F_OK):
            fp = open(self.filePath + self.fileName, encoding='utf-8')
            self.ipInfoList = json.load(fp)
            fp.close()
            self.ipNum = len(self.ipInfoList)
            if self.ipNum >= self.needIpNum:
                return True
        # crawl the proxy list pages
        http = urllib3.PoolManager()
        while self.ipNum < self.needIpNum and self.page <= self.maxPage:
            p = self.page
            res = http.request('get', self.url + str(p), headers=self.headers)
            # parse the fetched page
            self.parser(res.data)
            self.page += 1
        # persist ipInfoList to file
        with open(self.filePath + self.fileName, 'w') as f:
            json.dump(self.ipInfoList, f)
        return True

    def parser(self, html):
        if html == '':
            return
        soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
        # skip the first row, which is the table header
        trNodes = soup.find_all('tr')[1:]
        for trNode in trNodes:
            if self.ipNum >= self.needIpNum:
                break
            tdNodes = trNode.find_all('td')
            if tdNodes[5].string.lower() == 'https':
                continue
            ipInfo = {
                'country': tdNodes[0].get_text(),
                'ip': tdNodes[1].string,
                'port': tdNodes[2].string,
                'server_address': tdNodes[3].get_text(),
                'is_anonymity': tdNodes[4].string,
                'protocol': tdNodes[5].string,
                'speed': tdNodes[6].get_text(),
                'connection_time': tdNodes[7].get_text(),
                'live_time': tdNodes[8].string,
                'verify_time': tdNodes[9].string
            }
            # verify that the proxy actually works
            try:
                has = False
                for info in self.ipInfoList:
                    if ipInfo['ip'] == info['ip']:
                        has = True
                        break
                if has is False:
                    # detect() runs synchronously and appends working proxies
                    # to ipInfoList
                    self.detect(ipInfo)
            except Exception:
                print(ipInfo['ip'] + ' is a bad ip')
        return

    def detect(self, ipInfo={}):
        proxies = {
            'http': 'http://' + ipInfo['ip'] + ':' + ipInfo['port'],
            'https': 'https://' + ipInfo['ip'] + ':' + ipInfo['port']
        }
        try:
            requests.get(self.checkUrl, headers=self.headers, proxies=proxies, timeout=3)
        except Exception:
            # the proxy did not answer within the timeout - discard it
            print(ipInfo['ip'] + ' is a bad ip')
        else:
            # the proxy answered - keep it
            print(ipInfo['ip'] + ' is a good ip')
            self.ipInfoList.append(ipInfo)
            self.ipNum += 1
        return
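
# Usage sketch (assumption): crawl until needIpNum working HTTP proxies are
# cached in db/ip_info.json.
#     spider = IpSpider()
#     spider.spider()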
| 32.616071 | 145 | 0.514098 | 393 | 3,653 | 4.727735 | 0.366412 | 0.029064 | 0.043057 | 0.038751 | 0.093649 | 0.029064 | 0.029064 | 0.029064 | 0 | 0 | 0 | 0.023067 | 0.359157 | 3,653 | 111 | 146 | 32.90991 | 0.770611 | 0.022721 | 0 | 0.138298 | 0 | 0.010638 | 0.111735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.06383 | 0 | 0.265957 | 0.031915 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1012f3c009ffa34c2c9b80bf26d6395fbc58797a | 617 | py | Python | qwe/planning/blockSim.py | IEEERobotics/high-level | a50f2170ca81a16bd50b50f970f9e3fe9c656bfa | [
"BSD-2-Clause"
] | 1 | 2017-08-07T06:03:53.000Z | 2017-08-07T06:03:53.000Z | qwe/planning/blockSim.py | IEEERobotics/high-level | a50f2170ca81a16bd50b50f970f9e3fe9c656bfa | [
"BSD-2-Clause"
] | null | null | null | qwe/planning/blockSim.py | IEEERobotics/high-level | a50f2170ca81a16bd50b50f970f9e3fe9c656bfa | [
"BSD-2-Clause"
] | null | null | null | import Block


class BlockSim:

    def process(self, loc, count):
        listofBlocks = []
        filename = "./planning/" + str(loc) + ".txt"
        with open(filename, 'r') as f:
            data = f.read()
        lines = data.split('\n')
        for i in range(len(lines)):
            items = lines[i].split()
            if not items:
                continue  # skip blank lines (e.g. a trailing newline)
            blk = Block.Block()
            blk.setColor(items[0])
            blk.setSize(items[1])
            blk.setLocation(items[2], items[3])
            #print blk.getColor(), blk.getSize(), blk.getLocation()
            listofBlocks.append(blk)
        return listofBlocks[count]
#bs = BlockSim()
#b = bs.process(2, 0)
#print b.getColor(), b.getSize(), b.getLocation()
| 19.903226 | 58 | 0.63047 | 84 | 617 | 4.630952 | 0.535714 | 0.041131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009921 | 0.183144 | 617 | 30 | 59 | 20.566667 | 0.761905 | 0.259319 | 0 | 0 | 0 | 0 | 0.039911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
101377cd066f70a4f80a4aa9efddee6dc1171b27 | 1,349 | py | Python | exp1/src/task3.py | bhshp/optimization | 8f1a476c263f2b49c3b166f55e2fcc0913050552 | [
"MIT"
] | null | null | null | exp1/src/task3.py | bhshp/optimization | 8f1a476c263f2b49c3b166f55e2fcc0913050552 | [
"MIT"
] | null | null | null | exp1/src/task3.py | bhshp/optimization | 8f1a476c263f2b49c3b166f55e2fcc0913050552 | [
"MIT"
] | null | null | null | import cv2
import numpy as np

from task2 import fit, l1_grad_descent

origin_image = cv2.imread('./data/lena.jpg')
n, m, channel = origin_image.shape
sigma = 20
# add Gaussian noise and save the noisy image
noise_image = np.uint8(np.clip(np.random.normal(0, sigma, origin_image.shape)
                               + origin_image, 0, 255))
cv2.imwrite('./data/noise_lena.jpg', noise_image)
noise_image = noise_image / 255
temp_image = noise_image.copy()


def new_loss(x_list, y_list, theta):
    # sum of squared residuals of the local plane theta0 + theta1*dx + theta2*dy
    result = 0
    for i in range(len(x_list)):
        residual = y_list[i] - (theta[0] + theta[1] * x_list[i][0] + theta[2] * x_list[i][1])
        result += residual ** 2
    return result


# denoise pixel (i, j): fit a plane to its 3x3 neighbourhood per channel and
# keep the plane's value at the centre (theta[0])
for i in range(n):
    print(i)
    for j in range(m):
        y_list = []
        x_list = []
        for x in range(-1, 2):
            for y in range(-1, 2):
                if i + x < 0 or i + x >= n or j + y < 0 or j + y >= m:
                    continue
                x_list.append([x, y])
                y_list.append(temp_image[i + x, j + y])
        x_list = np.array(x_list)
        for r in range(channel):
            new_y_list = [y[r] for y in y_list]
            noise_image[i, j, r] = fit(
                x_list, new_y_list, 2, l1_grad_descent, new_loss)[0]

noise_image = np.uint8(255 * np.clip(noise_image, 0, 1))
cv2.imwrite('./regression_lena.jpg', noise_image)
| 29.977778 | 77 | 0.550037 | 220 | 1,349 | 3.186364 | 0.268182 | 0.128388 | 0.064194 | 0.048502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042254 | 0.315789 | 1,349 | 44 | 78 | 30.659091 | 0.717226 | 0 | 0 | 0 | 0 | 0 | 0.042254 | 0.031134 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.083333 | 0 | 0.138889 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10146477856bf3fb9a7d6889ff29df6b4dd4be0b | 2,513 | py | Python | datafiles/testscript.py | xiaoxinz-cisco/examples | ce1d1526346665bf797effb7b372a5030d2f9bfd | [
"Apache-2.0"
] | 81 | 2019-08-07T09:00:15.000Z | 2022-03-17T23:23:51.000Z | datafiles/testscript.py | xiaoxinz-cisco/examples | ce1d1526346665bf797effb7b372a5030d2f9bfd | [
"Apache-2.0"
] | 2 | 2019-07-30T03:09:50.000Z | 2021-09-28T13:08:00.000Z | datafiles/testscript.py | xiaoxinz-cisco/examples | ce1d1526346665bf797effb7b372a5030d2f9bfd | [
"Apache-2.0"
] | 41 | 2019-08-21T22:43:11.000Z | 2022-03-30T03:22:35.000Z | #!/usr/bin/env python
'''
This is a very short script intended to help the user understand what datafiles
are, how to use them & how datafiles affect your script's normal execution.

First, run this script by itself:
    bash$ python testscript.py

Now, add a datafile:
    bash$ python testscript.py -datafile data/simple_data.yaml

Then, try the extended datafile:
    bash$ python testscript.py -datafile data/extended_data.yaml
'''
import logging
from pyats import aetest
logger = logging.getLogger(__name__)
parameters = {
    'script_param_a': 'default_value_a',
    'script_param_b': 'default_value_b',
}

module_var_a = 'module var a value'


class CommonSetup(aetest.CommonSetup):

    parameters = {
        'cc_param_a': 1,
        'cc_param_b': 2,
    }

    @aetest.subsection
    def common_setup_params(self, cc_param_a, cc_param_b):
        logger.info('the following parameters are local to common_setup')
        logger.info(' cc_param_a = %s' % cc_param_a)
        logger.info(' cc_param_b = %s' % cc_param_b)


class MyTestcase(aetest.Testcase):

    parameters = {
        'tc_param_a': 100,
        'tc_param_b': 200,
    }

    class_var_a = 'class var a value'

    @aetest.test
    def uid_and_groups(self):
        logger.info('notice how testcase uid/groups are modified')
        logger.info(' uid = %s' % self.uid)
        logger.info(' groups = %s' % self.groups)

    @aetest.test
    def script_params(self, script_param_a, script_param_b):
        logger.info('the following parameters are script-level')
        logger.info(' script_param_a = %s' % script_param_a)
        logger.info(' script_param_b = %s' % script_param_b)

    @aetest.test
    def testcase_params(self, tc_param_a, tc_param_b):
        logger.info('the following parameters are local to this testcase')
        logger.info(' tc_param_a = %s' % tc_param_a)
        logger.info(' tc_param_b = %s' % tc_param_b)

    @aetest.test
    def module_variables(self):
        logger.info('the following variables are defined at module level')
        logger.info(' module_var_a = %s' % module_var_a)
        logger.info(' module_var_b = %s' % module_var_b)

    @aetest.test
    def class_attributes(self):
        logger.info('the following attributes are defined at class level')
        logger.info(' class_var_a = %s' % self.class_var_a)
        logger.info(' class_var_b = %s' % self.class_var_b)


if __name__ == '__main__':  # pragma: no cover
    logging.root.setLevel(logging.INFO)
    aetest.main()
| 29.564706 | 78 | 0.668922 | 359 | 2,513 | 4.417827 | 0.264624 | 0.113493 | 0.040984 | 0.069357 | 0.196091 | 0.139344 | 0.139344 | 0.086381 | 0.06053 | 0.06053 | 0 | 0.004098 | 0.223239 | 2,513 | 84 | 79 | 29.916667 | 0.808402 | 0.173498 | 0 | 0.153846 | 0 | 0 | 0.30706 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.038462 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10159dc12ce248c5ab20867cf825a1f1750f06f7 | 372 | py | Python | beginner_contest/109/C.py | FGtatsuro/myatcoder | 25a3123be6a6311e7d1c25394987de3e35575ff4 | [
"MIT"
] | null | null | null | beginner_contest/109/C.py | FGtatsuro/myatcoder | 25a3123be6a6311e7d1c25394987de3e35575ff4 | [
"MIT"
] | null | null | null | beginner_contest/109/C.py | FGtatsuro/myatcoder | 25a3123be6a6311e7d1c25394987de3e35575ff4 | [
"MIT"
] | null | null | null | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n, x = map(int, input().split())
point = list(map(int, input().split()))
diff = [0] * n
for i in range(n):
diff[i] = abs(point[i] - x)
def gcd(big, small):
if small == 0:
return big
else:
return gcd(small, big % small)
import functools
print(functools.reduce(gcd, diff))
| 18.6 | 39 | 0.612903 | 58 | 372 | 3.931034 | 0.551724 | 0.052632 | 0.096491 | 0.140351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017241 | 0.22043 | 372 | 19 | 40 | 19.578947 | 0.768966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.333333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
101629a24b0565cb16ae5309c4126f52ea5e2bb8 | 1,369 | py | Python | returns/interfaces/lashable.py | ariebovenberg/returns | 1630e060a629082b2f9de62177d198a5b59e1929 | [
"BSD-2-Clause"
] | 3 | 2019-01-27T14:41:46.000Z | 2019-01-30T10:57:25.000Z | returns/interfaces/lashable.py | ariebovenberg/returns | 1630e060a629082b2f9de62177d198a5b59e1929 | [
"BSD-2-Clause"
] | 92 | 2022-01-03T01:14:21.000Z | 2022-03-30T00:32:09.000Z | returns/interfaces/lashable.py | ariebovenberg/returns | 1630e060a629082b2f9de62177d198a5b59e1929 | [
"BSD-2-Clause"
] | null | null | null | from abc import abstractmethod
from typing import Callable, Generic, NoReturn, TypeVar
from returns.primitives.hkt import KindN
_FirstType = TypeVar('_FirstType')
_SecondType = TypeVar('_SecondType')
_ThirdType = TypeVar('_ThirdType')
_UpdatedType = TypeVar('_UpdatedType')
_LashableType = TypeVar('_LashableType', bound='LashableN')
class LashableN(Generic[_FirstType, _SecondType, _ThirdType]):
    """
    Represents a "context" in which calculations can be executed.

    ``Lashable`` allows you to bind together
    a series of calculations while maintaining
    the context of that specific container.

    In contrast to :class:`returns.interfaces.bindable.BindableN`,
    works with the second type value.
    """

    __slots__ = ()

    @abstractmethod
    def lash(
        self: _LashableType,
        function: Callable[
            [_SecondType],
            KindN[_LashableType, _FirstType, _UpdatedType, _ThirdType],
        ],
    ) -> KindN[_LashableType, _FirstType, _UpdatedType, _ThirdType]:
        """
        Applies 'function' to the result of a previous calculation.

        And returns a new container.
        """
#: Type alias for kinds with two type arguments.
Lashable2 = LashableN[_FirstType, _SecondType, NoReturn]
#: Type alias for kinds with three type arguments.
Lashable3 = LashableN[_FirstType, _SecondType, _ThirdType]
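
# Usage sketch (an assumption, not part of the original module): any container
# that can recover from a failed computation fits this interface, e.g.
# ``Result`` from this library:
#
#     Failure('boom').lash(lambda inner: Success('recovered'))
#     # => <Success: recovered>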
| 28.520833 | 71 | 0.707816 | 140 | 1,369 | 6.692857 | 0.514286 | 0.08111 | 0.059765 | 0.078975 | 0.14301 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001838 | 0.205259 | 1,369 | 47 | 72 | 29.12766 | 0.859375 | 0.345508 | 0 | 0 | 0 | 0 | 0.078502 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10162d06e2219ad8662267101a5ad7808681589b | 6,875 | py | Python | combinatorial_optim/sa_test_total.py | goodxue/CenterNet | 50e1726664337fb988542e3c2247a4c57ef74334 | [
"MIT"
] | null | null | null | combinatorial_optim/sa_test_total.py | goodxue/CenterNet | 50e1726664337fb988542e3c2247a4c57ef74334 | [
"MIT"
] | null | null | null | combinatorial_optim/sa_test_total.py | goodxue/CenterNet | 50e1726664337fb988542e3c2247a4c57ef74334 | [
"MIT"
] | null | null | null | import os
import time
import glob
import numpy as np
import argparse
from detection_evaluation.nuscenes_eval_core import NuScenesEval
from detection_evaluation.label_parser import LabelParser
import co_utils as cu
def parse_args():
    parser = argparse.ArgumentParser(description='arg parser')
    # parser.add_argument('--pred_labels', type=str, required=True,
    #                     help='Prediction labels data path')
    # parser.add_argument('--gt_labels', type=str, required=True,
    #                     help='Ground Truth labels data path')
    parser.add_argument('--format', type=str, default='class truncated occluded alpha bbox_xmin bbox_ymin bbox_xmax bbox_ymax h w l x y z r score')

    args = parser.parse_args()
    return args


def main():
    # note: main() relies on --pred_labels/--gt_labels, which are commented
    # out above; it is not called by the __main__ block below
    args = parse_args()
    NuScenesEval(args.pred_labels, args.gt_labels, args.format)
if __name__ == '__main__':
args = parse_args()
file_parsing = LabelParser(args.format)
FUSE_NUM = 2
pred_files_list = []
dataset_path = '/home/ubuntu/xwp/datasets/multi_view_dataset/new' #数据集根目录
gt_global_label_dir = '/home/ubuntu/xwp/datasets/multi_view_dataset/new/global_label_new' #全部gt的世界坐标label文件夹
camset_path = [ os.path.join(dataset_path,"cam{}".format(cam_num),'label_test_trans') for cam_num in range(1,35)] #每一个相机的test txt文件夹
#gtset_path = [ os.path.join(dataset_path,"cam{}".format(cam_num),'global_filtered') for cam_num in range(1,35)] #每一个相机的test txt文件夹
gtset_path = [os.path.join(dataset_path,'global_label_new')]
cam_test_list = [] #所有相机单独检测的世界坐标 len=34,len(cam_test_list[0])=100 type(cam_test_list[0]) =np.ndarray shape = N*9(score) / N*8(gt)
cam_gt_list = []
load_start_time = time.time()
for i,pred_path in enumerate(camset_path):
pred_file_list = glob.glob(pred_path + "/*")
pred_file_list.sort()
if len(pred_file_list) != 100:
print(len(pred_file_list))
raise RuntimeError("can\'t read 100 files in cam{}. check the prediction file!".format(i+1))
frame_test_list = []
for pred_fn in pred_file_list:
predictions = file_parsing.parse_label(pred_fn, prediction=True)
frame_test_list.append(predictions[:,1:])
cam_test_list.append(frame_test_list)
    # test samples loaded
    # # load ground truth
# cam_gt_list = []
# gt_file_list = glob.glob(gt_global_label_dir + "/*")
# gt_file_list.sort()
# for gt_fn in gt_file_list:
# if int(gt_fn[-10:-4]) < 901:
# continue
# gts = file_parsing.parse_label(gt_fn, prediction=False)
# cam_gt_list.append(gts[:,1:])
# #
for i,gt_path in enumerate(gtset_path):
gt_file_list = glob.glob(gt_path + "/*")
gt_file_list.sort()
frame_gt_list = []
for gt_fn in gt_file_list:
if int(gt_fn[-10:-4]) < 901:
continue
gts = file_parsing.parse_label(gt_fn, prediction=False)
frame_gt_list.append(gts[:,1:])
cam_gt_list.append(frame_gt_list)
load_time = time.time() - load_start_time
print("load time: ",load_time)
    # fuse by traversing camera combinations
from sko import SA
def func_co(x):
x.sort()
Eval = NuScenesEval('', '', args.format)
fused_data = cu.matching_and_fusion(cam_test_list[x[0]],cam_test_list[x[1]])
fused_gt = cu.filt_gt_labels_tuple(cam_gt_list[x[0]],cam_gt_list[x[1]])
mAP_temp = Eval.my_evaluate(fused_data,fused_gt)
        return 1 - mAP_temp
#fused_gt = cu.filt_gt_labels_tuple(*cam_gt_list)
fused_gt = cam_gt_list[0]
from sklearn.cluster import DBSCAN
dbscan = DBSCAN(eps = 1.6,min_samples=1)
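    # Illustration only (not part of the original pipeline): with eps=1.6 and
    # min_samples=1, DBSCAN puts detections whose centers lie within 1.6 units
    # of each other into the same cluster, so detections of one physical object
    # seen by several cameras share a label and can be fused. Hypothetical data:
    #   centers = np.array([[0.0, 0.0], [0.5, 0.2], [10.0, 10.0]])
    #   DBSCAN(eps=1.6, min_samples=1).fit(centers).labels_  # -> [0, 0, 1]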
def fuse_constellation(x):
        # fuse according to the dimensionality of x
x.sort()
size_n = x.shape[0]
#fused_data = cam_test_list[x[0]]
#gt_list = []
#gt_list.append(cam_gt_list[main_cam])
new_cam_test = []
for i in x:
new_cam_test.append(cam_test_list[i])
fused_data = cu.matching_and_fusion_tuple(*new_cam_test,dbscan=dbscan)
# for i in x[1:]:
        # fused_data = cu.matching_and_fusion(fused_data,cam_test_list[i]) # fuse
#gt_list.append(cam_gt_list[i])
#fused_gt = cu.filt_gt_labels_tuple(*gt_list)
Eval = NuScenesEval('', '', args.format)
#print(fused_data == cam_test_list[x[0]])
mAP_temp = Eval.my_evaluate(fused_data,fused_gt)
        return 1 - mAP_temp
# fused_data = cu.matching_and_fusion(cam_test_list[7],cam_test_list[23])
# fused_data = cu.matching_and_fusion(fused_data,cam_test_list[27])
# Eval = NuScenesEval('', '', args.format)
# mAP_temp = Eval.my_evaluate(fused_data,fused_gt)
# print(mAP_temp)
filt_start_time1 = time.time()
#x0 = SA.get_new_constellation(np.array([0,1,2,3,4,5,6,7,8,9,10]))
# x0 = np.arange(0,34)
# #x0 = np.array([0])
# print(1-fuse_constellation(x0))
# #sa = SA.SA_CO(func=fuse_constellation, x0=x0, T_max=1, T_min=0.1*(max(len(x0),5)-1), L=40, max_stay_counter=10)
# #sa = SA.SA_CO(func=fuse_constellation, x0=x0, T_max=1, T_min=0.1*(min(len(x0),5)-1), L=100, max_stay_counter=10)
# best_x, best_y = sa.run()
# print('best_x:', best_x, 'best_y', 1-best_y)
for i in range(9,28):
filt_start_time = time.time()
x0 = SA.get_new_constellation(np.arange(0,i))
sa = SA.SA_CO(func=fuse_constellation, x0=x0, T_max=1, T_min=0.1*(min(len(x0),6)-1), L=30*(min(len(x0),16)-1), max_stay_counter=5)
best_x, best_y = sa.run()
print('best_x:', best_x, 'best_y', 1-best_y)
filt_time = time.time() - filt_start_time
        print('finished! Used {} s'.format(filt_time))
# filt_start_time = time.time()
# ret = cu.filt_gt_labels(cam_gt_list[0],cam_gt_list[1])
# filt_time = time.time() - filt_start_time
# print("filt gt for 1 iter, time: ",filt_time)
# max_map = 0
# max_i,max_j = 0,0
# for i in range(34):
# for j in range(i+1,34):
    # fused_data = cu.matching_and_fusion(cam_test_list[i],cam_test_list[j]) # fuse
# for k in range(j+1,34):
# Eval = NuScenesEval('', '', args.format)
    # fused_data = cu.matching_and_fusion(fused_data,cam_test_list[k]) # fuse
# fused_gt = cu.filt_gt_labels_tuple(cam_gt_list[i],cam_gt_list[j],cam_gt_list[k])
    # # evaluate
# mAP_temp = Eval.my_evaluate(fused_data,fused_gt)
# if mAP_temp > max_map:
# max_map = mAP_temp
# max_i,max_j = i,j
# print('temp max mAP: {}.......... time: ## i: {} j: {} k:{} '.format(max_map,i,j,k))
# #print(mAP_temp)
filt_time1 = time.time() - filt_start_time1
    print('finished! Used {} s'.format(filt_time1))
    # 1. Read all the test files into a list
    # Loop over steps 2 and 3:
    # 2. Fuse
    # 3. Evaluate, then iterate | 39.739884 | 147 | 0.628509 | 1,063 | 6,875 | 3.757291 | 0.178739 | 0.03305 | 0.044066 | 0.0333 | 0.456435 | 0.436655 | 0.366299 | 0.348773 | 0.295193 | 0.255633 | 0 | 0.028121 | 0.234473 | 6,875 | 173 | 148 | 39.739884 | 0.730762 | 0.401164 | 0 | 0.120482 | 0 | 0 | 0.09663 | 0.027998 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0 | 0.120482 | 0 | 0.204819 | 0.060241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10170ff28699d7143f1d158e1501a5a54fa2ae72 | 1,158 | py | Python | unalikability-python/unalikability.py | cuevasclemente/unlikable-polyglot | ce9efb86df0b667124da394cfcc11c18b81e4849 | [
"MIT"
] | 1 | 2021-05-18T17:33:10.000Z | 2021-05-18T17:33:10.000Z | unalikability-python/unalikability.py | cuevasclemente/unlikable-polyglot | ce9efb86df0b667124da394cfcc11c18b81e4849 | [
"MIT"
] | 1 | 2021-05-18T17:36:44.000Z | 2021-05-18T18:51:34.000Z | unalikability-python/unalikability.py | cuevasclemente/unlikable-polyglot | ce9efb86df0b667124da394cfcc11c18b81e4849 | [
"MIT"
] | null | null | null | import collections
import math
def unalikeability(measurements: [int]) -> float:
"""
    Unalikeability returns the unalikeability measure
for `measurements`, assuming that
`measurements` is an array describing a
collection of measurements of a categorical
variable (i.e: perhaps the results of running
a multiclass classifier on a collection
of elements of the same class).
>>> unalikeability([1, 1, 1, 1, 1, 1, 1])
0.0
>>> unalikeability([1, 2, 3, 4, 5, 6, 7])
1.0
>>> unalikeability([1, 1, 1, 1, 1, 1, 2, 2, 2]) < \
unalikeability([1, 1, 1, 1, 1, 1, 2, 2, 3])
True
>>> round(unalikeability([1, 1, 1, 1, 1, 1, 1, 2, 2, 2 ]), 2)
0.42
"""
unalike = 0.0
counts = collections.Counter()
l = 0.0
for m in measurements:
counts[m] += 1.0
l += 1.0
# If every element is unique,
# you get an unalikeability
# of 1.0
if len(counts.keys()) == l:
return 1.0
for count in counts.values():
unalike += math.pow(count/l, 2)
return 1 - unalike
if __name__ == '__main__':
import doctest
doctest.testmod()
| 27.571429 | 65 | 0.576857 | 167 | 1,158 | 3.952096 | 0.407186 | 0.066667 | 0.081818 | 0.084848 | 0.136364 | 0.136364 | 0.136364 | 0.136364 | 0.081818 | 0 | 0 | 0.078049 | 0.291883 | 1,158 | 41 | 66 | 28.243902 | 0.726829 | 0.529361 | 0 | 0 | 0 | 0 | 0.017058 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10179228ca75871b5e01551558e02842372500a8 | 8,769 | py | Python | pheweb/serve/components/autocomplete/sqlite_dao.py | FINNGEN/pheweb | 40fd83fce0b8e4f405d182dd63b9741c5ee5b280 | [
"MIT"
] | 4 | 2018-11-03T13:58:52.000Z | 2020-03-06T09:19:03.000Z | pheweb/serve/components/autocomplete/sqlite_dao.py | FINNGEN/pheweb | 40fd83fce0b8e4f405d182dd63b9741c5ee5b280 | [
"MIT"
] | 92 | 2018-05-17T18:07:01.000Z | 2022-03-29T00:37:30.000Z | pheweb/serve/components/autocomplete/sqlite_dao.py | FINNGEN/pheweb | 40fd83fce0b8e4f405d182dd63b9741c5ee5b280 | [
"MIT"
] | 4 | 2020-07-01T12:20:55.000Z | 2022-01-24T20:09:15.000Z |
from ....file_utils import get_filepath
from ...server_utils import parse_variant
from flask import url_for
from pathlib import Path
import urllib.parse
import itertools
import re
import copy
import sqlite3
from typing import List,Dict,Any,Optional,Iterator
# TODO: sort suggestions better.
# - It's good that hitting enter sends you to the thing with the highest token-ratio.
# - But it's not good that that's not the first thing in the autocomplete suggestions.
# - Solution:
# - for rsid and variant, the list should be sorted first by length.
# - for stringy things, the list should be sorted by token-match-ratio. That's gonna suck to implement in javascript.
# - Could we send token-sort-ratio along and tell typeaheadjs to sort on it? No, b/c the query changes.
# - but, stringy things should just be in a streamtable anyways.
def get_sqlite3_readonly_connection(filepath:str):
# `check_same_thread=False` lets WSGI work. Readonly makes me feel better about disabling `check_same_thread`.
return sqlite3.connect('file:{}?mode=ro'.format(urllib.parse.quote(filepath)), uri=True, check_same_thread=False)
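# For example, a connection returned by get_sqlite3_readonly_connection() will
# raise sqlite3.OperationalError on any INSERT/UPDATE, which is exactly what we
# want for data that is only ever read at serve time.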
class SQLiteAutocompleter(object):
def __init__(self, phenos:Dict[str,Dict[str,Any]]):
self._phenos = copy.deepcopy(phenos)
self._preprocess_phenos()
cpras_rsids_path = Path(get_filepath('cpras-rsids-sqlite3', must_exist=False))
        gene_aliases_path = Path(get_filepath('gene-aliases-sqlite3', must_exist=False))
self._cpras_rsids_sqlite3 = get_sqlite3_readonly_connection(str(cpras_rsids_path))
self._cpras_rsids_sqlite3.row_factory = sqlite3.Row
self._gene_aliases_sqlite3 = get_sqlite3_readonly_connection(str(gene_aliases_path))
self._gene_aliases_sqlite3.row_factory = sqlite3.Row
self._autocompleters = [
self._autocomplete_rsid, # Check rsid first, because it only runs if query.startswith('rs')
self._autocomplete_variant, # Check variant next, because it only runs if query starts with a chrom alias.
self._autocomplete_phenocode,
self._autocomplete_gene,
]
if any('phenostring' in pheno for pheno in self._phenos.values()):
self._autocompleters.append(self._autocomplete_phenostring)
def autocomplete(self, query:str) -> List[Dict[str,str]]:
query = query.strip()
result = []
for autocompleter in self._autocompleters:
result = list(itertools.islice(autocompleter(query), 0, 10))
if result: break
return result
def get_best_completion(self, query:str) -> Optional[Dict[str,str]]:
# TODO: self.autocomplete() only returns the first 10 for each autocompleter. Look at more?
suggestions = self.autocomplete(query)
if not suggestions:
return None
query_tokens = query.strip().lower().split()
return max(suggestions, key=lambda sugg: self._get_suggestion_quality(query_tokens, sugg['display']))
def _get_suggestion_quality(self, query_tokens:List[str], display:str) -> float:
suggestion_tokens = display.lower().split()
intersection_tokens = set(query_tokens).intersection(suggestion_tokens)
return len(intersection_tokens) / len(suggestion_tokens)
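        # Worked example (hypothetical): query_tokens = ['heart', 'attack'] and
        # display = 'Heart attack (phenocode 411)' gives suggestion tokens
        # ['heart', 'attack', '(phenocode', '411)'], an intersection of size 2,
        # and a quality of 2 / 4 = 0.5.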
_process_string_non_word_regex = re.compile(r"(?ui)[^\w\.]") # Most of the time we want to include periods in words
@classmethod
def _process_string(cls, string:str) -> str:
# Cleaning inspired by <https://github.com/seatgeek/fuzzywuzzy/blob/6353e2/fuzzywuzzy/utils.py#L69>
return ' ' + cls._process_string_non_word_regex.sub(' ', string).lower().strip()
def _preprocess_phenos(self) -> None:
for phenocode, pheno in self._phenos.items():
pheno['--spaced--phenocode'] = self._process_string(phenocode)
if 'phenostring' in pheno:
pheno['--spaced--phenostring'] = self._process_string(pheno['phenostring'])
def _autocomplete_variant(self, query:str) -> Iterator[Dict[str,str]]:
# chrom-pos-ref-alt format
query = query.replace(',', '')
chrom, pos, ref, alt = parse_variant(query, default_chrom_pos = False)
if chrom is not None:
key = '-'.join(str(e) for e in [chrom,pos,ref,alt] if e is not None)
# In Python's sort, chr1:23-A-T comes before chr1:23-A-TG, so this should always put exact matches first.
cpra_rsid_pairs = list(self._cpras_rsids_sqlite3.execute(
'SELECT cpra,rsid FROM cpras_rsids WHERE cpra LIKE ? ORDER BY ROWID LIMIT 100', # Input was sorted by cpra, so ROWID will sort by cpra
(key+'%',)
))
if cpra_rsid_pairs:
for cpra, rows in itertools.groupby(cpra_rsid_pairs, key=lambda row:row['cpra']):
rowlist = list(rows)
cpra_display = cpra.replace('-', ':', 1)
if len(rowlist) == 1 and rowlist[0]['rsid'] is None:
display = cpra_display
else:
display = '{} ({})'.format(cpra_display, ','.join(row['rsid'] for row in rowlist))
yield {
'variant' : cpra,
'display' : display
}
def _autocomplete_rsid(self, query:str) -> Iterator[Dict[str,str]]:
key = query.lower()
if query.startswith('rs'):
## <https://sqlite.org/np1queryprob.html> recommends doing lots of small queries, and it's fast:
for suffix_length in [0,1,2]:
for suffix in (''.join(digits) for digits in itertools.product('0123456789', repeat=suffix_length)):
rows = list(self._cpras_rsids_sqlite3.execute('SELECT cpra,rsid FROM cpras_rsids WHERE rsid=?', (key+suffix,)))
for row in rows:
rsid, cpra = row['rsid'], row['cpra']
cpra_display = cpra.replace('-', ':', 1)
yield {
'variant' : cpra_display,
'display': '{} ({})'.format(rsid, cpra_display),
}
def _autocomplete_phenocode(self, query:str) -> Iterator[Dict[str,str]]:
query = self._process_string(query)
for phenocode, pheno in self._phenos.items():
if query in pheno['--spaced--phenocode']:
yield {
'pheno' : phenocode,
'display' : "{} ({})".format(phenocode, pheno['phenostring']) if 'phenostring' in pheno else phenocode, # TODO: truncate phenostring intelligently
}
def _autocomplete_phenostring(self, query:str) -> Iterator[Dict[str,str]]:
query = self._process_string(query)
for phenocode, pheno in self._phenos.items():
if query in pheno['--spaced--phenostring']:
yield {
'pheno' : phenocode,
'display' : "{} ({})".format(pheno['phenostring'], phenocode),
}
def _autocomplete_gene(self, query:str) -> Iterator[Dict[str,str]]:
key = query.upper()
if len(key) >= 2:
alias_canonicals_pairs = list(self._gene_aliases_sqlite3.execute(
'SELECT alias,canonicals_comma FROM gene_aliases WHERE alias LIKE ? ORDER BY LENGTH(alias),alias LIMIT 10',
(key+'%',)
))
for row in alias_canonicals_pairs:
alias, canonical_symbols = row['alias'], row['canonicals_comma'].split(',')
if len(canonical_symbols) > 1:
yield {
'gene' : canonical_symbols[0],
'display': '{} (alias for {})'.format(alias, ' and '.join(canonical_symbols)),
}
elif canonical_symbols[0] == alias:
yield {
'gene' : canonical_symbols[0],
"display" : canonical_symbols[0],
}
else:
yield {
'gene' : canonical_symbols[0],
'display' : '{} (alias for {})'.format(alias, canonical_symbols[0]),
}
def create_autocompleter(phenos):
try:
autocompleter = SQLiteAutocompleter(phenos)
# random test query
autocompleter.autocomplete("2a593769-f25f-4658-a21d-aa372d52a6ae")
return autocompleter
except Exception as e:
print("attempted creating sqlite autocomplete and failed ...")
import sys
import traceback
print(traceback.format_exc(), file=sys.stderr)
return None
| 48.988827 | 166 | 0.605884 | 1,011 | 8,769 | 5.091988 | 0.277943 | 0.012238 | 0.016317 | 0.019425 | 0.209207 | 0.158314 | 0.115773 | 0.103341 | 0.103341 | 0.088578 | 0 | 0.01372 | 0.285209 | 8,769 | 178 | 167 | 49.264045 | 0.807594 | 0.160908 | 0 | 0.191489 | 0 | 0 | 0.1024 | 0.013635 | 0 | 0 | 0 | 0.005618 | 0 | 1 | 0.092199 | false | 0 | 0.085106 | 0.014184 | 0.248227 | 0.014184 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
101a132eac985f8e06fbc298f71166afb00448d1 | 4,603 | py | Python | rnn_text_classifier.py | kamei86i/rnn-classifier-tf | f56d7460dce8ee5ebd02b5ae773c1c5f899be125 | [
"Apache-2.0"
] | 1 | 2018-07-27T00:05:22.000Z | 2018-07-27T00:05:22.000Z | rnn_text_classifier.py | kamei86i/rnn-classifier-tf | f56d7460dce8ee5ebd02b5ae773c1c5f899be125 | [
"Apache-2.0"
] | null | null | null | rnn_text_classifier.py | kamei86i/rnn-classifier-tf | f56d7460dce8ee5ebd02b5ae773c1c5f899be125 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
class RnnTextClassifier:
def __init__(self, batch_size, sentence_length, embedding, cell_layer_size, cell_layer_num, num_classes, lam=1,
lr=0.001):
self.batch_size = batch_size
self.sentence_length = sentence_length
self.embedding = embedding
self.cell_layer_size = cell_layer_size
self.cell_layer_num = cell_layer_num
self.num_classes = num_classes
self.dtype = tf.float32
self.lr = lr
self.lmd = lam
def build_network(self):
with tf.name_scope('input'):
self.input_x = tf.placeholder(shape=[None, self.sentence_length], dtype=tf.int32, name="input_x")
self.input_y = tf.placeholder(shape=[None, self.num_classes], dtype=self.dtype, name="input_y")
self.dropout = tf.placeholder(dtype=self.dtype, name="dropout")
with tf.name_scope('embedding'):
# create embedding variable
emb_w = tf.Variable(initial_value=self.embedding.get_w(), name="w", trainable=self.embedding.is_trainable(),
dtype=self.dtype)
# do embedding lookup
embedding_input = tf.nn.embedding_lookup(emb_w, self.input_x, name="lookup_op")
# define the GRU cell
with tf.name_scope('rnn_cell'):
cell = tf.nn.rnn_cell.GRUCell(self.cell_layer_size, activation=tf.nn.relu)
if self.cell_layer_num > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.cell_layer_num)
# define the RNN operation
with tf.name_scope('rnn_ops'):
output, state = tf.nn.dynamic_rnn(cell, embedding_input, time_major=False, dtype=self.dtype)
to_classify = state
if self.cell_layer_num > 1:
                to_classify = tf.concat(to_classify, 1)  # TF >= 1.0 argument order: values first, then axis
with tf.name_scope('dropout'):
to_classify = tf.nn.dropout(to_classify, self.dropout)
with tf.name_scope('classifier'):
w = tf.get_variable(name="W", shape=[self.cell_layer_size * self.cell_layer_num, self.num_classes],
dtype=self.dtype,
initializer=tf.random_uniform_initializer(0, 1, 0))
b = tf.get_variable(name="b", shape=[self.num_classes], dtype=self.dtype,
initializer=tf.constant_initializer(0.1))
self.l2_loss = tf.nn.l2_loss(w, name="l2_loss")
scores = tf.nn.xw_plus_b(to_classify, w, b, name="logits")
self.predictions = tf.argmax(scores, 1, name="predictions")
with tf.name_scope('loss'):
losses = self.softmax_cross_entropy(scores, self.input_y)
self.loss = tf.reduce_mean(losses) + self.lmd * self.l2_loss
tf.summary.scalar('loss', self.loss)
with tf.name_scope('accuracy'):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
tf.summary.scalar('accuracy', self.accuracy)
def softmax_cross_entropy(self, scores, gold):
        logsoftmax = tf.log(tf.nn.softmax(scores) + 1e-9)
        return tf.negative(tf.reduce_sum(tf.multiply(logsoftmax, gold), 1))  # tf.neg/tf.mul were renamed in TF 1.0
def summary(self):
self.merged = tf.summary.merge_all()
def build_train_ops(self):
with tf.name_scope('training_operations'):
self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.optimizer = tf.train.AdamOptimizer(self.lr, name="Adam")
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step=self.global_step,
name="train_op")
def train(self, session, batch_x, batch_y, dropout):
feed_dict = {
self.input_x: batch_x,
self.input_y: batch_y,
self.dropout: dropout
}
_, step, loss, accuracy, summary = session.run(
[self.train_op, self.global_step, self.loss, self.accuracy, self.merged], feed_dict)
return step, loss, accuracy, summary
def step(self, session, x, y):
feed_dict = {
self.input_x: x,
self.input_y: y,
self.dropout: 1.0
}
step, loss, accuracy, predictions = session.run(
[self.global_step, self.loss, self.accuracy, self.predictions], feed_dict)
return step, loss, accuracy, predictions
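# A minimal usage sketch (assumptions: an `embedding` helper exposing get_w()
# and is_trainable(), plus pre-padded integer batches `bx` and one-hot labels `by`):
#   clf = RnnTextClassifier(batch_size=32, sentence_length=50,
#                           embedding=embedding, cell_layer_size=128,
#                           cell_layer_num=1, num_classes=2)
#   clf.build_network()
#   clf.build_train_ops()
#   clf.summary()
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       step, loss, acc, summary = clf.train(sess, bx, by, dropout=0.5)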
| 44.68932 | 120 | 0.615034 | 597 | 4,603 | 4.522613 | 0.21608 | 0.04 | 0.033333 | 0.05 | 0.235185 | 0.136296 | 0.08 | 0.058519 | 0 | 0 | 0 | 0.008654 | 0.271997 | 4,603 | 102 | 121 | 45.127451 | 0.797076 | 0.019552 | 0 | 0.05 | 0 | 0 | 0.040373 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0875 | false | 0 | 0.0125 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
101abe09fb563597dc036618480bdb3605bcf35d | 357 | py | Python | app/rooms/utils.py | frikke/code-examples-python | ecb3d9c386501a584cab1b0b5c83e5b71b4c9a2f | [
"MIT"
] | null | null | null | app/rooms/utils.py | frikke/code-examples-python | ecb3d9c386501a584cab1b0b5c83e5b71b4c9a2f | [
"MIT"
] | null | null | null | app/rooms/utils.py | frikke/code-examples-python | ecb3d9c386501a584cab1b0b5c83e5b71b4c9a2f | [
"MIT"
] | null | null | null | from docusign_rooms import ApiClient
def create_rooms_api_client(access_token):
"""Create API client and construct API headers"""
api_client = ApiClient(host="https://demo.rooms.docusign.com/restapi")
api_client.set_default_header(
header_name="Authorization",
header_value=f"Bearer {access_token}"
)
return api_client
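# Usage sketch (the access token is assumed to come from an OAuth flow
# elsewhere in these examples):
#   api_client = create_rooms_api_client(access_token)
#   # then hand it to the SDK's API classes, e.g. docusign_rooms.RoomsApi(api_client)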
| 29.75 | 74 | 0.733894 | 46 | 357 | 5.413043 | 0.608696 | 0.180723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170868 | 357 | 11 | 75 | 32.454545 | 0.841216 | 0.120448 | 0 | 0 | 0 | 0 | 0.237013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63da6938462c65e9252485f08e43d556eb29664b | 2,118 | py | Python | etox/apps/backend/mixins.py | a1xg/OpenTox | 52b807185a8113c83b7f5b9a2896974e9f02c8d0 | [
"Apache-2.0"
] | 1 | 2021-09-19T18:07:10.000Z | 2021-09-19T18:07:10.000Z | etox/apps/backend/mixins.py | a1xg/OpenTox | 52b807185a8113c83b7f5b9a2896974e9f02c8d0 | [
"Apache-2.0"
] | null | null | null | etox/apps/backend/mixins.py | a1xg/OpenTox | 52b807185a8113c83b7f5b9a2896974e9f02c8d0 | [
"Apache-2.0"
] | null | null | null | from .services.hazard_assessor import HazardMeter
from .services.ocr import ImageOCR
from .serializers import IngredientsSerializer, ProductSerializer, DetailsIngredientSerializer
from .services.text_blocks_screening import IngredientsBlockFinder
from .services.db_tools import DBQueries
from .services.ocr_settings import *
class SearchMixin:
def __init__(self):
self.box_index = None # Target block with text
self.queryset = None
self.output_image = None
def _get_queryset(self, **kwargs):
if 'request_text' in kwargs:
finder = IngredientsBlockFinder(data=kwargs['request_text'])
self.queryset = finder.get_data()
            if finder.box_index is not None:
self.box_index = finder.box_index
elif 'pk' in kwargs:
self.queryset = DBQueries().search_in_db(pk=kwargs['pk'])
def get_context(self, **kwargs):
if 'image' in kwargs:
ocr = ImageOCR(img=kwargs['image'])
kwargs['request_text'] = ocr.get_text(
text_lang=DEFAULT_LANG,
crop=kwargs['crop'],
)
elif 'text' in kwargs:
kwargs['request_text'] = [{
'lang':DEFAULT_LANG,
'text':kwargs['text']
}]
self._get_queryset(**kwargs)
ingredients_data = IngredientsSerializer(self.queryset, many=True).data
output_data = HazardMeter(data=ingredients_data, display_format=kwargs['display_format']).get_data()
output_data['image_with_ingredients'] = None
        if self.box_index is not None and 'image' in kwargs:  # ocr is only defined when an image was supplied
output_data['image_with_ingredients'] = ocr.draw_boxes(
index=self.box_index,
max_resolution=700,
                color=(0, 255, 0),
base64=True
)
if kwargs['display_format'] == 'list':
return ProductSerializer(output_data, many=False).data
elif kwargs['display_format'] == 'detail':
return {
'ingredient': DetailsIngredientSerializer(output_data, many=False).data
}
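# Usage sketch (hypothetical; SearchMixin is intended to be mixed into a view class):
#   context = SearchMixin().get_context(text='aqua, glycerin, parfum',
#                                       display_format='list')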
| 36.517241 | 108 | 0.616147 | 222 | 2,118 | 5.653153 | 0.306306 | 0.038247 | 0.038247 | 0.025498 | 0.084462 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006592 | 0.283758 | 2,118 | 57 | 109 | 37.157895 | 0.820699 | 0.010387 | 0 | 0 | 0 | 0 | 0.08978 | 0.021012 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63dd7fae6c8c1a5d59b9d4ffad7b82f95f7d3cb7 | 3,122 | py | Python | spaghettifs/treetree.py | mgax/SpaghettiFS | 7782ed1a30910330f0380b70edf110262743374e | [
"MIT"
] | 3 | 2016-03-16T08:22:47.000Z | 2019-09-30T09:35:27.000Z | spaghettifs/treetree.py | mgax/SpaghettiFS | 7782ed1a30910330f0380b70edf110262743374e | [
"MIT"
] | null | null | null | spaghettifs/treetree.py | mgax/SpaghettiFS | 7782ed1a30910330f0380b70edf110262743374e | [
"MIT"
] | 4 | 2015-09-18T12:45:14.000Z | 2020-09-22T13:01:58.000Z | """
TreeTree is a wrapper over `easygit.EasyTree` that provides more efficient
storage of lists. Keys must be strings made up of digits, and they should
be as close as possible to the indices of a list.
"""
class TreeTree(object):
is_tree = True
def __init__(self, container, prefix='tt'):
self.container = container
self.prefix = prefix
def walk(self, name, look):
check_name(name)
keys = ['%s%d' % (self.prefix, len(name))] + list(name)
last_key = keys.pop()
ikeys = iter(keys)
def step(node):
assert node.is_tree
try:
key = next(ikeys)
except StopIteration:
return look(node, last_key, True, lambda nextnode: nextnode)
else:
return look(node, key, False, step)
return step(self.container)
def new_tree(self, name):
def look(node, key, last, step):
try:
nextnode = node[key]
except KeyError:
nextnode = node.new_tree(key)
return step(nextnode)
value = self.walk(name, look)
if not value.is_tree:
raise ValueError
return value
def new_blob(self, name):
def look(node, key, last, step):
try:
nextnode = node[key]
except KeyError:
if last:
nextnode = node.new_blob(key)
else:
nextnode = node.new_tree(key)
return step(nextnode)
value = self.walk(name, look)
if value.is_tree:
raise ValueError
return value
def clone(self, source, name):
def look(node, key, last, step):
try:
nextnode = node[key]
except KeyError:
if last:
nextnode = node.clone(source, key)
else:
nextnode = node.new_tree(key)
return step(nextnode)
value = self.walk(name, look)
if source.is_tree and not value.is_tree:
raise ValueError
if not source.is_tree and value.is_tree:
raise ValueError
return value
def __getitem__(self, name):
def look(node, key, last, step):
return step(node[key])
return self.walk(name, look)
def __contains__(self, name):
try:
self[name]
except KeyError:
return False
else:
return True
def __delitem__(self, name):
def look(node, key, last, step):
if last:
del node[key]
return
nextnode = node[key]
step(nextnode)
if not nextnode.keys():
del node[key]
return self.walk(name, look)
def remove(self):
return self.container.remove()
def check_name(name):
if not name:
raise ValueError('Blank names not allowed: %r' % name)
    if not isinstance(name, str):  # was `basestring` in the original Python 2 code
raise ValueError('Names must be strings: %r' % name)
| 28.126126 | 76 | 0.528187 | 359 | 3,122 | 4.498607 | 0.239554 | 0.056347 | 0.040867 | 0.04644 | 0.431579 | 0.431579 | 0.411765 | 0.411765 | 0.260681 | 0.260681 | 0 | 0 | 0.38565 | 3,122 | 110 | 77 | 28.381818 | 0.842023 | 0.063421 | 0 | 0.511364 | 0 | 0 | 0.01989 | 0 | 0 | 0 | 0 | 0 | 0.011364 | 1 | 0.181818 | false | 0 | 0 | 0.022727 | 0.386364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63def619e46b70dd31fc4852669b37e3e1d6df41 | 737 | py | Python | bot.py | rojserbest/contact-bot | 2763d115cec28ae1b27a6cd489ab79e885ec13ce | [
"MIT"
] | 3 | 2021-02-10T06:17:27.000Z | 2021-08-08T16:44:11.000Z | bot.py | rojserbest/contact-bot | 2763d115cec28ae1b27a6cd489ab79e885ec13ce | [
"MIT"
] | null | null | null | bot.py | rojserbest/contact-bot | 2763d115cec28ae1b27a6cd489ab79e885ec13ce | [
"MIT"
] | 2 | 2021-02-10T09:59:02.000Z | 2021-02-25T02:37:40.000Z | from telegram.ext import Updater, PicklePersistence
from config import BOT_TOKEN
updater = Updater(
BOT_TOKEN,
persistence=PicklePersistence(filename="data")
)
dp = updater.dispatcher
def main():
from handlers import all_handlers
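    # `all_handlers` is assumed to contain tuples of one of three shapes:
    # (handler,), (handler, group) or ("error", error_callback).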
for handler in all_handlers:
if len(handler) == 2:
if handler[0] == "error":
dp.add_error_handler(
handler[1]
)
else:
dp.add_handler(
handler[0],
handler[1]
)
else:
dp.add_handler(
handler[0]
)
updater.start_polling()
updater.idle()
if __name__ == "__main__":
main()
| 19.918919 | 51 | 0.514247 | 71 | 737 | 5.098592 | 0.464789 | 0.066298 | 0.066298 | 0.077348 | 0.176796 | 0.176796 | 0.176796 | 0.176796 | 0 | 0 | 0 | 0.013575 | 0.400271 | 737 | 36 | 52 | 20.472222 | 0.80543 | 0 | 0 | 0.214286 | 0 | 0 | 0.023066 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63e06ea259c0e2b97e801a344e2061220ba0cacc | 951 | py | Python | D/D.py | staguchi0703/abc142 | d529a14a76586cb582cecec17ab6267736673937 | [
"MIT"
] | null | null | null | D/D.py | staguchi0703/abc142 | d529a14a76586cb582cecec17ab6267736673937 | [
"MIT"
] | null | null | null | D/D.py | staguchi0703/abc142 | d529a14a76586cb582cecec17ab6267736673937 | [
"MIT"
] | null | null | null | #
# Read input from a text file in VS Code and feed it to standard input
import sys
import os
f=open(r'.\D\D_input.txt', 'r', encoding="utf-8")
# specify the input file with a full path
# Files created on Windows 10 are saved as Shift-JIS, so the read has to be forced to UTF-8
# VS Code shows the input file as UTF-8, but the contents apparently end up as Shift-JIS anyway
sys.stdin=f
#
# input snippets
# num = int(input())
# num_list = [int(item) for item in input().split()]
# num_list = [input() for _ in range(3)]
##################################
#
# paste the solution below this line
# start 21:14
a, b = [int(item) for item in input().split()]
# list of prime factors
import math
def factor(num):
divisor_list = [1]
divisor = 2
max_prime = int(math.sqrt(num))
while max_prime >= divisor:
if num % divisor == 0:
divisor_list.append(divisor)
num //= divisor
else:
divisor += 1
divisor_list.append(num)
return divisor_list
# greatest common divisor (Euclidean algorithm; the original used `a & b` and
# `while b > 1`, which computed the wrong value)
def gcd(a, b):
    while b:
        a, b = b, a % b
    return a
cd_list = set(factor(gcd(a, b)))
print(len(cd_list))
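# Why this works (ABC142 D): pairwise-coprime common divisors can share no
# prime, so the best choice is 1 plus one power of each distinct prime factor
# of gcd(a, b). E.g. a=12, b=18 -> gcd=6, prime factors {2, 3}, answer 3.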
| 19.8125 | 54 | 0.604627 | 127 | 951 | 4.433071 | 0.472441 | 0.017762 | 0.035524 | 0.049734 | 0.092362 | 0.092362 | 0.092362 | 0 | 0 | 0 | 0 | 0.020408 | 0.227129 | 951 | 47 | 55 | 20.234043 | 0.745578 | 0.312303 | 0 | 0 | 0 | 0 | 0.034884 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.125 | 0 | 0.291667 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63e0b323a1930c8792758cd6dfc42283e2496a50 | 1,769 | py | Python | sources/ebf-demo/scripts/cfilter.py | zwg0106/imx-yocto | e378ca25352a59d1ef84ee95f3386b7314f4565b | [
"MIT"
] | 1 | 2020-01-13T13:16:52.000Z | 2020-01-13T13:16:52.000Z | sources/ebf-demo/scripts/cfilter.py | zwg0106/imx-yocto | e378ca25352a59d1ef84ee95f3386b7314f4565b | [
"MIT"
] | 3 | 2019-11-20T02:53:01.000Z | 2019-12-26T03:00:15.000Z | sources/ebf-demo/scripts/cfilter.py | zwg0106/imx-yocto | e378ca25352a59d1ef84ee95f3386b7314f4565b | [
"MIT"
] | null | null | null | import time
import math
class ComplementaryFilter(object):
def __init__(self, gyroWeight=0.95):
self.gyroWeight = gyroWeight
self._reset()
def _reset(self):
self.last = 0
self.accelPos = [0, 0, 0]
self.gyroPos = [0, 0, 0]
self.filterPos = [0, 0, 0]
def input(self, vals):
now = int(round(time.time() * 1000))
# unpack sensor readings
accelData = vals[0:3]
gyroData = vals[4:7]
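        # note: vals[3] is skipped -- on IMUs such as the MPU-6050 that slot is
        # typically a temperature reading (an assumption about the sensor layout)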
# convert accelerometer reading to degrees
self.accelPos = self.calculateAccelPos(*accelData)
# if this is our first chunk of data, simply accept
# the accelerometer reads and move on.
if self.last == 0:
self.filterPos = self.gyroPos = self.accelPos
self.last = now
return
# calculate the elapsed time (in seconds) since last data.
# we need this because the gyroscope measures movement in
# degrees/second.
        dt = (now - self.last) / 1000.0  # float division, also safe under Python 2
self.last = now
# calculate change in position from gyroscope readings
gyroDelta = [i * dt for i in gyroData]
self.gyroPos = [i + j for i, j in zip(self.gyroPos, gyroDelta)]
# pitch
self.filterPos[0] = (self.gyroWeight * (self.filterPos[0] + gyroDelta[0])) + (1-self.gyroWeight) * self.accelPos[0]
# roll
self.filterPos[1] = (self.gyroWeight * (self.filterPos[1] + gyroDelta[1])) + (1-self.gyroWeight) * self.accelPos[1]
def calculateAccelPos(self, x, y, z):
        x2 = x * x
        y2 = y * y
        z2 = z * z
adx = math.atan2(y, math.sqrt(x2 + z2))
ady = math.atan2(-x, math.sqrt(y2 + z2))
return [math.degrees(x) for x in [adx, ady, 0]]
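# A minimal usage sketch (hypothetical reading order [ax, ay, az, temp, gx, gy, gz]):
#   cf = ComplementaryFilter(gyroWeight=0.95)
#   cf.input([0.0, 0.0, 1.0, 25.0, 0.1, -0.2, 0.0])
#   print(cf.filterPos)  # [pitch, roll, 0] in degrees; yaw stays 0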
| 29.983051 | 123 | 0.57377 | 229 | 1,769 | 4.406114 | 0.379913 | 0.083251 | 0.071358 | 0.056492 | 0.053518 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037459 | 0.305823 | 1,769 | 58 | 124 | 30.5 | 0.784202 | 0.193895 | 0 | 0.060606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.060606 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63e0ebde336a5f3eeb1ad190a79f282a9b028bed | 3,008 | py | Python | face_recognition/deep_learning/main_flow_processer.py | cclauss/TripletLossFace | 6da4ae571cf2fb912ab528afa3f0b9f1efe71767 | [
"MIT"
] | 88 | 2020-01-18T09:47:03.000Z | 2021-12-18T22:34:18.000Z | face_recognition/deep_learning/main_flow_processer.py | cclauss/TripletLossFace | 6da4ae571cf2fb912ab528afa3f0b9f1efe71767 | [
"MIT"
] | 4 | 2020-01-18T09:20:24.000Z | 2020-03-02T19:40:58.000Z | face_recognition/deep_learning/main_flow_processer.py | cclauss/TripletLossFace | 6da4ae571cf2fb912ab528afa3f0b9f1efe71767 | [
"MIT"
] | 40 | 2020-01-18T11:15:07.000Z | 2021-03-09T07:58:57.000Z | import sys
sys.path.append("../")
import json
import cv2, os
import numpy as np
import tensorflow as tf
from make_better_dataset_for_deepfake.main_data_creator import FaceExtractor
class Engine:
def load_image(self, path):
image = tf.io.read_file(path)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, (self.input_shape[0], self.input_shape[1]), method="nearest")
return image.numpy()
def __init__(self, model_path: str):
self.faceExtractor = FaceExtractor()
self.model = tf.keras.models.load_model(model_path, {"ReLU": tf.keras.layers.ReLU})
self.model.summary()
self.i = 0
self.input_shape = self.model.layers[0].input_shape[0][1:]
self.mistaken = []
self.json_path = os.path.join("../datasets", "dfdc_train_part_45/metadata.json")
with open(self.json_path, 'rb') as f:
self.json = json.loads(f.read())
def go_for_image(self, faces, load_image_first: bool = False, detect_faces_first: bool = True):
y_map = {0: "real", 1: "fake"}
aaa1 = faces
if load_image_first:
try:
faces = self.load_image(faces)
if not detect_faces_first:
faces = [faces]
            except Exception:  # a bare except would also swallow KeyboardInterrupt
print("error")
return
if detect_faces_first:
faces = self.faceExtractor.extract([faces])[0]
for face in faces:
try:
face = tf.image.resize(face, (self.input_shape[0], self.input_shape[1]), method="nearest")
aa = tf.nn.softmax(self.model(tf.expand_dims(tf.cast(face, tf.float32)/255., 0)))
if np.argmax(aa) == 0:
print(y_map[np.argmax(aa)])
print(aa[0][0]*100)
print(aa[0][1]*100)
print(aaa1)
self.i += 1
self.mistaken.append(aaa1)
print("----------------------------------------")
# cv2.imshow("face", face.numpy())
# cv2.waitKey(0)
            except Exception:
print("error")
continue
def go_for_video(self, video_path, detect_faces_first: bool = True):
y_map = {0: "REAL", 1: "FAKE"}
all_frames = self.faceExtractor.extract_frames(video_path, 20)
for face in all_frames:
try:
if detect_faces_first:
faces_all, frames = self.faceExtractor.extract([face])
for face in faces_all:
face = face[0]
face = tf.image.resize(face, (self.input_shape[0], self.input_shape[1]), method="nearest")
aa = tf.nn.softmax(self.model(tf.expand_dims(tf.divide(tf.cast(face, tf.float32), 255.), 0)))
if self.json[video_path.split("/")[-1]]["label"] == y_map[np.argmax(aa)]: # self.json[video_path.split("/")[-1]]["label"]
return True
else:
return False
        except Exception:
continue
if __name__ == '__main__': # 216
engine = Engine(model_path="models/softmax_deepfake_freezed.h5")
from tqdm import tqdm
q = tf.io.gfile.glob(os.path.join("../datasets", "dfdc_train_part_45/*.mp4"))
all_num = len(q)
    bar = tqdm(total=all_num)  # manual progress bar, advanced with update(1) per video
trues = 0
for n, path in enumerate(q):
if engine.go_for_video(path, True):
trues += 1
bar.update(1)
bar.set_description(f"{trues}/{(n+1)} = {(100*trues)/(n+1)}")
print(engine.mistaken)
print(engine.i)
| 27.345455 | 128 | 0.655918 | 459 | 3,008 | 4.119826 | 0.272331 | 0.042306 | 0.051824 | 0.031729 | 0.332628 | 0.258593 | 0.258593 | 0.228979 | 0.168694 | 0.168694 | 0 | 0.027678 | 0.17121 | 3,008 | 109 | 129 | 27.59633 | 0.730846 | 0.032247 | 0 | 0.170732 | 0 | 0 | 0.089126 | 0.044735 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.085366 | 0 | 0.195122 | 0.109756 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63e142b8a3ac1fd9757268f4acda900e7ecba57e | 3,668 | py | Python | ops/models/schedule.py | dengshaochun/devOps | 58b5a37918ffca3340aa535af670b805c19a87ec | [
"MIT"
] | null | null | null | ops/models/schedule.py | dengshaochun/devOps | 58b5a37918ffca3340aa535af670b805c19a87ec | [
"MIT"
] | 3 | 2020-06-05T19:01:02.000Z | 2021-09-23T23:22:32.000Z | ops/models/schedule.py | dengshaochun/ansibleX | 58b5a37918ffca3340aa535af670b805c19a87ec | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/15 10:51
# @Author : Dengsc
# @Site :
# @File : schedule.py
# @Software: PyCharm
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_celery_beat.models import (CrontabSchedule, IntervalSchedule)
from django.core.exceptions import ValidationError
from ops.models.ansible import AnsiblePlayBookTask, AnsibleScriptTask
from ops.models.project import ProjectTask
class ScheduleTaskBase(models.Model):
name = models.CharField(_('schedule task name'), max_length=100,
unique=True)
crontab = models.ForeignKey(
CrontabSchedule, verbose_name=_('crontab'),
on_delete=models.SET_NULL, null=True, blank=True,
related_name='crontab_schedule_base_schedules')
interval = models.ForeignKey(
IntervalSchedule, verbose_name=_('interval'),
on_delete=models.SET_NULL, null=True, blank=True,
related_name='interval_schedule_base_schedules')
enabled = models.BooleanField(_('enable'), default=True)
def validate_unique(self, *args, **kwargs):
super(ScheduleTaskBase, self).validate_unique(*args, **kwargs)
if not self.interval and not self.crontab:
raise ValidationError({
'interval': [
'One of interval, crontab must be set.'
]
})
if self.interval and self.crontab:
raise ValidationError({
'crontab': [
'Only one of interval, crontab must be set'
]
})
def __str__(self):
return self.name
class Meta:
abstract = True
class AnsibleScriptTaskSchedule(ScheduleTaskBase):
task = models.ForeignKey(AnsibleScriptTask, verbose_name=_('task'),
related_name='ansible_script_schedules',
on_delete=models.CASCADE)
crontab = models.ForeignKey(
CrontabSchedule, verbose_name=_('crontab'),
on_delete=models.SET_NULL, null=True, blank=True,
related_name='crontab_ansible_script_schedules')
interval = models.ForeignKey(
IntervalSchedule, verbose_name=_('interval'),
on_delete=models.SET_NULL, null=True, blank=True,
related_name='interval_ansible_script_schedules')
class AnsiblePlayBookTaskSchedule(ScheduleTaskBase):
task = models.ForeignKey(AnsiblePlayBookTask, verbose_name=_('task'),
related_name='ansible_playbook_schedules',
on_delete=models.CASCADE)
crontab = models.ForeignKey(
CrontabSchedule, verbose_name=_('crontab'),
on_delete=models.SET_NULL, null=True, blank=True,
related_name='crontab_ansible_playbook_schedules')
interval = models.ForeignKey(
IntervalSchedule, verbose_name=_('interval'),
on_delete=models.SET_NULL, null=True, blank=True,
related_name='interval_ansible_playbook_schedules')
class ProjectTaskSchedule(ScheduleTaskBase):
task = models.ForeignKey(ProjectTask, verbose_name=_('task'),
related_name='task_project_schedules',
on_delete=models.CASCADE)
crontab = models.ForeignKey(
CrontabSchedule, verbose_name=_('crontab'),
on_delete=models.SET_NULL, null=True, blank=True,
related_name='crontab_project_schedules')
interval = models.ForeignKey(
IntervalSchedule, verbose_name=_('interval'),
on_delete=models.SET_NULL, null=True, blank=True,
related_name='interval_project_schedules')
| 37.428571 | 73 | 0.658942 | 371 | 3,668 | 6.264151 | 0.25876 | 0.075732 | 0.066265 | 0.05852 | 0.523236 | 0.512048 | 0.483649 | 0.458692 | 0.458692 | 0.458692 | 0 | 0.00577 | 0.244002 | 3,668 | 97 | 74 | 37.814433 | 0.832312 | 0.038713 | 0 | 0.430556 | 0 | 0 | 0.144643 | 0.090935 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.083333 | 0.013889 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63e2bca2d8584267cec472d15b17a95fc15a081c | 18,110 | py | Python | LAB4/bin/task_5.py | Yalfoosh/DUBUCE | 3f53923c27b1bce0ac592b20c5bb98649cb7fb75 | [
"Apache-2.0"
] | null | null | null | LAB4/bin/task_5.py | Yalfoosh/DUBUCE | 3f53923c27b1bce0ac592b20c5bb98649cb7fb75 | [
"Apache-2.0"
] | null | null | null | LAB4/bin/task_5.py | Yalfoosh/DUBUCE | 3f53923c27b1bce0ac592b20c5bb98649cb7fb75 | [
"Apache-2.0"
] | 1 | 2020-04-23T02:06:47.000Z | 2020-04-23T02:06:47.000Z | # Copyright 2020 Miljenko Šuflaj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from sys import stdout
from time import sleep
from typing import Callable, List, Tuple
from matplotlib import pyplot as plt
import numpy as np
import torch
import torch.utils.data
from tqdm import tqdm
from util.losses import get_gan_loss
class Discriminator(torch.nn.Module):
def __init__(self,
in_channels: int = 1,
channels: List[int] or Tuple[int] = (64, 128, 256, 512, 1),
kernels: List[int] or Tuple[int] = (4, 4, 4, 4, 4),
strides: List[int] or Tuple[int] = (2, 2, 2, 2, 1),
padding: List[int] or Tuple[int] = (1, 1, 1, 1, 0),
leaky_relu_slope: float = 0.2,
use_batch_norm: bool = True):
"""
The Discriminator constructor.
:param in_channels:
(Optional) An int representing the number of input channels.
Default: 1.
:param channels:
(Optional) A List[int] or Tuple[int] representing the channels of
every convolutional layer. Default: (64, 128, 256, 512, 1).
:param kernels:
(Optional) A List[int] or Tuple[int] representing the kernels of
every convolutional layer. Default: (4, 4, 4, 4, 4).
:param strides:
(Optional) A List[int] or Tuple[int] representing the strides of
every convolutional layer. Default: (2, 2, 2, 2, 1).
:param padding:
(Optional) A List[int] or Tuple[int] representing the padding of
every convolutional layer. Default: (1, 1, 1, 1, 0).
:param leaky_relu_slope:
(Optional) A float representing the slope of the leaky ReLu
activation functions. Default: 0.2.
:param use_batch_norm:
(Optional) A bool: True if you wish to use batch normalization,
False otherwise. Batch normalization is applied after every layer
that isn't the input or an output. Default: True.
"""
super().__init__()
self._conv = torch.nn.ModuleList()
self._batch_norm = torch.nn.ModuleList() if use_batch_norm else None
self._leaky_relu = torch.nn.LeakyReLU(leaky_relu_slope)
last_out = in_channels
for i, (chan, kern, stri, padd) in enumerate(zip(channels,
kernels,
strides,
padding)):
self.conv.append(torch.nn.Conv2d(in_channels=last_out,
out_channels=chan,
kernel_size=kern,
stride=stri,
padding=padd))
if use_batch_norm and i != 0 and i != (len(channels) - 1):
self.batch_norm.append(torch.nn.BatchNorm2d(num_features=chan))
last_out = chan
self.reset_parameters()
# region Properties
@property
def conv(self) -> List[torch.nn.Conv2d]:
return self._conv
@property
def batch_norm(self) -> List[torch.nn.BatchNorm2d]:
return self._batch_norm
@property
def leaky_relu(self) -> Callable:
return self._leaky_relu
# endregion
def reset_parameters(self):
"""
Resets this instance's parameters.
:return:
Nothing.
"""
for conv in self.conv[:-1]:
torch.nn.init.kaiming_normal_(conv.weight,
nonlinearity="leaky_relu")
torch.nn.init.normal_(conv.bias, 0, 1e-6 / 3)
torch.nn.init.xavier_normal_(self.conv[-1].weight)
torch.nn.init.constant_(self.conv[-1].bias, 0.)
def forward(self, x):
"""
The forward method of a Discriminator instance.
:param x:
A torch.Tensor representing the network input.
:return:
A torch.Tensor of shape (B, 1) representing the network's
confidence that the input is a real image.
"""
y = self.conv[0](x)
y = self.leaky_relu(y)
for i, conv in enumerate(self.conv[1:-1]):
y = conv(y)
y = self.leaky_relu(y)
if self.batch_norm is not None:
y = self.batch_norm[i](y)
y = self.conv[-1](y)
y = y.view(-1)
return torch.sigmoid(y)
class Generator(torch.nn.Module):
def __init__(self,
input_size: int = 100,
channels: List[int] or Tuple[int] = (512, 256, 128, 64, 1),
kernels: List[int] or Tuple[int] = (4, 4, 4, 4, 4),
strides: List[int] or Tuple[int] = (1, 2, 2, 2, 2),
padding: List[int] or Tuple[int] = (0, 1, 1, 1, 1),
leaky_relu_slope: float = 0.2,
use_batch_norm: bool = True):
"""
The Generator constructor.
:param input_size:
(Optional) An int representing the dimensionality of the samples
generated. Default: 100.
:param channels:
(Optional) A List[int] or Tuple[int] representing the channels of
every convolutional layer. Default: (512, 256, 128, 64, 1).
:param kernels:
(Optional) A List[int] or Tuple[int] representing the kernels of
every convolutional layer. Default: (4, 4, 4, 4, 4).
:param strides:
(Optional) A List[int] or Tuple[int] representing the strides of
every convolutional layer. Default: (1, 2, 2, 2, 2).
:param padding:
(Optional) A List[int] or Tuple[int] representing the padding of
every convolutional layer. Default: (0, 1, 1, 1, 1).
:param leaky_relu_slope:
(Optional) A float representing the slope of the leaky ReLu
activation functions. Default: 0.2.
:param use_batch_norm:
(Optional) A bool: True if you wish to use batch normalization,
False otherwise. Batch normalization is applied after every layer
except the output. Default: True.
"""
super().__init__()
self._input_size = input_size
self._conv = torch.nn.ModuleList()
self._batch_norm = torch.nn.ModuleList() if use_batch_norm else None
self._leaky_relu = torch.nn.LeakyReLU(leaky_relu_slope)
last_out = input_size
for i, (chan, kern, stri, padd) in enumerate(zip(channels,
kernels,
strides,
padding)):
self.conv.append(torch.nn.ConvTranspose2d(in_channels=last_out,
out_channels=chan,
kernel_size=kern,
stride=stri,
padding=padd))
if use_batch_norm and i != (len(channels) - 1):
self.batch_norm.append(torch.nn.BatchNorm2d(num_features=chan))
last_out = chan
self.reset_parameters()
# region Properties
@property
def input_size(self):
return self._input_size
@property
def conv(self) -> List[torch.nn.ConvTranspose2d]:
return self._conv
@property
def batch_norm(self) -> List[torch.nn.BatchNorm2d]:
return self._batch_norm
@property
def leaky_relu(self) -> Callable:
return self._leaky_relu
# endregion
def reset_parameters(self):
"""
Resets this instance's parameters.
:return:
Nothing.
"""
for conv in self.conv[:-1]:
torch.nn.init.kaiming_normal_(conv.weight,
nonlinearity="leaky_relu")
torch.nn.init.normal_(conv.bias, 0, 1e-6 / 3)
torch.nn.init.xavier_normal_(self.conv[-1].weight)
torch.nn.init.constant_(self.conv[-1].bias, 0.)
def forward(self, x):
"""
The forward method of a Generator instance.
:param x:
A torch.Tensor representing the network input.
:return:
A torch.Tensor of shape (B, 64, 64) representing the generator's
output.
"""
for i in range(len(self.conv) - 1):
x = self.conv[i](x)
x = self.leaky_relu(x)
if self.batch_norm is not None:
x = self.batch_norm[i](x)
x = self.conv[-1](x)
return torch.tanh(x)
class DCGAN(torch.nn.Module):
def __init__(self,
discriminator: Discriminator,
generator: Generator):
"""
The DCGAN constructor.
:param discriminator:
A Discriminator object representing the model's discriminator
module.
:param generator:
A Generator object representing the model's generator module.
"""
super().__init__()
self._discriminator = deepcopy(discriminator)
self._generator = deepcopy(generator)
self._component_names = ("discriminator", "generator")
# region Properties
@property
def discriminator(self) -> Discriminator:
return self._discriminator
@property
def generator(self) -> Generator:
return self._generator
@property
def component_names(self) -> Tuple[str, str]:
return self._component_names
# endregion
def fit(self,
dataset: torch.utils.data.Dataset,
n_epochs: int = 1,
batch_size: int = 1,
learning_rate: float or Tuple[float, float] or List[float] = 3e-4,
lr_gamma: float or Tuple[float, float] or List[float] = None,
loss: Callable = None,
device: str = "cpu",
discriminator_batches_till_step: int = 1,
generator_batches_till_step: int = 1,
verbose: int = 1):
"""
:param dataset:
A torch.utils.data.Dataset representing the dataset your wish to
fit the model on.
:param n_epochs:
(Optional) An int representing the number of epochs you wish to
train the model for. Default: 1.
:param batch_size:
(Optional) An int representing the batch size. Default: 1.
:param learning_rate:
(Optional) A float representing the starting learning rate during
training. Default: 3e-4.
:param lr_gamma:
(Optional) A float representing the learning rate decay multiplier
per fit epoch. Default: None.
:param loss:
(Optional) A Callable representing the loss function for the VAE.
Default: None (takes it from losses.get_gan_loss())
:param device:
(Optional) A string representing the device you wish to fit on.
Default: "cpu".
:param discriminator_batches_till_step:
An int representing the number of batches to wait before updating
the discriminator parameters.
:param generator_batches_till_step:
An int representing the number of batches to wait before updating
the generator parameters.
:param verbose:
(Optional) An int representing the level of verbosity you wish to
have which fitting the model. Default: 1 (progress bar).
:return:
Nothing.
"""
self.train()
self.to(device)
if loss is None:
loss = get_gan_loss()
if isinstance(learning_rate, int) or isinstance(learning_rate, float):
learning_rate = tuple([learning_rate] * 2)
if lr_gamma is None:
lr_gamma = 1.
if isinstance(lr_gamma, int) or isinstance(lr_gamma, float):
lr_gamma = tuple([lr_gamma] * 2)
loss = {k: loss for k in self.component_names}
losses = {k: list() for k in self.component_names}
optimizer = dict()
scheduler = dict()
for key, component, lr, gamma in zip(self.component_names,
[self.discriminator,
self.generator],
learning_rate,
lr_gamma):
optimizer[key] = torch.optim.Adam(component.parameters(),
lr=lr)
scheduler[key] = torch.optim \
.lr_scheduler \
.ExponentialLR(optimizer[key], gamma=gamma)
tr_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
shuffle=True)
for epoch in range(n_epochs):
iterator = tqdm(tr_loader, file=stdout)\
if verbose > 0\
else tr_loader
for i, (x, _) in enumerate(iterator):
curr_batch_size = x.shape[0]
noise = torch.randn((curr_batch_size, self.generator.input_size,
1, 1),
device=device)
x_real = x.to(device)
x_fake = self.generator.forward(noise)
y_real = torch.ones(curr_batch_size, device=device).float()
y_fake = torch.zeros(curr_batch_size, device=device).float()
y_dis_real = self.discriminator.forward(x_real)
y_dis_fake = self.discriminator.forward(x_fake.detach())
loss_dis = loss[self.component_names[0]](y_dis_real, y_real) +\
loss[self.component_names[0]](y_dis_fake, y_fake)
loss_dis.backward()
losses[self.component_names[0]].append(float(loss_dis))
if (i + 1) % discriminator_batches_till_step == 0:
optimizer[self.component_names[0]].step()
# --------------------------------------------------------------
y_dis_fake = self.discriminator.forward(x_fake)
loss_gen = loss[self.component_names[1]](y_dis_fake, y_real)
loss_gen.backward()
losses[self.component_names[1]].append(float(loss_gen))
if (i + 1) % generator_batches_till_step == 0:
optimizer[self.component_names[1]].step()
if verbose > 0:
iterator.set_description(
f"Epoch {epoch + 1} "
f"DisLoss: "
f"{np.mean(losses[self.component_names[0]]):.04f} "
f"GenLoss: "
f"{np.mean(losses[self.component_names[1]]):.04f}")
for component_name in self.component_names:
optimizer[component_name].zero_grad()
for component_name in self.component_names:
scheduler[component_name].step()
losses[component_name].clear()
def plot_generations(self,
n_samples: int = 4,
shape: Tuple[int, int] = None,
base_size: Tuple[int, int] = (1.6, 1.6),
device: str = "cpu"):
"""
Plots generated images given a number of samples from the dataset.
:param n_samples:
(Optional) An int representing the number of samples you wish to
plot. Default: 4.
:param shape:
(Optional) The shape of the subplots. Default: None (calculates
the shape dynamically, focusing on a square shape with a width of
at most 10).
:param base_size:
(Optional) A Tuple[float, float] containing the base sizes of a
subplot. Default: (1.6, 1.6).
:param device:
(Optional) A string representing the device you wish to fit on.
Default: "cpu".
:return:
A Tuple[matplotlib.pyplot.Figure, matplotlib.pyplot.Axes]
containing the plot information.
"""
self.eval()
self.to(device)
if n_samples is None or n_samples < 3:
n_samples = 4
if shape is None or shape[0] * shape[1] < n_samples:
width = min(int((n_samples ** 0.5) + 1e-6), 10)
height = (n_samples + width - 1) // width
shape = (height, width)
fig, ax = plt.subplots(*shape, figsize=(base_size[0] * shape[0],
base_size[1] * shape[1]))
with torch.no_grad():
samples = self.generator.forward(
torch.randn(n_samples, 100, 1, 1, device=device))\
.view(n_samples, 64, 64)\
.data\
.cpu()\
.numpy()
for i in range(n_samples):
            curr_axis = ax[i // shape[1]][i % shape[1]]  # index by grid width so non-square grids stay in range
curr_axis.axis("off")
curr_axis.imshow(samples[i], vmin=0, vmax=1)
return fig, ax
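# A minimal usage sketch (assumes a dataset yielding (image, label) pairs of
# 64x64 single-channel images scaled to [-1, 1], since the generator ends in tanh):
#   gan = DCGAN(Discriminator(), Generator())
#   gan.fit(dataset, n_epochs=5, batch_size=64, learning_rate=2e-4, device="cuda")
#   fig, ax = gan.plot_generations(n_samples=16, device="cuda")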
| 34.693487 | 80 | 0.533186 | 2,069 | 18,110 | 4.537458 | 0.152731 | 0.047934 | 0.015339 | 0.02386 | 0.522369 | 0.480507 | 0.444078 | 0.395611 | 0.371538 | 0.371538 | 0 | 0.021983 | 0.374544 | 18,110 | 521 | 81 | 34.760077 | 0.806833 | 0.305798 | 0 | 0.35 | 0 | 0 | 0.016213 | 0.008194 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079167 | false | 0 | 0.041667 | 0.041667 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63e352cd8b57ca9e170046df83f9f887ff2cb243 | 2,024 | py | Python | Main.py | ahmetmutlugun/clone_bot | ced61f9af626345753f0a7decc512a9f1d8a224b | [
"Apache-2.0"
] | null | null | null | Main.py | ahmetmutlugun/clone_bot | ced61f9af626345753f0a7decc512a9f1d8a224b | [
"Apache-2.0"
] | null | null | null | Main.py | ahmetmutlugun/clone_bot | ced61f9af626345753f0a7decc512a9f1d8a224b | [
"Apache-2.0"
] | null | null | null | import discord
from discord.ext import commands
from discord.ext.commands import Bot
client: Bot = commands.Bot(command_prefix=['-'], case_insensitive=True, description="Train an AI to send messages.")
guilds = []
guild_ids = []
@client.event
async def on_ready():
await client.change_presence(activity=discord.Game("-" + "help"))
print("Bot Ready")
for guild in client.guilds:
guilds.append(guild)
guild_ids.append(guild.id)
@client.command(brief='Displays bot ping')
async def ping(ctx):
await ctx.send(f"My ping is: {round(client.latency * 1000)}ms")
@client.event
async def on_message(ctx):
if check_user(str(ctx.author.id)) and "-train off" not in ctx.content:
with open("messages.txt", "a") as f:
f.write(str(ctx.author.id) + ": " + ctx.content + "\n")
await client.process_commands(ctx)
@client.command(brief='Turn on or off the AI training.')
async def train(ctx, preference=None):  # default None so the check below can fire instead of a MissingRequiredArgument error
if preference is None:
await ctx.send("Please pick \"on\" or \"off\" to train the bot.")
return
if preference.lower() == "off":
remove_user(str(ctx.author.id))
await ctx.send("Your messages will no longer be recorded.")
return
elif preference.lower() == "on":
add_user(str(ctx.author.id))
await ctx.send("Your messages will now be used to train this bot.")
return
def add_user(author_id):
if not check_user(author_id):
with open("whitelist.txt", "a") as f:
f.write(str(author_id) + "\n")
def remove_user(author_id):
with open("whitelist.txt", "r") as f:
lines = f.readlines()
with open("whitelist.txt", "w") as f:
for line in lines:
if line.strip("\n") != author_id:
f.write(line)
def check_user(author_id):
    with open("whitelist.txt", "r") as f:
        data = f.read()
    return author_id in data
with open("discord.key", "r") as fl:
    token = fl.read().strip()  # strip a trailing newline, if any
client.run(token)
| 27.351351 | 116 | 0.628953 | 298 | 2,024 | 4.197987 | 0.348993 | 0.070344 | 0.038369 | 0.044764 | 0.195843 | 0.147882 | 0.147882 | 0.073541 | 0.073541 | 0.073541 | 0 | 0.002559 | 0.227767 | 2,024 | 73 | 117 | 27.726027 | 0.797825 | 0 | 0 | 0.089286 | 0 | 0 | 0.18083 | 0.010375 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0 | 0.053571 | 0 | 0.196429 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63e40a9d8fa982ae7027fd1e676365c6c684bb99 | 6,920 | py | Python | tester/src/sirv_gnrl_fifo.py | giraffe50/RISCV-M4F | 1b1ed756a8ea02c2d2a11d8472f8603847170ad8 | [
"Apache-2.0"
] | 3 | 2021-01-13T03:41:14.000Z | 2021-03-23T11:31:48.000Z | tester/src/sirv_gnrl_fifo.py | scutdig/LG-32HP | 1b1ed756a8ea02c2d2a11d8472f8603847170ad8 | [
"Apache-2.0"
] | 1 | 2021-03-01T09:32:59.000Z | 2021-03-01T09:32:59.000Z | tester/src/sirv_gnrl_fifo.py | scutdig/LG-32HP | 1b1ed756a8ea02c2d2a11d8472f8603847170ad8 | [
"Apache-2.0"
] | 4 | 2021-01-07T03:01:26.000Z | 2021-02-28T02:20:10.000Z | from pyhcl import *
from .sirv_gnrl_dffl import sirv_gnrl_dffl
from .sirv_gnrl_dfflr import sirv_gnrl_dfflr
from .sirv_gnrl_dfflrs import sirv_gnrl_dfflrs
def carray(el, n):
    """Return a list containing `el` repeated `n` times (avoids shadowing the len builtin)."""
    return [el] * n
def sirv_gnrl_fifo(CUT_READY: int = 0,
MSKO: int = 0,
DP: int = 8,
DW: int = 32):
print("sirv_gnrl_fifo: DP = ", DP)
class SirvGnrlFifo(Module):
# # /////////////////////////////////////////
# # default parameters
# CUT_READY = 0
# MSKO = 0
# DP = 8
# DW = 32
# # /////////////////////////////////////////
io = IO(
# clk=Input(U.w(1)),
# rst_n=Input(U.w(1)),
i_vld=Input(U.w(1)),
i_rdy=Output(U.w(1)),
i_dat=Input(U.w(DW)),
o_vld=Output(U.w(1)),
o_rdy=Input(U.w(1)),
o_dat=Output(U.w(DW))
)
# //////////////////////////////////////////////////////////////////
# # Intermediate variables' definition
#
rptr_vec_nxt = Wire(U.w(DP))
rptr_vec_r = Wire(U.w(DP))
rptr_vec_r_vec = Wire(Vec(DP, U.w(1)))
wptr_vec_nxt = Wire(U.w(DP))
wptr_vec_r = Wire(U.w(DP))
wptr_vec_r_vec = Wire(Vec(DP, U.w(1)))
i_vec = Wire(U.w(DP+1))
o_vec = Wire(U.w(DP+1))
vec_nxt = Wire(U.w(DP+1))
vec_r = Wire(U.w(DP+1))
vec_r_vec = Wire(Vec(DP+1, U.w(1)))
fifo_rf_r = Wire(Vec(DP, U.w(DW)))
fifo_rf_en = Wire(Vec(DP, U.w(1)))
# //////////////////////////////////////////////////////////////////
if DP == 0:
io.o_vld <<= io.i_vld
io.i_rdy <<= io.o_rdy
io.o_dat <<= io.i_dat
else:
# //////////////////////////////////////////////////////////////////
# Instantiate two submodules and their called parameters
#
rptr_vec_0_dfflrs = sirv_gnrl_dfflrs(1)
wptr_vec_0_dfflrs = sirv_gnrl_dfflrs(1)
if DP > 1:
rptr_vec_31_dfflr = sirv_gnrl_dfflr(DP-1)
wptr_vec_31_dfflr = sirv_gnrl_dfflr(DP-1)
vec_0_dfflrs = sirv_gnrl_dfflrs(1)
vec_31_dfflr = sirv_gnrl_dfflr(DP)
fifo_rf_dffl = [sirv_gnrl_dffl(DW).io for _ in range(0, DP)]
# //////////////////////////////////////////////////////////////////
# //////////////////////////////////////////////////////////////////
# change VEC to Wire
# (1) rptr_vec_r
rptr_vec_r_vec[0] <<= rptr_vec_0_dfflrs.io.qout
if DP > 1:
for i in range(1, DP):
rptr_vec_r_vec[i] <<= rptr_vec_31_dfflr.io.qout[i-1]
rptr_vec_r <<= CatVecH2L(rptr_vec_r_vec)
# (2) wptr_vec_r
wptr_vec_r_vec[0] <<= wptr_vec_0_dfflrs.io.qout
if DP > 1:
for i in range(1, DP):
wptr_vec_r_vec[i] <<= wptr_vec_31_dfflr.io.qout[i-1]
wptr_vec_r <<= CatVecH2L(wptr_vec_r_vec)
# (3) vec_r
vec_r_vec[0] <<= vec_0_dfflrs.io.qout
for i in range(1, DP+1):
vec_r_vec[i] <<= vec_31_dfflr.io.qout[i-1]
vec_r <<= CatVecH2L(vec_r_vec)
# //////////////////////////////////////////////////////////////////
wen = io.i_vld & io.i_rdy
ren = io.o_vld & io.o_rdy
if DP == 1:
rptr_vec_nxt <<= U.w(DP)(1)
else:
ary0 = carray(U.w(1)(0), DP-1)
rptr_vec_nxt <<= Mux(rptr_vec_r[DP-1] == U.w(1)(1), CatBits(*ary0, U.w(1)(1)), (rptr_vec_r << U(1)))
if DP == 1:
wptr_vec_nxt <<= U.w(DP)(1)
else:
ary1 = carray(U.w(1)(0), DP-1)
wptr_vec_nxt <<= Mux(wptr_vec_r[DP-1] == U.w(1)(1), CatBits(*ary1, U.w(1)(1)), (wptr_vec_r << U(1)))
# rptr_vec_0_dfflrs connect
rptr_vec_0_dfflrs.io.lden <<= ren
rptr_vec_0_dfflrs.io.dnxt <<= rptr_vec_nxt[0]
# rptr_vec_r_vec[0] <<= rptr_vec_0_dfflrs.io.qout
# wptr_vec_0_dfflrs connect
wptr_vec_0_dfflrs.io.lden <<= wen
wptr_vec_0_dfflrs.io.dnxt <<= wptr_vec_nxt[0]
# wptr_vec_r_vec[0] <<= wptr_vec_0_dfflrs.io.qout
if DP > 1:
# rptr_vec_31_dfflr connect
rptr_vec_31_dfflr.io.lden <<= ren
rptr_vec_31_dfflr.io.dnxt <<= rptr_vec_nxt[DP-1:1]
# for i in range(1, DP):
# rptr_vec_r_vec[i] <<= rptr_vec_31_dfflr.io.qout[i-1]
# wptr_vec_31_dfflr connect
wptr_vec_31_dfflr.io.lden <<= wen
wptr_vec_31_dfflr.io.dnxt <<= wptr_vec_nxt[DP-1:1]
# for i in range(1, DP):
# wptr_vec_r_vec[i] <<= wptr_vec_31_dfflr.io.qout[i-1]
# next part
vec_en = ren ^ wen
vec_nxt <<= Mux(wen == U.w(1)(1), CatBits(vec_r[DP-1:0], U.w(1)(1)), (vec_r >> U(1)))
# vec_0_dfflrs connect
vec_0_dfflrs.io.lden <<= vec_en
vec_0_dfflrs.io.dnxt <<= vec_nxt[0]
# vec_31_dfflr connect
vec_31_dfflr.io.lden <<= vec_en
vec_31_dfflr.io.dnxt <<= vec_nxt[DP:1]
i_vec <<= CatBits(U.w(1)(0), vec_r[DP:1])
o_vec <<= CatBits(U.w(1)(0), vec_r[DP:1])
if DP == 1:
if CUT_READY == 1:
io.i_rdy <<= (~i_vec[DP-1])
else:
io.i_rdy <<= (~i_vec[DP-1]) | ren
else:
io.i_rdy <<= (~i_vec[DP-1])
for i in range(0, DP):
fifo_rf_en[i] <<= wen & wptr_vec_r[i]
fifo_rf_dffl[i].lden <<= fifo_rf_en[i]
fifo_rf_dffl[i].dnxt <<= io.i_dat
fifo_rf_r[i] <<= fifo_rf_dffl[i].qout
ary2 = carray(U.w(1)(0), DW)
for j in range(0, DP):
ary3 = carray(rptr_vec_r[j], DW)
mux_rdat = CatBits(*ary2) | (CatBits(*ary3) & fifo_rf_r[j])
if MSKO == 1:
ary4 = carray(io.o_vld, DW)
io.o_dat <<= CatBits(*ary4) & mux_rdat
else:
io.o_dat <<= mux_rdat
io.o_vld <<= (o_vec[0])
return SirvGnrlFifo()
# DW = (1+AW+DW+(DW/8)+1+1+2+2+2+USR_W) = 1+32+32+4+1+1+2+2+2+1=78
if __name__ == '__main__':
f = Emitter.dump(Emitter.emit(sirv_gnrl_fifo(1, 0, 1, 78)), "sirv_gnrl_fifo.fir")
Emitter.dumpVerilog(f)
| 34.949495 | 117 | 0.434104 | 980 | 6,920 | 2.762245 | 0.104082 | 0.051718 | 0.023273 | 0.048762 | 0.526413 | 0.37089 | 0.318803 | 0.210935 | 0.179165 | 0.146657 | 0 | 0.045747 | 0.371387 | 6,920 | 197 | 118 | 35.126904 | 0.576552 | 0.175578 | 0 | 0.144068 | 0 | 0 | 0.008603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016949 | false | 0 | 0.033898 | 0 | 0.194915 | 0.008475 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63e478f68db4b89cfa3ff715867177085fa62d17 | 6,600 | py | Python | data.py | QTIM-Lab/rop | 1befc7c2910daa151105fdd2f5fac785d0515f48 | [
"MIT"
] | 1 | 2021-07-29T15:51:35.000Z | 2021-07-29T15:51:35.000Z | data.py | QTIM-Lab/rop | 1befc7c2910daa151105fdd2f5fac785d0515f48 | [
"MIT"
] | null | null | null | data.py | QTIM-Lab/rop | 1befc7c2910daa151105fdd2f5fac785d0515f48 | [
"MIT"
] | null | null | null | # ==================================================================== #
# #
# DATASET / DATALOADER #
# #
# ==================================================================== #
from pathlib import Path
from typing import (
Callable, Dict, # Literal, # requires python 3.8
List,
Optional, Sequence, Union
)
import torch
import torchvision
import monai
import monai.transforms as mtf
import pandas as pd
import numpy as np
from utils import first, index
# SPLIT = Literal['train', 'valid', 'test']
SPLIT = {'train', 'valid', 'test'}
def type_check(inst, inst_type):
err_msg = f'Got type {type(inst)}. Need {inst_type}.'
assert isinstance(inst, inst_type), err_msg
class TORCH_DS(torch.utils.data.Dataset):
def __init__(self,
data: pd.DataFrame,
base: Path,
augment: bool = True,
transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(p=0.5),
torchvision.transforms.RandomVerticalFlip(p=0.5),
torchvision.transforms.RandomRotation(degrees=30),
torchvision.transforms.RandomResizedCrop(size=[224, 224],
scale=(0.8, 1.2),
ratio=(0.7, 1.3)),
#torchvision.transforms.RandomAffine(degrees=10),
torchvision.transforms.RandomPerspective(distortion_scale=0.5,
p=0.5),
]),
invert_prob: float = 0.3,
weight_attr: Optional[str] = None,
weight_map: Optional[Dict[str, int]] = None,
):
super().__init__()
type_check(data, pd.DataFrame)
type_check(base, Path)
type_check(augment, bool)
assert 0 <= invert_prob <= 1
self.data = data
self.base = base
self.augment = augment
self.transforms = transforms
self.invert_prob = invert_prob
self.weight_attr = weight_attr
self.weight_map = weight_map
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index: int):
ret_attr = self.weight_attr
row = self.data.iloc[index]
img = row.image
lab = row.label
if ret_attr is not None:
attr = getattr(row, ret_attr)
if self.weight_map is not None:
attr = self.weight_map[attr]
x = torch.tensor(np.moveaxis(np.load(self.base / img), -1, 0))
if self.augment:
x = self.random_augment(x)
x = x.float()
y = torch.tensor(lab, dtype=torch.long)
assert not torch.isnan(x).any(), f'NaN issue at index: {index}'
        assert not torch.isinf(x).any(), f'Inf issue at index: {index}'
return (x, y, attr) if ret_attr else (x, y)
def random_augment(self, x: torch.Tensor):
for i in range(1, 3):
k = torch.randint(low=1, high=20, size=[1])
x_i = torch.exp(k * x[i])
x_i -= x_i.mean()
x_i /= x_i.max() - x_i.min()
x[i] = x_i
if torch.rand(size=[1]) < self.invert_prob:
x *= -1
x = self.transforms(x)
return x
class MONAI_DS(monai.data.Dataset):
def __init__(self,
df: pd.DataFrame,
example: str = 'image',
target: str = 'label',
augment: bool = True,
transform: Optional[Callable] = None,
):
data: list = [
{
'image': getattr(row, example),
'label': getattr(row, target),
} for _, row in df.iterrows()
]
if transform is None:
transform = [
mtf.LoadImaged('image'),
mtf.EnsureChannelFirstd('image'),
mtf.RepeatChanneld('image', 3),
mtf.ScaleIntensityd('image'),
]
if augment:
transform += [
mtf.RandFlipd('image', prob=0.5, spatial_axis=[0]),
mtf.RandFlipd('image', prob=0.5, spatial_axis=[1]),
mtf.RandAffined('image', prob=0.3,
translate_range=(10, 10),
scale_range=(0.1, 0.1),
shear_range=(0.1, 0.1),
padding_mode='zeros',
mode='bilinear',
),
mtf.RandRotated('image',
prob=0.5,
range_x=1,
mode='bilinear',
),
]
transform += [
mtf.ToTensord('image'),
]
transform = mtf.Compose(transform)
super().__init__(data=data, transform=transform)
def get_weighted_sampler(weights: np.ndarray, batch_size: int = 4):
"""
    weights -- per-sample sampling weights (must be the same length as the dataset)
"""
sampler = torch.utils.data.WeightedRandomSampler(weights,
len(weights),
replacement=True,
)
batch_sampler = torch.utils.data.BatchSampler(sampler,
batch_size=batch_size,
drop_last=True,
)
return batch_sampler
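# Minimal usage sketch for get_weighted_sampler (labels, class weights, and the
# dataset below are hypothetical; `weights` needs one entry per sample):
#
#   labels = np.array([0, 0, 1])
#   class_weights = {0: 1.0, 1: 2.0}              # sample the rarer class more often
#   weights = np.array([class_weights[l] for l in labels])
#   batch_sampler = get_weighted_sampler(weights, batch_size=2)
#   loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler)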
class InfiniteDataLoader(torch.utils.data.DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Initialize an iterator over the dataset.
self.dataset_iterator = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.dataset_iterator)
except StopIteration:
# Dataset exhausted, use a new fresh iterator.
self.dataset_iterator = super().__iter__()
batch = next(self.dataset_iterator)
return batch
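if __name__ == "__main__":
    # Self-contained demo of InfiniteDataLoader on toy tensors (not the project's
    # real data): iteration silently restarts instead of raising StopIteration.
    toy = torch.utils.data.TensorDataset(torch.arange(10.0))
    loader = InfiniteDataLoader(toy, batch_size=4)
    batches = iter(loader)
    for _ in range(5):  # 5 steps > the 3 batches in one pass over 10 samples
        print(next(batches))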
| 37.288136 | 84 | 0.455152 | 627 | 6,600 | 4.620415 | 0.301435 | 0.006213 | 0.01933 | 0.005523 | 0.11253 | 0.023473 | 0.023473 | 0.023473 | 0 | 0 | 0 | 0.017947 | 0.425909 | 6,600 | 176 | 85 | 37.5 | 0.746635 | 0.095 | 0 | 0.111111 | 0 | 0 | 0.032649 | 0 | 0 | 0 | 0 | 0 | 0.027778 | 1 | 0.069444 | false | 0 | 0.0625 | 0.013889 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63f00ee89c5fd40dc28452a69994969bb19dc442 | 6,876 | py | Python | packages/vaex-core/vaex/file/cache.py | lrq3000/vaex | 00c83d1fe2b73330705ef63e649abc9dfc8f2478 | [
"MIT"
] | null | null | null | packages/vaex-core/vaex/file/cache.py | lrq3000/vaex | 00c83d1fe2b73330705ef63e649abc9dfc8f2478 | [
"MIT"
] | null | null | null | packages/vaex-core/vaex/file/cache.py | lrq3000/vaex | 00c83d1fe2b73330705ef63e649abc9dfc8f2478 | [
"MIT"
] | null | null | null | try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import logging
import os
import mmap
import numpy as np
import vaex.utils
import vaex.file
DEFAULT_BLOCK_SIZE = 1024*1024*1 # 1mb by default
logger = logging.getLogger("vaex.file.cache")
class MMappedFile:
"""Small wrapper around a memory mapped file"""
def __init__(self, path, length, dtype=np.uint8):
self.path = path
self.length = length
if not os.path.exists(path):
with open(self.path, 'wb') as fp:
fp.seek(self.length-1)
fp.write(b'\00')
fp.flush()
self.fp = open(self.path, 'rb+')
kwargs = {}
if vaex.utils.osname == "windows":
kwargs["access"] = mmap.ACCESS_WRITE
else:
kwargs["prot"] = mmap.PROT_WRITE
self.mmap = mmap.mmap(self.fp.fileno(), self.length)
self.memoryview = memoryview(self.mmap)
self.data = np.frombuffer(self.mmap, dtype=dtype, count=self.length)
def __getitem__(self, item):
return self.memoryview.__getitem__(item)
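# Usage sketch (hypothetical path; the backing file is created and zero-filled
# on first use):
#
#   buf = MMappedFile("/tmp/cache.bin", length=1024)
#   buf.data[0:4] = np.frombuffer(b"\xde\xad\xbe\xef", dtype=np.uint8)
#   assert bytes(buf[0:4]) == b"\xde\xad\xbe\xef"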
def _to_block_ceil(index, block_size):
return (index + block_size - 1) // block_size
def _to_block_floor(index, block_size):
return index // block_size
def _to_index(block, block_size):
return block * block_size
class CachedFile:
def __init__(self, file, path=None, cache_dir=None, block_size=DEFAULT_BLOCK_SIZE, data_file=None, mask_file=None):
"""Decorator that wraps a file object (typically a s3) by caching the content locally on disk.
The standard location for the cache is: ~/.vaex/file-cache/<protocol (e.g. s3)>/path/to/file.ext
Arguments:
:file file or callable: if callable, invoking it should give a file like object
:path str: path of file, defaults of file.name
:cache_dir str: path of cache dir, defaults to ~/.vaex/file-cache
"""
self.name = path
self.path = path
self.file = file
self.cache_dir = cache_dir
self.block_size = block_size
self.block_reads = 0
self.reads = 0
self.loc = 0
if data_file is None or mask_file is None:
o = urlparse(path)
if cache_dir is None:
self.cache_dir_path = vaex.utils.get_private_dir('file-cache', o.scheme, o.netloc, o.path[1:])
else:
# this path is used for testing
self.cache_dir_path = os.path.join(cache_dir, 'file-cache', o.scheme, o.netloc, o.path[1:])
if not os.path.exists(self.cache_dir_path):
os.makedirs(self.cache_dir_path)
self.data_path = os.path.join(self.cache_dir_path, 'data')
self.mask_path = os.path.join(self.cache_dir_path, 'mask')
# if possible, we avoid using the file
if os.path.exists(self.data_path):
with open(self.data_path, 'rb') as f:
f.seek(0, 2)
self.length = f.tell()
else:
self._use_file()
self.file.seek(0, 2)
self.length = self.file.tell()
self.mask_length = _to_block_ceil(self.length, self.block_size)
            logger.debug('cache path: %s', self.cache_dir_path)
self.data_file = MMappedFile(self.data_path, self.length)
self.mask_file = MMappedFile(self.mask_path, self.mask_length)
else:
self.data_file = data_file
self.mask_file = mask_file
self.length = self.data_file.length
self.mask_length = self.mask_file.length
def dup(self):
if callable(self.file):
file = self.file
else:
file = lambda: vaex.file.dup(self.file)
return CachedFile(file, self.path, self.cache_dir, self.block_size, data_file=self.data_file, mask_file=self.mask_file)
def tell(self):
return self.loc
def seek(self, loc, whence=0):
if whence == 0:
self.loc = loc
elif whence == 1:
self.loc = self.loc + loc
elif whence == 2:
self.loc = self.length + loc
assert (self.loc >= 0) and (self.loc <= self.length)
def _use_file(self):
if callable(self.file):
self.file = self.file()
def read(self, length=-1):
start = self.loc
end = self.loc + length if length != -1 else self.length
self._ensure_cached(start, end)
self.loc = end
# we have no other option than to return a copy of the data here
return self.data_file.data[start:end].view('S1').tobytes()
    def __readinto(self, buffer):
        start = self.loc
        end = start + len(buffer)
        self._ensure_cached(start, end)
        buffer[:] = self.data_file.data[start:end]
def _as_numpy(self, offset, byte_length, dtype):
# quick route that avoids memory copies
self._ensure_cached(offset, offset+byte_length)
return self.data_file.data[offset:offset+byte_length].view(dtype)
def _fetch_blocks(self, block_start, block_end):
start_blocked = _to_index(block_start, self.block_size)
end_blocked = min(self.length, _to_index(block_end, self.block_size))
self._use_file()
self.file.seek(start_blocked)
bytes_read = self.file.readinto(self.data_file[start_blocked:end_blocked])
expected = (end_blocked - start_blocked)
assert bytes_read == expected, f'Read {bytes_read}, expected {expected} ({start_blocked}-{end_blocked} out of {self.length})'
self.mask_file.data[block_start:block_end] = 1
self.reads += 1
self.block_reads += block_end - block_start
def _ensure_cached(self, start, end):
block_start = _to_block_floor(start, self.block_size)
block_end = _to_block_ceil(end, self.block_size)
missing = self.mask_file.data[block_start:block_end] == 0
if np.all(missing):
self._fetch_blocks(block_start, block_end)
elif np.any(missing):
i = block_start
done = False
while not done:
# find first block that is not cached
while i < block_end and self.mask_file.data[i] == 1:
i += 1
if i == block_end:
break
# find block that *is* cached
j = i + 1
while j < block_end and self.mask_file.data[j] == 0:
j += 1
self._fetch_blocks(i, j)
i = j
def close(self):
# if it is callable, the file is never opened
if not callable(self.file):
self.file.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
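# Usage sketch for CachedFile (paths and the opener below are placeholders; any
# file-like opener works, and the callable is only invoked on a cache miss):
#
#   def open_remote():
#       return open("/path/to/remote-copy.bin", "rb")   # stand-in for an s3 open
#
#   with CachedFile(open_remote, path="s3://bucket/key.bin", block_size=1024) as f:
#       header = f.read(16)    # fetches and caches only the blocks it touches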
| 35.443299 | 133 | 0.596131 | 939 | 6,876 | 4.171459 | 0.197018 | 0.041358 | 0.027572 | 0.028593 | 0.209344 | 0.116416 | 0.080163 | 0.049017 | 0.016339 | 0.016339 | 0 | 0.008735 | 0.300756 | 6,876 | 193 | 134 | 35.626943 | 0.805948 | 0.105876 | 0 | 0.103448 | 0 | 0 | 0.029107 | 0.004933 | 0 | 0 | 0 | 0 | 0.013793 | 1 | 0.124138 | false | 0 | 0.062069 | 0.041379 | 0.262069 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63f444046bb1df157608c2a56987485735098de5 | 1,810 | py | Python | select_language.py | elviscruz45/Selenium | 4959b552fe3658802663520fc817f5e3c86aa2b7 | [
"MIT"
] | null | null | null | select_language.py | elviscruz45/Selenium | 4959b552fe3658802663520fc817f5e3c86aa2b7 | [
"MIT"
] | null | null | null | select_language.py | elviscruz45/Selenium | 4959b552fe3658802663520fc817f5e3c86aa2b7 | [
"MIT"
] | null | null | null | import unittest
from selenium import webdriver
# submodule for working with dropdowns
from selenium.webdriver.support.ui import Select
class LanguageOptions(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(executable_path = r'./chromedriver')
driver = self.driver
driver.implicitly_wait(30)
driver.maximize_window()
driver.get("http://demo-store.seleniumacademy.com/")
def test_select_language(self):
        # the order matches how the options appear on the page
exposed_options = ['English', 'French', 'German']
        # to store the options we collect
active_options = []
        # to access the dropdown options
select_language = Select(self.driver.find_element_by_id('select-language'))
        # check that the expected number of options is present;
        # 'options' gives direct access to the dropdown's options
self.assertEqual(3, len(select_language.options))
for option in select_language.options:
active_options.append(option.text)
        # verify that the exposed and active option lists are identical
self.assertListEqual(exposed_options,active_options)
        # verify that "English" is the first selected option of the dropdown
self.assertEqual('English', select_language.first_selected_option.text)
        # select "German" by its visible text
select_language.select_by_visible_text('German')
        # verify that the site switched to German by asking Selenium
        # whether the site URL contains the expected store parameter
self.assertTrue('store=german' in self.driver.current_url)
select_language = Select(self.driver.find_element_by_id('select-language'))
select_language.select_by_index(0)
def tearDown(self):
self.driver.implicitly_wait(3)
self.driver.close()
if __name__ == "__main__":
unittest.main(verbosity = 2) | 36.2 | 89 | 0.781215 | 245 | 1,810 | 5.608163 | 0.493878 | 0.101892 | 0.07278 | 0.021834 | 0.11936 | 0.085881 | 0.085881 | 0.085881 | 0.085881 | 0.085881 | 0 | 0.003824 | 0.133149 | 1,810 | 50 | 90 | 36.2 | 0.871893 | 0.324309 | 0 | 0.071429 | 0 | 0 | 0.11047 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.107143 | false | 0 | 0.107143 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63f4edbce00fde34ebff1d87a9721442ae30e42f | 819 | py | Python | tests/test_control.py | chaostoolkit-incubator/chaostoolkit-opentracing | c3b4cf755f81db40a4a5fbf342fc6d70455eee42 | [
"Apache-2.0"
] | 4 | 2019-03-06T07:02:28.000Z | 2021-12-14T05:16:46.000Z | tests/test_control.py | chaostoolkit-incubator/chaostoolkit-opentracing | c3b4cf755f81db40a4a5fbf342fc6d70455eee42 | [
"Apache-2.0"
] | 2 | 2019-05-23T16:53:09.000Z | 2019-06-20T10:10:59.000Z | tests/test_control.py | chaostoolkit-incubator/chaostoolkit-opentracing | c3b4cf755f81db40a4a5fbf342fc6d70455eee42 | [
"Apache-2.0"
] | 2 | 2019-04-27T20:17:43.000Z | 2019-11-29T21:44:21.000Z | # -*- coding: utf-8 -*-
from unittest.mock import patch
import opentracing
from chaoslib.types import Configuration
from chaostracing.control import cleanup_control, configure_control
def test_create_noop_tracer(configuration: Configuration):
assert opentracing.is_global_tracer_registered() is False
tracer = configure_control()
assert opentracing.is_global_tracer_registered() is True
assert isinstance(tracer, opentracing.Tracer)
assert tracer == opentracing.global_tracer()
def test_cleanup_control(configuration: Configuration):
tracer = opentracing.global_tracer()
tracer.start_active_span("boom")
scope = tracer.scope_manager.active
assert scope is not None
with patch.object(scope, "close") as close:
cleanup_control()
assert close.call_count == 1
| 30.333333 | 67 | 0.765568 | 98 | 819 | 6.183673 | 0.438776 | 0.079208 | 0.062706 | 0.082508 | 0.141914 | 0.141914 | 0.141914 | 0 | 0 | 0 | 0 | 0.002899 | 0.157509 | 819 | 26 | 68 | 31.5 | 0.875362 | 0.025641 | 0 | 0 | 0 | 0 | 0.011307 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63f9a97f8a1d12a0c494ff5ebbdc9c80c2a7869b | 1,752 | py | Python | sensor/components/rf24.py | mattgrogan/ledmatrix | 3a54de98ab107cf1266404400c7eb576007c8b17 | [
"MIT"
] | 1 | 2017-10-27T20:27:13.000Z | 2017-10-27T20:27:13.000Z | sensor/components/rf24.py | mattgrogan/ledmatrix | 3a54de98ab107cf1266404400c7eb576007c8b17 | [
"MIT"
] | null | null | null | sensor/components/rf24.py | mattgrogan/ledmatrix | 3a54de98ab107cf1266404400c7eb576007c8b17 | [
"MIT"
] | null | null | null | import logging
import time
import requests
import influxdb
from nrf24 import NRF24
log = logging.getLogger("ledmatrix")
class RF24_Sensor(object):
def __init__(self, dbclient):
# Set up the RF24
pipes = [[0xe7, 0xe7, 0xe7, 0xe7, 0xe7],
[0xc2, 0xc2, 0xc2, 0xc2, 0xc2]]
self.radio = NRF24()
self.radio.begin(0, 0, 17, 27)
self.radio.setRetries(15, 15)
self.radio.setPayloadSize(32)
self.radio.setChannel(0x60)
self.radio.setDataRate(NRF24.BR_250KBPS)
self.radio.setPALevel(NRF24.PA_MAX)
self.radio.setAutoAck(1)
self.radio.openWritingPipe(pipes[0])
self.radio.openReadingPipe(1, pipes[1])
self.radio.startListening()
# Set up influxdb
self.dbclient = dbclient
log.info("Started NF24")
def get_msg(self):
msg_str = ""
if self.radio.available():
while self.radio.available():
msg = []
self.radio.read(msg, self.radio.getDynamicPayloadSize())
for n in msg:
# Break on null character
if n == 0:
break
if 32 <= n <= 126:
msg_str += chr(n)
        log.info("Received message %s", msg_str)
return msg_str
def save_value(self, value):
point = {"measurement": "Soil", "fields": {
"humidity": value
}}
try:
self.dbclient.write_points([point])
except requests.exceptions.ConnectionError:
log.critical("Unable to connect to InfluxDB")
def execute(self):
#log.info("Executing rf24")
msg = self.get_msg()
if len(msg) > 0:
log.info("Received RF24 message")
self.save_value(int(msg))
#log.info("Exiting rf24")
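# Minimal wiring sketch (host, port, and database are placeholders; assumes a
# reachable InfluxDB instance and the nRF24 radio wired as configured above):
#
#   from influxdb import InfluxDBClient
#   dbclient = InfluxDBClient(host="localhost", port=8086, database="sensors")
#   sensor = RF24_Sensor(dbclient)
#   while True:
#       sensor.execute()
#       time.sleep(1)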
| 20.372093 | 65 | 0.586758 | 211 | 1,752 | 4.796209 | 0.436019 | 0.133399 | 0.035573 | 0.031621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057304 | 0.292808 | 1,752 | 85 | 66 | 20.611765 | 0.759483 | 0.059932 | 0 | 0 | 0 | 0 | 0.076478 | 0 | 0 | 0 | 0.028278 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.102041 | 0 | 0.22449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63fdf7825ab44c67de691a26081114f91b5fc925 | 9,985 | bzl | Python | cc/private/common.bzl | EtherealWake/tools | 54e81b4cd01c1af34e4180376aac60bb96668865 | [
"0BSD"
] | 1 | 2019-08-13T01:11:11.000Z | 2019-08-13T01:11:11.000Z | cc/private/common.bzl | EtherealWake/tools | 54e81b4cd01c1af34e4180376aac60bb96668865 | [
"0BSD"
] | null | null | null | cc/private/common.bzl | EtherealWake/tools | 54e81b4cd01c1af34e4180376aac60bb96668865 | [
"0BSD"
] | null | null | null | #
# Copyright (c) 2019 Jonathan McGee <broken.source@etherealwake.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Common Constants and Routines for C/C++ Toolchain Construction."""
load(
"@bazel_tools//tools/build_defs/cc:action_names.bzl",
_ACTION_NAMES = "ACTION_NAMES",
)
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool",
"with_feature_set",
)
#
# Constants
#
# Names of All C/C++ Toolchain Actions.
ACTION_NAMES = _ACTION_NAMES
# Names of All C/C++ Toolchain Features.
FEATURE_NAMES = struct(
# Compilation Mode
dbg = "dbg",
fastbuild = "fastbuild",
opt = "opt",
# Linking Mode
dynamic_linking_mode = "dynamic_linking_mode",
static_linking_mode = "static_linking_mode",
# Official Features
fully_static_link = "fully_static_link",
no_legacy_features = "no_legacy_features",
no_stripping = "no_stripping",
parse_showincludes = "parse_showincludes",
per_object_debug_info = "per_object_debug_info",
static_link_cpp_runtimes = "static_link_cpp_runtimes",
supports_dynamic_linker = "supports_dynamic_linker",
supports_fission = "supports_fission",
supports_interface_shared_libraries = "supports_interface_shared_libraries",
supports_pic = "supports_pic",
supports_start_end_lib = "supports_start_end_lib",
# Common/Legacy Features
archiver_flags = "archiver_flags",
compiler_input_flags = "compiler_input_flags",
compiler_output_flags = "compiler_output_flags",
def_file = "def_file",
default_compile_flags = "default_compile_flags",
default_link_flags = "default_link_flags",
dependency_file = "dependency_file",
fission_support = "fission_support",
force_pic_flags = "force_pic_flags",
includes = "includes",
include_paths = "include_paths",
libraries_to_link = "libraries_to_link",
library_search_directories = "library_search_directories",
linkstamps = "linkstamps",
linker_param_file = "linker_param_file",
msvc_env = "msvc_env",
nologo = "nologo",
objcopy_embed_flags = "objcopy_embed_flags",
output_execpath_flags = "output_execpath_flags",
pic = "pic",
preprocessor_defines = "preprocessor_defines",
random_seed = "random_seed",
runtime_library_search_directories = "runtime_library_search_directories",
shared_flag = "shared_flag",
static_libgcc = "static_libgcc",
strip_debug_symbols = "strip_debug_symbols",
sysroot = "sysroot",
user_compile_flags = "user_compile_flags",
user_link_flags = "user_link_flags",
unfiltered_compile_flags = "unfiltered_compile_flags",
)
# C/C++ Toolchain Action Names for Object-Generating Operations.
ALL_COMPILE_ACTIONS = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
]
# C/C++ Toolchain Action Names for Assembler Operations.
ASM_COMPILE_ACTIONS = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
]
# C/C++ Toolchain Action Names for C Compilation Operations.
C_COMPILE_ACTIONS = [
ACTION_NAMES.c_compile,
]
# C/C++ Toolchain Action Names for C++ Compilation Operations.
CPP_COMPILE_ACTIONS = [
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
]
# C/C++ Toolchain Action Names for Preprocessing Operations.
PREPROCESSOR_ACTIONS = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
]
# C/C++ Toolchain Action Names for Linking Operations.
ALL_LINK_ACTIONS = [
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
ACTION_NAMES.cpp_link_executable,
]
#
# General Functions
#
def make_flag_set(actions, flags, with_features = [], **kwargs):
"""Constructs a `flag_set` for the specified action and flags.
Args:
      actions: Actions covered by the `flag_set`.
      flags: Flags for the `flag_group`.
      with_features: Feature-set constraints passed through to the `flag_set`.
      kwargs: Additional arguments for the `flag_group` instance.

    Returns:
      Empty list if `flags` is empty; otherwise, a list containing a
      single instance of `flag_set` combining `actions` and `flags`.
"""
if not flags:
return []
return [flag_set(
actions = actions,
flag_groups = [flag_group(flags = flags, **kwargs)],
with_features = with_features,
)]
def make_mode_flag_set(ctx, name):
"""Constructs a `flag_set` for the specific compilation mode."""
modes = ctx.attr.modes
flag_sets = []
flag_sets += make_flag_set(
ALL_COMPILE_ACTIONS,
modes.get(name, []) + modes.get(name + ".copts", []),
with_features = [with_feature_set(features = [name])],
)
flag_sets += make_flag_set(
ASM_COMPILE_ACTIONS,
modes.get(name, []) + modes.get(name + ".asmopts", []),
with_features = [with_feature_set(features = [name])],
)
flag_sets += make_flag_set(
C_COMPILE_ACTIONS,
modes.get(name, []) + modes.get(name + ".conlyopts", []),
with_features = [with_feature_set(features = [name])],
)
flag_sets += make_flag_set(
CPP_COMPILE_ACTIONS,
modes.get(name, []) + modes.get(name + ".cxxopts", []),
with_features = [with_feature_set(features = [name])],
)
return flag_sets
def make_tool(ctx, path, **kwargs):
"""Constructs a `tool` for the specified File.
CROSSTOOL requires that paths be relative to the package defining the
toolchain. This function will take a `File` input and encode its path to
meet this requirement.
Args:
ctx: `ctx` object from the rule implementation.
path: `File` object to analyze.
kwargs: Additional arguments for the `tool` instance.
Returns:
List with a single instance of `tool`.
"""
depth = ctx.build_file_path.count("/")
if path and hasattr(path, "path"):
path = ("../" * depth) + path.path
return [tool(path = path, **kwargs)]
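# Worked example (hypothetical paths): if the toolchain's BUILD file lives at
# "tools/cc/BUILD" (depth 2) and `path` is a File at "external/gcc/bin/gcc",
# the encoded tool path becomes "../../external/gcc/bin/gcc".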
#
# Common Feature Sets
#
def make_default_compile_flags_feature(ctx, copts = []):
copts = copts + ctx.attr.copts
return feature(
name = FEATURE_NAMES.default_compile_flags,
flag_sets = make_flag_set(ALL_COMPILE_ACTIONS, copts) +
make_flag_set(ASM_COMPILE_ACTIONS, ctx.attr.asmopts) +
make_flag_set(C_COMPILE_ACTIONS, ctx.attr.conlyopts) +
make_flag_set(CPP_COMPILE_ACTIONS, ctx.attr.cxxopts) +
make_mode_flag_set(ctx, "dbg") +
make_mode_flag_set(ctx, "fastbuild") +
make_mode_flag_set(ctx, "opt"),
)
def make_default_link_flags_feature(ctx, linkopts = []):
linkopts = linkopts + ctx.attr.linkopts
modes = ctx.attr.modes
dbg = make_flag_set(
ALL_LINK_ACTIONS,
modes.get("dbg", []) + modes.get("dbg.linkopts", []),
with_features = [with_feature_set(features = ["dbg"])],
)
fast = make_flag_set(
ALL_LINK_ACTIONS,
modes.get("fastbuild", []) + modes.get("fastbuild.linkopts", []),
with_features = [with_feature_set(features = ["fastbuild"])],
)
opt = make_flag_set(
ALL_LINK_ACTIONS,
modes.get("opt", []) + modes.get("opt.linkopts", []),
with_features = [with_feature_set(features = ["opt"])],
)
return feature(
name = FEATURE_NAMES.default_link_flags,
flag_sets = make_flag_set(ALL_LINK_ACTIONS, linkopts) +
dbg + fast + opt,
)
def make_linkstamps_feature(ctx):
return feature(
name = FEATURE_NAMES.linkstamps,
flag_sets = [flag_set(
actions = ALL_LINK_ACTIONS,
flag_groups = [flag_group(
expand_if_available = "linkstamp_paths",
iterate_over = "linkstamp_paths",
flags = ["%{linkstamp_paths}"],
)],
)],
)
def make_unfiltered_compile_flags_feature(ctx):
return feature(
name = FEATURE_NAMES.unfiltered_compile_flags,
flag_sets = [flag_set(
actions = ALL_COMPILE_ACTIONS,
flag_groups = [flag_group(
expand_if_available = "unfiltered_compile_flags",
iterate_over = "unfiltered_compile_flags",
flags = ["%{unfiltered_compile_flags}"],
)],
)],
)
def make_user_compile_flags_feature(ctx):
return feature(
name = FEATURE_NAMES.user_compile_flags,
flag_sets = [flag_set(
actions = ALL_COMPILE_ACTIONS,
flag_groups = [flag_group(
expand_if_available = "user_compile_flags",
iterate_over = "user_compile_flags",
flags = ["%{user_compile_flags}"],
)],
)],
)
def make_user_link_flags_feature(ctx):
return feature(
name = FEATURE_NAMES.user_link_flags,
flag_sets = [flag_set(
actions = ALL_LINK_ACTIONS,
flag_groups = [flag_group(
expand_if_available = "user_link_flags",
iterate_over = "user_link_flags",
flags = ["%{user_link_flags}"],
)],
)],
)
| 33.733108 | 80 | 0.665699 | 1,188 | 9,985 | 5.256734 | 0.19697 | 0.049319 | 0.022898 | 0.025781 | 0.395516 | 0.330184 | 0.273979 | 0.234428 | 0.184948 | 0.128743 | 0 | 0.000522 | 0.232649 | 9,985 | 295 | 81 | 33.847458 | 0.814539 | 0.221332 | 0 | 0.265403 | 0 | 0 | 0.163767 | 0.061347 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042654 | false | 0 | 0 | 0.018957 | 0.090047 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
120014680c2bd2d7ca378ada98745e9de9eb3aee | 7,094 | py | Python | platforms/m3/programming/program_via_i2c.py | lab11/M-ulator | 95b49c6194678c74accca4a20af71380efbcac5f | [
"Apache-2.0",
"MIT"
] | 19 | 2015-01-26T10:47:23.000Z | 2021-08-13T11:07:54.000Z | platforms/m3/programming/program_via_i2c.py | lab11/M-ulator | 95b49c6194678c74accca4a20af71380efbcac5f | [
"Apache-2.0",
"MIT"
] | 14 | 2015-08-24T02:35:46.000Z | 2021-05-05T03:53:44.000Z | platforms/m3/programming/program_via_i2c.py | lab11/M-ulator | 95b49c6194678c74accca4a20af71380efbcac5f | [
"Apache-2.0",
"MIT"
] | 9 | 2015-05-27T23:27:35.000Z | 2020-10-05T22:02:43.000Z | #!/usr/bin/env python
#
# self-test: socat -x pty,link=/tmp/com1,raw,echo=0 pty,link=/tmp/com2,raw,echo=0
from math import ceil
from m3_common import printing_sleep as sleep
import socket
import sys
import os
import mimetypes
import queue
import logging
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger('program')
logger.info("-" * 80)
logger.info("-- M3 Programmer")
logger.info("")
from ice import ICE
ice = ICE()
if len(sys.argv) not in (3,):
logger.info("USAGE: %s BINFILE SERIAL_DEVICE\n" % (sys.argv[0]))
sys.exit(2)
binfile = sys.argv[1]
ext = os.path.splitext(binfile)[1]
if ext == os.extsep + "txt":
t = 'hex'
elif ext == os.extsep + 'hex':
t = 'hex'
elif ext == os.extsep + 'bin':
t = 'bin'
else:
logger.debug("File ext (%s) not matched", ext)
logger.debug("MIME Type: " + str(mimetypes.guess_type(binfile)[0]))
if mimetypes.guess_type(binfile)[0] == 'text/plain':
t = 'hex'
else:
t = 'bin'
if t == 'hex':
logger.info("Guessing hex-encoded stream for NI setup")
logger.info(" ** This means one byte (two hex characters) per line")
logger.info(" ** and these are the first two characters on each line.")
logger.info(" ** If it needs to parse something more complex, let me know.")
binfd = open(binfile, 'r')
hexencoded = ""
for line in binfd:
hexencoded += line[0:2]
elif t == 'bin':
logger.info("Guessing compiled binary")
binfd = open(binfile, 'rb')
    hexencoded = binfd.read().hex().upper()
else:
logger.error("No file type set?")
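# Example of the expected hex-encoded input (one byte per line; only the first
# two characters of each line are read, anything after them is ignored):
#   08
#   B5
#   00
#   20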
if (len(hexencoded) % 4 == 0) and (len(hexencoded) % 8 != 0):
# Image is halfword-aligned. Some tools generate these, but our system
# assumes things are word-aligned. We pad an extra nop to the end to fix
hexencoded += '46C0' # nop; (mov r8, r8)
if (len(hexencoded) % 8) != 0:
logger.warn("Binfile is not word-aligned. This is not a valid image")
sys.exit(3)
else:
    logger.info("Binfile is %d bytes long\n" % (len(hexencoded) // 2))
# Callback for async I2C message
def validate_bin_helper(msg_type, event_id, length, msg):
logger.debug("Bin Helper got msg len" + str(len(msg)))
if len(msg) == 0:
logger.debug("Ignore msg of len 0")
return
validate_q.put(msg)
validate_q = queue.Queue()
ice.msg_handler['d+'] = validate_bin_helper
ice.connect(sys.argv[2])
ice.i2c_set_address("1001100x") # 0x98
logger.info("Turning all M3 power rails on")
ice.power_set_voltage(0,0.6)
ice.power_set_voltage(1,1.2)
ice.power_set_voltage(2,3.8)
logger.info("Turning 3.8 on")
ice.power_set_onoff(2,True)
sleep(1.0)
logger.info("Turning 1.2 on")
ice.power_set_onoff(1,True)
sleep(1.0)
logger.info("Turning 0.6 on")
ice.power_set_onoff(0,True)
sleep(1.0)
logger.info("Waiting 8 seconds for power rails to settle")
sleep(8.0)
logger.info("M3 0.6V => OFF (reset controller)")
ice.power_set_onoff(0,False)
sleep(4.0)
logger.info("M3 0.6V => ON")
ice.power_set_onoff(0,True)
sleep(4.0)
resp = input("About to send I2C message to wake controller. Continue? [Y/n] ")
if len(resp) != 0 and resp[0] in ('n', 'N'):
sys.exit()
junk_dma_done_msg = "%08X" % (socket.htonl(0x20000000))
logger.info("Sending junk message (DMA Done, 0 bytes to addr 0) to ensure chip is awake")
logger.debug("Sending: 0xAA " + junk_dma_done_msg)
ice.i2c_send(0xaa, bytes.fromhex(junk_dma_done_msg))
def write_bin(ice, hexencoded, offset=0):
    logger.info("Running programming sequence:")
    logger.info("\tI2C message for start DMA write at address 0x%x, length %d"
            % (offset, len(hexencoded) // 2))
    logger.info("\tI2C message for DMA data")
    logger.info("\tI2C message for end of DMA at address 0x%x, length %d" %
            (offset, len(hexencoded) // 2))
    logger.info("")
    length = len(hexencoded) // 8
    offset = socket.htons(offset)
    data = 0x40000000 | (length << 16) | offset
    dma_write_req = "%08X" % (socket.htonl(data))
    logger.debug("Sending: " + dma_write_req)
    ice.i2c_send(0xaa, bytes.fromhex(dma_write_req))
    logger.info("Sending data to address 0xA8")
    ice.i2c_send(0xa8, bytes.fromhex(hexencoded))
    data = 0x20000000 | (length << 16) | offset
    dma_done_msg = "%08X" % (socket.htonl(data))
    logger.debug("Sending: " + dma_done_msg)
    ice.i2c_send(0xaa, bytes.fromhex(dma_done_msg))
def validate_bin(ice, hexencoded, offset=0):
    logger.info("Running Validation sequence:")
    logger.info("\tI2C message for start DMA read at address 0x%x, length %d" %
            (offset, len(hexencoded) // 2))
    logger.info("\t<Receive I2C message for DMA data>")
    logger.info("\tCompare received data and validate it was programmed correctly")
    logger.info("")
    length = len(hexencoded) // 8
    offset = socket.htons(offset)
    data = 0x80000000 | (length << 16) | offset
    dma_read_req = "%08X" % (socket.htonl(data))
    logger.debug("Sending: " + dma_read_req)
    ice.i2c_send(0xaa, bytes.fromhex(dma_read_req))
    logger.info("Chip Program Dump Response:")
    chip_bin = validate_q.get(True, ICE.ONEYEAR)
    chip_bin = chip_bin.hex()
    logger.debug(chip_bin)
    # drop the first byte (two hex characters), which is the I2C address
    chip_bin = chip_bin[2:]
    # Consistent capitalization
    chip_bin = chip_bin.upper()
    hexencoded = hexencoded.upper()
    for b in range(2, len(hexencoded)):
        try:
            if hexencoded[b] != chip_bin[b]:
                logger.warn("ERR: Mismatch at half-byte" + str(b))
                logger.warn("Expected:" + hexencoded[b])
                logger.warn("Got:" + chip_bin[b])
                return False
        except IndexError:
            logger.warn("ERR: Length mismatch")
            logger.warn("Expected %d bytes" % (len(hexencoded) // 2))
            logger.warn("Got %d bytes" % (len(chip_bin) // 2))
            logger.warn("All prior bytes validated correctly")
            return False
    logger.info("Programming validated successfully")
    return True
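# Worked example of the 32-bit DMA control word used above (illustrative values):
# a write request (0x4 in the top nibble) of length 3 words at offset 0 packs to
#   0x40000000 | (3 << 16) | 0  ==  0x40030000
# which is then byte-swapped with htonl (on a little-endian host) before being
# hex-encoded and sent over I2C.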
resp = input("About to send program data to I2C. Continue? [Y/n] ")
if len(resp) != 0 and resp[0] in ('n', 'N'):
sys.exit()
# Length field is bits 28:16, 4*2^(28-16+1) = 32768 maximum byte single message
if (len(hexencoded) // 2) > (4 * 2 ** (28 - 16 + 1)):
logger.warn("Program is too long to write in one DMA message")
logger.warn("I can fix this with fragmentation if you encounter it")
logger.warn("Send me an email and I'll take care of it")
sys.exit()
tries = 0
while True:
write_bin(ice, hexencoded)
sleep(1)
if validate_bin(ice, hexencoded):
break
tries += 1
if tries > 2:
logger.info("")
logger.info("")
logger.info('=' * 80)
logger.info("Maximum number of tries exceeded. Programming Failed.")
sys.exit(-1)
logger.info("")
logger.info("")
logger.info('=' * 80)
logger.info("Programming complete.")
logger.info("")
resp = input("Would you like to send the DMA start interrupt? [Y/n] ")
if len(resp) != 0 and resp[0] in ('n', 'N'):
sys.exit()
logger.info("Sending 0x88 0x00000000")
ice.i2c_send(0x88, bytes.fromhex("00000000"))
| 31.669643 | 89 | 0.64914 | 1,084 | 7,094 | 4.173432 | 0.256458 | 0.090628 | 0.019452 | 0.014368 | 0.307913 | 0.251547 | 0.195402 | 0.173077 | 0.116711 | 0.079576 | 0 | 0.04174 | 0.199605 | 7,094 | 223 | 90 | 31.811659 | 0.755019 | 0.057795 | 0 | 0.22905 | 0 | 0 | 0.275929 | 0 | 0 | 0 | 0.01289 | 0 | 0 | 1 | 0.01676 | false | 0 | 0.050279 | 0 | 0.089385 | 0.005587 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1200fc9093c669658eb3eda6038151eb69622171 | 7,889 | py | Python | aviacme/cert.py | vidarno/aviacme | 6a00c29fda1664e5f546c879487bba84ca9d3681 | [
"MIT"
] | null | null | null | aviacme/cert.py | vidarno/aviacme | 6a00c29fda1664e5f546c879487bba84ca9d3681 | [
"MIT"
] | null | null | null | aviacme/cert.py | vidarno/aviacme | 6a00c29fda1664e5f546c879487bba84ca9d3681 | [
"MIT"
] | null | null | null | """Functions related to certificates"""
import json
import logging
import uuid
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
import attr
from cryptography import x509
from cryptography.hazmat.backends import default_backend
logger = logging.getLogger(__name__)
class CertError(Exception):
"""Superclass for all cert exceptions."""
class CertificateNotFoundError(CertError):
"""Raised when the certificate was not found"""
class Status(Enum):
NEW = "New"
INSTALLED = "Installed"
TO_BE_INSTALLED = "To be installed"
class ValidationMethod(Enum):
HTTP01 = "http-01"
DNS01 = "dns-01"
def get_certs_that_need_action(config):
"""Returns certificate that should be installed"""
to_be_renewed = []
to_be_installed = []
all_certs = get_all_certs()
for cert in all_certs:
if cert.up_for_renewal(config.cm_renewal_days):
to_be_renewed.append(cert)
elif cert.up_for_installation(config.cm_delayed_days):
to_be_installed.append(cert)
return to_be_renewed, to_be_installed
def get_all_certs():
"""Returns all the stored certificates"""
certs = []
for path in Path("cert").iterdir():
if path.is_file():
try:
cert = Certificate.load(path)
except ValueError as error:
logger.warning("Could not load '%s': %s", path.resolve(), error)
continue
certs.append(cert)
return certs
def _get_cert_dates(pem_cert):
"""Returns the dates in the cert"""
cert = x509.load_pem_x509_certificate(pem_cert.encode(), default_backend())
logger.debug(
"Certificate with serial '%s', has not before: '%s' and not after: '%s' (UTC)",
cert.serial_number,
cert.not_valid_before,
cert.not_valid_after,
)
return cert.not_valid_after, cert.not_valid_before
def _check_if_cert_about_to_expire(not_after, threshold):
    """
    Check whether a certificate with the specified
    not after date expires within `threshold` days.
    """
    datelimit = datetime.utcnow() + timedelta(days=threshold)
    if not_after < datelimit:
        logger.debug("'%s' is before '%s', returning True", not_after, datelimit)
        return True
    else:
        logger.debug("'%s' is after '%s', returning False", not_after, datelimit)
        return False
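# Worked example (dates are illustrative): with threshold=30 the cutoff is
# "now + 30 days", so a certificate whose not_after is 2021-01-10 is reported
# as about to expire on any date after 2020-12-11.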
def delete_expired_backups():
"""Deletes expired certificates from the backup folder"""
for path in Path("cert", "backup").iterdir():
try:
not_after, _ = _get_cert_dates(path.read_text())
except ValueError as error:
logger.warning(
"Could not load '%s' as a certificate: %s", path.resolve(), error
)
continue
if _check_if_cert_about_to_expire(not_after, 0):
logger.debug("Deleting cert '%s'", path.resolve())
path.unlink()
@attr.s
class Certificate:
"""Represents a stored certificate + csr"""
name = attr.ib()
partition = attr.ib()
path = attr.ib()
csr = attr.ib()
_cert = attr.ib()
status = attr.ib()
validation_method = attr.ib()
not_after = attr.ib()
not_before = attr.ib()
@classmethod
def create(cls, partition, name, **kwargs):
path = Path("cert", f"{partition}_{name}.json")
csr = kwargs.pop("csr", None)
cert = kwargs.pop("cert", None)
status = kwargs.pop("status", Status.NEW)
validation_method = kwargs.pop("validation_method", ValidationMethod.HTTP01)
not_after = kwargs.pop("not_after", datetime.fromtimestamp(0))
not_before = kwargs.pop("not_before", datetime.fromtimestamp(0))
return cls(
name,
partition,
path,
csr,
cert,
status,
validation_method,
not_after,
not_before,
)
@classmethod
def load(cls, path):
"""Load a certificate from a specified file"""
loaded = json.loads(path.read_text())
not_after = datetime.strptime(loaded.pop("not_after"), "%Y-%m-%dT%H:%M:%S")
not_before = datetime.strptime(loaded.pop("not_before"), "%Y-%m-%dT%H:%M:%S")
status = Status(loaded.pop("status"))
validation_method = ValidationMethod(
# default to http-01 if not specified
            # (for backwards compatibility)
loaded.pop("validation_method", "http-01")
)
loaded.update(
{
"not_before": not_before,
"not_after": not_after,
"status": status,
"validation_method": validation_method,
}
)
return cls.create(**loaded)
@classmethod
def new(cls, partition, name, csr, validation_method):
"""Creates a new Certificate object from a csr"""
return cls.create(partition, name, csr=csr, validation_method=validation_method)
@classmethod
def get(cls, partition, name):
"""Get an existing certificate from disk"""
path = Path("cert", f"{partition}_{name}.json")
if path.exists():
return cls.load(path)
raise CertificateNotFoundError()
@property
def cert(self):
"""The pem encoded certificate (with chain)"""
return self._cert
@cert.setter
def cert(self, pem):
self.not_after, self.not_before = _get_cert_dates(pem)
self._cert = pem
def save(self):
"""Saves the cert object to disk"""
dumped_json = json.dumps(
{
"name": self.name,
"partition": self.partition,
"status": self.status.value,
"not_before": self.not_before.isoformat(),
"not_after": self.not_after.isoformat(),
"csr": self.csr,
"cert": self.cert,
"validation_method": self.validation_method.value,
},
indent=4,
sort_keys=True,
)
try:
self.path.write_text(dumped_json)
except IOError as error:
if error.errno == 13:
# It may be owned by another user,
# try to recreate it.
temp_path = Path(str(uuid.uuid1()))
self.path.rename(temp_path)
self.path.write_text(dumped_json)
temp_path.unlink()
else:
raise
def mark_as_installed(self):
"""Marks the certicate as installed, and saves it to disk"""
self.status = Status.INSTALLED
self.save()
def renew(self, new_cert):
"""Backups the cert, sets a new one with status 'To be installed'"""
backup_path = Path("cert", "backup", f"{self.partition}_{self.name}.cer")
backup_path.write_text(self.cert)
self.cert = new_cert
self.status = Status.TO_BE_INSTALLED
self.save()
def delete(self):
"""Removes the certificate from disk"""
self.path.unlink()
def up_for_renewal(self, threshold):
"""Checks if the cert is in need of renewal"""
return _check_if_cert_about_to_expire(self.not_after, threshold)
def up_for_installation(self, threshold):
"""Checks if the cert is ready to be installed"""
if self.status != Status.TO_BE_INSTALLED:
return False
        datelimit = datetime.utcnow() - timedelta(days=threshold)
if self.not_before < datelimit:
logger.debug(
"'%s' is before '%s', returning True", self.not_before, datelimit
)
return True
else:
logger.debug(
"'%s' is after '%s', returning False", self.not_before, datelimit
)
return False
| 30.110687 | 88 | 0.591837 | 930 | 7,889 | 4.848387 | 0.212903 | 0.035485 | 0.025948 | 0.01242 | 0.213129 | 0.172987 | 0.129075 | 0.080284 | 0.06609 | 0.047017 | 0 | 0.005589 | 0.296869 | 7,889 | 261 | 89 | 30.226054 | 0.807283 | 0.119534 | 0 | 0.150538 | 0 | 0.005376 | 0.099956 | 0.011415 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091398 | false | 0 | 0.048387 | 0 | 0.317204 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
12016e4fe64bb35056a54608c56a4fed76dd5e23 | 4,456 | py | Python | LeNetWithS3Pooling/training/pooling.py | cclauss/DL4AGX | b4d73f6c39b0428e32ce5656352800cc7e2cfb22 | [
"Apache-2.0"
] | 1 | 2021-04-16T10:20:08.000Z | 2021-04-16T10:20:08.000Z | LeNetWithS3Pooling/training/pooling.py | andi4191/DL4AGX | b2aec0cc0d1375bcc29a94999e8cf66ca8e218fd | [
"Apache-2.0"
] | null | null | null | LeNetWithS3Pooling/training/pooling.py | andi4191/DL4AGX | b2aec0cc0d1375bcc29a94999e8cf66ca8e218fd | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: //LeNetWithS3Pooling/training/pooling.py
# Description: Implementation of S3Pooling
##########################################################################
import torch
import torch.nn.functional as F
class StochasticPool2d(torch.nn.Module):
    def __init__(self, kernel_size=2, stride=2, padding=0):
        super(StochasticPool2d, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.grid_size = kernel_size
        # Reference: https://arxiv.org/pdf/1611.05138.pdf
        # First, perform max pooling with stride=1 to maintain resolution;
        # hence, pad zeroes only on the right and bottom.
        self.padding = torch.nn.ConstantPad2d((0, 1, 0, 1), 0)
def forward(self, x, s3pool_flag=False):
# If S3Pool flag is enabled or training mode: Run S3Pooling
if s3pool_flag or self.training:
# Compute spatial dimensions from input feature map tensor
h, w = x.shape[-2:]
n_h = h // self.grid_size
n_w = w // self.grid_size
n_h = int(n_h)
n_w = int(n_w)
# Reference: https://arxiv.org/pdf/1611.05138.pdf
# First, perform with stride=1 and maintain resolution
# Hence, padding only on the right and bottom
x = self.padding(x)
# First step : perform maxpooling
x = F.max_pool2d(x, self.kernel_size, 1)
w_indices = []
h_indices = []
# Second step : Perform stochastic S3Pooling
for i in range(n_w):
# Calculate offset
position_offset = self.grid_size * i
# Max range for Boundary case
if i + 1 < n_w:
max_range = self.grid_size
else:
max_range = w - position_offset
# Pick random w index from [ position_offset to grid size ]
# Don't use random at inference time for exporting to IR
if not self.training:
w_index = torch.LongTensor([0])
else:
w_index = torch.LongTensor(1).random_(0, max_range)
w_indices.append(torch.add(w_index, position_offset))
for j in range(n_h):
# Calculate offset
position_offset = self.grid_size * j
# Max range for Boundary case
if j + 1 < n_h:
max_range = self.grid_size
else:
max_range = h - position_offset
# Pick random h index from [position offset to grid_size]
# Don't use random at inference time for exporting to IR
if not self.training:
h_index = torch.LongTensor([0])
else:
h_index = torch.LongTensor(1).random_(0, max_range)
h_indices.append(torch.add(h_index, position_offset))
# Gather all the h, w indicies from S3Pooling step
h_indices = torch.cat(h_indices, dim = 0)
w_indices = torch.cat(w_indices, dim = 0)
#output = x
# Pick values corresponding to h, w indices calculated
output = x[:, :, h_indices.cuda()][:, :, :, w_indices.cuda()]
            # print(x.shape, output.shape)
else:
# If S3Pooling flag disabled and inference time, perform average pooling
# Use AvgPooling
output = F.avg_pool2d(x, self.kernel_size, self.stride)
return output
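# Minimal usage sketch (shapes are illustrative; the .cuda() indexing inside
# forward() means this module currently assumes a CUDA device):
#
#   pool = StochasticPool2d(kernel_size=2, stride=2).cuda()
#   pool.train()
#   x = torch.randn(8, 16, 28, 28, device="cuda")
#   y = pool(x)     # stochastic S3Pool downsampling, 28x28 -> 14x14
#   pool.eval()
#   y = pool(x)     # plain average pooling at inference time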
| 37.445378 | 84 | 0.559022 | 541 | 4,456 | 4.480592 | 0.32902 | 0.029703 | 0.034653 | 0.013201 | 0.321782 | 0.283828 | 0.244224 | 0.210396 | 0.15429 | 0.15429 | 0 | 0.022282 | 0.335278 | 4,456 | 118 | 85 | 37.762712 | 0.796084 | 0.375673 | 0 | 0.169811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.09434 | 0 | 0.169811 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1203afb61ab68811b0be249f8039a7d8df5ddf01 | 85,636 | py | Python | hubspot/cms/blogs/blog_posts/models/blog_post.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/cms/blogs/blog_posts/models/blog_post.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/cms/blogs/blog_posts/models/blog_post.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | # coding: utf-8
"""
Blog Post endpoints
\"Use these endpoints for interacting with Blog Posts, Blog Authors, and Blog Tags\" # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.cms.blogs.blog_posts.configuration import Configuration
class BlogPost(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"id": "str",
"slug": "str",
"content_group_id": "str",
"campaign": "str",
"category_id": "int",
"state": "str",
"template_path": "str",
"name": "str",
"mab_experiment_id": "str",
"archived": "bool",
"author_name": "str",
"ab_test_id": "str",
"created_by_id": "str",
"updated_by_id": "str",
"domain": "str",
"subcategory": "str",
"ab_status": "str",
"folder_id": "str",
"widget_containers": "dict(str, object)",
"widgets": "dict(str, object)",
"language": "str",
"translated_from_id": "str",
"dynamic_page_hub_db_table_id": "str",
"blog_author_id": "str",
"tag_ids": "list[int]",
"post_body": "str",
"post_summary": "str",
"rss_body": "str",
"rss_summary": "str",
"enable_google_amp_output_override": "bool",
"html_title": "str",
"page_redirected": "bool",
"page_expiry_enabled": "bool",
"page_expiry_date": "int",
"page_expiry_redirect_id": "int",
"page_expiry_redirect_url": "str",
"use_featured_image": "bool",
"password": "str",
"attached_stylesheets": "list[dict(str, object)]",
"include_default_custom_css": "bool",
"enable_domain_stylesheets": "bool",
"enable_layout_stylesheets": "bool",
"meta_description": "str",
"publish_immediately": "bool",
"head_html": "str",
"footer_html": "str",
"content_type_category": "str",
"current_state": "str",
"link_rel_canonical_url": "str",
"featured_image": "str",
"featured_image_alt_text": "str",
"public_access_rules_enabled": "bool",
"public_access_rules": "list[object]",
"layout_sections": "dict(str, LayoutSection)",
"theme_settings_values": "dict(str, object)",
"url": "str",
"publish_date": "datetime",
"deleted_at": "datetime",
"created_at": "datetime",
"published": "bool",
"updated_at": "datetime",
}
attribute_map = {
"id": "id",
"slug": "slug",
"content_group_id": "contentGroupId",
"campaign": "campaign",
"category_id": "categoryId",
"state": "state",
"template_path": "templatePath",
"name": "name",
"mab_experiment_id": "mabExperimentId",
"archived": "archived",
"author_name": "authorName",
"ab_test_id": "abTestId",
"created_by_id": "createdById",
"updated_by_id": "updatedById",
"domain": "domain",
"subcategory": "subcategory",
"ab_status": "abStatus",
"folder_id": "folderId",
"widget_containers": "widgetContainers",
"widgets": "widgets",
"language": "language",
"translated_from_id": "translatedFromId",
"dynamic_page_hub_db_table_id": "dynamicPageHubDbTableId",
"blog_author_id": "blogAuthorId",
"tag_ids": "tagIds",
"post_body": "postBody",
"post_summary": "postSummary",
"rss_body": "rssBody",
"rss_summary": "rssSummary",
"enable_google_amp_output_override": "enableGoogleAmpOutputOverride",
"html_title": "htmlTitle",
"page_redirected": "pageRedirected",
"page_expiry_enabled": "pageExpiryEnabled",
"page_expiry_date": "pageExpiryDate",
"page_expiry_redirect_id": "pageExpiryRedirectId",
"page_expiry_redirect_url": "pageExpiryRedirectUrl",
"use_featured_image": "useFeaturedImage",
"password": "password",
"attached_stylesheets": "attachedStylesheets",
"include_default_custom_css": "includeDefaultCustomCss",
"enable_domain_stylesheets": "enableDomainStylesheets",
"enable_layout_stylesheets": "enableLayoutStylesheets",
"meta_description": "metaDescription",
"publish_immediately": "publishImmediately",
"head_html": "headHtml",
"footer_html": "footerHtml",
"content_type_category": "contentTypeCategory",
"current_state": "currentState",
"link_rel_canonical_url": "linkRelCanonicalUrl",
"featured_image": "featuredImage",
"featured_image_alt_text": "featuredImageAltText",
"public_access_rules_enabled": "publicAccessRulesEnabled",
"public_access_rules": "publicAccessRules",
"layout_sections": "layoutSections",
"theme_settings_values": "themeSettingsValues",
"url": "url",
"publish_date": "publishDate",
"deleted_at": "deletedAt",
"created_at": "createdAt",
"published": "published",
"updated_at": "updatedAt",
}
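# attribute_map pairs each snake_case Python attribute with its camelCase
# JSON key, so e.g. `content_group_id` on the model is sent and received as
# `contentGroupId` on the wire.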
def __init__(
self,
id=None,
slug=None,
content_group_id=None,
campaign=None,
category_id=None,
state=None,
template_path=None,
name=None,
mab_experiment_id=None,
archived=None,
author_name=None,
ab_test_id=None,
created_by_id=None,
updated_by_id=None,
domain=None,
subcategory=None,
ab_status=None,
folder_id=None,
widget_containers=None,
widgets=None,
language=None,
translated_from_id=None,
dynamic_page_hub_db_table_id=None,
blog_author_id=None,
tag_ids=None,
post_body=None,
post_summary=None,
rss_body=None,
rss_summary=None,
enable_google_amp_output_override=None,
html_title=None,
page_redirected=None,
page_expiry_enabled=None,
page_expiry_date=None,
page_expiry_redirect_id=None,
page_expiry_redirect_url=None,
use_featured_image=None,
password=None,
attached_stylesheets=None,
include_default_custom_css=None,
enable_domain_stylesheets=None,
enable_layout_stylesheets=None,
meta_description=None,
publish_immediately=None,
head_html=None,
footer_html=None,
content_type_category=None,
current_state=None,
link_rel_canonical_url=None,
featured_image=None,
featured_image_alt_text=None,
public_access_rules_enabled=None,
public_access_rules=None,
layout_sections=None,
theme_settings_values=None,
url=None,
publish_date=None,
deleted_at=None,
created_at=None,
published=None,
updated_at=None,
local_vars_configuration=None,
): # noqa: E501
"""BlogPost - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._slug = None
self._content_group_id = None
self._campaign = None
self._category_id = None
self._state = None
self._template_path = None
self._name = None
self._mab_experiment_id = None
self._archived = None
self._author_name = None
self._ab_test_id = None
self._created_by_id = None
self._updated_by_id = None
self._domain = None
self._subcategory = None
self._ab_status = None
self._folder_id = None
self._widget_containers = None
self._widgets = None
self._language = None
self._translated_from_id = None
self._dynamic_page_hub_db_table_id = None
self._blog_author_id = None
self._tag_ids = None
self._post_body = None
self._post_summary = None
self._rss_body = None
self._rss_summary = None
self._enable_google_amp_output_override = None
self._html_title = None
self._page_redirected = None
self._page_expiry_enabled = None
self._page_expiry_date = None
self._page_expiry_redirect_id = None
self._page_expiry_redirect_url = None
self._use_featured_image = None
self._password = None
self._attached_stylesheets = None
self._include_default_custom_css = None
self._enable_domain_stylesheets = None
self._enable_layout_stylesheets = None
self._meta_description = None
self._publish_immediately = None
self._head_html = None
self._footer_html = None
self._content_type_category = None
self._current_state = None
self._link_rel_canonical_url = None
self._featured_image = None
self._featured_image_alt_text = None
self._public_access_rules_enabled = None
self._public_access_rules = None
self._layout_sections = None
self._theme_settings_values = None
self._url = None
self._publish_date = None
self._deleted_at = None
self._created_at = None
self._published = None
self._updated_at = None
self.discriminator = None
self.id = id
self.slug = slug
self.content_group_id = content_group_id
self.campaign = campaign
self.category_id = category_id
self.state = state
self.template_path = template_path
self.name = name
self.mab_experiment_id = mab_experiment_id
self.archived = archived
self.author_name = author_name
self.ab_test_id = ab_test_id
self.created_by_id = created_by_id
self.updated_by_id = updated_by_id
self.domain = domain
self.subcategory = subcategory
self.ab_status = ab_status
self.folder_id = folder_id
self.widget_containers = widget_containers
self.widgets = widgets
self.language = language
self.translated_from_id = translated_from_id
self.dynamic_page_hub_db_table_id = dynamic_page_hub_db_table_id
self.blog_author_id = blog_author_id
self.tag_ids = tag_ids
self.post_body = post_body
self.post_summary = post_summary
self.rss_body = rss_body
self.rss_summary = rss_summary
self.enable_google_amp_output_override = enable_google_amp_output_override
self.html_title = html_title
self.page_redirected = page_redirected
self.page_expiry_enabled = page_expiry_enabled
self.page_expiry_date = page_expiry_date
self.page_expiry_redirect_id = page_expiry_redirect_id
self.page_expiry_redirect_url = page_expiry_redirect_url
self.use_featured_image = use_featured_image
self.password = password
self.attached_stylesheets = attached_stylesheets
self.include_default_custom_css = include_default_custom_css
self.enable_domain_stylesheets = enable_domain_stylesheets
self.enable_layout_stylesheets = enable_layout_stylesheets
self.meta_description = meta_description
self.publish_immediately = publish_immediately
self.head_html = head_html
self.footer_html = footer_html
self.content_type_category = content_type_category
self.current_state = current_state
self.link_rel_canonical_url = link_rel_canonical_url
self.featured_image = featured_image
self.featured_image_alt_text = featured_image_alt_text
self.public_access_rules_enabled = public_access_rules_enabled
self.public_access_rules = public_access_rules
self.layout_sections = layout_sections
self.theme_settings_values = theme_settings_values
self.url = url
self.publish_date = publish_date
self.deleted_at = deleted_at
self.created_at = created_at
self.published = published
self.updated_at = updated_at
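# Note: all of the assignments above route through the property setters
# defined below, and every setter raises ValueError on None while
# client-side validation is enabled. A minimal construction sketch
# (illustrative only, not part of the generated source):
#
#     from hubspot.cms.blogs.blog_posts.configuration import Configuration
#
#     cfg = Configuration()
#     cfg.client_side_validation = False  # allow a sparse payload
#     post = BlogPost(name="My post", slug="my-post", local_vars_configuration=cfg)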
@property
def id(self):
"""Gets the id of this BlogPost. # noqa: E501
The unique ID of the Blog Post. # noqa: E501
:return: The id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this BlogPost.
The unique ID of the Blog Post. # noqa: E501
:param id: The id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def slug(self):
"""Gets the slug of this BlogPost. # noqa: E501
The path of this blog post. This field is appended to the domain to construct the URL of this post. # noqa: E501
:return: The slug of this BlogPost. # noqa: E501
:rtype: str
"""
return self._slug
@slug.setter
def slug(self, slug):
"""Sets the slug of this BlogPost.
The path of this blog post. This field is appended to the domain to construct the URL of this post. # noqa: E501
:param slug: The slug of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and slug is None: # noqa: E501
raise ValueError("Invalid value for `slug`, must not be `None`") # noqa: E501
self._slug = slug
@property
def content_group_id(self):
"""Gets the content_group_id of this BlogPost. # noqa: E501
The ID of the parent Blog this Blog Post is associated with. # noqa: E501
:return: The content_group_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._content_group_id
@content_group_id.setter
def content_group_id(self, content_group_id):
"""Sets the content_group_id of this BlogPost.
The ID of the parent Blog this Blog Post is associated with. # noqa: E501
:param content_group_id: The content_group_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and content_group_id is None: # noqa: E501
raise ValueError("Invalid value for `content_group_id`, must not be `None`") # noqa: E501
self._content_group_id = content_group_id
@property
def campaign(self):
"""Gets the campaign of this BlogPost. # noqa: E501
The GUID of the marketing campaign this Blog Post is a part of. # noqa: E501
:return: The campaign of this BlogPost. # noqa: E501
:rtype: str
"""
return self._campaign
@campaign.setter
def campaign(self, campaign):
"""Sets the campaign of this BlogPost.
The GUID of the marketing campaign this Blog Post is a part of. # noqa: E501
:param campaign: The campaign of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and campaign is None: # noqa: E501
raise ValueError("Invalid value for `campaign`, must not be `None`") # noqa: E501
self._campaign = campaign
@property
def category_id(self):
"""Gets the category_id of this BlogPost. # noqa: E501
ID of the type of object this is. Should always . # noqa: E501
:return: The category_id of this BlogPost. # noqa: E501
:rtype: int
"""
return self._category_id
@category_id.setter
def category_id(self, category_id):
"""Sets the category_id of this BlogPost.
ID of the type of object this is. Should always . # noqa: E501
:param category_id: The category_id of this BlogPost. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and category_id is None: # noqa: E501
raise ValueError("Invalid value for `category_id`, must not be `None`") # noqa: E501
self._category_id = category_id
@property
def state(self):
"""Gets the state of this BlogPost. # noqa: E501
An ENUM describing the current state of this Blog Post. # noqa: E501
:return: The state of this BlogPost. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this BlogPost.
An ENUM describing the current state of this Blog Post. # noqa: E501
:param state: The state of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and state is None: # noqa: E501
raise ValueError("Invalid value for `state`, must not be `None`") # noqa: E501
if self.local_vars_configuration.client_side_validation and state is not None and len(state) > 25:
raise ValueError("Invalid value for `state`, length must be less than or equal to `25`") # noqa: E501
self._state = state
@property
def template_path(self):
"""Gets the template_path of this BlogPost. # noqa: E501
:return: The template_path of this BlogPost. # noqa: E501
:rtype: str
"""
return self._template_path
@template_path.setter
def template_path(self, template_path):
"""Sets the template_path of this BlogPost.
:param template_path: The template_path of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and template_path is None: # noqa: E501
raise ValueError("Invalid value for `template_path`, must not be `None`") # noqa: E501
self._template_path = template_path
@property
def name(self):
"""Gets the name of this BlogPost. # noqa: E501
The internal name of the blog post. # noqa: E501
:return: The name of this BlogPost. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this BlogPost.
The internal name of the blog post. # noqa: E501
:param name: The name of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def mab_experiment_id(self):
"""Gets the mab_experiment_id of this BlogPost. # noqa: E501
:return: The mab_experiment_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._mab_experiment_id
@mab_experiment_id.setter
def mab_experiment_id(self, mab_experiment_id):
"""Sets the mab_experiment_id of this BlogPost.
:param mab_experiment_id: The mab_experiment_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and mab_experiment_id is None: # noqa: E501
raise ValueError("Invalid value for `mab_experiment_id`, must not be `None`") # noqa: E501
self._mab_experiment_id = mab_experiment_id
@property
def archived(self):
"""Gets the archived of this BlogPost. # noqa: E501
If True, the post will not show up in your dashboard, although the post could still be live. # noqa: E501
:return: The archived of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._archived
@archived.setter
def archived(self, archived):
"""Sets the archived of this BlogPost.
If True, the post will not show up in your dashboard, although the post could still be live. # noqa: E501
:param archived: The archived of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and archived is None: # noqa: E501
raise ValueError("Invalid value for `archived`, must not be `None`") # noqa: E501
self._archived = archived
@property
def author_name(self):
"""Gets the author_name of this BlogPost. # noqa: E501
The name of the user that updated this blog post. # noqa: E501
:return: The author_name of this BlogPost. # noqa: E501
:rtype: str
"""
return self._author_name
@author_name.setter
def author_name(self, author_name):
"""Sets the author_name of this BlogPost.
The name of the user that updated this blog post. # noqa: E501
:param author_name: The author_name of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and author_name is None: # noqa: E501
raise ValueError("Invalid value for `author_name`, must not be `None`") # noqa: E501
self._author_name = author_name
@property
def ab_test_id(self):
"""Gets the ab_test_id of this BlogPost. # noqa: E501
:return: The ab_test_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._ab_test_id
@ab_test_id.setter
def ab_test_id(self, ab_test_id):
"""Sets the ab_test_id of this BlogPost.
:param ab_test_id: The ab_test_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and ab_test_id is None: # noqa: E501
raise ValueError("Invalid value for `ab_test_id`, must not be `None`") # noqa: E501
self._ab_test_id = ab_test_id
@property
def created_by_id(self):
"""Gets the created_by_id of this BlogPost. # noqa: E501
The ID of the user that created this blog post. # noqa: E501
:return: The created_by_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._created_by_id
@created_by_id.setter
def created_by_id(self, created_by_id):
"""Sets the created_by_id of this BlogPost.
The ID of the user that created this blog post. # noqa: E501
:param created_by_id: The created_by_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and created_by_id is None: # noqa: E501
raise ValueError("Invalid value for `created_by_id`, must not be `None`") # noqa: E501
self._created_by_id = created_by_id
@property
def updated_by_id(self):
"""Gets the updated_by_id of this BlogPost. # noqa: E501
The ID of the user that updated this blog post. # noqa: E501
:return: The updated_by_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._updated_by_id
@updated_by_id.setter
def updated_by_id(self, updated_by_id):
"""Sets the updated_by_id of this BlogPost.
The ID of the user that updated this blog post. # noqa: E501
:param updated_by_id: The updated_by_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and updated_by_id is None: # noqa: E501
raise ValueError("Invalid value for `updated_by_id`, must not be `None`") # noqa: E501
self._updated_by_id = updated_by_id
@property
def domain(self):
"""Gets the domain of this BlogPost. # noqa: E501
The domain this Blog Post will resolve to. If null, the Blog Post will default to the domain of the ParentBlog. # noqa: E501
:return: The domain of this BlogPost. # noqa: E501
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this BlogPost.
The domain this Blog Post will resolve to. If null, the Blog Post will default to the domain of the ParentBlog. # noqa: E501
:param domain: The domain of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and domain is None: # noqa: E501
raise ValueError("Invalid value for `domain`, must not be `None`") # noqa: E501
self._domain = domain
@property
def subcategory(self):
"""Gets the subcategory of this BlogPost. # noqa: E501
:return: The subcategory of this BlogPost. # noqa: E501
:rtype: str
"""
return self._subcategory
@subcategory.setter
def subcategory(self, subcategory):
"""Sets the subcategory of this BlogPost.
:param subcategory: The subcategory of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and subcategory is None: # noqa: E501
raise ValueError("Invalid value for `subcategory`, must not be `None`") # noqa: E501
self._subcategory = subcategory
@property
def ab_status(self):
"""Gets the ab_status of this BlogPost. # noqa: E501
:return: The ab_status of this BlogPost. # noqa: E501
:rtype: str
"""
return self._ab_status
@ab_status.setter
def ab_status(self, ab_status):
"""Sets the ab_status of this BlogPost.
:param ab_status: The ab_status of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and ab_status is None: # noqa: E501
raise ValueError("Invalid value for `ab_status`, must not be `None`") # noqa: E501
allowed_values = ["master", "variant", "loser_variant", "mab_master", "mab_variant", "automated_master", "automated_variant", "automated_loser_variant"] # noqa: E501
if self.local_vars_configuration.client_side_validation and ab_status not in allowed_values: # noqa: E501
raise ValueError("Invalid value for `ab_status` ({0}), must be one of {1}".format(ab_status, allowed_values)) # noqa: E501
self._ab_status = ab_status
@property
def folder_id(self):
"""Gets the folder_id of this BlogPost. # noqa: E501
:return: The folder_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._folder_id
@folder_id.setter
def folder_id(self, folder_id):
"""Sets the folder_id of this BlogPost.
:param folder_id: The folder_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and folder_id is None: # noqa: E501
raise ValueError("Invalid value for `folder_id`, must not be `None`") # noqa: E501
self._folder_id = folder_id
@property
def widget_containers(self):
"""Gets the widget_containers of this BlogPost. # noqa: E501
A data structure containing the data for all the modules inside the containers for this post. This will only be populated if the page has widget containers. # noqa: E501
:return: The widget_containers of this BlogPost. # noqa: E501
:rtype: dict(str, object)
"""
return self._widget_containers
@widget_containers.setter
def widget_containers(self, widget_containers):
"""Sets the widget_containers of this BlogPost.
A data structure containing the data for all the modules inside the containers for this post. This will only be populated if the page has widget containers. # noqa: E501
:param widget_containers: The widget_containers of this BlogPost. # noqa: E501
:type: dict(str, object)
"""
if self.local_vars_configuration.client_side_validation and widget_containers is None: # noqa: E501
raise ValueError("Invalid value for `widget_containers`, must not be `None`") # noqa: E501
self._widget_containers = widget_containers
@property
def widgets(self):
"""Gets the widgets of this BlogPost. # noqa: E501
A data structure containing the data for all the modules for this page. # noqa: E501
:return: The widgets of this BlogPost. # noqa: E501
:rtype: dict(str, object)
"""
return self._widgets
@widgets.setter
def widgets(self, widgets):
"""Sets the widgets of this BlogPost.
A data structure containing the data for all the modules for this page. # noqa: E501
:param widgets: The widgets of this BlogPost. # noqa: E501
:type: dict(str, object)
"""
if self.local_vars_configuration.client_side_validation and widgets is None: # noqa: E501
raise ValueError("Invalid value for `widgets`, must not be `None`") # noqa: E501
self._widgets = widgets
@property
def language(self):
"""Gets the language of this BlogPost. # noqa: E501
The explicitly defined language of the Blog Post. If null, the Blog Post will default to the language of the ParentBlog. # noqa: E501
:return: The language of this BlogPost. # noqa: E501
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this BlogPost.
The explicitly defined language of the Blog Post. If null, the Blog Post will default to the language of the ParentBlog. # noqa: E501
:param language: The language of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and language is None: # noqa: E501
raise ValueError("Invalid value for `language`, must not be `None`") # noqa: E501
allowed_values = [
"af",
"af-na",
"af-za",
"agq",
"agq-cm",
"ak",
"ak-gh",
"am",
"am-et",
"ar",
"ar-001",
"ar-ae",
"ar-bh",
"ar-dj",
"ar-dz",
"ar-eg",
"ar-eh",
"ar-er",
"ar-il",
"ar-iq",
"ar-jo",
"ar-km",
"ar-kw",
"ar-lb",
"ar-ly",
"ar-ma",
"ar-mr",
"ar-om",
"ar-ps",
"ar-qa",
"ar-sa",
"ar-sd",
"ar-so",
"ar-ss",
"ar-sy",
"ar-td",
"ar-tn",
"ar-ye",
"as",
"as-in",
"asa",
"asa-tz",
"ast",
"ast-es",
"az",
"az-az",
"bas",
"bas-cm",
"be",
"be-by",
"bem",
"bem-zm",
"bez",
"bez-tz",
"bg",
"bg-bg",
"bm",
"bm-ml",
"bn",
"bn-bd",
"bn-in",
"bo",
"bo-cn",
"bo-in",
"br",
"br-fr",
"brx",
"brx-in",
"bs",
"bs-ba",
"ca",
"ca-ad",
"ca-es",
"ca-fr",
"ca-it",
"ccp",
"ccp-bd",
"ccp-in",
"ce",
"ce-ru",
"cgg",
"cgg-ug",
"chr",
"chr-us",
"ckb",
"ckb-iq",
"ckb-ir",
"cs",
"cs-cz",
"cu",
"cu-ru",
"cy",
"cy-gb",
"da",
"da-dk",
"da-gl",
"dav",
"dav-ke",
"de",
"de-at",
"de-be",
"de-ch",
"de-de",
"de-gr",
"de-it",
"de-li",
"de-lu",
"dje",
"dje-ne",
"dsb",
"dsb-de",
"dua",
"dua-cm",
"dyo",
"dyo-sn",
"dz",
"dz-bt",
"ebu",
"ebu-ke",
"ee",
"ee-gh",
"ee-tg",
"el",
"el-cy",
"el-gr",
"en",
"en-001",
"en-150",
"en-ag",
"en-ai",
"en-as",
"en-at",
"en-au",
"en-bb",
"en-be",
"en-bi",
"en-bm",
"en-bs",
"en-bw",
"en-bz",
"en-ca",
"en-cc",
"en-ch",
"en-ck",
"en-cm",
"en-cx",
"en-cy",
"en-de",
"en-dg",
"en-dk",
"en-dm",
"en-er",
"en-fi",
"en-fj",
"en-fk",
"en-fm",
"en-gb",
"en-gd",
"en-gg",
"en-gh",
"en-gi",
"en-gm",
"en-gu",
"en-gy",
"en-hk",
"en-ie",
"en-il",
"en-im",
"en-in",
"en-io",
"en-je",
"en-jm",
"en-ke",
"en-ki",
"en-kn",
"en-ky",
"en-lc",
"en-lr",
"en-ls",
"en-mg",
"en-mh",
"en-mo",
"en-mp",
"en-ms",
"en-mt",
"en-mu",
"en-mw",
"en-my",
"en-na",
"en-nf",
"en-ng",
"en-nl",
"en-nr",
"en-nu",
"en-nz",
"en-pg",
"en-ph",
"en-pk",
"en-pn",
"en-pr",
"en-pw",
"en-rw",
"en-sb",
"en-sc",
"en-sd",
"en-se",
"en-sg",
"en-sh",
"en-si",
"en-sl",
"en-ss",
"en-sx",
"en-sz",
"en-tc",
"en-tk",
"en-to",
"en-tt",
"en-tv",
"en-tz",
"en-ug",
"en-um",
"en-us",
"en-vc",
"en-vg",
"en-vi",
"en-vu",
"en-ws",
"en-za",
"en-zm",
"en-zw",
"eo",
"eo-001",
"es",
"es-419",
"es-ar",
"es-bo",
"es-br",
"es-bz",
"es-cl",
"es-co",
"es-cr",
"es-cu",
"es-do",
"es-ea",
"es-ec",
"es-es",
"es-gq",
"es-gt",
"es-hn",
"es-ic",
"es-mx",
"es-ni",
"es-pa",
"es-pe",
"es-ph",
"es-pr",
"es-py",
"es-sv",
"es-us",
"es-uy",
"es-ve",
"et",
"et-ee",
"eu",
"eu-es",
"ewo",
"ewo-cm",
"fa",
"fa-af",
"fa-ir",
"ff",
"ff-cm",
"ff-gn",
"ff-mr",
"ff-sn",
"fi",
"fi-fi",
"fil",
"fil-ph",
"fo",
"fo-dk",
"fo-fo",
"fr",
"fr-be",
"fr-bf",
"fr-bi",
"fr-bj",
"fr-bl",
"fr-ca",
"fr-cd",
"fr-cf",
"fr-cg",
"fr-ch",
"fr-ci",
"fr-cm",
"fr-dj",
"fr-dz",
"fr-fr",
"fr-ga",
"fr-gf",
"fr-gn",
"fr-gp",
"fr-gq",
"fr-ht",
"fr-km",
"fr-lu",
"fr-ma",
"fr-mc",
"fr-mf",
"fr-mg",
"fr-ml",
"fr-mq",
"fr-mr",
"fr-mu",
"fr-nc",
"fr-ne",
"fr-pf",
"fr-pm",
"fr-re",
"fr-rw",
"fr-sc",
"fr-sn",
"fr-sy",
"fr-td",
"fr-tg",
"fr-tn",
"fr-vu",
"fr-wf",
"fr-yt",
"fur",
"fur-it",
"fy",
"fy-nl",
"ga",
"ga-ie",
"gd",
"gd-gb",
"gl",
"gl-es",
"gsw",
"gsw-ch",
"gsw-fr",
"gsw-li",
"gu",
"gu-in",
"guz",
"guz-ke",
"gv",
"gv-im",
"ha",
"ha-gh",
"ha-ne",
"ha-ng",
"haw",
"haw-us",
"he",
"hi",
"hi-in",
"hr",
"hr-ba",
"hr-hr",
"hsb",
"hsb-de",
"hu",
"hu-hu",
"hy",
"hy-am",
"id",
"ig",
"ig-ng",
"ii",
"ii-cn",
"id-id",
"is",
"is-is",
"it",
"it-ch",
"it-it",
"it-sm",
"it-va",
"he-il",
"ja",
"ja-jp",
"jgo",
"jgo-cm",
"yi",
"yi-001",
"jmc",
"jmc-tz",
"ka",
"ka-ge",
"kab",
"kab-dz",
"kam",
"kam-ke",
"kde",
"kde-tz",
"kea",
"kea-cv",
"khq",
"khq-ml",
"ki",
"ki-ke",
"kk",
"kk-kz",
"kkj",
"kkj-cm",
"kl",
"kl-gl",
"kln",
"kln-ke",
"km",
"km-kh",
"kn",
"kn-in",
"ko",
"ko-kp",
"ko-kr",
"kok",
"kok-in",
"ks",
"ks-in",
"ksb",
"ksb-tz",
"ksf",
"ksf-cm",
"ksh",
"ksh-de",
"kw",
"kw-gb",
"ky",
"ky-kg",
"lag",
"lag-tz",
"lb",
"lb-lu",
"lg",
"lg-ug",
"lkt",
"lkt-us",
"ln",
"ln-ao",
"ln-cd",
"ln-cf",
"ln-cg",
"lo",
"lo-la",
"lrc",
"lrc-iq",
"lrc-ir",
"lt",
"lt-lt",
"lu",
"lu-cd",
"luo",
"luo-ke",
"luy",
"luy-ke",
"lv",
"lv-lv",
"mas",
"mas-ke",
"mas-tz",
"mer",
"mer-ke",
"mfe",
"mfe-mu",
"mg",
"mg-mg",
"mgh",
"mgh-mz",
"mgo",
"mgo-cm",
"mk",
"mk-mk",
"ml",
"ml-in",
"mn",
"mn-mn",
"mr",
"mr-in",
"ms",
"ms-bn",
"ms-my",
"ms-sg",
"mt",
"mt-mt",
"mua",
"mua-cm",
"my",
"my-mm",
"mzn",
"mzn-ir",
"naq",
"naq-na",
"nb",
"nb-no",
"nb-sj",
"nd",
"nd-zw",
"nds",
"nds-de",
"nds-nl",
"ne",
"ne-in",
"ne-np",
"nl",
"nl-aw",
"nl-be",
"nl-bq",
"nl-cw",
"nl-nl",
"nl-sr",
"nl-sx",
"nmg",
"nmg-cm",
"nn",
"nn-no",
"nnh",
"nnh-cm",
"no",
"no-no",
"nus",
"nus-ss",
"nyn",
"nyn-ug",
"om",
"om-et",
"om-ke",
"or",
"or-in",
"os",
"os-ge",
"os-ru",
"pa",
"pa-in",
"pa-pk",
"pl",
"pl-pl",
"prg",
"prg-001",
"ps",
"ps-af",
"pt",
"pt-ao",
"pt-br",
"pt-ch",
"pt-cv",
"pt-gq",
"pt-gw",
"pt-lu",
"pt-mo",
"pt-mz",
"pt-pt",
"pt-st",
"pt-tl",
"qu",
"qu-bo",
"qu-ec",
"qu-pe",
"rm",
"rm-ch",
"rn",
"rn-bi",
"ro",
"ro-md",
"ro-ro",
"rof",
"rof-tz",
"ru",
"ru-by",
"ru-kg",
"ru-kz",
"ru-md",
"ru-ru",
"ru-ua",
"rw",
"rw-rw",
"rwk",
"rwk-tz",
"sa",
"sah",
"sah-ru",
"saq",
"saq-ke",
"sbp",
"sbp-tz",
"sd",
"sd-pk",
"se",
"se-fi",
"se-no",
"se-se",
"seh",
"seh-mz",
"ses",
"ses-ml",
"sg",
"sg-cf",
"shi",
"shi-ma",
"si",
"si-lk",
"sk",
"sk-sk",
"sl",
"sl-si",
"smn",
"smn-fi",
"sn",
"sn-zw",
"so",
"so-dj",
"so-et",
"so-ke",
"so-so",
"sq",
"sq-al",
"sq-mk",
"sq-xk",
"sr",
"sr-ba",
"sr-cs",
"sr-me",
"sr-rs",
"sr-xk",
"sv",
"sv-ax",
"sv-fi",
"sv-se",
"sw",
"sw-cd",
"sw-ke",
"sw-tz",
"sw-ug",
"sy",
"ta",
"ta-in",
"ta-lk",
"ta-my",
"ta-sg",
"te",
"te-in",
"teo",
"teo-ke",
"teo-ug",
"tg",
"tg-tj",
"th",
"th-th",
"ti",
"ti-er",
"ti-et",
"tk",
"tk-tm",
"to",
"to-to",
"tr",
"tr-cy",
"tr-tr",
"tt",
"tt-ru",
"twq",
"twq-ne",
"tzm",
"tzm-ma",
"ug",
"ug-cn",
"uk",
"uk-ua",
"ur",
"ur-in",
"ur-pk",
"uz",
"uz-af",
"uz-uz",
"vai",
"vai-lr",
"vi",
"vi-vn",
"vo",
"vo-001",
"vun",
"vun-tz",
"wae",
"wae-ch",
"wo",
"wo-sn",
"xog",
"xog-ug",
"yav",
"yav-cm",
"yo",
"yo-bj",
"yo-ng",
"yue",
"yue-cn",
"yue-hk",
"zgh",
"zgh-ma",
"zh",
"zh-cn",
"zh-hk",
"zh-mo",
"zh-sg",
"zh-tw",
"zh-hans",
"zh-hant",
"zu",
"zu-za",
] # noqa: E501
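# The locale identifiers above use the lowercase, hyphenated
# language[-region] form (e.g. "en-us", "pt-br"), in the style of BCP 47
# language tags.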
if self.local_vars_configuration.client_side_validation and language not in allowed_values: # noqa: E501
raise ValueError("Invalid value for `language` ({0}), must be one of {1}".format(language, allowed_values)) # noqa: E501
self._language = language
@property
def translated_from_id(self):
"""Gets the translated_from_id of this BlogPost. # noqa: E501
ID of the primary blog post this object was translated from. # noqa: E501
:return: The translated_from_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._translated_from_id
@translated_from_id.setter
def translated_from_id(self, translated_from_id):
"""Sets the translated_from_id of this BlogPost.
ID of the primary blog post this object was translated from. # noqa: E501
:param translated_from_id: The translated_from_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and translated_from_id is None: # noqa: E501
raise ValueError("Invalid value for `translated_from_id`, must not be `None`") # noqa: E501
self._translated_from_id = translated_from_id
@property
def dynamic_page_hub_db_table_id(self):
"""Gets the dynamic_page_hub_db_table_id of this BlogPost. # noqa: E501
:return: The dynamic_page_hub_db_table_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._dynamic_page_hub_db_table_id
@dynamic_page_hub_db_table_id.setter
def dynamic_page_hub_db_table_id(self, dynamic_page_hub_db_table_id):
"""Sets the dynamic_page_hub_db_table_id of this BlogPost.
:param dynamic_page_hub_db_table_id: The dynamic_page_hub_db_table_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and dynamic_page_hub_db_table_id is None: # noqa: E501
raise ValueError("Invalid value for `dynamic_page_hub_db_table_id`, must not be `None`") # noqa: E501
self._dynamic_page_hub_db_table_id = dynamic_page_hub_db_table_id
@property
def blog_author_id(self):
"""Gets the blog_author_id of this BlogPost. # noqa: E501
The ID of the Blog Author associated with this Blog Post. # noqa: E501
:return: The blog_author_id of this BlogPost. # noqa: E501
:rtype: str
"""
return self._blog_author_id
@blog_author_id.setter
def blog_author_id(self, blog_author_id):
"""Sets the blog_author_id of this BlogPost.
The ID of the Blog Author associated with this Blog Post. # noqa: E501
:param blog_author_id: The blog_author_id of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and blog_author_id is None: # noqa: E501
raise ValueError("Invalid value for `blog_author_id`, must not be `None`") # noqa: E501
self._blog_author_id = blog_author_id
@property
def tag_ids(self):
"""Gets the tag_ids of this BlogPost. # noqa: E501
List of IDs for the tags associated with this Blog Post. # noqa: E501
:return: The tag_ids of this BlogPost. # noqa: E501
:rtype: list[int]
"""
return self._tag_ids
@tag_ids.setter
def tag_ids(self, tag_ids):
"""Sets the tag_ids of this BlogPost.
List of IDs for the tags associated with this Blog Post. # noqa: E501
:param tag_ids: The tag_ids of this BlogPost. # noqa: E501
:type: list[int]
"""
if self.local_vars_configuration.client_side_validation and tag_ids is None: # noqa: E501
raise ValueError("Invalid value for `tag_ids`, must not be `None`") # noqa: E501
self._tag_ids = tag_ids
@property
def post_body(self):
"""Gets the post_body of this BlogPost. # noqa: E501
The HTML of the main post body. # noqa: E501
:return: The post_body of this BlogPost. # noqa: E501
:rtype: str
"""
return self._post_body
@post_body.setter
def post_body(self, post_body):
"""Sets the post_body of this BlogPost.
The HTML of the main post body. # noqa: E501
:param post_body: The post_body of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and post_body is None: # noqa: E501
raise ValueError("Invalid value for `post_body`, must not be `None`") # noqa: E501
self._post_body = post_body
@property
def post_summary(self):
"""Gets the post_summary of this BlogPost. # noqa: E501
The summary of the blog post that will appear on the main listing page. # noqa: E501
:return: The post_summary of this BlogPost. # noqa: E501
:rtype: str
"""
return self._post_summary
@post_summary.setter
def post_summary(self, post_summary):
"""Sets the post_summary of this BlogPost.
The summary of the blog post that will appear on the main listing page. # noqa: E501
:param post_summary: The post_summary of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and post_summary is None: # noqa: E501
raise ValueError("Invalid value for `post_summary`, must not be `None`") # noqa: E501
self._post_summary = post_summary
@property
def rss_body(self):
"""Gets the rss_body of this BlogPost. # noqa: E501
The contents of the RSS body for this Blog Post. # noqa: E501
:return: The rss_body of this BlogPost. # noqa: E501
:rtype: str
"""
return self._rss_body
@rss_body.setter
def rss_body(self, rss_body):
"""Sets the rss_body of this BlogPost.
The contents of the RSS body for this Blog Post. # noqa: E501
:param rss_body: The rss_body of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and rss_body is None: # noqa: E501
raise ValueError("Invalid value for `rss_body`, must not be `None`") # noqa: E501
self._rss_body = rss_body
@property
def rss_summary(self):
"""Gets the rss_summary of this BlogPost. # noqa: E501
The contents of the RSS summary for this Blog Post. # noqa: E501
:return: The rss_summary of this BlogPost. # noqa: E501
:rtype: str
"""
return self._rss_summary
@rss_summary.setter
def rss_summary(self, rss_summary):
"""Sets the rss_summary of this BlogPost.
The contents of the RSS summary for this Blog Post. # noqa: E501
:param rss_summary: The rss_summary of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and rss_summary is None: # noqa: E501
raise ValueError("Invalid value for `rss_summary`, must not be `None`") # noqa: E501
self._rss_summary = rss_summary
@property
def enable_google_amp_output_override(self):
"""Gets the enable_google_amp_output_override of this BlogPost. # noqa: E501
Boolean to allow overriding the AMP settings for the blog. # noqa: E501
:return: The enable_google_amp_output_override of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._enable_google_amp_output_override
@enable_google_amp_output_override.setter
def enable_google_amp_output_override(self, enable_google_amp_output_override):
"""Sets the enable_google_amp_output_override of this BlogPost.
Boolean to allow overriding the AMP settings for the blog. # noqa: E501
:param enable_google_amp_output_override: The enable_google_amp_output_override of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and enable_google_amp_output_override is None: # noqa: E501
raise ValueError("Invalid value for `enable_google_amp_output_override`, must not be `None`") # noqa: E501
self._enable_google_amp_output_override = enable_google_amp_output_override
@property
def html_title(self):
"""Gets the html_title of this BlogPost. # noqa: E501
The HTML title of this Blog Post. # noqa: E501
:return: The html_title of this BlogPost. # noqa: E501
:rtype: str
"""
return self._html_title
@html_title.setter
def html_title(self, html_title):
"""Sets the html_title of this BlogPost.
The HTML title of this Blog Post. # noqa: E501
:param html_title: The html_title of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and html_title is None: # noqa: E501
raise ValueError("Invalid value for `html_title`, must not be `None`") # noqa: E501
self._html_title = html_title
@property
def page_redirected(self):
"""Gets the page_redirected of this BlogPost. # noqa: E501
:return: The page_redirected of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._page_redirected
@page_redirected.setter
def page_redirected(self, page_redirected):
"""Sets the page_redirected of this BlogPost.
:param page_redirected: The page_redirected of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and page_redirected is None: # noqa: E501
raise ValueError("Invalid value for `page_redirected`, must not be `None`") # noqa: E501
self._page_redirected = page_redirected
@property
def page_expiry_enabled(self):
"""Gets the page_expiry_enabled of this BlogPost. # noqa: E501
:return: The page_expiry_enabled of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._page_expiry_enabled
@page_expiry_enabled.setter
def page_expiry_enabled(self, page_expiry_enabled):
"""Sets the page_expiry_enabled of this BlogPost.
:param page_expiry_enabled: The page_expiry_enabled of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and page_expiry_enabled is None: # noqa: E501
raise ValueError("Invalid value for `page_expiry_enabled`, must not be `None`") # noqa: E501
self._page_expiry_enabled = page_expiry_enabled
@property
def page_expiry_date(self):
"""Gets the page_expiry_date of this BlogPost. # noqa: E501
:return: The page_expiry_date of this BlogPost. # noqa: E501
:rtype: int
"""
return self._page_expiry_date
@page_expiry_date.setter
def page_expiry_date(self, page_expiry_date):
"""Sets the page_expiry_date of this BlogPost.
:param page_expiry_date: The page_expiry_date of this BlogPost. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and page_expiry_date is None: # noqa: E501
raise ValueError("Invalid value for `page_expiry_date`, must not be `None`") # noqa: E501
self._page_expiry_date = page_expiry_date
@property
def page_expiry_redirect_id(self):
"""Gets the page_expiry_redirect_id of this BlogPost. # noqa: E501
:return: The page_expiry_redirect_id of this BlogPost. # noqa: E501
:rtype: int
"""
return self._page_expiry_redirect_id
@page_expiry_redirect_id.setter
def page_expiry_redirect_id(self, page_expiry_redirect_id):
"""Sets the page_expiry_redirect_id of this BlogPost.
:param page_expiry_redirect_id: The page_expiry_redirect_id of this BlogPost. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and page_expiry_redirect_id is None: # noqa: E501
raise ValueError("Invalid value for `page_expiry_redirect_id`, must not be `None`") # noqa: E501
self._page_expiry_redirect_id = page_expiry_redirect_id
@property
def page_expiry_redirect_url(self):
"""Gets the page_expiry_redirect_url of this BlogPost. # noqa: E501
:return: The page_expiry_redirect_url of this BlogPost. # noqa: E501
:rtype: str
"""
return self._page_expiry_redirect_url
@page_expiry_redirect_url.setter
def page_expiry_redirect_url(self, page_expiry_redirect_url):
"""Sets the page_expiry_redirect_url of this BlogPost.
:param page_expiry_redirect_url: The page_expiry_redirect_url of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and page_expiry_redirect_url is None: # noqa: E501
raise ValueError("Invalid value for `page_expiry_redirect_url`, must not be `None`") # noqa: E501
self._page_expiry_redirect_url = page_expiry_redirect_url
@property
def use_featured_image(self):
"""Gets the use_featured_image of this BlogPost. # noqa: E501
Boolean to determine if this post should use a featuredImage. # noqa: E501
:return: The use_featured_image of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._use_featured_image
@use_featured_image.setter
def use_featured_image(self, use_featured_image):
"""Sets the use_featured_image of this BlogPost.
Boolean to determine if this post should use a featuredImage. # noqa: E501
:param use_featured_image: The use_featured_image of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and use_featured_image is None: # noqa: E501
raise ValueError("Invalid value for `use_featured_image`, must not be `None`") # noqa: E501
self._use_featured_image = use_featured_image
@property
def password(self):
"""Gets the password of this BlogPost. # noqa: E501
Set this to create a password-protected page. Entering the password will be required to view the page. # noqa: E501
:return: The password of this BlogPost. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this BlogPost.
Set this to create a password-protected page. Entering the password will be required to view the page. # noqa: E501
:param password: The password of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and password is None: # noqa: E501
raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
self._password = password
@property
def attached_stylesheets(self):
"""Gets the attached_stylesheets of this BlogPost. # noqa: E501
List of stylesheets to attach to this blog post. These stylesheets are attached to just this page. Order of precedence is bottom to top, just like in the HTML. # noqa: E501
:return: The attached_stylesheets of this BlogPost. # noqa: E501
:rtype: list[dict(str, object)]
"""
return self._attached_stylesheets
@attached_stylesheets.setter
def attached_stylesheets(self, attached_stylesheets):
"""Sets the attached_stylesheets of this BlogPost.
List of stylesheets to attach to this blog post. These stylesheets are attached to just this page. Order of precedence is bottom to top, just like in the HTML. # noqa: E501
:param attached_stylesheets: The attached_stylesheets of this BlogPost. # noqa: E501
:type: list[dict(str, object)]
"""
if self.local_vars_configuration.client_side_validation and attached_stylesheets is None: # noqa: E501
raise ValueError("Invalid value for `attached_stylesheets`, must not be `None`") # noqa: E501
self._attached_stylesheets = attached_stylesheets
@property
def include_default_custom_css(self):
"""Gets the include_default_custom_css of this BlogPost. # noqa: E501
Boolean to determine whether or not the Primary CSS Files should be applied. # noqa: E501
:return: The include_default_custom_css of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._include_default_custom_css
@include_default_custom_css.setter
def include_default_custom_css(self, include_default_custom_css):
"""Sets the include_default_custom_css of this BlogPost.
Boolean to determine whether or not the Primary CSS Files should be applied. # noqa: E501
:param include_default_custom_css: The include_default_custom_css of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and include_default_custom_css is None: # noqa: E501
raise ValueError("Invalid value for `include_default_custom_css`, must not be `None`") # noqa: E501
self._include_default_custom_css = include_default_custom_css
@property
def enable_domain_stylesheets(self):
"""Gets the enable_domain_stylesheets of this BlogPost. # noqa: E501
Boolean to determine whether or not the styles from the template should be applied. # noqa: E501
:return: The enable_domain_stylesheets of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._enable_domain_stylesheets
@enable_domain_stylesheets.setter
def enable_domain_stylesheets(self, enable_domain_stylesheets):
"""Sets the enable_domain_stylesheets of this BlogPost.
Boolean to determine whether or not the styles from the template should be applied. # noqa: E501
:param enable_domain_stylesheets: The enable_domain_stylesheets of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and enable_domain_stylesheets is None: # noqa: E501
raise ValueError("Invalid value for `enable_domain_stylesheets`, must not be `None`") # noqa: E501
self._enable_domain_stylesheets = enable_domain_stylesheets
@property
def enable_layout_stylesheets(self):
"""Gets the enable_layout_stylesheets of this BlogPost. # noqa: E501
Boolean to determine whether or not the styles from the template should be applied. # noqa: E501
:return: The enable_layout_stylesheets of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._enable_layout_stylesheets
@enable_layout_stylesheets.setter
def enable_layout_stylesheets(self, enable_layout_stylesheets):
"""Sets the enable_layout_stylesheets of this BlogPost.
Boolean to determine whether or not the styles from the template should be applied. # noqa: E501
:param enable_layout_stylesheets: The enable_layout_stylesheets of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and enable_layout_stylesheets is None: # noqa: E501
raise ValueError("Invalid value for `enable_layout_stylesheets`, must not be `None`") # noqa: E501
self._enable_layout_stylesheets = enable_layout_stylesheets
@property
def meta_description(self):
"""Gets the meta_description of this BlogPost. # noqa: E501
A description that goes in the <meta> tag on the page. # noqa: E501
:return: The meta_description of this BlogPost. # noqa: E501
:rtype: str
"""
return self._meta_description
@meta_description.setter
def meta_description(self, meta_description):
"""Sets the meta_description of this BlogPost.
A description that goes in the <meta> tag on the page. # noqa: E501
:param meta_description: The meta_description of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and meta_description is None: # noqa: E501
raise ValueError("Invalid value for `meta_description`, must not be `None`") # noqa: E501
self._meta_description = meta_description
@property
def publish_immediately(self):
"""Gets the publish_immediately of this BlogPost. # noqa: E501
Set this to true if you want the post to be published immediately when the schedule publish endpoint is called, ignoring the publish_date setting. # noqa: E501
:return: The publish_immediately of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._publish_immediately
@publish_immediately.setter
def publish_immediately(self, publish_immediately):
"""Sets the publish_immediately of this BlogPost.
Set this to true if you want the post to be published immediately when the schedule publish endpoint is called, ignoring the publish_date setting. # noqa: E501
:param publish_immediately: The publish_immediately of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and publish_immediately is None: # noqa: E501
raise ValueError("Invalid value for `publish_immediately`, must not be `None`") # noqa: E501
self._publish_immediately = publish_immediately
@property
def head_html(self):
"""Gets the head_html of this BlogPost. # noqa: E501
Custom HTML for embed codes, javascript, etc. that goes in the <head> tag of the page. # noqa: E501
:return: The head_html of this BlogPost. # noqa: E501
:rtype: str
"""
return self._head_html
@head_html.setter
def head_html(self, head_html):
"""Sets the head_html of this BlogPost.
Custom HTML for embed codes, javascript, etc. that goes in the <head> tag of the page. # noqa: E501
:param head_html: The head_html of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and head_html is None: # noqa: E501
raise ValueError("Invalid value for `head_html`, must not be `None`") # noqa: E501
self._head_html = head_html
@property
def footer_html(self):
"""Gets the footer_html of this BlogPost. # noqa: E501
Custom HTML for embed codes, javascript, etc. that should be placed before the </body> tag of the page. # noqa: E501
:return: The footer_html of this BlogPost. # noqa: E501
:rtype: str
"""
return self._footer_html
@footer_html.setter
def footer_html(self, footer_html):
"""Sets the footer_html of this BlogPost.
Custom HTML for embed codes, javascript, etc. that should be placed before the </body> tag of the page. # noqa: E501
:param footer_html: The footer_html of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and footer_html is None: # noqa: E501
raise ValueError("Invalid value for `footer_html`, must not be `None`") # noqa: E501
self._footer_html = footer_html
@property
def content_type_category(self):
"""Gets the content_type_category of this BlogPost. # noqa: E501
An ENUM describing the type of this object. Should always be BLOG_POST. # noqa: E501
:return: The content_type_category of this BlogPost. # noqa: E501
:rtype: str
"""
return self._content_type_category
@content_type_category.setter
def content_type_category(self, content_type_category):
"""Sets the content_type_category of this BlogPost.
An ENUM describing the type of this object. Should always be BLOG_POST. # noqa: E501
:param content_type_category: The content_type_category of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and content_type_category is None: # noqa: E501
raise ValueError("Invalid value for `content_type_category`, must not be `None`") # noqa: E501
allowed_values = ["0", "1", "2", "3", "4", "5", "6", "7"] # noqa: E501
if self.local_vars_configuration.client_side_validation and content_type_category not in allowed_values: # noqa: E501
raise ValueError("Invalid value for `content_type_category` ({0}), must be one of {1}".format(content_type_category, allowed_values)) # noqa: E501
self._content_type_category = content_type_category
@property
def current_state(self):
"""Gets the current_state of this BlogPost. # noqa: E501
A generated ENUM describing the current state of this Blog Post. Should always match state. # noqa: E501
:return: The current_state of this BlogPost. # noqa: E501
:rtype: str
"""
return self._current_state
@current_state.setter
def current_state(self, current_state):
"""Sets the current_state of this BlogPost.
A generated ENUM describing the current state of this Blog Post. Should always match state. # noqa: E501
:param current_state: The current_state of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and current_state is None: # noqa: E501
raise ValueError("Invalid value for `current_state`, must not be `None`") # noqa: E501
allowed_values = [
"AUTOMATED",
"AUTOMATED_DRAFT",
"AUTOMATED_SENDING",
"AUTOMATED_FOR_FORM",
"AUTOMATED_FOR_FORM_BUFFER",
"AUTOMATED_FOR_FORM_DRAFT",
"AUTOMATED_FOR_FORM_LEGACY",
"BLOG_EMAIL_DRAFT",
"BLOG_EMAIL_PUBLISHED",
"DRAFT",
"DRAFT_AB",
"DRAFT_AB_VARIANT",
"ERROR",
"LOSER_AB_VARIANT",
"PAGE_STUB",
"PRE_PROCESSING",
"PROCESSING",
"PUBLISHED",
"PUBLISHED_AB",
"PUBLISHED_AB_VARIANT",
"PUBLISHED_OR_SCHEDULED",
"RSS_TO_EMAIL_DRAFT",
"RSS_TO_EMAIL_PUBLISHED",
"SCHEDULED",
"SCHEDULED_AB",
"SCHEDULED_OR_PUBLISHED",
"AUTOMATED_AB",
"AUTOMATED_AB_VARIANT",
"AUTOMATED_DRAFT_AB",
"AUTOMATED_DRAFT_ABVARIANT",
"AUTOMATED_LOSER_ABVARIANT",
] # noqa: E501
if self.local_vars_configuration.client_side_validation and current_state not in allowed_values: # noqa: E501
raise ValueError("Invalid value for `current_state` ({0}), must be one of {1}".format(current_state, allowed_values)) # noqa: E501
self._current_state = current_state
@property
def link_rel_canonical_url(self):
"""Gets the link_rel_canonical_url of this BlogPost. # noqa: E501
Optional override to set the URL to be used in the rel=canonical link tag on the page. # noqa: E501
:return: The link_rel_canonical_url of this BlogPost. # noqa: E501
:rtype: str
"""
return self._link_rel_canonical_url
@link_rel_canonical_url.setter
def link_rel_canonical_url(self, link_rel_canonical_url):
"""Sets the link_rel_canonical_url of this BlogPost.
Optional override to set the URL to be used in the rel=canonical link tag on the page. # noqa: E501
:param link_rel_canonical_url: The link_rel_canonical_url of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and link_rel_canonical_url is None: # noqa: E501
raise ValueError("Invalid value for `link_rel_canonical_url`, must not be `None`") # noqa: E501
self._link_rel_canonical_url = link_rel_canonical_url
@property
def featured_image(self):
"""Gets the featured_image of this BlogPost. # noqa: E501
The featuredImage of this Blog Post. # noqa: E501
:return: The featured_image of this BlogPost. # noqa: E501
:rtype: str
"""
return self._featured_image
@featured_image.setter
def featured_image(self, featured_image):
"""Sets the featured_image of this BlogPost.
The featuredImage of this Blog Post. # noqa: E501
:param featured_image: The featured_image of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and featured_image is None: # noqa: E501
raise ValueError("Invalid value for `featured_image`, must not be `None`") # noqa: E501
self._featured_image = featured_image
@property
def featured_image_alt_text(self):
"""Gets the featured_image_alt_text of this BlogPost. # noqa: E501
Alt Text of the featuredImage. # noqa: E501
:return: The featured_image_alt_text of this BlogPost. # noqa: E501
:rtype: str
"""
return self._featured_image_alt_text
@featured_image_alt_text.setter
def featured_image_alt_text(self, featured_image_alt_text):
"""Sets the featured_image_alt_text of this BlogPost.
Alt Text of the featuredImage. # noqa: E501
:param featured_image_alt_text: The featured_image_alt_text of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and featured_image_alt_text is None: # noqa: E501
raise ValueError("Invalid value for `featured_image_alt_text`, must not be `None`") # noqa: E501
self._featured_image_alt_text = featured_image_alt_text
@property
def public_access_rules_enabled(self):
"""Gets the public_access_rules_enabled of this BlogPost. # noqa: E501
Boolean to determine whether or not to respect publicAccessRules. # noqa: E501
:return: The public_access_rules_enabled of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._public_access_rules_enabled
@public_access_rules_enabled.setter
def public_access_rules_enabled(self, public_access_rules_enabled):
"""Sets the public_access_rules_enabled of this BlogPost.
Boolean to determine whether or not to respect publicAccessRules. # noqa: E501
:param public_access_rules_enabled: The public_access_rules_enabled of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and public_access_rules_enabled is None: # noqa: E501
raise ValueError("Invalid value for `public_access_rules_enabled`, must not be `None`") # noqa: E501
self._public_access_rules_enabled = public_access_rules_enabled
@property
def public_access_rules(self):
"""Gets the public_access_rules of this BlogPost. # noqa: E501
Rules for requiring member registration to access private content. # noqa: E501
:return: The public_access_rules of this BlogPost. # noqa: E501
:rtype: list[object]
"""
return self._public_access_rules
@public_access_rules.setter
def public_access_rules(self, public_access_rules):
"""Sets the public_access_rules of this BlogPost.
Rules for requiring member registration to access private content. # noqa: E501
:param public_access_rules: The public_access_rules of this BlogPost. # noqa: E501
:type: list[object]
"""
if self.local_vars_configuration.client_side_validation and public_access_rules is None: # noqa: E501
raise ValueError("Invalid value for `public_access_rules`, must not be `None`") # noqa: E501
self._public_access_rules = public_access_rules
@property
def layout_sections(self):
"""Gets the layout_sections of this BlogPost. # noqa: E501
:return: The layout_sections of this BlogPost. # noqa: E501
:rtype: dict(str, LayoutSection)
"""
return self._layout_sections
@layout_sections.setter
def layout_sections(self, layout_sections):
"""Sets the layout_sections of this BlogPost.
:param layout_sections: The layout_sections of this BlogPost. # noqa: E501
:type: dict(str, LayoutSection)
"""
if self.local_vars_configuration.client_side_validation and layout_sections is None: # noqa: E501
raise ValueError("Invalid value for `layout_sections`, must not be `None`") # noqa: E501
self._layout_sections = layout_sections
@property
def theme_settings_values(self):
"""Gets the theme_settings_values of this BlogPost. # noqa: E501
:return: The theme_settings_values of this BlogPost. # noqa: E501
:rtype: dict(str, object)
"""
return self._theme_settings_values
@theme_settings_values.setter
def theme_settings_values(self, theme_settings_values):
"""Sets the theme_settings_values of this BlogPost.
:param theme_settings_values: The theme_settings_values of this BlogPost. # noqa: E501
:type: dict(str, object)
"""
if self.local_vars_configuration.client_side_validation and theme_settings_values is None: # noqa: E501
raise ValueError("Invalid value for `theme_settings_values`, must not be `None`") # noqa: E501
self._theme_settings_values = theme_settings_values
@property
def url(self):
"""Gets the url of this BlogPost. # noqa: E501
A generated field representing the URL of this blog post. # noqa: E501
:return: The url of this BlogPost. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this BlogPost.
A generated field representing the URL of this blog post. # noqa: E501
:param url: The url of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and url is None: # noqa: E501
raise ValueError("Invalid value for `url`, must not be `None`") # noqa: E501
self._url = url
@property
def publish_date(self):
"""Gets the publish_date of this BlogPost. # noqa: E501
The date (ISO8601 format) the blog post is to be published at. # noqa: E501
:return: The publish_date of this BlogPost. # noqa: E501
:rtype: datetime
"""
return self._publish_date
@publish_date.setter
def publish_date(self, publish_date):
"""Sets the publish_date of this BlogPost.
The date (ISO8601 format) the blog post is to be published at. # noqa: E501
:param publish_date: The publish_date of this BlogPost. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and publish_date is None: # noqa: E501
raise ValueError("Invalid value for `publish_date`, must not be `None`") # noqa: E501
self._publish_date = publish_date
@property
def deleted_at(self):
"""Gets the deleted_at of this BlogPost. # noqa: E501
The timestamp (ISO8601 format) when this Blog Post was deleted. # noqa: E501
:return: The deleted_at of this BlogPost. # noqa: E501
:rtype: datetime
"""
return self._deleted_at
@deleted_at.setter
def deleted_at(self, deleted_at):
"""Sets the deleted_at of this BlogPost.
The timestamp (ISO8601 format) when this Blog Post was deleted. # noqa: E501
:param deleted_at: The deleted_at of this BlogPost. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and deleted_at is None: # noqa: E501
raise ValueError("Invalid value for `deleted_at`, must not be `None`") # noqa: E501
self._deleted_at = deleted_at
@property
def created_at(self):
"""Gets the created_at of this BlogPost. # noqa: E501
The timestamp (ISO8601 format) when this blog post was created. # noqa: E501
:return: The created_at of this BlogPost. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this BlogPost.
The timestamp (ISO8601 format) when this blog post was created. # noqa: E501
:param created_at: The created_at of this BlogPost. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and created_at is None: # noqa: E501
raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501
self._created_at = created_at
@property
def published(self):
"""Gets the published of this BlogPost. # noqa: E501
Boolean describing if this Blog Post is published. # noqa: E501
:return: The published of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._published
@published.setter
def published(self, published):
"""Sets the published of this BlogPost.
Boolean describing if this Blog Post is published. # noqa: E501
:param published: The published of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and published is None: # noqa: E501
raise ValueError("Invalid value for `published`, must not be `None`") # noqa: E501
self._published = published
@property
def updated_at(self):
"""Gets the updated_at of this BlogPost. # noqa: E501
The timestamp (ISO8601 format) when this Blog Post was last updated. # noqa: E501
:return: The updated_at of this BlogPost. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this BlogPost.
The timestamp (ISO8601 format) when this Blog Post was last updated. # noqa: E501
:param updated_at: The updated_at of this BlogPost. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and updated_at is None: # noqa: E501
raise ValueError("Invalid value for `updated_at`, must not be `None`") # noqa: E501
self._updated_at = updated_at
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BlogPost):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, BlogPost):
return True
return self.to_dict() != other.to_dict()
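# Doctest-style sketch of the helpers above (assumes a configured `post`
# instance and `import copy`; illustrative only, not part of the generated code):
# >>> post.public_access_rules_enabled = True
# >>> post.to_dict()["public_access_rules_enabled"]
# True
# >>> copy.deepcopy(post) == post   # __eq__/__ne__ compare to_dict() output
# True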
| 32.315472 | 181 | 0.57115 | 10,329 | 85,636 | 4.520767 | 0.07203 | 0.0711 | 0.073156 | 0.070543 | 0.706585 | 0.629232 | 0.601007 | 0.529843 | 0.462019 | 0.380341 | 0 | 0.023237 | 0.332127 | 85,636 | 2,649 | 182 | 32.327671 | 0.793195 | 0.305444 | 0 | 0.040829 | 0 | 0 | 0.185602 | 0.0268 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080402 | false | 0.007538 | 0.002513 | 0 | 0.127513 | 0.001256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1204747831fc9864ec931f9ad42fa72ea7913318 | 4,743 | py | Python | detector/train.py | EthanLuu/code-clone-detector | 0b6270ba18f9f266de0160c758e3e4554912fe08 | [
"MIT"
] | 3 | 2021-11-22T03:57:43.000Z | 2022-01-11T12:08:52.000Z | detector/train.py | EthanLuu/code-clone-detector | 0b6270ba18f9f266de0160c758e3e4554912fe08 | [
"MIT"
] | null | null | null | detector/train.py | EthanLuu/code-clone-detector | 0b6270ba18f9f266de0160c758e3e4554912fe08 | [
"MIT"
] | 1 | 2021-07-12T04:25:32.000Z | 2021-07-12T04:25:32.000Z | import pandas as pd
import os
import torch
import numpy as np
import pickle
import dill
from settings import settings
from model import BatchProgramCC
from torch.autograd import Variable
from gensim.models.word2vec import Word2Vec
from sklearn.metrics import precision_recall_fscore_support
categories = 5
HIDDEN_DIM = 100
ENCODE_DIM = 128
LABELS = 1
EPOCHS = 5
BATCH_SIZE = 32
USE_GPU = False
def get_batch(dataset, idx, bs):
tmp = dataset.iloc[idx: idx+bs]
x1, x2, labels = [], [], []
for _, item in tmp.iterrows():
x1.append(item['ast_x'])
x2.append(item['ast_y'])
labels.append([item['label']])
return x1, x2, torch.FloatTensor(labels)
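# Contract sketch (toy frame; `a1`/`a2`/`b1`/`b2` are hypothetical stand-ins
# for encoded ASTs; labels come back as a 2-D float tensor):
# >>> df = pd.DataFrame({'ast_x': [a1, a2], 'ast_y': [b1, b2], 'label': [1, 0]})
# >>> x1, x2, y = get_batch(df, 0, 2)
# >>> y.shape
# torch.Size([2, 1])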
def train():
train_data = pd.read_pickle(settings.train_block_path).sample(frac=1)
word2vec = Word2Vec.load(settings.w2v_model_path).wv
MAX_TOKENS = word2vec.syn0.shape[0]
EMBEDDING_DIM = word2vec.syn0.shape[1]
embeddings = np.zeros((MAX_TOKENS + 1, EMBEDDING_DIM), dtype="float32")
embeddings[:word2vec.syn0.shape[0]] = word2vec.syn0
model = BatchProgramCC(EMBEDDING_DIM, HIDDEN_DIM, MAX_TOKENS+1, ENCODE_DIM, LABELS, BATCH_SIZE,
USE_GPU, embeddings)
parameters = model.parameters()
optimizer = torch.optim.Adamax(parameters)
loss_function = torch.nn.BCELoss()
print('Start training...')
for t in range(1, categories+1):
model_path = "./models/model_" + str(t) + ".pkl"
if os.path.exists(model_path):
continue
        # Filter code pairs belonging to the current clone type: clones are labeled 1, non-clones 0
train_data_t = train_data[train_data['label'].isin([t, 0])]
train_data_t.loc[train_data_t['label'] > 0, 'label'] = 1
print(train_data_t)
# training procedure
for _ in range(EPOCHS):
# training epoch
i = 0
while i < len(train_data_t):
try:
batch = get_batch(train_data_t, i, BATCH_SIZE)
i += BATCH_SIZE
train1_inputs, train2_inputs, train_labels = batch
                    if USE_GPU:
                        # Only the label tensor lives on the GPU; the AST
                        # inputs are plain Python lists consumed by the model.
                        train_labels = train_labels.cuda()
model.zero_grad()
model.batch_size = len(train_labels)
model.hidden = model.init_hidden()
output = model(train1_inputs, train2_inputs)
loss = loss_function(output, Variable(train_labels))
loss.backward()
optimizer.step()
print(str(i) + " good")
                except Exception:
                    # Skip batches whose forward/backward pass fails; `i` has
                    # already been advanced, so the batch is not retried.
                    print(str(i) + " bad")
                    continue
# save model
f = open(model_path, 'wb')
dill.dump(model, f)
f.close()
print(model_path + " generated")
def test():
precision, recall, f1 = 0, 0, 0
test_data = pd.read_pickle(settings.test_block_path).sample(frac=1)
loss_function = torch.nn.BCELoss()
for t in range(1, categories+1):
test_data_t = test_data[test_data['label'].isin([t, 0])]
test_data_t.loc[test_data_t['label'] > 0, 'label'] = 1
model_path = "./models/model_" + str(t) + ".pkl"
f = open(model_path, 'rb')
model = dill.load(f)
f.close()
print("Testing-%d..." % t)
# testing procedure
predicts = []
trues = []
total_loss = 0.0
total = 0.0
i = 0
while i < len(test_data_t):
batch = get_batch(test_data_t, i, BATCH_SIZE)
i += BATCH_SIZE
test1_inputs, test2_inputs, test_labels = batch
if USE_GPU:
test_labels = test_labels.cuda()
model.batch_size = len(test_labels)
model.hidden = model.init_hidden()
output = model(test1_inputs, test2_inputs)
loss = loss_function(output, Variable(test_labels))
# calc testing acc
predicted = (output.data > 0.5).cpu().numpy()
predicts.extend(predicted)
trues.extend(test_labels.cpu().numpy())
total += len(test_labels)
total_loss += loss.item() * len(test_labels)
weights = [0, 0.005, 0.001, 0.002, 0.010, 0.982]
p, r, f, _ = precision_recall_fscore_support(
trues, predicts, average='binary')
precision += weights[t] * p
recall += weights[t] * r
f1 += weights[t] * f
print("Type-" + str(t) + ": " + str(p) +
" " + str(r) + " " + str(f))
print("Total testing results(P,R,F1):%.3f, %.3f, %.3f" %
(precision, recall, f1))
def main():
train()
test()
if __name__ == "__main__":
main()
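# Typical run (illustrative; the data and model paths come from settings.py):
#   python detector/train.py
# trains one binary classifier per clone type (models/model_1.pkl ...) and then
# reports per-type and weighted P/R/F1 on the held-out blocks.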
| 33.167832 | 118 | 0.571157 | 591 | 4,743 | 4.377327 | 0.27242 | 0.02126 | 0.023193 | 0.037109 | 0.259374 | 0.172787 | 0.091612 | 0.074217 | 0 | 0 | 0 | 0.031117 | 0.308876 | 4,743 | 142 | 119 | 33.401408 | 0.758084 | 0.023614 | 0 | 0.153846 | 0 | 0 | 0.045848 | 0.004542 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034188 | false | 0 | 0.094017 | 0 | 0.136752 | 0.068376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1205b697340a6ca66a4b5eefeee7e83d113d7439 | 3,336 | py | Python | whatthefood/data/visualise.py | lychanl/WhatTheFood | 94b6eec2c306e7e55b19395cde207d6e6beec7fe | [
"MIT"
] | null | null | null | whatthefood/data/visualise.py | lychanl/WhatTheFood | 94b6eec2c306e7e55b19395cde207d6e6beec7fe | [
"MIT"
] | null | null | null | whatthefood/data/visualise.py | lychanl/WhatTheFood | 94b6eec2c306e7e55b19395cde207d6e6beec7fe | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pickle
import numpy as np
import argparse
from whatthefood.data.xml_to_obj import parse_file
from whatthefood.data.obj_to_nparray import get_objects_from_output, load_input_image
from whatthefood.data.preprocessing import ScalePreprocessor
from whatthefood.classification.utils import get_output_mean_with_flipped
def visualise_objects(ax, objects, color, scale=None):
for o in objects:
loc = (o.center[1] - o.size[1] / 2, o.center[0] - o.size[0] / 2)
size = (o.size[1], o.size[0])
if scale:
loc = (loc[0] / scale, loc[1] / scale)
size = (size[0] / scale, size[1] / scale)
ax.add_patch(plt.Rectangle(loc, size[0], size[1], fill=False, linewidth=2, edgecolor=color))
ax.text(loc[0], loc[1], o.label,
color=color, weight="bold",
verticalalignment="bottom",
horizontalalignment="left")
def visualise_img_and_annot(img, objects, annot_objects, scale=None):
fig, ax = plt.subplots(1)
ax.imshow(img)
ax.set_xticks([])
ax.set_yticks([])
if annot_objects:
print(f"Actual objects number: {len(annot_objects)}")
visualise_objects(ax, annot_objects, 'green', scale)
if objects:
print(f"Detected objects number: {len(objects)}")
visualise_objects(ax, objects, 'red')
plt.show()
def visualise_data(img, expected_out, model_out, classes, threshold=0.5):
yolo_out_objs = get_objects_from_output(model_out, img.shape[:2], classes, threshold)\
if model_out is not None else None
yolo_exp_objs = get_objects_from_output(expected_out, img.shape[:2], classes, threshold)\
if expected_out is not None else None
visualise_img_and_annot(img, yolo_out_objs, yolo_exp_objs)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', action='store', type=str, default=None, required=False)
parser.add_argument('--model-classes-from', action='store', type=str, default=None, required=False)
parser.add_argument('--annotation', action='store_const', const=True, default=False)
parser.add_argument('img')
args = parser.parse_args()
flips = False
model = None
preprocessor = None
scale = None
if args.model:
with open(args.model, 'rb') as file:
model = pickle.load(file)
scale = 2340 // model.inputs[0].shape[0]
assert scale == 4160 // model.inputs[0].shape[1]
preprocessor = ScalePreprocessor(scale, np.mean)
if not args.annotation:
annot = None
img = load_input_image(args.img, preprocessor)
else:
annot = parse_file(args.img)
img = load_input_image(annot.img_path, preprocessor)
yolo_out_annot = None
if model:
ds = None
if args.model_classes_from:
with open(args.model_classes_from, 'rb') as file:
ds = pickle.load(file)
classes = list(range(model.output.shape[2] - 5)) if not ds else ds.classes
if flips:
out = get_output_mean_with_flipped(model, [img])[0]
else:
out = model([img])[0]
yolo_out_annot = get_objects_from_output(out, img.shape[:2], classes)
    # `annot` is None when --annotation was not passed, so guard the attribute access
    visualise_img_and_annot(img, yolo_out_annot, annot.objects if annot is not None else None, scale)
| 34.391753 | 103 | 0.658573 | 459 | 3,336 | 4.588235 | 0.261438 | 0.02849 | 0.026591 | 0.037987 | 0.197531 | 0.132004 | 0.11301 | 0.05603 | 0.05603 | 0.05603 | 0 | 0.015052 | 0.223321 | 3,336 | 96 | 104 | 34.75 | 0.797761 | 0 | 0 | 0.027027 | 0 | 0 | 0.053657 | 0 | 0 | 0 | 0 | 0 | 0.013514 | 1 | 0.040541 | false | 0 | 0.108108 | 0 | 0.148649 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1206d8e2c1e71e7cdd590a11a2f6c861efb19b7e | 12,320 | py | Python | 1_code/cluster/predict_symptoms_scv_grid.py | lindenmp/NormativeNeuroDev_CrossSec_DWI | af54928f047dd0c08fefcd7102b604cfeb84d364 | [
"MIT"
] | null | null | null | 1_code/cluster/predict_symptoms_scv_grid.py | lindenmp/NormativeNeuroDev_CrossSec_DWI | af54928f047dd0c08fefcd7102b604cfeb84d364 | [
"MIT"
] | 7 | 2020-03-25T14:09:37.000Z | 2022-01-13T02:37:09.000Z | 1_code/cluster/predict_symptoms_scv_grid.py | lindenmp/neurodev_cs_predictive | af54928f047dd0c08fefcd7102b604cfeb84d364 | [
"MIT"
] | null | null | null | import argparse
# Essentials
import os, sys, glob
import pandas as pd
import numpy as np
import copy
import json
# Stats
import scipy as sp
from scipy import stats
# Sklearn
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, GridSearchCV, cross_val_score
from sklearn.linear_model import Ridge, Lasso, LinearRegression
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR, LinearSVR
from sklearn.metrics import make_scorer, r2_score, mean_squared_error, mean_absolute_error
# --------------------------------------------------------------------------------------------------------------------
# parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("-x", help="IVs", dest="X_file", default=None)
parser.add_argument("-y", help="DVs", dest="y_file", default=None)
parser.add_argument("-c", help="DVs", dest="c_file", default=None)
parser.add_argument("-metric", help="brain feature (e.g., ac)", dest="metric", default=None)
parser.add_argument("-pheno", help="psychopathology dimension", dest="pheno", default=None)
parser.add_argument("-seed", help="seed for shuffle_data", dest="seed", default=1)
parser.add_argument("-alg", help="estimator", dest="alg", default=None)
parser.add_argument("-score", help="score set order", dest="score", default=None)
parser.add_argument("-o", help="output directory", dest="outroot", default=None)
args = parser.parse_args()
print(args)
X_file = args.X_file
y_file = args.y_file
c_file = args.c_file
metric = args.metric
pheno = args.pheno
# seed = int(args.seed)
# seed = int(os.environ['SGE_TASK_ID'])-1
alg = args.alg
score = args.score
outroot = args.outroot
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------
# prediction functions
def corr_true_pred(y_true, y_pred):
if type(y_true) == np.ndarray:
y_true = y_true.flatten()
if type(y_pred) == np.ndarray:
y_pred = y_pred.flatten()
r,p = sp.stats.pearsonr(y_true, y_pred)
return r
def root_mean_squared_error(y_true, y_pred):
mse = np.mean((y_true - y_pred)**2, axis=0)
rmse = np.sqrt(mse)
return rmse
def get_reg(num_params = 10):
regs = {'rr': Ridge(),
'lr': Lasso(),
'krr_lin': KernelRidge(kernel='linear'),
'krr_rbf': KernelRidge(kernel='rbf'),
'svr_lin': SVR(kernel='linear'),
'svr_rbf': SVR(kernel='rbf')
}
# From the sklearn docs, gamma defaults to 1/n_features. In my cases that will be either 1/400 features = 0.0025 or 1/200 = 0.005.
# I'll set gamma to same range as alpha then [0.001 to 1] - this way, the defaults will be included in the gridsearch
param_grids = {'rr': {'reg__alpha': np.logspace(0.5, -1, num_params)},
'lr': {'reg__alpha': np.logspace(0.5, -1, num_params)},
'krr_lin': {'reg__alpha': np.logspace(0.5, -1, num_params)},
'krr_rbf': {'reg__alpha': np.logspace(0.5, -1, num_params)},
'svr_lin': {'reg__C': np.logspace(0, 4, num_params)},
'svr_rbf': {'reg__C': np.logspace(0, 4, num_params), 'reg__gamma': np.logspace(0, -3, num_params)}
}
return regs, param_grids
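# Usage sketch: pick an estimator and its matching grid by key, e.g.
# >>> regs, grids = get_reg(num_params=3)
# >>> grids['svr_rbf']['reg__C']
# array([1.e+00, 1.e+02, 1.e+04])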
def get_stratified_cv(X, y, c = None, n_splits = 10):
# sort data on outcome variable in ascending order
idx = y.sort_values(ascending = True).index
if X.ndim == 2: X_sort = X.loc[idx,:]
elif X.ndim == 1: X_sort = X.loc[idx]
y_sort = y.loc[idx]
if c is not None:
if c.ndim == 2: c_sort = c.loc[idx,:]
elif c.ndim == 1: c_sort = c.loc[idx]
# create custom stratified kfold on outcome variable
my_cv = []
for k in range(n_splits):
my_bool = np.zeros(y.shape[0]).astype(bool)
my_bool[np.arange(k,y.shape[0],n_splits)] = True
train_idx = np.where(my_bool == False)[0]
test_idx = np.where(my_bool == True)[0]
my_cv.append( (train_idx, test_idx) )
if c is not None:
return X_sort, y_sort, my_cv, c_sort
else:
return X_sort, y_sort, my_cv
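# Illustration of the interleaved folds (toy example; every fold's test set
# spans the full range of the sorted outcome, which is the point of sorting):
# >>> y = pd.Series([3, 1, 2, 5, 4, 6])
# >>> X = pd.DataFrame(np.zeros((6, 2)))
# >>> _, y_sort, cv = get_stratified_cv(X, y, n_splits=3)
# >>> [list(te) for _, te in cv]
# [[0, 3], [1, 4], [2, 5]]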
def cross_val_score_nuis(X, y, c, my_cv, reg, my_scorer, c_y = None):
accuracy = np.zeros(len(my_cv),)
for k in np.arange(len(my_cv)):
tr = my_cv[k][0]
te = my_cv[k][1]
# Split into train test
X_train = X.iloc[tr,:]; X_test = X.iloc[te,:]
y_train = y.iloc[tr]; y_test = y.iloc[te]
c_train = c.iloc[tr,:]; c_test = c.iloc[te,:]
if c_y is not None: c_y_train = c_y.iloc[tr,:]; c_y_test = c_y.iloc[te,:]
# standardize predictors
sc = StandardScaler(); sc.fit(X_train); X_train = sc.transform(X_train); X_test = sc.transform(X_test)
X_train = pd.DataFrame(data = X_train, index = X.iloc[tr,:].index, columns = X.iloc[tr,:].columns)
X_test = pd.DataFrame(data = X_test, index = X.iloc[te,:].index, columns = X.iloc[te,:].columns)
# standardize covariates
sc = StandardScaler(); sc.fit(c_train); c_train = sc.transform(c_train); c_test = sc.transform(c_test)
c_train = pd.DataFrame(data = c_train, index = c.iloc[tr,:].index, columns = c.iloc[tr,:].columns)
c_test = pd.DataFrame(data = c_test, index = c.iloc[te,:].index, columns = c.iloc[te,:].columns)
if c_y is not None:
sc = StandardScaler(); sc.fit(c_y_train); c_y_train = sc.transform(c_y_train); c_y_test = sc.transform(c_y_test)
c_y_train = pd.DataFrame(data = c_y_train, index = c.iloc[tr,:].index, columns = c.iloc[tr,:].columns)
c_y_test = pd.DataFrame(data = c_y_test, index = c.iloc[te,:].index, columns = c.iloc[te,:].columns)
# regress nuisance (X)
# nuis_reg = LinearRegression(); nuis_reg.fit(c_train, X_train)
nuis_reg = KernelRidge(kernel='rbf'); nuis_reg.fit(c_train, X_train)
X_pred = nuis_reg.predict(c_train); X_train = X_train - X_pred
X_pred = nuis_reg.predict(c_test); X_test = X_test - X_pred
# # regress nuisance (y)
# if c_y is None:
# # nuis_reg = LinearRegression(); nuis_reg.fit(c_train, y_train)
# nuis_reg = KernelRidge(kernel='rbf'); nuis_reg.fit(c_train, y_train)
# y_pred = nuis_reg.predict(c_train); y_train = y_train - y_pred
# y_pred = nuis_reg.predict(c_test); y_test = y_test - y_pred
# elif c_y is not None:
# # nuis_reg = LinearRegression(); nuis_reg.fit(c_y_train, y_train)
# nuis_reg = KernelRidge(kernel='rbf'); nuis_reg.fit(c_y_train, y_train)
# y_pred = nuis_reg.predict(c_y_train); y_train = y_train - y_pred
# y_pred = nuis_reg.predict(c_y_test); y_test = y_test - y_pred
reg.fit(X_train, y_train)
accuracy[k] = my_scorer(reg, X_test, y_test)
return accuracy
def run_reg_scv(X, y, c, reg, param_grid, n_splits = 10, scoring = 'r2', run_perm = False):
pipe = Pipeline(steps=[('standardize', StandardScaler()),
('reg', reg)])
# X_sort, y_sort, my_cv = get_stratified_cv(X, y, n_splits = n_splits)
X_sort, y_sort, my_cv, c_sort = get_stratified_cv(X = X, y = y, c = c, n_splits = n_splits)
# if scoring is a dictionary then we run GridSearchCV with multiple scoring metrics and refit using the first one in the dict
grid = GridSearchCV(pipe, param_grid, cv = my_cv, scoring = scoring)
grid.fit(X_sort, y_sort);
# rescore with nuisance regression
new_reg = copy.deepcopy(reg)
if 'reg__alpha' in grid.best_params_: new_reg.alpha = grid.best_params_['reg__alpha']
if 'reg__gamma' in grid.best_params_: new_reg.gamma = grid.best_params_['reg__gamma']
if 'reg__C' in grid.best_params_: new_reg.C = grid.best_params_['reg__C']
accuracy_nuis = cross_val_score_nuis(X = X_sort, y = y_sort, c = c_sort, my_cv = my_cv, reg = new_reg, my_scorer = scoring)
if run_perm:
null_reg = copy.deepcopy(reg)
if 'reg__alpha' in grid.best_params_: null_reg.alpha = grid.best_params_['reg__alpha']
if 'reg__gamma' in grid.best_params_: null_reg.gamma = grid.best_params_['reg__gamma']
if 'reg__C' in grid.best_params_: null_reg.C = grid.best_params_['reg__C']
pipe = Pipeline(steps=[('standardize', StandardScaler()),
('reg', null_reg)])
X_sort.reset_index(drop = True, inplace = True)
c_sort.reset_index(drop = True, inplace = True)
n_perm = 5000
permuted_acc = np.zeros((n_perm,))
permuted_acc_nuis = np.zeros((n_perm,))
for i in np.arange(n_perm):
np.random.seed(i)
idx = np.arange(y_sort.shape[0])
np.random.shuffle(idx)
y_perm = y_sort.iloc[idx]
y_perm.reset_index(drop = True, inplace = True)
c_y = c_sort.iloc[idx,:]
c_y.reset_index(drop = True, inplace = True)
            # score with the scorer passed in, not the module-level my_scorer
            permuted_acc[i] = cross_val_score(pipe, X_sort, y_perm, scoring = scoring, cv = my_cv).mean()
permuted_acc_nuis[i] = cross_val_score_nuis(X = X_sort, y = y_perm, c = c_sort, my_cv = my_cv, reg = null_reg, my_scorer = scoring, c_y = c_y).mean()
if run_perm:
return grid, accuracy_nuis, permuted_acc, permuted_acc_nuis
else:
return grid, accuracy_nuis
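# Sketch of how the permutation nulls are consumed downstream (illustrative):
# a one-sided permutation p-value for the nuisance-corrected accuracy is
# >>> p_perm = (permuted_acc_nuis >= accuracy_nuis.mean()).mean()
# assuming higher scores are better (flip the comparison for error metrics).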
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------
# inputs
X = pd.read_csv(X_file)
X.set_index(['bblid', 'scanid'], inplace = True)
X = X.filter(regex = metric)
y = pd.read_csv(y_file)
y.set_index(['bblid', 'scanid'], inplace = True)
y = y.loc[:,pheno]
c = pd.read_csv(c_file)
c.set_index(['bblid', 'scanid'], inplace = True)
# outdir
outdir = os.path.join(outroot, alg + '_' + score + '_' + metric + '_' + pheno)
if not os.path.exists(outdir): os.makedirs(outdir);
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------
# set scorer
if score == 'r2':
my_scorer = make_scorer(r2_score, greater_is_better = True)
elif score == 'corr':
my_scorer = make_scorer(corr_true_pred, greater_is_better = True)
elif score == 'mse':
my_scorer = make_scorer(mean_squared_error, greater_is_better = False)
elif score == 'rmse':
my_scorer = make_scorer(root_mean_squared_error, greater_is_better = False)
elif score == 'mae':
my_scorer = make_scorer(mean_absolute_error, greater_is_better = False)
# prediction
regs, param_grids = get_reg()
grid, accuracy_nuis, permuted_acc, permuted_acc_nuis = run_reg_scv(X = X, y = y, c = c, reg = regs[alg], param_grid = param_grids[alg], scoring = my_scorer, run_perm = True)
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------
# outputs
json_data = json.dumps(grid.best_params_)
f = open(os.path.join(outdir,'best_params.json'),'w')
f.write(json_data)
f.close()
np.savetxt(os.path.join(outdir,'accuracy_mean.txt'), np.array([grid.cv_results_['mean_test_score'][grid.best_index_]]))
np.savetxt(os.path.join(outdir,'accuracy_std.txt'), np.array([grid.cv_results_['std_test_score'][grid.best_index_]]))
np.savetxt(os.path.join(outdir,'permuted_acc.txt'), permuted_acc)
np.savetxt(os.path.join(outdir,'accuracy_nuis.txt'), accuracy_nuis)
np.savetxt(os.path.join(outdir,'accuracy_mean_nuis.txt'), np.array([accuracy_nuis.mean()]))
np.savetxt(os.path.join(outdir,'accuracy_std_nuis.txt'), np.array([accuracy_nuis.std()]))
np.savetxt(os.path.join(outdir,'permuted_acc_nuis.txt'), permuted_acc_nuis)
# --------------------------------------------------------------------------------------------------------------------
print('Finished!')
| 43.380282 | 173 | 0.589205 | 1,766 | 12,320 | 3.843148 | 0.151189 | 0.007662 | 0.026816 | 0.01886 | 0.442022 | 0.364962 | 0.274937 | 0.238544 | 0.1662 | 0.128334 | 0 | 0.007008 | 0.189286 | 12,320 | 283 | 174 | 43.533569 | 0.672507 | 0.217289 | 0 | 0.045198 | 0 | 0 | 0.075151 | 0.006671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033898 | false | 0 | 0.084746 | 0 | 0.163842 | 0.011299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1207c0c819347d26f1f5ae9a5c2ef2ea9e853dbd | 1,783 | py | Python | src/recognize.py | avi09/Desktop-Assistant | 58019b41872af932c219106db94f7c3772cdde36 | [
"MIT"
] | 1 | 2020-11-24T11:34:57.000Z | 2020-11-24T11:34:57.000Z | src/recognize.py | avi09/Desktop-Assistant | 58019b41872af932c219106db94f7c3772cdde36 | [
"MIT"
] | null | null | null | src/recognize.py | avi09/Desktop-Assistant | 58019b41872af932c219106db94f7c3772cdde36 | [
"MIT"
] | null | null | null | import speech_recognition as sr
from gtts import gTTS
import os
import threading
import time
heard = False
control = False
r = sr.Recognizer()
audio = ""
def secondary_detect():
global heard, control, audio, r
while True:
s = ""
if control==True:
try:
s = r.recognize_google(audio).lower()
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
print('Heard - ' + s)
if s.find("rachel")!=-1 or s.find("richa")!=-1:
heard = True
control = False
audio = ""
else:
heard = False
else:
time.sleep(0.4)
secondary_detect_thread = threading.Thread(target = secondary_detect)
secondary_detect_thread.start()
# obtain audio from the microphone
def hear():
global heard, control, r, audio
while True:
s = ""
with sr.Microphone() as source:
print("Waiting for invoke message - Rachel")
audio = r.listen(source)
control = True
if heard==True:
control = False
heard = False
return
def getcommand():
s = ""
while True:
r1 = sr.Recognizer()
with sr.Microphone() as source:
print("What Can I Do For You?")
audio = r1.listen(source)
try:
s = r1.recognize_google(audio)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
s = s.lower()
print('------')
print('You Said - '+s)
print('------')
return s
def say(s):
x = 'en'
myobj = gTTS(text=s, lang=x, slow=False)
myobj.save("audio.mp3")
os.system("play audio.mp3")
return
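# Typical flow (sketch): block until the wake word is heard, then capture a
# command and speak a reply.
# >>> hear()
# >>> command = getcommand()
# >>> say("You said " + command)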
| 23.155844 | 92 | 0.658441 | 244 | 1,783 | 4.77459 | 0.340164 | 0.072961 | 0.07897 | 0.051502 | 0.338197 | 0.338197 | 0.288412 | 0.288412 | 0.288412 | 0.288412 | 0 | 0.007868 | 0.215928 | 1,783 | 76 | 93 | 23.460526 | 0.825465 | 0.017947 | 0 | 0.477612 | 0 | 0 | 0.209262 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.074627 | 0 | 0.179104 | 0.149254 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
120ca5c4ac58e2fc2895a4b9f281ee43dcb77564 | 2,120 | py | Python | venv/lib/python3.7/site-packages/allauth/socialaccount/providers/azure/views.py | vikram0207/django-rest | eafec575999dce6859dc7b99177cff339b2bcbdd | [
"MIT"
] | 12 | 2019-08-02T07:58:16.000Z | 2022-01-31T23:45:08.000Z | venv/lib/python3.7/site-packages/allauth/socialaccount/providers/azure/views.py | vikram0207/django-rest | eafec575999dce6859dc7b99177cff339b2bcbdd | [
"MIT"
] | 23 | 2019-01-19T08:54:48.000Z | 2022-03-11T23:39:37.000Z | venv/lib/python3.7/site-packages/allauth/socialaccount/providers/azure/views.py | vikram0207/django-rest | eafec575999dce6859dc7b99177cff339b2bcbdd | [
"MIT"
] | 17 | 2020-03-03T08:42:17.000Z | 2020-10-03T16:08:49.000Z | from __future__ import unicode_literals
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import AzureProvider
LOGIN_URL = 'https://login.microsoftonline.com/common/oauth2/v2.0'
GRAPH_URL = 'https://graph.microsoft.com/v1.0'
class AzureOAuth2Adapter(OAuth2Adapter):
"""
Docs available at:
https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-v2-protocols
"""
provider_id = AzureProvider.id
access_token_url = LOGIN_URL + '/token'
authorize_url = LOGIN_URL + '/authorize'
profile_url = 'https://graph.microsoft.com/v1.0/me'
# Can be used later to obtain group data. Needs 'Group.Read.All' or
# similar.
#
# See https://developer.microsoft.com/en-us/graph/docs/api-reference/beta/api/user_list_memberof # noqa
groups_url = GRAPH_URL + '/me/memberOf?$select=displayName'
def complete_login(self, request, app, token, **kwargs):
headers = {'Authorization': 'Bearer {0}'.format(token.token)}
extra_data = {}
resp = requests.get(self.profile_url, headers=headers)
# See:
#
# https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/api/user_get # noqa
#
# example of what's returned (in python format)
#
# {u'displayName': u'John Smith', u'mobilePhone': None,
# u'preferredLanguage': u'en-US', u'jobTitle': u'Director',
# u'userPrincipalName': u'john@smith.com',
# u'@odata.context':
# u'https://graph.microsoft.com/v1.0/$metadata#users/$entity',
# u'officeLocation': u'Paris', u'businessPhones': [],
# u'mail': u'john@smith.com', u'surname': u'Smith',
# u'givenName': u'John', u'id': u'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'}
profile_data = resp.json()
extra_data.update(profile_data)
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(AzureOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(AzureOAuth2Adapter)
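# One way to wire this provider up in a Django project (illustrative; exact
# settings depend on your allauth version and how you store credentials):
#
# INSTALLED_APPS += ["allauth.socialaccount.providers.azure"]
# SOCIALACCOUNT_PROVIDERS = {
#     "azure": {"APP": {"client_id": "<app-id>", "secret": "<secret>", "key": ""}}
# }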
| 33.650794 | 108 | 0.692925 | 267 | 2,120 | 5.378277 | 0.460674 | 0.050139 | 0.039694 | 0.045961 | 0.151114 | 0.131616 | 0.114206 | 0.075209 | 0.075209 | 0.075209 | 0 | 0.014156 | 0.166981 | 2,120 | 62 | 109 | 34.193548 | 0.798981 | 0.396698 | 0 | 0 | 0 | 0 | 0.153349 | 0.025827 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
120ca81e28bba9f7cc8389252be215df8a98086a | 44,887 | py | Python | meerk40t/lihuiyu/gui/lhystudiosdrivergui.py | joerlane/meerk40t | a75d78848ff1682640e112111fb6ac4e23e08616 | [
"MIT"
] | null | null | null | meerk40t/lihuiyu/gui/lhystudiosdrivergui.py | joerlane/meerk40t | a75d78848ff1682640e112111fb6ac4e23e08616 | [
"MIT"
] | null | null | null | meerk40t/lihuiyu/gui/lhystudiosdrivergui.py | joerlane/meerk40t | a75d78848ff1682640e112111fb6ac4e23e08616 | [
"MIT"
] | null | null | null | # -*- coding: ISO-8859-1 -*-
import wx
from meerk40t.core.units import Length
from meerk40t.gui.icons import icons8_administrative_tools_50
from meerk40t.gui.mwindow import MWindow
from meerk40t.kernel import signal_listener
_ = wx.GetTranslation
FIX_SPEEDS_RATIO = 0.9195
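# ~92%: ratio of actual to rated speed on Lihuiyu boards; applied when the
# "Fix rated to actual speed" option below is enabled.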
class ConfigurationUsb(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationUsb.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_usb_settings = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("USB Settings")), wx.VERTICAL
)
sizer_usb_restrict = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Restrict Multiple Lasers")), wx.VERTICAL
)
sizer_usb_settings.Add(sizer_usb_restrict, 0, 0, 0)
sizer_criteria = wx.BoxSizer(wx.HORIZONTAL)
sizer_usb_restrict.Add(sizer_criteria, 1, wx.EXPAND, 0)
sizer_chip_version = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("CH341 Version")), wx.HORIZONTAL
)
sizer_criteria.Add(sizer_chip_version, 0, wx.EXPAND, 0)
self.text_device_version = wx.TextCtrl(
self, wx.ID_ANY, "", style=wx.TE_READONLY
)
self.text_device_version.SetMinSize((55, 23))
sizer_chip_version.Add(self.text_device_version, 0, 0, 0)
self.spin_device_version = wx.SpinCtrl(self, wx.ID_ANY, "-1", min=-1, max=25)
self.spin_device_version.SetMinSize((40, 23))
self.spin_device_version.SetToolTip(
_(
"Optional: Distinguish between different lasers using the match criteria below.\n-1 match anything. 0+ match exactly that value."
)
)
sizer_chip_version.Add(self.spin_device_version, 0, 0, 0)
sizer_device_index = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Device Index:")), wx.HORIZONTAL
)
sizer_criteria.Add(sizer_device_index, 0, wx.EXPAND, 0)
self.text_device_index = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_READONLY)
self.text_device_index.SetMinSize((55, 23))
sizer_device_index.Add(self.text_device_index, 0, 0, 0)
self.spin_device_index = wx.SpinCtrl(self, wx.ID_ANY, "-1", min=-1, max=5)
self.spin_device_index.SetMinSize((40, 23))
self.spin_device_index.SetToolTip(
_(
"Optional: Distinguish between different lasers using the match criteria below.\n-1 match anything. 0+ match exactly that value."
)
)
sizer_device_index.Add(self.spin_device_index, 0, 0, 0)
sizer_serial = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Serial Number")), wx.HORIZONTAL
)
sizer_usb_restrict.Add(sizer_serial, 0, wx.EXPAND, 0)
self.check_serial_number = wx.CheckBox(self, wx.ID_ANY, _("Serial Number"))
self.check_serial_number.SetToolTip(
_("Require a serial number match for this board")
)
sizer_serial.Add(self.check_serial_number, 0, 0, 0)
self.text_serial_number = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_serial_number.SetMinSize((150, 23))
self.text_serial_number.SetToolTip(
_(
"Board Serial Number to be used to identify a specific laser. If the device fails to match the serial number it will be disconnected."
)
)
sizer_serial.Add(self.text_serial_number, 0, wx.EXPAND, 0)
sizer_buffer = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Write Buffer")), wx.HORIZONTAL
)
sizer_usb_settings.Add(sizer_buffer, 0, wx.EXPAND, 0)
self.checkbox_limit_buffer = wx.CheckBox(
self, wx.ID_ANY, _("Limit Write Buffer")
)
self.checkbox_limit_buffer.SetToolTip(
_(
"Limit the write buffer to a certain amount. Permits on-the-fly command production."
)
)
self.checkbox_limit_buffer.SetValue(1)
sizer_buffer.Add(self.checkbox_limit_buffer, 0, 0, 0)
self.text_buffer_length = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_READONLY)
self.text_buffer_length.SetToolTip(
_("Current number of bytes in the write buffer.")
)
sizer_buffer.Add(self.text_buffer_length, 0, 0, 0)
label_14 = wx.StaticText(self, wx.ID_ANY, "/")
sizer_buffer.Add(label_14, 0, 0, 0)
self.spin_packet_buffer_max = wx.SpinCtrl(
self, wx.ID_ANY, "1500", min=1, max=1000000
)
self.spin_packet_buffer_max.SetToolTip(_("Current maximum write buffer limit."))
sizer_buffer.Add(self.spin_packet_buffer_max, 0, 0, 0)
self.SetSizer(sizer_usb_settings)
self.Layout()
self.Bind(
wx.EVT_SPINCTRL, self.spin_on_device_version, self.spin_device_version
)
self.Bind(
wx.EVT_TEXT_ENTER, self.spin_on_device_version, self.spin_device_version
)
self.Bind(wx.EVT_SPINCTRL, self.spin_on_device_index, self.spin_device_index)
self.Bind(wx.EVT_TEXT_ENTER, self.spin_on_device_index, self.spin_device_index)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_serial_number, self.check_serial_number
)
self.Bind(wx.EVT_TEXT, self.on_text_serial_number, self.text_serial_number)
self.Bind(
wx.EVT_CHECKBOX,
self.on_check_limit_packet_buffer,
self.checkbox_limit_buffer,
)
self.Bind(
wx.EVT_SPINCTRL, self.on_spin_packet_buffer_max, self.spin_packet_buffer_max
)
self.Bind(
wx.EVT_TEXT, self.on_spin_packet_buffer_max, self.spin_packet_buffer_max
)
self.Bind(
wx.EVT_TEXT_ENTER,
self.on_spin_packet_buffer_max,
self.spin_packet_buffer_max,
)
# end wxGlade
self.spin_device_index.SetValue(self.context.usb_index)
self.spin_device_version.SetValue(self.context.usb_version)
if self.context.serial is not None:
self.text_serial_number.SetValue(self.context.serial)
self.check_serial_number.SetValue(self.context.serial_enable)
self.checkbox_limit_buffer.SetValue(self.context.buffer_limit)
self.spin_packet_buffer_max.SetValue(self.context.buffer_max)
# Disables of features not yet supported.
self.check_serial_number.Enable(False)
self.text_serial_number.Enable(False)
def pane_show(self):
# self.context.listen("pipe;buffer", self.on_buffer_update)
pass
def pane_hide(self):
# self.context.unlisten("pipe;buffer", self.on_buffer_update)
pass
@signal_listener("pipe;buffer")
def on_buffer_update(self, origin, value, *args):
self.text_buffer_length.SetValue(str(value))
@signal_listener("pipe;index")
def on_update_pipe_index(self, origin, value):
if origin != self.context.path:
return
self.text_device_index.SetValue(str(value))
@signal_listener("pipe;chipv")
def on_update_pipe_chipv(self, origin, value):
if origin != self.context.path:
return
self.text_device_version.SetValue(str(value))
def on_check_limit_packet_buffer(
self, event=None
): # wxGlade: JobInfo.<event_handler>
self.context.buffer_limit = self.checkbox_limit_buffer.GetValue()
def on_spin_packet_buffer_max(self, event=None): # wxGlade: JobInfo.<event_handler>
self.context.buffer_max = self.spin_packet_buffer_max.GetValue()
def spin_on_device_index(self, event=None):
self.context.usb_index = int(self.spin_device_index.GetValue())
def spin_on_device_version(self, event=None):
self.context.usb_version = int(self.spin_device_version.GetValue())
def on_check_serial_number(
self, event
): # wxGlade: ConfigurationUsb.<event_handler>
self.context.serial_enable = self.check_serial_number.GetValue()
def on_text_serial_number(self, event): # wxGlade: ConfigurationUsb.<event_handler>
self.context.serial = self.text_serial_number.GetValue()
class ConfigurationTcp(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationTcp.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_13 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("TCP Settings")), wx.HORIZONTAL
)
sizer_21 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Address")), wx.VERTICAL
)
sizer_13.Add(sizer_21, 0, 0, 0)
self.text_address = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_address.SetMinSize((150, 23))
        self.text_address.SetToolTip(_("IP/Host of the server computer"))
sizer_21.Add(self.text_address, 0, 0, 0)
sizer_port = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Port")), wx.VERTICAL
)
sizer_13.Add(sizer_port, 0, 0, 0)
self.text_port = wx.TextCtrl(self, wx.ID_ANY, "")
        self.text_port.SetToolTip(_("Port for the TCP connection on the server computer"))
sizer_port.Add(self.text_port, 0, wx.EXPAND, 0)
self.SetSizer(sizer_13)
self.Layout()
self.Bind(wx.EVT_TEXT, self.on_text_address, self.text_address)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_address, self.text_address)
self.Bind(wx.EVT_TEXT, self.on_text_port, self.text_port)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_port, self.text_port)
# end wxGlade
self.text_port.SetValue(str(self.context.port))
self.text_address.SetValue(self.context.address)
def pane_show(self):
pass
def pane_hide(self):
pass
def on_text_address(self, event): # wxGlade: ConfigurationTcp.<event_handler>
self.context.address = self.text_address.GetValue()
def on_text_port(self, event): # wxGlade: ConfigurationTcp.<event_handler>
try:
self.context.port = int(self.text_port.GetValue())
except ValueError:
pass
class ConfigurationLaserPanel(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationLaserPanel.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_27 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Laser Parameters")), wx.VERTICAL
)
sizer_home = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Shift Home Position")), wx.HORIZONTAL
)
sizer_27.Add(sizer_home, 0, wx.EXPAND, 0)
sizer_4 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("X:")), wx.HORIZONTAL
)
sizer_home.Add(sizer_4, 2, wx.EXPAND, 0)
self.spin_home_x = wx.SpinCtrlDouble(
self, wx.ID_ANY, "0.0", min=-50000.0, max=50000.0
)
self.spin_home_x.SetMinSize((80, 23))
self.spin_home_x.SetToolTip(_("Translate Home X"))
sizer_4.Add(self.spin_home_x, 0, 0, 0)
label_12 = wx.StaticText(self, wx.ID_ANY, _("steps"))
sizer_4.Add(label_12, 0, 0, 0)
sizer_2 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Y:")), wx.HORIZONTAL
)
sizer_home.Add(sizer_2, 2, wx.EXPAND, 0)
        # "0.0" is a numeric literal, not user-facing text, so it is not wrapped in _()
        self.spin_home_y = wx.SpinCtrlDouble(
            self, wx.ID_ANY, "0.0", min=-50000.0, max=50000.0
        )
self.spin_home_y.SetMinSize((80, 23))
self.spin_home_y.SetToolTip(_("Translate Home Y"))
sizer_2.Add(self.spin_home_y, 0, 0, 0)
label_11 = wx.StaticText(self, wx.ID_ANY, _("steps"))
sizer_2.Add(label_11, 0, 0, 0)
self.button_home_by_current = wx.Button(self, wx.ID_ANY, _("Set Current"))
self.button_home_by_current.SetToolTip(
_("Set Home Position based on the current position")
)
sizer_home.Add(self.button_home_by_current, 1, 0, 0)
sizer_bed = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Bed Dimensions")), wx.HORIZONTAL
)
sizer_27.Add(sizer_bed, 0, wx.EXPAND, 0)
sizer_14 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Width")), wx.HORIZONTAL
)
sizer_bed.Add(sizer_14, 1, 0, 0)
self.text_bedwidth = wx.TextCtrl(
self,
wx.ID_ANY,
"310mm",
)
self.text_bedwidth.SetMinSize((80, 23))
self.text_bedwidth.SetToolTip(_("Width of the laser bed."))
sizer_14.Add(self.text_bedwidth, 4, 0, 0)
sizer_15 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Height")), wx.HORIZONTAL
)
sizer_bed.Add(sizer_15, 1, 0, 0)
label_3 = wx.StaticText(self, wx.ID_ANY, "")
sizer_15.Add(label_3, 0, 0, 0)
self.text_bedheight = wx.TextCtrl(self, wx.ID_ANY, "210mm")
self.text_bedheight.SetMinSize((80, 23))
self.text_bedheight.SetToolTip(_("Height of the laser bed."))
sizer_15.Add(self.text_bedheight, 4, 0, 0)
sizer_scale_factors = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("User Scale Factor")), wx.HORIZONTAL
)
sizer_27.Add(sizer_scale_factors, 0, wx.EXPAND, 0)
sizer_19 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("X:")), wx.HORIZONTAL
)
sizer_scale_factors.Add(sizer_19, 0, wx.EXPAND, 0)
self.text_scale_x = wx.TextCtrl(self, wx.ID_ANY, "1.000")
self.text_scale_x.SetToolTip(
_("Scale factor for the X-axis. Board units to actual physical units.")
)
sizer_19.Add(self.text_scale_x, 0, 0, 0)
sizer_20 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Y:")), wx.HORIZONTAL
)
sizer_scale_factors.Add(sizer_20, 0, wx.EXPAND, 0)
self.text_scale_y = wx.TextCtrl(self, wx.ID_ANY, "1.000")
self.text_scale_y.SetToolTip(
_("Scale factor for the Y-axis. Board units to actual physical units.")
)
sizer_20.Add(self.text_scale_y, 0, 0, 0)
self.SetSizer(sizer_27)
self.spin_home_x.SetValue(self.context.home_adjust_x)
self.spin_home_y.SetValue(self.context.home_adjust_y)
self.text_bedwidth.SetValue(self.context.bedwidth)
self.text_bedheight.SetValue(self.context.bedheight)
self.text_scale_x.SetValue("%.4f" % self.context.scale_x)
self.text_scale_y.SetValue("%.4f" % self.context.scale_y)
self.Layout()
self.Bind(wx.EVT_TEXT, self.spin_on_home_x, self.spin_home_x)
self.Bind(wx.EVT_TEXT, self.spin_on_home_y, self.spin_home_y)
self.Bind(
wx.EVT_BUTTON, self.on_button_set_home_current, self.button_home_by_current
)
self.Bind(wx.EVT_TEXT, self.on_text_bedwidth, self.text_bedwidth)
self.Bind(wx.EVT_TEXT, self.on_text_bedheight, self.text_bedheight)
self.Bind(wx.EVT_TEXT, self.on_text_x_scale, self.text_scale_x)
self.Bind(wx.EVT_TEXT, self.on_text_y_scale, self.text_scale_y)
def pane_show(self):
pass
def pane_hide(self):
pass
def spin_on_home_x(self, event=None):
self.context.home_adjust_x = int(self.spin_home_x.GetValue())
def spin_on_home_y(self, event=None):
self.context.home_adjust_y = int(self.spin_home_y.GetValue())
def on_button_set_home_current(self, event=None):
native_x = self.context.device.native_x
native_y = self.context.device.native_y
self.context.home_adjust_x = int(native_x)
self.context.home_adjust_y = int(native_y)
self.spin_home_x.SetValue(self.context.home_adjust_x)
self.spin_home_y.SetValue(self.context.home_adjust_y)
def on_text_bedwidth(self, event=None):
try:
Length(self.text_bedwidth.GetValue())
Length(self.text_bedheight.GetValue())
except ValueError:
return
self.context.device.width = self.text_bedwidth.GetValue()
self.context.device.height = self.text_bedheight.GetValue()
self.context.device.bedwidth = self.text_bedwidth.GetValue()
self.context.device.bedheight = self.text_bedheight.GetValue()
self.context.signal(
"bed_size", (self.context.device.bedwidth, self.context.device.bedheight)
)
self.context("viewport_update\n")
def on_text_bedheight(self, event=None):
try:
Length(self.text_bedwidth.GetValue())
Length(self.text_bedheight.GetValue())
except ValueError:
return
self.context.device.width = self.text_bedwidth.GetValue()
self.context.device.height = self.text_bedheight.GetValue()
self.context.device.bedwidth = self.text_bedwidth.GetValue()
self.context.device.bedheight = self.text_bedheight.GetValue()
self.context.signal(
"bed_size", (self.context.device.bedwidth, self.context.device.bedheight)
)
self.context("viewport_update\n")
def on_text_x_scale(self, event=None):
try:
self.context.device.scale_x = float(self.text_scale_x.GetValue())
self.context.device.scale_y = float(self.text_scale_y.GetValue())
self.context.signal(
"scale_step", (self.context.device.scale_x, self.context.device.scale_y)
)
self.context("viewport_update\n")
except ValueError:
pass
def on_text_y_scale(self, event=None):
try:
self.context.device.scale_x = float(self.text_scale_x.GetValue())
self.context.device.scale_y = float(self.text_scale_y.GetValue())
self.context.signal(
"scale_step", (self.context.device.scale_x, self.context.device.scale_y)
)
self.context("viewport_update\n")
except ValueError:
pass
class ConfigurationInterfacePanel(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationInterfacePanel.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_page_1 = wx.BoxSizer(wx.VERTICAL)
sizer_name = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Device Name")), wx.HORIZONTAL
)
sizer_page_1.Add(sizer_name, 0, wx.EXPAND, 0)
self.text_device_label = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_device_label.SetToolTip(
_("The internal label to be used for this device")
)
sizer_name.Add(self.text_device_label, 1, 0, 0)
sizer_config = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Configuration")), wx.HORIZONTAL
)
sizer_page_1.Add(sizer_config, 0, wx.EXPAND, 0)
sizer_board = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Board Setup")), wx.HORIZONTAL
)
sizer_config.Add(sizer_board, 0, wx.EXPAND, 0)
self.combobox_board = wx.ComboBox(
self,
wx.ID_ANY,
choices=["M2", "B2", "M", "M1", "A", "B", "B1"],
style=wx.CB_DROPDOWN,
)
self.combobox_board.SetToolTip(
_("Select the board to use. This has an effects the speedcodes used.")
)
self.combobox_board.SetSelection(0)
sizer_board.Add(self.combobox_board, 1, 0, 0)
sizer_17 = wx.BoxSizer(wx.VERTICAL)
sizer_config.Add(sizer_17, 1, wx.EXPAND, 0)
self.checkbox_flip_x = wx.CheckBox(self, wx.ID_ANY, _("Flip X"))
self.checkbox_flip_x.SetToolTip(
_("Flip the Right and Left commands sent to the controller")
)
sizer_17.Add(self.checkbox_flip_x, 0, 0, 0)
self.checkbox_home_right = wx.CheckBox(self, wx.ID_ANY, _("Home Right"))
self.checkbox_home_right.SetToolTip(
_("Indicates the device Home is on the right")
)
sizer_17.Add(self.checkbox_home_right, 0, 0, 0)
label_1 = wx.StaticText(self, wx.ID_ANY, "")
sizer_17.Add(label_1, 0, 0, 0)
sizer_16 = wx.BoxSizer(wx.VERTICAL)
sizer_config.Add(sizer_16, 1, wx.EXPAND, 0)
self.checkbox_flip_y = wx.CheckBox(self, wx.ID_ANY, _("Flip Y"))
self.checkbox_flip_y.SetToolTip(
_("Flip the Top and Bottom commands sent to the controller")
)
sizer_16.Add(self.checkbox_flip_y, 0, 0, 0)
self.checkbox_home_bottom = wx.CheckBox(self, wx.ID_ANY, _("Home Bottom"))
self.checkbox_home_bottom.SetToolTip(
_("Indicates the device Home is on the bottom")
)
sizer_16.Add(self.checkbox_home_bottom, 0, 0, 0)
self.checkbox_swap_xy = wx.CheckBox(self, wx.ID_ANY, _("Swap X and Y"))
self.checkbox_swap_xy.SetToolTip(
_("Swaps the X and Y axis. This happens before the FlipX and FlipY.")
)
sizer_16.Add(self.checkbox_swap_xy, 0, 0, 0)
sizer_interface = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Interface")), wx.VERTICAL
)
sizer_page_1.Add(sizer_interface, 0, wx.EXPAND, 0)
sizer_interface_radio = wx.BoxSizer(wx.HORIZONTAL)
sizer_interface.Add(sizer_interface_radio, 0, wx.EXPAND, 0)
self.radio_usb = wx.RadioButton(self, wx.ID_ANY, _("USB"), style=wx.RB_GROUP)
self.radio_usb.SetValue(1)
self.radio_usb.SetToolTip(
_(
"Select this if you have an m2-nano controller physically connected to this computer using a USB cable."
)
)
sizer_interface_radio.Add(self.radio_usb, 1, 0, 0)
self.radio_tcp = wx.RadioButton(self, wx.ID_ANY, _("Networked"))
self.radio_tcp.SetToolTip(
_(
"Select this to connect this instance of Meerk40t to another instance of Meerk40t running as a remote server."
)
)
sizer_interface_radio.Add(self.radio_tcp, 4, 0, 0)
self.radio_mock = wx.RadioButton(self, wx.ID_ANY, _("Mock"))
self.radio_mock.SetToolTip(
_(
"Select this only for debugging without a physical laser available. Execute a burn as if there was an m2-nano controller physically connected by USB."
)
)
sizer_interface_radio.Add(self.radio_mock, 1, 0, 0)
self.panel_usb_settings = ConfigurationUsb(
self, wx.ID_ANY, context=self.context
)
sizer_interface.Add(self.panel_usb_settings, 0, wx.EXPAND, 0)
self.panel_tcp_config = ConfigurationTcp(self, wx.ID_ANY, context=self.context)
sizer_interface.Add(self.panel_tcp_config, 0, wx.EXPAND, 0)
self.ConfigurationLaserPanel = ConfigurationLaserPanel(
self, wx.ID_ANY, context=self.context
)
sizer_page_1.Add(self.ConfigurationLaserPanel, 1, wx.EXPAND, 0)
self.SetSizer(sizer_page_1)
self.Layout()
self.Bind(wx.EVT_TEXT, self.on_device_label, self.text_device_label)
self.Bind(wx.EVT_COMBOBOX, self.on_combobox_boardtype, self.combobox_board)
self.Bind(wx.EVT_CHECKBOX, self.on_check_flip_x, self.checkbox_flip_x)
self.Bind(wx.EVT_CHECKBOX, self.on_check_home_right, self.checkbox_home_right)
self.Bind(wx.EVT_CHECKBOX, self.on_check_flip_y, self.checkbox_flip_y)
self.Bind(wx.EVT_CHECKBOX, self.on_check_home_bottom, self.checkbox_home_bottom)
self.Bind(wx.EVT_CHECKBOX, self.on_check_swapxy, self.checkbox_swap_xy)
self.Bind(wx.EVT_RADIOBUTTON, self.on_radio_interface, self.radio_usb)
self.Bind(wx.EVT_RADIOBUTTON, self.on_radio_interface, self.radio_tcp)
self.Bind(wx.EVT_RADIOBUTTON, self.on_radio_interface, self.radio_mock)
# end wxGlade
self.text_device_label.SetValue(self.context.label)
self.checkbox_swap_xy.SetValue(self.context.swap_xy)
self.checkbox_flip_x.SetValue(self.context.flip_x)
self.checkbox_flip_y.SetValue(self.context.flip_y)
self.checkbox_home_right.SetValue(self.context.home_right)
self.checkbox_home_bottom.SetValue(self.context.home_bottom)
self.combobox_board.SetValue(self.context.board)
if self.context.mock:
self.panel_tcp_config.Hide()
self.panel_usb_settings.Hide()
self.radio_mock.SetValue(True)
elif self.context.networked:
self.panel_usb_settings.Hide()
self.radio_tcp.SetValue(True)
else:
self.radio_usb.SetValue(True)
self.panel_tcp_config.Hide()
def pane_show(self):
self.ConfigurationLaserPanel.pane_show()
self.panel_usb_settings.pane_show()
self.panel_tcp_config.pane_show()
def pane_hide(self):
self.ConfigurationLaserPanel.pane_hide()
self.panel_usb_settings.pane_hide()
self.panel_tcp_config.pane_hide()
def on_combobox_boardtype(self, event=None):
self.context.board = self.combobox_board.GetValue()
def on_check_swapxy(self, event=None):
self.context.swap_xy = self.checkbox_swap_xy.GetValue()
self.context("viewport_update\n")
def on_check_flip_x(self, event=None):
self.context.flip_x = self.checkbox_flip_x.GetValue()
self.context("viewport_update\n")
def on_check_home_right(self, event=None):
self.context.home_right = self.checkbox_home_right.GetValue()
self.context.origin_x = 1.0 if self.context.home_right else 0.0
self.context("viewport_update\n")
def on_check_flip_y(self, event=None):
self.context.flip_y = self.checkbox_flip_y.GetValue()
self.context("viewport_update\n")
def on_check_home_bottom(self, event=None):
self.context.home_bottom = self.checkbox_home_bottom.GetValue()
self.context.origin_y = 1.0 if self.context.home_bottom else 0.0
self.context("viewport_update\n")
def on_device_label(
self, event
): # wxGlade: ConfigurationInterfacePanel.<event_handler>
self.context.label = self.text_device_label.GetValue()
self.context.signal("device;renamed")
def on_radio_interface(
self, event
): # wxGlade: ConfigurationInterfacePanel.<event_handler>
if self.radio_usb.GetValue():
self.panel_tcp_config.Hide()
self.panel_usb_settings.Show()
self.context.networked = False
self.context.mock = False
self.context(".network_update\n")
if self.radio_tcp.GetValue():
self.panel_tcp_config.Show()
self.panel_usb_settings.Hide()
self.context.networked = True
self.context.mock = False
self.context(".network_update\n")
if self.radio_mock.GetValue():
self.panel_tcp_config.Hide()
self.panel_usb_settings.Hide()
self.context.networked = False
self.context.mock = True
self.context(".network_update\n")
self.Layout()
class ConfigurationSetupPanel(wx.Panel):
def __init__(self, *args, context=None, **kwds):
# begin wxGlade: ConfigurationSetupPanel.__init__
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
sizer_page_2 = wx.BoxSizer(wx.VERTICAL)
sizer_general = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("General Options")), wx.VERTICAL
)
sizer_page_2.Add(sizer_general, 0, wx.EXPAND, 0)
self.check_autolock = wx.CheckBox(self, wx.ID_ANY, _("Automatically lock rail"))
self.check_autolock.SetToolTip(_("Lock rail after operations are finished."))
self.check_autolock.SetValue(1)
sizer_general.Add(self.check_autolock, 0, 0, 0)
self.check_plot_shift = wx.CheckBox(self, wx.ID_ANY, _("Pulse Grouping"))
self.check_plot_shift.SetToolTip(
"\n".join(
[
_(
"Pulse Grouping is an alternative means of reducing the incidence of stuttering, allowing you potentially to burn at higher speeds."
),
"",
_(
"It works by swapping adjacent on or off bits to group on and off together and reduce the number of switches."
),
"",
_(
'As an example, instead of X_X_ it will burn XX__ - because the laser beam is overlapping, and because a bit is only moved at most 1/1000", the difference should not be visible even under magnification.'
),
_(
"Whilst the Pulse Grouping option in Operations are set for that operation before the job is spooled, and cannot be changed on the fly, this global Pulse Grouping option is checked as instructions are sent to the laser and can turned on and off during the burn process. Because the changes are believed to be small enough to be undetectable, you may wish to leave this permanently checked."
),
]
),
)
sizer_general.Add(self.check_plot_shift, 0, 0, 0)
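        # Sketch of the swap described in the tooltip above (illustrative, not
        # MeerK40t's actual implementation): "X_X_" becomes "XX__" by swapping
        # an adjacent off/on pair so equal pulses group together, e.g.
        #   >>> s = list("X_X_"); s[1], s[2] = s[2], s[1]; "".join(s)
        #   'XX__'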
self.check_strict = wx.CheckBox(self, wx.ID_ANY, _("Strict"))
self.check_strict.SetToolTip(
_(
"Forces the device to enter and exit programmed speed mode from the same direction.\nThis may prevent devices like the M2-V4 and earlier from having issues. Not typically needed."
)
)
sizer_general.Add(self.check_strict, 0, 0, 0)
self.check_alternative_raster = wx.CheckBox(
self, wx.ID_ANY, _("Alt Raster Style")
)
sizer_general.Add(self.check_alternative_raster, 0, 0, 0)
self.check_twitches = wx.CheckBox(self, wx.ID_ANY, _("Twitch Vectors"))
sizer_general.Add(self.check_twitches, 0, 0, 0)
sizer_jog = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Rapid Jog")), wx.VERTICAL
)
sizer_page_2.Add(sizer_jog, 0, 0, 0)
sizer_23 = wx.BoxSizer(wx.VERTICAL)
sizer_jog.Add(sizer_23, 0, wx.EXPAND, 0)
self.check_rapid_moves_between = wx.CheckBox(
self, wx.ID_ANY, _("Rapid Moves Between Objects")
)
self.check_rapid_moves_between.SetToolTip(
_("Perform rapid moves between the objects")
)
self.check_rapid_moves_between.SetValue(1)
sizer_23.Add(self.check_rapid_moves_between, 0, 0, 0)
sizer_25 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Minimum Jog Distance")), wx.HORIZONTAL
)
sizer_23.Add(sizer_25, 0, 0, 0)
self.text_minimum_jog_distance = wx.TextCtrl(self, wx.ID_ANY, "")
sizer_25.Add(self.text_minimum_jog_distance, 0, 0, 0)
self.radio_box_jog_method = wx.RadioBox(
self,
wx.ID_ANY,
_("Jog Method"),
choices=[_("Default"), _("Reset"), _("Finish")],
majorDimension=3,
style=wx.RA_SPECIFY_ROWS,
)
self.radio_box_jog_method.SetToolTip(
_(
"Changes the method of jogging. Default are NSE jogs. Reset are @NSE jogs. Finished are @FNSE jogs followed by a wait."
)
)
self.radio_box_jog_method.SetSelection(0)
sizer_jog.Add(self.radio_box_jog_method, 0, 0, 0)
sizer_rapid_override = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Rapid Override")), wx.VERTICAL
)
sizer_page_2.Add(sizer_rapid_override, 0, wx.EXPAND, 0)
self.check_override_rapid = wx.CheckBox(
self, wx.ID_ANY, _("Override Rapid Movements")
)
sizer_rapid_override.Add(self.check_override_rapid, 0, 0, 0)
sizer_36 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("X Travel Speed:")), wx.HORIZONTAL
)
sizer_rapid_override.Add(sizer_36, 0, wx.EXPAND, 0)
self.text_rapid_x = wx.TextCtrl(self, wx.ID_ANY, "")
sizer_36.Add(self.text_rapid_x, 0, 0, 0)
label_2 = wx.StaticText(self, wx.ID_ANY, _("mm/s"))
sizer_36.Add(label_2, 0, 0, 0)
sizer_35 = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Y Travel Speed:")), wx.HORIZONTAL
)
sizer_rapid_override.Add(sizer_35, 0, wx.EXPAND, 0)
self.text_rapid_y = wx.TextCtrl(self, wx.ID_ANY, "")
sizer_35.Add(self.text_rapid_y, 0, 0, 0)
label_4 = wx.StaticText(self, wx.ID_ANY, _("mm/s"))
sizer_35.Add(label_4, 0, 0, 0)
sizer_speed = wx.StaticBoxSizer(
wx.StaticBox(self, wx.ID_ANY, _("Speed:")), wx.VERTICAL
)
sizer_page_2.Add(sizer_speed, 0, wx.EXPAND, 0)
sizer_32 = wx.BoxSizer(wx.HORIZONTAL)
sizer_speed.Add(sizer_32, 0, wx.EXPAND, 0)
self.check_fix_speeds = wx.CheckBox(
self, wx.ID_ANY, _("Fix rated to actual speed")
)
self.check_fix_speeds.SetToolTip(
_(
"Correct for speed invalidity. Lihuiyu Studios speeds are 92% of the correctly rated speed"
)
)
sizer_32.Add(self.check_fix_speeds, 1, 0, 0)
self.text_fix_rated_speed = wx.TextCtrl(
self, wx.ID_ANY, str(FIX_SPEEDS_RATIO), style=wx.TE_READONLY
)
sizer_32.Add(self.text_fix_rated_speed, 1, 0, 0)
sizer_29 = wx.BoxSizer(wx.HORIZONTAL)
sizer_speed.Add(sizer_29, 0, wx.EXPAND, 0)
self.check_scale_speed = wx.CheckBox(self, wx.ID_ANY, _("Scale Speed"))
self.check_scale_speed.SetToolTip(
_(
"Scale any given speeds to this device by this amount. If set to 1.1, all speeds are 10% faster than rated."
)
)
sizer_29.Add(self.check_scale_speed, 1, 0, 0)
self.text_speed_scale_amount = wx.TextCtrl(self, wx.ID_ANY, "1.000")
self.text_speed_scale_amount.SetToolTip(
_(
"Scales the machine's speed ratio so that rated speeds speeds multiplied by this ratio."
)
)
sizer_29.Add(self.text_speed_scale_amount, 1, wx.EXPAND, 0)
sizer_30 = wx.BoxSizer(wx.HORIZONTAL)
sizer_speed.Add(sizer_30, 0, wx.EXPAND, 0)
self.check_max_speed_vector = wx.CheckBox(
self, wx.ID_ANY, _("Max Speed (Vector)")
)
self.check_max_speed_vector.SetToolTip(
_("Limit the maximum vector speed to this value")
)
sizer_30.Add(self.check_max_speed_vector, 1, 0, 0)
self.text_max_speed_vector = wx.TextCtrl(self, wx.ID_ANY, "100")
self.text_max_speed_vector.SetToolTip(
_("maximum speed at which all greater speeds are limited")
)
sizer_30.Add(self.text_max_speed_vector, 1, 0, 0)
sizer_31 = wx.BoxSizer(wx.HORIZONTAL)
sizer_speed.Add(sizer_31, 0, wx.EXPAND, 0)
self.check_max_speed_raster = wx.CheckBox(
self, wx.ID_ANY, _("Max Speed (Raster)")
)
self.check_max_speed_raster.SetToolTip(
_("Limit the maximum raster speed to this value")
)
sizer_31.Add(self.check_max_speed_raster, 1, 0, 0)
self.text_max_speed_raster = wx.TextCtrl(self, wx.ID_ANY, "750")
self.text_max_speed_raster.SetToolTip(
_("maximum speed at which all greater speeds are limited")
)
sizer_31.Add(self.text_max_speed_raster, 1, 0, 0)
self.SetSizer(sizer_page_2)
self.Layout()
self.Bind(wx.EVT_CHECKBOX, self.on_check_autolock, self.check_autolock)
self.Bind(wx.EVT_CHECKBOX, self.on_check_pulse_shift, self.check_plot_shift)
self.Bind(wx.EVT_CHECKBOX, self.on_check_strict, self.check_strict)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_alt_raster, self.check_alternative_raster
)
self.Bind(wx.EVT_CHECKBOX, self.on_check_twitches, self.check_twitches)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_rapid_between, self.check_rapid_moves_between
)
self.Bind(
wx.EVT_TEXT, self.on_text_min_jog_distance, self.text_minimum_jog_distance
)
self.Bind(wx.EVT_RADIOBOX, self.on_jog_method_radio, self.radio_box_jog_method)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_override_rapid, self.check_override_rapid
)
self.Bind(wx.EVT_TEXT, self.on_text_rapid_x, self.text_rapid_x)
self.Bind(wx.EVT_TEXT, self.on_text_rapid_y, self.text_rapid_y)
self.Bind(wx.EVT_CHECKBOX, self.on_check_fix_speeds, self.check_fix_speeds)
self.Bind(wx.EVT_CHECKBOX, self.on_check_scale_speed, self.check_scale_speed)
self.Bind(wx.EVT_TEXT, self.on_text_speed_scale, self.text_speed_scale_amount)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_max_speed_vector, self.check_max_speed_vector
)
self.Bind(
wx.EVT_TEXT, self.on_text_speed_max_vector, self.text_max_speed_vector
)
self.Bind(
wx.EVT_CHECKBOX, self.on_check_max_speed_raster, self.check_max_speed_raster
)
self.Bind(
wx.EVT_TEXT, self.on_text_speed_max_raster, self.text_max_speed_raster
)
# end wxGlade
self.check_autolock.SetValue(self.context.autolock)
self.check_plot_shift.SetValue(self.context.plot_shift)
self.check_strict.SetValue(self.context.strict)
self.check_alternative_raster.SetValue(self.context.nse_raster)
self.check_twitches.SetValue(self.context.twitches)
self.check_rapid_moves_between.SetValue(self.context.opt_rapid_between)
self.text_minimum_jog_distance.SetValue(str(self.context.opt_jog_minimum))
self.radio_box_jog_method.SetSelection(self.context.opt_jog_mode)
self.check_override_rapid.SetValue(self.context.rapid_override)
self.text_rapid_x.SetValue(str(self.context.rapid_override_speed_x))
self.text_rapid_y.SetValue(str(self.context.rapid_override_speed_y))
self.check_fix_speeds.SetValue(self.context.fix_speeds)
self.check_scale_speed.SetValue(self.context.scale_speed_enabled)
self.text_speed_scale_amount.SetValue(str(self.context.scale_speed))
self.check_max_speed_vector.SetValue(self.context.max_speed_vector_enabled)
self.text_max_speed_vector.SetValue(str(self.context.max_speed_vector))
self.check_max_speed_raster.SetValue(self.context.max_speed_raster_enabled)
self.text_max_speed_raster.SetValue(str(self.context.max_speed_raster))
# Disables of features not yet supported.
self.text_max_speed_raster.Enable(False)
self.text_max_speed_vector.Enable(False)
self.text_speed_scale_amount.Enable(False)
self.check_max_speed_raster.Enable(False)
self.check_max_speed_vector.Enable(False)
self.check_scale_speed.Enable(False)
def pane_show(self):
pass
def pane_hide(self):
pass
def on_check_fix_speeds(self, event=None):
self.context.fix_speeds = self.check_fix_speeds.GetValue()
self.text_fix_rated_speed.SetValue(
"1.000" if self.context.fix_speeds else str(FIX_SPEEDS_RATIO)
)
def on_check_strict(self, event=None):
self.context.strict = self.check_strict.GetValue()
def on_check_autolock(self, event=None):
self.context.autolock = self.check_autolock.GetValue()
def on_check_pulse_shift(
self, event=None
): # wxGlade: LhystudiosDriver.<event_handler>
self.context.plot_shift = self.check_plot_shift.GetValue()
try:
self.context.plot_planner.force_shift = self.context.plot_shift
except (AttributeError, TypeError):
pass
def on_check_alt_raster(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.nse_raster = self.check_alternative_raster.GetValue()
def on_check_twitches(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.twitches = self.check_twitches.GetValue()
def on_check_rapid_between(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.opt_rapid_between = self.check_rapid_moves_between.GetValue()
def on_text_min_jog_distance(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.opt_jog_minimum = int(
self.text_minimum_jog_distance.GetValue()
)
except ValueError:
pass
def on_jog_method_radio(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.opt_jog_mode = self.radio_box_jog_method.GetSelection()
def on_check_override_rapid(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.check_override_rapid.SetValue(self.context.rapid_override)
def on_text_rapid_x(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.rapid_override_speed_x = float(self.text_rapid_x.GetValue())
except ValueError:
pass
def on_text_rapid_y(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.rapid_override_speed_y = float(self.text_rapid_y.GetValue())
except ValueError:
pass
def on_check_scale_speed(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.scale_speed_enabled = self.check_scale_speed.GetValue()
def on_text_speed_scale(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.scale_speed = float(self.text_speed_scale_amount.GetValue())
except ValueError:
pass
def on_check_max_speed_vector(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.max_speed_vector_enabled = self.check_max_speed_vector.GetValue()
def on_text_speed_max_vector(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.max_speed_vector = float(self.text_max_speed_vector.GetValue())
except ValueError:
pass
def on_check_max_speed_raster(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
self.context.max_speed_raster_enabled = self.check_max_speed_raster.GetValue()
def on_text_speed_max_raster(
self, event
): # wxGlade: ConfigurationSetupPanel.<event_handler>
try:
self.context.max_speed_raster = float(self.text_max_speed_raster.GetValue())
except ValueError:
pass
class LhystudiosDriverGui(MWindow):
def __init__(self, *args, **kwds):
super().__init__(374, 734, *args, **kwds)
self.context = self.context.device
_icon = wx.NullIcon
_icon.CopyFromBitmap(icons8_administrative_tools_50.GetBitmap())
self.SetIcon(_icon)
self.SetTitle(_(_("Lhystudios-Configuration")))
# self.notebook_main = wx.Notebook(self, wx.ID_ANY)
self.notebook_main = wx.aui.AuiNotebook(
self,
-1,
style=wx.aui.AUI_NB_TAB_EXTERNAL_MOVE
| wx.aui.AUI_NB_SCROLL_BUTTONS
| wx.aui.AUI_NB_TAB_SPLIT
| wx.aui.AUI_NB_TAB_MOVE,
)
self.ConfigurationPanel = ConfigurationInterfacePanel(
self.notebook_main, wx.ID_ANY, context=self.context
)
self.notebook_main.AddPage(self.ConfigurationPanel, _("Configuration"))
self.SetupPanel = ConfigurationSetupPanel(
self.notebook_main, wx.ID_ANY, context=self.context
)
self.notebook_main.AddPage(self.SetupPanel, _("Setup"))
self.Layout()
self.add_module_delegate(self.ConfigurationPanel)
self.add_module_delegate(self.SetupPanel)
def window_open(self):
self.SetupPanel.pane_show()
self.ConfigurationPanel.pane_show()
def window_close(self):
self.SetupPanel.pane_hide()
self.ConfigurationPanel.pane_hide()
def window_preserve(self):
return False
| 39.409131 | 414 | 0.646111 | 5,919 | 44,887 | 4.613617 | 0.08346 | 0.060019 | 0.02307 | 0.035447 | 0.666911 | 0.536509 | 0.407902 | 0.341475 | 0.257873 | 0.203164 | 0 | 0.018295 | 0.249872 | 44,887 | 1,138 | 415 | 39.443761 | 0.79273 | 0.036091 | 0 | 0.252155 | 0 | 0.011853 | 0.102989 | 0.000555 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067888 | false | 0.019397 | 0.005388 | 0.001078 | 0.085129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
12112f7d0f3727152084ea033286d88e53e8c329 | 2,885 | py | Python | module3-nosql-and-document-oriented-databases/rpg_mongo.py | krsmith/DS-Unit-3-Sprint-2-SQL-and-Databases | 9617528ad5fd23354623926b819f98f9a063d252 | [
"MIT"
] | null | null | null | module3-nosql-and-document-oriented-databases/rpg_mongo.py | krsmith/DS-Unit-3-Sprint-2-SQL-and-Databases | 9617528ad5fd23354623926b819f98f9a063d252 | [
"MIT"
] | null | null | null | module3-nosql-and-document-oriented-databases/rpg_mongo.py | krsmith/DS-Unit-3-Sprint-2-SQL-and-Databases | 9617528ad5fd23354623926b819f98f9a063d252 | [
"MIT"
] | null | null | null | import pymongo
import json
import urllib.request
connection_string = 'mongodb://<USERNAME>:<PASSWORD>@cluster0-shard-00-00-pjgev.mongodb.net:27017,cluster0-shard-00-01-pjgev.mongodb.net:27017,cluster0-shard-00-02-pjgev.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true'
client = pymongo.MongoClient(connection_string)
db = client.test
valeries_doc = {'favorite animal': 'dolphin'}
if not db.test.find_one(valeries_doc):
db.test.insert_one(valeries_doc)
db.test.find_one(valeries_doc)
url = 'https://raw.githubusercontent.com/LambdaSchool/Django-RPG/master/testdata.json'
response = urllib.request.urlopen(url)
rpg_data = json.loads(response.read().decode())
db_rpg = client.rpg
db_rpg.rpg.insert_many(rpg_data)
# 1. How many total Characters are there?
print('Character Counts')
total_char = db_rpg.rpg.find({'model': 'charactercreator.character'})
# Cursor.count() was removed in PyMongo 4; count_documents() is the
# supported replacement.
char_count = db_rpg.rpg.count_documents({'model': 'charactercreator.character'})
print('Total Characters', char_count)
# 2. How many of each specific subclass?
for subclass in ['fighter', 'mage', 'cleric', 'thief']:
    sub_count = db_rpg.rpg.count_documents({'model': 'charactercreator.'+subclass})
    print('Total', subclass, ':', sub_count)
# 3. How many total Items?
items_count = db_rpg.rpg.count_documents({'model': 'armory.item'})
print('Total Items:', items_count)
# 4. How many of the Items are weapons? How many are not?
weapons_count = db_rpg.rpg.count_documents({'model': 'armory.weapon'})
print('Total Weapons', weapons_count)
print('Total Non-Weapons', items_count - weapons_count)
# 5. How many Items does each character have? (Return first 20 rows)
print('\nCharacter Item Counts')
for character in total_char[:20]:
print(character['fields']['name'], len(character['fields']['inventory']))
# 6. How many Weapons does each character have? (Return first 20 rows)
print('\nCharacter Weapon Counts')
total_char = db_rpg.rpg.find({'model': 'charactercreator.character'})
weapons = db_rpg.rpg.find({'model': 'armory.weapon'})
weapons_keys = [weapon['pk'] for weapon in weapons]
for character in total_char[:20]:
name = character['fields']['name']
char_items = character['fields']['inventory']
char_weapons = len([item for item in char_items if item in weapons_keys])
print(name, char_weapons)
# 7. On average, how many Items does each Character have?
print('\nAverage Item and Weapon Counts')
total_char = db_rpg.rpg.find({'model': 'charactercreator.character'})
total_items = 0
total_weapons = 0
for character in total_char:
char_items = character['fields']['inventory']
total_items += len(char_items)
total_weapons += len([item for item in char_items if item in weapons_keys])
avg_items = total_items / char_count
print('Average Items', avg_items)
# 8. On average, how many Weapons does each character have?
avg_weapons = total_weapons / char_count
print('Average Weapons', avg_weapons)
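# A hedged alternative: the same average can be computed server-side with an
# aggregation pipeline (sketch only; assumes the collection layout loaded above).
pipeline = [
    {'$match': {'model': 'charactercreator.character'}},
    {'$project': {'n_items': {'$size': '$fields.inventory'}}},
    {'$group': {'_id': None, 'avg_items': {'$avg': '$n_items'}}},
]
for doc in db_rpg.rpg.aggregate(pipeline):
    print('Average Items (aggregation):', doc['avg_items'])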
| 38.466667 | 264 | 0.744194 | 423 | 2,885 | 4.933806 | 0.264775 | 0.021562 | 0.030666 | 0.040249 | 0.426449 | 0.374221 | 0.327264 | 0.18735 | 0.18735 | 0.18735 | 0 | 0.019562 | 0.114038 | 2,885 | 74 | 265 | 38.986486 | 0.796948 | 0.141768 | 0 | 0.137255 | 0 | 0.019608 | 0.318606 | 0.129712 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.019608 | 0.058824 | 0 | 0.058824 | 0.254902 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
12139f49154f34b8c8740a61c1db66b2f3f6df64 | 453 | py | Python | airodb_analyzer/ui/aboutBoxForm.py | jeremydumais/airodb-analyzer | 2056b95891b3543c51758bc586a98b90e54c9670 | [
"MIT"
] | null | null | null | airodb_analyzer/ui/aboutBoxForm.py | jeremydumais/airodb-analyzer | 2056b95891b3543c51758bc586a98b90e54c9670 | [
"MIT"
] | null | null | null | airodb_analyzer/ui/aboutBoxForm.py | jeremydumais/airodb-analyzer | 2056b95891b3543c51758bc586a98b90e54c9670 | [
"MIT"
] | null | null | null | from PyQt5 import QtWidgets, uic
import qdarkgraystyle
class Ui_AboutBoxForm(QtWidgets.QDialog):
def __init__(self):
super(Ui_AboutBoxForm, self).__init__()
uic.loadUi('airodb_analyzer/designer/aboutBoxForm.ui', self)
self.setStyleSheet(qdarkgraystyle.load_stylesheet())
        # Signals
self.buttonClose.clicked.connect(self.buttonCloseClick)
def buttonCloseClick(self):
self.close() | 34.846154 | 68 | 0.717439 | 47 | 453 | 6.659574 | 0.617021 | 0.089457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00271 | 0.18543 | 453 | 13 | 69 | 34.846154 | 0.845528 | 0.015453 | 0 | 0 | 0 | 0 | 0.089686 | 0.089686 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1216510f06846187ec2c36e2c66ddc29c2a13123 | 4,263 | py | Python | flashback/thread.py | miroli/flashback | a7ac89a522c09f3b2277fb0cf3a8cf8da95ae3d1 | [
"MIT"
] | 1 | 2017-06-09T13:14:35.000Z | 2017-06-09T13:14:35.000Z | flashback/thread.py | miroli/flashback | a7ac89a522c09f3b2277fb0cf3a8cf8da95ae3d1 | [
"MIT"
] | 1 | 2016-12-27T19:23:34.000Z | 2016-12-27T19:23:34.000Z | flashback/thread.py | miroli/flashback | a7ac89a522c09f3b2277fb0cf3a8cf8da95ae3d1 | [
"MIT"
] | 1 | 2016-12-26T16:25:18.000Z | 2016-12-26T16:25:18.000Z | # -*- coding: utf-8 -*-
import csv
import json
from collections import Counter
import requests
from bs4 import BeautifulSoup
from .post import Post
class TrashException(Exception):
pass
class AuthException(Exception):
pass
class LoginException(Exception):
pass
class NotFoundException(Exception):
pass
errors = [
((u'Denna tråd har flyttats till "Papperskorgen". '
u'Ett delforum för trådar med för låg kvalitet.'),
TrashException('Login required for threads in trashcan.')),
((u'du har inte behörighet till den här sidan. '
u'Det kan bero på en av flera anledningar:'),
AuthException('Your account lacks sufficient permissions.')),
((u'Du är inte inloggad eller också har du inte behörighet'
u' att se den här sidan. Det kan bero på en av flera'),
LoginException('Login required for this particular thread.')),
((u'Du angav ett ogiltigt Ämne. Om du följde en giltig'
u' länk, var vänlig och kontakta den'),
NotFoundException('Thread does not exist.')),
((u'Inget Ämne specifierat. Om du följde en giltig'
u' länk var vänlig och meddela den'),
NotFoundException('Thread does not exist.')),
]
class Thread():
"""Temp"""
def __init__(self, base_url):
self.base_url = base_url
self.posts = []
def get(self, requests=requests):
r = requests.get(self.base_url)
self.start = BeautifulSoup(r.text, 'html.parser')
for message, thread_exception in errors:
if message in self.start.text:
raise thread_exception
self.append_page(self.start)
page_count = self._get_page_count(self.start)
for page in range(2, page_count + 1):
slug = 'p{page}'.format(page=str(page))
url = ''.join([self.base_url, slug])
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
self.append_page(soup)
def append_page(self, soup):
for div in soup.select('#posts > div')[:-1]:
self.append_post(div)
def append_post(self, soup):
post = Post(soup)
self.posts.append(post)
def describe(self):
        # posts hold Post objects (see to_csv), so use attribute access
        counter = Counter([x.user_name for x in self.posts])
common_authors = counter.most_common(5)
return {
'common_authors': common_authors
}
@property
def title(self):
"""Title of thread"""
return self.start.title.text[0:-18]
@property
def section(self):
navbar = self.start.find('table', {'class': 'forum-navbar'})
breadcrumbs = navbar.find('tr', {'valign': 'bottom'}).find_all('a')
section = breadcrumbs[-1]
return {'id': section['href'][1:], 'name': section.text}
def _get_page_count(self, soup):
"""Finds the number of pages for the given thread
<td class="vbmenu_control smallfont2 delim">Sidan 1 av 15</td>
"""
element = soup.select_one('td.vbmenu_control.smallfont2.delim')
if element:
page_count = element.text.split(' ')[-1]
return int(page_count)
return 1
def to_csv(self, fname):
"""Saves the posts to a CSV file"""
        # newline='' keeps csv from inserting blank rows; values are written
        # as str (the old .encode('utf-8') calls were Python 2 leftovers that
        # would emit b'...' literals under Python 3)
        with open(fname, 'w', newline='', encoding='utf-8') as csvfile:
            headers = ['id', 'user_name', 'time', 'content']
            writer = csv.DictWriter(csvfile, fieldnames=headers)
            writer.writeheader()
            for p in self.posts:
                row = {'id': p.id,
                       'user_name': p.user_name,
                       'time': p.timestamp,
                       'content': p.content}
                writer.writerow(row)
def to_json(self, fname):
"""Saves the posts to a JSON file"""
        # Post objects are not JSON serializable; dump the same fields that
        # to_csv uses (assumes Post exposes id/user_name/timestamp/content)
        out = {
            'title': self.title,
            'posts': [{'id': p.id,
                       'user_name': p.user_name,
                       'time': p.timestamp,
                       'content': p.content} for p in self.posts]
        }
        with open(fname, 'w', encoding='utf-8') as f:
            f.write(json.dumps(out))
def __getitem__(self, index):
return self.posts[index]
def __len__(self):
return len(self.posts)
def __repr__(self):
return '<Flashback.Thread {}>'.format(self.base_url)
def __iter__(self):
return iter(self.posts)
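# Usage sketch (hedged): the thread URL below is a placeholder, not a real
# Flashback thread.
# thread = Thread('https://www.flashback.org/t0000000')
# thread.get()
# print(thread.title, thread.describe())
# thread.to_csv('thread.csv')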
| 29.19863 | 75 | 0.594183 | 537 | 4,263 | 4.610801 | 0.353818 | 0.029079 | 0.022213 | 0.009693 | 0.131664 | 0.096123 | 0.065428 | 0.028271 | 0.028271 | 0.028271 | 0 | 0.007185 | 0.281726 | 4,263 | 145 | 76 | 29.4 | 0.801437 | 0.050199 | 0 | 0.076923 | 0 | 0 | 0.212182 | 0.008487 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134615 | false | 0.038462 | 0.076923 | 0.038462 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
121677d83b1b19d4371425c9caf2a48da8fb17ef | 1,152 | py | Python | utils/print_statistics.py | tianchenji/Multimodal-SVAE | c76b7f8984610e32819510a7a5295124b97460be | [
"MIT"
] | 8 | 2020-11-12T23:43:28.000Z | 2022-01-14T02:01:18.000Z | utils/print_statistics.py | tianchenji/Multimodal-SVAE | c76b7f8984610e32819510a7a5295124b97460be | [
"MIT"
] | null | null | null | utils/print_statistics.py | tianchenji/Multimodal-SVAE | c76b7f8984610e32819510a7a5295124b97460be | [
"MIT"
] | 2 | 2020-11-18T03:35:38.000Z | 2021-10-21T12:38:59.000Z | import numpy as np
import pandas as pd
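# Usage sketch for print_statistics below (hedged; the numbers are
# illustrative, not real results):
# correct = [90, 40, 45, 8]
# total = [100, 50, 50, 10]
# confusion_m = np.diag(correct)  # off-diagonal counts omitted for brevity
# print_statistics(correct, confusion_m, total, confusion_m_flag=0)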
def print_statistics(correct, confusion_m, total, confusion_m_flag):
if confusion_m_flag == 0:
accuracy = 100 * np.array(correct) / np.array(total)
index = ['normal', 'untvbl obs', 'tvbl obs', 'crash']
columns = ['accuracy']
print('Accuracy of the network on the test set:')
print(pd.DataFrame(accuracy, index, columns).round(2))
pe_rows = np.sum(confusion_m, axis=0)
pe_cols = np.sum(confusion_m, axis=1)
sum_total = sum(pe_cols)
pe = np.dot(pe_rows, pe_cols) / float(sum_total**2)
po = np.trace(confusion_m) / float(sum_total)
kappa = (po - pe) / (1 - pe)
print('Kappa coefficient on the test set: {:.2f}'.format(kappa))
else:
confusion_m = 100 * np.array(confusion_m) / np.array(total)
index = [['', 'predicted', 'class', ''], ['normal', 'untvbl obs', 'tvbl obs', 'crash']]
columns = [['', 'actual class', '', ''], ['normal', 'untvbl obs', 'tvbl obs', 'crash']]
print('Confusion matrix on the test set:')
print(pd.DataFrame(confusion_m, index, columns).round(2)) | 44.307692 | 95 | 0.598958 | 157 | 1,152 | 4.267516 | 0.343949 | 0.134328 | 0.067164 | 0.085075 | 0.297015 | 0.240299 | 0.240299 | 0 | 0 | 0 | 0 | 0.015927 | 0.236979 | 1,152 | 26 | 96 | 44.307692 | 0.746303 | 0 | 0 | 0 | 0 | 0 | 0.203816 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.136364 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1219cb36a0069666978851302744932b7773dff4 | 1,185 | py | Python | chaperone/cproc/pt/oneshot.py | msabramo/chaperone | 9ff2c3a5b9c6820f8750320a564ea214042df06f | [
"Apache-2.0"
] | 186 | 2015-07-22T00:08:04.000Z | 2021-11-05T21:51:09.000Z | chaperone/cproc/pt/oneshot.py | msabramo/chaperone | 9ff2c3a5b9c6820f8750320a564ea214042df06f | [
"Apache-2.0"
] | 24 | 2015-07-27T15:30:14.000Z | 2021-09-11T21:19:37.000Z | chaperone/cproc/pt/oneshot.py | msabramo/chaperone | 9ff2c3a5b9c6820f8750320a564ea214042df06f | [
"Apache-2.0"
] | 26 | 2016-01-11T21:02:30.000Z | 2021-08-31T11:09:25.000Z | import asyncio
from chaperone.cproc.subproc import SubProcess
from chaperone.cutil.errors import ChProcessError
# 'warn' (used in process_started_co below) needs an import; chaperone keeps
# its logging helpers in chaperone.cutil.logging
from chaperone.cutil.logging import warn
class OneshotProcess(SubProcess):
    process_timeout = 60.0    # default for a oneshot is 60 seconds
@asyncio.coroutine
def process_started_co(self):
result = yield from self.timed_wait(self.process_timeout, self._exit_timeout)
if result is not None and not result.normal_exit:
if self.ignore_failures:
warn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, result))
else:
raise ChProcessError("{0} failed on start-up with result '{1}'".format(self.name, result), resultcode = result)
def _exit_timeout(self):
service = self.service
message = "oneshot service '{1}' did not exit after {2} second(s), {3}".format(
service.type,
service.name, self.process_timeout,
"proceeding due to 'ignore_failures=True'" if service.ignore_failures else
"terminating due to 'ignore_failures=False'")
if not service.ignore_failures:
self.terminate()
raise Exception(message)
| 42.321429 | 127 | 0.654852 | 146 | 1,185 | 5.205479 | 0.486301 | 0.092105 | 0.047368 | 0.034211 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0 | 0.013559 | 0.253165 | 1,185 | 27 | 128 | 43.888889 | 0.845198 | 0.029536 | 0 | 0 | 0 | 0 | 0.202091 | 0.039199 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
121d25679819deafcd527e85f968ff7009bafc54 | 1,918 | py | Python | setup.py | dnif-archive/fnExchange | d75431b37da3193447b919b4be2e0104266156f1 | [
"Apache-2.0"
] | 1 | 2017-07-19T22:13:54.000Z | 2017-07-19T22:13:54.000Z | setup.py | dnif/fnExchange | d75431b37da3193447b919b4be2e0104266156f1 | [
"Apache-2.0"
] | 1 | 2021-03-25T21:27:21.000Z | 2021-03-25T21:27:21.000Z | setup.py | dnif-archive/fnExchange | d75431b37da3193447b919b4be2e0104266156f1 | [
"Apache-2.0"
] | 1 | 2021-07-07T18:55:19.000Z | 2021-07-07T18:55:19.000Z | """
fnExchange is a scalable, open source API layer (also called an API
"router") that provides a consistent proxy web interface for invoking
various web APIs without the caller having to write separate,
special-purpose code for each of them.
fnExchange is packaged as a command line interface executable
``fnexchange`` which starts the web service. The CLI also supports a
mode to run the service as a daemon.
Installation, usage and plugin development instructions can be found
on the project's `GitHub page <http://github.com/dnif/fnExchange>`_
"""
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
dependencies = [
'click==6.7',
'PyYAML==3.12',
'requests>=2.4.2',
'six==1.10.0',
'tornado==4.4.2',
]
setup(
name='fnexchange',
version='0.2.1',
url='https://github.com/dnif/fnExchange',
license='Apache',
author='Bhumil Haria',
author_email='bhumilharia@gmail.com',
description='fnExchange API router and management CLI',
long_description=__doc__,
keywords='fnexchange api router orchestration',
platforms='any',
install_requires=dependencies,
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'fnexchange = fnexchange.cli:cli',
],
},
classifiers=[
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
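# Hedged illustration: the console_scripts entry point above makes pip
# generate a launcher roughly equivalent to:
#   from fnexchange.cli import cli
#   cli()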
| 28.626866 | 69 | 0.663712 | 228 | 1,918 | 5.5 | 0.653509 | 0.047847 | 0.059809 | 0.036683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014657 | 0.217414 | 1,918 | 66 | 70 | 29.060606 | 0.820786 | 0.309176 | 0 | 0.046512 | 0 | 0 | 0.458618 | 0.015945 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.046512 | 0 | 0.046512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
121d57b33c2245662cdd146e4a68652096cf0275 | 3,026 | py | Python | local_traction_control.py | ngrabbs/traction_control | 0289604658d68b144a1867f2c70e3c177cc559fe | [
"MIT"
] | null | null | null | local_traction_control.py | ngrabbs/traction_control | 0289604658d68b144a1867f2c70e3c177cc559fe | [
"MIT"
] | null | null | null | local_traction_control.py | ngrabbs/traction_control | 0289604658d68b144a1867f2c70e3c177cc559fe | [
"MIT"
] | null | null | null | ################################################################################
# Time SPK: Traction retard VSS1 VSS2 VSS1 ms 1 VSS2 ms 1 TC slip * time
# the info for traction control says "slip% X 0.01s"
# Retard       0.0   3.3   6.7   10.0
# slip x time  0.0   6.7   13.3  20.0
# settings were above 50.1 mph and 10% slip
# vss1 rear / driven
# vss2 front / undriven
# roll a .05 second window for the multiplier
import re
tcslip_time = (0.0, 6.7, 13.3, 20.0)
tcslip_retard = (0.0, 3.3, 6.7, 10.0)
slip_percent = .10
tc_active_above = 50
slip_window_min = .01
slip_window_max = .05
slip_window = 0.01
time = []
vss1 = []
vss2 = []
vss1dot = []
vss2dot = []
tcsliptime = []
launch_timer = []
tc_retard = []
count = 0
f = open("run_1.msl","r")
lines = f.readlines()
for line in lines:
details = re.split(r'\t', line)
if(len(details) > 56):
time.append(float(details[0]))
vss1.append(float(details[42]))
vss2.append(float(details[43]))
vss1dot.append(float(details[44]))
vss2dot.append(float(details[45]))
tcsliptime.append(float(details[55]))
launch_timer.append(float(details[47]))
tc_retard.append(float(details[25]))
def percentage_difference_calculator(vss1per, vss2per):
# % increase = Increase ÷ Original Number × 100
if(vss2per < tc_active_above or vss2per > vss1per):
return 0
else:
return (((vss1per - vss2per)/vss2per)*100)
def tc_retard_calc(current_slip_time):
    # linear ramp within each band; upper bounds are inclusive so exact
    # breakpoint values (6.7, 13.3, 20.0) don't fall through to the 0 default
    if(tcslip_time[0] < current_slip_time <= tcslip_time[1]):
        return current_slip_time*(tcslip_retard[1] / tcslip_time[1])
    elif(tcslip_time[1] < current_slip_time <= tcslip_time[2]):
        return current_slip_time*(tcslip_retard[2] / tcslip_time[2])
    elif(tcslip_time[2] < current_slip_time <= tcslip_time[3]):
        return current_slip_time*(tcslip_retard[3] / tcslip_time[3])
else:
return 0
launch_active = False
while(count < len(vss1)):
if(launch_timer[count] < 6 and vss1[count] > 0 and vss2[count] > 0):
my_slip = (percentage_difference_calculator(vss1[count], vss2[count])) * slip_window
# calculate the rolling slip window
if(my_slip > slip_percent and slip_window < slip_window_max):
slip_window = slip_window + .01
elif(my_slip < slip_percent and slip_window > slip_window_min):
slip_window = slip_window - .01
my_retard = tc_retard_calc(my_slip)
print("vss1/vss2:[%0.2f/%0.2f] diff: [%0.2f]" % (vss1[count], vss2[count], percentage_difference_calculator(vss1[count], vss2[count])))
# print("time: %0.2f tc_retard/my_retard: %0.2f/%0.2f tcslip/myslip: %0.2f/%0.2f difference %0.2f/%0.2f: %0.2f window: %0.2f"
# % (time[count], tc_retard[count], my_retard, tcsliptime[count], my_slip,
# vss1[count], vss2[count],
# (percentage_difference_calculator(vss1[count], vss2[count])), slip_window))
count = count + 1
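# Hedged sanity check of tc_retard_calc (illustrative value, not from the log):
# halfway up the first band the retard should be half of tcslip_retard[1].
assert abs(tc_retard_calc(3.35) - 1.65) < 1e-6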
| 35.186047 | 143 | 0.639128 | 448 | 3,026 | 4.127232 | 0.236607 | 0.075717 | 0.081125 | 0.102217 | 0.35695 | 0.333153 | 0.187128 | 0.187128 | 0.187128 | 0.091942 | 0 | 0.070124 | 0.203569 | 3,026 | 85 | 144 | 35.6 | 0.696266 | 0.245208 | 0 | 0.072727 | 0 | 0 | 0.022385 | 0.010507 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.018182 | 0 | 0.163636 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
121f6860127d88688471d0adde04fc41ca279285 | 6,455 | py | Python | util/Spotify.py | broskh/YouSpotify | fcdfe9edb07085a3a607c07adfa60434088e1da5 | [
"MIT"
] | null | null | null | util/Spotify.py | broskh/YouSpotify | fcdfe9edb07085a3a607c07adfa60434088e1da5 | [
"MIT"
] | null | null | null | util/Spotify.py | broskh/YouSpotify | fcdfe9edb07085a3a607c07adfa60434088e1da5 | [
"MIT"
] | null | null | null | import codecs
import eyed3
import http.server
import json
import re
from sys import exit
import taglib
import time
import urllib.parse
import urllib.request
import webbrowser
from util import log
class Spotify:
# Requires an OAuth token.
def __init__(self, auth):
self._auth = auth
self.user = self.get('me')
# Gets a resource from the Spotify API and returns the object.
def get(self, url, params=None, tries=3):
# Construct the correct URL.
if params is None:
params = {}
if not url.startswith('https://api.spotify.com/v1/'):
url = 'https://api.spotify.com/v1/' + url
if params:
url += ('&' if '?' in url else '?') + urllib.parse.urlencode(params)
# Try the sending off the request a specified number of times before giving up.
for _ in range(tries):
try:
req = urllib.request.Request(url)
req.add_header('Authorization', 'Bearer ' + self._auth)
res = urllib.request.urlopen(req)
reader = codecs.getreader('utf-8')
return json.load(reader(res))
except Exception as err:
log.print_console("SPOTIFY REQUEST ERROR", 'Couldn\'t load URL: {} ({})'.format(url, err))
time.sleep(2)
log.print_console("SPOTIFY REQUEST", 'Trying again...')
exit(1)
# The Spotify API breaks long lists into multiple pages. This method automatically
# fetches all pages and joins them, returning in a single list of objects.
def get_list(self, url, params=None):
if params is None:
params = {}
response = self.get(url, params)
items = response['items']
while response['next']:
response = self.get(response['next'])
items += response['items']
return items
# The port that the local server listens on. Don't change this,
# as Spotify only will redirect to certain predefined URLs.
_SERVER_PORT = 43019
# Get the spotify user's playlists
def get_user_playlists(self):
log.print_console("PLAYLISTS","Ricerca in corso...")
# List all playlists and all track in each playlist.
playlists = self.get_list('users/{user_id}/playlists'.format(user_id=self.user['id']), {'limit': 50}) # 50
for playlist in playlists:
log.print_log('LOADING PLAYLIST', '{name} ({tracks[total]} songs)'.format(**playlist))
playlist['tracks'] = self.get_list(playlist['tracks']['href'], {'limit': 100}) # 100
return playlists
# Get full album object from semplified album
def get_full_album(self, album):
return self.get(album['href'])
# Get the spotify user logged
def get_user(self):
return self.user
# Add all metadata tags to mp3 file linked to the track
def tag_mp3_file(self, track):
full_album = self.get_full_album(track['track']['album'])
song = taglib.File(track['file_path'])
song.tags['TITLE'] = track['track']['name']
song.tags['ARTIST'] = ', '.join([artist['name'] for artist in track['track']['artists']])
song.tags['ALBUM'] = track['track']['album']['name']
song.tags['TRACKNUMBER'] = str(track['track']['track_number']) + "/" + str(
full_album['tracks']['items'][-1]['track_number'])
song.tags['DISCNUMBER'] = str(track['track']['disc_number'])
song.tags['COMMENT'] = track['track']['uri']
song.tags['GENRE'] = ', '.join(full_album['genres'])
song.tags['DATE'] = track['track']['album']['release_date']
song.tags['ALBUMARTISTS'] = ', '.join([artist['name'] for artist in track['track']['album']['artists']])
song.save()
song = eyed3.load(track['file_path'])
image = urllib.request.urlopen(track['track']['album']['images'][0]['url'])
song.tag.images.set(3, image.read(), 'image/jpeg')
song.tag.save()
log.print_log("FILE TAGGED", track['file_path'])
class _AuthorizationServer(http.server.HTTPServer):
def __init__(self, host, port):
http.server.HTTPServer.__init__(self, (host, port), Spotify._AuthorizationHandler)
# Disable the default error handling.
def handle_error(self, request, client_address):
raise
class _AuthorizationHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
# The Spotify API has redirected here, but access_token is hidden in the URL fragment.
# Read it using JavaScript and send it to /token as an actual query string...
if self.path.startswith('/redirect'):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(b'<script>location.replace("token?" + location.hash.slice(1));</script>')
# Read access_token and use an exception to kill the server listening...
elif self.path.startswith('/token?'):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(b'<script>close()</script>Thanks! You may now close this window.')
raise Spotify._Authorization(re.search('access_token=([^&]*)', self.path).group(1))
else:
self.send_error(404)
# Disable the default logging.
def log_message(self, format, *args):
pass
class _Authorization(Exception):
def __init__(self, access_token):
self.access_token = access_token
# Pops open a browser window for a user to log in and authorize API access.
def authorize(client_id, scope):
webbrowser.open('https://accounts.spotify.com/authorize?' + urllib.parse.urlencode({
'response_type': 'token',
'client_id': client_id,
'scope': scope,
'redirect_uri': 'http://127.0.0.1:{}/redirect'.format(Spotify._SERVER_PORT)
}))
# Start a simple, local HTTP server to listen for the authorization token... (i.e. a hack).
server = Spotify._AuthorizationServer('127.0.0.1', Spotify._SERVER_PORT)
try:
while True:
server.handle_request()
except Spotify._Authorization as auth:
return Spotify(auth.access_token)
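# Usage sketch (hedged): the client id and scope below are placeholders.
# spotify = authorize('<CLIENT_ID>', 'playlist-read-private')
# playlists = spotify.get_user_playlists()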
| 40.85443 | 115 | 0.612703 | 803 | 6,455 | 4.818182 | 0.321295 | 0.028431 | 0.019385 | 0.008788 | 0.100284 | 0.074955 | 0.063065 | 0.063065 | 0.044973 | 0.044973 | 0 | 0.010603 | 0.254841 | 6,455 | 157 | 116 | 41.11465 | 0.793763 | 0.177072 | 0 | 0.104348 | 0 | 0 | 0.174135 | 0.023067 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113043 | false | 0.008696 | 0.121739 | 0.017391 | 0.330435 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1220c122965971e839f7899fd39817e879a3842e | 6,173 | py | Python | foodx_devops_tools/pipeline_config/_paths.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 3 | 2021-06-23T20:53:43.000Z | 2022-01-26T14:19:43.000Z | foodx_devops_tools/pipeline_config/_paths.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 33 | 2021-08-09T15:44:51.000Z | 2022-03-03T18:28:02.000Z | foodx_devops_tools/pipeline_config/_paths.py | Food-X-Technologies/foodx_devops_tools | 57d1bf1304d9c9a386eaffa427f9eb36c410c350 | [
"MIT"
] | 1 | 2021-06-23T20:53:52.000Z | 2021-06-23T20:53:52.000Z | # Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
"""Configuration file path management."""
import logging
import pathlib
import typing
from ._exceptions import ConfigurationPathsError
log = logging.getLogger(__name__)
PIPELINE_CONFIG_FILES = {
"clients.yml",
"release_states.yml",
"deployments.yml",
"frames.yml",
"puff_map.yml",
"service_principals.vault",
"subscriptions.yml",
"systems.yml",
"tenants.yml",
}
T = typing.TypeVar("T", bound="PipelineConfigurationPaths")
class PipelineConfigurationPaths:
"""Paths to pipeline configuration files."""
clients: pathlib.Path
context: typing.Set[pathlib.Path]
deployments: pathlib.Path
frames: pathlib.Path
puff_map: pathlib.Path
release_states: pathlib.Path
service_principals: pathlib.Path
static_secrets: typing.Set[pathlib.Path]
subscriptions: pathlib.Path
systems: pathlib.Path
tenants: pathlib.Path
CONFIG_SUBDIRS: typing.Set[str] = {"static_secrets", "context"}
def __init__(self: T) -> None:
"""Construct ``PipelineConfigurationPaths`` object."""
for this_file in PIPELINE_CONFIG_FILES:
path = pathlib.Path(this_file)
setattr(self, path.stem, path)
@classmethod
def from_dict(cls: typing.Type[T], data: dict) -> T:
"""
Construct ``PipelineConfigurationPaths`` object.
NOTE: Delivering valid paths is the users responsibility.
Args:
data: Dictionary of data to populate in object.
"""
this_object = cls()
for x, y in data.items():
setattr(this_object, x, y)
return this_object
@classmethod
def from_paths(
cls: typing.Type[T],
client_config: pathlib.Path,
system_config: pathlib.Path,
) -> T:
"""
Construct ``PipelineConfigurationPaths`` object.
Args:
client_config: Path to client configuration directory.
system_config: Path to system configuration directory.
Raises:
ConfigurationPathsError: If any paths are duplicated between
client and system.
"""
this_object = cls()
client_files = this_object.__acquire_client_files(client_config)
system_files = this_object.__acquire_system_files(system_config)
if len(client_files + system_files) > len(PIPELINE_CONFIG_FILES):
# must be duplicate files between the directories
log.debug("client files, {0}".format(str(client_files)))
log.debug("system files, {0}".format(str(system_files)))
raise ConfigurationPathsError(
"Duplicate files between "
"directories, {0}, {1}".format(client_config, system_config)
)
this_object.static_secrets = cls.__acquire_static_secrets(client_config)
this_object.context = cls.__acquire_template_context(
client_config, system_config
)
return this_object
@staticmethod
def __acquire_static_secrets(
client_config: pathlib.Path,
) -> typing.Set[pathlib.Path]:
secrets_path = client_config / "static_secrets"
result = PipelineConfigurationPaths.__acquire_subdir_files(
secrets_path, "static secrets"
)
return result
@staticmethod
def __acquire_template_context(
client_config: pathlib.Path, system_config: pathlib.Path
) -> typing.Set[pathlib.Path]:
# template context could be located in either client or system config.
context_client_path = client_config / "context"
client_context_files = (
PipelineConfigurationPaths.__acquire_subdir_files(
context_client_path, "client template context"
)
)
context_system_path = system_config / "context"
system_context_files = (
PipelineConfigurationPaths.__acquire_subdir_files(
context_system_path, "system template context"
)
)
result = client_context_files.union(system_context_files)
return result
def __acquire_client_files(
self: T, client_config: pathlib.Path
) -> typing.List[pathlib.Path]:
client_files = list()
for x in client_config.iterdir():
if (
x.is_file()
and (x.name in PIPELINE_CONFIG_FILES)
and (x.stem not in self.CONFIG_SUBDIRS)
):
log.info("adding client configuration file, {0}".format(x))
setattr(self, x.stem, x)
client_files.append(x)
return client_files
def __acquire_system_files(
self: T, system_config: pathlib.Path
) -> typing.List[pathlib.Path]:
system_files = list()
for x in system_config.iterdir():
if x.is_file() and (x.name in PIPELINE_CONFIG_FILES):
log.info("adding system configuration file, {0}".format(x))
setattr(self, x.stem, x)
system_files.append(x)
return system_files
@staticmethod
def __acquire_subdir_files(
this_path: pathlib.Path, category: str
) -> typing.Set[pathlib.Path]:
result = set()
if this_path.is_dir():
log.debug(
"{2} directory, {0}, {1}".format(
this_path, str(list(this_path.iterdir())), category
)
)
for x in this_path.iterdir():
if x.is_file():
log.info(
"adding file to configuration, {1}, {0}".format(
x, category
)
)
result.add(x)
elif this_path.exists():
log.debug(f"{category} not a directory, {this_path}")
else:
log.debug(f"{category} not present, {this_path}")
return result
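# Usage sketch (hedged; the directory names are illustrative):
# paths = PipelineConfigurationPaths.from_paths(
#     pathlib.Path("client_config"), pathlib.Path("system_config"))
# log.debug(paths.clients)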
| 32.151042 | 80 | 0.609266 | 662 | 6,173 | 5.441088 | 0.217523 | 0.076346 | 0.033037 | 0.027762 | 0.208495 | 0.149084 | 0.149084 | 0.077179 | 0.051638 | 0.051638 | 0 | 0.003463 | 0.298234 | 6,173 | 191 | 81 | 32.319372 | 0.828024 | 0.145796 | 0 | 0.169118 | 0 | 0 | 0.107755 | 0.009743 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.029412 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1222c7282f49d3772df08b9a182251c1657c7ca1 | 948 | py | Python | deploy/show_events.py | Acria-Network/Acria-Contracts | 3c8b0e6f453ef531a8464ee0bed3cf5642938633 | [
"MIT"
] | 29 | 2021-03-11T14:30:21.000Z | 2022-02-23T09:15:48.000Z | deploy/show_events.py | harshitvermadu/Acria-Contracts | 3c8b0e6f453ef531a8464ee0bed3cf5642938633 | [
"MIT"
] | 3 | 2021-05-02T13:58:53.000Z | 2022-01-11T19:12:49.000Z | deploy/show_events.py | harshitvermadu/Acria-Contracts | 3c8b0e6f453ef531a8464ee0bed3cf5642938633 | [
"MIT"
] | 8 | 2021-04-08T12:32:26.000Z | 2022-02-23T09:23:56.000Z | import json
from web3 import Web3, HTTPProvider, IPCProvider
from web3.middleware import geth_poa_middleware
import os
from os.path import join, dirname
from dotenv import load_dotenv
load_dotenv(join(dirname(__file__), '.env'))
if(os.environ.get("WEB3_USE_IPC") == False):
web3 = Web3(HTTPProvider(os.environ.get("WEB3_HTTP_PROVIDER_URL")))
else:
web3 = Web3(IPCProvider(os.environ.get("WEB3_IPC_PROVIDER_URL")))
if(os.environ.get("WEB3_MIDDLEWARE_ONION_INJECT")):
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
web3.eth.defaultAccount = web3.eth.accounts[0]
with open('../build/contracts/AcriaNode.json') as file:
contract_json = json.load(file)
contract_abi = contract_json['abi']
AcriaNode = web3.eth.contract(address=os.environ.get("ACRIA_NODE_ADDRESS"), abi=contract_abi)
event_filter = AcriaNode.events.RequestFilled.createFilter(fromBlock=1)
events0 = event_filter.get_all_entries()
print(events0)
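# Hedged sketch: each entry is an AttributeDict whose 'args' field holds the
# decoded event parameters (names depend on the RequestFilled ABI).
for ev in events0:
    print(ev['event'], dict(ev['args']))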
| 33.857143 | 93 | 0.778481 | 135 | 948 | 5.22963 | 0.414815 | 0.063739 | 0.084986 | 0.090652 | 0.050992 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023364 | 0.097046 | 948 | 27 | 94 | 35.111111 | 0.801402 | 0 | 0 | 0 | 0 | 0 | 0.148734 | 0.109705 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
122bd4054f1dcb784497724ac66a52927f337457 | 42,079 | py | Python | pybin3/insts.py | avielazari/vlsistuff | 34304dc64437fc849d74addd09963dca587df537 | [
"MIT"
] | 26 | 2018-03-17T18:14:22.000Z | 2022-03-14T07:23:13.000Z | pybin3/insts.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 1 | 2019-10-16T10:31:11.000Z | 2019-10-17T04:14:53.000Z | pybin3/insts.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 7 | 2018-07-16T07:51:25.000Z | 2022-02-15T14:22:54.000Z | #! /usr/bin/env python3
import os,string,sys,types
instructions={}
inst_id=1
OpcodeWidth=25
Dnops={}
DmanualIfields={}
DmanualOpcodes={}
DnfSpecials = {}
Chip = 'chip'
def get_inst_id():
global inst_id
x = inst_id
inst_id=inst_id+1
return x
class Instruction:
def __init__(self,Name):
self.name=Name
self.id=get_inst_id()
self.coding=''
self.pattern=''
        self.translate=''
        # cond and flags are read later (produce_verilog, collect_flags);
        # default them here in case deal_one_inst leaves either unset
        self.cond=''
        self.flags=[]
def main():
global Chip
print('invocation: insts.py ChipName InstructionsFile')
if len(sys.argv)>1:
Chip = sys.argv[1]
if len(sys.argv)>2:
InstFileName = sys.argv[2]
else:
InstFileName = 'instructions.assigned'
print('set: insts.py %s %s'%(Chip,InstFileName))
File = open(InstFileName,'r')
LLL = read_inst_file(File)
for Item in LLL:
deal_one_inst(Item)
check_contentions()
if (OpcodeWidth<17):
check_usage()
check_usage2()
produce_verilog()
produce_func_verilog()
produce_disasm()
produce_html(1)
produce_html(2)
produce_csv()
produce_asm_driver()
produce_py_simulator()
produce_c_decoder()
produce_c_simulator()
produce_c_instr_list()
produce_scheme()
Header0 = '''
def exr(Int,High,Wid):
Low = High-Wid+1
Mask = (1<<Wid)-1
Res = (Int>>Low)&Mask
return Res
def instructions_scheduler(cpu,opcode):
'''
def produce_py_simulator():
ofile=open('%s_decoder.py'%(Chip),'w')
ofile.write(Header0)
prefi = ''
for Inst in instructions:
Coding = instructions[Inst].coding
(Mask,Data)=build_expr(Coding)
ofile.write(' %sif ((opcode & 0x%s)==0x%s):\n'%(prefi,Mask[4:],Data[4:]))
prefi='el'
wrds = gather_fields(Inst,Coding)
res=[]
(Fields,names)=get_good_names(wrds)
for Name in Fields:
L1 = names[Name]
if (len(L1)==1):
(x_,Offset,Width) = L1[0]
res = res + ['exr(opcode,%d,%d)'%(Offset,Width)]
else:
part=[]
L1.sort()
L1.reverse()
for (x_,O,W) in L1:
part = part + [str(O-W+1),str(W)]
prts = ','.join(part)
                # integer division: Python 3 '/' yields a float and would
                # render as 'exr2.0(...)' in the generated simulator
                res = res + ['exr%d(opcode,%s)'%(len(part)//2,prts)]
ofile.write(' cpu.execute_%s(%s)\n'%(Inst,','.join(res)))
ofile.write(' else: cpu.execute_illegal(opcode)\n')
for Inst in instructions:
Coding = instructions[Inst].coding
wrds = gather_fields(Inst,Coding)
(Fields,names)=get_good_names(wrds)
res=[]
for Name in Fields:
res = res + [Name]
Prms = ','.join(['self']+res)
ofile.write('def execute_%s(%s):\n'%(Inst,Prms))
ofile.write(' notImplemented("%s","%s",%s)\n'%(Inst,Prms,Prms))
ofile.write(' return\n')
ofile.close()
def get_good_names(wrds):
names={}
orders = []
for NameOffsetWidth in wrds:
(Name,Offset,Width) = NameOffsetWidth
if ('[' in Name):
x = Name.index('[')
s1 = Name[x+1:-1]
s2 = s1.split(':')
Hi = int(s2[0])
Name = Name[:x]
else:
Hi=0
if (Name in names):
L1 = names[Name]
names[Name]=L1+[(Hi,Offset,Width)]
else:
names[Name]=[(Hi,Offset,Width)]
orders = orders + [Name]
# Fields = names.keys()
# Fields.sort()
return (orders,names)
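# Hedged example (toAsm, which gather_fields wraps, is assumed to be defined
# elsewhere in this file): for wrds = [('imm[7]', 15, 4), ('imm[3]', 11, 4)]
# this returns (['imm'], {'imm': [(7, 15, 4), (3, 11, 4)]}).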
def get_good_names_no_sort(wrds):
names={}
for NameOffsetWidth in wrds:
(Name,Offset,Width) = NameOffsetWidth
if ('[' in Name):
x = Name.index('[')
s1 = Name[x+1:-1]
s2 = s1.split(':')
Hi = int(s2[0])
Name = Name[:x]
else:
Hi=0
if (Name in names):
L1 = names[Name]
names[Name]=L1+[(Hi,Offset,Width)]
else:
names[Name]=[(Hi,Offset,Width)]
Fields = names.keys()
# Fields.sort()
return (Fields,names)
def gather_fields(Inst,Coding):
Coding1=toAsm(Inst,Coding)
return Coding1
def produce_disasm():
opyfile=open('%s_disasm.py'%(Chip),'w')
opyfile.write('#! /usr/bin/env python3\n')
opyfile.write('import os,sys,string\n')
opyfile.write('codings={}\n')
Keys = instructions.keys()
for Name in Keys:
Coding = instructions[Name].coding
opyfile.write('codings["%s"]=%s\n'%(Name,str(Coding)))
opyfile.write('def disasm(Code):\n')
Rep = "%d'h"%OpcodeWidth
for Name in Keys:
if ok_name(Name):
(Mask,Data)=build_expr(instructions[Name].coding)
opyfile.write(' if ((Code & %s)==%s):\n'%(Mask.replace(Rep,'0x'),Data.replace(Rep,'0x')))
opyfile.write(' return "%s %%s"%%(fields_extr(Code,codings["%s"]))\n'%(Name,Name))
opyfile.write(' return "*%08x"%(Code)\n')
opyfile.write('OpcodeWidth = %d\n'%OpcodeWidth)
opyfile.write(DISASMSTRING)
opyfile.close()
DISASMSTRING = """
def int2bin(Int,Len):
if (Int==0):
res= '0'
while (len(res)<Len):
res = '0'+res
return res
res = ''
while (Int):
if (Int&1):
res = '1'+res
else:
res = '0'+res
Int=Int>>1
while (len(res)<Len):
res = '0'+res
return res
def fields_extr(Code,List):
Fields={}
Str = int2bin(Code,OpcodeWidth)
L1 = list(Str)
for i in range(OpcodeWidth):
Tok = List[i]
if (Tok[0] not in '01'):
Bit = int(L1[i],2)
if ('[' in Tok):
ww = Tok.split('[')
Key = ww[0]
Ind = int(ww[1][:-1])
if Key not in Fields:
Fields[Key]=0
Fields[Key] |= (Bit<<Ind)
else:
Fields[Tok]=Bit
res=''
for Key in Fields:
res += ' %s=0x%x'%(Key,Fields[Key])
return res
def main():
Fname = sys.argv[1]
if len(sys.argv)>2:
Foutname = sys.argv[2]
else:
Foutname = 'dis.listing'
print('i take rom file "%s" as input file and produce "%s" as output'%(Fname,Foutname))
load_rom(Fname)
Fout=open(Foutname,'w')
run_disasm(Fout)
Fout.close()
def run_disasm(Fout):
for Addr,Code in enumerate(Program):
Txt2 = disasm(Code)
Str = '0x%04x : %08x %s\\n'%(Addr,Code,Txt2)
        print(Str, end='')  # Str already ends with a newline; 'print(Str),' was a Python 2 leftover
Fout.write(Str)
Program=[]
def load_rom(Fname):
File = open(Fname)
Addr=0
while 1:
line = File.readline()
if (len(line)==0):
return
if "//" in line: line=line[:line.index("//")]
wrds = line.split()
for wrd in wrds:
if (wrd[0]=='@'):
Addr = int(wrd[1:],16)
else:
Data = int(wrd,16)
while len(Program)<=Addr:
Program.append(0)
Program[Addr]=Data
Addr += 1
if __name__=='__main__':
main()
"""
def produce_verilog():
ofile=open('%s_decoder.v'%(Chip),'w')
ofile2 = open('%s_h.py'%(Chip),'w')
ifile=open('%s_decoder.inst'%(Chip),'w')
ifile.write('wire pvalidXX; wire [%d:0] popcodXX; wire [31:0] version_code;\n'%(OpcodeWidth-1))
ofile.write('module %s_decoder(input [%d:0] opcode,input valid,output not_opcode,output [31:0] version_code\n'%(Chip,OpcodeWidth-1))
Fields = collect_fields()
Flags = collect_flags()
Keys = instructions.keys()
III = ['%s_decoder XX%s_decoder (.valid(pvalidXX),.opcode(popcodXX),.version_code(version_code)\n'%(Chip,Chip)]
for Name in Keys:
if ok_name(Name):
ofile.write(',output %s_code\n'%(Name))
if Name=='nop':
ofile.write(',output [%s:0] nop_opcode\n'%(OpcodeWidth-1))
ifile.write('wire XX%s_code;\n'%(Name))
III.append(' ,.%s_code(XX%s_code)\n'%(Name,Name))
for Name in Fields:
if ok_name(Name)and(Name!='x'):
if (Fields[Name]==0):
ofile.write(',output %s_field\n'%(Name))
ifile.write('wire XX%s_field;\n'%(Name))
III.append(' ,.%s_field(XX%s_field)\n'%(Name,Name))
else:
(H,L)=Fields[Name]
ofile.write(',output [%d:%d] %s_field\n'%(H,L,Name))
ifile.write('wire [%d:%d] XX%s_field;\n'%(H,L,Name))
III.append(' ,.%s_field(XX%s_field)\n'%(Name,Name))
for Name in Flags:
ofile.write(',output %s_flag\n'%(Name))
ifile.write('wire XX%s_flag;\n'%(Name))
III.append(' ,.%s_flag(XX%s_flag)\n'%(Name,Name))
ofile.write(');\n')
ofile.write("assign version_code = 32'hXXXX_XXXX;\n")
GoodOpCodes = []
for Name in Keys:
if ok_name(Name):
(Mask,Data)=build_expr(instructions[Name].coding)
Name1 = Name.upper()
ofile2.write('global %s\n'%(Name1))
            # build_expr prefixes constants with "<OpcodeWidth>'h"; strip that
            # prefix (the old hard-coded "16'h" no longer matches now that
            # OpcodeWidth is 25)
            ofile2.write('%s = %s\n'%(Name1,Data.replace("%d'h"%OpcodeWidth,'0x')))
            ofile2.write('MASK_%s = %s\n'%(Name1,Mask.replace("%d'h"%OpcodeWidth,'0x')))
if instructions[Name].cond!='':
Cond = instructions[Name].cond
if '=' not in Cond:
Cond = '&&(%s_field!=0)'%instructions[Name].cond
else:
Cond = ''
ofile.write('assign %s_code = valid && ((opcode & %s)==%s) && %s;\n'%(Name,Mask,Data,Cond))
if Name=='nop':
ofile.write('assign nop_opcode = %s;\n'%(Data))
GoodOpCodes.append(Name+'_code')
Str = ' ||'.join(GoodOpCodes)
ofile.write('assign not_opcode = !(\n')
while len(Str)>80:
x = 60;
while Str[x]!=' ': x += 1
Bef = Str[:x]
Str = Str[x:]
ofile.write(' %s\n'%(Bef))
ofile.write(' %s);\n'%Str)
for Name in Fields:
if ok_name(Name)and(Name!='x'):
if (Fields[Name]==0):
Expr=build_field_expr(Name)
ofile.write('assign %s_field = %s;\n'%(Name,Expr))
else:
(H,L)=Fields[Name]
for Ind in range(L,H+1):
Expr=build_field_expr('%s[%d]'%(Name,Ind))
ofile.write('assign %s_field[%d] = %s;\n'%(Name,Ind,Expr))
for Name in Flags:
res=[]
for Inst in Flags[Name]:
res = res + [Inst+'_code']
txt = ' ||'.join(res)
ofile.write('assign %s_flag = valid && (%s);\n'%(Name,txt))
ofile.write('endmodule\n\n')
ofile.close()
ofile2.close()
for Line in III:
ifile.write(Line)
ifile.write(');\n')
ifile.close()
def produce_func_verilog():
ofile=open('%s_func_decoder.v'%(Chip),'w')
ofile.write('module %s_decoder(input [%d:0] opcode,input valid);\n'%(Chip,OpcodeWidth-1))
Fields = collect_fields()
Flags = collect_flags()
Keys = instructions.keys()
# for Name in Keys:
# if ok_name(Name):
# ofile.write(',output %s_code\n'%(Name))
#
# for Name in Fields:
# if ok_name(Name)and(Name!='x'):
# if (Fields[Name]==0):
# ofile.write(',output %s_field\n'%(Name))
# else:
# (H,L)=Fields[Name]
# ofile.write(',output [%d:%d] %s_field\n'%(H,L,Name))
# for Name in Flags:
# ofile.write(',output %s_flag\n'%(Name))
# ofile.write(');\n')
# for Name in Keys:
# if ok_name(Name):
# (Mask,Data)=build_expr(instructions[Name].coding)
# ofile.write('assign %s_code = valid && ((opcode & %s)==%s);\n'%(Name,Mask,Data))
for Name in Fields:
if (Fields[Name]==0):
Wids =''
else:
(H,L)=Fields[Name]
Wids = '[%d:%d]'%(H,L)
ofile.write('function %s field_%s(input [%d:0] opcode);\n'%(Wids,Name,OpcodeWidth-1))
ofile.write('begin\n')
if ok_name(Name)and(Name!='x'):
if (Fields[Name]==0):
Expr=build_func_field_expr(Name)
ofile.write(' field_%s = %s;\n'%(Name,Expr))
else:
(H,L)=Fields[Name]
for Ind in range(L,H+1):
Expr=build_func_field_expr('%s[%d]'%(Name,Ind))
ofile.write(' field_%s[%d] = %s;\n'%(Name,Ind,Expr))
ofile.write('end\n')
ofile.write('endfunction\n')
for Name in Flags:
ofile.write('function flag_%s(input [%d:0] opcode,input valid);\n'%(Name,OpcodeWidth-1))
ofile.write('begin\n')
res=[]
for Inst in Flags[Name]:
(Mask,Data)=build_expr(instructions[Inst].coding)
this = '((opcode & %s)==%s)'%(Mask,Data)
res = res + [this]
txt = ' ||'.join(res)
ofile.write('flag_%s = valid && (%s);\n'%(Name,txt))
ofile.write('end\n')
ofile.write('endfunction\n')
ofile.write('endmodule\n')
ofile.close()
def opcode_expr(Inst):
(Mask,Data)=build_expr(instructions[Inst].coding)
this = '((opcode & %s)==%s)'%(Mask,Data)
return this
def ok_name(Name):
if (len(Name)>len('unused'))and(Name[0:len('unused')]=='unused'):
return 0
return 1
def build_field_expr(Name):
gotit={}
for Inst in instructions:
ind=OpcodeWidth-1
for Id in instructions[Inst].coding:
if ok_name(Inst):
if (Id==Name):
if (ind in gotit):
Was = gotit[ind]
gotit[ind]=Was+[Inst+'_code']
else:
gotit[ind]=[Inst+'_code']
ind=ind-1
if (len(gotit.keys())==1):
ll = list(gotit.keys())
return 'opcode[%s]'%(ll[0])
inds = list(gotit.keys())
res=''
for ind in inds:
insts = gotit[ind]
insts1 = '||'.join(insts)
res = res + '||(opcode[%s] && (%s))'%(ind,insts1)
return res[2:]
def build_func_field_expr(Name):
gotit={}
for Inst in instructions:
ind=OpcodeWidth-1
for Id in instructions[Inst].coding:
if ok_name(Inst):
if (Id==Name):
if (ind in gotit):
Was = gotit[ind]
gotit[ind]=Was+[Inst]
else:
gotit[ind]=[Inst]
ind=ind-1
inds = gotit.keys()
res=''
for ind in inds:
insts = gotit[ind]
insts2 = []
for Inst in insts:
Expr = opcode_expr(Inst)
insts2 += [Expr]
insts1 = '||'.join(insts2)
res = res + '||(opcode[%s] && (%s))'%(ind,insts1)
return res[2:]
def collect_fields():
fields={}
for Inst in instructions:
Coding = instructions[Inst].coding
for X in Coding:
if (X not in ['0','1']):
if ('[' in X):
(Bus,Ind)=extract_bus(X)
if Bus in fields:
XX = fields[Bus]
if (XX==0):
catch_error('field names collide',Inst+' '+Bus)
(H,L)=fields[Bus]
fields[Bus]=(max(H,Ind),min(L,Ind))
else:
fields[Bus]=(Ind,Ind)
else:
fields[X]=0
return fields
def collect_flags():
flags={}
for Inst in instructions:
for flag in instructions[Inst].flags:
if (flag in flags):
flags[flag]=flags[flag]+[Inst]
else:
flags[flag]=[Inst]
return flags
def collect_inst_fields(Coding):
fields={}
for X in Coding:
if (X not in ['0','1']):
if ('[' in X):
(Bus,Ind)=extract_bus(X)
if Bus in fields: # dict.has_key() was removed in Python 3
(H,L)=fields[Bus]
fields[Bus]=(max(H,Ind),min(L,Ind))
else:
fields[Bus]=(Ind,Ind)
else:
fields[X]=0
return fields
def extract_bus(Txt):
x = Txt.index('[')
Bus = Txt[0:x]
Ins = Txt[x+1:-1]
return (Bus,int(Ins))
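# Illustrative example: extract_bus('rd[5]') returns ('rd', 5) -- the bus
# name and the integer bit index between the brackets.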
def build_expr(Coding):
mask=''
data=''
for X in Coding:
if (X=='1'):
mask=mask+'1'
data=data+'1'
elif (X=='0'):
mask=mask+'1'
data=data+'0'
else:
mask=mask+'0'
data=data+'0'
return ( str(OpcodeWidth)+"'h"+bin2hex(mask),str(OpcodeWidth)+"'h"+bin2hex(data) )
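# Sketch of the expected output (illustrative, assuming OpcodeWidth == 16):
# for a coding whose first four items are '1','0','1','1' followed by twelve
# field bits, build_expr returns ("16'hf000", "16'hb000") -- the mask of the
# constant bit positions and their values, as Verilog literals.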
header_string = '\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n\
<html>\n\
<head>\n\
<title>CHIPCHIP instruction set </title>\n\
</head>\n\
\n\
<body>\n\
<center>\n\
\n\
<h1>CHIPCHIP instruction set</h1>\n\
\n\
'
table_header_string = '\
<table border>\n\
<tr>\n\
<td align="center"><b>opcode</b></td>\n\
<td colspan=OPCODEWIDTH align="center"><b>data bits</b></td>\n\
<td align="center"><b>comment</b></td>\n\
</tr>\n\
'
tail_string = '</table> </center> </body> </html>\n\n'
def switch_colors():
global color,othercolor
x = color
color=othercolor
othercolor=x
def produce_csv():
File = open('%s_table.csv'%(Chip),'w')
File.write('id,inst,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,comment\n')
explanation = ''
lll=[]
for Inst in instructions:
lll = lll + [(instructions[Inst].id,Inst)]
lll.sort()
for (id,Inst) in lll:
File.write('%s,%s'%(id,Inst))
run_on_csv_coding(instructions[Inst].coding,File)
expl = instructions[Inst].oneliner.replace('.',' ')
File.write(',%s\n'%expl)
File.close()
def produce_html(Which):
global color,othercolor
ofile = open('%s_table%d.html'%(Chip,Which),'w')
ofile.write(header_string.replace('CHIPCHIP',Chip))
ofile.write(table_header_string.replace('OPCODEWIDTH',str(OpcodeWidth)))
color = '#ffa0ff'
othercolor = '#ffffa0'
instruction = 'bits'
color = '#80ff80'
othercolor = '#ffffa0'
rng = list(range(0,OpcodeWidth))
rng.reverse()
origarr=[]
for x in rng:
origarr = origarr + [str(x)]
ofile.write('<tr bgcolor='+color+'> <td>'+instruction+'</td>\n')
run_on_coding(origarr,ofile)
explanation = ''
lll=[]
for Inst in instructions:
lll = lll + [(instructions[Inst].id,Inst)]
lll.sort()
if (Which==1):
for (id,Inst) in lll:
if ok_name(Inst):
acolor = instructions[Inst].color
if acolor!='none':
acolor='#'+acolor
else:
acolor=color
ofile.write('<tr bgcolor='+acolor+'> <td><a target="_blank" href="file:chip_doc.html/#'+Inst+'">'+Inst+'</a></td>\n')
run_on_coding(instructions[Inst].coding,ofile)
expl = instructions[Inst].oneliner.replace('.',' ')
ofile.write('<td align="center">'+expl+'</td>\n')
ofile.write('</tr>\n')
switch_colors()
else:
Insts = []
for (id,Inst) in lll:
if ok_name(Inst):
Insts = [Inst]+Insts
Insts.sort()
for Inst in Insts:
for (id,Inst2) in lll:
if (Inst2==Inst):
acolor = instructions[Inst].color
if acolor!='none':
acolor='#'+acolor
else:
acolor=color
ofile.write('<tr bgcolor='+acolor+'> <td><a target="_blank" href="file:chip_doc.html#'+Inst+'">'+Inst+'</a></td>\n')
run_on_coding(instructions[Inst].coding,ofile)
expl = instructions[Inst].oneliner.replace('.',' ')
ofile.write('<td align="center">'+expl+'</td>\n')
ofile.write('</tr>\n')
switch_colors()
ofile.write(tail_string)
ofile.close()
def get_inst_explanation(Inst):
return '???'
def run_on_csv_coding(wrds,File):
# wrds = gather_busses(wrds)
for word in wrds:
if (word=='1'):
File.write(',1')
elif (word=='0'):
File.write(',0')
elif (type(word) is str):
File.write(',%s'%str(word))
elif (type(word) is tuple):
(Bus,ind1,ind2)=word
text = '%s[%d:%d]'%(Bus,ind1,ind2)
many = ind1-ind2+1
File.write(',%s'%str(many))
else:
print('error! ilia, coding field bad ',word, wrds)
def run_on_coding(wrds,ofile):
wrds = gather_busses(wrds)
for word in wrds:
if (word=='1'):
ofile.write('<td align="center">'+str(word)+'</td>\n')
elif (word=='0'):
ofile.write('<td align="center">'+str(word)+'</td>\n')
elif (type(word) is str):
ofile.write('<td align="center" >'+str(word)+'</td>\n')
elif (type(word) is tuple):
(Bus,ind1,ind2)=word
text = '%s[%d:%d]'%(Bus,ind1,ind2)
many = ind1-ind2+1
ofile.write('<td align="center" colspan='+str(many)+'>'+text+'</td>\n')
else:
print('error! ilia, coding field bad ',word, wrds)
def gather_busses(wrds):
res = []
state='idle'
for word in wrds:
if (state=='idle'):
if ('[' in word):
state='bus'
x = word.index('[')
Bus = word[:x]
St=int(word[x+1:-1])
En=St
else:
res = res + [word]
elif (state=='bus'):
if ('[' in word):
state='bus'
x = word.index('[')
Bus1 = word[:x]
Here=int(word[x+1:-1])
if (Here==(En-1))and(Bus1==Bus):
En=Here
else:
res = res + [(Bus,St,En)]
Bus=Bus1
St=Here
En=Here
else:
res = res + [(Bus,St,En)]
res = res + [word]
state='idle'
if (state=='bus'):
res = res + [(Bus,St,En)]
return res
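# Illustrative example: consecutive descending single-bit references to the
# same bus are merged into (name, high, low) tuples, e.g.
#   gather_busses(['1', 'r[3]', 'r[2]', 'r[1]', '0'])
# returns ['1', ('r', 3, 1), '0'].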
def produce_scheme():
kinds={}
ofile=open('%s.inc'%(Chip),'w')
ofile.write('; -*- Scheme -*-\n')
fields = {}
comes_from={}
for Inst in instructions:
Coding=instructions[Inst].coding
wrds = gather_fields(Inst,Coding)
(Fields,names)=get_good_names(wrds)
for Key in names:
L1 = names[Key]
(A,St,Wid)=L1[0]
Str = '%s%d_%d'%(Key,Wid,St+Wid-1)
Kind = get_field_kind(Inst,Key)
fields[Str]=(St,Wid)
comes_from[Str]=(Inst,Key)
Key='%s %d %d'%(Str,St,Wid)
if Key in kinds:
(Was,Winst) = kinds[Key]
if (Was!=Kind):
print('error kind collision field=(%s) new=%s / %s old= %s / %s'%(Key,Inst,Kind,Winst,Was))
kinds['%s %d %d'%(Str,St,Wid)]=(Kind,Inst)
for Field in fields:
(St,Wid)= fields[Field]
(Kind,Winst)=kinds['%s %d %d'%(Field,St,Wid)]
Manual = is_it_manual(Field,comes_from[Field])
(I,K)=comes_from[Field]
Special = get_dnf_special(I,K)
if (Manual):
ofile.write(';manual ')
if (Kind=='sint'):
ofile.write('(df f-%s "%s" (%s) %d %d INT #f #f )\n'%(Field,Field,Special,St+Wid-1,Wid))
else:
ofile.write('(dnf f-%s "%s" (%s) %d %d)\n'%(Field,Field,Special,St+Wid-1,Wid))
for Field in fields:
(St,Wid)= fields[Field]
(Kind,Winst)=kinds['%s %d %d'%(Field,St,Wid)]
Manual = is_it_manual_opcode(Field,comes_from[Field])
if (Manual):
ofile.write(';manual ')
ofile.write('(dnop %s "%s" () h-%s f-%s)\n'%(Field,Field,Kind,Field))
opcodes={}
for Inst in instructions:
Coding=instructions[Inst].coding
(Wid,Id)=get_constants(Coding)
if Wid in opcodes:
was = opcodes[Wid]
else:
was = []
opcodes[Wid] = was + [(Id,Inst)]
for Wid in opcodes:
ofile.write('(dnf f-opc%d "opcode %d-bits field" () 31 %d);\n'%(Wid,Wid,Wid))
for Wid in opcodes:
ofile.write('(define-normal-insn-enum insn-opc%d "opc enums" () OPC%d_ f-opc%d (\n'%(Wid,Wid,Wid))
for (Id,Inst) in opcodes[Wid]:
if (Id>1000):
ofile.write(' ("%s" #x%x)\n'%(Inst,Id))
else:
ofile.write(' ("%s" %d)\n'%(Inst,Id))
ofile.write('))\n')
for Inst in instructions:
Coding=instructions[Inst].coding
wrds = gather_fields(Inst,Coding)
(Fields,names)=get_good_names(wrds)
(Wid,Id)=get_constants(Coding)
ofile.write('(dni %s\n'%(Inst))
ofile.write(' "%s"\n'%(instructions[Inst].oneliner.replace('.',' ')))
if (len(Fields)>0):
ofile.write(' (%s'%('NO-DIS'))
if (Inst in Dnops):
DIS = Dnops[Inst]
ofile.write(' %s'%(DIS))
ofile.write(')\n')
else:
ofile.write(' ()\n')
ofile.write(' "%s'%(Inst))
Psik=' '
for Key in Fields:
L1 = names[Key]
(A,St,Wid1)=L1[0]
Str = '%s%d_%d'%(Key,Wid1,St+Wid1-1)
ofile.write('%s$%s'%(Psik,Str))
Psik=','
ofile.write('"\n')
ofile.write(' (+ OPC%d_%s'%(Wid,Inst))
for Key in Fields:
L1 = names[Key]
(A,St,Wid1)=L1[0]
Str = '%s%d_%d'%(Key,Wid1,St+Wid1-1)
ofile.write(' %s'%(Str))
ofile.write(')\n')
ofile.write(' (nop)\n')
ofile.write(' ()\n')
ofile.write(')\n')
ofile.close()
def is_it_manual(Field,Tuple):
(Inst,Key)=Tuple
Ok = Inst+'+'+Key
if (Key in DmanualIfields):
return True
if (Ok in DmanualIfields):
return True
return False
def is_it_manual_opcode(Field,Tuple):
(Inst,Key)=Tuple
Ok = Inst+'+'+Key
if (Key in DmanualOpcodes):
return True
if (Ok in DmanualOpcodes):
return True
return False
def get_field_kind(Inst,Field):
if (Field in Dnops):
return Dnops[Field]
if (Inst+'+'+Field in Dnops):
return Dnops[Inst+'+'+Field]
return 'uint'
def get_dnf_special(Inst,Field):
if (Field in DnfSpecials):
return DnfSpecials[Field]
if (Inst+'+'+Field in DnfSpecials):
return DnfSpecials[Inst+'+'+Field]
return ''
def get_constants(Coding):
res = []
for X in Coding:
if (X in ['0','1']):
res = res + [X]
X = ''.join(res)
Id = bin2int(X)
return (len(res),Id)
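# Illustrative example: get_constants(['1','0','1','r[1]','r[0]']) keeps only
# the literal bits '101' and returns (3, 5) -- three constant bits whose
# concatenation is the opcode value 5.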
def produce_asm_driver():
ofile=open('%s_asm_coding.py'%(Chip),'w')
ofile.write('Coding={}\n')
ofile.write('def init_coding(add_coding):\n')
for Inst in instructions:
Pattern=instructions[Inst].pattern
Translate=instructions[Inst].translate
Coding=instructions[Inst].coding
Coding1=toAsm(Inst,Coding)
(Mask,Data)=build_expr(Coding)
Flags = ','.join(instructions[Inst].flags)
# ofile.write('Coding["%s"]=(0x%s,%s)\n'%(Inst,Data[4:],Coding1))
ofile.write(' add_coding("%s",0x%s,%s,"%s","%s","%s")\n'%(Inst,Data[4:],Coding1,Pattern,Translate,Flags))
ofile.close()
def toAsm(Inst,Coding):
wrds = gather_busses(Coding)
pos=OpcodeWidth-1
res=[]
for i in range(0,len(wrds)):
if (wrds[i] in ['0','1']):
pos=pos-1
elif (type(wrds[i]) is str):
res = res+[(wrds[i],pos,1)]
pos=pos-1
elif (type(wrds[i]) is tuple):
(Bus,ind1,ind2)=wrds[i]
many = ind1-ind2+1
res = res+[('%s[%d:%d]'%(Bus,ind1,ind2),pos,many)]
# res = res+[('%s[%d:%d]'%(Bus,ind1,ind2),many-1,many)]
pos=pos-many
return res
def produce_c_decoder():
ofile=open('osim_%s_dis_template.c'%(Chip),'w')
for Inst in instructions:
Coding = instructions[Inst].coding
Fields = toC(Inst,Coding)
res=[]
for fld in Fields:
(fldname,pos,wid)=fld
res = res + [fldname]
if len(res)>0:
ofile.write('static void osim_opal_dis_%s(uint32_t %s)\n'%(Inst,', uint32_t '.join(res)))
else:
ofile.write('static void osim_opal_dis_%s(void)\n'%(Inst))
ofile.write('{\n')
ofile.write(' opal->parms.print("%s is not supported\\n");\n'%(Inst))
ofile.write('}\n\n')
ofile.close()
ofile=open('osim_%s_dis_driver_h.c'%(Chip),'w')
ofile.write('int osim_dis_instr(uint32_t instr)\n')
ofile.write('{\n')
ofile.write(' int rc=0;\n')
prefi = ''
for Inst in instructions:
Coding = instructions[Inst].coding
(Mask,Data)=build_expr(Coding)
ofile.write(' %sif ((instr & 0x%s)==0x%s)\n'%(prefi,Mask[4:],Data[4:]))
prefi='else '
Fields = toC(Inst,Coding)
res=[]
for fld in Fields:
(fldname,pos,wid)=fld
res = res + ['field(instr,%d,%d)'%(pos,wid)]
ofile.write(' osim_opal_dis_%s(%s);\n'%(Inst,','.join(res)))
ofile.write(' else\n')
ofile.write(' {\n')
ofile.write(' opal->parms.print("%x*invalid*\\n", instr);\n')
ofile.write(' rc = -EINVAL;\n')
ofile.write(' }\n')
ofile.write(' return rc;\n')
ofile.write('}\n')
ofile.close()
def produce_c_simulator():
ofile=open('osim_%s_exec_template.c'%(Chip),'w')
for Inst in instructions:
Coding = instructions[Inst].coding
Fields = toC(Inst,Coding)
res=[]
for fld in Fields:
(fldname,pos,wid)=fld
res = res + [fldname]
if len(res)>0:
ofile.write('static int osim_opal_exec_%s(struct osim_opal *_opal, uint32_t %s)\n'%(Inst,', uint32_t '.join(res)))
else:
ofile.write('static int osim_opal_exec_%s(struct osim_opal *_opal)\n'%(Inst))
ofile.write('{\n')
ofile.write(' opal->parms.print("%s is not supported\\n");\n'%(Inst))
ofile.write(' return -EINVAL;\n')
ofile.write('}\n\n')
ofile.close()
ofile=open('osim_%s_exec_driver_h.c'%(Chip),'w')
ofile.write('int osim_exec_instr(struct osim_opal *_opal, uint32_t instr)\n')
ofile.write('{\n')
ofile.write(' int rc=-EINVAL;\n')
prefi = ''
for Inst in instructions:
Coding = instructions[Inst].coding
(Mask,Data)=build_expr(Coding)
ofile.write(' %sif ((instr & 0x%s)==0x%s)\n'%(prefi,Mask[4:],Data[4:]))
prefi='else '
Fields = toC(Inst,Coding)
res=[]
for fld in Fields:
(fldname,pos,wid)=fld
res = res + ['field(instr,%d,%d)'%(pos,wid)]
if len(res)>0:
ofile.write(' rc = osim_opal_exec_%s(_opal,%s);\n'%(Inst,','.join(res)))
else:
ofile.write(' rc = osim_opal_exec_%s(_opal);\n'%(Inst))
ofile.write(' else\n')
ofile.write(' OSIM_RUN_ERR(_opal, "***Invalid instruction\\n", instr);\n')
ofile.write(' return rc;\n')
ofile.write('}\n')
ofile.close()
def toC(Inst,Coding):
wrds = gather_busses(Coding)
pos=OpcodeWidth-1
res=[]
for i in range(0,len(wrds)):
if (wrds[i] in ['0','1']):
pos=pos-1
elif (type(wrds[i]) is str):
res = res+[(wrds[i],pos,1)]
pos=pos-1
elif (type(wrds[i]) is tuple):
(fname,ind1,ind2)=wrds[i]
many = ind1-ind2+1
res = res+[(fname,pos-many+1,many)]
pos=pos-many
return res
def produce_c_instr_list():
ofile=open('%s_instructions.h'%(Chip),'w')
ofile.write('#ifndef OPAL_INSTRUCTIONS_H\n')
ofile.write('#define OPAL_INSTRUCTIONS_H\n')
ofile.write('typedef enum {\n')
for Inst in instructions:
ofile.write(' opal_instr_%s,\n'%(Inst))
ofile.write('} opal_instr;\n')
ofile.write('#endif\n')
ofile.close()
def check_usage2():
Total = 1<<OpcodeWidth
Names = instructions.keys()
UsedTotal=0
for Inst in Names:
Coding = instructions[Inst].coding
res=0
for Chr in Coding:
if Chr in ['0','1']:
res += 1
UsedLocal = 1<<(OpcodeWidth-res)
UsedTotal+= UsedLocal
print('checkUsage2 total=%d used=%d free=%d'%(Total,UsedTotal,Total-UsedTotal))
def check_usage():
global useds
useds=list(range(0,1<<OpcodeWidth))
for i in useds:
useds[i]=0
print('check_usage step=0')
Names = instructions.keys()
for Inst in Names:
Coding = instructions[Inst].coding
register_used(Coding,0)
print('check_usage step=1')
print_unused()
print('check_usage step=2')
def register_used(code,sofar):
global useds
if (len(code)==0):
useds[sofar]=useds[sofar]+1
return
if (code[-1]=='0'):
register_used(code[:-1],sofar*2)
elif (code[-1]=='1'):
register_used(code[:-1],sofar*2+1)
else:
register_used(code[:-1],sofar*2)
register_used(code[:-1],sofar*2+1)
def print_unused():
nons=0
baby=[]
for i in range(0,len(useds)):
if (useds[i]==0):
baby = baby + [int2bin(i,OpcodeWidth)]
nons=nons+1
print('we have %d unused opcodes (baby=%d)'%(nons,len(baby)))
return # note: the compression report below is currently unreachable
baby = compressbin_round(baby,0)
print('baby %d'%(len(baby)))
baby = compressbin_round(baby,0)
print('baby %d'%(len(baby)))
total=0
for b in baby:
xfr = binfree(b)
total = total + xfr
b0 = b.replace('',' ')
b1 = b0.split()
b1.reverse()
print(''.join(b1),' ',xfr)
print('total free', total)
def binfree(In):
res = 1
for ch in In:
if (ch=='x'):
res = res *2
return res
def int2bin(Int,Wid):
res = ['0','0','0','0']
res = res + res + res + res
res = res + res
for i in range(0,32):
x = Int & (1<<i)
if (x):
res[31-i] = '1'
res = res[32-Wid:]
return ''.join(res)
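# Illustrative example: int2bin(5, 8) returns '00000101' -- the value is
# written into a 32-bit string and the low Wid characters are kept.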
def compressbin_round(In,Pr):
Len = len(In)
if (Len>512):
A = compressbin_round(In[0:Len//2],Pr) # // : slice indices must be ints in Python 3
B = compressbin_round(In[Len//2:],Pr)
C = A+B
C.sort()
XX= compressbin(C,Pr)
return XX
else:
return compressbin(In,Pr)
def compressbin(In,Pr):
flag=1
while (flag):
flag=0
for i in range(0,len(In)-1):
for j in range(i+1,len(In)):
if (i<(len(In)-1))and(j<(len(In))):
x1 = In[i]
x2 = In[j]
x3 = bincompatible(x1,x2)
if (Pr):
print(x3,'===',x1,x2,i,j,len(In))
if (x3!=0):
In[i]=x3
In.pop(j)
flag = 1
return In
def bincompatible(x1,x2):
count=0
res = ''
for i in range(0,OpcodeWidth):
if (x2[i]==x1[i]):
res = res + x1[i]
else:
if (count==1):
return 0
res = res + 'x'
count=1
return res
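# Illustrative example (using OpcodeWidth == 4 for brevity):
# bincompatible('0100', '0110') returns '01x0' because the patterns differ
# in exactly one position; patterns differing in two or more bits return 0.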
def check_contentions():
global txtline
Names = list(instructions.keys())
for Ind,Inst1 in enumerate(Names):
Coding1 = instructions[Inst1].coding
Cond1 = instructions[Inst1].cond
for Inst2 in Names[Ind+1:]:
if (Inst2!=Inst1):
Coding2 = instructions[Inst2].coding
Cond2 = instructions[Inst2].cond
if coding_collide(Coding1,Coding2)and(Cond1=='')and(Cond2==''):
txtline='%s<>%s'%(Inst1,Inst2)
catch_error('coding_collide',txtline)
print('no contentions found')
def coding_collide(Coding1,Coding2):
for X in range(0,OpcodeWidth):
C1 = Coding1[X]
C2 = Coding2[X]
if (C1 != C2) and(C1 in ['0','1'])and(C2 in ['0','1']):
return 0
return 1
def get_fields(List,Field):
Len = len(Field)+1
res=[]
for X in List:
if (len(X)>Len)and(X[:Len-1]==Field):
res = res + [ X[Len:]]
return res
def get_field(List,Field,Default):
Len = len(Field)+1
for X in List:
if (len(X)>Len)and(X[:Len-1]==Field):
return X[Len:]
if (Default=='error'):
catch_error('bad field search field="%s" on "%s" bailing out'%(Field,' '.join(List)),'nnn')
return Default
def deal_opcode_width(List):
global OpcodeWidth,Dnops,DmanualIfields,DnfSpecials,DmanualOpcodes
Inst = get_field(List,'opcode_width','bubu')
if (Inst!='bubu'):
OpcodeWidth = int(Inst)
print('set opcode width ',OpcodeWidth)
return 1
Inst = get_field(List,'properties','bubu')
if (Inst!='bubu'):
Regs = get_fields(List,'regs')
for Item in Regs:
Dnops[Item]=Inst
return 1
Inst = get_field(List,'manual_ifield','bubu')
if (Inst!='bubu'):
Regs = get_fields(List,'regs')
for Item in Regs:
DmanualIfields[Item]=Inst
return 1
Inst = get_field(List,'manual_opcode','bubu')
if (Inst!='bubu'):
Regs = get_fields(List,'regs')
for Item in Regs:
DmanualOpcodes[Item]=Inst
return 1
Inst = get_field(List,'dnf_special','bubu')
if (Inst!='bubu'):
Regs = get_fields(List,'regs')
for Item in Regs:
DnfSpecials[Item]=Inst
return 1
return 0
def deal_one_inst(List):
global txtline,instructions
# if len(List)<2: return
txtline = ' '.join(List)+' ;'
x = deal_opcode_width(List)
if (x):
return
Inst = get_field(List,'instruction','error')
Cond = get_field(List,'cond','')
coding = get_field(List,'coding','error')
pattern = get_field(List,'pattern','')
translate = get_field(List,'translate','')
color = get_field(List,'color','none')
Coding = parse_coding(coding)
print(Inst,len(Coding),Coding)
inst = Instruction(Inst)
inst.coding=Coding
inst.color=color
inst.pattern=pattern
inst.translate=translate
inst.oneliner = get_field(List,'oneliner','')
inst.flags = get_fields(List,'flag')
inst.cond = Cond
instructions[Inst] = inst
if (len(inst.coding)!=OpcodeWidth):
catch_error('instruction_coding_length %s %s'%(len(inst.coding),inst.coding),'')
def parse_coding(Text):
res=[]
wrds = Text.split(',')
for X in wrds:
Y = parse_item(X)
res=res+Y
return res
def parse_item(Item):
if (Item[0] in ['1','0']):
LL = list(Item)
return LL
if (Item[0]=='B'):
Wid = int(Item[1:])
return ['F']*Wid
if ('[' in Item):
X = Item.index('[')
if (Item[-1]!=']'):
catch_error('parse_item "]" ',Item)
Bus = Item[:X]
Inds = Item[X+1:-1]
wrds = Inds.split(':')
if (len(wrds)==2):
St = int(wrds[0])
En = int(wrds[1])
if (St<=En):
catch_error('parse_item St<=En',Item)
I = St
res=[]
while(I>=En):
II = '%s[%d]'%(Bus,I)
res = res + [II]
I=I-1
return res
elif (len(wrds)==1):
return ['%s[%d]'%(Bus,int(wrds[0]))]
else:
catch_error('parse_item',Item)
return [Item]
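# Illustrative examples of the coding grammar handled above:
#   parse_item('101')    -> ['1', '0', '1']          (literal bits)
#   parse_item('B3')     -> ['F', 'F', 'F']          (3 filler bits)
#   parse_item('r[2:0]') -> ['r[2]', 'r[1]', 'r[0]'] (bus slice, MSB first)
#   parse_item('imm')    -> ['imm']                  (single-bit field)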
def read_inst_file(File):
longline=''
ok=1
while ok:
line = File.readline()
ww = line.split()
if (len(line)==0):
ok=0
elif len(ww)==0:
pass
elif ww[0][0] in '/#':
pass
else:
longline=longline+' '+line
longline = longline.replace(';',' ; ')
wrds = longline.split()
LLL = []
while (len(wrds)>0):
X = wrds.index(';')
OneDef = wrds[0:X]
wrds = wrds[X+1:]
LLL = LLL + [OneDef]
return LLL
def catch_error(Text,What):
print('catch error',Text,What,' >>>',txtline)
# sys.exit()
def hex2bin(In):
res = ''
for X in In:
HH = hexdig2bin(X)
res = res + HH
return res
hexdigs={}
hexdigs['0']='0000'
hexdigs['1']='0001'
hexdigs['2']='0010'
hexdigs['3']='0011'
hexdigs['4']='0100'
hexdigs['5']='0101'
hexdigs['6']='0110'
hexdigs['7']='0111'
hexdigs['8']='1000'
hexdigs['9']='1001'
hexdigs['a']='1010'
hexdigs['b']='1011'
hexdigs['c']='1100'
hexdigs['d']='1101'
hexdigs['e']='1110'
hexdigs['f']='1111'
hexdigs['A']='1010'
hexdigs['B']='1011'
hexdigs['C']='1100'
hexdigs['D']='1101'
hexdigs['E']='1110'
hexdigs['F']='1111'
def hexdig2bin(Dig):
if (Dig in hexdigs):
return hexdigs[Dig]
else:
catch_error('hexdig2bin',Dig)
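# Note: the lookup-table bin2hex just below is shadowed by the second
# bin2hex definition that follows it; only the arithmetic version is used.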
def bin2hex(Bin):
res=''
while (len(Bin)>0):
A = Bin[0:4]
Bin=Bin[4:]
ok=0
for K in hexdigs:
if (hexdigs[K]==A)and(ok==0)and(K not in 'ABCDEF'):
ok=1
res=res+K
return res
def bin2hex(Bin):
res = ''
while ((len(Bin)%4)!=0):
Bin='0'+Bin
while 1:
X = Bin[-4:]
Bin=Bin[:-4]
S = '%x'%(int(X,2))
res =S+res
if (len(Bin)==0):
return res
def bin2int(Bin):
res=0
for X in Bin:
if (X=='1'):
res=2*res+1
else:
res=res*2
return res
main()
| 29.080166 | 136 | 0.502887 | 5,427 | 42,079 | 3.822738 | 0.081629 | 0.060735 | 0.016437 | 0.017208 | 0.482454 | 0.411597 | 0.379302 | 0.357129 | 0.323677 | 0.306035 | 0 | 0.022352 | 0.323796 | 42,079 | 1,446 | 137 | 29.100277 | 0.706755 | 0.022386 | 0 | 0.428686 | 0 | 0.004808 | 0.183015 | 0.02822 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049679 | false | 0.001603 | 0.001603 | 0.000801 | 0.108173 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
123220df37e0808414ed2913c0bf8b65b7d004a4 | 5,840 | py | Python | dmlab2d/random_agent.py | jifflund/lab2d | f634c378d428dd19e8b154aa5b590d33f42438bf | ["Apache-2.0"] | 377 | 2020-11-16T01:30:06.000Z | 2022-03-24T09:30:00.000Z | dmlab2d/random_agent.py | jifflund/lab2d | f634c378d428dd19e8b154aa5b590d33f42438bf | ["Apache-2.0"] | 17 | 2020-11-18T13:57:12.000Z | 2022-03-28T01:20:52.000Z | dmlab2d/random_agent.py | jifflund/lab2d | f634c378d428dd19e8b154aa5b590d33f42438bf | ["Apache-2.0"] | 47 | 2020-11-16T12:36:10.000Z | 2022-03-24T17:50:18.000Z |
# Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Random agent for running against DM Lab2D environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import numpy as np
import pygame
import dmlab2d
from dmlab2d import runfiles_helper
def _make_int32_distribution(random, minimum, maximum):
def function():
return random.randint(minimum, maximum + 1)
return function
def _make_float64_distribution(random, minimum, maximum):
def function():
return random.uniform(minimum, maximum)
return function
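# For example (illustrative), _make_int32_distribution(rng, 0, 4)() draws an
# int in the inclusive range [0, 4] on each call, while the float64 variant
# samples uniformly from [minimum, maximum).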
class PyGameRandomAgent(object):
"""Random agent works with int32 or float64 bounded actions."""
def __init__(self, action_spec, observation_name, observation_spec, seed,
scale):
"""Create a PyGame agent.
Args:
action_spec: Environment action spec used to generate random actions.
observation_name: Name of observation to render each frame.
observation_spec: Environment observation spec for creating PyGame window.
seed: Agent seed used for generating random actions.
scale: Scales screen.
"""
self._observation_name = observation_name
random = np.random.RandomState(seed)
self._actions = []
self._scores = []
self._scale = scale
for name, spec in action_spec.items():
if spec.dtype == np.dtype('int32'):
self._actions.append(
(name, _make_int32_distribution(random, spec.minimum,
spec.maximum)))
elif spec.dtype == np.dtype('float64'):
self._actions.append(
(name, _make_float64_distribution(random, spec.minimum,
spec.maximum)))
else:
print("Warning '{}' is not supported".format(spec))
obs_spec = observation_spec[observation_name]
self._setup_py_game(obs_spec.shape)
def _setup_py_game(self, shape):
pygame.init()
pygame.display.set_caption('DM Lab2d')
self._game_display = pygame.display.set_mode(
(int(shape[1] * self._scale), int(shape[0] * self._scale)))
def _render_observation(self, observation):
obs = np.transpose(observation, (1, 0, 2))
surface = pygame.surfarray.make_surface(obs)
rect = surface.get_rect()
surf = pygame.transform.scale(
surface, (int(rect[2] * self._scale), int(rect[3] * self._scale)))
self._game_display.blit(surf, dest=(0, 0))
pygame.display.update()
def step(self, timestep):
"""Renders timestep and returns random actions according to spec."""
self._render_observation(timestep.observation[self._observation_name])
display_score_dirty = False
if timestep.reward is not None:
if timestep.reward != 0:
self._scores[-1] += timestep.reward
display_score_dirty = True
else:
self._scores.append(0)
display_score_dirty = True
if display_score_dirty:
pygame.display.set_caption('%d score' % self._scores[-1])
return {name: gen() for name, gen in self._actions}
def print_stats(self):
print('Scores: ' + ', '.join(str(score) for score in self._scores))
def _create_environment(args):
"""Creates an environment.
Args:
args: See `main()` for description of args.
Returns:
dmlab2d.Environment with one observation.
"""
args.settings['levelName'] = args.level_name
lab2d = dmlab2d.Lab2d(runfiles_helper.find(), args.settings)
return dmlab2d.Environment(lab2d, [args.observation], args.env_seed)
def _run(args):
"""Runs a random agent against an environment rendering the results.
Args:
args: See `main()` for description of args.
"""
env = _create_environment(args)
agent = PyGameRandomAgent(env.action_spec(), args.observation,
env.observation_spec(), args.agent_seed, args.scale)
for _ in range(args.num_episodes):
timestep = env.reset()
# Run single episode.
while True:
# Query PyGame for early termination.
if any(event.type == pygame.QUIT for event in pygame.event.get()):
print('Exit early last score may be truncated:')
agent.print_stats()
return
action = agent.step(timestep)
timestep = env.step(action)
if timestep.last():
# Observe last frame of episode.
agent.step(timestep)
break
# All episodes completed, report per episode.
agent.print_stats()
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='clean_up', help='Level name to load')
parser.add_argument(
'--observation',
type=str,
default='WORLD.RGB',
help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--env_seed', type=int, default=0, help='Environment seed')
parser.add_argument('--agent_seed', type=int, default=0, help='Agent seed')
parser.add_argument(
'--num_episodes', type=int, default=1, help='Number of episodes')
parser.add_argument(
'--scale', type=float, default=1, help='Scale to render screen')
args = parser.parse_args()
_run(args)
if __name__ == '__main__':
main()
| 31.73913 | 80 | 0.682363 | 743 | 5,840 | 5.188425 | 0.300135 | 0.016342 | 0.030869 | 0.008301 | 0.092348 | 0.079377 | 0.046693 | 0.046693 | 0 | 0 | 0 | 0.011487 | 0.209932 | 5,840 | 183 | 81 | 31.912568 | 0.824014 | 0.246062 | 0 | 0.183486 | 0 | 0 | 0.080297 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.110092 | false | 0 | 0.082569 | 0.018349 | 0.266055 | 0.06422 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
12342b9334c2a4a3508d4c58273cbcad27a8143b | 7,336 | py | Python | KthFoldADXMultiPeriod.py | adamrvfisher/TechnicalAnalysisLibrary | 38a22b2b2b5052623f81edb11b3c5460fc254e45 | ["Apache-2.0"] | 3 | 2019-04-26T11:13:14.000Z | 2020-01-10T05:58:16.000Z | KthFoldADXMultiPeriod.py | adamrvfisher/TechnicalAnalysisLibrary | 38a22b2b2b5052623f81edb11b3c5460fc254e45 | ["Apache-2.0"] | null | null | null | KthFoldADXMultiPeriod.py | adamrvfisher/TechnicalAnalysisLibrary | 38a22b2b2b5052623f81edb11b3c5460fc254e45 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#pandas_datareader is deprecated, use YahooGrabber
#This is part of a k-th fold optimization tool
#Import modules
#import numpy as np
#from pandas_datareader import data
import pandas as pd
import time as t
import numpy as np
from pandas_datareader import data
from DefModADXStratOpt import DefModADXStratOpt
from ModADXAggMaker import ModADXAggMaker
#Assign ticker
ticker = 'TLT'
#Time series splits
firsttime = '07/01/2002'
secondtime = '07/01/2007'
thirdtime = '07/01/2012'
fourthtime = '01/01/2015'
lasttime = '01/01/2050'
#Number of iterations
multiplier = 400
ranger1 = range(0,multiplier)
iterations = 5000
ranger2 = range(0,iterations)
#Empty data structures
empty = []
counter = 0
DS1W = pd.DataFrame()
DS2W = pd.DataFrame()
DS3W = pd.DataFrame()
DS4W = pd.DataFrame()
#DS1W
#Start timer
start1 = t.time()
#Request data
s = data.DataReader(ticker, 'yahoo', start=firsttime, end=secondtime)
#ADX calculation
s['UpMove'] = s['High'] - s['High'].shift(1)
s['DownMove'] = s['Low'] - s['Low'].shift(1)
s['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1))
s['LogRet'] = s['LogRet'].fillna(0)
s['Method1'] = s['High'] - s['Low']
s['Method2'] = abs((s['High'] - s['Adj Close'].shift(1)))
s['Method3'] = abs((s['Low'] - s['Adj Close'].shift(1)))
s['Method1'] = s['Method1'].fillna(0)
s['Method2'] = s['Method2'].fillna(0)
s['Method3'] = s['Method3'].fillna(0)
s['TrueRange'] = s[['Method1','Method2','Method3']].max(axis = 1)
s['PDM'] = (s['High'] - s['High'].shift(1))
s['MDM'] = (s['Low'].shift(1) - s['Low'])
s['PDM'] = s['PDM'][s['PDM'] > 0]
s['MDM'] = s['MDM'][s['MDM'] > 0]
s['PDM'] = s['PDM'].fillna(0)
s['MDM'] = s['MDM'].fillna(0)
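#Note (illustrative): TrueRange above is Wilder's true range, the bar-wise
#maximum of High-Low, |High-PrevClose| and |Low-PrevClose|; PDM/MDM keep only
#the positive directional moves, as in the classic ADX construction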
#For number of iterations
for r in ranger1:
#Iteration tracking
print(counter)
counter = counter + 1
#Get random params and generated metrics
holder = DefModADXStratOpt(ranger2, s)
#Add to dataframe
DS1W = pd.concat([DS1W, holder], axis = 1)
#End timer
end1 = t.time()
#Timer stats
print('Dataset 1 is optimized, it took',end1-start1,'seconds') #run time in seconds
#DS2W
counter = 0
#Start timer
start2 = t.time()
#Request data
s = data.DataReader(ticker, 'yahoo', start=secondtime, end=thirdtime)
#ADX calculation
s['UpMove'] = s['High'] - s['High'].shift(1)
s['DownMove'] = s['Low'] - s['Low'].shift(1)
s['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1))
s['LogRet'] = s['LogRet'].fillna(0)
s['Method1'] = s['High'] - s['Low']
s['Method2'] = abs((s['High'] - s['Adj Close'].shift(1)))
s['Method3'] = abs((s['Low'] - s['Adj Close'].shift(1)))
s['Method1'] = s['Method1'].fillna(0)
s['Method2'] = s['Method2'].fillna(0)
s['Method3'] = s['Method3'].fillna(0)
s['TrueRange'] = s[['Method1','Method2','Method3']].max(axis = 1)
s['PDM'] = (s['High'] - s['High'].shift(1))
s['MDM'] = (s['Low'].shift(1) - s['Low'])
s['PDM'] = s['PDM'][s['PDM'] > 0]
s['MDM'] = s['MDM'][s['MDM'] > 0]
s['PDM'] = s['PDM'].fillna(0)
s['MDM'] = s['MDM'].fillna(0)
#For number of iterations
for r in ranger1:
#Iteration tracking
print(counter)
counter = counter + 1
#Get random params and generated metrics
holder = DefModADXStratOpt(ranger2, s)
#Add to dataframe
DS2W = pd.concat([DS2W, holder], axis = 1)
#End timer
end2 = t.time()
#Timer stats
print('Dataset 2 is optimized, it took',end2-start2,'seconds') #run time in seconds
#DS3W
counter = 0
#Start timer
start3 = t.time()
#Request data
s = data.DataReader(ticker, 'yahoo', start=thirdtime, end=lasttime)
#ADX calculation
s['UpMove'] = s['High'] - s['High'].shift(1)
s['DownMove'] = s['Low'] - s['Low'].shift(1)
s['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1))
s['LogRet'] = s['LogRet'].fillna(0)
s['Method1'] = s['High'] - s['Low']
s['Method2'] = abs((s['High'] - s['Adj Close'].shift(1)))
s['Method3'] = abs((s['Low'] - s['Adj Close'].shift(1)))
s['Method1'] = s['Method1'].fillna(0)
s['Method2'] = s['Method2'].fillna(0)
s['Method3'] = s['Method3'].fillna(0)
s['TrueRange'] = s[['Method1','Method2','Method3']].max(axis = 1)
s['PDM'] = (s['High'] - s['High'].shift(1))
s['MDM'] = (s['Low'].shift(1) - s['Low'])
s['PDM'] = s['PDM'][s['PDM'] > 0]
s['MDM'] = s['MDM'][s['MDM'] > 0]
s['PDM'] = s['PDM'].fillna(0)
s['MDM'] = s['MDM'].fillna(0)
#For number of iterations
for r in ranger1:
#Iteration tracking
print(counter)
counter = counter + 1
#Get random params and generated metrics
holder = DefModADXStratOpt(ranger2, s)
#Add to dataframe
DS3W = pd.concat([DS3W, holder], axis = 1)
#End timer
end3 = t.time()
#Timer stats
print('Dataset 3 is optimized, it took',end3-start3,'seconds')
#DS4W
counter = 0
#Start timer
start4 = t.time()
#Request data
s = data.DataReader(ticker, 'yahoo', start=fourthtime, end=lasttime)
#ADX calculation
s['UpMove'] = s['High'] - s['High'].shift(1)
s['DownMove'] = s['Low'] - s['Low'].shift(1)
s['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1))
s['LogRet'] = s['LogRet'].fillna(0)
s['Method1'] = s['High'] - s['Low']
s['Method2'] = abs((s['High'] - s['Adj Close'].shift(1)))
s['Method3'] = abs((s['Low'] - s['Adj Close'].shift(1)))
s['Method1'] = s['Method1'].fillna(0)
s['Method2'] = s['Method2'].fillna(0)
s['Method3'] = s['Method3'].fillna(0)
s['TrueRange'] = s[['Method1','Method2','Method3']].max(axis = 1)
s['PDM'] = (s['High'] - s['High'].shift(1))
s['MDM'] = (s['Low'].shift(1) - s['Low'])
s['PDM'] = s['PDM'][s['PDM'] > 0]
s['MDM'] = s['MDM'][s['MDM'] > 0]
s['PDM'] = s['PDM'].fillna(0)
s['MDM'] = s['MDM'].fillna(0)
#For number of iterations
for r in ranger1:
#Iteration tracking
print(counter)
counter = counter + 1
#Get random params and generated metrics
holder = DefModADXStratOpt(ranger2, s)
#Add to dataframe
DS4W = pd.concat([DS4W, holder], axis = 1)
#End timer
end4 = t.time()
print('Dataset 4 is optimized, it took',end4-start4,'seconds') #run time in seconds
#Define out of sample period test sets
S1TS = pd.DataFrame()
S2TS = pd.DataFrame()
S3TS = pd.DataFrame()
S4TS = pd.DataFrame()
#Remove duplicate columns
DS1W = DS1W.loc[:,~DS1W.columns.duplicated()]
DS2W = DS2W.loc[:,~DS2W.columns.duplicated()]
DS3W = DS3W.loc[:,~DS3W.columns.duplicated()]
DS4W = DS4W.loc[:,~DS4W.columns.duplicated()]
#Merge winners to create test sets
S1TS = pd.concat([S1TS, DS2W, DS3W, DS4W], axis = 1)
S2TS = pd.concat([S2TS, DS1W, DS3W, DS4W], axis = 1)
S3TS = pd.concat([S3TS, DS1W, DS2W, DS4W], axis = 1)
S4TS = pd.concat([S4TS, DS1W, DS2W, DS3W], axis = 1)
#Remove duplicate columns
S1TS = S1TS.loc[:,~S1TS.columns.duplicated()]
S2TS = S2TS.loc[:,~S2TS.columns.duplicated()]
S3TS = S3TS.loc[:,~S3TS.columns.duplicated()]
S4TS = S4TS.loc[:,~S4TS.columns.duplicated()]
#Test the test sets
testset1winners = ModADXAggMaker(ticker, S1TS, firsttime, secondtime)
testset2winners = ModADXAggMaker(ticker, S2TS, secondtime, thirdtime)
testset3winners = ModADXAggMaker(ticker, S3TS, thirdtime, lasttime)
testset4winners = ModADXAggMaker(ticker, S4TS, fourthtime, lasttime)
#Dataframe for params that pass all test sets
Aggregate = pd.DataFrame()
Aggregate = pd.concat([Aggregate, testset1winners, testset2winners,
testset3winners, testset4winners],axis = 1)
#Total optimal param sets
Aggregate = Aggregate.loc[:,~Aggregate.columns.duplicated()]
| 33.497717 | 83 | 0.643675 | 1,116 | 7,336 | 4.228495 | 0.148746 | 0.013562 | 0.041534 | 0.035601 | 0.581903 | 0.551176 | 0.534011 | 0.534011 | 0.534011 | 0.51494 | 0 | 0.044896 | 0.134678 | 7,336 | 218 | 84 | 33.651376 | 0.698488 | 0.166031 | 0 | 0.575163 | 0 | 0 | 0.195207 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.039216 | 0 | 0.039216 | 0.052288 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1235812bfd2f59b3e973436432bfcc56572fe49f | 2,292 | py | Python | python/hw5/logger.py | jeremy24/494-graph-algos | 031a90e46304f405829bad7658965aae215833e1 | ["MIT"] | null | null | null | python/hw5/logger.py | jeremy24/494-graph-algos | 031a90e46304f405829bad7658965aae215833e1 | ["MIT"] | null | null | null | python/hw5/logger.py | jeremy24/494-graph-algos | 031a90e46304f405829bad7658965aae215833e1 | ["MIT"] | null | null | null |
from __future__ import print_function
import logging
import sys
import pip
import os
def install(package):
try:
pip.main(["install", package])
except Exception as ex:
raise "Unable to install " + package + ex
try:
install("colorlog")
import colorlog
except Exception as ex:
raise ex
def mk_logger(have_colorlog):
log = logging.getLogger() # root logger
log.setLevel(logging.DEBUG)
format = '%(asctime)s - %(levelname)-8s - %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
if have_colorlog and os.isatty(2):
cformat = '%(log_color)s' + format
f = colorlog.ColoredFormatter(cformat, date_format,
log_colors = { 'DEBUG' : 'reset', 'INFO' : 'reset',
'WARNING' : 'bold_yellow', 'ERROR': 'bold_red',
'CRITICAL': 'bold_red' })
else:
f = logging.Formatter(format, date_format)
ch = logging.StreamHandler()
ch.setFormatter(f)
log.addHandler(ch)
return logging.getLogger(__name__)
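# Example usage (illustrative): after configuration, any module can do
#   log = mk_logger(True)
#   log.warning("bold yellow when colorlog is active and stderr is a tty")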
class LogException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class Logger():
_level = "debug"
_levels = ["debug", "info", "warn", "error"]
# colors = { "debug": "blue", "info": "green", "warning": "yellow", "error": "red"}
def __init__(self, module):
self.module = str(module)
self.have_colorlog = True
self.logger = mk_logger(True)
@property
def level(self):
return self._level
@property
def levels(self):
return self._levels
@level.setter
def level(self, val):
if val in self.levels: # levels is a property, not a method
self._level = val
# Note: a plain method, not a @property -- a property getter cannot take
# extra positional arguments.
def form(self, *args):
msg = ""
try:
for arg in args:
print(arg)
msg += " " + str(arg)
except Exception as ex:
print("Error concatenating args! " + str(ex))
return msg
def debug(self, *args):
self.logger.debug("a")
self.logger.debug(args)
a = Logger("test")
a.debug("blah", "blah")
| 25.186813 | 87 | 0.545375 | 253 | 2,292 | 4.786561 | 0.367589 | 0.034682 | 0.042114 | 0.047069 | 0.039637 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001302 | 0.329843 | 2,292 | 90 | 88 | 25.466667 | 0.787109 | 0.052792 | 0 | 0.130435 | 0 | 0 | 0.106697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144928 | false | 0 | 0.086957 | 0.043478 | 0.362319 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
12383b0b628c1ffcd2fa0c6b1e0b4f97764ed9f1 | 33,002 | py | Python | egg/platform_cpu.py | eschnett/nsimd | 11a58156ac8f1d8b60f1112c41efd9ef91d91c3d | ["MIT"] | null | null | null | egg/platform_cpu.py | eschnett/nsimd | 11a58156ac8f1d8b60f1112c41efd9ef91d91c3d | ["MIT"] | null | null | null | egg/platform_cpu.py | eschnett/nsimd | 11a58156ac8f1d8b60f1112c41efd9ef91d91c3d | ["MIT"] | null | null | null |
# Copyright (c) 2019 Agenium Scale
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file gives the implementation of platform CPU, i.e. scalar emulation.
# Reading this file is straightforward. For each function, e.g. the addition,
# code looks like:
#
# return 'return {} + {};'.format(common.in0, common.in1)
#
# with an 'if' before to handle the FP16 special case.
import common
# -----------------------------------------------------------------------------
# Emulation parameters
#
# When emulating, we need to choose a vector length to fit the philosophy of
# SIMD. By default we choose 64 bits. It must be a multiple of 64 bits.
NBITS = common.CPU_NBITS
def get_nb_el(typ):
return NBITS // int(typ[1:])
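# For example, with the default NBITS == 64 this gives 2 emulated lanes for
# 'f32' and 8 lanes for 'i8'.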
# -----------------------------------------------------------------------------
# Implementation of mandatory functions for this module
def get_simd_exts():
return ['cpu']
def get_simd_strings(simd_ext):
if simd_ext == 'cpu':
return ['cpu']
else:
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
def emulate_fp16(simd_ext):
if simd_ext != 'cpu':
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
return True
def get_type(simd_ext, typ):
if simd_ext != 'cpu':
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
if typ not in common.types:
raise ValueError('Unknown type "{}"'.format(typ))
typ2 = typ if typ != 'f16' else 'f32'
members = '\n'.join('{} v{};'.format(typ2, i) \
for i in range(0, get_nb_el(typ)))
return 'struct {{ {} }}'.format(members)
def get_logical_type(simd_ext, typ):
if simd_ext != 'cpu':
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
if typ not in common.types:
raise ValueError('Unknown type "{}"'.format(typ))
members = '\n'.join('unsigned int v{};'.format(i) \
for i in range(0, get_nb_el(typ)))
return 'struct {{ {} }}'.format(members)
def get_nb_registers(simd_ext):
if simd_ext != 'cpu':
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
return '1'
def has_compatible_SoA_types(simd_ext):
if simd_ext != 'cpu':
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
return False
def get_additional_include(func, platform, simd_ext):
if func in ['sqrt', 'ceil', 'floor', 'trunc']:
return '''#if NSIMD_CXX > 0
#include <cmath>
#else
#include <math.h>
#endif'''
elif func in ['']:
return '''#include <nsimd/cpu/cpu/reinterpret.h>
'''
return ''
# -----------------------------------------------------------------------------
# Returns C code for func
fmtspec = {}
def repeat_stmt(fmt, typ):
return '\n'.join(fmt.format(i=i) for i in range(0, get_nb_el(typ)))
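# Illustrative example: with NBITS == 64, repeat_stmt('ret.v{i} = 0;', 'f32')
# expands to the two lines 'ret.v0 = 0;' and 'ret.v1 = 0;'.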
# -----------------------------------------------------------------------------
def func_body(fmt, typ2, logical = False):
return '''nsimd_cpu_v{logical}{typ2} ret;
{content}
return ret;'''.format(logical='l' if logical else '', typ2=typ2,
content=repeat_stmt(fmt, typ2), **fmtspec)
# -----------------------------------------------------------------------------
def op2(op, typ):
return func_body('ret.v{{i}} = {cast}({in0}.v{{i}} {op} {in1}.v{{i}});'. \
format(cast='({})'.format(typ) if typ in common.iutypes \
else '', op=op, **fmtspec), typ)
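# Sketch of the generated C (illustrative): op2('+', 'i32') with NBITS == 64
# produces a body along the lines of
#   nsimd_cpu_vi32 ret;
#   ret.v0 = (i32)(a0.v0 + a1.v0);
#   ret.v1 = (i32)(a0.v1 + a1.v1);
#   return ret;
# where a0/a1 stand for the argument names injected through fmtspec.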
# -----------------------------------------------------------------------------
def lop2(op, typ):
return func_body('ret.v{{i}} = {in0}.v{{i}} {op} {in1}.v{{i}};'. \
format(op=op, **fmtspec), typ, True)
# -----------------------------------------------------------------------------
def bitwise2(op, typ):
if typ in common.utypes:
return op2(op, typ)
utyp2 = 'u32' if typ == 'f16' else common.bitfield_type[typ]
typ2 = 'f32' if typ == 'f16' else typ
return '''nsimd_cpu_v{typ} ret;
union {{ {utyp2} u; {typ2} f; }} buf0, buf1;
{content}
return ret;'''.format(content=repeat_stmt(
'''buf0.f = {in0}.v{{i}};
buf1.f = {in1}.v{{i}};
buf0.u = ({utyp2})(buf0.u {op} buf1.u);
ret.v{{i}} = buf0.f;'''.format(utyp2=utyp2, op=op, **fmtspec),
typ), utyp2=utyp2, typ2=typ2, **fmtspec)
# -----------------------------------------------------------------------------
def andnot2(typ):
if typ in common.utypes:
return '''nsimd_cpu_v{typ} ret;
{content}
return ret;'''.format(content=repeat_stmt(
'ret.v{{i}} = ({typ})({in0}.v{{i}} & (~{in1}.v{{i}}));'. \
format(**fmtspec), typ), **fmtspec)
utyp2 = 'u32' if typ == 'f16' else common.bitfield_type[typ]
typ2 = 'f32' if typ == 'f16' else typ
return '''nsimd_cpu_v{typ} ret;
union {{ {utyp2} u; {typ2} f; }} buf0, buf1;
{content}
return ret;'''.format(content=repeat_stmt(
'''buf0.f = {in0}.v{{i}};
buf1.f = {in1}.v{{i}};
buf0.u = ({utyp2})(buf0.u & (~buf1.u));
ret.v{{i}} = buf0.f;'''.format(utyp2=utyp2, **fmtspec), typ),
utyp2=utyp2, typ2=typ2, **fmtspec)
# -----------------------------------------------------------------------------
def landnot2(typ):
return func_body('ret.v{{i}} = {in0}.v{{i}} & (~{in1}.v{{i}});'.\
format(**fmtspec), typ, True)
# -----------------------------------------------------------------------------
def lnot1(typ):
return func_body('ret.v{{i}} = ~{in0}.v{{i}};'.\
format(**fmtspec), typ, True)
# -----------------------------------------------------------------------------
def not1(typ):
if typ in common.utypes:
return func_body('ret.v{{i}} = ({typ})(~{in0}.v{{i}});'. \
format(**fmtspec), typ)
utyp2 = 'u32' if typ == 'f16' else common.bitfield_type[typ]
typ2 = 'f32' if typ == 'f16' else typ
return '''nsimd_cpu_v{typ} ret;
union {{ {utyp2} u; {typ2} f; }} buf0;
{content}
return ret;'''.format(content=repeat_stmt(
'''buf0.f = {in0}.v{{i}};
buf0.u = ({utyp2})(~buf0.u);
ret.v{{i}} = buf0.f;'''.format(utyp2=utyp2, **fmtspec), typ),
utyp2=utyp2, typ2=typ2, **fmtspec)
# -----------------------------------------------------------------------------
def minmax2(minmax, typ):
op = '<' if minmax == 'min' else '>'
return func_body('''ret.v{{i}} = {in0}.v{{i}} {op} {in1}.v{{i}} ?
{in0}.v{{i}} : {in1}.v{{i}};'''. \
format(op=op, **fmtspec), typ)
# -----------------------------------------------------------------------------
def libm_op1(func, typ, until_cpp11 = False, c89_code = ''):
cxx_version = '> 0' if not until_cpp11 else '>= 2011'
comment = \
'''/* {func} is not available in C89 but is given by POSIX 2001 */
/* and C99. But we do not want to pollute the user includes */
/* and POSIX value if set so we play dirty. */'''. \
format(func=func)
if c89_code != '':
c89_code = repeat_stmt(c89_code, typ)
if typ in ['f16', 'f32']:
c99_code = repeat_stmt('ret.v{{i}} = {func}f({in0}.v{{i}});'. \
format(func=func, **fmtspec), typ)
if c89_code == '':
c89_code = repeat_stmt(
'ret.v{{i}} = (f32){func}((f64){in0}.v{{i}});'. \
format(func=func, **fmtspec), typ)
return \
''' {comment}
nsimd_cpu_v{typ} ret;
#if defined(NSIMD_IS_MSVC) && _MSC_VER <= 1800 /* VS 2012 */
{c89_code}
#else
#if NSIMD_CXX {cxx_version} || NSIMD_C >= 1999 || \
_POSIX_C_SOURCE >= 200112L
{c99_code}
#else
{c89_code}
#endif
#endif
return ret;'''. \
format(comment=comment, func=func, cxx_version=cxx_version,
c89_code=c89_code, c99_code=c99_code, **fmtspec)
else:
c99_code = repeat_stmt('ret.v{{i}} = {func}({in0}.v{{i}});'. \
format(func=func, **fmtspec), typ)
if c89_code == '':
return '''nsimd_cpu_vf64 ret;
{c99_code}
return ret;'''.format(c99_code=c99_code)
return \
''' {comment}
nsimd_cpu_vf64 ret;
#if NSIMD_CXX {cxx_version} || NSIMD_C >= 1999 || \
_POSIX_C_SOURCE >= 200112L
{c99_code}
#else
{c89_code}
#endif
return ret;'''. \
format(comment=comment, c89_code=c89_code, c99_code=c99_code,
cxx_version=cxx_version, **fmtspec)
# -----------------------------------------------------------------------------
def sqrt1(typ):
return libm_op1('sqrt', typ)
# -----------------------------------------------------------------------------
def ceil1(typ):
if typ in ['f16', 'f32', 'f64']:
return libm_op1('ceil', typ)
return 'return {in0};'.format(**fmtspec)
# -----------------------------------------------------------------------------
def floor1(typ):
if typ in ['f16', 'f32', 'f64']:
return libm_op1('floor', typ)
return 'return {in0};'.format(**fmtspec)
# -----------------------------------------------------------------------------
def trunc1(typ):
if typ == 'f16':
c89_code = '''ret = {in0}.v{{i}} >= 0.0f
? nsimd_floor_cpu_{typ}({in0})
: nsimd_ceil_cpu_{typ}({in0});'''. \
format(**fmtspec)
return libm_op1('trunc', typ, True, c89_code)
elif typ in common.ftypes:
c89_code = '''ret = {in0}.v{{i}} >= ({typ})0
? nsimd_floor_cpu_{typ}({in0})
: nsimd_ceil_cpu_{typ}({in0});'''. \
format(**fmtspec)
return libm_op1('trunc', typ, True, c89_code)
return 'return {in0};'.format(**fmtspec)
# -----------------------------------------------------------------------------
def round_to_even1(typ):
if typ in common.iutypes:
return 'return {in0};'.format(**fmtspec)
stmt = '''{{{{
{typ2} fl_p_half = fl.v{{i}} + 0.5{suffix};
if (fl.v{{i}} == {in0}.v{{i}}) {{{{
ret.v{{i}} = {in0}.v{{i}};
}}}}
if ({in0}.v{{i}} == fl_p_half) {{{{
f64 flo2 = (f64)(fl.v{{i}} * 0.5{suffix});
if (floor(flo2) == flo2) {{{{
ret.v{{i}} = fl.v{{i}};
}}}} else {{{{
ret.v{{i}} = ce.v{{i}};
}}}}
}}}} else if ({in0}.v{{i}} > fl_p_half) {{{{
ret.v{{i}} = ce.v{{i}};
}}}} else {{{{
ret.v{{i}} = fl.v{{i}};
}}}}
}}}}'''.format(typ2 = 'f32' if typ in ['f16', 'f32'] else 'f64',
suffix = 'f' if typ in ['f16', 'f32'] else '',
**fmtspec)
return \
'''nsimd_cpu_v{typ} fl = nsimd_floor_cpu_{typ}({in0});
nsimd_cpu_v{typ} ce = nsimd_ceil_cpu_{typ}({in0});
nsimd_cpu_v{typ} ret;
'''.format(**fmtspec) + \
repeat_stmt(stmt, typ) + '\n' + \
'return ret;'
# -----------------------------------------------------------------------------
def bitwise1_param(op, typ):
if typ in common.utypes:
return func_body('ret.v{{i}} = ({typ})({in0}.v{{i}} {op} {in1});'. \
format(op=op, **fmtspec), typ)
else:
return '''nsimd_cpu_v{typ} ret;
union {{ {typ} i; {utyp} u; }} buf;
{content}
return ret;'''. \
format(content=repeat_stmt(
'''buf.i = {in0}.v{{i}};
buf.u = ({utyp})(buf.u {op} {in1});
ret.v{{i}} = buf.i;'''.format(op=op, **fmtspec), typ),
**fmtspec)
# -----------------------------------------------------------------------------
def cmp2(op, typ):
return '''nsimd_cpu_vl{typ} ret;
{content}
return ret;'''.format(content=repeat_stmt(
'''ret.v{{i}} = ({in0}.v{{i}} {op} {in1}.v{{i}}
? (u32)-1 : (u32)0);'''. \
format(op=op, **fmtspec), typ), **fmtspec)
# -----------------------------------------------------------------------------
def set1(typ):
if typ == 'f16':
content = repeat_stmt('ret.v{{i}} = nsimd_f16_to_f32({in0});'. \
format(**fmtspec), typ)
else:
content = repeat_stmt('ret.v{{i}} = {in0};'.format(**fmtspec), typ)
return '''nsimd_cpu_v{typ} ret;
{content}
return ret;'''.format(content=content, **fmtspec)
# -----------------------------------------------------------------------------
def load(typ):
if typ == 'f16':
content = repeat_stmt(
'ret.v{{i}} = nsimd_u16_to_f32(((u16 *){in0})[{{i}}]);'. \
format(**fmtspec), typ)
else:
content = repeat_stmt('ret.v{{i}} = {in0}[{{i}}];'.format(**fmtspec),
typ)
return '''nsimd_cpu_v{typ} ret;
{content}
return ret;'''.format(content=content, **fmtspec)
# -----------------------------------------------------------------------------
def load_deg234(typ, deg):
if typ == 'f16':
buf = repeat_stmt(
'''ret.v{{{{j}}}}.v{{i}} =
nsimd_u16_to_f32(
((u16 *){in0})[{deg} * {{i}} + {{{{j}}}}]);'''. \
format(deg=deg, **fmtspec), typ)
else:
buf = repeat_stmt(
'ret.v{{{{j}}}}.v{{i}} = {in0}[{deg} * {{i}} + {{{{j}}}}];'. \
format(deg=deg, **fmtspec), typ)
content = '\n'.join(buf.format(j=j) for j in range(0, deg))
return '''nsimd_cpu_v{typ}x{deg} ret;
{content}
return ret;'''.format(deg=deg, content=content, **fmtspec)
# -----------------------------------------------------------------------------
def store_deg234(typ, deg):
content = ''
for i in range(0, get_nb_el(typ)):
for j in range(1, deg + 1):
arg = fmtspec['in{}'.format(j)]
if typ == 'f16':
content += \
'''((u16 *){in0})[{deg} * {i} + {j}] =
nsimd_f32_to_u16({arg}.v{i});\n'''. \
format(deg=deg, i=i, j=j - 1, arg=arg, **fmtspec)
else:
content += \
'{in0}[{deg} * {i} + {j}] = {arg}.v{i};\n'. \
format(deg=deg, i=i, j=j - 1, arg=arg, **fmtspec)
return content[:-1]
# -----------------------------------------------------------------------------
def loadl(typ):
if typ == 'f16':
content = repeat_stmt(
'''ret.v{{i}} = nsimd_u16_to_f32(
((u16 *){in0})[{{i}}]) == 0.0f
? (u32)0 : (u32)-1;'''.format(**fmtspec), typ)
else:
content = repeat_stmt(
'''ret.v{{i}} = {in0}[{{i}}] == ({typ})0
? (u32)0 : (u32)-1;'''. \
format(**fmtspec), typ)
return '''nsimd_cpu_vl{typ} ret;
{content}
return ret;'''.format(content=content, **fmtspec)
# -----------------------------------------------------------------------------
def store(typ):
if typ == 'f16':
content = repeat_stmt(
'((u16*){in0})[{{i}}] = nsimd_f32_to_u16({in1}.v{{i}});'. \
format(**fmtspec), typ)
else:
content = repeat_stmt('{in0}[{{i}}] = {in1}.v{{i}};'. \
format(**fmtspec), typ)
return content
# -----------------------------------------------------------------------------
def storel(typ):
if typ == 'f16':
content = repeat_stmt(
'''((u16*){in0})[{{i}}] = {in1}.v{{i}} == (u32)0
? nsimd_f32_to_u16(0.0f)
: nsimd_f32_to_u16(1.0f);'''. \
format(**fmtspec), typ)
else:
content = repeat_stmt('''{in0}[{{i}}] = {in1}.v{{i}} == (u32)0
? ({typ})0 : ({typ})1;'''. \
format(**fmtspec), typ)
return content
# -----------------------------------------------------------------------------
def if_else1(typ):
return func_body('''ret.v{{i}} = {in0}.v{{i}} != (u32)0
? {in1}.v{{i}} : {in2}.v{{i}};'''. \
format(**fmtspec), typ)
# -----------------------------------------------------------------------------
def abs1(typ):
if typ in common.utypes:
return func_body('ret.v{{i}} = {in0}.v{{i}};'.format(**fmtspec), typ)
typ2 = 'f32' if typ == 'f16' else typ
return func_body('''ret.v{{i}} = ({typ2})({in0}.v{{i}} < ({typ2})0
? -{in0}.v{{i}} : {in0}.v{{i}});'''. \
format(typ2=typ2, **fmtspec), typ)
# -----------------------------------------------------------------------------
def fma_fms(func, typ):
op = '+' if func in ['fma', 'fnma'] else '-'
neg = '-' if func in ['fnma', 'fnms'] else ''
typ2 = 'f32' if typ == 'f16' else typ
return func_body(
'''ret.v{{i}} = ({typ2})({neg}({in0}.v{{i}} * {in1}.v{{i}})
{op} {in2}.v{{i}});'''.format(op=op, neg=neg,
typ2=typ2, **fmtspec), typ)
# -----------------------------------------------------------------------------
def all_any(typ, func):
op = '&&' if func == 'all' else '||'
if get_nb_el(typ) == 1:
cond = '{in0}.v0 == (u32)-1'.format(**fmtspec)
else:
cond = op.join('({in0}.v{i} == (u32)-1)'.format(i=i, **fmtspec) \
for i in range(0, get_nb_el(typ)))
return '''if ({cond}) {{
return -1;
}} else {{
return 0;
}}'''.format(cond=cond)
# -----------------------------------------------------------------------------
def reinterpret1(from_typ, to_typ):
if from_typ == to_typ:
return func_body('ret.v{{i}} = {in0}.v{{i}};'.format(**fmtspec),
to_typ)
return '''char buf[{len}];
nsimd_storeu_cpu_{from_typ}(({from_typ} *)buf, {in0});
return nsimd_loadu_cpu_{to_typ}(({to_typ} *)buf);'''. \
format(len=NBITS // 8, **fmtspec)
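# For example, reinterpret from f32 to i32 goes through a byte buffer and
# emits C like the following (illustrative; the buffer holds NBITS / 8
# bytes, shown here for NBITS == 128):
#     char buf[16];
#     nsimd_storeu_cpu_f32((f32 *)buf, a0);
#     return nsimd_loadu_cpu_i32((i32 *)buf);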
# -----------------------------------------------------------------------------
def reinterpretl1(from_typ, to_typ):
return func_body('ret.v{{i}} = {in0}.v{{i}};'.format(**fmtspec), to_typ,
                     True)
# -----------------------------------------------------------------------------
def convert1(from_typ, to_typ):
if to_typ == from_typ:
return func_body('ret.v{{i}} = {in0}.v{{i}};'.format(**fmtspec),
to_typ)
typ2 = 'f32' if to_typ == 'f16' else to_typ
return func_body('ret.v{{i}} = ({typ2}){in0}.v{{i}};'. \
format(typ2=typ2, **fmtspec), to_typ)
# -----------------------------------------------------------------------------
def rec_rec11(typ):
one = '1.0f' if typ in ['f16', 'f32'] else '1.0'
return func_body('ret.v{{i}} = {one} / {in0}.v{{i}};'. \
format(one=one, **fmtspec), typ)
# -----------------------------------------------------------------------------
def rsqrt11(typ):
if typ == 'f64':
return func_body('ret.v{{i}} = 1.0 / sqrt({in0}.v{{i}});'. \
format(**fmtspec), typ)
else:
return func_body(
'ret.v{{i}} = (f32)(1.0 / sqrt((f64){in0}.v{{i}}));'. \
format(**fmtspec), typ)
# -----------------------------------------------------------------------------
def neg1(typ):
typ2 = 'f32' if typ == 'f16' else typ
return func_body('ret.v{{i}} = ({typ2})(-{in0}.v{{i}});'. \
format(typ2=typ2, **fmtspec), typ)
# -----------------------------------------------------------------------------
def nbtrue1(typ):
acc_code = repeat_stmt('acc += {in0}.v{{i}} == (u32)-1 ? 1 : 0;'. \
format(**fmtspec), typ)
return '''int acc = 0;
{acc_code}
return acc;'''.format(acc_code=acc_code)
# -----------------------------------------------------------------------------
def reverse1(typ):
n = get_nb_el(typ)
    content = '\n'.join('ret.v{i} = {in0}.v{j};'. \
                        format(i=i, j=n - 1 - i, **fmtspec) \
for i in range(0, n))
return '''nsimd_cpu_v{typ} ret;
{content}
return ret;'''.format(content=content, **fmtspec)
# -----------------------------------------------------------------------------
def addv1(typ):
content = '+'.join('{in0}.v{i}'.format(i=i, **fmtspec) \
for i in range(0, get_nb_el(typ)))
if typ == 'f16':
return 'return nsimd_f32_to_f16({});'.format(content)
else:
return 'return {};'.format(content)
# -----------------------------------------------------------------------------
def upcvt1(from_typ, to_typ):
n = get_nb_el(to_typ)
to_typ2 = 'f32' if to_typ == 'f16' else to_typ
lower_half = '\n'.join('ret.v0.v{i} = ({to_typ2}){in0}.v{i};'. \
format(i=i, to_typ2=to_typ2, **fmtspec) \
for i in range(0, n))
upper_half = '\n'.join('ret.v1.v{i} = ({to_typ2}){in0}.v{j};'. \
format(i=i, j=i + n, to_typ2=to_typ2, **fmtspec) \
for i in range(0, n))
return '''nsimd_cpu_v{to_typ}x2 ret;
{lower_half}
{upper_half}
return ret;'''.format(lower_half=lower_half,
upper_half=upper_half, **fmtspec)
# -----------------------------------------------------------------------------
def downcvt2(from_typ, to_typ):
n = get_nb_el(from_typ)
to_typ2 = 'f32' if to_typ == 'f16' else to_typ
lower_half = '\n'.join('ret.v{i} = ({to_typ2}){in0}.v{i};'. \
format(i=i, to_typ2=to_typ2, **fmtspec) \
for i in range(0, n))
upper_half = '\n'.join('ret.v{j} = ({to_typ2}){in1}.v{i};'. \
format(i=i, j=i + n, to_typ2=to_typ2, **fmtspec) \
for i in range(0, n))
return '''nsimd_cpu_v{to_typ} ret;
{lower_half}
{upper_half}
return ret;'''.format(lower_half=lower_half,
upper_half=upper_half, **fmtspec)
# -----------------------------------------------------------------------------
def len1(typ):
return 'return {};'.format(get_nb_el(typ))
# -----------------------------------------------------------------------------
def to_logical1(typ):
unsigned_to_logical = \
'ret.v{{i}} = ({in0}.v{{i}} == ({utyp})0 ? (u32)0 : (u32)-1);'. \
format(**fmtspec)
if typ in common.utypes:
return func_body(unsigned_to_logical, typ, True)
else:
unsigned_to_logical = \
'ret.v{{i}} = (buf.v{{i}} == ({utyp})0 ? (u32)0 : (u32)-1);'. \
format(**fmtspec)
return '''nsimd_cpu_vl{typ} ret;
nsimd_cpu_vu{typnbits} buf;
buf = nsimd_reinterpret_cpu_u{typnbits}_{typ}({in0});
{unsigned_to_logical}
return ret;'''. \
format(unsigned_to_logical=repeat_stmt(unsigned_to_logical,
typ), **fmtspec)
# -----------------------------------------------------------------------------
def to_mask1(typ):
logical_to_unsigned = \
'ret.v{{i}} = ({in0}.v{{i}} ? ({utyp})-1 : ({utyp})0);'. \
format(**fmtspec)
if typ in common.utypes:
return func_body(logical_to_unsigned, typ)
elif typ == 'f16':
return '''union {{ f32 f; u32 u; }} buf;
nsimd_cpu_vf16 ret;
{u32_to_f32}
return ret;'''. \
format(u32_to_f32=repeat_stmt(
'buf.u = {in0}.v{{i}}; ret.v{{i}} = buf.f;'. \
format(**fmtspec), 'f16'), **fmtspec)
else:
return '''nsimd_cpu_vu{typnbits} ret;
{logical_to_unsigned}
return nsimd_reinterpret_cpu_{typ}_u{typnbits}(ret);'''. \
format(logical_to_unsigned=repeat_stmt(logical_to_unsigned,
typ), **fmtspec)
# -----------------------------------------------------------------------------
def zip_half(func, typ):
n = get_nb_el(typ)
if typ in ['i64', 'u64', 'f64']:
return '''(void)({in1});
return {in0};'''.format(**fmtspec)
else:
        if func == "ziplo":
            content = '\n'.join(
                'ret.v{j1} = {in0}.v{i}; ret.v{j2} = {in1}.v{i};'. \
                format(i=i, j1=2 * i, j2=2 * i + 1, **fmtspec) \
                for i in range(0, n // 2))
        else:
            content = '\n'.join(
                'ret.v{j1} = {in0}.v{i}; ret.v{j2} = {in1}.v{i};'. \
                format(i=i + n // 2, j1=2 * i, j2=2 * i + 1, **fmtspec) \
                for i in range(0, n // 2))
return '''nsimd_cpu_v{typ} ret;
{content}
return ret;'''.format(content=content, **fmtspec)
# -----------------------------------------------------------------------------
def unzip(func, typ):
n = get_nb_el(typ)
content = ''
    if n // 2 != 0:
        if func == "unziplo":
            content = '\n'.join('ret.v{i} = {in0}.v{j};'. \
                                format(i=i, j=2 * i, **fmtspec) \
                                for i in range(0, n // 2))
            content += '\n' + '\n'.join('ret.v{i} = {in1}.v{j};'. \
                                        format(i=i, j=2 * (i - n // 2),
                                               **fmtspec) \
                                        for i in range(n // 2, n))
        else:
            content = '\n'.join('ret.v{i} = {in0}.v{j};'. \
                                format(i=i, j=2 * i + 1, **fmtspec) \
                                for i in range(0, n // 2))
            content += '\n' + '\n'.join('ret.v{i} = {in1}.v{j};'. \
                                        format(i=i, j=2 * (i - n // 2) + 1,
                                               **fmtspec) \
                                        for i in range(n // 2, n))
return '''nsimd_cpu_v{typ} ret;
{content}
return ret;'''.format(content=content, **fmtspec)
else:
return '''(void)({in1});
return {in0};'''.format(**fmtspec)
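# Lane bookkeeping for a 4-lane vector (illustrative):
#     ziplo(a, b)   -> [a0, b0, a1, b1]
#     ziphi(a, b)   -> [a2, b2, a3, b3]
#     unziplo(a, b) -> [a0, a2, b0, b2]
#     unziphi(a, b) -> [a1, a3, b1, b3]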
# -----------------------------------------------------------------------------
def get_impl(func, simd_ext, from_typ, to_typ=''):
global fmtspec
fmtspec = {
'simd_ext': simd_ext,
'typ': from_typ,
'from_typ': from_typ,
'to_typ': to_typ,
'utyp': common.bitfield_type[from_typ],
'in0': common.in0,
'in1': common.in1,
'in2': common.in2,
'in3': common.in3,
'in4': common.in4,
'typnbits': from_typ[1:]
}
impls = {
'loada': lambda: load(from_typ),
'load2a': lambda: load_deg234(from_typ, 2),
'load3a': lambda: load_deg234(from_typ, 3),
'load4a': lambda: load_deg234(from_typ, 4),
'loadu': lambda: load(from_typ),
'load2u': lambda: load_deg234(from_typ, 2),
'load3u': lambda: load_deg234(from_typ, 3),
'load4u': lambda: load_deg234(from_typ, 4),
'storea': lambda: store(from_typ),
'store2a': lambda: store_deg234(from_typ, 2),
'store3a': lambda: store_deg234(from_typ, 3),
'store4a': lambda: store_deg234(from_typ, 4),
'storeu': lambda: store(from_typ),
'store2u': lambda: store_deg234(from_typ, 2),
'store3u': lambda: store_deg234(from_typ, 3),
'store4u': lambda: store_deg234(from_typ, 4),
'loadla': lambda: loadl(from_typ),
'loadlu': lambda: loadl(from_typ),
'storela': lambda: storel(from_typ),
'storelu': lambda: storel(from_typ),
'add': lambda: op2('+', from_typ),
'mul': lambda: op2('*', from_typ),
'div': lambda: op2('/', from_typ),
'sub': lambda: op2('-', from_typ),
'orb': lambda: bitwise2('|', from_typ),
'orl': lambda: lop2('|', from_typ),
'andb': lambda: bitwise2('&', from_typ),
'andnotb': lambda: andnot2(from_typ),
'andnotl': lambda: landnot2(from_typ),
'andl': lambda: lop2('&', from_typ),
'xorb': lambda: bitwise2('^', from_typ),
'xorl': lambda: lop2('^', from_typ),
'min': lambda: minmax2('min', from_typ),
'max': lambda: minmax2('max', from_typ),
'notb': lambda: not1(from_typ),
'notl': lambda: lnot1(from_typ),
'sqrt': lambda: sqrt1(from_typ),
'set1': lambda: set1(from_typ),
'shr': lambda: bitwise1_param('>>', from_typ),
'shl': lambda: bitwise1_param('<<', from_typ),
'eq': lambda: cmp2('==', from_typ),
'ne': lambda: cmp2('!=', from_typ),
'gt': lambda: cmp2('>', from_typ),
'ge': lambda: cmp2('>=', from_typ),
'lt': lambda: cmp2('<', from_typ),
'le': lambda: cmp2('<=', from_typ),
'len': lambda: len1(from_typ),
'if_else1': lambda: if_else1(from_typ),
'abs': lambda: abs1(from_typ),
'fma': lambda: fma_fms('fma', from_typ),
'fnma': lambda: fma_fms('fnma', from_typ),
'fms': lambda: fma_fms('fms', from_typ),
'fnms': lambda: fma_fms('fnms', from_typ),
'ceil': lambda: ceil1(from_typ),
'floor': lambda: floor1(from_typ),
'trunc': lambda: trunc1(from_typ),
'round_to_even': lambda: round_to_even1(from_typ),
'all': lambda: all_any(from_typ, 'all'),
'any': lambda: all_any(from_typ, 'any'),
'reinterpret': lambda: reinterpret1(from_typ, to_typ),
'reinterpretl': lambda: reinterpretl1(from_typ, to_typ),
'cvt': lambda: convert1(from_typ, to_typ),
'rec11': lambda: rec_rec11(from_typ),
'rec8': lambda: rec_rec11(from_typ),
'rsqrt11': lambda: rsqrt11(from_typ),
'rsqrt8': lambda: rsqrt11(from_typ),
'rec': lambda: rec_rec11(from_typ),
'neg': lambda: neg1(from_typ),
'nbtrue': lambda: nbtrue1(from_typ),
'reverse': lambda: reverse1(from_typ),
'addv': lambda: addv1(from_typ),
'upcvt': lambda: upcvt1(from_typ, to_typ),
'downcvt': lambda: downcvt2(from_typ, to_typ),
'to_logical': lambda: to_logical1(from_typ),
'to_mask': lambda: to_mask1(from_typ),
'ziplo': lambda: zip_half('ziplo', from_typ),
'ziphi': lambda: zip_half('ziphi', from_typ),
'unziplo': lambda: unzip('unziplo', from_typ),
'unziphi': lambda: unzip('unziphi', from_typ)
}
if simd_ext != 'cpu':
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
    if from_typ not in common.types:
        raise ValueError('Unknown from_typ "{}"'.format(from_typ))
    if func not in impls:
return common.NOT_IMPLEMENTED
return impls[func]()
| 40.050971 | 80 | 0.43158 | 3,685 | 33,002 | 3.719674 | 0.110448 | 0.018968 | 0.017874 | 0.023565 | 0.56307 | 0.512366 | 0.444809 | 0.397388 | 0.365653 | 0.347195 | 0 | 0.035769 | 0.292649 | 33,002 | 823 | 81 | 40.099635 | 0.551405 | 0.161263 | 0 | 0.414676 | 0 | 0.027304 | 0.278262 | 0.034062 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093857 | false | 0 | 0.001706 | 0.022184 | 0.274744 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1239594d29a58dedb1b0d505621f1a432d45aa38 | 21,907 | py | Python | stf/dataset/TestSet_avg_mars.py | TencentYoutuResearch/PersonReID-TSF | b56ba8f4b2cbd7569ab15f62474369dd40d3dca7 | [
"Apache-2.0"
] | 19 | 2021-01-07T11:09:46.000Z | 2021-12-31T13:05:02.000Z | stf/dataset/TestSet_avg_mars.py | TencentYoutuResearch/PersonReID-TSF | b56ba8f4b2cbd7569ab15f62474369dd40d3dca7 | [
"Apache-2.0"
] | null | null | null | stf/dataset/TestSet_avg_mars.py | TencentYoutuResearch/PersonReID-TSF | b56ba8f4b2cbd7569ab15f62474369dd40d3dca7 | [
"Apache-2.0"
] | 2 | 2021-01-08T08:30:32.000Z | 2021-02-04T02:18:55.000Z | from __future__ import print_function
import sys
import time
import os.path as osp
from PIL import Image
import cv2
import numpy as np
import random
from collections import defaultdict
from .Dataset import Dataset
from ..utils.utils import measure_time
from ..utils.re_ranking import re_ranking
from ..utils.metric import cmc, mean_ap, precision_recall, evaluate
from ..utils.dataset_utils import parse_im_name
from ..utils.distance import normalize
from ..utils.distance import compute_dist
import pickle
DEBUG = True
class TestSetAvgMARS(Dataset):
"""
Args:
extract_feat_func: a function to extract features. It takes a batch of
images and returns a batch of features.
marks: a list, each element e denoting whether the image is from
query (e == 0), or
gallery (e == 1), or
multi query (e == 2) set
"""
def __init__(
self,
im_dir=None,
im_names=None,
marks=None,
extract_feat_func=None,
separate_camera_set=None,
single_gallery_shot=None,
first_match_break=None,
**kwargs):
# The im dir of all images
self.im_dir = im_dir
self.im_names = im_names
self.extract_feat_func = extract_feat_func
self.separate_camera_set = separate_camera_set
self.single_gallery_shot = single_gallery_shot
self.first_match_break = first_match_break
self.im_dict = {}
self.marks = {}
self.max_n_samples = 25
'''
self.im_names = self.im_names[0:1000]
self.im_names += im_names[-5000:]
marks = marks[0:1000] + marks[-5000:]
'''
id_ch_segment = {}
# self.im_names.sort()
#self.im_names = self.im_names[0:250] + self.im_names[10000:10250] + self.im_names[-250:]
# marks = [0] * 250 + [1] * 500#list(marks[0:250] + marks[1000:1250] + marks[-250:])
for i, im_name in enumerate(self.im_names):
id_ch = '_'.join(im_name.split('_')[0:2])
if id_ch not in id_ch_segment:
id_ch_segment[id_ch] = [id_ch + '_seg00000']
self.im_dict[id_ch + '_seg00000'] = []
self.marks[id_ch + '_seg00000'] = []
key = id_ch_segment[id_ch][-1]
if len(self.im_dict[key]) == self.max_n_samples:
key = id_ch + \
'_seg%05d' % (
int(key.split('_')[-1].replace('seg', '')) + 1)
id_ch_segment[id_ch].append(key)
self.im_dict[key] = []
self.marks[key] = []
self.im_dict[key].append(im_name)
self.marks[key].append(marks[i])
id_list = sorted(list(self.im_dict.keys()))
self.id_list = id_list
self.id_ch_segment = id_ch_segment
super(TestSetAvgMARS, self).__init__(
dataset_size=len(self.id_list), **kwargs)
print('Creating dataset using TestSetAvgMARS')
def set_feat_func(self, extract_feat_func):
self.extract_feat_func = extract_feat_func
    def get_sample(self, ptr):
        """Fetch the images of one (id, cam) segment for the prefetch queue."""
if ptr >= len(self.id_list):
ptr = ptr % len(self.id_list)
im_names = []
id_ch = self.id_list[ptr]
im_names = self.im_dict[id_ch]
# if len(im_names) > self.max_n_samples:
# indices = random.sample(range(len(im_names)), self.max_n_samples)
# im_names = [im_names[i] for i in indices]
#print (len(im_names))
ims = np.zeros(
(self.max_n_samples, 3, self.pre_process_im.resize_h_w[0], self.pre_process_im.resize_h_w[1]))
for i, im_name in enumerate(im_names):
im_path = osp.join(self.im_dir, im_name)
im = cv2.imread(im_path)
if im is None:
print('%s img read fail' % im_path)
continue
im = im[:, :, ::-1]
im, _ = self.pre_process_im(im)
ims[i] = np.copy(im)
id = id_ch
cam = id_ch.split('_')[1][0]
track = id_ch.split('_')[1][1:]
mark = self.marks[id_ch][0]
        sample_mask = np.array([1] * len(im_names) +
                               [0] * (self.max_n_samples - len(im_names)))
return (ims, im_names, id, cam, track, sample_mask, mark)
def next_batch(self):
if self.epoch_done and self.shuffle:
self.prng.shuffle(self.im_names)
ims = None
im_names = None
ids = None
cams = None
tracks = None
sample_masks = None
marks = None
samples, self.epoch_done = self.prefetcher.next_batch_test()
if len(samples) > 0:
ims_list, im_names_list, ids, cams, tracks, sample_masks, marks = zip(
*samples)
else:
return ims, im_names, ids, cams, tracks, sample_masks, marks, self.epoch_done
# Transform the list into a numpy array with shape [N, ...]
ims = np.stack(ims_list, axis=0)
ids = np.array(ids)
cams = np.array(cams)
tracks = np.array(tracks)
im_names = im_names_list
sample_masks = np.array(sample_masks)
marks = np.array(marks)
return ims, im_names, ids, cams, tracks, sample_masks, marks, self.epoch_done
def extract_feat(self, normalize_feat, verbose=True):
"""Extract the features of the whole image set.
Args:
normalize_feat: True or False, whether to normalize feature to unit length
verbose: whether to print the progress of extracting feature
Returns:
feat: numpy array with shape [N, C]
ids: numpy array with shape [N]
cams: numpy array with shape [N]
im_names: numpy array with shape [N]
marks: numpy array with shape [N]
"""
feat, ids, id_ch_seg, cams, tracks, im_names, marks = [], [], [], [], [], [], []
done = False
step = 0
printed = False
st = time.time()
last_time = time.time()
while not done:
ims_, im_names_, ids_, cams_, tracks_, samples_masks, marks_, done = self.next_batch()
if done and ims_ is None:
break
feat_ = self.extract_feat_func(ims_, samples_masks)
feat.append(feat_)
id_ch_seg.append(ids_)
ids.append([id_ch.split('_')[0] for id_ch in ids_])
cams.append(cams_)
tracks.append(tracks_)
im_names += list(im_names_)
step += 1
marks.append(marks_)
'''
print ('ids', ids)
print ('id_ch', id_ch_seg)
print ('cams', cams)
print ('tracks', tracks)
print ('im names', im_names)
print ('marks', marks)
'''
if verbose:
# Print the progress of extracting feature
total_batches = (self.prefetcher.dataset_size
// self.prefetcher.batch_size + 1)
if step % 20 == 0:
if not printed:
printed = True
else:
# Clean the current line
sys.stdout.write("\033[F\033[K")
print('{}/{} batches done, +{:.2f}s, total {:.2f}s'
.format(step, total_batches,
time.time() - last_time, time.time() - st))
last_time = time.time()
feat = np.vstack(feat)
ids = np.hstack(ids)
id_ch_seg = np.hstack(id_ch_seg)
cams = np.hstack(cams)
tracks = np.hstack(tracks)
#im_names = np.hstack(im_names)
marks = np.hstack(marks)
feat_dict = {}
for i, ics in enumerate(id_ch_seg):
id = ids[i]
f = feat[i]
cam = cams[i]
im_name = im_names[i]
id_ch = '_'.join(ics.split('_')[0:2])
mark = marks[i]
if id_ch not in feat_dict:
feat_dict[id_ch] = {'id': id, 'feat': [],
'cam': cam, 'mark': mark, 'im_names': []}
feat_dict[id_ch]['feat'].append(f)
feat_dict[id_ch]['im_names'] += im_name
feat, ids, cams, im_names, marks = [], [], [], [], []
for key in feat_dict:
f = feat_dict[key]['feat']
f = np.mean(np.vstack(f), axis=0)
f = normalize(f, axis=0)
feat.append(f)
ids.append(feat_dict[key]['id'])
cams.append(feat_dict[key]['cam'])
marks.append(feat_dict[key]['mark'])
im_names.append(feat_dict[key]['im_names'])
feat = np.array(feat)
ids = np.array(ids)
cams = np.array(cams)
marks = np.array(marks)
print(ids, cams, marks, im_names)
return feat, ids, cams, im_names, marks
def eval(
self,
normalize_feat=True,
to_re_rank=False,
pool_type='average',
verbose=True,
preload_feature=False):
"""Evaluate using metric CMC and mAP.
Args:
normalize_feat: whether to normalize features before computing distance
to_re_rank: whether to also report re-ranking scores
pool_type: 'average' or 'max', only for multi-query case
verbose: whether to print the intermediate information
"""
#to_re_rank = False
if preload_feature:
            feat, ids, cams, im_names, marks = pickle.load(
                open('test_preload_feature.pkl', 'rb'))
else:
with measure_time('Extracting feature...', verbose=verbose):
feat, ids, cams, im_names, marks = self.extract_feat(
normalize_feat, verbose)
#im_names = [x if isinstance(x, str) else x.decode('utf-8') for x in im_names]
#pickle.dump((feat, ids, cams, im_names, marks), open('test_preload_feature.pkl', 'w'))
# query, gallery, multi-query indices
'''
"""
rearrange query and gallery, use all the images of the same id and cam_id, as query, others as gallery
"""
print('ids:', ids.shape)
print('cams:', cams.shape)
feat_dim = feat.shape[1]
feat_dict = {}
for fea, id, cam in zip(feat, ids, cams):
if id not in feat_dict:
feat_dict[id] = {}
if cam not in feat_dict[id]:
feat_dict[id][cam] = fea
else:
feat_dict[id][cam] = np.vstack((feat_dict[id][cam], fea))
new_ids = [] # the rank of new person ids
new_feat_matrix = []
query_ids = [] # choose which cam_id of one person to be query
query_feats = np.array([])
gallery_feats = np.array([])
gallery_ids = []
query_cams = []
gallery_cams = []
for i, p_id in enumerate(sorted(feat_dict)):
print('p_id:', i)
new_ids.append(p_id)
new_feat_matrix.append([])
for j , cam_track_id in enumerate(sorted(feat_dict[p_id])):
print('cam_track_id:', j)
#trace_feat = np.mean(feat_dict[p_id][cam_track_id], axis = 0)
trace_feat = np.copy(feat_dict[p_id][cam_track_id])
cam_id = cam_track_id[0].zfill(5)
if j == i % len(feat_dict[p_id]): #use as query
if len(query_feats) == 0:
query_feats = np.copy(trace_feat)
else:
query_feats = np.vstack((query_feats, trace_feat))
query_ids.append(p_id)
#resolve cam_id from cam_track
query_cams.append(cam_id)
else: # use as gallery
if len(gallery_feats) == 0:
gallery_feats = np.copy(trace_feat)
else:
gallery_feats = np.vstack((gallery_feats, trace_feat))
print('gallery:',gallery_feats.shape)
gallery_ids.append(p_id)
gallery_cams.append(cam_id)
if len(query_feats.shape) == 1:
query_feats = query_feats.reshape(1, query_feats.shape[0])
if len(gallery_feats.shape) == 1:
gallery_feats = gallery_feats.reshape(1, gallery_feats.shape[0])
#dist_mat = compute_dist(query_feats, gallery_feats, type = 'euclidean')
query_ids = np.array(query_ids)
gallery_ids = np.array(gallery_ids)
query_cams = np.array(query_cams)
gallery_cams = np.array(gallery_cams)
print('query ids', query_ids)
print('gallery ids', gallery_ids)
print('query cams', query_cams)
print('gallery cams', gallery_cams)
'''
q_inds = marks == 0
g_inds = marks == 1
mq_inds = marks == 2
#print (query_ids.shape, gallery_ids.shape, query_cams.shape, marks.shape)
# A helper function just for avoiding code duplication.
def compute_score(
dist_mat,
query_ids=ids[q_inds],
gallery_ids=ids[g_inds],
query_cams=cams[q_inds],
gallery_cams=cams[g_inds]):
# Compute mean AP
print(dist_mat, query_ids, gallery_ids, query_cams, gallery_cams)
'''
mAP = mean_ap(
distmat=dist_mat,
query_ids=query_ids, gallery_ids=gallery_ids,
query_cams=query_cams, gallery_cams=gallery_cams)
'''
# Compute CMC scores
'''
cmc_scores0 = cmc(
distmat=dist_mat,
query_ids=query_ids, gallery_ids=gallery_ids,
query_cams=query_cams, gallery_cams=gallery_cams,
separate_camera_set=self.separate_camera_set,
single_gallery_shot=self.single_gallery_shot,
first_match_break=self.first_match_break,
topk=10)
'''
cmc_scores, mAP = evaluate(
dist_mat,
query_ids, gallery_ids,
query_cams, gallery_cams,
)
#raise SystemExit
'''
pr_scores = precision_recall(
distmat=dist_mat,
query_ids=query_ids, gallery_ids=gallery_ids,
query_cams=query_cams, gallery_cams=gallery_cams,
separate_camera_set=self.separate_camera_set,
thres = 0.8
)
'''
pr_scores = [[], []]
return mAP, cmc_scores, pr_scores
def print_scores(mAP, cmc_scores, pr_scores):
print('[mAP: {:5.2%}], [cmc1: {:5.2%}], [cmc5: {:5.2%}], [cmc10: {:5.2%}]'
.format(mAP, *cmc_scores[[0, 4, 9]]))
for p, r in zip(pr_scores[0], pr_scores[1]):
print('precision', p, 'recall', r)
################
# Single Query #
################
with measure_time('Computing distance...', verbose=verbose):
# query-gallery distance
q_g_dist = compute_dist(
feat[q_inds], feat[g_inds], type='euclidean')
#q_g_dist = compute_dist(query_feats, gallery_feats, type = 'euclidean')
with measure_time('Computing scores...', verbose=verbose):
mAP, cmc_scores, pr_scores = compute_score(q_g_dist)
#query_ids = query_ids,
#gallery_ids = gallery_ids,
#query_cams = query_cams,
# gallery_cams = gallery_cams)
print('{:<30}'.format('Single Query:'), end='')
print_scores(mAP, cmc_scores, pr_scores)
s_mAP, s_cmc_scores = mAP, cmc_scores
return s_mAP, s_cmc_scores, 0, 0, 0, 0, 0, 0
###############
# Multi Query #
###############
mq_mAP, mq_cmc_scores = None, None
if any(mq_inds):
mq_ids = ids[mq_inds]
mq_cams = cams[mq_inds]
mq_feat = feat[mq_inds]
unique_mq_ids_cams = defaultdict(list)
for ind, (id, cam) in enumerate(zip(mq_ids, mq_cams)):
unique_mq_ids_cams[(id, cam)].append(ind)
keys = unique_mq_ids_cams.keys()
assert pool_type in ['average', 'max']
pool = np.mean if pool_type == 'average' else np.max
mq_feat = np.stack([pool(mq_feat[unique_mq_ids_cams[k]], axis=0)
for k in keys])
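            # mq_feat now holds one pooled feature per unique (id, cam)
            # pair, shape (len(keys), C), ordered to match `keys`.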
with measure_time('Multi Query, Computing distance...', verbose=verbose):
# multi_query-gallery distance
mq_g_dist = compute_dist(
mq_feat, feat[g_inds], type='euclidean')
with measure_time('Multi Query, Computing scores...', verbose=verbose):
mq_mAP, mq_cmc_scores, pr_scores = compute_score(
mq_g_dist,
                    query_ids=np.array([k[0] for k in keys]),
gallery_ids=ids[g_inds],
                    query_cams=np.array([k[1] for k in keys]),
gallery_cams=cams[g_inds]
)
print('{:<30}'.format('Multi Query:'), end='')
print_scores(mq_mAP, mq_cmc_scores, pr_scores)
smq_mAP, smq_cmc_scores = mq_mAP, mq_cmc_scores
rrs_mAP, rrs_cmc_scores = None, None
rrmq_mAP, rrmq_cmc_scores = None, None
if to_re_rank:
##########################
# Re-ranked Single Query #
##########################
with measure_time('Re-ranking distance...', verbose=verbose):
# query-query distance
q_q_dist = compute_dist(
feat[q_inds], feat[q_inds], type='euclidean')
# gallery-gallery distance
g_g_dist = compute_dist(
feat[g_inds], feat[g_inds], type='euclidean')
# re-ranked query-gallery distance
re_r_q_g_dist = re_ranking(q_g_dist, q_q_dist, g_g_dist)
with measure_time('Computing scores for re-ranked distance...',
verbose=verbose):
mAP, cmc_scores, pr_scores = compute_score(re_r_q_g_dist)
print('{:<30}'.format('Re-ranked Single Query:'), end='')
print_scores(mAP, cmc_scores, pr_scores)
rrs_mAP, rrs_cmc_scores = mAP, cmc_scores
#########################
# Re-ranked Multi Query #
#########################
if any(mq_inds):
with measure_time('Multi Query, Re-ranking distance...',
verbose=verbose):
# multi_query-multi_query distance
mq_mq_dist = compute_dist(
mq_feat, mq_feat, type='euclidean')
# re-ranked multi_query-gallery distance
re_r_mq_g_dist = re_ranking(
mq_g_dist, mq_mq_dist, g_g_dist)
with measure_time(
'Multi Query, Computing scores for re-ranked distance...',
verbose=verbose):
mq_mAP, mq_cmc_scores, pr_scores = compute_score(
re_r_mq_g_dist,
                        query_ids=np.array([k[0] for k in keys]),
gallery_ids=ids[g_inds],
                        query_cams=np.array([k[1] for k in keys]),
gallery_cams=cams[g_inds]
)
print('{:<30}'.format('Re-ranked Multi Query:'), end='')
print_scores(mq_mAP, mq_cmc_scores, pr_scores)
rrmq_mAP, rrmq_cmc_scores = mq_mAP, mq_cmc_scores
# return mAP, cmc_scores, mq_mAP, mq_cmc_scores
return s_mAP, s_cmc_scores, smq_mAP, smq_cmc_scores, rrs_mAP, rrs_cmc_scores, rrmq_mAP, rrmq_cmc_scores
| 38.568662 | 111 | 0.545305 | 2,790 | 21,907 | 4.003584 | 0.102509 | 0.033214 | 0.020143 | 0.015219 | 0.515756 | 0.427932 | 0.378603 | 0.331244 | 0.306088 | 0.297851 | 0 | 0.012909 | 0.33875 | 21,907 | 567 | 112 | 38.636684 | 0.75818 | 0.109782 | 0 | 0.142857 | 0 | 0.003484 | 0.057158 | 0.001812 | 0 | 0 | 0 | 0 | 0.003484 | 1 | 0.027875 | false | 0 | 0.059233 | 0 | 0.114983 | 0.069686 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
123e93ac8a310f95ec894f08e807cdf1c4916bbf | 504 | py | Python | stacks/345.Reverse Vowels of a string.py | Rage-ops/Leetcode-Solutions | 48d4ecbb92a0bb7a7bb74a1445b593a67357ac02 | [
"MIT"
] | 1 | 2020-11-23T13:52:11.000Z | 2020-11-23T13:52:11.000Z | stacks/345.Reverse Vowels of a string.py | harsha-sam/Leetcode-Solutions | 48d4ecbb92a0bb7a7bb74a1445b593a67357ac02 | [
"MIT"
] | null | null | null | stacks/345.Reverse Vowels of a string.py | harsha-sam/Leetcode-Solutions | 48d4ecbb92a0bb7a7bb74a1445b593a67357ac02 | [
"MIT"
] | null | null | null | # Easy
# https://leetcode.com/problems/reverse-vowels-of-a-string/
# Time Complexity: O(N)
# Space Complexity: O(N)
class Solution:
def reverseVowels(self, s: str) -> str:
stack = []
for letter in s:
if letter in "aeiouAEIOU":
stack.append(letter)
word = ""
for letter in s:
if letter in "aeiouAEIOU":
word += stack.pop()
else:
word += letter
return word
| 26.526316 | 59 | 0.494048 | 55 | 504 | 4.527273 | 0.6 | 0.128514 | 0.096386 | 0.096386 | 0.257028 | 0.257028 | 0.257028 | 0.257028 | 0 | 0 | 0 | 0 | 0.39881 | 504 | 19 | 60 | 26.526316 | 0.821782 | 0.212302 | 0 | 0.307692 | 0 | 0 | 0.050891 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
12480d98d0d64204e46e36015b2e324e7c79f23a | 13,460 | py | Python | spanclient/_endpoint_wrapper.py | illuscio-dev/spanclient-py | 6308d221d179ed0db7c211c7a7ec7e2944e8864c | [
"MIT"
] | null | null | null | spanclient/_endpoint_wrapper.py | illuscio-dev/spanclient-py | 6308d221d179ed0db7c211c7a7ec7e2944e8864c | [
"MIT"
] | null | null | null | spanclient/_endpoint_wrapper.py | illuscio-dev/spanclient-py | 6308d221d179ed0db7c211c7a7ec7e2944e8864c | [
"MIT"
] | null | null | null | import functools
import copy
from dataclasses import dataclass
from marshmallow import Schema
from typing import (
Optional,
MutableMapping,
Any,
Union,
Callable,
Tuple,
Dict,
AsyncGenerator,
Generator,
Sequence,
)
from spantools import MimeType, convert_params_headers, MimeTypeTolerant
from spantools.errors_api import NothingToReturnError
from ._typing import ModelType
from ._request_obj import ClientRequest, PagingReqClient
from ._response_data import ResponseData
class _PagedHalt(BaseException):
"""Raised to halt further paging."""
@dataclass
class _EndpointSettings:
endpoint: str
"""URL endpoint."""
method: str
"""HTTP method to use for request."""
query_params: MutableMapping[str, str]
"""URL params to use on EVERY endpoint request."""
headers: MutableMapping[str, str]
"""HTTP header values to send on EVERY endpoint request."""
req_schema: Optional[Schema]
"""Req body schema to use for decoding request body."""
resp_codes: Tuple[int, ...]
"""Single or tuple of valid HTTP response codes."""
resp_schema: Optional[Schema]
"""Marshmallow schema for decoding response object."""
data_updater: Optional[Callable[[ModelType, Any], None]]
"""
Custom updater for mapping new data to existing data object. Takes arguments
``(current_object, new_object)`` amd returns ``None``
"""
class EndpointWrapper:
"""
Wraps endpoints for client. When an attribute is fetched from this class, a partial
version of :func:`EndpointWrapper.generic` is returned with the attribute name
pre-placed in the ``method`` parameter.
This class is not accessed directly, but invoked through an instance:
``spanclient.handles``.
"""
def __getattribute__(self, item: str) -> Any:
if not item.startswith("_") and item != "paged":
return functools.partial(super().__getattribute__("generic"), item)
else:
return super().__getattribute__(item)
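    # Minimal usage sketch (names below are hypothetical):
    #
    #     handles = EndpointWrapper()
    #
    #     class MyClient(SpanClient):
    #         @handles.get("/wizards/{wizard_id}", resp_codes=200)
    #         async def fetch_wizard(self, *args, **kwargs):
    #             ...  # optionally customize kwargs["req"] before execution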
@staticmethod
async def _endpoint_wrapper(
client: "SpanClient",
endpoint_settings: _EndpointSettings,
mimetype_send: MimeTypeTolerant,
mimetype_accept: MimeTypeTolerant,
return_info: bool,
handler: Callable,
args: Sequence[Any],
kwargs: MutableMapping[str, Any],
) -> Any:
endpoint_settings = copy.copy(endpoint_settings)
try:
req: ClientRequest = kwargs["req"]
except KeyError:
req = ClientRequest(client, endpoint_settings=endpoint_settings)
else:
req.endpoint_settings = endpoint_settings
req.mimetype_send = mimetype_send
req.mimetype_accept = mimetype_accept
kwargs["req"] = req
if req.return_info is None:
req.return_info = return_info
result = await handler(client, *args, **kwargs)
if not req.executed:
result_data = await req.execute()
if return_info or req.return_info:
result = result_data
elif result_data.loaded is not None:
result = result_data.loaded
else:
result = result_data.resp
return result
@staticmethod
def generic(
method: str,
endpoint: str,
query_params: Optional[MutableMapping[str, Any]] = None,
headers: Optional[MutableMapping[str, Any]] = None,
mimetype_send: Optional[Union[str, MimeType]] = None,
mimetype_accept: Optional[Union[str, MimeType]] = None,
req_schema: Optional[Schema] = None,
resp_codes: Union[int, Tuple[int, ...]] = 200,
resp_schema: Optional[Schema] = None,
data_updater: Optional[Callable[[ModelType, Any], None]] = None,
return_info: bool = False,
) -> Callable:
"""
        Decorator that is ACTUALLY called when decorating an endpoint method.
        :param method: HTTP method -- GET, POST, PUT, etc. Filled in
            automatically by the decorator invoked, i.e. ``@handles.get``
:param endpoint: Endpoint path. Can use f-string syntax for path params.
ex: /wizards/{wizard_id}
:param query_params: URL query params to apply to all requests from this method.
:param headers: HTTP headers to apply to all requests made from this method.
:param mimetype_send: Mimetype to use when encoding content. Added to the
``'Content-Type'`` header.
:param mimetype_accept: Mimetype to request from server. Added to the
``'Accept'`` header.
:param req_schema: Schema for dumping request body media.
:param resp_codes: Valid response codes.
:param resp_schema: Schema for loading response body content.
:param data_updater: To use when updating existing data objects in-place.
        :param return_info: Whether to return a :class:`ResponseData` instance in
            place of the decoded / loaded response body.
:return: Method decorator.
:raises StatusMismatchError: When response status does not match ``resp_codes``.
:raises ContentTypeUnknownError: When ``ClientRequest.media`` is not bytes
but an unregistered mimetype is given to ``mimetype_send`` or
``ClientRequest.mimetype_send``.
:raises ContentEncodeError: When error occurs encoding request body.
:raises ContentDecodeError: When error occurs decoding response body.
"""
if query_params is None:
query_params = dict()
if headers is None:
headers = dict()
convert_params_headers(query_params)
convert_params_headers(headers)
if isinstance(resp_codes, int):
resp_codes = (resp_codes,)
endpoint_settings = _EndpointSettings(
method=method,
endpoint=endpoint,
query_params=query_params,
headers=headers,
req_schema=req_schema,
resp_codes=resp_codes,
resp_schema=resp_schema,
data_updater=data_updater,
)
def decorator(handler: Callable) -> Callable:
@functools.wraps(handler)
async def wrapper(client: "SpanClient", *args: Any, **kwargs: Any) -> Any:
result = await EndpointWrapper._endpoint_wrapper(
client=client,
endpoint_settings=endpoint_settings,
mimetype_send=mimetype_send,
mimetype_accept=mimetype_accept,
return_info=return_info,
handler=handler,
args=args,
kwargs=kwargs,
)
return result
return wrapper
return decorator
@staticmethod
def get(
endpoint: str,
query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
mimetype_send: Optional[Union[str, MimeType]] = None,
mimetype_accept: Optional[Union[str, MimeType]] = None,
req_schema: Optional[Schema] = None,
resp_codes: Union[int, Tuple[int, ...]] = 200,
resp_schema: Optional[Schema] = None,
data_updater: Optional[Callable[[Any, Any], None]] = None,
return_info: bool = False,
) -> Callable:
"""For IDE code-completion. Alias of :func:`EndpointWrapper.generic`"""
@staticmethod
def post(
endpoint: str,
query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
mimetype_send: Optional[Union[str, MimeType]] = None,
mimetype_accept: Optional[Union[str, MimeType]] = None,
req_schema: Optional[Schema] = None,
resp_codes: Union[int, Tuple[int, ...]] = 200,
resp_schema: Optional[Schema] = None,
data_updater: Optional[Callable[[Any, Any], None]] = None,
return_info: bool = False,
) -> Callable:
"""For IDE code-completion. Alias of :func:`EndpointWrapper.generic`"""
@staticmethod
def put(
endpoint: str,
query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
mimetype_send: Optional[Union[str, MimeType]] = None,
mimetype_accept: Optional[Union[str, MimeType]] = None,
req_schema: Optional[Schema] = None,
resp_codes: Union[int, Tuple[int, ...]] = 200,
resp_schema: Optional[Schema] = None,
data_updater: Optional[Callable[[Any, Any], None]] = None,
return_info: bool = False,
) -> Callable:
"""For IDE code-completion. Alias of :func:`EndpointWrapper.generic`"""
@staticmethod
def patch(
endpoint: str,
        query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
mimetype_send: Optional[Union[str, MimeType]] = None,
mimetype_accept: Optional[Union[str, MimeType]] = None,
req_schema: Optional[Schema] = None,
resp_codes: Union[int, Tuple[int, ...]] = 200,
resp_schema: Optional[Schema] = None,
data_updater: Optional[Callable[[Any, Any], None]] = None,
return_info: bool = False,
) -> Callable:
"""For IDE code-completion. Alias of :func:`EndpointWrapper.generic`"""
@staticmethod
def delete(
endpoint: str,
        query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
mimetype_send: Optional[Union[str, MimeType]] = None,
mimetype_accept: Optional[Union[str, MimeType]] = None,
req_schema: Optional[Schema] = None,
resp_codes: Union[int, Tuple[int, ...]] = 200,
resp_schema: Optional[Schema] = None,
data_updater: Optional[Callable[[Any, Any], None]] = None,
return_info: bool = False,
) -> Callable:
        """For IDE code-completion. Alias of :func:`EndpointWrapper.generic`"""
@staticmethod
def copy(
endpoint: str,
        query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
mimetype_send: Optional[Union[str, MimeType]] = None,
mimetype_accept: Optional[Union[str, MimeType]] = None,
req_schema: Optional[Schema] = None,
resp_codes: Union[int, Tuple[int, ...]] = 200,
resp_schema: Optional[Schema] = None,
data_updater: Optional[Callable[[Any, Any], None]] = None,
return_info: bool = False,
) -> Callable:
"""For IDE code-completion. Alias of :func:`EndpointWrapper.generic`"""
@staticmethod
def _paged_init_req(
client: "SpanClient",
offset_params: int,
limit: int,
max_pages: int,
page_to_fetch: int,
) -> ClientRequest:
paging = PagingReqClient( # type: ignore
offset=offset_params,
limit=limit,
max_pages=max_pages,
page_to_fetch=page_to_fetch,
)
req = ClientRequest(client, None) # type: ignore
req._paging = paging
req.return_info = True
return req
@staticmethod
def _paged_yield_result_items(result: Any) -> Generator[Any, None, None]:
if isinstance(result, ResponseData):
page_next: Union[str, bool] = result.resp.headers.get("paging-next", False)
if result.loaded is not None:
items = result.loaded
else:
items = [result.resp]
else:
page_next = True
items = result
for item in items:
yield item
if page_next is False:
raise _PagedHalt("Stop")
@staticmethod
def paged(offset: int = 0, limit: int = 50, max_pages: int = -1) -> Callable:
"""
Turns method into an async generator to seamlessly handle paged responses.
:param offset: Beginning offset to use.
:param limit: Default limit to use.
        :param max_pages: Maximum number of pages to fetch when called (-1 means
            no limit).
:return: Wrapped function.
THIS METHOD MUST BE USED ON TOP OF A GENERIC ``handles`` decorator.
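        Usage sketch (hypothetical endpoint):
            @EndpointWrapper.paged(limit=100)
            @handles.get("/wizards", resp_codes=200)
            async def list_wizards(self, *args, **kwargs): ...
        then iterate with ``async for wizard in client.list_wizards(): ...``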
"""
def decorator(handler: Callable) -> Callable:
@functools.wraps(handler)
async def wrapper(
client: "SpanClient", *args: Any, **kwargs: Any
) -> AsyncGenerator:
offset_param = offset
pages_fetched = 0
while True:
req = EndpointWrapper._paged_init_req(
client,
offset_param,
limit,
max_pages,
page_to_fetch=pages_fetched + 1,
)
kwargs["req"] = req
try:
result = await handler(client, *args, **kwargs)
except NothingToReturnError:
break
try:
for item in EndpointWrapper._paged_yield_result_items(result):
yield item
except _PagedHalt:
break
pages_fetched += 1
if pages_fetched == req.paging.max_pages:
break
offset_param += req.paging.limit
return wrapper
return decorator
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ._client import SpanClient
| 35.702918 | 88 | 0.6 | 1,431 | 13,460 | 5.502446 | 0.168414 | 0.020447 | 0.04064 | 0.042672 | 0.383287 | 0.34633 | 0.337694 | 0.33109 | 0.326264 | 0.326264 | 0 | 0.002994 | 0.305201 | 13,460 | 376 | 89 | 35.797872 | 0.838965 | 0.180684 | 0 | 0.426966 | 0 | 0 | 0.007534 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048689 | false | 0.003745 | 0.041199 | 0 | 0.164794 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
124879265e549764033ecbb3482f5f663c09d3b1 | 1,911 | py | Python | shift_detector/precalculations/embedding_distance_precalculation.py | hpi-bp1819-naumann/shift-detector | 5d081d05ec084021f11827aa3fd3e167854b2a2a | [
"Apache-2.0"
] | 3 | 2019-06-21T11:41:08.000Z | 2019-10-24T06:41:51.000Z | shift_detector/precalculations/embedding_distance_precalculation.py | hpi-bp1819-naumann/shift-detector | 5d081d05ec084021f11827aa3fd3e167854b2a2a | [
"Apache-2.0"
] | 63 | 2019-05-16T12:09:57.000Z | 2022-02-10T00:21:01.000Z | shift_detector/precalculations/embedding_distance_precalculation.py | hpi-bp1819-naumann/shift-detector | 5d081d05ec084021f11827aa3fd3e167854b2a2a | [
"Apache-2.0"
] | null | null | null | from shift_detector.precalculations.text_embedding_precalculation import TextEmbeddingPrecalculation
from shift_detector.precalculations.store import Store
from shift_detector.precalculations.precalculation import Precalculation
from datawig.utils import random_split
import numpy as np
from numpy.linalg import norm
class EmbeddingDistancePrecalculation(Precalculation):
def __init__(self, model=None, trained_model=None):
self.model = model
self.trained_model = trained_model
def __eq__(self, other):
return self.model == other.model and self.trained_model == other.trained_model
def __hash__(self):
return hash((self.model, self.trained_model))
@staticmethod
def sum_and_normalize_vectors(series):
vector = np.array([0.0] * len(series.iloc[0]))
for cell in series:
vector += cell
return vector / len(series)
def process(self, store: Store) -> dict:
"""
Calculate the euclidean distance between two embeddings.
:param store:
:return: CheckResult
"""
df1, df2 = store[TextEmbeddingPrecalculation(model=self.model, trained_model=self.trained_model, agg='sum')]
df1a, df1b = random_split(df1, [0.95, 0.05]) # Baseline for df1
df2a, df2b = random_split(df2, [0.95, 0.05]) # Baseline for df2
if df1a.empty or df1b.empty or df2a.empty or df2b.empty:
raise ValueError('Dataset to small for split ratio')
result = {}
for i in df1:
result[i] = (norm(self.sum_and_normalize_vectors(df1a[i]) - self.sum_and_normalize_vectors(df1b[i])),
norm(self.sum_and_normalize_vectors(df2a[i]) - self.sum_and_normalize_vectors(df2b[i])),
norm(self.sum_and_normalize_vectors(df1[i]) - self.sum_and_normalize_vectors(df2[i])))
return result
| 38.22 | 116 | 0.672423 | 238 | 1,911 | 5.193277 | 0.323529 | 0.07767 | 0.084951 | 0.124595 | 0.168285 | 0.168285 | 0.075243 | 0 | 0 | 0 | 0 | 0.024557 | 0.232862 | 1,911 | 49 | 117 | 39 | 0.818554 | 0.065934 | 0 | 0 | 0 | 0 | 0.020069 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15625 | false | 0 | 0.1875 | 0.0625 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
124c0cc2c66967fcbd74c872c18100920840b8b1 | 792 | py | Python | scripts/format_files.py | paltmey/masterthesis | 43ed469bcd0ad7f0d578277743f9776078a2c3c3 | [
"MIT"
] | null | null | null | scripts/format_files.py | paltmey/masterthesis | 43ed469bcd0ad7f0d578277743f9776078a2c3c3 | [
"MIT"
] | null | null | null | scripts/format_files.py | paltmey/masterthesis | 43ed469bcd0ad7f0d578277743f9776078a2c3c3 | [
"MIT"
] | null | null | null | import subprocess
from argparse import ArgumentParser
def run(src_dir, fast=False):
print(f'Formatting all files under {src_dir} using black.')
cmd = ['black']
if fast:
cmd.append('--fast')
cmd.append(src_dir)
subprocess.run(cmd)
if __name__ == '__main__':
parser = ArgumentParser(description='Format all files using the black Python formatter.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--src_dir', type=str, help='Directory where to run formatting.',
required=True)
parser.add_argument("--fast", help='If --fast given, skip temporary sanity checks.', action='store_true', default=False)
args = parser.parse_args()
run(args.src_dir, args.fast)
| 31.68 | 124 | 0.683081 | 100 | 792 | 5.22 | 0.54 | 0.057471 | 0.049808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.19697 | 792 | 24 | 125 | 33 | 0.820755 | 0 | 0 | 0 | 0 | 0 | 0.311869 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.176471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
124da53c618c855ab0d8247e2652d534cd052cf4 | 873 | py | Python | eval_empatheticdialogues.py | skywalker023/focused-empathy | 04bdd0cf2fcd7bb4ee204cacb54ce970f426c916 | [
"MIT"
] | 29 | 2021-09-07T06:54:23.000Z | 2022-03-25T12:33:04.000Z | eval_empatheticdialogues.py | skywalker023/focused-empathy | 04bdd0cf2fcd7bb4ee204cacb54ce970f426c916 | [
"MIT"
] | 8 | 2021-09-25T05:39:40.000Z | 2022-03-29T07:04:08.000Z | eval_empatheticdialogues.py | skywalker023/focused-empathy | 04bdd0cf2fcd7bb4ee204cacb54ce970f426c916 | [
"MIT"
] | 2 | 2021-11-07T08:27:38.000Z | 2022-01-09T05:28:41.000Z | import socket
import datetime
import os
import better_exceptions
from from_parlai.eval_model import eval_model
from from_parlai.eval_model import setup_args as eval_setupargs
better_exceptions.hook()
__PATH__ = os.path.abspath(os.path.dirname(__file__))
def setup_args(current_time):
parser = eval_setupargs()
parser.set_defaults(
task='tasks.empathetic_dialogues',
datapath=os.path.join(__PATH__, 'data'),
context_length=-1,
metrics='default',
batchsize=8,
display_examples=True,
display_add_fields='situation,emotion',
datatype='test'
)
return parser
if __name__ == '__main__':
print(f"Job is running on {socket.gethostname()}")
current_time = datetime.datetime.now().strftime("%m%d%H%M%S")
parser = setup_args(current_time)
opt = parser.parse_args()
eval_model(opt)
| 27.28125 | 65 | 0.705613 | 113 | 873 | 5.070796 | 0.59292 | 0.062827 | 0.048866 | 0.062827 | 0.101222 | 0.101222 | 0 | 0 | 0 | 0 | 0 | 0.002817 | 0.186712 | 873 | 31 | 66 | 28.16129 | 0.804225 | 0 | 0 | 0 | 0 | 0 | 0.132875 | 0.054983 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.222222 | 0 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
124f15794814742ded4d3518fa8a3f798acf1841 | 5,408 | py | Python | klsh/utils.py | tonygrey/klsh | 77dbcd2bdd3f04e4d9add136201afda31c964580 | [
"BSD-3-Clause"
] | 28 | 2015-08-21T07:42:52.000Z | 2022-03-23T23:18:14.000Z | klsh/utils.py | tonygrey/klsh | 77dbcd2bdd3f04e4d9add136201afda31c964580 | [
"BSD-3-Clause"
] | 2 | 2016-02-04T12:52:28.000Z | 2016-02-19T07:25:25.000Z | klsh/utils.py | tonygrey/klsh | 77dbcd2bdd3f04e4d9add136201afda31c964580 | [
"BSD-3-Clause"
] | 6 | 2016-02-04T06:18:43.000Z | 2020-05-10T10:42:27.000Z | import itertools
import contextlib
import time
import numbers
import numpy as np
@contextlib.contextmanager
def timeit(fmt=None):
if fmt is None:
fmt = "{0:.2g} sec"
t0 = time.time()
yield
t1 = time.time()
print(fmt.format(t1 - t0))
def create_rng(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
Adapted from sklearn.utils.check_random_state()
"""
if seed is None or seed is np.random:
rng = np.random.mtrand._rand
elif isinstance(seed, (numbers.Integral, np.integer)):
rng = np.random.RandomState(seed)
elif isinstance(seed, np.random.RandomState):
rng = seed
else:
raise ValueError('{0} cannot be used to seed a '
'numpy.random.RandomState instance'.format(seed))
return rng
def packbits_axis(X, axis=-1):
"""Create a compact representation of rows of bits in numpy
Parameters
----------
X : array_like
a d-dimensional array whose rows will be treated as a sequence of bits
axis : integer
the axis along which to pack the bits (default=-1)
Returns
-------
x : array_like
a (d - 1)-dimensional structured array containing sets of 8-bit
integers which compactly represent the bits along the specified
axis of X.
"""
X = np.asarray(X, dtype=np.uint8)
# roll specified axis to the back
if axis not in (-1, X.ndim - 1):
X = np.rollaxis(X, axis).transpose(list(range(1, X.ndim)) + [0])
# make sure we have a C-ordered contiguous buffer
X = np.asarray(X, order='C')
bits = np.packbits(X, -1)
return_shape = bits.shape[:-1]
return_type = [('', 'u1') for i in range(bits.shape[-1])]
return np.ndarray(return_shape, dtype=return_type, buffer=bits)
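# Quick sanity check (illustrative):
#     >>> X = np.array([[1, 0, 1, 1], [0, 1, 0, 0]])
#     >>> packbits_axis(X).shape
#     (2,)
#     >>> unpackbits_axis(packbits_axis(X), axissize=4).shape
#     (2, 4)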
def unpackbits_axis(x, axis=-1, axissize=None):
    """Inverse of packbits_axis
    Parameters
    ----------
    x : ndarray
        record array of any shape, with multiple data of type uint8
    axis : integer
        the axis at which to place the unpacked bits (default=-1)
    axissize : integer
        max size of expanded axis. Default is 8 * len(x.dtype)
Returns
-------
X : ndarray
array of shape x.shape[:axis] + (8 * d,) + x.shape[axis:]
where d is the number of unsigned ints in each element of the
record array.
"""
assert all(x.dtype[i] == np.uint8 for i in range(len(x.dtype)))
X = np.ndarray(x.shape + (len(x.dtype),),
dtype=np.uint8,
buffer=x)
X = np.unpackbits(X, -1)
if axissize is not None:
slices = [slice(None) for i in range(X.ndim)]
slices[-1] = slice(0, axissize)
X = X[slices]
return np.rollaxis(X, -1, axis)
def hamming_cdist(x, y=None, use_broadcasting=False):
"""Compute the matrix of hamming distances between x and y, which are
stored in packed-bit format.
    Parameters
    ----------
    x, y: nd_arrays
        x and y should be one-dimensional structured arrays with data type
        made of some number of unsigned integers.
    use_broadcasting : bool
        if True, compute the distances with a single broadcast xor over all
        pairs rather than a per-field loop (faster, but more memory-hungry).
    """
# TODO: make work with types other than uint8? maybe not needed.
x = np.atleast_1d(x)
assert x.ndim == 1
if len(x.dtype) > 0:
nbytes = len(x.dtype)
assert all(x.dtype[i] == np.uint8 for i in range(nbytes))
else:
nbytes = 1
assert x.dtype == np.uint8
if y is None:
y = x
else:
y = np.atleast_1d(y)
assert y.ndim == 1
assert y.dtype == x.dtype
if use_broadcasting:
x_ints = np.ndarray((x.shape[0], nbytes),
dtype=np.uint8,
buffer=x.data)
if y is x:
y_ints = x_ints
else:
y_ints = np.ndarray((y.shape[0], nbytes),
dtype=np.uint8,
buffer=y.data)
nonmatch_matrix = np.bitwise_xor(x_ints[:, np.newaxis, :],
y_ints[np.newaxis, :, :])
res = np.unpackbits(nonmatch_matrix[:, :, :, None], -1).sum((2, 3))
else:
if len(x.dtype) > 0:
it = (np.unpackbits(np.bitwise_xor(x[d][:, None],
y[d])[:, :, None], -1).sum(-1)
for d in x.dtype.names)
res = sum(it)
else:
res = np.unpackbits(np.bitwise_xor(x[:, None],
y)[:, :, None], -1).sum(-1)
return res
def hamming_hashes(hashval, nbits, nmax=None):
"""Return an iterator over all (integer) hashes,
in order of hamming distance
Parameters
----------
hashval : integer
hash value to match
nbits : integer
number of bits in the hash
nmax : integer (optional)
if specified, halt the iterator after given number of results
"""
if nmax is not None:
return itertools.islice(hamming_hashes(hashval, nbits), nmax)
else:
hashval = int(hashval)
bits = [2 ** i for i in range(nbits)]
return (hashval ^ sum(flip)
for nflips in range(nbits + 1)
for flip in itertools.combinations(bits, nflips))
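# Example: the nearest 3-bit hashes to 0b101, closest first (illustrative):
#     >>> list(hamming_hashes(0b101, 3, nmax=4))
#     [5, 4, 7, 1]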
| 29.878453 | 78 | 0.568787 | 743 | 5,408 | 4.096904 | 0.271871 | 0.023653 | 0.01774 | 0.018068 | 0.099869 | 0.058476 | 0.04205 | 0.022339 | 0.022339 | 0.022339 | 0 | 0.014375 | 0.318232 | 5,408 | 180 | 79 | 30.044444 | 0.811229 | 0.331731 | 0 | 0.131868 | 0 | 0 | 0.022452 | 0.00709 | 0 | 0 | 0 | 0.005556 | 0.065934 | 1 | 0.065934 | false | 0 | 0.054945 | 0 | 0.186813 | 0.010989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
12538ece1750dfd1837c7ec0ab51c093bdd2eb86 | 1,767 | py | Python | Apps/Black_Jack.py | srp98/Python-Stuff | fade8934718e01a3d30cf9db93515b8f02a20b18 | [
"MIT"
] | null | null | null | Apps/Black_Jack.py | srp98/Python-Stuff | fade8934718e01a3d30cf9db93515b8f02a20b18 | [
"MIT"
] | null | null | null | Apps/Black_Jack.py | srp98/Python-Stuff | fade8934718e01a3d30cf9db93515b8f02a20b18 | [
"MIT"
] | 1 | 2019-10-31T03:16:04.000Z | 2019-10-31T03:16:04.000Z | def hand_total(hand):
total = 0
# Count number of aces and deal with how to apply them at the end
aces = 0
for card in hand:
if card in ['J', 'Q', 'K']:
total += 10
elif card == 'A':
aces += 1
else:
# Convert the number on card to int's
total += int(card)
# Now, total is sum of the cards excluding the aces, deal with aces now
total += aces
# Upgrade aces from 1 to 11 as long as it helps us get closer to 21 without losing
while total + 10 <= 21 and aces > 0:
total += 10
aces -= 1
return total
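# Quick checks (illustrative):
#     hand_total(['K', 'A']) -> 21 (the ace is upgraded to 11)
#     hand_total(['A', 'A']) -> 12 (only one ace can safely count as 11)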
def blackjack_hand_greater_than(hand_1, hand_2):
"""
Return True if hand_1 beats hand_2, and False otherwise.
In order for hand_1 to beat hand_2 the following must be true:
- The total of hand_1 must not exceed 21
- The total of hand_1 must exceed the total of hand_2 OR hand_2's total must exceed 21
Hands are represented as a list of cards. Each card is represented by a string.
When adding up a hand's total, cards with numbers count for that many points. Face
cards ('J', 'Q', and 'K') are worth 10 points. 'A' can count for 1 or 11.
When determining a hand's total, you should try to count aces in the way that
maximizes the hand's total without going over 21. e.g. the total of ['A', 'A', '9'] is 21,
the total of ['A', 'A', '9', '3'] is 14.
Examples:
>>> blackjack_hand_greater_than(['K'], ['3', '4'])
True
>>> blackjack_hand_greater_than(['K'], ['10'])
False
>>> blackjack_hand_greater_than(['K', 'K', '2'], ['3'])
False
"""
total_1 = hand_total(hand_1)
total_2 = hand_total(hand_2)
return total_1 <= 21 and (total_1 > total_2 or total_2 > 21) | 36.061224 | 94 | 0.615167 | 302 | 1,767 | 3.490066 | 0.337748 | 0.028463 | 0.047438 | 0.091082 | 0.131879 | 0.060721 | 0 | 0 | 0 | 0 | 0 | 0.050553 | 0.283531 | 1,767 | 49 | 95 | 36.061224 | 0.781991 | 0.640634 | 0 | 0.105263 | 0 | 0 | 0.007233 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1254aa54269ff251642c4464e9329b6e5da57064 | 6,281 | py | Python | models/gaussian/gaussian/Gaussian.py | lijun99/altar | 92c2915de3de0c51138d382c8192ead7d6eed1a1 | [
"BSD-3-Clause"
] | 6 | 2019-07-25T08:02:09.000Z | 2022-02-09T04:19:31.000Z | models/gaussian/gaussian/Gaussian.py | lijun99/altar | 92c2915de3de0c51138d382c8192ead7d6eed1a1 | [
"BSD-3-Clause"
] | null | null | null | models/gaussian/gaussian/Gaussian.py | lijun99/altar | 92c2915de3de0c51138d382c8192ead7d6eed1a1 | [
"BSD-3-Clause"
] | null | null | null | # -*- python -*-
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# externals
import math
# the package
import altar
# declaration
class Gaussian(altar.models.bayesian, family="altar.models.gaussian"):
"""
A model that emulates the probability density for a single observation of the model
parameters. The observation is treated as normally distributed around a given mean, with a
covariance constructed out of its eigenvalues and a rotation in configuration
space. Currently, only two dimensional parameter spaces are supported.
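    The covariance is assembled in the usual way, sigma = R(phi) . diag(lambda) . R(phi)^T,
    with R(phi) the two dimensional rotation matrix; {__init__} builds its inverse
    directly from the eigenvalue reciprocals and the trigonometry of {phi}.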
"""
# user configurable state
parameters = altar.properties.int(default=2)
parameters.doc = "the number of model degrees of freedom"
support = altar.properties.array(default=(-1,1))
support.doc = "the support interval of the prior distribution"
prep = altar.distributions.distribution()
prep.doc = "the distribution used to generate the initial sample"
prior = altar.distributions.distribution()
prior.doc = "the prior distribution"
μ = altar.properties.array(default=(0,0))
μ.doc = 'the location of the central value of the observation'
λ = altar.properties.array(default=(.01, .005))
λ.doc = 'the eigenvalues of the covariance matrix'
φ = altar.properties.dimensional(default=0*altar.units.angle.rad)
φ.doc = 'the orientation of the covariance semi-major axis'
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize the state of the model given a {problem} specification
"""
# chain up
super().initialize(application=application)
# get my random number generator
rng = self.rng
# initialize my distributions
self.prep.initialize(rng=rng)
self.prior.initialize(rng=rng)
# all done
return self
@altar.export
def initializeSample(self, step):
"""
Fill {step.θ} with an initial random sample from my prior distribution.
"""
# grab the portion of the sample that's mine
θ = self.restrict(theta=step.theta)
# fill it with random numbers from my initializer
self.prep.initializeSample(theta=θ)
# and return
return self
@altar.export
def priorLikelihood(self, step):
"""
Fill {step.prior} with the likelihoods of the samples in {step.theta} in the prior
distribution
"""
# grab my prior pdf
pdf = self.prior
# grab the portion of the sample that's mine
θ = self.restrict(theta=step.theta)
# and the storage for the prior likelihoods
likelihood = step.prior
# delegate
pdf.priorLikelihood(theta=θ, likelihood=likelihood)
# all done
return self
@altar.export
def dataLikelihood(self, step):
"""
Fill {step.data} with the likelihoods of the samples in {step.theta} given the available
data. This is what is usually referred to as the "forward model"
"""
# cache the inverse of {σ}
σ_inv = self.σ_inv
# grab the portion of the sample that's mine
θ = self.restrict(theta=step.theta)
# and the storage for the data likelihoods
data = step.data
# find out how many samples in the set
samples = θ.rows
# for each sample in the sample set
for sample in range(samples):
# prepare vector with the sample difference from the mean
δ = θ.getRow(sample)
δ -= self.peak
# storage for {σ_inv . δ}
y = altar.vector(shape=δ.shape).zero()
# compute {σ_inv . δ} and store it in {y}
altar.blas.dsymv(σ_inv.upperTriangular, 1.0, σ_inv, δ, 0.0, y)
# finally, form {δ^T . σ_inv . δ}
v = altar.blas.ddot(δ, y)
# compute and return the log-likelihood of the data given this sample
data[sample] += self.normalization - v/2
# all done
return self
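
    # the loop above accumulates the multivariate Gaussian log-density
    #   log N(θ; μ, Σ) = -1/2 * ( dof*log(2π) + log|Σ| + δᵀ Σ⁻¹ δ ),  δ = θ - μ,
    # where the constant part is precomputed as {self.normalization} in the
    # constructor and v = δᵀ Σ⁻¹ δ comes from the dsymv/ddot pair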

    @altar.export
    def verify(self, step, mask):
        """
        Check whether the samples in {step.theta} are consistent with the model requirements and
        update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
        """
        # grab the portion of the sample that's mine
        θ = self.restrict(theta=step.theta)
        # grab my prior
        pdf = self.prior
        # ask it to verify my samples
        pdf.verify(theta=θ, mask=mask)
        # all done; return the rejection map
        return mask

    # meta methods
    def __init__(self, **kwds):
        # chain up
        super().__init__(**kwds)
        # local names for the math functions
        log, π, cos, sin = math.log, math.pi, math.cos, math.sin
        # the number of model parameters
        dof = self.parameters
        # convert the central value into a vector; allocate
        peak = altar.vector(shape=dof)
        # and populate
        for index, value in enumerate(self.μ): peak[index] = value
        # the trigonometry
        cos_φ = cos(self.φ)
        sin_φ = sin(self.φ)
        # the eigenvalues
        λ0 = self.λ[0]
        λ1 = self.λ[1]
        # the eigenvalue inverses
        λ0_inv = 1/λ0
        λ1_inv = 1/λ1
        # build the inverse of the covariance matrix
        σ_inv = altar.matrix(shape=(dof, dof))
        σ_inv[0,0] = λ0_inv*cos_φ**2 + λ1_inv*sin_φ**2
        σ_inv[1,1] = λ1_inv*cos_φ**2 + λ0_inv*sin_φ**2
        σ_inv[0,1] = σ_inv[1,0] = (λ1_inv - λ0_inv) * cos_φ * sin_φ
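        # the three assignments above assemble Σ⁻¹ as a rotation by the angle φ
        # of the diagonal matrix diag(1/λ0, 1/λ1); for φ = 0 it reduces to that
        # diagonal, e.g. [[100, 0], [0, 200]] with the default eigenvalues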
        # compute the log of its determinant and store it
        σ_lndet = log(λ0 * λ1)
        # attach the characteristics of my pdf
        self.peak = peak
        self.σ_inv = σ_inv
        # the log-normalization
        self.normalization = -.5*(dof*log(2*π) + σ_lndet)
        # all done
        return

    # implementation details
    peak = None # the location of my central value
    σ_inv = None # the inverse of my data covariance
    normalization = 1 # the normalization factor for my prior distribution
# end of file
| 30.342995 | 96 | 0.61694 | 846 | 6,281 | 4.531915 | 0.289598 | 0.019562 | 0.018258 | 0.021909 | 0.137194 | 0.125456 | 0.119197 | 0.09494 | 0.09494 | 0.073552 | 0 | 0.014922 | 0.295813 | 6,281 | 206 | 97 | 30.490291 | 0.85191 | 0.396434 | 0 | 0.1875 | 0 | 0 | 0.089762 | 0.005891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.025 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1255698f615a2ee79f73dfb0cbca1d3e028c33b4 | 2,486 | py | Python | prototype/crawling/harvesta/modules/naverfuncs.py | latte-horse/jaehyun | 159963b405b1726717f99df0e4ba62df195aeb94 | [
"MIT"
] | null | null | null | prototype/crawling/harvesta/modules/naverfuncs.py | latte-horse/jaehyun | 159963b405b1726717f99df0e4ba62df195aeb94 | [
"MIT"
] | null | null | null | prototype/crawling/harvesta/modules/naverfuncs.py | latte-horse/jaehyun | 159963b405b1726717f99df0e4ba62df195aeb94 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#naverFuncs.py
import requests
import urllib.request
from bs4 import BeautifulSoup
import re
import json
if __name__ != '__main__':
    from . import config

#--------------------------------------------------------------------------
# Return the top cnt real-time trending search keywords
#--------------------------------------------------------------------------
def get_keywords(cnt):
    naverUrl = "https://www.naver.com"
    naver_keywords = []
    try:
        html = requests.get(naverUrl).content
        soup = BeautifulSoup(html, 'html.parser')
        tagList = soup.select('.ah_roll_area .ah_k')
        for keyword in tagList:
            naver_keywords.append(keyword.get_text())
    except Exception as e:
        print(e)
    # return only cnt results
    return naver_keywords[:min([len(naver_keywords), cnt])]
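
# For example, with cnt=5 and 20 scraped keywords this returns the first five;
# with cnt=50 it returns all 20, and an empty list if the scrape failed.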

#--------------------------------------------------------------------------
# Search news for the given keywords and return up to cnt results
#--------------------------------------------------------------------------
def get_newslist(search_words, cnt):
    encText = urllib.parse.quote(search_words)
    url = "https://openapi.naver.com/v1/search/news.json?query={0}&display={1}&sort={2}".format(
        encText, cnt, "date")
    # query through the NAVER API
    request = urllib.request.Request(url)
    request.add_header("X-Naver-Client-Id", config.clientID)
    request.add_header("X-Naver-Client-Secret", config.clientSecret)
    resultList = []
    try:
        response = urllib.request.urlopen(request)
    except Exception as e:
        print(e)
    else:
        rescode = response.getcode()
        if(rescode == 200):
            response_body = response.read()
            newsList = json.loads(response_body.decode('utf-8'))['items']
            # keep only the title and link of each item
            for news in newsList:
                resultList.append({
                    'title' : re.sub("<[^>]*>", '', news['title']),
                    'link' : news['originallink'] != '' and news['originallink'] or news['link']})
        else:
            print("Error Code:" + str(rescode))
    # return the results (empty if there are none)
    return resultList

#--------------------------------------------------------------------------
# module test code
#--------------------------------------------------------------------------
if __name__ == "__main__":
    naverKeywords = get_keywords(120)
    print(naverKeywords)
    import config
    newsList = get_newslist("미대륙 횡단열차", 30)  # test: one keyword, many news items
    for news in newsList: print(news) | 33.146667 | 98 | 0.498793 | 249 | 2,486 | 4.843373 | 0.51004 | 0.043118 | 0.016584 | 0.019901 | 0.086235 | 0.086235 | 0 | 0 | 0 | 0 | 0 | 0.008705 | 0.214401 | 2,486 | 75 | 99 | 33.146667 | 0.608807 | 0.249397 | 0 | 0.163265 | 0 | 0.020408 | 0.142162 | 0.011351 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.142857 | 0 | 0.22449 | 0.102041 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
125988da139b4ae5434a553bc0bdc723e1b0fa96 | 2,708 | py | Python | neo/Network/core/uintbase.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | [
"MIT"
] | null | null | null | neo/Network/core/uintbase.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | [
"MIT"
] | null | null | null | neo/Network/core/uintbase.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | [
"MIT"
] | null | null | null | import binascii
from typing import TYPE_CHECKING, Union

# note: this import path is an assumption; the dataset row begins at the
# TYPE_CHECKING guard, so the original import of the serializable mixin is not shown
from neo.Network.core import serializable

if TYPE_CHECKING:
    pass

class UIntBase(serializable.SerializableMixin):
    _data = bytearray()
    _hash: int = 0

    def __init__(self, num_bytes: int, data: Union[bytes, bytearray] = None) -> None:
        super(UIntBase, self).__init__()
        if data is None:
            self._data = bytearray(num_bytes)
        else:
            if isinstance(data, bytes):
                self._data = bytearray(data)
            elif isinstance(data, bytearray):
                self._data = data
            else:
                raise TypeError("Invalid data type {}. Expecting bytes or bytearray".format(type(data)))

            try:
                self._data = bytearray(binascii.unhexlify(self._data.decode()))
            except UnicodeDecodeError:
                pass
            except binascii.Error:
                pass

            if len(self._data) != num_bytes:
                raise ValueError("Invalid UInt: data length {} != specified num_bytes {}".format(len(self._data), num_bytes))

        self._hash = self.get_hash_code()

    @property
    def size(self) -> int:
        """ Count of data bytes. """
        return len(self._data)

    def get_hash_code(self) -> int:
        """ Get a uint32 identifier. """
        slice_length = 4 if len(self._data) >= 4 else len(self._data)
        return int.from_bytes(self._data[:slice_length], "little")

    def serialize(self, writer: "BinaryWriter") -> None:
        """ Serialize object. """
        writer.write_bytes(self._data)

    def deserialize(self, reader: "BinaryReader") -> None:
        """ Deserialize object. """
        self._data = reader.read_bytes(self.size)

    def to_array(self) -> bytearray:
        """ get the raw data. """
        return self._data

    def to_string(self) -> str:
        """ Convert the data to a human readable format (data is in reverse order). """
        db = bytearray(self._data)
        db.reverse()
        return db.hex()
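
    # Example of the convention above: if _data holds the two bytes 0x01 0x02,
    # to_array() returns them in storage (little-endian) order while
    # to_string() renders the reversed human-readable form '0201'.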

    def __eq__(self, other) -> bool:
        if other is None:
            return False
        if not isinstance(other, UIntBase):
            return False
        if other is self:
            return True
        if self._data == other._data:
            return True
        return False

    def __hash__(self):
        return self._hash

    def __str__(self):
        return self.to_string()

    def _compare_to(self, other) -> int:
        if not isinstance(other, UIntBase):
            raise TypeError("Cannot compare %s to type %s" % (type(self).__name__, type(other).__name__))

        x = self.to_array()
        y = other.to_array()

        if len(x) != len(y):
            raise ValueError("Cannot compare %s with length %s to %s with length %s" % (type(self).__name__, len(x), type(other).__name__, len(y)))

        length = len(x)
        # walk from the most significant byte down to and including index 0
        for i in range(length - 1, -1, -1):
            if x[i] > y[i]:
                return 1
            if x[i] < y[i]:
                return -1
        return 0

    def __lt__(self, other):
        return self._compare_to(other) < 0

    def __gt__(self, other):
        return self._compare_to(other) > 0

    def __le__(self, other):
        return self._compare_to(other) <= 0

    def __ge__(self, other):
        return self._compare_to(other) >= 0 | 32.626506 | 138 | 0.677253 | 395 | 2,708 | 4.382278 | 0.255696 | 0.073946 | 0.031774 | 0.043905 | 0.153668 | 0.099365 | 0.099365 | 0.099365 | 0.064125 | 0 | 0 | 0.006772 | 0.182053 | 2,708 | 83 | 139 | 32.626506 | 0.774718 | 0.065731 | 0 | 0.155844 | 0 | 0 | 0.086241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.194805 | false | 0.038961 | 0 | 0.077922 | 0.467532 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
125df78648f1c8732c0895fa28745acc66e1387e | 18,971 | py | Python | pyhrt/continuous.py | tansey/hrt | f6d271a34590d073a08f0fc40f40e898f38cdf97 | [
"MIT"
] | 19 | 2018-11-05T19:08:03.000Z | 2022-02-15T03:58:47.000Z | pyhrt/continuous.py | tansey/hrt | f6d271a34590d073a08f0fc40f40e898f38cdf97 | [
"MIT"
] | 1 | 2019-11-20T22:35:54.000Z | 2019-11-20T23:06:51.000Z | pyhrt/continuous.py | tansey/hrt | f6d271a34590d073a08f0fc40f40e898f38cdf97 | [
"MIT"
] | 5 | 2019-04-15T23:17:26.000Z | 2020-04-10T04:18:22.000Z | import os
import sys
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
from scipy.stats import norm
from scipy.stats.mstats import gmean
from pyhrt.utils import batches, create_folds, logsumexp
############################################################
'''Continuous conditionals'''
############################################################
class GaussianMixtureModel:
    def __init__(self, pi, mu, sigma, y_mean=0, y_std=1):
        self.pi = pi
        self.mu = mu
        self.sigma = sigma
        self.y_mean = y_mean
        self.y_std = y_std

    def sample(self):
        comps = [np.random.choice(self.pi.shape[1], p=p) for p in self.pi]
        return np.array([np.random.normal(self.mu[i,k], self.sigma[i,k]) for i,k in enumerate(comps)])

    def pdf(self, y):
        return (self.pi * norm.pdf(y[:,np.newaxis], self.mu, self.sigma)).sum(axis=1)

    def cdf(self, y):
        return (self.pi * norm.cdf(y[:,np.newaxis], self.mu, self.sigma)).sum(axis=1)
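
    # Both methods evaluate the mixture row-wise:
    #   pdf: p(y_i) = sum_k pi[i,k] * N(y_i; mu[i,k], sigma[i,k])
    #   cdf: F(y_i) = sum_k pi[i,k] * Phi((y_i - mu[i,k]) / sigma[i,k])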

'''Neural conditional density estimator (GMM)'''
class MixtureDensityNetwork(nn.Module):
    def __init__(self, nfeatures, ncomponents, X_means, X_stds, y_mean, y_std):
        super(MixtureDensityNetwork, self).__init__()
        self.ncomponents = ncomponents
        self.X_means = X_means
        self.X_stds = X_stds
        self.y_mean = y_mean
        self.y_std = y_std
        self.fc_in = nn.Sequential(
                nn.Linear(nfeatures, 200),
                nn.ReLU(),
                nn.Dropout(),
                nn.Linear(200, 200),
                nn.ReLU(),
                nn.Dropout(),
                nn.Linear(200, 3*ncomponents))
        # self.fc_in = nn.Sequential(nn.Linear(nfeatures,3*ncomponents))
        self.sigma_transform = nn.Softplus()
        self.pi_transform = nn.Softmax(dim=1)

    def forward(self, x):
        outputs = self.fc_in(x)
        pi = self.pi_transform(outputs[:,:self.ncomponents])
        mu = outputs[:,self.ncomponents:2*self.ncomponents]
        sigma = self.sigma_transform(outputs[:,2*self.ncomponents:])
        return pi, mu, sigma

    def predict(self, X):
        self.eval()
        self.zero_grad()
        tX = autograd.Variable(torch.FloatTensor((X - self.X_means[np.newaxis,:]) / self.X_stds[np.newaxis,:]), requires_grad=False)
        pi, mu, sigma = self.forward(tX)
        return GaussianMixtureModel(pi.data.numpy(), mu.data.numpy(), sigma.data.numpy(), self.y_mean, self.y_std)

'''Bootstrap confidence interval density estimator'''
class BootstrapConditionalModel:
    def __init__(self, X, y, fit_fn, nbootstraps=100, verbose=True):
        self.indices = [np.random.choice(np.arange(X.shape[0]), replace=True, size=X.shape[0]) for _ in range(nbootstraps)]
        self.models = []
        for i,idx in enumerate(self.indices):
            if verbose:
                print('\tBootstrap {}'.format(i))
            self.models.append(fit_fn(X[idx], y[idx]))

    def pdf_quantiles(self, X, y, q, axis=0):
        return np.percentile(np.array([m.predict(X).pdf(y) for m in self.models]), q, axis=axis)

    def cdf_quantiles(self, X, y, q, axis=0):
        return np.percentile(np.array([m.predict(X).cdf(y) for m in self.models]), q, axis=axis)

    def sample(self, X):
        return self.models[0].predict(X).sample()
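
    # Each bootstrap resample yields one fitted conditional model; the
    # *_quantiles helpers take percentiles across the nbootstraps model
    # outputs, giving pointwise confidence bands for the estimated pdf/cdf.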

def fit_mdn(X, y, ncomponents=5,
            nepochs=50, val_pct=0.1,
            batch_size=None, target_batch_pct=0.01,
            min_batch_size=10, max_batch_size=100,
            verbose=False, lr=3e-4, weight_decay=0.01):
    import uuid
    tmp_file = '/tmp/tmp_file_' + str(uuid.uuid4())

    if batch_size is None:
        batch_size = max(min_batch_size, min(max_batch_size, int(np.round(X.shape[0]*target_batch_pct))))

    # Standardize the features (helps with gradient propagation)
    Xstd = X.std(axis=0)
    Xstd[Xstd == 0] = 1 # Handle constant features
    tX = autograd.Variable(torch.FloatTensor((X - X.mean(axis=0,keepdims=True)) / Xstd[np.newaxis, :]), requires_grad=False)
    tY = autograd.Variable(torch.FloatTensor(y), requires_grad=False)

    # Create train/validate splits
    indices = np.arange(X.shape[0], dtype=int)
    np.random.shuffle(indices)
    train_cutoff = int(np.round(len(indices)*(1-val_pct)))
    train_indices = indices[:train_cutoff]
    validate_indices = indices[train_cutoff:]

    model = MixtureDensityNetwork(X.shape[1], ncomponents, X.mean(axis=0), Xstd, y.mean(), y.std())

    # Setup the SGD method
    optimizer = optim.RMSprop(model.parameters(), lr=lr, weight_decay=weight_decay)

    # Track progress
    train_losses, val_losses, best_loss = np.zeros(nepochs), np.zeros(nepochs), None

    # Train the model
    for epoch in range(nepochs):
        if verbose:
            print('\t\tEpoch {}'.format(epoch+1))
            sys.stdout.flush()

        # Track the loss curves
        train_loss = torch.Tensor([0])
        for batch_idx, batch in enumerate(batches(train_indices, batch_size, shuffle=True)):
            if verbose and (batch_idx % 100 == 0):
                print('\t\t\tBatch {}'.format(batch_idx))
            tidx = autograd.Variable(torch.LongTensor(batch), requires_grad=False)

            # Set the model to training mode
            model.train()

            # Reset the gradient
            model.zero_grad()

            # Run the model and get the predictions
            pi, mu, sigma = model(tX[tidx])

            # Calculate the log-probabilities
            components = torch.distributions.Normal(mu, sigma)
            logprobs = components.log_prob(tY[tidx][:,None])

            # -log(GMM(y | x)) loss, via log sum_k pi_k N_k = logsumexp(log pi_k + log N_k)
            loss = -logsumexp(pi.log() + logprobs, dim=1).mean()

            # Calculate gradients
            loss.backward()

            # Apply the update
            # [p for p in model.parameters() if p.requires_grad]
            optimizer.step()

            # Track the loss
            train_loss += loss.data

        validate_loss = torch.Tensor([0])
        for batch_idx, batch in enumerate(batches(validate_indices, batch_size, shuffle=False)):
            if verbose and (batch_idx % 100 == 0):
                print('\t\t\tValidation Batch {}'.format(batch_idx))
            tidx = autograd.Variable(torch.LongTensor(batch), requires_grad=False)

            # Set the model to test mode
            model.eval()

            # Reset the gradient
            model.zero_grad()

            # Run the model and get the predictions
            pi, mu, sigma = model(tX[tidx])

            # Calculate the log-probabilities
            components = torch.distributions.Normal(mu, sigma)
            logprobs = components.log_prob(tY[tidx][:,None])

            # -log(GMM(y | x)) loss
            loss = -logsumexp(pi.log() + logprobs, dim=1).sum()

            # Track the loss
            validate_loss += loss.data

        train_losses[epoch] = train_loss.numpy() / float(len(train_indices))
        val_losses[epoch] = validate_loss.numpy() / float(len(validate_indices))

        # Check if we currently have the best held-out log-likelihood
        if epoch == 0 or val_losses[epoch] <= best_loss:
            if verbose:
                print('\t\t\tSaving test set results. <----- New high water mark on epoch {}'.format(epoch+1))
            # If so, use the current model on the test set
            best_loss = val_losses[epoch]
            torch.save(model, tmp_file)

        if verbose:
            print('Validation loss: {} Best: {}'.format(val_losses[epoch], best_loss))

    model = torch.load(tmp_file)
    os.remove(tmp_file)
    return model
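
# Illustrative usage: fit on training rows, then score held-out responses
# under the learned conditional density:
#   model = fit_mdn(X_train, y_train, ncomponents=5)
#   gmm = model.predict(X_test)   # one mixture per test row
#   dens = gmm.pdf(y_test)        # p(y_i | x_i) for each held-out row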

def ks_test(ksstat, nsamples, ntrials=10000):
    null_stats = np.zeros(ntrials)
    null_cdf = (np.arange(nsamples)+1)/float(nsamples)
    for trial in range(ntrials):
        null_data = np.random.uniform(size=nsamples)
        null_data = null_data[np.argsort(null_data)]
        null_stats[trial] = np.max(np.abs(null_data - null_cdf))
    return (ksstat >= null_stats).mean()
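
# The return value is a Monte Carlo tail fraction: the share of KS statistics
# from true U(0,1) samples of the same size that the observed statistic meets
# or exceeds; values near 0 mean the observed deviation is smaller than
# typical sampling noise under uniformity.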

def sample_holdout_dists(dists, model, quantiles):
    y = dists[0].sample()
    logpdfs = np.log(np.array([d.pdf(y) for d in dists]).clip(1e-100, np.inf))
    if quantiles is None:
        return y, None
    probs = np.exp(logpdfs - logpdfs[0:1]) # likelihood ratio
    quants = np.percentile(probs, quantiles, axis=0) # quantile per-sample
    quants = gmean(quants, axis=1) # (geometric) mean quantile
    return y, quants
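
# In the quantile branch above, probs[b, i] is the likelihood ratio
# p_b(y_i) / p_0(y_i) of bootstrap model b against the model used to draw y,
# so the returned quants summarize how much the resampled fits disagree.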

class CrossValidationSampler:
    def __init__(self, X, models, folds, quantiles=None):
        self.N = X.shape[0]
        self.models = models
        self.folds = folds
        self.quantiles = quantiles
        self.dists = [[m.predict(X[fold]) for m in model_set.models] for model_set, fold in zip(self.models, self.folds)]

    def __call__(self):
        y = np.zeros(self.N)
        probs = np.zeros(self.N)
        quants = None
        if self.quantiles is not None:
            quants = np.zeros((self.N, len(self.quantiles)))
        for model, fold, dist in zip(self.models, self.folds, self.dists):
            y[fold], q = sample_holdout_dists(dist, model, self.quantiles)
            if q is not None:
                quants[fold] = q
        return y, quants

class HoldoutSampler:
    def __init__(self, X, model, quantiles=None):
        self.model = model
        self.quantiles = quantiles
        self.dists = [m.predict(X) for m in model.models]

    def __call__(self):
        return sample_holdout_dists(self.dists, self.model, self.quantiles)

def calibrate_continuous(X, feature,
                         X_test=None, nquantiles=101, nbootstraps=100,
                         nfolds=5, ks_threshold=0.005, p_threshold=0.,
                         use_cv=False):
    '''Calibrates a bootstrap confidence interval conditional model for a given feature.'''
    # Search over a linear quantile grid
    quantile_range = np.linspace(0, 100, nquantiles)
    jmask = np.ones(X.shape[1], dtype=bool)
    jmask[feature] = False
    if X_test is None and use_cv:
        # Use k-fold cross-validation to generate conditional density estimates for X_j
        print('Fitting using {} bootstrap resamples and {} folds'.format(nbootstraps, nfolds))
        cdfs = np.zeros((nquantiles, X.shape[0]))
        proposals = []
        folds = create_folds(X, nfolds)
        for fold_idx, fold in enumerate(folds):
            imask = np.ones(X.shape[0], dtype=bool)
            imask[fold] = False
            model = BootstrapConditionalModel(X[imask][:,jmask], X[imask][:,feature], fit_mdn, nbootstraps=nbootstraps)
            cdfs[:,fold] = model.cdf_quantiles(X[fold][:,jmask], X[fold][:,feature], quantile_range, axis=0)
            proposals.append(model)
        sampler = CrossValidationSampler(X[:,jmask], proposals, folds)
    else:
        if X_test is None:
            print('Using training set as testing set.')
            X_test = X
        # Use a held-out test set
        print('Fitting using {} bootstrap resamples and a {}/{} train/test split'.format(nbootstraps, X.shape[0], X_test.shape[0]))
        model = BootstrapConditionalModel(X[:,jmask], X[:,feature], fit_mdn, nbootstraps=nbootstraps)
        cdfs = model.cdf_quantiles(X_test[:,jmask], X_test[:,feature], quantile_range, axis=0)
        sampler = HoldoutSampler(X_test[:,jmask], model)

    # Look at the bounds of the CDF along a discrete grid of points
    ks_grid = np.linspace(1e-6,1-1e-6,1001)

    # Find the lower quantile that forms a sufficient upper bound on the uniform CDF
    for i in range(1,nquantiles//2):
        lower = quantile_range[nquantiles//2 - i]
        qlower = cdfs[nquantiles//2 - i]

        # U(0,1) CDF is the (0,1),(0,1) line. So at every point q on the grid of
        # CDF points, we expect a well-calibrated model to have q*N points with
        # CDF value lower than q. Here we are looking for an upper bound, so
        # we measure the KS distance as the maximum amount the U(0,1) CDF is
        # above the predicted CDF.
        ks_lower = 0
        for ks_point in ks_grid:
            ks_lower = max(ks_lower, ks_point - (qlower <= ks_point).mean())
        ks_pvalue = ks_test(ks_lower, cdfs.shape[1])
        # print('Lower: {} KS: {} p: {}'.format(lower, ks_lower, ks_pvalue))

        # Allow some error tolerance due to noise/finite data
        if ks_lower <= ks_threshold or ks_pvalue <= p_threshold:
            break

    # Find the upper quantile
    for i in range(1,nquantiles//2):
        upper = quantile_range[nquantiles//2+i]
        qupper = cdfs[nquantiles//2 + i]

        # U(0,1) CDF is the (0,1),(0,1) line. So at every point q on the grid of
        # CDF points, we expect a well-calibrated model to have q*N points with
        # CDF value lower than q. Here we are looking for a lower bound, so
        # we measure the KS distance as the maximum amount the U(0,1) CDF is
        # below the predicted CDF.
        ks_upper = 0
        for ks_point in ks_grid:
            ks_upper = max(ks_upper, (qupper <= ks_point).mean() - ks_point)
        ks_pvalue = ks_test(ks_upper, cdfs.shape[1])
        # print('Upper: {} KS: {} p: {}'.format(upper, ks_upper, ks_pvalue))

        # Allow some error tolerance due to noise/finite data
        if ks_upper <= ks_threshold or ks_pvalue <= p_threshold:
            break

    # Set the sampler to the chosen regions
    sampler.quantiles = np.array([lower, upper])

    # Our KS-distance is the worst-case of the two bounds
    ks_stat = np.max([ks_lower, ks_upper])

    # The p-value on the KS test that the bounded distribution is different
    # from the Uniform distribution
    ks_pvalue = ks_test(ks_stat, cdfs.shape[1])

    print('Selected intervals: [{},{}]'.format(lower, upper))
    return {'model': model,
            'cdfs': cdfs,
            'ks_stat': ks_stat,
            'ks_pvalue': ks_pvalue,
            'upper': upper,
            'lower': lower,
            'qupper': qupper,
            'qlower': qlower,
            'quantiles': quantile_range,
            'sampler': sampler
            }

def test_mdn():
    # Generate the ground truth
    N = 1000
    X = np.random.normal(size=(1000,2))
    logits = np.array([np.exp(X[:,0]**2), np.exp(X[:,0]), np.exp(2*X[:,0])]).T
    pi = logits / logits.sum(axis=1, keepdims=True)
    # pi = np.array([np.ones(X.shape[0])*0.3, np.ones(X.shape[0])*0.5, np.ones(X.shape[0])*0.2]).T
    mu = np.array([X[:,0], 5*X[:,1], -2*X[:,1]*X[:,0]]).T
    sigma = np.ones((X.shape[0],3))
    true_gmm = GaussianMixtureModel(pi, mu, sigma)

    # Sample some observations
    y = true_gmm.sample()
    truth = true_gmm.cdf(y)

    # import matplotlib.pylab as plt
    # x1, x2 = np.meshgrid(np.linspace(-5,5,100), np.linspace(-5,5,100))
    # im = np.zeros((100,100))
    # for i in range(100):
    #     for j in range(100):
    #         im[i,j] = 0.3*x2[i,j] + 0.5*5*x2[i,j] - 2 * x2[i,j]
    # plt.imshow(im)
    # plt.colorbar()
    # plt.xlabel('X1')
    # plt.ylabel('X2')
    # plt.title('Mean(y)')
    # plt.show()

    # Fit the model
    split = int(np.round(X.shape[0]*0.8))
    model = fit_mdn(X[:split], y[:split], verbose=True, ncomponents=3, batch_size=100, nepochs=20)

    # Predict the likelihood of observations
    pred_gmm = model.predict(X)
    pred = pred_gmm.cdf(y)

    import matplotlib.pylab as plt
    import seaborn as sns
    plt.clf()
    plt.scatter(truth[split:], pred[split:], color='blue')
    plt.plot([0,1],[0,1],color='red')
    # z = np.linspace(y.min(), y.max(), 1000)
    # print(true_gmm.pi[0], true_gmm.mu[0], true_gmm.sigma[0])
    # print(pred_gmm.pi[0], pred_gmm.mu[0], pred_gmm.sigma[0])
    # plt.plot(z, (true_gmm.pi[0:1]*norm.pdf(z[:,np.newaxis], true_gmm.mu[0], true_gmm.sigma[0])).sum(axis=1), color='blue')
    # plt.plot(z, (pred_gmm.pi[0:1]*norm.pdf(z[:,np.newaxis], pred_gmm.mu[0], pred_gmm.sigma[0])).sum(axis=1), color='orange')
    plt.xlabel('Truth')
    plt.ylabel('Predicted')
    plt.show()
    # plt.hist(truth/pred, bins=100)
    # plt.show()

def test_calibration():
    import matplotlib.pylab as plt  # for the plots below

    # Generate the ground truth
    N = 1000
    X = np.random.normal(size=(N,2))
    logits = np.array([np.exp(X[:,0]**2), np.exp(X[:,0]), np.exp(2*X[:,0])]).T
    pi = logits / logits.sum(axis=1, keepdims=True)
    # pi = np.array([np.ones(X.shape[0])*0.3, np.ones(X.shape[0])*0.5, np.ones(X.shape[0])*0.2]).T
    mu = np.array([X[:,0], 5*X[:,1], -2*X[:,1]*X[:,0]]).T
    sigma = np.ones((X.shape[0],3))
    true_gmm = GaussianMixtureModel(pi, mu, sigma)

    # Sample some observations of a third variable
    y = true_gmm.sample()
    truth = true_gmm.cdf(y)
    Xy = np.concatenate([X,y[:,np.newaxis]], 1)

    # Fit the calibrated model
    split = int(np.round(X.shape[0]*0.8))
    results = calibrate_continuous(Xy[:split], 2, X_test=Xy[split:], nbootstraps=100)
    print(results)

    # look at the bounds of the CDF
    (model, cdfs,
     ks_stat, ks_pvalue,
     upper, lower,
     qupper, qlower,
     quantile_range) = (results['model'],
                        results['cdfs'],
                        results['ks_stat'],
                        results['ks_pvalue'],
                        results['upper'],
                        results['lower'],
                        results['qupper'],
                        results['qlower'],
                        results['quantiles'])
    print('Quantile chosen: [{},{}] KS={} p={}'.format(lower, upper, ks_stat, ks_pvalue))

    plt.clf()
    plt.scatter(truth[split:], qlower, color='orange', label='{:.0f}% quantile'.format(lower))
    plt.scatter(truth[split:], qupper, color='blue', label='{:.0f}% quantile'.format(upper))
    for t,l,u in zip(truth[split:], qlower, qupper):
        plt.plot([t,t],[l,u], color='gray', alpha=0.5)
    plt.plot([0,1],[0,1], color='red')
    plt.xlabel('Truth')
    plt.ylabel('Estimated')
    plt.legend(loc='upper left')
    plt.savefig('plots/quantile-cdfs-scatter.pdf', bbox_inches='tight')

    # Plot the confidence bands
    ks_grid = np.linspace(1e-4,1-1e-4,101)
    qlower = qlower[np.argsort(qlower)]
    qupper = qupper[np.argsort(qupper)]
    q50 = cdfs[101//2]
    q50 = q50[np.argsort(q50)]
    plt.plot(truth[np.argsort(truth)], np.arange(len(truth)) / float(len(truth)), color='black', lw=3, label='Truth')
    plt.plot(qlower, np.arange(len(qlower)) / float(len(qlower)), color='orange', lw=3, label='{:.0f}% quantile'.format(lower))
    plt.plot(q50, np.arange(len(q50)) / float(len(q50)), color='green', lw=3, label='50% quantile')
    plt.plot(qupper, np.arange(len(qupper)) / float(len(qupper)), color='blue', lw=3, label='{:.0f}% quantile'.format(upper))
    plt.plot([0,1], [0,1], color='gray', lw=3, ls='--', label='U(0,1)')
    plt.xlabel('CDF value of observed X')
    plt.ylabel('CDF of CDF')
    plt.legend(loc='upper left')
    plt.savefig('plots/quantile-cdfs-bands.pdf', bbox_inches='tight')
| 40.278132 | 132 | 0.600706 | 2,703 | 18,971 | 4.118757 | 0.153533 | 0.010779 | 0.011318 | 0.010779 | 0.358214 | 0.316087 | 0.27405 | 0.262014 | 0.215755 | 0.206054 | 0 | 0.023734 | 0.249328 | 18,971 | 470 | 133 | 40.36383 | 0.758023 | 0.18154 | 0 | 0.201299 | 0 | 0 | 0.052545 | 0.003951 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.042208 | 0.019481 | 0.175325 | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
12638715ec5a1d40ed4f3618c30418ac709c540c | 1,144 | py | Python | ars2/utils.py | Stranger469/ARS2 | b6d62b66997180abe01b676d5359c20daa42b7ad | [
"Apache-2.0"
] | null | null | null | ars2/utils.py | Stranger469/ARS2 | b6d62b66997180abe01b676d5359c20daa42b7ad | [
"Apache-2.0"
] | null | null | null | ars2/utils.py | Stranger469/ARS2 | b6d62b66997180abe01b676d5359c20daa42b7ad | [
"Apache-2.0"
] | null | null | null | import numpy as np
from typing import List
from wrench.basemodel import BaseClassModel
from wrench.dataset import BaseDataset
from sklearn.metrics import f1_score
from snorkel.utils import probs_to_preds
def calc_prior(labels: List, n_class: int):
    return [labels.count(i) for i in range(n_class)]

def create_unbalanced_set(data: BaseDataset, imbalance_ratio: int):
    miu = (1 / imbalance_ratio) ** (1 / (data.n_class-1))
    ids = np.argsort(data.labels)
    prior = np.array(calc_prior(data.labels, data.n_class))
    imbalance_list = np.array([int(n * miu ** i) for (i, n) in enumerate(prior)])  # n_i * μ^i
    print(imbalance_list)
    prior_cumsum = np.cumsum(prior)
    prior_cumsum = np.insert(prior_cumsum, 0, 0)
    sampled_ids = np.concatenate([np.random.choice(ids[prior_cumsum[i]:prior_cumsum[i + 1]], n)
                                  for i, n in enumerate(imbalance_list)])
    return sampled_ids
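
# Worked example of the decay factor: with imbalance_ratio=100 and n_class=3,
# miu = (1/100)**(1/2) = 0.1, so the classes keep n_0, 0.1*n_1 and 0.01*n_2
# samples respectively, making the last class 100x rarer than the first.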

def calc_f1(data: BaseDataset, model: BaseClassModel):
    probas = model.predict_proba(data)
    y_pred = probs_to_preds(probas)
    y_true = np.array(data.labels)
    return f1_score(y_true, y_pred, average=None)
| 33.647059 | 95 | 0.704545 | 175 | 1,144 | 4.417143 | 0.365714 | 0.071151 | 0.031048 | 0.018111 | 0.041397 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009657 | 0.185315 | 1,144 | 33 | 96 | 34.666667 | 0.819742 | 0.007867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0.041667 | 0.5 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
126ad831faaee91fb0ce16251134cb082143384a | 5,462 | py | Python | program_helper/ast/parser/ast_similarity_checker.py | jajajaqlt/nsg | 1873f2b5e10441110c3c69940ceb4650f9684ac0 | [
"Apache-2.0"
] | 10 | 2021-11-02T18:30:38.000Z | 2022-03-21T06:31:33.000Z | program_helper/ast/parser/ast_similarity_checker.py | rohanmukh/nag | f2c4b8e60a97c58a6a1c549cc8b4753ebfe8a5e3 | [
"Apache-2.0"
] | 2 | 2021-11-05T18:40:42.000Z | 2022-03-30T04:33:08.000Z | program_helper/ast/parser/ast_similarity_checker.py | rohanmukh/nag | f2c4b8e60a97c58a6a1c549cc8b4753ebfe8a5e3 | [
"Apache-2.0"
] | 2 | 2021-11-03T19:14:06.000Z | 2021-11-03T23:47:09.000Z | # Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from itertools import chain
from data_extraction.data_reader.utils import gather_calls
from program_helper.set.apicalls import ApiCalls
from collections import defaultdict
from utilities.basics import truncate_two_decimals
class MyDefaultDict:
    def __init__(self, types):
        self.vals = []
        for type in types:
            self.vals.append(defaultdict(type))

    def get_value(self, length):
        out = []
        for _dict in self.vals:
            out.append(_dict[length])
        return out

    def set_value(self, length, v1, v2, v3):
        vals = [v1, v2, v3]
        assert len(vals) == len(self.vals)
        for _dict, _val in zip(self.vals, vals):
            _dict[length] = _val
        return

    def keys(self):
        return self.vals[0].keys()

    def get_item(self, k):
        out_ = []
        for val in self.vals:
            out_.append(val[k])
        return out_

    def get_item_at_id(self, k, id=2):
        return self.vals[id][k]
class AstSimilarityChecker:
    def __init__(self,
                 logger=None):
        self.logger = logger
        self.sum_jaccard = 0.
        self.count = 0
        self.max_jaccard = 0.
        self.min_jaccard = None
        self.min_distance_count = 0
        self.min_distance_like_bayou = 0.0
        self.min_distance_by_prog_length = MyDefaultDict((int, float, float))
        return

    def reset_stats(self):
        self.min_distance_count = 0
        self.min_distance_like_bayou = 0.0
        self.sum_jaccard = 0.
        self.count = 0
        self.max_jaccard = 0.
        self.min_jaccard = 0.

    def check_similarity_for_all_beams(self, real_ast_json, predicted_ast_jsons):
        min_distance = 1.0
        for pred_ast_json in predicted_ast_jsons:
            distance = 1 - self.check_similarity(real_ast_json, pred_ast_json)
            min_distance = min(distance, min_distance)
        self.update_min_distance_stat(min_distance)
        self.update_min_distance_by_length_stat(min_distance, length=len(gather_calls(real_ast_json['ast'])))
        return min_distance

    def update_min_distance_stat(self, min_distance):
        self.min_distance_count += 1
        self.min_distance_like_bayou += min_distance

    def update_min_distance_by_length_stat(self, min_distance, length):
        curr_count, curr_distance, curr_avg_distance = self.min_distance_by_prog_length.get_value(length)
        new_count, new_distance = curr_count + 1, curr_distance + min_distance
        new_avg_distance = new_distance/new_count
        self.min_distance_by_prog_length.set_value(length, new_count, new_distance, new_avg_distance)

    def check_similarity(self, real_ast, pred_ast):
        calls = gather_calls(real_ast['ast'])
        apicalls1 = list(set(chain.from_iterable([ApiCalls.from_call(call)
                                                  for call in calls])))
        calls = gather_calls(pred_ast['ast'])
        apicalls2 = list(set(chain.from_iterable([ApiCalls.from_call(call)
                                                  for call in calls])))
        distance = AstSimilarityChecker.get_jaccard_similarity(set(apicalls1), set(apicalls2))
        self.update_statistics(distance)
        return distance

    @staticmethod
    def get_jaccard_similarity(setA, setB):
        if (len(setA) == 0) and (len(setB) == 0):
            return 0
        setA = set(setA)
        setB = set(setB)
        sim = len(setA & setB) / len(setA | setB)
        return sim
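
    # Example: for setA = {'a', 'b'} and setB = {'b', 'c'} the intersection
    # holds one element and the union three, so the similarity is 1/3.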

    def update_statistics(self, curr_distance):
        self.sum_jaccard += curr_distance
        self.count += 1
        if curr_distance > self.max_jaccard:
            self.max_jaccard = curr_distance
        if self.min_jaccard is None or curr_distance < self.min_jaccard:
            self.min_jaccard = curr_distance

    def print_stats(self):
        avg_similarity = self.sum_jaccard / (self.count + 0.00001)
        self.logger.info('')
        self.logger.info('\tAverage Jaccard Similarity :: {0:0.4f}'.format(avg_similarity))
        self.logger.info('\tMaximum Jaccard Similarity :: {0:0.4f}'.format(self.max_jaccard))
        self.logger.info('\tMinimum Jaccard Similarity :: {0:0.4f}'.format(self.min_jaccard))
        avg_min_bayou = self.min_distance_like_bayou / (self.min_distance_count + 0.00001)
        self.logger.info('\tMaximum Jaccard Similarity amongst all beams :: {0:0.4f}'.format(1-avg_min_bayou))
        keys = self.min_distance_by_prog_length.keys()
        for k in sorted(keys):
            val = self.min_distance_by_prog_length.get_item_at_id(k, id=2)
            count = self.min_distance_by_prog_length.get_item_at_id(k, id=0)
            print("Length of program {} :: Average Maximum Jaccard Similarity amongst all beams :: {} count :: {}".
                  format(k, truncate_two_decimals(1-val), count))
| 36.172185 | 115 | 0.658184 | 740 | 5,462 | 4.6 | 0.224324 | 0.093713 | 0.070505 | 0.029965 | 0.312867 | 0.269389 | 0.15658 | 0.118684 | 0.118684 | 0.118684 | 0 | 0.017086 | 0.249908 | 5,462 | 150 | 116 | 36.413333 | 0.813766 | 0.100879 | 0 | 0.133333 | 0 | 0 | 0.057382 | 0 | 0 | 0 | 0 | 0 | 0.009524 | 1 | 0.142857 | false | 0 | 0.057143 | 0.019048 | 0.314286 | 0.019048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
126d523eba5326c29ad99290557c03112dd4b6ae | 3,231 | py | Python | sk_pymc3/model/base_model.py | drewblasius/pymc3-sklearn | a46c325179459a2eae394f72c4be186d3e074214 | [
"MIT"
] | 1 | 2020-07-13T12:45:20.000Z | 2020-07-13T12:45:20.000Z | sk_pymc3/model/base_model.py | drewblasius/pymc3-sklearn | a46c325179459a2eae394f72c4be186d3e074214 | [
"MIT"
] | null | null | null | sk_pymc3/model/base_model.py | drewblasius/pymc3-sklearn | a46c325179459a2eae394f72c4be186d3e074214 | [
"MIT"
] | null | null | null | import logging
import numpy as np
import pandas as pd
import pymc3 as pm
import theano as tt
from abc import ABC, abstractmethod
from contextlib import contextmanager
from sklearn.base import BaseEstimator
from typing import List, Optional, Tuple, Union
logger = logging.getLogger(__name__)
class BasePyMC3Model(ABC, BaseEstimator):
    @abstractmethod
    def model_block(self) -> pm.backends.base.MultiTrace:
        """
        Abstract method that contains all of the model information.
        *MUST* return a multi-trace object.
        """
        pass

    def fit_model(self) -> pm.backends.base.MultiTrace:
        return self.trace

    def _init_shared(self, X: pd.DataFrame, y: Optional[Union[pd.Series, np.ndarray]]):
        self.y = tt.shared(y) if y is not None else None
        self.X = {}
        self.size = {}
        for x in X:
            self.X[x] = tt.shared(X[x].values)
            self.size[x] = X[x].nunique() + 1  # +1 for non-observed cases
        self.X_ = X.copy()

    def _set_shared(self, X):
        for x in self.X_:
            logger.debug(
                f"setting {x} to shared value (old shape {self.X_[x].shape}, new shape {X[x].shape})"
            )
            self.X[x].set_value(X[x].values)

    def _reset_shared(self):
        self._set_shared(self.X_)

    @contextmanager
    def _data_context(self, X: pd.DataFrame, *args, **kwargs):
        try:
            self._set_shared(X)
            yield None
        finally:
            self._reset_shared()
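
    # _data_context temporarily points the theano shared variables at new data
    # so posterior-predictive sampling sees X, then restores the training data
    # afterwards; this is the usual shared-variable trick for out-of-sample
    # prediction with a compiled pymc3/theano graph.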

    def _init_model_context(self):
        self.model = pm.Model()

    def fit(self, X: pd.DataFrame, y: Union[pd.Series, np.ndarray]):
        self._init_shared(X, y)
        self._mean_trace = {}
        self._init_model_context()
        self.trace = self.model_block()
        return self

    @staticmethod
    def _rep_frame_if_singleton(X: pd.DataFrame) -> Tuple[pd.DataFrame, bool]:
        if X.shape[0] > 1:
            return X, False
        return pd.concat([X] * 2), True

    @staticmethod
    def _post_unrep_ppc(ppc_dict: dict):
        dk = list(ppc_dict)
        for k in dk:
            ppc_dict[k] = ppc_dict[k][..., :1]
        return ppc_dict

    def predict(self, X, mean=False, fast=True, **kwargs):
        X, rep = self._rep_frame_if_singleton(X)
        with self._data_context(X):
            if fast:
                sample_ppc = pm.fast_sample_posterior_predictive
            else:
                sample_ppc = pm.sample_posterior_predictive
            ppc = sample_ppc(
                trace=self.trace,
                model=self.model,
                **kwargs
            )
        # Theano broadcasts shared things incorrectly if singletons.
        if rep:
            ppc = self._post_unrep_ppc(ppc)
        if len(ppc) > 1:  # multi-response, deal with later
            logger.warning(
                "multiple responses found in pymc3 model context "
                "returning dict of arrays rather than arrays themselves."
            )
            return ppc
        k = list(ppc)[0]
        if mean:
            return ppc[k].mean(axis=0)
        return ppc[k]
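
# Minimal subclass sketch (illustrative only; the names "LinearModel", "beta",
# "x" and "y_obs" are assumptions, not part of this module). model_block builds
# the model inside self.model and must return the trace from pm.sample():
#
# class LinearModel(BasePyMC3Model):
#     def model_block(self):
#         with self.model:
#             beta = pm.Normal("beta", mu=0.0, sigma=1.0)
#             mu = beta * self.X["x"]  # theano shared feature column
#             pm.Normal("y_obs", mu=mu, sigma=1.0, observed=self.y)
#             return pm.sample(draws=500, tune=500)
#
# model = LinearModel().fit(pd.DataFrame({"x": [0.0, 1.0, 2.0]}), np.array([0.0, 1.1, 1.9]))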
| 29.108108 | 101 | 0.561436 | 407 | 3,231 | 4.297297 | 0.321867 | 0.034305 | 0.027444 | 0.027444 | 0.104059 | 0.029731 | 0 | 0 | 0 | 0 | 0 | 0.005629 | 0.340142 | 3,231 | 110 | 102 | 29.372727 | 0.814728 | 0.065924 | 0 | 0.02439 | 0 | 0.012195 | 0.061997 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134146 | false | 0.012195 | 0.109756 | 0.012195 | 0.353659 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
126ee51683c13a2a0d3c8f7660aaea573d956c4c | 2,448 | py | Python | main.py | prostodrug95/ping-pong | 66f7d0d5bf83d9400e1933c78da2e2fc262b5fcb | [
"CC0-1.0"
] | 1 | 2021-06-04T16:57:30.000Z | 2021-06-04T16:57:30.000Z | main.py | prostodrug95/ping-pong | 66f7d0d5bf83d9400e1933c78da2e2fc262b5fcb | [
"CC0-1.0"
] | null | null | null | main.py | prostodrug95/ping-pong | 66f7d0d5bf83d9400e1933c78da2e2fc262b5fcb | [
"CC0-1.0"
] | null | null | null | from pygame import *
font.init()
speed_x = 1
speed_y = 1
w,h = 500,500
window = display.set_mode((w,h))
background = transform.scale(image.load("amogus.png"), (w,h))
font1 = font.Font(None,35)
lose1 = font1.render('FIRST BOT LOST',True,(180,0,0))
font2 = font.Font(None,35)
lose2 = font1.render('SECOND BOT LOST',True,(180,0,0))
class GameSprite(sprite.Sprite):
    def __init__(self,player_image,player_x, player_y,size_x, size_y, player_speed):
        super().__init__()
        self.image = transform.scale(image.load(player_image), (size_x,size_y))
        self.speed = player_speed
        self.size_x = size_x
        self.size_y = size_y
        self.rect = self.image.get_rect()
        self.rect.x = player_x
        self.rect.y = player_y

    def reset(self):
        window.blit(self.image, (self.rect.x,self.rect.y))

class Player(GameSprite):
    def update_l(self):
        keys = key.get_pressed()
        if keys[K_RIGHT] and self.rect.y > 5:
            self.rect.y -= self.speed
        if keys[K_LEFT] and self.rect.y < h - self.size_y - 5:
            self.rect.y += self.speed

    def update_r(self):
        keys = key.get_pressed()
        if keys[K_UP] and self.rect.y > 5:
            self.rect.y -= self.speed
        if keys[K_DOWN] and self.rect.y < h - self.size_y - 5:
            self.rect.y += self.speed
rocket_left = Player("page for igra.jpg",30, 30, 30, 100, 3)
rocket_right = Player("page for igra.jpg", w-30-30, h-100-30,30,100,3)
ball = GameSprite("cgfcbnt.jpg", w/2,h/2,15,15,1)
finish = False
game = True
while game:
    for e in event.get():
        if e.type == QUIT:
            game = False
    if not finish:
        window.blit(background,(0,0))
        ball.rect.x += speed_x
        ball.rect.y += speed_y
        if ball.rect.y > h-15 or ball.rect.y < 0:
            speed_y *= -1
        if ball.rect.x < 0:
            finish = True
            window.blit(lose1,(200,200))
        if ball.rect.x > w:
            finish = True
            window.blit(lose2,(200,200))
        if sprite.collide_rect(rocket_left,ball) or sprite.collide_rect(rocket_right,ball):
            speed_x *= -1
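        # paddle hits mirror the ball horizontally, just as the wall check
        # above mirrors it vertically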
        rocket_left.update_l()
        rocket_right.update_r()
        rocket_left.reset()
        rocket_right.reset()
        ball.reset()
    display.update()
    time.delay(2)
| 29.853659 | 92 | 0.566176 | 369 | 2,448 | 3.609756 | 0.243902 | 0.078078 | 0.067568 | 0.036036 | 0.214715 | 0.184685 | 0.160661 | 0.160661 | 0.118619 | 0.118619 | 0 | 0.050729 | 0.299428 | 2,448 | 81 | 93 | 30.222222 | 0.725948 | 0 | 0 | 0.121212 | 0 | 0 | 0.03591 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.015152 | 0 | 0.106061 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89bf8ee8a7e5c3c278485bd4901ccfbb359df2a4 | 6,922 | py | Python | clumioapi/models/mssql_database_backup.py | clumio-code/clumio-python-sdk | 63bfaf3afed5c0ab4bae3dd1be52271249d07c51 | [
"Apache-2.0"
] | null | null | null | clumioapi/models/mssql_database_backup.py | clumio-code/clumio-python-sdk | 63bfaf3afed5c0ab4bae3dd1be52271249d07c51 | [
"Apache-2.0"
] | 1 | 2021-09-16T05:56:05.000Z | 2021-09-16T05:56:05.000Z | clumioapi/models/mssql_database_backup.py | clumio-code/clumio-python-sdk | 63bfaf3afed5c0ab4bae3dd1be52271249d07c51 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2021. Clumio, Inc.
#
from typing import Any, Dict, Mapping, Optional, Sequence, Type, TypeVar
from clumioapi.models import mssql_database_backup_embedded
from clumioapi.models import mssql_database_backup_links
from clumioapi.models import mssql_database_file
T = TypeVar('T', bound='MssqlDatabaseBackup')
class MssqlDatabaseBackup:
"""Implementation of the 'MssqlDatabaseBackup' model.
Attributes:
embedded:
Embedded responses related to the resource.
links:
URLs to pages related to the resource.
database_files:
List of database files at the time of backup.
database_id:
The Clumio-assigned ID of the database associated with this backup.
engine:
The Microsoft SQL database engine at the time of backup.
engine_version:
The Microsoft SQL database engine version at the time of backup.
expiration_timestamp:
The timestamp of when this backup expires. Represented in RFC-3339 format.
group_id:
The Clumio-assigned ID of the management group associated with the database at
the time of backup.
host_endpoint:
The user-provided endpoint of the host containing the given database at the time
of backup.
host_id:
The Clumio-assigned ID of the host associated with the database at the time of
backup.
id:
The Clumio-assigned ID of the backup.
instance_id:
The Clumio-assigned instance id at the time of backup.
instance_name:
The instance name at the time of backup.
start_timestamp:
The timestamp of when this backup started. Represented in RFC-3339 format.
subgroup_id:
The Clumio-assigned ID of the management subgroup associated with the database
at the time of backup.
type:
The type of backup. Possible values include `mssql_database_backup`,
`mssql_log_backup_full_recovery_model` and `mssql_log_backup_bulk_logged_model`.
"""
# Create a mapping from Model property names to API property names
_names = {
'embedded': '_embedded',
'links': '_links',
'database_files': 'database_files',
'database_id': 'database_id',
'engine': 'engine',
'engine_version': 'engine_version',
'expiration_timestamp': 'expiration_timestamp',
'group_id': 'group_id',
'host_endpoint': 'host_endpoint',
'host_id': 'host_id',
'id': 'id',
'instance_id': 'instance_id',
'instance_name': 'instance_name',
'start_timestamp': 'start_timestamp',
'subgroup_id': 'subgroup_id',
'type': 'type',
}

    def __init__(
        self,
        embedded: mssql_database_backup_embedded.MssqlDatabaseBackupEmbedded = None,
        links: mssql_database_backup_links.MssqlDatabaseBackupLinks = None,
        database_files: Sequence[mssql_database_file.MssqlDatabaseFile] = None,
        database_id: str = None,
        engine: str = None,
        engine_version: str = None,
        expiration_timestamp: str = None,
        group_id: str = None,
        host_endpoint: str = None,
        host_id: str = None,
        id: str = None,
        instance_id: str = None,
        instance_name: str = None,
        start_timestamp: str = None,
        subgroup_id: str = None,
        type: str = None,
    ) -> None:
        """Constructor for the MssqlDatabaseBackup class."""

        # Initialize members of the class
        self.embedded: mssql_database_backup_embedded.MssqlDatabaseBackupEmbedded = embedded
        self.links: mssql_database_backup_links.MssqlDatabaseBackupLinks = links
        self.database_files: Sequence[mssql_database_file.MssqlDatabaseFile] = database_files
        self.database_id: str = database_id
        self.engine: str = engine
        self.engine_version: str = engine_version
        self.expiration_timestamp: str = expiration_timestamp
        self.group_id: str = group_id
        self.host_endpoint: str = host_endpoint
        self.host_id: str = host_id
        self.id: str = id
        self.instance_id: str = instance_id
        self.instance_name: str = instance_name
        self.start_timestamp: str = start_timestamp
        self.subgroup_id: str = subgroup_id
        self.type: str = type
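
    # Illustrative usage (the response dict below is hypothetical):
    #   backup = MssqlDatabaseBackup.from_dictionary(api_response_body)
    #   print(backup.database_id, backup.start_timestamp)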

    @classmethod
    def from_dictionary(cls: Type, dictionary: Mapping[str, Any]) -> Optional[T]:
        """Creates an instance of this model from a dictionary

        Args:
            dictionary: A dictionary representation of the object as obtained
                from the deserialization of the server's response. The keys
                MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.
        """
        if not dictionary:
            return None

        # Extract variables from the dictionary
        key = '_embedded'
        embedded = (
            mssql_database_backup_embedded.MssqlDatabaseBackupEmbedded.from_dictionary(
                dictionary.get(key)
            )
            if dictionary.get(key)
            else None
        )

        key = '_links'
        links = (
            mssql_database_backup_links.MssqlDatabaseBackupLinks.from_dictionary(
                dictionary.get(key)
            )
            if dictionary.get(key)
            else None
        )

        database_files = None
        if dictionary.get('database_files'):
            database_files = list()
            for value in dictionary.get('database_files'):
                database_files.append(mssql_database_file.MssqlDatabaseFile.from_dictionary(value))

        database_id = dictionary.get('database_id')
        engine = dictionary.get('engine')
        engine_version = dictionary.get('engine_version')
        expiration_timestamp = dictionary.get('expiration_timestamp')
        group_id = dictionary.get('group_id')
        host_endpoint = dictionary.get('host_endpoint')
        host_id = dictionary.get('host_id')
        id = dictionary.get('id')
        instance_id = dictionary.get('instance_id')
        instance_name = dictionary.get('instance_name')
        start_timestamp = dictionary.get('start_timestamp')
        subgroup_id = dictionary.get('subgroup_id')
        type = dictionary.get('type')

        # Return an object of this model
        return cls(
            embedded,
            links,
            database_files,
            database_id,
            engine,
            engine_version,
            expiration_timestamp,
            group_id,
            host_endpoint,
            host_id,
            id,
            instance_id,
            instance_name,
            start_timestamp,
            subgroup_id,
            type,
)
| 36.819149 | 99 | 0.625397 | 758 | 6,922 | 5.501319 | 0.168865 | 0.059233 | 0.041007 | 0.023741 | 0.351319 | 0.278657 | 0.197842 | 0.074341 | 0.057074 | 0.026859 | 0 | 0.002491 | 0.304103 | 6,922 | 187 | 100 | 37.016043 | 0.863193 | 0.314793 | 0 | 0.052174 | 0 | 0 | 0.116264 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017391 | false | 0 | 0.034783 | 0 | 0.086957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89c23a647647ff32bdfe12c1a122bd1e6b3b7682 | 5,351 | py | Python | tests/dku_config/test_dss_parameter.py | Muennighoff/dss-plugin-dkulib | 8d9a954841c23f163f1992822a2a8e4171695f73 | [
"Apache-2.0"
] | null | null | null | tests/dku_config/test_dss_parameter.py | Muennighoff/dss-plugin-dkulib | 8d9a954841c23f163f1992822a2a8e4171695f73 | [
"Apache-2.0"
] | null | null | null | tests/dku_config/test_dss_parameter.py | Muennighoff/dss-plugin-dkulib | 8d9a954841c23f163f1992822a2a8e4171695f73 | [
"Apache-2.0"
] | null | null | null | import logging
import pytest
from dkulib.dku_config.custom_check import CustomCheckError
from dkulib.dku_config.dss_parameter import DSSParameter, DSSParameterError
LOGGER = logging.getLogger(__name__)
class TestDSSParameter:
    def test_nominal_case(self):
        dss_parameter = DSSParameter(
            name='test',
            value=3,
            checks=[{
                "type": "inf",
                "op": 4
            }],
            required=True
        )
        assert dss_parameter.value == 3
        assert dss_parameter.name == 'test'
        assert len(dss_parameter.checks) == 1

    def test_error(self):
        with pytest.raises(CustomCheckError):
            _ = DSSParameter(
                name='test',
                value=3,
                checks=[{
                    "type": "unknown_type",
                    "op": 4
                }],
                required=True
            )

    def test_success(self, caplog):
        caplog.set_level(logging.DEBUG)
        _ = DSSParameter(
            name='test',
            value=3,
            checks=[{
                "type": "inf",
                "op": 4
            }],
            required=True
        )
        assert 'All checks passed successfully' in caplog.text

    def test_failure(self, caplog):
        caplog.set_level(logging.INFO)
        with pytest.raises(DSSParameterError) as err:
            _ = DSSParameter(
                name='test',
                value=3,
                checks=[{
                    "type": "inf",
                    "op": 2
                }]
            )
        assert 'Validation error with parameter' in str(err.value)

    def test_double_failure(self, caplog):
        caplog.set_level(logging.INFO)
        with pytest.raises(DSSParameterError) as err:
            _ = DSSParameter(
                name='test',
                value=3,
                checks=[{
                    "type": "inf",
                    "op": 2
                }],
                required=True
            )
        error_message = str(err.value)
        assert 'Validation error with parameter' in error_message
        assert 'required' not in error_message
        assert 'less' in error_message

    def test_default(self, caplog):
        dss_parameter_1 = DSSParameter(
            name='test_1',
            value=None,
            default=4
        )
        assert dss_parameter_1.value == 4

        dss_parameter_2 = DSSParameter(
            name='test_2',
            value=3,
            default=4
        )
        assert dss_parameter_2.value == 3

        dss_parameter_3 = DSSParameter(
            name='test_2',
            value=None,
            default=4,
            required=True
        )
        assert dss_parameter_3.value == 4

    def test_cast(self, caplog):
        dss_parameter_1 = DSSParameter(
            name='test_1',
            value='4',
            cast_to=int
        )
        assert dss_parameter_1.value == 4

        dss_parameter_2 = DSSParameter(
            name='test_2',
            value=4,
            cast_to=int
        )
        assert dss_parameter_2.value == 4

        caplog.set_level(logging.INFO)
        with pytest.raises(DSSParameterError) as err:
            _ = DSSParameter(
                name='test_3',
                value='foo',
                cast_to=int
            )
        error_message = str(err.value)
        assert 'error with parameter' in error_message
        assert '<class \'int\'>' in error_message
        assert '<class \'str\'>' in error_message

        with pytest.raises(DSSParameterError) as err:
            _ = DSSParameter(
                name='test_4',
                value=[1, 2, 3],
                cast_to=float
            )
        error_message = str(err.value)
        assert '<class \'list\'>' in error_message
        assert '<class \'float\'>' in error_message

        dss_parameter_5 = DSSParameter(
            name='test_5',
            value=None,
            cast_to=int,
            default=5
        )
        assert dss_parameter_5.value == 5

        with pytest.raises(DSSParameterError) as err:
            _ = DSSParameter(
                name='test_6',
                value=None,
                cast_to=str,
                required=True
            )
        error_message = str(err.value)
        assert 'required' in error_message

        dss_parameter_7 = DSSParameter(
            name='test_5',
            value=None,
            cast_to=int,
        )
        assert dss_parameter_7.value is None

    def test_label(self, caplog):
        with pytest.raises(DSSParameterError) as err:
            _ = DSSParameter(
                name='test_1',
                value=7,
                label='Display Name',
                checks=[{
                    "type": "is_type",
                    "op": str
                }]
            )
        error_message = str(err.value)
        assert 'Display Name' in error_message

        with pytest.raises(DSSParameterError) as err:
            _ = DSSParameter(
                name='test_1',
                value=7,
                checks=[{
                    "type": "is_type",
                    "op": str
                }]
            )
        error_message = str(err.value)
        assert 'test_1' in error_message
| 28.015707 | 75 | 0.484395 | 509 | 5,351 | 4.88998 | 0.147348 | 0.091603 | 0.136601 | 0.092808 | 0.695862 | 0.623945 | 0.545199 | 0.500201 | 0.451989 | 0.397348 | 0 | 0.018506 | 0.424407 | 5,351 | 190 | 76 | 28.163158 | 0.78961 | 0 | 0 | 0.579882 | 0 | 0 | 0.070641 | 0 | 0 | 0 | 0 | 0 | 0.136095 | 1 | 0.047337 | false | 0.005917 | 0.023669 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89c46bb5cab509b8a596d2ce23c543ef5071170a | 1,525 | py | Python | code/src04_train_Model_for_Classification_Cervix_Type/threading_test_v1.py | gakarak/Challenge_Cervical_Cancer_Screening- | 7cb7cb308b43de4f85a09053723e50c368c05891 | [
"Apache-2.0"
] | null | null | null | code/src04_train_Model_for_Classification_Cervix_Type/threading_test_v1.py | gakarak/Challenge_Cervical_Cancer_Screening- | 7cb7cb308b43de4f85a09053723e50c368c05891 | [
"Apache-2.0"
] | null | null | null | code/src04_train_Model_for_Classification_Cervix_Type/threading_test_v1.py | gakarak/Challenge_Cervical_Cancer_Screening- | 7cb7cb308b43de4f85a09053723e50c368c05891 | [
"Apache-2.0"
] | 2 | 2017-06-27T07:14:06.000Z | 2021-07-20T15:21:58.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
# import numpy as np
import multiprocessing as mp
import multiprocessing.pool
from sklearn.cluster import KMeans
import math
import numpy as np
from concurrent.futures import ThreadPoolExecutor
import threading
def my_fun(params):
    idx = params[0]
    val = params[1]
    print("::Random {0} -> {1}".format(idx, val))
    tmat = np.random.random( (220, 220) )
    print("::Eigen {0} -> {1}".format(idx, val))
    tret = np.linalg.eig(tmat)
    print("::Mean {0} -> {1}".format(idx, val))
    tret = np.mean(tret[1])
    # tdat = np.random.random((10000,100))
    # print("::KMeans {0} -> {1}".format(idx, val))
    # km = KMeans(n_clusters=100, n_jobs=1).fit(tdat)
    print('{1} : ret = {0}'.format(tret, idx))
    return 0
def map_fun(data):
    return np.sum(data)
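
# np.linalg.eig runs in native LAPACK code that largely releases the GIL,
# which is why the plain threading.Thread variant below can overlap the
# eigen work in my_fun even without using processes.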
if __name__ == '__main__':
    # threadPoolExecutor = ThreadPoolExecutor(max_workers=4)
    p0 = mp.Pool(processes=6)
    ress = p0.map(map_fun, [np.random.random(10) for xx in range(100)])
    #
    tmp = []
    pool = mp.pool.ThreadPool(processes=4)
    print ('----- RUN THREADS -----')
    for iidx in range(16):
        # pool.apply_async(my_fun, [(iidx, float(iidx)/2.)])
        # tmp.append(threadPoolExecutor.submit(my_fun, [iidx, float(iidx)/2.]))
        t = threading.Thread(target=my_fun, args = [(iidx, float(iidx)/2.)])
        tmp.append(t)
        t.start()
    pool.close()
    pool.join()
    print ('----- WAIT THREADS -----')
    # for tt in tmp:
    #     tt.join()
| 28.773585 | 79 | 0.601967 | 214 | 1,525 | 4.186916 | 0.425234 | 0.022321 | 0.035714 | 0.049107 | 0.154018 | 0.122768 | 0.044643 | 0 | 0 | 0 | 0 | 0.0399 | 0.211148 | 1,525 | 52 | 80 | 29.326923 | 0.704904 | 0.257705 | 0 | 0 | 0 | 0 | 0.112601 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.205882 | 0.029412 | 0.323529 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89c5c5bc48949e63b438dc53999fcd0b926a88dd | 6,187 | py | Python | src/UI_statistics_window.py | GatherLab/OLED-evaluation | 419dfd5d2c3773f5f90d76aef634f8b1cc0b6378 | [
"MIT"
] | null | null | null | src/UI_statistics_window.py | GatherLab/OLED-evaluation | 419dfd5d2c3773f5f90d76aef634f8b1cc0b6378 | [
"MIT"
] | null | null | null | src/UI_statistics_window.py | GatherLab/OLED-evaluation | 419dfd5d2c3773f5f90d76aef634f8b1cc0b6378 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from PySide2 import QtCore, QtGui, QtWidgets
import json
import core_functions as cf
import numpy as np
class Ui_Statistics(object):
def setupUi(self, Statistics):
# Note: this is not how it should be done but currently I don't know
# how to do it differently. This is only needed to be able to emit
# signals to the main window
Statistics.setObjectName("Statistics")
Statistics.setWindowTitle("Show Statistics")
Statistics.resize(400, 100)
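        # Dark-theme stylesheet applied to the dialog and all of its child
        # widgets (push buttons, line edits and spin boxes).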
Statistics.setStyleSheet(
"QWidget {\n"
" background-color: rgb(44, 49, 60);\n"
" color: rgb(255, 255, 255);\n"
' font: 63 10pt "Segoe UI";\n'
"}\n"
"QPushButton {\n"
" border: 2px solid rgb(52, 59, 72);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QPushButton:hover {\n"
" background-color: rgb(57, 65, 80);\n"
" border: 2px solid rgb(61, 70, 86);\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(43, 50, 61);\n"
"}\n"
"QPushButton:checked {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(85, 170, 255);\n"
"}"
"QLineEdit {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QDoubleSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
)
self.verticalLayout = QtWidgets.QVBoxLayout(Statistics)
self.verticalLayout.setContentsMargins(25, 10, 25, 10)
self.verticalLayout.setObjectName("verticalLayout")
# Define dialog in which parameters should be entered
# dialog = QtWidgets.QDialog()
# dialog.setWindowTitle("Show Group Dialog")
# Define all the layouts and labels so that the window looks good
# verticalLayout = QtWidgets.QVBoxLayout()
self.header_label = QtWidgets.QLabel()
self.header_label.setObjectName("header_label")
self.verticalLayout.addWidget(self.header_label)
# In a grid layout put buttons for all the valid devices
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.statistics_by_group_pushButton = QtWidgets.QPushButton("Group")
self.statistics_by_device_pushButton = QtWidgets.QPushButton("Device")
# self.statistics_by_group_pushButtonAvg = QtWidgets.QPushButton("Avg Group")
# self.statistics_by_group_pushButtonAvg.clicked.connect(
# functools.partial(self.showOverview, "groups", "forward", avg=True)
# )
# if self.groupsAssigned == True:
# self.statistics_by_group_pushButton.setEnabled(True)
# self.statistics_by_group_pushButtonAvg.setEnabled(True)
# elif self.groupsAssigned == False:
# self.statistics_by_group_pushButton.setEnabled(False)
# self.statistics_by_group_pushButtonAvg.setEnabled(False)
self.horizontalLayout.addWidget(self.statistics_by_device_pushButton)
self.horizontalLayout.addWidget(self.statistics_by_group_pushButton)
# self.horizontalLayout.addWidget(self.statistics_by_group_pushButtonAvg)
self.verticalLayout.addLayout(self.horizontalLayout)
        # If there is dual data it must be possible to plot it separately
# if self.isDual == True:
# verticalLayout.addWidget(QtWidgets.QLabel("Show Overview of Reverse Data:"))
# self.horizontalLayoutRev = QtWidgets.QHBoxLayout()
#
# if self.multipleFoldersLoaded == False:
# self.buttonOverviewDevicesRev = QtWidgets.QPushButton("Devices")
# self.buttonOverviewDevicesRev.clicked.connect(
# functools.partial(self.showOverview, "devices", "reverse")
# )
# self.horizontalLayoutRev.addWidget(self.buttonOverviewDevicesRev)
#
# self.statistics_by_group_pushButtonRev = QtWidgets.QPushButton("Group")
# self.statistics_by_group_pushButtonRevAvg = QtWidgets.QPushButton(
# "Avg Group"
# )
# self.statistics_by_group_pushButtonRev.clicked.connect(
# functools.partial(self.showOverview, "groups", "reverse")
# )
# self.statistics_by_group_pushButtonRevAvg.clicked.connect(
# functools.partial(self.showOverview, "groups", "reverse", avg=True)
# )
#
# self.horizontalLayoutRev.addWidget(self.statistics_by_group_pushButtonRev)
# self.horizontalLayoutRev.addWidget(
# self.statistics_by_group_pushButtonRevAvg
# )
# verticalLayout.addLayout(self.horizontalLayoutRev)
#
# if self.groupsAssigned == True:
# self.statistics_by_group_pushButtonRev.setEnabled(True)
# elif self.groupsAssigned == False:
# self.statistics_by_group_pushButtonRev.setEnabled(False)
# Add an exit button to the dialog
self.close_pushButton = QtWidgets.QPushButton("Close")
self.verticalLayout.addWidget(self.close_pushButton)
self.setLayout(self.verticalLayout)
self.retranslateUi(Statistics)
QtCore.QMetaObject.connectSlotsByName(Statistics)
def retranslateUi(self, Statistics):
_translate = QtCore.QCoreApplication.translate
Statistics.setWindowTitle(_translate("Statistics", "Show Statistics"))
self.header_label.setText(_translate("Statistics", "Show Statistics"))
| 42.965278 | 86 | 0.607564 | 618 | 6,187 | 5.970874 | 0.288026 | 0.079675 | 0.082385 | 0.096748 | 0.452575 | 0.376694 | 0.288076 | 0.245257 | 0.12981 | 0.123035 | 0 | 0.029932 | 0.287215 | 6,187 | 143 | 87 | 43.265734 | 0.806803 | 0.377727 | 0 | 0.304348 | 0 | 0 | 0.318397 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.057971 | 0 | 0.101449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89c8316841df2172a19b9834e4e82b01d6035805 | 1,336 | py | Python | icv/data/converter/coco_converter.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | [
"MIT"
] | 5 | 2019-09-10T04:02:19.000Z | 2020-07-24T07:46:08.000Z | icv/data/converter/coco_converter.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | [
"MIT"
] | null | null | null | icv/data/converter/coco_converter.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | [
"MIT"
] | 1 | 2020-03-20T03:44:04.000Z | 2020-03-20T03:44:04.000Z | # -*- coding: UTF-8 -*-
from ..voc import Voc
from ..coco import Coco
from ..core.sample import Sample
from icv.utils import reset_dir, mkfile
import os
class CocoConverter(object):
def __init__(self, coco):
assert isinstance(coco, Coco)
self.coco = coco
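    # Convert the wrapped Coco dataset into a Pascal VOC directory layout.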
    def to_voc(self, voc_root, split=None, reset=False):
split = split if split is not None else self.coco.split
if reset:
reset_dir(voc_root)
anno_path, image_path, imgset_path, imgset_seg_path, \
seg_class_image_path, seg_object_image_path = Voc.reset_dir(voc_root)
setfile = os.path.join(imgset_seg_path, "%s.txt" % split) if self.coco.is_seg_mode else os.path.join(
imgset_path, "%s.txt" % split)
mkfile(setfile)
voc = Voc(
voc_root,
split,
keep_no_anno_image=self.coco.keep_no_anno_image,
mode="detect" if not self.coco.is_seg_mode else "segment",
categories=self.coco.categories,
one_index=self.coco.one_index,
)
for sample in self.coco.get_samples():
assert isinstance(sample, Sample)
voc.sample_db[sample.name] = sample
voc.ids.append(sample.name)
voc.save(voc_root, reset_dir=reset, split=split)
voc.init()
return voc
| 31.809524 | 109 | 0.626497 | 186 | 1,336 | 4.274194 | 0.317204 | 0.090566 | 0.030189 | 0.037736 | 0.05283 | 0.05283 | 0 | 0 | 0 | 0 | 0 | 0.001032 | 0.274701 | 1,336 | 41 | 110 | 32.585366 | 0.819401 | 0.015719 | 0 | 0 | 0 | 0 | 0.01904 | 0 | 0 | 0 | 0 | 0 | 0.060606 | 1 | 0.060606 | false | 0 | 0.151515 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89c867ed9daeb13b7d633af74291b2198a878d31 | 2,097 | py | Python | source/game24_calc/game24.py | adrinamin/algorithm_exercises | 8a3c61afe4672aabaeb7a311ea9948165d9d6b10 | [
"MIT"
] | null | null | null | source/game24_calc/game24.py | adrinamin/algorithm_exercises | 8a3c61afe4672aabaeb7a311ea9948165d9d6b10 | [
"MIT"
] | null | null | null | source/game24_calc/game24.py | adrinamin/algorithm_exercises | 8a3c61afe4672aabaeb7a311ea9948165d9d6b10 | [
"MIT"
] | null | null | null | """ game24.py module
The 24 game is played as follows:
You are given a list of four integers, each between 1 and 9, in a fixed order.
By placing operators +,-,* and / between the numbers, and grouping them
with parentheses, determine whether it is possible to reach the value 24.
Example: 5,2,7,8 => ((5*2)-7)*8
Usage:
python3 game24.py <List_of_comma_separated_integer_numbers>
Example
python3 game24.py 5,2,7,8
"""
import sys
# C++ example: https://helloacm.com/the-24-game-algorithm-using-depth-first-search/
# trimming binary search tree: https://helloacm.com/how-to-trim-a-binary-search-tree-using-dfs-recursion/
def search(numbers: list):
    """Bruteforce all pairs of numbers in the list, apply the four operators
    on each pair, and recurse on the reduced list until one value remains.
    Args:
        numbers: list of numbers. 4 integers max.
    Returns:
        bool: True if the numbers can be combined to reach 24.
    """
    if len(numbers) == 0:
        return False
    if len(numbers) == 1:
        # Use a small tolerance because division produces floats.
        return abs(numbers[0] - 24) < 1e-6
    for i in range(len(numbers)):
        for j in range(len(numbers)):
            if i == j:
                continue
            # Keep the numbers that are not part of the picked pair so the
            # recursion sees the full reduced list.
            rest = [numbers[k] for k in range(len(numbers)) if k != i and k != j]
            for k in range(4):
                # + and * are commutative: evaluate each unordered pair once.
                if k in (0, 2) and j > i:
                    continue
                if k == 0:
                    candidate = numbers[i] + numbers[j]
                elif k == 1:
                    candidate = numbers[i] - numbers[j]
                elif k == 2:
                    candidate = numbers[i] * numbers[j]
                else:
                    if numbers[j] == 0:
                        continue
                    candidate = numbers[i] / numbers[j]
                if search(rest + [candidate]):
                    return True
    return False
def main(numbers: str):
    # Split the user-supplied argument string, not the built-in `str` type.
    _listNumbers = list(map(int, numbers.split(",")))
if search(_listNumbers):
print("Congrats! Your given numbers result in 24!")
else:
print("Unfortunately, your given numbers do not result in 24.")
if __name__ == "__main__":
main(sys.argv[1]) | 29.535211 | 105 | 0.552217 | 275 | 2,097 | 4.156364 | 0.421818 | 0.013123 | 0.073491 | 0.07699 | 0.114611 | 0.114611 | 0.114611 | 0.086614 | 0 | 0 | 0 | 0.041126 | 0.339056 | 2,097 | 71 | 106 | 29.535211 | 0.78355 | 0.37196 | 0 | 0.194444 | 0 | 0 | 0.082224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.027778 | 0 | 0.194444 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89d3da54bf6c144f8db6cbc9be5e6d799aa65800 | 2,881 | py | Python | addGateways.py | tjschuler/Prisma-Access-Azure-AD-scripts | 241b44c05c8db057b1fbe57c3d4d3ea3321df024 | [
"MIT"
] | null | null | null | addGateways.py | tjschuler/Prisma-Access-Azure-AD-scripts | 241b44c05c8db057b1fbe57c3d4d3ea3321df024 | [
"MIT"
] | null | null | null | addGateways.py | tjschuler/Prisma-Access-Azure-AD-scripts | 241b44c05c8db057b1fbe57c3d4d3ea3321df024 | [
"MIT"
] | null | null | null | import os
import json
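# Adds Prisma Access portal/gateway SAML endpoints (ReplyURLs and
# Identifier-URIs) to an Azure AD application through the `az` CLI.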
while True:
try:
filename = str(input("Enter the filename for the .csv: "))
stream = open(filename, 'r').read()
break
except Exception:
print("Couldn't open the file. Try again.")
# While the file is a CSV, it is easier to use a quote as the delimiter.
values = stream.split("\"")
# The portal is the second value in the file. It has leading whitespace that must be removed
portal = values[1].lstrip()
# The list of gateways is the fourth value. It is a comma separated list.
gateways = values[3].split(',')
# Put the portal as the first item in the list so it will be the default.
replyurls = ["https://"+portal+":443/SAML20/SP/ACS"]
# Put the portal as the first item in the list so it will be the default.
identifiers = ["https://"+portal+":443/SAML20/SP"]
# This section adds the gateways to a list in the correct format.
for g in gateways:
replyurls.append("https://"+g.lstrip()+":443/SAML20/SP/ACS")
identifiers.append("https://"+g.lstrip()+":443/SAML20/SP")
# The update command needs the objectId of the application. Find it using its name.
while True:
try:
appname = str(input("Enter the name of the application: "))
tempresult = os.popen('az ad app list --display-name "'+appname+'"').read()
result = json.loads(tempresult)
# Capture the objectId of the application for the update command later.
objectId = result[0]["objectId"]
# Capture the current list of gateways in the application.
originalreplyUrls = result[0]["replyUrls"]
originalidentifierUris = result[0]["identifierUris"]
        # The list command will return non-exact matches. This helps identify which
# application it found.
print('Modifying the application '+result[0]["displayName"])
break
except Exception:
print("Couldn't find a application with that name. Try again.")
# Remove gateways from the lists if they are already configured in the app.
newreplyUrls = list(set(replyurls) - set(originalreplyUrls))
newidentifierUris = list(set(identifiers) - set(originalidentifierUris))
replyCommand = ""
idCommand = ""
# Generate a list of the new gateways separated by a space.
for url in newreplyUrls:
replyCommand += url+" "
for uri in newidentifierUris:
idCommand += uri+" "
# Add the new values to the ReplyURLs and IdentifierUris lists.
if replyCommand != "":
print("Updating the ReplyURLs...")
print(os.popen('az ad app update --id ' + objectId + ' --add replyUrls ' + replyCommand).read())
else:
print("No new ReplyUrls...")
if idCommand != "":
print("Updating the Identifier-URIs")
print(os.popen('az ad app update --id ' + objectId + ' --add identifierUris ' + idCommand).read())
else:
print("No new IdentifierUris...")
| 41.753623 | 103 | 0.663311 | 386 | 2,881 | 4.950777 | 0.362694 | 0.015699 | 0.023025 | 0.017268 | 0.238619 | 0.161172 | 0.127682 | 0.097331 | 0.097331 | 0.097331 | 0 | 0.011581 | 0.220757 | 2,881 | 68 | 104 | 42.367647 | 0.839644 | 0.32454 | 0 | 0.212766 | 0 | 0 | 0.287708 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.042553 | 0 | 0.042553 | 0.191489 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89d51bd707cac8f01e37c01f5477d44d15ccb882 | 1,390 | py | Python | plugin/content/config.py | zhongshmx/JX3BOT | f00179f100349bd38aa871d76ac1fd36601c3e78 | [
"Unlicense"
] | null | null | null | plugin/content/config.py | zhongshmx/JX3BOT | f00179f100349bd38aa871d76ac1fd36601c3e78 | [
"Unlicense"
] | null | null | null | plugin/content/config.py | zhongshmx/JX3BOT | f00179f100349bd38aa871d76ac1fd36601c3e78 | [
"Unlicense"
] | 1 | 2021-09-28T13:26:06.000Z | 2021-09-28T13:26:06.000Z | # -*- coding: utf-8 -*
"""
@Software : PyCharm
@File : config.py
@Author : 梦影
@Time : 2021/04/26 20:21:16
"""
from plugin.common import bot, common
import time
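# Helper queries shared by the plugin: cooldown tracking, weekday lookup and
# database updates.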
class extend:
@staticmethod
    async def select(value):  # Query the group record for the given value
sql = 'SELECT * FROM `main` WHERE `Value` = %s'
data = await bot.client.query(sql, value)
result = await common.next(data)
return result
@staticmethod
    async def update(value, name, number):  # Update the group's cooldown timestamp
name = f"CD.{name}"
sql = f"UPDATE `main` SET `{name}` = {time.time() + number} WHERE `Value` = {value}"
await bot.client.execute(sql)
@staticmethod
    async def week(date):  # Weekday of the given date  # query module
        text = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
ret = date.weekday()
return text[ret]
@staticmethod
    async def count(text):  # Compute remaining cooldown time  # query module
result = text - int(time.time())
        return f'Module cooling down ({result})...'
@staticmethod
    async def local(data, value):  # Locally recorded timestamp
if not data:
return 0
if value == 1080:
result = time.time()
else:
result = data[f'CD.{value}']
return result
@staticmethod
    async def lock(server, value):  # Update the database record
sql = 'UPDATE `main` SET `Main` = %s WHERE `Value` = %s'
await bot.client.execute(sql, (server, value))
| 26.226415 | 92 | 0.560432 | 169 | 1,390 | 4.609467 | 0.455621 | 0.130937 | 0.154044 | 0.100128 | 0.143774 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020429 | 0.295683 | 1,390 | 52 | 93 | 26.730769 | 0.775281 | 0.116547 | 0 | 0.222222 | 0 | 0.027778 | 0.181518 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89d8713aaae9a7376b89ddc2e44e25155e876d3b | 22,470 | py | Python | src/atlinter/pair_interpolation.py | BlueBrain/atlas-interpolation | c19ca081ee5354d72987e8ee4cacb78d96319c34 | [
"Apache-2.0"
] | null | null | null | src/atlinter/pair_interpolation.py | BlueBrain/atlas-interpolation | c19ca081ee5354d72987e8ee4cacb78d96319c34 | [
"Apache-2.0"
] | 5 | 2021-11-03T10:50:53.000Z | 2022-01-17T12:45:34.000Z | src/atlinter/pair_interpolation.py | BlueBrain/atlas-interpolation | c19ca081ee5354d72987e8ee4cacb78d96319c34 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Blue Brain Project, EPFL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Volume interpolation based on pairwise interpolation between slices."""
from __future__ import annotations
import logging
import warnings
from abc import ABC, abstractmethod
from math import ceil, log2
import numpy as np
import torch
from torchvision.transforms import ToTensor
from atlinter.data import GeneDataset
from atlinter.utils import find_closest
logger = logging.getLogger(__name__)
class PairInterpolationModel(ABC):
"""Base class for pair-interpolation models.
Subclasses of this class implement an interpolation between two given
    images `img1` and `img2` to produce an intermediate image `img_mid`.
This class and its subclasses are used by the PairInterpolate class,
which applies a given interpolation model to concrete data.
"""
def before_interpolation(self, img1, img2):
"""Run initialization and pre-processing steps before interpolation.
Typical applications of this method are padding and cropping of
input images to fit the model requirements, as well as initialisation
of any internal state, should one be necessary.
Parameters
----------
img1 : np.ndarray
            The left image of shape (width, height).
img2 : np.ndarray
The right image of shape (width, height).
Returns
-------
img1 : np.ndarray
The pre-processed left image.
img2 : np.ndarray
The pre-processed right image.
"""
return img1, img2
@abstractmethod
def interpolate(self, img1, img2):
"""Interpolate two images.
In the typical setting the input images are going to be of the format
as returned by the `before_interpolation`.
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
def after_interpolation(self, interpolated_images):
"""Run any post-processing after all interpolation is done.
Typical applications are padding and cropping of the image stack,
as well as any clean-up of the model state.
Parameters
----------
interpolated_images : np.ndarray
The stacked interpolated images. The array will include the input
images as the first and the last items respectively and will
therefore have the shape (n_interpolated + 2, height, width)
Returns
-------
np.ndarray
The post-processed interpolated images.
"""
return interpolated_images
class LinearPairInterpolationModel(PairInterpolationModel):
"""Linear pairwise interpolation.
This is the simplest possible interpolation model where the middle
image is the average of the left and right images.
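    A minimal sketch of typical use (assuming `img1` and `img2` are 2D arrays
    of the same shape):
    >>> model = LinearPairInterpolationModel()
    >>> img_mid = model.interpolate(img1, img2)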
"""
def interpolate(self, img1, img2):
"""Interpolate two images using linear interpolation.
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
img_mid = np.mean([img1, img2], axis=0)
return img_mid
class RIFEPairInterpolationModel(PairInterpolationModel):
"""Pairwise image interpolation using the RIFE model.
The typical use is
>>> from atlinter.vendor.rife.RIFE_HD import Model as RifeModel
>>> from atlinter.vendor.rife.RIFE_HD import device as rife_device
>>> rife_model = RifeModel()
>>> rife_model.load_model("/path/to/train_log", -1)
>>> rife_model.eval()
>>> rife_interpolation_model = RIFEPairInterpolationModel(rife_model, rife_device)
Parameters
----------
rife_model : atlinter.vendor.rife.RIFE_HD.Model
The RIFE model instance.
    rife_device : atlinter.vendor.rife.RIFE_HD.device
The RIFE device.
"""
def __init__(self, rife_model, rife_device):
# The behaviour of torch.nn.functional.interpolate has slightly changed,
# which leads to this warning. It doesn't seem to have an impact on the
# results, but if the authors of RIFE decide to update their code base
# by either specifying the `recompute_scale_factor` parameter or by
# some other means, then this warning filter should be removed.
# TODO: check the RIFE code for updates and remove the filter if necessary.
warnings.filterwarnings(
"ignore",
"The default behavior for interpolate/upsample with float scale_factor",
UserWarning,
)
self.rife_model = rife_model
self.rife_device = rife_device
self.shape = (0, 0)
def before_interpolation(self, img1, img2):
"""Pad input images to a multiple of 32 pixels.
Parameters
----------
img1 : np.ndarray
            The left image.
        img2 : np.ndarray
            The right image.
Returns
-------
img1 : np.ndarray
The padded left image.
img2 : np.ndarray
The padded right image.
"""
image_shape = img1.shape
if len(image_shape) == 3 and image_shape[-1] == 3:
rgb = True
image_shape = image_shape[:-1]
else:
rgb = False
self.shape = np.array(image_shape)
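        # Round each spatial dimension up to the next multiple of 32 and pad
        # with zeros, since RIFE requires input sizes divisible by 32.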
pad_x, pad_y = ((self.shape - 1) // 32 + 1) * 32 - self.shape
if rgb:
img1 = np.pad(img1, ((0, pad_x), (0, pad_y), (0, 0)))
img2 = np.pad(img2, ((0, pad_x), (0, pad_y), (0, 0)))
else:
img1 = np.pad(img1, ((0, pad_x), (0, pad_y)))
img2 = np.pad(img2, ((0, pad_x), (0, pad_y)))
return img1, img2
def interpolate(self, img1, img2):
"""Interpolate two images using RIFE.
        Note: img1 and img2 need to have the same shape.
If img1, img2 are grayscale, the dimension should be (height, width).
If img1, img2 are RGB image, the dimension should be (height, width, 3).
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
# Add batch and RGB dimensions (if not already), set device
if len(img1.shape) == 2:
rgb = False
img1 = (
torch.tensor(img1, dtype=torch.float32)
.repeat((1, 3, 1, 1))
.to(self.rife_device)
)
img2 = (
torch.tensor(img2, dtype=torch.float32)
.repeat((1, 3, 1, 1))
.to(self.rife_device)
)
else:
rgb = True
img1 = np.transpose(img1, (2, 0, 1))[np.newaxis]
img2 = np.transpose(img2, (2, 0, 1))[np.newaxis]
img1 = torch.tensor(img1, dtype=torch.float32).to(self.rife_device)
img2 = torch.tensor(img2, dtype=torch.float32).to(self.rife_device)
# The actual interpolation
img_mid = self.rife_model.inference(img1, img2).detach().cpu().numpy()
img_mid = img_mid.squeeze()
if rgb:
# Put the RGB channel at the end
img_mid = np.transpose(img_mid, (1, 2, 0))
else:
# Average out the RGB dimension
img_mid = img_mid.mean(axis=0)
return img_mid
def after_interpolation(self, interpolated_images):
"""Undo the padding added in `before_interpolation`.
Parameters
----------
interpolated_images : np.ndarray
The stacked interpolated images.
If input images are grayscale,
the dimension should be (n_img, height, width) or (height, width).
If input images are RGB image,
the dimension should be (n_img, height, width, 3) or (height, width, 3).
Returns
-------
np.ndarray
The stacked interpolated images with padding removed.
"""
# No n_img dimension: (height, width) or (height, width, 3)
if len(interpolated_images.shape) == 2 or (
len(interpolated_images.shape) == 3 and interpolated_images.shape[-1] == 3
):
return interpolated_images[: self.shape[0], : self.shape[1]]
# n_img dimension: (n_img, height, width) or (n_img, height, width, 3)
else:
return interpolated_images[:, : self.shape[0], : self.shape[1]]
class CAINPairInterpolationModel(PairInterpolationModel):
"""Pairwise image interpolation using the CAIN model.
The typical use is
>>> from atlinter.vendor.cain.cain import CAIN
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> cain_model = CAIN().to(device)
>>> cain_checkpoint = torch.load("pretrained_cain.pth", map_location=device)
>>> cain_model.load_state_dict(cain_checkpoint)
>>> cain_interpolation_model = CAINPairInterpolationModel(cain_model)
Parameters
----------
cain_model : atlinter.vendor.cain.cain.CAIN or torch.nn.DataParallel
The CAIN model instance.
"""
def __init__(self, cain_model):
self.cain_model = cain_model
self.to_tensor = ToTensor()
def interpolate(self, img1, img2):
"""Interpolate two images using CAIN.
        Note: img1 and img2 need to have the same shape.
If img1, img2 are grayscale, the dimension should be (height, width).
If img1, img2 are RGB image, the dimension should be (height, width, 3).
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
# Add batch and RGB dimensions
if len(img1.shape) == 2:
rgb = False
img1 = self.to_tensor(img1).repeat((1, 3, 1, 1))
img2 = self.to_tensor(img2).repeat((1, 3, 1, 1))
else:
rgb = True
img1 = self.to_tensor(np.transpose(img1, (2, 0, 1)))[None]
img2 = self.to_tensor(np.transpose(img2, (2, 0, 1)))[None]
# The actual interpolation
img_mid, _ = self.cain_model(img1, img2)
img_mid = img_mid.detach().cpu().numpy()
img_mid = img_mid.squeeze()
if rgb:
# Put the RGB channel at the end
img_mid = np.transpose(img_mid, (1, 2, 0))
else:
# Average out the RGB dimension
img_mid = img_mid.mean(axis=0)
return img_mid
class AntsPairInterpolationModel(PairInterpolationModel):
"""Pairwise image interpolation using AntsPy registration.
Typical use is
>>> from atlannot.ants import register, transform
>>> ants_interpolation_model = AntsPairInterpolationModel(register, transform)
Parameters
----------
register_fn : atlannot.ants.register
The AntsPy registration function
transform_fn : atlannot.ants.transform
The AntsPy transformation function
"""
def __init__(self, register_fn, transform_fn):
self.register_fn = register_fn
self.transform_fn = transform_fn
def interpolate(self, img1, img2):
"""Interpolate two images using AntsPy registration.
Parameters
----------
img1 : np.ndarray
The left image.
img2 : np.ndarray
The right image.
Returns
-------
img_mid : np.ndarray
The interpolated image.
"""
# Ensure the correct d-type
img1 = img1.astype(np.float32)
img2 = img2.astype(np.float32)
# The actual interpolation
nii_data = self.register_fn(fixed=img2, moving=img1)
img_mid = self.transform_fn(img1, nii_data / 2)
return img_mid
class PairInterpolate:
"""Runner for pairwise interpolation using different models.
Parameters
----------
n_repeat : int (optional)
The number of times the interpolation should be iterated. For each
iteration an interpolated image is inserted between each pair of
images from the previous iteration. Therefore n_{i+1} = n_i + (n_i + 1).
For example, for n_repeat=3 the progression of the number of images
will be the following: input = 0 -> 1 -> 3 -> 7
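    A minimal usage sketch (assuming `img1`, `img2` and a PairInterpolationModel
    instance `model` are already defined):
    >>> interpolator = PairInterpolate(n_repeat=2)
    >>> stack = interpolator(img1, img2, model)  # stack of 3 interpolated images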
"""
def __init__(self, n_repeat=1):
self.n_repeat = n_repeat
def repeat(self, n_repeat):
"""Set the number of interpolation iterations.
Parameters
----------
n_repeat : int
The new number of interpolation iterations. See `__init__` for more
details.
"""
self.n_repeat = n_repeat
return self
def __call__(self, img1, img2, model: PairInterpolationModel):
"""Run the interpolation with the given interpolation model.
Parameters
----------
img1 : np.ndarray
The left input image.
img2 : np.ndarray
The right input image.
model : PairInterpolationModel
The interpolation model.
Returns
-------
interpolated_images : np.ndarray
A stack of interpolation images. The input images are not included
in this stack.
"""
img1, img2 = model.before_interpolation(img1, img2)
interpolated_images = self._repeated_interpolation(
img1, img2, model, self.n_repeat
)
interpolated_images = np.stack(interpolated_images)
interpolated_images = model.after_interpolation(interpolated_images)
return interpolated_images
def _repeated_interpolation(self, img1, img2, model, n_repeat):
# End of recursion
if n_repeat <= 0:
return []
# Recursion step
img_mid = model.interpolate(img1, img2)
left_images = self._repeated_interpolation(img1, img_mid, model, n_repeat - 1)
right_images = self._repeated_interpolation(img_mid, img2, model, n_repeat - 1)
return [*left_images, img_mid, *right_images]
class GeneInterpolate:
"""Interpolation of a gene dataset.
Parameters
----------
    gene_data : GeneDataset
        Gene dataset to interpolate. It contains a `volume` of reference shape
        with all known slices placed at their correct positions, and a `metadata`
        dictionary containing information about the axis of the dataset and the
        section numbers.
model : PairInterpolationModel
Pair-interpolation model.
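    A minimal usage sketch (assuming `dataset` is a GeneDataset and `model` a
    PairInterpolationModel):
    >>> gene_interpolate = GeneInterpolate(dataset, model)
    >>> volume = gene_interpolate.predict_volume()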
"""
def __init__(
self,
gene_data: GeneDataset,
model: PairInterpolationModel,
):
self.gene_data = gene_data
self.model = model
self.axis = self.gene_data.axis
self.gene_volume = self.gene_data.volume.copy()
# If sagittal axis, put the sagittal dimension first
if self.axis == "sagittal":
self.gene_volume = np.moveaxis(self.gene_volume, 2, 0)
def get_interpolation(
self, left: int, right: int
) -> tuple[np.ndarray | None, np.ndarray | None]:
"""Compute the interpolation for a pair of images.
Parameters
----------
left
Slice number of the left image to consider.
right
Slice number of the right image to consider.
Returns
-------
interpolated_images : np.array or None
Interpolated image for the given pair of images.
Array of shape (N, dim1, dim2, 3) with N the number of
interpolated images.
predicted_section_numbers : np.array or None
Slice value of the predicted images.
Array of shape (N, 1) with N the number of interpolated images.
"""
diff = right - left
if diff == 0:
return None, None
n_repeat = self.get_n_repeat(diff)
pair_interpolate = PairInterpolate(n_repeat=n_repeat)
interpolated_images = pair_interpolate(
self.gene_volume[left], self.gene_volume[right], self.model
)
predicted_section_numbers = self.get_predicted_section_numbers(
left, right, n_repeat
)
return interpolated_images, predicted_section_numbers
def get_all_interpolation(self) -> tuple[np.ndarray, np.ndarray]:
"""Compute pair interpolation for the entire volume.
Returns
-------
all_interpolated_images : np.array
Interpolated image for the entire volume.
Array of shape (N, dim1, dim2, 3) with N the number of
interpolated images.
all_predicted_section_numbers : np.array
Slice value of the predicted images.
Array of shape (N, 1) with N the number of interpolated images.
"""
# TODO: Try to change the implementation of the prediction so that
# we do not predict slices that are not needed.
logger.info("Start predicting interpolation between two known slices")
known_slices = sorted(self.gene_data.known_slices)
all_interpolated_images = []
all_predicted_section_numbers = []
for i in range(len(known_slices) - 1):
left, right = known_slices[i], known_slices[i + 1]
(
interpolated_images,
predicted_section_numbers,
) = self.get_interpolation(left, right)
if interpolated_images is None:
continue
all_interpolated_images.append(interpolated_images)
all_predicted_section_numbers.append(predicted_section_numbers)
if i % 5 == 0:
logger.info(f"{i} / {len(known_slices) - 1} interpolations predicted")
all_interpolated_images = np.concatenate(all_interpolated_images)
all_predicted_section_numbers = np.concatenate(all_predicted_section_numbers)
return all_interpolated_images, all_predicted_section_numbers
def predict_slice(self, slice_number: int) -> np.ndarray:
"""Predict one gene slice.
Parameters
----------
slice_number
Slice section to predict.
Returns
-------
np.ndarray
Predicted gene slice. Array of shape (dim1, dim2, 3)
being (528, 320) for sagittal dataset and
(320, 456) for coronal dataset.
"""
left, right = self.gene_data.get_surrounding_slices(slice_number)
if left is None:
return self.gene_volume[right]
elif right is None:
return self.gene_volume[left]
else:
interpolated_images, predicted_section_numbers = self.get_interpolation(
left, right
)
index = find_closest(slice_number, predicted_section_numbers)[0]
return interpolated_images[index]
def predict_volume(self) -> np.ndarray:
"""Predict entire volume with known gene slices.
This function might be slow.
"""
volume_shape = self.gene_data.volume_shape
volume = np.zeros(volume_shape, dtype="float32")
logger.info(f"Start predicting the volume of shape {volume_shape}")
if self.gene_data.axis == "sagittal":
volume = np.moveaxis(volume, 2, 0)
# Get all the predictions
(
all_interpolated_images,
all_predicted_section_numbers,
) = self.get_all_interpolation()
min_slice_number = min(self.gene_data.known_slices)
max_slice_number = max(self.gene_data.known_slices)
end = volume_shape[0] if self.gene_data.axis == "coronal" else volume_shape[2]
# Populate the volume
logger.info("Populate volume with interpolation predictions")
for slice_number in range(end):
# If the slice is known, just copy the gene.
if slice_number in self.gene_data.known_slices:
volume[slice_number] = self.gene_volume[slice_number]
            # If the slice section is smaller than all known slices,
            # we copy-paste the smallest known slice.
elif slice_number < min_slice_number:
volume[slice_number] = self.gene_volume[min_slice_number]
            # If the slice section is bigger than all known slices,
            # we copy-paste the biggest known slice.
elif slice_number > max_slice_number:
volume[slice_number] = self.gene_volume[max_slice_number]
            # If the slice is surrounded by two known slices,
            # determine the prediction closest to the slice section.
else:
index = find_closest(slice_number, all_predicted_section_numbers)[0]
volume[slice_number] = all_interpolated_images[index]
if slice_number % 5 == 0:
logger.info(f"{slice_number} / {end} populated slices")
if self.gene_data.axis == "sagittal":
volume = np.moveaxis(volume, 0, 2)
return volume
@staticmethod
def get_n_repeat(diff: int) -> int:
"""Determine the number of repetitions to compute."""
if diff <= 0:
return 0
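        # PairInterpolate produces 2**n_repeat - 1 images, so ceil(log2(diff))
        # repetitions yield at least one prediction per missing section.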
n_repeat = ceil(log2(diff))
return n_repeat
@staticmethod
def get_predicted_section_numbers(
left: int, right: int, n_repeat: int
) -> np.ndarray:
"""Get slice values of predicted images."""
n_steps = 2**n_repeat + 1
predicted_section_numbers = np.linspace(left, right, n_steps)
return predicted_section_numbers[1:-1]
| 34.097117 | 87 | 0.611037 | 2,688 | 22,470 | 4.965774 | 0.157366 | 0.055289 | 0.026071 | 0.011987 | 0.376086 | 0.303042 | 0.247678 | 0.217261 | 0.182799 | 0.144891 | 0 | 0.017471 | 0.302047 | 22,470 | 658 | 88 | 34.148936 | 0.833642 | 0.441923 | 0 | 0.235043 | 0 | 0 | 0.033508 | 0 | 0 | 0 | 0 | 0.00304 | 0 | 1 | 0.098291 | false | 0 | 0.042735 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89d9c795b6169e6fd71d4df0205bf5d7c8f7fc9e | 3,270 | py | Python | tests/test_dyndnsupdate.py | Turgon37/DynDNSUpdate | 3282dba416403a2df0dde4e98d862f0d6bc0897f | [
"MIT"
] | null | null | null | tests/test_dyndnsupdate.py | Turgon37/DynDNSUpdate | 3282dba416403a2df0dde4e98d862f0d6bc0897f | [
"MIT"
] | null | null | null | tests/test_dyndnsupdate.py | Turgon37/DynDNSUpdate | 3282dba416403a2df0dde4e98d862f0d6bc0897f | [
"MIT"
] | 1 | 2015-03-04T01:08:08.000Z | 2015-03-04T01:08:08.000Z | # -*- coding: utf8 -*-
import http.client
import logging
import shlex
import shutil
import socket
import ssl
import subprocess
from unittest.mock import patch, Mock, call
from .mocks.connexionmock import createHTTPConnectionMock, createHTTPSConnectionMock
import dyndnsupdate
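# The HTTPConnection/HTTPSConnection mocks below let these tests run without
# real network access.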
# URL settings
def test_without_setting():
"""Must produce an error is no url was given"""
program = dyndnsupdate.DynDNSUpdate()
assert program.main() == 3
@patch('http.client.HTTPConnection', createHTTPConnectionMock())
@patch('http.client.HTTPSConnection', createHTTPSConnectionMock())
def test_with_valid_settings():
"""Must produce an error is bad urls were given"""
program = dyndnsupdate.DynDNSUpdate()
assert program.configure(dyndns_myip='1.1.1.1', server_url='www.api.com:81/',
verbose=-1,
dyndns_hostname=['mydyndnshostname.com']) == True
assert program.main() == 0
program = dyndnsupdate.DynDNSUpdate()
assert program.configure(dyndns_myip='1.1.1.1',
verbose=1,
server_url='https://www.api.com/',
dyndns_hostname=['mydyndnshostname.com']) == True
assert program.main() == 0
def test_with_invalid_url():
"""Must produce an error is bad urls were given"""
program = dyndnsupdate.DynDNSUpdate()
assert program.configure(server_url='ftp://lmdaz') == False
assert program.main() == 3
def test_with_missing_settings():
"""Must produce an error is there is any missing setting"""
program = dyndnsupdate.DynDNSUpdate()
assert program.configure(server_url='http://lmdaz') == True
assert program.main() == 3
@patch('http.client.HTTPConnection', createHTTPConnectionMock())
def test_with_http_auth():
"""Correct usage of HTTP basic auth"""
program = dyndnsupdate.DynDNSUpdate()
assert program.configure(dyndns_myip='1.1.1.1',
server_url='http://www.api.com/',
dyndns_hostname=['mydyndnshostname.com'],
server_username='user', server_password='pass') == True
assert program.main() == 0
c1 = call().request('GET',
'http://www.api.com/nic/update?backmx=NOCHG&hostname=mydyndnshostname.com&mx=&myip=1.1.1.1&offline=NOCHG&system=dyndns&url=&wildcard=NOCHG', headers={'User-Agent': 'dyndns-update/'+dyndnsupdate.__version__, 'Authorization': 'Basic dXNlcjpwYXNz'})
http.client.HTTPConnection.assert_has_calls([c1])
@patch('http.client.HTTPSConnection', createHTTPSConnectionMock('0.0.0.0'))
@patch('ssl._create_unverified_context', return_value=Mock(spec=ssl.SSLContext))
def test_insecure_https_address_from_url(ssl_context_mock, capsys):
"""Use insecure SSL transaction"""
# https
program = dyndnsupdate.DynDNSUpdate()
assert program.configure(dyndns_myip='1.1.1.1',
server_url='https://www.api.com/',
tls_insecure=True,
dyndns_hostname=['mydyndnshostname.com'],
server_username='user', server_password='pass') == True
assert program.main() == 0
ssl._create_unverified_context.assert_called_once_with()
| 39.878049 | 250 | 0.65841 | 369 | 3,270 | 5.682927 | 0.298103 | 0.014306 | 0.014306 | 0.12351 | 0.563662 | 0.497854 | 0.463519 | 0.441106 | 0.407248 | 0.287077 | 0 | 0.014792 | 0.214373 | 3,270 | 81 | 251 | 40.37037 | 0.801479 | 0.088073 | 0 | 0.473684 | 0 | 0.017544 | 0.189556 | 0.046117 | 0 | 0 | 0 | 0 | 0.263158 | 1 | 0.105263 | false | 0.035088 | 0.175439 | 0 | 0.280702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |