seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
12522375989 | # Pattern Printing
# Input=Integer not
# Boolean=True or False
#
# True
# *
# **
# ***
# ****
# False
# ****
# ***
# **
# *
# FROM COMMENT: TRY NEEL AFTER REVISION
# Read the triangle height and direction from stdin, then print a star pattern.
print("How Many Row You Want To Print")
one= int(input())
print("Type 1 Or 0")
two = int(input())
# bool(two): 0 -> False (descending pattern), any non-zero -> True (ascending).
new =bool(two)
if new == True:
    # Ascending: row i prints i stars.
    for i in range(1,one+1):
        for j in range(1,i+1):
            print("*",end=" ")
        print()
elif new ==False:
    # Descending: rows shrink from `one` stars down to 1.
    # NOTE(review): this branch uses end="" while the ascending branch uses
    # end=" " — likely an unintended inconsistency; confirm intended output.
    for i in range(one,0,-1):
        for j in range(1,i+1):
            print("*", end="")
print() | neelshet007/PythonTuts | exercise4.py | exercise4.py | py | 525 | python | en | code | 0 | github-code | 36 |
31070595638 | import reddit
import socketserver
import socketservervideogenerator
from time import sleep
import database
import datetime
from threading import Thread
def getScripts():
    """Fetch the latest AskReddit submissions and persist them.

    New submissions are inserted, already-known ones are updated, and the
    module-level ``lastUpdate`` timestamp is refreshed on completion.
    """
    global lastUpdate
    print("Grabbing more scripts...")
    submissions = reddit.getInfo('AskReddit', 45)
    updating = sum(1 for submission in submissions if submission.update)
    adding = len(submissions) - updating
    print("Adding %s new scripts, updating %s" % (adding, updating))
    for submission in submissions:
        if submission.update:
            database.updateSubmission(submission)
        else:
            database.addSubmission(submission)
    lastUpdate = datetime.datetime.now()
lastUpdate = None
def updateScripts():
    """Poll forever: do the initial fetch, then refresh on every hour change."""
    while True:
        sleep(10)
        if lastUpdate is None:
            # No fetch has happened yet — perform the initial load.
            getScripts()
        now = datetime.datetime.now()
        if now.hour != lastUpdate.hour:
            print("Getting more scripts - last update at %s" % lastUpdate)
            getScripts()
def init():
    # Boot the two socket-facing services, then keep the script database
    # fresh from a background worker thread (updateScripts loops forever).
    socketserver.startServer()
    socketservervideogenerator.startVideoGeneratorServer()
    thread = Thread(target=updateScripts)
    thread.start()
    # Leftover manual-testing calls, kept commented out by the author:
    #youtubequeue.initQueue()
    #socketclient.connectToServer()
    #print(checkValueExists("scriptid", "t5_2qh1i"))
    #updateScriptStatus("EDITING", "t5_2qh1i")
    #print(getVideoCountFromStatus("RAW"))
    #print(getRowCount("scripts"))
if __name__ == "__main__":
init() | drewwebster/Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader | YouTube Bot Server/initserver.py | initserver.py | py | 1,512 | python | en | code | 2 | github-code | 36 |
42997186771 | import base64
import hashlib
import hmac
import time
import requests
import jwt
from flask import request
class GitHubApp(object):
    """Flask extension wrapping GitHub App authentication.

    Handles webhook signature validation, app-level JWT (bearer) tokens,
    and exchanging them for per-installation access tokens.
    """

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Read GitHub credentials from the Flask config.

        Expects (all optional):
          GITHUB_WEBHOOK_SECRET  - shared secret for webhook HMAC checks
          GITHUB_APP_ID          - GitHub App identifier
          GITHUB_APP_PRIVATE_KEY - base64-encoded RSA private key (PEM)

        Raises ValueError when the private key cannot be base64-decoded.
        """
        self.webhook_secret = None
        self.app_id = None
        self.app_private_key_pem = None
        self._bearer_token = None
        self._bearer_token_exp = -1
        # .get() keeps missing config keys optional instead of raising KeyError.
        if app.config.get("GITHUB_WEBHOOK_SECRET"):
            self.webhook_secret = app.config["GITHUB_WEBHOOK_SECRET"]
        if app.config.get("GITHUB_APP_ID"):
            self.app_id = app.config["GITHUB_APP_ID"]
        if app.config.get("GITHUB_APP_PRIVATE_KEY"):
            try:
                self.app_private_key_pem = base64.b64decode(
                    app.config["GITHUB_APP_PRIVATE_KEY"]
                ).decode()
            except Exception as exc:
                raise ValueError(f"Unable to decode GITHUB_APP_PRIVATE_KEY: {exc}")
        app.teardown_appcontext(self.teardown)

    def validate_webhook(self):
        """Return True when the current request's HMAC signature is valid.

        When no webhook secret is configured, validation is skipped and every
        request is accepted (original behavior, kept for compatibility).
        """
        if self.webhook_secret is None:
            return True
        # Fix: a missing or malformed header used to raise AttributeError /
        # IndexError; treat it as an invalid signature instead.
        signature_header = request.headers.get("X-Hub-Signature-256")
        if not signature_header or "=" not in signature_header:
            return False
        expected = hmac.new(
            self.webhook_secret.encode(), msg=request.data, digestmod=hashlib.sha256
        ).hexdigest()
        return hmac.compare_digest(signature_header.split("=")[1], expected)

    def _token_needs_renewed(self):
        # Renew when less than a minute of validity remains.
        return (self._bearer_token_exp - int(time.time())) < 60

    @property
    def bearer_token(self):
        """App-level JWT for the GitHub API, cached until near expiry."""
        if self._bearer_token is None or self._token_needs_renewed():
            issued = int(time.time())
            payload = {
                "iat": issued,
                "exp": issued + 599,  # GitHub caps app JWTs at 10 minutes
                "iss": self.app_id,
            }
            self._bearer_token = jwt.encode(
                payload, self.app_private_key_pem, algorithm="RS256"
            )
            self._bearer_token_exp = issued + 599
        return self._bearer_token

    def fetch_installation_access_token(self, installation_id):
        """Exchange the app JWT for an installation access token.

        Returns the token string, or None when GitHub declines the request.
        """
        access_token_response = requests.post(
            f"https://api.github.com/app/installations/{installation_id}/access_tokens",
            headers={
                "Accept": "application/vnd.github.machine-man-preview+json",
                "Authorization": f"Bearer {self.bearer_token}",
            },
        )
        response_body = access_token_response.json()  # parse the body once
        if "token" not in response_body:
            print(f"Unable to authenticate for {installation_id}")
            return None
        return response_body["token"]

    def teardown(self, exception):
        # Nothing to clean up per app context; hook kept for Flask's API.
        pass
| cabotage/cabotage-app | cabotage/server/ext/github_app.py | github_app.py | py | 2,642 | python | en | code | 21 | github-code | 36 |
9689017723 | import pep8
# Mixin for testing the pep8 style rules
class Pep8ModelTests(object):
    """Mixin that checks one source file (``self.path``) for PEP 8 conformance.

    Host test cases must provide ``self.path`` and unittest's assert API.
    NOTE(review): the ``pep8`` package was renamed ``pycodestyle`` upstream —
    confirm the pinned dependency still resolves.
    """
    def test_pep8_conformance_views(self):
        # quiet=True suppresses console output; violations are collected on
        # the result object and reported through the assertion message.
        pep8style = pep8.StyleGuide(quiet=True)
        result = pep8style.check_files([self.path])
        error_message = ""
        if result.total_errors != 0:
            error_message = "Style errors in: " + self.path + "\n" + "\n".join(result.get_statistics())
        self.assertEqual(result.total_errors, 0, error_message) | Contrast-Security-OSS/DjanGoat | app/tests/mixins/pep8_model_test_mixin.py | pep8_model_test_mixin.py | py | 462 | python | en | code | 69 | github-code | 36
11111121290 | from django.test import TestCase
from .models import Producto, Categoria, CategoriaDetalle
from django.contrib.auth import get_user_model
class Categories(TestCase):
    """Tests for associating Producto rows with Categoria rows.

    NOTE(review): ``test_creacion_producto_y_filtrado_categoria`` is defined
    twice in this class; the later definition shadows the earlier one, so only
    one of them runs under the test runner — confirm and deduplicate.
    """
    def setUp(self):
        # Categories shared by the tests.
        self.cat1 = Categoria.objects.create(nombre='Kit')
        self.cat2 = Categoria.objects.create(nombre='Ingrediente')
        # Create a seller user (reuse it when it already exists).
        User = get_user_model()
        try:
            self.user = User.objects.get(username='vendedor')
        except:
            # NOTE(review): bare except — narrow to ``except User.DoesNotExist:``.
            self.user = User.objects.create_user(username='vendedor', password='pass')
    def test_creacion_producto_y_filtrado_categoria(self):
        """A product linked to a category appears when filtering by it."""
        # Create the product.
        producto = Producto.objects.create(
            nombre='Tacos',
            descripcion='Kit tacos',
            precio=1000.00,
            stock=10,
            idVendedor=self.user,
            tipo='Kit'
        )
        # Associate the product with both categories.
        CategoriaDetalle.objects.create(idCategoria=self.cat1, idProducto=producto)
        CategoriaDetalle.objects.create(idCategoria=self.cat2, idProducto=producto)
        # Filter products by category.
        productos_en_kit = Producto.objects.filter(
            categoriadetalle__idCategoria=self.cat1
        )
        # The product must appear under the expected category.
        self.assertIn(producto, productos_en_kit)
    def test_modificacion_producto_y_filtrado_categoria(self):
        # NOTE(review): this body duplicates setUp and is then followed by a
        # re-definition of the creation test — looks like an unfinished
        # copy/paste; confirm intent.
        self.cat1 = Categoria.objects.create(nombre='Kit')
        self.cat2 = Categoria.objects.create(nombre='Ingrediente')
        User = get_user_model()
        try:
            self.user = User.objects.get(username='vendedor')
        except:
            self.user = User.objects.create_user(username='vendedor', password='pass')
    def test_creacion_producto_y_filtrado_categoria(self):
        # NOTE(review): second definition — shadows the same-named test above.
        producto = Producto.objects.create(
            nombre='Tamal',
            descripcion='Kit Tamal',
            precio=100.00,
            stock=10,
            idVendedor=self.user,
            tipo='Kit'
        )
        # Associate the product with both categories.
        CategoriaDetalle.objects.create(idCategoria=self.cat1, idProducto=producto)
        CategoriaDetalle.objects.create(idCategoria=self.cat2, idProducto=producto)
        # Modify the product by removing one category link.
        CategoriaDetalle.objects.filter(idCategoria=self.cat2, idProducto=producto).delete()
        # Filter by the removed category.
        alimentos = Producto.objects.filter(
            categoriadetalle__idCategoria=self.cat2
        )
        # The product must no longer appear under the removed category.
        self.assertNotIn(producto, alimentos) | is-Campos/ethnicBites | categorias/tests.py | tests.py | py | 2,937 | python | es | code | 0 | github-code | 36
6284906050 | import sys
def main(args):
    """Advent of Code 2018 day 6 part 1: largest finite nearest-neighbour area.

    Reads coordinates from ``input6-1``, assigns every grid cell to its
    nearest coordinate by Manhattan distance (ties get owner -1), then
    reports the largest area whose region does not touch the grid edge.
    """
    lines = []
    with open("input6-1") as f:
        lines = f.readlines()
    # Parse "x,y" lines into integer tuples.
    coords = []
    for line in lines:
        line = line.strip()
        toks = line.split(',')
        coords.append((int(toks[0]), int(toks[1])))
    print(coords)
    # Bounding box of the input coordinates.
    max_x = -1
    max_y = -1
    for c in coords:
        if c[0] > max_x:
            max_x = c[0]
        if c[1] > max_y:
            max_y = c[1]
    print("max x and y", max_x, max_y)
    max_size = max_x * max_y
    print("max size", max_size)
    # Square grid big enough to contain all points.
    new_max_x = new_max_y = max(max_x, max_y) + 1
    # new_max_y = max_y # * 10
    # Offset coords
    #ncoords = []
    #for c in coords:
    #    nc = (c[0] + max_x, c[1] + max_y)
    #    ncoords.append(nc)
    ncoords = coords.copy()
    # matrix maps (x, y) -> (owning coordinate or -1 on a tie, distance).
    matrix = {}
    for x in range(0, new_max_x):
        for y in range(0, new_max_y):
            check = (x,y)
            for c in ncoords:
                dist = get_mdist(check, c)
                if check not in matrix:
                    matrix[check] = (c, dist)
                else:
                    orig_dist = matrix[check][1]
                    if dist < orig_dist:
                        matrix[check] = (c, dist)
                    elif dist == orig_dist:
                        # Equidistant from two coords: cell belongs to no one.
                        matrix[check] = (-1, orig_dist)
    print_matrix(new_max_x, new_max_y, matrix)
    # Group grid cells by their owning coordinate.
    areas = {}
    for x in range(0, new_max_x):
        for y in range(0, new_max_y):
            pt = matrix[(x,y)]
            if pt[0] in areas:
                areas[pt[0]].append((x,y))
            else:
                areas[pt[0]] = [(x,y)]
    # Largest finite area: skip ties (-1) and regions touching the grid edge,
    # which would extend infinitely on an unbounded plane.
    max_area = -1
    for k in areas:
        l = len(areas[k])
        print(k,l, on_edge(new_max_x, new_max_y, areas[k]))
        if l > max_area and l <= max_size and k != -1 and not on_edge(new_max_x, new_max_y, areas[k]):
            max_area = l
    print(max_area)
def on_edge(mx, my, pts):
    """Return True when any point in ``pts`` lies on the border of the
    ``mx`` x ``my`` grid (x <= 0, x >= mx-1, y <= 0, or y >= my-1).

    An empty ``pts`` yields False.  Replaces the manual loop with ``any()``
    and hoists the loop-invariant ``mx-1`` / ``my-1`` computations.
    """
    x_hi = mx - 1
    y_hi = my - 1
    return any(p[0] <= 0 or p[0] >= x_hi or p[1] <= 0 or p[1] >= y_hi for p in pts)
def print_matrix(mx, my, mt):
    """Dump the owner of every grid cell as a tab-separated table.

    ``mt`` maps (x, y) -> (owner, distance); one output row per y value.
    """
    for row in range(my):
        for col in range(mx):
            owner = mt[(col, row)][0]
            sys.stdout.write(str(owner))
            sys.stdout.write('\t')
            sys.stdout.flush()
        sys.stdout.write("\n")
def get_mdist(a, b):
    """Manhattan (L1) distance between 2-D points ``a`` and ``b``."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return abs(dx) + abs(dy)
if __name__ == "__main__":
main(sys.argv) | alienfluid/aoc2018 | day6-1.py | day6-1.py | py | 2,418 | python | en | code | 0 | github-code | 36 |
14276105253 | #Script used to take a word or phrase and count the inputted letter inside the word or phrase
def count_letters():
    """Prompt for a word and a letter, then report how many times the
    letter occurs in the word.

    Interactive only: reads both values from stdin and prints the result.
    """
    word = input('enter word: ')
    x = input('count which letter ?: ')
    # Compare each character directly; the previous manual index bookkeeping
    # (word[index] plus a separate increment) silently miscounts if the
    # increment ends up inside the match branch.
    count = sum(1 for letter in word if letter == x)
    print('word has', count, x)
count_letters()
| OmegaJP/git_practice | python_scripts/Chapter06/Ex_03_C6.py | Ex_03_C6.py | py | 365 | python | en | code | 0 | github-code | 36 |
74666999465 | import os
from math import cos, sin, sqrt, pi
import numpy as np
import pyvista as pv
from planegeometry.geometry import Triangle
def Gamma(t, A, nlobes):
    """Closed profile curve on the unit sphere used to build the Hopf torus.

    t: curve parameter (scalar or ndarray); A: lobe amplitude;
    nlobes: number of lobes.  Returns an array stacking x, y, z.
    """
    polar = np.pi / 2 - (np.pi / 2 - A) * np.cos(nlobes * t)
    azimuth = t + A * np.sin(2 * nlobes * t)
    sin_polar = np.sin(polar)
    return np.array([
        sin_polar * np.cos(azimuth),
        sin_polar * np.sin(azimuth),
        np.cos(polar)
    ])
def HopfInverse(p, phi):
    """Lift a point ``p`` on S^2 to the point of S^3 at fiber angle ``phi``
    (one preimage of the Hopf map)."""
    cos_phi = np.cos(phi)
    sin_phi = np.sin(phi)
    lifted = np.array([
        (1 + p[2]) * cos_phi,
        p[0] * sin_phi - p[1] * cos_phi,
        p[0] * cos_phi + p[1] * sin_phi,
        (1 + p[2]) * sin_phi
    ])
    return lifted / np.sqrt(2 * (1 + p[2]))
def Stereo(q):
    """Stereographic projection of ``q`` in S^3 (from the pole q[3] = 1) to R^3."""
    factor = 2.0 / (1.0 - q[3])
    return factor * q[0:3]
def F(t, phi, A, nlobes):
    """Hopf-torus immersion: profile curve -> Hopf fiber lift -> stereographic
    projection into R^3."""
    on_sphere = Gamma(t, A, nlobes)
    on_s3 = HopfInverse(on_sphere, phi)
    return Stereo(on_s3)
def HopfTorusMesh(nu=400, nv=200, A=0.44, nlobes=3):
    # Build a PyVista surface mesh of a Hopf torus by sampling F on a
    # (nu x nv) parameter grid: u spans the profile curve, v the fiber angle.
    angle_u = np.linspace(-np.pi, np.pi, nu)
    angle_v = np.linspace(0, np.pi, nv)
    u, v = np.meshgrid(angle_u, angle_v)
    # Note the axis shuffle: F returns (x, y, z) but it is unpacked here as
    # (z, x, y), i.e. the mesh axes are a rotated coordinate order.
    z, x, y = F(u, v, A, nlobes)
    grid = pv.StructuredGrid(x, y, z)
    # clean() merges duplicate points along the parameter seam.
    mesh = grid.extract_geometry().clean(tolerance=1e-6)
    return mesh
# -----------------------------------------------------------------------------
def Hexlet(Center, Radius, HTmesh, nframes, gifname, convert="magick convert", delay=8, bgcolor="#363940", tori_color="orangered"):
    """Render frames of six Hopf tori arranged on a Steiner-chain orbit.

    Center / Radius : placement of the hexlet (Center appears unused — the
        torus positions derive from Radius only; TODO confirm).
    HTmesh   : template Hopf-torus mesh, copied/scaled/rotated per torus.
    nframes  : number of frames sampled from the 180-step rotation cycle.
    gifname  : output GIF path, or None to show one interactive window.
    convert  : ImageMagick command line used to assemble the GIF.
    delay    : inter-frame delay passed to ImageMagick.
    """
    s = Radius # side length of the hexagon
    Coef = 2/3
    a = Coef*(Radius+s/2)/sin(pi/2-2*pi/6)
    I = np.array([a, 0]) # inversion pole
    ## ------------------------------------------------------------------ ////
    O1 = np.array([2*a, 0, 0])
    # interior sphere
    def inversion(M, RRadius):
        # Circle inversion of point M about pole II with (negated) constant k.
        II = np.array([Coef*(RRadius+RRadius/2)/sin(pi/2-2*pi/6), 0])
        S = Coef*(RRadius+RRadius/2) * np.array([cos(2*pi/6), sin(2*pi/6)])
        k = np.vdot(S-II, S-II) # negated inversion constant
        M = np.asarray(M, dtype=float)
        IM = M-II
        return II - k/np.vdot(IM,IM)*IM
    SmallRadius = Coef*(Radius-s/2)
    # Circumcircle through three inverted points gives the inner orbit circle.
    p1 = inversion((SmallRadius,0), Radius)
    p2 = inversion((0,SmallRadius), Radius)
    p3 = inversion((-SmallRadius,0), Radius)
    tr = Triangle(p1, p2, p3)
    cs = tr.circumcircle()
    shift = pi/90
    frames = np.linspace(1, 180, nframes)
    anim = False
    if not gifname is None:
        # Animation mode: render off-screen and save numbered PNG frames.
        gif_sansext, file_extension = os.path.splitext(os.path.basename(gifname))
        anim = True
        screenshotfmt = gif_sansext + "_%03d.png"
        screenshotglob = gif_sansext + "_*.png"
    for frame_number in frames:
        pltr = pv.Plotter(window_size=[512,512], off_screen=anim)
        pltr.set_background(bgcolor)
        i = 1
        # Six tori, one per hexagon vertex, orbiting with the frame number.
        while i<= 6:
            beta = i*pi/3 - frame_number*shift; # frame from 1 to 180
            ccenter = Coef*Radius*np.array([cos(beta),sin(beta)])
            # Invert three sample points of this torus' circle, then recover
            # its image circle via the circumcircle of the inverted points.
            p1 = inversion((0,Coef*Radius/2)+ccenter,Radius)
            p2 = inversion((Coef*Radius/2,0)+ccenter,Radius)
            p3 = inversion((0,-Coef*Radius/2)+ccenter,Radius)
            tr = Triangle(p1, p2, p3)
            cs = tr.circumcircle()
            center = np.array([cs.center[0], cs.center[1], 0])
            r = cs.radius
            mesh = HTmesh.copy()
            mesh.scale(r*0.05)
            mesh.rotate_z(2*frame_number)
            mesh.translate(center-O1)
            pltr.add_mesh(mesh, color=tori_color, specular=20, smooth_shading=True)
            i += 1
        pltr.set_focus([0,0,0])
        pltr.set_position([2, 0, 8])
        if anim:
            pltr.show(screenshot=screenshotfmt % frame_number)
        else:
            pltr.show()
    if anim:
        # Stitch the saved frames into a looping GIF with ImageMagick.
        os.system(
            convert + (" -dispose previous -loop 0 -delay %d " % delay) + screenshotglob + " " + gifname
        )
Hexlet((0,0,0), 2, HopfTorusMesh(), nframes=1, gifname=None)#"HopfToriSteinerOrbit.gif")
| stla/PyVistaMiscellanous | HopfToriSteinerOrbit.py | HopfToriSteinerOrbit.py | py | 3,714 | python | en | code | 4 | github-code | 36 |
23622792802 | import os.path
import numpy
import pickle
import streamlit as st
import statistics
from matplotlib import pyplot as plt
import seaborn as sb
from PIL import Image
from tools.analyse_tool import CompleteAnalysis
from objects.training_set import TrainingSet
from objects.patient import Patient
def warning():
    """Render a prominent gradient banner advising the user to preload data."""
    gradient_start = '#E75919'
    gradient_end = '#EE895C'
    text_color = '#FFFFFF'
    message = 'Before starting the analysis, we strongly recommend to load the desired dataset in advance. You can do this in the "Data Loader" tab.'
    banner_html = (
        f'<p style="text-align:center;background-image: linear-gradient(to right,{gradient_start}, {gradient_end});'
        f'color:{text_color};font-size:24px;border-radius:2%;">{message}</p>'
    )
    st.markdown(banner_html, unsafe_allow_html=True)
def create_description():
    """Write the intro paragraph for the sepsis-research page."""
    description = (
        "This Sepsis Research Analysis focuses on displaying the relation of selected features and "
        "the occurrence of sepsis. A histogram is used to visualize the collected data."
    )
    st.markdown(description)
SEPSIS_TOOL_CHOICE = {
"both": "positive + negative",
"sepsis": "positive",
"no_sepsis": "negative",
}
def plot_sepsis_analysis(analysis_obj, col2, selected_label, selected_tool):
    """Plot a sepsis/no-sepsis histogram for one label and show summary stats.

    analysis_obj : CompleteAnalysis carrying min/max and per-sepsis plot data.
    col2         : streamlit column receiving the histogram figure.
    selected_label, selected_tool : user choices from the page selectors.
    """
    min_val = analysis_obj.min_for_label[selected_label][1]
    max_val = analysis_obj.max_for_label[selected_label][1]
    # plot_data[0] holds sepsis-positive values, plot_data[1] negative ones.
    plot_data = analysis_obj.plot_label_to_sepsis[selected_label]
    # getting the min max average to scale the plot proportional
    # Gender is binary, so it only gets two bins.
    bins = numpy.linspace(float(min_val), float(max_val),
                          100 if selected_label != 'Gender' else 2) # removed [1] from min_val
    # Actually Plotting the Histogram
    fig, ax1 = plt.subplots()
    fig.title = "Histogram" # doesnt work — NOTE(review): use fig.suptitle()
    if selected_tool == SEPSIS_TOOL_CHOICE["both"]:
        ax1.hist([plot_data[0], plot_data[1]], density=True, color=['r', 'g'], bins=bins, alpha=0.6,
                 label=["Sepsis", "No Sepsis"])
    elif selected_tool == SEPSIS_TOOL_CHOICE["sepsis"]:
        ax1.hist(plot_data[0], bins=bins, alpha=0.6, color="r", label="Sepsis")
    elif selected_tool == SEPSIS_TOOL_CHOICE["no_sepsis"]:
        ax1.hist(plot_data[1], bins=bins, alpha=0.6, color="g", label="No Sepsis")
    ax1.legend()
    col2.pyplot(fig)
    headline = "Further Statistics for the label " + selected_label + ": "
    st.subheader(headline)
    # Displaying further Statistics
    sepsis_mean = round(statistics.mean(plot_data[0]), 5)
    sepsis_median = round(statistics.median(plot_data[0]), 5)
    sepsis_var = round(statistics.variance(plot_data[0]), 5)
    no_sepsis_mean = round(statistics.mean(plot_data[1]), 5)
    no_sepsis_median = round(statistics.median(plot_data[1]), 5)
    no_sepsis_var = round(statistics.variance(plot_data[1]), 5)
    # Deltas shown next to the sepsis metrics (sepsis minus no-sepsis).
    diff_mean = round(sepsis_mean - no_sepsis_mean, 5)
    diff_median = round(sepsis_median - no_sepsis_median, 5)
    diff_var = round(sepsis_var - no_sepsis_var, 5)
    col0, col1, col2, col3 = st.columns(4)
    col0.markdown("**Sepsis**")
    col1.metric("Average", sepsis_mean, diff_mean)
    col2.metric("Median", sepsis_median, diff_median)
    col3.metric("Variance", sepsis_var, diff_var)
    col0, col1, col2, col3 = st.columns(4)
    col0.markdown("**No Sepsis**")
    col1.metric("", no_sepsis_mean)
    col2.metric("", no_sepsis_median)
    col3.metric("", no_sepsis_var)
class SepsisResearch:
    """Streamlit page combining the sepsis histogram with feature correlations."""
    # File-name postfix for this page's on-disk correlation cache.
    CACHE_CORRELATION_POSTFIX = "frontend-correlation"
    def __init__(self):
        # Page layout: histogram section first, then the correlation section.
        st.markdown("<h2 style='text-align: left; color: black;'>Histogram for Sepsis Research</h2>",
                    unsafe_allow_html=True)
        create_description()
        col1, col2 = st.columns((1, 2))
        selected_label, selected_set, selected_tool = self.create_selectors(col1)
        analysis_obj, file_name = CompleteAnalysis.get_analysis(selected_label=selected_label,
                                                                selected_tool=selected_tool,
                                                                selected_set=selected_set)
        plot_sepsis_analysis(analysis_obj, col2, selected_label, selected_tool)
        st.markdown(
            "<h2 style='text-align: left; color: black;'>Correlation of relevant Features</h2>",
            unsafe_allow_html=True)
        col1, col2 = st.columns((1, 2))
        selected_label, selected_set, use_fix_missing_values, use_interpolation\
            = self.__create_correlation_selectors(col1)
        # NOTE: the keyword argument named ``set`` shadows the builtin.
        self.__plot_correlations(set=TrainingSet.get_training_set(selected_set), label=selected_label,
                                 fix_missing_values=use_fix_missing_values, use_interpolation=use_interpolation,
                                 col=col2)
        warning()
    def create_selectors(self, col1):
        # Selectors for the histogram section.
        selected_label = col1.selectbox('Choose a label:', Patient.LABELS)
        selected_set = col1.selectbox('Choose a Set:', TrainingSet.PRESETS.keys())
        selected_sepsis = col1.selectbox('Choose if sepsis positive or negative:',
                                         tuple(SEPSIS_TOOL_CHOICE.values()))
        selected_tool = selected_sepsis
        return selected_label, selected_set, selected_tool
    def __create_correlation_selectors(self, col):
        # Default the label to "SepsisLabel" and the set to "rnd Sample A"
        # when they exist in the available options.
        sepsislabel_indices = [i for i in range(len(Patient.LABELS)) if Patient.LABELS[i] == "SepsisLabel"]
        sepsislabel_index = sepsislabel_indices[0] if len(sepsislabel_indices) > 0 else None
        sample_a_indices = [i for i in range(len(TrainingSet.PRESETS.keys())) if list(TrainingSet.PRESETS.keys())[i] == "rnd Sample A"]
        # NOTE(review): this guard checks sepsislabel_indices — probably meant
        # sample_a_indices; confirm.
        sample_a_index = sample_a_indices[0] if len(sepsislabel_indices) > 0 else None
        selected_label = col.selectbox("Choose a Label", Patient.LABELS, index=sepsislabel_index, key="corrLabel") # might be useful to only offer labels that are actually in the selected_set
        selected_set = col.selectbox("Choose a Set:", TrainingSet.PRESETS.keys(), key="corrSet", index=sample_a_index)
        use_fix_missing_values = col.checkbox("Use \"fix missing\"", key="corrFix", value=True)
        use_interpolation = col.checkbox("Use interpolation", key="corrInterpolate", value=True)
        return selected_label, selected_set, use_fix_missing_values, use_interpolation
    def __plot_correlations(self, set: TrainingSet, col, label: str, fix_missing_values: bool, use_interpolation: bool):
        """Compute (or load cached) correlations of all features to ``label``
        and render them as a bar chart in ``col``."""
        avg_df = set.get_average_df(fix_missing_values=fix_missing_values,
                                    use_interpolation=use_interpolation)
        file_path = set.get_cache_file_path(SepsisResearch.CACHE_CORRELATION_POSTFIX)
        # Cache key varies with the two preprocessing flags.
        # NOTE(review): operator precedence groups this as
        # "fixed" if fix_missing_values else ("no_fixed" + (...)) — likely
        # needs parentheses to produce the intended four-way key.
        curr_version = "fixed" if fix_missing_values else "no_fixed" + \
                       "interpolated" if use_interpolation else "no_interp"
        sorted_corr_df = None
        if not os.path.isfile(file_path):
            print("Frontend SepsisResearch found no cache!")
            d = None
        else:
            print("Frontend SepsisResearch is using cache file:", file_path)
            d = pickle.load(open(file_path, 'rb'))
            if f"sorted_corr_df_{curr_version}" in d.keys():
                sorted_corr_df = d[f'sorted_corr_df_{curr_version}']
                avg_df_corr_without_nan = d['avg_df_corr']
                feature_names = avg_df_corr_without_nan.columns
        if sorted_corr_df is None: # cache not found or empty
            print("Cache was not found or empty! Calculating ...")
            sepsis_df = set.get_sepsis_label_df() # no transpose needed
            transposed_df = avg_df.transpose()
            added_sepsis_df = transposed_df
            added_sepsis_df["SepsisLabel"] = sepsis_df.iloc[0:].values
            added_sepsis_df = added_sepsis_df.fillna(0) # fix NaN problem
            avg_df_corr = added_sepsis_df.corr()
            feature_names = avg_df_corr.columns
            avg_df_corr_without_nan = avg_df_corr.fillna(0) # if features have no values they are none
            corr_df = avg_df_corr_without_nan[label]
            sorted_corr_df = corr_df.sort_values(ascending=False)
            print("Frontend SepsisResearch writes to cache file:", file_path)
            if d is None:
                pickle.dump({f'sorted_corr_df_{curr_version}': sorted_corr_df, "avg_df_corr": avg_df_corr_without_nan},
                            open(file_path, 'wb'))
            else:
                d[f'sorted_corr_df_{curr_version}'] = sorted_corr_df
                pickle.dump(d, open(file_path, 'wb'))
        # Bar plot of correlation to label
        try:
            fig, ax1 = plt.subplots(1)
            # Drop the label's self-correlation (always 1.0) before plotting.
            plot_corr_df = sorted_corr_df.drop(label)
            ax1.bar(plot_corr_df.index, plot_corr_df)# = plot_corr_df.plot.bar(x='Features')
            ax1.set_xlabel("Features")
            f = 'fixed values,' if fix_missing_values else ''
            i = 'quadratic interpolation' if use_interpolation else ''
            ax1.set_title(f"Correlation to {label}, {f} {i}")
            props = {"rotation": 90}
            plt.setp(ax1.get_xticklabels(), **props)
            # heat map of feature x feature
            #ax2 = sb.heatmap(data=avg_df_corr_without_nan.to_numpy(), vmin=-1, vmax=1, linewidths=0.5,
            #                 cmap='bwr', yticklabels=feature_names)
            #ax2.set_title(f"Correlations in {set.name}, {f}"
            #              f"{i}")
            # pair plot of greatest features to label
            #important_features = sorted_corr_df.index[:3].tolist()
            #important_features.extend(sorted_corr_df.index[-3:].tolist())
            #selected_labels_df = avg_df.transpose().filter(important_features, axis=1)
            #avg_df_small = selected_labels_df.iloc[:100] # scatter plot nur 100 patients
            #sb.set_style('darkgrid')
            #pairplot = sb.pairplot(avg_df_small)
            #col.pyplot(pairplot)
            col.pyplot(fig)
        except KeyError:
            info_key_error = "The selected label was not found in the selected dataset. It was probably removed" \
                             " within the imputation. Please select a different label or a different dataset."
            st.markdown(info_key_error)
| Data-Challenge-Team-8/data_challenge_team_8 | web/categories/sepsis_research.py | sepsis_research.py | py | 10,177 | python | en | code | 1 | github-code | 36 |
22689168957 | import sqlite3
# Insert one sensor reading into the users table.  try/finally guarantees the
# connection is closed even when execute() or commit() raises, instead of
# leaking the handle on error.
con = sqlite3.connect('people.db')
try:
    c = con.cursor()
    c.execute(""" INSERT INTO users (tag_num, "temp", humidity, light_intensity) VALUES ('13 05 5e 0d', 23, 0, 200)""")
    con.commit()
finally:
    con.close()
| KrikorAstour/IoT | phase4/poop.py | poop.py | py | 212 | python | en | code | 0 | github-code | 36 |
28508591217 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from numpy import sometrue, array, ravel, logical_not, zeros, float32, maximum, equal, int32
from numpy import arcsin, sqrt
from numpy import ma
from scipy.ndimage import maximum_filter, correlate
from opus_core.variables.variable import Variable
from biocomplexity.land_cover.variable_functions import my_attribute_label
class shei(Variable):
    """Shannon's evenness index (SHEI), computed in a 5x5 moving window
    - need {"constants":{"FOOTPRINT":footprint}} in resources when compute
    where footprint=ones(shape=(5,5))"""
    # Name of the dataset attribute holding the land-cover-type grid.
    land_cover_type = 'lct'
    def dependencies(self):
        return [my_attribute_label(self.land_cover_type)]
    def _get_cover_types(self, lct):
        """Return an array of the landcover type codes present in the lct grid."""
        x = []
        max_type = int(maximum.reduce(ravel(lct)))
        for itype in range(1, max_type+1):
            if sometrue(ravel(lct) == itype):
                x = x + [itype]
        return array(x)
    def _count_covertypes_within_window(self, lct, cover_type_list, footprint):
        """Return integer array indicating the number of different covertypes
        of interest that are within the moving window (footprint)"""
        m = zeros(shape=lct.shape, dtype=int32)
        for cover_type in cover_type_list:
            # maximum_filter yields 1 wherever the type occurs in the window.
            m += maximum_filter(equal(lct, cover_type), footprint=footprint)
        return m
    def _compute_pct_cover_type_within_footprint(self, lct, cover_type, footprint, lct_mask):
        """Calculates percentage of one covertype within the footprint"""
        temp = equal(lct, cover_type)
        # use ma.masked_array to prevent divide-by-zero warnings
        mask_invert = logical_not(lct_mask) # in numpy import ma.masked, 0 == valid
        pixels = ma.masked_array(correlate(mask_invert.astype(int32), footprint, mode="reflect"))
        values = correlate(temp.astype(int32), footprint, mode="reflect")
        return ma.filled(values / pixels.astype(float32), 0)
    def _compute_shannon_evenness_index(self, lct, footprint, lct_mask):
        """Compute Shannon's evenness index"""
        cover_types_list = self._get_cover_types(lct)
        numerator_sum = ma.masked_array(zeros(shape=lct.shape, dtype=float32),
                                        mask=lct_mask)
        for cover_type in cover_types_list:
            # Accumulate sum of p_i * ln(p_i); ln(0) is masked and filled with 0.
            pi = self._compute_pct_cover_type_within_footprint(lct, cover_type, footprint, lct_mask)
            numerator_sum += pi*ma.filled(ma.log(pi), 0)
        m = self._count_covertypes_within_window(lct, cover_types_list, footprint)
        # SHEI = -sum(p_i ln p_i) / ln(number of types in the window).
        return ma.filled(-numerator_sum / ma.log(m), 0).astype(float32)
    def compute(self, dataset_pool):
        # Entry point called by the opus variable framework.
        constants = dataset_pool.get_dataset('constants')
        footprint = constants["FOOTPRINT"]
        lct = ma.filled(self.get_dataset().get_2d_attribute(self.land_cover_type), 0)
        lct_mask = self.get_dataset().get_mask(is_2d_version=True)
        shei = self._compute_shannon_evenness_index(lct, footprint, lct_mask)
        # arcsin(sqrt(x)): variance-stabilizing transform before flattening.
        return self.get_dataset().flatten_by_id(arcsin(sqrt(shei)))
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
from biocomplexity.tests.expected_data_test import ExpectedDataTest
class Tests(ExpectedDataTest):
    """Unit tests for the biocomplexity SHEI land-cover variable."""
    # Fully-qualified name of the variable under test.
    variable_name = "biocomplexity.land_cover.shei"
    def test_my_inputs(self):
        # 2x2 grid containing three distinct cover types (1, 2, 4).
        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(
            table_name='land_covers',
            table_data={
                'relative_x': array([1,2,1,2]),
                'relative_y': array([1,1,2,2]),
                "lct": array([1, 2, 1, 4]),
            }
        )
        dataset_pool = DatasetPool(package_order=['biocomplexity'],
                                   storage=storage)
        gridcell = dataset_pool.get_dataset('land_cover')
        gridcell.compute_variables(self.variable_name,
                                   dataset_pool=dataset_pool)
        values = gridcell.get_attribute(self.variable_name)
        # Expected arcsin(sqrt(SHEI)) values for the four cells.
        # NOTE(review): self.assert_ is the deprecated alias of assertTrue.
        should_be = array([1.42948067, 1.18391383, 1.42948103, 1.18391371])
        self.assert_(ma.allclose( values, should_be, rtol=1e-6),
                     msg = "Error in " + self.variable_name)
    def test_on_expected_data(self):
        # Regression check against stored expected-data fixtures.
        self.do_test_on_expected_data(["relative_x","relative_y","lct"],
                                      element_atol=0.3)
if __name__ == "__main__":
opus_unittest.main() | psrc/urbansim | biocomplexity/land_cover/shei.py | shei.py | py | 4,909 | python | en | code | 4 | github-code | 36 |
36663511828 | import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from konlpy.tag import Okt
from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras.models import Sequential
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
import datetime
pos = []
neg = []
posneg = []
stopwords = ['의', '가', '이', '은', '들', '는', '좀', '잘', '걍', '과', '도', '를', '으로', '자', '에', '와', '한', '하다']
# Korean morpheme decomposition (tokenization) step
def run():
    """Train an LSTM sentiment classifier (positive/neutral/negative) on
    Korean news headlines and save the model with a timestamped filename.
    """
    train_data = pd.read_csv("Data/train.csv", encoding='CP949')
    test_data = pd.read_csv("Data/test.csv", encoding='CP949')
    print(train_data.groupby('label').size().reset_index(name='count'))
    print(test_data.groupby('label').size().reset_index(name='count'))
    okt = Okt()
    # Strip punctuation, run morphological analysis, drop stopwords (train set).
    X_train = []
    for sentence in train_data['headline']:
        temp_X = []
        sentence = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…\"\“》]', '', str(sentence))
        temp_X = okt.morphs(sentence, stem=True) # tokenization
        temp_X = [word for word in temp_X if not word in stopwords]
        X_train.append(temp_X)
    # Same preprocessing for the test set.
    X_test=[]
    for sentence in test_data['headline']:
        temp_X = []
        sentence = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…\"\“》]', '', str(sentence))
        temp_X = okt.morphs(sentence, stem=True) # tokenization
        temp_X = [word for word in temp_X if not word in stopwords]
        X_test.append(temp_X)
    # Integer-encode the tokenized words (vocabulary capped at max_words).
    max_words = 35000
    tokenizer = Tokenizer(num_words=max_words)
    tokenizer.fit_on_texts(X_train)
    X_train = tokenizer.texts_to_sequences(X_train)
    X_test = tokenizer.texts_to_sequences(X_test)
    print("제목의 최대 길이 : ", max(len(l) for l in X_train))
    print("제목의 평균 길이 : ", sum(map(len, X_train))/ len(X_train))
    plt.hist([len(s) for s in X_train], bins=50)
    plt.xlabel('length of Data')
    plt.ylabel('number of Data')
    plt.show()
    # One-hot encode the labels: -1 -> [1,0,0], 0 -> [0,1,0], 1 -> [0,0,1].
    y_train = []
    y_test = []
    # one hot encoding
    for i in range(len(train_data['label'])):
        if train_data['label'].iloc[i] == 1:
            y_train.append([0, 0, 1])
        elif train_data['label'].iloc[i] == 0:
            y_train.append([0, 1, 0])
        elif train_data['label'].iloc[i] == -1:
            y_train.append([1, 0, 0])
    for i in range(len(test_data['label'])):
        if test_data['label'].iloc[i] == 1:
            y_test.append([0, 0, 1])
        elif test_data['label'].iloc[i] == 0:
            y_test.append([0, 1, 0])
        elif test_data['label'].iloc[i] == -1:
            y_test.append([1, 0, 0])
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    # Pad every sequence to a fixed length.
    # (Original comments said "list shuffle" and "pad to 30" — no shuffle
    # happens here and the length used is 40; confirm intent.)
    max_len = 40 # pad all inputs to the same length
    X_train = pad_sequences(X_train, maxlen=max_len)
    X_test = pad_sequences(X_test, maxlen = max_len)
    # LSTM with softmax / categorical_crossentropy for the 3-way
    # (positive, negative, neutral) classification.
    model = Sequential()
    model.add(Embedding(max_words, 100))
    model.add(LSTM(128))
    model.add(Dense(3,activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(X_train,y_train, epochs=5, batch_size=10, validation_split=0.1)
    predict = model.predict(X_test)
    predict_labels = np.argmax(predict, axis=1)
    original_labels = np.argmax(y_test, axis=1)
    # Show the first 30 test headlines with their predicted labels.
    for i in range(30):
        origin_label=""
        if(original_labels[i] == 1):
            origin_label="중립"
        elif (original_labels[i] == 2):
            origin_label = "긍정"
        else:
            origin_label="부정"
        predict_label = ""
        if (predict_labels[i] == 1):
            predict_label = "중립"
        elif (predict_labels[i] == 2):
            predict_label = "긍정"
        else:
            predict_label = "부정"
        print("[", test_data['headline'].iloc[i], "]\t[예측한 라벨 : ",predict_label,"]")
    # Save the trained model under a timestamped filename.
    now = datetime.datetime.now()
    nowDatetime = now.strftime('%Y_%m_%d_%H시%M분%S초')
    model.save('Model'+nowDatetime+'.h5')
if __name__ == '__main__':
run() | woqls22/StockNews | BackEnd/PythonScripts/train.py | train.py | py | 4,385 | python | en | code | 3 | github-code | 36 |
15134742588 | from fileinput import filename
import logging
import logging
import os
import pandas as pd
from ekorpkit import eKonf
log = logging.getLogger(__name__)
class Quandl:
def __init__(self, **args):
    """Configure FRED / Nasdaq Data Link clients from an eKonf config dict.

    Recognized keys include: name, fred_api_key, nasdaq_api_key, verbose,
    series_id (str or list), series_name, value_column, start_date, end_date,
    pipeline.eval_columns, output_dir, output_file, force_download, autoload.
    When autoload is truthy the configured series are fetched immediately.
    """
    from fredapi import Fred
    import nasdaqdatalink

    self.args = eKonf.to_dict(args)
    self.autoload = self.args.get("autoload", True)
    self.name = self.args.get("name")
    self.fred_api_key = self.args.get("fred_api_key")
    self.nasdaq_api_key = self.args.get("nasdaq_api_key")
    self.verbose = self.args.get("verbose", True)
    # Normalize series_id to a list: str -> [str], anything else -> [].
    self.series_id = self.args.get("series_id")
    if isinstance(self.series_id, str):
        self.series_id = [self.series_id]
    elif not isinstance(self.series_id, list):
        self.series_id = []
    self.series_name = self.args.get("series_name")
    self.value_column = self.args.get("value_column") or "value"
    # Default the series name from the ids (slashes are not filename-safe).
    if self.series_name is None:
        if self.series_id:
            self.series_name = "_".join(self.series_id).replace("/", "_")
        else:
            self.series_name = self.value_column
    self.start_date = self.args.get("start_date")
    self.end_date = self.args.get("end_date")
    self.eval_columns = self.args.get("pipeline").get("eval_columns")
    self.output_dir = self.args["output_dir"]
    os.makedirs(self.output_dir, exist_ok=True)
    self.output_file = self.args["output_file"]
    self.force_download = self.args["force_download"]
    # API clients; the Nasdaq key is configured globally on the module.
    self.fred = Fred(api_key=self.fred_api_key)
    nasdaqdatalink.ApiConfig.api_key = self.nasdaq_api_key
    self.data = None
    if self.autoload:
        self.load(
            series_id=self.series_id,
            series_name=self.series_name,
            start_date=self.start_date,
            end_date=self.end_date,
        )
def get(self, series_id, start_date=None, end_date=None, **kwargs):
    """Fetch one series as a DataFrame, dispatching on the id format.

    Ids containing "/" (e.g. "WIKI/AAPL") go to Nasdaq Data Link; anything
    else is treated as a FRED series id.
    """
    if "/" not in series_id:
        observations = self.get_series(series_id, start_date, end_date, **kwargs)
        return pd.DataFrame(observations, columns=[self.value_column])
    return self.get_nasqaq(series_id, start_date, end_date, **kwargs)
def get_nasqaq(self, series_id, start_date=None, end_date=None, **kwargs):
"""Return dataframe of requested dataset from Nasdaq Data Link.
:param series_id: str or list, depending on single dataset usage or multiset usage
Dataset codes are available on the Nasdaq Data Link website
:param str api_key: Downloads are limited to 50 unless api_key is specified
:param str start_date, end_date: Optional datefilers, otherwise entire
dataset is returned
:param str collapse: Options are daily, weekly, monthly, quarterly, annual
:param str transform: options are diff, rdiff, cumul, and normalize
:param int rows: Number of rows which will be returned
:param str order: options are asc, desc. Default: `asc`
:param str returns: specify what format you wish your dataset returned as,
either `numpy` for a numpy ndarray or `pandas`. Default: `pandas`
:returns: :class:`pandas.DataFrame` or :class:`numpy.ndarray`
Note that Pandas expects timeseries data to be sorted ascending for most
timeseries functionality to work.
Any other `kwargs` passed to `get` are sent as field/value params to Nasdaq Data Link
with no interference.
"""
import nasdaqdatalink
return nasdaqdatalink.get(
dataset=series_id, start_date=start_date, end_date=end_date, **kwargs
)
def get_series(self, series_id, start_date=None, end_date=None, **kwargs):
"""
Get data for a Fred series id. This fetches the latest known data, and is equivalent to get_series_latest_release()
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
start_date : datetime or datetime-like str such as '7/1/2014', optional
earliest observation date
end_date : datetime or datetime-like str such as '7/1/2014', optional
latest observation date
kwargs : additional parameters
Any additional parameters supported by FRED. You can see https://api.stlouisfed.org/docs/fred/series_observations.html for the full list
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
return self.fred.get_series(
series_id,
observation_start=start_date,
observation_end=end_date,
**kwargs,
)
def load(
self,
series_id,
series_name=None,
start_date=None,
end_date=None,
filename=None,
expressions=None,
index_name="date",
reset_index=False,
):
if isinstance(series_id, str):
series_id = [series_id]
elif not isinstance(series_id, list):
series_id = []
if series_name is None:
series_name = "_".join(series_id).replace("/", "_")
if filename is None:
filename = f"{series_name}.parquet"
if start_date is None:
start_date = self.start_date
if end_date is None:
end_date = self.end_date
if isinstance(self.eval_columns, dict):
self.eval_columns["expressions"] = expressions
if self.verbose:
print(f"Loading {series_name}{series_id} from {start_date} to {end_date}")
filepath = os.path.join(self.output_dir, filename)
if not os.path.exists(filepath) or self.force_download:
self.data = self._load_series(
series_id, series_name, start_date, end_date, index_name, reset_index
)
eKonf.save_data(self.data, filepath, verbose=self.verbose)
else:
log.info(f"{filepath} already exists.")
self.data = eKonf.load_data(filepath, verbose=self.verbose)
return self.data.copy()
def _load_series(
self,
series_ids=None,
series_name=None,
start_date=None,
end_date=None,
index_name="date",
reset_index=False,
**kwargs,
):
_dfs = []
for series_id in series_ids:
df = self.get(
series_id=series_id,
start_date=start_date,
end_date=end_date,
**kwargs,
)
if len(df.columns) == 1:
if series_name is None:
series_name = df.columns[0]
df.columns = [self.value_column]
df = eKonf.pipe(df, self.eval_columns)
if len(series_ids) > 1:
df["series_id"] = series_id
if series_name:
columns = {
col: col.replace(self.value_column, series_name)
for col in df.columns
if col.startswith(self.value_column)
}
df.rename(columns=columns, inplace=True)
if self.verbose:
print(df.head())
_dfs.append(df)
df = pd.concat(_dfs)
df.index.name = index_name
if reset_index:
df.index.name = series_name + "_" if series_name else "" + df.index.name
df = df.reset_index()
return df
| entelecheia/ekorpkit | ekorpkit/io/fetch/quandl.py | quandl.py | py | 7,608 | python | en | code | 4 | github-code | 36 |
21251148822 | #!/usr/bin/env python3
"""
Demo of using the pan and tilt servo kit.
This demo assumes you have purchased the Pixy pan and tilt kit. You can connect the servos in two places:
1. Just plug the servos into servo ports on the Arduino 3, 9, or 10 (NOT 11!). This demo uses 3 and 10.
2. Plug the servos in on the Pixy board as recommended here http://cmucam.org/projects/cmucam5/wiki/Assembling_pantilt_Mechanism
This code assumes you have connected the servos connected to the RedBot board NOT to the Pixy board.
"""
from pymata_aio.pymata3 import PyMata3
WIFLY_IP_ADDRESS = None  # Leave set as None if not using WiFly
# NOTE(review): the next line unconditionally overrides the None above, so
# the WiFly branch is always taken as written -- comment it out for USB/XBee.
WIFLY_IP_ADDRESS = "10.0.1.19"  # If using a WiFly on the RedBot, set the ip address here.
#WIFLY_IP_ADDRESS = "r01.wlan.rose-hulman.edu"  # If your WiFi network allows it, you can use the device hostname instead.
if WIFLY_IP_ADDRESS:
    # arduino_wait is a timer parameter to allow for the arduino to reboot when the connection is made which is NA for WiFly.
    board = PyMata3(arduino_wait=0, sleep_tune=0.0, ip_address=WIFLY_IP_ADDRESS)
else:
    # Use a USB cable to RedBot or an XBee connection instead of WiFly.
    COM_PORT = None  # Use None for automatic com port detection, or set if needed i.e. "COM7"
    board = PyMata3(sleep_tune=0.0, com_port=COM_PORT)
# Servo connection locations on the RedBot board.
PIN_PAN_SERVO = 3
PIN_TILT_SERVO = 10
def print_pixy_blocks(blocks):
    """Print a human-readable summary of the detected Pixy blocks."""
    print("Detected " + str(len(blocks)) + " Pixy blocks:")
    for idx, blk in enumerate(blocks):
        print("  block {}: sig: {} x: {} y: {} width: {} height: {}".format(
            idx, blk["signature"], blk["x"], blk["y"], blk["width"], blk["height"]))
def main():
    """Continuously sweep the pan and tilt servos while printing Pixy data.

    Runs forever (Ctrl-C to stop); requires a connected RedBot board with a
    Pixy camera and the servos on PIN_PAN_SERVO / PIN_TILT_SERVO.
    """
    board.keep_alive(period=2)
    board.pixy_init()
    board.servo_config(PIN_PAN_SERVO)
    board.servo_config(PIN_TILT_SERVO)
    while True:
        # Pan sweep: 90 -> 170 degrees, reading Pixy blocks at each step.
        for pan_deg in range(90, 170, 2):
            board.analog_write(PIN_PAN_SERVO, pan_deg)
            board.sleep(0.05)
            print_pixy_blocks(board.pixy_get_blocks())
        # Pan back: 170 -> 90 degrees.
        for pan_deg in range(170, 90, -2):
            board.analog_write(PIN_PAN_SERVO, pan_deg)
            board.sleep(0.05)
            print_pixy_blocks(board.pixy_get_blocks())
        # Test the tilt servo: up, down past center, and back to 90.
        for tilt_deg in range(90, 150, 2):
            board.analog_write(PIN_TILT_SERVO, tilt_deg)
            board.sleep(0.05)
            print_pixy_blocks(board.pixy_get_blocks())
        for tilt_deg in range(150, 30, -2):
            board.analog_write(PIN_TILT_SERVO, tilt_deg)
            board.sleep(0.05)
            print_pixy_blocks(board.pixy_get_blocks())
        for tilt_deg in range(30, 90, 2):
            board.analog_write(PIN_TILT_SERVO, tilt_deg)
            board.sleep(0.05)
            print_pixy_blocks(board.pixy_get_blocks())
        # Pan to the other side (90 -> 10) and back to center.
        for pan_deg in range(90, 10, -2):
            board.analog_write(PIN_PAN_SERVO, pan_deg)
            board.sleep(0.05)
            print_pixy_blocks(board.pixy_get_blocks())
        for pan_deg in range(10, 90, 2):
            board.analog_write(PIN_PAN_SERVO, pan_deg)
            board.sleep(0.05)
            print_pixy_blocks(board.pixy_get_blocks())

main()
| MrYsLab/pymata-aio | examples/sparkfun_redbot/pixy/simple_servo_sweep_config_1.py | simple_servo_sweep_config_1.py | py | 3,254 | python | en | code | 154 | github-code | 36 |
2847551483 | # QUs:https://leetcode.com/problems/delete-nodes-and-return-forest/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import defaultdict
class Solution(object):
    def delNodes(self, root, to_delete):
        """
        :type root: TreeNode
        :type to_delete: List[int]
        :rtype: List[TreeNode]

        One DFS pass: a node whose value is in to_delete vanishes and its
        surviving children become roots of new trees in the returned forest.
        """
        doomed = set(to_delete)
        forest = []

        def prune(node, has_parent):
            # Returns the node when it survives, else None so the caller
            # can unlink it.
            if node is None:
                return None
            if node.val in doomed:
                # Children are re-rooted (visited with has_parent=False),
                # then unlinked from the deleted node.
                prune(node.left, False)
                prune(node.right, False)
                node.left = None
                node.right = None
                return None
            # A surviving node with no surviving parent starts a new tree.
            if not has_parent:
                forest.append(node)
            node.left = prune(node.left, True)
            node.right = prune(node.right, True)
            return node

        prune(root, False)
        return forest
| mohitsinghnegi1/CodingQuestions | leetcoding qus/Delete Nodes And Return Forest.py | Delete Nodes And Return Forest.py | py | 1,780 | python | en | code | 2 | github-code | 36 |
print("ASSIGNMENT-1\nNAME-SUNPREET SINGH\nSID-21103118\n\n")

# QUESTION 1: average of three numbers read from the user.
print("Question 1\n")
iFirstNo=input("Enter first number\n")
iSecondNo=input("Enter second number\n")
iThirdNo=input("Enter third number\n")
# Average of three numbers
fAvg=(int(iFirstNo)+int(iSecondNo)+int(iThirdNo))/3
print("The average of three numbers is :")
print(fAvg)

# QUESTION 2: tax payable = 20% of (gross - standard - dependent deductions).
print("\n\nQuestion 2\n")
# Standard Deduction = $10000
iStdDed=10000
# Dependent Deduction per Dependent = $3000
iDependDed=3000
iDependNo=input("Enter the Number of Dependents\n")
# Total Dependent Deduction = per-dependent deduction * number of dependents
iTotalDependDed=iDependDed*int(iDependNo)
iGrossIncome=input("Enter your Gross Income\n")
# Taxable Income = Gross Income - (Standard Deduction + Total Dependent Deduction)
iTaxableIncome=int(iGrossIncome)-int(iStdDed)-int(iTotalDependDed)
# Tax rate = 20%
iTax=(float((iTaxableIncome)*20)/100)
print("Total Tax Payable =",iTax)

# QUESTION 3: collect student credentials into a list and print it.
print("\n\nQuestion 3\n")
iSID=int(input("Enter your SID\n"))
sName=input("Enter your name\n")
# Input of M,F,U for Male, Female and Unknown respectively
sGender=input("For Gender\nEnter M,F,U for Male,Female and Unknown respectively\n")
sCourseName=input("Enter your Course Name\n")
fCGPA=float(input("Enter your CGPA\n"))
indStudentCred=[iSID,sName,sGender,sCourseName,fCGPA]
print(indStudentCred)

# QUESTION 4: sort the marks of 5 students.
print("\n\nQuestion 4\n")
# Bug fix: marks are converted to int at input. The original kept them as
# strings, so .sort() ordered them lexicographically (e.g. "10" < "9").
iStudent1=int(input("Enter marks of Student 1\n"))
iStudent2=int(input("Enter marks of Student 2\n"))
iStudent3=int(input("Enter marks of Student 3\n"))
iStudent4=int(input("Enter marks of Student 4\n"))
iStudent5=int(input("Enter marks of Student 5\n"))
indSortMarks=[iStudent1,iStudent2,iStudent3,iStudent4,iStudent5]
indSortMarks.sort()
print(indSortMarks)

# QUESTION 5: list surgery on a list of 6 colors.
print("\n\nQuestion 5\n")
indColor=['Red','Green','White','Black','Pink','Yellow']
# a. Remove the 4th element (Black) from the list.
indColor.remove(indColor[3])
print("a.",indColor)
indColor=['Red','Green','White','Black','Pink','Yellow']
# b. Replace Black and Pink with a single Purple entry.
indColor[3:5]=['Purple']
print("\nb.",indColor)
74541858984 | from django import forms
from users.models import Project, Project_pictures, Comment, Donation, Reply
import datetime
class AddProjectForm(forms.ModelForm):
    """Form for creating a crowd-funding Project, with date sanity checks."""

    class Meta:
        model = Project
        fields = (
            "title",
            "details",
            "total_target",
            "start_date",
            "end_date",
            "category",
            "tags",
        )
        error_messages = {
            'total_target': {
                'min_value': "Invalid value, target must be greater than zero",
            },
        }
        widgets = {
            "tags": forms.TextInput(attrs={"data-role": "tagsinput", "name": "tags"}),
            "start_date": forms.DateInput(attrs={"type": "date"}),
            "end_date": forms.DateInput(attrs={"type": "date"}),
        }

    def clean(self):
        """Cross-field validation of the campaign date range.

        Bug fix: if a date field failed its own field-level validation it is
        missing from ``cleaned_data`` and ``get()`` returns None; the original
        comparisons then raised ``TypeError`` instead of reporting a normal
        form error. Each check now runs only when its dates are present.
        """
        cleaned_data = super().clean()
        start_date = cleaned_data.get("start_date")
        end_date = cleaned_data.get("end_date")
        today = datetime.date.today()
        if start_date and end_date and end_date <= start_date:
            self.add_error("end_date", "End date should be greater than start date.")
        if end_date and end_date <= today:
            self.add_error("end_date", "End date should be greater than today date.")
        if start_date and start_date < today:
            self.add_error("start_date", "Start date should be greater than today date.")
        return cleaned_data
class ImageForm(forms.ModelForm):
    """Upload form for project pictures; the widget accepts multiple files."""

    class Meta:
        model = Project_pictures
        fields = ("picture",)
        labels = {"picture": "Images"}
        widgets = {
            # NOTE(review): ClearableFileInput with multiple=True is rejected
            # by Django >= 3.2.19 / 4.x -- confirm the installed version.
            "picture": forms.ClearableFileInput(attrs={"multiple": True}),
        }
class CommentForm(forms.ModelForm):
    """Single-field form for posting a new comment on a project."""

    class Meta:
        model = Comment
        fields = ("content",)
        # Empty label: the placeholder text serves as the prompt.
        labels = {"content": ""}
        widgets = {
            "content": forms.TextInput(
                attrs={
                    "class": "comment-input",
                    "placeholder": "New Comment....",
                }
            ),
        }
class ReplyForm(forms.ModelForm):
    """Single-field form for replying to an existing comment."""

    class Meta:
        model = Reply
        fields = ("content",)
        # Empty label: the placeholder text serves as the prompt.
        labels = {"content": ""}
        widgets = {
            "content": forms.TextInput(
                attrs={
                    "class": "comment-input w-100",
                    "placeholder": "Add reply....",
                }
            ),
        }
class DonateForm(forms.ModelForm):
    """Form for donating an amount to a project."""

    class Meta:
        model = Donation
        fields = ("amount",)
        # Empty label: the input's placeholder "0" serves as the prompt.
        labels = {"amount": ""}
        widgets = {
            "amount": forms.NumberInput(
                attrs={
                    "class": "form-control",
                    "placeholder": "0",
                    "id":"amount"
                }
            ),
        }
| asaid-0/pyreads | projects/forms.py | forms.py | py | 2,835 | python | en | code | 0 | github-code | 36 |
25948284193 | import torch
# import torch.utils
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
import torch.optim as optim
# Fixed seed so the train/validation split is reproducible.
torch.manual_seed(1234)
# Fraction of the training set held out for validation.
ratio = 0.1
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Normalize each RGB channel from [0, 1] to [-1, 1].
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_set = CIFAR10(root="task2_data", train=True,
                    transform=transform, download=True)
test_set = CIFAR10(root="task2_data", train=False,
                   transform=transform, download=True)
# NOTE(review): val_set is never used below; also the two int() lengths may
# not sum to len(train_set) when it is not divisible -- confirm intent.
train_set, val_set = torch.utils.data.random_split(train_set,
                                                   [int((1 - ratio) * len(train_set)),
                                                    int(ratio * len(train_set))])
# batch_size=1 processes one image at a time (slow but simple).
test_dataset = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)
train_dataset = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=False)
class MyModel(torch.nn.Module):
    """Fully-connected 3072 -> 100 -> 100 -> 10 classifier for CIFAR-10."""

    # 32x32 RGB images flattened.
    num_features = 32 * 32 * 3
    # One output per CIFAR-10 class.
    num_categories = 10

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(MyModel.num_features, 100)
        self.fc2 = nn.Linear(100, 100)
        self.fc3 = nn.Linear(100, MyModel.num_categories)
        # NOTE(review): this internal optimizer/criterion pair is only used
        # by loss_func/backward below, which the training loop in this file
        # never calls (it builds its own SGD + CrossEntropyLoss). MSELoss
        # with integer class labels would fail anyway -- confirm whether
        # these helpers are dead code.
        self.optimizer = optim.SGD(self.parameters(), lr=0.001)
        self.criterion = torch.nn.MSELoss()
        # self.loss = 0.0

    def forward(self, x):
        # Flatten (N, 3, 32, 32) -> (N, 3072), then two ReLU hidden layers;
        # raw logits are returned (no softmax).
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def loss_func(self, local_data, local_labels):
        # Computes and stores the loss for a batch; also zeroes gradients.
        output = self.forward(local_data)
        y = local_labels
        self.optimizer.zero_grad()
        self.loss = self.criterion(output, y)
        return self.loss

    def backward(self, loss):
        # NOTE(review): the `loss` argument is ignored; the method always
        # backpropagates the loss stored by the last loss_func() call.
        self.loss.backward()
        self.optimizer.step()
model = MyModel().to(device)
my_loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

num_epochs = 200
for epoch in range(num_epochs):
    sum_loss = 0.0
    num_batches = 0
    for i, (images, labels) in enumerate(train_dataset):
        images = images.to(device)
        labels = labels.to(device)
        output = model(images)
        loss = my_loss(output, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        sum_loss += loss.item()
        num_batches += 1
    # Bug fix: average over the batch count. The original divided by the
    # last enumerate index `i`, which is off by one and raises
    # ZeroDivisionError when there is exactly one batch.
    print('Epoch [%d] Train Loss: %.4f' % (epoch + 1, sum_loss / max(num_batches, 1)))

# Evaluate accuracy on the test set (no gradients needed).
with torch.no_grad():
    correct = total = 0
    for images, labels in test_dataset:
        images = images.to(device)
        labels = labels.to(device)
        output = model(images)
        _, predicted_labels = torch.max(output, 1)
        correct += (predicted_labels == labels).sum()
        total += labels.size(0)
    # Bug fix: divide by the true sample count; the original used
    # (total + 1), which under-reports the accuracy.
    print('Percent correct: %.3f %%' % ((100 * correct) / max(total, 1)))
| bbrk13/Ceng499_THE1 | task2.py | task2.py | py | 2,848 | python | en | code | 0 | github-code | 36 |
12178499748 | from odoo.tests.common import TransactionCase
class TestUsers(TransactionCase):
    def test_name_search(self):
        """ Check name_search on user. """
        User = self.env['res.users']
        # 'vlad' appears only in test_user's login; 'lad' appears in the
        # names of both test_user and like_user.
        test_user = User.create({'name': 'Flad the Impaler', 'login': 'vlad'})
        like_user = User.create({'name': 'Wlad the Impaler', 'login': 'vladi'})
        other_user = User.create({'name': 'Nothing similar', 'login': 'nothing similar'})
        all_users = test_user | like_user | other_user

        # Intersect results with all_users so pre-existing users are ignored.
        res = User.name_search('vlad', operator='ilike')
        self.assertEqual(User.browse(i[0] for i in res) & all_users, test_user)

        res = User.name_search('vlad', operator='not ilike')
        self.assertEqual(User.browse(i[0] for i in res) & all_users, all_users)

        # Empty pattern: 'ilike' matches everyone, 'not ilike' matches no one.
        res = User.name_search('', operator='ilike')
        self.assertEqual(User.browse(i[0] for i in res) & all_users, all_users)

        res = User.name_search('', operator='not ilike')
        self.assertEqual(User.browse(i[0] for i in res) & all_users, User)

        res = User.name_search('lad', operator='ilike')
        self.assertEqual(User.browse(i[0] for i in res) & all_users, test_user | like_user)

        res = User.name_search('lad', operator='not ilike')
        self.assertEqual(User.browse(i[0] for i in res) & all_users, other_user)
| akradore/odoo-windows | server/odoo/addons/base/tests/test_res_users.py | test_res_users.py | py | 1,338 | python | en | code | 5 | github-code | 36 |
21131376574 | import json
import requests
FB = 'https://graph.facebook.com'
"""Python client library for the Facebook ads API."""
class AdsAPI(object):
    """A client for the Facebook Ads API.

    All methods issue a single Graph API request using the stored access
    token and return the decoded JSON response.
    """

    def __init__(self, ad_account_id, access_token=None, expires=None):
        self.ad_account_id = ad_account_id
        self.access_token = access_token
        # NOTE(review): `expires` is stored but never checked in this class
        # -- confirm whether token-expiry handling was intended.
        self.expires = expires
        # Common query-string payload sent with every request.
        self.payload = {'access_token': access_token}

    def get_ad_account_info(self, fields):
        """Return basic info for the ad account.

        NOTE(review): the `fields` parameter is accepted but never sent to
        the API -- presumably it should be added to the request params.
        """
        url = '%s/%s' % (FB, self.ad_account_id)
        r = requests.get(url, params=self.payload)
        return r.json()

    def get_ad_account_users(self):
        """Return the users of this ad account."""
        url = '%s/%s/users' % (FB, self.ad_account_id)
        r = requests.get(url, params=self.payload)
        return r.json()

    def get_broad_targeting_categories(self):
        """Return the broad targeting categories for this ad account."""
        url = '%s/%s/broadtargetingcategories' % (FB, self.ad_account_id)
        r = requests.get(url, params=self.payload)
        return r.json()

    def get_connection_objects(self):
        """Return the connection objects for this ad account."""
        url = '%s/%s/connectionobjects' % (FB, self.ad_account_id)
        r = requests.get(url, params=self.payload)
        return r.json()

    def get_ad_stats(self):
        """Return overall stats for this ad account."""
        url = '%s/%s/stats' % (FB, self.ad_account_id)
        r = requests.get(url, params=self.payload)
        return r.json()

    def get_ad_campaign_stats(self, campaign_ids=None):
        """Return campaign stats for the given list of campaign ids."""
        payload = {'campaign_ids': json.dumps(campaign_ids)}
        payload.update(self.payload)
        url = '%s/%s/adcampaignstats' % (FB, self.ad_account_id)
        r = requests.get(url, params=payload)
        return r.json()

    def get_ad_group_stats(self, adgroup_ids=None, stats_mode=None):
        """Return ad group stats for the given ad group ids."""
        payload = {'adgroup_ids': json.dumps(adgroup_ids),
                   'stats_mode': stats_mode}
        payload.update(self.payload)
        url = '%s/%s/adgroupstats' % (FB, self.ad_account_id)
        r = requests.get(url, params=payload)
        return r.json()

    def get_keyword_stats(self, adgroup_id):
        """Return keyword stats for the given ad group."""
        url = '%s/%s/keywordstats' % (FB, adgroup_id)
        r = requests.get(url, params=self.payload)
        return r.json()

    def get_conversion_stats(self, campaign_ids=None, adgroup_ids=None):
        """Return conversion stats for this ad account.

        NOTE(review): `campaign_ids` and `adgroup_ids` are accepted but
        never sent with the request -- the filters have no effect as written.
        """
        url = '%s/%s/adcampaignconversions' % (FB, self.ad_account_id)
        r = requests.get(url, params=self.payload)
        return r.json()

    def get_reach_estimate(self, currency, targeting_spec):
        """Return a reach estimate for the given currency and targeting spec."""
        payload = {'currency': currency,
                   'targeting_spec': json.dumps(targeting_spec)}
        payload.update(self.payload)
        url = '%s/%s/reachestimate' % (FB, self.ad_account_id)
        r = requests.get(url, params=payload)
        return r.json()

    def get_targeting_description(self, adgroup_id):
        """Return the human-readable targeting description of an ad group."""
        url = '%s/%s/targetingsentencelines' % (FB, adgroup_id)
        r = requests.get(url, params=self.payload)
        return r.json()

    def search(self, query, type, want_localized_name):
        """Search the Graph API for `query` among objects of `type`."""
        payload = {'q': query, 'type': type,
                   'want_localized_name': want_localized_name}
        payload.update(self.payload)
        url = '%s/search' % FB
        r = requests.get(url, params=payload)
        return r.json()

    def create_ad_creative(self):
        """Create an ad creative.

        NOTE(review): no creative spec is sent besides the access token --
        confirm the required fields against the Ads API documentation.
        """
        url = '%s/%s/adcreatives' % (FB, self.ad_account_id)
        r = requests.post(url, params=self.payload)
        return r.json()
| narrowcast/keywords | ads.py | ads.py | py | 3,407 | python | en | code | 0 | github-code | 36 |
def reorder_list(head):
    """Reorder the list L0->L1->...->Ln in place into L0->Ln->L1->Ln-1->...

    Three phases: locate the middle with a fast/slow runner, reverse the
    second half, then weave the two halves together. Returns None.
    """
    # Phase 1: `mid` stops at the last node of the first half.
    mid, runner = head, head.next
    while runner and runner.next:
        mid = mid.next
        runner = runner.next.next

    # Phase 2: detach everything after `mid` and reverse it.
    node = mid.next
    mid.next = None
    tail = None
    while node:
        node.next, tail, node = tail, node, node.next

    # Phase 3: alternate nodes from the front half and the reversed back half.
    front, back = head, tail
    while back:
        front_next, back_next = front.next, back.next
        front.next = back
        back.next = front_next
        front, back = front_next, back_next
20612540072 | import networkx as nx
import random
import matplotlib.pyplot as plt
def main():
    """Run both contagion models on a ring lattice, then sweep the Moore
    lattice rewiring probabilities via generate_plot()."""
    # Watts Strogatz ring graph: 100 nodes, degree 4, rewiring p=0
    # (i.e. a pure ring lattice).
    ring_lattice = nx.watts_strogatz_graph(100, 4, 0)
    # nx.draw(ring_lattice)
    # plt.draw()
    # plt.show()
    print("Ring simple ", simple_contagion(ring_lattice, 0))
    print("Ring complex", complex_contagion(ring_lattice, 0, 2))

    # 2 dimensional lattice with Moore neighborhoods (disabled demo).
    #moore_lattice = create_moore_lattice(100, 100, 0.000001, 1)
    # nx.draw(moore_lattice)
    # plt.draw()
    # plt.show()
    # print("Moore simple ", simple_contagion(moore_lattice, (0, 0)))
    # print("Moore complex", complex_contagion(moore_lattice, (0, 0), 2))

    generate_plot()
# simple contagion propagation
def simple_contagion(in_graph, start_node):
    """Simulate a simple contagion and return the number of time steps
    needed to infect the whole graph.

    The seed node and its entire ego network start infected; on every step
    each infected node infects one uniformly chosen neighbor (which may
    already be infected).
    """
    infected = {start_node} | set(in_graph.neighbors(start_node))
    steps = 0
    while len(infected) < in_graph.number_of_nodes():
        # Every carrier picks one random neighbor to infect this round.
        newly_hit = {
            random.choices(list(in_graph.neighbors(carrier)))[0]
            for carrier in infected
        }
        infected |= newly_hit
        steps += 1
    return steps
def complex_contagion(in_graph, start_node, a):
    """Simulate a complex contagion: a node becomes infected only when at
    least `a` of its neighbors are already infected.

    :param in_graph: networkx graph object to run the complex contagion on
    :param start_node: seed node; it and its whole ego network start infected
    :param a: number of infected neighbors required for activation
    :return: the counter value when the run stops (see note below)

    NOTE: the counter is incremented once per *node examined*, not per
    round, and the run stops at 99% coverage or when the counter exceeds
    500000 -- so the returned value is not a count of rounds.
    """
    infected_nodes = set()
    # pick the starting node.
    infected_nodes.add(start_node)
    # add the ego network of that node.
    infected_nodes = infected_nodes.union(set(in_graph.neighbors(start_node)))
    time_step_number = 0
    # TODO: not a good end condition for complex
    while len(infected_nodes) < in_graph.number_of_nodes() * 0.99 and time_step_number <= 500000:
        nodes_to_add = set()
        for node in infected_nodes:
            # Each infected node nominates one random neighbor...
            to_infect_node = random.choices(list(in_graph.neighbors(node)))[0]
            # ...which only activates with enough infected neighbors (>= a).
            if len(infected_nodes.intersection(set(in_graph.neighbors(to_infect_node)))) >= a:
                nodes_to_add.add(to_infect_node)
            time_step_number += 1
            if len(infected_nodes) >= in_graph.number_of_nodes() * 0.99 or time_step_number > 500000:
                break
        # print(nodes_to_add)
        # Newly activated nodes join only after the whole round.
        infected_nodes = infected_nodes.union(nodes_to_add)
        # print(time_step_number, ":", infected_nodes)
    return time_step_number
def generate_plot():
    """Sweep rewiring probabilities p = 10**i on a 50x50 Moore lattice and
    print the complex-contagion stopping counter for each.

    NOTE: despite the name, nothing is plotted -- results are printed.
    """
    # print("Moore simple ", simple_contagion(moore_lattice, (0, 0)))
    # print("Moore complex", complex_contagion(moore_lattice, (0, 0), 2))
    #for i in [-6, -4, -3, -2, -1, -0.9, -0.8, -0.7, -0.69, -0.68, -0.67, -0.66, -0.65, -0.64, -0.63, -0.62]:
    for i in [-6, -4, -3, -2, -1.9, -1.8, -1.7, -1.6, -1.5, -1.4, -1.39, -1.38, -1.37, -1.36, -1.35, -1.34, -1.33, -1.32, -1.31, -1.3, -1.2, -1.1, -1]:
        moore_lattice = create_moore_lattice(50, 50, 10**i, 1)
        print("Complex", i, complex_contagion(moore_lattice, (25, 25), 3))
def create_moore_lattice(m, n, p, radius=1):
    """Build an m x n grid graph with Moore neighborhoods of the given
    radius, then rewire each edge with probability p (small-world style).

    :param m: number of rows
    :param n: number of columns
    :param p: probability that each existing edge is rewired to a random
        non-neighbor
    :param radius: Chebyshev radius of the Moore neighborhood
    :return: the rewired networkx graph
    """
    moore_lattice = nx.generators.lattice.grid_2d_graph(m, n)
    # Add edges so every cell is connected to all cells within `radius`.
    for node in moore_lattice.nodes():
        nodes_to_add = set()
        for row in range(node[0] - radius, node[0] + radius + 1):
            for column in range(node[1] - radius, node[1] + radius + 1):
                if row < 0 or row > m - 1 or column < 0 or column > n - 1:
                    continue
                nodes_to_add.add((row, column))
        for add_node in nodes_to_add:
            if node != add_node:
                moore_lattice.add_edge(node, add_node)
    # Rewire: each edge incident to `node` is dropped with probability p and
    # replaced by an edge to a uniformly chosen non-neighbor.
    for node in moore_lattice.nodes():
        neighbors = list(moore_lattice.neighbors(node))
        edges_to_be_deleted = []
        for neighbor in neighbors:
            delete_edge = random.choices([True, False], [p, 1 - p])[0]
            if delete_edge:
                edges_to_be_deleted.append((node, neighbor))
        # Bug fixes: candidates are kept in a list because random.sample()
        # rejects sets on Python >= 3.11 (the original sampled from a set);
        # `other != node` prevents the original's possible self-loops, since
        # a node is not in its own neighbor list. (The original generator
        # also shadowed the parameter `n` as its loop variable.)
        unconnected = [other for other in moore_lattice.nodes()
                       if other not in neighbors and other != node]
        for _ in range(len(edges_to_be_deleted)):
            dest = random.choice(unconnected)
            unconnected.remove(dest)
            moore_lattice.add_edge(node, dest)
        for edge in edges_to_be_deleted:
            moore_lattice.remove_edge(edge[0], edge[1])
    return moore_lattice
if __name__ == '__main__':
main()
| nxlouie/complex_contagions | eecs444_project.py | eecs444_project.py | py | 4,950 | python | en | code | 2 | github-code | 36 |
26006767642 | from time import sleep
class Dog:
    """A simple dog with a name, age and sex."""

    def __init__(self, name, idade, sexo):
        self.name, self.idade, self.sexo = name, idade, sexo

    def latir(self):
        """Bark."""
        print("Au Au")

    def dormir(self):
        """Announce that the dog is sleeping."""
        print(self.name + " está dormindo...")
class Carro:
    """A car that must be started before it can drive."""

    def __init__(self, marca, cor):
        self.marca, self.cor = marca, cor
        self.is_started = False

    def start(self):
        """Turn the engine on (simulated with a 5 second delay)."""
        print("Ligando o carro...")
        sleep(5)
        print("Carro ligado!")
        self.is_started = True

    def run(self, km):
        """Drive `km` kilometres; refuses when the engine is off."""
        if not self.is_started:
            print("Você precisa ligar o carro primeiro!")
            return
        print(f"Andando {km} Kilômetros")
        sleep(10)
if __name__ == '__main__':
    # Demo of both classes.
    pedro = Dog("Pedro", 2, 'M')
    maria = Dog("Maria", 2, 'F')
    pedro.latir()
    maria.dormir()
    carro1 = Carro("BMW", "Branco")
    carro1.run(10)  # refused: is_started is still False here
    carro1.start()
    carro1.run(10)
| JolyneKo/Code | Python/Testes/Teste 07/index.py | index.py | py | 966 | python | pt | code | 0 | github-code | 36 |
19920196010 | #result gives the largest distance of values farthest
def solution(A):
    """Return the largest "distance score" over all positions of A.

    For each index i the score starts at the distance to the last element
    (N - 1 - i); scanning from the end back toward i, every later element
    equal to A[i] subtracts the number of differing elements seen so far.
    The maximum score over all i is returned (0 for an empty list).

    Fixes over the original: the builtin ``max`` was shadowed by a local int
    and then called (TypeError: 'int' object is not callable), and the
    Python-2-only ``xrange`` is replaced with ``range``.
    """
    N = len(A)
    curr_max = 0
    for i in range(N):
        key = A[i]
        buff_result = N - 1 - i
        window = 0
        for j in range(N - 1, i, -1):
            if A[j] == key:
                buff_result -= window
            else:
                window += 1
        curr_max = max(curr_max, buff_result)
    return curr_max


solution([1, 2, 3, 4])
| david19han/pythonPlayground | myprogram.py | myprogram.py | py | 498 | python | en | code | 0 | github-code | 36 |
74113986983 | from enum import Enum, IntEnum
from typing import List, Optional, Set
from odmantic import Model
import datetime
class EntryType (str, Enum):
    """The kind of record a user can create.

    A user can record three types of entries:
    1) a traumatic event, 2) a journal entry, or 3) an incident.
    """
    trauma = "Traumatic Event"
    journal = "Journal"
    incident = "Incident"
class IncidentSeverity (int, Enum):
    """Severity of an incident entry, rated 1-5 by the user according to
    how strongly the incident affected them."""
    uncomfortable = 1
    difficult_to_function = 2
    very_difficult_to_function = 3
    unable_to_function = 4
    required_medical_assistance = 5
class IncidentType (str, Enum):
    """What happened during an incident entry.

    An entry may carry more than one of these (see ``Entry.incident_type``,
    which holds a set).
    """
    panic_attack = "Panic Attack"
    nightmare = "Nightmare"
    excessive_anger_outburst = "Excessive Anger Outburst"
    disassociation = "Disassociation"
    disconnected = "Disconnected"
    flashback = "Flashback"
    auditory_flashback = "Auditory Flashback"
    intrusive_thoughts = "Intrusive Thoughts"
    hyper_arousal = "Hyper Arousal"
    other = "Other"
class Entry (Model):
    """Base data structure of the PAT API.

    All API calls perform CRUD operations against the record using the
    ``appID``; no personally identifiable data should be submitted.
    Depending on ``entry_type``, the optional journal/trauma/incident
    fields apply.
    """
    # Anonymous application identifier used as the record key.
    appID : str
    entry_type : EntryType
    entry_date : datetime.date
    journal_entry : Optional[str] = None
    trauma_entry : Optional[str] = None
    # Explicit default added for consistency with the sibling optional
    # fields (and so the field stays optional under pydantic v2, which
    # requires an explicit default).
    incident_severity : Optional[IncidentSeverity] = None
    incident_type : Optional[Set[IncidentType]] = None
    location : Optional[str] = None
    location_description : Optional[str] = None

    class Config:
        schema_extra = {
            "example": {
                "appID" : "00000001",
                "entry_type" : "Incident",
                "entry_date" : "2020-1-1",
                # "jouurnal" typo fixed in the example text below.
                "journal_entry" : "This is a journal entry which is an optional field and free form text entry",
                "trauma_entry" : "This is a trauma entry which is just like a journal entry, but flagged as a traumatic event to make it easier to share with a healthcare provider.",
                "incident_severity" : 3,
                # NOTE(review): "nightmare" is the enum *member name*; the
                # enum *value* is "Nightmare" -- confirm which form API
                # consumers expect in examples.
                "incident_type" : "nightmare",
                "location" : "111 Microsoft Way",
                "location_description" : "Work"
            }
        }
| blevinscm/patapi | app/data/models.py | models.py | py | 2,731 | python | en | code | 0 | github-code | 36 |
37350650967 | import pytest
from gpaw.utilities import compiled_with_libvdwxc
from ase.build import bulk
from gpaw import GPAW, Davidson, Mixer, PW
from gpaw.xc.libvdwxc import vdw_mbeef
from gpaw.test import gen
pytestmark = pytest.mark.skipif(not compiled_with_libvdwxc(),
reason='not compiled_with_libvdwxc()')
@pytest.mark.mgga
def test_vdw_libvdwxc_mbeef():
    """Regression test: mBEEF-vdW total energy of bulk Si via libvdwxc."""
    # PBEsol PAW setup generated on the fly for Si.
    setup = gen('Si', xcname='PBEsol')
    system = bulk('Si')
    # Small plane-wave cutoff and k-point grid keep the test cheap.
    calc = GPAW(mode=PW(200), xc=vdw_mbeef(),
                kpts=(2, 2, 2),
                nbands=4,
                convergence=dict(density=1e-6),
                mixer=Mixer(1.0),
                eigensolver=Davidson(4),
                setups={'Si': setup})
    system.calc = calc
    e = system.get_potential_energy()
    # Reference total energy (eV) from a previous known-good run.
    ref = -60.53662105617721
    err = abs(e - ref)
    print('e=%r ref=%r err=%r' % (e, ref, err))
    # It would be reasonable to put 1e-6 as tolerance,
    # but the value changes by 4e-4 depending on libxc version.
    # See https://gitlab.com/gpaw/gpaw/issues/161 .
    assert err < 1e-3, err
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/test/vdw/test_libvdwxc_mbeef.py | test_libvdwxc_mbeef.py | py | 1,075 | python | en | code | 0 | github-code | 36 |
33167792265 | from ast import List
import random
from typing import Callable, Tuple
import functools
class Genetics:
    """Simple real-valued genetic algorithm over 2-gene individuals.

    Individuals are (x, y) tuples drawn uniformly from ``domain``; fitness
    is ``evaluation_cb(x, y)``, maximised by default or minimised when
    ``minima`` is True. Selection is roulette-wheel, crossover swaps genes
    between parents, and mutation redraws one gene.
    """

    def __init__(self, domain: tuple, population_len: int, mutation_rate: float = 0, evaluation_cb: Callable[[float, float], float] = lambda x, y: 0, minima=False, seed=None) -> None:
        self.domain = domain
        self.length = population_len
        self.mutation_rate = mutation_rate
        self.evaluate = evaluation_cb
        self.population = []
        self.minima = minima
        # Without an explicit seed, draw one and print it ("Semilla") so a
        # run can be replayed.
        if seed is None:
            _seed = random.random()
            print("Semilla:", _seed)
            random.seed(_seed)
            return
        random.seed(seed)

    def gen_initial_population(self) -> List:
        """Create and store `length` random individuals; return them."""
        self.population = [(random.uniform(self.domain[0], self.domain[1]), (random.uniform(
            self.domain[0], self.domain[1]))) for i in range(self.length)]
        return self.population

    def __evaluate(self) -> List:
        # Negate scores when minimising so ascending sort always puts the
        # best individual last.
        minima = -1 if self.minima else 1
        evaluations = [(i, minima * self.evaluate(i[0], i[1]))
                       for i in self.population]
        evaluations.sort(key=lambda i: i[1])
        return evaluations

    def evaluate_population(self) -> List:
        """Return (individual, score) pairs sorted by internal fitness,
        with the original (un-negated) scores restored."""
        evaluations = self.__evaluate()
        return [(ev[0], -ev[1] if self.minima else ev[1]) for ev in evaluations]

    @staticmethod
    def format_evaluations(evaluations: List) -> str:
        """Render (individual, score) pairs, one per line."""
        res = ""
        for ev in evaluations:
            res += f"({ev[0][0]},{ev[0][1]})\t->\t{ev[1]}\n"
        return res

    @staticmethod
    def __normalize_evaluations(evaluations: List) -> List:
        """Scale scores so they sum to 1 (selection probabilities)."""
        total = functools.reduce(lambda res, e: res + e[1], evaluations, 0.0)
        return [(e[0], e[1] / total) for e in evaluations]

    @staticmethod
    def __accumulate_evaluations(normalized: List) -> List:
        """Turn normalized probabilities into a cumulative distribution."""
        accumulator = 0
        res = []
        for norm in normalized:
            accumulator += norm[1]
            res.append((norm[0], accumulator))
        return res

    @staticmethod
    def __choose_parents(accumulated: List) -> List:
        """Roulette-wheel selection of len(accumulated) parents, in pairs.

        Retries up to 10 times per pair to find two distinct parents;
        raises when the population has too little genetic diversity.
        """
        parents = []
        for _pair in range(0, len(accumulated), 2):
            tolerance = 10
            while tolerance > 0:
                r_a = random.uniform(0, 1)
                r_b = random.uniform(0, 1)
                parent_a = None
                parent_b = None
                for parent in accumulated:
                    if parent_a is None and parent[1] >= r_a:
                        parent_a = parent
                    if parent_b is None and parent[1] >= r_b and parent != parent_a:
                        parent_b = parent
                    if parent_a is not None and parent_b is not None:
                        break
                if parent_a is not None and parent_b is not None:
                    break
                tolerance -= 1
            if parent_b is None or parent_a is None:
                raise Exception(
                    "No hay suficiente material genético. Se están creando clones!!")
            parents.append(parent_a)
            parents.append(parent_b)
        return parents

    def __mutate(self, individual: Tuple) -> Tuple:
        """With probability `mutation_rate`, replace one randomly chosen
        gene with a fresh uniform draw from the domain."""
        r = random.uniform(0, 1)
        if r < self.mutation_rate:
            # Bug fix: randrange(0, 1) always returned 0, so the first gene
            # could never mutate; randrange(0, 2) picks either gene.
            left = random.randrange(0, 2)
            if left == 1:
                return (random.uniform(self.domain[0], self.domain[1]), individual[1])
            return (individual[0], random.uniform(self.domain[0], self.domain[1]))
        return individual

    def __combine_and_mutate(self, parents: List) -> List:
        """Cross each consecutive parent pair (swapping genes) and mutate
        both children."""
        children = []
        for i in range(0, len(parents), 2):
            parent_a = parents[i][0]
            parent_b = parents[i + 1][0]
            child_a = self.__mutate((parent_a[0], parent_b[1]))
            child_b = self.__mutate((parent_b[0], parent_a[1]))
            children.append(child_a)
            children.append(child_b)
        return children

    def next_generation(self) -> List:
        """Produce and install the next population via selection, crossover
        and mutation; return the new population."""
        evaluations = self.__evaluate()
        evaluations = Genetics.__normalize_evaluations(evaluations)
        evaluations = Genetics.__accumulate_evaluations(evaluations)
        parents = Genetics.__choose_parents(evaluations)
        children = self.__combine_and_mutate(parents)
        self.population = children
        return children
| Fairbrook/algoritmo_evolutivo | src/genetics.py | genetics.py | py | 4,295 | python | en | code | 0 | github-code | 36 |
8324961764 | n, m, v, u = map(int, input().split())
print(v, u)
mos = []
for i in range(m):
a = list(map(int, input().split()))
mos.append(a)
print(mos)
k = int(input())
if k != 0:
zat = list(map(int, input().split()))
else:
zat = []
print(zat)
ways = []
c = False
for i in mos:
if v in i:
try:
n = i[i.index(v)+1]
except:
n = i[i.index(v)-1]
if n not in zat:
ways.append([v, n])
print(ways)
print()
for i in ways:
for b in mos:
if i[-1] in b:
try :
n = b[b.index(i[-1])+1]
except:
n = b[b.index(i[-1])-1]
if n not in i and n not in zat:
i.append(n)
print(ways)
if i[-1] == u:
c = True
print('break')
break
if c == True:
print('YES')
else:
print('NO')
| gavt45/nti_2017_gornostay | wave_2/ilya_solutions/6-7/6.py | 6.py | py | 928 | python | en | code | 1 | github-code | 36 |
15954616766 | import os, sys
import pickle
sys.path.append("../../../")
from data.otb import *
from forecasters import load_forecaster
import torch as tc
if __name__ == "__main__":
    # Precompute forecaster responses for every OTB split and cache them to
    # disk as pickles so later stages can skip the (GPU) forward pass.
    dsld = loadOTB("../datasets/otb", 100, bb_format="xyxy")
    ld_names = ['val1', 'val2', 'test']
    lds = [dsld.val1, dsld.val2, dsld.test]
    root = "../datasets/otb_precomp"
    model = load_forecaster(None)
    model.cuda()
    model.eval()
    # extract response
    for ld_name, ld in zip(ld_names, lds):
        # One sub-directory per split, files numbered 0.pk, 1.pk, ...
        subroot = os.path.join(root, ld_name)
        os.makedirs(subroot, exist_ok=True)
        i = 0
        for xs, ys in ld:
            xs = [x.cuda() for x in xs]
            ys = ys.cuda()
            # Forward pass: predicted bounding boxes and their variances.
            yhs, yhs_var = model(xs)
            # Encode ground-truth boxes into the model's output space so the
            # cached triples are directly comparable.
            ys = model.baseF.encode_bb(xs, ys, model.baseF.opts)
            for y, yh, yh_var in zip(ys, yhs, yhs_var):
                fn = os.path.join(subroot, "%d.pk"%(i))
                print(fn)
                yh = yh.detach().cpu()
                yh_var = yh_var.detach().cpu()
                y = y.detach().cpu()
                # NOTE(review): the file handle from open() is never closed
                # explicitly; consider a 'with' block.
                pickle.dump((yh, yh_var, y), open(fn, "wb"))
                i += 1
| sangdon/PAC-confidence-set | demo/conf_set/otb/extract_response.py | extract_response.py | py | 1,182 | python | en | code | 7 | github-code | 36 |
12429439023 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import re
import string
import joblib
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
# Load data: two CSVs with the same schema, one per class.
data_fake = pd.read_csv("Fake.csv")
data_true = pd.read_csv("True.csv")
# Add label to data (0 = fake, 1 = true)
data_fake["class"] = 0
data_true['class'] = 1
# Remove last 10 rows from each dataset for manual testing.
# NOTE(review): the drop ranges hard-code the row counts of the standard
# Kaggle Fake/True news CSVs (23481 and 21417 rows); they will silently do
# the wrong thing on any other dataset -- confirm before reuse.
data_fake_manual_testing = data_fake.tail(10)
for i in range(23480, 23470, -1):
    data_fake.drop([i], axis=0, inplace=True)
data_true_manual_testing = data_true.tail(10)
for i in range(21416, 21406, -1):
    data_true.drop([i], axis=0, inplace=True)
# Merge data and shuffle so classes are interleaved before the split.
data_merge = pd.concat([data_fake, data_true], axis=0)
data_merge = data_merge.sample(frac=1).reset_index(drop=True)
# Clean data
def clean_text(text):
    """Normalise a news article for TF-IDF vectorisation.

    Lowercases the text, strips URLs, HTML tags, bracketed references,
    punctuation, newlines and digit-bearing tokens, then collapses the
    remaining non-word characters to spaces.
    """
    text = text.lower()
    # Strip URLs before any punctuation is touched: the original ran the \W
    # pass first, which destroyed ':' '/' '.' and made this pattern dead
    # code, and its 'www.\.' alternative was a typo for 'www\.'.
    text = re.sub(r'https?://\S+|www\.\S+', '', text)
    text = re.sub(r'<.*?>+', '', text)                           # HTML tags
    text = re.sub(r'\[.*?\]', '', text)                          # bracketed refs, e.g. [1]
    text = re.sub(r'[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub(r'\n', ' ', text)                              # newlines -> spaces
    text = re.sub(r'\w*\d\w*', '', text)                         # tokens containing digits
    text = re.sub(r'\W', ' ', text)                              # collapse leftovers to spaces
    return text
data_merge['text'] = data_merge['text'].apply(clean_text)
# Split data into train and test sets (75/25, fixed seed for reproducibility)
x = data_merge['text']
y = data_merge['class']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=42)
# Vectorize data: fit TF-IDF on the training fold only, then persist it so
# the serving side can transform new articles identically.
vectorizer = TfidfVectorizer()
xv_train = vectorizer.fit_transform(x_train)
xv_test = vectorizer.transform(x_test)
joblib.dump(vectorizer, 'vectorizer.pkl')
# Train models: fit each candidate, save it, and keep the one with the best
# held-out accuracy in best_model.
models = {'Logistic Regression': LogisticRegression(), 'Decision Tree': DecisionTreeClassifier(),
          'Gradient Boosting': GradientBoostingClassifier(), 'Random Forest': RandomForestClassifier()}
best_model = None
best_score = 0
for name, model in models.items():
    model.fit(xv_train, y_train)
    joblib.dump(model, f'{name}.pkl')
    score = model.score(xv_test, y_test)
    if score > best_score:
        best_score = score
        best_model = model
    print(f"{name} score: {score}")
    pred = model.predict(xv_test)
    print(classification_report(y_test, pred))
# Use best model for manual testing
def out_label(n):
    """Map a class id to its verdict string (0 -> fake, anything else -> genuine)."""
    return "FAKE NEWS" if n == 0 else "IT IS NOT"
| Pj-develop/ML_FAKE_NEW_DETECTION | best_chooser.py | best_chooser.py | py | 2,728 | python | en | code | 0 | github-code | 36 |
34325207808 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
#Author: Read AUTHORS file.
#License: Read COPYING file.
import xml.etree.cElementTree as etree
from os import path
from pkg import pkg
from constants import const
from ..lib import eventHandler, listdir, getfile, ratioCalc
class pkgdir(object):
    """Scans a directory of packages and keeps the parsed pkg objects by name.

    Raises onProcessing events while loading so callers can display progress;
    onError is exposed for subscribers but not raised here.
    """

    def __init__(self, actCode):
        self.__clear()
        self.onError = eventHandler()        # (ActionCode, Error code, data)
        self.onProcessing = eventHandler()   # (ActionCode, ProcessRatio, data)
        self.__actCode = actCode

    def __clear(self):
        """Reset the target path and the name -> pkg mapping."""
        self.__path = ""
        self.__packages = {}  # {"pkg name": pkg obj, ...}

    def setTarget(self, p):
        """Point this instance at a new package directory."""
        self.__clear()
        self.__path = path.normpath(p)

    def loadList(self):
        """Return the entries under the target directory, or False when none."""
        entries = listdir(self.__path)
        return entries if entries else False

    def loadPackageInfo(self, d):
        """Parse one package's info file under sub-directory *d*; True on success."""
        xml = getfile(path.join(self.__path, d, const.PACKAGE_INFO_FILE))
        if not xml:
            return False
        package = pkg(xml)
        if not package.right:
            return False
        self.__packages[package.n] = package
        return True

    def load(self, p):
        """Parse every package under *p*, reporting progress per package.

        Any unreadable or invalid package aborts the load, clears all state
        and returns False; returns True when everything parsed.
        """
        self.__clear()
        self.__path = path.normpath(p)
        pkgdirs = listdir(self.__path)
        if not pkgdirs:
            return False
        total = len(pkgdirs)
        for d in pkgdirs:
            xml = getfile(path.join(self.__path, d, const.PACKAGE_INFO_FILE))
            if not xml:
                self.__clear()
                return False
            package = pkg(xml)
            if not package.right:
                self.__clear()
                return False
            self.__packages[package.n] = package
            self.onProcessing.raiseEvent(
                self.__actCode,
                ratioCalc(total, len(self.__packages)),
                package.n,
            )
        return True

    def getPackages(self):
        """Expose the parsed name -> pkg mapping."""
        return self.__packages
| pardus-anka/paso | src/engine/packages/packages.py | packages.py | py | 2,158 | python | en | code | 0 | github-code | 36 |
13910036602 | """Module to write data classes information."""
# Python packages
import yaml
import logging as log
# write_data_class ------------------------------------------------------------
def write_data_class(data, file='data.yaml'):
    """Dump every attribute of ``data`` to a YAML file.

    Args:
        data: object with __dict__ attribute.
        file (:obj:`str`): filename.
    """
    log.debug('Write data')
    with open(file, 'w') as out:
        yaml.dump(vars(data), out, default_flow_style=False)
# --------------------------------------------------------------------------- #
# write_solution_as_csv -------------------------------------------------------
def write_solution_as_csv(
        data_dict, header=True, mode='w', file='data.csv', dec=".", sep=";"):
    """Write a solution dictionary to a CSV file as one row (plus header).

    Numeric values have their decimal point replaced by ``dec``; ``None``
    becomes the literal string ``"None"``; any other value is coerced with
    ``str`` (the original passed non-string objects straight to ``join``,
    which raised ``TypeError``).

    Args:
        data_dict (:obj:`dict`): dictionary.
        header (:obj:`bool`): if True write the header.
        mode (:obj:`str`): write mode: 'w' or 'a'.
        file (:obj:`str`): filename.
        dec (:obj:`str`): decimal point.
        sep (:obj:`str`): column separator.
    """
    def _cell(v):
        # One CSV cell: localise the decimal mark for numbers, spell out None.
        if isinstance(v, (int, float)):
            return str(v).replace(".", dec)
        return str(v) if v is not None else "None"

    with open(file, mode) as f:
        if header:
            f.write(sep.join(str(k) for k in data_dict.keys()) + '\n')
        f.write(sep.join(_cell(v) for v in data_dict.values()) + '\n')
# --------------------------------------------------------------------------- #
| jorgerodriguezveiga/firedecomp | firedecomp/utilities/write.py | write.py | py | 1,536 | python | en | code | 0 | github-code | 36 |
33102651292 | from datetime import timedelta, datetime
import os
from airflow import DAG
from google.cloud import storage
from airflow.utils.dates import days_ago
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash import BashOperator
from airflow.decorators import task, dag
from airflow.operators.email import EmailOperator
from airflow.providers.google.cloud.operators.dataproc import (
DataprocCreateClusterOperator,
DataprocDeleteClusterOperator,
DataprocSubmitJobOperator,
ClusterGenerator
)
from airflow.providers.google.cloud.sensors.dataproc import DataprocJobSensor
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
# Pipeline-wide configuration: GCP project, bucket and Dataproc settings.
GOOGLE_CONN_ID = "google_cloud_default"
PROJECT_ID="data-engineering-rj"
BUCKET_NAME = 'fhvhv-data-lake'
CLUSTER_NAME = 'fhvhvcluster'
REGION = 'us-central1'
PYSPARK_FILENAME ='spark_processing.py'
# GCS location the job script is uploaded to (see upload_to_gcs below).
PYSPARK_URI = f'gs://fhvhv-data-lake/spark-job/{PYSPARK_FILENAME}'
# NOTE(review): LOCAL_PATH is defined but never used later in this file.
LOCAL_PATH = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
BIGQUERY_TABLE = 'data-engineering-rj.fhvhv_analysis.fhvhv_trips_data'
PROCESSED_DATA_PATH = 'gs://fhvhv-data-lake/output_fhv_data/trips_data/*.parquet'
'''
Process:
- create a dataproc cluster
- upload a pyspark file to gcs bucket
- submit spark job to dataproc cluster
- excute pyspark job(load data from gcs-> transform data -> submit data to GCS -> Submit data to bigquery )
- delete the cluster
- submit processed data from GCS to BigQuery
'''
# Dataproc job spec: run the uploaded PySpark script on the named cluster.
PYSPARK_JOB = {
    "reference":{"project_id":PROJECT_ID},
    "placement":{"cluster_name":CLUSTER_NAME},
    "pyspark_job":{"main_python_file_uri":PYSPARK_URI}
}
# Small 1-master/2-worker cluster; .make() renders the dict Dataproc expects.
CLUSTER_CONFIG = ClusterGenerator(
    project_id = PROJECT_ID,
    zone="us-central1-a",
    master_machine_type="n1-standard-2",
    worker_machine_type="n1-standard-2",
    num_workers=2,
    worker_disk_size=40,
    master_disk_size=30,
    storage_bucket=BUCKET_NAME,
).make()
# Default arguments applied to every task in the DAG.
default_args = {
    'owner': 'Rohit Joshi',
    'depends_on_past': False,
    'email_on_failure': True,
    'email_on_retry': False,
    'email_on_success':True,  # NOTE(review): duplicated below with False; in a
                              # dict literal the later entry wins, so success
                              # emails are effectively OFF.
    'retries': 1,
    'start_date': days_ago(1),
    'retry_delay': timedelta(minutes=3),
    'email_on_success': False,
    # NOTE(review): schedule_interval belongs on the DAG() constructor, not in
    # default_args -- presumably ignored here; verify against the Airflow docs.
    'schedule_interval':'@once',
    'email': ['rohitjoshi9july@gmail.com']
}
@task(task_id="upload_pyspark_file")
def upload_to_gcs(bucket_name, filename):
    """Upload the local PySpark job script to ``gs://<bucket_name>/spark-job/<filename>``.

    Args:
        bucket_name: target GCS bucket name.
        filename: script file name (interpolated into both paths).
    """
    # Bug fix: the original f-strings contained a literal placeholder and
    # never interpolated ``filename``, so the argument was ignored.
    # NOTE(review): base directory assumed to be /opt/ -- confirm against the
    # deployment layout (AIRFLOW_HOME defaults to /opt/airflow/ above).
    local_path = f"/opt/{filename}"
    target_path = f"spark-job/{filename}"
    # Stream large uploads in 5 MB multipart chunks.
    storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024
    storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024
    client = storage.Client()
    bucket = client.bucket(bucket_name)
    blob = bucket.blob(target_path)
    blob.upload_from_filename(local_path)
    print("file added successfully")
with DAG("Spark_FHVHV_ETL", default_args=default_args) as dag:
    # Path of the Google service-account credentials file.
    # Bug fix: the env-var key previously ended with a trailing space
    # ('GOOGLE_APPLICATION_CREDENTIALS '), so the Google SDK never saw it.
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/home/rohit/.gc/de-cred.json'
    start_pipeline = DummyOperator(
        task_id="start_pipeline",
        dag=dag
    )
    # Create the Dataproc cluster the Spark job will run on.
    create_cluster = DataprocCreateClusterOperator(
        task_id="create_dataproc_cluster",
        project_id=PROJECT_ID,
        cluster_config=CLUSTER_CONFIG,
        region=REGION,
        cluster_name=CLUSTER_NAME,
        priority_weight=4
    )
    # Upload the PySpark script to GCS (TaskFlow task defined above).
    upload_pyspark_file = upload_to_gcs(BUCKET_NAME, PYSPARK_FILENAME)
    # Submit the PySpark job to the cluster.
    execute_pyspark_task = DataprocSubmitJobOperator(
        task_id="submit_pyspark_job",
        job=PYSPARK_JOB,
        region=REGION,
        project_id=PROJECT_ID,
        priority_weight=2
    )
    # Tear the cluster down once processing is finished.
    delete_cluster = DataprocDeleteClusterOperator(
        task_id="delete_dataproc_cluster",
        project_id=PROJECT_ID,
        cluster_name=CLUSTER_NAME,
        region=REGION,
        priority_weight=1
    )
    # Load the processed parquet output from GCS into BigQuery.
    gcs_to_bq = GCSToBigQueryOperator(
        task_id="submit_processed_data_to_bigquery",
        bucket=BUCKET_NAME,
        source_objects=[PROCESSED_DATA_PATH],
        destination_project_dataset_table=BIGQUERY_TABLE,
        source_format='parquet',
        autodetect=True,
        cluster_fields=['trip_month']
    )
    finish_pipeline = DummyOperator(
        task_id="finish_pipeline",
        dag=dag
    )
    start_pipeline >> create_cluster >> upload_pyspark_file >> execute_pyspark_task >> delete_cluster >> gcs_to_bq >> finish_pipeline
| Rohitjoshi07/FHVDataAnalysis | airflow/dags/spark-dataproc.py | spark-dataproc.py | py | 4,553 | python | en | code | 0 | github-code | 36 |
15991440245 | import torch.nn as nn
import torch
import torch.nn.functional as F
up_kwargs = {'mode': 'bilinear', 'align_corners': True}
norm_layer = nn.BatchNorm2d
class Conv2dReLU(nn.Sequential):
    """Conv2d followed by BatchNorm2d (or Identity) and an in-place ReLU."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        padding=0,
        stride=1,
        use_batchnorm=True,
    ):
        # The conv carries no bias when BN follows it (BN's shift subsumes it).
        modules = (
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size,
                stride=stride,
                padding=padding,
                bias=not use_batchnorm,
            ),
            nn.BatchNorm2d(out_channels) if use_batchnorm else nn.Identity(),
            nn.ReLU(inplace=True),
        )
        super().__init__(*modules)
class SCSEAttention(nn.Module):
    """Concurrent spatial & channel squeeze-excitation (scSE) gating."""

    def __init__(self, in_channels, reduction=16):
        super().__init__()
        # Channel gate: global pool -> bottleneck 1x1 convs -> sigmoid.
        self.cSE = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, in_channels // reduction, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels // reduction, in_channels, 1),
            nn.Sigmoid(),
        )
        # Spatial gate: single-channel 1x1 conv mask.
        self.sSE = nn.Sequential(nn.Conv2d(in_channels, 1, 1), nn.Sigmoid())

    def forward(self, x):
        channel_gated = x * self.cSE(x)
        spatial_gated = x * self.sSE(x)
        return channel_gated + spatial_gated
class DecoderBlock(nn.Module):
    """Upsample x2, concatenate the encoder skip, then two 3x3 conv stages,
    with optional scSE attention before and after the convolutions."""

    def __init__(
        self,
        in_channels,
        skip_channels,
        out_channels,
        use_batchnorm=True,
        use_attention=False,
    ):
        super().__init__()
        fused_channels = in_channels + skip_channels
        self.conv1 = Conv2dReLU(
            fused_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        self.conv2 = Conv2dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        self.use_attention = use_attention
        if use_attention:
            self.attention1 = SCSEAttention(in_channels=fused_channels)
            self.attention2 = SCSEAttention(in_channels=out_channels)

    def forward(self, x, skip=None):
        # Double the spatial resolution, then fuse the encoder feature map.
        x = F.interpolate(x, scale_factor=2, **up_kwargs)
        if skip is not None:
            x = torch.cat([x, skip], dim=1)
        if self.use_attention:
            x = self.attention1(x)
        x = self.conv2(self.conv1(x))
        if self.use_attention:
            x = self.attention2(x)
        return x
class CenterBlock(nn.Sequential):
    """Two 3x3 Conv2dReLU stages applied at the bottleneck (no resizing)."""

    def __init__(self, in_channels, out_channels, use_batchnorm=True):
        super().__init__(
            Conv2dReLU(
                in_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                use_batchnorm=use_batchnorm,
            ),
            Conv2dReLU(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                use_batchnorm=use_batchnorm,
            ),
        )
class UNetHead(nn.Module):
    """U-Net style decoder head: upsamples the deepest encoder feature through
    a chain of DecoderBlocks (fusing skips along the way) and projects to
    ``num_classes`` with a 1x1 conv.

    ``in_channels`` lists the encoder stage channel counts (shallow to deep);
    decoder widths are fixed at one quarter of the selected stages.
    """
    # NOTE(review): the mutable default in_index=[0, 1, 2, 3] is shared across
    # instances; harmless as long as callers never mutate it.
    def __init__(
        self,
        in_channels,
        num_classes=6,
        n_blocks=4,
        use_batchnorm=True,
        use_attention=False,
        center=False,
        in_index=[0, 1, 2, 3],
    ):
        super(UNetHead, self).__init__()
        self.in_index = in_index
        # One decoder stage per selected encoder stage, at 1/4 width.
        decoder_channels = [in_channels[i] // 4 for i in self.in_index]
        if n_blocks != len(decoder_channels):
            raise ValueError(
                "Model depth is {}, but you provide `decoder_channels` for {} blocks.".format(
                    n_blocks, len(decoder_channels)
                )
            )
        encoder_channels = in_channels[::-1]  # reverse channels to start from head of encoder
        # computing blocks input and output channels
        head_channels = encoder_channels[0]
        in_channels = [head_channels] + list(decoder_channels[:-1])
        skip_channels = list(encoder_channels[1:]) + [0]  # last stage has no skip
        out_channels = decoder_channels
        if center:
            self.center = CenterBlock(
                head_channels, head_channels, use_batchnorm=use_batchnorm
            )
        else:
            self.center = nn.Identity()
        # combine decoder keyword arguments
        kwargs = dict(use_batchnorm=use_batchnorm, use_attention=use_attention)
        blocks = [
            DecoderBlock(in_ch, skip_ch, out_ch, **kwargs)
            for in_ch, skip_ch, out_ch in zip(in_channels, skip_channels, out_channels)
        ]
        self.blocks = nn.ModuleList(blocks)
        self.head = nn.Conv2d(out_channels[-1], num_classes, kernel_size=1)
    def _transform_inputs(self, inputs):
        # Select the encoder stages this head consumes (list or single index).
        if isinstance(self.in_index, (list, tuple)):
            inputs = [inputs[i] for i in self.in_index]
        elif isinstance(self.in_index, int):
            inputs = inputs[self.in_index]
        return inputs
    def forward(self, features):
        features = self._transform_inputs(features)
        features = features[::-1]  # reverse channels to start from head of encoder
        head = features[0]
        skips = features[1:]
        x = self.center(head)
        for i, decoder_block in enumerate(self.blocks):
            skip = skips[i] if i < len(skips) else None
            x = decoder_block(x, skip)
        x = self.head(x)
        return x
| zyxu1996/Efficient-Transformer | models/head/unet.py | unet.py | py | 5,457 | python | en | code | 67 | github-code | 36 |
20102072281 | import cv2
import numpy as np
import os
import time
# Capture training images for each gesture label from the default webcam.
# Frames are captured inside the white 350x350 rectangle; 'a' toggles saving
# on/off, 'q' quits early.  count starts at 300 and capture stops at 400, so
# at most 100 images (301.jpg .. 400.jpg) are written per label.
label_names = ['scissors']
for label_name in label_names:
    IMG_SAVE_PATH = 'test_images2'
    IMG_CLASS_PATH = os.path.join(IMG_SAVE_PATH, label_name)
    # Create the output directories, tolerating re-runs.
    try:
        os.mkdir(IMG_SAVE_PATH)
    except FileExistsError:
        pass
    try:
        os.mkdir(IMG_CLASS_PATH)
    except FileExistsError:
        pass
    cap = cv2.VideoCapture(0)
    start = False  # toggled by the 'a' key; only save frames while True
    count = 300
    while True:
        ret, frame = cap.read()
        if not ret:
            continue
        if count == 400:
            break
        # Draw the capture region so the user can position their hand.
        cv2.rectangle(frame, (50, 50), (400, 400), (255, 255, 255), 2)
        if start:
            roi = frame[50:400, 50:400]
            save_path = os.path.join(IMG_CLASS_PATH, '{}.jpg'.format(count+1))
            cv2.imwrite(save_path, roi)
            count += 1
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, f"Collecting {count}", (5,50), font, 0.7, (0, 255, 255), 2, cv2.LINE_AA)
        cv2.imshow("Collecting images", frame)
        time.sleep(0.2)
        k = cv2.waitKey(10)
        if k == ord('a'):
            start = not start
        if k == ord('q'):
            break
    print(f"\n{count} images saved to {IMG_CLASS_PATH}")
    cap.release()
cv2.destroyAllWindows() | miir2709/rock-paper-scissors | imageGenerate/captureImage.py | captureImage.py | py | 1,197 | python | en | code | 0 | github-code | 36 |
70322815465 | """
TACCJM Hug Server
Server for managing instances of TACCJobManager classes via http endpoints
using the hug framework.
TODO: Add more extensive logging
"""
import pdb
import os
import hug
import falcon
import logging
import json
from typing import Union, List, Tuple
from taccjm.TACCJobManager import TACCJobManager, TJMCommandError
__author__ = "Carlos del-Castillo-Negrete"
__copyright__ = "Carlos del-Castillo-Negrete"
__license__ = "MIT"
# Initialize server logger
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
# Dictionary containing all job manager instances being managed
# Note there could be multiple instance if managing more than one system
JM = {}
# NOTE(review): the next four handlers all reuse the name
# handle_custom_exceptions, so each def shadows the previous module attribute.
# hug's @hug.exception decorator appears to register the handler at decoration
# time, which would make the shadowing harmless -- verify against hug docs.
@hug.exception(ValueError)
def handle_custom_exceptions(exception):
    """Map ValueError (bad input) to HTTP 400 Bad Request."""
    # Raise 400 Bad Request if invalid data type passed
    err = falcon.HTTPError(falcon.HTTP_400, "BadRequest", str(exception))
    logger.error(str(err))
    raise err
@hug.exception(FileNotFoundError)
def handle_custom_exceptions(exception):
    """Map FileNotFoundError (missing resource) to HTTP 404 Not Found."""
    # Raise 404 not found error if local or remote path don't exist
    err = falcon.HTTPError(falcon.HTTP_404, "NotFound", str(exception))
    logger.error(str(err))
    raise err
@hug.exception(PermissionError)
def handle_custom_exceptions(exception):
    """Map PermissionError (no access to resource) to HTTP 403 Forbidden."""
    # Raise 403 forbidden if dont have permissions to access either path
    err = falcon.HTTPError(falcon.HTTP_403, "Forbidden", str(exception))
    logger.error(str(err))
    raise err
@hug.exception(TJMCommandError)
def handle_custom_exceptions(exception):
    """Map TJMCommandError (remote command failure) to HTTP 500."""
    # Log TJMCommandError message
    logger.error(f"{str(exception)}")
    # Raise 500 internal server error for unanticipated error
    err = falcon.HTTPError(falcon.HTTP_500, "Command error", str(exception))
    logger.error(str(err))
    raise err
def _check_init(jm_id):
    """Raise HTTP 404 when no TACCJobManager was initialized under jm_id."""
    if jm_id not in JM.keys():
        raise falcon.HTTPError(falcon.HTTP_404,
                "jm_error", f"TACCJM {jm_id} does not exist.")
@hug.post('/init')
def init_jm(jm_id:str, system:str, user:str, psw:str, mfa:str):
    """Initialize a TACCJobManager instance under jm_id.

    Returns a summary dict on success; 404 for an unknown TACC system, 401
    for a failed login, 409 when jm_id is already taken.
    """
    global JM
    if jm_id not in JM.keys():
        try:
            logger.info(f"INIT - Initializing TACCJM {jm_id}.")
            JM[jm_id] = TACCJobManager(system, user=user, psw=psw, mfa=mfa,
                    working_dir=jm_id)
            logger.info(f"SUCCESS - TACCJM {jm_id} initialized successfully.")
            ret = {'jm_id':jm_id,
                   'sys':JM[jm_id].system,
                   'user':JM[jm_id].user,
                   'apps_dir':JM[jm_id].apps_dir,
                   'jobs_dir':JM[jm_id].jobs_dir,
                   'scripts_dir':JM[jm_id].scripts_dir,
                   'trash_dir':JM[jm_id].trash_dir}
            return ret
        except ValueError as v:
            # Raise Not Found HTTP code for non TACC system
            msg = f"Unable to initialize {jm_id} on {system} for {user}: {v}"
            raise falcon.HTTPError(falcon.HTTP_404, "jm_error", msg)
        except Exception as e:
            # Raise Unauthorized HTTP code for bad login to system
            msg = f"Unable to initialize {jm_id} on {system} for {user}: {e}"
            raise falcon.HTTPError(falcon.HTTP_401, "jm_error", msg)
    else:
        # Raise Conflict HTTP error
        raise falcon.HTTPError(falcon.HTTP_409,
                "jm_error", f"TACCJM {jm_id} already exists.")
@hug.get('/list')
def list_jm():
    """Summarize every initialized job manager on this server."""
    out = []
    for jm in JM.keys():
        out.append({'jm_id':jm, 'sys':JM[jm].system, 'user':JM[jm].user,
            'apps_dir':JM[jm].apps_dir, 'jobs_dir':JM[jm].jobs_dir})
    return out
@hug.get('/{jm_id}')
def get_jm(jm_id:str):
    """Return the configuration summary for one job manager (404 if unknown)."""
    _check_init(jm_id)
    jm = {'jm_id':jm_id, 'sys':JM[jm_id].system, 'user':JM[jm_id].user,
          'apps_dir':JM[jm_id].apps_dir, 'jobs_dir':JM[jm_id].jobs_dir,
          'trash_dir':JM[jm_id].trash_dir, 'scripts_dir':JM[jm_id].scripts_dir}
    return jm
@hug.get('/{jm_id}/queue')
def get_queue(jm_id:str, user:str=None):
    """Show the SLURM job queue for a user on the managed system."""
    _check_init(jm_id)
    return JM[jm_id].showq(user=user)
@hug.get('/{jm_id}/allocations')
def allocations(jm_id:str):
    """List all allocations for the user on the managed system."""
    _check_init(jm_id)
    return JM[jm_id].get_allocations()
@hug.get('/{jm_id}/files/list')
def list_files(jm_id:str, path:str="."):
    """List files at a path on the remote system."""
    _check_init(jm_id)
    logger.info(f'Getting files from {path}')
    files = JM[jm_id].list_files(path=path)
    # Drop the raw `ls` strings; clients only need the structured fields.
    _ = [f.pop('ls_str') for f in files]
    return files
@hug.get('/{jm_id}/files/peak')
def peak_file(jm_id:str, path:str, head:int=-1, tail:int=-1):
    """Peek at a remote file: first/last lines via head/tail."""
    _check_init(jm_id)
    return JM[jm_id].peak_file(path, head=head, tail=tail)
@hug.put('/{jm_id}/files/upload')
def upload(jm_id:str, local_path:str, remote_path:str, file_filter:str='*'):
    """Upload a local file or folder to the remote system."""
    _check_init(jm_id)
    JM[jm_id].upload(local_path, remote_path, file_filter=file_filter)
@hug.get('/{jm_id}/files/download')
def download(jm_id:str, remote_path:str, local_path:str, file_filter:str='*'):
    """Download a remote file or folder to a local path."""
    _check_init(jm_id)
    JM[jm_id].download(remote_path, local_path, file_filter=file_filter)
@hug.delete('/{jm_id}/files/remove')
def remove(jm_id:str, remote_path:str):
    """Remove a remote file (moved to the JM's trash directory, not deleted)."""
    _check_init(jm_id)
    JM[jm_id].remove(remote_path)
@hug.put('/{jm_id}/files/restore')
def restore(jm_id:str, remote_path:str):
    """Restore a previously removed file from trash to its original location."""
    _check_init(jm_id)
    JM[jm_id].restore(remote_path)
@hug.put('/{jm_id}/files/write')
def write(jm_id:str, data, remote_path:str):
    """Write text or json data directly to a path on the remote system."""
    _check_init(jm_id)
    JM[jm_id].write(data, remote_path)
@hug.get('/{jm_id}/files/read')
def read(jm_id:str, remote_path:str, data_type:str='text'):
    """Read a remote text or json file and return its contents."""
    _check_init(jm_id)
    return JM[jm_id].read(remote_path, data_type=data_type)
@hug.get('/{jm_id}/apps/list')
def list_apps(jm_id:str):
    """List all deployed HPC applications for this job manager."""
    _check_init(jm_id)
    return JM[jm_id].get_apps()
@hug.get('/{jm_id}/apps/{app_id}')
def get_app(jm_id:str, app_id:str):
    """Get the configuration of one deployed HPC application."""
    _check_init(jm_id)
    return JM[jm_id].get_app(app_id)
@hug.post('/{jm_id}/apps/deploy')
def deploy_app(jm_id:str,
               local_app_dir:str='.',
               app_config_file:str="app.json",
               overwrite:bool=False) -> dict:
    """Deploy an application from a local directory to the TACC system."""
    _check_init(jm_id)
    logger.info(f"{jm_id} - deploying app - {local_app_dir}/{app_config_file}")
    return JM[jm_id].deploy_app(
            local_app_dir=local_app_dir,
            app_config_file=app_config_file,
            overwrite=overwrite)
@hug.get('/{jm_id}/jobs/list')
def list_jobs(jm_id:str):
    """List all jobs known to this job manager."""
    _check_init(jm_id)
    return JM[jm_id].get_jobs()
@hug.get('/{jm_id}/jobs/{job_id}')
def get_job(jm_id:str, job_id:str):
    """Get the configuration of a job deployed on the TACC system."""
    _check_init(jm_id)
    return JM[jm_id].get_job(job_id)
@hug.post('/{jm_id}/jobs/deploy')
def deploy_job(jm_id:str,
               job_config:str=None,
               local_job_dir:str='.',
               job_config_file:str='job.json',
               stage:bool=True,
               **kwargs):
    """Deploy a job to the TACC system (job_config may be a JSON string)."""
    _check_init(jm_id)
    msg = f"{jm_id} - deploying job at path {local_job_dir}/{job_config_file}"
    logger.info(msg)
    # job_config arrives as a JSON string over HTTP; decode it before use.
    return JM[jm_id].deploy_job(job_config = None if job_config is None else json.loads(job_config),
            local_job_dir=local_job_dir,
            job_config_file=job_config_file,
            stage=stage,
            **kwargs)
@hug.put('/{jm_id}/jobs/{job_id}/submit')
def submit_job(jm_id:str, job_id:str):
    """Submit a deployed job to the SLURM queue on the TACC system."""
    _check_init(jm_id)
    return JM[jm_id].submit_job(job_id)
@hug.put('/{jm_id}/jobs/{job_id}/cancel')
def cancel_job(jm_id:str, job_id:str):
    """Cancel a job previously submitted to the SLURM queue."""
    _check_init(jm_id)
    return JM[jm_id].cancel_job(job_id)
@hug.delete('/{jm_id}/jobs/{job_id}/remove')
def remove_job(jm_id:str, job_id:str):
    """Remove a job directory (sends it to trash) on the TACC system.

    Returns 404 for an unknown jm_id like every other endpoint; previously
    the missing _check_init call leaked a raw KeyError from JM[jm_id].
    """
    _check_init(jm_id)
    return JM[jm_id].remove_job(job_id)
@hug.post('/{jm_id}/jobs/{job_id}/restore')
def restore_job(jm_id:str, job_id:str):
    """Restore a job directory from the trash dir on the TACC system.

    Returns 404 for an unknown jm_id like every other endpoint; previously
    the missing _check_init call leaked a raw KeyError from JM[jm_id].
    """
    _check_init(jm_id)
    return JM[jm_id].restore_job(job_id)
@hug.get('/{jm_id}/jobs/{job_id}/files/list')
def list_job_files(jm_id:str, job_id:str, path:str=''):
    """List files inside a job directory."""
    _check_init(jm_id)
    logger.info(f'Getting files from {path}')
    files = JM[jm_id].ls_job(job_id, path=path)
    # Drop the raw `ls` strings; clients only need the structured fields.
    _ = [f.pop('ls_str') for f in files]
    return files
@hug.get('/{jm_id}/jobs/{job_id}/files/download')
def download_job_file(jm_id:str, job_id:str, path:str, dest_dir:str='.'):
    """Download a file/folder from a job directory."""
    _check_init(jm_id)
    return JM[jm_id].download_job_file(job_id, path, dest_dir=dest_dir)
@hug.get('/{jm_id}/jobs/{job_id}/files/read')
def read_job_file(jm_id:str, job_id:str, path:str, data_type:str='text'):
    """Read a job text or json file and return its contents."""
    _check_init(jm_id)
    return JM[jm_id].read_job_file(job_id, path, data_type=data_type)
@hug.put('/{jm_id}/jobs/{job_id}/files/upload')
def upload_job_file(jm_id:str, job_id:str,
        path:str, dest_dir:str='.', file_filter:str='*'):
    """Upload a file/folder into a job's directory."""
    _check_init(jm_id)
    JM[jm_id].upload_job_file(job_id, path, dest_dir=dest_dir,
            file_filter=file_filter)
@hug.put('/{jm_id}/jobs/{job_id}/files/write')
def write_job_file(jm_id:str, job_id:str, data, path:str):
    """Write text or json data directly to a file in a job directory."""
    _check_init(jm_id)
    return JM[jm_id].write_job_file(job_id, data, path)
@hug.get('/{jm_id}/jobs/{job_id}/files/peak')
def peak_job_file(jm_id:str,
        job_id:str, path:str, head:int=-1, tail:int=-1):
    """Peek at a job file: first/last lines via head/tail."""
    _check_init(jm_id)
    return JM[jm_id].peak_job_file(job_id, path, head=head, tail=tail)
@hug.get('/{jm_id}/scripts/list')
def list_scripts(jm_id:str):
    """List scripts deployed for this job manager."""
    _check_init(jm_id)
    return JM[jm_id].list_scripts()
@hug.post('/{jm_id}/scripts/deploy')
def deploy_script(jm_id:str, script_name:str, local_file:str=None):
    """Deploy a script to the job manager's scripts directory."""
    _check_init(jm_id)
    JM[jm_id].deploy_script(script_name, local_file=local_file)
@hug.put('/{jm_id}/scripts/run')
# NOTE(review): args uses a mutable default []; presumably hug supplies the
# parsed multi-value so the shared default is never mutated -- verify.
def run_script(jm_id:str, script_name:str,
        job_id:str=None, args:hug.types.multiple=[]):
    """Run a deployed script, optionally in a job's context, with arguments."""
    _check_init(jm_id)
    return JM[jm_id].run_script(script_name, job_id=job_id, args=args)
@hug.delete('/{jm_id}/trash/empty')
def empty_trash(jm_id:str, filter_str:str='*'):
""" Empty trash directory """
_check_init(jm_id)
return JM[jm_id].empty_trash(filter_str=filter_str)
| cdelcastillo21/taccjm | src/taccjm/taccjm_server.py | taccjm_server.py | py | 12,602 | python | en | code | 2 | github-code | 36 |
20145792954 | import argparse
import inspect
def parse_funct_arguments(fn, args=None, free_arguments=None):
    """Build an argparse wrapper around ``fn``'s keyword parameters.

    Every parameter of ``fn`` not listed in ``free_arguments`` becomes a
    ``--<name>`` option whose default and ``type`` come from the signature.

    Args:
        fn: function to wrap.
        args: argv-style list to parse (None -> sys.argv).
        free_arguments: parameter names left for the caller to supply.

    Returns:
        tuple: (wrapped function, dict of parsed values, unknown argv items);
        the wrapped function calls ``fn`` with the parsed options filled in.
    """
    if free_arguments is None:
        free_arguments = []
    fn_parser = argparse.ArgumentParser()
    sign = inspect.signature(fn)
    for pname, pval in sign.parameters.items():
        if pname in free_arguments:
            continue
        # *args / **kwargs cannot map to a single --option; skip them (the
        # original registered them with a bogus sentinel default/type).
        if pval.kind in (inspect.Parameter.VAR_POSITIONAL,
                         inspect.Parameter.VAR_KEYWORD):
            continue
        option = {'default': pval.default}
        # Only pass a converter when the parameter is annotated: the original
        # used the inspect "empty" sentinel as ``type``, which crashed as soon
        # as an unannotated option appeared on the command line.
        if pval.annotation is not inspect.Parameter.empty:
            option['type'] = pval.annotation
        fn_parser.add_argument('--' + pname, **option)
    fn_args, unk = fn_parser.parse_known_args(args)

    def new_fn(*args, **kwargs):
        return fn(*args, **kwargs, **vars(fn_args))

    return new_fn, vars(fn_args), unk
| antonior92/narx-double-descent | util.py | util.py | py | 588 | python | en | code | 6 | github-code | 36 |
31057530307 | '''
JexoSim
2.0
Recipe 3 with ability to split timeline into chunks - used to test split method
'''
from jexosim.modules import exosystem, telescope, channel, backgrounds
from jexosim.modules import detector, timeline, light_curve, systematics, noise, output
from jexosim.pipeline.run_pipeline import pipeline_stage_1, pipeline_stage_2
from jexosim.lib.jexosim_lib import jexosim_msg, jexosim_plot, write_record
from astropy import units as u
from datetime import datetime
import pickle, os
import numpy as np
class recipe_3a(object):
def __init__(self, opt):
output_directory = opt.common.output_directory.val
filename=""
self.results_dict ={}
self.results_dict['simulation_mode'] = opt.simulation.sim_mode.val
self.results_dict['simulation_realisations'] = opt.simulation.sim_realisations.val
self.results_dict['ch'] = opt.observation.obs_channel.val
self.noise_dict ={}
self.feasibility =1
opt.pipeline.useSignal.val=1
opt.simulation.sim_use_fast.val =1
opt.pipeline.split = 0
opt.noise.ApplyRandomPRNU.val=1
opt.timeline.apply_lc.val = 0
opt.timeline.useLDC.val = 0
opt.pipeline.useAllen.val =1
opt.pipeline.pipeline_auto_ap.val = 0 # for noise budget keep this to a fixed value (i.e. choose 0) so same for all sims
opt.timeline.obs_time.val = 0*u.hr
opt.timeline.n_exp.val = 1000.0
noise_list = [0,2,3,4,5,6,7,8,9,12,13]
# noise_list = [0,2,3,4,5,6,7,8,9]
# noise_list = [2,9,12]
start = 0
end = int(start + opt.no_real)
nb_dict = {'rn' :[1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'sn' :[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1],
'spat' :[1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],
'spec' :[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
'emm_switch' :[1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0],
'zodi_switch' :[1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
'dc_switch' :[1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'source_switch' :[1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0],
'diff' :[0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1],
'jitter_switch' :[1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
'fano' :[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
'sunshield_switch' :[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
'noise_tag': [ 'All noise','All photon noise','Source photon noise','Dark current noise',
'Zodi noise','Emission noise','Read noise','Spatial jitter noise',
'Spectral jitter noise','Combined jitter noise','No noise - no background','No noise - all background', 'Fano noise', 'Sunshield noise'],
'color': ['0.5','b', 'b','k','orange','pink', 'y','g','purple','r', '0.8','c', 'c','brown']
}
for i in noise_list:
self.noise_dict[nb_dict['noise_tag'][i]] ={}
for j in range(start,end):
seed = np.random.randint(100000000)
for i in noise_list:
opt.noise.EnableReadoutNoise.val = nb_dict['rn'][i]
opt.noise.EnableShotNoise.val = nb_dict['sn'][i]
opt.noise.EnableSpatialJitter.val= nb_dict['spat'][i]
opt.noise.EnableSpectralJitter.val= nb_dict['spec'][i]
opt.noise.EnableFanoNoise.val= nb_dict['fano'][i]
opt.background.EnableEmission.val = nb_dict['emm_switch'][i]
opt.background.EnableZodi.val = nb_dict['zodi_switch'][i]
opt.background.EnableSunshield.val = nb_dict['sunshield_switch'][i]
opt.background.EnableDC.val = nb_dict['dc_switch'][i]
opt.background.EnableSource.val = nb_dict['source_switch'][i]
opt.diff = nb_dict['diff'][i]
opt.noise_tag = nb_dict['noise_tag'][i]
opt.color = nb_dict['color'][i]
jexosim_msg('==========================================', 1)
jexosim_msg('Noise source:%s'%(opt.noise_tag), 1)
np.random.seed(seed)
opt = self.run_JexoSimA(opt)
opt = self.run_JexoSimA1(opt)
n_ndr0 = opt.n_ndr*1
ndr_end_frame_number0 = opt.ndr_end_frame_number*1
frames_per_ndr0 = opt.frames_per_ndr*1
duration_per_ndr0 = opt.duration_per_ndr*1
n_exp0 = opt.n_exp
if n_ndr0 > 10000:
opt.pipeline.split = 1
if opt.diagnostics ==1 :
jexosim_msg ('number of NDRs > 10000: using split protocol', opt.diagnostics)
else:
opt.pipeline.split = 0
opt.pipeline.split = 1
# =============================================================================
# split simulation into chunks to permit computation - makes no difference to final results
# =============================================================================
if opt.pipeline.split ==1:
jexosim_msg('Splitting data series into chunks', opt.diagnostics)
# uses same QE grid and jitter timeline but otherwise randomoses noise
ndrs_per_round = opt.effective_multiaccum*int(5000/opt.multiaccum)
ndrs_per_round = opt.effective_multiaccum*int(50/opt.multiaccum)
total_chunks = len(np.arange(0, n_ndr0, ndrs_per_round))
idx = np.arange(0, n_ndr0, ndrs_per_round) # list of starting ndrs
for i in range(len(idx)):
jexosim_msg('=== Chunk %s / %s====='%(i+1, total_chunks), opt.diagnostics)
if idx[i] == idx[-1]:
opt.n_ndr = n_ndr0 - idx[i]
opt.ndr_end_frame_number = ndr_end_frame_number0[idx[i]:]
opt.frames_per_ndr= frames_per_ndr0[idx[i]:]
opt.duration_per_ndr = duration_per_ndr0[idx[i]:]
else:
opt.n_ndr = idx[i+1]- idx[i]
opt.ndr_end_frame_number = ndr_end_frame_number0[idx[i]: idx[i+1]]
opt.frames_per_ndr= frames_per_ndr0[idx[i]: idx[i+1]]
opt.duration_per_ndr = duration_per_ndr0[idx[i]: idx[i+1]]
opt.n_exp = int(opt.n_ndr/opt.effective_multiaccum)
if i == 0:
opt.use_external_jitter = 0
opt = self.run_JexoSimB(opt)
opt = self.run_pipeline_stage_1(opt)
if opt.pipeline.pipeline_auto_ap.val == 1:
opt.pipeline.pipeline_ap_factor.val= opt.AvBest
if (opt.noise.EnableSpatialJitter.val ==1 or opt.noise.EnableSpectralJitter.val ==1 or opt.noise.EnableAll.val == 1) and opt.noise.DisableAll.val != 1:
opt.input_yaw_jitter, opt.input_pitch_jitter, opt._input_frame_osf = opt.yaw_jitter, opt.pitch_jitter, opt.frame_osf
else:
opt.pipeline.pipeline_auto_ap.val = 0
opt.use_external_jitter = 1 # uses the jitter timeline from the first realization
opt = self.run_JexoSimB(opt)
opt = self.run_pipeline_stage_1(opt)
jexosim_msg('Aperture used %s'%(opt.pipeline.pipeline_ap_factor.val), opt.diagnostics)
binnedLC = opt.pipeline_stage_1.binnedLC
data = opt.pipeline_stage_1.opt.data_raw
#After chunks processed, now recombine
if i ==0:
data_stack = data
binnedLC_stack = binnedLC
else:
data_stack = np.dstack((data_stack,data))
binnedLC_stack = np.vstack((binnedLC_stack, binnedLC))
aa = data_stack.sum(axis=0)
bb = aa.sum(axis=0)
jexosim_plot('test_from_sim', opt.diagnostics,
ydata=bb[opt.effective_multiaccum::opt.effective_multiaccum] )
aa = binnedLC_stack.sum(axis=1)
jexosim_plot('test_from_pipeline', opt.diagnostics,
ydata=aa)
opt.n_ndr = n_ndr0
opt.ndr_end_frame_number = ndr_end_frame_number0
opt.frames_per_ndr = frames_per_ndr0
opt.duration_per_ndr = duration_per_ndr0
opt.n_exp = n_exp0
elif opt.pipeline.split ==0:
opt = self.run_JexoSimB(opt)
if j==start: # first realization sets the ap, then the other use the same one
opt.pipeline.pipeline_auto_ap.val= 1
else:
opt.pipeline.pipeline_auto_ap.val= 0
opt = self.run_pipeline_stage_1(opt)
if j==start: # first realization sets the ap, then the other use the same one
if opt.pipeline.pipeline_apply_mask.val == 1:
opt.pipeline.pipeline_ap_factor.val= opt.AvBest
binnedLC_stack = opt.pipeline_stage_1.binnedLC
data_stack = opt.pipeline_stage_1.opt.data_raw
jexosim_plot('testvvv', opt.diagnostics,
ydata=binnedLC_stack.sum(axis=1) )
#take binnedLC_stack and now complete through pipeline stage 2
opt.pipeline_stage_1.binnedLC = binnedLC_stack
opt = self.run_pipeline_stage_2(opt)
self.pipeline = opt.pipeline_stage_2
self.opt = opt
self.noise_dict[opt.noise_tag]['wl'] = self.pipeline.binnedWav
if j == start:
self.noise_dict[opt.noise_tag]['signal_std_stack'] = self.pipeline.ootNoise
self.noise_dict[opt.noise_tag]['signal_mean_stack'] = self.pipeline.ootSignal
if opt.pipeline.useAllen.val == 1:
self.noise_dict[opt.noise_tag]['fracNoT14_stack'] = self.pipeline.noiseAt1hr
self.noise_dict[opt.noise_tag]['signal_std_mean'] = self.pipeline.ootNoise
self.noise_dict[opt.noise_tag]['signal_mean_mean'] = self.pipeline.ootSignal
if opt.pipeline.useAllen.val == 1:
self.noise_dict[opt.noise_tag]['fracNoT14_mean'] = self.pipeline.noiseAt1hr
self.noise_dict[opt.noise_tag]['signal_std_std'] = np.zeros(len(self.pipeline.binnedWav))
self.noise_dict[opt.noise_tag]['signal_mean_std'] = np.zeros(len(self.pipeline.binnedWav))
if opt.pipeline.useAllen.val == 1:
self.noise_dict[opt.noise_tag]['fracNoT14_std'] = np.zeros(len(self.pipeline.binnedWav))
self.noise_dict[opt.noise_tag]['bad_map'] = opt.bad_map
self.noise_dict[opt.noise_tag]['example_exposure_image'] = opt.exp_image
self.noise_dict[opt.noise_tag]['pixel wavelengths'] = opt.x_wav_osr[1::3].value
else:
self.noise_dict[opt.noise_tag]['signal_std_stack'] = np.vstack((self.noise_dict[opt.noise_tag]['signal_std_stack'], self.pipeline.ootNoise))
self.noise_dict[opt.noise_tag]['signal_mean_stack'] = np.vstack((self.noise_dict[opt.noise_tag]['signal_mean_stack'], self.pipeline.ootSignal))
if opt.pipeline.useAllen.val == 1:
self.noise_dict[opt.noise_tag]['fracNoT14_stack'] = np.vstack((self.noise_dict[opt.noise_tag]['fracNoT14_stack'], self.pipeline.noiseAt1hr))
self.noise_dict[opt.noise_tag]['signal_std_mean'] = self.noise_dict[opt.noise_tag]['signal_std_stack'].mean(axis=0)
self.noise_dict[opt.noise_tag]['signal_mean_mean'] = self.noise_dict[opt.noise_tag]['signal_mean_stack'].mean(axis=0)
if opt.pipeline.useAllen.val == 1:
self.noise_dict[opt.noise_tag]['fracNoT14_mean'] = self.noise_dict[opt.noise_tag]['fracNoT14_stack'].mean(axis=0)
self.noise_dict[opt.noise_tag]['signal_std_std'] = self.noise_dict[opt.noise_tag]['signal_std_stack'].std(axis=0)
self.noise_dict[opt.noise_tag]['signal_mean_std'] = self.noise_dict[opt.noise_tag]['signal_mean_stack'].std(axis=0)
if opt.pipeline.useAllen.val == 1:
self.noise_dict[opt.noise_tag]['fracNoT14_std'] = self.noise_dict[opt.noise_tag]['fracNoT14_stack'].std(axis=0)
self.noise_dict[opt.noise_tag]['bad_map'] = opt.bad_map
self.noise_dict[opt.noise_tag]['example_exposure_image'] = opt.exp_image
self.noise_dict[opt.noise_tag]['pixel wavelengths'] = opt.x_wav_osr[1::3].value
self.results_dict['noise_dic'] = self.noise_dict
# dump pickle file at end of each cycle of noise sims
if opt.simulation.sim_output_type.val == 1:
time_tag = (datetime.now().strftime('%Y_%m_%d_%H%M_%S'))
self.results_dict['time_tag'] = time_tag
# if j != start:
# os.remove(filename) # delete previous temp file
# filename = '%s/Noise_budget_%s_TEMP.pickle'%(output_directory, opt.lab)
# with open(filename, 'wb') as handle:
# pickle.dump(self.results_dict , handle, protocol=pickle.HIGHEST_PROTOCOL)
if j == end-1:
# os.remove(filename) # delete previous temp file
filename = '%s/Noise_budget_%s_%s.pickle'%(output_directory, opt.lab, time_tag)
with open(filename, 'wb') as handle:
pickle.dump(self.results_dict , handle, protocol=pickle.HIGHEST_PROTOCOL)
jexosim_msg('Results in %s'%(filename), 1)
self.filename = 'Noise_budget_%s_%s.pickle'%(opt.lab, time_tag)
write_record(opt, output_directory, self.filename, opt.params_file_path)
def run_JexoSimA(self, opt):
jexosim_msg('Exosystem', 1)
exosystem.run(opt)
jexosim_msg('Telescope', 1)
telescope.run(opt)
jexosim_msg('Channel', 1)
channel.run(opt)
jexosim_msg('Backgrounds', 1)
backgrounds.run(opt)
jexosim_msg('Detector', 1)
detector.run(opt)
if opt.observation_feasibility ==1: # if detector does not saturate continue
jexosim_msg('Timeline', 1)
timeline.run(opt)
jexosim_msg('Light curve', 1)
light_curve.run(opt)
return opt
else: # if detector saturates end sim
return opt
def run_JexoSimA1(self, opt):
jexosim_msg('Systematics', 1)
systematics.run(opt)
return opt
def run_JexoSimB(self, opt):
jexosim_msg('Noise', 1)
noise.run(opt)
return opt
def run_pipeline_stage_1(self, opt):
jexosim_msg('Pipeline stage 1', 1)
opt.pipeline_stage_1 = pipeline_stage_1(opt)
return opt
def run_pipeline_stage_2(self, opt):
jexosim_msg('Pipeline stage 2', 1)
opt.pipeline_stage_2 = pipeline_stage_2(opt)
return opt
| subisarkar/JexoSim | jexosim/run_files/recipe_3a.py | recipe_3a.py | py | 17,161 | python | en | code | 6 | github-code | 36 |
14061566995 | import time
import json
from typing import *
import prestodb # pip install presto-python-client
class PrestoClient:
def __init__(self):
props = {"enable_hive_syntax": "true"}
self.conn = prestodb.dbapi.connect(
host="presto.bigo.sg",
port=8285,
user="baijialuo",
catalog="hive",
schema="default",
source="live_moderation_pipeline",
session_properties=props,
)
def query(self, sql):
cur = self.conn.cursor()
cur.execute(sql)
return cur.fetchall()
def query_yield(self, sql):
cur = self.conn.cursor()
cur.execute(sql)
while True:
item = cur.fetchone()
if item is None:
return
yield item
def close(self):
self.conn.close()
if __name__ == "__main__":
START_DAY = "2020-08-10 15:25:18"
END_DAY = "2020-08-10"
PROJECT_NAME = "auto-deploy"
sql = r'select rtime, submodule, cost_ms, message from vlog.likee_evg_content where rtime >= "{}" and "{}" >= day and project_name = "{}"'.format(
START_DAY, END_DAY, PROJECT_NAME)
client = PrestoClient()
res = client.query(sql)
for item in res:
print('rtime:{}\nsubmodule:{}\ncost_ms:{}\nmessage:{}'.format(
item[0], item[1], item[2], json.loads(item[3])
))
for item in client.query_yield(sql):
print('rtime:{}\nsubmodule:{}\ncost_ms:{}\nmessage:{}'.format(
item[0], item[1], item[2], json.loads(item[3])
))
| ThreeBucks/model-deploy | scripts/extract_report_data.py | extract_report_data.py | py | 1,588 | python | en | code | 0 | github-code | 36 |
4162697796 | import asyncio
import aiofiles
# Define an async function to write data to a file
async def write_to_file(filename, data):
# Open the file for writing using async with, which ensures the file is closed
# when we're done with it
async with aiofiles.open(filename, 'w') as f:
# Write the data to the file using the await keyword
await f.write(data)
# Define an async function to read data from a file
async def read_from_file(filename):
# Open the file for reading using async with, which ensures the file is closed
# when we're done with it
async with aiofiles.open(filename, 'r') as f:
# Read the contents of the file using the await keyword
data = await f.read()
# Return the data as a string
return data
# Define the main coroutine, which will run when we execute the script
async def main():
# Set up a filename and some data to write to the file
filename = 'example.txt'
data = 'Hello, world!'
# Create tasks to write and read the file concurrently
write_task = asyncio.create_task(write_to_file(filename, data))
read_task = asyncio.create_task(read_from_file(filename))
# Wait for both tasks to complete
await asyncio.gather(write_task, read_task)
# Print the contents of the file to the console
print(read_task.result())
# Run the main coroutine using asyncio.run, which creates and manages the event loop
if __name__ == '__main__':
asyncio.run(main())
| PythonCodeNemesis/Python_Event_Loop | file_update_loop_2.py | file_update_loop_2.py | py | 1,482 | python | en | code | 0 | github-code | 36 |
9194759746 | from PIL import Image
import unittest
from lightly.transforms import GaussianBlur
class TestGaussianBlur(unittest.TestCase):
def test_on_pil_image(self):
for w in range(1, 100):
for h in range(1, 100):
gaussian_blur = GaussianBlur(w * 0.1)
sample = Image.new('RGB', (w, h))
gaussian_blur(sample)
| tibe97/thesis-self-supervised-learning | tests/transforms/test_GaussianBlur.py | test_GaussianBlur.py | py | 372 | python | en | code | 2 | github-code | 36 |
70167432424 | import os
import mongoengine
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '6c8+rfm8e6#bt&13$+4#(btshtf7#^)y3e@58=lzrxd5j4%q(m'
DEBUG = True
ALLOWED_HOSTS = ['*']
REST_FRAMEWORK = {
'UNAUTHENTICATED_USER': None
}
# Application definition
INSTALLED_APPS = [
'rest_framework',
'django.contrib.staticfiles',
'corsheaders',
'django_pds',
'api'
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django_pds.middlewares.UrlPathExistsMiddleware',
'django.middleware.common.CommonMiddleware'
]
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = (
'DELETE',
'GET',
'OPTIONS',
'PATCH',
'POST',
'PUT',
)
ROOT_URLCONF = 'mongo_django.urls'
TEMPLATES = []
WSGI_APPLICATION = 'mongo_django.wsgi.application'
DATABASES = {}
# connecting to mongodb
DATABASE_NAME = '89595B8A-13EE-43F3-9D06-0D34D71D161B'
mongoengine.connect(DATABASE_NAME)
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| knroy/django-pds | sample/mongo_django/mongo_django/settings.py | settings.py | py | 1,169 | python | en | code | 3 | github-code | 36 |
33917629928 | # importacao de bibliotecas
import requests
import csv
# import datetime (NAO SERA UTILIZADO E NAO FUNCIONOU NO PYCHARM)
from PIL import Image
from urllib.parse import quote
from time import sleep
# recebendo os dados da API
url = 'https://api.covid19api.com/dayone/country/brazil'
requisicao = requests.get(url)
dados_api = requisicao.json()
# criando uma lista de listas (final_data) com os dados recebidos da API, para ser usada em um arquivo CSV
dados_finais = []
for dicionario in dados_api:
dados_finais.append([dicionario['Confirmed'], dicionario['Deaths'], dicionario['Recovered'],
dicionario['Active'], dicionario['Date'][:10]])
dados_finais.insert(0, ['Confirmados', 'Obitos', 'Recuperados', 'Ativos', 'Data'])
# nomeando as posicoes de cada lista
confirmados = 0
mortos = 1
recuperados = 2
ativos = 3
data = 4
# criando o arquivo CSV (final_data ja e uma lista de listas, facilitando esse processo) NAO SERA USADO PARA O GRAFICO
with open('brasil-covid.csv', 'w', newline='') as arquivo:
escritor = csv.writer(arquivo)
escritor.writerows(dados_finais)
# transformando a string da data em um formato de data real (NAO FUNCIONOU NO PYCHARM, MAS NAO IREI USAR)
'''for dados_diarios in range(1, len(dados_finais)):
dados_finais[dados_diarios][data] = datetime.datetime.strptime(final_data[dados_diarios][data], '%Y-%m-%d')'''
# criando funcao para gerar datasets para a API geradora de graficos chamada quickchart
def gerar_dados(lista_dados, etiqueta):
if len(lista_dados) > 1:
conjunto_dados = []
for contador in range(len(lista_dados)):
conjunto_dados.append({
'label': etiqueta[contador],
'data': lista_dados[contador]
})
return conjunto_dados
else:
return [{
'label': etiqueta,
'data': lista_dados
}]
# criando funcao para colocar titulo no grafico
def titulo_grafico(titulo=''):
if titulo != '':
mostrar = 'true'
else:
mostrar = 'false'
return {
'display': mostrar,
'title': titulo
}
# criando funcao para gerar o codigo(dicionario) do grafico
def gerar_grafico(etiquetasx, lista_dados, etiquetasy, tipo='bar', titulo=''):
conjunto_dados = gerar_dados(lista_dados, etiquetasy)
opcoes = titulo_grafico(titulo)
dadosdografico = {
'type': tipo,
'data': {
'labels': etiquetasx,
'datasets': conjunto_dados
},
'options': opcoes
}
return dadosdografico
# criando funcao para requisitar o grafico da API
def requrer_grafico(dadosdografico):
url_base = 'https://quickchart.io/chart?c='
req = requests.get(f'{url_base}{str(dadosdografico)}')
conteudo = req.content
return conteudo
# criando funcao para salvar a imagem do grafico
def salvar_imagem_grafico(caminho, conteudo_binario):
with open(caminho, 'wb') as imagem_grafico:
imagem_grafico.write(conteudo_binario)
# criando funcao para exibir a imagem do grafico na tela
def mostrar_grafico(caminho):
imagem = Image.open(caminho)
imagem.show()
# criando funcao para gerar qr code
def requerer_qrcode(url_texto):
url_link = quote(url_texto)
url_final = f'https://quickchart.io/qr?text={url_link}'
req = requests.get(url_final)
conteudo = req.content
return conteudo
# PROGRAMA PRINCIPAL
# 1o. - criando dados para informar para as funcoes
try:
while True:
intervalo_dados = int(input('Com qual intervalo deseja gerar o grafico? [20 ou mais]\n'))
if 20 <= intervalo_dados <= 400:
break
else:
print('OPCAO INVALIDA!', end=' ')
except:
sleep(1)
print('ERRO - OPCAO INVALIDA!')
sleep(1)
print('INTERVALO PADRAO DE 50 DIAS ESCOLHIDO!')
sleep(3)
finally:
intervalo_dados = 50
etiquetas_x = []
for dados_diarios in dados_finais[1::intervalo_dados]:
etiquetas_x.append(dados_diarios[data])
dados_confirmados = []
dados_recuperados = []
for dados_diarios in dados_finais[1::intervalo_dados]:
dados_confirmados.append(dados_diarios[confirmados])
dados_recuperados.append(dados_diarios[recuperados])
lista_de_dados_final = [dados_confirmados, dados_recuperados]
etiquetas_y = ['Confirmados', 'Recuperados']
# 2o. - chamandos as funcoes e exibindo o grafico
grafico = gerar_grafico(etiquetas_x, lista_de_dados_final, etiquetas_y, titulo='Confirmados x Recuperados')
conteudo_do_grafico = requrer_grafico(grafico)
salvar_imagem_grafico('meu-primeiro-grafico.png', conteudo_do_grafico)
mostrar_grafico('meu-primeiro-grafico.png')
# 3o. - criando um qr code do grafico
conteudo_qrcode = requerer_qrcode(f'https://quickchart.io/chart?c={grafico}')
salvar_imagem_grafico('qr_code_meu_primeiro_grafico.png', conteudo_qrcode)
mostrar_grafico('qr_code_meu_primeiro_grafico.png')
| gabrieleugeni0/Covid19DataGraphic | covid19.py | covid19.py | py | 4,876 | python | pt | code | 1 | github-code | 36 |
950940082 | pkgname = "gtar"
pkgver = "1.35"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
"--program-prefix=g",
"gl_cv_struct_dirent_d_ino=yes",
]
configure_gen = []
hostmakedepends = ["texinfo"]
makedepends = ["acl-devel"]
pkgdesc = "GNU tape archiver"
maintainer = "q66 <q66@chimera-linux.org>"
license = "GPL-3.0-or-later"
url = "http://www.gnu.org/software/tar"
source = f"$(GNU_SITE)/tar/tar-{pkgver}.tar.xz"
sha256 = "4d62ff37342ec7aed748535323930c7cf94acf71c3591882b26a7ea50f3edc16"
hardening = ["vis", "cfi"]
| chimera-linux/cports | main/gtar/template.py | template.py | py | 528 | python | en | code | 119 | github-code | 36 |
18109339052 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
import rospy
import subprocess
import threading
from tkinter import *
from geometry_msgs.msg import Twist
import subprocess
from std_msgs.msg import String
import threading
class UI:
def __init__(self, master=None):
self._enable = False
self.__real_robot_bt = Button(master, text="Real Robot",bg='#49A', width = 10)
self.__virtual_robot_bt = Button(master, text="Virtual Robot",bg="green", width = 10)
#Botones
self.__real_robot_bt.bind("<Button-1>", self.real_robot_launch)
self.__virtual_robot_bt.bind("<Button-1>", self.virtual_robot_launch)
self.__real_robot_bt.grid(row=0, column=0, columnspan=1)
self.__virtual_robot_bt.grid(row=1, column=0, columnspan=1)
#Etiquetas
self.__label1 = Label(master,text="SELECCIONE EL TIPO DE ROBOT")
self.__label1.grid(column=4, row=0, padx=10, pady=1)
self.__label1.config(bg="white")
self.__label2 = Label(master,text="",bg='white')
self.__label2.grid(column=4, row=1, padx=10, pady=1)
self.__label2.config(bg="white")
#Imagen
self.__image=PhotoImage(file="src/kinova_UI/image/kinovaPhoto.png")
self.__imageLable=Label(master,image=self.__image)
self.__imageLable.config(bg="white")
self.__imageLable.place(x=100, y=100, width=200, height=80)
self.__robot_selected = False
# Lista de comandos
self.__cmd = []
def real_robot_launch(self, event):
if not self.__robot_selected:
self.__robot_selected = True
subprocess.run(["gnome-terminal","--", "sh", "-c", "roslaunch controllers scullion.launch type:=real"])
#Para que no de error de conexión
rospy.sleep(3)
#Se inicializa el nodo
rospy.init_node("kinova_ui")
# Suscriptor al nodo del control por voz
rospy.Subscriber("/voice_ui", String, self.__cb)
# Suscriptor al nodo de la cámara
rospy.Subscriber("/ready", String, self.__cb)
self.__label1.config(text="¿QUÉ INGREDIENTE DESEA?")
def virtual_robot_launch(self, event):
if not self.__robot_selected:
self.__robot_selected = True
#Abre terminal y ejecuta el comando
subprocess.run(["gnome-terminal","--", "sh", "-c","roslaunch controllers scullion.launch"])
#Para que no de error de conexión
rospy.sleep(3)
#Se inicializa el nodo
rospy.init_node("kinova_ui")
# Suscriptor al nodo del control por voz
rospy.Subscriber("/voice_ui", String, self.__cb)
# Suscriptor al nodo de la cámara
rospy.Subscriber("/ready", String, self.__cb)
self.__label1.config(text="¿QUÉ INGREDIENTE DESEA?")
#Resetea las etiquetas
def reset_label(self, kinova_frame):
self.__label2.config(text="")
# Bucle de control
def control_loop(self,kinova_frame):
# Bucle de control
if not rospy.is_shutdown():
# Comprueba que hay un comando a realizar
if len(self.__cmd) > 0:
# Saca el comando de la lista de comandos
command = self.__cmd.pop(0)
command = command.lower().split()
#Mensaje para comunicar a la interfaz el ingrediente que se coge
mensaje = String()
# Realiza el pick and place si tiene que dar un ingrediente, luego pone la tupla a False (no hay ingrediente en la zona de almacén)
if len(command) == 1:
if command[0] == "sal":
self.__label2.config(text="Cogiendo la sal")
kinova_frame.after(15000, self.reset_label,kinova_frame)
elif command[0] == "azúcar":
self.__label2.config(text="Cogiendo el azúcar")
kinova_frame.after(15000, self.reset_label,kinova_frame)
elif command[0] == "pimienta":
self.__label2.config(text="Cogiendo la pimienta")
kinova_frame.after(15000, self.reset_label,kinova_frame)
elif len(command) > 1:
X = float(command[0])
Y = float(command[1])
# Realiza el pick and place si tiene que devolver un ingrediente, luego pone la tupla a True (hay ingrediente en la zona de almacén)
if command[2] == "0":
self.__label2.config(text="Devolviendo la sal")
kinova_frame.after(15000, self.reset_label,kinova_frame)
elif command[2] == "1":
self.__label2.config(text="Devolviendo la pimienta")
kinova_frame.after(15000, self.reset_label,kinova_frame)
elif command[2] == "2":
self.__label2.config(text="Devolviendo el azúcar")
kinova_frame.after(15000, self.reset_label,kinova_frame)
kinova_frame.after(500, self.control_loop,kinova_frame)
# Callback de la interfaz por voz y cámara: recoge los comandos y los almacena
def __cb(self, data):
self.__cmd.append(data.data)
def main():
try:
kinova_frame = Tk()
kinova_frame.geometry("370x200")
kinova_frame.title("Kinova UI")
kinova_frame['bg']= 'white'
kinova_frame.resizable(width=False, height=False)
kinova_UI=UI(kinova_frame)
kinova_UI.control_loop(kinova_frame)
kinova_frame.mainloop()
except rospy.ROSInterruptException:
return
except KeyboardInterrupt:
return
if __name__ == '__main__':
main()
| DanielFrauAlfaro/Proyecto_Servicios | kinova_UI/scripts/kinova_UI.py | kinova_UI.py | py | 6,374 | python | es | code | 0 | github-code | 36 |
6504291760 | # import sys
from collections import deque
# from io import StringIO
#
# test_input1 = """11, 6, 8, 1
# 3, 1, 9, 10, 5, 9, 1
# """
#
#
# test_input2 = """10, 9, 8, 7, 5
# 5, 10, 9, 8, 7
# """
#
# test_input3 = """12, -3, 14, 3, 2, 0
# 10, 15, 4, 6, 3, 1, 22, 1
# """
#
# sys.stdin = StringIO(test_input1)
# sys.stdin = StringIO(test_input2)
# sys.stdin = StringIO(test_input3)
orders = deque([int(x) for x in input().split(', ') if 0 < int(x) <= 10])
employees = [int(x) for x in input().split(', ')]
total_pizzas = 0
while orders and employees:
current_order = orders[0]
current_employee = employees[-1]
if current_order <= current_employee:
total_pizzas += current_order
orders.popleft()
employees.pop()
if current_order > current_employee:
total_pizzas += current_employee
rest = current_order - current_employee
employees.pop()
orders[0] = rest
if not orders:
print("All orders are successfully completed!")
print(f"Total pizzas made: {total_pizzas}")
print(f"Employees: {', '.join(map(str, employees))}")
elif not employees:
print("Not all orders are completed.")
print(f"Orders left: {', '.join(map(str, orders))}")
| gyurel/Python-Advanced-Course | python_advanced_retake_exam_14_april_2021/problem1.py | problem1.py | py | 1,221 | python | en | code | 0 | github-code | 36 |
21632921628 | import pandas as pd
from scipy.stats import spearmanr, pearsonr, kendalltau
import os
import numpy as np
base_path = 'output_alignment/alignment_checkpoint/'
dfs = []
for file in os.listdir(base_path):
dfs.append(pd.read_csv(os.path.join(base_path, file)))
df1 = pd.concat(dfs)
# df1 = pd.read_csv('output_alignment/alignmnets_y0_com_bal_v1.csv')
df1_beta_star = (df1['E1'] / df1['Ea_hat']) ** 0.5
df1['FBeta*'] = ((1 + df1_beta_star ** 2) * df1['Ea']) / (df1['E1'] + df1_beta_star ** 2 * df1['Ea_hat'])
measures = ['NC', 'EC', 'ICS', 'S3', 'F1', 'ACC', 'BM', 'MK', 'MCC', 'F0.1', 'F0.33', 'F3', 'F10', 'FBeta*', 'BMK2', 'Ilia']
df1['BMK2'] = (df1['BM'] + df1['MK']) / 2.0
df1['Ilia'] = (df1['TP'] / df1['E1'] + df1['TN'] / (df1['omega'] - df1['E1'])) / 2.0
df2 = pd.DataFrame(columns=measures)
static_measures = ['NC', 'S3', 'F1', 'ACC', 'MCC']
for measure in static_measures:
df2[measure] = df1[measure]
dynamic_measures = [('EC', 'ICS'), ('BM', 'MK'), ('F0.1', 'F10'), ('F0.33', 'F3')]
for m1, m2 in dynamic_measures:
df2[m1] = df1[m2]
df2[m2] = df2[m1]
df2_beta_star = 1 / ((df1['E1'] / df1['Ea_hat']) ** 0.5)
df2['FBeta*'] = ((1 + df2_beta_star ** 2) * df1['Ea']) / (df1['E1'] + df2_beta_star ** 2 * df1['Ea_hat'])
df2['BMK2'] = (df2['BM'] + df2['MK']) / 2.0
df = pd.concat([df1[measures], df2[measures]])
print('number of rows: ' + str(len(df)))
rho, p_value = spearmanr(df[measures])
pd.DataFrame(rho, columns=measures, index=measures).to_csv('output_alignment/spearman_correlation_ba_er_ws.csv')
# rho = np.corrcoef(df[measures])
pc = pd.DataFrame(columns=measures, index=['NC'])
for measure in measures:
pc[measure]['NC'] = pearsonr(df['NC'], df[measure])[0]
pc.to_csv('output_alignment/pearson_correlation_ba_er_ws.csv')
kt = pd.DataFrame(columns=measures, index=['NC'])
for measure in measures:
kt[measure]['NC'] = kendalltau(df['NC'], df[measure])[0]
kt.to_csv('output_alignment/kendalltau_correlation_ba_er_ws.csv')
| maor63/sana_project | compute_spearman_correlation_for_alignments.py | compute_spearman_correlation_for_alignments.py | py | 1,962 | python | en | code | 0 | github-code | 36 |
22688875816 | from logging import error
import random
import datetime
from app.lessons import LESSONS_BY_ID
from flask import jsonify, g, request
from app import app, db
from app.api.auth import api_admin_login_required, api_login_required, error_response
from app.models import Puzzle, PuzzleCompletion, Test
from flask_sqlalchemy import sqlalchemy
PUZZLES_PER_TEST = 10
def validate_puzzle_data(puzzle):
    """ Validate puzzle data from client

    Returns True only when *puzzle* is a non-None mapping containing every
    field required to construct a Puzzle row.
    """
    if puzzle is None:
        return False
    required = ("fen", "move_tree", "is_atomic", "lesson_id")
    return all(key in puzzle for key in required)
def validate_completion_data(data):
    """ Validate completion data

    Returns True only when *data* is a non-None mapping containing every
    field required to record a PuzzleCompletion.
    """
    if data is None:
        return False
    required = ("attempts", "start_time", "end_time", "test_id")
    return all(key in data for key in required)
def get_all_puzzles(lesson_id=None):
    """ Returns all puzzles for a given lesson, if lesson_id==None, returns all puzzles """
    query = Puzzle.query
    # Narrow to one lesson only when the caller asked for it
    if lesson_id is not None:
        query = query.filter_by(lesson_id=lesson_id)
    return query.all()
def get_incomplete_puzzles_for_test(test_id, user_id):
    """Return every Puzzle the given user has NOT yet completed for the given test.

    Uses a LEFT OUTER JOIN from Puzzle to PuzzleCompletion restricted to this
    test and user, then keeps only the rows with no matching completion
    (PuzzleCompletion.id is NULL).

    NOTE(review): assumes PuzzleCompletion.user stores a user id (it is
    compared directly against user_id) — confirm against the model definition.
    """
    query = Puzzle.query.outerjoin(PuzzleCompletion, (Puzzle.id==PuzzleCompletion.puzzle_id) & (PuzzleCompletion.test_number==test_id) & (PuzzleCompletion.user==user_id)).filter(PuzzleCompletion.id==None)
    return query.all()
def get_unique_puzzle_completions_for_test(test_id, user_id):
    """ Return one PuzzleCompletion row per distinct puzzle completed by the
    user within the given test (duplicates collapsed via GROUP BY puzzle_id). """
    completions = PuzzleCompletion.query.filter_by(test_number=test_id, user=user_id)
    return completions.group_by(PuzzleCompletion.puzzle_id).all()
@app.route("/api/puzzles/random")
@api_login_required
def random_puzzle_api():
    """ API route which serves a random puzzle

    Accepts a query parameter ?lesson to select puzzles for a given lesson.
    Responds with { "puzzle": null } when no puzzle matches.
    """
    lesson_id = request.args.get("lesson")
    candidates = get_all_puzzles(lesson_id=lesson_id)
    # Guard clause: nothing to choose from
    if not candidates:
        return jsonify({ "puzzle": None })
    return jsonify({ "puzzle": random.choice(candidates).to_json() })
@app.route("/api/puzzles/test/<int:test_id>")
@api_login_required
def random_test_puzzle_api(test_id):
    """ API route which serves a random puzzle that hasn't been completed by the current user

    Responds with { "puzzle": ..., "is_final": ... } where "is_final" is True
    when the served puzzle would be the last one needed to finish the test.
    Returns 404 for an unknown test id and 401 when the test belongs to
    another user.
    """
    test = Test.query.get(test_id)
    # Bug fix: Query.get returns None for an unknown id; the original code
    # would raise AttributeError on test.user below. Return 404 instead.
    if test is None:
        return error_response(404)
    # Ensure that the request is from the correct user
    if test.user != g.user.id:
        return error_response(401)
    puzzles = get_incomplete_puzzles_for_test(test_id, g.user.id)
    if len(puzzles) > 0:
        # is_final: the user has already completed all but (at most) one puzzle
        is_final = len(get_unique_puzzle_completions_for_test(test_id, g.user.id)) >= (PUZZLES_PER_TEST - 1)
        return jsonify({ "puzzle": random.choice(puzzles).to_json(), "is_final": is_final })
    return jsonify({ "puzzle": None, "is_final": True })
@app.route("/api/tests/<int:test_id>", methods=["POST"])
@api_login_required
def test_api(test_id):
    """ Mark a test as finished once the user has completed enough puzzles.

    Returns 404 for an unknown test, 401 if the test belongs to another user,
    403 if fewer than PUZZLES_PER_TEST distinct puzzles were completed.
    """
    test = Test.query.filter_by(id=test_id).first()
    if test is None:
        return error_response(404)
    # Ensure the request is from the correct user
    if test.user != g.user.id:
        return error_response(401)
    completed = get_unique_puzzle_completions_for_test(test_id, g.user.id)
    if len(completed) < PUZZLES_PER_TEST:
        return error_response(403)
    # Record the finish time of the test
    test.end_time = datetime.datetime.now()
    db.session.commit()
    return jsonify({ "status": "Ok" })
@app.route("/api/puzzles/<int:puzzle_id>", methods=["POST"])
@api_login_required
def puzzle_api(puzzle_id):
""" API route which allows updating the completion of a puzzle """
puzzle = Puzzle.query.filter_by(id=puzzle_id).first()
if puzzle is not None:
data = request.get_json()
if validate_completion_data(data):
completion = PuzzleCompletion(
user=g.user.id,
puzzle_id=puzzle_id,
attempts=data["attempts"],
start_time=datetime.datetime.fromtimestamp(data["start_time"] / 1000),
end_time=datetime.datetime.fromtimestamp(data["end_time"] / 1000),
test_number=data["test_id"],
)
db.session.add(completion)
db.session.commit()
return jsonify({ "status": "Ok" })
return error_response(400)
return error_response(404, message="Puzzle not found or already completed")
@app.route("/api/puzzles", methods=["GET"])
@api_login_required
def puzzles_api():
""" API route which serves all puzzle data """
lesson_id = request.args.get("lesson")
puzzles = get_all_puzzles(lesson_id=lesson_id)
return jsonify({ "puzzles": list(map(lambda puzzle: puzzle.to_json(), puzzles)) })
@app.route("/api/puzzles", methods=["POST"])
@api_admin_login_required
@api_login_required
def puzzles_post_api():
""" API route used to create a new puzzle from the create_puzzle page """
data = request.get_json()
if validate_puzzle_data(data):
puzzle = Puzzle(
fen=data["fen"],
move_tree=data["move_tree"],
is_atomic=data["is_atomic"],
lesson_id=data["lesson_id"],
)
db.session.add(puzzle)
db.session.commit()
return jsonify({ "status": "Ok" })
return error_response(400)
@app.route("/api/stats/<int:test_id>", methods=["GET"])
@api_login_required
def get_stats(test_id):
test = Test.query.filter_by(id=test_id).first()
time_taken = round(test.end_time.timestamp() - test.start_time.timestamp(), 1)
total_accuracy = 0
puzzles = PuzzleCompletion.query.filter_by(test_number=test_id).all()
for puzzle in puzzles:
total_accuracy += 1/puzzle.attempts
average_accuracy = round(100*total_accuracy/len(puzzles), 1)
return jsonify({ "time_taken": time_taken, "accuracy": average_accuracy}) | APM246/Atomic-Chess | app/api/puzzles_api.py | puzzles_api.py | py | 5,650 | python | en | code | 2 | github-code | 36 |
8429282748 |
def main():
"""
Matrix is assumed to be like this:
A B C D
A 0 2 3 7
B 2 0 6 4
C 3 6 0 1
D 7 4 1 0
Sample test files have also been uploaded
"""
filename = input(" Enter filename (e.g. tree.txt):")
#Extraction of data
listform = []
myFile = open(filename, "r" )
for Row in myFile:
curr = Row.split('\t')
listform.append(curr)
myFile.close()
#print(listform)
#Extraction of distances
data = []
distances = []
for i in range(0, len(listform) ):
for j in listform[i]:
data.append(j.strip())
try:
curr = int(j)
distances.append(curr) # creation of a distance list
except:
pass
#print(distances)
# Making dictionary to store headers
row = {}
col = {}
converted =[]
for i in listform[0]:
converted.append(i.strip())
store = []
count = 0
for i in converted:
if(i != ''):
store.append(i)
row.update({count:i})
col.update({count:i})
count = count + 1
# print(row)
# print(col)
# Making 2D matrix for distances
num_row = len(row)
num_col = len(col)
size = num_row
matrice = []
new = []
count = 0
for i in range(num_row):
for j in range(num_col):
new.append(distances[count])
count = count + 1
matrice.append(new)
new = []
#print(matrice[2][1])
# print(row[0]+col[0])
check = termination(matrice, num_row, num_col) # check termination condition intially
while(check == True ):
target_row, target_col, smallest = Find_Min(matrice, num_row, num_col) # finding minimum value and its corresponding row and col
new_entry = "(" + col[target_col] + "," + row[target_row] + ")" # add a new entry to the dictionary which is in newick format
row.update({num_row: new_entry})
col.update({num_col: new_entry})
#update row and col size
num_row = len(row)
num_col = len(col)
matrice = Update_Tables(matrice, num_row, num_col, target_row, target_col) # updating tables include setting used entries to 0s and calculating new distances
check = termination(matrice, num_row, num_col) # check termination condition again
get_newick = len(row)-1 # last value will be newick notation itself
print(" Newick notation:", row[get_newick] )
def Find_Min(table, rows, cols):
target_row = 0
target_col = 0
curr_min = 10000 # arbitrary number assuming no distance will be greater than this. If distance is greater than this, change curr_min to a higher number
for i in range(rows):
for j in range(cols):
if(table[i][j] < curr_min and table[i][j] != 0):
curr_min = table[i][j]
target_row = i
target_col = j
# print(curr_min)
# print(target_row,target_col)
return target_row, target_col, curr_min
def Update_Tables(matrix, rows, cols, target_row, target_col):
# create a new distance matrix
new_matrix = []
new = []
for i in range(rows):
for j in range(cols):
try:
new.append(matrix[i][j])
except:
new.append(0)
new_matrix.append(new)
new = []
# Update Col
for i in range(0,cols):
if(i == target_row or i == target_col):
new_matrix[i][cols-1] = 0
elif(i == cols-1):
new_matrix[i][cols-1] = 0
else:
new_matrix[i][cols-1] = ( new_matrix[i][target_row] + new_matrix[i][target_col] )/ 2
# Update Row
for i in range(0,rows):
if(i == target_row or i == target_col):
new_matrix[rows-1][i] = 0
elif(i == rows-1):
new_matrix[rows-1][i] = 0
else:
new_matrix[rows-1][i] = ( new_matrix[target_row][i] + new_matrix[target_col][i] )/ 2
# making target_row and target_col entries 0
for i in range(rows):
new_matrix[target_col][i] = 0
new_matrix[target_row][i] = 0
for i in range(cols):
new_matrix[i][target_row] = 0
new_matrix[i][target_col] = 0
# print(new_matrix)
return new_matrix
def termination(matrix, rows, cols):
# repeat loop until all values are 0 in the matrix
count = 0
for i in range(rows):
for j in range(cols):
if(matrix[i][j] != 0):
count = count + 1
if(count == 0):
return False
else:
return True
main()
| HassaanAW/Computational-Biology | Phylogenetic Tree.py | Phylogenetic Tree.py | py | 4,128 | python | en | code | 0 | github-code | 36 |
15947687332 | def process(dae_dir_path, out_dir_path):
import os
if not os.path.exists(dae_dir_path):
print('Path "%s" does not exist' % dae_dir_path)
return
if not os.path.exists(out_dir_path):
os.makedirs(out_dir_path)
files = os.listdir(dae_dir_path)
for file in files:
if file.endswith('.dae'):
file = dae_dir_path + '\\' + file
out_file = out_dir_path + '\\' + file[file.rfind('\\') + 1: file.find('.')] + '.gltf'
cmd = ''' cd C:/Users/dell/Documents/Projects/BIM+GIS & \
collada2gltf.exe -f "%s" -o "%s" -e
''' % (file, out_file)
print(cmd)
os.system(cmd)
if __name__ == '__main__':
dae_dir_path = 'C:\\Users\\dell\\Desktop\\Lab'
out_dir_path = 'C:\\Users\\dell\\Desktop\\Lab\\gltf'
process(dae_dir_path, out_dir_path)
| XinJack/UsefulScripts | collada2gltfWrapper_python/collada2gltf.py | collada2gltf.py | py | 751 | python | en | code | 0 | github-code | 36 |
9914536662 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import time
import stat
import io
import ctypes
import struct
import zlib
# https://users.cs.jmu.edu/buchhofp/forensics/formats/pkzip.html
# https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
# https://en.wikipedia.org/wiki/Zip_(file_format)#File_headers
ZIP_VERSION = b'\x14' # copied from output of the zipfile Python module
BLOCKSIZE = io.DEFAULT_BUFFER_SIZE # usually 8192
USE_DATA_DESCRIPTOR = True # required to be true for compliance to the spec
LOCAL_FILE_HEADER_SIZE = 30 # local file header + data descriptor (local file footer)
if USE_DATA_DESCRIPTOR:
LOCAL_FILE_HEADER_SIZE += 16
CENTRAL_DIRECTORY_FILE_HEADER_SIZE = 46
CENTRAL_DIRECTORY_FOOTER_SIZE = 22
FLAGS = (1 << 3 | 1 << 11) # flags: hide checksum (bit 3), UTF-8 names (bit 11)
# See: https://unix.stackexchange.com/a/14727/234161
# To quote, this is the layout used on Unix:
# TTTTsstrwxrwxrwx0000000000ADVSHR
# ^^^^____________________________ file type as explained above
# ^^^_________________________ setuid, setgid, sticky
# ^^^^^^^^^________________ permissions
# ^^^^^^^^________ This is the "lower-middle byte" your post mentions
# ^^^^^^^^ DOS attribute bits
EXTERNAL_ATTRIBUTES = (stat.S_IFREG | 0o664) << 16 # use a sane default
class ZipFileChanged(Exception):
'''
Raised when the stat() filesize isn't the same as the read filesize.
'''
pass
class ZipFile:
def __init__(self, path, zipname, st, offset):
self.path = path
self.zipname = zipname
self.st = st
self.localHeaderOffset = offset
def localSize(self):
'''
Calculate the size of the local header + data.
Every file will take up this space plus the space in the central directory.
'''
return LOCAL_FILE_HEADER_SIZE + len(self.zipname) + self.st.st_size
def centralDirectorySize(self):
'''
Return the size of the file entry in the central directory.
'''
return CENTRAL_DIRECTORY_FILE_HEADER_SIZE + len(self.zipname)
def totalSize(self):
'''
Return all space this file takes in the zipfile.
'''
return self.localSize() + self.centralDirectorySize()
def dos_time(self):
'''
Convert a timestamp to the weird DOS format the ZIP format uses - which
uses a simple naive date/time encoding function instead of storing the
GMT time in seconds like so many other formats do.
'''
ts = time.localtime(self.st.st_mtime)
return (ts.tm_sec // 2) | (ts.tm_min << 5) | (ts.tm_hour << 11)
def dos_date(self):
'''
See dos_time
'''
ts = time.localtime(self.st.st_mtime)
return (ts.tm_mday) | (ts.tm_mon << 5) | ((ts.tm_year - 1980) << 9)
class ZipSeeker:
def __init__(self):
self.files = []
def add(self, path, zipname=None):
'''
Add a file to this ZIP.
Doesn't actually read the file, only stat()s it.
You may need to provide another name for use within the ZIP.
'''
if zipname is None:
zipname = path
st = os.stat(path)
offset = 0
if len(self.files):
offset = self.files[-1].localHeaderOffset + self.files[-1].localSize()
self.files.append(ZipFile(path, zipname.encode('utf-8'), st, offset))
def size(self):
'''
Calculate and return the zip file size before generating it.
'''
size = sum(map(lambda f: f.totalSize(), self.files))
size += CENTRAL_DIRECTORY_FOOTER_SIZE
return size
def lastModified(self):
'''
Return the last last-modified time of all files in this ZIP file.
'''
return max(map(lambda f: f.st.st_mtime, self.files))
def centralDirectorySize(self):
'''
Internal helper function.
Calculate the length of the central directory (all entries in the
central directory, excluding the end-of-central-directory entry).
'''
size = 0
for file in self.files:
size += CENTRAL_DIRECTORY_FILE_HEADER_SIZE + len(file.zipname) # central directory header
return size
def centralDirectoryStart(self):
'''
Internal helper function.
Calculate the start index of the central directory.
'''
size = 0
for file in self.files:
size += file.st.st_size # (uncompressed) file itself
size += LOCAL_FILE_HEADER_SIZE + len(file.zipname) # central directory header
return size
def blocks(self):
for file in self.files:
# local file header
# length is 30 bytes (LOCAL_FILE_HEADER_SIZE)
yield struct.pack('<IccHHHHIIIHH',
0x04034b50, # 4-byte signature ("PK\x03\x04")
ZIP_VERSION, b'\x00', # 2-byte PKZIP version
FLAGS, # 2-byte flags
0, # 2-byte compression (no compression)
file.dos_time(), # 2-byte modtime in MS-DOS format
file.dos_date(), # 2-byte moddate in MS-DOS format
0, # 4-byte checksum - stored in data descriptor
file.st.st_size, # 4-byte compressed size
file.st.st_size, # 4-byte uncompressed size
len(file.zipname), # 2-byte filename length
0) # 2-byte extra field length
# Write the zip filename
yield file.zipname
# actual file data (without compression)
checksum = 0
size = 0
fp = open(file.path, 'rb')
buf = fp.read(BLOCKSIZE)
while buf:
size += len(buf)
if size > file.st.st_size:
raise ZipFileChanged('file %s at least %d bytes too big' % (repr(file.zipname), size - file.st.st_size))
checksum = zlib.crc32(buf, checksum) & 0xffffffff
yield buf
buf = fp.read(BLOCKSIZE)
fp.close()
if size != file.st.st_size:
raise ZipFileChanged('file %s with size %d doesn\'t match st_size %d' % (repr(file.zipname), size, file.st.st_size))
file.checksum = checksum
# Data descriptor
# Not strictly necessary, but doesn't add much overhead and might
# help ZIP readers. It is strictly required by the standard (MUST)
# but ZIP readers seem to work fine without it.
# length is 16 bytes (see LOCAL_FILE_HEADER_SIZE)
if USE_DATA_DESCRIPTOR:
yield struct.pack('<IIII',
0x08074b50, # 4-byte signature: "PK\x07\x08"
checksum, # 4-byte checksum
size, # 4-byte compressed size
size) # 4-byte uncompressed size
# Write the central directory file headers
# Length is 46 bytes + file name (CENTRAL_DIRECTORY_FILE_HEADER_SIZE)
for file in self.files:
yield struct.pack('<IccccHHHHIIIHHHHHII',
0x02014b50, # 4-byte signature: "PK\x01\x02"
ZIP_VERSION, b'\x03', # 2-byte system and version, copied from Python zipfile output
ZIP_VERSION, b'\x00', # 2-byte PKZIP version needed
FLAGS, # 2-byte flags
0, # 2-byte compression (no compression)
file.dos_time(), # 2-byte last modified time (local time)
file.dos_date(), # 2-byte last modified date (local time)
file.checksum, # 4-byte CRC-32 checksum
file.st.st_size, # 4-byte compressed size (not read due to flag bit 3 being set)
file.st.st_size, # 4-byte uncompressed size (not read due to flag bit 3 being set)
len(file.zipname), # 2-byte filename length
0, # 2-byte extra field length
0, # 2-byte comment length (no comment)
0, # 2-byte disk number (no split archives, always 0)
0, # 2-byte internal attributes, TODO detect text files
EXTERNAL_ATTRIBUTES, # 4-byte external attributes
file.localHeaderOffset) # 4-byte offset (index) of local header
# file name
yield file.zipname
# Write the end of central directory record
# length is 22 bytes (CENTRAL_DIRECTORY_FOOTER_SIZE)
yield struct.pack('<IHHHHIIH',
0x06054b50, # 4-byte signature: "PK\x05\x06"
0, # 2-byte disk number - always 0 (we don't split)
0, # 2-byte disk with central directory - always 0 (we don't split)
len(self.files), # 2-byte number of central directory entries on this disk
len(self.files), # 2-byte total number of central directory entries
self.centralDirectorySize(), # 4-byte index of start of central directory
self.centralDirectoryStart(), # 4-byte size of central directory (without this footer)
0) # 2-byte comment length - we don't use comments
def blocksOffset(self, start=0, end=None):
# TODO: optimize start=0 and end=None
pos = 0
for block in self.blocks():
if pos+len(block) <= start:
# stream hasn't started
pos += len(block)
continue
if end is not None and pos >= end:
# EOF
break
# start index in the block
if pos >= start:
startblock = 0
else:
startblock = start - pos
# end index of the block
if end is None or pos+len(block) <= end:
endblock = len(block)
else:
endblock = end - pos
if startblock == 0 and endblock == len(block):
yield block
else:
yield block[startblock:endblock]
pos += len(block)
def writeStream(self, out, start=0, end=None):
for block in self.blocksOffset(start, end):
out.write(block)
| aykevl/python-zipseeker | zipseeker/__init__.py | __init__.py | py | 10,783 | python | en | code | 2 | github-code | 36 |
38447232162 |
@login_required
@require_http_methods(['POST'])
def update_user_image(request):
form = UpdateImageProfileForm(request.POST, request.FILES)
data = {'status': 'success'}
if form.is_valid():
LOGGER.info('trying to update profile picture')
try:
(filename, image) = _resize_image(form.cleaned_data.get('profile_picture'))
user = SEIUser.objects.get(id=request.user.id)
user.avatar.save(filename, image)
if (user.avatar is not None):
data['url'] = user.avatar.url
except:
LOGGER.error('error while trying to update profile picture for user ' + request.user.username)
else:
data = {'status': 'danger', 'message': 'Archivo inválido'}
return JsonResponse(data, safe=False)
def _resize_image(external_image):
# scale dimensions
image_file = StringIO.StringIO(external_image.read())
image = Image.open(image_file)
resized_image = image.resize(
scale_dimensions(image.size[0], image.size[1]))
image_file = StringIO.StringIO()
resized_image.save(image_file, 'JPEG', quality=90)
filename = hashlib.md5(image_file.getvalue()).hexdigest()+'.jpg'
fullpath = os.path.join('/tmp', filename)
thumbnail_file = open(fullpath, 'w')
resized_image.save(thumbnail_file, 'JPEG')
thumbnail_file = open(fullpath, 'r')
content = File(thumbnail_file)
return (filename, content)
| carlos-mtz-arenas/py_utils | django_resize_image_endpoint.py | django_resize_image_endpoint.py | py | 1,447 | python | en | code | 0 | github-code | 36 |
2266175166 | import dash_core_components as dcc
import dash_html_components as html
from dash_devices.dependencies import Input, Output, State
import numpy as np
import pandas as pd
import sys
import re
import num2words
from app import app
from apps import user_mode
sys.path.insert(1, 'C:\\Users\\Antoine\\CloudStation\\EPFL\\Master 4\\Master project\\masterProject')
from UIDatasheet import UIDatasheet
import ui_finder
# Hard-coded, machine-specific path to the VINS dataset index.
# NOTE(review): should come from configuration rather than being baked in.
ui_path = 'C:\\Users\\Antoine\\CloudStation\\EPFL\\Master 4\\Master project\\Dataset\\VINS Dataset\\ui_list.json'
ui_df = pd.read_json(ui_path)  # dataframe describing every known UI screen
# Mutable module-level state shared by the callbacks below.
current_page = 0  # zero-based index of the results page currently displayed
max_page = 0  # total number of result pages for the last search (0 = no search yet)
filtered_ui = []  # filenames of UI images matching the last search, in display order
image_selected = ''  # filename of the thumbnail last clicked in the results grid
images = ['', '', '', '', '', '', '', '']  # filenames held by the selection slots (only 0-3 used in the UI)
# Layout for the "User's descriptions" tab of god mode: the user's
# descriptions, AI-search controls, manual filters, four image-selection
# slots and a paged 4x2 grid of candidate wireframe thumbnails.
description_layout = html.Div([
    # Latest user descriptions plus a button that copies the newest one
    # into the AI-search text area (see copy_description below).
    html.Div([
        html.H3('Descriptions'),
        html.P(id='content-description', className='ow'),
        html.Button('Copy description', id='button-copy-description', n_clicks=0),
    ], style={'margin': '15px'}),
    html.Div([
        html.H3('AI search'),
    ], style={'margin': '15px'}),
    # AI-search inputs: free text, top-k count, and the Run button.
    html.Div([
        html.Div([
            dcc.Textarea(id='content-text-value', value='', cols=70, placeholder='Text value'),
        ], style={'flex-grow': '1'}),
        html.Div([
            dcc.Input(id='input-topk', type='number', value=16, min=1, placeholder='Top-k value'),
        ], style={'flex-grow': '1'}),
        html.Div([
            html.Button('Run', id='button-run-ai', n_clicks=0),
        ], style={'flex-grow': '1'}),
    ], style={'margin': '15px', 'display': 'flex'}),
    html.Div([
        html.P(id='content-info-retrieved'),
    ], style={'margin': '15px'}),
    # Manual filter row: screen label, widget counts, yes/no flags, free text.
    html.Div([
        html.Div([
            html.H3('Label'),
            dcc.Dropdown(
                id='dropdown-label',
                options=[
                    {'label': 'Bare', 'value': 'bare'},
                    {'label': 'Shop', 'value': 'shop'},
                    {'label': 'Form', 'value': 'form'},
                    {'label': 'Gallery', 'value': 'gallery'},
                    {'label': 'List', 'value': 'list'},
                    {'label': 'Login', 'value': 'login'},
                    {'label': 'Map', 'value': 'map'},
                    {'label': 'Menu', 'value': 'menu'},
                    {'label': 'Modal', 'value': 'modal'},
                    {'label': 'News', 'value': 'news'},
                    {'label': 'Profile', 'value': 'profile'},
                    {'label': 'Search', 'value': 'search'},
                    {'label': 'Settings', 'value': 'settings'},
                    {'label': 'Terms', 'value': 'terms'},
                    {'label': 'Tutorial', 'value': 'tutorial'},
                    {'label': 'Other', 'value': 'other'},
                ],
            ),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Number of buttons'),
            dcc.Input(id='input-buttons', type='number', min=0),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Number of input fields'),
            dcc.Input(id='input-input-fields', type='number', min=0),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Page indicator'),
            dcc.Dropdown(
                id='dropdown-page-indicator',
                options=[
                    {'label': 'Yes', 'value': 'yes'},
                    {'label': 'No', 'value': 'no'},
                ],
            ),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Map'),
            dcc.Dropdown(
                id='dropdown-map',
                options=[
                    {'label': 'Yes', 'value': 'yes'},
                    {'label': 'No', 'value': 'no'},
                ],
            ),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Text filter'),
            dcc.Textarea(id='content-text-filter', value='', rows=7),
        ], style={'flex-grow': '2', 'margin': '15px'}),
    ], style={'display': 'flex'}),
    html.Div([
        html.Button('Clear filters', id='button-clear-filters', n_clicks=0, style={'margin': '15px'}),
        html.Button('Search', id='button-search', n_clicks=0, style={'margin': '15px'}),
    ], style={'margin-bottom': '10px',
              'textAlign': 'center',
              'margin': 'auto'}),
    # Search progress bar and status line.
    html.Div([
        html.Progress(id='progress-search', value='0', max=100, style={'width': '30%'}),
        html.P(id='content-search'),
    ], style={'margin-bottom': '10px',
              'textAlign': 'center',
              'margin': 'auto'}),
    html.Div([
        # Left column: four selection slots (thumbnail, select button,
        # current filename, clear button) plus the "Send images" button.
        html.Div([
            html.Div([
                html.H3('Image selections'),
                html.Div([
                    html.Img(src=app.get_asset_url('background.png'), id='img_select0', style={'width': '20%'}),
                    html.Button('Select 1st image', id='button-select0', n_clicks=0, style={'height': '10%'}),
                    html.P('None', id='content-select0'),
                    html.Button('Clear', id='button-clear0', n_clicks=0, style={'height': '10%'}),
                ], style={'display': 'flex', 'margin': '15px'}),
                html.Div([
                    html.Img(src=app.get_asset_url('background.png'), id='img_select1', style={'width': '20%'}),
                    html.Button('Select 2nd image', id='button-select1', n_clicks=0, style={'height': '10%'}),
                    html.P('None', id='content-select1'),
                    html.Button('Clear', id='button-clear1', n_clicks=0, style={'height': '10%'}),
                ], style={'display': 'flex', 'margin': '15px'}),
                html.Div([
                    html.Img(src=app.get_asset_url('background.png'), id='img_select2', style={'width': '20%'}),
                    html.Button('Select 3rd image', id='button-select2', n_clicks=0, style={'height': '10%'}),
                    html.P('None', id='content-select2'),
                    html.Button('Clear', id='button-clear2', n_clicks=0, style={'height': '10%'}),
                ], style={'display': 'flex', 'margin': '15px'}),
                html.Div([
                    html.Img(src=app.get_asset_url('background.png'), id='img_select3', style={'width': '20%'}),
                    html.Button('Select 4th image', id='button-select3', n_clicks=0, style={'height': '10%'}),
                    html.P('None', id='content-select3'),
                    html.Button('Clear', id='button-clear3', n_clicks=0, style={'height': '10%'}),
                ], style={'display': 'flex', 'margin': '15px'}),
                # Slots 5-8 below are disabled; only four images can be sent.
                # html.Div([
                #     html.Button('Select 5th image', id='button-select4', n_clicks=0),
                #     html.P('None', id='content-select4')
                # ], style={'display': 'flex', 'margin': '15px'}),
                # html.Div([
                #     html.Button('Select 6th image', id='button-select5', n_clicks=0),
                #     html.P('None', id='content-select5')
                # ], style={'display': 'flex', 'margin': '15px'}),
                # html.Div([
                #     html.Button('Select 7th image', id='button-select6', n_clicks=0),
                #     html.P('None', id='content-select6')
                # ], style={'display': 'flex', 'margin': '15px'}),
                # html.Div([
                #     html.Button('Select 8th image', id='button-select7', n_clicks=0),
                #     html.P('None', id='content-select7')
                # ], style={'display': 'flex', 'margin': '15px'}),
                html.Button('Send images', id='button-send-images', n_clicks=0),
            ], style={'margin': '15px'})
        ], style={'float': 'left', 'width': '20%'}),
        # Right column: 4x2 grid of clickable result thumbnails and paging
        # controls; n_clicks_timestamp=-1 is the "never clicked" sentinel
        # relied on by the grid-click callback below.
        html.Div([
            html.Div(
                [
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god0', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god1', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god2', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god3', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                ], style={'display': 'flex'}
            ),
            html.Div(
                [
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god4', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god5', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god6', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god7', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                ], style={'display': 'flex'}
            ),
            html.Div(
                [
                    html.Div([
                        html.Button('Previous page', id='button-previous', n_clicks=0),
                    ], style={'flex-grow': '1', 'textAlign': 'right', 'margin': '15px'}),
                    html.Div([
                        html.Button('Next page', id='button-next-page', n_clicks=0),
                    ], style={'flex-grow': '1', 'textAlign': 'left', 'margin': '15px'}),
                ], style={'display': 'flex'}
            ),
            html.Div(
                [
                    html.P('Page ... out of ...', id='content-page-number'),
                ], style={'textAlign': 'center', 'margin': 'auto'}
            ),
        ], style={'float': 'right', 'width': '80%'}),
    ]),
])
# Layout for the "User's commands" tab: shows the images and commands the
# user has issued, plus buttons to push/clear an error message to the user.
commands_layout = html.Div([
    html.Div([
        html.H3('Selected Images'),
        html.P(id='content-image', className='ow'),
    ], style={'margin': '15px'}),
    html.Div([
        html.H3('Commands'),
        html.P(id='content-command', className='ow'),
    ], style={'margin': '15px'}),
    html.Div(
        [
            html.Button('Send error message', id='button-send-error', n_clicks=0, style={'margin': '15px'}),
            html.Button('Clear error message', id='button-clear-error', n_clicks=0, style={'margin': '15px'}),
        ],
        style={'margin-bottom': '10px',
               'textAlign': 'center',
               'width': '220px',
               'margin': 'auto'}
    ),
])
# Top-level page layout: two tabs wrapping the layouts defined above.
layout = html.Div([
    dcc.Tabs(id='tabs-god-mode', value='tab-description', children=[
        dcc.Tab(label='User\'s descriptions', value='tab-description', children=description_layout),
        dcc.Tab(label='User\'s commands', value='tab-commands', children=commands_layout),
    ])
])
@app.callback(None,
              [Input('button-send-error', 'n_clicks')])
def send_error(n_clicks):
    """Show a canned 'request too complex' message in the user's error banner."""
    if not n_clicks:
        return
    message = ('Error: the requested command is too complex for the system,'
               ' please modify your request')
    app.push_mods({'content-error': {'children': message}})
@app.callback(None,
              [Input('button-clear-error', 'n_clicks')])
def clear_error(n_clicks):
    """Blank the user's error banner.

    Renamed from ``send_error`` — the previous name was a copy-paste
    duplicate that shadowed the sending callback above and described the
    opposite of what this does. Dash registers the callback at decoration
    time, so the module-level name is not part of the interface.
    """
    if n_clicks:
        app.push_mods({'content-error': {'children': ''}})
@app.callback(None,
              [Input('button-select0', 'n_clicks')])
def select_image(n_clicks):
    """Copy the thumbnail last clicked in the grid into selection slot 0."""
    if not n_clicks:
        return
    global image_selected
    if image_selected:
        images[0] = image_selected
        app.push_mods({'content-select0': {'children': image_selected}})
        app.push_mods({'img_select0': {'src': app.get_asset_url('wireframes/' + images[0])}})
@app.callback(None,
              [Input('button-select1', 'n_clicks')])
def select_image(n_clicks):
    """Copy the thumbnail last clicked in the grid into selection slot 1."""
    if not n_clicks:
        return
    global image_selected
    if image_selected:
        images[1] = image_selected
        app.push_mods({'content-select1': {'children': image_selected}})
        app.push_mods({'img_select1': {'src': app.get_asset_url('wireframes/' + images[1])}})
@app.callback(None,
              [Input('button-select2', 'n_clicks')])
def select_image(n_clicks):
    """Copy the thumbnail last clicked in the grid into selection slot 2."""
    if not n_clicks:
        return
    global image_selected
    if image_selected:
        images[2] = image_selected
        app.push_mods({'content-select2': {'children': image_selected}})
        app.push_mods({'img_select2': {'src': app.get_asset_url('wireframes/' + images[2])}})
@app.callback(None,
              [Input('button-select3', 'n_clicks')])
def select_image(n_clicks):
    """Copy the thumbnail last clicked in the grid into selection slot 3."""
    if not n_clicks:
        return
    global image_selected
    if image_selected:
        images[3] = image_selected
        app.push_mods({'content-select3': {'children': image_selected}})
        app.push_mods({'img_select3': {'src': app.get_asset_url('wireframes/' + images[3])}})
@app.callback(None,
              [Input('button-clear0', 'n_clicks')])
def clear_image(n_clicks):
    """Empty selection slot 0 and restore its placeholder thumbnail.

    Renamed from ``select_image`` — the old name described the opposite
    action and shadowed the real selection callbacks above. Dash registers
    the callback at decoration time, so the rename is safe.
    """
    if n_clicks:
        # ``images`` is only mutated in place, so ``global`` is not strictly
        # needed; kept to make the shared-state access explicit.
        global images
        images[0] = ''
        app.push_mods({'content-select0': {'children': 'None'}})
        app.push_mods({'img_select0': {'src': app.get_asset_url('background.png')}})
@app.callback(None,
              [Input('button-clear1', 'n_clicks')])
def clear_image(n_clicks):
    """Empty selection slot 1 and restore its placeholder thumbnail.

    Renamed from ``select_image`` — the old name described the opposite
    action and shadowed the real selection callbacks above. Dash registers
    the callback at decoration time, so the rename is safe.
    """
    if n_clicks:
        # ``images`` is only mutated in place, so ``global`` is not strictly
        # needed; kept to make the shared-state access explicit.
        global images
        images[1] = ''
        app.push_mods({'content-select1': {'children': 'None'}})
        app.push_mods({'img_select1': {'src': app.get_asset_url('background.png')}})
@app.callback(None,
              [Input('button-clear2', 'n_clicks')])
def clear_image(n_clicks):
    """Empty selection slot 2 and restore its placeholder thumbnail.

    Renamed from ``select_image`` — the old name described the opposite
    action and shadowed the real selection callbacks above. Dash registers
    the callback at decoration time, so the rename is safe.
    """
    if n_clicks:
        # ``images`` is only mutated in place, so ``global`` is not strictly
        # needed; kept to make the shared-state access explicit.
        global images
        images[2] = ''
        app.push_mods({'content-select2': {'children': 'None'}})
        app.push_mods({'img_select2': {'src': app.get_asset_url('background.png')}})
@app.callback(None,
              [Input('button-clear3', 'n_clicks')])
def clear_image(n_clicks):
    """Empty selection slot 3 and restore its placeholder thumbnail.

    Renamed from ``select_image`` — the old name described the opposite
    action and shadowed the real selection callbacks above. Dash registers
    the callback at decoration time, so the rename is safe.
    """
    if n_clicks:
        # ``images`` is only mutated in place, so ``global`` is not strictly
        # needed; kept to make the shared-state access explicit.
        global images
        images[3] = ''
        app.push_mods({'content-select3': {'children': 'None'}})
        app.push_mods({'img_select3': {'src': app.get_asset_url('background.png')}})
@app.callback(None,
              [Input('button-copy-description', 'n_clicks')])
def copy_description(n_clicks):
    """Copy the user's most recent description into the AI-search text area."""
    if not (n_clicks and user_mode.data.descriptions):
        return
    latest = user_mode.data.descriptions[-1]
    app.push_mods({'content-text-value': {'value': latest}})
@app.callback(None,
              [Input('img_god0', 'n_clicks_timestamp'),
               Input('img_god1', 'n_clicks_timestamp'),
               Input('img_god2', 'n_clicks_timestamp'),
               Input('img_god3', 'n_clicks_timestamp'),
               Input('img_god4', 'n_clicks_timestamp'),
               Input('img_god5', 'n_clicks_timestamp'),
               Input('img_god6', 'n_clicks_timestamp'),
               Input('img_god7', 'n_clicks_timestamp')])
def select_image(n_clicks_timestamp0, n_clicks_timestamp1, n_clicks_timestamp2, n_clicks_timestamp3,
                 n_clicks_timestamp4, n_clicks_timestamp5, n_clicks_timestamp6, n_clicks_timestamp7):
    """Track clicks on the 4x2 results grid: remember the most recently
    clicked thumbnail in ``image_selected`` and highlight that grid cell."""
    if filtered_ui:
        global current_page
        global image_selected
        list_timestamps = [n_clicks_timestamp0, n_clicks_timestamp1, n_clicks_timestamp2, n_clicks_timestamp3,
                           n_clicks_timestamp4, n_clicks_timestamp5, n_clicks_timestamp6, n_clicks_timestamp7]
        # The grid cell clicked last carries the largest click timestamp.
        max_idx = np.argmax(list_timestamps)
        # Each timestamp starts at -1 (set in the layout), so a sum of
        # exactly -1 * len(...) means nothing has ever been clicked.
        if sum(list_timestamps) != -1 * len(list_timestamps):
            # Guard against clicking an empty cell on a partially-filled
            # last page; the selection is only updated for a real image.
            if max_idx + current_page * 8 < len(filtered_ui):
                image_selected = filtered_ui[max_idx + current_page * 8]
            # Reset every cell's CSS class, then highlight the clicked one.
            app.push_mods({'img_god0': {'className': 'imageUI'}})
            app.push_mods({'img_god1': {'className': 'imageUI'}})
            app.push_mods({'img_god2': {'className': 'imageUI'}})
            app.push_mods({'img_god3': {'className': 'imageUI'}})
            app.push_mods({'img_god4': {'className': 'imageUI'}})
            app.push_mods({'img_god5': {'className': 'imageUI'}})
            app.push_mods({'img_god6': {'className': 'imageUI'}})
            app.push_mods({'img_god7': {'className': 'imageUI'}})
            app.push_mods({'img_god{}'.format(max_idx): {'className': 'imageUIselected'}})
@app.callback(None,
              [Input('button-send-images', 'n_clicks')])
def send_images(n_clicks):
    """Push the four selection slots to the user's view and flag them as sent.

    Slots holding a filename are shown with their wireframe image; empty
    slots are hidden. Only the first four slots are wired to the user view.
    """
    if not n_clicks:
        return
    for slot in range(4):
        target = 'img{}'.format(slot)
        if images[slot]:
            app.push_mods({target: {'hidden': False}})
            app.push_mods({target: {'src': app.get_asset_url('wireframes/' + images[slot])}})
        else:
            app.push_mods({target: {'hidden': True}})
    user_mode.data.images_selected = images
    user_mode.data.image_sent = True
#
# @app.callback(None,
# [Input('tabs-god-mode', 'value'), ])
# def render_content(tab):
# if tab == 'tab-description':
# app.push_mods({'tabs-layout': {'children': [description_layout]}})
# description_content = ''
# if user_mode.data.descriptions:
# nb_desc = len(user_mode.data.descriptions)
# for i in range(nb_desc):
# description_content += '{}: {} <br>'.format(i + 1, user_mode.data.descriptions[i])
# app.push_mods({'content-description': {'children': DangerouslySetInnerHTML(description_content)}})
# elif tab == 'tab-commands':
# app.push_mods({'tabs-layout': {'children': [commands_layout]}})
# image_content = ''
# if user_mode.data.images:
# nb_desc = len(user_mode.data.images)
# for i in range(nb_desc):
# image_content += '{}: {} <br>'.format(i + 1, user_mode.data.images[i].replace('.jpg', ''))
# app.push_mods({'content-image': {'children': DangerouslySetInnerHTML(image_content)}})
#
# command_content = ''
# if user_mode.data.commands:
# nb_desc = len(user_mode.data.commands)
# for i in range(nb_desc):
# command_content += '{}: {} <br>'. format(i+1, user_mode.data.commands[i])
# app.push_mods({'content-command': {'children': DangerouslySetInnerHTML(command_content)}})
@app.callback(None,
              [Input('button-previous', 'n_clicks')])
def control_previous(n_clicks_previous):
    """Step the wireframe gallery back one page of 8 thumbnails.

    Decrements the global ``current_page`` (never below 0), re-points the
    eight ``img_god*`` slots at the images of the new page, and refreshes
    the "Page X out of Y" indicator.
    """
    global current_page
    global max_page
    global filtered_ui
    # No-op until a search has populated the gallery and the button was clicked.
    if not (max_page and n_clicks_previous):
        return
    if current_page <= 0:
        return
    current_page -= 1
    for slot in range(8):
        idx = slot + current_page * 8
        # Bounds guard (same as control_next): pages before the last are
        # normally full, but never index past a shrunken filtered_ui.
        if idx < len(filtered_ui):
            app.push_mods(
                {'img_god{}'.format(slot): {'src': app.get_asset_url('wireframes/' + filtered_ui[idx])}})
    app.push_mods(
        {'content-page-number': {'children': 'Page {} out of {}'.format(current_page + 1, max_page)}})
@app.callback(None,
              [Input('button-clear-filters', 'n_clicks')])
def clear_filters(n_clicks):
    """Blank every search control and forget the cached filter results."""
    global max_page
    global filtered_ui
    if not n_clicks:
        return
    # Reset each filter widget to its empty value, in display order.
    for control_id in ('dropdown-label',
                       'input-buttons',
                       'input-input-fields',
                       'dropdown-page-indicator',
                       'dropdown-map',
                       'content-text-filter'):
        app.push_mods({control_id: {'value': ''}})
    # Drop pagination state and the cached result list.
    max_page = 0
    filtered_ui = []
@app.callback(None,
              [Input('button-next-page', 'n_clicks')])
def control_next(n_clicks_next):
    """Advance the wireframe gallery one page of 8 thumbnails.

    Increments the global ``current_page`` (never past the last page),
    blanks the eight ``img_god*`` slots, fills as many as the new page
    has results for, and refreshes the "Page X out of Y" indicator.
    """
    global current_page
    global max_page
    global filtered_ui
    # No-op until a search has populated the gallery and the button was clicked.
    if not (max_page and n_clicks_next):
        return
    if current_page >= max_page - 1:
        return
    current_page += 1
    # Clear all slots first so a partial last page shows no stale images.
    for slot in range(8):
        app.push_mods({'img_god{}'.format(slot): {'src': app.get_asset_url('background.png')}})
    for slot in range(8):
        idx = slot + current_page * 8
        if idx < len(filtered_ui):
            app.push_mods(
                {'img_god{}'.format(slot): {'src': app.get_asset_url('wireframes/' + filtered_ui[idx])}})
    app.push_mods(
        {'content-page-number': {'children': 'Page {} out of {}'.format(current_page + 1, max_page)}})
@app.callback(None,
              [Input('button-search', 'n_clicks')],
              [State('dropdown-label', 'value'),
               State('input-buttons', 'value'),
               State('input-input-fields', 'value'),
               State('dropdown-page-indicator', 'value'),
               State('dropdown-map', 'value'),
               State('content-text-filter', 'value')])
def filter_ui(n_clicks, label, nb_buttons, nb_input, page, map_, text_filter):
    """Filter the wireframe corpus by the manual search controls.

    Applies each non-empty filter in turn over the row indices of the
    global ``ui_df`` (label, button count, input-field count, page
    indicator, map, free text), stores the surviving image names in the
    global ``filtered_ui`` and shows the first page of 8 thumbnails.
    """
    if not n_clicks:
        return
    global filtered_ui
    global max_page
    global current_page
    filtered_ui = []

    def keep_indices(indices, message, keep):
        """One filter pass with progress reporting; return surviving indices."""
        app.push_mods({'content-search': {'children': message}})
        kept = []
        for count, index in enumerate(indices):
            progress = int((count / len(indices)) * 100)
            if progress % 10 == 0:
                app.push_mods({'progress-search': {'value': str(progress)}})
            if keep(index):
                kept.append(index)
        app.push_mods({'progress-search': {'value': '100'}})
        app.push_mods({'content-search': {'children': ''}})
        return kept

    def component_count(index, component_type):
        """Total occurrences of *component_type* in a UI's component types."""
        components = pd.DataFrame.from_dict(ui_df.iloc[index].components)
        return components.type.str.count(component_type).sum()

    def presence_keep(choice, cnt):
        """'yes' keeps UIs that have the component, 'no' keeps UIs without it;
        any other value keeps everything (matches the original semantics)."""
        if choice == 'yes':
            return cnt > 0
        if choice == 'no':
            return cnt == 0
        return True

    index_list = list(range(len(ui_df)))
    if label:
        index_list = keep_indices(index_list, 'Label filtering...',
                                  lambda i: ui_df.iloc[i].label == label)
    if nb_buttons:
        # NOTE(review): nb_buttons comes straight from the State value —
        # assumes it is numeric, not a string; confirm the input component type.
        index_list = keep_indices(index_list, 'Button filtering...',
                                  lambda i: component_count(i, 'TextButton') == nb_buttons)
    if nb_input:
        index_list = keep_indices(index_list, 'Input fields filtering...',
                                  lambda i: component_count(i, 'EditText') == nb_input)
    if page:
        index_list = keep_indices(index_list, 'Page indicator filtering...',
                                  lambda i: presence_keep(page, component_count(i, 'PageIndicator')))
    if map_:
        index_list = keep_indices(index_list, 'Map filtering...',
                                  lambda i: presence_keep(map_, component_count(i, 'Map')))
    if text_filter:
        text_filter_words = text_filter.lower().split()

        def contains_all_words(i):
            components = pd.DataFrame.from_dict(ui_df.iloc[i].components)
            text_ui = ' '.join(components.text.tolist()).lower()
            return all(word in text_ui for word in text_filter_words)

        index_list = keep_indices(index_list, 'Text filtering...', contains_all_words)

    # Column 0 of ui_df holds the wireframe file stem.
    filtered_ui = [ui_df.iloc[index, 0] + '.jpg' for index in index_list]

    # Clear all slots first so fewer-than-8 results leave no stale images.
    for slot in range(8):
        app.push_mods({'img_god{}'.format(slot): {'src': app.get_asset_url('background.png')}})
    if not filtered_ui:
        app.push_mods({'content-search': {'children': 'No results found for the given filters.'}})
    for slot, image in enumerate(filtered_ui[:8]):
        app.push_mods({'img_god{}'.format(slot): {'src': app.get_asset_url('wireframes/' + image)}})

    page_size = 8
    current_page = 0
    max_page = (len(filtered_ui) + page_size - 1) // page_size
    if max_page == 0:
        max_page = 1
    app.push_mods({'content-page-number': {'children': 'Page {} out of {}'.format(current_page + 1, max_page)}})
@app.callback(None,
              [Input('button-run-ai', 'n_clicks')],
              [State('content-text-value', 'value'),
               State('input-topk', 'value')])
def run_ai(n_clicks, description, k):
    """Run the AI wireframe search for a free-text description.

    Normalizes digits to words, feeds the description through
    ``ui_finder`` to extract a label and components, retrieves the top
    ``k`` matching wireframes, stores them in the global ``filtered_ui``
    and shows the first page of 8 thumbnails.
    """
    if not (n_clicks and description and k):
        return
    global filtered_ui
    global max_page
    global current_page
    app.push_mods({'content-info-retrieved': {'children': 'Running...'}})
    # Spell out every digit run as words so the NLP pipeline sees only text.
    description = re.sub(r"(\d+)", lambda x: num2words.num2words(int(x.group(0))), description)
    ui_datasheet = UIDatasheet()
    ui_datasheet.description = description
    ui_finder.get_label(ui_datasheet)
    ui_finder.get_components(ui_datasheet)
    info = ui_finder.print_info(ui_datasheet)
    app.push_mods({'content-info-retrieved': {'children': info}})
    wf_list = ui_finder.search_wf(ui_datasheet, k)
    filtered_ui = [wf + '.jpg' for wf in wf_list]

    # Clear all slots first so fewer-than-8 results leave no stale images.
    for slot in range(8):
        app.push_mods({'img_god{}'.format(slot): {'src': app.get_asset_url('background.png')}})
    if not filtered_ui:
        # NOTE(review): message is pushed to 'content-search' as in the manual
        # filter callback — presumably should target 'content-info-retrieved'
        # here; confirm before changing.
        app.push_mods({'content-search': {'children': 'No results found for the given filters.'}})
    for slot, image in enumerate(filtered_ui[:8]):
        app.push_mods({'img_god{}'.format(slot): {'src': app.get_asset_url('wireframes/' + image)}})

    page_size = 8
    current_page = 0
    max_page = (len(filtered_ui) + page_size - 1) // page_size
    if max_page == 0:
        max_page = 1
    app.push_mods({'content-page-number': {'children': 'Page {} out of {}'.format(current_page + 1, max_page)}})
| antoine-zurcher/master-project | prototype/apps/god_mode.py | god_mode.py | py | 37,217 | python | en | code | 0 | github-code | 36 |
34212026055 | # 19.09.28
stic1 = [12,12,12,12,12]
stic2 = [12,80,14,22,100]
def get_maximum(stic):
stic_len = len(stic)
dp = [0]*stic_len # stic_len 만큼 dp배열 생성
# 초기화
dp[0] = stic[0]
dp[1] = max((stic[0], stic[1]))
# dp
for i in range(2, stic_len):
dp[i] = max((stic[i]+dp[i-2], dp[i-1] ))
return dp[-1]
if __name__=="__main__":
get_maximum(stic1)
get_maximum(stic2) | chankoo/problem-solving | dp/prog_1909_3.py | prog_1909_3.py | py | 433 | python | en | code | 1 | github-code | 36 |
8087397109 | from transformers import BertTokenizer, BertModel
import torch
class Bert:
def __init__(self):
self.model = BertModel.from_pretrained("bert-base-uncased", output_hidden_states = True)
self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.dimensions = 768
def getEmbedding(self, data):
with torch.no_grad():
tab = self.tokenizer(
data,
padding=True,
truncation=True,
return_tensors="pt"
).to(self.device)
self.model = self.model.to(self.device)
output = self.model(**tab)
return [i[0] for i in output.last_hidden_state] | aberenguerpas/inferia | services/embeddings/Bert.py | Bert.py | py | 806 | python | en | code | 0 | github-code | 36 |
4354535219 | from collections import OrderedDict
from datetime import date, datetime
from decimal import Decimal, ROUND_DOWN
from models import models
from peewee import fn
from playhouse.shortcuts import model_to_dict
from pytz import timezone
from time import time
import app_config
import copytext
import simplejson as json
import xlrd
MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
AP_MONTHS = ['Jan.', 'Feb.', 'March', 'April', 'May', 'June', 'July', 'Aug.', 'Sept.', 'Oct.', 'Nov.', 'Dec.']
ORDINAL_SUFFIXES = { 1: 'st', 2: 'nd', 3: 'rd' }
USPS_TO_AP_STATE = {
'AL': 'Ala.',
'AK': 'Alaska',
'AR': 'Ark.',
'AZ': 'Ariz.',
'CA': 'Calif.',
'CO': 'Colo.',
'CT': 'Conn.',
'DC': 'D.C.',
'DE': 'Del.',
'FL': 'Fla.',
'GA': 'Ga.',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Ill.',
'IN': 'Ind.',
'KS': 'Kan.',
'KY': 'Ky.',
'LA': 'La.',
'MA': 'Mass.',
'MD': 'Md.',
'ME': 'Maine',
'MI': 'Mich.',
'MN': 'Minn.',
'MO': 'Mo.',
'MS': 'Miss.',
'MT': 'Mont.',
'NC': 'N.C.',
'ND': 'N.D.',
'NE': 'Neb.',
'NH': 'N.H.',
'NJ': 'N.J.',
'NM': 'N.M.',
'NV': 'Nev.',
'NY': 'N.Y.',
'OH': 'Ohio',
'OK': 'Okla.',
'OR': 'Ore.',
'PA': 'Pa.',
'PR': 'P.R.',
'RI': 'R.I.',
'SC': 'S.C.',
'SD': 'S.D.',
'TN': 'Tenn.',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Va.',
'VT': 'Vt.',
'WA': 'Wash.',
'WI': 'Wis.',
'WV': 'W.Va.',
'WY': 'Wyo.'
}
GOP_CANDIDATES = [
'Ted Cruz',
'John Kasich',
'Donald Trump'
]
DEM_CANDIDATES = [
'Hillary Clinton',
'Bernie Sanders'
]
PARTY_MAPPING = {
'dem': {
'AP': 'Dem',
'long': 'Democrat',
'class': 'democrat',
'adverb': 'Democratic',
},
'gop': {
'AP': 'GOP',
'long': 'Republican',
'class': 'republican',
'adverb': 'Republican',
}
}
def comma_filter(value):
"""
Format a number with commas.
"""
return '{:,}'.format(value)
def percent_filter(value):
"""
Format percentage
"""
value = Decimal(value) * Decimal(100)
if value == 0:
return '0%'
elif value == 100:
return '100%'
elif value > 0 and value < 1:
return '<1%'
else:
cleaned_pct = value.quantize(Decimal('.1'), rounding=ROUND_DOWN)
return '{:.1f}%'.format(cleaned_pct)
def ordinal_filter(num):
"""
Format a number as an ordinal.
"""
num = int(num)
if 10 <= num % 100 <= 20:
suffix = 'th'
else:
suffix = ORDINAL_SUFFIXES.get(num % 10, 'th')
return unicode(num) + suffix
def ap_month_filter(month):
"""
Convert a month name into AP abbreviated style.
"""
return AP_MONTHS[int(month) - 1]
def ap_date_filter(value):
"""
Converts a date string in m/d/yyyy format into AP style.
"""
if isinstance(value, basestring):
value = datetime.strptime(value, '%m/%d/%Y')
value_tz = _set_timezone(value)
output = AP_MONTHS[value_tz.month - 1]
output += ' ' + unicode(value_tz.day)
output += ', ' + unicode(value_tz.year)
return output
def ap_time_filter(value):
"""
Converts a datetime or string in hh:mm format into AP style.
"""
if isinstance(value, basestring):
value = datetime.strptime(value, '%I:%M')
value_tz = _set_timezone(value)
value_year = value_tz.replace(year=2016)
return value_year.strftime('%-I:%M')
def ap_state_filter(usps):
"""
Convert a USPS state abbreviation into AP style.
"""
return USPS_TO_AP_STATE[unicode(usps)]
def ap_time_period_filter(value):
"""
Converts Python's AM/PM into AP Style's a.m./p.m.
"""
if isinstance(value, basestring):
value = datetime.strptime(value, '%p')
value_tz = _set_timezone(value)
value_year = value_tz.replace(year=2016)
periods = '.'.join(value_year.strftime('%p')) + '.'
return periods.lower()
def candidate_sort_lastname(item):
if item.winner:
return -1
elif item.last == 'Other' or item.last == 'Uncommitted' or item.last == 'Write-ins':
return 'zzz'
else:
return item.last
def candidate_sort_votecount(item):
return item.votecount
def _set_timezone(value):
datetime_obj_utc = value.replace(tzinfo=timezone('GMT'))
datetime_obj_est = datetime_obj_utc.astimezone(timezone('US/Eastern'))
return datetime_obj_est
def collate_other_candidates(results, party):
if party == 'GOP':
whitelisted_candidates = GOP_CANDIDATES
elif party == 'Dem':
whitelisted_candidates = DEM_CANDIDATES
other_votecount = 0
other_votepct = 0
for result in reversed(results):
candidate_name = '%s %s' % (result.first, result.last)
if candidate_name not in whitelisted_candidates:
other_votecount += result.votecount
other_votepct += result.votepct
results.remove(result)
return results, other_votecount, other_votepct
def set_delegates_updated_time():
"""
Write timestamp to filesystem
"""
now = time()
with open(app_config.DELEGATE_TIMESTAMP_FILE, 'w') as f:
f.write(str(now))
def get_delegates_updated_time():
"""
Read timestamp from file system and return UTC datetime object.
"""
with open(app_config.DELEGATE_TIMESTAMP_FILE) as f:
updated_ts = f.read()
return datetime.utcfromtimestamp(float(updated_ts))
def never_cache_preview(response):
"""
Ensure preview is never cached
"""
response.cache_control.max_age = 0
response.cache_control.no_cache = True
response.cache_control.must_revalidate = True
response.cache_control.no_store = True
return response
def open_db():
"""
Open db connection
"""
models.db.connect()
def close_db(response):
"""
Close db connection
"""
models.db.close()
return response
def get_results(party, electiondate):
ap_party = PARTY_MAPPING[party]['AP']
race_ids = models.Result.select(fn.Distinct(models.Result.raceid), models.Result.statename).where(
models.Result.electiondate == electiondate,
models.Result.party == ap_party,
models.Result.level == 'state',
models.Result.officename == 'President',
)
blacklist = app_config.RACE_BLACKLIST.get(electiondate)
if blacklist:
race_ids = race_ids.where(~(models.Result.raceid << blacklist))
race_ids.order_by(models.Result.statename, models.Result.raceid)
# Get copy once
copy_obj = copytext.Copy(app_config.COPY_PATH)
copy = copy_obj['meta']._serialize()
output = []
for race in race_ids:
output.append(get_race_results(race.raceid, ap_party, copy, race.statename))
sorted_output = sorted(output, key=lambda k: k['statename'])
return sorted_output
def get_race_results(raceid, party, copy, statename):
"""
Results getter
"""
race_results = models.Result.select().where(
models.Result.raceid == raceid,
models.Result.level == 'state',
models.Result.statename == statename
)
filtered, other_votecount, other_votepct = collate_other_candidates(list(race_results), party)
secondary_sort = sorted(filtered, key=candidate_sort_lastname)
sorted_results = sorted(secondary_sort, key=candidate_sort_votecount, reverse=True)
called = False
serialized_results = []
for result in sorted_results:
if (result.winner and result.call[0].accept_ap) or result.call[0].override_winner:
called = True
serialized_results.append(model_to_dict(result, backrefs=True))
output = {
'results': serialized_results,
'other_votecount': other_votecount,
'other_votepct': other_votepct,
'statename': serialized_results[0]['statename'],
'statepostal': serialized_results[0]['statepostal'],
'precinctsreportingpct': serialized_results[0]['precinctsreportingpct'],
'precinctsreporting': serialized_results[0]['precinctsreporting'],
'precinctstotal': serialized_results[0]['precinctstotal'],
'total': tally_results(raceid, statename),
'called': called,
'race_type': '',
'note': get_race_note(serialized_results[0], copy)
}
if len(serialized_results[0]['meta']):
output.update({
'poll_closing': serialized_results[0]['meta'][0].get('poll_closing'),
'race_type': serialized_results[0]['meta'][0].get('race_type'),
'order': serialized_results[0]['meta'][0].get('order')
})
return output
def get_race_note(race, copy):
"""
Pluck race note out of meta sheet
"""
key = '{0}_{1}_note'.format(race['statepostal'], race['party']).lower()
return copy.get(key, '')
def group_poll_closings(races):
poll_closing_orders = []
for race in races:
if race['order'] not in poll_closing_orders:
poll_closing_orders.append(race['order'])
poll_closing_orders.sort()
grouped = OrderedDict()
for group in poll_closing_orders:
grouped[group] = {
'poll_closing': '',
'races': []
}
for race in races:
if race['total'] == 0 and not race['called'] and race['order'] == group:
grouped[group]['poll_closing'] = race['poll_closing']
grouped[group]['races'].append(race['statename'])
return grouped
def get_unreported_races(races):
unreported = [race['statename'] for race in races if race['total'] == 0 and not race['called']]
return unreported
def _format_poll_closing(poll_closing):
formatted_time = ap_time_filter(poll_closing)
formatted_period = ap_time_period_filter(poll_closing)
return '{0} {1}'.format(formatted_time, formatted_period)
def get_last_updated(races):
last_updated = None
for race in races:
if race['called'] or race['precinctsreporting'] > 0:
for result in race['results']:
if not last_updated or result['lastupdated'] > last_updated:
last_updated = result['lastupdated']
if not last_updated:
last_updated = datetime.utcnow()
return last_updated
def tally_results(raceid, statename):
"""
Add results for a given party on a given date.
"""
tally = models.Result.select(fn.SUM(models.Result.votecount)).where(
models.Result.level == 'state',
models.Result.raceid == raceid,
models.Result.statename == statename
).scalar()
return tally
def convert_serial_date(value):
parsed = datetime(*(xlrd.xldate_as_tuple(float(value), 0)))
eastern = timezone('US/Eastern')
parsed_eastern = eastern.localize(parsed)
parsed_utc = parsed_eastern.astimezone(timezone('GMT'))
parsed_naive = parsed_utc.replace(tzinfo=None)
return parsed_naive
class APDatetimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
thedate = ap_date_filter(obj)
thetime = ap_time_filter(obj)
theperiod = ap_time_period_filter(obj)
return '{0}, {1} {2}'.format(thedate, thetime, theperiod)
elif isinstance(obj, date):
return obj.isoformat()
else:
return super(APDatetimeEncoder, self).default(obj)
| nprapps/elections16 | app/utils.py | utils.py | py | 11,492 | python | en | code | 15 | github-code | 36 |
71877360104 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
from executions import Program
def main():
program_name = sys.argv[2]
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(message)s", filename="process-dtrace.log", filemode='w')
logging.debug("Starting work with %s" % program_name)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(message)s")
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
program = Program()
program.debug = True
for i in range(int(sys.argv[1])+1):
program.read_program_points("%s.%d" % (program_name, i))
#program.read_program_invariants("%s.invariants" % program_name)
for i in range(int(sys.argv[1])+1):
program.read_program_executions("%s.%d" % (program_name, i), True)
for i in range(int(sys.argv[1])+1):
program.read_program_executions("%s.%d" % (program_name, i))
logging.debug("Finished work with %s" % program_name)
if __name__ == '__main__':
main()
| danilchenko-andrey/trace-models | process_dtrace.py | process_dtrace.py | py | 1,101 | python | en | code | 0 | github-code | 36 |
469422111 | from argparse import ArgumentParser
from threading import Thread, current_thread
import logging
import time
from sloq import SlowQueue
def main(args=None):
prog = ArgumentParser()
prog.add_argument("-n", type=int, default=10, metavar="TASK_COUNT",
help="The number of tasks")
prog.add_argument("-t", type=float, default=1, metavar="TASK_INTERVAL",
help="The tick: seconds between tasks being released")
prog.add_argument("-w", type=int, default=3, metavar="WORKER_COUNT",
help="Number of workers")
prog.add_argument("-d", type=float, default=0, metavar="TASK_DURATION",
help="Duration of a single task")
prog.add_argument("-s", type=float, default=0, metavar="MAX_SLAM",
help="The maximum amount of slam to allow")
args = prog.parse_args(args)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
test_queue(logger, args.t, args.n, args.d, args.w, args.s)
def test_queue(logger, tick=1, tasks=10, task_duration=0, worker_count=3,
slam=0):
start_time = time.time()
sloq = SlowQueue(release_tick=tick, max_slam=slam)
# Begin the workers
for w in xrange(0, worker_count):
Thread(target=test_worker, args=(logger, start_time, sloq)).start()
# Populate the queue
for task in xrange(0, tasks):
sloq.put((task, task_duration))
for w in xrange(0, worker_count):
sloq.put((None, None))
sloq.join()
def test_worker(logger, start_time, queue):
while True:
task, sleep = queue.get()
if task is None:
logger.info("%s, Done" % current_thread().name)
queue.task_done()
return
else:
logger.info("%s, Elapsed time: %0.2f, Task: %r",
current_thread().name, time.time() - start_time, task)
if sleep:
time.sleep(sleep)
queue.task_done()
if __name__ == "__main__":
main()
| duedil-ltd/python-sloq | demo_sloq.py | demo_sloq.py | py | 2,091 | python | en | code | 3 | github-code | 36 |
33362276648 | import cv2
import base64
import json
import time
import os
import numpy
class Recognition:
"""
This class contain everything required to detect, learn or recognize a face
"""
db_path = "database/"
name_list_path = "database/"
def __init__(self, haar_cascade_file_path):
self.haar_cascade_file_path = haar_cascade_file_path
def take_pictures(self, number_of_pics):
(images, labels) = ([], [])
(width, height) = (130, 100)
face_cascade = cv2.CascadeClassifier(self.haar_cascade_file_path)
webcam = cv2.VideoCapture(0)
count = 1
while count < number_of_pics + 1:
ret_val, im = webcam.read()
time.sleep(1)
if ret_val:
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 4)
for (x, y, w, h) in faces:
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
face = gray[y:y + h, x:x + w]
face_resize = cv2.resize(face, (width, height))
images.append(face_resize)
labels.append(count)
count += 1
webcam.release()
cv2.destroyAllWindows()
return images, labels
@staticmethod
def create_recognize_msg(db_name, images):
retval, image = cv2.imencode('.png', images[0])
json_string = {'data': {'type': "recognize", 'db_name': db_name, 'image0': base64.b64encode(image)}}
return json.dumps(json_string)
@staticmethod
def create_learn_msg(db_name, person_name, info, images):
json_string = {'data': {'type': "learn", 'db_name': db_name, 'person_name': person_name, 'info': info,
'number_of_images': len(images)}}
i = 0
for image in images:
retval, im = cv2.imencode('.png', image)
json_string['data']['image' + str(i)] = base64.b64encode(im)
i += 1
return json.dumps(json_string)
def get_image_name(self, label, db_name):
names_list = self.image_name_list(self.db_path)
if db_name in names_list:
fh = open(self.name_list_path + db_name + ".txt", "r")
list_items = fh.readlines()
if label >= 0:
for item in list_items:
if int(item.split(":")[:-1][0]) == label:
return item.split(":")[1]
else:
return list_items[-1].split(":")[:-1][0]
else:
return 0
def set_image_name(self, person_name, db_name, info):
last_id = self.get_image_name(-1, db_name)
fh = open(self.name_list_path + db_name + ".txt", "a")
fh.write(str(int(last_id) + 1) + ":" + person_name + ":" + info)
fh.write("\n")
fh.close()
return int(last_id) + 1
def get_image_info(self, label, db_name):
names_list = self.image_name_list(self.db_path)
if db_name in names_list:
fh = open(self.name_list_path + db_name + ".txt", "r")
list_items = fh.readlines()
if label >= 0:
for item in list_items:
if int(item.split(":")[:-1][0]) == label:
return item.split(":")[2]
else:
return 0
@staticmethod
def db_list(db_path):
names = []
for filename in os.listdir(db_path):
names.append(filename.split(".")[:-1][0])
return names
@staticmethod
def image_name_list(path):
names = []
for filename in os.listdir(path):
names.append(filename.split(".")[:-1][0])
return names
def learn_person(self, db_name, person_name, info, images):
dbs = self.db_list('database/')
label_list = [self.set_image_name(person_name, db_name, info), self.set_image_name(person_name, db_name, info)]
(image, label) = [numpy.array(lists) for lists in [images, label_list]]
if db_name in dbs:
model = cv2.face.LBPHFaceRecognizer_create() # 125 #110
model.read(self.db_path + db_name + ".xml")
model.update(image, label)
model.write(self.db_path + db_name + ".xml")
else:
model = cv2.face.LBPHFaceRecognizer_create() # 125 #110
model.train(image, label)
model.write(self.db_path + db_name + ".xml")
def recognize_person(self, db_name, images):
dbs = self. db_list('database/')
if db_name in dbs:
model = cv2.face.LBPHFaceRecognizer_create() # 125 #110
model.read(self.db_path + db_name + ".xml")
for faces in images:
prediction = model.predict(faces)
if prediction[1] < 125:
rec = self.get_image_name(prediction[0], db_name)
info = self.get_image_info(prediction[0], db_name)
return rec, info
else:
return "Unknown"
else:
return None
| farshid616/clinet-server_FaceRecognition | recognition.py | recognition.py | py | 5,116 | python | en | code | 3 | github-code | 36 |
6797375421 | from django.core.management.base import BaseCommand
from mailer.models import Message
class Command(BaseCommand):
help = 'Update server name in emails'
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('from_server', nargs='+', type=str)
parser.add_argument('to_server', nargs='+', type=str)
def handle(self, *args, **options):
from_server = options.get('from_server')[0]
to_server = options.get('to_server')[0]
found = 0
self.stdout.write(self.style.SUCCESS('Changing from "{}" to "{}"'.format(from_server, to_server)))
for message in Message.objects.all():
email = message.email
if from_server in email.body:
email.body = email.body.replace(from_server, to_server)
message.email = email
message.save(update_fields=['message_data'])
found += 1
self.stdout.write(self.style.SUCCESS('{} found'.format(found)))
| tomasgarzon/exo-services | service-exo-mail/mail/management/commands/update_server_name.py | update_server_name.py | py | 1,016 | python | en | code | 0 | github-code | 36 |
22056037109 | import os
import re
import sys
import time
from setuptools import setup
HERE = os.path.dirname(__file__)
ROOT_DIR = os.path.abspath(os.path.join(HERE, os.pardir))
TEMP_PATH = "target"
# True when running from the source tree (the parent Maven project is present).
in_src = os.path.isfile(os.path.join(ROOT_DIR, "pom.xml"))
if in_src:
    # Derive the Python package version from the Maven <version> element.
    pom_file = os.path.join(ROOT_DIR, 'pom.xml')
    with open(pom_file) as pomf:
        pom = pomf.read()
    version_match = re.search(r'\n    <version>([\w\.\-]+)</version>', pom)
    if version_match:
        version_string = version_match.group(1)
        print("Version from: '%s' is: %s" % (pom_file, version_string))
        # A trailing "-SNAPSHOT" marks a development build.
        version_elements = version_string.split("-")
        is_release = "SNAPSHOT" != version_elements[-1]
        base_version_elements = version_elements if is_release else version_elements[0:-1]
        # e.g. "1.0-rc1" -> "1.0rc1" (PEP 440 pre-release style).
        base_version = base_version_elements[0] + ".".join(base_version_elements[1:])
        # Dev builds get a "+<hex timestamp>" local-version suffix for uniqueness.
        version = base_version if is_release else "%s+%08x" % (base_version, int(time.time()))
    else:
        print("ERROR: Cannot read version from pom file '%s'." % pom_file, file=sys.stderr)
        exit(1)
    print("Module version is: %s" % version)
    print("Writing version file in: %s" % os.path.abspath("."))
    # Persist the computed version so sdist/wheel builds can read it back.
    with open("pyrander/version.py", "w") as vf:
        vf.write("__version__='%s'\n" % version)
# Load __version__ from the (possibly just written) version file.
with open('pyrander/version.py') as vf:
    exec(vf.read())
setup(
    name='pyrander',
    packages=['pyrander'],  # this must be the same as the name above
    version=__version__,
    description='A random test lib',
    author='Piotr Szul',
    author_email='piotr.szul@csiro.au',
    url='https://github.com/piotrszul/pyrander',
    keywords=['testing', 'logging', 'example'],  # arbitrary keywords
    classifiers=[],
    extras_require={
        'test': [
            'pyspark==2.1.2',
        ],
        'dev': ['twine'],
    },
    license="MIT",
)
| piotrszul/sparktest | python/setup.py | setup.py | py | 1,848 | python | en | code | 0 | github-code | 36 |
73654029544 | from typing import Any, List
from common import TestResult
from proc import SubprocessAppController
from runner import Runner
from validation import TestExecutionValidator, validate_all
from web import TestGetUrl, TestPostToUrl
class NetworkMetricsCollectedValidator(TestExecutionValidator):
    """Asserts that nginx-scoped messages include net.tx and net.rx metrics."""

    def __init__(self, applicable_tests: List[str]):
        self.applicable_tests = applicable_tests

    def should_validate(self, name: str, scoped: bool) -> bool:
        # Only scoped runs of the configured tests are validated.
        return scoped and name in self.applicable_tests

    def validate(self, test_data: Any, scope_messages: List[str]) -> TestResult:
        # Restrict the search to messages tagged with the nginx process.
        nginx_messages = [msg for msg in scope_messages if "#proc:nginx" in msg]
        checks = tuple(
            (any(metric in msg for msg in nginx_messages),
             "No '%s' metrics is collected" % metric)
            for metric in ("net.tx", "net.rx")
        )
        return validate_all(*checks)
def configure(runner: Runner, config):
    """Register the nginx load tests and their network-metric validator."""
    # Run nginx in the foreground under appscope's subprocess controller.
    app_controller = SubprocessAppController(["nginx", "-g", "daemon off;"], "nginx", config.scope_path,
                                             config.logs_path)
    # Two workloads: 10k GETs of the home page and 10k JSON POSTs.
    get_home_page = TestGetUrl(url="http://localhost/", requests=10000, app_controller=app_controller)
    post_file = TestPostToUrl(url="http://localhost/log/", requests=10000, post_file="/opt/test-runner/post.json",
                              app_controller=app_controller)
    runner.add_test_execution_validators([NetworkMetricsCollectedValidator([get_home_page.name, post_file.name])])
    runner.add_tests([get_home_page, post_file])
| criblio/appscope | test/integration/test_runner/nginx.py | nginx.py | py | 1,535 | python | en | code | 248 | github-code | 36 |
74877532582 | from tkinter import *
from tkinter import filedialog
from PIL import ImageTk, Image
import os
from tkinter import messagebox
root = Tk()
root.minsize(650, 650)
root.maxsize(650, 650)
root.configure(background='gray')

# Toolbar icons (must exist next to the script).
open_img = ImageTk.PhotoImage(Image.open("open.png"))
save_img = ImageTk.PhotoImage(Image.open("save.png"))
exit_img = ImageTk.PhotoImage(Image.open("exit.jpg"))

# File-name row at the top of the window.
label_file_name = Label(root, text="File name")
label_file_name.place(relx=0.28, rely=0.03, anchor=CENTER)
input_file_name = Entry(root)
input_file_name.place(relx=0.46, rely=0.03, anchor=CENTER)

# Main editing area.
my_text = Text(root, height=35, width=80)
my_text.place(relx=0.5, rely=0.55, anchor=CENTER)

def openFile():
    """Load the contents of a user-chosen text file into the editor."""
    path = filedialog.askopenfilename()
    if not path:
        return  # dialog cancelled
    with open(path, 'r') as f:
        content = f.read()
    my_text.delete('1.0', END)
    my_text.insert(END, content)
    input_file_name.delete(0, END)
    input_file_name.insert(0, os.path.basename(path))

def saveFile():
    """Save the editor contents under the name typed in the entry box."""
    path = input_file_name.get() or filedialog.asksaveasfilename()
    if not path:
        return
    with open(path, 'w') as f:
        f.write(my_text.get('1.0', END))
    messagebox.showinfo("P-IDE", "File saved")

def exitApp():
    """Close the application window."""
    root.destroy()

# Bug fixes versus the original: 'commamd' -> 'command', 'rex' -> 'relx',
# 'open_buttom'/'exit_buttom' renamed to match the names used by .place(),
# each button now uses its own icon, and the handlers above actually exist.
open_button = Button(root, image=open_img, text="OpenFile", command=openFile)
open_button.place(relx=0.5, rely=0.3, anchor=CENTER)
save_button = Button(root, image=save_img, text="SaveFile", command=saveFile)
save_button.place(relx=0.11, rely=0.3, anchor=CENTER)
exit_button = Button(root, image=exit_img, text="ExitFile", command=exitApp)
exit_button.place(relx=0.17, rely=0.3, anchor=CENTER)

root.mainloop()
15167501320 | """
@author: amal machtalay
"""
import torch
from torch import optim
from torchvision import models
def save_checkpoint(model, optimizer, name, epoch, image_datasets, path):
    """Serialize the model/optimizer state plus metadata to *path* via torch.save.

    Also records the class-to-index mapping taken from the training dataset so
    the checkpoint can be used for label lookup later. Returns 0.
    """
    model.class_to_idx = image_datasets['training'].class_to_idx
    torch.save({'name': name,
                'epoch': epoch,
                'class_to_idx': model.class_to_idx,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'classifier': model.classifier,
                }, path)
    return 0
# function that loads a checkpoint and rebuilds the model
def rebuild_model(path):
    """Rebuild a VGG16 model and its SGD optimizer from a checkpoint file.

    Returns a tuple (name, model, optimizer, epoch) as stored by
    save_checkpoint.
    """
    loaded_checkpoint = torch.load(path)
    loaded_name = loaded_checkpoint['name']
    loaded_epoch = loaded_checkpoint['epoch']
    # Initialize the model first, then restore its state.
    model = models.vgg16(pretrained=True)
    model.class_to_idx = loaded_checkpoint['class_to_idx']
    # Install the saved classifier BEFORE loading the state dict, so the
    # parameter shapes in the state dict match the module tree.
    model.classifier = loaded_checkpoint['classifier']
    # Bug fix: the original ASSIGNED the state dict to the load_state_dict
    # attribute instead of calling the method, so weights were never restored.
    model.load_state_dict(loaded_checkpoint['model_state_dict'])
    optimizer = optim.SGD(model.classifier.parameters(), lr=0.001)
    optimizer.load_state_dict(loaded_checkpoint['optimizer_state_dict'])
    return loaded_name, model, optimizer, loaded_epoch
| amalmach/image_classifier | project_2/save_load_checkpoint.py | save_load_checkpoint.py | py | 1,287 | python | en | code | 0 | github-code | 36 |
21841926557 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 17:17:57 2015
Kepler problem and modified equations.
Apply the Stoermer-Verlet to the plannar Kepler problem
H(q, p) = 1/2p^Tp - 1/||q||, q, p belong to R^2.
Use the BCH formular (L + R 5.40) to compute the second-order correction of
the modified Hamiltonian \tilde{H} for this particular problem. Verify the
forth order convergence of the Stoermer-Verlet method with respect to the
modified Hamiltonian \tilde{H}_2 numerically. Take, for example, initial
conditions q = (1, 0) and p = (0, 1).
@author: rpoolman
"""
import Steppers.steppers as step
import numpy as np
import matplotlib.pyplot as plt
# setups for the planar Kepler problem: H(q, p) = p.p/2 - 1/||q||
V = lambda qx, qy: -1.0/np.sqrt(qx**2.0 + qy**2.0)  # potential energy
Dt = 0.01   # time step
T = 10      # total integration time
# Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; also round
# before truncating so the floating-point value of T/Dt (999.999...) does not
# silently drop a step.
N = int(round(T/Dt))
q = np.zeros((N, 2))
p = np.zeros((N, 2))
q[0, :] = np.array([1.0, 0.0])  # initial position
p[0, :] = np.array([0.0, 1.0])  # initial momentum

# integrate with the Stoermer-Verlet stepper
for ii in range(len(q) - 1):
    q[ii + 1], p[ii + 1] = step.stoermerstep(V, q[ii], p[ii], Dt)

# plot results: orbit in real space plus both phase-space projections
plt.figure(1)
plt.subplots_adjust(hspace = 0.2, wspace = 0.15)
plt.clf()
ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)
plt.title('Real Space Plot of Numerical Solution')
plt.plot(q[:, 0], q[:, 1])
plt.xlabel('X Coordinate')
plt.ylabel('Y Coordinate')
plt.axis([-1.5, 1.5, -1.5, 1.5])
ax2 = plt.subplot2grid((2,2), (1,0))
plt.title('Phase Space Plot of Numerical Solution along X')
plt.plot(q[:, 0], p[:, 0])
plt.xlabel('X Coordinate')
plt.ylabel('X Velocity')
plt.axis([-1.5, 1.5, -1.5, 1.5])
ax3 = plt.subplot2grid((2,2), (1,1))
# Bug fix: this panel plots the Y components; the title said "along X".
plt.title('Phase Space Plot of Numerical Solution along Y')
plt.plot(q[:, 1], p[:, 1])
plt.xlabel('Y Coordinate')
plt.ylabel('Y Velocity')
plt.axis([-1.5, 1.5, -1.5, 1.5])
def solution(bridge_length, weight, truck_weights):
    """Simulate trucks crossing a bridge and return the total elapsed seconds.

    The bridge is modeled as a queue of per-cell loads; one queue rotation is
    one second. A truck may enter only while the total load stays <= weight.
    """
    from collections import deque

    elapsed = 1
    bridge = deque([0] * bridge_length)
    load = 0
    last = len(truck_weights) - 1
    for idx, truck in enumerate(truck_weights):
        # Advance time until the next truck can enter without overloading.
        while load - bridge[0] + truck > weight:
            load -= bridge.popleft()
            bridge.append(0)
            elapsed += 1
        if idx == last:
            # The final truck still has to traverse the whole bridge.
            return elapsed + bridge_length
        load += truck - bridge.popleft()
        bridge.append(truck)
        elapsed += 1
    return elapsed
| ayocado/algorithm-study | heewon00/스택, 큐/PGS level2 다리를 지나는 트럭.py | PGS level2 다리를 지나는 트럭.py | py | 709 | python | en | code | 0 | github-code | 36 |
34499172798 | import tkinter as tk
from tkinter import messagebox
import random
#defininindo as configurações do jogo
NUM_LINHAS = 4
NUM_COLUNAS = 4
CARTAO_SIZE_W = 10
CARTAO_SIZE_H = 5
CORES_CARTAO = ['red', 'blue', 'green', 'yellow', 'purple', 'orange', 'magenta', 'gray']
COR_FUNDO = "#343a40"
COR_LETRA = "#ffffff"
FONT_STYLE = ('Arial', 12, 'bold')
MAX_TENTATIVAS = 25
# Cria uma grade aleatoria de cores para os cartoes
def create_card_grid():
    """Build a NUM_LINHAS x NUM_COLUNAS grid of shuffled card colours (pairs)."""
    deck = CORES_CARTAO * 2          # two cards of every colour
    random.shuffle(deck)
    grid = []
    for _ in range(NUM_LINHAS):
        # Pop one colour per column while the deck still has cards.
        row = [deck.pop() for _ in range(NUM_COLUNAS) if deck]
        grid.append(row)
    return grid
# Interagindo com o clique do jogador
def card_clicked(linha, coluna):
    """Reveal the card at (linha, coluna) if it is still face-down."""
    print("Clicado: linha =", linha, "coluna =", coluna)
    cartao = cartoes[linha][coluna]
    cor = cartao['bg']
    if cor == 'black':
        # Face-down card: show its colour and track it as revealed.
        cartao['bg'] = grid[linha][coluna]
        cartao_revelado.append(cartao)
        # Two cards face-up: check whether they match.
        if len(cartao_revelado) == 2:
            check_math()
# Verificar se os dois cartoes revelados são iguais
def check_math():
    """Compare the two face-up cards: remove matches, flip mismatches back."""
    cartao1, cartao2 = cartao_revelado
    if cartao1['bg'] == cartao2['bg']:
        # Match: remove both widgets after a short delay and record the pair.
        cartao1.after(1000, cartao1.destroy)
        cartao2.after(1000, cartao2.destroy)
        cartao_correspondentes.extend([cartao1, cartao2])
        check_win()
    else:
        # No match: turn both cards face-down again after a short delay.
        cartao1.after(1000, lambda:cartao1.config(bg='black'))
        cartao2.after(1000, lambda:cartao2.config(bg='black'))
    cartao_revelado.clear()
    update_score()
# Ver se o plyer ganhou
def check_win():
    """End the game with a congratulation dialog once every card is matched."""
    if len(cartao_correspondentes) == NUM_LINHAS * NUM_COLUNAS:
        messagebox.showinfo('Legal', 'Zerou o jogo!')
        janela.quit()
# Atualizar a pontuacao e verificar se o jogador foi de vasco
def update_score():
    """Count an attempt, refresh the label, and end the game at MAX_TENTATIVAS."""
    global numero_tentativas
    numero_tentativas += 1
    label_tentativas.config(text='Tentativas: {}/{}'.format(numero_tentativas, MAX_TENTATIVAS))
    if numero_tentativas >= MAX_TENTATIVAS:
        # Out of attempts: show the game-over dialog and quit.
        messagebox.showinfo('F', 'Voce foi de vasco :/')
        janela.quit()
# Criando a interface principal
# Main window setup.
janela = tk.Tk()
janela.title('Jogo da Memoria')
janela.configure(bg=COR_FUNDO)
# Build the colour grid and the global game state.
grid = create_card_grid()
cartoes = []                 # Button widgets, indexed [row][col]
cartao_revelado = []         # cards currently face-up (at most two)
cartao_correspondentes =[]   # cards already matched
numero_tentativas = 0
# One black (face-down) button per grid cell; the lambda default arguments
# bind the row/column of each button at creation time.
for linha in range(NUM_LINHAS):
    linha_de_cartoes = []
    for coluna in range(NUM_COLUNAS):
        cartao = tk.Button(janela, command=lambda linha=linha, coluna=coluna: card_clicked(linha, coluna), width=CARTAO_SIZE_W, height=CARTAO_SIZE_H, bg='black', relief=tk.RAISED, bd=3)
        cartao.grid(row=linha, column=coluna, padx=5, pady=5)
        linha_de_cartoes.append(cartao)
    cartoes.append(linha_de_cartoes)
# Default styling applied to every Button via the option database.
button_style = {'activebackground': '#f8f9fa', 'font' : FONT_STYLE, 'fg' :COR_LETRA}
janela.option_add('*Button' ,button_style)
# Attempts counter shown below the grid.
label_tentativas = tk.Label(janela, text='Tentativas: {}/{}'.format(numero_tentativas, MAX_TENTATIVAS), fg=COR_LETRA, bg=COR_FUNDO, font=FONT_STYLE)
label_tentativas.grid(row=NUM_LINHAS, columnspan=NUM_COLUNAS, padx=10, pady=10)
janela.mainloop()
# Debug output after the window closes (sanity-check the grid dimensions).
print(len(cartoes))  # should equal NUM_LINHAS
print(len(cartoes[0]))  # should equal NUM_COLUNAS
print(len(grid))  # should equal NUM_LINHAS
print(len(grid[0]))  # should equal NUM_COLUNAS
# NOTE(review): this re-definition shadows the full card_clicked defined above;
# since the button lambdas resolve the name at click time, this stub (which only
# prints and fetches the widget) is the one that would run. Looks like leftover
# debugging code — confirm before removing.
def card_clicked(linha, coluna):
    print("Clicado: linha =", linha, "coluna =", coluna)
    cartao = cartoes[linha][coluna]
| Zpkbiel/prog23gabriel | trabalho de progamação 2.py | trabalho de progamação 2.py | py | 3,707 | python | pt | code | 0 | github-code | 36 |
27401072653 | import subprocess
emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise']
# External hera binaries for the two persistence-diagram distances.
distances = ['/home/hamza/AffectiveTDA2/hera/bottleneck/bottleneck_dist', '/home/hamza/AffectiveTDA2/hera/wasserstein/wasserstein_dist']
# Facial-landmark groupings; index i selects one grouping per run.
subsections = [
    ('leftEye', 'rightEye', 'leftEyebrow', 'rightEyebrow', 'nose', 'mouth', 'jawline'),
    ('leftEye', 'rightEye', 'leftEyebrow', 'rightEyebrow', 'nose', 'mouth'),
    ('leftEyebrow', 'rightEyebrow', 'nose'),
    ('leftEye', 'rightEye', 'nose'),
    ('nose', 'mouth')
]
if __name__ == '__main__':
    # NOTE(review): distances[1:] skips the bottleneck binary, so only the
    # wasserstein distance is computed — confirm this is intentional.
    for distance in distances[1:]:
        for emotion in emotions:
            for i in range(len(subsections)):
                print('{} ../Output/ {} {}'.format(distance,emotion,i))
                # Run the external distance tool and capture its stdout.
                p = subprocess.Popen(
                    '{} ../Output/ {} {}'.format(distance,emotion,i),
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    shell=True)
                std_out, std_err = p.communicate()
                std_out = std_out.decode('ascii')
                # Drop the trailing empty element produced by the final newline.
                std_out = std_out.split('\n')[:-1]
                outfile = '../Output/F002/nonmetric/signal/{}_{}_{}.txt'.format(
                    distance.split('/')[-2],
                    '_'.join(subsections[i]), emotion)
                print(outfile)
                with open(outfile, 'w') as file:
                    for line in std_out:
                        file.write(line + '\n')
| USFDataVisualization/AffectiveTDA | topological_distance.py | topological_distance.py | py | 1,464 | python | en | code | 1 | github-code | 36 |
5812334986 | import babel
import babel.numbers
import babel.plural
from typing import Any, Callable, Dict, List, TYPE_CHECKING, Tuple, Union, cast
from typing_extensions import Literal
from fluent.syntax import ast as FTL
from .builtins import BUILTINS
from .prepare import Compiler
from .resolver import CurrentEnvironment, Message, Pattern, ResolverEnvironment
from .utils import native_to_fluent
if TYPE_CHECKING:
from .types import FluentNone, FluentType
PluralCategory = Literal['zero', 'one', 'two', 'few', 'many', 'other']
class FluentBundle:
    """
    Bundles are single-language stores of translations. They are
    aggregate parsed Fluent resources in the Fluent syntax and can
    format translation units (entities) to strings.
    Always use `FluentBundle.get_message` to retrieve translation units from
    a bundle. Generate the localized string by using `format_pattern` on
    `message.value` or `message.attributes['attr']`.
    Translations can contain references to other entities or
    external arguments, conditional logic in form of select expressions, traits
    which describe their grammatical features, and can use Fluent builtins.
    See the documentation of the Fluent syntax for more information.
    """
    def __init__(self,
                 locales: List[str],
                 functions: Union[Dict[str, Callable[[Any], 'FluentType']], None] = None,
                 use_isolating: bool = True):
        self.locales = locales
        # User functions extend (and may override) the builtin function set.
        self._functions = {**BUILTINS, **(functions or {})}
        self.use_isolating = use_isolating
        self._messages: Dict[str, Union[FTL.Message, FTL.Term]] = {}
        self._terms: Dict[str, Union[FTL.Message, FTL.Term]] = {}
        # Cache of compiled entries, keyed by id ('-' prefix for terms).
        self._compiled: Dict[str, Message] = {}
        # The compiler is not typed, and this cast is only valid for the public API
        self._compiler = cast(Callable[[Union[FTL.Message, FTL.Term]], Message], Compiler())
        self._babel_locale = self._get_babel_locale()
        self._plural_form = cast(Callable[[Any], Callable[[Union[int, float]], PluralCategory]],
                                 babel.plural.to_python)(self._babel_locale.plural_form)
    def add_resource(self, resource: FTL.Resource, allow_overrides: bool = False) -> None:
        """Register the messages and terms from a parsed Fluent resource."""
        # TODO - warn/error about duplicates
        for item in resource.body:
            if not isinstance(item, (FTL.Message, FTL.Term)):
                continue
            map_ = self._messages if isinstance(item, FTL.Message) else self._terms
            full_id = item.id.name
            # First definition wins unless overrides are explicitly allowed.
            if full_id not in map_ or allow_overrides:
                map_[full_id] = item
    def has_message(self, message_id: str) -> bool:
        """Return True if a message with this id has been added."""
        return message_id in self._messages
    def get_message(self, message_id: str) -> Message:
        """Return the compiled message for *message_id* (KeyError if absent)."""
        return self._lookup(message_id)
    def _lookup(self, entry_id: str, term: bool = False) -> Message:
        """Return a compiled message/term, compiling and caching on first use."""
        if term:
            # Terms share the cache with messages under a '-' prefix.
            compiled_id = '-' + entry_id
        else:
            compiled_id = entry_id
        try:
            return self._compiled[compiled_id]
        except LookupError:
            pass
        entry = self._terms[entry_id] if term else self._messages[entry_id]
        self._compiled[compiled_id] = self._compiler(entry)
        return self._compiled[compiled_id]
    def format_pattern(self,
                       pattern: Pattern,
                       args: Union[Dict[str, Any], None] = None
                       ) -> Tuple[Union[str, 'FluentNone'], List[Exception]]:
        """Format a compiled pattern, returning (result, errors).

        Errors encountered during resolution are collected rather than raised.
        """
        if args is not None:
            # Convert native Python values to Fluent runtime types.
            fluent_args = {
                argname: native_to_fluent(argvalue)
                for argname, argvalue in args.items()
            }
        else:
            fluent_args = {}
        errors: List[Exception] = []
        env = ResolverEnvironment(context=self,
                                  current=CurrentEnvironment(args=fluent_args),
                                  errors=errors)
        try:
            result = pattern(env)
        except ValueError as e:
            # Resolution failed outright: record the error and emit a placeholder.
            errors.append(e)
            result = '{???}'
        return (result, errors)
    def _get_babel_locale(self) -> babel.Locale:
        """Return the first parseable locale, falling back to the system default."""
        for lc in self.locales:
            try:
                return babel.Locale.parse(lc.replace('-', '_'))
            except babel.UnknownLocaleError:
                continue
        # TODO - log error
        return babel.Locale.default()
| projectfluent/python-fluent | fluent.runtime/fluent/runtime/bundle.py | bundle.py | py | 4,408 | python | en | code | 185 | github-code | 36 |
5315481317 |
import re
class Pattern(object):
    """A named token class backed by a compiled regular expression."""

    def __init__(self, name, regex):
        self.name = name
        self.pattern = re.compile(regex)

    def match(self, input, index):
        """Return a match object anchored at *index* in *input*, or None."""
        return self.pattern.match(input, index)
class Terminal(object):
    """A token produced by the scanner: a token name plus the matched text."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        # Keyword-like tokens (e.g. IF matching "if") print as just the name;
        # everything else prints as NAME(value).
        return self.name if self.name.lower() == self.value else '%s(%s)' % (self.name, self.value)
class Tree(object):
    """A simple n-ary syntax tree; ``value`` is typically a Terminal."""

    def __init__(self, *values):
        self.value = None
        if len(values) > 0:
            self.value = values[0]
        if len(values) > 1:
            self.children = [x for x in values[1:]]
        else:
            self.children = []

    def add(self, value):
        """Append a child, wrapping plain values in a Tree; returns self."""
        if isinstance(value, Tree):
            self.children.append(value)
        else:
            self.children.append(Tree(value))
        return self

    def __len__(self):
        return len(self.children)

    def isLeaf(self):
        return len(self.children) == 0

    def toDot(self):
        """Render the tree as a Graphviz digraph (expects Terminal values)."""
        self.label(1)
        return 'digraph ast {%s\n}' % self.doToDot('')

    def label(self, id):
        """Assign sequential ids in pre-order; returns the next free id."""
        self.id = id
        id += 1
        for c in self.children:
            id = c.label(id)
        return id

    def doToDot(self, dot):
        dot = '%s\n%d [label="%s"];' % (dot, self.id, self.value.value)
        for c in self.children:
            dot = c.doToDot(dot)
            dot = '%s\n%d -> %d;' % (dot, self.id, c.id)
        return dot

    def __str__(self):
        if self.isLeaf():
            return self.value.__str__()
        # Bug fix: open the parenthesis WITHOUT closing it immediately, so the
        # rendered s-expression is balanced: "(+ 1 2)" rather than "(+) 1 2)".
        result = '(%s' % self.value
        for c in self.children:
            result = '%s %s' % (result, c)
        return '%s)' % result
class Scanner(object):
    """One-token-lookahead tokenizer over a string, driven by Pattern objects."""
    def __init__(self, input, patterns):
        self.input = input
        self.index = 0
        self.patterns = patterns
        self.terminal = None          # last token consumed by matches()/expect()
        self.lookAhead = self.next()  # prime the lookahead token
    def next(self):
        """Scan and return the next Terminal, or None at end of input."""
        # Skip whitespace between tokens.
        while self.index < len(self.input) and self.input[self.index].isspace():
            self.index += 1
        if self.index >= len(self.input):
            return None
        # First pattern that matches wins, so pattern order matters.
        for p in self.patterns:
            match = p.match(self.input, self.index)
            if match:
                self.index = match.end()
                return Terminal(p.name, match.group())
        raise Exception('Unrecognized input: %s' % (self.input[self.index]))
    def matches(self, *types):
        """If the lookahead is one of *types*, consume it and return True."""
        if self.lookAhead == None:
            return False
        for t in types:
            if t == self.lookAhead.name:
                self.terminal = self.lookAhead
                self.lookAhead = self.next()
                return True
        return False
    def expect(self, *types):
        """Consume and return a token of one of *types*, or raise."""
        if self.matches(*types):
            return self.terminal
        raise Exception('Expected %s, found %s' % (','.join(types), self.lookAhead))
    def atEnd(self):
        """True once all input has been consumed."""
        return self.lookAhead == None
class Parser(object):
    """Recursive-descent parser for a small C-like statement language.

    Expressions use layered precedence climbing over ``prec`` (lowest
    precedence first).
    """
    def __init__(self, scanner):
        self.sc = scanner
        # Operator tiers, lowest to highest precedence.
        self.prec = [('&&','||'), ('==','!=','>','<','>=','<='), ('+','-'), ('*','/','%')]
    def parse(self):
        """Parse a single statement and require all input to be consumed."""
        tree = self.parseStatement()
        if not self.sc.atEnd():
            raise Exception('Unexpected input: %s' % self.sc.terminal)
        return tree
    def parseStatement(self):
        """Parse a block, while, break, if/else, or assignment statement."""
        if self.sc.matches('{'):
            # Block: children are the contained statements.
            tree = Tree(self.sc.terminal)
            while not self.sc.matches('}'):
                tree.add(self.parseStatement())
            return tree
        if self.sc.matches('WHILE'):
            return Tree(self.sc.terminal, self.parseExp(), self.parseStatement())
        if self.sc.matches('BREAK'):
            tree = Tree(self.sc.terminal)
            self.sc.expect(';')
            return tree
        if self.sc.matches('IF'):
            tree = Tree(self.sc.terminal, self.parseExp(), self.parseStatement())
            if self.sc.matches('ELSE'):
                tree.add(self.parseStatement())
            return tree
        if self.sc.matches('ID'):
            # NOTE: 'id' shadows the builtin; it holds the assignment target.
            id = self.sc.terminal
            if self.sc.matches('='):
                tree = Tree(self.sc.terminal, Tree(id), self.parseExp())
                self.sc.expect(';')
                return tree
            # A bare "ID;" expression statement falls through and returns None.
            self.sc.expect(';')
    def parseExp(self):
        return self.parseHead(0)
    def parseHead(self, index):
        """Parse a left-associative chain of operators at precedence *index*."""
        result = self.parseTail(index)
        while self.sc.matches(*self.prec[index]):
            result = Tree(self.sc.terminal, result, self.parseTail(index))
        return result
    def parseTail(self, index):
        # Descend to the next (tighter) precedence tier, or to a primary.
        if index >= len(self.prec)-1:
            return self.parsePrim()
        return self.parseHead(index + 1)
    def parsePrim(self):
        """Parse a parenthesized expression, unary minus, integer, or identifier."""
        if self.sc.matches('('):
            tree = self.parseExp()
            self.sc.expect(')')
            return tree
        if self.sc.matches('-'):
            return Tree(self.sc.terminal, self.parsePrim())
        return Tree(self.sc.expect('INT', 'ID'))
if __name__ == '__main__':
    # Token patterns; order matters: keywords must precede ID, and two-character
    # operators (<=, >=, ==, !=) must precede their one-character prefixes.
    patterns = []
    patterns.append(Pattern('INT', r'[0-9]+'))
    patterns.append(Pattern('IF', r'if'))
    patterns.append(Pattern('ELSE', r'else'))
    patterns.append(Pattern('WHILE', r'while'))
    patterns.append(Pattern('BREAK', r'break'))
    patterns.append(Pattern('ID', r'[a-zA-Z][a-zA-Z0-9_]*'))
    patterns.append(Pattern(';', r'\;'))
    patterns.append(Pattern('{', r'\{'))
    patterns.append(Pattern('}', r'\}'))
    patterns.append(Pattern('[', r'\['))
    patterns.append(Pattern(']', r'\]'))
    patterns.append(Pattern('(', r'\('))
    patterns.append(Pattern(')', r'\)'))
    patterns.append(Pattern('+', r'\+'))
    patterns.append(Pattern('-', r'\-'))
    patterns.append(Pattern('*', r'\*'))
    patterns.append(Pattern('/', r'\/'))
    patterns.append(Pattern('<=', r'\<\='))
    patterns.append(Pattern('>=', r'\>\='))
    patterns.append(Pattern('==', r'\=\='))
    patterns.append(Pattern('!=', r'\!\='))
    patterns.append(Pattern('&&', r'\&\&'))
    patterns.append(Pattern('||', r'\|\|'))
    patterns.append(Pattern('=', r'\='))
    patterns.append(Pattern('<', r'\<'))
    patterns.append(Pattern('>', r'\>'))
    patterns.append(Pattern('%', r'\%'))
    # Sample program; 'input' shadows the builtin here (script scope only).
    input = '''
{
    i = 0;
    while i<10 {
        a = 2*3;
        if i % 1 == 0 {
            a = a + 1;
        } else {
            a = a + 2;
        }
        i = i+1;
    }
}
    '''
    # Parse the sample and print its AST in Graphviz dot format.
    p = Parser(Scanner(input, patterns))
    dot = p.parse().toDot()
    print(dot)
| msiddalingaiah/TDOP | py/TDOP.py | TDOP.py | py | 6,660 | python | en | code | 0 | github-code | 36 |
29861997533 | from dataclasses import dataclass
import aiohttp
import time
from datetime import datetime
import orjson
import aiofiles
import asyncio
import logging
_LOGGER = logging.getLogger(__name__)
from .const import JSON_CACHE_FILE
@dataclass
class InverterData:
serial_number: str
firmware_version: str|None
model: str
temperature: float
current_power: float
daily_power_yield: float
alerts: bool|None
@classmethod
def from_dict(cls,data:dict):
return cls(
serial_number=data["serial_number"],
firmware_version=data["firmware_version"],
model=data["model"],
temperature=data["temperature"],
current_power=data["current_power"],
daily_power_yield=data["daily_power_yield"],
alerts=data["alerts"],
)
@dataclass
class WifiDataLoggerData:
online_status:bool #derived from connectivity to wifi data logger stick over network
last_seen:datetime
serial_number:str
firmware_version:str|None
wireless_ap_mode:bool|None
wireless_sta_mode:bool|None
router_ssid:str|None
signal_quality:int
ip_address:str|None
mac_address:str
remote_server_a:bool|None
remote_server_b:bool|None
@classmethod
def from_dict(cls,data:dict):
return cls(
online_status=data["online_status"],
last_seen=data["last_seen"],
serial_number=data["serial_number"],
firmware_version=data["firmware_version"],
wireless_ap_mode=data["wireless_ap_mode"],
wireless_sta_mode=data["wireless_sta_mode"],
router_ssid=data["router_ssid"],
signal_quality=data["signal_quality"],
ip_address=data["ip_address"],
mac_address=data["mac_address"],
remote_server_a=data["remote_server_a"],
remote_server_b=data["remote_server_b"],
)
@dataclass
class SystemData:
    """Aggregate of the inverter and wifi-logger readings."""
    inverter:InverterData
    wifi_logger:WifiDataLoggerData

    @classmethod
    def from_dict(cls, data: dict):
        """Build a SystemData tree from a nested plain dict."""
        return cls(
            InverterData.from_dict(data["inverter"]),
            WifiDataLoggerData.from_dict(data["wifi_logger"]),
        )
class SolisWifiApi():
    """HTTP client for the Solis inverter wifi data-logger's local CGI endpoints."""
    def __init__(self,hostname:str,username:str,password:str) -> None:
        # NOTE(review): this logs the credentials in clear text — consider removing.
        _LOGGER.info((hostname,username,password))
        self._session = aiohttp.ClientSession(base_url=hostname,auth=aiohttp.BasicAuth(username,password))
    async def getSystemData(self) -> SystemData:
        """Fetch and combine the inverter and wifi-logger readings."""
        inverter_data = await self.getInverterData()
        wifi_logger_data = await self.getWifiDataLoggerData()
        return SystemData(inverter_data,wifi_logger_data)
    async def getInverterData(self) -> InverterData:
        """Fetch /inverter.cgi and map its 8 semicolon-separated fields."""
        inverterDataRaw= await self._loadDataAndParseResponse("inverter","Inverter",8)
        return InverterData(
            inverterDataRaw[0],
            inverterDataRaw[1],
            inverterDataRaw[2],
            float(inverterDataRaw[3]),
            float(inverterDataRaw[4]),
            float(inverterDataRaw[5]),
            #Data in element 6 is 'Total yield' which only show value 'd'??
            True if inverterDataRaw[7] == "YES" else False
        )
    async def getWifiDataLoggerData(self) -> WifiDataLoggerData:
        """Fetch /moniter.cgi and map its 13 semicolon-separated fields."""
        monitorDataRaw= await self._loadDataAndParseResponse("moniter","Wifi Data Logger",13)
        return WifiDataLoggerData(
            True,
            datetime.now(),
            monitorDataRaw[0],
            monitorDataRaw[1],
            True if monitorDataRaw[2] == "Enable" else False,
            #Data in elements 3-5 are Null, do not know what they are
            True if monitorDataRaw[6] == "Enable" else False,
            monitorDataRaw[7],
            int(monitorDataRaw[8]),
            monitorDataRaw[9],
            monitorDataRaw[10],
            True if monitorDataRaw[11] == "Connected" else False,
            True if monitorDataRaw[12] == "Connected" else False
        )
    async def getOffLineData(self,last_known_system_data:SystemData) -> SystemData:
        """Build a zeroed/offline SystemData, keeping identifying fields from
        the last known data (or from the on-disk cache when none is given)."""
        if last_known_system_data == None:
            last_known_system_data= await self._getCachedData()
        inverter_data = InverterData(
            last_known_system_data.inverter.serial_number if last_known_system_data else "",
            None,
            last_known_system_data.inverter.model if last_known_system_data else "",
            0,
            0,
            0,
            None
        )
        wifi_logger_data=WifiDataLoggerData(
            False,
            last_known_system_data.wifi_logger.last_seen if last_known_system_data else datetime.min,
            last_known_system_data.wifi_logger.serial_number if last_known_system_data else "",
            None,
            None,
            None,
            None,
            0,
            last_known_system_data.wifi_logger.ip_address if last_known_system_data else "",
            last_known_system_data.wifi_logger.mac_address if last_known_system_data else "",
            None,
            None
        )
        return SystemData(inverter_data,wifi_logger_data)
    async def _getCachedData(self) -> SystemData | None:
        """Load the cached SystemData from disk, or None if the file is missing.

        NOTE(review): only OSError is caught; a corrupt JSON payload would
        propagate from orjson.loads — confirm whether that is intended.
        """
        try:
            async with aiofiles.open(JSON_CACHE_FILE, mode='rb') as f:
                content = await f.read()
            system_data_dict=orjson.loads(content)
            system_data=SystemData.from_dict(system_data_dict)
            return system_data
        except OSError:
            #await asyncio.sleep(0)
            return None
    def _generateTimeToken(self) -> str:
        # Cache-busting query parameter: current unix time in seconds.
        return str(int(time.time()))
    async def _loadDataAndParseResponse(self,dataSource:str,dataSourceName:str,dataExpectedLength:int)-> list[str]:
        """GET /<dataSource>.cgi, split the payload, and validate its length."""
        response= await self._session.get("/{dataSource}.cgi?t={time}".format(dataSource=dataSource,time=self._generateTimeToken()))
        response.raise_for_status()
        responseText = await response.text()
        dataRaw=self._parseResponseText(responseText)
        if len(dataRaw) != dataExpectedLength:
            raise SolisWifiApiParseException(f"Could not parse {dataSourceName} data, please check connection")
        return dataRaw
    def _parseResponseText(self,responseText:str)-> list[str]:
        """Split the device's NUL-padded, semicolon-separated payload into fields."""
        #Removing NUL characters from response
        cleanedup=responseText.replace("\x00","").removesuffix(";\r\n")
        return cleanedup.split(";")
    async def close(self):
        """Close the underlying aiohttp session."""
        await self._session.close()
class SolisWifiApiManager:
    """Async context manager owning the lifetime of a SolisWifiApi session."""

    def __init__(self, hostname: str, username: str, password: str) -> None:
        # Connection settings are only consumed when the context is entered.
        self._hostname = hostname
        self._username = username
        self._password = password

    async def __aenter__(self) -> SolisWifiApi:
        self.soliswifiapi = SolisWifiApi(self._hostname, self._username, self._password)
        return self.soliswifiapi

    async def __aexit__(self, exc_type, exc, tb):
        # Always release the underlying HTTP session, error or not.
        await self.soliswifiapi.close()
class SolisWifiApiParseException(Exception):
    """Raised when a response payload from the data logger cannot be parsed."""
| tmulkern/SolisWifiDataLogger | custom_components/solis_wifi_data_logger/solis_wifi_api.py | solis_wifi_api.py | py | 7,146 | python | en | code | 0 | github-code | 36 |
4419557828 | import sys
import pygame
import random
# Initialise pygame and the 500x500 window.
pygame.init()
pygame.display.set_caption("who is the caption")
window = pygame.display.set_mode((500,500))
font = pygame.font.Font("思源黑体.otf",35)
# Load the 25 animation frames of the spinning wheel.
wheel_pics = []
for i in range(25):
    filename = './pics/' + str(i) + '.png'
    pic = pygame.image.load(filename)
    wheel_pics.append(pic)
start_pic = pygame.image.load('start.png')
window.fill((255,255,255))
window.blit(start_pic,(0,0))
pygame.display.flip()
# Read the six candidate names, one per line.
with open('names.txt',encoding = 'utf8') as f:
    name_list = []
    for i in range(6):
        name_list.append(f.readline().strip())
choice = random.choice(name_list)
print(choice)
rolling = False
pic_index = 0
clock = pygame.time.Clock()
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                rolling = True  # space bar starts one spin of the wheel
    if rolling:
        window.fill((255,255,255))
        window.blit(wheel_pics[pic_index % 25],(0,0))
        pic_index += 1
        if pic_index >= 25:
            # Spin finished: pick a winner and draw the name.
            rolling = False
            pic_index = 0
            choice = random.choice(name_list)
            # NOTE(review): text colour (255,255,255) is white and the window was
            # just filled white — the name may be invisible; confirm intended.
            text = font.render(choice, True, (255,255,255))
            window.blit(text,(215,220))
    pygame.display.flip()
    clock.tick(30)
class Standalone(object):
    """Common class for all standalone applications.

    Parses command-line arguments against *user_options* and pushes the parsed
    values back into the user_options config tree.
    """
    def __init__(self, args, user_options):
        # stores the arguments
        self.args = args
        self.user_options = user_options
        if len(args) == 1:
            # shows the help message if no arguments provided
            self.help()
        else:
            # The user values should be used to update the
            # user_options
            options = self.user_options.parse_args(args[1:])
            # Should update the CNOConfig file with the provided options:
            # for every section/option pair, copy the parsed value into the
            # corresponding config entry's 'value' attribute.
            for key in self.user_options.config.keys():
                for option in self.user_options.config[key]._get_names():
                    value = getattr(options, option)
                    setattr(getattr( getattr(self.user_options.config, key), option ), 'value', value)
            self.options = options
    def help(self):
        """Print the argparse help message (triggers SystemExit via --help)."""
        self.user_options.parse_args(["prog", "--help"])
    def report(self):
        """Create report and shows report (or not)"""
        if self.options.onweb is True:
            self.trainer.report(show=True)
        elif self.options.report is True:
            self.trainer.report(show=False)
        else:
            from easydev.console import red
            print(red("No report requested; nothing will be saved or shown"))
            print("use --on-web or --report options")
| cellnopt/cellnopt | cno/core/standalone.py | standalone.py | py | 1,405 | python | en | code | 10 | github-code | 36 |
def main():
    """Interactively collect employee records and write them to employee.txt.

    The file is tab-separated with a header row (Name, ID number, Department).
    """
    num_emps = int(input('How many employees record to create? '))
    with open('employee.txt','w') as emp_file:
        # Header row.
        emp_file.write('Name\t')
        emp_file.write('ID number\t')
        emp_file.write('Department\t')
        emp_file.write('\n')
        # One tab-separated line per employee.
        for emp in range(1, num_emps + 1):
            print(f'Enter information for employee {emp}:')
            name = input('Name: ')
            id_num = input('ID number: ')
            dept = input('Department: ')
            emp_file.write(name + '\t')
            emp_file.write(id_num + '\t')
            emp_file.write(dept + '\t')
            emp_file.write('\n')
            print('................................................................')
    print(f'All employee information are saved')
def read_emp_info():
    """Read employee.txt (as written by main) and print each employee record.

    The first line of the file is a header row and is skipped. Bug fixes
    versus the original: 'enumberate' (NameError) -> enumerate, readline()
    -> readlines(), and the header print was missing its f-string prefix.
    """
    with open('employee.txt', 'r') as emp_file:
        for ind, line in enumerate(emp_file.readlines()):
            if ind == 0 or not line.strip():
                continue  # skip the header row and any blank lines
            print(f'Information of employee {ind}:')
            fields = line.split('\t')
            print(f'Name: {fields[0]}')
            print(f'ID number: {fields[1]}')
            print(f'Department: {fields[2]}')
            print('....................................')
if __name__ == '__main__':
read_emp_info()
| thanhtugn/python_core_thanhtugn | lesson_08/solution_assignment_05.py | solution_assignment_05.py | py | 1,359 | python | en | code | 1 | github-code | 36 |
6212641952 | ##########################################################
# File: Config.py
# Author: Jose Perez <josegperez@mail.com>
# Version: Model v5
#
# Modify this file to your heart's content
# Here we have constants used by my code
# I put them all in one place so they can be easy to modify
#
##########################################################
# How much CD28 is needed to become active
CD28_THRESHOLD = 2
# How much time a cell will wait before resetting (mcs)
WAIT_TIME = 10
# How old the cells are when born (volume)
INITIAL_AGE = 15.0
# How old the cells will get at every step
STEP_AGE = 1.0
# How old the cells need to be to choose between apoptosis, division, and quiescence
DECISION_AGE = 50.0
# Probabilities of what the cell will do stochastically once it reaches a certain age/size
# Must add up to 1.0
PROB_APOPTOSIS = 0.1
PROB_DIVISION = 0.4
PROB_QUIESCENCE = 0.5
# Probabilities of which ligand will be lost when losing a ligand
PROB_LOST_CD80 = 0.5
PROB_LOST_CD86 = 0.5
# Probabilities of receptors binding to CD80
# Must add up to 1.0
PROB_CTLA4_BIND_CD80 = .9524
PROB_CD28_BIND_CD80 = .0476
WEIGHTS_CD80 = [PROB_CTLA4_BIND_CD80, PROB_CD28_BIND_CD80]  # order: [CTLA-4, CD28]
# Probabilities of receptors binding to CD86
# Must add up to 1.0
PROB_CTLA4_BIND_CD86 = .8837
PROB_CD28_BIND_CD86 = .1163
WEIGHTS_CD86 = [PROB_CTLA4_BIND_CD86, PROB_CD28_BIND_CD86]  # order: [CTLA-4, CD28]
13735356077 |
import os
import sys
import numpy as np
import PIL
import torch
import torch.utils.data
import torchvision
import matplotlib.pyplot as plt
import pickle
class AkMinitImagenetDataset(torch.utils.data.Dataset):
def __init__(self, rootdir_dataset, str_trainortestorinducing):
#grab args ===
self.rootdir_dataset = rootdir_dataset
self.str_trainortestorinducing = str_trainortestorinducing
#make internals ==
assert(isinstance(str_trainortestorinducing, str))
assert(self.str_trainortestorinducing in [
"train", "test", "inducing"
])
fname_train = "MiniImagenet/miniImageNet_category_split_train_phase_train.pickle"
fname_test = "MiniImagenet/miniImageNet_category_split_train_phase_test.pickle"
#"MiniImagenet/miniImageNet_category_split_test.pickle"
fname_pkl = fname_test if(str_trainortestorinducing == "test") else fname_train
with open(os.path.join(self.rootdir_dataset, fname_pkl), 'rb') as f:
content_pkl = pickle.load(f, encoding='latin1')
self.X = content_pkl['data'] #[N x 84 x 84 x 3]
self.Y = content_pkl['labels']
#make the transforms ===
tfm_colornormalization = torchvision.transforms.Normalize(
(0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)
)
if(self.str_trainortestorinducing == "train"):
self.tfm = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.ToTensor(),
torchvision.transforms.RandomHorizontalFlip(p=0.5),
tfm_colornormalization
])
elif(self.str_trainortestorinducing == "inducing"):
self.tfm = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.ToTensor(),
tfm_colornormalization
])
elif(self.str_trainortestorinducing == "test"):
self.tfm = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.ToTensor(),
tfm_colornormalization
])
else:
raise Exception("Unknown str_trainortestorinducing: {}".format(
self.str_trainortestorinducing
))
def __len__(self):
return self.X.shape[0]
def __getitem__(self, n):
xn = self.X[n,:,:,:] #[84x84x3]
yn = self.Y[n]
return self.tfm(xn), yn, n
| blindreviewgtdxjnsd/gpex_blindreview | PaperResults/MiniImagenet/loadminiimagenet.py | loadminiimagenet.py | py | 2,624 | python | en | code | 0 | github-code | 36 |
32921124712 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import datetime
import re
import redis
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose, Join, Identity
from w3lib.html import remove_tags
from models.es_type import ArticleType
from elasticsearch_dsl.connections import connections
es = connections.create_connection(ArticleType._doc_type.using)
redis_cli = redis.StrictRedis()
def date_convert(value):
try:
create_date = datetime.datetime.strptime(value.replace(u'·', '').strip(), "%Y/%m/%d").date()
except Exception as e:
create_date = datetime.datetime.now().date()
return create_date
def return_value(value):
return value
def get_nums(value):
if value == '' or value == []:
value = 0
match_re = re.match(".*?(\d+).*?", value)
if match_re:
nums = int(match_re.group(1))
else:
nums = 0
return nums
def remove_comment_tags(value):
# 去掉tag中提取的"评论"
if u"评论" in value:
return ""
else:
return value
def gen_suggests(index, info_tuple):
#根据字符串生成搜索建议数组
used_words = set()
suggests = []
for text, weight in info_tuple:
if text:
#调用es的analyze接口分析字符串
words = es.indices.analyze(index=index, analyzer="ik_max_word", params={'filter':["lowercase"]}, body=text)
anylyzed_words = set([r["token"] for r in words["tokens"] if len(r["token"])>1])
new_words = anylyzed_words - used_words
else:
new_words = set()
if new_words:
suggests.append({"input":list(new_words), "weight":weight})
return suggests
class ArticlespiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
class ArticleItemLoader(ItemLoader):
# 自定义itemloader
default_output_processor = TakeFirst()
class Remove_tag(Identity):
def __call__(self, values):
return [tag for tag in values if u"评论" not in tag]
class JobBoleArticleItem(scrapy.Item):
title = scrapy.Field()
create_date = scrapy.Field(
input_processor = MapCompose(date_convert),
output_processor = TakeFirst()
)
url = scrapy.Field()
url_object_id = scrapy.Field()
front_image_url = scrapy.Field(
output_processor = MapCompose(return_value)
)
front_image_path = scrapy.Field()
praise_nums = scrapy.Field(
input_processor = MapCompose(get_nums)
)
comment_nums = scrapy.Field(
input_processor = MapCompose(get_nums)
)
fav_nums = scrapy.Field(
input_processor = MapCompose(get_nums)
)
content = scrapy.Field()
tags = scrapy.Field(
# input_processor = MapCompose(remove_comment_tags),
input_processor = Remove_tag(),
output_processor = Join(",")
)
def get_insert_sql(self):
insert_sql = """
insert into jobbole_article(title, url, url_object_id, create_date, fav_nums, front_image_url, front_image_path,
praise_nums, comment_nums, tags, content)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE fav_nums=VALUES(fav_nums), praise_nums=VALUES(praise_nums), comment_nums=VALUES(comment_nums)
"""
try:
praise_nums = self["praise_nums"]
except:
praise_nums = 0
self["front_image_path"] = ''
params = (self["title"], self["url"], self["url_object_id"], self["create_date"], self["fav_nums"],
self["front_image_url"], self["front_image_path"], praise_nums, self["comment_nums"], self["tags"],
self["content"])
return insert_sql, params
def save_to_es(self):
article = ArticleType()
article.title = self['title']
article.create_date = self["create_date"]
article.content = remove_tags(self["content"][0])
article.front_image_url = self["front_image_url"]
if "front_image_path" in self:
article.front_image_path = self["front_image_path"]
article.praise_nums = self["praise_nums"]
article.fav_nums = self["fav_nums"]
article.comment_nums = self["comment_nums"]
article.url = self["url"]
article.tags = self["tags"]
article.meta.id = self["url_object_id"]
article.suggest = gen_suggests(ArticleType._doc_type.index, ((article.title,10),(article.tags, 7)))
article.save()
redis_cli.incr("jobbole_count")
return | Umi101108/Spider | www.jobbole.com/ArticleSpider/ArticleSpider/items.py | items.py | py | 4,739 | python | en | code | 2 | github-code | 36 |
37661910708 | import numpy as np
from math import pi
import time
from scipy.sparse.linalg import eigs, eigsh, spsolve, lobpcg
class Solution:
def __init__(self, stiffness_matrix, mass_matrix, **kwargs):
self.stiffness_matrix = stiffness_matrix.tocsc()
self.mass_matrix = mass_matrix.tocsc()
self.Kr = kwargs.get("Kr", None)
self.Mr = kwargs.get("Mr", None)
self.presc_dofs_info = kwargs.get("presc_dofs_info", None)
self.free_dofs = kwargs.get("free_dofs", None)
self.frequencies = kwargs.get("frequencies", None)
self.minor_freq = kwargs.get("minor_freq", None)
self.major_freq = kwargs.get("major_freq", None)
self.df = kwargs.get("df", None)
self.number_points = kwargs.get("number_points", None)
self.alpha_v = kwargs.get("alpha_v", None)
self.beta_v = kwargs.get("beta_v", None)
self.alpha_h = kwargs.get("alpha_h", None)
self.beta_h = kwargs.get("beta_h", None)
def modal_analysis(self, number_modes = 10, which = 'LM', sigma = 0.01, timing = False ):
""" Perform a modal analysis and returns natural frequencies and modal shapes normalized
with respect to generalized mass coordinates.
"""
start = time.time()
eigen_values, eigen_vectors = eigs( self.stiffness_matrix,
k = number_modes,
M = self.mass_matrix,
which = which,
sigma = sigma)
end = time.time()
if timing:
print('Time to perform modal analysis :' + str(round((end - start),6)) + '[s]')
natural_frequencies = np.sqrt( np.absolute( np.real(eigen_values) ) ) /(2 * pi)
ind_ord = np.argsort( natural_frequencies )
natural_frequencies = natural_frequencies[ ind_ord ]
modal_shape = np.real( eigen_vectors[ :, ind_ord ] )
return natural_frequencies, modal_shape
def freq_vector(self):
if np.array(self.frequencies).all() == None or self.frequencies==[] :
if self.minor_freq == None:
self.minor_freq = float(input('Enter a value to a minor frequency of analysis: '))
if self.major_freq == None:
self.major_freq = float(input('Enter a value to a major frequency of analysis: '))
if self.df == None and self.number_points == None:
self.df = float(input('Enter a value to frequency resolution: '))
if self.df == None and self.number_points != None:
self.df = (self.major_freq - self.minor_freq)/(self.number_points - 1)
self.frequencies = np.arange(self.minor_freq, self.major_freq + self.df, self.df)
return self.frequencies
def direct_method(self, F, timing = False):
"""
Perform an harmonic analysis through direct method and returns the response of
all nodes due the external or internal equivalent load. It has been implemented two
different damping models: Viscous Proportional and Hysteretic Proportional
Entries for Viscous Proportional Model Damping: (alpha_v, beta_v)
Entries for Hyteretic Proportional Model Damping: (alpha_h, beta_h)
"""
if self.alpha_v == None:
self.alpha_v = 0
if self.beta_v == None:
self.beta_v = 0
if self.alpha_h == None:
self.alpha_h = 0
if self.beta_h == None:
self.beta_h = 0
if self.Kr == None or self.Mr == None:
Kr_v, Mr_v = 0, 0
else:
Kr = (self.Kr.toarray())[ :, self.free_dofs ]
Mr = (self.Mr.toarray())[ :, self.free_dofs ]
Kr_temp = np.zeros(( Kr.shape[1], Kr.shape[0] ))
Mr_temp = np.zeros(( Mr.shape[1], Mr.shape[0] ))
for ind, value in enumerate(self.presc_dofs_info[:,2]):
Kr_temp[ :, ind ] = value*Kr[ ind, : ]
Mr_temp[ :, ind ] = value*Mr[ ind, : ]
Kr_v = np.sum( Kr_temp, axis=1 )
Mr_v = np.sum( Mr_temp, axis=1 )
M = self.mass_matrix
K = self.stiffness_matrix
frequencies = self.freq_vector()
x = np.zeros([ self.stiffness_matrix.shape[0], len(frequencies) ], dtype=complex )
start = time.time()
for i, freq in enumerate(frequencies):
F_add = (1 + 1j*freq*self.beta_v + 1j*self.beta_h)*Kr_v - ( ((2 * pi * freq)**2) - 1j*freq*self.alpha_v - 1j*self.alpha_h)*Mr_v
K_damp = ( 1 + 1j*freq*self.beta_v + 1j*self.beta_h )*K
M_damp = ( -((2 * pi * freq)**2) + 1j*freq*self.alpha_v + 1j*self.alpha_h)*M
A = K_damp + M_damp
x[:,i] = spsolve(A, F - F_add)
if timing:
end = time.time()
print('Time to solve harmonic analisys problem through direct method:' + str(round((end - start),6)) + '[s]')
return x, frequencies
def mode_superposition(self, F, number_modes = 10, which = 'LM', sigma = 0.01, timing = False, **kwargs):
"""
Perform an harmonic analysis through superposition method and returns the response of
all nodes due the external or internal equivalent load. It has been implemented two
different damping models: Viscous Proportional and Hysteretic Proportional
Entries for Viscous Proportional Model Damping: (alpha_v, beta_v)
Entries for Hyteretic Proportional Model Damping: (alpha_h, beta_h)
"""
if self.alpha_v == None:
self.alpha_v = 0
elif self.beta_v == None:
self.beta_v = 0
if self.alpha_h == None:
self.alpha_h = 0
elif self.beta_h == None:
self.beta_h = 0
if self.Kr == None or self.Mr == None:
Kr_v, Mr_v = 0, 0
else:
Kr = (self.Kr.toarray())[ :, self.free_dofs ]
Mr = (self.Mr.toarray())[ :, self.free_dofs ]
Kr_temp = np.zeros(( Kr.shape[1], Kr.shape[0] ))
Mr_temp = np.zeros(( Mr.shape[1], Mr.shape[0] ))
for ind, value in enumerate(self.presc_dofs_info[:,2]):
Kr_temp[ :, ind ] = value*Kr[ ind, : ]
Mr_temp[ :, ind ] = value*Mr[ ind, : ]
Kr_v = np.sum( Kr_temp, axis=1 )
Mr_v = np.sum( Mr_temp, axis=1 )
frequencies = self.freq_vector()
x = np.zeros([ self.stiffness_matrix.shape[0], len(frequencies) ], dtype=complex)
modal_shape = kwargs.get("modal_shape", None)
natural_frequencies = kwargs.get("natural_frequencies", None)
start = time.time()
if np.array(modal_shape).all() == None or modal_shape.shape[1] != number_modes:
natural_frequencies, modal_shape = self.modal_analysis( number_modes = number_modes, which = 'LM', sigma = sigma )
#F_aux = modal_shape.T @ F
for i, freq in enumerate(frequencies):
Kg_damp = (1 + 1j*self.beta_v*freq + 1j*self.beta_h)*((2 * pi * natural_frequencies)**2)
Mg_damp = (1j*freq*self.alpha_v + 1j*self.alpha_h) - ((2 * pi * freq)**2)
data = np.divide(1, (Kg_damp + Mg_damp))
diag = np.diag(data)
F_add = (1 + 1j*freq*self.beta_v + 1j*self.beta_h)*Kr_v - ( ((2 * pi * freq)**2) - 1j*freq*self.alpha_v - 1j*self.alpha_h)*Mr_v
F_aux = modal_shape.T @ (F - F_add)
x[:,i] = modal_shape @ (diag @ F_aux)
end = time.time()
if timing:
print('Time to solve harmonic analisys problem through mode superposition method:' + str(round((end - start),6)) + '[s]')
return x, frequencies, natural_frequencies, modal_shape
| atbrandao/OpenPulse_f | pulse/engine/solution.py | solution.py | py | 8,010 | python | en | code | null | github-code | 36 |
9663828561 | import os
import pickle
from tqdm import tqdm
import multiprocessing as mp
import itertools
import numpy as np
import functools
from termcolor import colored
import time
from numba import jit
SAVE_DIR = 'iterator_saves'
EXPENDABLE_MEMORY = 5 # in Gig
class Iterator:
def __init__(self, width, height, _print=False, save_at=500_000):
self.width = width
self.height = height
self._print = _print
self.variants = []
self.iteration_counter = 0
self.leaf_counter = 0
self.queue = [] # nodes that are next to be processed
self.nodes = [] # nodes being currently processed
self.save_at = save_at
def iterate(self, depth_first=True, multi_processing=True, parallel=10000, continued=False):
if not continued:
start = Node(grid=create_base_grid(np.zeros((self.width, self.height), dtype=np.byte), positive_cells=[(0, 0)], negative_cells=[]), num_positives=1)
self.queue = [start]
last_checkpoint = len(self.variants)
with tqdm(total=0) as pbar:
while len(self.queue) > 0:
if multi_processing:
pool = mp.Pool(mp.cpu_count())
queue_len = len(self.queue)
if depth_first:
self.queue, self.nodes = (self.queue[:-parallel], self.queue[-parallel:])
else:
self.queue, self.nodes = (self.queue[parallel:], self.queue[:parallel])
pbar.set_description(pretty_description(queue_len, len(self.nodes), self.leaf_counter, len(self.variants)))
full_iteration = pool.map(next_wrapper, self.nodes)
pool.close()
_counter = 0
for _next in full_iteration:
add_to_queue = self.unpack_next(_next)
self.queue += add_to_queue
self.iteration_counter += len(add_to_queue)
_counter += len(add_to_queue)
pbar.update(_counter)
else:
if self.iteration_counter % 1000 == 0:
pbar.set_description(pretty_description(len(self.queue), 1, self.leaf_counter, len(self.variants)))
if depth_first:
next_node = self.queue.pop(len(self.queue) - 1)
else:
next_node = self.queue.pop(0)
self.nodes = [next_node]
_next = next_node.get_next()
add_to_queue = self.unpack_next(_next)
self.queue += add_to_queue
pbar.update(len(add_to_queue))
self.iteration_counter += len(add_to_queue)
pbar.refresh()
if self.save_at is not None:
if len(self.variants) > last_checkpoint + self.save_at:
self.save_wrapper('checkpoint')
last_checkpoint += self.save_at
if self._print:
print("Number of processed nodes: {}".format(self.iteration_counter))
print("Number of checked leafs: {}".format(self.leaf_counter))
print("Number found variants: {}".format(len(self.variants)))
self.nodes = []
self.save_wrapper('complete')
return self.variants
def unpack_next(self, _next):
leaf, content = _next
add_to_queue = []
if leaf:
if content is not None:
self.variants.append(content)
self.leaf_counter += 1
else:
add_to_queue = content
return add_to_queue
def next_wrapper(self, node):
_next = node.get_next()
return self.unpack_next(_next)
def save_wrapper(self, keyword=""):
save_path = os.path.join(os.getcwd(), SAVE_DIR, '{}{}'.format(keyword, time.strftime("%Y%m%d-%H%M%S")))
self.save(save_path)
save_message = '{} save: {} variants found. Iteration Progress saved at {}'.format(keyword, len(self.variants), save_path)
print(colored(save_message, 'green'))
return save_path
def save(self, path):
os.makedirs(path, exist_ok=True)
# save variants via numpy
variants_path = os.path.join(path, 'variants.npy')
np.save(variants_path, self.variants)
# save state of iterator via pickle
state_path = os.path.join(path, 'state.pkl')
iterator_state = {'queue': self.queue,
'nodes': self.nodes,
'width': self.width,
'height': self.height,
'iteration_counter': self.iteration_counter,
'leaf_counter': self.leaf_counter}
with open(state_path, 'wb') as handle:
pickle.dump(iterator_state, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load(self, path):
# load variants via numpy
variants_path = os.path.join(path, 'variants.npy')
self.variants = list(np.load(variants_path))
# load state of iterator via pickle
state_path = os.path.join(path, 'state.pkl')
with open(state_path, 'rb') as handle:
state = pickle.load(handle)
self.queue = state['queue'] + state['nodes']
self.width = state['width']
self.height = state['height']
self.iteration_counter = state['iteration_counter']
self.leaf_counter = state['leaf_counter']
def next_wrapper(node):
return node.get_next()
def create_base_grid(base_grid, positive_cells, negative_cells):
grid = np.copy(base_grid)
for (x, y) in positive_cells:
grid[x][y] = 1
for (x, y) in negative_cells:
grid[x][y] = -1
return grid
def count_positive_cells(grid):
return np.sum(np.array(grid))
@jit(nopython=True)
def count_adjacent(grid, coords):
x, y = coords
counter = 0
adjacent_edge = [(1, 0), (0, 1), (-1, 0), (0, -1)]
adjacent_corner = [[(-1, -1), (-1, 1)], [(-1, -1), (1, -1)], [(1, 1), (1, -1)], [(-1, 1), (1, 1)]]
for index, (_x, _y) in enumerate(adjacent_edge):
if get_safe(grid, (x + _x, y + _y)) > 0:
for (__x, __y) in adjacent_corner[index]:
if get_safe(grid, (x + __x, y + __y)) > 0:
return 2
counter += 1
return counter
@jit(nopython=True)
def get_safe(grid, coords):
x, y = coords
if x >= len(grid) or y >= len(grid[0]) or x < 0 or y < 0:
return 0
return grid[x][y]
@jit(nopython=True)
def make_next_grid(base_grid, combination, possibilites):
"""
Makes new grid from given base grid, as well as a list of possible next cells and a combination of those to select.
Note: the new cells are placed sequentially. Should two new cells be adjacent, the function will return with success=false
:param base_grid: the grid that the new grid is based on
:param combination: a tuple (b1, ..., bn) of booleans indicating which possible cells should be selected
:param possibilites a list [(x1, y1), ..., (xn, yn)] of coords, representing the selectable cells
:return: new grid, counter of new positive cells, success bool
"""
new_positive_cells = 0
next_grid = np.copy(base_grid)
for index, (x, y) in enumerate(possibilites):
if combination[index]:
if count_adjacent(next_grid, (x, y)) != 1:
return None, 0, False
next_grid[x][y] = 1
new_positive_cells += 1
else:
next_grid[x][y] = -1
return next_grid, new_positive_cells, True
@functools.lru_cache()
def get_combinations(length):
combinations = [list(i) for i in itertools.product([0, 1], repeat=length)]
return np.array(combinations)
@functools.lru_cache()
def get_n(width, height):
return int(np.ceil(width / 2) * np.ceil(height / 2)) * 2 - 1
class Node:
def __init__(self, grid, num_positives):
self.grid = grid
self.num_positives = num_positives
def get_next(self):
# leaf node
if self.num_positives == get_n(len(self.grid), len(self.grid[0])):
self.export_grid()
return True, self.grid
possibilites = []
indices = np.argwhere(self.grid == 0)
for (x, y) in indices:
if count_adjacent(self.grid, (x, y)) == 1:
possibilites.append((x, y))
# also leaf node, but invald
if len(possibilites) == 0:
return True, None
_next = []
combinations = get_combinations(len(possibilites))[1:]
for combination in combinations:
next_grid, counter, success = make_next_grid(self.grid, combination, np.array(possibilites))
if not success:
continue
# if self.num_positives + counter > get_n(len(self.grid), len(self.grid[0])):
# print("To many cells!")
# continue
_next.append(Node(next_grid, self.num_positives + counter))
return False, _next
def export_grid(self):
self.grid = np.floor_divide(self.grid + np.ones_like(self.grid), 2)
#####################
#### BASIC UTIL #####
#####################
def print_2d(grid, print_zeros=True, highlight=None):
height = len(grid[0])
width = len(grid)
for y in range(height - 1, -1, -1):
row = ""
for x in range(width):
if highlight is not None:
_x, _y = highlight
if x == _x and y == _y:
row += "{} ".format(colored(grid[x][y], 'yellow'))
continue
if not print_zeros:
row += "{} ".format(" " if grid[x][y] == 0 else grid[x][y])
else:
row += "{} ".format(grid[x][y])
print(row)
print()
def pretty_description(num_queue, num_nodes, num_leafs, num_variants):
string1 = "processing {}/{} items in queue".format(num_nodes, num_queue)
string2 = "{} leafs checked".format(num_leafs)
string3 = "{} variants found".format(num_variants)
description_string = "{}, {}, {}".format(*[colored(string1, 'cyan'), colored(string2, 'yellow'), colored(string3, 'green')])
return description_string
def load_print_variants(path, limit=None, print_zeros=False):
if not os.path.exists(path):
print("Path {} does not exist.".format(colored(path, 'yellow')))
return
variants = np.load(path)
if limit is None:
limit = len(variants)
print("Variants loaded: {}. Printing the first {}:".format(len(variants), limit))
for index in range(limit):
print_2d(variants[index], print_zeros=print_zeros)
def continue_iteration(path, multi_processing=True):
if not os.path.exists(path):
print("Path {} does not exist.".format(colored(path, 'yellow')))
return
iterator = Iterator(0, 0, _print=True)
try:
iterator.load(path)
if len(iterator.queue) > 0:
print(colored('Continue Iteration\n', 'blue'))
time.sleep(0.01)
iterator.iterate(continued=True, depth_first=True, multi_processing=multi_processing, parallel=15000 * EXPENDABLE_MEMORY)
else:
print(colored('Iteration saved in {} already completed.'.format(path), 'blue'))
except KeyboardInterrupt:
print(colored('Program Interrupted. Saving progress..', 'yellow'))
iterator.save_wrapper('interrupted')
def iterate(w=7, h=7, multi_processing=True):
if max(w, h) < 5:
multi_processing = False
iterator = Iterator(w, h, _print=True)
try:
iterator.iterate(multi_processing=multi_processing, depth_first=True, parallel=15000 * EXPENDABLE_MEMORY)
except KeyboardInterrupt:
print(colored('Program Interrupted. Saving progress..', 'yellow'))
iterator.save_wrapper('interrupted')
if __name__ == '__main__':
print(colored("Available cores: {}\n".format(mp.cpu_count()), 'green'))
time.sleep(0.01)
# Iterate from scratch
iterate(w=5, h=5)
# Continue Iteration from save
continue_iteration(os.path.join(os.getcwd(), SAVE_DIR, 'save_name'))
# Print found variants from save
load_print_variants(os.path.join(os.getcwd(), SAVE_DIR, 'save_name', 'variants.npy'), limit=2)
| m1a9l9t7e/RoadGenerator | ip/iterative_construction.py | iterative_construction.py | py | 12,413 | python | en | code | 2 | github-code | 36 |
6514242070 | """
Author: Inigo Hohmeyer
Date: May 9th, 2022
Programming Assignment 4: Naive Bayes Text Classification
"""
import copy
import math
import re
import sys
N = 5
# this function updates the current
# dictionary with the counts from the current dictionary
def update(current_dict, overall):
for i in current_dict:
if i in overall:
overall[i] += 1
else:
overall[i] = 1
#
stop_words = {}
with open("stopwords.txt") as new_file:
lines = new_file.readlines()
for i in lines:
for j in i.split():
stop_words[j] = 1
# Learning Phase
with open("tinyCorpus.txt") as file:
switch = 0
count = 0
# switch = 0, then Person
# switch = 1, then Category
# switch = 2, then description
# switch = 3, we already read the description we are just waiting until
# we reach a blank space
# If blank then reset to 1 and if blank do not add to the category.
# If blank and count is equal to N then we stop
corpus = file.readlines()
cat_count = {}
current_cat = ""
final_index = 0
for index, i in enumerate(corpus):
# This means we have reached the end of the corpus
if count == N and i == "\n":
break
# This means that we are in still in the in-between space
elif switch == 0 and i == "\n":
continue
# We are at the person's name
elif switch == 0:
# Updates the number of biographies we've gone over
count += 1
switch += 1
# This means that we are at the category
elif switch == 1:
# If the category has already been created
if i.split()[0] in cat_count:
cat_count[i.split()[0]][0] += 1
switch += 1
current_cat = i.split()[0]
else:
cat_count[i.split()[0]] = [1, {}]
switch += 1
current_cat = i.split()[0]
# This means we were in the description and
# now we have reached the in-between space
elif switch == 2 and i == "\n":
switch = 0
# this means that we are in the description
# but we will only do the description once
elif switch == 2:
desc_line = index
# creates a dictionary which will be only used for this autobiography
current_dict = {}
# this will iterate through the description
while corpus[desc_line] != "\n":
desc_val = re.sub(r'[^\w\s]', '', corpus[desc_line])
for j in desc_val.split():
if j.lower() not in current_dict and j.lower() not in stop_words and len(j) > 2:
current_dict[j.lower()] = 1
desc_line += 1
update(current_dict, cat_count[current_cat][1])
# switches to 3 so we are in the description
switch = 3
# This means that we have reached the end of the description
elif switch == 3 and i == "\n":
switch = 0
final_index = index + 1
freq_table = copy.deepcopy(cat_count)
# learning phase
for i in freq_table:
freq_table[i][0] = -math.log2((cat_count[i][0]/N + 0.1)/(1 + len(cat_count) * 0.1))
for j in freq_table[i][1]:
freq_table[i][1][j] = -math.log2((cat_count[i][1][j]/cat_count[i][0] + 0.1)/(1 + 2 * 0.1))
print(freq_table)
# 3.2 Applying the classifier to the training data.
pred_dict = {}
with open("tinyCorpus.txt") as file:
switch = 0
test_corpus = file.readlines()
# the index starts at 0
# that seems to pe a problem
for value in test_corpus[final_index:]:
index = final_index
# we were in the in between zone
# we've reached a name
if switch == 0 and value != "\n":
pred_dict[value] = ["", {}]
current_bio = value
switch = 1
# if switch is equal to 1
# this means that we are at
# category
# we will put this as the true category
elif switch == 1:
pred_dict[current_bio][0] = value.split()[0]
switch = 2
# this means that we have reached the description
elif switch == 2:
# this puts the probability of each category into the dictionary
for i in freq_table:
pred_dict[current_bio][1][i] = freq_table[i][0]
# resets the line
line = final_index
# resets the dictionary so there are no repeats
repeat_dict = {}
# goes through the description
while test_corpus[line] != "\n":
# takes out the punctuation in each line
no_punc_line = re.sub(r'[^\w\s]', '', test_corpus[line])
# goes through the line
for j in no_punc_line.split():
# goes through the category
# if it's in the dictionary of the category and we have not seen it before
# in this biography then we add its value
if j.lower() in freq_table[i][1] and j.lower() not in repeat_dict:
print("adding", j.lower(), "for", current_bio, "in", i)
print("")
pred_dict[current_bio][1][i] += freq_table[i][1][j.lower()]
repeat_dict[j.lower()] = 1
line += 1
switch = 3
elif switch == 3 and value == "\n":
switch = 0
final_index += 1
# Prediction dictionary has the Biography: Predicted Category, [L(C|B) for each category]
def recoverProb(pred):
cat_prob = copy.deepcopy(pred)
m = sys.maxsize
pred_cat = ""
for i in pred:
for j in pred[i][1]:
if pred[i][1][j] < m:
m = pred[i][1][j]
pred_cat = j
for i in pred:
for j in pred[i][1]:
if pred[i][1][j] - m < 7:
cat_prob[i][1][j] = pow(2, (m-pred[i][1][j]))
else:
cat_prob[i][1][j] = 0
for i in cat_prob:
total = sum(cat_prob[i][1].values())
for j in cat_prob[i][1]:
cat_prob[i][1][j] = cat_prob[i][1][j]/total
return cat_prob
final_prob = recoverProb(pred_dict)
def printOutput(final):
for i in final:
min = -sys.maxsize
prediction = ""
for j in final[i][1]:
if final[i][1][j] > min:
prediction = j
min = final[i][1][j]
if prediction == final[i][0]:
print(i.strip(), "Prediction:", prediction, "Right" )
else:
print(i.strip(), "Prediction:", prediction, "Wrong")
for n in final[i][1]:
print(n, ":", final[i][1][n], end=" ")
print("\n")
printOutput(final_prob)
| hohmeyer/AIAssignment4 | main.py | main.py | py | 7,019 | python | en | code | 0 | github-code | 36 |
33039659569 | class Solution(object):
def findLengthOfLCIS(self, nums):
if len(nums) == 0:
return 0
temp = [1]*(len(nums) + 1)
for i in range(1, len(nums)):
if nums[i] > nums[i - 1]:
temp[i] = 1 + temp[i - 1]
return max(temp)
print(Solution.findLengthOfLCIS(0,[1,3,5,4,7]))
print(Solution.findLengthOfLCIS(0,[2,2,2,2,2]))
| NikitaFir/Leetcode | Longest Continuous Increasing Subsequence.py | Longest Continuous Increasing Subsequence.py | py | 403 | python | en | code | 0 | github-code | 36 |
17113250880 | import string
fname = input("Enter a file name: ")
try:
fhand = open(fname)
except:
print("File is not found:",fname)
counts = dict()
for line in fhand:
line = line.rstrip()
line = line.lower()
line = line.translate(line.maketrans('','',string.punctuation))
line = line.translate((line.maketrans('','',string.whitespace)))
line = line.translate((line.maketrans('','',string.digits)))
words = line.split()
for word in words:
for char in word:
if char not in counts:
counts[char] = 1
else:
counts[char] += 1
lst = list()
for key, val in list(counts.items()):
lst.append((val, key))
lst.sort(reverse=True)
for key, val in lst:
print(
val, key
) | amrmabdelazeem/PythonForEverybody | exercise10.12.3.py | exercise10.12.3.py | py | 766 | python | en | code | 0 | github-code | 36 |
40177340538 | """
Cultural Evolution Simulation (Yummy_Recipe)
Ali Fazeli
Final Project of B.sc Level in Computer Engineering
Task: Simulate changes in recipes within a population of Agents over several
generations depending on different rule sets.
"""
idA = 0
# All Agents ever created, one consecutive list
agentsOverGenerations = []
# same as above just as a dictionary
# {int_index : [Agent List for the Generation specified by int_index]}
agentsOverAllDict = {}
# a dictionary holding all the recipes for each Generation in an array
# RecListOverGenerations{"int_Index : Recipe"}
RecListOverGenerations = {}
# all the winning recipes in a dictionary
WinningArrsOverGenerations = {}
# The name says it all, Dictionary holding the social groups for each Generation
SocialGroups = {}
def config_reset():
global SocialGroups
SocialGroups = {}
global WinningArrsOverGenerations
WinningArrsOverGenerations = {}
global RecListOverGenerations
RecListOverGenerations = {}
global agentsOverAllDict
agentsOverAllDict = {}
global agentsOverGenerations
agentsOverGenerations = []
global idA
idA = 0
| alifzl/Final_B.Sc_Project | second_and_probably_last/Yummy_Recipe/Config.py | Config.py | py | 1,183 | python | en | code | 2 | github-code | 36 |
39274579320 | import os
from ..auth.auth_handler import create_jwt
from postmarker.core import PostmarkClient
from ..models.attendee import Attendee
from fastapi import APIRouter, HTTPException, Depends, Request
from pyairtable.api.table import Table
from ..dependencies import get_registration_table
from email_validator import validate_email, EmailNotValidError
from datetime import timedelta
import requests
import bisect
from sentry_sdk import capture_message
from dotenv import load_dotenv
load_dotenv()
# Secrets / paths pulled from the environment (.env loaded above).
POSTMARK_SERVER_TOKEN = os.getenv("POSTMARK_SERVER_TOKEN")
BLACKLIST_PATH = os.getenv("BLACKLIST_PATH")
RECAPTCHA_SECRET_KEY = os.getenv("RECAPTCHA_SECRET_KEY")
# Base URL for the email-verification landing page.
VERIFY_PATH = "https://register.losaltoshacks.com/verify"

# All routes in this module are mounted under /register.
router = APIRouter(
    prefix="/register",
    tags=["register"],
)

# Shared Postmark client for sending verification emails.
# verbosity=3 enables the client's most detailed request/response logging.
postmark = PostmarkClient(server_token=POSTMARK_SERVER_TOKEN, verbosity=3)
class EmailDomainValidator:
    """Checks email domains against a blocklist of disposable-email providers.

    The blocklist file is loaded once at construction time and kept in a
    sorted tuple so each lookup is an O(log n) binary search via ``bisect``.
    """

    def __init__(self):
        path = os.path.join(
            os.path.dirname(__file__), "../disposable_email_blocklist.txt"
        )
        with open(path) as fin:
            # Sort explicitly: bisect_left below requires sorted input, and
            # relying on the file already being sorted is fragile — an
            # unsorted file would silently let blocked domains through.
            self.sorted_blacklist = tuple(sorted(fin.read().splitlines()))

    def validate(self, email: str) -> bool:
        """Return True if the given domain is NOT on the blocklist."""
        index = bisect.bisect_left(self.sorted_blacklist, email)
        # bisect_left gives the insertion point; the domain is blocked only
        # if the entry at that point is an exact match.
        return not (
            index < len(self.sorted_blacklist) and self.sorted_blacklist[index] == email
        )
domain_validator = EmailDomainValidator()
@router.post("/attendee")
async def add_attendee(
attendee: Attendee, request: Request, table: Table = Depends(get_registration_table)
):
# Check if the reCAPTCHA token is valid
# recaptcha = requests.post(
# "https://www.google.com/recaptcha/api/siteverify",
# data={
# "secret": RECAPTCHA_SECRET_KEY,
# "response": attendee.token,
# },
# ).json()
# if not recaptcha["success"]:
# e = HTTPException(
# status_code=400, detail="reCAPTCHA Error: " + str(recaptcha["error-codes"])
# )
# capture_message("reCAPTCHA Error: " + str(recaptcha["error-codes"]))
# raise e
# if recaptcha["action"] != "submit":
# e = HTTPException(
# status_code=400,
# detail="Invalid reCAPTCHA action for this route.",
# )
# capture_message(
# f'Invalid reCAPTCHA action for this route. Action was {recaptcha["action"]}'
# )
# raise e
# if recaptcha["score"] < 0.5:
# e = HTTPException(
# status_code=400,
# detail=f'Failed reCAPTCHA (score was {recaptcha["score"]}). Are you a bot?',
# )
# capture_message(f'Failed reCAPTCHA (score was {recaptcha["score"]}).')
# raise e
# Check emails are valid
try:
email = validate_email(attendee.email, check_deliverability=True)
attendee.email = email.ascii_email
except EmailNotValidError as e:
raise HTTPException(
status_code=400,
detail=f"Your email address is not valid. {e}",
)
try:
parent_email = validate_email(attendee.parent_email, check_deliverability=True)
attendee.parent_email = parent_email.ascii_email
except EmailNotValidError as e:
raise HTTPException(
status_code=400,
detail=f"Your parent/legal guardian's email address is not valid. {e}",
)
if attendee.email == attendee.parent_email:
raise HTTPException(
status_code=400,
detail="Attendee email and parent email cannot be the same.",
)
if not domain_validator.validate(
email.ascii_domain
) or not domain_validator.validate(parent_email.ascii_domain):
e = HTTPException(
status_code=400,
detail="Do not use temporary email addresses.",
)
raise e
# Verify both attendee and parent emails
res = table.create(attendee.getAirtableFields())
attendee_id = res["id"]
# Tokens expire in 6 weeks
expire_delta = timedelta(weeks=6)
student_token = create_jwt(
{"id": attendee_id, "type": "student"}, expires_delta=expire_delta
)
parent_token = create_jwt(
{"id": attendee_id, "type": "parent"}, expires_delta=expire_delta
)
postmark.emails.send_with_template(
TemplateAlias="email-verification",
TemplateModel={
"name": attendee.first_name,
"action_url": f"{VERIFY_PATH}?token={student_token}",
},
From="hello@losaltoshacks.com",
To=attendee.email,
)
postmark.emails.send_with_template(
TemplateAlias="email-verification",
TemplateModel={
"name": attendee.first_name + "'s parent/guardian",
"action_url": f"{VERIFY_PATH}?token={parent_token}",
},
From="hello@losaltoshacks.com",
To=attendee.parent_email,
)
return res
| LosAltosHacks/api | app/routers/register.py | register.py | py | 4,977 | python | en | code | 0 | github-code | 36 |
70562613224 | from collections import deque
from heapq import heappush, heappop
def solution(priorities, location):
priority_heap = []
queue = deque()
count = 0
for idx, priority in enumerate(priorities):
queue.append((priority, idx))
heappush(priority_heap,(-priority, priority))
while queue:
priority, idx = queue.popleft()
if priority == priority_heap[0][1]:
heappop(priority_heap)
count += 1
if idx == location:
return count
else:
queue.append((priority, idx)) | zsmalla/algorithm-jistudy-season1 | src/Programmers_HighScore_Kit/스택&큐/임지수/프린터_python_임지수.py | 프린터_python_임지수.py | py | 576 | python | en | code | 0 | github-code | 36 |
30247439502 | #!/usr/bin/env python3
from optparse import OptionParser
def parse_arguments():
parser = OptionParser(usage='usage: %prog [option]... URL...')
parser.add_option('-i', dest='input_file', metavar='FILE',
help='Download URLs found in FILE')
parser.add_option('-C', dest='cookie_file', default='', metavar='FILE',
help='File to load cookies from (WIP)')
opts, args = parser.parse_args()
if not args and not opts.input_file:
parser.print_help()
exit(1)
return opts, args
| b1337xyz/e-hen | args.py | args.py | py | 553 | python | en | code | 1 | github-code | 36 |
42298776688 | num =[1,32,42]
for i in num:
print(i)
print("\t\tBy iterator built in function:")
it=iter(num)#ye ak built in fun ha jo values ko iterate krta ha
print(it.__next__())#next valu k liye
print(it.__next__())
print(next(it))#ye dono same han =print(it.__next__())
print()
class topten:
def __init__(self):
self.count=1
def __next__(self):
if(self.count<=10):
val=self.count
self.count+=1
return val
else:
raise StopIteration#itr will stop the iteration
def __iter__(self):
return self
print("\t\tBy making iteration class and acces the values:")
values=topten()
for i in values:
print(i)
| IbrarShakoor/Python-codes | iterators.py | iterators.py | py | 728 | python | en | code | 1 | github-code | 36 |
73522000744 | from math import ceil
def SimpleLowerBound(items, binSize, className):
d1Sum = 0
d2Sum = 0
d3Sum = 0
for item in items:
d1Sum += item.getD1()
d2Sum += item.getD2()
d3Sum += item.getD3()
lowerBound = max(ceil(d1Sum/binSize[0]), ceil(d3Sum/binSize[1]), ceil(d3Sum/binSize[2]))
if className:
r = open(f"results\{className}_Results_Steps.txt", "a")
r.write("Az osztályra számolt egyszerű alsó korlát: " + str(lowerBound) + "\n")
r.close()
# print(f"A {className} inputon legalább {lowerBound} db láda fog kelleni!")
"""
Ez ugye azt nem veszi figyelembe ha a tárgyakat nem lehet egymás mellé pakolni így azokban az esetekben
amikor mindegyik elemet külön kell tegyünk nagyon távoli korlátott add, pedig nem lehet jobb pakolást összehozni
sehogy semm.
"""
return lowerBound | Pityundra/VektorPakolas | resources/simpleLowerBound.py | simpleLowerBound.py | py | 919 | python | hu | code | 0 | github-code | 36 |
39835210130 | xa=float(input("Zadaj x-ovú súradnicu bodu A:"+"\n"))
ya=float(input("Zadaj y-ovú súradnicu bodu A:"+"\n"))
xb=float(input("Zadaj x-ovú súradnicu bodu B:"+"\n"))
yb=float(input("Zadaj y-ovú súradnicu bodu B:"+"\n"))
print()
xu=xa-xb
yu=ya-yb
print("Súradnice vektora AB=["+str(xu)+","+str(yu)+"]")
print("Veľkosť vektora AB:",(xu**2+yu**2)**(1/2))
print("Parametrické vyjadrenie priamky AB:")
print("x =",xa,"+",xu,"* t")
print("y =",ya,"+",yu,"* t")
c=-(-yu*xa+xu*ya)
print("Všeobecná rovnica priamky AB:")
print(str(-yu)+"x +",str(xu)+"y +",c,"= 0")
if -yu==0:
px="Nemá priesečník osi x."
else:
px=-c/-yu
if xu==0:
py="Nemá prisečník osi y."
else:
py=-c/xu
print("Px=["+str(px)+",0]")
print("Py=[0,"+str(py)+"]")
print()
input("Pre skončenie stlačte ENTER")
quit()
| Rastislav19/programs-learning | Python-EKG/funkcie/funkcie2/6.py | 6.py | py | 806 | python | sk | code | 0 | github-code | 36 |
34046878888 | from pyspark.sql import SparkSession
from pyspark.sql.types import StringType,StructField,StructType,IntegerType
spark = SparkSession.builder.master("local[*]").appName("romeoJulietWordCount").getOrCreate()
sc = spark.sparkContext
inprdd = sc.textFile("D:/Spark_Scala/data/wordcount/romeojuliet.txt")
#convert to lower case,
# split based on the space
#count the words
"""for x in inprdd.take(10):
print(x)"""
outputRDD = inprdd\
.map(lambda x: x.lower())\
.flatMap(lambda x: x.split(" ")).map(lambda x : (x,1)).filter(lambda x: ((x[0] != ''),x[1]))\
.reduceByKey(lambda a,b : a + b)
#.toDF("word","count")
#outputDF.show()
for x in outputRDD.take(10):
print(x)
outputDF = outputRDD.toDF(["words","count"])
"""schema = StructType([StructField("words",StringType(),True),
StructField("count",IntegerType(),True)])
outputDF = spark.createDataFrame(outputRDD,schema=schema)"""
outputDF.show() | ZosBHAI/pysparkexamples | wordcount01.py | wordcount01.py | py | 940 | python | en | code | 0 | github-code | 36 |
21639598208 | from transopt.utils.Register import benchmark_registry
from transopt.Benchmark.BenchBase import (
TransferOptBenchmark,
RemoteTransferOptBenchmark,
)
def construct_test_suits(
tasks: dict = None, seed: int = 0, remote: bool = False, server_url: str = None
) -> TransferOptBenchmark:
tasks = tasks or {}
if remote:
if server_url is None:
raise ValueError("Server URL must be provided for remote testing.")
test_suits = RemoteTransferOptBenchmark(server_url, seed)
else:
test_suits = TransferOptBenchmark(seed)
for task_name, task_params in tasks.items():
benchmark = task_name
budget = task_params["budget"]
workloads = task_params["workloads"]
params = task_params.get("params", {})
benchmark_cls = benchmark_registry.get(benchmark)
if benchmark_cls is None:
raise KeyError(f"Task '{benchmark}' not found in the benchmark registry.")
for idx, workload in enumerate(workloads):
problem = benchmark_cls(
task_name=f"{task_name}_{workload}",
task_id=idx,
budget=budget,
seed=seed,
workload=workload,
params=params,
)
test_suits.add_task(problem)
return test_suits
| maopl/TransOpt | transopt/Benchmark/construct_test_suits.py | construct_test_suits.py | py | 1,336 | python | en | code | 3 | github-code | 36 |
74045387624 | #!/usr/bin/env python
# coding: utf-8
# """
# Task-3: Design and implement recursive solution for the below mentioned scenarios.
# There are n industries, each with a water treatment plant, located along a side of a river.
# We will assume that the industries are arranged from left to right. Each industry generates sewage water that must be cleaned in a plant (not necessarily its own) and discharged into the river through pipes, where additional pipes connect neighboring industries.
# If a plant is working it will clean the sewage water from its industry, plus any water coming from a pipe connecting a neighboring industry, and discharge it to the river. However, a plant might not be working.
# In that case the water from its industry, plus any water coming from a neighboring industry.
# must be sent to another industry. Given that water can flow in only one direction inside a pipe, the problem consists of determining the number of different ways it is possible to discharge the cleaned water into the river, for n industries.
# Finally, we can assume that at least one of the plants will work.
# In[ ]:
def discharge_water(n):
if n == 1:
return 1
else:
result = 0
# plant is working
result += 2 * discharge_water(n-1)
# plant is not working
for i in range(2, n+1):
result += discharge_water(i-2) * discharge_water(n-i+1)
return result
| aman2003selwaria/activity | problem 3 (discharge water).py | problem 3 (discharge water).py | py | 1,433 | python | en | code | 0 | github-code | 36 |
33525029807 | from methods import *
testOne = [{'title': 'Evaluation','words': [('bad','good')]},
{'title': 'Potency', 'words': [('weak','strong')]},
{'title': 'Activity', 'words': [('passive','active')]}]
testOneAlt = [{'title': 'Evaluation','words': [('bad','good'),('negative','positive')]},
{'title': 'Potency', 'words': [('weak','strong'),('incompetent','competent'),('fragile','tough'),('ineffective','effective')]},
{'title': 'Activity', 'words': [('passive','active'),('indifferent','caring'),('lazy','energetic')]}]
testTwo = [{'title':'Typicality','words':[('rare','regular'),('exclusive','typical')]},
{'title':'Reality','words':[('imaginary','real'),('abstract','concrete')]},
{'title':'Complexity','words':[('simple','complex'),('limited','unlimited'),('usual','mysterious')]},
{'title':'Organisation','words':[('spasmodic','regular'),('changeable','constant'),('disorganized','organized'),('indefinite','precise')]},
{'title':'Stimulation','words':[('boring','interesting'),('trivial','new')]}]
# The original version returns more accurate results
# hypothesis: Using more synonym words lead to better results
testThree = [{'title':'Male','words':[('female','male'),('girl','boy'),('feminine','masculine')]}]
testThreeAlt = [{'title':'Male','words':[('female','male')]}]
testFour = [{'title':'Good','words': [('bad','good')]},
{'title':'Positive','words':['negative','positive']},
{'title':'Useful','words': [('useless','useful')]},
{'title':'Practical','words':['impractical','practical']},
{'title':'Valuable','words':[('worthless','valuable')]},
{'title':'Convenient','words':[('inconvenient','convenient')]},
{'title': 'Simple','words':[('complex','simple')]}]
testFourAlt = [{'title':'Good/Positive','words':[('bad','good'),('negative','positive')]},
{'title':'Useful/Valuable/Practical','words':[('useless','useful'),('worthless','valuable'),('impractical','practical')]},
{'title':'Easy/Simple','words':[('hard','easy'),('complex','simple')]}]
testFive = [{'title':'Hardworking','words':[('lazy','hardworking')]},{'title':'Beautiful','words':[('ugly','beautiful')]}]
# This is the Word-Embedding Association Test
testSix = [{'title':'Pleasant','words':[('unpleasant','pleasant')]},
{'title':'Male','words':[('female','male')]},
{'title':'Temporary','words':[('permanent','temporary')]},
{'title':'Career','words':[('family','career')]}]
p.pprint(computeScores(['man','woman'],testOne))
p.pprint(computeScores(['man','woman'],testOneAlt))
p.pprint(computeScores(['man','woman'],testTwo))
p.pprint(computeScores(['man','woman'],testThree))
p.pprint(computeScores(['man','woman'],testThreeAlt))
p.pprint(computeScores(['man','woman'],testFive))
p.pprint(computeScores(['openness','closeness'],testFour))
p.pprint(computeScores(['organisation','chaos'],testFour))
p.pprint(computeScores(['extraversion','introversion'],testFour))
p.pprint(computeScores(['extraversion','introversion'],testFourAlt))
p.pprint(computeScores(['openness','conscientiousness','extraversion','agreeableness','neuroticism'],testFour))
p.pprint(computeScores(['openness','conscientiousness','extraversion','agreeableness','neuroticism'],testFourAlt))
p.pprint(computeScores(['asian','african','european'],testFour))
p.pprint(computeScores(['asian','african','european'],testOneAlt))
p.pprint(computeScores(['asian','african','european'],testFive))
p.pprint(computeScores(['insects','flowers'],testSix))
p.pprint(computeScores(['man','woman'],testSix))
| SoliMouse/Semantic-Differential | playground.py | playground.py | py | 3,440 | python | en | code | 1 | github-code | 36 |
37977404672 | '''
Python module containing "Master" classes of easy_gui project.
The classes in here are designed to be subclassed in user applications.
'''
import functools
import os
import sys
import threading
import tkinter as tk
import traceback
from tkinter import ttk
from tkinter import _tkinter
from typing import List, Dict

from .styles import BaseStyle
from . import widgets
def recreate_if_needed(func):
    '''
    Decorator used to enable addition of Sections or Widgets after GUI has been created.
    (that is, can add elements outside of EasyGUI subclass' __init__ method)

    Decorated callables are methods of GridMaster/SectionMaster containers, so
    args[0] is always the instance whose .root leads back to the EasyGUI window.
    '''
    @functools.wraps(func)  # preserve the wrapped method's name/docstring for introspection
    def inner(*args, **kwargs):
        self = args[0]
        value = func(*args, **kwargs)
        if self.root.created:
            self.root.create()  # need to re-create GUI so that the new elements show up!
        return value
    return inner
class GridMaster():
    '''
    Mixin supplying CSS "grid-template-areas" style layout parsing for tk
    containers.  Expects grid_rowconfigure/grid_columnconfigure to be provided
    by a sibling tk base class (tk.Tk, tk.Toplevel, or tk.Frame).
    '''
    def __init__(self):
        # maps area name -> {'first_row', 'last_row', 'first_column', 'last_column'}
        self.grid_areas = {}
        # raw list of row strings most recently passed to configure_grid
        self.grid_configuration = []

    def configure_grid(self, grid_configuration: List[str]) -> None:
        '''
        Specify full-window layout with CSS grid-template-area style list of strings.
        - Each item in provided grid_configuration corresponds to a grid row and spaces
        delimit each cell.
        - Individual cells or rectangular groups of contiguous cells may be indicated by name
        while unnamed cells are specified by one or more periods.

        Prints an error and aborts (leaving grid_areas untouched) if rows declare
        differing numbers of columns.
        '''
        self.grid_configuration = grid_configuration
        self.grid_rows = len(grid_configuration)
        self.grid_columns = len(grid_configuration[0].split())
        for row in grid_configuration:
            # every row must declare the same number of columns as the first row
            # (bug fix: previously compared the first row against itself, so
            # inconsistent configurations were never detected)
            if len(row.split()) != self.grid_columns:
                print('ERROR! Differing number of grid columns specified below:')
                print(grid_configuration)
                return
        names = set(cell for row in grid_configuration for cell in row.split() if '.' not in cell)
        for name in names:
            first_row, last_row, first_column, last_column = None, None, None, None
            for i, row in enumerate(grid_configuration):
                if name in row.split():
                    if first_row is None:
                        first_row = i  # will stay fixed at the first row containing name
                    last_row = i  # will continue to increase for multiple rows
                    if first_column is None:
                        row_list = row.split()
                        first_column = row_list.index(name)  # get far left column of name
                        last_column = len(row_list) - row_list[::-1].index(name) - 1  # reverse to get far right column
            self.grid_areas[name] = {'first_row': first_row, 'last_row': last_row,
                                     'first_column': first_column, 'last_column': last_column}
        # Now make elements expand evenly with window resize by default
        if self.grid_areas != {}:
            limits = self.grid_limits()
            for row in range(limits['min_row'], limits['max_row'] + 1):
                self.grid_rowconfigure(row, weight=1, minsize=10)
            for col in range(limits['min_col'], limits['max_col'] + 1):
                self.grid_columnconfigure(col, weight=1, minsize=10)

    def add_grid_row(self, row_name: str) -> None:
        '''Append a new full-width row named row_name and re-run grid configuration.'''
        if self.grid_configuration == []:
            self.grid_configuration = [row_name]
        else:
            # use .split() (not .split(' ')) so column counting matches configure_grid
            num_columns = len(self.grid_configuration[0].split())
            self.grid_configuration.append(' '.join([row_name] * num_columns))
        self.configure_grid(self.grid_configuration)

    def grid_limits(self) -> dict:
        '''Return min/max row and column indices covered by any named grid area.'''
        # arbitrarily large starting points so no risk of surprising row/col not being captured
        min_row, max_row, min_col, max_col = 500, -500, 500, -500
        for area in self.grid_areas.values():
            min_row = min(min_row, area['first_row'])
            max_row = max(max_row, area['last_row'])
            min_col = min(min_col, area['first_column'])
            max_col = max(max_col, area['last_column'])
        return {'min_row': min_row, 'max_row': max_row, 'min_col': min_col, 'max_col': max_col}
class SectionMaster():
    '''
    Mixin managing child Sections and Widgets for a container
    (the root EasyGUI window, a PopUp, or another Section).
    '''
    def __init__(self):
        # child Section objects keyed by section name
        self.sections: dict = {}
        # child Widget objects keyed by widget name
        self.widgets: dict = {}

    @recreate_if_needed
    def add_section(self, name='', title=False, grid_area=None,
                borderwidth=None, relief=None, tabbed: bool=False, equal_button_width: bool=False, external_section=None):
        '''
        Add a Section object to the parent (root window or other Section).

        If external_section is provided it must be a Section subclass (a class,
        not an instance) which is instantiated here; otherwise a plain Section
        is built, auto-naming it "sectionN" when no name is given.
        Returns the newly created Section.
        '''
        if external_section: # if is an externally-built section is passed in
            if not name:
                name = external_section.__name__
            section = external_section(parent=self, name=name, title=title, grid_area=grid_area,
                borderwidth=borderwidth, relief=relief, tabbed=tabbed, equal_button_width=equal_button_width)
        else:
            if name == '':
                name = f'section{len(self.sections) + 1}'
            if name in self.sections:
                raise ValueError('Unable to add section as a section with the given name already exists!')
            if borderwidth is None:
                borderwidth = self.style.borderwidth
            if relief is None:
                relief = self.style.section_border
            # Next 2 lines set grid_area to be name if not explicitly declared and not already used as a grid_area
            if grid_area is None and name not in [s.grid_area for s in self.sections.values()]:
                grid_area = name
            section = Section(parent=self, name=name, title=title, grid_area=grid_area,
                borderwidth=borderwidth, relief=relief, tabbed=tabbed, equal_button_width=equal_button_width)
        self.sections[name] = section
        return section

    @recreate_if_needed
    def add_widget(self, type='label', text='', widget_name=None, grid_area=None, **kwargs):
        '''
        Add a Widget object to this Section by calling the add_widget function in widgets.py
        (Easier to keep the function there as it needs access to all the individual Widget classes.)
        '''
        return widgets.add_widget(self, type=type, text=text, widget_name=widget_name, grid_area=grid_area, **kwargs)

    def delete_widget(self, widget_name) -> None:
        '''
        Fully delete a widget.
        Pass without issue if the widget doesn't exist.
        '''
        try:
            self.widgets[widget_name].destroy()
            del self.widgets[widget_name]
        except:
            # deliberate best-effort: missing widget (or already-destroyed tk
            # object) is silently ignored
            pass

    def delete_all_widgets(self) -> None:
        '''
        Fully delete all child widgets of this section.
        '''
        # iterate over a copied key list since delete_widget mutates self.widgets
        for w_name in list(self.widgets.keys()):
            self.delete_widget(w_name)

    def _clear_and_recreate_plot(self, mpl_figure, widget_name, grid_area, kwargs):
        '''
        Replace an existing matplotlib widget with a fresh one drawing mpl_figure,
        carrying over the old widget's toolbar, bindings, and warning state.
        NOTE: statement order here matters (see trailing comments).
        '''
        old_widget = self.widgets[widget_name] # grab reference to widget to be deleted so that its place in dict can be given to new widget
        new_widget = self.add_widget(type='matplotlib', widget_name=widget_name, toolbar=old_widget.toolbar, grid_area=grid_area)
        new_widget.bindings = old_widget.bindings
        new_widget.small_figure_warning_given = old_widget.small_figure_warning_given
        new_widget.position()
        new_widget.draw_plot(mpl_figure=mpl_figure)
        new_widget.position() # have to reposition/create Widget
        old_widget.destroy() # destroy after new widget is positioned for slightly less flickering

    @recreate_if_needed
    def add_tab(self, name='', **kwargs):
        '''
        Add a new tab (itself a Section) to this Section's ttk.Notebook.
        Only valid when this Section was created with tabbed=True.
        Returns the new Section, or None if this Section is not tabbed.
        '''
        if not self.tabbed:
            print('Error! Cannot .add_tab to a Section unless tabbed=True when it is created.')
            return
        section = Section(parent=self.tabs, name=name, **kwargs)
        self.sections[name] = section
        self.tabs.add(section, text=name)
        return section

    def delete_section(self, section_name) -> None:
        '''
        Fully delete a section and all of its child widgets.
        Pass without issue if the section doesn't exist.
        '''
        try:
            for key, widget in self.sections[section_name].widgets.items():
                widget._widget.destroy()
            self.sections[section_name].destroy()
            del self.sections[section_name]
        except:
            # deliberate best-effort: missing section is silently ignored
            pass
class EasyGUI(tk.Tk, GridMaster, SectionMaster):
    '''
    Main class to be subclassed for full GUI window.
    '''
    # shared style object; class-level so subclasses and child elements see one style
    style = BaseStyle()

    def __init__(self, alpha: float=1.0, topmost: bool=False, disable_interaction: bool=False, toolwindow: bool=False, fullscreen: bool=False, overrideredirect: bool=False, **kwargs) -> None:
        '''
        Build the root tk window.

        alpha: window opacity (0.0-1.0).
        topmost: keep window above all others.
        disable_interaction: Windows-only click pass-through.
        toolwindow: Windows-only small-titlebar tool window.
        fullscreen: start in fullscreen mode.
        overrideredirect: hide the OS titlebar/close button.
        '''
        super().__init__()
        GridMaster.__init__(self)
        SectionMaster.__init__(self)
        EasyGUI.style.create_font() # have to generate font.Font object after initial tk root window is created
        self.key_log = [] # record keys/buttons triggered
        # (trigger_string, callback) pairs checked on every key press
        self.key_triggers = [('closegui', lambda: self.close())]
        self.icon(bitmap=os.path.join(os.path.dirname(__file__), 'resources', 'transparent.ico'), default=True)
        self.title('EasyGUI')
        self.geometry("300x180+100+60") # format of "WIDTHxHEIGHT+(-)XPOSITION+(-)YPOSITION"
        # Instead of setting .geometry, can also set "width", "height" to integer values
        # and "center" to True in application subclass to size and center window
        self.transparent = False
        self.configure(background=self.style.window_color)
        if self.style.transparent:
            self.wm_attributes('-transparentcolor', 'white') # turn off window shadow
        # See documentation of below WINDOWS options here: https://wiki.tcl-lang.org/page/wm+attributes
        self.wm_attributes('-alpha', alpha)
        self.wm_attributes('-fullscreen', fullscreen)
        self.wm_attributes('-topmost', topmost) # make root window always on top
        self.overrideredirect(overrideredirect) # hide root window drag bar and close button
        try:
            self.wm_attributes('-disabled', disable_interaction) # disables window interaction for click pass through
            self.wm_attributes('-toolwindow', toolwindow) # makes a window with a single close-button (which is smaller than usual) on the right of the title bar
        except Exception: # above options only for Windows system
            pass
        # apply shared colors/font to all ttk widgets via the root '.' style
        s = ttk.Style()
        s.configure('.', background=self.style.widget_bg_color)
        s.configure('.', font=self.style.font)
        s.configure('.', foreground=self.style.text_color)
        # set True once .create() has positioned all elements (see recreate_if_needed)
        self.created = False

    def __init_subclass__(cls, **kwargs):
        '''
        Wraps user subclass __init__ to implicitly handle the EasyGUI.__init__ call along with
        calling .create() after application is fully defined in subclass __init__ method
        '''
        old_init = cls.__init__ # reference to original subclass method so new_init isn't recursive
        def new_init(self, *args, **kwargs):
            EasyGUI.__init__(self, **kwargs) # in place of super().__init__() in subclass __init__
            try:
                old_init(self, *args, **kwargs)
            except TypeError:
                print('\n* Are you passing in kwargs to GUI creation?\n* If so, remember to put a "**kwargs" in the __init__ function!\n')
                traceback.print_exc()
            self.create() # populates GUI elements
            # now change window geometry if "width", "height" and/or "center" attributes are set in subclass' __init__ method
            # seems easier to allow this than forcing self.geometry() usage as that is a bit cryptic and hard to remember
            # auto-centering by setting self.center = True is also convenient as usually that behavior is desired
            self.update_idletasks() # need to run here so any geometry changes from subclass __init__ run before checking sizes
            current_width, current_height = self.winfo_width(), self.winfo_height()
            # account for the OS window frame and titlebar when computing outer size
            frame_width = self.winfo_rootx() - self.winfo_x()
            window_width = current_width + 2 * frame_width
            titlebar_height = self.winfo_rooty() - self.winfo_y()
            window_height = current_height + titlebar_height + frame_width
            if hasattr(self, 'width'):
                window_width = self.width
            if hasattr(self, 'height'):
                window_height = self.height
            if hasattr(self, 'center') and self.center == True:
                center_x_val = int(self.winfo_screenwidth() / 2 - window_width / 2)
                center_y_val = int((self.winfo_screenheight() / 2 - window_height / 2))
                center_y_val -= 30 # offset a little higher than middle since many people have toolbar on bottom of screen
                if center_x_val < 0: # don't let left side of window go beyond screen if too wide
                    center_x_val = 0
                if center_y_val < 0: # don't let top of window go above screen if too tall
                    center_y_val = 0
                self.geometry(f'{window_width}x{window_height}+{center_x_val}+{center_y_val}')
            elif hasattr(self, 'width') or hasattr(self, 'height'):
                self.geometry(f'{window_width}x{window_height}')
            self.bind_all('<Key>', self.log_keys)
            self.mainloop() # runs tkinter mainloop
        cls.__init__ = new_init # overwrite subclass __init__ method

    @property
    def root(self):
        '''Used by downstream elements to reference EasyGUI as root'''
        return self

    def log_keys(self, event):
        '''
        Record key presses up to a maximum of 100 characters.
        Also check to see if any triggers are met and execute as needed.
        '''
        self.key_log.append(event.char)
        self.key_log = self.key_log[-100:]
        self.check_key_triggers()

    def check_key_triggers(self):
        '''
        Check if a key trigger has been met,
        run function if so, and clear out key log.
        (so next key doesn't trigger same result)
        '''
        key_str = ''.join(self.key_log)
        for trigger, action in self.key_triggers:
            if trigger in key_str:
                self.key_log = []
                action()
                break

    def add_key_trigger(self, trigger, func, separate_thread: bool=False):
        '''
        Bind a function to a sequence of key presses.
        Can specify as separate_thread=True for long-running functions.
        '''
        if separate_thread:
            def threaded_func(*args):
                threading.Thread(target=func).start()
            self.key_triggers.append((trigger, threaded_func))
        else:
            self.key_triggers.append((trigger, func))

    def close(self):
        '''
        Alias for self.destroy.
        Can be used by any GUI element to close the window via "self.root.close()"
        since self.root will travel upstream until it hits EasyGUI.close().
        '''
        self.destroy()

    def icon(self, bitmap, default: bool=False) -> None:
        '''
        Alternate method to call tk.Tk iconbitmap method using altered path handling
        so that PyInstaller can package application with specified .ico file.
        If not default, warning message is printed on failing to locate .ico file.
        '''
        try:
            # NOTE(review): resource_path is not defined in this part of the
            # module - presumably a PyInstaller path helper defined elsewhere in
            # the file; confirm it exists before refactoring
            super().iconbitmap(bitmap=resource_path(bitmap))
        except _tkinter.TclError:
            if default:
                pass # Pass silently if default .ico not found occurs when using PyInstaller and not adding transparent.ico to "datas"
            else:
                print(f'Cannot locate {bitmap}! If using PyInstaller, be sure to specify this file in "datas".')

    def create(self, force_row=False) -> None:
        '''
        Positions GUI elements in window.
        May be called recursively by child Sections as elements are positioned.
        '''
        for child in {**self.widgets, **self.sections}.values():
            try:
                child.create(force_row) # if child is another Section object
            except AttributeError:
                child.position(force_row) # if child is a Widget object
        self.created = True

    def add_menu(self,
                 commands={'File': lambda: print('File button'), 'Edit': lambda: print('Edit button')},
                 cascades={'Options': {'Option 1': lambda: print('Option 1'), 'Option 2': lambda: print('Option 2')}}) -> None:
        '''
        Add a Menu to the top of the root window.

        commands: mapping of top-level menu label -> callback.
        cascades: mapping of cascade label -> {item label: callback}.
        (default dicts are read-only examples, never mutated here)
        '''
        self.menu = tk.Menu(self)
        for label, cmd in commands.items():
            self.menu.add_command(label=label, command=cmd)
        for cascade, c_commands in cascades.items():
            cascade_menu = tk.Menu(self.menu, tearoff=0)
            for label, cmd in c_commands.items():
                cascade_menu.add_command(label=label, command=cmd)
            self.menu.add_cascade(label=cascade, menu=cascade_menu)
        self.config(menu=self.menu)

    def __repr__(self):
        return 'Main EasyGUI Application'

    def popup(self, *args, **kwargs):
        '''
        Returns a context manager for generating a popup window.  Example usage:
            with self.popup() as popup:
                popup.add_widget('lbl', 'Test1')
                popup.add_widget('btn', 'Test Button', command_func=lambda *args: print('Test Button clicked'))
        '''
        return PopUp(*args, **kwargs)
class PopUp(tk.Toplevel, GridMaster, SectionMaster):
    '''
    Basically a mini EasyGUI class that inherits from tk.Toplevel instead of tk.Tk.
    Re-implements basic methods of EasyGUI class so widgets can be added.
    '''
    def __init__(self, *args, width: int=300, height: int=180, x: int=120, y: int=80, **kwargs):
        '''
        Create a popup window of the given pixel size at screen position (x, y).
        Pass tooltip=True for a borderless, click-through, semi-transparent
        tooltip-style window instead of a normal popup.
        '''
        if kwargs.get('tooltip', False):
            # tooltip mode: frameless, non-interactive, semi-transparent overlay
            super().__init__()
            GridMaster.__init__(self)
            SectionMaster.__init__(self)
            self.wm_attributes('-disabled', True) # disables window interaction for click pass through
            self.wm_overrideredirect(True) # removes window
            self.wm_attributes('-alpha', 0.8)
            self.geometry(f'{width}x{height}+{x}+{y}') # format of "WIDTHxHEIGHT+(-)XPOSITION+(-)YPOSITION"
            self.style = EasyGUI.style
            self.style.create_font()
            self.configure(bg=self.style.tooltip_color)
        else:
            # normal popup window mode
            super().__init__()
            GridMaster.__init__(self)
            SectionMaster.__init__(self)
            self.icon(bitmap=os.path.join(os.path.dirname(__file__), 'resources', 'transparent.ico'), default=True)
            self.geometry(f'{width}x{height}+{x}+{y}') # format of "WIDTHxHEIGHT+(-)XPOSITION+(-)YPOSITION"
            self.style = EasyGUI.style
            self.style.create_font()

    def __enter__(self):
        # context-manager entry: mark not-yet-created so widgets can be added
        self.created = False
        return self

    def __exit__(self, *args):
        # context-manager exit: position everything that was added inside the block
        self.create()

    @property
    def root(self):
        '''Used by downstream elements to reference EasyGUI as root'''
        return self

    def icon(self, bitmap, default: bool=False) -> None:
        '''
        Alternate method to call tk.Tk iconbitmap method using altered path handling
        so that PyInstaller can package application with specified .ico file.
        If not default, warning message is printed on failing to locate .ico file.
        '''
        try:
            # NOTE(review): resource_path is presumably a PyInstaller path helper
            # defined elsewhere in this file - confirm before refactoring
            super().iconbitmap(bitmap=resource_path(bitmap))
        except _tkinter.TclError:
            if default:
                pass # Pass silently if default .ico not found occurs when using PyInstaller and not adding transparent.ico to "datas"
            else:
                print(f'Cannot locate {bitmap}! If using PyInstaller, be sure to specify this file in "datas".')

    def create(self, force_row=False) -> None:
        '''Copied from EasyGUI.create'''
        for name, section in self.sections.items():
            section.create(force_row=force_row)
        self.created = True

    @recreate_if_needed
    def add_widget(self, *args, **kwargs):
        '''Copied from EasyGUI.add_widget'''
        # widgets always live inside a Section, so lazily create a hidden default one
        if '_default' not in self.sections:
            self.add_section('_default')
        return self.sections['_default'].add_widget(*args, **kwargs)

    def __repr__(self):
        return 'EasyGUI PopUp Window'
class Section(tk.Frame, GridMaster, SectionMaster):
'''
A Section is a tk.Frame used for storing and managing widgets.
Sections exist as children of the root (EasyGUI) window or other Sections.
'''
    def __init__(self, parent=None, name='', title=False, grid_area=None,
                 tabbed: bool=False, equal_button_width: bool=False, **kwargs) -> None:
        '''
        Build a Section frame inside parent.

        title: True to use name as a label, or a string for custom label text.
        grid_area: name of the grid area (from configure_grid) this Section occupies.
        tabbed: create a ttk.Notebook so tabs can be added via add_tab.
        equal_button_width: resize all child Buttons to the widest one on create().
        kwargs may include borderwidth and relief for the tk.Frame.
        '''
        borderwidth = kwargs.get('borderwidth', 1)
        relief = kwargs.get('relief', 'ridge')
        # a non-default relief is invisible without a border, so force one
        if relief != 'ridge' and not borderwidth:
            borderwidth = 1
        self.tabbed = tabbed
        super().__init__(master=parent,
                         bg=EasyGUI.style.section_color,
                         padx=EasyGUI.style.frame_padx,
                         pady=EasyGUI.style.frame_pady,
                         borderwidth=borderwidth,
                         relief=relief)
        GridMaster.__init__(self)
        SectionMaster.__init__(self)
        self.parent = parent
        self.name = name
        self.grid_area = grid_area
        if tabbed:
            # give the Notebook style/root attributes so child tab Sections resolve them
            self.tabs = ttk.Notebook(self)
            self.tabs.style = self.style
            self.tabs.root = self.root
        self.equal_button_width = equal_button_width
        if title: # title kwargs can be provided as True or a string
            if isinstance(title, str): # if string, use title for label text
                self.add_widget(type='label', text=title)
            elif title == True: # if True, use the name as the label text
                self.add_widget(type='label', text=name)
def __init_subclass__(cls, **kwargs):
'''
Wraps user subclass __init__ to implicitly handle the Section.__init__ call.
This avoids the need for subclass to use "super().__init__(*args, **kwargs)"
'''
old_init = cls.__init__ # reference to original subclass method so new_init isn't recursive
def new_init(self, *args, **kwargs):
Section.__init__(self, **kwargs) # in place of super().__init__() in subclass __init__
try:
old_init(self, *args, **kwargs)
except TypeError:
# traceback.print_exc()
# print('\n\n* Are you subclassing Section or passing in kwargs to Section creation?\n* If so, remember to put a "**kwargs" in the __init__ function!\n')
old_init(self)
cls.__init__ = new_init # overwrite subclass __init__ method
@property
def style(self):
'''Goes upsteam to evenually reference EasyGUI.style'''
return self.parent.style
@property
def root(self):
'''Goes upsteam to evenually reference EasyGUI as root'''
return self.parent.root
def create(self, force_row: bool=False):
'''
Positions this section within the parent along with
positioning all children (Sections and/or Widgets).
'''
self.position(force_row)
if self.equal_button_width:
self.match_child_button_widths()
for child in {**self.widgets, **self.sections}.values():
try:
child.create(force_row) # if child is another Section object
except AttributeError:
child.position(force_row) # if child is a Widget object
def match_child_button_widths(self):
child_buttons = [child for child in self.widgets.values() if isinstance(child, widgets.Button)]
if len(child_buttons) > 1:
max_width = int(round(max(child.width / 7.0 for child in child_buttons if not child.image)))
for child in child_buttons:
if not child.image:
child.config(width=max_width)
def position(self, force_row: bool=False) -> None:
'''
Physically position this Section within its parent container.
'''
try:
if hasattr(self.parent, 'grid_areas'):
if self.parent.grid_areas != {} and self.grid_area and not force_row:
try:
if not hasattr(self.parent, 'tabbed') or not self.parent.tabbed:
bounds = self.parent.grid_areas[self.grid_area]
self.grid(row=bounds['first_row'], column=bounds['first_column'], rowspan=bounds['last_row']-bounds['first_row']+1, columnspan=bounds['last_column']-bounds['first_column']+1, sticky='NSEW')
else:
self.pack()
if self.tabbed:
self.tabs.pack()
return # early return if everything works fine with initial attempt (no other actions needed)
except KeyError:
if self.grid_area != self.name: # basically, if user-specified grid_area (are same if programatically set grid_area)
print(f'"{self.grid_area}" not found in parent\'s grid areas.\nResorting to a new row.')
self.parent.add_grid_row(self.name)
self.grid_area = self.name
self.parent.create()
except _tkinter.TclError:
print(f'\n--- GRID FAILED for Section: "{self.name}" ---\nTry ensuring "grid_area" arg is given for all Sections in a given parent.\nAdding to a new row instead.')
self.parent.create(force_row=True) # go back and fully recreate section forcing all children to be packed/in new rows
@property
def width(self) -> float:
'''
Estimate and return width desired by this Section.
'''
return float(max(widget.width for widget in self.widgets.values()))
@property
def height(self) -> float:
'''
Estimate and return height desired by this Section.
'''
return float(sum(widget.height for widget in self.widgets.values()))
def __repr__(self) -> str:
return f'Section: "{self.name}"'
def resource_path(relative_path):
    '''
    Get the absolute path to a bundled resource.

    When running from a PyInstaller bundle, data files are unpacked into a
    temporary folder exposed as ``sys._MEIPASS``; otherwise the path is
    resolved relative to the current working directory.
    '''
    try:
        base_path = sys._MEIPASS  # set only inside a PyInstaller bundle
    except AttributeError:  # was a bare "except:"; catch only the expected failure
        base_path = os.path.abspath('.')
    return os.path.join(base_path, relative_path)
| zachbateman/easy_gui | easy_gui/master_classes.py | master_classes.py | py | 26,902 | python | en | code | 1 | github-code | 36 |
10507977468 | # -*- coding:utf-8 -*-
# 016 ファイルをN分割する
# 自然数Nをコマンドライン引数などの手段で受け取り,入力のファイルを行単位でN分割せよ.同様の処理をsplitコマンドで実現せよ.
# 参考資料
# http://itpro.nikkeibp.co.jp/article/COLUMN/20060227/230888/
# 解答
# split -l 10 hightemp.txt out
def split_file(fileName, n):
    '''
    Split fileName into chunks of n lines each, written to out0.txt,
    out1.txt, ... in the current directory (the shell equivalent is
    ``split -l n fileName out``).

    Fixes over the previous implementation:
    - the input file handle is closed (context manager) instead of leaked
    - output files are opened in 'w' mode (the old 'a' mode appended to
      leftovers from earlier runs)
    - no empty trailing outN.txt is created when the line count is an
      exact multiple of n
    - n <= 0 now raises ValueError instead of looping forever
    '''
    with open(fileName, 'r') as f:
        lines = f.readlines()
    # range(..., n) raises ValueError for n == 0; reject negatives too.
    if n <= 0:
        raise ValueError('n must be a positive number of lines per file')
    for file_index, start in enumerate(range(0, len(lines), n)):
        with open('out' + str(file_index) + '.txt', 'w') as out:
            out.writelines(lines[start:start + n])
if __name__ == '__main__':
    # Read the chunk size (lines per output file) from stdin.
    # NOTE(review): raw_input() exists only in Python 2; under Python 3
    # this would be input().
    n = int(raw_input())
    split_file('hightemp.txt', n)
| c-su/python_nlp_100practice | chapter2/nlp016.py | nlp016.py | py | 923 | python | ja | code | 1 | github-code | 36 |
1912079774 | import os
from time import sleep
#defining the menu
def menu():
    """Pause briefly, clear the console, and print the option menu."""
    sleep(1)
    os.system('cls')
    for entry in ("\nMENU", "1.Metric", "2.Imperial", "3.Quotes", "4.Quit"):
        print(entry)
#define function to calculate BMI in metric system
def metric():
    """Ask for weight (kg) and height (m), print the BMI and its category.

    Uses the module-level global ``name`` (set in the main script) in the
    category messages.
    """
    kilograms = float(input("Please enter your weight in kg: "))
    meters = float(input("Please enter your height in meters: "))
    bmi = kilograms / (meters * meters)
    print("your BMI in metric system is : " + str(bmi))
    # Guard first, then walk the WHO-style thresholds from low to high.
    if bmi <= 0:
        print("impossible!!!!")
    elif bmi <= 16:
        print(f"{name},you are very underweight")
    elif bmi <= 18.5:
        print("You are underweight")
    elif bmi <= 25:
        print(f"Congrats,{name}! You are Healthy")
    elif bmi <= 30:
        print(f"{name},you are overweight")
    else:
        print(f"{name},you are very overweight/obese")
#define function to calculate BMI in imperial system
def imperial():
    """Ask for weight and height in imperial units, print the BMI and its category.

    Uses the module-level global ``name`` (set in the main script) in the
    category messages.

    Fixes over the previous version:
    - the result line wrongly said "metric system" for the imperial result
    - weight was parsed with int(), rejecting values such as 150.5 even
      though metric() accepts fractional input
    """
    weight = float(input("Please enter your weight: "))  # was int(); now accepts e.g. 150.5
    height = float(input("Please enter your height: "))
    BMI_imperial = (weight / (height * height)) * 703
    print(name + ", your BMI in imperial system is : " + str(BMI_imperial))  # was "metric system"
    if BMI_imperial > 0:
        if BMI_imperial <= 16:
            print(f"{name},you are very underweight")
        elif BMI_imperial <= 18.5:
            print("You are underweight")
        elif BMI_imperial <= 25:
            print(f"Congrats,{name}! You are Healthy")
        elif BMI_imperial <= 30:
            print(f"{name},you are overweight")
        else:
            print(f"{name},you are very overweight/obese")
    else:
        print("impossible!!!!")
#------------------MAIN----------------------------
# Greet the user, collect a name, then loop until a valid menu choice.
print("\nWelcome to BMI calculator.")
print("\nA simple way to calculate if you have to go to gym or not.")
sleep(2)
os.system('cls')
print("Please enter your name bellow: \n")  # NOTE(review): "bellow" is a typo in the user-facing text
name= input()
menu()
choice=input("Your choice: ")
while True:
    if choice == '1':
        metric()
        break
    elif choice == '2':
        imperial()
        break
    elif choice in ['q','quit','Quit','4']:
        print("GoodBye " + name)
        exit()
    else:
        # NOTE(review): menu option "3.Quotes" has no handler, so choosing
        # 3 lands here; also this re-prompt prints no prompt text.
        print("Sorry , wrong choice, try again")
        menu()
        choice=input()
# -*- coding:utf-8 -*-
# Download every hero skin image listed in the Honor of Kings hero list.
import requests
import json
import os
import time

start = time.time()

# requests.Response has no .read(); use .content instead.  Decoding with
# 'utf-8-sig' strips a leading UTF-8 BOM if present -- presumably the
# reason for the old ``encode('utf-8')[3:]`` hack.
respond = requests.get('http://pvp.qq.com/web201605/js/herolist.json')
json_hero = json.loads(respond.content.decode('utf-8-sig'))

x = 0  # number of skin images downloaded so far
hero_dir = 'D:\\img\\'
if not os.path.exists(hero_dir):
    os.mkdir(hero_dir)

# Iterate over the hero list itself (the previous code iterated over
# range(len(hero_dir)), i.e. the characters of the directory path, and
# wrote str(json) -- the module object -- into the file name).
for m in range(len(json_hero)):
    ename = json_hero[m]['ename']
    cname = json_hero[m]['cname']
    # NOTE(review): assumes every entry has a 'skin_name' key -- confirm
    # against the live JSON before a long run.
    skin_names = json_hero[m]['skin_name'].split('|')

    for bigskin in range(1, len(skin_names) + 1):
        url_picture = ('http://game.gtimg.cn/images/yxzj/img201605/heroimg/hero-info/'
                       + str(ename) + '/' + str(ename) + '-bigskin-' + str(bigskin) + '.jpg')
        picture = requests.get(url_picture).content
        with open(hero_dir + cname + "-" + skin_names[bigskin - 1] + '.jpg', 'wb') as f:
            f.write(picture)
        x = x + 1
        print("正在下载....第" + str(x) + "张")

end = time.time()
time_second = end - start
print("共下载" + str(x) + "张,共耗时" + str(time_second) + "秒")
class Solution(object):
    def searchInsert(self, nums, target):
        """Return the index of target in sorted nums, or the index where it
        would be inserted to keep nums sorted (LeetCode 35).

        Standard lower-bound binary search over the half-open range
        [left, right), O(log n).

        The previous version could loop forever (``left = mid`` made no
        progress, e.g. nums=[1,3,5,6], target=2), narrowed the wrong way
        with ``right = mid + 1``, and returned ``mid`` instead of the
        insertion point.
        """
        left, right = 0, len(nums)
        while left < right:
            mid = (left + right) // 2
            if nums[mid] < target:
                left = mid + 1  # insertion point is strictly right of mid
            else:
                right = mid  # nums[mid] >= target: mid may be the answer
        return left
s = Solution()
# Demo run: 5 is found at index 2 in [1, 3, 5, 6].
print(s.searchInsert(nums = [1,3,5,6], target = 5))
from sqlalchemy import create_engine  # NOTE(review): imported but never used in this script
from config import MYSQL_ROSPARK_DB
from db.rospark_db import Base, make_engine
# Rebuild the "parkings" table: drop ALL tables known to Base.metadata,
# then recreate only "parkings".  NOTE(review): other dropped tables are
# NOT recreated -- confirm this is intentional before running on real data.
try:
    engine = make_engine(MYSQL_ROSPARK_DB)
    Base.metadata.drop_all(bind=engine)
    Base.metadata.create_all(bind=engine,
                             tables=[Base.metadata.tables["parkings"]])
except Exception as e:
    # Best-effort script: report the failure instead of crashing.
    print("Error: " + str(e))
| andxeg/LeadersOfDigital_2020_parking_system | src/recreate_db.py | recreate_db.py | py | 374 | python | en | code | 1 | github-code | 36 |
74483870822 | """
Implement the functions used when the user
chooses either platform (Chess.com or Lichess)
"""
from collections import OrderedDict
def calculate_percentages(moves_dict):
    """
    Fill in the winning, losing and drawing percentages for every
    position reachable from moves_dict.

    For each entry under "next_moves": the white and black percentages
    are the rounded share of "count", and the draw percentage is the
    remainder to 100.  Recurses into each position's own "next_moves".
    """
    for info in moves_dict["next_moves"].values():
        white = round(info["white_wins"] / info["count"] * 100)
        black = round(info["black_wins"] / info["count"] * 100)
        info["white_percentage"] = white
        info["black_percentage"] = black
        info["draw_percentage"] = 100 - (white + black)
        # Descend into this position's continuations.
        calculate_percentages(info)
def order_dict(moves_dict):
    """
    Return a copy of moves_dict as an OrderedDict whose "next_moves"
    entries are sorted by descending "count" (the number of games in
    which the position was reached), recursively at every depth.

    E.g. if 1.e4 was reached in 500 games and 1.d4 in 200, 1.e4 comes
    first in the result.

    Improvements over the previous implementation:
    - uses a single stable sorted() pass instead of an O(n^2)
      repeated-maximum scan
    - no longer zeroes out the "count" fields of the input dictionary
      (the old selection loop destroyed its own input as a side effect)
    Ties keep their original insertion order, matching the old
    strict-greater-than selection.
    """
    new_dict = OrderedDict()
    # The root of the tree holds only "next_moves"; every nested position
    # additionally carries its statistics (set by calculate_percentages).
    if len(moves_dict) != 1:
        for key in ("count", "white_wins", "black_wins",
                    "white_percentage", "black_percentage", "draw_percentage"):
            new_dict[key] = moves_dict[key]
    new_dict["next_moves"] = OrderedDict(
        (move, order_dict(info))
        for move, info in sorted(moves_dict["next_moves"].items(),
                                 key=lambda item: item[1]["count"],
                                 reverse=True)
    )
    return new_dict
def increment_move_in_moves_dict(moves_dict, move, result):
    """
    Record that 'move' (already present in moves_dict) was reached in one
    more game.  result == 1 credits white, result == 0 credits black, and
    any other value leaves both win counters unchanged (a draw).
    """
    stats = moves_dict["next_moves"][move]
    stats["count"] += 1
    if result == 1:
        stats["white_wins"] += 1
    elif result == 0:
        stats["black_wins"] += 1
def create_move_in_moves_dict(moves_dict, move, result):
    """
    Register a brand-new 'move' in moves_dict with a count of one game,
    crediting the win to white (result == 1), black (result == 0), or
    neither for any other value (a draw).
    """
    moves_dict["next_moves"][move] = {
        "count": 1,
        "white_wins": 1 if result == 1 else 0,
        "black_wins": 1 if result == 0 else 0,
        "next_moves": {},
    }
| felaube/chess-games-explorer | helpers.py | helpers.py | py | 4,149 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.