seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
11194223295 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Use this to execute the differential kinematics
controller in our kinecontrol paper.
'''
from __future__ import print_function
import Sofa
import math
import sys, os
import time
import logging
import datetime
import numpy as np
from utils import *
from config import *
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
logger = logging.getLogger(__name__)
# generate sinusoid trajectory for head
t, x = gen_sinusoid(amp=.8, freq=2, phase=30, interval=[0.1, 1, 0.01])
# https://www.sofa-framework.org/community/forum/topic/get-the-position-value-from-a-mechanicalobject-point-in-python/
def moveRestPos(rest_pos, pose):
    """Translate every point of ``rest_pos`` by the (dx, dy, dz) offset in ``pose``.

    Returns the SOFA-style space-separated position string
    '  x0 y0 z0 x1 y1 z1 ...' (the leading blank of the historical output is
    preserved so downstream string consumers are unaffected).

    rest_pos : sequence of points; only indices [0..2] (x, y, z) are read,
               so 7-component rigid poses are also accepted.
    pose     : (dx, dy, dz) translation offset.
    """
    dx, dy, dz = pose
    parts = [' ']  # keep the historical leading blank of the output string
    for point in rest_pos:
        parts.append(str(point[0] + dx))
        parts.append(str(point[1] + dy))
        parts.append(str(point[2] + dz))
    # ' '.join is O(n); the previous repeated '+' concatenation was quadratic
    return ' '.join(parts)
def rotateRestPos(rest_pos, rx, centerPosY, centerPosZ):
    """Rotate every point of ``rest_pos`` by ``rx`` radians about the x-axis,
    pivoting around (centerPosY, centerPosZ) in the y-z plane; x is unchanged.

    Returns the SOFA-style space-separated position string (leading blank
    preserved, same as moveRestPos).

    Fixes the Python-2-only ``xrange`` (the sibling moveRestPos already used
    ``range``) and hoists the loop-invariant cos/sin out of the loop.
    """
    cos_rx = math.cos(rx)
    sin_rx = math.sin(rx)
    parts = [' ']  # keep the historical leading blank of the output string
    for point in rest_pos:
        dy = point[1] - centerPosY
        dz = point[2] - centerPosZ
        # standard 2-D rotation in the y-z plane about the pivot
        newRestPosY = dy * cos_rx - dz * sin_rx + centerPosY
        newRestPosZ = dy * sin_rx + dz * cos_rx + centerPosZ
        parts.append(str(point[0]))
        parts.append(str(newRestPosY))
        parts.append(str(newRestPosZ))
    return ' '.join(parts)
class controller(Sofa.PythonScriptController):
    '''
    Differential-kinematics controller for the kinecontrol scene: caches the
    IAB dome and patient mechanical-object handles, translates the patient
    mesh along +x each animation step until a threshold pose is crossed,
    live-plots the trajectory, then saves it and stops the animation.

    For examples, see:
    + Keyboard Control:
        - https://github.com/lakehanne/sofa/blob/master/examples/Tutorials/StepByStep/Dentistry_Python/keyboardControl.py
    + Parallel and SSH Launcher:
        - https://github.com/lakehanne/sofa/blob/master/tools/sofa-launcher/launcher.py
    + OneParticle:
        - https://github.com/lakehanne/sofa/blob/master/tools/sofa-launcher/example.py
    '''
    def initGraph(self, root):
        # Called by SOFA once when the scene graph is created.
        # `move_dist`, `growth_rate`, `max_pressure`, `thresholds` and
        # `patient_dofs_filename` come from the star-imported config module.
        self.move_dist = move_dist #(0, .40, 0)
        self.growth_rate = growth_rate #.5 #was .05
        self.max_pressure = max_pressure #100 # was 15
        # controls if IABs should continue to be inflated in a open-loop setting
        self.is_inflated = True
        self.deltaTime = root.findData('dt').value
        # print('deltaTime ', self.deltaTime, type(self.deltaTime))
        self._fig = plt.figure()
        self._gs = gridspec.GridSpec(1,1) # rows cols
        self.traj_plotter = HeadTrajPlotter(self._fig, self._gs[0]) # subplot in gridspec
        self.patient = root.getChild('patient')
        self.patient_dofs = self.patient.getObject('patient_dofs')
        pat_rest_pose = self.patient_dofs.findData('rest_position').value
        self.thresholds = thresholds
        self.first_iter = True
        self.root = root
        logger.debug('patient initial pose {}'.format(thresholds['patient_trans']))
        # get base IABs
        self.base_neck_left = root.getChild('base_neck_left')
        self.base_neck_right = root.getChild('base_neck_right')
        self.base_skull_left = root.getChild('base_skull_left')
        self.base_skull_right = root.getChild('base_skull_right')
        # get side IABs
        self.side_fore_left = root.getChild('side_fore_left')
        self.side_chin_left = root.getChild('side_chin_left')
        self.side_fore_right = root.getChild('side_fore_right')
        self.side_chin_right = root.getChild('side_chin_right')
        # obtain associated dofs and cavity dofs
        self.base_neck_left_dofs = self.get_dome_dofs(self.base_neck_left)
        self.base_neck_right_dofs = self.get_dome_dofs(self.base_neck_right)
        self.base_skull_left_dofs = self.get_dome_dofs(self.base_skull_left)
        self.base_skull_right_dofs = self.get_dome_dofs(self.base_skull_right)
        self.is_chart_updated = False
        # use this to track the x, y and z positions of the patient over time
        self._x, self._y, self._z = [], [], []
        # visualization: display_chart (utils helper) polls run_traj_plotter
        display_chart(self.run_traj_plotter)
        # plt.ioff()
        # plt.show()
        # io
        self._pat_dofs_filename = patient_dofs_filename
        self.max_vals = 0 # maximum positional values in the patient

    # domes' mechanical states
    def get_dome_dofs(self, node):
        'dof name shall be in the form patient or base_neck etc'
        # Collect the mechanical-object handles of one IAB dome node and
        # return them in a single attribute-accessible Bundle (utils helper).
        dh_dofs = node.getObject('dh_dofs') # dome head
        # dh_collis_dofs = node.getObject('dh_collis_dofs')
        # cavity
        cav_node = node.getChild('DomeCavity')
        cav_dofs = cav_node.getObject('dome_cav_dofs')
        pressure_constraint = cav_node.getObject('SurfacePressureConstraint')
        # pressure_constraint_collis = node.getChild('dome_cav_collis_dofs')
        # dome cover back
        cover_node = node.getChild('DomeCover')
        cover_dofs = cover_node.getObject('dome_cover_dofs')
        # cover collis node
        cover_collis_node = node.getChild('DomeCoverCollis')
        cover_collis_dofs = cover_collis_node.getObject('dome_cover_collis_dofs')
        return Bundle(dict(dh_dofs=dh_dofs,
                cav_dofs=cav_dofs,
                pressure_constraint=pressure_constraint, # cavity
                cover_dofs=cover_dofs,
                cover_collis_dofs=cover_collis_dofs))

    def bwdInitGraph(self,node):
        # find the position at the end of the shape (which has the biggest x coordinate)
        # Positions = self.patient_dofs.findData('position').value
        Positions = self.patient_dofs.position#.value
        max_x, max_y, max_z = 0, 0, 0
        max_idx_x, max_idx_y, max_idx_z = 0, 0, 0
        # Linear scan for the per-axis maxima and the vertex indices at which
        # they occur.
        # NOTE(review): initialising the maxima to 0 assumes at least one
        # vertex has a positive coordinate on each axis -- confirm.
        for i in range(len(Positions)):
            if Positions[i][0] > max_x:
                max_idx_x = i
                max_x = Positions[i][0]
            if Positions[i][1] > max_y:
                max_idx_y = i
                max_y = Positions[i][1]
            if Positions[i][2] > max_z:
                max_idx_z = i
                max_z = Positions[i][2]
        #
        max_ids = Bundle(dict(max_idx_x=max_idx_x, max_idx_y=max_idx_y, max_idx_z=max_idx_z, position=Positions))
        self.max_vals = Bundle(dict(max_x=max_x, max_y=max_y, max_z=max_z))
        # print('max x,y,z indices: {}, {}, {}'.format(max_idx_x, max_idx_y, max_idx_z))
        print('patient positions [x,y,z] {}, {}, {}'.format(max_x, max_y, max_z))
        return 0

    def run_traj_plotter(self):
        # Callback polled by display_chart: redraw only when fresh data
        # arrived, then clear the flag so each sample is drawn once.
        if self.is_chart_updated:
            self.traj_plotter.update(self.data)
            # time.sleep(.11)
            self.is_chart_updated = False

    def update_head_pose(self):
        # Refresh self.data (consumed by run_traj_plotter) from the patient's
        # current rest positions.
        rest_pose = self.patient_dofs.findData('rest_position').value
        # rest pose is a lisrt
        x, y, z = [t[0] for t in rest_pose], [t[1] for t in rest_pose], [t[2] for t in rest_pose]
        # use 2-norm of x, y, and z
        # NOTE(review): axis=0 yields one norm per coordinate column (3
        # values), not one per vertex; if a per-vertex norm was intended this
        # should be axis=1 -- confirm against HeadTrajPlotter.update.
        self.data = np.linalg.norm(np.c_[x, y, z], axis=0)
        self.is_chart_updated = True

    def onBeginAnimationStep(self, deltaTime):
        # Runs before every simulation step: refresh node/DOF handles, move
        # the patient along +x while below the threshold, and once the
        # threshold is crossed dump the trajectory and stop the animation.
        self.deltaTime += deltaTime
        # repopulate each iab at each time step
        self.base_neck_left = self.root.getChild('base_neck_left')
        self.base_neck_right = self.root.getChild('base_neck_right')
        self.base_skull_left = self.root.getChild('base_skull_left')
        self.base_skull_right = self.root.getChild('base_skull_right')
        # get side IABs
        self.side_fore_left = self.root.getChild('side_fore_left')
        self.side_chin_left = self.root.getChild('side_chin_left')
        self.side_fore_right = self.root.getChild('side_fore_right')
        self.side_chin_right = self.root.getChild('side_chin_right')
        # obtain associated dofs and cavity dofs
        self.base_neck_left_dofs = self.get_dome_dofs(self.base_neck_left)
        self.base_neck_right_dofs = self.get_dome_dofs(self.base_neck_right)
        self.base_skull_left_dofs = self.get_dome_dofs(self.base_skull_left)
        self.base_skull_right_dofs = self.get_dome_dofs(self.base_skull_right)
        # self.patient = self.root.getChild('patient')
        self.patient_dofs = self.patient.getObject('patient_dofs')
        if self.first_iter:
            # Derive the target pose once: initial per-axis maxima + 100.
            rest_pat_pose = np.array([self.max_vals.max_x, self.max_vals.max_y, self.max_vals.max_z])
            self.thresholds['patient_trans'] = rest_pat_pose
            self.thresholds['patient_trans'][0] += 100
            self.thresholds['patient_trans'][1] += 100
            self.thresholds['patient_trans'][2] += 100
            # NOTE(review): updating a dict with itself is a no-op -- kept as-is.
            self.thresholds.update(self.thresholds)
            logger.debug('rest_pat_pose: {}, '.format(rest_pat_pose))
            self.first_iter = False
        curr_pat_pose = np.array([self.max_vals.max_x, self.max_vals.max_y, self.max_vals.max_z])
        if curr_pat_pose[0]<self.thresholds['patient_trans'][0]: # not up to desired z
            # Translate the rest positions by one growth_rate step along +x
            # and record the pre-move maximum x for the trajectory log.
            pose = (self.growth_rate, 0, 0)
            test1 = moveRestPos(self.patient_dofs.findData('rest_position').value, pose)
            self.patient_dofs.findData('rest_position').value = test1
            self.patient_dofs.position = test1
            self._x.append(self.max_vals.max_x)
        # not up to desired z
        # if curr_pat_pose[2]>=self.thresholds['patient_trans'][2] and \
        #     curr_pat_pose[1]<self.thresholds['patient_trans'][1]:
        #     logger.warning('moving along y now')
        #     pose = (0.0, self.growth_rate, 0.0)
        #     test1 = moveRestPos(self.patient_dofs.position, pose)
        #     # self.patient_dofs.findData('rest_position').value = test1
        #     self.patient_dofs.position = test1
        #     self._y.append(self.max_vals.max_y)
        # if curr_pat_pose[2]>=self.thresholds['patient_trans'][2] and \
        #     curr_pat_pose[1]>=self.thresholds['patient_trans'][1] and \
        #     curr_pat_pose[0]<self.thresholds['patient_trans'][0]:
        #     logger.warning(' moving along x now')
        #     pose = (self.growth_rate, 0.0, 0.0)
        #     test1 = moveRestPos(self.patient_dofs.position, pose)
        #     # self.patient_dofs.findData('rest_position').value = test1
        #     self.patient_dofs.position = test1
        #     self._x.append(self.max_vals.max_x)
        # pose = (0, 0, self.growth_rate)
        # self._x.append(self.max_vals.max_x)
        # self._y.append(self.max_vals.max_y)
        # self._z.append(self.max_vals.max_z)
        # save what you got and end simulation
        #curr_pat_pose[2]>=self.thresholds['patient_trans'][2] and \
        #curr_pat_pose[1]>=self.thresholds['patient_trans'][1] and \
        if curr_pat_pose[0]>=self.thresholds['patient_trans'][0]:
            # NOTE(review): assumes at least one move happened before the
            # threshold was reached; self._x[-1] raises IndexError otherwise.
            stab_val= self._x[-1]
            # Pad the tail with the settled value so the saved trajectory
            # shows a flat steady state.
            for i in range(len(self._x)*4):
                self._x.append(stab_val)
            with open(self._pat_dofs_filename, 'a') as foo:
                arr_to_save = np.array([self._x])
                np.savetxt(foo, arr_to_save, delimiter=' ', fmt='%1.4e')
            # with open(self._pat_dofs_filename+'_ref.txt', 'a') as foo:
            #     np.savetxt(foo, self.thresholds['patient_trans'], delimiter=' ', fmt='%1.4e')
            self.root.getRootContext().animate = False
            # os._exit()
        return 0;

    def onEndAnimationStep(self, deltaTime):
        sys.stdout.flush()
        #access the 'position' state vector
        pat_poses = self.patient_dofs.findData('position').value
        # Recompute self.max_vals so the next onBeginAnimationStep sees the
        # post-move maxima.
        self.bwdInitGraph(self.root)
        return 0;

    def onLoaded(self, node):
        return 0;

    def reset(self):
        ## Please feel free to add an example for a simple usage in /home/lex/catkin_ws/src/superchicko/sofa/python/xml_2_scn.py
        return 0;

    def onMouseButtonMiddle(self, mouseX,mouseY,isPressed):
        # usage e.g.
        if isPressed :
            print("Control+Middle mouse button pressed at position "+str(mouseX)+", "+str(mouseY))
        return 0;

    def onScriptEvent(self, senderNode, eventName,data):
        ## Please feel free to add an example for a simple usage in /home/lex/catkin_ws/src/superchicko/sofa/python/xml_2_scn.py
        return 0;

    def onMouseButtonRight(self, mouseX,mouseY,isPressed):
        ## usage e.g.
        if isPressed :
            print("Control+Right mouse button pressed at position "+str(mouseX)+", "+str(mouseY))
        return 0;

    def onMouseButtonLeft(self, mouseX,mouseY,isPressed):
        ## usage e.g.
        if isPressed :
            print("Control+Left mouse button pressed at position "+str(mouseX)+", "+str(mouseY))
        return 0;
| robotsorcerer/superchicko | sofa/python/kinecontrol/diff_kine_controller.py | diff_kine_controller.py | py | 11,135 | python | en | code | 0 | github-code | 36 |
43221995512 | # 1
from selenium import webdriver
from math import log, sin

# NOTE(review): the find_element_by_* helpers were removed in Selenium 4;
# this script assumes Selenium 3 -- confirm the pinned version.
browser = webdriver.Chrome()
# Open the page http://suninjuly.github.io/get_attribute.html
browser.get('http://suninjuly.github.io/get_attribute.html')
# Find the image element and read the value of its 'valuex' attribute
valuex = browser.find_element_by_css_selector('[id = "treasure"]').get_attribute('valuex')
# Compute the math function of x and type the answer into the text field.
browser.find_element_by_id('answer').send_keys(str(log(abs(12 * sin(int(valuex))))))
# Tick the "I confirm I am a robot" checkbox, pick the "Robots rule!" radio
# button, then press Submit.
for selector in ['#robotCheckbox', '#robotsRule', '.btn.btn-default']:
    browser.find_element_by_css_selector(selector).click()
# 2
import math
import time
from selenium import webdriver

browser = webdriver.Chrome()
try:
    browser.get("http://suninjuly.github.io/get_attribute.html")
    # Read the hidden 'valuex' attribute of the treasure image.
    x = browser.find_element_by_id('treasure').get_attribute("valuex")
    # Answer = log(|12 * sin(x)|), sent as a string.
    y = str(math.log(abs(12*math.sin(int(x)))))
    browser.find_element_by_id('answer').send_keys(y)
    browser.find_element_by_id('robotCheckbox').click()
    browser.find_element_by_id('robotsRule').click()
    browser.find_element_by_css_selector("button.btn").click()
finally:
    # Leave the result visible briefly, then always close the browser.
    time.sleep(5)
    browser.quit()
# 3
from selenium import webdriver
import math
link = 'http://suninjuly.github.io/get_attribute.html'
def calc(x):
    """Solve the page's math task for an integer-like ``x``.

    Computes log(|12 * sin(x)|) and returns it as a string, ready to be
    typed into the answer input field.
    """
    value = int(x)
    return str(math.log(abs(12 * math.sin(value))))
driver = webdriver.Chrome()
driver.get(link)
# Read the 'valuex' attribute of the "treasure" element
x = driver.find_element_by_css_selector('#treasure').get_attribute('valuex')
y = calc(x)
# Type the computed answer into the input field
driver.find_element_by_css_selector('#answer').send_keys(y)
# Tick the "I confirm I am a robot" checkbox
driver.find_element_by_css_selector('#robotCheckbox').click()
# Select the "Robots rule" radio button
driver.find_element_by_css_selector('#robotsRule').click()
# Press the "Submit" button
driver.find_element_by_css_selector('button.btn').click()
25446749830 | """
Run correlation analysis asking if lucidity during the dream task influenced reported wakeup time.
"""
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import pingouin as pg

import utils

################################################################################
# SETUP
################################################################################

# Column names in the raw table and their human-readable axis labels.
wakeup_col = "Wakeup"
lucidity_col = "Task_lucid"
wakeup_label = "Time between task and awakening"
lucidity_label = "Lucidity while performing the task"

# Load custom plotting settings.
utils.load_matplotlib_settings()

# Choose filepaths.
config = utils.load_config()
root_dir = Path(config["root_directory"])
export_path_plot = root_dir / "derivatives" / "wakeup_lucidity-plot.png"
export_path_desc = root_dir / "derivatives" / "wakeup_lucidity-desc.tsv"
export_path_stat = root_dir / "derivatives" / "wakeup_lucidity-stat.tsv"

# Load data.
df, meta = utils.load_raw(trim=True)

# Reduce to only wakeup task conditions.
df = df.query("Condition != 'Clench'")

# Ensure values are floats.
df[wakeup_col] = df[wakeup_col].astype(float)
df[lucidity_col] = df[lucidity_col].astype(float)

################################################################################
# STATISTICS
################################################################################

# Get descriptives.
desc = df[[wakeup_col, lucidity_col]].describe().T.rename_axis("variable")

# Run correlation (Kendall's tau; lucidity on x, wakeup time on y).
x = df[lucidity_col].to_numpy()
y = df[wakeup_col].to_numpy()
stat = pg.corr(x, y, method="kendall")

################################################################################
# PLOTTING
################################################################################

# Get regression line predictor (degree-1 least-squares fit).
coef = np.polyfit(x, y, 1)
poly1d_func = np.poly1d(coef)

# Grab ticks and labels from the sidecar file.
# NOTE(review): the tick label variables are unused below, and the tick sets
# appear swapped relative to the plotted axes (x holds lucidity but xticks
# come from the Wakeup level map, and vice versa) -- confirm whether the two
# level maps are identical or this is a bug.
xticks, xticklabels = zip(*meta[wakeup_col]["Levels"].items())
xticks = list(map(int, xticks))
yticks, yticklabels = zip(*meta[lucidity_col]["Levels"].items())
yticks = list(map(int, yticks))

# Open figure.
fig, ax = plt.subplots(figsize=(2.4, 2.4))

# Draw dots and regression line.
ax.plot(x, y, "ko", ms=5, alpha=0.2)
ax.plot(x, poly1d_func(x), "-k")

# Aesthetics.
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.set_xlabel(lucidity_label)
ax.set_ylabel(wakeup_label)
ax.grid(True, axis="both")
ax.set_aspect("equal")
ax.margins(0.1)
ax.tick_params(direction="out", axis="both", which="both", top=False, right=False)

################################################################################
# EXPORT
################################################################################

desc.to_csv(export_path_desc, na_rep="n/a", sep="\t")
stat.to_csv(export_path_stat, index_label="method", na_rep="n/a", sep="\t")
# Save the figure in all three formats alongside the stats tables.
plt.savefig(export_path_plot)
plt.savefig(export_path_plot.with_suffix(".pdf"))
plt.savefig(export_path_plot.with_suffix(".svg"))
| remrama/wakeup | wakeup_lucidity.py | wakeup_lucidity.py | py | 2,944 | python | en | code | 0 | github-code | 36 |
22767714265 | import sys
import random
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
##################################################################################################################################
##################################################################################################################################
### CÁLCULO DE DISTANCIAS ENTRE DOS ELEMENTOS DE UNA REJILLA CUADRADA BIDIMENSIONAL DOTADA DE CONDICIONTES DE FRONTERA PERIÓDICAS
def calc_dist(loc1, loc2, longitud):
    """Minimum-image Euclidean distance between two sites of a square
    ``longitud`` x ``longitud`` lattice with periodic boundary conditions.

    Instead of enumerating all nine periodic images (the original approach),
    each axis is wrapped independently to its nearest image. The result is
    identical because the nine candidates are exactly the cross product of
    the three x-offsets and the three y-offsets, and sqrt is monotonic.
    """
    distx = loc1[0] - loc2[0]
    disty = loc1[1] - loc2[1]
    # nearest periodic image along each axis
    ddx = min(abs(distx), abs(distx - longitud), abs(distx + longitud))
    ddy = min(abs(disty), abs(disty - longitud), abs(disty + longitud))
    return (ddx ** 2 + ddy ** 2) ** (1 / 2)
### INICIACIÓN DE GRÁFICA PARA SISTEMA CML
def ini_graf(longitud):
    # Initialise a longitud x longitud CML lattice as a networkx graph.
    # Every node carries a state 'u' drawn uniformly from [-1, 1]; every
    # pair of distinct nodes is joined by an edge whose 'path_distance'
    # attribute is the periodic (minimum-image) distance between the sites.
    # NOTE(review): this builds a *complete* graph -- O(longitud**4) edges --
    # fine for small lattices, expensive for large ones.
    grafica = nx.Graph()
    # Node definition (RNG is consumed once per site, in site order, so the
    # seed set by the caller reproduces the initial condition)
    num_sitios = longitud ** 2
    for sitio in range(num_sitios):
        grafica.add_node(sitio, u = random.uniform(-1, 1))
    # Edge definition; site index maps to coordinates as
    # (sitio % longitud, sitio // longitud)
    lista_edges = []
    for sitio1 in range(num_sitios):
        for sitio2 in range(num_sitios):
            if sitio2 == sitio1:
                continue
            coord1 = (sitio1 % longitud, sitio1 // longitud)
            coord2 = (sitio2 % longitud, sitio2 // longitud)
            distancia = calc_dist(coord1, coord2, longitud)
            lista_edges.append((sitio1, sitio2, {'path_distance': distancia}))
    grafica.add_edges_from(lista_edges)
    return grafica
### CÁLCULO DE PRIMEROS VECINOS DE UN NODO EN UNA GRÁFICA CON BORDES DOTADOS DE ATRIBUTOS EQUIVALENTES A LAS DISTANCIAS ENTRE LOS SITIOS QUE LOS DEFINEN
def primeros_vecinos(grafica, nodo):
    """Return the first (nearest) neighbours of ``nodo``: every adjacent
    site whose edge-attribute distance equals the minimum over the adjacency.

    The original implementation walked the adjacency twice (once to find the
    minimum, once to collect the matches); this version gathers the
    (neighbour, distance) pairs in a single pass and filters them with a
    comprehension, preserving the adjacency iteration order. As before, a
    node with no neighbours raises ValueError (via ``min`` on an empty
    sequence).
    """
    # every attribute value of every incident edge, paired with its neighbour
    pares = [(vecino, dist)
             for vecino, datos in grafica.adj[nodo].items()
             for dist in datos.values()]
    min_dist = min(dist for _, dist in pares)
    return [vecino for vecino, dist in pares if dist == min_dist]
### MAPEO PHI
def phi(valor_t):
    """Piecewise-linear (tent-like) local map of the CML on [-1, 1].

    Branches:
        [-1, -1/3) -> -3*t - 2
        [-1/3, 1/3) -> 3*t
        [1/3, 1]   -> -3*t + 2

    Raises ValueError for inputs outside [-1, 1] (the original code fell
    through its if/elif chain and raised a confusing UnboundLocalError
    instead).
    """
    if -1 <= valor_t < -1 / 3.:
        return (-3 * valor_t) - 2
    elif -1 / 3. <= valor_t < 1 / 3.:
        return 3 * valor_t
    elif 1 / 3. <= valor_t <= 1:
        return (-3 * valor_t) + 2
    raise ValueError('phi is only defined on [-1, 1]; got {}'.format(valor_t))
### EVOLUCIÓN TEMPORAL DE LA GRÁFICA
def ev_temp(num_iter, trans, x0, g_acople, grafica, lista_1vecinos, guardar):
    """Run ``num_iter`` synchronous update steps of the CML dynamics.

    Parameters
    ----------
    num_iter : int
        Total number of update steps.
    trans : int
        Transient steps discarded before recording (only used when guardar).
    x0 : int
        Node whose trajectory is recorded (only used when guardar).
    g_acople : float
        Coupling constant multiplying the neighbour sum.
    grafica : networkx.Graph
        Lattice whose per-node attribute 'u' holds the state; updated in place.
    lista_1vecinos : list
        Per-site (site, [first neighbours]) tuples, indexed by site.
    guardar : bool
        When True, also return the recorded post-transient trajectory of x0.

    Returns
    -------
    grafica                     when ``guardar`` is falsy
    (grafica, numpy.ndarray)    when ``guardar`` is truthy

    The original function duplicated the whole update loop in each branch of
    ``guardar``; the two copies are merged here. Also fixes a
    ZeroDivisionError in the progress marker when num_iter < 10.
    """
    print('INICIO DE EVOLUCION TEMPORAL')
    lista_sitios = list(grafica.nodes())
    # max(1, ...) avoids ZeroDivisionError in the modulo below for num_iter < 10
    tenth_progress = max(1, num_iter // 10)
    arr_promtemp = np.zeros(num_iter - trans) if guardar else None
    for iteracion in range(num_iter):
        if iteracion % tenth_progress == 0:
            print('*')  # coarse progress marker, roughly every 10% of the run
        # Synchronous update: compute all next-step values on an auxiliary
        # graph so every site is advanced from the same time slice.
        grafica_holder = nx.Graph()
        for sitio in lista_sitios:
            grafica_holder.add_node(sitio, u=0)
            suma_vecinos = 0
            for vecino in lista_1vecinos[sitio][1]:
                suma_vecinos += phi(grafica.nodes[vecino]['u']) - phi(grafica.nodes[sitio]['u'])
            grafica_holder.nodes[sitio]['u'] = phi(grafica.nodes[sitio]['u']) + g_acople * suma_vecinos
        # Commit the synchronous step back onto the original graph.
        for sitio in lista_sitios:
            grafica.nodes[sitio]['u'] = grafica_holder.nodes[sitio]['u']
        # Record the tracked site once the transient has elapsed.
        if guardar and iteracion >= trans:
            arr_promtemp[iteracion - trans] = grafica.nodes[x0]['u']
    print('FIN DE EVOLUCION TEMPORAL')
    if guardar:
        return grafica, arr_promtemp
    return grafica
##################################################################################################################################
##################################################################################################################################
### DEFINICIÓN DE PARÁMETROS DE SIMULACIÓN
# Selección aleatoria de semilla
s = random.randrange(sys.maxsize)
# Definición de otros parámetros
L = int(input('Ingresa la longitud de la rejilla CML (entero): '))
g = float(input('Ingresa la constante de acoplamiento (real positivo con tres decimales): '))
N_iter = int(input('Ingresa el total de iteraciones (entero): '))
transient = int(input('Ingresa el valor de transient (entero): '))
site_x0 = int(random.randrange(0,int(L**2),1))
N_ensembles = int(input('Ingresa el total de sistemas que conforman el ensamble (entero): '))
### GENERACIÓN DE GRÁFICA Y LISTA CON PRIMEROS VECINOS
# Iniciación de generador de números aleatorios
random.seed(s)
# Definición de gráfica y lista con primeros vecinos
lattice = ini_graf(L)
list_1neighbors = []
for site in range(L**2):
list1v = primeros_vecinos(lattice, site)
list_1neighbors.append((site, list1v))
print(lattice.nodes[15]['u'])
print(type(lattice.nodes[15]['u']))
### DISTRIBUCIÓN DE VARIABLE 'U' EN UN SITIO PARTICULAR TRAS MÚLTIPLES ITERACIONES, CONSIDERANDO UN SISTEMA
safe_timeavg = True
lattice, arr_timeavg = ev_temp(N_iter, transient, site_x0, g, lattice, list_1neighbors, safe_timeavg)
fname_timeavg = 'tesis_Egolf_ergodicidad_L%(length)i_g%(coupling).3f_Niter%(iterations).3e_trans%(trans).3e_seed%(seed)i_site%(site)i_timeavg.txt'
dict_fname_timeavg = {'length': L, 'coupling': g, 'iterations': N_iter, 'trans': transient, 'site': site_x0, 'seed': s}
np.savetxt(fname_timeavg % dict_fname_timeavg, arr_timeavg)
print('Evolución de sistema completada')
# Distribución de variable 'u' en un sitio particular tras múltiples iteraciones, considerando un ensamble
safe_ensembleavg = True
arr_ensembleavg = np.zeros((N_ensembles, (N_iter - transient)))
for sys in range(N_ensembles):
lattice = ini_graf(L)
lattice, arr_ensemble_holder = ev_temp(N_iter, transient, site_x0, g, lattice, list_1neighbors, safe_ensembleavg)
arr_ensembleavg[sys] = arr_ensemble_holder
arr_ensembleavg = arr_ensembleavg.flatten()
fname_ensavg = 'tesis_Egolf_ergodicidad_L%(length)i_g%(coupling).3f_Niter%(iterations).3e_trans%(trans).3e_seed%(seed)i_site%(site)i_Nens%(ens)i_ensavg.txt'
dict_fname_ensavg = {'length': L, 'coupling': g, 'iterations': N_iter, 'trans': transient, 'site': site_x0, 'ens': N_ensembles, 'seed': s}
np.savetxt(fname_ensavg % dict_fname_ensavg, arr_ensembleavg)
print('Evolución de ensamble completada')
# Iniciación de rejilla CML para prueba de self-averaging
L2 = int(2*L)
lattice2 = ini_graf(L2)
list_1neighbors2 = []
for site2 in range(L2**2):
list1v2 = primeros_vecinos(lattice2, site2)
list_1neighbors2.append((site2, list1v2))
# Distribución de variable 'u' en un sistema a un tiempo fijo
safe_selfavg = False
lattice2 = ev_temp(N_iter, transient, site_x0, g, lattice2, list_1neighbors2, safe_selfavg)
arr_selfavg = np.zeros(L2**2)
for site in range(L2**2):
arr_selfavg[site] = lattice2.nodes[site]['u']
fname_selfavg = 'tesis_Egolf_ergodicidad_L%(length)i_g%(coupling).3f_Niter%(iterations).3e_trans%(trans).3e_seed%(seed)i_selfavg.txt'
dict_fname_selfavg = {'length': L, 'coupling': g, 'iterations': N_iter, 'trans': transient, 'seed': s}
np.savetxt(fname_selfavg % dict_fname_selfavg, arr_selfavg)
print('Prueba de self-averaging completada')
# Gráfica con resultados de ergodicidad y self-averaging
plt.figure(1)
fig1, (ax1A, ax1B, ax1C) = plt.subplots(nrows = 1, ncols = 3, figsize = (30,10))
plt.tight_layout(pad=4, h_pad=4, w_pad=6)
hist_time, bins_time = np.histogram(arr_timeavg, range = (-1, 1))
hist_ens, bins_ens = np.histogram(arr_ensembleavg, range = (-1, 1))
hist_self, bins_self = np.histogram(arr_selfavg, range = (-1,1))
ax1A.hist(bins_time[:-1], bins_time, weights = hist_time, density = True)
ax1A.set_title('Distribución de ' + r'$u_{\vec{x}_{0}}^{t}$' + ' con un sistema de %(lo)i x %(lo)i sitios ' % {'lo': L} + r'$(\vec{x}_{0} = %(siteref)i) $' % {'siteref': site_x0}, size=16)
ax1A.set_ylabel('dP(u)', size=15)
ax1A.set_xlabel('u', size=15)
for tick in ax1A.xaxis.get_major_ticks():
tick.label.set_fontsize(14)
for tick in ax1A.yaxis.get_major_ticks():
tick.label.set_fontsize(14)
ax1B.hist(bins_ens[:-1], bins_ens, weights = hist_ens, density = True)
ax1B.set_title('Distribución de ' + r'$u_{\vec{x}_{0}}^{t}$' + ' con un ensamble de %(Nens)i sistemas de %(lo)i x %(lo)i sitios' % {'Nens': N_ensembles, 'lo': L}, size=16)
ax1B.set_ylabel('dP(u)', size=15)
ax1B.set_xlabel('u', size=15)
for tick in ax1B.xaxis.get_major_ticks():
tick.label.set_fontsize(14)
for tick in ax1B.yaxis.get_major_ticks():
tick.label.set_fontsize(14)
ax1C.hist(bins_self[:-1], bins_self, weights = hist_self, density = True)
ax1C.set_title('Distribución de ' + r'$u_{\vec{x}}^{t_{0}}$' + ' sobre un sistema de %(lo)i x %(lo)i sitios ' % {'lo': L2} + r'$(t_{0} = %(tiempo).2e) $' % {'tiempo': N_iter}, size=16)
ax1C.set_ylabel('dP(u)', size=15)
ax1C.set_xlabel('u', size=15)
for tick in ax1C.xaxis.get_major_ticks():
tick.label.set_fontsize(14)
for tick in ax1C.yaxis.get_major_ticks():
tick.label.set_fontsize(14)
imgname = 'tesis_Egolf_ergodicidad_L%(length)i_g%(coupling).3f_Niter%(iterations).3e_trans%(trans).3e_seed%(seed)i_site%(site)i_Nens%(ens)i_Graph.png'
dict_imgname = {'length': L, 'coupling': g, 'iterations': N_iter, 'trans': transient, 'site': site_x0, 'ens': N_ensembles, 'seed': s}
plt.savefig(imgname % dict_imgname)
print('Programa concluido')
| maal22/Tesis_Licenciatura | tesis_Egolf_ergodicidad4_2.py | tesis_Egolf_ergodicidad4_2.py | py | 10,416 | python | es | code | 0 | github-code | 36 |
26626836853 | import astropy.units as u
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.units import Quantity
from astropy.table import Table
from scipy import stats
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import astropy.coordinates as asc
import numpy as np
import random
import csv
import os
contour = np.genfromtxt("Data/OB-Katz-contour-0.1.dat", names = True, dtype=None)
contour_id = contour['source_id']
readresults = Table.read("Data/OB-Katz.fits",format='fits')
results = np.array(readresults)
# Match every contour source_id to its row index in `results`.
# NOTE(review): this single-cursor scan assumes the contour ids occur in
# `results` in the same relative order and that every id is present; a
# missing id makes the inner while-loop run past the end of `results`
# (IndexError) -- confirm the data guarantees this.
matches = np.array([])
j=0
for i in range(len(contour_id)):
    not_found = True
    while not_found:
        if contour_id[i]==results['source_id'][j]:
            matches = np.append(matches,j)
            not_found = False
        else:
            j+=1
# Rebuild `results` keeping only the matched rows.
# NOTE(review): requires at least two matches (indices 0 and 1 are used
# unconditionally) -- confirm upstream.
newresults = np.append(results[int(matches[0])],results[int(matches[1])])
k = 2
while k <= len(matches)-1:
    newresults = np.append(newresults,results[int(matches[k])])
    k+=1
results = newresults
# Distance in parsec from the Gaia parallax in mas: d = 1000 / parallax.
distances = 1000/results['parallax']
#Convert coordinates to galactic and then to cartesian.
coordinates_ICRS = asc.SkyCoord(ra=results['ra']*u.degree, dec=results['dec']*u.degree, distance=distances*u.pc, pm_ra_cosdec=results['pmra']*u.mas/u.yr, pm_dec=results['pmdec']*u.mas/u.yr, frame='icrs', obstime='J2015.5')
#coordinates_ICRS = asc.ICRS(ra=results['ra']*u.degree, dec=results['dec']*u.degree, distance=distances*u.pc, pm_ra_cosdec=results['pmra']*u.mas/u.yr, pm_dec=results['pmdec']*u.mas/u.yr)
coordinates_galactic = coordinates_ICRS.galactic
#coordinates_galactic = asc.SkyCoord(l=results['l']*u.degree, b=results['b']*u.degree, distance=distances*u.pc, pm_ra_cosdec=results['pmra']*u.mas/u.yr, pm_dec=results['pmdec']*u.mas/u.yr, radial_velocity=results['radial_velocity']*u.km/u.s, frame='galactic', obstime='J2015.5')
#coordinates_galactic = asc.SkyCoord(l=results['l']*u.degree, b=results['b']*u.degree, distance=distances*u.pc, frame='galactic', obstime='J2015.5')
coordinates_cartesian = np.column_stack((coordinates_galactic.cartesian.x.value, coordinates_galactic.cartesian.y.value, coordinates_galactic.cartesian.z.value))
# Cartesian components; the `.filled(0)` leftovers suggest a masked-array
# variant was used at some point.
x = coordinates_cartesian[:,0]#.filled(0)
y = coordinates_cartesian[:,1]#.filled(0)
z = coordinates_cartesian[:,2]#.filled(0)
# Tangential velocities (km/s): v_T = 4.74e-3 * d[pc] * mu[mas/yr].
#counts,xbins,ybins,image = plt.hist2d(distances*(4.74 * 10**-3)*results['pmra'],distances*(4.74 * 10**-3)*results['pmdec'],bins=60,normed=True,norm=LogNorm(), cmap = 'Blues')
#counts,xbins,ybins,image = plt.hist2d(results['pmra'],results['pmdec'],bins=40,normed=True,norm=LogNorm(),cmap = 'Blues')
hb = plt.hexbin(distances*(4.74 * 10**-3)*results['pmra'], distances*(4.74 * 10**-3)*results['pmdec'], extent=(-50,50,-50,50), gridsize=80, bins='log', cmap = 'Blues')
#hb = plt.hexbin(results['pmra'], results['pmdec'], gridsize=80, extent=(-50,50,-50,50), bins='log', cmap = 'Blues')
plt.colorbar()
#plt.contour(counts.transpose(), extent=[xbins.min(),xbins.max(),ybins.min(),ybins.max()], colors='k', linewidth=0.01), levels = [0.001])
#plt.text(-0.1, 10.0, 'Gaia DR1')
plt.xlim(-50,50)
plt.ylim(-50,50)
plt.xlabel(r'$V_{Tra} \ (km/s)$')
plt.ylabel(r'$V_{Tdec} \ (km/s)$')
#plt.xlabel(r'$\mu_{ra} \ (mas/yr)$')
#plt.ylabel(r'$\mu_{dec} \ (mas/yr)$')
#plt.savefig('Proper-Motion-Katz-contour-0.1.png')
plt.savefig('Tangential-Velocites-Katz-Contour-0.1.png')
| spacer730/Gaia_research | Overdensities-Propermotion-Graph.py | Overdensities-Propermotion-Graph.py | py | 3,287 | python | en | code | 0 | github-code | 36 |
33210794674 | from enum import Enum
from typing import List
import numpy as np
import torch
from nemo import logging
from nemo.backends.pytorch.nm import DataLayerNM
from nemo.core.neural_types import *
__all__ = ['MultiDataLayer', 'DataCombination']
class DataCombination(Enum):
    """Strategy for combining several datasets inside a MultiDataset."""
    # every combination of member items; combined length = product of lengths
    CROSSPRODUCT = 1
    # lockstep pairing; all member datasets must have the same length
    ZIP = 2
class MultiDataLayer(DataLayerNM):
    """Data layer that merges several child DataLayerNM objects into one,
    exposing the union of their output ports and a combined MultiDataset."""

    def __init__(
        self,
        data_layers: List[DataLayerNM],
        batch_size: int,
        shuffle: bool = False,
        combination_mode: DataCombination = DataCombination.CROSSPRODUCT,
        port_names: List[str] = None,
    ):
        """
        data_layers: (list) of DataLayerNM objects
        batch_size: (int) batchsize when the underlying dataset is loaded
        combination_mode: (DataCombination) defines how to combine the datasets.
        shuffle: (bool) whether underlying multi dataset should be shuffled in each epoch
        port_names: List(str) user can override all port names if specified
        """
        super().__init__()
        self._data_layers = data_layers
        self._batch_size = batch_size
        self._shuffle = shuffle
        self._combination_mode = combination_mode
        self._port_names = port_names
        # Combined dataset over all child layers' datasets.
        self._dataset = MultiDataset(
            datasets=[dl.dataset for dl in self._data_layers], combination_mode=combination_mode
        )
        self._ports = dict()
        if self._port_names:
            # Explicit override: names are consumed in child-layer / port
            # iteration order.
            # NOTE(review): assumes len(port_names) covers every child port;
            # an IndexError is raised otherwise -- confirm upstream.
            i = 0
            for dl in self._data_layers:
                for _, port_type in dl.output_ports.items():
                    self._ports[self._port_names[i]] = port_type
                    i += 1
        else:
            # Union of child ports; a colliding name is stored under
            # "<name>_<layer index>" instead and a warning is logged.
            for dl_idx, dl in enumerate(self._data_layers):
                for port_name, port_type in dl.output_ports.items():
                    if port_name in self._ports:
                        logging.warning(f"name collision {port_name}, will rename")
                        self._ports[f"{port_name}_{dl_idx}"] = port_type
                    else:
                        self._ports[port_name] = port_type

    @property
    def output_ports(self):
        """Return: dict
        Returns union of all individual data_layer output ports
        In case of name collision, resolve by renaming
        """
        return self._ports

    def __len__(self):
        # Length of the combined dataset (product or common length of the
        # children, depending on combination_mode).
        return len(self._dataset)

    @property
    def dataset(self):
        return self._dataset

    @property
    def data_iterator(self):
        # NOTE(review): no custom iterator is provided; returning None
        # presumably lets the framework build a default loader from
        # `dataset` -- confirm against DataLayerNM.
        return None
class MultiDataset(torch.utils.data.Dataset):
    """Map-style dataset that combines several datasets into one.

    CROSSPRODUCT enumerates the full Cartesian product of the datasets;
    ZIP pairs items index-by-index and requires equal lengths.
    """

    def __init__(
        self,
        datasets: List[torch.utils.data.Dataset],
        combination_mode: DataCombination = DataCombination.CROSSPRODUCT,
    ):
        """
        Datasets: list of torch.utils.data.Dataset objects.
        combination_mode: DataCombination, defines how to combine the datasets, Options are [DataCombination.CROSSPRODUCT, DataCombination.ZIP].

        Raises:
            ValueError: for an unknown mode, or unequal lengths in ZIP mode.
        """
        self.datasets = datasets
        self.combination_mode = combination_mode
        if self.combination_mode == DataCombination.CROSSPRODUCT:
            # int(): np.prod returns a numpy scalar, but __len__ must
            # return a plain Python int.
            self.len = int(np.prod([len(d) for d in self.datasets]))
        elif self.combination_mode == DataCombination.ZIP:
            ds_lens = [len(d) for d in self.datasets]
            if len(set(ds_lens)) != 1:
                raise ValueError("datasets do not have equal lengths.")
            self.len = ds_lens[0]
        else:
            raise ValueError("combination_mode unknown")

    def __getitem__(self, i):
        """
        Returns list [x1, x2, ...xn] where x1 \in D1, x2 \in D2, ..., xn \in Dn
        """
        if self.combination_mode == DataCombination.CROSSPRODUCT:
            # BUG FIX: the previous `i % len(d)` indexing advanced all
            # datasets in lock-step, repeating aligned tuples instead of
            # enumerating the cross product.  Decode `i` as a mixed-radix
            # number (last dataset varies fastest) so every combination
            # 0 <= i < prod(lengths) is produced exactly once.
            items = []
            idx = i
            for d in reversed(self.datasets):
                items.append(d[idx % len(d)])
                idx //= len(d)
            items.reverse()
            return [x for item in items for x in item]
        # ZIP: all datasets have the same length; take element i from each.
        return [x for d in self.datasets for x in d[i]]

    def __len__(self):
        """
        Returns length of this dataset (int).
        In case of DataCombination.CROSSPRODUCT this would be prod(len(d) for d in self.datasets).
        In case of DataCombination.ZIP this would be the common length of the datasets.
        """
        return self.len
| cppxaxa/ICAN.ShapeShifter | ICAN.ShapeShifter.Worker/nemo/backends/pytorch/common/multi_data.py | multi_data.py | py | 4,014 | python | en | code | 0 | github-code | 36 |
12954890166 | #!/usr/bin/env python
import os
import re
import sys
import json
import random
import argparse
from checks import cors
from checks import cookie
from core.requester import requester
from core.colors import red, green, white, info, bad, end
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', help='url', dest='url')
parser.add_argument('--json', help='json output', dest='jsonOutput', action='store_true')
args = parser.parse_args()
def banner():
    """Print the tool banner, colouring every non-space character randomly."""
    text = '''\n\t{ meta v0.1-beta }\n'''
    colored = ''.join(
        random.choice([green, white]) + char + end if char != ' ' else char
        for char in text
    )
    print(colored)
with open(sys.path[0] + '/db/headers.json') as file:
database = json.load(file)
def information(headers):
    """Return {header: description} for every response header known to the
    local header database."""
    return {
        header: database[header]['description']
        for header in headers
        if header in database
    }
def missing(headers):
    """Return {header: description} for every security-relevant header in the
    database that is absent from the response."""
    return {
        header: database[header]['description']
        for header in database
        if database[header]['security'] == 'yes' and header not in headers
    }
def misconfiguration(headers):
    """Run misconfiguration checks on CORS and cookie headers.

    Returns a dict keyed by the header name with each check's findings.
    Only one of Set-Cookie / Cookie is checked (Set-Cookie wins).
    """
    result = {}
    if 'Access-Control-Allow-Origin' in headers:
        # CORS check probes the target URL directly, not just the header value.
        result['Access-Control-Allow-Origin'] = cors.check(args.url)
    if 'Set-Cookie' in headers:
        result['Set-Cookie'] = cookie.check(headers['Set-Cookie'])
    elif 'Cookie' in headers:
        result['Cookie'] = cookie.check(headers['Cookie'])
    return result
# Driver: fetch the headers for the requested URL and report findings.
headers = {}
if args.url:
    headers = requester(args.url).headers
else:
    banner()
    print ('%s No data to act upon.' % bad)
    quit()
if not args.jsonOutput:
    banner()
if headers:
    headerInformation = information(headers)
    missingHeaders = missing(headers)
    # Distinct name so the misconfiguration() function is not shadowed by
    # its own result.
    misconfigs = misconfiguration(headers)
    if args.jsonOutput:
        jsoned = {}
        jsoned['information'] = headerInformation
        jsoned['missing'] = missingHeaders
        jsoned['misconfigurations'] = misconfigs
        sys.stdout.write(json.dumps(jsoned, indent=4))
    else:
        if headerInformation:
            print ('%s Header information\n' % info)
            print (json.dumps(headerInformation, indent=4))
        if missingHeaders:
            print ('\n%s Missing Headers\n' % bad)
            print (json.dumps(missingHeaders, indent=4))
        # BUG FIX: this section previously re-checked `missingHeaders`, so
        # misconfigurations were shown (or skipped) based on the wrong result.
        if misconfigs:
            print ('\n%s Mis-configurations\n' % bad)
            print (json.dumps(misconfigs, indent=4))
| s0md3v/meta | meta.py | meta.py | py | 2,604 | python | en | code | 37 | github-code | 36 |
18132978929 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import re
import matplotlib.pyplot as plt
import keras
from tensorflow import keras
from keras.layers import Dense, SimpleRNN, Input, Embedding
from keras.models import Sequential
from keras.optimizers import Adam
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras.utils import to_categorical
class WordsPredict:
    '''RNN that predicts the next word of a text.

    Whole words are extracted from the training text; the set of unique
    words forms the vocabulary, whose size is capped by
    max_words_count_in_dict.  Each word is encoded by its vocabulary index,
    and input_words consecutive words are fed to the network to predict the
    following word.
    '''
    def load_text(self):
        '''Load the raw training text from the local dataset file.'''
        with open('/home/andrey/Machine_Learning/ML_practice/datasets/text_for_rnn/texsts_samples.txt',
                  'r', encoding='utf-8') as my_text:
            text = my_text.read()
        text = text.replace('\ufeff', '') # drop the leading invisible BOM character
        return text
    def prepearing_data(self):
        '''Tokenize the corpus and build the training tensors.

        The Tokenizer keeps only the most frequent words (the rest are
        dropped), strips punctuation via `filters`, lower-cases the text and
        splits on spaces (char_level=False => word-level tokens).
        Returns (train, target, input_words, n, max_words_count_in_dict,
        tokenizer).
        '''
        # maximum number of words kept in the vocabulary:
        max_words_count_in_dict = 2000
        # create the tokenizer
        # NOTE(review): the "%amp;" inside `filters` looks like an
        # HTML-escaped "&" that leaked into the source - verify.
        tokenizer = Tokenizer(num_words=max_words_count_in_dict,
                              filters='!-"-#$%amp;()*+,-./:;<=>?@[\\]^_`{|}~\t\n\r',
                              lower=True, split=' ', char_level=False)
        # fit on the corpus so every word gets its own integer index
        tokenizer.fit_on_texts(self.load_text())
        # example of inspecting the resulting word counts:
        # my_dict = list(tokenizer.word_counts.items())
        # print(my_dict[:10])
        # replace every word of the text by its index in the vocabulary:
        data = tokenizer.texts_to_sequences([self.load_text()])
        # one-hot encoding of the inputs is no longer needed - the
        # Embedding layer consumes raw indices:
        # result = to_categorical(data[0], num_classes=max_words_count_in_dict)
        result=np.array(data[0])
        # Build the training windows: take input_words consecutive words,
        # predict the next one, then slide the window forward by one element.
        input_words = 3
        n = result.shape[0] - input_words # we predict the 4th word from 3 words
        # training inputs (n windows of raw word indices):
        train = np.array([result[i:i + input_words] for i in range(n)])
        # targets, one-hot encoded over the vocabulary:
        target = to_categorical(result[input_words:], num_classes=max_words_count_in_dict)
        return train, target, input_words, n, max_words_count_in_dict, tokenizer
    def __init__(self):
        # NOTE: the network is built AND trained at construction time;
        # fit() below runs 100 epochs.
        train, target, input_words, n, max_words_count_in_dict, tokenizer = self.prepearing_data()
        self.model = keras.Sequential([
            Embedding(max_words_count_in_dict,512,input_length=input_words),
            SimpleRNN(256, activation='tanh'),
            Dense(max_words_count_in_dict, activation='softmax')
        ])
        self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        self.history=self.model.fit(train, target, batch_size=32, epochs=100, validation_split=0.2)
    def CreateText(self, text, len_text=10):
        '''Generate len_text words continuing the user-supplied seed text.'''
        # the user's seed text; generated words are appended to it
        result = text
        # NOTE(review): every prepearing_data() call re-reads and
        # re-tokenizes the whole corpus - three calls here are expensive.
        tokenizer = self.prepearing_data()[5]
        input_words = self.prepearing_data()[2]
        max_words_in_dict = self.prepearing_data()[4]  # unused below
        # turn the seed text into a sequence of word indices
        data = tokenizer.texts_to_sequences([text])[0]
        for i in range(len_text):
            # one-hot variant kept for reference; the model takes raw indices:
            # OHE_vectors = to_categorical(data[i:i + input_words], num_classes=max_words_in_dict)
            # collection = OHE_vectors.reshape(1, input_words, max_words_in_dict)
            # current window of raw word indices
            digits = data[i:i + input_words]
            collection = np.expand_dims(digits, axis=0)
            # run the trained model on this window
            predict_words = self.model.predict(collection)
            # take the index with the highest probability
            get_index = predict_words.argmax(axis=1)[0]
            # append it so the window slides over it next iteration
            data.append(get_index)
            # map the index back to a word and extend the user's text
            result += ' ' + tokenizer.index_word[get_index]
        return result
    def MakePhrase(self,user_text,tex_length=10):
        # NOTE(review): near-verbatim duplicate of CreateText - candidate
        # for consolidation.
        result=user_text
        tokenizer = self.prepearing_data()[5]
        input_words = self.prepearing_data()[2]
        max_words_in_dict = self.prepearing_data()[4]
        data=tokenizer.texts_to_sequences([user_text])[0]
        for i in range(tex_length):
            # one-hot variant kept for reference:
            # x=to_categorical(data[i:i+input_words],num_classes=max_words_in_dict)
            # inp=x.reshape(1,input_words,max_words_in_dict)
            # window of raw indices, not one-hot vectors
            digs_list=data[i:i+input_words]
            # add the batch axis
            input_collection=np.expand_dims(digs_list, axis=0)
            # predict with the trained model
            prediction=self.model.predict(input_collection)
            # choose the most probable index
            index=prediction.argmax(axis=1)[0]
            # append to the running sequence and to the user's text
            data.append(index)
            result+=' '+tokenizer.index_word[index]
        return result
    def show_acc_loss_during_learn_graphics(self):
        '''Plot training/validation accuracy and loss curves from fit().'''
        acc = self.history.history['accuracy']
        val_acc = self.history.history['val_accuracy']
        loss = self.history.history['loss']
        val_loss = self.history.history['val_loss']
        epochs = range(1, len(acc) + 1)
        plt.plot(epochs, acc, 'bo', label='Training acc')
        plt.plot(epochs, val_acc, 'b', label='Validation acc')
        plt.title('Training and validation accuracy')
        plt.legend()
        plt.figure()
        plt.plot(epochs, loss, 'bo', label='Training loss')
        plt.plot(epochs, val_loss, 'b', label='Validation loss')
        plt.title('Training and validation loss')
        plt.legend()
        plt.show()
# Demo driver: trains the model (inside the constructor), generates a phrase
# from a Russian seed text and shows the learning curves.
user = WordsPredict()
print(user.MakePhrase('я люблю виски'))
user.show_acc_loss_during_learn_graphics()
12256071208 | # 3. Добавьте в пакет, созданный на семинаре шахматный модуль. Внутри него напишите код, решающий задачу о 8 ферзях.
# Известно, что на доске 8×8 можно расставить 8 ферзей так, чтобы они не били друг друга.
ROWS = 8
COLS = 8
START_POINT = 0


def creating_chessboard() -> list:
    """Build an empty COLS x ROWS board where '_' marks a free square."""
    return [['_'] * ROWS for _ in range(COLS)]
def not_attacked(board: list, row, col) -> bool:
    """Return True if a queen on (row, col) would not be attacked by any
    queen already placed in rows 0..row-1.

    Only the column above and the two upward diagonals are inspected,
    because queens are placed one row at a time from the top.
    """
    size = len(board)
    # Same column, rows above.
    if any(board[r][col] == 'Q' for r in range(row)):
        return False
    # Upper-left diagonal (starts on the square itself, which is empty
    # during the search).
    r, c = row, col
    while r >= 0 and c >= 0:
        if board[r][c] == 'Q':
            return False
        r, c = r - 1, c - 1
    # Upper-right diagonal.
    r, c = row, col
    while r >= 0 and c < size:
        if board[r][c] == 'Q':
            return False
        r, c = r - 1, c + 1
    return True
def queens_placement(board: list, point: int=START_POINT):
    """Backtracking solver: place one queen per row starting at `point`.

    Prints every complete 8-queens solution via print_board; the board is
    mutated in place and restored after each recursive attempt.
    """
    if point == len(board):
        # All rows filled: this is a valid solution.
        print_board(board)
        return
    for i in range(len(board)):
        if not_attacked(board, point, i):
            board[point][i] = 'Q'
            queens_placement(board, point + 1)
            # Undo the placement before trying the next column (backtrack).
            board[point][i] = '_'
def print_board(board):
    """Pretty-print a board: tabs between cells, quotes stripped, and a
    trailing blank line to separate consecutive solutions."""
    for row in board:
        print(str(row).replace(',', '\t').replace('\'', ''))
    print()
if __name__ == '__main__':
queens_placement(creating_chessboard())
| BespilotNick/Python_DE | Seminar_6/chess_mod_var_1.py | chess_mod_var_1.py | py | 1,457 | python | ru | code | 0 | github-code | 36 |
25014074159 | from collections import defaultdict
import yaml
import json
def redial_config(path,data_type):
    """Load the ReDial YAML config plus its resource files and build the
    processed entity / word knowledge graphs.

    Args:
        path: path to the YAML config file.
        data_type: top-level YAML key naming the dataset section that holds
            the *_path entries used below.
    Returns:
        dict: the raw YAML keys plus 'movie_ids' and a 'graph' section whose
        'entity_kg' / 'word_kg' entries are the processed graphs.
    """
    def _entity_kg_process(opt, SELF_LOOP_ID=185):
        # Build an undirected edge list (with self loops), keep only
        # relations occurring more than 1000 times, and renumber those
        # relations densely.
        # NOTE(review): SELF_LOOP_ID=185 is dataset-specific - confirm.
        edge_list = []  # [(entity, entity, relation)]
        for entity in range(opt['n_entity']):
            if str(entity) not in opt['entity_kg']:
                continue
            edge_list.append((entity, entity, SELF_LOOP_ID))  # add self loop
            for tail_and_relation in opt['entity_kg'][str(entity)]:
                if entity != tail_and_relation[1] and tail_and_relation[0] != SELF_LOOP_ID:
                    # Add both directions so the graph is undirected.
                    edge_list.append((entity, tail_and_relation[1], tail_and_relation[0]))
                    edge_list.append((tail_and_relation[1], entity, tail_and_relation[0]))
        relation_cnt, relation2id, edges, entities = defaultdict(int), dict(), set(), set()
        for h, t, r in edge_list:
            relation_cnt[r] += 1
        for h, t, r in edge_list:
            # Frequency threshold prunes rare relations.
            if relation_cnt[r] > 1000:
                if r not in relation2id:
                    relation2id[r] = len(relation2id)
                edges.add((h, t, relation2id[r]))
                entities.add(opt['id2entity'][h])
                entities.add(opt['id2entity'][t])
        return {
            'edge': list(edges),
            'n_relation': len(relation2id),
            'entity': list(entities)
        }
    def _word_kg_process(opt):
        # Parse the tab-separated concept KG file into symmetric word edges.
        edges = set()  # {(entity, entity)}
        entities = set()
        with open(opt['word_kg'],'r') as f:
            for line in f:
                kg = line.strip().split('\t')
                # Columns 1 and 2 hold "word/..." tokens; keep the word part.
                entities.add(kg[1].split('/')[0])
                entities.add(kg[2].split('/')[0])
                e0 = opt['word2id'][kg[1].split('/')[0]]
                e1 = opt['word2id'][kg[2].split('/')[0]]
                edges.add((e0, e1))
                edges.add((e1, e0))
        # edge_set = [[co[0] for co in list(edges)], [co[1] for co in list(edges)]]
        return {
            'edge': list(edges),
            'entity': list(entities)
        }
    # Load the YAML config, then each resource file it references.
    config_dict = dict()
    with open(path, 'r', encoding='utf-8') as f:
        config_dict.update(yaml.safe_load(f.read()))
    with open(config_dict[data_type]['movie_ids_path'],'r') as f:
        movie_ids = json.load(f)
    config_dict['movie_ids'] = movie_ids
    with open(config_dict[data_type]['entity2id_path'],'r') as f:
        entity2id = json.load(f)
    with open(config_dict[data_type]['entity_kg_path'],'r') as f:
        entity_kg = json.load(f)
    with open(config_dict[data_type]['token2id_path'],'r') as f:
        token2id = json.load(f)
    with open(config_dict[data_type]['word2id_path'],'r') as f:
        word2id = json.load(f)
    # Assemble the 'graph' section consumed by the two helpers above.
    config_dict['graph'] = {}
    config_dict['graph']['word_kg'] = config_dict[data_type]['concept_kg_path']
    config_dict['graph']['entity2id'] = entity2id
    config_dict['graph']['token2id'] = token2id
    config_dict['graph']['word2id'] = word2id
    config_dict['graph']['entity_kg'] = entity_kg
    config_dict['graph']['id2entity'] = {idx: entity for entity, idx in entity2id.items()}
    # +1 because ids are assumed to be dense, 0-based indices.
    config_dict['graph']['n_entity'] = max(entity2id.values()) + 1
    config_dict['graph']['n_word'] = max(word2id.values()) + 1
    entity_kg_dict = _entity_kg_process(config_dict['graph'])
    word_kg_dict = _word_kg_process(config_dict['graph'])
    # Replace the raw KGs with the processed versions.
    config_dict['graph']['entity_kg'] = entity_kg_dict
    config_dict['graph']['word_kg'] = word_kg_dict
    return config_dict
18690677704 | from analyzer.SentiWordNetScores import SentiWordNetScores
ADJECTIVE_CLASS_NAME = "adjective"
POSITIVE_CLASS_NAME = "positive"
NEGATIVE_CLASS_NAME = "negative"
NOUN_CLASS_NAME = "noun"
class AdjectiveUse:
    """contains information about an adjective, what it refers to and the context in which it is used

    NOTE(review): `adjective_token`, `noun` and `sentence` appear to be
    spaCy Token/Span-like objects (they expose `.text` and iterate into
    tokens) - confirm against the callers.
    """
    # NOTE(review): this binds the SentiWordNetScores CLASS itself (not an
    # instance) as a shared class attribute - verify that is intended.
    sentiwordnet_scores=SentiWordNetScores
    def __init__(self, adjective_token, noun, review_id, sentence, sentiment_value, negated=False):
        # negated adjectives are stored with a "not " prefix for display
        self.adjective_token = adjective_token
        if negated:
            self.adjective_text = 'not '+ adjective_token.text.lower()
        else:
            self.adjective_text = adjective_token.text.lower()
        self.noun = noun
        self.sentence = sentence
        self.review_id = review_id
        self.sentiment_value = sentiment_value
    def to_html(self):
        """Return the sentence with the adjective and its noun wrapped in
        <span> tags; the adjective's class reflects the sentiment sign.

        Raises:
            ValueError: if the adjective or noun cannot be located.
        """
        html_sentence = self.sentence.text
        try:
            # Locate the adjective case-insensitively in the raw sentence.
            adjective_all_lowercase = self.adjective_token.text.lower()
            adj_start = html_sentence.lower().index(adjective_all_lowercase)
            adj_end = adj_start + len(adjective_all_lowercase)
            if self.sentiment_value>0:
                span_tag=self.open_span_tag(ADJECTIVE_CLASS_NAME, POSITIVE_CLASS_NAME)
            elif self.sentiment_value<0:
                span_tag = self.open_span_tag(ADJECTIVE_CLASS_NAME, NEGATIVE_CLASS_NAME)
            else:
                span_tag = self.open_span_tag(ADJECTIVE_CLASS_NAME)
            html_sentence = html_sentence[:adj_start] + span_tag + \
                            html_sentence[adj_start:adj_end] + self.close_span_tag() + html_sentence[adj_end:]
            # The noun is located AFTER the adjective markup was inserted, so
            # its position is computed against the already-modified string.
            noun_start = self.find_token_position(self.sentence, self.noun, html_sentence)
            # NOTE(review): len(self.noun) assumes the noun object's length
            # equals its character count - confirm for the token type used.
            noun_end = noun_start + len(self.noun)
            html_sentence = html_sentence[:noun_start] + self.open_span_tag(NOUN_CLASS_NAME) + \
                            html_sentence[noun_start:noun_end] + self.close_span_tag() + html_sentence[noun_end:]
            return html_sentence
        except ValueError:
            raise ValueError("noun or adjective not found in the sentence")
    @staticmethod
    def open_span_tag(*span_classes):
        # Build '<span class="a b ">' from the given class names.
        classes = ""
        for span_class in span_classes:
            classes += span_class+" "
        return '<span class="'+classes+'">'
    def find_word_end_position(self, sentence, word):
        # NOTE(review): unimplemented stub - remove or implement.
        pass
    @staticmethod
    def close_span_tag():
        return "</span>"
    def find_token_position(self, noun_sent, noun, html_sent):
        """Find the character offset of the noun token inside html_sent.

        Counts how many tokens before (and including) `noun` contain the
        noun's text, then skips that many occurrences in the HTML string.
        """
        occurence = 0
        for token in noun_sent:
            if noun.text in token.text:
                occurence+=1
            if token == noun:
                break
        if occurence==0:
            raise ValueError('noun not found')
        from_index=0
        # NOTE(review): find(..., from_index+1) starts at offset 1, so a
        # noun at position 0 of the sentence would be skipped - verify.
        while occurence!=0:
            from_index = html_sent.find(noun.text, from_index+1)
            occurence-=1
        return from_index
| MMthesis2019/ReviewAnalyzer | analyzer/AdjectiveUse.py | AdjectiveUse.py | py | 2,941 | python | en | code | 0 | github-code | 36 |
6821894680 |
import pandas as pd
from flask import Flask
from flask import render_template
import logging
import json
import os
import glob
from tools import *
data = loadData()
neg = "marian"
t = "Gorras_rotas"
firsts = [data["negs"][neg]["images"][t][x]["images"][0]
for x in data["negs"][neg]["images"][t].keys()]
# print(firsts)
names = ["test.asd.0.1.png", "test.asd.0.2.png", "test.asd.0.3.png"]
path = "./static/"
# old
def create_excel_from_json(data=None):
    """Flatten the nested `negs` store into rows and write them to data.xlsx.

    Args:
        data: pre-loaded store dictionary; when None it is loaded via
            loadData().  Accepting the argument fixes the existing
            `create_excel_from_json(data)` call site, which previously
            raised TypeError because the function took no parameters.
    """
    if data is None:
        data = loadData()
    # One spreadsheet row per item: negs -> {tipo: {grupo: {index: item}}}.
    rows = []
    for grupo, items in data["negs"].items():
        for tipo, details in items["images"].items():
            for index, item in details.items():
                # NOTE(review): the column named "grupo" receives the item
                # index and "tipo" the inner key - preserved as-is; confirm.
                rows.append([
                    tipo,
                    index,
                    ", ".join(item["images"]),
                    item["alto"],
                    item["largo"],
                    item["ancho"],
                    item["costo"],
                    item["venta_menor"],
                    item["venta_mayor"],
                    item["stock"],
                    item["descripcion"],
                    item["uniqueID"],
                ])
    df = pd.DataFrame(rows, columns=[
        "tipo",
        "grupo",
        "imágenes",
        "alto",
        "largo",
        "ancho",
        "costo",
        "venta_menor",
        "venta_mayor",
        "stock",
        "descripcion",
        "uniqueID",
    ])
    # ExcelWriter.save() was removed in pandas 2.0; the context manager
    # closes (and thereby saves) the workbook.
    with pd.ExcelWriter("data.xlsx", engine="xlsxwriter") as writer:
        df.to_excel(writer, sheet_name="Sheet1", index=False)
create_excel_from_json(data)
neg = "marian"
t = "Gorras_rotas"
firsts = [data["negs"][neg]["images"][t][x]["images"][0]
for x in data["negs"][neg]["images"][t].keys()]
| Paradis4432/wsbot | web/tempTest.py | tempTest.py | py | 2,133 | python | en | code | 0 | github-code | 36 |
35919702620 | import difflib
import redis
from pymongo import MongoClient, ASCENDING
client = MongoClient('mongodb+srv://Alex:goit123@utcluster.zrkwr.mongodb.net/myFirstDatabase?retryWrites=true&w=majority')
def command_assistant():
    """Prompt for a command, suggesting a correction for unknown input.

    Unknown commands are first looked up in a Redis cache of previously
    accepted corrections; on a cache miss, difflib picks the closest known
    command and a confirmed suggestion is stored back in Redis.
    Returns the accepted command string.
    """
    commands = ['add', 'show', 'delete', 'show_all', 'exit', 'update'] # list of commands
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    while True:
        command = str(input('Enter command:\n>>> ')).lower().strip()
        if not command in commands: # prediction logic
            if r.get(command): # checking cache
                print(f"(Cache)Perhaps you mean {(r.get(command)).decode('utf-8')}")
                ans = str(input("Answer (Y/N): ")).lower()
                if ans == "n":
                    print("Command input error, try again")
                    continue
                elif ans == "y":
                    variant = r.get(command).decode('utf-8')
                    break
                # any other answer falls through and re-prompts
            else:
                # No cached suggestion: fuzzy-match against known commands;
                # str(...)[2:-2] strips the "['...']" list wrapping.
                variant = str(difflib.get_close_matches(command, commands, cutoff=0.1, n=1))[2:-2] # prediction realisation
                print(f"Perhaps you mean {variant}")
                answer = str(input("Answer (Y/N): ")).lower()
                if answer == "n":
                    print("Command input error, try again")
                    continue
                elif answer == "y":
                    # remember the accepted correction for next time
                    r.set(command, variant)
                    break
        else:
            # exact match: accept as-is
            variant = command
            break
    return variant
if __name__ == '__main__':
    # Interactive CRUD loop over the MongoDB ContactBook collection.
    with client:
        db = client.myfirst_mongoDB
        print(f'{" "*20}*** Welcome to Personal assistant Contact book DB edition!***')
        print("Commands:\n - add;\n - show;\n - show_all;\n - delete;\n - update;\n - exit\n")
        while True:
            try:
                answer = command_assistant()
            except (ConnectionRefusedError, redis.exceptions.ConnectionError, ConnectionError) as Error:
                # Redis unavailable: degrade gracefully to plain input
                # without command prediction.
                print("Error! Connection problems to Redis. App is working without command prediction")
                answer = str(input('Enter command:\n>>> ')).lower().strip()
            if answer == 'add':
                name = input('Enter name: ')
                # Names are unique keys: refuse duplicates.
                if db.ContactBook.find_one({'name': name}):
                    print(f"The record with name '{name}' is already exist. Try another name or update the one")
                    continue
                phone = input('Enter phone: ')
                email = input('Enter email: ')
                db.ContactBook.insert_one({'name': name, 'email': email, 'phone': phone})
                print('New record successfully added')
                continue
            elif answer == 'show_all':
                for rec in db.ContactBook.find():
                    print(f'name = {rec["name"]}, phone = {rec["phone"]}, email = {rec["email"]}')
                continue
            elif answer == 'delete':
                name = input('Enter name: ')
                if db.ContactBook.find_one({'name': name}):
                    db.ContactBook.delete_one({'name': name})
                    print(f'Record with name "{name}" has been successfully deleted')
                    continue
                else:
                    print("There is no such record in DB")
                    continue
            elif answer == 'show':
                name = input('Enter name: ')
                result = db.ContactBook.find_one({'name': name})
                if result:
                    print(f'name = {result["name"]}, phone = {result["phone"]}, email = {result["email"]}')
                else:
                    print("There is no such record in DB")
                continue
            elif answer == 'update':
                name = input('Enter name: ')
                if db.ContactBook.find_one({'name': name}):
                    print("The record exists in DB. Enter a new data:")
                    phone = input('Enter phone: ')
                    email = input('Enter email: ')
                    db.ContactBook.update_one({'name': name},{'$set':{'name': name, 'email': email, 'phone': phone}})
                    print(f'Record "{name}" has been successfully updated')
                    continue
                else:
                    print("There is no such record in DB. Try another command")
                    continue
            elif answer == 'exit':
                break
            else:
                print("Command input error. Try correct command again")
                continue
        print("Good bye!")
| AlexUtchenko/goit-python | WEB10/PA_Mongo_Redis.py | PA_Mongo_Redis.py | py | 4,755 | python | en | code | 0 | github-code | 36 |
30722582331 | #baekjoon_23757_2021고려대학교 프로그래밍 경시대회
#=== import module ===#
import heapq
import sys
input = sys.stdin.readline
#=== variable declare ===#
#=== Function define ===#
#=== main function ===#
# N presents, M wishes; each wish is served from the currently largest
# present (greedy via a max-heap).  Print 1 if no wish is disappointed.
N, M = map(int, input().split())
presents = list(map(int, input().split()))
wishNum = list(map(int, input().split()))

# heapq is a min-heap, so store negated values to simulate a max-heap.
q = [-p for p in presents]
heapq.heapify(q)

disappoint = False
for idx in range(M):
    biggest = -heapq.heappop(q)
    if biggest < wishNum[idx]:
        disappoint = True
        break
    # Serve the wish and return the remainder to the heap.
    heapq.heappush(q, -(biggest - wishNum[idx]))

print(0 if disappoint else 1)
28986020651 | EXPECTED_TEST_ANSWER_PART1 = [15]
EXPECTED_TEST_ANSWER_PART2 = [12]
OP_MOVES = ["A", "B", "C"]
YOUR_MOVES = ["X", "Y", "Z"]


def get_shape_score(move):
    """Points for the shape itself: rock 1, paper 2, scissors 3."""
    points = {"A": 1, "X": 1, "B": 2, "Y": 2}
    return points.get(move, 3)


def get_game_score(op_move, your_move):
    """Points for the round outcome: 3 for a tie, 6 for a win, 0 for a loss.

    The offset between the two move indices (mod 3) fully determines the
    outcome: 0 = tie, 1 = win, anything else = loss.
    """
    offset = (YOUR_MOVES.index(your_move) - OP_MOVES.index(op_move)) % 3
    if offset == 0:
        return 3
    if offset == 1:
        return 6
    return 0


def get_shape(op_move, goal):
    """Pick the move that achieves the desired outcome (goal).

    Goal 'X' means lose, 'Y' means tie, anything else means win; the answer
    is one step behind, equal to, or one step ahead of the opponent's move
    in the cyclic move order.
    """
    shift = {"X": -1, "Y": 0}.get(goal, 1)
    return YOUR_MOVES[(OP_MOVES.index(op_move) + shift) % 3]
def run(data):
    """Part 1: each row is '<opponent> <you>'; total shape + outcome points."""
    return sum(
        get_shape_score(moves[1]) + get_game_score(moves[0], moves[1])
        for moves in (row.split(" ") for row in data)
    )
def run_p2(data):
    """Part 2: the second column is the desired outcome, not a move."""
    total = 0
    for row in data:
        moves = row.split(" ")
        # Translate the desired result into a concrete shape, then score
        # exactly like part 1.
        shape = get_shape(moves[0], moves[1])
        total += get_shape_score(shape) + get_game_score(moves[0], shape)
    return total
| SocialFinanceDigitalLabs/AdventOfCode | solutions/2022/pughmds/day02/__main__.py | __main__.py | py | 1,808 | python | en | code | 2 | github-code | 36 |
19066422289 | name = ""
contact = ""
enrollno = ""
username = ""
quiz_choice = ""
questions = {
"python": [
{
'question': 'What is Python?',
'options': ['A type of snake', 'A programming language', 'A bird', 'A dessert'],
'correct_answer': 'A programming language',
},
{
'question': 'Which of the following is true about Python?',
'options': ['It is statically typed', 'It is dynamically typed', 'It is compiled', 'It is platform-dependent'],
'correct_answer': 'It is dynamically typed',
},
],
"java": [
{
'question': 'What is Java?',
'options': ['A programming language', 'A type of coffee', 'An island', 'A fruit'],
'correct_answer': 'A programming language',
},
{
'question': 'What is the main purpose of Java?',
'options': ['Web development', 'Game development', 'Mobile app development', 'Platform-independent programming'],
'correct_answer': 'Platform-independent programming',
},
],
"c": [
{
'question': 'What is C?',
'options': ['A programming language', 'A vitamin', 'A type of car', 'A musical note'],
'correct_answer': 'A programming language',
},
{
'question': 'C is known for its:',
'options': ['Simplicity', 'Complexity', 'Colorful syntax', 'Object-oriented features'],
'correct_answer': 'Simplicity',
},
],
"c++": [
{
'question' : 'Who invented C++?',
'options' : ['Dennis Ritchie','Ken Thompson','Brian Kernighan','Bjarne Stroustrup'],
'correct_answer' : 'Bjarne Stroustrup'
},
{
'question' : 'Which of the following user-defined header file extension used in c++?',
'options' : ['hg','cpp','py','c'],
'correct_answer' : 'cpp'
}
]
}
def welcome_message():
    """Collect the user's details and quiz choice.

    Updates the module-level user fields and returns the chosen quiz number
    as a string ("1".."4").
    """
    # BUG FIX: the original declared `global name, contact, email, quiz_choice`,
    # so `enrollno` and `username` were assigned as locals and the module-level
    # fields were never updated (and `email` does not exist at module level).
    global name, contact, enrollno, username, quiz_choice
    print("Welcome to the Quiz Application!")
    print("Please fill in your details:")
    name = input("Name: ")
    contact = input("Contact: ")
    enrollno = input("Enrollnment: ")
    username = input("Username: ")
    print("Choose a quiz:")
    print("1. Python")
    print("2. Java")
    print("3. C")
    print("4. C++")
    quiz_choice = input("Enter the number of your choice (1/2/3/4): ")
    return quiz_choice
def start_quiz(selected_quiz):
    """Run the quiz named by `selected_quiz` and print the user's score.

    Each question is shown with numbered options; a correct option number
    scores one point.  Invalid input counts as a wrong answer.
    """
    global name
    quiz = selected_quiz.lower()
    score = 0
    # BUG FIX: the original only bound `quiz_questions` inside the
    # `if quiz in questions:` branch but referenced it in the final print,
    # so an unknown quiz name raised NameError.
    quiz_questions = questions.get(quiz, [])
    if not quiz_questions:
        print(f"No questions available for {quiz}.")
        return
    for q in quiz_questions:
        print(q['question'])
        for i, option in enumerate(q['options'], 1):
            print(f"{i}. {option}")
        user_answer = input("Your answer (enter the option number): ")
        try:
            user_answer = int(user_answer)
            if 1 <= user_answer <= len(q['options']):
                if q['options'][user_answer - 1] == q['correct_answer']:
                    score += 1
            else:
                print("Invalid option number. Please choose a valid option.")
        except ValueError:
            print("Invalid input. Please enter the option number.")
    print(f"Hello {name}, your {quiz} marks is {score}/{len(quiz_questions)}")
# Main loop: register the user, dispatch to the chosen quiz, and offer to
# logout after each round.
while True:
    quiz_choice = welcome_message()
    if quiz_choice == "1":
        start_quiz("python")
    elif quiz_choice == "2":
        start_quiz("java")
    elif quiz_choice == "3":
        start_quiz("c")
    elif quiz_choice == "4":
        start_quiz("c++")
    else:
        # Invalid selection: restart registration without the logout prompt.
        print("Invalid choice. Please select a valid quiz.")
        continue
    choice = input("Do you want to logout? (yes/no): ")
    if choice.lower() == "yes":
        break
5238797489 | from sklearn.tree import DecisionTreeClassifier
from DataUtils import getTokens, modelfile_path, vectorfile_path
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
def getDataFromFile(filename):
    """Read URLs from `filename` (one per line) and return them as a list.

    Each line is stripped of surrounding whitespace/newlines.
    """
    # The original also built an unused list `y`; it has been removed.
    with open(filename, "r", encoding="utf8") as f:
        inputurls = [line.strip() for line in f]
    print("read ok!")
    return inputurls
def loadModel():
    """Load the pickled classifier and its fitted vectorizer from disk.

    Returns:
        (model, vector): the trained model and the fitted text vectorizer.
    """
    # `with` closes the files automatically; the original's extra
    # f.close() calls after each block were redundant and are removed.
    # NOTE: pickle.load must only be used on trusted files.
    with open(modelfile_path, 'rb') as f1:
        model = pickle.load(f1)
    with open(vectorfile_path, 'rb') as f2:
        vector = pickle.load(f2)
    return model, vector
# Driver: load the trained model + vectorizer, classify the unlabeled URLs
# and write "url,label" lines to the labeled output file.
model, vector = loadModel()
all_urls = getDataFromFile("./data/test(unlabeled).csv")
x = vector.transform(all_urls)
y_predict = model.predict(x)
print(len(all_urls))
print(len(y_predict))
with open("./data/test(labeled).csv", "w", encoding="utf8") as w:
    for link, label in zip(all_urls, y_predict):
        w.write(link+","+label+"\n")
15680325535 | import random
number = random.randint(1, 10)
tries = 3
def get_input(prompt_text, type, **kwargs):
    """Prompt until the user supplies a value convertible to `type` and,
    when a `lov` (list of values) keyword is given, contained in it.

    Returns the converted value.
    """
    user_res = input(prompt_text)
    # BUG FIX: the original tested `type(user_res) == type`, which compares
    # the CONVERTED value to the type object (always False) and let a failed
    # conversion raise an uncaught ValueError.  Convert inside try/except
    # instead.
    try:
        user_res = type(user_res)
    except (TypeError, ValueError):
        print(f"Your response is incorrect. Please provide a value of type {type}")
        # BUG FIX: the original passed `kwargs` positionally, which raised
        # TypeError on the recursive call; forward it as **kwargs.
        return get_input(prompt_text, type, **kwargs)
    lov = kwargs.get("lov")
    if lov is not None and user_res not in lov:
        print(f"Value provided is not within the acceptable values {lov}")
        return get_input(prompt_text, type, **kwargs)
    return user_res
# Main loop: up to `tries` attempts to guess the random number, with
# higher/lower hints after each wrong guess.
# NOTE(review): the user-facing strings contain typos ("guesed", "qreater",
# "lessser") - left unchanged here; fix in a behavior-changing edit.
while tries > 0:
    user_guess = int(
        get_input("Guess a number from 1 to 10: ", int, lov=[i for i in range(1, 11)])
    )
    if user_guess == number:
        print(f"You guesed it right. The correct number is {number}")
        break
    elif user_guess > number:
        print(f"The number you guessed is qreater than what we are looking for...")
    else:
        print(
            f"The number you guessed is lessser in comparison to what we are looking for .."
        )
    # Only wrong guesses consume an attempt.
    tries -= 1
    print(f"You have another {tries} {'try' if tries == 1 else 'tries'}")
print("======================")
print()
| csukel/rest-api-flask-and-python | python-refresher/11_loops/code.py | code.py | py | 1,188 | python | en | code | 0 | github-code | 36 |
8472917150 | #!/usr/bin/python3
##
##
import os,sys,re
import requests
import time
# Global variables
BASE_URL = "https://api.thousandeyes.com"
USERNAME = "your_username"
PASSWORD = "your_password"
API_TOKEN = None
def get_api_token():
    """Obtain a new API token via username/password login.

    On success the module-level API_TOKEN is replaced; on failure the old
    value is left untouched and the error is printed.
    """
    global API_TOKEN
    auth_endpoint = BASE_URL + "/v6/auth/login"
    data = {
        "username": USERNAME,
        "password": PASSWORD
    }
    try:
        response = requests.post(auth_endpoint, json=data)
        # raise_for_status turns HTTP 4xx/5xx into RequestException.
        response.raise_for_status()
        token = response.json().get("token")
        if token:
            API_TOKEN = token
            print("New API token obtained.")
        else:
            print("Failed to obtain API token.")
    except requests.exceptions.RequestException as e:
        print("Error occurred during API token retrieval:", e)
def revoke_api_token():
    """Revoke the current API token via the logout endpoint.

    Clears the module-level API_TOKEN on success; no-op when no token is
    held.  NOTE(review): on a failed revocation the stale token is kept.
    """
    global API_TOKEN
    if API_TOKEN:
        revoke_endpoint = BASE_URL + "/v6/auth/logout"
        headers = {
            "Authorization": "Bearer " + API_TOKEN
        }
        try:
            response = requests.post(revoke_endpoint, headers=headers)
            response.raise_for_status()
            print("API token revoked.")
            API_TOKEN = None
        except requests.exceptions.RequestException as e:
            print("API token revocation failed:", e)
def test_api_token():
    """Check that the held API token still works against /v6/account.

    On a failed check a fresh token is requested immediately.
    NOTE(review): when no token is held, this only prints a message and
    does NOT fetch one - confirm that is intended.
    """
    if API_TOKEN:
        test_endpoint = BASE_URL + "/v6/account"
        headers = {
            "Authorization": "Bearer " + API_TOKEN
        }
        try:
            response = requests.get(test_endpoint, headers=headers)
            response.raise_for_status()
            print("API token is still valid.")
        except requests.exceptions.RequestException as e:
            print("API token test failed:", e)
            # Token rejected/expired: replace it right away.
            get_api_token()
    else:
        print("API token is not available. Please obtain a new token.")
def main():
    """Endless monitor loop: validate, then rotate (revoke + reissue) the token."""
    while True:
        test_api_token()
        time.sleep(300) # Sleep for 5 minutes (300 seconds)
        revoke_api_token()
        get_api_token()
        test_api_token()
        time.sleep(120) # Sleep for 2 minutes (120 seconds)
if __name__ == "__main__":
    # Obtain an initial token before entering the monitor loop.
    get_api_token()
    main()
##
##
##
## In this updated script, I've added two additional functions:
##
## revoke_api_token(): Revokes the current API token by making a POST request to the /auth/logout endpoint.
## main(): The main function now includes the revocation process. After testing the API token every 5 minutes, it revokes the token, obtains a new one, and tests the new token again.
##
| babywyrm/sysadmin | pyth3/api/check_revoke_.py | check_revoke_.py | py | 2,547 | python | en | code | 10 | github-code | 36 |
import boto3
from pprint import pprint
import time
# Start a specific EC2 instance in us-east-1 and poll until it is running.
aws_console=boto3.session.Session()
ec2_console=aws_console.resource('ec2', region_name='us-east-1')
# #ec2_console.start_instances(instance_id=['i-06186ed7e182cc28b'])
# response=ec2_console.describe_instances(InstanceIds=['i-06186ed7e182cc28b'])
# #pprint(response)
# #print(response['Reservations'])
# for each_iteam in (response['Reservations']):
#     print("########################")
#     pprint(each_iteam)
my_inst=ec2_console.Instance("i-06186ed7e182cc28b")
print("Starting given instance")
my_inst.start()
while True:
    # Re-fetch the Instance object each pass so state['Name'] is current.
    my_inst_obj=ec2_console.Instance("i-06186ed7e182cc28b")
    print(f"The current state of ec2: {my_inst_obj.state['Name']}")
    if my_inst_obj.state['Name'] == "running":
        break
    print("Wating instance to be up")
    time.sleep(5)  # poll every 5 seconds
print("instance is up and running")
| SachinPitale/AWS_Lambda | 05-waiters/01-ec2-status.py | 01-ec2-status.py | py | 868 | python | en | code | 0 | github-code | 36 |
15317046420 | #!/usr/bin/env python3
# encoding: utf-8
import string
CODE = """
Cbcq Dgyk!
Dmeybh kce cew yrwyg hmrylyaqmr:
rylsjb kce y Nwrfml npmepykmxyqg lwcjtcr!
Aqmimjjyi:
Ynyb
"""


def main():
    """Decode CODE with a Caesar shift of +2 and print the plain text.

    Letter case is preserved; characters outside a-z/A-Z pass through
    unchanged.
    """
    shift = 2
    lower = string.ascii_lowercase
    rotated = lower[shift:] + lower[:shift]
    # One translation table handles both cases; everything else is copied.
    table = str.maketrans(lower + lower.upper(), rotated + rotated.upper())
    print(CODE.translate(table))


if __name__ == "__main__":
    main()
15166686250 | from matplotlib import pyplot as plot
plot.rcParams [ "savefig.facecolor" ] = "w"  # white background on saved figures
plot.rcParams [ "savefig.edgecolor" ] = "b"  # blue edge on saved figures
# Module state: filename prefix for saved figures and a running file counter.
lab_value , title_numb = 'Lab_None' , 0
def setLab(text=None):
    """Update the module-level filename prefix used by save().

    A falsy *text* keeps the current prefix; the active prefix is echoed
    to stdout either way.
    """
    global lab_value
    if text:
        lab_value = text
    print('lab value:', lab_value)
def save () :
    """Save every open figure to '<lab_value>_<n>.png', then clear/close it.

    The counter title_numb advances once per figure, so each figure gets
    its own file name.
    """
    global title_numb , lab_value
    for i in plot.get_fignums () :
        fig = plot.figure ( i )
        plot.savefig ( '{}_{}.png'.format ( lab_value , str ( title_numb ) ) )
        print ( '{}_{}.png'.format ( lab_value , str ( title_numb ) ) )
        fig.clear ()
        plot.close ( fig )
        title_numb += 1
def plot_series ( time = [] , series = [] , format = "-" , start = 0 , end = None ,
                  lr_with_var_value = [] , history = {} , xmin = None , xmax = None , ymin = None , ymax = None ,
                  title = None , xlabel = None , ylabel = None , labels = [] ) :
    """Plot one series (or a tuple of series) over *time* and save the figure.

    When lr_with_var_value, a Keras-style *history* with a 'loss' key and all
    four axis bounds are supplied, a semilog learning-rate/loss curve is
    overlaid before saving.

    NOTE(review): the mutable defaults ([] and {}) are only read here, so they
    are harmless, but they are shared across calls -- confirm before mutating.
    """
    plot.figure ( figsize = ( 10 , 6 ) )
    label = ( labels [ 0 ] if ( len ( labels ) and labels [ 0 ] ) else None )
    # A tuple of series gets one plotted line per element, each with its label.
    if len ( time ) and len ( series ) and ( type ( series ) is tuple ) :
        for i , series_num in enumerate ( series ) :
            label = ( labels [ i ] if ( len ( labels ) and labels [ i ] ) else None )
            plot.plot ( time [ start : end ] , series_num [ start : end ] , format , label = label )
    elif len ( time ) and len ( series ) : plot.plot ( time [ start : end ] , series [ start : end ] , format , label = label )
    plot.title ( str ( title ) )
    plot.legend ( )
    plot.xlabel ( "Time" if not xlabel else xlabel )
    plot.ylabel ( "Value" if not ylabel else ylabel )
    plot.grid ( True )
    if len ( lr_with_var_value ) and ( 'loss' in history.history ) and (
        xmin != None and xmax != None and ymin != None and ymax != None ) :
        plot.semilogx ( lr_with_var_value , history.history [ "loss" ] )
        plot.tick_params ( 'both' , length = 10 , width = 1 , which = 'both' )
        plot.axis ( [ xmin , xmax , ymin , ymax ] )
    save ()
| AmalLight/deepL_RL | saveFigure.py | saveFigure.py | py | 2,204 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/python
import main.distribution as worker
if __name__ == '__main__':
    # One monitoring pass: probe, compare against baseline, send notifications.
    server = worker.Server()
    server.check_probe()
    availability, performance = server.get_warning_from_baseline()
    server.notify_me(data=availability, type='availability')
    server.notify_me(data=performance, type='performance')
3650201755 | import random
from users import users
from random import randrange
from datetime import timedelta,datetime
#LEVEL TIMESTAMP METHOD API STATUS USERID
def random_date(start=None, end=None):
    """Return a random datetime between *start* and *end*, as a string.

    Defaults are "120 days ago" and "now", evaluated at CALL time.  (The
    original evaluated them once at import time, so every call in a
    long-running process reused the same stale window -- a classic
    default-argument bug.)

    The result has one-second granularity; sub-second parts of the window
    are ignored.  Raises ValueError when the window is empty.
    """
    if start is None:
        start = datetime.now() - timedelta(days=120)
    if end is None:
        end = datetime.now()
    delta = end - start
    # Whole seconds in the window; microseconds are deliberately dropped.
    int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
    random_second = randrange(int_delta)
    return str(start + timedelta(seconds=random_second))
api_list = [
    '/api/cart','/api/order','/api/products/{0}/coupons','/api/users/profile','/api/login','/api/logout'
]
status_codes = [
    500,200,202,400,404,301,308,401,403,405,408,409,502
]
method = ['GET', 'POST', 'DELETE','PUT']
# Max valid indices (random.randint is inclusive on both ends).
statuscodelength = len(status_codes) - 1
apilistlength = len(api_list) - 1
userslength = len(users) - 1
methodlength=len(method) - 1
logs = []
# Generate 500k synthetic log rows: LEVEL TIMESTAMP METHOD API STATUS USERID.
for i in range(0,500000):
    randomuser=users[random.randint(0,userslength)]
    randomstatus=status_codes[random.randint(0,statuscodelength)]
    randomapi=api_list[random.randint(0,apilistlength)]
    randommethod=method[random.randint(0,methodlength)]
    if randomapi == '/api/products/{0}/coupons':
        # Substitute a random product id into the templated endpoint.
        randomapi = '/api/products/{0}/coupons'.format(random.randint(1000,200000))
    if randomstatus in [200,202]:
        randomloglevel='INFO'
    elif randomstatus in [400,404,301,308,401,403,405,408,409]:
        randomloglevel='WARNING'
    else:
        # Remaining codes (500, 502) are logged as errors.
        randomloglevel='ERROR'
    logrow='{0} {1} {2} {3} {4} {5}'.format(randomloglevel,random_date(),randommethod,randomapi,randomstatus,randomuser['id'])
    logs.append(logrow)
with open('server.log', 'w') as f:
    for line in logs:
        f.write(f"{line}\n")
15903708519 | import os
import numpy as np
path = os.path.dirname(os.path.realpath(__file__))  # directory of this script; input.txt lives beside it
def bingo(numbers: list, boards: list, play2loose: bool = False):
    """Play bingo over *boards* using the given draw order.

    Boards are mutated in place: each drawn number found on a board is
    replaced with -1.  With play2loose=False the FIRST board to win is
    returned as (board_index, unmarked_sum * winning_number); with
    play2loose=True the LAST board to win is returned instead.  If no
    board ever completes a row or column, (None, None) is returned.
    """
    def mark(number):
        # Replace every occurrence of *number* on every board with -1.
        for board in boards:
            for row in board:
                for col, cell in enumerate(row):
                    if cell == number:
                        row[col] = -1

    def score(board):
        # A fully marked line sums to exactly -5 (marks are -1, real
        # values are >= 0), so a -5 row/column sum means the board won.
        row_done = any(sum(row) == -5 for row in board)
        col_done = any(sum(row[col] for row in board) == -5 for col in range(5))
        if not (row_done or col_done):
            return None
        return sum(cell for row in board for cell in row if cell >= 0)

    finished = []  # indices of boards that already won (play-to-lose mode)
    for number in numbers:
        mark(number)
        for idx, board in enumerate(boards):
            total = score(board)
            if total is None:
                continue
            if not play2loose:
                return (idx, total * number)
            if len(finished) < len(boards) - 1:
                if idx not in finished:
                    finished.append(idx)
            elif idx not in finished:
                # Every other board has won already: this is the loser.
                return (idx, total * number)
    return (None, None)
# Advent of Code 2021 day 4 sample data; expected answers are 4512 (part A)
# and 1924 (part B).
test_numbers = [7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1]
test_boards = [
    [
        [22,13,17,11,0],
        [8,2,23,4,24],
        [21,9,14,16,7],
        [6,10,3,18,5],
        [1,12,20,15,19],
    ],
    [
        [3,15,0,2,22],
        [9,18,13,17,5],
        [19,8,7,25,23],
        [20,11,10,24,4],
        [14,21,16,12,6],
    ],
    [
        [14,21,17,24,4],
        [10,16,15,9,19],
        [18,8,23,26,20],
        [22,11,13,6,5],
        [2,0,12,3,7],
    ]
]
numbers = []
boards = []
# input.txt: first line is the comma-separated draw order, then blank-line
# separated 5x5 boards.
with open(path+"/input.txt") as file:
    lines = file.readlines()
    numbers = [int(i) for i in lines.pop(0).strip().split(",")]
    while len(lines)>=5:
        lines.pop(0)
        board = []
        for i in range(5):
            row = [int(i) for i in lines.pop(0).strip().split()]
            board.append(row)
        boards.append(board)
# NOTE: bingo() mutates its boards in place, so the part-B calls below replay
# over already-marked boards; re-marking the same numbers is idempotent.
assert bingo(test_numbers, test_boards) == (2, 4512), "Function is wrong"
print("Part A:", bingo(numbers, boards))
assert bingo(test_numbers, test_boards, play2loose=True) == (1, 1924), "Function is wrong"
print("Part B:", bingo(numbers, boards, play2loose=True))
| coolafabbe/AdventOfCode2021 | Mikel/Day4/main.py | main.py | py | 2,792 | python | en | code | 0 | github-code | 36 |
7228924054 | import json
import logging
import os
from uuid import uuid4
from sqlalchemy import and_
from log import Msg
from helper import Now, model_to_dict, Http_error, value, check_schema
from .model import Event
from user.controller import get_profile
def add(data, username, db_session):
    """Create an Event from *data* and add it to the session.

    Requires 'action', 'target', 'entity_name' and 'entity_id' keys in
    *data* (check_schema raises otherwise).  The event is created unseen,
    with a fresh uuid and the current timestamp.  The session is only
    add()-ed to here -- presumably the caller commits; verify upstream.
    """
    logging.info(Msg.START)
    required_data = ['action', 'target', 'entity_name', 'entity_id']
    check_schema(required_data, data.keys())
    logging.debug(Msg.SCHEMA_CHECKED)
    model_instance = Event()
    model_instance.creator = username
    model_instance.id = str(uuid4())
    model_instance.creation_date = Now()
    model_instance.target = data.get('target')
    model_instance.action = data.get('action')
    model_instance.entity_id = data.get('entity_id')
    model_instance.entity_name = data.get('entity_name')
    model_instance.seen = False
    logging.debug(Msg.DATA_ADDITION + " || Data :" + json.dumps(data))
    db_session.add(model_instance)
    logging.debug(Msg.DB_ADD)
    logging.info(Msg.END)
    return model_instance
def get_events(data, db_session, username):
    """Return events targeted at *username* as dicts with embedded creator profiles.

    *data* may carry 'time' (default: now), 'count_number' (default: 50)
    and 'scroll' ('down' pages older than 'time'; anything else pages
    newer).  Every returned event is flagged seen=True; persisting that
    flag relies on the caller committing the session -- confirm upstream.
    """
    if data.get('time') is None:
        data['time'] = Now()
    if data.get('count_number') is None:
        data['count_number'] = 50
    final_result = []
    logging.debug(Msg.GET_ALL_REQUEST + 'Events...')
    logging.info(Msg.START + 'getting events for user = {}'.format(username))
    logging.debug(Msg.MODEL_GETTING)
    if data.get('scroll') == 'down':
        result = db_session.query(Event).filter(and_(Event.target == username,
                                                     Event.creation_date < data.get(
                                                         'time'))).order_by(
            Event.creation_date.desc()).limit(data.get('count_number')).all()
    else:
        result = db_session.query(Event).filter(and_(Event.target == username,
                                                     Event.creation_date >
                                                     data.get(
                                                         'time'))).order_by(
            Event.creation_date.desc()).limit(data.get('count_number')).all()
    for event in result:
        event.seen = True
        # Attach the creator's profile, minus the password field.
        event_creator = get_profile(event.creator, db_session)
        creator = model_to_dict(event_creator)
        del creator['password']
        new_event = model_to_dict(event)
        new_event['creator'] = creator
        final_result.append(new_event)
    logging.debug(Msg.GET_SUCCESS)
    logging.info(Msg.END)
    return final_result
def get_new_events(db_session, data, username):
    """Return unseen events targeted at *username*, paged by creation date.

    'scroll' is required in *data*; 'time' defaults to now and
    'count_number' to 50.  Unlike get_events(), the raw Event rows are
    returned and the seen flag is NOT touched.
    """
    logging.info(Msg.START)
    required = ['scroll']
    check_schema(required, data.keys())
    if data.get('time') is None:
        data['time'] = Now()
    if data.get('count_number') is None:
        data['count_number'] = 50
    logging.debug(Msg.GET_ALL_REQUEST + 'new unread Events...')
    if data.get('scroll') == 'down':
        result = db_session.query(Event).filter(
            and_(Event.target == username, Event.seen == False)).filter(
            Event.creation_date < data.get('time')).order_by(
            Event.creation_date.desc()).limit(data.get('count_number')).all()
    else:
        result = db_session.query(Event).filter(
            and_(Event.target == username, Event.seen == False)).filter(
            Event.creation_date > data.get('time')).order_by(
            Event.creation_date.desc()).limit(data.get('count_number')).all()
    logging.debug(Msg.GET_SUCCESS)
    logging.info(Msg.END)
    return result
def get_new_events_count(db_session, username):
    """Count the events addressed to *username* that are still unseen.

    Returns {'count': <int>} so the result serializes directly to JSON.
    """
    logging.info(Msg.START)
    logging.debug(Msg.GET_ALL_REQUEST + 'the count of unread Events...')
    unseen_total = db_session.query(Event).filter(
        and_(Event.target == username, Event.seen == False)).count()
    logging.debug(Msg.GET_SUCCESS)
    logging.info(Msg.END)
    return {'count': int(unseen_total)}
| nsmseifi/Bellezza | event/controller.py | controller.py | py | 3,930 | python | en | code | 0 | github-code | 36 |
class NameTooShortError(Exception):
    """Raised when the e-mail local part is not longer than 4 characters."""

    def __init__(self, message="Name must be more than 4 characters"):
        super().__init__(message)
        self.message = message
class MustContainAtSymbolError(Exception):
    """Raised when an e-mail address has no '@' separator."""

    def __init__(self, message="Email must contain @"):
        super().__init__(message)
        self.message = message
class InvalidDomainError(Exception):
    """Raised when the e-mail top-level domain is not in the allowed set."""

    def __init__(self, message="Domain must be one of the following: .com, .bg, .org, .net"):
        super().__init__(message)
        self.message = message
def validate_name(email):
    """Raise NameTooShortError unless the local part (text before the
    first '@') is longer than 4 characters."""
    local_part = email.partition("@")[0]
    if len(local_part) <= 4:
        raise NameTooShortError("Name must be more than 4 characters")
def validate_at_symbol(email):
    """Raise MustContainAtSymbolError when *email* contains no '@'."""
    if email.count("@") == 0:
        raise MustContainAtSymbolError("Email must contain @")
def validate_domain(email, valid_domains):
    """Raise InvalidDomainError unless the text after the last '.' is one
    of *valid_domains*."""
    tld = email.rsplit(".", 1)[-1]
    if tld not in valid_domains:
        raise InvalidDomainError("Domain must be one of the following: .com, .bg, .org, .net")
# Read addresses until "End", running all three validators on each line.
while True:
    line = input()
    valid_domains = ("com", "net", "bg", "org")  # rebuilt every pass; could be hoisted
    if line == "End":
        break
    # NOTE(review): validate_name runs before validate_at_symbol, so an
    # address without '@' is length-checked as a whole -- confirm this
    # ordering matches the exercise specification.
    validate_name(line)
    validate_at_symbol(line)
    validate_domain(line, valid_domains)
22811929801 | from flask import Flask, render_template
from gevent.pywsgi import WSGIServer
from strategy_thread import StrategyThread
import atexit
from apscheduler.schedulers.background import BackgroundScheduler
from copy import deepcopy
import time
import pandas as pd
###########################################################
### Static Variables
###########################################################
# Column order for the positions table rendered on the dashboard.
cols = ["Ticker", "Direction", "Status", "State", "Last Update", "Candle Size",
		"Avg Filled Price", "Take Profit Price", "Soft Stop Price", "Initiated Time",
		"Execution Time", "Execution Logic", "Drawdown", "Run Up", "Trade Length",
		"Quantity", "Filled Position"]
app = Flask(__name__)
###########################################################
### UI Events
###########################################################
def get_table():
    """Render the current trades as an HTML table (empty table when none).

    NOTE(review): *start* is assigned but never used -- leftover timing code.
    """
    if len(strat.strategy.manager.trades) == 0:
        return pd.DataFrame(columns=cols).to_html()
    start = time.time()
    trades = []
    for ticker in strat.strategy.manager.trades:
        trades.append(strat.strategy.manager.trades[ticker].simple_view())
    df = pd.DataFrame(trades)
    df = df[cols]  # enforce the dashboard column order
    df.sort_values('Status', inplace=True)
    return df.to_html(classes=["table", "table-hover", "thead-dark"])
def get_health():
    """Map manager/scanner data codes and API connectivity to status colors.

    colors = [green, yellow, red].  NOTE(review): per the inline code
    comments the manager emits 2103/2104/2108 and the scanner
    2105/2106/2107; the index arithmetic sends 2103 and 2105 to
    colors[-1] (red) via negative indexing -- confirm that is intended.
    """
    colors = ["#14ba14", "#ede91a", "#ef1010"]
    ## 2103, 2104, 2108
    manager_data_code = strat.strategy.manager.last_data_code
    manager_color = min(manager_data_code - 2104, 1)
    manager_color = colors[manager_color]
    ## 2105, 2106, 2107
    scanner_data_code = strat.strategy.scanner.last_data_code
    scanner_color = scanner_data_code - 2106
    scanner_color = colors[scanner_color]
    # API status is binary: red unless both connections are up.
    colors = [colors[-1], colors[0]]
    api_code = int(strat.strategy.manager.isConnected() & strat.strategy.scanner.isConnected())
    api_color = colors[api_code]
    return {
        "manager" : manager_color,
        "scanner" : scanner_color,
        "api" : api_color
    }
@app.after_request
def after_request(response):
    """Stamp anti-caching headers on every response so the dashboard
    always reflects live strategy state."""
    no_cache_headers = {
        "Cache-Control": "no-cache, no-store, must-revalidate, public, max-age=0",
        "Expires": 0,
        "Pragma": "no-cache",
    }
    for header_name, header_value in no_cache_headers.items():
        response.headers[header_name] = header_value
    return response
@app.route('/')
def dashboard():
    """Render the dashboard page with the positions table and health colors."""
    ## Positions
    position_table = get_table()
    ## System Health
    system_health = get_health()
    return render_template("index.html", position_table = position_table, system_health = system_health)
if __name__ == '__main__':
    try:
        # Start the strategy thread, then serve the dashboard on port 9095.
        strat = StrategyThread(num_periods = 50, short_num_periods = 20, time_period = 5)
        strat.start()
        http_server = WSGIServer(('0.0.0.0', 9095), app)
        http_server.serve_forever()
    except Exception as e:
        print('EEE', e)
        # NOTE(review): on an exception this cleanup runs here AND in the
        # finally block below -- confirm on_close()/join() are idempotent.
        strat.on_close()
        strat.join()
    finally:
        strat.on_close()
        strat.join()
| zQuantz/Logma | ibapi/ui/retrace.py | retrace.py | py | 2,714 | python | en | code | 0 | github-code | 36 |
32736730593 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 3/9/19 7:49 PM
# @Author : zchai
import itertools
import os
import torch
from allennlp.data.dataset_readers.seq2seq import Seq2SeqDatasetReader
from allennlp.data.iterators import BucketIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers.word_tokenizer import WordTokenizer
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.activations import Activation
from allennlp.models.encoder_decoders.simple_seq2seq import SimpleSeq2Seq
from allennlp.modules.attention import LinearAttention
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.predictors import SimpleSeq2SeqPredictor, Seq2SeqPredictor
from allennlp.training.trainer import Trainer
from torch import optim
from couplet_generator.my_logger import Logger
from couplet_generator.utils import conf
logger = Logger(__name__).get_logger()
class Seq2SeqAllen:
    """AllenNLP LSTM seq2seq couplet model.

    In training mode (training=True) it reads the train/valid datasets,
    builds a vocabulary from them, and prepares a Trainer.  Otherwise it
    loads the saved vocabulary and best weights and exposes predict().
    All paths and hyper-parameters come from conf['seq2seq_allen'].
    """

    def __init__(self, training=False):
        self.training = training
        config = conf['seq2seq_allen']
        self.model_path = config['model_path']
        self.vocab_path = config['vocab_path']
        prefix = config['processed_data_prefix']
        train_file = config['train_data']
        valid_file = config['test_data']
        src_embedding_dim = config['src_embedding_dim']
        trg_embedding_dim = config['trg_embedding_dim']
        hidden_dim = config['hidden_dim']
        epoch = config['epoch']
        patience = config['patience']
        # cuda_device 0 = first GPU, -1 = CPU (AllenNLP convention).
        if torch.cuda.is_available():
            self.cuda_device = 0
        else:
            self.cuda_device = -1
        self.reader = Seq2SeqDatasetReader(
            source_tokenizer=WordTokenizer(),
            target_tokenizer=WordTokenizer(),
            source_token_indexers={'tokens': SingleIdTokenIndexer()},
            target_token_indexers={'tokens': SingleIdTokenIndexer()})
        if self.training:
            self.train_dataset = self.reader.read(os.path.join(prefix, train_file))
            self.valid_dataset = self.reader.read(os.path.join(prefix, valid_file))
            # Vocabulary is built from both splits, dropping tokens seen < 3 times.
            self.vocab = Vocabulary.from_instances(self.train_dataset + self.valid_dataset,
                                                   min_count={'tokens': 3})
        else:
            self.vocab = Vocabulary.from_files(self.vocab_path)
        src_embedding = Embedding(num_embeddings=self.vocab.get_vocab_size('tokens'),
                                  embedding_dim=src_embedding_dim)
        encoder = PytorchSeq2SeqWrapper(
            torch.nn.LSTM(src_embedding_dim, hidden_dim, batch_first=True))
        source_embedder = BasicTextFieldEmbedder({"tokens": src_embedding})
        self.model = SimpleSeq2Seq(vocab=self.vocab, source_embedder=source_embedder, encoder=encoder,
                                   max_decoding_steps=20,
                                   target_embedding_dim=trg_embedding_dim,
                                   use_bleu=True)
        optimizer = optim.Adam(self.model.parameters())
        iterator = BucketIterator(batch_size=32, sorting_keys=[("source_tokens", "num_tokens")])
        # The iterator needs the vocab so it can index the data during training.
        iterator.index_with(self.vocab)
        # NOTE(review): called with cuda_device=-1 on CPU-only machines --
        # confirm torch.nn.Module.cuda(-1) behaves as intended there.
        self.model.cuda(self.cuda_device)
        if training:
            self.trainer = Trainer(model=self.model,
                                   optimizer=optimizer,
                                   iterator=iterator,
                                   patience=patience,
                                   train_dataset=self.train_dataset,
                                   validation_dataset=self.valid_dataset,
                                   serialization_dir=self.model_path,
                                   num_epochs=epoch,
                                   cuda_device=self.cuda_device)
        if not self.training:
            # Inference mode: restore the best checkpoint and build a predictor.
            with open(os.path.join(self.model_path, 'best.th'), 'rb') as f:
                self.model.load_state_dict(torch.load(f))
            self.model.cuda(self.cuda_device)
            self.model.training = self.training
            self.predictor = Seq2SeqPredictor(self.model, dataset_reader=self.reader)

    def train(self):
        """Persist the vocabulary, then run the prepared Trainer."""
        self.vocab.save_to_files(self.vocab_path)
        self.trainer.train()

    def predict(self, sentence):
        """Run inference on *sentence*; returns None (with a warning) in training mode."""
        if not self.training:
            return self.predictor.predict(sentence)
        else:
            logger.warning('Mode is in training mode!')
| adamsZQ/couplet_generator | couplet_generator/seq2seq_allen.py | seq2seq_allen.py | py | 4,703 | python | en | code | 0 | github-code | 36 |
18114969380 | #!/usr/bin/env python3
from enum import Enum
from io import BytesIO
from itertools import count
from struct import pack
import csv
import sys
# Help text printed when research.csv is missing from the working directory.
usage = 'To use this script, run it in a directory containing csv files generated by par2csv. It will compile them back into a new EARTH2150.par in the same directory.'
# Player factions as encoded in the PAR file (NEUTRAL=0 .. LC=3).
Faction = Enum('Faction', 'NEUTRAL UCS ED LC', start=0)
# Entity record categories; the functional API numbers members 1..10,
# matching the PAR file's type codes.
EntityType = Enum('EntityType', [
    'Vehicle', 'Cannon', 'Missile', 'Building', 'Special',
    'Equipment', 'ShieldGenerator', 'SoundPack', 'SpecialUpdatesLinks',
    'Parameters',
])
# Research screen tabs as encoded in research.csv (CHASSIS=0 .. SPECIAL=3).
ResearchTab = Enum('ResearchTab', ['CHASSIS', 'WEAPON', 'AMMO', 'SPECIAL'], start=0)
next_id = count()  # monotonically increasing ids handed to Research rows in csv order
class Research:
    """One research topic parsed from a research.csv row.

    Column layout: name, faction, campaign cost, skirmish cost, campaign
    time, skirmish time, video, tab, mesh, mesh-params index, and a
    space-separated list of prerequisite research names (column 10).
    """

    def __init__(self, row):
        self.previous = row[10].strip().split()  # prerequisite names; resolved to ids later
        self.id = next(next_id)  # ids follow csv row order
        self.faction = Faction.__members__[row[1]]
        self.campaign_cost = int(row[2])
        self.skirmish_cost = int(row[3])
        self.campaign_time = int(row[4])
        self.skirmish_time = int(row[5])
        self.name = row[0]
        self.video = row[6]
        self.type = ResearchTab.__members__[row[7]]
        self.mesh = row[8]
        self.meshParamsIndex = int(row[9])

    def __repr__(self):
        items = ', '.join(f'{k}={v!r}' for k, v in self.__dict__.items())
        # Bug fix: the original return was 'Research{{{items}}}' WITHOUT the
        # f-prefix, so it returned that literal string and discarded the
        # computed fields entirely.
        return f'Research{{{items}}}'
class Entity:
    """One game entity row: a name, its research prerequisites, and raw fields."""

    def __init__(self, row):
        self.name = row[0]
        self.req_research = row[1].strip().split()
        # Keep each remaining column as an int where possible, otherwise as
        # the raw string.  (Enums and floats will need extra processing once
        # par2csv can distinguish them.)
        self.fields = []
        for raw in row[2:]:
            try:
                self.fields.append(int(raw))
            except ValueError:
                self.fields.append(raw)

    def __repr__(self):
        return f'Entity{{name={self.name!r}, req_research={self.req_research}, fields={len(self.fields)}{self.fields}}}'
class EntityGroup:
    """A faction/type bucket of entities plus the indices of reference fields."""

    def __init__(self):
        self.faction = None
        self.entity_type = None
        self.entities = []
        self.ref_fields = None

    def __repr__(self):
        listing = ''.join(f'    {entity}\n' for entity in self.entities)
        return f'EntityGroup{{faction={self.faction}, entity_type={self.entity_type}, entities=\n{listing}}}'
class ParWriter:
    """Writes PAR-format binary records to a file-like object.

    Integers are little-endian uint32, floats little-endian float32, and
    strings a uint32 length followed by latin-1 bytes.
    """

    def __init__(self, fd):
        self.fd = fd

    def write_header(self):
        # Magic 'PAR\0' followed by the uint32 version 0x99.
        self.fd.write(b'PAR\x00\x99\x00\x00\x00')

    def write(self, value):
        if isinstance(value, str):
            self.fd.write(pack('<I', len(value)))
            self.fd.write(value.encode(encoding='latin_1'))
        elif isinstance(value, int):
            self.fd.write(pack('<I', value))
        elif isinstance(value, float):
            self.fd.write(pack('<f', value))
        elif isinstance(value, list):
            # Lists serialize as a uint32 count followed by each element.
            self.write(len(value))
            for element in value:
                self.write(element)
        elif isinstance(value, Enum):
            self.write(value.value)
        else:
            raise TypeError(f'Cannot encode {type(value)}')

    def write_fields(self, fields, pad=True):
        """Write a field block: uint32 count, one type byte per field
        (1 = string, 0 = numeric), then the packed field values.

        *pad* is accepted for interface compatibility but unused.
        """
        type_flags = bytearray()
        packed_values = BytesIO()
        # A nested writer serializes the values without duplicating write().
        value_writer = ParWriter(packed_values)
        for field in fields:
            type_flags.append(1 if type(field) is str else 0)
            value_writer.write(field)
        self.write(len(type_flags))
        self.fd.write(type_flags)
        self.fd.write(packed_values.getbuffer())
# (csv filename, EntityType for the records, indices of fields after which a
# 0xffffffff reference placeholder is inserted when writing the PAR back).
csv_files = [
    ('buildrobot.csv', EntityType.Vehicle, {6, 7, 8, 9, 18, 19, 33, 34, 35, 36, 37, 38, 39, 66}),
    ('vehicle.csv', EntityType.Vehicle, {6, 7, 8, 9, 18, 19, 33, 34, 35, 36, 37}),
    ('miningrobot.csv', EntityType.Vehicle, {6, 7, 8, 9, 18, 19, 33, 34, 35, 36, 37, 47}),
    ('sapperrobot.csv', EntityType.Vehicle, {6, 7, 8, 9, 18, 19, 33, 34, 35, 36, 37, 39, 45}),
    ('supplytransporter.csv', EntityType.Vehicle, {6, 7, 8, 9, 18, 19, 33, 34, 35, 36, 37}),
    ('buildingtransporter.csv', EntityType.Special, {6, 7, 8, 9, 18, 19, 27}),
    ('resourcetransporter.csv', EntityType.Special, {6, 7, 8, 9, 18, 19}),
    ('unittransporter.csv', EntityType.Special, {6, 7, 8, 9, 18, 19}),
    ('building.csv', EntityType.Building, {6, 7, 8, 9, 18, 19, 28, 29, 30, 31, 32, 34, 35, 36, 37, 39, 47, 50, 51, 53, 55, 58}),
    ('cannon.csv', EntityType.Cannon, {6, 7, 8, 9, 20, 30}),
    ('missile.csv', EntityType.Missile, {6, 7, 8, 9, 20, 29}),
    ('soundpack.csv', EntityType.SoundPack, set()),
    ('repairer.csv', EntityType.Equipment, {6, 7, 8, 9}),
    ('containertransporter.csv', EntityType.Equipment, {6, 7, 8, 9}),
    ('transporterhook.csv', EntityType.Equipment, {6, 7, 8, 9}),
    ('lookroundequipment.csv', EntityType.Equipment, {6, 7, 8, 9}),
    ('upgradecopula.csv', EntityType.Special, {6, 7, 8, 9}),
    ('equipment.csv', EntityType.Equipment, {6, 7, 8, 9}),
    ('passive.csv', EntityType.Special, {6, 7, 8, 9, 18}),
    ('artefact.csv', EntityType.Special, {6, 7, 8, 9, 18}),
    ('startingpositionmark.csv', EntityType.Special, {6, 7, 8, 9, 18, 19}),
    ('multiexplosion.csv', EntityType.Special, {6, 7, 8, 9, 13, 17, 21, 25, 29, 33, 37, 41}),
    ('explosion.csv', EntityType.Special, {6, 7, 8, 9}),
    ('smoke.csv', EntityType.Special, {6, 7, 8, 9}),
    ('flyingwaste.csv', EntityType.Special, {6, 7, 8, 9, 18, 20, 22, 24}),
    ('mine.csv', EntityType.Special, {6, 7, 8, 9}),
    ('walllaser.csv', EntityType.Special, {6, 7, 8, 9}),
    ('builderline.csv', EntityType.Special, {6, 7, 8, 9}),
    ('platoon.csv', EntityType.Special, {6, 7, 8, 9, 18, 19}),
    ('shieldgenerator.csv', EntityType.ShieldGenerator, set()),
    ('talkpack.csv', EntityType.SoundPack, set()),
    ('parameters.csv', EntityType.Parameters, set()),
    ('playertalkpack.csv', EntityType.SoundPack, set()),
    ('specialupdateslinks.csv', EntityType.SpecialUpdatesLinks, {0})
]
research = None
entity_groups = []
# research.csv is mandatory; its row order defines the research ids.
try:
    with open('research.csv', newline='') as csv_file:
        reader = csv.reader(csv_file)
        next(reader) # Skip header line
        research = [Research(row) for row in reader]
except FileNotFoundError:
    print(usage)
    sys.exit(1)
research_ids = {r.name : r.id for r in research}
# Read every known entity csv; short rows (< 3 cells) open a new group whose
# last cell names the faction, longer rows are entities of the current group.
for (filename, etype, ref_fields) in csv_files:
    try:
        with open(filename, newline='') as csv_file:
            print(f'Reading (unknown)')
            reader = csv.reader(csv_file)
            next(reader) # Skip header line
            group = None
            for row in reader:
                if not row: continue
                if len(row) < 3:
                    group = EntityGroup()
                    group.faction = Faction.__members__[row[-1]]
                    group.entity_type = etype
                    group.ref_fields = ref_fields
                    entity_groups.append(group)
                else:
                    group.entities.append(Entity(row))
    except FileNotFoundError:
        print(f'(unknown) not found')
        continue
# Serialize everything back into the PAR binary.
with open('EARTH2150.par', 'wb') as parfile:
    writer = ParWriter(parfile)
    writer.write_header()
    writer.write(len(entity_groups))
    writer.write(0)
    for group in entity_groups:
        writer.write(group.faction.value)
        writer.write(group.entity_type.value)
        writer.write(len(group.entities))
        for entity in group.entities:
            writer.write(entity.name)
            writer.write([research_ids[r] for r in entity.req_research])
            fields = list()
            # Re-insert the 0xffffffff reference placeholders after the
            # field indices recorded for this group.
            for (i, f) in enumerate(entity.fields):
                fields.append(f)
                if i in group.ref_fields:
                    fields.append(0xffffffff)
            writer.write_fields(fields)
    writer.write(len(research))
    for r in research:
        writer.write([research_ids[p] for p in r.previous])
        writer.write(r.id)
        writer.write(r.faction.value)
        writer.write(r.campaign_cost)
        writer.write(r.skirmish_cost)
        writer.write(r.campaign_time)
        writer.write(r.skirmish_time)
        writer.write(r.name)
        writer.write(r.video)
        writer.write(r.type)
        writer.write(r.mesh)
        writer.write(r.meshParamsIndex)
    writer.write(1)
    writer.write(len(research) - 1)
print(f'Wrote EARTH2150.par containing {sum(len(g.entities) for g in entity_groups)} entities (in {len(entity_groups)} groups) and {len(research)} research topics')
| InsideEarth2150/Programming | Tools/Community Tools/Ninetailed/earth-2150/csv2par.py | csv2par.py | py | 8,490 | python | en | code | 1 | github-code | 36 |
8128484100 | import electra_class
from transformers import ElectraForQuestionAnswering, ElectraTokenizer
model_name = "ahotrod/electra_large_discriminator_squad2_512"
model = ElectraForQuestionAnswering.from_pretrained(model_name)
tokenizer = ElectraTokenizer.from_pretrained(model_name)
# Outer loop: pick a context file; inner loop: answer questions against it.
# Typing "exit" leaves either loop.
while True:
    context = input("Enter Target Filename for BERT:\n")
    if context == "exit":
        break
    # Bug fix: the original test was `context[:13] != "MinecraftWiki/"`, but
    # "MinecraftWiki/" is 14 characters long, so the 13-character slice could
    # never match and the prefix was prepended even when already present,
    # producing paths like "MinecraftWiki/MinecraftWiki/foo.txt".
    if not context.startswith("MinecraftWiki/"):
        context = "MinecraftWiki/" + context
    if not context.endswith(".txt"):
        context += ".txt"
    while True:
        query = input("JARVIS online. What would you like to know?\n")
        if query == "exit":
            break
        answer, score = electra_class.answerfromwebpage(query, context, model, tokenizer)
        print("Answer: " + answer)
| gale2307/Jarvis | electra_test.py | electra_test.py | py | 754 | python | en | code | 1 | github-code | 36 |
4397820007 | import os
from shutil import copyfile
from unittest.mock import patch, Mock
import pytest
from ow import migrate
class TestMigrateUnit(object):
    """Unit tests for ow.migrate glue code (config hook, wrappers, CLI)."""

    def test_includeme(self):
        config = Mock()
        migrate.includeme(config)
        config.scan.assert_called_with('ow.migrate')

    @patch('ow.migrate.run_migrations')
    def test_closer_wrapper_ok(self, run):
        closer = Mock()
        env = dict(
            registry=Mock(settings={}),
            root_factory=Mock(__module__='mytest'),
            request=1,
            root=2,
            closer=closer)
        migrate.closer_wrapper(env)
        # Migrations run against the root factory's '<module>.migrations'.
        run.assert_called_with(1, 2, 'mytest.migrations')
        assert closer.called

    @patch('ow.migrate.closer_wrapper')
    @patch('ow.migrate.prepare')
    def test_application_created_ok(self, prepare, wrap):
        event = Mock()
        migrate.application_created(event)
        assert prepare.called
        assert wrap.called

    @patch('ow.migrate.output')
    def test_command_line_no_conf(self, pr):
        # Without an ini file the CLI prints usage and exits with 1.
        ret = migrate.command_line_main(['test.py'])
        assert ret == 1
        assert pr.called

    @patch('ow.migrate.closer_wrapper')
    @patch('ow.migrate.bootstrap')
    def test_command_line_no(self, bs, wrap):
        ret = migrate.command_line_main(['test.py', 'dev.ini'])
        assert ret == 0
        assert bs.called
        assert wrap.called

    @patch('ow.migrate.commit')
    @patch('ow.migrate.get_connection')
    def test_reset_version(self, pget_connection, pcommit):
        zodb = {}
        pget_connection.return_value.root.return_value = zodb
        migrate.reset_version('myrequest', 25)
        assert zodb == {'database_version': 25}
        # Bug fix: this was 'pcommit.asert_called_with()' -- a typo'd name,
        # which a Mock treats as a plain attribute access, so the assertion
        # silently never ran.
        pcommit.assert_called_with()
pcommit.asert_called_with()
def cleanup():
    """Remove bytecode and test-generated migration files from the
    migrations directory (.pyc files, anything named 'fail*', and '3.py')."""
    migrations_dir = os.path.join(os.path.dirname(__file__), 'migrations')
    for entry in os.listdir(migrations_dir):
        is_generated = '.pyc' in entry or 'fail' in entry or entry == '3.py'
        if is_generated:
            os.remove(os.path.join(migrations_dir, entry))
class mocked_get_connection(object):
    """
    Stand-in for pyramid_zodbconn.get_connection(): its root() returns a
    plain dict holding the configured database version
    (see test_run_migrations).
    """

    def __init__(self, versions=0):
        self.versions = versions

    def root(self):
        # Mimic Connection.root(): a mapping carrying the schema version.
        root_mapping = {'database_version': self.versions}
        return root_mapping
class TestsMigrate(object):
    """Integration-style tests for the migration discovery/run machinery,
    driven by the fixture package ow.tests.migrations."""

    package_name = 'ow.tests.migrations'
    ini_path = os.path.join(os.path.dirname(__file__), 'migrations')

    def test_get_indexes_ok(self):
        indexes = migrate.get_indexes(self.package_name)
        assert isinstance(indexes, list)
        assert len(indexes) == 2

    def test_get_indexes_fail(self):
        migrate.get_indexes(self.package_name)
        with pytest.raises(ImportError):
            migrate.get_indexes('nonexistent.module.migrations')

    def test_get_indexes_invalid(self):
        # Create a new migration file with an invalid name, so the get_indexes
        # will raise a ValueError exception
        copyfile(os.path.join(os.path.dirname(__file__), 'migrations/1.py'),
                 os.path.join(os.path.dirname(__file__), 'migrations/fail.py'))
        indexes = migrate.get_indexes(self.package_name)
        assert isinstance(indexes, list)
        assert len(indexes) == 2

    def test_get_max_in_max_cache(self):
        # A cached value short-circuits the filesystem scan.
        with patch.dict(migrate.MAX_CACHE, {self.package_name: 10}):
            max_version = migrate.get_max(self.package_name)
            assert max_version == 10

    def test_get_max(self):
        max_version = migrate.get_max(self.package_name)
        assert max_version == 2

    def test_version(self):
        # instead of a real ZODB root, we do use a simple dict here,
        # it should be enough for what need to test.
        root = {}
        root = migrate.set_version(root, 10)
        assert root['database_version'] == 10

    def test_max_version(self):
        # instead of a real ZODB root, we do use a simple dict here,
        # it should be enough for what need to test.
        root = {}
        root = migrate.set_max_version(root, self.package_name)
        assert root['database_version'] == 2

    @patch('ow.migrate.get_connection')
    @patch('ow.tests.migrations.1.output')
    @patch('ow.tests.migrations.2.output')
    def test_run_all_migrations(self, pr2, pr1, gc):
        """
        Test that all migrations apply
        """
        gc.return_value = mocked_get_connection()
        migrate.run_migrations(None, {}, self.package_name)
        cleanup()
        assert pr1.called
        assert pr2.called

    @patch('ow.migrate.get_connection')
    def test_run_no_migrations(self, gc):
        """
        Test that there are no more migrations to apply
        """
        gc.return_value = mocked_get_connection(versions=2)
        migrate.run_migrations(None, {}, self.package_name)

    @patch('ow.migrate.get_connection')
    @patch('ow.tests.migrations.1.output')
    @patch('ow.tests.migrations.2.output')
    def test_run_invalid_migrations(self, pr2, pr1, gc):
        """
        Test what happens if a migration does not contains the proper migrate
        method
        """
        # NOTE(review): these write() calls emit no newlines, so 3.py ends up
        # as one long '#' comment line -- it still lacks a migrate() method,
        # which is what the test needs, but confirm that was intentional.
        invalid_migration = open(os.path.join(os.path.dirname(__file__),
                                              'migrations/3.py'), 'w')
        invalid_migration.write('# This is an empty migration, just for tests')
        invalid_migration.write('def no_migrate_method_here():')
        invalid_migration.write('    print "Nothing to see here!"')
        invalid_migration.close()
        gc.return_value = mocked_get_connection(versions=0)
        migrate.run_migrations(None, {}, self.package_name)
        cleanup()
        assert pr1.called
        assert pr2.called
| openworkouts/OpenWorkouts | ow/tests/test_migrate.py | test_migrate.py | py | 5,768 | python | en | code | 5 | github-code | 36 |
20324223182 | from pymongo import MongoClient
def get_db(database):
    """Return a handle to the named MongoDB database on the default host."""
    client = MongoClient()
    return client[database]
db = get_db("foundation")
# Pull only the fields needed to recompute each line total.
for p in db.process.find({}, {"price": 1, "qtt": 2}):
    # Total amount = unit price * quantity, forced to float.
    total_amount = float(float(p["price"]) * float(p["qtt"]))
    db.process.update({"_id": p["_id"]}, {"$set": {"ttlamt": total_amount}}, upsert=True)
24500666134 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Helper functions and classes."""
from typing import Callable, Iterable, List, Mapping, Tuple, Type
import string
import functools
# Maps function name -> MultiMethod dispatcher, shared by the ``multimethod``
# decorator below.
_registry = dict()
# Cached set of ASCII-printable characters used by ``prep_ascii_str``.
_printable = set(string.printable)
class MultiMethod:
    """
    Representation of an overloaded method.

    Stores one callable per tuple of argument types; calling an instance
    dispatches to the callable registered for the runtime types of the
    supplied arguments.

    From a tutorial written by Guido van Rossum. Please see
    https://www.artima.com/weblogs/viewpost.jsp?thread=101605

    Parameters
    ----------
    name: str
        Name of the function
    """

    def __init__(self, name):
        self.name = name
        self.typemap = dict()

    def __call__(self, *args):
        signature = tuple(arg.__class__ for arg in args)
        handler = self.typemap.get(signature)
        if handler is None:
            raise TypeError("No match for overloaded function.")
        return handler(*args)

    def register(self, types: Tuple[Type, ...], function: Callable) -> None:
        """
        Register a new function signature.

        Parameters
        ----------
        types: tuple of classes
            Types of the arguments for the function.
        function: callable
            To be called when arguments types match ``types``.

        Raises
        ------
        TypeError
            If the given ``types`` is already registered to a function.
        """
        if types in self.typemap:
            raise TypeError(f"Duplicate registration of function {self.name}")
        self.typemap[types] = function
def multimethod(*types: Type) -> Callable:
    """
    Function decorator for supporting method overloading.

    Based on an article written by Guido van Rossum (see
    https://www.artima.com/weblogs/viewpost.jsp?thread=101605).
    Best way to see its usage is by example.

    Examples
    --------
    >>> from glo.helpers import multimethod
    >>> @multimethod(int, int)
    ... def my_func(a, b):
    ...     return a * b
    ...
    >>> @multimethod(int, int, str)
    ... def my_func(a, b, s):
    ...     return s.format(my_func(a, b))
    ...
    >>> my_func(5, 6)
    30
    >>> my_func(5, 6, "The result is: {}")
    'The result is: 30'
    """

    def register(function):
        # Reuse the shared dispatcher for this name, creating it lazily on
        # first registration.
        fname = function.__name__
        dispatcher = _registry.get(fname)
        if dispatcher is None:
            dispatcher = _registry[fname] = MultiMethod(fname)
        dispatcher.register(types, function)
        return dispatcher

    return register
def prep_ascii_str(s_in: str) -> str:
    """
    Takes in a string and prepares it for parsing.

    Characters outside ``string.printable`` are dropped, surrounding
    whitespace is trimmed, and the result is lowercased.

    Parameters
    ----------
    s_in: str
        Input string to prep.

    Returns
    -------
    str
        Prepped version of the input ``s_in``.

    Examples
    --------
    >>> from glo.helpers import prep_ascii_str
    >>> prep_ascii_str("25 Ounces")
    '25 ounces'
    >>> prep_ascii_str("   some string with whitespace ")
    'some string with whitespace'
    """
    ascii_only = "".join(ch for ch in s_in if ch in _printable)
    return ascii_only.strip().lower()
def remove_substrings(s_in: str, subs: Iterable[str]) -> str:
    """
    Remove list of substrings from a given input string.

    Parameters
    ----------
    s_in: str
        String to remove substrings from.
    subs: iterable of str
        List of substrings to remove from the input string. Will be
        removed in the order they are iterated over.

    Returns
    -------
    str
        Input string with all substrings found in given substring
        list removed.

    Examples
    --------
    >>> from glo.helpers import remove_substrings
    >>> remove_substrings("test1 test2 test3", ["test1", "test3"])
    'test2'
    >>> remove_substrings("TEST1 TEST2 TEST3", ["test1", "test3"])
    'TEST1 TEST2 TEST3'
    >>> remove_substrings("hey there", ["y there", "hey"])
    'he'
    """
    # Matches the original reduce(): replace, then strip, once per substring.
    result = s_in
    for substring in subs:
        result = result.replace(substring, "").strip()
    return result
def split_in_list(in_list: Iterable[str], split_on: str) -> List[str]:
    """
    Return flattened list of split input strings.

    Each input string is split on ``split_on`` and every resulting piece
    is stripped; all pieces are returned in a single flat list rather
    than a list of lists.

    Parameters
    ----------
    in_list: iterable of str
        List of input strings to split.
    split_on: str
        Substring that each input string will be split on.

    Returns
    -------
    list of str
        Flattened list containing results from the splits

    Examples
    --------
    >>> from glo.helpers import split_in_list
    >>> split_in_list(["hey this", "is a sentence."], " ")
    ['hey', 'this', 'is', 'a', 'sentence.']
    >>> split_in_list(["and then, he said: ", "wait, what's that?"], ", ")
    ['and then', 'he said:', 'wait', "what's that?"]
    """
    flattened: List[str] = []
    for entry in in_list:
        flattened.extend(piece.strip() for piece in entry.split(split_on))
    return flattened
def contains_substring(s_in: str, subs: Iterable[str]) -> bool:
    """
    Determine if any of the given substrings is in the given string.

    Parameters
    ----------
    s_in: str
        Input string to check for given substrings.
    subs: iterable of str
        Substrings to check for in str

    Examples
    --------
    >>> from glo.helpers import contains_substring
    >>> contains_substring("this is a test", ["hey", "there"])
    False
    >>> contains_substring("this is another test", ["test", "hey", "there"])
    True
    >>> contains_substring("THIS IS ANOTHER TEST", ["this", "is", "another"])
    False
    """
    # Case-sensitive containment check; stops at the first hit.
    for candidate in subs:
        if candidate in s_in:
            return True
    return False
def replace_multiple_substrings(s_in: str, subs: Mapping[str, str]) -> str:
    """
    Replace multiple substrings within the given input string.

    Replacements are applied in the mapping's iteration order, each one
    operating on the output of the previous.

    Parameters
    ----------
    s_in: str
        Input string to make the substitutions in.
    subs: mapping of str to str
        Keys are substrings to replace; values are the replacements.

    Examples
    --------
    >>> from glo.helpers import replace_multiple_substrings
    >>> replace_multiple_substrings("a test", {"a": "hey", "test": "there"})
    'hey there'
    >>> replace_multiple_substrings("12546", {"5": "3", "6": "321"})
    '1234321'
    """
    result = s_in
    for old, new in subs.items():
        result = result.replace(old, new)
    return result
| learnitall/glo | glo/helpers.py | helpers.py | py | 7,497 | python | en | code | 0 | github-code | 36 |
class Solution(object):
    """Three equivalent implementations of LeetCode 14 (longest common
    prefix).  All return '' when the input list is empty or there is no
    common prefix."""

    def longestCommonPrefix(self, strs):
        """Vertical scan: compare one character column at a time.

        :param strs: List[str]
        :return: str
        """
        if not strs:
            return ""
        for i in range(len(strs[0])):
            # Renamed the loop variable from ``str`` to avoid shadowing the
            # builtin.
            for s in strs:
                if len(s) <= i or strs[0][i] != s[i]:
                    return strs[0][:i]
        return strs[0]

    def longestCommonPrefix3(self, strs) -> str:
        """Vertical scan variant that accumulates the prefix explicitly."""
        if len(strs) == 0:
            return ""
        res = ""
        for j in range(len(strs[0])):
            c = strs[0][j]
            for i in range(1, len(strs)):
                if j >= len(strs[i]) or strs[i][j] != c:
                    return res
            res = res + c
        return res

    def longestCommonprefix2(self, strs) -> str:
        """Horizontal scan: shrink a candidate prefix until all strings
        start with it."""
        if len(strs) == 0 or len(strs[0]) == 0:
            return ""
        res = strs[0]
        for i in range(1, len(strs)):
            while not strs[i].startswith(res):
                res = res[0:len(res) - 1]
        return res
# Import-time sanity checks for the two alternative implementations.
assert Solution().longestCommonPrefix3(["flower", "flow", "flight"]) == "fl"
assert Solution().longestCommonprefix2(["flower", "flow", "flight"]) == "fl"
if __name__ == '__main__':
    # Exercise the primary implementation when run as a script.
    assert Solution().longestCommonPrefix(["hello", "heabc", "hell"]) == "he"
| yannweb/yanns_code | code/014.py | 014.py | py | 1,310 | python | en | code | 1 | github-code | 36 |
#-------------------------------------------------------------------------------
# Name:        module1
# Purpose:     Pair up grid cells holding an odd number of coins and move one
#              coin along a path between the two cells of each pair, which
#              leaves both cells even.
#
# Author:      mo-mo-
#
# Created:     08/09/2018
# Copyright:   (c) mo-mo- 2018
# Licence:     <your licence>
#-------------------------------------------------------------------------------
# Bug fixes versus the previous revision:
#   * ``list.append`` was called with four positional arguments (TypeError);
#     tuples are now appended instead.
#   * ``x1, y2 = ex_line[i+1]`` left ``x2`` undefined (NameError); the second
#     cell is now unpacked as ``x2, y2``.
#   * rows with an odd number of odd cells indexed one past the end of the
#     list when pairing.
# NOTE(review): the pairing strategy itself (pairing per-row leftovers across
# rows) can produce overlapping paths; verify it satisfies the judge's
# "each cell chosen at most once" constraint before relying on this output.

h, w = map(int, input().split())

# Collect 1-indexed coordinates of odd cells, grouped by row.
odd_cells_per_row = []
for i in range(h):
    row = list(map(int, input().split()))
    odd_cells_per_row.append(
        [(i + 1, j + 1) for j in range(w) if row[j] % 2 != 0]
    )

moves = []


def emit_path(x1, y1, x2, y2):
    """Record unit moves from (x1, y1) to (x2, y2): vertical leg first."""
    xstep = 1 if x2 >= x1 else -1
    for x in range(x1, x2, xstep):
        moves.append((x, y1, x + xstep, y1))
    ystep = 1 if y2 >= y1 else -1
    for y in range(y1, y2, ystep):
        moves.append((x2, y, x2, y + ystep))


leftovers = []  # at most one unpaired odd cell per row
for odd_cells in odd_cells_per_row:
    # Pair consecutive odd cells within the row.
    for k in range(0, len(odd_cells) - 1, 2):
        x1, y1 = odd_cells[k]
        x2, y2 = odd_cells[k + 1]
        emit_path(x1, y1, x2, y2)
    if len(odd_cells) % 2 != 0:
        leftovers.append(odd_cells[-1])

# Pair the per-row leftovers across rows.
for k in range(0, len(leftovers) - 1, 2):
    x1, y1 = leftovers[k]
    x2, y2 = leftovers[k + 1]
    emit_path(x1, y1, x2, y2)

print(len(moves))
for move in moves:
    print(*move)
29378709444 | from flask import Blueprint, request, session
from database.service.location import Location as LocationSvc
from database.service.user import User as UserSvc
from database.service.device import Device as DeviceSvc
from database.service.property import Property as PropertySvc
from database.service.exceptions import NoRecordError
'''
MQTT interface
'''
import paho.mqtt.client as mqtt
import json
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, rc):
    """Log the CONNACK result and (re)subscribe to the record topic tree."""
    # NOTE(review): paho-mqtt >= 1.0 invokes on_connect with
    # (client, userdata, flags, rc); this 3-argument signature matches the
    # pre-1.0 API -- confirm against the installed paho version.
    print("Connected with result code " + str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe("record/#")
    client.on_message = on_message
import re

# Topic layout: record/<username>/<location>/<device>
# NOTE(review): non-raw pattern string; "\/" and "\w" happen to work but
# "\/" is an invalid escape that raises DeprecationWarning on newer
# Pythons -- consider a raw string.
record_insert_regex = re.compile("record\/(\w+)\/(\w+)\/(\w+)")

# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Persist a sensor record published on record/<user>/<location>/<device>.

    The JSON payload must carry ``password`` (to authenticate the user)
    and ``content`` (the property values to store).
    """
    topic = msg.topic
    payload = msg.payload.decode('utf8')
    matched = record_insert_regex.match(topic)
    print(payload)
    # NOTE(review): the payload is parsed before ``matched`` is checked, so
    # a non-record topic carrying a non-JSON payload would raise here.
    json_payload = json.loads(payload)
    if matched:
        username = matched.group(1)
        location_name = matched.group(2)
        device_name = matched.group(3)
        print("MQTT received. Topic: " + username + " " + location_name + " " + device_name + " Payload: " + str(payload))
        # Authenticate and resolve the user id.
        try:
            password = json_payload["password"]
            user = UserSvc.verify(username, password)
            user_id = user.id
            print("User ID: " + str(user_id))
        except NoRecordError as error:
            print(error)
            return  # Usually username password mismatch
        except Exception as error:
            print(error)
            return
        # Resolve location and device ids by name, scoped to the user.
        try:
            location = LocationSvc.get(user_id, name=location_name)
            device = DeviceSvc.get(user_id, name=device_name)
            print("Location ID: " + str(location.id))
            print("Device ID: " + str(device.id))
        except NoRecordError as error:
            print(error)
            return  # No record
        # Now store the record content.
        try:
            print("content: " + str(json_payload["content"]))
            PropertySvc.save_record_dict(device.id, location.id, json_payload["content"])
        except Exception as error:
            print(error)
            return
    return
# Module-level side effect: connect to the local broker and start the
# background network loop as soon as this module is imported.
mqtt_client = mqtt.Client()
mqtt_client.on_connect = on_connect
try:
    mqtt_client.connect(host='127.0.0.1', port=1883, keepalive=60)
except Exception:
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
    # not swallowed; any connection failure is still treated as fatal.
    print('Failed to connect to the server')
    exit()
else:
    print('Connection Success!')
print('MQTT connection is being ready...')
# loop_start() runs the network loop in a background thread; on_connect and
# on_message fire from that thread.
mqtt_client.loop_start()
9194439636 | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@lightly.ai
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class JobStatusData(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type (drives to_dict()/(de)serialization).
    swagger_types = {
        'id': 'MongoObjectID',
        'status': 'JobState',
        'meta': 'JobStatusMeta',
        'wait_time_till_next_poll': 'int',
        'created_at': 'Timestamp',
        'finished_at': 'Timestamp',
        'error': 'str',
        'result': 'JobStatusDataResult'
    }

    # Maps python attribute name -> JSON key in the API payload.
    attribute_map = {
        'id': 'id',
        'status': 'status',
        'meta': 'meta',
        'wait_time_till_next_poll': 'waitTimeTillNextPoll',
        'created_at': 'createdAt',
        'finished_at': 'finishedAt',
        'error': 'error',
        'result': 'result'
    }

    def __init__(self, id=None, status=None, meta=None, wait_time_till_next_poll=None, created_at=None, finished_at=None, error=None, result=None, _configuration=None):  # noqa: E501
        """JobStatusData - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._id = None
        self._status = None
        self._meta = None
        self._wait_time_till_next_poll = None
        self._created_at = None
        self._finished_at = None
        self._error = None
        self._result = None
        self.discriminator = None

        # Required fields are assigned unconditionally (their setters raise
        # when given None while client-side validation is enabled); optional
        # fields are set only when provided.
        self.id = id
        self.status = status
        if meta is not None:
            self.meta = meta
        self.wait_time_till_next_poll = wait_time_till_next_poll
        self.created_at = created_at
        if finished_at is not None:
            self.finished_at = finished_at
        if error is not None:
            self.error = error
        if result is not None:
            self.result = result

    @property
    def id(self):
        """Gets the id of this JobStatusData.  # noqa: E501

        :return: The id of this JobStatusData.  # noqa: E501
        :rtype: MongoObjectID
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this JobStatusData.

        :param id: The id of this JobStatusData.  # noqa: E501
        :type: MongoObjectID
        """
        if self._configuration.client_side_validation and id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501

        self._id = id

    @property
    def status(self):
        """Gets the status of this JobStatusData.  # noqa: E501

        :return: The status of this JobStatusData.  # noqa: E501
        :rtype: JobState
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this JobStatusData.

        :param status: The status of this JobStatusData.  # noqa: E501
        :type: JobState
        """
        if self._configuration.client_side_validation and status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501

        self._status = status

    @property
    def meta(self):
        """Gets the meta of this JobStatusData.  # noqa: E501

        :return: The meta of this JobStatusData.  # noqa: E501
        :rtype: JobStatusMeta
        """
        return self._meta

    @meta.setter
    def meta(self, meta):
        """Sets the meta of this JobStatusData.

        :param meta: The meta of this JobStatusData.  # noqa: E501
        :type: JobStatusMeta
        """
        self._meta = meta

    @property
    def wait_time_till_next_poll(self):
        """Gets the wait_time_till_next_poll of this JobStatusData.  # noqa: E501

        The time in seconds the client should wait before doing the next poll.  # noqa: E501

        :return: The wait_time_till_next_poll of this JobStatusData.  # noqa: E501
        :rtype: int
        """
        return self._wait_time_till_next_poll

    @wait_time_till_next_poll.setter
    def wait_time_till_next_poll(self, wait_time_till_next_poll):
        """Sets the wait_time_till_next_poll of this JobStatusData.

        The time in seconds the client should wait before doing the next poll.  # noqa: E501

        :param wait_time_till_next_poll: The wait_time_till_next_poll of this JobStatusData.  # noqa: E501
        :type: int
        """
        if self._configuration.client_side_validation and wait_time_till_next_poll is None:
            raise ValueError("Invalid value for `wait_time_till_next_poll`, must not be `None`")  # noqa: E501

        self._wait_time_till_next_poll = wait_time_till_next_poll

    @property
    def created_at(self):
        """Gets the created_at of this JobStatusData.  # noqa: E501

        :return: The created_at of this JobStatusData.  # noqa: E501
        :rtype: Timestamp
        """
        return self._created_at

    @created_at.setter
    def created_at(self, created_at):
        """Sets the created_at of this JobStatusData.

        :param created_at: The created_at of this JobStatusData.  # noqa: E501
        :type: Timestamp
        """
        if self._configuration.client_side_validation and created_at is None:
            raise ValueError("Invalid value for `created_at`, must not be `None`")  # noqa: E501

        self._created_at = created_at

    @property
    def finished_at(self):
        """Gets the finished_at of this JobStatusData.  # noqa: E501

        :return: The finished_at of this JobStatusData.  # noqa: E501
        :rtype: Timestamp
        """
        return self._finished_at

    @finished_at.setter
    def finished_at(self, finished_at):
        """Sets the finished_at of this JobStatusData.

        :param finished_at: The finished_at of this JobStatusData.  # noqa: E501
        :type: Timestamp
        """
        self._finished_at = finished_at

    @property
    def error(self):
        """Gets the error of this JobStatusData.  # noqa: E501

        :return: The error of this JobStatusData.  # noqa: E501
        :rtype: str
        """
        return self._error

    @error.setter
    def error(self, error):
        """Sets the error of this JobStatusData.

        :param error: The error of this JobStatusData.  # noqa: E501
        :type: str
        """
        self._error = error

    @property
    def result(self):
        """Gets the result of this JobStatusData.  # noqa: E501

        :return: The result of this JobStatusData.  # noqa: E501
        :rtype: JobStatusDataResult
        """
        return self._result

    @result.setter
    def result(self, result):
        """Sets the result of this JobStatusData.

        :param result: The result of this JobStatusData.  # noqa: E501
        :type: JobStatusDataResult
        """
        self._result = result

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists of models and dicts of
        # models via their own to_dict() methods.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(JobStatusData, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, JobStatusData):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, JobStatusData):
            return True

        return self.to_dict() != other.to_dict()
| tibe97/thesis-self-supervised-learning | lightly/openapi_generated/swagger_client/models/job_status_data.py | job_status_data.py | py | 9,149 | python | en | code | 2 | github-code | 36 |
7535863812 | from django.test import TestCase, Client, tag
from djangoplicity.media.models import Video
@tag('frontpage')
class TestFrontPageApp(TestCase):
    """Smoke tests for the public front page."""

    fixtures = ['test/pages', 'test/media', 'test/announcements', 'test/releases', 'test/highlights']

    def setUp(self):
        self.client = Client()

    def test_homepage(self):
        """The homepage renders its sections and embeds YouTube only when
        the first hubblecast video has ``use_youtube`` enabled."""
        youtube_only_html = '<div class="youtube-wrapper"><div id="youtube-player"></div></div>'
        expected_sections = ("What's New", 'ESA/Hubble Facebook', 'Subscribe to Hubble News')

        # use_youtube=True (fixture default): the embed is present.
        response = self.client.get('/')
        for section in expected_sections:
            self.assertContains(response, section)
        self.assertContains(response, youtube_only_html, html=True)

        # use_youtube=False: the embed disappears.
        Video.objects.update(use_youtube=False)
        response = self.client.get('/')
        self.assertNotContains(response, youtube_only_html, html=True)
| esawebb/esawebb | webb/frontpage/tests.py | tests.py | py | 990 | python | en | code | 0 | github-code | 36 |
# For each of the n input values, print the maximum of all the OTHER
# values: the global maximum, unless the value itself is the maximum,
# in which case the second-largest value.
n = int(input())
values = [int(input()) for _ in range(n)]
largest = max(values)
second_largest = sorted(values)[-2]
for v in values:
    print(second_largest if v == largest else largest)
27187104573 | import sys
import pandas as pd
from run_vcp import run_vcp
def main(params, vcmd, protocol_name):
    """Run the voltage-clamp protocol once per model row in ``params``.

    :param params: DataFrame whose ``model_ndx`` column identifies each
        model and whose columns 1..13 hold its parameters.
    :param vcmd: DataFrame of voltage-clamp command data, forwarded as-is.
    :param protocol_name: label forwarded to ``run_vcp``.
    """
    # Get model_ndx
    ndx = params.model_ndx
    num_models = params.shape[0]
    for i in range(num_models):
        # Columns 1:14 are the per-model parameters.  NOTE(review):
        # ``ndx[i]`` is a label-based lookup; this assumes a default
        # integer index on ``params`` -- confirm with the caller.
        run_vcp(params.iloc[i, 1:14], model_ndx=ndx[i], vc_data=vcmd, protocol_name=protocol_name)
if __name__ == '__main__':
    # Usage: run_vcp_top_models.py params_file vc_cmd_file protocol_name
    if (len(sys.argv) != 4):
        print('run_vcp_top_models.py params_file vc_cmd_file protocol_name')
        sys.exit()
    else:
        # Both input files are space-delimited tables.
        params = pd.read_csv(sys.argv[1], delimiter=' ')
        vcmd = pd.read_csv(sys.argv[2], delimiter=' ')
        protocol_name = sys.argv[3]
        main(params, vcmd, protocol_name)
| dtilley/EA_fit_to_AP_set | main_run_vcp.py | main_run_vcp.py | py | 667 | python | en | code | 0 | github-code | 36 |
# Sum of a contiguous subsequence (programmers 178870).
def solution(sequence, k):
    """Return the (start, end) indices of the shortest contiguous run of
    ``sequence`` summing to ``k``; the earliest such run wins on ties."""
    # prefix[i] holds the sum of the first i elements.
    prefix = [0] * (len(sequence) + 1)
    running = 0
    for idx, value in enumerate(sequence):
        running += value
        prefix[idx + 1] = running

    candidates = []
    lo, hi = 0, 1
    # Classic two-pointer sweep over the prefix sums.
    while lo < hi and hi < len(prefix):
        window_sum = prefix[hi] - prefix[lo]
        if window_sum == k:
            candidates.append((lo, hi - 1))
            hi += 1
        elif window_sum > k:
            lo += 1
        else:
            hi += 1

    # Stable sort by length keeps the earliest window first among ties.
    candidates.sort(key=lambda span: span[1] - span[0])
    return candidates[0]
| FeelingXD/algorithm | programers/178870.py | 178870.py | py | 592 | python | en | code | 2 | github-code | 36 |
23420574740 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Renames the TransferenciaStock depot fields, adds an authorization
    timestamp, and refreshes the MovimientoStock FK options."""

    dependencies = [
        ('stock', '0201_auto_20160926_1614'),
    ]

    operations = [
        # solicitante -> destino: the requesting depot becomes the destination.
        migrations.RenameField(
            model_name='transferenciastock',
            old_name='deposito_solicitante_transferencia',
            new_name='deposito_destino_transferencia',
        ),
        # proveedor -> origen: the supplying depot becomes the origin.
        migrations.RenameField(
            model_name='transferenciastock',
            old_name='deposito_proveedor_transferencia',
            new_name='deposito_origen_transferencia',
        ),
        # auto_now timestamp recorded when the transfer is authorized.
        migrations.AddField(
            model_name='transferenciastock',
            name='fecha_hora_autorizacion_transferencia',
            field=models.DateTimeField(auto_now=True, help_text=b'La fecha y hora se asignan al momento de autorizarse la Transferencia. No se requiere el ingreso de este dato.', null=True, verbose_name=b'Fecha/hora autorizacion Transferencia'),
        ),
        # Refresh related_name/verbose_name/help_text on the product FK.
        migrations.AlterField(
            model_name='movimientostock',
            name='producto_stock',
            field=models.ForeignKey(related_name='producto_stock', verbose_name=b'Producto', to='stock.Producto', help_text=b'Seleccione el Producto a registrar en el Stock.'),
        ),
    ]
| pmmrpy/SIGB | stock/migrations/0202_auto_20160926_1825.py | 0202_auto_20160926_1825.py | py | 1,333 | python | es | code | 0 | github-code | 36 |
72201720423 | import argparse
import transformers
from transformers import AutoModel, AutoTokenizer
import numpy as np
import torch
import logging
from pathlib import Path
from os.path import exists
import os
import pandas as pd
from tqdm import tqdm
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorWithPadding, AutoModelForSequenceClassification, TrainingArguments, Trainer
import csv, json
import evaluate
from datasets import Dataset
from captum.influence import TracInCP, TracInCPFast, TracInCPFastRandProj
from sklearn.metrics import auc, roc_curve
from torch import tensor
from transformers.pipelines import TextClassificationPipeline
from captum.attr import LayerIntegratedGradients, TokenReferenceBase, visualization
import matplotlib.pyplot as plt
import jsonlines
# Index of each sentiment label in the classifier's output logits.
labelToModelLogitIndex = {
    "Negative": 0,
    "Positive": 1,
}

# Raw text columns dropped after tokenization, keyed by dataset name.
colsToRemove = {
    "imdb": [
        "text"
    ]
}

# Name of the gold-label column, keyed by dataset name.
labelTag = {
    "imdb": "label"
}
parser = argparse.ArgumentParser()
parser.add_argument(
"-info",
action="store_true",
help="Boolean flag to enable info mode"
)
parser.add_argument(
"-log",
"--logFile",
type=str,
help="Path to file to print logging information",
default=None
)
parser.add_argument(
"-cacheDir",
help="Path to cache location for Huggingface",
default="/scratch/general/vast/u1419542/huggingface_cache/"
)
parser.add_argument(
"-dataset",
choices = [
"imdb",
],
default="imdb",
)
parser.add_argument(
"-numEpochs",
type=int,
help="Number of epochs to train model for",
default=1
)
parser.add_argument(
"-batchSize",
type=int,
help="Batch size of dataloader",
default=16
)
parser.add_argument(
"-learningRate",
type=float,
help="Learning rate for optimizer",
default=2e-5
)
parser.add_argument(
"-weightDecay",
type=float,
help="Weight Decay for optimizer",
default=0.01
)
parser.add_argument(
"-model",
help="Path to model to use",
default="microsoft/deberta-v3-large"
)
parser.add_argument(
"-out",
"--output_dir",
help="Path to output directory where trained model is to be saved",
required=True
)
parser.add_argument(
'-seed',
type=int,
help='Random seed',
default=13
)
parser.add_argument(
"-do_train",
action="store_true",
help="Boolean flag to train model"
)
parser.add_argument(
"-do_predict",
action="store_true",
help="Boolean flag to make predictions"
)
parser.add_argument(
"-cpu",
"--use_cpu",
action="store_true",
help="Boolean flag to use cpu only"
)
#---------------------------------------------------------------------------
def checkIfExists(path, isDir=False, createIfNotExists=False):
    """Validate ``path``, optionally creating missing directories.

    Directory paths must end with '/'.  A missing path is either created
    (when ``createIfNotExists`` is set) or rejected with ValueError.
    A file path must point at an actual file.
    """
    if isDir and not path.endswith("/"):
        raise ValueError("Directory path should end with '/'")
    if not exists(path):
        if not createIfNotExists:
            raise ValueError(f"{path} is an invalid path!")
        os.makedirs(path)
    if not isDir and not Path(path).is_file():
        raise ValueError(f"{path} is not a file!")
#---------------------------------------------------------------------------
def checkFile(fileName, fileExtension=None):
    """Validate that ``fileName`` is an existing regular file.

    Raises ValueError when ``fileExtension`` is given and does not match,
    RuntimeError when the path is missing or not a file.
    """
    if fileExtension and not fileName.endswith(fileExtension):
        raise ValueError(
            f"[checkFile] {fileName} does not have expected file extension {fileExtension}!"
        )
    if not exists(fileName):
        raise RuntimeError(f"[checkFile] {fileName} is an invalid file path!")
    if not Path(fileName).is_file():
        raise RuntimeError(f"[checkFile] {fileName} is not a file!")
#---------------------------------------------------------------------------
class ComputeMetrics:
    """Callable for the HF Trainer: argmax the logits and score the
    predictions against the references with the ``evaluate`` library."""

    def __init__(self, metricName="accuracy"):
        self.metricName = metricName
        self.metric = evaluate.load(metricName)

    def __call__(self, evalPreds):
        logits, gold = evalPreds
        predicted = np.argmax(logits, axis=1)
        return self.metric.compute(predictions=predicted, references=gold)
#---------------------------------------------------------------------------
class Tokenize:
    """Callable dataset mapper that tokenizes the ``text`` field.

    The ``dataset`` attribute is retained for prompt-based tokenization
    (see ``inputToPrompt``) but plain text is used here.
    """

    def __init__(self, tokenizer, dataset):
        self.tokenizer = tokenizer
        self.dataset = dataset

    def __call__(self, example):
        return self.tokenizer(example["text"], truncation=True)
#---------------------------------------------------------------------------
def inputToPrompt(instance, dataset):
    """Format a dataset instance into its classification prompt.

    Only the ``imdb`` dataset is supported; anything else raises ValueError.
    """
    if dataset != "imdb":
        raise ValueError("[inputToPrompt] {} not supported!".format(dataset))
    return "Review: {review}\nWhat is the sentiment of the review: negative or positive?".format(
        review=instance["text"]
    )
#---------------------------------------------------------------------------
def writeFile(data, fileName):
    """Serialize ``data`` to ``fileName`` according to its extension.

    Supported formats:
      * ``.csv``   -- ``data`` is a list of dicts sharing the same keys.
      * ``.json``  -- ``data`` is any JSON-serializable object.
      * ``.jsonl`` -- ``data`` is an iterable of JSON-serializable
        records, written one per line.

    Raises:
        ValueError: if the extension is not one of the above.
    """
    if fileName.endswith(".csv"):
        with open(fileName, 'w', newline='') as f:
            writer = csv.DictWriter(f, data[0].keys())
            writer.writeheader()
            writer.writerows(data)
    elif fileName.endswith(".json"):
        with open(fileName, "w") as f:
            json.dump(data, f)
    elif fileName.endswith(".jsonl"):
        with open(fileName, "w") as f:
            for instance in data:
                f.write(json.dumps(instance))
                f.write("\n")
    else:
        # Bug fix: the error message previously said "[readFile]".
        raise ValueError("[writeFile] {} has unrecognized file extension!".format(fileName))
#---------------------------------------------------------------------------
def collateBatch(batch):
    """Transpose a batch of per-example tuples into per-field tuples.

    E.g. ``[(x1, y1), (x2, y2)]`` -> ``(x1, x2), (y1, y2)`` (lazily, as
    a zip iterator).
    """
    return zip(*batch)
#---------------------------------------------------------------------------
def createDataLoader(ds, batchSize, collateFn=collateBatch):
    """Build a shuffling, single-worker DataLoader over ``ds``.

    ``collateFn`` defaults to the tuple-transposing ``collateBatch``.
    """
    return torch.utils.data.DataLoader(
        ds,
        batch_size=batchSize,
        shuffle=True,
        num_workers=0,
        collate_fn=collateFn,
    )
# ---------------------------------------------------------------------------
class DeBertaWrapper(torch.nn.Module):
    """Adapter exposing a HF sequence-classification model as a plain
    logits-returning module (the call shape captum's TracInCP expects)."""

    def __init__(self, model, device="cpu"):
        super(DeBertaWrapper, self).__init__()
        self.model = model    # wrapped HF classification model
        self.device = device  # device the wrapped model is moved to
        self.model.to(device)

    def __call__(self, *inputs):
        # NOTE(review): torch.tensor() on tensor inputs copies AND detaches
        # them, so no gradients flow through ``inputs`` or the returned
        # logits -- confirm this is acceptable for the TracInCP usage.
        inputs = torch.tensor(inputs, device=self.device).squeeze()
        return torch.tensor(self.model(inputs)["logits"])
        # return self.model(*inputs)

    def children(self):
        # Delegate so captum/torch see the wrapped model's layers.
        return self.model.children()
# ---------------------------------------------------------------------------
def main():
args = parser.parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if args.logFile:
checkFile(args.logFile)
logging.basicConfig(filename=args.logFile, filemode='w', level=logging.INFO)
elif args.info:
logging.basicConfig(filemode='w', level=logging.INFO)
else:
# logging.basicConfig(filemode='w', level=logging.ERROR)
logging.basicConfig(filemode='w', level=logging.INFO)
if torch.cuda.is_available() and not args.use_cpu:
logging.info("Using GPU: cuda")
device = "cuda"
else:
logging.info("Using CPU")
device = "cpu"
if args.batchSize <= 0:
raise ValueError("[main] Batch Size has to be a positive number!")
data = load_dataset(args.dataset, cache_dir=args.cacheDir)
data = data.shuffle(seed=args.seed)
if "train" not in data.keys():
raise RuntimeError("[main] No train split found in {} dataset!".format(args.dataset))
if "test" not in data.keys():
raise RuntimeError("[main] No test split found in {} dataset!".format(args.dataset))
data["train"] = data["train"].select(np.random.choice(len(data["train"]), 10))
data["test"] = data["test"].select(np.random.choice(len(data["test"]), 2))
tokenizer = AutoTokenizer.from_pretrained(args.model)
model = AutoModelForSequenceClassification.from_pretrained(args.model, num_labels=len(labelToModelLogitIndex))
model.to(device)
tokenizedDatasets = data.map(Tokenize(tokenizer, args.dataset), batched=True, remove_columns=colsToRemove[args.dataset])
tokenizedDatasets = tokenizedDatasets.rename_column(labelTag[args.dataset], "labels")
dataCollator = DataCollatorWithPadding(tokenizer=tokenizer, padding="max_length", max_length=1024)
if args.do_train or args.do_predict:
trainingArgs = TrainingArguments(
output_dir=args.output_dir,
num_train_epochs=args.numEpochs,
learning_rate=args.learningRate,
weight_decay=args.weightDecay,
per_device_train_batch_size=args.batchSize,
per_device_eval_batch_size=args.batchSize,
evaluation_strategy="steps",
save_strategy="steps",
save_steps=50,
eval_steps=50,
save_total_limit=100,
metric_for_best_model="accuracy",
load_best_model_at_end=True,
bf16=True,
gradient_accumulation_steps=4,
gradient_checkpointing=True
)
trainer = Trainer(
model,
trainingArgs,
train_dataset=tokenizedDatasets["train"],
eval_dataset=tokenizedDatasets["test"],
data_collator=dataCollator,
tokenizer=tokenizer,
compute_metrics=ComputeMetrics("accuracy")
)
if args.do_train:
#Train the model
trainer.train()
if args.do_predict:
#Sample 10 mispredictions randomly
predictions = trainer.predict(tokenizedDatasets["test"])
preds = np.argmax(predictions.predictions, axis=-1)
incorrectInds = np.where(~np.equal(preds, tokenizedDatasets["test"]["labels"]))[0]
assert len(incorrectInds) >= 10
testData = data["test"]
testData = testData.add_column("predicted", preds)
if args.dataset == "imdb":
testData = testData.rename_column("text", "review")
allData = Dataset.from_dict(testData[incorrectInds])
sampledData = Dataset.from_dict(testData[np.random.choice(incorrectInds, 10, replace=False)])
allData.to_json("mispredictions.jsonl", orient="records", lines=True)
sampledData.to_json("mispredictions_10.jsonl", orient="records", lines=True)
#Finding most influential training examples for test examples
# clf = transformers.pipeline("text-classification",
# model=model,
# tokenizer=tokenizer,
# device=device
# )
# modelCheckpoints = list(os.walk(args.output_dir))[0][1]
# extrChkpt = lambda path: int(path.split("-")[-1])
# sorted(modelCheckpoints, key=extrChkpt)
# appendOutputDirPath = lambda path: args.output_dir + "/" + path
# modelCheckpoints = list(map(appendOutputDirPath, modelCheckpoints))
# model = ExplainableTransformerPipeline(modelCheckpoints[-1], clf, device)
# checkpoints_load_func = lambda _, path: ExplainableTransformerPipeline(path, clf, device)
checkpoints_load_func = lambda _, path: DeBertaWrapper(AutoModelForSequenceClassification.from_pretrained(path, num_labels=len(labelToModelLogitIndex)), device)
model = DeBertaWrapper(model, device)
# #Generate train data in the format TracInCPFast expects
# trainDataLoader = createDataLoader(tokenizedDatasets["train"], args.batchSize, dataCollator)
# #Generate test data in the format TracInCPFast expects
# testDataLoader = createDataLoader(tokenizedDatasets["test"], args.batchSize, dataCollator)
tokenizedDatasets["train"] = tokenizedDatasets["train"].map(dataCollator)
tokenizedDatasets["test"] = tokenizedDatasets["test"].map(dataCollator)
tracin_cp_fast = TracInCPFast(
model=model,
final_fc_layer=list(model.children())[-1],
train_dataset=(
tokenizedDatasets["train"]["input_ids"],
torch.tensor(tokenizedDatasets["train"]["labels"], device=device),
),
# train_dataset=tokenizedDatasets["train"],
# train_dataset=trainDataLoader,
# checkpoints=modelCheckpoints,
checkpoints=args.output_dir,
checkpoints_load_func=checkpoints_load_func,
loss_fn=torch.nn.CrossEntropyLoss(reduction="sum"),
batch_size=1,
vectorize=False,
)
k = 10
proponents_indices, proponents_influence_scores = tracin_cp_fast.influence(
# testDataLoader,
(
tokenizedDatasets["test"]["input_ids"],
torch.tensor(tokenizedDatasets["test"]["labels"], device=device),
),
k=k,
proponents=True,
show_progress=True,
)
opponents_indices, opponents_influence_scores = tracin_cp_fast.influence(
# testDataLoader,
(
tokenizedDatasets["test"]["input_ids"],
torch.tensor(tokenizedDatasets["test"]["labels"], device=device),
),
k=k,
proponents=False,
show_progress=True,
)
print(proponents_indices)
print(opponents_indices)
#---------------------------------------------------------------------------
if __name__ == "__main__":
main() | RishanthRajendhran/influenceFunctions | model.py | model.py | py | 13,479 | python | en | code | 0 | github-code | 36 |
24185040460 | # for 目标 in 表达式: 最常搭配range,break,continue;
# Loop body: `for target in iterable:` — most often paired with range, break and continue.
favour = "yanchao"
for i in favour:
    print(i, end="-")  # end="-" joins the printed characters with "-" instead of newlines
member = ["小甲鱼", "小布丁", "小乌龟", "王八"]
for each in member:
    print(each, len(each))  # len() computes and returns the length of each string
# range() built-in, signature: range([start,] stop[, step=1]); the bracketed
# parameters are optional.  step defaults to 1; with step=2 the sequence grows
# by 2 each time.  range produces the numbers from start up to (excluding) stop.
i = range(5)
print(i)  # prints "range(0, 5)" — range returns a lazy object, not a list
a = list(range(5))
for a in range(5):
    print(a)  # NOTE: this `a` rebinds the list above; the loop prints 0,1,2,3,4
# With a single argument the sequence starts at 0 and excludes the stop value.
for i in range(2, 9):
    print(i)
for i in range(1, 10, 2):
    print(i)
# break: terminate the current loop and jump out of the loop body.
bingo = "宋彦超是小乌堆"
answer = input("请输入宋彦超最想听到的一句话:")
while True:
    if answer == bingo:
        break
    answer = input("oh sorry~请重新输入(答案正确才能退出游戏哦):")
print("哎哟帅哦~")
print("你真是小彦超肚子里的小蛔虫~")
# continue: end this iteration and start the next; the loop condition is
# re-tested first, and the loop only proceeds while it is still true.
for i in range(10):
    if i % 2 != 0:  # odd numbers are printed directly; even numbers get +2 first
        print(i)
        continue
    i += 2
    print(i)
# For 0..9: check the remainder mod 2 — odd values print as-is, even values print after adding 2.
| hewuling001/python-learn | index8.py | index8.py | py | 1,871 | python | zh | code | 1 | github-code | 36 |
25059204376 | import numpy as np
from mat import A2195_T84
def get_sphere_mass(r, p, material):
    """Mass of a thin spherical shell of radius *r* sized for pressure *p*.

    The wall thickness comes from the hoop-stress allowable of *material*
    (see get_hoop_t) and the shell is assumed thin (area * thickness * rho).
    """
    shell_thickness = get_hoop_t(r, p, material)
    surface_area = 4 * np.pi * r ** 2
    return surface_area * shell_thickness * material.get("rho")
def get_hoop_t(r, p, material):
    """Wall thickness keeping hoop stress p*r/t at the material yield allowable.

    `sigma_y` is expected in MPa, hence the 1e6 conversion to Pa.
    """
    thickness = (r * p) / material.get("sigma_y")
    return thickness / 1e6
def get_sphere_volume(r):
    """Volume of a sphere of radius *r* (4/3 * pi * r^3)."""
    return (4.0 / 3.0) * np.pi * r ** 3
def elongate(r, dr=0.1):
    """Shrink a sphere's radius and redistribute the lost volume into a cylinder.

    Generalised: the radius reduction is now a parameter instead of the
    hard-coded 0.1, with the old value as default (backward compatible).

    Parameters
    ----------
    r : float
        Original sphere radius.
    dr : float, optional
        Amount to reduce the radius by (default 0.1, the original constant).

    Returns
    -------
    list
        [r_new, L] where L is the cylinder length (radius r_new) whose volume
        equals the spherical volume removed by the shrink.
    """
    volume_before = get_sphere_volume(r)
    r_new = r - dr
    volume_lost = volume_before - get_sphere_volume(r_new)
    # Length of a cylinder of radius r_new holding the displaced volume.
    L = volume_lost / (np.pi * r_new ** 2)
    return [r_new, L]
def get_cylinder_mass(r, l, t):
    # Shell quantity for an open cylinder: lateral area (2*pi*r*l) * wall thickness t.
    # NOTE(review): despite the name this returns surface area * thickness — a
    # material *volume* (m^3) — because, unlike get_sphere_mass, it is never
    # multiplied by a density rho.  The caller below adds this to a sphere mass
    # in kg; confirm whether a `* material.get("rho")` factor is missing.
    m = 2*np.pi*r*t*l
    return m
# --- Script driver: size a 0.4 m sphere at 2.4 MPa, then an elongated variant ---
R = 0.4 # radius [m]
P = 2.4e6 # pressure [Pa]
mat = A2195_T84
print(get_sphere_mass(R, P, mat))
# NOTE(review): elongate(R) is evaluated twice; the pair could be unpacked once.
R_new = elongate(R)[0]
L = elongate(R)[1]
print(get_sphere_mass(R_new, P, mat)+get_cylinder_mass(R_new, L, get_hoop_t(R, P, mat)))
print(get_sphere_mass(0.34961345684772915, P, mat))
41797269228 | from django.core.management.base import BaseCommand
from django.conf import settings
import requests
import json
import telebot
from telebot import types
from crypton.models import Profile
from preferences import preferences
bot = telebot.TeleBot(settings.TOKEN)
coins = {
'BTC': ['btc', 'bitcoin', 'биткоин'],
'ETH': ['eth', 'ethereum', 'эфириум'],
'DOGE': ['doge', 'dogecoin', 'догикоин']
}
class Command(BaseCommand):
    """Django management command that runs the Telegram bot via long polling."""
    help = 'Telegram Bot'

    def handle(self, *args, **options):
        # Blocks forever; telebot dispatches incoming updates to the
        # decorated handlers defined below in this module.
        bot.polling(none_stop=True)
def exchange(crypto):
    """Return the current USD price of *crypto* from the CoinMarketCap API."""
    endpoint = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
    session = requests.Session()
    session.headers.update({
        "X-CMC_PRO_API_KEY": settings.COINMARKETCAP_API_KEY,
        "Accept": "application/json",
    })
    response = session.get(endpoint, params={'symbol': crypto})
    payload = json.loads(response.text).get('data')
    return payload[f'{crypto}']['quote']['USD']['price']
@bot.message_handler(commands=['start'])
def send_welcome(message):
    # /start handler: greet the user with the crypto keyboard and make sure a
    # Profile row exists for this Telegram chat (created on first contact).
    bot.send_message(message.chat.id, preferences.BotPreferences.welcome, reply_markup=choice_crypto())
    chat_id = message.chat.id
    Profile.objects.get_or_create(
        tg_id=chat_id,
        defaults={
            'tg_username': message.from_user.username,
            'tg_firstname': message.from_user.first_name,
            'tg_lastname': message.from_user.last_name,
        }
    )
@bot.message_handler(content_types=["text"])
def send_anytext(message):
    """Reply with the price of whichever coin the message names, else an error."""
    query = message.text.lower()
    chat_id = message.chat.id
    # Find the first coin whose alias list contains the user's text.
    matched = next((symbol for symbol, aliases in coins.items() if query in aliases), None)
    if matched is not None:
        bot.send_message(chat_id, exchange(matched), reply_markup=choice_crypto())
    else:
        bot.send_message(chat_id, preferences.BotPreferences.error_message, reply_markup=choice_crypto())
def choice_crypto():
    """Build the one-shot reply keyboard offering the three supported coins."""
    keyboard = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)
    prefs = preferences.BotPreferences
    keyboard.add(
        types.KeyboardButton(prefs.btc),
        types.KeyboardButton(prefs.eth),
        types.KeyboardButton(prefs.doge),
    )
    return keyboard
| iterweb/test_bot | crypton/management/commands/tg_bot.py | tg_bot.py | py | 2,340 | python | en | code | 0 | github-code | 36 |
13404192861 | import warnings, logging, os, sys
warnings.filterwarnings('ignore',category=FutureWarning)
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import json
import tensorflow as tf
from utils import *
from arguments import get_args_many
args = get_args_many()

MDIR = args.MDIR

# Fixed architecture constants matching the pre-trained checkpoints:
# 61 residual blocks, 64 filters, 3x3 windows, sequence-reweighting cutoff
# 0.8, and a 21-letter alphabet (presumably 20 amino acids + gap — confirm).
n2d_layers = 61
n2d_filters = 64
window2d = 3
wmin = 0.8
ns = 21

# load network weights in RAM
w,b,beta_,gamma_ = load_weights(args.MDIR)

#
# network
#
config = tf.ConfigProto(
    gpu_options = tf.GPUOptions(allow_growth=True)
)

activation = tf.nn.elu  # NOTE(review): unused below; `Activation` is rebound inside the graph
conv1d = tf.layers.conv1d
conv2d = tf.layers.conv2d
# Build the trRosetta inference graph once; every ensemble member shares the
# same input features and the outputs are averaged across members.
with tf.Graph().as_default():
    with tf.name_scope('input'):
        # ncol/nrow: alignment width (residues) and depth (sequences);
        # msa: integer-encoded alignment of shape (nrow, ncol).
        ncol = tf.placeholder(dtype=tf.int32, shape=())
        nrow = tf.placeholder(dtype=tf.int32, shape=())
        msa = tf.placeholder(dtype=tf.uint8, shape=(None,None))

    #
    # collect features
    #
    msa1hot = tf.one_hot(msa, ns, dtype=tf.float32)
    weights = reweight(msa1hot, wmin)

    # 1D features
    f1d_seq = msa1hot[0,:,:20]
    f1d_pssm = msa2pssm(msa1hot, weights)
    f1d = tf.concat(values=[f1d_seq, f1d_pssm], axis=1)
    f1d = tf.expand_dims(f1d, axis=0)
    f1d = tf.reshape(f1d, [1,ncol,42])

    # 2D features (direct-coupling analysis only when more than one sequence)
    f2d_dca = tf.cond(nrow>1, lambda: fast_dca(msa1hot, weights), lambda: tf.zeros([ncol,ncol,442], tf.float32))
    f2d_dca = tf.expand_dims(f2d_dca, axis=0)

    # Tile the 1D features along both axes and stack with the DCA features.
    f2d = tf.concat([tf.tile(f1d[:,:,None,:], [1,1,ncol,1]),
                    tf.tile(f1d[:,None,:,:], [1,ncol,1,1]),
                    f2d_dca], axis=-1)
    f2d = tf.reshape(f2d, [1,ncol,ncol,442+2*42])

    #
    # 2D network
    #
    # store ensemble of networks in separate branches
    layers2d = [[] for _ in range(len(w))]
    preds = [[] for _ in range(4)]

    Activation = tf.nn.elu
    for i in range(len(w)):
        layers2d[i].append(Conv2d(f2d,w[i][0],b[i][0]))
        layers2d[i].append(InstanceNorm(layers2d[i][-1],beta_[i][0],gamma_[i][0]))
        layers2d[i].append(Activation(layers2d[i][-1]))

        # resnet: pairs of conv/norm/act with a skip connection every block;
        # dilation cycles 1,2,4,8,16.
        idx = 1
        dilation = 1
        for _ in range(n2d_layers):
            layers2d[i].append(Conv2d(layers2d[i][-1],w[i][idx],b[i][idx],dilation))
            layers2d[i].append(InstanceNorm(layers2d[i][-1],beta_[i][idx],gamma_[i][idx]))
            layers2d[i].append(Activation(layers2d[i][-1]))
            idx += 1
            layers2d[i].append(Conv2d(layers2d[i][-1],w[i][idx],b[i][idx],dilation))
            layers2d[i].append(InstanceNorm(layers2d[i][-1],beta_[i][idx],gamma_[i][idx]))
            layers2d[i].append(Activation(layers2d[i][-1] + layers2d[i][-6]))
            idx += 1
            dilation *= 2
            if dilation > 16:
                dilation = 1

        # probabilities for theta and phi (asymmetric maps, taken pre-symmetrization)
        preds[0].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][123],b[i][123]))[0])
        preds[1].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][124],b[i][124]))[0])

        # symmetrize
        layers2d[i].append(0.5*(layers2d[i][-1]+tf.transpose(layers2d[i][-1],perm=[0,2,1,3])))

        # probabilities for dist and omega (symmetric maps)
        preds[2].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][125],b[i][125]))[0])
        preds[3].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][127],b[i][127]))[0])
        #preds[4].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][126],b[i][126]))[0])

    # average over all branches
    prob_theta = tf.reduce_mean(tf.stack(preds[0]),axis=0)
    prob_phi = tf.reduce_mean(tf.stack(preds[1]),axis=0)
    prob_dist = tf.reduce_mean(tf.stack(preds[2]),axis=0)
    prob_omega = tf.reduce_mean(tf.stack(preds[3]),axis=0)

    with tf.Session(config=config) as sess:
        # loop over all A3M files in the input folder
        for filename in os.listdir(args.ALNDIR):
            if not filename.endswith(".a3m"):
                continue

            # parse & predict
            a3m = parse_a3m(args.ALNDIR + '/' + filename)
            print("processing:", filename)
            pd, pt, pp, po = sess.run([prob_dist, prob_theta, prob_phi, prob_omega],
                feed_dict = {msa : a3m, ncol : a3m.shape[1], nrow : a3m.shape[0] })

            # save distograms & anglegrams
            npz_file = args.NPZDIR + '/' + filename[:-3] + 'npz'
            np.savez_compressed(npz_file, dist=pd, omega=po, theta=pt, phi=pp)
| gjoni/trRosetta | network/predict_many.py | predict_many.py | py | 4,388 | python | en | code | 192 | github-code | 36 |
42305321529 | import sqlite3
import DepartmentStudentAmounts as dsa
Database = sqlite3.connect('Identities')
items = Database.cursor()
"""class used to validate the adding none year group specific items
Attributes
-----------
private Attributes
---> name item
---> average
---> department
"""
class validate_adding_item_none_yeargroup_specfic:
    # Validation + DB insert for department-wide stock items (the
    # None_yeargroup_specific table).  The class name's spelling is part of
    # the public interface and is left as-is.
    name_item_length_max = 25  # item name must be <= 25 characters
    name_item_length_mini = 3  # ... and strictly longer than 3
    average_upper_bound = 6    # per-student average must be < 6
    average_lower_bound = 1    # ... and >= 1
    total_students = 0
    DefaultStockAlertValue = 0

    def __init__(self, name_item, average, department):
        self.name_item = name_item
        self.average = average
        self.department = department

    # Method is used to check the user inputs
    def check(self):
        # True when both the item-name length and the per-student average fall
        # inside the class bounds above.
        if validate_adding_item_none_yeargroup_specfic.name_item_length_mini < len(self.name_item) <= validate_adding_item_none_yeargroup_specfic.name_item_length_max and validate_adding_item_none_yeargroup_specfic.average_lower_bound <= self.average < validate_adding_item_none_yeargroup_specfic.average_upper_bound:
            return True
        else:
            return False

    # Method is used to perform claculation about the amount that should be inputted into the database
    def calculations(self):
        # Sum this department's student counts across every year group.
        # NOTE(review): `self.total_students += value` reads the class attribute
        # (0) on first use but then creates an instance attribute, so calling
        # calculations() twice on the same instance doubles the total — confirm
        # callers only invoke it once (final_calc does).
        # Loops through the dictonary calculating the total amount of students
        for i in dsa.StudentAmounts:
            value = dsa.StudentAmounts[i][self.department]
            # Adds the value to the total students class variable
            self.total_students += value
        return self.total_students

    # Method is used to perform the final calcultaion abou the amount that would be inputted to the database
    def final_calc(self):
        TotalStudents = self.calculations()
        CurrentAmount = TotalStudents * self.average
        # The minimum/alert amount is 10% of the stocked quantity.
        MinimumAmount = round((CurrentAmount * 0.1), 0)
        return CurrentAmount, MinimumAmount

    # Method is used to check if the item already exsists in the database
    def checkItem_exsist(self):
        # Returns True when the insert must be blocked: either validation
        # failed (invalid input is treated the same as "already exists") or a
        # row with this name is already present in the table.
        previous = self.check()
        if previous:
            items.execute("SELECT * FROM None_yeargroup_specific WHERE Name_item=?", (self.name_item,))
            item = items.fetchone()
            Database.commit()
            if item is not None:
                return True
            else:
                return False
        else:
            return True

    # Method is used to input the item to the database
    def inputItem(self):
        # Insert the item and report success (True) or blocked/failed (False).
        # NOTE: sqlite3's Connection.commit() returns None, so the
        # `result is not None` failure branch can never fire.
        previous = self.checkItem_exsist()
        if previous is False:
            CalculationResults = self.final_calc()
            CurrentAmount = CalculationResults[0]
            MinimumAmount = CalculationResults[1]
            items.execute("INSERT INTO None_yeargroup_specific VALUES (?, ?, ?, ?, ?)", (self.name_item, CurrentAmount, MinimumAmount, self.department, validate_adding_item_none_yeargroup_specfic.DefaultStockAlertValue))
            result = Database.commit()
            if result is not None:
                return False
            else:
                return True
        else:
            return False
"""Subclass used to validate entries
Subclass ( valdidate_adding_yeargroup_specfic ) inherites from Superclass ( validate_adding_item_none_yeargroup_specfic )
subclass superclass
validate_adding_item_none_yeargroup_specfic -----> valdidate_adding_yeargroup_specfic
Attributes
-----------
---> year group
+ the inherted attributes
"""
class valdidate_adding_yeargroup_specfic(validate_adding_item_none_yeargroup_specfic):
    """Validation + DB insert for year-group-specific stock items.

    Inherits input validation (`check`) and class constants from
    validate_adding_item_none_yeargroup_specfic, overriding the amount
    calculation, the existence check and the insert for the
    year_group_specific table.
    """

    def __init__(self, name_item, average, department, yeargroup):
        super().__init__(name_item, average, department)
        # Year group as a string, e.g. "7" — keyed as "Year 7" in dsa.StudentAmounts.
        self.yeargroup = yeargroup

    def calculation(self):
        # Current stock = per-student average * students in this year group and
        # department; the minimum/alert level is 30% of that (rounded).
        amount_students = dsa.StudentAmounts["Year " + self.yeargroup][self.department]
        result = self.average * amount_students
        Minimum_amount = round(result * 0.3)
        return result, Minimum_amount

    def checkItem_exsist(self):
        # Returns True when the insert must be blocked: validation failed, or
        # an identical (name, year group) row already exists.
        if not self.check():
            # Invalid input blocks the insert, matching the parent class
            # (the original returned False here, letting bad data through).
            return True
        # BUG FIX: the original inspected the value of Database.commit() —
        # which is always None for sqlite3 — instead of the query result, so
        # existing items were never detected and duplicates were inserted.
        # Fetch the row the same way the parent class does.
        items.execute("SELECT * FROM year_group_specific WHERE Name_item=? AND year_group=?", (self.name_item, self.yeargroup))
        item = items.fetchone()
        Database.commit()
        return item is not None

    def inputItem(self):
        # Insert the item and report success (True) or blocked/failed (False).
        if self.checkItem_exsist():
            return False
        CurrentAmount, MinimumAmount = self.calculation()
        items.execute("INSERT INTO year_group_specific VALUES (?, ?, ?, ?, ?, ?)", (self.name_item, CurrentAmount, MinimumAmount, self.department, self.yeargroup, validate_adding_item_none_yeargroup_specfic.DefaultStockAlertValue, ))
        result = Database.commit()
        # NOTE: commit() returns None, so this mirrors the parent's (always
        # successful) reporting for consistency.
        if result is not None:
            return False
        else:
            return True
| newton124/code | AddingItem.py | AddingItem.py | py | 5,597 | python | en | code | 0 | github-code | 36 |
29884273423 | """[Recommend] iPhone 13 Again"""
def main():
    """iPhone 13 Again — read a model and a capacity from stdin, print the price.

    Prices step up by 4000 per capacity tier above each model's base price;
    any unknown model/capacity combination prints "Not Available".
    """
    model = input()
    capacity = input()
    result = ""
    # mini / 13 are sold up to 512 GB; Pro / Pro Max also come in 1 TB.
    case1 = capacity == '128 GB' or capacity == '256 GB' or capacity == '512 GB'
    case2 = capacity == '128 GB' or capacity == '256 GB' or \
        capacity == '512 GB' or capacity == '1 TB'
    if (model == "iPhone 13 mini" or model == "iPhone 13") and case1:
        # "128 GB"/"256 GB"/"512 GB" -> 1/2/4 via integer division by 128,
        # so the 512 GB tier sits 3 price steps (12000) above base.
        capacity = int(capacity[0:3])
        capacity = capacity // 128
        if model == "iPhone 13":
            result += str(29900 + 4000*(capacity-1))
        else:
            result += str(25900 + 4000*(capacity-1))
    elif (model == "iPhone 13 Pro" or model == "iPhone 13 Pro Max") and case2:
        if capacity == "1 TB":
            capacity = 6  # 1 TB sits 5 price steps (20000) above base
        else:
            capacity = int(capacity[0:3])//128
        if model == "iPhone 13 Pro":
            result += str(38900 + 4000*(capacity-1))
        else:
            result += str(42900 + 4000*(capacity-1))
    else:
        result += "Not Available"
    print(result)
main()
| DefinitelyNotJay/ejudge | Iphone13.py | Iphone13.py | py | 1,043 | python | en | code | 0 | github-code | 36 |
8651855944 | import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
# End-to-end checkout flow against rahulshettyacademy's practice shop:
# search "ber", add all results to the cart, verify the total, apply a promo
# code and place the order.
service_obj = Service("C:/Users/PUJA/chromedriver")
driver = webdriver.Chrome(service=service_obj)
driver.implicitly_wait(5)
expectedlist=['Cucumber - 1 Kg', 'Raspberry - 1/4 Kg', 'Strawberry - 1/4 Kg']
driver.get("https://rahulshettyacademy.com/seleniumPractise/#/")
driver.find_element(By.CSS_SELECTOR,".search-keyword").send_keys("ber")
time.sleep(2)
#product_list:
actuallist=[]
productlist= driver.find_elements(By.XPATH,"//div/h4")
for product in productlist:
    actuallist.append(product.text)
print(actuallist)
# Search results must match the expected products exactly.
assert actuallist == expectedlist
count = driver.find_elements(By.XPATH,"//div[@class='products']/div")
print(len(count))
assert len(count)>0
#Chaining of web element
for c in count:
    c.find_element(By.XPATH,"div/button").click()
driver.find_element(By.XPATH,"//img[@alt='Cart']").click()
driver.find_element(By.XPATH,"//button[text()='PROCEED TO CHECKOUT']").click()
#sum _validation
# NOTE(review): `sum` shadows the built-in sum() for the rest of the script.
sum=0
rates= driver.find_elements(By.XPATH,"//tr/td[5]/p")
for rate in rates:
    sum= sum+int(rate.text)
print(sum)
totalamount= int(driver.find_element(By.CSS_SELECTOR,".totAmt").text)
# Sum of line-item prices must equal the displayed cart total.
assert sum == totalamount
driver.find_element(By.CSS_SELECTOR,".promoCode").send_keys("rahulshettyacademy")
driver.find_element(By.CLASS_NAME,"promoBtn").click()
# Explicit wait: the promo result banner appears asynchronously.
wait= WebDriverWait(driver,5)
wait.until(expected_conditions.presence_of_element_located((By.CSS_SELECTOR,".promoInfo")))
print(driver.find_element(By.CLASS_NAME,"promoInfo").text)
discountedamount= float(driver.find_element(By.CLASS_NAME,"discountAmt").text)
print(discountedamount)
assert totalamount>discountedamount
driver.find_element(By.XPATH,"//button[text()='Place Order']").click()
dropdown= Select(driver.find_element(By.XPATH,"//div/select"))
dropdown.select_by_value("India")
driver.find_element(By.CSS_SELECTOR,".chkAgree").click()
driver.find_element(By.XPATH,"//button[text()='Proceed']").click()
| PCPuja/FirstGit | Waits.py | Waits.py | py | 2,198 | python | en | code | 0 | github-code | 36 |
31320163241 | import pandas as pd
import pymongo
from .metacritic_scrape import scrape_game_page
from .pandas_functions import clean_df
from selenium.webdriver import Firefox
import random
import time
# Module-level Mongo handles: local client with the reviews and games
# collections of the game_recommender database.
mc = pymongo.MongoClient()
db =mc['game_recommender']
reviews_coll = db['reviews']
games=db['games']
#omc=pymongo.MongoClient()
#cb = omc['ps4_game_data']
#games = cb['games']
def flatten_game_dict(game_dict):
    """Yield one {'game_id', 'user_id', 'score'} record per rating found in a
    nested {game_id: {user_id: score}} mapping."""
    yield from (
        {'game_id': game, 'user_id': user, 'score': rating}
        for game, scores_by_user in game_dict.items()
        for user, rating in scores_by_user.items()
    )
def store_all_users(coll=games):
    """Take raw_html from a game's user review page, and store the game, username, & score
    as an entry in reviews collection"""
    # Scrape the per-user scores for every game title found in *coll*, then
    # insert each (game, user, score) triple into reviews_coll unless that
    # exact pair is already stored.
    games_dict={}
    df = pd.DataFrame(list(coll.find()))
    #df =clean_df(df=df)
    game_titles = list(df.title)
    # NOTE(review): the Firefox browser is never quit/closed on any path —
    # confirm whether scrape_game_page or the caller is expected to clean up.
    browser=Firefox()
    for game in game_titles:
        result= scrape_game_page(title=game, browser=browser)
        if not result:
            # No reviews scraped for this title; skip it.
            continue
        games_dict[game] = result
    # Pairs already present, so re-runs don't insert duplicates.
    existing_game_reviews=set((r['game_id'], r['user_id'])
                              for r in reviews_coll.find())
    flattened=flatten_game_dict(game_dict=games_dict)
    for review in flattened:
        game_id = review['game_id']
        user_id = review['user_id']
        if (game_id, user_id) not in existing_game_reviews:
            reviews_coll.insert_one(review)
def make_preference_df(db=reviews_coll):
    """Build a pandas DataFrame of all review records with dense integer ids.

    Adds 'game_number' and 'user_number' columns mapping each unique
    game_id / user_id onto 0..n-1 indices (assignment order is arbitrary
    but consistent within the frame).
    """
    df = pd.DataFrame(list(db.find()))
    # One dense integer per distinct id.
    game_index = {g: i for i, g in enumerate(set(df['game_id']))}
    user_index = {u: i for i, u in enumerate(set(df['user_id']))}
    df['game_number'] = df['game_id'].apply(game_index.get)
    df['user_number'] = df['user_id'].apply(user_index.get)
    #df=df.pivot(index='user_number', columns='game_number', values='score' )
    return df
| loeschn/Video-Game-Recommender | Notebooks/notebook_src/make_preference_matrix.py | make_preference_matrix.py | py | 2,145 | python | en | code | 0 | github-code | 36 |
74050766504 | from parlai.utils.safety import OffensiveLanguageClassifier
from parlai.utils.safety import OffensiveStringMatcher
from parlai.utils.testing import skipUnlessBPE, skipUnlessGPU
import unittest
# Fixture phrases for the safety detectors: benign text, borderline insults
# (caught only by the learned classifier), and explicit profanity.
DEFINITELY_GOOD = [
    'hey how are you',
    'what is your name?',
    'i like to play sports',
    'i have a horse',
]
PROBABLY_BAD = ['i hate you', 'you are pretty ugly', 'parlai is the worst platform']
DEFINITELY_BAD = ['fuck you', 'you are a piece of shit']
class TestSafetyModules(unittest.TestCase):
    # Exercises ParlAI's two offensive-language detectors (`in` is the
    # membership API on both) against the fixture phrases above.

    def test_string_matcher(self):
        # Only the DEFINITELY_* fixtures are asserted for the word-list
        # matcher; the PROBABLY_BAD set is left to the classifier below.
        sm = OffensiveStringMatcher()
        for phrase in DEFINITELY_BAD:
            assert phrase in sm, f'`{phrase}` is offensive'
        for phrase in DEFINITELY_GOOD:
            assert phrase not in sm, f'`{phrase}` is not offensive'

    @skipUnlessGPU
    @skipUnlessBPE
    def test_classifier(self):
        # The learned classifier must also flag the softer PROBABLY_BAD set.
        lc = OffensiveLanguageClassifier()
        for phrase in DEFINITELY_BAD:
            assert phrase in lc, f'`{phrase}` is offensive'
        for phrase in PROBABLY_BAD:
            assert phrase in lc, f'`{phrase}` is offensive'
        for phrase in DEFINITELY_GOOD:
            assert phrase not in lc, f'`{phrase}` is not offensive'
if __name__ == '__main__':
unittest.main()
| facebookresearch/ParlAI | tests/nightly/gpu/test_safety_modules.py | test_safety_modules.py | py | 1,247 | python | en | code | 10,365 | github-code | 36 |
29580682652 | #!/usr/local/bin/py
import argparse
import hashlib
import logging
import os
import oss2 # pip install oss2
import re
# Silence chatty third-party loggers; keep this script's own output at INFO.
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger('oss2').setLevel(logging.WARNING)
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s][%(levelname)s] %(message)s',
    # filename='/tmp/oss-sync.log'
)

_CACHE = {}
ROOT_API_KEY = os.path.join(os.getenv('HOME'), '.aliyun')

# Doc: https://help.aliyun.com/knowledge_detail/5974206.htm
# Qingdao public endpoint:   oss-cn-qingdao.aliyuncs.com
# Qingdao internal endpoint: oss-cn-qingdao-internal.aliyuncs.com
#
# Beijing public endpoint:   oss-cn-beijing.aliyuncs.com
# Beijing internal endpoint: oss-cn-beijing-internal.aliyuncs.com
#
# Hangzhou public endpoint:   oss-cn-hangzhou.aliyuncs.com
# Hangzhou internal endpoint: oss-cn-hangzhou-internal.aliyuncs.com
#
# Shanghai public endpoint:   oss-cn-shanghai.aliyuncs.com
# Shanghai internal endpoint: oss-cn-shanghai-internal.aliyuncs.com
#
# Hong Kong public endpoint:   oss-cn-hongkong.aliyuncs.com
# Hong Kong internal endpoint: oss-cn-hongkong-internal.aliyuncs.com
#
# Shenzhen public endpoint:   oss-cn-shenzhen.aliyuncs.com
# Shenzhen internal endpoint: oss-cn-shenzhen-internal.aliyuncs.com
#
# US West public endpoint:   oss-us-west-1.aliyuncs.com
# US West internal endpoint: oss-us-west-1-internal.aliyuncs.com
#
# Singapore public endpoint:   oss-ap-southeast-1.aliyuncs.com
# Singapore internal endpoint: oss-ap-southeast-1-internal.aliyuncs.com
#
# The legacy address oss.aliyuncs.com resolves to the Hangzhou public
# endpoint; oss-internal.aliyuncs.com to the Hangzhou internal endpoint.
API_URL = 'oss-cn-hangzhou.aliyuncs.com'

# Regex patterns for files that must never be synced (dotfiles, *.pyc).
IGNORE_FILES = (
    '\/\..*$',
    '\.pyc$',
)
def is_in_ignore_files(file_path):
    """True when *file_path* matches any pattern in IGNORE_FILES."""
    return any(re.search(pattern, file_path) for pattern in IGNORE_FILES)
def get_file_md5(file_path):
    """Return the hex MD5 digest of the file at *file_path*, read in 64 KiB chunks."""
    digest = hashlib.md5()
    with open(file_path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def sizeof_fmt(num):
    """Format a byte count for humans.

    Anything up to 1024 bytes is deliberately floored to '1 KB' (original
    behavior, kept).  Larger values are scaled through KB..TB.

    BUG FIX: the original fell off the end of the unit list for values of
    1024 TB or more and implicitly returned None; those now report in PB.
    """
    if num <= 1024:
        return '1 KB'
    for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, x)
        num /= 1024.0
    # >= 1024 TB: report in petabytes instead of returning None.
    return "%3.1f %s" % (num, 'PB')
def get_bucket(args):
    # Lazily build — and memoise in the module-level _CACHE — the oss2 Bucket
    # client, reading credentials from ~/.aliyun/apikey and ~/.aliyun/secretkey.
    if 'bucket' in _CACHE:
        return _CACHE['bucket']

    # NOTE(review): the credential files are opened without a context manager
    # and never explicitly closed (relies on GC).
    api_key = open(os.path.join(ROOT_API_KEY, 'apikey')).read().strip()
    api_secret = open(os.path.join(ROOT_API_KEY, 'secretkey')).read().strip()
    auth = oss2.Auth(api_key, api_secret)
    bucket = oss2.Bucket(auth, API_URL, args.bucket)
    _CACHE['bucket'] = bucket
    return bucket
def get_local_objects(target_path):
    # Map local relative file path -> uppercase MD5 digest for everything under
    # target_path (or the whole script directory when target_path is empty).
    objects = {}
    oss_dir = os.path.dirname(__file__)
    if target_path:
        oss_dir = os.path.join(oss_dir, target_path)
    else:
        oss_dir = os.path.join(oss_dir, '.')
    if not os.path.exists(oss_dir):
        return objects

    file_count = 0
    if os.path.isdir(oss_dir):
        for root, dirs, files in os.walk(oss_dir):
            for f in files:
                # Strip a leading "./" so keys match the OSS object keys.
                root = re.sub(r'^\./?', '', root)
                local_path = os.path.join(root, f)
                if is_in_ignore_files(local_path):
                    logging.info('ignored file: {}'.format(local_path))
                    continue
                md5 = get_file_md5(local_path)
                objects[local_path] = md5.upper()
                file_count += 1
    else:
        # target_path points at a single file rather than a directory.
        md5 = get_file_md5(oss_dir)
        local_path = re.sub(r'^\./', '', target_path)
        objects[local_path] = md5.upper()
        file_count += 1
    logging.info('local files: {}'.format(file_count))
    return objects
def get_remote_objects(args):
    # Page through the bucket listing (100 keys at a time) and index the
    # objects three ways: 'files' (key -> etag), 'etags' (etag -> key, used for
    # duplicate-content detection) and 'meta' (key -> full object record).
    # The --min-size/--max-size/--re filters are applied here.
    objects = {'files': {}, 'etags': {}, 'meta': {}}
    bucket = get_bucket(args)
    marker = None
    file_count = 0
    prefix = re.sub(r'^\./?', '', args.target_path or '')
    while True:
        result = bucket.list_objects(prefix=prefix, max_keys=100, marker=marker)
        for obj in result.object_list:
            if obj.key.endswith('/'):
                # Skip directory placeholder objects.
                continue
            if args.min_size and obj.size < args.min_size:
                continue
            if args.max_size and obj.size > args.max_size:
                continue
            if args.re and not re.search(args.re, obj.key):
                continue
            objects['files'][obj.key] = obj.etag
            objects['etags'][obj.etag] = obj.key
            objects['meta'][obj.key] = obj
            file_count += 1
        marker = result.next_marker
        if not result.is_truncated:
            break
    logging.info('remote files: {}'.format(file_count))
    return objects
def upload_file(local_path, args):
    """Upload one local file to OSS; abort the whole program on failure."""
    object_key = re.sub(r'^\./?', '', local_path)
    result = get_bucket(args).put_object_from_file(object_key, local_path)
    if result.status != 200:
        logging.error('Upload {} failed. Exit.'.format(local_path))
        exit(1)
def upload_files_to_oss(args):
    """Upload new files and (interactively) update changed files under target_path.

    Local MD5s are compared with remote ETags: content already present
    anywhere in the bucket is skipped; changed files are re-uploaded (asking
    per file unless --yes/--no was given); unknown files are uploaded.
    """
    target_path = re.sub(r'^\./?', '', args.target_path)
    logging.info('Uploading/Updating for: {}'.format(target_path))
    los = get_local_objects(target_path)
    # NOTE: the original branched on args.check_duplicated but both branches
    # called get_remote_objects(args) identically; the listing is needed in
    # either case, so the dead branch was collapsed.
    ros = get_remote_objects(args)

    files_need_to_update = []
    files_need_to_upload = []
    for local_path in los.keys():
        md5 = los[local_path]
        if md5 in ros['etags']:
            # Identical content already in the bucket (possibly under another key).
            logging.info('* Identical file found:')
            logging.info('* @ {}'.format(ros['etags'][md5]))
            continue
        if local_path not in ros['files']:
            size = sizeof_fmt(os.path.getsize(local_path))
            files_need_to_upload.append((local_path, size))
        elif ros['files'][local_path] != md5:
            size = sizeof_fmt(os.path.getsize(local_path))
            files_need_to_update.append((local_path, size))
    files_need_to_update.sort()
    files_need_to_upload.sort()

    index = 1
    count = len(files_need_to_update)
    for local_path, size in files_need_to_update:
        if args.no:
            break
        elif args.yes:
            upload_file(local_path, args)
            index += 1
        else:
            # BUG FIX: normalise the answer once.  The original validated a
            # lowered/stripped copy but then compared the *raw* input against
            # 'no', so e.g. " No " fell through and overwrote the file.
            print('Q: Do you want to update {}:'.format(local_path))
            response = input().lower().strip()
            while response not in ('yes', 'no'):
                print('Q: Do you want to update {}:'.format(local_path))
                response = input().lower().strip()
            if response == 'no':
                logging.info('skipped {} by user'.format(local_path))
                continue
            logging.info('= [{}/{}] Updating old file: {} ({})'.format(
                index, count, local_path, size))
            upload_file(local_path, args)
            index += 1

    index = 1
    count = len(files_need_to_upload)
    for local_path, size in files_need_to_upload:
        try:
            logging.info('+ [{}/{}] Uploading new file: {} ({})'.format(
                index, count, local_path, size))
        except Exception:
            # Best effort: never let a logging/encoding error kill the upload
            # (narrowed from the original bare `except:`).
            pass
        upload_file(local_path, args)
        index += 1
    logging.info('Uploading/Updating Done\n')
def _get_dir_of_file(f):
return '/'.join(f.split('/')[:-1])
def download_file(oss_path, local_path, args):
    # Fetch one object from OSS into local_path, creating parent directories
    # first.  Aborts the program on a non-200 response.
    # NOTE(review): when local_path has no '/' the dir_ is '' and makedirs('')
    # raises — callers always join onto a directory, so this never fires today.
    dir_ = _get_dir_of_file(local_path)
    if not os.path.exists(dir_):
        os.makedirs(dir_)
    logging.info('+ Downloading {}'.format(oss_path))
    bucket = get_bucket(args)
    # NOTE(review): encoding the path to bytes looks like a Python 2 leftover;
    # oss2 on Python 3 accepts str paths — confirm before changing.
    local_path = local_path.encode('utf-8')
    res = bucket.get_object_to_file(oss_path, local_path)
    if res.status != 200:
        logging.error('Download {} failed. Exit.'.format(oss_path))
        exit(1)
def list_files_on_oss(args):
    # Print a summary of the bucket contents (first/last 3 keys, count, total
    # size), or per-file details when --verbose is set.
    files = get_remote_objects(args)
    size_total = 0
    for o in files['meta']:
        size_total += files['meta'][o].size
        if args.verbose:
            print('\n- file: {}'.format(o))
            print('- size: {}'.format(sizeof_fmt(files['meta'][o].size)))
            print('- md5: {}'.format(files['meta'][o].etag))
    if not args.verbose:
        keys_to_list = list(files['files'].keys())
        keys_to_list.sort()
        print('== First 3 files:')
        for x in keys_to_list[:3]:
            print(' - {}'.format(x))
        print('== Last 3 files:')
        for x in keys_to_list[-3:]:
            print(' - {}'.format(x))
    print('\n== Total file count: {}'.format(len(files['files'])))
    print('== Total size: {}'.format(sizeof_fmt(size_total)))
def delete_files_from_oss(args):
    # Interactively delete every object matching the current prefix/filters.
    # The user must type YES (exact upper case) to proceed; anything else
    # cancels without touching the bucket.
    files = get_remote_objects(args)
    keys_to_delete = list(files['files'].keys())
    keys_to_delete.sort()
    print('== Will delete {} files:'.format(len(keys_to_delete)))
    print('== First 3 files:')
    for x in keys_to_delete[:3]:
        print(' - {}'.format(x))
    print('== Last 3 files:')
    for x in keys_to_delete[-3:]:
        print(' - {}'.format(x))
    answer = input('== Please enter YES to delete them ALL: ')
    if answer.strip() != 'YES':
        print('\nAction Canceled. Files are safe. Bye.')
        return

    bucket = get_bucket(args)
    count = 0
    for x in keys_to_delete:
        bucket.delete_object(x)
        count += 1
        print('- deleted: {}'.format(x))
    print('\nDeleted {} files.'.format(count))
def download_files_from_oss(args):
    # Download every object under target_path that is missing locally or whose
    # local MD5 differs from the remote ETag.  Only relative paths are allowed.
    target_path = args.target_path
    if target_path.startswith('./'):
        target_path = target_path[2:]
    if target_path.startswith('/'):
        raise ValueError('Must use relative path')
    oss_dir = os.path.dirname(__file__)
    oss_dir = os.path.join(oss_dir, '.')
    logging.info('Downloading file from: {}'.format(target_path))
    los = get_local_objects(target_path)
    ros = get_remote_objects(args)
    target_files = []
    for obj_key in ros['files']:
        if obj_key in los and ros['files'][obj_key] == los[obj_key]:
            # Up-to-date locally; nothing to fetch.
            logging.info('= {} exists'.format(obj_key))
            continue
        target_files.append(obj_key)
    target_files.sort()
    for oss_path in target_files:
        local_path = os.path.join(oss_dir, oss_path)
        download_file(oss_path, local_path, args)
    logging.info('Downloading Done\n')
def main():
    """Parse CLI options and dispatch to list / download / delete / upload.

    Upload is the default action when none of --listing/--download/--delete
    is given.
    NOTE(review): --target-path defaults to None, and the default upload path
    passes it straight into re.sub() — running with no arguments would raise
    a TypeError; confirm intended usage always supplies -p for uploads.
    """
    parser = argparse.ArgumentParser(description='Use Aliyun-OSS as Dropbox')
    parser.add_argument(
        '--target-path',
        '-p',
        action='store',
        const=None,
        default=None,
        help='Target path to sync/delete files'
    )
    parser.add_argument(
        '--download',
        '-d',
        action='store_true',
        default=False,
        help='Download files from OSS'
    )
    parser.add_argument(
        '--yes',
        action='store_true',
        default=False,
        help='overwrite existing files'
    )
    parser.add_argument(
        '--no',
        action='store_true',
        default=False,
        help='Do NOT overwrite existing files'
    )
    parser.add_argument(
        '--upload',
        '-u',
        action='store_true',
        default=False,
        help='Upload files to OSS'
    )
    parser.add_argument(
        '--listing',
        '-L',
        action='store_true',
        default=False,
        help='List files meta info on OSS'
    )
    parser.add_argument(
        '--min-size',
        type=int,
        default=0,
        help='[Listing] do not list size smaller than this'
    )
    parser.add_argument(
        '--max-size',
        type=int,
        default=0,
        help='[Listing] do not list size bigger than this'
    )
    parser.add_argument(
        '--re',
        type=str,
        default='',
        help='[Listing] filter file name by RE string'
    )
    parser.add_argument(
        '--check-duplicated',
        '-c',
        action='store_false',
        default=True,
        help='Do not upload files already in bucket other dirs'
    )
    parser.add_argument(
        '--bucket',
        '-b',
        required=True,
        help='bucket name to store data',
    )
    parser.add_argument(
        '--delete',
        action='store_true',
        help='To delete files with prefix from OSS',
    )
    parser.add_argument(
        '--verbose',
        '-v',
        action='store_true',
        help='Print more info',
    )
    args = parser.parse_args()
    # Dispatch order: listing wins, then download, then delete; upload is the default.
    if args.listing:
        list_files_on_oss(args)
    elif args.download:
        download_files_from_oss(args)
    elif args.delete:
        delete_files_from_oss(args)
    else:
        upload_files_to_oss(args)
| mitnk/oss-sync | sync.py | sync.py | py | 12,601 | python | en | code | 0 | github-code | 36 |
28525524930 | import pygame, time, random
from cars import car
from things import thing
from shooting_thing import shoot
# One-time pygame initialisation; must precede the display/font calls below.
pygame.init()
# Window geometry and the RGB colour palette used throughout the game.
display_width = 800
display_height = 600
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
# Picked once at import time; reused for every freshly spawned obstacle.
random_color = (random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255))
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('fun_run_car')
clock = pygame.time.Clock()
# Sprite images loaded from the current working directory.
car_img = pygame.image.load('car.png')
dack = pygame.image.load('dack.png')
def overall_music():
    """Start the in-game background track, looping until stopped.

    Bug fix: the original declared ``global pause`` but never read or
    assigned ``pause`` anywhere, so the statement was dead and misleading;
    it has been removed.
    """
    pygame.mixer.music.load('Raining Bits.ogg')
    pygame.mixer.music.play(-1)
def stop_music():
    """Stop whatever track pygame's mixer is currently playing."""
    pygame.mixer.music.stop()
def things_dodged(count): #counter for dodges
    """Render the current dodge counter in the top-left corner of the screen."""
    font = pygame.font.SysFont(None, 30)
    text = font.render("Dodge: " + str(count), True, black)
    gameDisplay.blit(text, (0, 0))
# message-rendering helpers
def text_object(text, font):
    """Render *text* with *font* in red; return the surface and its rect."""
    TextSurface = font.render(text, True, red)
    return TextSurface, TextSurface.get_rect()
def message_display(text, game_type):
    """Show *text* centred on screen for 3 seconds, then restart the game loop.

    NOTE(review): this never returns normally — it tail-calls
    game_loop(game_type), so recursion depth grows with every crash/restart.
    """
    LargeText = pygame.font.Font('freesansbold.ttf', 120)
    TextSurf, TextRect = text_object(text, LargeText)
    TextRect.center = ((display_width / 2), (display_height / 2))
    gameDisplay.blit(TextSurf, TextRect)
    pygame.display.update()
    time.sleep(3)
    stop_music()
    #pygame.mixer.music.stop()
    game_loop(game_type)
def crash_sound():
    """Play the crash scream sound effect once."""
    pygame.mixer.music.load('Aargh7.ogg')
    pygame.mixer.music.play(1)
def crash(game_type):#when the car crashes
    """Stop the soundtrack, play the crash sound, and show the crash screen
    (which in turn restarts the game loop for *game_type*)."""
    stop_music()
    crash_sound()
    message_display('you Crashed', game_type)
def game_start():
    """Show the start banner, then enter the normal game loop.

    Bug fix: message_display() requires a *game_type* argument; the original
    called it with only the text, which raised TypeError whenever this
    function was invoked. 'normal' matches the default mode offered by the
    intro screen.
    """
    message_display('time to play', 'normal')
def button(msg,x,y,w,h,ic,ac,action=None):#buttons
    """Draw a clickable rectangle button labelled *msg* at (x, y) sized (w, h).

    *ic*/*ac* are the inactive/active (hovered) colours. On a left-click,
    *action* is invoked: quit_game takes no argument, every other callback
    receives *msg* (used as the game mode by game_loop).
    """
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    if x + w > mouse[0] > x and y + h > mouse[1] > y:#checking if the mouse press any button
        pygame.draw.rect(gameDisplay, ac, (x, y, w, h))
        if click[0] == 1 and action != None:
            if msg == 'quit':
                action()
            else:
                action(msg)
    else:
        pygame.draw.rect(gameDisplay, ic, (x, y, w, h))
    smallText = pygame.font.SysFont("comicsansms", 20)
    textSurf, textRect = text_object(msg, smallText)
    textRect.center = ((x + (w / 2)), (y + (h / 2)))
    gameDisplay.blit(textSurf, textRect)
def quit_game():
    """Shut pygame down and exit the process."""
    pygame.quit()
    quit()
def game_intro():#game intro screen
    """Show the title screen with mode-selection buttons until one is clicked.

    The 'normal'/'shooting' buttons start game_loop with their label as the
    game mode; 'quit' exits. The loop never sets ``intro = False`` — it is
    left via game_loop/quit_game.
    """
    pygame.mixer.music.load('intro_music.wav')#music
    pygame.mixer.music.play(-1)
    intro =True
    while intro:
        events = pygame.event.get()
        for event in events: # event per frame per sec
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        gameDisplay.blit(dack, (0, 0))
        LargeText = pygame.font.Font('freesansbold.ttf', 120)
        TextSurf, TextRect = text_object("fun run car", LargeText)
        TextRect.center = ((display_width / 2), (display_height / 2))
        gameDisplay.blit(TextSurf, TextRect)
        #add difficulty
        button("normal", display_width * 0.1875, display_height * 0.85, display_width*0.125, display_height*0.085, green, white,
               game_loop) # first and sec x,y sec rectangle boundaries
        button("shooting", display_width * 0.1875, display_height * 0.65, display_width*0.125, display_height*0.085, green, white, game_loop)
        button("quit", display_width * 0.6875, display_height * 0.75, display_width*0.125, display_height*0.085, blue, white, quit_game)
        #button("register", display_width * 0.4, display_height * 0.85, display_width * 0.125, display_height * 0.085,
        #       black, white, reg_log) # first and sec x,y sec rectangle boundaries
        #calls the buttons function
        pygame.display.update()
        clock.tick(15)
def destroy_thing(things_list, shot):
    """Return the first obstacle in *things_list* that *shot* overlaps, else 0.

    Overlap requires one of the shot's vertical edges to fall strictly inside
    the obstacle's vertical span AND one of its horizontal edges to fall
    strictly inside the obstacle's horizontal span.
    """
    for obstacle in things_list:
        top = obstacle.thing_starty
        bottom = obstacle.thing_starty + obstacle.thing_height
        left = obstacle.thing_startx
        right = obstacle.thing_startx + obstacle.thing_width
        y_hit = (top < shot.y < bottom) or (top < shot.y + shot.height < bottom)
        x_hit = (left < shot.x < right) or (left < shot.x + shot.width < right)
        if y_hit and x_hit:
            return obstacle
    return 0
def reset_things(things,list_thing, dodged):#reset obstacles
    """Re-randomise *things* above the screen after it scrolls off.

    Randomises position, speed, width and colour via the thing's own
    mutators. Every 7th dodge an extra obstacle is appended to *list_thing*
    (note: dodged == 0 also satisfies the modulo test, so one is added on
    the very first reset too).
    """
    things.change_thing_starty_reset()
    things.change_thing_startx(display_width)
    things.change_thing_speed()
    things.change_thing_width()
    new_color = (random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255))
    things.change_thing_color(new_color)
    if dodged % 7 == 0:
        list_thing.append(thing(random.randrange(0, display_width), -600, 4, 20, 100, random_color, 1, 1.2))
# main game loop (normal and shooting modes)
def game_loop(game_type):
    """Run one game session in mode *game_type* ('normal' or 'shooting').

    Handles keyboard movement, obstacle scrolling, crash detection and —
    in shooting mode — projectiles that destroy obstacles.

    Fixes vs. the original:
      * ``if (destroy > 0)`` compared a thing instance with an int, which
        raises TypeError in Python 3 on the first hit (unless ``thing``
        defines ordering — not visible here); replaced with ``!= 0``.
      * projectiles were removed from ``shooting_list`` while iterating it,
        which skips the following element; we now iterate a copy.
      * removed unused locals ``shoot_speed`` and ``y_dack``.
    """
    overall_music()
    shoot.shooting_counter = 0
    #__init__(self, car_width, car_height, x, y, car_img)
    list_cars = [] #cars list of objects
    list_cars.append(car(33, 55, (display_width * 0.45), (display_height * 0.8), car_img))
    x_change = 0
    y_change = 0
    #__init__(self,thing_startx, thing_starty, thing_speed, thing_width, thing_height, color, thing_increase_speed, thing_increase_width):
    list_thing = [] #obstacles list of objects
    list_thing.append(thing(random.randrange(0, display_width), -600, 4, 20, 100, random_color, 0.1, 0.5))
    shooting_list = []
    dodged = 0
    gameExit = False  # NOTE(review): never set True; the loop exits via crash()/quit()
    while not gameExit:
        for event in pygame.event.get(): # every event that occurred this frame
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            # moving x
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x_change = -5.0
                elif event.key == pygame.K_RIGHT:
                    x_change = +5.0
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    x_change = 0.0
            # moving y
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    y_change = -5.0
                elif event.key == pygame.K_DOWN:
                    y_change = +5.0
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
                    y_change = 0.0
            # shooting (space bar) — only in shooting mode
            if game_type == 'shooting':
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE:
                        #def __init__(self, x, y, speed, width, height, color):
                        for cars in list_cars:
                            shooting_list.append(shoot(cars.x,cars.y, red))
        # apply movement, then redraw background and HUD
        for cars in list_cars:
            cars.move_car(x_change,y_change)
        gameDisplay.blit(dack, (0, 0))  # background highway drawing
        things_dodged(dodged)
        for things in list_thing: # draw obstacles and scroll them down
            things.draw_thing(gameDisplay)
            things.change_thing_starty_speed()
        for cars in list_cars:
            cars.draw_car(gameDisplay)  # drawing the car
            # crash when the car leaves the screen on any side
            if cars.x > display_width -cars.car_width or cars.x < 0:
                crash(game_type)
            if cars.y > display_height - cars.car_height or cars.y < 0:
                crash(game_type)
        for things in list_thing:
            if things.thing_starty > display_height: # obstacle fully dodged
                reset_things(things,list_thing, dodged)
                dodged += 1
        for cars in list_cars:
            for things in list_thing: # car/obstacle collision
                if cars.y < things.thing_starty + things.thing_height and cars.y > things.thing_starty or \
                        cars.y + cars.car_height < things.thing_starty + things.thing_height and cars.y + cars.car_height > things.thing_starty:
                    if cars.x > things.thing_startx and cars.x < things.thing_startx + things.thing_width or \
                            cars.x +cars.car_width > things.thing_startx and cars.x +cars.car_width < things.thing_startx + things.thing_width:
                        crash(game_type)
        # projectile/obstacle hits count as dodges
        if shoot.shooting_counter > 0:
            for shooting in shooting_list[:]:  # copy: we mutate shooting_list below
                shooting.move_shoot()
                shooting.draw_shoot(gameDisplay)
                destroy = destroy_thing(list_thing, shooting)
                if destroy != 0:
                    shooting_list.remove(shooting)
                    # replace the destroyed obstacle with a fresh one above the screen
                    list_thing.append(thing(random.randrange(0, display_width), -600, destroy.thing_speed, 20, 100, random_color, 0.1,
                                            0.5))
                    list_thing.remove(destroy)
                    dodged += 1
        pygame.display.update()
        clock.tick(60) #fps
# entry point: show the intro/menu screen
game_intro()
| maoriole/car-game | game_car.py | game_car.py | py | 10,448 | python | en | code | 0 | github-code | 36 |
9044988683 | from __future__ import annotations
from abc import abstractmethod, ABC
from typing import Optional
from api.mvc.controller.content_model.i_content_model_controller import IContentModelController
from api.mvc.controller.project.i_project_controller import IProjectController
from api.mvc.controller.property.i_property_controller import IPropertyController
from api.mvc.model.data.aspect_model import AspectModel
from api.mvc.model.data.content_model import ContentModel
from api.mvc.model.data.content_type_model import ContentTypeModel
from api.mvc.model.data.data_model import DataModel
from api.mvc.model.data.data_type import DataType
from api.mvc.model.data.folder_type_model import FolderTypeModel
from api.mvc.model.data.property_model import PropertyModel
from api.mvc.model.data.type_model import TypeModel
from api.mvc.model.service.file.content_model_service import ContentModelFileService
from api_core.exception.api_exception import ApiException
from api_core.helper.file_folder_helper import FileFolderHelper
from api_core.mvc.controller.controller import Controller
from api_core.mvc.service.model.service import Service
from api_core.mvc.view.view import View
class DataController(Controller, ABC):
    """
    Controller class used to manage API project's data (Alfresco AIO types
    and aspects): loading them from the content-model file, validating their
    inheritance/mandatory-aspect graphs, and wiring new links between them.
    """
    def __init__(self, name: str, service: Service, view: View, pc: IProjectController, cmc: IContentModelController):
        """
        Initialize a new instance of DataController class.
        :param name: The name of the controller.
        :param service: The controller's basic service.
        :param view: The controller's view.
        :param pc: A project controller.
        :param cmc: A content model controller.
        Note: the property controller is injected later via
        set_property_controller(), so '_prc' starts as None.
        """
        super().__init__(name, service, view)
        self._pc: IProjectController = pc
        self._cmc: IContentModelController = cmc
        self._prc: Optional[IPropertyController] = None
        self._cmfs: ContentModelFileService = ContentModelFileService()
    def set_property_controller(self, value: IPropertyController):
        """
        Change value of class property '_prc' (late injection breaks the
        circular dependency between data and property controllers).
        :param value: The new value of the '_prc' class property.
        """
        self._prc = value
    def _get(self, content_model: ContentModel, data_type: str, name: str) -> Optional[DataModel]:
        """
        Retrieves the data model of an Alfresco AIO type or aspect, recursively
        resolving its parent, mandatory aspects and properties, and validating
        uniqueness / circular-inheritance constraints along the way.
        :param data_type: The type of the data.
        :param content_model: The type's content-model.
        :param name: The type or aspect name.
        :return: The data model of a type or aspect otherwise None.
        """
        # Built-in Alfresco types short-circuit to dedicated models.
        if data_type.__eq__(DataType.TYPE.value):
            if name.__eq__("folder"):
                return FolderTypeModel(content_model)
            elif name.__eq__("content"):
                return ContentTypeModel(content_model)
        filename: str = FileFolderHelper.extract_filename_from_path(content_model.path)
        # Verification that the type exists.
        if self._cmfs.find_data(content_model, data_type, name) is None:
            return None
        # Verification that the aspect has been declared only once in the file.
        datas_name: list[str] = self._cmfs.get_data_names(content_model, data_type)
        if datas_name.count(name).__gt__(1):
            raise ApiException("{3} '{0}' was declared more than once in content model '{1}' in file '{2}'."
                               .format(name, content_model.complete_name, filename, data_type.title()))
        # Verification that there is no circular inheritance.
        ancestors: list[str] = self.__check_ancestors(content_model, data_type, name,
                                                      "{0}:{1}".format(content_model.prefix, name), [])
        self._check_mandatory_aspects(content_model, name, "{0}:{1}".format(content_model.prefix, name), ancestors, [])
        data: Optional[AspectModel | TypeModel] = None
        if data_type.__eq__(DataType.ASPECT.value):
            data = AspectModel(content_model, name, self._cmfs.get_aspect_title(content_model, name),
                               self._cmfs.get_aspect_description(content_model, name))
        else:
            data = TypeModel(content_model, name, self._cmfs.get_type_title(content_model, name),
                             self._cmfs.get_type_description(content_model, name))
        # Set the parent data (recursive _get on the declared parent).
        data.parent = self._get(content_model, self._cmfs.get_data_parent(content_model, data_type, name), name)
        # Set the data mandatory aspects.
        try:
            if data_type.__eq__(DataType.TYPE.value):
                for mandatory_aspect in self._cmfs.get_type_mandatory_aspects(content_model, name):
                    data.add_mandatory_aspect(self._get(content_model, DataType.ASPECT.value,
                                                        mandatory_aspect.rsplit(":", 1)[1]))
            else:
                for mandatory_aspect in self._cmfs.get_aspect_mandatory_aspects(content_model, name):
                    data.add_mandatory_aspect(self._get(content_model, DataType.ASPECT.value,
                                                        mandatory_aspect.rsplit(":", 1)[1]))
        except IndexError:
            # rsplit(":", 1)[1] failed => the value had no "prefix:name" form.
            raise ApiException("A mandatory aspect value of {0} '{1}' of content model '{2}' in file '{3}' is not "
                               "valid. Its be formed this way: prefix:name."
                               .format(data_type, name, content_model.complete_name, filename))
        # Recovery of properties.
        properties: list[str] = self._cmfs.get_data_property_names(content_model, data)
        index: int = 0
        property_found: bool = False
        maximum: int = len(properties)
        prop: Optional[PropertyModel] = None
        while index.__lt__(maximum) and not property_found:
            # Recovery of a property.
            prop = self._prc.load_property(content_model, data, properties[index])
            # Verification that a property is not declared twice.
            (property_found, data_name) = self.is_property_exist(data, data, prop)
            # Not declared => addition in the data model.
            if not property_found:
                data.add_property(prop)
            index += 1
        # property found = declared twice = error
        if property_found:
            raise ApiException("Property '{0}' is defined twice in {1} '{2}' of content model '{3}' of file '{4}'."
                               .format(prop.name, data.typology, data.name, content_model.complete_name, filename))
        # Return the data
        return data
    def _extend(self, content_model: ContentModel, data_type: str, source_name: str, parent_name: str):
        """
        Method allowing a datum (aspect or type) to extend over another datum.
        Both directions of the link are validated to rule out inheritance cycles.
        :param content_model: The data content model.
        :param data_type: The type of data to bind.
        :param source_name: The name of the data to expand.
        :param parent_name: The name of the parent data.
        """
        source: DataModel = self._get(content_model, data_type, source_name)
        parent: DataModel = self._get(content_model, data_type, parent_name)
        filename: str = FileFolderHelper.extract_filename_from_path(content_model.path)
        if source is None:
            raise ApiException("The '{0}' {3} does not exist in the '{1}' content-model of the '{2} file.'"
                               .format(source_name, content_model.complete_name, filename, data_type))
        elif parent is None:
            # NOTE(review): this message formats source_name, not parent_name —
            # looks like a copy/paste slip; confirm before relying on it.
            raise ApiException("The '{0}' {3} does not exist in the '{1}' content-model of the '{2} file.'"
                               .format(source_name, content_model.complete_name, filename, data_type))
        self.__check_data_link(content_model, data_type, source, parent)
        self.__check_data_link(content_model, data_type, parent, source)
        self._service.extend(content_model, source, parent)
    def _add_mandatory(self, content_model: ContentModel, data_type: str, source_name: str, mandatory_name: str):
        """
        Method allowing to add a "mandatory-aspect" to data (aspect or type).
        :param content_model: The data content model.
        :param data_type: The type of data to bind.
        :param source_name: The type of data to modify (the one that will include the new mandatory-aspect).
        :param mandatory_name: The name of the required aspect to add.
        """
        # Retrieving the source data model model.
        source: DataModel = self._get(content_model, data_type, source_name)
        filename: str = FileFolderHelper.extract_filename_from_path(content_model.path)
        # Obligatory aspect recovery.
        mandatory: DataModel = self._get(content_model, DataType.ASPECT.value, mandatory_name)
        # Verification of the existence of data models.
        if source is None:
            raise ApiException("The '{0}' {3} does not exist in the '{1}' content-model of the '{2} file.'"
                               .format(source_name, content_model.complete_name, filename, data_type))
        elif mandatory is None:
            raise ApiException("The '{0}' {3} does not exist in the '{1}' content-model of the '{2} file.'"
                               .format(source_name, content_model.complete_name, filename, data_type))
        # Check that there is no circular inheritance between the two data models
        self.__check_data_link(content_model, data_type, source, mandatory)
        self.__check_data_link(content_model, data_type, mandatory, source)
        # Addition of the aspect in the list of mandatory aspects.
        self._cmfs.add_mandatory(content_model, source, mandatory)
    def __check_data_link(self, content_model: ContentModel, data_type: str, data_1: DataModel, data_2: DataModel):
        """
        Raise ApiException if data_2 is already reachable from data_1, either
        as an ancestor or through the (inherited) mandatory-aspect lists.
        """
        # source: str = data_1.name
        # complete_name: str = "{0}:{1}".format(content_model.prefix, source)
        filename: str = FileFolderHelper.extract_filename_from_path(content_model.path)
        ancestors: list[str] = self.__check_ancestors(content_model, data_type, data_1.name, data_1.complete_name, [])
        if data_2.name in ancestors:
            raise ApiException("The '{0}' {1} already has the '{2}' {1} for ancestor in the '{3}' file."
                               .format(data_1.name, data_type, data_2.name, filename))
        mandatory: list[str] = self._check_mandatory_aspects(content_model, data_1.name, data_1.complete_name, ancestors, [])
        if data_2.name in mandatory:
            raise ApiException("The '{0}' {1} already has the '{2}' {1} in the list of mandatory aspects (by "
                               "inheritance or directly) in the '{3}' file."
                               .format(data_1.name, data_type, data_2.name, filename))
    def __check_ancestors(self, content_model: ContentModel, typology: str, source: str, complete_name: Optional[str],
                          ancestors: list[str]) -> list[str]:
        """
        Walk the inheritance chain starting at *complete_name*, accumulating
        ancestor names and raising ApiException on a missing or repeated
        (circular) ancestor. Returns the ancestor list without *source* itself.
        NOTE(review): due to and/or precedence the stop condition reads as
        'folder OR (content AND type)' — '(folder OR content) AND type' may
        have been intended; confirm before changing.
        """
        if complete_name is None or (complete_name.__eq__("cm:folder") or complete_name.__eq__("cm:content")
                                     and typology.__eq__(DataType.TYPE.value)):
            # Removing the first element, which is the aspect we're trying to get.
            if len(ancestors).__gt__(0):
                ancestors.pop(0)
            return ancestors
        name: str = complete_name.rsplit(":", 1)[1]
        if self._cmfs.find_data(content_model, typology, name) is None:
            raise ApiException("There is an inheritance problem. {4} '{0}' inherits {5} '{1}' which does not "
                               "exist in content model '{2}' of file '{3}'.\n"
                               .format(ancestors[len(ancestors) - 1 if len(ancestors).__gt__(0) else 0], name,
                                       content_model.complete_name,
                                       FileFolderHelper.extract_filename_from_path(content_model.path),
                                       typology.title(), typology))
        if ancestors.count(name).__gt__(0):
            raise ApiException("There is an inheritance problem. {3} '{0}' appears twice in the ancestors of aspect"
                               " '{1}'.\n{2}".format(name, source, " -> ".join(ancestors), typology.title()))
        ancestors.append(name)
        return self.__check_ancestors(content_model, typology, source,
                                      self._cmfs.get_aspect_parent(content_model, name), ancestors)
    @abstractmethod
    def _check_mandatory_aspects(self, content_model: ContentModel, source: str, complete_name: Optional[str],
                                 ancestors: list[str], mandatory: list[str]) -> list[str]:
        """
        Collect (and validate) the mandatory-aspect names reachable from
        *complete_name*; concrete subclasses implement the traversal.
        """
        pass
    def is_property_exist(self, data_source: DataModel, data: DataModel, property_model: PropertyModel) \
            -> tuple[bool, Optional[str]]:
        """
        Return (True, data.name) if *property_model*'s name is already declared
        on *data* or one of its mandatory aspects, else (False, None).
        NOTE(review): the recursive call on data.parent discards its result,
        so properties found only on the parent chain are not reported —
        confirm whether that is intentional.
        """
        if data.parent is not None:
            self.is_property_exist(data_source, data.parent, property_model)
        index: int = 0
        maximum: int = len(data.properties)
        while index.__lt__(maximum) and data.properties[index].name.__ne__(property_model.name):
            index += 1
        if index.__ne__(maximum):
            return True, data.name
        success: bool = False
        index = 0
        maximum = len(data.mandatory)
        while index.__lt__(maximum) and not success:
            (success, data_model) = self.is_property_exist(data_source, data.mandatory[index], property_model)
            if not success:
                index += 1
        if index.__ne__(maximum):
            return True, data.name
        return False, None
| seedbaobab/alfresco_helper | api/mvc/controller/data/data_controller.py | data_controller.py | py | 13,805 | python | en | code | 0 | github-code | 36 |
1529604811 | import pandas as pd
# Summary statistics for the happiness-index dataset: the most and least
# populous countries (with their ranks/scores) and the mean happiness index
# for 2020 and 2021.
df = pd.read_csv('HappinessIndex.csv')
# read_csv already returns a DataFrame; the original wrapped it in
# pd.DataFrame(df) again, which only made a redundant copy.
dff = df
# Country with largest population (row selected by the max of pop2022).
col = "pop2022"
max_pop = dff.loc[dff[col].idxmax()]
print("Country with the largest population and its corresponding rank, happines index in 2020 and 2021: ")
print(max_pop)
# Country with smallest population.
min_pop = dff.loc[dff[col].idxmin()]
print("Country with the smallest population and its corresponding rank, happines index in 2020 and 2021: ")
print(min_pop)
# Average happiness index in each year.
df1 = dff["happiness2020"].mean()
print("The average happiness index in 2020 is " ,df1)
df2 = dff["happiness2021"].mean()
print("The average happiness index in 2021 is " ,df2)
| sabb7296/Assignment-Stage1 | Asm_Stage1/Data Summaries/HI_summary.py | HI_summary.py | py | 740 | python | en | code | 0 | github-code | 36 |
28522011627 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.gridcell.has_SSS_buildings import has_SSS_buildings as gridcell_has_SSS_buildings
from variable_functions import my_attribute_label
class has_SSS_buildings(gridcell_has_SSS_buildings):
    """Returns 1 if the location contains buildings of the given type, otherwise 0.

    Zone-level variant: reuses the gridcell implementation and only overrides
    the dependency list.
    """
    def dependencies(self):
        # self.number_of_buildings is presumably set by the gridcell base
        # class from the SSS building-type name — confirm in the base class.
        return [my_attribute_label(self.number_of_buildings)]
from opus_core.tests import opus_unittest
from urbansim.variable_test_toolbox import VariableTestToolbox
from urbansim.datasets.building_type_dataset import BuildingTypeDataset
from numpy import array
from numpy import ma
from opus_core.storage_factory import StorageFactory
class Tests(opus_unittest.OpusTestCase):
    """Unit test for the zone-level 'has_commercial_buildings' variable."""
    variable_name = "urbansim.zone.has_commercial_buildings"
    def test_my_inputs(self):
        """Zones 1 and 3 each hold a 'commercial' (type 2) building and zone 2
        does not, so the expected values are [1, 0, 1]."""
        storage = StorageFactory().get_storage('dict_storage')
        building_types_table_name = 'building_types'
        # Two building types; only id 2 is named 'commercial'.
        storage.write_table(
            table_name = building_types_table_name,
            table_data = {
                'building_type_id':array([1,2]),
                'name': array(['foo', 'commercial'])
            }
        )
        building_types = BuildingTypeDataset(in_storage=storage, in_table_name=building_types_table_name)
        values = VariableTestToolbox().compute_variable(self.variable_name,
            data_dictionary = {
                'zone':{
                    'zone_id':array([1,2,3]),
                },
                'building': {
                    'building_type_id':array([1,2,1,2,1,1]),
                    'zone_id':array([2,3,1,1,2,1])
                },
                'building_type': building_types
            },
            dataset = 'zone'
        )
        should_be = array([1, 0, 1])
        self.assert_(ma.allequal(values, should_be),
            'Error in ' + self.variable_name)
if __name__=='__main__':
opus_unittest.main() | psrc/urbansim | urbansim/zone/has_SSS_buildings.py | has_SSS_buildings.py | py | 2,176 | python | en | code | 4 | github-code | 36 |
20658918357 | """Computes eigenvalues and eigenvectors of the PMI similarity matrices for a given attribute type. Saves the results of this along with kMeans clustering of the attributes, and the assignment of graph nodes to clusters."""
import pickle
import time
import numpy as np
import pandas as pd
import optparse
from scipy.sparse import coo_matrix, diags
from sklearn.cluster import KMeans
from gplus import *
def generate_cluster_report(attr_analyzer, attr_type, cluster_labels, topN = 30):
    """Given the AttributeAnalyzer, attr_type, and a list of cluster labels (corresponding to the attribute vocab indices only), generates a report listing the top N members of each cluster, and the frequency and prevalence (relative frequency) of each attribute in the data set. Orders the clusters by total occurrences of attributes in each cluster. If topN = None, list all the attributes in each cluster."""
    attr_freq_dict = attr_analyzer.attr_freqs_by_type[attr_type]
    total_attr_freqs = sum(attr_freq_dict.values())
    pfa = attr_analyzer.pairwise_freq_analyzers[attr_type]
    attr_indices, attr_vocab = get_attr_indices(pfa, attr_analyzer.attributed_nodes)
    unique_cluster_labels = set(cluster_labels)
    # compute vocab lists for each cluster
    attr_vocab_by_cluster = dict((lab, []) for lab in unique_cluster_labels)
    for (i, lab) in enumerate(cluster_labels):
        v = attr_vocab[i]
        # '*???*'-prefixed entries are skipped (presumably placeholder/unknown
        # vocab tokens — confirm against get_attr_indices).
        if v.startswith('*???*'):
            continue
        freq = attr_freq_dict[v]
        attr_vocab_by_cluster[lab].append((v, freq, freq / total_attr_freqs))
    # sort vocab lists by decreasing frequencies
    for lab in unique_cluster_labels:
        attr_vocab_by_cluster[lab].sort(key = lambda item : item[1], reverse = True)
    # total number of occurrences of any attribute in each cluster
    total_freqs_by_cluster = dict((lab, sum([item[1] for item in attr_vocab_by_cluster[lab]])) for lab in unique_cluster_labels)
    info_by_cluster = dict((lab, dict()) for lab in unique_cluster_labels)
    # create a DataFrame for each cluster listing the top N vocab items in order with their frequencies and prevalences
    for lab in unique_cluster_labels:
        df = pd.DataFrame(attr_vocab_by_cluster[lab], columns = ['attribute', 'frequency', 'prevalence'])
        info_by_cluster[lab]['df'] = df if (topN is None) else df[:topN]
        info_by_cluster[lab]['size'] = len(attr_vocab_by_cluster[lab])
        info_by_cluster[lab]['totalFreq'] = total_freqs_by_cluster[lab]
        # totalPrevalence sums over *all* rows of df, not just the topN shown.
        info_by_cluster[lab]['totalPrevalence'] = sum(df['prevalence'])
    # sort clusters by decreasing number of occurrences
    sorted_clusters_with_total_freqs = sorted(total_freqs_by_cluster.items(), key = lambda item : item[1], reverse = True)
    # generate report (one banner + stats + table per cluster)
    num_attrs = len(attr_vocab)
    s = ''
    for (lab, freq) in sorted_clusters_with_total_freqs:
        info = info_by_cluster[lab]
        width = 12 + len(str(lab))
        s += '#' * width + '\n'
        s += '# ' + 'CLUSTER ' + str(lab) + ' #\n'
        s += '#' * width + '\n\n'
        s += 'attribute prevalence = %6d / %6d = %f\n' % (info['size'], num_attrs, info['size'] / num_attrs)
        s += 'occurrence prevalence = %6d / %6d = %f\n\n' % (info['totalFreq'], total_attr_freqs, info['totalPrevalence'])
        s += info['df'].to_string(index = False) + '\n\n\n'
    return s
# save off:
# matrix or LinearOperator for similarity matrix
# eigenvalues and scree plot
# embedded vectors corresponding to attributes
# kmeans clusters corresponding to attributes
# report of top clusters
# mappings from nodes to clusters of the given attribute type
def main():
    """Parse CLI options, compute (or load cached) spectral embeddings of the
    PMI similarity matrix, cluster attributes with kMeans, write the cluster
    report, and assign each graph node to a cluster."""
    p = optparse.OptionParser()
    p.add_option('--attr_type', '-a', type = str, help = 'attribute type')
    p.add_option('-p', type = str, help = 'PMI type (PMIs, NPMI1s, or NPMI2s)')
    p.add_option('-e', type = str, help = 'embedding (adj, normlap, regnormlap)')
    p.add_option('-s', action = 'store_true', default = False, help = 'normalize in sphere')
    p.add_option('-d', type = float, help = 'smoothing parameter')
    p.add_option('-k', type = int, help = 'number of eigenvalues')
    p.add_option('-c', type = int, help = 'number of kmeans clusters')
    p.add_option('-t', type = float, default = None, help = 'tolerance for eigsh')
    p.add_option('-v', action = 'store_true', default = False, help = 'save scree plot')
    opts, args = p.parse_args()
    attr_type = opts.attr_type
    sim = opts.p
    embedding = opts.e
    assert (embedding in ['adj', 'normlap', 'regnormlap'])
    sphere = opts.s
    delta = opts.d
    k = opts.k
    nclusts = opts.c
    tol = opts.t
    save_plot = opts.v
    topN = 50 # for the report
    # smoothing (delta) only makes sense for plain PMIs
    assert (((sim == 'PMIs') or (delta == 0)) and (sim in ['PMIs', 'NPMI1s', 'NPMI2s']))
    data_folder = 'gplus0_lcc/data/PMI/'
    report_folder = 'gplus0_lcc/reports/PMI/'
    plot_folder = 'gplus0_lcc/plots/PMI/'
    # prefix1 keys the embedding artifacts; prefix2 additionally keys the clustering
    file_prefix1 = ('%s_%s_%s_delta' % (attr_type, sim, embedding)) + str(delta) + ('_k%d' % k)
    file_prefix2 = ('%s_%s_%s_delta' % (attr_type, sim, embedding)) + str(delta) + ('_k%d%s_c%d' % (k, '_normalized' if sphere else '', nclusts))
    print_flush("\nLoading AttributeAnalyzer...")
    a = AttributeAnalyzer()
    a.load_pairwise_freq_analyzer(attr_type)
    a.make_attrs_by_node_by_type()
    attrs_by_node = a.attrs_by_node_by_type[attr_type]
    pfa = a.pairwise_freq_analyzers[attr_type]
    n = pfa.num_vocab
    tol = (1.0 / n) if (tol is None) else tol # use 1/n instead of machine precision as default tolerance
    attr_indices, attr_vocab = get_attr_indices(pfa, a.attributed_nodes)
    # Three-level cache: (1) labels+centers, (2) eigvals+features, (3) recompute all.
    try:
        print_flush("\nLoading labels from '%s%s_labels.csv'..." % (data_folder, file_prefix2))
        labels = np.loadtxt('%s%s_labels.csv' % (data_folder, file_prefix2), dtype = int)
        print_flush("\nLoading cluster centers from '%s%s_cluster_centers.csv'..." % (data_folder, file_prefix2))
        cluster_centers = np.loadtxt('%s%s_cluster_centers.csv' % (data_folder, file_prefix2), delimiter = ',')
        print_flush("\nLoading eigenvalues from '%s%s_eigvals.csv'..." % (data_folder, file_prefix1))
        eigvals = np.loadtxt('%s%s_eigvals.csv' % (data_folder, file_prefix1), delimiter = ',')
        print_flush("\nLoading embedded features from '%s%s_features.pickle'..." % (data_folder, file_prefix1))
        features = pickle.load(open('%s%s_features.pickle' % (data_folder, file_prefix1), 'rb'))
        if sphere:
            for i in range(len(attr_indices)):
                features[i] = normalize(features[i])
    except FileNotFoundError:
        print_flush("Failed to load.")
        try:
            print_flush("\nLoading eigenvalues from '%s%s_eigvals.csv'..." % (data_folder, file_prefix1))
            eigvals = np.loadtxt('%s%s_eigvals.csv' % (data_folder, file_prefix1), delimiter = ',')
            print_flush("\nLoading embedded features from '%s%s_features.pickle'..." % (data_folder, file_prefix1))
            features = pickle.load(open('%s%s_features.pickle' % (data_folder, file_prefix1), 'rb'))
        except FileNotFoundError:
            print_flush("Failed to load.")
            print_flush("\nComputing similarity matrix (%s)..." % sim)
            sim_op = pfa.to_sparse_PMI_operator(sim, delta)
            matrix_type = 'adjacency' if (embedding == 'adj') else ('normalized Laplacian' if (embedding == 'normlap') else 'regularized normalized Laplacian')
            print_flush("\nComputing eigenvectors of %s matrix (k = %d)..." % (matrix_type, k))
            if (embedding == 'adj'):
                (eigvals, features) = timeit(eigsh)(sim_op, k = k, tol = tol)
                features = np.sqrt(np.abs(eigvals)) * features # scale the feature columns by the sqrt of the eigenvalues
            elif (embedding == 'normlap'):
                normlap = SparseNormalizedLaplacian(sim_op)
                (eigvals, features) = timeit(eigsh)(normlap, k = k, tol = tol)
            elif (embedding == 'regnormlap'):
                regnormlap = SparseRegularizedNormalizedLaplacian(sim_op)
                (eigvals, features) = timeit(eigsh)(regnormlap, k = k, tol = tol)
            features = features[attr_indices, :] # free up memory by deleting embeddings of nodes with no attributes
            np.savetxt('%s%s_eigvals.csv' % (data_folder, file_prefix1), eigvals, delimiter = ',')
            pickle.dump(features, open('%s%s_features.pickle' % (data_folder, file_prefix1), 'wb'))
        if sphere: # normalize the features to have unit norm (better for kMeans)
            for i in range(len(attr_indices)):
                features[i] = normalize(features[i])
        km = KMeans(nclusts)
        print_flush("\nClustering attribute feature vectors into %d clusters using kMeans..." % nclusts)
        labels = timeit(km.fit_predict)(features)
        # save the cluster labels
        np.savetxt('%s%s_labels.csv' % (data_folder, file_prefix2), np.array(labels, dtype = int), delimiter = ',', fmt = '%d')
        # save the cluster centers
        cluster_centers = km.cluster_centers_
        np.savetxt('%s%s_cluster_centers.csv' % (data_folder, file_prefix2), cluster_centers, delimiter = ',')
        # save the attribute cluster report
        with open('%s%s_cluster_report.txt' % (report_folder, file_prefix2), 'w') as f:
            f.write(generate_cluster_report(a, attr_type, labels, topN))
    if save_plot:
        print_flush("\nSaving scree plot to '%s%s_screeplot.png'..." % (plot_folder, file_prefix1))
        scree_plot(eigvals, show = False, filename = '%s%s_screeplot.png' % (plot_folder, file_prefix1))
    print_flush("\nAssigning cluster labels to each node...")
    indices_by_vocab = dict((v, i) for (i, v) in enumerate(attr_vocab))
    centers = [normalize(center) for center in cluster_centers] if sphere else cluster_centers
    def assign_cluster(node):
        """Assigns -1 to a node with no attribute present. Otherwise, takes the cluster whose center is closest to the mean of the attribute vectors. Uses cosine distance if sphere = True, otherwise Euclidean distance."""
        if (node not in attrs_by_node):
            return -1
        else:
            attrs = list(attrs_by_node[node])
            if (len(attrs) == 1):
                return labels[indices_by_vocab[attrs[0]]]
            else:
                vec = np.zeros(k, dtype = float)
                for attr in attrs:
                    vec += features[indices_by_vocab[attr]]
                vec /= len(attrs)
                if sphere:
                    vec = normalize(vec)
                    sims = [np.dot(vec, center) for center in centers]
                else:
                    sims = [-np.linalg.norm(vec - center) for center in centers]
                max_index, max_sim = -1, -float('inf')
                for (i, sim) in enumerate(sims):
                    if (sim > max_sim):
                        max_index = i
                        max_sim = sim
                return max_index
    # save file with the list of cluster labels for each node
    clusters_by_node = [assign_cluster(i) for i in range(a.num_vertices)]
    np.savetxt('%s%s_node_labels.csv' % (data_folder, file_prefix2), np.array(clusters_by_node, dtype = int), delimiter = ',', fmt = '%d')
    print_flush("\nDone!")
if __name__ == "__main__":
main() | jeremander/Gplus | factor_attr_mat.py | factor_attr_mat.py | py | 11,356 | python | en | code | 2 | github-code | 36 |
28224836636 | import random
from string import ascii_letters
from typing import Any, Generator
def data_stream_gen(
total_length: int | None = None, element_length: int = 100
) -> Generator[str, None, None]:
if total_length is None:
total_length == float("inf")
idx = 0
while idx < total_length:
element = ""
for el_length in range(element_length):
element += random.choice(ascii_letters)
yield element
idx += 1
def n_th_data(data_gen: Generator[str, None, None], idx: int) -> str:
    """Advance `data_gen` to position `idx` (0-based) and return that element."""
    element = next(data_gen)
    # Consume `idx` further elements; the last one pulled is the answer.
    for _ in range(idx):
        element = next(data_gen)
    return element
def solution(big_stream: Any) -> Any:
random_element = None
for idx, element in enumerate(big_stream):
if idx == 0:
random_element = element
elif random.randint(1, idx + 1) == 1: # prob of 1 in n
random_element = element
return random_element
# Demo: generate a stream and print the element at a random position.
TOTAL_LENGTH = 1_000_000
ELEMENT_LENGTH = 100
data = data_stream_gen(TOTAL_LENGTH, ELEMENT_LENGTH)
# Bug fix: random.randint's upper bound is INCLUSIVE, so randint(0, TOTAL_LENGTH)
# could return TOTAL_LENGTH and make n_th_data exhaust the generator
# (StopIteration). Valid 0-based indices are 0 .. TOTAL_LENGTH - 1.
element_idx = random.randint(0, TOTAL_LENGTH - 1)
print(n_th_data(data, element_idx))
44392387615 | import falcon
from routes.user import UserRoutes
from routes.workspace import WorkspaceRoutes
from routes.display import DisplayRoutes
from routes.token import TokenRoutes
from routes.scene import SceneRoutes
from routes.slide import SlideRoutes
from falcon.http_status import HTTPStatus
class HandleCORS(object):
    """Falcon middleware that attaches permissive CORS headers to every response
    and short-circuits OPTIONS (preflight) requests with an empty 200."""

    def process_request(self, req, resp):
        # Insertion order matters only for readability; every header is set
        # on every request via the same resp.set_header calls as before.
        cors_headers = (
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Methods', '*'),
            ('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept, Authorization'),
            ('Access-Control-Max-Age', 1728000),  # 20 days
        )
        for header_name, header_value in cors_headers:
            resp.set_header(header_name, header_value)
        if req.method == 'OPTIONS':
            # Preflight: reply immediately without routing to a resource.
            raise HTTPStatus(falcon.HTTP_200, body='\n')
# Application setup: one CORS middleware applied to every route.
app = falcon.API(middleware=[HandleCORS()])
# One shared resource instance per route class.
userRoutes = UserRoutes()
workspaceRoutes = WorkspaceRoutes()
displayRoutes = DisplayRoutes()
tokenRoutes = TokenRoutes()
sceneRoutes = SceneRoutes()
slideRoutes = SlideRoutes()
# User account endpoints (register/login/password reset use suffixed responders).
app.add_route('/user', userRoutes)
app.add_route('/user/register', userRoutes, suffix='register')
app.add_route('/user/login', userRoutes, suffix='login')
app.add_route('/user/forgot', userRoutes, suffix='forgot')
app.add_route('/user/reset', userRoutes, suffix='reset')
app.add_route('/token', tokenRoutes)
# Workspace endpoints and their nested scene/slide/display sub-resources.
app.add_route('/workspaces', workspaceRoutes)
app.add_route('/workspaces/{workspaceId}', workspaceRoutes)
app.add_route('/workspaces/{workspaceId}/users/{userId}', userRoutes, suffix='giveaccess')
app.add_route('/workspaces/{workspaceId}/scenes', sceneRoutes)
app.add_route('/workspaces/{workspaceId}/scenes/{sceneId}', sceneRoutes, suffix='withSceneId')
app.add_route('/workspaces/{workspaceId}/slides', slideRoutes)
app.add_route('/workspaces/{workspaceId}/slides/{slideId}', slideRoutes, suffix='withSlideId')
app.add_route('/workspaces/{workspaceId}/displays', displayRoutes)
app.add_route('/workspaces/{workspaceId}/displays/{displayId}', displayRoutes, suffix='withDisplayId')
| Silassales/Displayly | backend/main.py | main.py | py | 1,942 | python | en | code | 0 | github-code | 36 |
33040434381 | import asyncio
import dataclasses
import time
from secrets import token_bytes
from typing import Callable, Dict, List, Optional, Tuple, Set
from blspy import AugSchemeMPL, G2Element
from chiabip158 import PyBIP158
import chia.server.ws_connection as ws
from chia.consensus.block_creation import create_unfinished_block
from chia.consensus.block_record import BlockRecord
from chia.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters
from chia.full_node.bundle_tools import best_solution_generator_from_template, simple_solution_generator
from chia.full_node.full_node import FullNode
from chia.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from chia.full_node.signage_point import SignagePoint
from chia.protocols import farmer_protocol, full_node_protocol, introducer_protocol, timelord_protocol, wallet_protocol
from chia.protocols.full_node_protocol import RejectBlock, RejectBlocks
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import PuzzleSolutionResponse, RejectHeaderBlocks, RejectHeaderRequest
from chia.server.outbound_message import Message, make_msg
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.mempool_item import MempoolItem
from chia.types.peer_info import PeerInfo
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.api_decorators import api_request, peer_required, bytes_required, execute_task
from chia.util.generator_tools import get_block_header
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.merkle_set import MerkleSet
class FullNodeAPI:
full_node: FullNode
    def __init__(self, full_node: FullNode) -> None:
        # Thin protocol layer: every handler delegates to this FullNode.
        self.full_node = full_node
    def _set_state_changed_callback(self, callback: Callable):
        # Forward state-change notifications to the wrapped node.
        self.full_node.state_changed_callback = callback
    @property
    def server(self):
        """The wrapped node's server instance."""
        return self.full_node.server
    @property
    def log(self):
        """The wrapped node's logger."""
        return self.full_node.log
    @property
    def api_ready(self):
        """True once the wrapped node has completed initialization."""
        return self.full_node.initialized
    @peer_required
    @api_request
    async def request_peers(self, _request: full_node_protocol.RequestPeers, peer: ws.WSChiaConnection):
        """Answer a peer-discovery request with peers known to full_node_peers."""
        # Ignore requesters that do not advertise a server port.
        if peer.peer_server_port is None:
            return None
        peer_info = PeerInfo(peer.peer_host, peer.peer_server_port)
        if self.full_node.full_node_peers is not None:
            msg = await self.full_node.full_node_peers.request_peers(peer_info)
            return msg
    @peer_required
    @api_request
    async def respond_peers(
        self, request: full_node_protocol.RespondPeers, peer: ws.WSChiaConnection
    ) -> Optional[Message]:
        """Handle a list of peers received from another full node."""
        self.log.debug(f"Received {len(request.peer_list)} peers")
        if self.full_node.full_node_peers is not None:
            await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), True)
        return None
    @peer_required
    @api_request
    async def respond_peers_introducer(
        self, request: introducer_protocol.RespondPeersIntroducer, peer: ws.WSChiaConnection
    ) -> Optional[Message]:
        """Handle a list of peers received from an introducer, then drop the connection."""
        self.log.debug(f"Received {len(request.peer_list)} peers from introducer")
        if self.full_node.full_node_peers is not None:
            await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), False)
        # The introducer connection is only needed once; close it after processing.
        await peer.close()
        return None
    @execute_task
    @peer_required
    @api_request
    async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection) -> Optional[Message]:
        """
        A peer notifies us that they have added a new peak to their blockchain. If we don't have it,
        we can ask for it.
        """
        # NOTE(review): @execute_task appears to run this handler as a separate
        # task — confirm in chia.util.api_decorators.
        # this semaphore limits the number of tasks that can call new_peak() at
        # the same time, since it can be expensive
        async with self.full_node.new_peak_sem:
            return await self.full_node.new_peak(request, peer)
    @peer_required
    @api_request
    async def new_transaction(
        self, transaction: full_node_protocol.NewTransaction, peer: ws.WSChiaConnection
    ) -> Optional[Message]:
        """
        A peer notifies us of a new transaction.
        Requests a full transaction if we haven't seen it previously, and if the fees are enough.
        """
        # Ignore if syncing
        if self.full_node.sync_store.get_sync_mode():
            return None
        if not (await self.full_node.synced()):
            return None
        # Ignore announcements until the initial-freeze period has ended.
        if int(time.time()) <= self.full_node.constants.INITIAL_FREEZE_END_TIMESTAMP:
            return None
        # Ignore if already seen
        if self.full_node.mempool_manager.seen(transaction.transaction_id):
            return None
        if self.full_node.mempool_manager.is_fee_enough(transaction.fees, transaction.cost):
            # If there's current pending request just add this peer to the set of peers that have this tx
            if transaction.transaction_id in self.full_node.full_node_store.pending_tx_request:
                if transaction.transaction_id in self.full_node.full_node_store.peers_with_tx:
                    current_set = self.full_node.full_node_store.peers_with_tx[transaction.transaction_id]
                    if peer.peer_node_id in current_set:
                        return None
                    current_set.add(peer.peer_node_id)
                    return None
                else:
                    new_set = set()
                    new_set.add(peer.peer_node_id)
                    self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
                    return None
            # First announcement of this tx: record the peer and start a fetch task.
            self.full_node.full_node_store.pending_tx_request[transaction.transaction_id] = peer.peer_node_id
            new_set = set()
            new_set.add(peer.peer_node_id)
            self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
            async def tx_request_and_timeout(full_node: FullNode, transaction_id, task_id):
                # Keep asking peers that advertised this tx, one every 5 seconds,
                # until one responds, the tx appears in the mempool, or limits hit.
                counter = 0
                try:
                    while True:
                        # Limit to asking 10 peers, it's possible that this tx got included on chain already
                        # Highly unlikely 10 peers that advertised a tx don't respond to a request
                        if counter == 10:
                            break
                        if transaction_id not in full_node.full_node_store.peers_with_tx:
                            break
                        peers_with_tx: Set = full_node.full_node_store.peers_with_tx[transaction_id]
                        if len(peers_with_tx) == 0:
                            break
                        peer_id = peers_with_tx.pop()
                        assert full_node.server is not None
                        if peer_id not in full_node.server.all_connections:
                            continue
                        peer = full_node.server.all_connections[peer_id]
                        request_tx = full_node_protocol.RequestTransaction(transaction.transaction_id)
                        msg = make_msg(ProtocolMessageTypes.request_transaction, request_tx)
                        await peer.send_message(msg)
                        await asyncio.sleep(5)
                        counter += 1
                        if full_node.mempool_manager.seen(transaction_id):
                            break
                except asyncio.CancelledError:
                    pass
                finally:
                    # Always Cleanup
                    if transaction_id in full_node.full_node_store.peers_with_tx:
                        full_node.full_node_store.peers_with_tx.pop(transaction_id)
                    if transaction_id in full_node.full_node_store.pending_tx_request:
                        full_node.full_node_store.pending_tx_request.pop(transaction_id)
                    if task_id in full_node.full_node_store.tx_fetch_tasks:
                        full_node.full_node_store.tx_fetch_tasks.pop(task_id)
            task_id = token_bytes()
            fetch_task = asyncio.create_task(
                tx_request_and_timeout(self.full_node, transaction.transaction_id, task_id)
            )
            self.full_node.full_node_store.tx_fetch_tasks[task_id] = fetch_task
            return None
        return None
@api_request
async def request_transaction(self, request: full_node_protocol.RequestTransaction) -> Optional[Message]:
"""Peer has requested a full transaction from us."""
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
spend_bundle = self.full_node.mempool_manager.get_spendbundle(request.transaction_id)
if spend_bundle is None:
return None
transaction = full_node_protocol.RespondTransaction(spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
return msg
    @peer_required
    @api_request
    @bytes_required
    async def respond_transaction(
        self,
        tx: full_node_protocol.RespondTransaction,
        peer: ws.WSChiaConnection,
        tx_bytes: bytes = b"",
        test: bool = False,
    ) -> Optional[Message]:
        """
        Receives a full transaction from peer.
        If tx is added to mempool, send tx_id to others. (new_transaction)
        """
        assert tx_bytes != b""
        # Hash of the raw serialized bytes; used as the key into the pending-fetch maps.
        spend_name = std_hash(tx_bytes)
        # Clear the bookkeeping created by new_transaction() for this tx.
        if spend_name in self.full_node.full_node_store.pending_tx_request:
            self.full_node.full_node_store.pending_tx_request.pop(spend_name)
        if spend_name in self.full_node.full_node_store.peers_with_tx:
            self.full_node.full_node_store.peers_with_tx.pop(spend_name)
        await self.full_node.respond_transaction(tx.transaction, spend_name, peer, test)
        return None
    @api_request
    async def request_proof_of_weight(self, request: full_node_protocol.RequestProofOfWeight) -> Optional[Message]:
        """Build (or reuse) a proof of weight for the requested tip and reply with it."""
        if self.full_node.weight_proof_handler is None:
            return None
        if not self.full_node.blockchain.contains_block(request.tip):
            self.log.error(f"got weight proof request for unknown peak {request.tip}")
            return None
        # If another request for the same tip is already generating a proof,
        # wait on its event instead of computing a second proof in parallel.
        if request.tip in self.full_node.pow_creation:
            event = self.full_node.pow_creation[request.tip]
            await event.wait()
            wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
        else:
            event = asyncio.Event()
            self.full_node.pow_creation[request.tip] = event
            wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
            event.set()
        tips = list(self.full_node.pow_creation.keys())
        if len(tips) > 4:
            # Remove old from cache
            for i in range(0, 4):
                self.full_node.pow_creation.pop(tips[i])
        if wp is None:
            self.log.error(f"failed creating weight proof for peak {request.tip}")
            return None
        # Serialization of wp is slow
        # Reuse the cached serialized message if it was built for this same tip.
        if (
            self.full_node.full_node_store.serialized_wp_message_tip is not None
            and self.full_node.full_node_store.serialized_wp_message_tip == request.tip
        ):
            return self.full_node.full_node_store.serialized_wp_message
        message = make_msg(
            ProtocolMessageTypes.respond_proof_of_weight, full_node_protocol.RespondProofOfWeight(wp, request.tip)
        )
        self.full_node.full_node_store.serialized_wp_message_tip = request.tip
        self.full_node.full_node_store.serialized_wp_message = message
        return message
    @api_request
    async def respond_proof_of_weight(self, request: full_node_protocol.RespondProofOfWeight) -> Optional[Message]:
        """A weight proof arrived after we no longer need it; log and discard."""
        self.log.warning("Received proof of weight too late.")
        return None
    @api_request
    async def request_block(self, request: full_node_protocol.RequestBlock) -> Optional[Message]:
        """Serve one full block by height, or a RejectBlock if we don't have it."""
        if not self.full_node.blockchain.contains_height(request.height):
            reject = RejectBlock(request.height)
            msg = make_msg(ProtocolMessageTypes.reject_block, reject)
            return msg
        header_hash = self.full_node.blockchain.height_to_hash(request.height)
        block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
        if block is not None:
            # Strip the transactions generator when the requester did not ask for it.
            if not request.include_transaction_block and block.transactions_generator is not None:
                block = dataclasses.replace(block, transactions_generator=None)
            return make_msg(ProtocolMessageTypes.respond_block, full_node_protocol.RespondBlock(block))
        reject = RejectBlock(request.height)
        msg = make_msg(ProtocolMessageTypes.reject_block, reject)
        return msg
    @api_request
    async def request_blocks(self, request: full_node_protocol.RequestBlocks) -> Optional[Message]:
        """Serve a batch of full blocks for [start_height, end_height] (at most 32)."""
        # Reject invalid ranges and batches larger than 32 blocks.
        if request.end_height < request.start_height or request.end_height - request.start_height > 32:
            reject = RejectBlocks(request.start_height, request.end_height)
            msg: Message = make_msg(ProtocolMessageTypes.reject_blocks, reject)
            return msg
        # Reject if any height in the range is unknown to us.
        for i in range(request.start_height, request.end_height + 1):
            if not self.full_node.blockchain.contains_height(uint32(i)):
                reject = RejectBlocks(request.start_height, request.end_height)
                msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
                return msg
        if not request.include_transaction_block:
            # Deserialize each block so its transactions generator can be stripped.
            blocks: List[FullBlock] = []
            for i in range(request.start_height, request.end_height + 1):
                block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(
                    self.full_node.blockchain.height_to_hash(uint32(i))
                )
                if block is None:
                    reject = RejectBlocks(request.start_height, request.end_height)
                    msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
                    return msg
                block = dataclasses.replace(block, transactions_generator=None)
                blocks.append(block)
            msg = make_msg(
                ProtocolMessageTypes.respond_blocks,
                full_node_protocol.RespondBlocks(request.start_height, request.end_height, blocks),
            )
        else:
            # Full blocks requested: hand-build the serialized RespondBlocks payload
            # (start height, end height, 4-byte big-endian count, then each block's
            # stored bytes) directly from the block store.
            blocks_bytes: List[bytes] = []
            for i in range(request.start_height, request.end_height + 1):
                block_bytes: Optional[bytes] = await self.full_node.block_store.get_full_block_bytes(
                    self.full_node.blockchain.height_to_hash(uint32(i))
                )
                if block_bytes is None:
                    reject = RejectBlocks(request.start_height, request.end_height)
                    msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
                    return msg
                blocks_bytes.append(block_bytes)
            respond_blocks_manually_streamed: bytes = (
                bytes(uint32(request.start_height))
                + bytes(uint32(request.end_height))
                + len(blocks_bytes).to_bytes(4, "big", signed=False)
            )
            for block_bytes in blocks_bytes:
                respond_blocks_manually_streamed += block_bytes
            msg = make_msg(ProtocolMessageTypes.respond_blocks, respond_blocks_manually_streamed)
        return msg
    @api_request
    async def reject_block(self, request: full_node_protocol.RejectBlock):
        """A peer rejected our block request; just log it."""
        self.log.debug(f"reject_block {request.height}")
    @api_request
    async def reject_blocks(self, request: full_node_protocol.RejectBlocks):
        """A peer rejected our batch block request; just log it."""
        self.log.debug(f"reject_blocks {request.start_height} {request.end_height}")
    @api_request
    async def respond_blocks(self, request: full_node_protocol.RespondBlocks) -> None:
        """Blocks arrived that we did not ask for (or asked for long ago); discard."""
        self.log.warning("Received unsolicited/late blocks")
        return None
    @api_request
    @peer_required
    async def respond_block(
        self,
        respond_block: full_node_protocol.RespondBlock,
        peer: ws.WSChiaConnection,
    ) -> Optional[Message]:
        """
        Receive a full block from a peer full node (or ourselves).
        """
        # Unsolicited blocks are logged and dropped rather than processed.
        self.log.warning(f"Received unsolicited/late block from peer {peer.get_peer_info()}")
        return None
    @api_request
    async def new_unfinished_block(
        self, new_unfinished_block: full_node_protocol.NewUnfinishedBlock
    ) -> Optional[Message]:
        """A peer advertised an unfinished block; request its body unless we
        already have it or are already fetching it."""
        # Ignore if syncing
        if self.full_node.sync_store.get_sync_mode():
            return None
        block_hash = new_unfinished_block.unfinished_reward_hash
        if self.full_node.full_node_store.get_unfinished_block(block_hash) is not None:
            return None
        # This prevents us from downloading the same block from many peers
        if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
            return None
        msg = make_msg(
            ProtocolMessageTypes.request_unfinished_block,
            full_node_protocol.RequestUnfinishedBlock(block_hash),
        )
        self.full_node.full_node_store.requesting_unfinished_blocks.add(block_hash)
        # However, we want to eventually download from other peers, if this peer does not respond
        # Todo: keep track of who it was
        async def eventually_clear():
            # Drop the in-flight marker after 5s so another peer can be asked.
            await asyncio.sleep(5)
            if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
                self.full_node.full_node_store.requesting_unfinished_blocks.remove(block_hash)
        asyncio.create_task(eventually_clear())
        return msg
@api_request
async def request_unfinished_block(
self, request_unfinished_block: full_node_protocol.RequestUnfinishedBlock
) -> Optional[Message]:
unfinished_block: Optional[UnfinishedBlock] = self.full_node.full_node_store.get_unfinished_block(
request_unfinished_block.unfinished_reward_hash
)
if unfinished_block is not None:
msg = make_msg(
ProtocolMessageTypes.respond_unfinished_block,
full_node_protocol.RespondUnfinishedBlock(unfinished_block),
)
return msg
return None
    @peer_required
    @api_request
    async def respond_unfinished_block(
        self,
        respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
        peer: ws.WSChiaConnection,
    ) -> Optional[Message]:
        """Receive an unfinished block we requested and hand it to the node."""
        if self.full_node.sync_store.get_sync_mode():
            return None
        await self.full_node.respond_unfinished_block(respond_unfinished_block, peer)
        return None
    @api_request
    @peer_required
    async def new_signage_point_or_end_of_sub_slot(
        self, new_sp: full_node_protocol.NewSignagePointOrEndOfSubSlot, peer: ws.WSChiaConnection
    ) -> Optional[Message]:
        """A peer advertised a new signage point or end-of-sub-slot; request what we
        are missing, backtracking through earlier sub-slots when needed."""
        # Ignore if syncing
        if self.full_node.sync_store.get_sync_mode():
            return None
        # Already stored: nothing to do.
        if (
            self.full_node.full_node_store.get_signage_point_by_index(
                new_sp.challenge_hash,
                new_sp.index_from_challenge,
                new_sp.last_rc_infusion,
            )
            is not None
        ):
            return None
        if self.full_node.full_node_store.have_newer_signage_point(
            new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
        ):
            return None
        if new_sp.index_from_challenge == 0 and new_sp.prev_challenge_hash is not None:
            if self.full_node.full_node_store.get_sub_slot(new_sp.prev_challenge_hash) is None:
                # We are missing the previous sub-slot: walk back (at most 30 slots,
                # at most 3 of them non-empty) until we reach one we know, then
                # replay the collected end-of-sub-slots in order.
                collected_eos = []
                challenge_hash_to_request = new_sp.challenge_hash
                last_rc = new_sp.last_rc_infusion
                num_non_empty_sub_slots_seen = 0
                for _ in range(30):
                    if num_non_empty_sub_slots_seen >= 3:
                        self.log.debug("Diverged from peer. Don't have the same blocks")
                        return None
                    # If this is an end of sub slot, and we don't have the prev, request the prev instead
                    # We want to catch up to the latest slot so we can receive signage points
                    full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
                        challenge_hash_to_request, uint8(0), last_rc
                    )
                    response = await peer.request_signage_point_or_end_of_sub_slot(full_node_request, timeout=10)
                    if not isinstance(response, full_node_protocol.RespondEndOfSubSlot):
                        self.full_node.log.debug(f"Invalid response for slot {response}")
                        return None
                    collected_eos.append(response)
                    if (
                        self.full_node.full_node_store.get_sub_slot(
                            response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
                        )
                        is not None
                        or response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
                        == self.full_node.constants.GENESIS_CHALLENGE
                    ):
                        # Reached a known sub-slot (or genesis): apply collected slots oldest-first.
                        for eos in reversed(collected_eos):
                            await self.respond_end_of_sub_slot(eos, peer)
                        return None
                    if (
                        response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.number_of_iterations
                        != response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.number_of_iterations
                    ):
                        num_non_empty_sub_slots_seen += 1
                    challenge_hash_to_request = (
                        response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
                    )
                    last_rc = response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge
                self.full_node.log.warning("Failed to catch up in sub-slots")
                return None
        if new_sp.index_from_challenge > 0:
            if (
                new_sp.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE
                and self.full_node.full_node_store.get_sub_slot(new_sp.challenge_hash) is None
            ):
                # If this is a normal signage point,, and we don't have the end of sub slot, request the end of sub slot
                full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
                    new_sp.challenge_hash, uint8(0), new_sp.last_rc_infusion
                )
                return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
        # Otherwise (we have the prev or the end of sub slot), request it normally
        full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
            new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
        )
        return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
    @api_request
    async def request_signage_point_or_end_of_sub_slot(
        self, request: full_node_protocol.RequestSignagePointOrEndOfSubSlot
    ) -> Optional[Message]:
        """Serve a stored end-of-sub-slot bundle (index 0) or a signage point (index > 0)."""
        if request.index_from_challenge == 0:
            sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = self.full_node.full_node_store.get_sub_slot(
                request.challenge_hash
            )
            if sub_slot is not None:
                return make_msg(
                    ProtocolMessageTypes.respond_end_of_sub_slot,
                    full_node_protocol.RespondEndOfSubSlot(sub_slot[0]),
                )
        else:
            if self.full_node.full_node_store.get_sub_slot(request.challenge_hash) is None:
                if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
                    self.log.info(f"Don't have challenge hash {request.challenge_hash}")
            sp: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point_by_index(
                request.challenge_hash,
                request.index_from_challenge,
                request.last_rc_infusion,
            )
            if sp is not None:
                # A stored signage point must carry both VDFs and both proofs.
                assert (
                    sp.cc_vdf is not None
                    and sp.cc_proof is not None
                    and sp.rc_vdf is not None
                    and sp.rc_proof is not None
                )
                full_node_response = full_node_protocol.RespondSignagePoint(
                    request.index_from_challenge,
                    sp.cc_vdf,
                    sp.cc_proof,
                    sp.rc_vdf,
                    sp.rc_proof,
                )
                return make_msg(ProtocolMessageTypes.respond_signage_point, full_node_response)
            else:
                self.log.info(f"Don't have signage point {request}")
        return None
    @peer_required
    @api_request
    async def respond_signage_point(
        self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSChiaConnection
    ) -> Optional[Message]:
        """Store a signage point received from a peer; run post-processing if it was new."""
        if self.full_node.sync_store.get_sync_mode():
            return None
        async with self.full_node.timelord_lock:
            # Already have signage point
            if self.full_node.full_node_store.have_newer_signage_point(
                request.challenge_chain_vdf.challenge,
                request.index_from_challenge,
                request.reward_chain_vdf.challenge,
            ):
                return None
            existing_sp = self.full_node.full_node_store.get_signage_point(
                request.challenge_chain_vdf.output.get_hash()
            )
            if existing_sp is not None and existing_sp.rc_vdf == request.reward_chain_vdf:
                return None
            peak = self.full_node.blockchain.get_peak()
            if peak is not None and peak.height > self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
                # Past the early chain: derive iterations and sub-slots from the peak.
                next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True)
                sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
                assert sub_slots_for_peak is not None
                ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1]
            else:
                # Early chain: fall back to the starting sub-slot iterations constant.
                sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
                next_sub_slot_iters = sub_slot_iters
                ip_sub_slot = None
            added = self.full_node.full_node_store.new_signage_point(
                request.index_from_challenge,
                self.full_node.blockchain,
                self.full_node.blockchain.get_peak(),
                next_sub_slot_iters,
                SignagePoint(
                    request.challenge_chain_vdf,
                    request.challenge_chain_proof,
                    request.reward_chain_vdf,
                    request.reward_chain_proof,
                ),
            )
            if added:
                await self.full_node.signage_point_post_processing(request, peer, ip_sub_slot)
            else:
                self.log.debug(
                    f"Signage point {request.index_from_challenge} not added, CC challenge: "
                    f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
                )
            return None
    @peer_required
    @api_request
    async def respond_end_of_sub_slot(
        self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
    ) -> Optional[Message]:
        """Receive an end-of-sub-slot bundle and forward it to the node for processing."""
        if self.full_node.sync_store.get_sync_mode():
            return None
        msg, _ = await self.full_node.respond_end_of_sub_slot(request, peer)
        return msg
    @peer_required
    @api_request
    async def request_mempool_transactions(
        self,
        request: full_node_protocol.RequestMempoolTransactions,
        peer: ws.WSChiaConnection,
    ) -> Optional[Message]:
        """Send the peer every mempool transaction not matched by its BIP158
        filter, one respond_transaction message per item."""
        received_filter = PyBIP158(bytearray(request.filter))
        items: List[MempoolItem] = await self.full_node.mempool_manager.get_items_not_in_filter(received_filter)
        for item in items:
            transaction = full_node_protocol.RespondTransaction(item.spend_bundle)
            msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
            await peer.send_message(msg)
        return None
# FARMER PROTOCOL
@api_request
@peer_required
async def declare_proof_of_space(
self, request: farmer_protocol.DeclareProofOfSpace, peer: ws.WSChiaConnection
) -> Optional[Message]:
"""
Creates a block body and header, with the proof of space, coinbase, and fee targets provided
by the farmer, and sends the hash of the header data back to the farmer.
"""
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
sp_vdfs: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_sp
)
if sp_vdfs is None:
self.log.warning(f"Received proof of space for an unknown signage point {request.challenge_chain_sp}")
return None
if request.signage_point_index > 0:
assert sp_vdfs.rc_vdf is not None
if sp_vdfs.rc_vdf.output.get_hash() != request.reward_chain_sp:
self.log.debug(
f"Received proof of space for a potentially old signage point {request.challenge_chain_sp}. "
f"Current sp: {sp_vdfs.rc_vdf.output.get_hash()}"
)
return None
if request.signage_point_index == 0:
cc_challenge_hash: bytes32 = request.challenge_chain_sp
else:
assert sp_vdfs.cc_vdf is not None
cc_challenge_hash = sp_vdfs.cc_vdf.challenge
pos_sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = None
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
# Checks that the proof of space is a response to a recent challenge and valid SP
pos_sub_slot = self.full_node.full_node_store.get_sub_slot(cc_challenge_hash)
if pos_sub_slot is None:
self.log.warning(f"Received proof of space for an unknown sub slot: {request}")
return None
total_iters_pos_slot: uint128 = pos_sub_slot[2]
else:
total_iters_pos_slot = uint128(0)
assert cc_challenge_hash == request.challenge_hash
# Now we know that the proof of space has a signage point either:
# 1. In the previous sub-slot of the peak (overflow)
# 2. In the same sub-slot as the peak
# 3. In a future sub-slot that we already know of
# Checks that the proof of space is valid
quality_string: Optional[bytes32] = request.proof_of_space.verify_and_get_quality_string(
self.full_node.constants, cc_challenge_hash, request.challenge_chain_sp
)
assert quality_string is not None and len(quality_string) == 32
# Grab best transactions from Mempool for given tip target
aggregate_signature: G2Element = G2Element()
block_generator: Optional[BlockGenerator] = None
additions: Optional[List[Coin]] = []
removals: Optional[List[Coin]] = []
async with self.full_node.blockchain.lock:
peak: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
if peak is not None:
# Finds the last transaction block before this one
curr_l_tb: BlockRecord = peak
while not curr_l_tb.is_transaction_block:
curr_l_tb = self.full_node.blockchain.block_record(curr_l_tb.prev_hash)
try:
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(
curr_l_tb.header_hash
)
except Exception as e:
self.full_node.log.error(f"Error making spend bundle {e} peak: {peak}")
mempool_bundle = None
if mempool_bundle is not None:
spend_bundle = mempool_bundle[0]
additions = mempool_bundle[1]
removals = mempool_bundle[2]
self.full_node.log.info(f"Add rem: {len(additions)} {len(removals)}")
aggregate_signature = spend_bundle.aggregated_signature
if self.full_node.full_node_store.previous_generator is not None:
self.log.info(
f"Using previous generator for height "
f"{self.full_node.full_node_store.previous_generator}"
)
block_generator = best_solution_generator_from_template(
self.full_node.full_node_store.previous_generator, spend_bundle
)
else:
block_generator = simple_solution_generator(spend_bundle)
def get_plot_sig(to_sign, _) -> G2Element:
if to_sign == request.challenge_chain_sp:
return request.challenge_chain_sp_signature
elif to_sign == request.reward_chain_sp:
return request.reward_chain_sp_signature
return G2Element()
def get_pool_sig(_1, _2) -> Optional[G2Element]:
return request.pool_signature
prev_b: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
# Finds the previous block from the signage point, ensuring that the reward chain VDF is correct
if prev_b is not None:
if request.signage_point_index == 0:
if pos_sub_slot is None:
self.log.warning("Pos sub slot is None")
return None
rc_challenge = pos_sub_slot[0].reward_chain.end_of_slot_vdf.challenge
else:
assert sp_vdfs.rc_vdf is not None
rc_challenge = sp_vdfs.rc_vdf.challenge
# Backtrack through empty sub-slots
for eos, _, _ in reversed(self.full_node.full_node_store.finished_sub_slots):
if eos is not None and eos.reward_chain.get_hash() == rc_challenge:
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
found = False
attempts = 0
while prev_b is not None and attempts < 10:
if prev_b.reward_infusion_new_challenge == rc_challenge:
found = True
break
if prev_b.finished_reward_slot_hashes is not None and len(prev_b.finished_reward_slot_hashes) > 0:
if prev_b.finished_reward_slot_hashes[-1] == rc_challenge:
# This block includes a sub-slot which is where our SP vdf starts. Go back one more
# to find the prev block
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
found = True
break
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
attempts += 1
if not found:
self.log.warning("Did not find a previous block with the correct reward chain hash")
return None
try:
finished_sub_slots: Optional[
List[EndOfSubSlotBundle]
] = self.full_node.full_node_store.get_finished_sub_slots(
self.full_node.blockchain, prev_b, cc_challenge_hash
)
if finished_sub_slots is None:
return None
if (
len(finished_sub_slots) > 0
and pos_sub_slot is not None
and finished_sub_slots[-1] != pos_sub_slot[0]
):
self.log.error("Have different sub-slots than is required to farm this block")
return None
except ValueError as e:
self.log.warning(f"Value Error: {e}")
return None
if prev_b is None:
pool_target = PoolTarget(
self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
uint32(0),
)
farmer_ph = self.full_node.constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
else:
farmer_ph = request.farmer_puzzle_hash
if request.proof_of_space.pool_contract_puzzle_hash is not None and request.pool_target is None:
pool_target = PoolTarget(request.proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
assert request.pool_target is not None
pool_target = request.pool_target
if peak is None or peak.height <= self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
difficulty = self.full_node.constants.DIFFICULTY_STARTING
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
else:
difficulty = uint64(peak.weight - self.full_node.blockchain.block_record(peak.prev_hash).weight)
sub_slot_iters = peak.sub_slot_iters
for sub_slot in finished_sub_slots:
if sub_slot.challenge_chain.new_difficulty is not None:
difficulty = sub_slot.challenge_chain.new_difficulty
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
sub_slot_iters = sub_slot.challenge_chain.new_sub_slot_iters
required_iters: uint64 = calculate_iterations_quality(
self.full_node.constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
request.proof_of_space.size,
difficulty,
request.challenge_chain_sp,
)
sp_iters: uint64 = calculate_sp_iters(self.full_node.constants, sub_slot_iters, request.signage_point_index)
ip_iters: uint64 = calculate_ip_iters(
self.full_node.constants,
sub_slot_iters,
request.signage_point_index,
required_iters,
)
# The block's timestamp must be greater than the previous transaction block's timestamp
timestamp = uint64(int(time.time()))
curr: Optional[BlockRecord] = prev_b
while curr is not None and not curr.is_transaction_block and curr.height != 0:
curr = self.full_node.blockchain.try_block_record(curr.prev_hash)
if curr is not None:
assert curr.timestamp is not None
if timestamp <= curr.timestamp:
timestamp = uint64(int(curr.timestamp + 1))
self.log.info("Starting to make the unfinished block")
unfinished_block: UnfinishedBlock = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
block_generator,
aggregate_signature,
additions,
removals,
prev_b,
finished_sub_slots,
)
self.log.info("Made the unfinished block")
if prev_b is not None:
height: uint32 = uint32(prev_b.height + 1)
else:
height = uint32(0)
self.full_node.full_node_store.add_candidate_block(quality_string, height, unfinished_block)
foliage_sb_data_hash = unfinished_block.foliage.foliage_block_data.get_hash()
if unfinished_block.is_transaction_block():
foliage_transaction_block_hash = unfinished_block.foliage.foliage_transaction_block_hash
else:
foliage_transaction_block_hash = bytes([0] * 32)
message = farmer_protocol.RequestSignedValues(
quality_string,
foliage_sb_data_hash,
foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
# Adds backup in case the first one fails
if unfinished_block.is_transaction_block() and unfinished_block.transactions_generator is not None:
unfinished_block_backup = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
None,
G2Element(),
None,
None,
prev_b,
finished_sub_slots,
)
self.full_node.full_node_store.add_candidate_block(
quality_string, height, unfinished_block_backup, backup=True
)
return None
@api_request
@peer_required
async def signed_values(
    self, farmer_request: farmer_protocol.SignedValues, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """
    Signature of header hash, by the harvester. This is enough to create an unfinished
    block, which only needs a Proof of Time to be finished. If the signature is valid,
    we call the unfinished_block routine.
    """
    # Look up the candidate block previously stored under this quality string
    # (added earlier by the proof-of-space handler).
    candidate_tuple: Optional[Tuple[uint32, UnfinishedBlock]] = self.full_node.full_node_store.get_candidate_block(
        farmer_request.quality_string
    )
    if candidate_tuple is None:
        self.log.warning(f"Quality string {farmer_request.quality_string} not found in database")
        return None
    height, candidate = candidate_tuple
    # Verify the farmer's signature over the foliage block data against the plot key.
    if not AugSchemeMPL.verify(
        candidate.reward_chain_block.proof_of_space.plot_public_key,
        candidate.foliage.foliage_block_data.get_hash(),
        farmer_request.foliage_block_data_signature,
    ):
        self.log.warning("Signature not valid. There might be a collision in plots. Ignore this during tests.")
        return None
    # Splice the farmer-provided signatures into the candidate's foliage.
    fsb2 = dataclasses.replace(
        candidate.foliage,
        foliage_block_data_signature=farmer_request.foliage_block_data_signature,
    )
    if candidate.is_transaction_block():
        fsb2 = dataclasses.replace(
            fsb2, foliage_transaction_block_signature=farmer_request.foliage_transaction_block_signature
        )
    new_candidate = dataclasses.replace(candidate, foliage=fsb2)
    if not self.full_node.has_valid_pool_sig(new_candidate):
        self.log.warning("Trying to make a pre-farm block but height is not 0")
        return None
    # Propagate to ourselves (which validates and does further propagations)
    request = full_node_protocol.RespondUnfinishedBlock(new_candidate)
    try:
        await self.full_node.respond_unfinished_block(request, None, True)
    except Exception as e:
        # If we have an error with this block, try making an empty block
        self.full_node.log.error(f"Error farming block {e} {request}")
        # Fall back to the backup (empty) candidate stored under the same quality string.
        candidate_tuple = self.full_node.full_node_store.get_candidate_block(
            farmer_request.quality_string, backup=True
        )
        if candidate_tuple is not None:
            height, unfinished_block = candidate_tuple
            # Promote the backup to the primary candidate (backup=False) and
            # re-request signed values for it from the farmer.
            self.full_node.full_node_store.add_candidate_block(
                farmer_request.quality_string, height, unfinished_block, False
            )
            message = farmer_protocol.RequestSignedValues(
                farmer_request.quality_string,
                unfinished_block.foliage.foliage_block_data.get_hash(),
                unfinished_block.foliage.foliage_transaction_block_hash,
            )
            await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
    return None
# TIMELORD PROTOCOL
@peer_required
@api_request
async def new_infusion_point_vdf(
    self, request: timelord_protocol.NewInfusionPointVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """Handle an infusion-point VDF from the timelord.

    Ignored while syncing; otherwise delegated to the full node under the
    timelord lock so that concurrent timelord messages are serialized while
    the matching unfinished block is looked up.
    """
    node = self.full_node
    if node.sync_store.get_sync_mode():
        return None
    async with node.timelord_lock:
        return await node.new_infusion_point_vdf(request, peer)
@peer_required
@api_request
async def new_signage_point_vdf(
    self, request: timelord_protocol.NewSignagePointVDF, peer: ws.WSChiaConnection
) -> None:
    """Wrap a timelord signage-point VDF into the peer-protocol message and
    process it through the regular signage-point handler (skipped while syncing).
    """
    if self.full_node.sync_store.get_sync_mode():
        return None
    await self.respond_signage_point(
        full_node_protocol.RespondSignagePoint(
            request.index_from_challenge,
            request.challenge_chain_sp_vdf,
            request.challenge_chain_sp_proof,
            request.reward_chain_sp_vdf,
            request.reward_chain_sp_proof,
        ),
        peer,
    )
@peer_required
@api_request
async def new_end_of_sub_slot_vdf(
    self, request: timelord_protocol.NewEndOfSubSlotVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
    """Process an end-of-sub-slot bundle produced by the timelord.

    Already-known sub-slots are ignored. On failure to add, the current peak
    is re-sent to the timelord so it can re-sync its state.
    """
    if self.full_node.sync_store.get_sync_mode():
        return None
    cc_hash = request.end_of_sub_slot_bundle.challenge_chain.get_hash()
    if self.full_node.full_node_store.get_sub_slot(cc_hash) is not None:
        # Sub-slot already stored; nothing to do.
        return None
    # Calls our own internal handler, which may broadcast to other peers.
    msg, added = await self.full_node.respond_end_of_sub_slot(
        full_node_protocol.RespondEndOfSubSlot(request.end_of_sub_slot_bundle), peer
    )
    if added:
        return msg
    self.log.error(
        f"Was not able to add end of sub-slot: "
        f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}. "
        f"Re-sending new-peak to timelord"
    )
    await self.full_node.send_peak_to_timelords(peer=peer)
    return None
@api_request
async def request_block_header(self, request: wallet_protocol.RequestBlockHeader) -> Optional[Message]:
    """Serve a wallet's request for the header block at a given height.

    Rejects unknown heights; returns None when the full block cannot be
    loaded from the store.
    """
    header_hash = self.full_node.blockchain.height_to_hash(request.height)
    if header_hash is None:
        # Height is not part of the current chain.
        return make_msg(ProtocolMessageTypes.reject_header_request, RejectHeaderRequest(request.height))
    block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
    if block is None:
        return None
    tx_removals, tx_additions = await self.full_node.blockchain.get_tx_removals_and_additions(block)
    return make_msg(
        ProtocolMessageTypes.respond_block_header,
        wallet_protocol.RespondBlockHeader(get_block_header(block, tx_additions, tx_removals)),
    )
@api_request
async def request_additions(self, request: wallet_protocol.RequestAdditions) -> Optional[Message]:
    """Serve a wallet's request for the coins added in a given block.

    When specific puzzle hashes are requested, Merkle inclusion/exclusion
    proofs against the block's additions root are returned as well.
    """
    block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
    # We lock so that the coin store does not get modified
    # Reject when the block is unknown, not a transaction block, or no longer
    # part of the main chain (e.g. reorged out).
    if (
        block is None
        or block.is_transaction_block() is False
        or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
    ):
        reject = wallet_protocol.RejectAdditionsRequest(request.height, request.header_hash)
        msg = make_msg(ProtocolMessageTypes.reject_additions_request, reject)
        return msg
    assert block is not None and block.foliage_transaction_block is not None
    # Note: this might return bad data if there is a reorg in this time
    additions = await self.full_node.coin_store.get_coins_added_at_height(block.height)
    # Re-check chain membership after the async coin-store read to detect a reorg.
    if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
        raise ValueError(f"Block {block.header_hash} no longer in chain")
    # Group the added coins by puzzle hash.
    puzzlehash_coins_map: Dict[bytes32, List[Coin]] = {}
    for coin_record in additions:
        if coin_record.coin.puzzle_hash in puzzlehash_coins_map:
            puzzlehash_coins_map[coin_record.coin.puzzle_hash].append(coin_record.coin)
        else:
            puzzlehash_coins_map[coin_record.coin.puzzle_hash] = [coin_record.coin]
    coins_map: List[Tuple[bytes32, List[Coin]]] = []
    proofs_map: List[Tuple[bytes32, bytes, Optional[bytes]]] = []
    if request.puzzle_hashes is None:
        # No filter supplied: return every addition, without proofs.
        for puzzle_hash, coins in puzzlehash_coins_map.items():
            coins_map.append((puzzle_hash, coins))
        response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, None)
    else:
        # Create addition Merkle set
        addition_merkle_set = MerkleSet()
        # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
        for puzzle, coins in puzzlehash_coins_map.items():
            addition_merkle_set.add_already_hashed(puzzle)
            addition_merkle_set.add_already_hashed(hash_coin_list(coins))
        # Sanity check: the rebuilt set must match the root committed in the block.
        assert addition_merkle_set.get_root() == block.foliage_transaction_block.additions_root
        for puzzle_hash in request.puzzle_hashes:
            # Inclusion/exclusion proof for the puzzle hash itself.
            result, proof = addition_merkle_set.is_included_already_hashed(puzzle_hash)
            if puzzle_hash in puzzlehash_coins_map:
                coins_map.append((puzzle_hash, puzzlehash_coins_map[puzzle_hash]))
                hash_coin_str = hash_coin_list(puzzlehash_coins_map[puzzle_hash])
                # Plus an inclusion proof for the hash of its coin list.
                result_2, proof_2 = addition_merkle_set.is_included_already_hashed(hash_coin_str)
                assert result
                assert result_2
                proofs_map.append((puzzle_hash, proof, proof_2))
            else:
                # Not present: empty coin list together with an exclusion proof.
                coins_map.append((puzzle_hash, []))
                assert not result
                proofs_map.append((puzzle_hash, proof, None))
        response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, proofs_map)
    msg = make_msg(ProtocolMessageTypes.respond_additions, response)
    return msg
@api_request
async def request_removals(self, request: wallet_protocol.RequestRemovals) -> Optional[Message]:
    """Serve a wallet's request for the coins removed (spent) in a given block.

    When specific coin names are requested, Merkle inclusion/exclusion proofs
    against the block's removals root are returned as well.
    """
    block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
    # We lock so that the coin store does not get modified
    # Reject when the block is unknown, not a transaction block, does not match
    # the requested height, or is no longer part of the main chain.
    if (
        block is None
        or block.is_transaction_block() is False
        or block.height != request.height
        or block.height > self.full_node.blockchain.get_peak_height()
        or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
    ):
        reject = wallet_protocol.RejectRemovalsRequest(request.height, request.header_hash)
        msg = make_msg(ProtocolMessageTypes.reject_removals_request, reject)
        return msg
    assert block is not None and block.foliage_transaction_block is not None
    # Note: this might return bad data if there is a reorg in this time
    all_removals: List[CoinRecord] = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
    # Re-check chain membership after the async coin-store read to detect a reorg.
    if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
        raise ValueError(f"Block {block.header_hash} no longer in chain")
    # Index the removed coins by coin name (id).
    all_removals_dict: Dict[bytes32, Coin] = {}
    for coin_record in all_removals:
        all_removals_dict[coin_record.coin.name()] = coin_record.coin
    coins_map: List[Tuple[bytes32, Optional[Coin]]] = []
    proofs_map: List[Tuple[bytes32, bytes]] = []
    # If there are no transactions, respond with empty lists
    if block.transactions_generator is None:
        proofs: Optional[List]
        if request.coin_names is None:
            proofs = None
        else:
            proofs = []
        response = wallet_protocol.RespondRemovals(block.height, block.header_hash, [], proofs)
    elif request.coin_names is None or len(request.coin_names) == 0:
        # No filter supplied: return every removal, without proofs.
        for removed_name, removed_coin in all_removals_dict.items():
            coins_map.append((removed_name, removed_coin))
        response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, None)
    else:
        assert block.transactions_generator
        # Rebuild the removal Merkle set from the removed coin names.
        removal_merkle_set = MerkleSet()
        for removed_name, removed_coin in all_removals_dict.items():
            removal_merkle_set.add_already_hashed(removed_name)
        # Sanity check: the rebuilt set must match the root committed in the block.
        assert removal_merkle_set.get_root() == block.foliage_transaction_block.removals_root
        for coin_name in request.coin_names:
            # Inclusion/exclusion proof for each requested coin name.
            result, proof = removal_merkle_set.is_included_already_hashed(coin_name)
            proofs_map.append((coin_name, proof))
            if coin_name in all_removals_dict:
                removed_coin = all_removals_dict[coin_name]
                coins_map.append((coin_name, removed_coin))
                assert result
            else:
                # Not removed in this block: None coin with an exclusion proof.
                coins_map.append((coin_name, None))
                assert not result
        response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, proofs_map)
    msg = make_msg(ProtocolMessageTypes.respond_removals, response)
    return msg
@api_request
async def send_transaction(self, request: wallet_protocol.SendTransaction) -> Optional[Message]:
    """Submit a wallet's spend bundle to the mempool and acknowledge the result.

    A failed/pending submission is still acknowledged as SUCCESS when the same
    spend bundle is already in the mempool (idempotent resubmission).
    """
    spend_name = request.transaction.name()
    status, error = await self.full_node.respond_transaction(request.transaction, spend_name)
    error_name = error.name if error is not None else None
    already_in_mempool = self.full_node.mempool_manager.get_spendbundle(spend_name) is not None
    if status != MempoolInclusionStatus.SUCCESS and already_in_mempool:
        # Failed or pending now, but previously accepted into the mempool:
        # report idempotent success.
        response = wallet_protocol.TransactionAck(spend_name, uint8(MempoolInclusionStatus.SUCCESS.value), None)
    else:
        response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
    return make_msg(ProtocolMessageTypes.transaction_ack, response)
@api_request
async def request_puzzle_solution(self, request: wallet_protocol.RequestPuzzleSolution) -> Optional[Message]:
    """Return the puzzle reveal and solution with which a coin was spent.

    Rejects when the coin was not spent at the requested height, or the spend
    cannot be re-derived from the block's transactions generator.
    """
    coin_name = request.coin_name
    height = request.height
    coin_record = await self.full_node.coin_store.get_coin_record(coin_name)
    # Build the rejection reply once; reused by every failure path below.
    reject = wallet_protocol.RejectPuzzleSolution(coin_name, height)
    reject_msg = make_msg(ProtocolMessageTypes.reject_puzzle_solution, reject)
    if coin_record is None or coin_record.spent_block_index != height:
        return reject_msg
    header_hash = self.full_node.blockchain.height_to_hash(height)
    block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
    if block is None or block.transactions_generator is None:
        return reject_msg
    # Re-run the block's generator (cost-bounded by MAX_BLOCK_COST_CLVM) to
    # extract this coin's puzzle and solution.
    block_generator: Optional[BlockGenerator] = await self.full_node.blockchain.get_block_generator(block)
    assert block_generator is not None
    error, puzzle, solution = get_puzzle_and_solution_for_coin(
        block_generator, coin_name, self.full_node.constants.MAX_BLOCK_COST_CLVM
    )
    if error is not None:
        return reject_msg
    pz = Program.to(puzzle)
    sol = Program.to(solution)
    wrapper = PuzzleSolutionResponse(coin_name, height, pz, sol)
    response = wallet_protocol.RespondPuzzleSolution(wrapper)
    response_msg = make_msg(ProtocolMessageTypes.respond_puzzle_solution, response)
    return response_msg
@api_request
async def request_header_blocks(self, request: wallet_protocol.RequestHeaderBlocks) -> Optional[Message]:
    """Return the header blocks for a span of at most 33 heights.

    Invalid ranges yield None; a height missing from the chain yields a
    rejection message.
    """
    if request.end_height < request.start_height or request.end_height - request.start_height > 32:
        return None
    heights = range(request.start_height, request.end_height + 1)
    # Every requested height must exist in the current chain.
    if not all(self.full_node.blockchain.contains_height(uint32(h)) for h in heights):
        reject = RejectHeaderBlocks(request.start_height, request.end_height)
        return make_msg(ProtocolMessageTypes.reject_header_blocks, reject)
    header_hashes = [self.full_node.blockchain.height_to_hash(uint32(h)) for h in heights]
    blocks: List[FullBlock] = await self.full_node.block_store.get_blocks_by_hash(header_hashes)
    header_blocks = []
    for block in blocks:
        added_records = await self.full_node.coin_store.get_coins_added_at_height(block.height)
        removed_records = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
        # Coinbase (farmer/pool reward) coins are excluded from the additions.
        non_coinbase_additions = [record.coin for record in added_records if not record.coinbase]
        removal_names = [record.coin.name() for record in removed_records]
        header_blocks.append(get_block_header(block, non_coinbase_additions, removal_names))
    return make_msg(
        ProtocolMessageTypes.respond_header_blocks,
        wallet_protocol.RespondHeaderBlocks(request.start_height, request.end_height, header_blocks),
    )
@api_request
async def respond_compact_proof_of_time(self, request: timelord_protocol.RespondCompactProofOfTime):
    """Forward a compact proof-of-time from the timelord, unless we are syncing."""
    if not self.full_node.sync_store.get_sync_mode():
        await self.full_node.respond_compact_proof_of_time(request)
@execute_task
@peer_required
@api_request
async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection):
    """Handle a peer's compact-VDF announcement (skipped while syncing).

    The semaphore bounds how many new_compact_vdf() calls may run
    concurrently, since processing them can be expensive.
    """
    node = self.full_node
    if node.sync_store.get_sync_mode():
        return None
    async with node.compact_vdf_sem:
        await node.new_compact_vdf(request, peer)
@peer_required
@api_request
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
    """Serve a peer's request for a compact VDF proof (ignored while syncing)."""
    if not self.full_node.sync_store.get_sync_mode():
        await self.full_node.request_compact_vdf(request, peer)
@peer_required
@api_request
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
    """Process a compact VDF proof sent by a peer (ignored while syncing)."""
    if not self.full_node.sync_store.get_sync_mode():
        await self.full_node.respond_compact_vdf(request, peer)
| snight1983/chia-rosechain | chia/full_node/full_node_api.py | full_node_api.py | py | 61,930 | python | en | code | 369 | github-code | 36 |
def gen(num):
    """Yield num, num - 1, ... down to (but excluding) 0.

    Yields nothing when num <= 0.
    """
    current = num
    while current > 0:
        yield current
        current -= 1
if __name__ == "__main__":
    # gen(5) returns a generator object; advance it once to consume the first
    # value (5), then print the remaining values 4..1.
    countdown = gen(5)
    first = next(countdown)
    for value in countdown:
        print(value)
15215617350 | # -*- coding: utf-8 -*-
from get_data import getData
import numpy as np
import matplotlib.pyplot as plt
from lung_mask import getLungMask
from keras.models import Model
from keras.layers import Input, BatchNormalization, Activation, Dropout
from keras.layers.convolutional import Conv3D, Conv3DTranspose
from keras.layers.pooling import MaxPooling3D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
#import keras
import cv2
train_volumes, train_volumes_masks, _, val_volumes, val_volumes_masks, _, test_volumes, test_volumes_masks, _ = getData(type_ = "volume")
#%%
def run_segmentation_CNN():
    """Load the data, train the 3D segmentation U-net and print its scores."""
    (volumes_tr, masks_tr, _, volumes_val, masks_val, _,
     volumes_te, masks_te, _) = getData(type_="volume")
    train, test, val, train_masks, test_masks, val_masks = prepare_CNN(
        volumes_tr, masks_tr, volumes_val, masks_val, volumes_te, masks_te
    )
    (results, accuracy, dice, jaccard, _test_preds,
     accuracy_val, dice_val, jaccard_val, _val_preds) = train_model(
        train, test, val, train_masks, test_masks, val_masks
    )
    plot_loss(results)
    print("Test set: The dice value is %.2f and the jaccard value is %.2f. The accuracy is %.2f" % (dice, jaccard, accuracy))
    print("validation set: The dice value is %.2f and the jaccard value is %.2f. The accuracy is %.2f" % (dice_val, jaccard_val, accuracy_val))
#%%
"""
prepare_CNN
===============
prepares the input for the model of the CNN
Arguments:
Returns:train_volumes, test_volumes, val_volumes - images of train, test and validation sets after aplying lung mask, normalization and
reshaped for input on the CNN
train_volumes_masks, test_volumes_masks, val_volumes_masks - classes in the format of one-hot-vector (1,0,0)
"""
def prepare_CNN(train_volumes, train_volumes_masks, val_volumes, val_volumes_masks, test_volumes, test_volumes_masks):
mean_int=np.mean(train_volumes)
std_int=np.std(train_volumes)
train_volumes = (train_volumes - mean_int)/std_int
val_volumes = (val_volumes - mean_int)/std_int
test_volumes = (test_volumes - mean_int)/std_int
train=[]
test=[]
val=[]
train_mask=[]
val_mask=[]
#reshape to a multiple of 16 to better applye the U-net CNN - padding from 51 to 64
for train_volume, i in zip(train_volumes, range(len(train_volumes))):
train_volumes[i]= [cv2.copyMakeBorder(train_volume[i],7,6,6,7,cv2.BORDER_CONSTANT,value=0) for i in range(len(train_volume))]
#val_volume_mask= [cv2.copyMakeBorder(val_volume_mask[i],7,6,6,7,cv2.BORDER_CONSTANT,value=0) for i in range(len(val_volume_mask))]
train_volumes_masks = np.asarray(train_volumes_masks)
test_volumes_masks = np.asarray(test_volumes_masks)
val_volumes_masks = np.asarray(val_volumes_masks)
train_volumes = np.asarray(train_volumes)
test_volumes = np.asarray(test_volumes)
val_volumes = np.asarray(val_volumes)
train_volumes_masks = train_volumes_masks.astype('float32')
test_volumes_masks = test_volumes_masks.astype('float32')
val_volumes_masks = val_volumes_masks.astype('float32')
train_volumes = train_volumes.astype('float32')
test_volumes = test_volumes.astype('float32')
val_volumes = val_volumes.astype('float32')
train_volumes = train_volumes.reshape(-1,64,64,64,1)
test_volumes = test_volumes.reshape(-1,64,64,64,1)
val_volumes = val_volumes.reshape(-1, 64,64, 64,1)
train_volumes_masks = train_volumes_masks.reshape(-1,64,64,64,1)
val_volumes_masks = val_volumes_masks.reshape(-1, 64,64,64, 1)
return train_volumes, test_volumes, val_volumes, train_volumes_masks, test_volumes_masks, val_volumes_masks
#%%
def conv3d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    """Two stacked 3D convolutions with ReLU activations.

    Each convolution uses a cubic kernel, 'same' padding and He-normal
    initialization; batch normalization is inserted before each activation
    when `batchnorm` is True.
    """
    x = input_tensor
    for _ in range(2):
        x = Conv3D(
            filters=n_filters,
            kernel_size=(kernel_size, kernel_size, kernel_size),
            kernel_initializer="he_normal",
            padding="same",
        )(x)
        if batchnorm:
            x = BatchNormalization()(x)
        x = Activation("relu")(x)
    return x
def get_unet(input_img, n_filters=16, dropout=0.4, batchnorm=True):
    """Build a 3D U-net.

    Parameters:
        input_img: Keras Input tensor of shape (depth, height, width, channels);
            each spatial dimension must be divisible by 16 (four 2x poolings).
        n_filters: filter count of the first level; doubled at each deeper level.
        dropout: dropout rate used along the contracting/expansive paths
            (halved after the very first block).
        batchnorm: whether conv blocks use batch normalization.

    Returns:
        A keras Model mapping input_img to a single-channel sigmoid volume.
    """
    # contracting path
    c1 = conv3d_block(input_img, n_filters=n_filters*1, kernel_size=3, batchnorm=batchnorm)
    p1 = MaxPooling3D((2, 2, 2))(c1)
    p1 = Dropout(dropout*0.5)(p1)
    c2 = conv3d_block(p1, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling3D((2, 2, 2))(c2)
    p2 = Dropout(dropout)(p2)
    c3 = conv3d_block(p2, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling3D((2, 2, 2))(c3)
    p3 = Dropout(dropout)(p3)
    c4 = conv3d_block(p3, n_filters=n_filters*8, kernel_size=3, batchnorm=batchnorm)
    p4 = MaxPooling3D(pool_size=(2, 2, 2))(c4)
    p4 = Dropout(dropout)(p4)
    # bottleneck
    c5 = conv3d_block(p4, n_filters=n_filters*16, kernel_size=3, batchnorm=batchnorm)
    # expansive path (each level: upsample, concat skip connection, conv block)
    u6 = Conv3DTranspose(n_filters*8, (3, 3, 3), strides=(2, 2, 2), padding='same')(c5)
    u6 = concatenate([u6, c4])
    u6 = Dropout(dropout)(u6)
    c6 = conv3d_block(u6, n_filters=n_filters*8, kernel_size=3, batchnorm=batchnorm)
    u7 = Conv3DTranspose(n_filters*4, (3, 3, 3), strides=(2, 2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(dropout)(u7)
    c7 = conv3d_block(u7, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)
    u8 = Conv3DTranspose(n_filters*2, (3, 3, 3), strides=(2, 2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(dropout)(u8)
    c8 = conv3d_block(u8, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)
    u9 = Conv3DTranspose(n_filters*1, (3, 3, 3), strides=(2, 2, 2), padding='same')(c8)
    # BUG FIX: this concatenation previously used axis=3, which for 5-D
    # channels-last tensors (batch, d, h, w, c) is a SPATIAL axis — a leftover
    # from a 2D U-net. Concatenate along the channel axis (the default, -1),
    # consistent with the other skip connections above.
    u9 = concatenate([u9, c1])
    u9 = Dropout(dropout)(u9)
    c9 = conv3d_block(u9, n_filters=n_filters*1, kernel_size=3, batchnorm=batchnorm)
    outputs = Conv3D(1, (1, 1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model
#%%
"""
IoU_loss
===============
defenition of loss for binary problem - try to maximize the jaccard coefficient ( as only true values matter)
it solves the problem of having more false (0) pixeis
Arguments:
Returns:
* results- coefiicient to minimize (1-jaccard)
"""
from keras import backend as K
def IoU_loss(y_true,y_pred):
smooth = 1e-12
# author = Vladimir Iglovikov
intersection = K.sum(y_true * y_pred)
sum_ = K.sum(y_true + y_pred)
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(1-jac)
#%%
"""
train_model
===============
train the model with tarin set and validation set to define treshold - evaluates test set
Arguments:
Returns:
* results- result of the trained model with keras
accuracy, dice, jaccard - evaluation scores for the test set
preds_test_nodules - predicted nodules on test set
"""
def train_model(train_volumes, test_volumes, val_volumes, train_volumes_masks, test_volumes_masks, val_volumes_masks):
# define parameters
im_width = 64
im_height = 64
epochs=100
batch=len(train_volumes)
input_img = Input((im_height, im_width, 1), name='img')
model = get_unet(input_img, n_filters=3, dropout=0.05, batchnorm=True)
model.compile(optimizer=Adam(), loss=IoU_loss)
#model.summary()
callbacks = [
EarlyStopping(patience=10, verbose=1),
ReduceLROnPlateau(factor=0.1, patience=3, min_lr=0.00001, verbose=1),
ModelCheckpoint('model3dsegmentation.h5', verbose=1, save_best_only=True, save_weights_only=True)
]
results = model.fit(train_volumes, train_volumes_masks, batch_size=batch,steps_per_epoch=10, epochs=epochs, callback=callbacks, verbose=0, validation_data=(val_volumes, val_volumes_masks))
model.load_weights('model3dsegmentation.h5')
treshold=(0.35,0.4, 0.45, 0.5,0.55,0.6,0.65,0.7,0.75)
maximo=0
# Predict for test with treshold
preds_train = model.predict(train_volumes, verbose=0)
for tresh in treshold:
preds_train_nodules = (preds_train >tresh).astype(np.uint8)
preds_train_nodules=preds_train_nodules.reshape(-1,64,64)
train_volumes_masks=train_volumes_masks.reshape(-1,64,64)
_, dice, jaccard = confusionMatrix(np.hstack(np.hstack(preds_train_nodules)), np.hstack(np.hstack(train_volumes_masks)))
metrics=dice+jaccard # the best result will dictate which is the bst treshold
if metrics > maximo :
maximo=metrics
best_treshold=tresh
# Predict for test with treshold already defined by training set
preds_val = model.predict(val_volumes, verbose=0)
preds_val_nodules = (preds_val >best_treshold).astype(np.uint8)
val_volumes_masks=val_volumes_masks.reshape(-1,64,64)
preds_val_nodules=preds_val_nodules.reshape(-1,64,64)
accuracy_val, dice_val, jaccard_val = confusionMatrix(np.hstack(np.hstack(preds_val_nodules)), np.hstack(np.hstack(val_volumes_masks)))
# Predict for test with treshold already defined by training set
preds_test = model.predict(test_volumes, verbose=0)
preds_test_nodules = (preds_test >best_treshold).astype(np.uint8)
preds_test_nodules=preds_test_nodules.reshape(-1,64,64)
#test_volumes_masks=test_volumes_masks.reshape(-1,51,51)
#cut the border previously used to match the ground truth
border_size_top_right=6
border_size_bottom_left=6
preds_test_nodules=[nodule[border_size_top_right:-(border_size_top_right+1),border_size_bottom_left:-(border_size_bottom_left+1)] for nodule in preds_test_nodules]
#Aplly morphologic operation to close some holes on predicted images
preds_test_nodules=closing(preds_test_nodules)
accuracy, dice, jaccard = confusionMatrix(np.hstack(np.hstack(preds_test_nodules)), np.hstack(np.hstack(test_volumes_masks)))
return results, accuracy, dice, jaccard, preds_test_nodules, accuracy_val, dice_val, jaccard_val, preds_val_nodules
#%%
"""
closing- morpological closing operation
=================================================
Arguments: image array
return: image array after closing
"""
def closing(preds_image):
new_preds=[]
for i in range(len(preds_image)):
kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
dilated_mask = cv2.dilate(preds_image[i],kernel_ellipse,iterations = 2)
erode_mask = cv2.erode(dilated_mask,kernel_ellipse,iterations = 2)
new_preds.append(erode_mask)
return new_preds
#%%
def confusionMatrix(predictions, labels):
    """Compute accuracy, Dice and Jaccard scores for binary (0/1) arrays.

    Both inputs are cast to float32; entries equal to 1.0 count as positives
    and entries equal to 0.0 as negatives (other values are ignored, matching
    the original element-wise counting).

    Arguments:
        predictions, labels: 1-D numpy arrays of the same length.
    Returns:
        (accuracy, dice, jaccard) as floats.
    Raises:
        ZeroDivisionError when a denominator is zero (e.g. no positives at all).
    """
    predictions = predictions.astype('float32')
    labels = labels.astype('float32')
    # Vectorized confusion-matrix counts: O(n) in C instead of a per-element
    # Python loop — same semantics, much faster on large masks.
    agree = predictions == labels
    positive = predictions == 1.0
    negative = predictions == 0.0
    true_positives = int(np.count_nonzero(agree & positive))
    true_negatives = int(np.count_nonzero(agree & negative))
    false_positives = int(np.count_nonzero(~agree & positive))
    false_negatives = int(np.count_nonzero(~agree & negative))
    total = true_positives + true_negatives + false_positives + false_negatives
    accuracy = (true_positives + true_negatives) / total
    dice = (2 * true_positives) / (false_positives + false_negatives + (2 * true_positives))
    jaccard = true_positives / (true_positives + false_positives + false_negatives)
    return accuracy, dice, jaccard
#%%
"""
show loss
===============
shows the progression of loss during the training of the model
Arguments: results - model trained
Returns:
*void
"""
def plot_loss(results):
    """Plot training vs. validation loss curves from a Keras History object."""
    history = results.history
    plt.figure(figsize=(8, 8))
    plt.title("Learning curve")
    plt.plot(history["loss"], 'bo', label="loss")
    plt.plot(history["val_loss"], 'b', label="val_loss")
    plt.xlabel("Epochs")
    plt.ylabel("log_loss")
    plt.legend()
#%%
run_segmentation_CNN()
| franciscapessanha/Pulmonary-nodules-analysis | CNN_segmentation_3D.py | CNN_segmentation_3D.py | py | 12,525 | python | en | code | 1 | github-code | 36 |
21203385181 | from pyramid_promosite.models import (
DBSession,
Page,
)
from pyramid.view import view_config
@view_config(
    route_name='admin',
    renderer='admin/index.jinja2',
    permission="authenticated",
)
def admin(request):
    """Admin index view: list top-level pages ordered by their position."""
    query = (
        DBSession.query(Page)
        .filter(Page.orign_page_id == 0)
        .order_by(Page.position)
    )
    return {'pages': query.all()}
| uralbash/pyramid_promosite | pyramid_promosite/views/admin.py | admin.py | py | 386 | python | en | code | 12 | github-code | 36 |
38092864913 | import abc
import collections
import copyreg
import enum
import functools
import inspect
import logging
import operator
import random
import string
import typing
from .. import _exception, _struct
from . import series
if typing.TYPE_CHECKING:
from forml.io import dsl
LOGGER = logging.getLogger(__name__)
class Rows(typing.NamedTuple):
    """Container describing a row-limit specification.

    Attention:
        Instances are expected to be created internally via :meth:`dsl.Queryable.limit
        <forml.io.dsl.Queryable.limit>`.
    """

    count: int
    """Number of rows to return."""
    offset: int = 0
    """Skip the given number of rows."""

    def __repr__(self):
        # Rendered as "<offset>:<count>" - mirrors Python slice notation.
        return '%d:%d' % (self.offset, self.count)
class Source(tuple, metaclass=abc.ABCMeta):
"""Base class of the *tabular* data frame sources.
A *Source* is anything that can be used to obtain tabular data *FROM*. It is a logical
collection of :class:`dsl.Feature <forml.io.dsl.Feature>` instances represented by its
:attr:`schema`.
"""
class Schema(type):
"""Meta-class for schema types construction.
It guarantees consistent hashing and comparability for equality of the produced schema
classes.
Attention:
This meta-class is used internally, for schema frontend API see the :class:`dsl.Schema
<forml.io.dsl.Schema>`.
"""
def __new__(mcs, name: str, bases: tuple[type], namespace: dict[str, typing.Any]):
seen = set()
existing = collections.ChainMap(
*(
{f.name: k}
for b in bases
if isinstance(b, Source.Schema)
for c in reversed(inspect.getmro(b))
for k, f in c.__dict__.items()
if isinstance(f, _struct.Field) and k not in seen and not seen.add(k)
)
)
if existing and len(existing.maps) > len(existing):
raise _exception.GrammarError(f'Colliding base classes in schema {name}')
for key, field in namespace.items():
if not isinstance(field, _struct.Field):
continue
if not field.name:
namespace[key] = field = field.renamed(key) # to normalize so that hash/eq is consistent
if field.name in existing and existing[field.name] != key:
raise _exception.GrammarError(f'Colliding field name {field.name} in schema {name}')
existing[field.name] = key
cls = super().__new__(mcs, name, bases, namespace)
cls.__qualname__ = f'{name}.schema'
return cls
def __hash__(cls):
# pylint: disable=not-an-iterable
return functools.reduce(operator.xor, (hash(f) for f in cls), 0)
def __eq__(cls, other: 'dsl.Source.Schema'):
return (
isinstance(other, cls.__class__) and len(cls) == len(other) and all(c == o for c, o in zip(cls, other))
)
def __len__(cls):
return sum(1 for _ in cls) # pylint: disable=not-an-iterable
def __repr__(cls):
return f'{cls.__module__}:{cls.__qualname__}'
@functools.lru_cache
def __getitem__(cls, name: str) -> 'dsl.Field':
try:
item = getattr(cls, name)
except AttributeError:
for field in cls: # pylint: disable=not-an-iterable
if name == field.name:
return field
else:
if isinstance(item, _struct.Field):
return item
raise KeyError(f'Unknown field {name}')
def __iter__(cls) -> typing.Iterator['dsl.Field']:
return iter(
{
k: f
for c in reversed(inspect.getmro(cls))
for k, f in c.__dict__.items()
if isinstance(f, _struct.Field)
}.values()
)
copyreg.pickle(
Schema,
lambda s: (
Source.Schema,
(s.__name__, s.__bases__, {k: f for k, f in s.__dict__.items() if isinstance(f, _struct.Field)}),
),
)
class Visitor:
"""Source visitor."""
def visit_source(self, source: 'dsl.Source') -> None: # pylint: disable=unused-argument
"""Generic source hook.
Args:
source: Source instance to be visited.
"""
def visit_table(self, source: 'dsl.Table') -> None:
"""Table hook.
Args:
source: Source instance to be visited.
"""
self.visit_source(source)
def visit_reference(self, source: 'dsl.Reference') -> None:
"""Reference hook.
Args:
source: Instance to be visited.
"""
source.instance.accept(self)
self.visit_source(source)
def visit_join(self, source: 'dsl.Join') -> None:
"""Join hook.
Args:
source: Instance to be visited.
"""
source.left.accept(self)
source.right.accept(self)
self.visit_source(source)
def visit_set(self, source: 'dsl.Set') -> None:
"""Set hook.
Args:
source: Instance to be visited.
"""
source.left.accept(self)
source.right.accept(self)
self.visit_source(source)
def visit_query(self, source: 'dsl.Query') -> None:
"""Query hook.
Args:
source: Instance to be visited.
"""
source.source.accept(self)
self.visit_source(source)
def __new__(cls, *args):
return super().__new__(cls, args)
def __getnewargs__(self):
return tuple(self)
def __hash__(self):
return hash(self.__class__.__module__) ^ hash(self.__class__.__qualname__) ^ super().__hash__()
def __repr__(self):
return f'{self.__class__.__name__}({", ".join(repr(a) for a in self)})'
def __getattr__(self, name: str) -> 'dsl.Feature':
try:
return self[name]
except KeyError as err:
raise AttributeError(f'Invalid feature {name}') from err
@functools.lru_cache
def __getitem__(self, name: typing.Union[int, str]) -> typing.Any:
try:
return super().__getitem__(name)
except (TypeError, IndexError) as err:
name = self.schema[name].name
for field, feature in zip(self.schema, self.features):
if name == field.name:
return feature
raise RuntimeError(f'Inconsistent {name} lookup vs schema iteration') from err
@abc.abstractmethod
def accept(self, visitor: 'dsl.Source.Visitor') -> None:
"""Visitor acceptor.
Args:
visitor: Visitor instance.
"""
@functools.cached_property
def schema(self) -> 'dsl.Source.Schema':
"""Schema type representing this source.
Returns:
Schema type.
"""
return self.Schema(
self.__class__.__name__,
(_struct.Schema.schema,),
{(c.name or f'_{i}'): _struct.Field(c.kind, c.name) for i, c in enumerate(self.features)},
)
@functools.cached_property
@abc.abstractmethod
def features(self) -> typing.Sequence['dsl.Feature']:
"""List of features logically contained in or potentially produced by this Source.
Returns:
Sequence of contained features.
"""
@property
def query(self) -> 'dsl.Query':
"""Query equivalent of this Source.
Returns:
Query instance.
"""
return Query(self)
@property
def statement(self) -> 'dsl.Statement':
"""Statement equivalent of this Source.
Returns:
Statement instance.
"""
return self.query
@property
def instance(self) -> 'dsl.Source':
"""Return the source instance.
Apart from the ``Reference`` type is the Source itself.
Returns:
Source instance.
"""
return self
def reference(self, name: typing.Optional[str] = None) -> 'dsl.Reference':
"""Get an independent reference to this Source (e.g. for self-join conditions).
Args:
name: Optional alias to be used for this reference (random by default).
Returns:
New reference to this Source.
Examples:
>>> manager = staff.Employee.reference('manager')
>>> subs = (
... manager.join(staff.Employee, staff.Employee.manager == manager.id)
... .select(manager.name, function.Count(staff.Employee.id).alias('subs'))
... .groupby(manager.id)
... )
"""
return Reference(self, name)
def union(self, other: 'dsl.Source') -> 'dsl.Set':
"""Create a new Source as a set union of this and the other Source.
Args:
other: Source to union with.
Returns:
Set instance.
Examples:
>>> barbaz = (
... foo.Bar.select(foo.Bar.X, foo.Bar.Y)
... .union(foo.Baz.select(foo.Baz.X, foo.Baz.Y))
... )
"""
return Set(self, other, Set.Kind.UNION)
def intersection(self, other: 'dsl.Source') -> 'dsl.Set':
"""Create a new Source as a set intersection of this and the other Source.
Args:
other: Source to intersect with.
Returns:
Set instance.
Examples:
>>> barbaz = (
... foo.Bar.select(foo.Bar.X, foo.Bar.Y)
... .intersection(foo.Baz.select(foo.Baz.X, foo.Baz.Y))
... )
"""
return Set(self, other, Set.Kind.INTERSECTION)
def difference(self, other: 'dsl.Source') -> 'dsl.Set':
"""Create a new Source as a set difference of this and the other Source.
Args:
other: Source to difference with.
Returns:
Set instance.
Examples:
>>> barbaz = (
... foo.Bar.select(foo.Bar.X, foo.Bar.Y)
... .difference(foo.Baz.select(foo.Baz.X, foo.Baz.Y))
... )
"""
return Set(self, other, Set.Kind.DIFFERENCE)
class Statement(Source, metaclass=abc.ABCMeta):
    """Base class for complete statements.

    Complete statements are:

    * :class:`forml.io.dsl.Query`
    * :class:`forml.io.dsl.Set`.
    """

    # Marker base only - all concrete behavior lives in the Query/Set subclasses.
class Set(Statement):
    """Source made of two set-combined sub-statements with the same schema.

    Attention:
        Instances are expected to be created internally via:

        * :meth:`dsl.Source.union() <forml.io.dsl.Source.union>`
        * :meth:`dsl.Source.intersection() <forml.io.dsl.Source.intersection>`
        * :meth:`dsl.Source.difference() <forml.io.dsl.Source.difference>`
    """

    @enum.unique
    class Kind(enum.Enum):
        """Set type enum."""

        UNION = 'union'
        """Union set operation type."""
        INTERSECTION = 'intersection'
        """Intersection set operation type."""
        DIFFERENCE = 'difference'
        """Difference set operation type."""

    # The underlying tuple holds (left, right, kind); expose them positionally.
    left: 'dsl.Statement' = property(operator.itemgetter(0))
    """Left side of the set operation."""
    right: 'dsl.Statement' = property(operator.itemgetter(1))
    """Right side of the set operation."""
    kind: 'dsl.Set.Kind' = property(operator.itemgetter(2))
    """Set operation enum type."""

    def __new__(cls, left: 'dsl.Source', right: 'dsl.Source', kind: 'dsl.Set.Kind'):
        # Set operations are only valid over sources with identical schemas.
        if left.schema != right.schema:
            raise _exception.GrammarError('Incompatible sources')
        return super().__new__(cls, left.statement, right.statement, kind)

    def __repr__(self):
        return f'{repr(self.left)} {self.kind.value} {repr(self.right)}'

    @property
    def statement(self) -> 'dsl.Statement':
        # A Set is already a complete statement - return itself.
        return self

    @functools.cached_property
    def features(self) -> typing.Sequence['dsl.Feature']:
        # Concatenation of both sides' features (schemas are equal by construction).
        return self.left.features + self.right.features

    def accept(self, visitor: 'dsl.Source.Visitor') -> None:
        visitor.visit_set(self)
class Queryable(Source, metaclass=abc.ABCMeta):
"""Base class for any *Source* that can be queried directly."""
def select(self, *features: 'dsl.Feature') -> 'dsl.Query':
"""Specify the output features to be provided (projection).
Repeated calls to ``.select`` replace the earlier selection.
Args:
features: Sequence of features.
Returns:
Query instance.
Examples:
>>> barxy = foo.Bar.select(foo.Bar.X, foo.Bar.Y)
"""
return self.query.select(*features)
def where(self, condition: 'dsl.Predicate') -> 'dsl.Query':
"""Add a row-filtering condition that's evaluated before any aggregations.
Repeated calls to ``.where`` combine all the conditions (logical AND).
Args:
condition: Boolean feature expression.
Returns:
Query instance.
Examples:
>>> barx10 = foo.Bar.where(foo.Bar.X == 10)
"""
return self.query.where(condition)
def having(self, condition: 'dsl.Predicate') -> 'dsl.Query':
"""Add a row-filtering condition that's applied to the evaluated aggregations.
Repeated calls to ``.having`` combine all the conditions (logical AND).
Args:
condition: Boolean feature expression.
Returns:
Query instance.
Examples:
>>> bargy10 = foo.Bar.groupby(foo.Bar.X).having(function.Count(foo.Bar.Y) == 10)
"""
return self.query.having(condition)
def groupby(self, *features: 'dsl.Operable') -> 'dsl.Query':
"""Aggregation grouping specifiers.
Repeated calls to ``.groupby`` replace the earlier grouping.
Args:
features: Sequence of aggregation features.
Returns:
Query instance.
Examples:
>>> bargbx = foo.Bar.groupby(foo.Bar.X).select(foo.Bar.X, function.Count(foo.Bar.Y))
"""
return self.query.groupby(*features)
def orderby(self, *terms: 'dsl.Ordering.Term') -> 'dsl.Query':
"""Ordering specifiers.
Default direction is *ascending*.
Repeated calls to ``.orderby`` replace the earlier ordering.
Args:
terms: Sequence of feature and direction tuples.
Returns:
Query instance.
Examples:
>>> barbyx = foo.Bar.orderby(foo.Bar.X)
>>> barbyxd = foo.Bar.orderby(foo.Bar.X, 'desc')
>>> barbxy = foo.Bar.orderby(foo.Bar.X, foo.Bar.Y)
>>> barbxdy = foo.Bar.orderby(
... foo.Bar.X, dsl.Ordering.Direction.DESCENDING, foo.Bar.Y, 'asc'
... )
>>> barbydxd = foo.Bar.orderby(
... (foo.Bar.X, 'desc'),
... (foo.Bar.Y, dsl.Ordering.Direction.DESCENDING),
... )
"""
return self.query.orderby(*terms)
def limit(self, count: int, offset: int = 0) -> 'dsl.Query':
"""Restrict the result rows by its max *count* with an optional *offset*.
Repeated calls to ``.limit`` replace the earlier restriction.
Args:
count: Number of rows to return.
offset: Skip the given number of rows.
Returns:
Query instance.
Examples:
>>> bar10 = foo.Bar.limit(10)
"""
return self.query.limit(count, offset)
class Origin(Queryable, metaclass=abc.ABCMeta):
"""Origin is a queryable Source with some handle.
Its features are represented using :class:`dsl.Element <forml.io.dsl.Element>`.
"""
@property
@abc.abstractmethod
def features(self) -> typing.Sequence['dsl.Element']:
"""Origin features are instances of ``dsl.Element``.
Returns:
Sequence of ``dsl.Element`` instances.
"""
def inner_join(self, other: 'dsl.Origin', condition: 'dsl.Predicate') -> 'dsl.Join':
"""Construct an *inner* join with the other *origin* using the provided *condition*.
Args:
other: Source to join with.
condition: Feature expression as the join condition.
Returns:
Join instance.
Examples:
>>> barbaz = foo.Bar.inner_join(foo.Baz, foo.Bar.baz == foo.Baz.id)
"""
return Join(self, other, Join.Kind.INNER, condition)
def left_join(self, other: 'dsl.Origin', condition: 'dsl.Predicate') -> 'dsl.Join':
"""Construct a *left* join with the other *origin* using the provided *condition*.
Args:
other: Source to join with.
condition: Feature expression as the join condition.
Returns:
Join instance.
Examples:
>>> barbaz = foo.Bar.left_join(foo.Baz, foo.Bar.baz == foo.Baz.id)
"""
return Join(self, other, Join.Kind.LEFT, condition)
def right_join(self, other: 'dsl.Origin', condition: 'dsl.Predicate') -> 'dsl.Join':
"""Construct a *right* join with the other *origin* using the provided *condition*.
Args:
other: Source to join with.
condition: Feature expression as the join condition.
Returns:
Join instance.
Examples:
>>> barbaz = foo.Bar.right_join(foo.Baz, foo.Bar.baz == foo.Baz.id)
"""
return Join(self, other, Join.Kind.RIGHT, condition)
def full_join(self, other: 'dsl.Origin', condition: 'dsl.Predicate') -> 'dsl.Join':
"""Construct a *full* join with the other *origin* using the provided *condition*.
Args:
other: Source to join with.
condition: Feature expression as the join condition.
Returns:
Join instance.
Examples:
>>> barbaz = foo.Bar.full_join(foo.Baz, foo.Bar.baz == foo.Baz.id)
"""
return Join(self, other, Join.Kind.FULL, condition)
def cross_join(self, other: 'dsl.Origin') -> 'dsl.Join':
"""Construct a *cross* join with the other *origin*.
Args:
other: Source to join with.
Returns:
Join instance.
Examples:
>>> barbaz = foo.Bar.cross_join(foo.Baz)
"""
return Join(self, other, kind=Join.Kind.CROSS)
class Join(Origin):
"""Source made of two join-combined sub-sources.
Attention:
Instances are expected to be created internally via:
* :meth:`dsl.Origin.inner_join() <forml.io.dsl.Origin.inner_join>`
* :meth:`dsl.Origin.left_join() <forml.io.dsl.Origin.left_join>`
* :meth:`dsl.Origin.right_join() <forml.io.dsl.Origin.right_join>`
* :meth:`dsl.Origin.full_join() <forml.io.dsl.Origin.full_join>`
* :meth:`dsl.Origin.cross_join() <forml.io.dsl.Origin.cross_join>`
"""
@enum.unique
class Kind(enum.Enum):
"""Join type enum."""
INNER = 'inner'
"""Inner join type (default if *condition* is provided)."""
LEFT = 'left'
"""Left outer join type."""
RIGHT = 'right'
"""Right outer join type."""
FULL = 'full'
"""Full join type."""
CROSS = 'cross'
"""Cross join type (default if *condition* is not provided)."""
def __repr__(self):
return f'<{self.value}-join>'
left: 'dsl.Origin' = property(operator.itemgetter(0))
"""Left side of the join operation."""
right: 'dsl.Origin' = property(operator.itemgetter(1))
"""Right side of the join operation."""
kind: 'dsl.Join.Kind' = property(operator.itemgetter(2))
"""Join type."""
condition: typing.Optional['dsl.Predicate'] = property(operator.itemgetter(3))
"""Join condition (invalid for *CROSS*-join)."""
def __new__(
cls,
left: 'dsl.Origin',
right: 'dsl.Origin',
kind: typing.Union['dsl.Join.Kind', str],
condition: typing.Optional['dsl.Predicate'] = None,
):
if (kind is cls.Kind.CROSS) ^ (condition is None):
raise _exception.GrammarError('Illegal use of condition and join type')
if condition is not None:
condition = series.Cumulative.ensure_notin(series.Predicate.ensure_is(condition))
if not series.Element.dissect(condition).issubset(series.Element.dissect(*left.features, *right.features)):
raise _exception.GrammarError(
f'({condition}) not a subset of source features ({left.features}, {right.features})'
)
return super().__new__(cls, left, right, kind, condition)
def __repr__(self):
return f'{repr(self.left)}{repr(self.kind)}{repr(self.right)}'
@functools.cached_property
def features(self) -> typing.Sequence['dsl.Element']:
return self.left.features + self.right.features
def accept(self, visitor: 'dsl.Source.Visitor') -> None:
visitor.visit_join(self)
class Reference(Origin):
    """Wrapper around any *Source* associating it with a (possibly random) name.

    Attention:
        Instances are expected to be created internally via :meth:`dsl.Source.reference
        <forml.io.dsl.Source.reference>`.
    """

    # Length of the generated random alias when no explicit name is given.
    _NAMELEN: int = 8
    instance: 'dsl.Source' = property(operator.itemgetter(0))
    """Wrapped *Source* instance."""
    name: str = property(operator.itemgetter(1))
    """Reference name."""

    def __new__(cls, instance: 'dsl.Source', name: typing.Optional[str] = None):
        if not name:
            # Generate a random lowercase alias for anonymous references.
            name = ''.join(random.choice(string.ascii_lowercase) for _ in range(cls._NAMELEN))
        # Unwrap via .instance so references never nest other references.
        return super().__new__(cls, instance.instance, name)

    def __repr__(self):
        return f'{self.name}=[{repr(self.instance)}]'

    @functools.cached_property
    def features(self) -> typing.Sequence['dsl.Element']:
        # Re-bind the wrapped source's features so they resolve against this reference.
        return tuple(series.Element(self, c.name) for c in self.instance.features)

    @property
    def schema(self) -> 'dsl.Source.Schema':
        # The reference is schema-transparent - delegate to the wrapped source.
        return self.instance.schema

    def accept(self, visitor: 'dsl.Source.Visitor') -> None:
        """Visitor acceptor.

        Args:
            visitor: Visitor instance.
        """
        visitor.visit_reference(self)
class Table(Origin):
"""Table based *Source* with an explicit *schema*.
Attention:
The primary way of creating ``Table`` instances is by inheriting the :class:`dsl.Schema
<forml.io.dsl.Schema>` which is using this type as a meta-class.
"""
class Meta(abc.ABCMeta):
"""Metaclass for dynamic parent classes."""
copyreg.pickle(
Meta,
lambda c: (
Table.Meta,
(c.__name__, c.__bases__, {}),
),
)
@typing.overload
def __new__( # pylint: disable=bad-classmethod-argument
mcs,
name: str,
bases: tuple[type],
namespace: dict[str, typing.Any],
):
"""Meta-class mode constructor.
Args:
name: Table class name.
bases: Table base classes.
namespace: Class namespace container.
"""
@typing.overload
def __new__(cls, schema: 'dsl.Source.Schema'):
"""Standard class mode constructor.
Args:
schema: Table *schema* type.
"""
def __new__(mcs, schema, bases=None, namespace=None): # pylint: disable=bad-classmethod-argument
if isinstance(schema, str): # used as metaclass
if bases:
bases = tuple(b.schema for b in bases if isinstance(b, Table))
# strip the parent base class and namespace
mcs = mcs.Meta(schema, mcs.__bases__, {}) # pylint: disable=self-cls-assignment
elif not any(isinstance(a, _struct.Field) for a in namespace.values()):
# used as a base class definition - let's propagate the namespace
mcs = mcs.Meta(schema, (mcs,), namespace) # pylint: disable=self-cls-assignment
schema = mcs.Schema(schema, bases, namespace)
elif bases or namespace:
raise TypeError('Unexpected use of schema table')
return super().__new__(mcs, schema) # used as constructor
def __repr__(self):
return self.schema.__name__
@property
def schema(self) -> 'dsl.Source.Schema':
return self[0]
@functools.cached_property
def features(self) -> typing.Sequence['dsl.Column']:
return tuple(series.Column(self, f.name) for f in self.schema)
def accept(self, visitor: 'dsl.Source.Visitor') -> None:
visitor.visit_table(self)
class Query(Queryable, Statement):
"""Query based *Source*.
Container for holding all the parameters supplied via the :class:`dsl.Queryable
<forml.io.dsl.Queryable>` interface.
Attention:
Instances are expected to be created internally via the ``dsl.Queryable`` interface methods.
"""
source: 'dsl.Source' = property(operator.itemgetter(0))
"""Base *Source* to query *FROM*."""
selection: tuple['dsl.Feature'] = property(operator.itemgetter(1))
"""Result projection features."""
prefilter: typing.Optional['dsl.Predicate'] = property(operator.itemgetter(2))
"""Row-filtering condition to be applied before potential aggregations."""
grouping: tuple['dsl.Operable'] = property(operator.itemgetter(3))
"""Aggregation grouping specifiers."""
postfilter: typing.Optional['dsl.Predicate'] = property(operator.itemgetter(4))
"""Row-filtering condition to be applied after aggregations."""
ordering: tuple['dsl.Ordering'] = property(operator.itemgetter(5))
"""Ordering specifiers."""
rows: typing.Optional['dsl.Rows'] = property(operator.itemgetter(6))
"""Row restriction limit."""
def __new__(
cls,
source: 'dsl.Source',
selection: typing.Optional[typing.Iterable['dsl.Feature']] = None,
prefilter: typing.Optional['dsl.Predicate'] = None,
grouping: typing.Optional[typing.Iterable['dsl.Operable']] = None,
postfilter: typing.Optional['dsl.Predicate'] = None,
ordering: typing.Optional[typing.Sequence['dsl.Ordering.Term']] = None,
rows: typing.Optional['dsl.Rows'] = None,
):
def ensure_subset(*features: 'dsl.Feature') -> typing.Sequence['dsl.Feature']:
"""Ensure the provided features is a valid subset of the available Source features.
Args:
*features: List of features to validate.
Returns:
Original list of features if all valid.
"""
if not series.Element.dissect(*features).issubset(superset):
raise _exception.GrammarError(f'{features} not a subset of source features: {superset}')
return features
superset = series.Element.dissect(*source.features)
selection = tuple(ensure_subset(*(series.Feature.ensure_is(c) for c in selection or [])))
if prefilter is not None:
prefilter = series.Cumulative.ensure_notin(
series.Predicate.ensure_is(*ensure_subset(series.Operable.ensure_is(prefilter)))
)
if grouping:
grouping = ensure_subset(*(series.Cumulative.ensure_notin(series.Operable.ensure_is(g)) for g in grouping))
for aggregate in {c.operable for c in selection or source.features}.difference(grouping):
series.Aggregate.ensure_in(aggregate)
if postfilter is not None:
postfilter = series.Window.ensure_notin(
series.Predicate.ensure_is(*ensure_subset(series.Operable.ensure_is(postfilter)))
)
ordering = tuple(series.Ordering.make(*(ordering or [])))
ensure_subset(*(o.feature for o in ordering))
return super().__new__(cls, source, selection, prefilter, tuple(grouping or []), postfilter, ordering, rows)
def __repr__(self):
value = repr(self.source)
if self.selection:
value += f'[{", ".join(repr(c) for c in self.selection)}]'
if self.prefilter:
value += f'.where({repr(self.prefilter)})'
if self.grouping:
value += f'.groupby({", ".join(repr(c) for c in self.grouping)})'
if self.postfilter:
value += f'.having({repr(self.postfilter)})'
if self.ordering:
value += f'.orderby({", ".join(repr(c) for c in self.ordering)})'
if self.rows:
value += f'[{repr(self.rows)}]'
return value
@property
def query(self) -> 'dsl.Query':
return self
@functools.cached_property
def features(self) -> typing.Sequence['dsl.Feature']:
"""Get the list of features supplied by this query.
Returns:
A sequence of supplying features.
"""
return self.selection if self.selection else self.source.features
def accept(self, visitor: 'dsl.Source.Visitor') -> None:
visitor.visit_query(self)
def select(self, *features: 'dsl.Feature') -> 'dsl.Query':
return Query(self.source, features, self.prefilter, self.grouping, self.postfilter, self.ordering, self.rows)
def where(self, condition: 'dsl.Predicate') -> 'dsl.Query':
if self.prefilter is not None:
condition &= self.prefilter
return Query(self.source, self.selection, condition, self.grouping, self.postfilter, self.ordering, self.rows)
def having(self, condition: 'dsl.Predicate') -> 'dsl.Query':
if self.postfilter is not None:
condition &= self.postfilter
return Query(self.source, self.selection, self.prefilter, self.grouping, condition, self.ordering, self.rows)
def groupby(self, *features: 'dsl.Operable') -> 'dsl.Query':
return Query(self.source, self.selection, self.prefilter, features, self.postfilter, self.ordering, self.rows)
def orderby(self, *terms: 'dsl.Ordering.Term') -> 'dsl.Query':
return Query(self.source, self.selection, self.prefilter, self.grouping, self.postfilter, terms, self.rows)
def limit(self, count: int, offset: int = 0) -> 'dsl.Query':
return Query(
self.source,
self.selection,
self.prefilter,
self.grouping,
self.postfilter,
self.ordering,
Rows(count, offset),
)
| formlio/forml | forml/io/dsl/_struct/frame.py | frame.py | py | 30,727 | python | en | code | 103 | github-code | 36 |
def fun(s):
    """Return True if *s* is a valid email address, else False.

    A valid address is ``username@websitename.extension`` where the username
    may contain letters, digits, ``-`` and ``_``; the website name only
    letters and digits; and the extension only letters, at most 3 of them.
    """
    import re
    # Anchored pattern enforcing exactly one '@' and one '.' separator.
    # The original translate()-based check silently discarded any parts after
    # the first '.' (e.g. 'a@b.c.d' was accepted); this rejects them.
    return re.match(r'^[A-Za-z0-9_-]+@[A-Za-z0-9]+\.[A-Za-z]{1,3}$', s) is not None
def filter_mail(emails):
    """Keep only the addresses that pass the ``fun`` validity check."""
    return [email for email in emails if fun(email)]
if __name__ == '__main__':
    # Read the address count followed by one address per line.
    n = int(input())
    emails = [input() for _ in range(n)]
    filtered_emails = sorted(filter_mail(emails))
    print(filtered_emails)
| sidorkinandrew/python-coding | hackerrank/functionals_01_map_and_lambda.py | functionals_01_map_and_lambda.py | py | 1,092 | python | en | code | 0 | github-code | 36 |
27479122996 | import machine
import socket
import network
import uhashlib
import ubinascii
# Configurações do dispositivo
LED_PIN = 2
MANUFACTURER = "ESP32-SmartLock"
MODEL = "SSL01"
DEVICE_ID = "BE4ADCD4-E8FA-40B0-ACB5-2B2B25B5B9"
ESSID = "ESP32-SmartLock-06"
WIFI_PASS = "12345678"
DEFAULT_TOKEN = "ACB5"
TOKEN_FILE = "token.txt"
SECRET_KEY = "minhachave"
led = machine.Pin(LED_PIN, machine.Pin.OUT)
def hash_token(token):
    """Return the hex SHA-256 digest of token+SECRET_KEY (a salted token hash)."""
    h = uhashlib.sha256()
    # Hash functions operate on bytes; encode explicitly so this works on
    # both MicroPython and CPython (feeding a plain str is not portable).
    h.update((token + SECRET_KEY).encode())
    return ubinascii.hexlify(h.digest()).decode()
def load_token():
    """Return the stored hashed token, or the hashed default if none is saved."""
    try:
        with open(TOKEN_FILE, 'r') as token_file:
            return token_file.read().strip()
    except OSError:
        # Missing/unreadable token file - fall back to the factory default.
        return hash_token(DEFAULT_TOKEN)
def update_token(new_token):
    """Hash *new_token* and persist the digest to TOKEN_FILE."""
    with open(TOKEN_FILE, 'w') as token_file:
        token_file.write(hash_token(new_token))
def init_wifi():
    """Start a WiFi access point with the configured ESSID and password."""
    ap = network.WLAN(network.AP_IF)
    ap.active(True)
    ap.config(essid=ESSID, password=WIFI_PASS)
    # User-facing message is intentionally in Portuguese ("network created,
    # connect to ... with password ...").
    print(f'Rede criada. Conecte-se a "{ESSID}" com a senha "{WIFI_PASS}".')
def get_connected_devices():
    """Return the stations currently connected to the access point.

    NOTE(review): ``WLAN.status('stations')`` is ESP32-port specific; the
    caller treats each entry as a tuple whose first element is the MAC
    address bytes - verify against the target MicroPython build.
    """
    ap = network.WLAN(network.AP_IF)
    return ap.status('stations')
def generate_connected_devices_page():
    """Render a minimal HTML page listing the MAC addresses of connected stations."""
    macs = []
    for station in get_connected_devices():
        # station[0] holds the raw MAC bytes; format as colon-separated hex.
        macs.append(":".join("{:02x}".format(octet) for octet in station[0]))
    devices_list = "<br>".join(macs)
    html = f"""
    <html>
    <head>
    <title>Connected Devices</title>
    </head>
    <body>
    <h1>Connected Devices</h1>
    <p>{devices_list}</p>
    </body>
    </html>
    """
    return html
def _form_field(request, name):
    """Extract a form field value from the raw request text.

    Returns '' when the field is absent. Correctly handles the field being
    the last one in the body: the original code sliced up to
    ``request.find('&') == -1``, silently dropping the value's final
    character (the ``new_token`` form always hit this).
    """
    marker = name + "="
    start_idx = request.find(marker)
    if start_idx == -1:
        return ""
    start_idx += len(marker)
    end_idx = request.find("&", start_idx)
    if end_idx == -1:
        end_idx = len(request)
    return request[start_idx:end_idx].strip()


def handle_request(request):
    """Dispatch one raw HTTP request string and return an (action, payload) pair.

    *action* is either ``"redirect"`` (client gets a 302 back to "/") or
    ``"content"`` (*payload* is an HTML page body).
    """
    stored_token = load_token()
    if "POST /update_token" in request:
        update_token(_form_field(request, "new_token"))
        return "redirect", "Token updated successfully!"
    if "POST /" in request:
        provided_token = _form_field(request, "token")
        # Compare salted hashes so the plain token is never kept around.
        if hash_token(provided_token) == stored_token:
            if "led=on" in request:
                led.value(0)  # NOTE(review): 'on' drives the pin low - wiring appears active-low
            elif "led=off" in request:
                led.value(1)
            return "redirect", "LED command processed!"
        return "redirect", "Unauthorized request!"
    if "GET /connected_devices" in request:
        return "content", generate_connected_devices_page()
    return "content", generate_html_page()
def generate_html_page():
    """Render the main control page: device info, LED on/off forms, token update form."""
    device_info = f"""
    Manufacturer: {MANUFACTURER}<br>
    Model: {MODEL}<br>
    Device ID: {DEVICE_ID}
    """
    html = f"""
    <html>
    <head>
    <title>{ESSID}</title>
    <style>
    button {{
    font-size: 30px;
    margin: 10px;
    cursor: pointer;
    width: 300px;
    height: 70px;
    }}
    .green {{
    background-color: green;
    color: white;
    }}
    .red {{
    background-color: red;
    color: white;
    }}
    </style>
    </head>
    <body>
    <h1>{ESSID}</h1><br>
    <p><strong>Device Info:</strong></p>
    <p>{device_info}</p><br>
    <form action="/" method="post">
    <input type="text" name="token" placeholder="Security token" required><br><br>
    <button class="green" name="led" value="on">Ligar LED</button>
    </form>
    <form action="/" method="post">
    <input type="text" name="token" placeholder="Security token" required><br><br>
    <button class="red" name="led" value="off">Desligar LED</button>
    </form>
    <form action="/update_token" method="post">
    <input type="text" name="new_token" placeholder="New token" required><br><br>
    <button type="submit">Update Token</button>
    </form>
    <a href="/connected_devices">View connected devices</a>
    </body>
    </html>
    """
    return html
def main():
    """Bring up the access point and serve HTTP requests forever."""
    init_wifi()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without 'address already in use' errors.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('', 80))
    s.listen(5)
    while True:
        conn, addr = s.accept()
        try:
            request = conn.recv(1024).decode("utf-8")
            action, response = handle_request(request)
            if action == "redirect":
                conn.send('HTTP/1.1 302 Found\r\nLocation: /\r\n\r\n')
            else:
                conn.send('HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n')
                conn.send(response)
        finally:
            # Always release the client socket, even if handling failed
            # (the original leaked the connection on any exception).
            conn.close()
if __name__ == "__main__":
main()
| elderofz1on/ZionArchive | Projetos/SmartLock/SmartLock06.py | SmartLock06.py | py | 4,817 | python | en | code | 0 | github-code | 36 |
39989734557 | #%%
import os
from transformers import pipeline, AutoTokenizer
def EvalModel(modelname, input_words, author_name=None, out_lines_number=None, temperature=None):
    """Generate text from a locally fine-tuned GPT-2 model seeded with *input_words*.

    NOTE(review): ``author_name``, ``out_lines_number`` and ``temperature``
    are accepted for interface compatibility but are currently unused.
    """
    model_path = os.path.join("models", modelname)
    generator = pipeline(
        "text-generation",
        model=model_path,
        tokenizer=AutoTokenizer.from_pretrained("GroNLP/gpt2-small-italian"),
    )
    generated = generator(input_words)[0]['generated_text']
    # Tidy the spacing the tokenizer leaves before sentence-final periods.
    return generated.replace(" .", ".")
22219470819 | import random
def modular_exponential(a, d, n):
    """Compute (a ** d) % n by recursive exponentiation by squaring.

    Args:
        a: Base (non-negative integer).
        d: Exponent (non-negative integer).
        n: Modulus (positive integer).

    Returns:
        (a ** d) % n.
    """
    if d == 0:
        return 1
    if a == 0:
        return 0
    if d % 2 == 0:
        # Even exponent: square the half-power result. Floor division is
        # essential - the original 'd / 2' produced floats under Python 3,
        # silently losing precision for large exponents.
        x = modular_exponential(a, d // 2, n)
        x = (x * x) % n
    else:
        # Odd exponent: peel off one factor of a.
        x = modular_exponential(a, d - 1, n)
        x = ((a % n) * (x % n)) % n
    return (x + n) % n
def miller_rabin(n, d, r):
    """Run one Miller-Rabin round for odd n > 3, where n - 1 == d * 2**r (d odd).

    Returns True when n passes this round (probably prime), False when the
    randomly chosen base witnesses that n is composite.
    """
    # Choose a random base.
    a = random.randint(2, n - 2)
    x = pow(a, d, n)  # built-in 3-arg pow: modular exponentiation in C
    if x == 1 or x == n - 1:
        return True
    # Square x up to r - 1 more times, looking for n - 1. The original
    # computed modular_exponential(x, x, n), i.e. x**x mod n, instead of
    # the required squaring x**2 mod n.
    for _ in range(r - 1):
        x = (x * x) % n
        if x == n - 1:
            return True
        if x == 1:
            # Nontrivial square root of 1 found - n is composite.
            return False
    return False
def fermat(n, k):
    """Fermat primality test: k rounds, each with a fresh random base.

    Args:
        n: Odd integer >= 5 to test.
        k: Number of independent rounds.

    Returns:
        True when n passes every round (probably prime), False otherwise.

    Note:
        Carmichael numbers (e.g. 561) can fool this test for every coprime
        base; use Miller-Rabin when that matters.
    """
    # The original picked a single base and re-checked the same cached
    # result k times, so k rounds gave no more confidence than one.
    for _ in range(k):
        a = random.randint(2, n - 2)
        if pow(a, n - 1, n) != 1:
            return False
    return True
def primality(n):
    """Probabilistically decide whether n is prime.

    Small and even cases are resolved directly; odd n >= 5 is handed to
    k rounds of the Fermat test.
    """
    # Guard clauses for the trivial cases.
    if n <= 1:
        return False
    if n == 2 or n == 3:
        return True
    if n % 2 == 0:
        return False
    # Number of Fermat rounds; the unused Miller-Rabin variant that used to
    # live here as commented-out code has been removed (it is kept in VCS).
    k = 20
    return fermat(n, k)
def main():
    """Read a test count, then one integer per line; print YES/NO for primality."""
    tests = int(input())
    for _ in range(tests):
        candidate = int(input())
        print('YES' if primality(candidate) else 'NO')
if __name__ == '__main__':
    main()
| Vitorka/maratona | PrimeorNot.py | PrimeorNot.py | py | 1,955 | python | en | code | 0 | github-code | 36 |
72387880423 | # -*- coding:utf-8 -*-
from scrapy import Spider
from scrapy.selector import Selector
from qhpage.items import QhpageItem
class QhpageSpider(Spider):
    """Scrape question summaries from Stack Overflow's newest-questions listing."""

    name = "qhpage"
    allowed_domains = ["stackoverflow.com"]
    start_urls = [
        "http://stackoverflow.com/questions?pagesize=50&sort=newest",
    ]

    def parse(self, response):
        """Yield one QhpageItem per question summary block on the page."""
        # Item field -> relative xpath within one summary <div>.
        field_paths = (
            ('name', './h3/a[@class="question-hyperlink"]/text()'),
            ('url', './h3/a[@class="question-hyperlink"]/@href'),
            ('title', './div[@class="excerpt"]/text()'),
        )
        for summary in Selector(response).xpath('//div[@class="summary"]'):
            item = QhpageItem()
            for field, path in field_paths:
                item[field] = summary.xpath(path).extract()[0]
            yield item
| yangaoquan/qhpage | qhpage/spiders/qhspider.py | qhspider.py | py | 796 | python | en | code | 2 | github-code | 36 |
38790072566 | """Module for Bresenham kernel"""
import numpy as np
from copa_map.util.occ_grid import OccGrid
import cv2
class KernelGrid(OccGrid):
    """Class for creating an occupation map with widened walls"""
    def __init__(self, base_occ_map: OccGrid, digitize_size=0.2, num_of_borders=2):
        """
        Constructor
        Args:
            base_occ_map: Occupancy grid map to use as basis of the kernel. The Kernel grid will have the same
            dimension and origin as the map
            digitize_size: Discretization size for grid bins
            num_of_borders: Number of cells around occupied cells, from which covariance factor increases linearly
            from 0 to 1
        """
        # We do not need the full map resolution, so we resize the image based on the given parameter
        assert digitize_size >= base_occ_map.resolution,\
            "Kernel map discretization should be larger than Occupancy grid map resolution"
        # Rescale the occupancy map
        # Nearest-neighbour resampling keeps cells strictly occupied/free
        # (no interpolated grey values at this stage).
        new_img_size = (np.array(base_occ_map.img.shape) * base_occ_map.resolution / digitize_size).astype(int)
        new_img = cv2.resize(base_occ_map.img, dsize=(new_img_size[1], new_img_size[0]),
                             interpolation=cv2.INTER_NEAREST_EXACT)
        super(KernelGrid, self).__init__(img=new_img,
                                         width=base_occ_map.width,
                                         height=base_occ_map.height,
                                         resolution=digitize_size,
                                         origin=base_occ_map.orig,
                                         rotation=base_occ_map.rotation,
                                         )
        self.digitize_size = digitize_size
        self.num_of_borders = num_of_borders
        self._create_map()
    def _create_map(self):
        """
        Creates a grid array characterizing walls and cells near walls
        Reads the map and creates cells with the defined digitize_size, where walls are classified with 0
        and free cells with 1. The values of surrounding cells increase linearly to 1 depending on the
        number of neighboring cells num_of_borders
        """
        # Create kernel for dilation. Every pixels 8-neighbors should be extended
        kernel = np.ones((3, 3), np.uint8)
        # Get factor between extension border which determines the occupancy
        # Interpolates linearly so that every border increases occupancy by same amount
        increment = 1 / (self.num_of_borders + 1)
        # NOTE(review): adj_img and dil_img both alias self.img here, so the
        # in-place assignment at the bottom of the loop mutates self.img
        # directly; cv2.dilate rebinds dil_img to a fresh array after the
        # first pass.  Confirm this aliasing is intended.
        adj_img = dil_img = self.img
        # Extend the wall pixels by dilating the image, then multiplying with the respective factor for occupancy
        # reduction
        for i in np.arange(0, 1, increment):
            if i == 0:
                continue
            # Dilate the image from last iteration by one more border
            # Our map has zeros where we want to extend, so we need to use the inverse
            # (~ is uint8 bitwise inversion, i.e. 255 - value)
            dil_img = cv2.dilate(~dil_img, kernel)
            dil_img = ~dil_img
            # Change the pixels of the new border, where the old image was still white (255) and the new
            # is now black (0)
            adj_img[np.logical_and(dil_img == 0, adj_img == 255)] = i * 255
        self.img = adj_img
        # Flip vertically so row 0 of the map corresponds to the map origin.
        self.map = np.flipud(adj_img.astype(float) / 255)
| MarvinStuede/copa-map | src/copa_map/kernel/kernel_grid.py | kernel_grid.py | py | 3,359 | python | en | code | 0 | github-code | 36 |
21290035402 | import sqlalchemy as sqla
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, sql
import psycopg2
import re
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patch
from matplotlib.patches import Patch
import matplotlib.lines as mlines
import numpy as np
import math
def schanger(x, s1, s2):
    """Return s2 when x occurs in s1, otherwise return x unchanged."""
    return s2 if x in s1 else x
# Build an insert/update/delete SQL statement string
def parser(table, opera, ins=None, wher=None):
    """Build an INSERT / UPDATE / DELETE statement string for *table*.

    opera: 'insert', 'update' or 'delete'.
    ins:   column -> value mapping for insert/update; empty-string values
           are dropped (the dict is mutated in place).
    wher:  column -> value mapping for the WHERE clause; the generated or
           read-only columns hero_name / color (castle_on_map only) /
           estimated_power are stripped (also mutated in place).

    NOTE(review): values are interpolated directly into the SQL text —
    this is vulnerable to SQL injection if any value is user-controlled.
    """
    exc=""
    to_kill=[]
    # Drop ins entries whose value is an empty string.
    if (ins!=None):
        for x, y in ins.items():
            if (len(str(y))==0):
                to_kill.append(x)
        for x in to_kill:
            ins.pop(x, None)
    # Strip columns that must not appear in a WHERE clause.
    if (wher!=None):
        to_kill=[]
        for x, y in wher.items():
            if (str(x)=="hero_name" or (table=="castle_on_map" and str(x)=='color') or str(x)=='estimated_power'):
                to_kill.append(x)
        for x in to_kill:
            wher.pop(x, None)
    if (opera=='insert'):
        # str([...])[1:-1] renders the comma-separated list; schanger then
        # strips quotes from column names and normalizes value quoting.
        ins1str=str([x for x in ins.keys()])[1:-1]
        ins2str=str([x for x in ins.values()])[1:-1]
        ins1str="".join([schanger(x, "'\"", "") for x in ins1str])
        ins2str="".join([schanger(x, '"', "'") for x in ins2str])
        exc=opera+" into "+table+" ("+ins1str+")"+" values ("+ins2str+")"
    elif (opera=='update'):
        exc=opera+" "+table+" set "
        c=len(ins.items())
        for i, a in enumerate(ins.items()):
            x, y=a
            # str(y)==y distinguishes string values (quoted) from numbers.
            if (str(y)==y):
                exc=exc+x+"='"+y+"'"
            else:
                exc=exc+x+"="+str(y)
            if (c-1!=i):
                exc=exc+","
            exc=exc+" "
    elif (opera=='delete'):
        exc=opera+" from "+table+" "
    # WHERE clause applies only to update/delete statements.
    if (opera=="update" or opera=="delete"):
        c=len(wher.items())
        if (wher!=None and len(wher)>0):
            exc=exc+"where "
            for i, a in enumerate(wher.items()):
                x, y=a
                if (str(y)==y):
                    exc=exc+x+"='"+y+"'"
                else:
                    exc=exc+x+"="+str(y)
                if (c-1!=i):
                    exc=exc+" and "
    return exc+";"
# Convert a dict with mangled compound keys into a sensible flat dict
def dictvisioner(dct, alter=1):
    """Flatten compound 'prefix-k1-k2' keys with 'v1-v2' values into {k1: v1, k2: v2}.

    The 'which' key is always discarded.  With alter != 1 a copy of the input
    (minus 'which') is returned unchanged.
    """
    source = dict(dct)
    source.pop('which', None)
    if alter != 1:
        return source
    flattened = {}
    for compound_key, compound_value in source.items():
        # Drop everything up to and including the first '-' of the key.
        tail = compound_key[re.search('-', compound_key).span()[1]:]
        names = re.split('-', tail)
        values = re.split('-', compound_value)
        for name, value in zip(names, values):
            flattened[name] = value
    return flattened
# Query executor with user-facing error translation
def interactor(engine, query, tp=None, arg=[]):
    """Execute a SQL statement or stored procedure, translating DB errors.

    tp=None  -> execute *query* directly;
    tp='proc'  -> cursor.callproc(*arg);
    tp='procp' -> wrap arg[0] in an anonymous `do $$ ... $$` call block,
                  optionally passing arg[1] as a single literal parameter.
    Returns (e, comm): e == 0 on success, 1 on failure; comm is the
    user-facing message derived from the DB error text.

    NOTE(review): arg=[] is a mutable default argument (never mutated here,
    but arg=None would be safer).  The 'procp' branch interpolates the value
    straight into the SQL text — injection risk if untrusted.
    """
    # Primary-key constraint name -> user-facing message.
    _Dict_prime={
        'player_pkey':"Player already inserted!",
        'pk_castle_on_map':"Castle already exists on that point!",
        'pk_build_in_castle_on_map':"This building is already created in that castle!",
        'pk_army_connect':"This army already has some unit on that position!",
        'hero_pkey':"This hero name is already used!",
        'pk_point':"This point already exists!",
    }
    # Foreign-key constraint name -> user-facing message.
    _Dict_foreign={
        'fk_playh':"Player doesn't exist!",
        'fk_armyxy':"You tried to place army on non-existent point of map!",
        'fk_tohero':"This hero doesn't exist!",
        'fk_toarmy':"This army doesn't exist!",
        'fk_armycon':"This army doesn't exist!",
        'fk_unit_from_army':"This unit doesn't exist",
        'fk_castle_map_point':"You tried to place castle on non-existent point of map!" ,
        'fk_castle_merge':"You tried to create castle of non-existent type!",
        'fk_to_player':"Player does not exist!",
        'fk_castle_build_map':"This type of building doesn't exist for that castle!",
        'fk_xy_place':"You tried to attach building on non-existent point on map!",
    }
    e=1
    comm=""
    connection = engine.raw_connection()
    cursor = connection.cursor()
    try:
        if (tp==None):
            cursor.execute(query)
        elif (tp=='proc'):
            cursor.callproc(*arg)
        elif (tp=='procp'):
            if (len(arg[1])>0):
                pv=str(*arg[1])
                cursor.execute(f"do $$ begin call {arg[0]}('{pv}'); end $$;")
            else:
                cursor.execute(f"do $$ begin call {arg[0]}(); end $$;")
        cursor.close()
        connection.commit()
    except BaseException as ex:
        # Keep only the first line of the driver's error message.
        comm=ex.args[0][:re.search('\n', ex.args[0]).span()[0]]
        print(comm)
        if (re.search('too long for type character', comm)):
            comm="You cannot use names longer than 50 characters!"
        # Map constraint violations onto friendly messages by matching the
        # constraint name inside the raw error text.
        if (re.search('unique', comm)):
            for x in _Dict_prime.keys():
                if (re.search(x, comm)):
                    comm=_Dict_prime[x]
                    break
        elif (re.search("violates foreign key", comm)):
            for x in _Dict_foreign.keys():
                if (re.search(x, comm)):
                    comm=_Dict_foreign[x]
                    break
        elif (re.search("violates not-null constraint", comm)):
            if (re.search("id_army", comm)):
                comm="You must fill army id field!!"
            if (re.search("color", comm)):
                comm="You cannot create a hero without player!"
            if (re.search('"x"', comm) or re.search('"y"', comm)):
                comm="You cannot leave empty map coordinates!"
            if (re.search("castle", comm)):
                comm="You need to provide a castle type!"
            if (re.search("unit_name", comm)):
                comm="You need to provide unit name!"
    else:
        e=0
    finally:
        connection.close()
    return(e, comm)
# Data selection
def selector(engine, table, order=None, col=None):
    """Fetch all rows of *table*, optionally ordered by *col* *order*.

    Returns (rows, column_names).  For the 'player' table an extra
    'estimated_power' column is appended, computed from the DB function
    firepower().

    NOTE(review): table/col/order are concatenated into SQL — injection
    risk if any of them is user-controlled.
    """
    # Column names come from information_schema; x[3] is assumed to be the
    # column_name field of that result — TODO confirm against the driver.
    g=engine.execute("SELECT * FROM information_schema.columns WHERE table_name = '"+table+"'")
    cols=[]
    for x in g:
        cols.append(x[3])
    cols=cols[::-1]
    if (order==None and col==None):
        f=engine.execute(f'select {", ".join(cols)} from '+table)
    elif (order!=None):
        f=engine.execute(f'select {", ".join(cols)} from '+table+' order by '+col+' '+order)
    res=[]
    for x in f:
        res.append(x)
    # Append the generated firepower column for players
    if (table=='player'):
        cols.append("estimated_power")
        lst=[]
        for x in res:
            wn=engine.execute(f"select firepower('{x[0]}')")
            for y in wn:
                y=y[0]
            # len(y)>10 heuristically distinguishes a populated composite
            # value "(a,b,c,d,e,f)" from an empty one — confirm the format.
            if (len(y)>10):
                y=[int(z) for z in y[1:-1].split(',')]
                wnn=math.log((y[0]+y[1])/2*math.sqrt(y[4])+y[5]/100+(y[2]+y[3])/2)
            else:
                wnn=0
            #print(x[0], wn)
            lst.append([*x, wnn])
        return (lst, cols)
    return(res, cols)
# Build an HTML table from a selector() result (lst) and a table name (table)
def selhtmler(table, lst):
    """Render a selector() result as an HTML table.

    lst is a (rows, column_names) pair; *table* becomes the <table> id.
    Every header cell carries the sort-button widget below.
    """
    # Sort-button markup appended to each <th>.
    bth="""
    <div class="buth">
    <button class="sbtnd">
    v
    </button>
    <button class="sbtnu">
    v
    </button>
    </div>"""
    rows, columns = lst[0], lst[1]
    parts = ['<div class="wrapped"><table id=' + table + '><thead><tr>']
    parts.extend("<th>" + col + bth + "</th>" for col in columns)
    parts.append("</tr></thead>")
    parts.append("<tbody>")
    for row in rows:
        parts.append("<tr>")
        parts.extend(f"<td>{cell}</td>" for cell in row)
        parts.append("</tr>")
    parts.append("</tbody>")
    parts.append("</table>")
    parts.append("</div>")
    return ''.join(parts)
# Draw the two marker groups for one colour - armies and castles
def doubleprinter(ax, l1, l2, coll):
    """Scatter castles (l1, plus markers) and armies (l2, dots) in colour coll.

    l1/l2 are (xs, ys) coordinate pairs; empty sequences are skipped.
    """
    for coords, extra in ((l1, {'marker': 'P'}), (l2, {})):
        if len(coords) > 0:
            ax.scatter(coords[0], coords[1], color=coll, s=100, **extra)
# colorland maps each player name to the colour used for their markers;
# it is module-level so colours stay stable across successive map renders.
colorland={}
# Map builder for a given matplotlib axes object
def map_maker(engine, ax):
    """Draw the game map (castles + armies per player) onto matplotlib axes *ax*."""
    # Fetch castles, armies and map bounds from the DB
    csel=engine.execute('select x, y, color from castle_on_map')
    csel2=engine.execute('select a.x, a.y, h.color from army a left join hero h on a.hero_name=h.name')
    xm=engine.execute('select max(x), max(y) from point_on_map')
    # NOTE(review): conn scales the x extent (a value > 1 would leave a black
    # side panel via fill_between below); it is fixed at 1 here — confirm why.
    conn=1
    # Establish the map dimensions
    for k in xm:
        xmax, ymax=k[0], k[1]
    ax.set_xlim(0, conn*xmax)
    ax.set_ylim(0, ymax)
    # castel/here: dicts of point lists keyed by player colour, filled below
    hlegs=[]
    castel={}
    here={}
    for w in csel:
        # KeyError on first sight of a player -> start a new list.
        try:
            castel[w[2]].append((w[0], w[1]))
        except:
            castel[w[2]]=[(w[0], w[1])]
    for w in csel2:
        try:
            here[w[2]].append((w[0], w[1]))
        except:
            here[w[2]]=[(w[0], w[1])]
        if (not w[2] in castel):
            castel[w[2]]=[]
    for x, y in castel.items():
        # x - player colour key, y - castle points, z - army points
        if (not x in here.keys()):
            here[x]=[]
        z=here[x]
        lst=list(zip(*y))
        lst2=list(zip(*z))
        # First try the cached colour; on KeyError fall back to the player
        # name as a matplotlib colour, then to a random RGB triple.
        try:
            doubleprinter(ax, lst, lst2, colorland[x])
        except:
            if (x==None):
                clr='Grey'
            else:
                clr=str(x).lower()
            try:
                doubleprinter(ax, lst, lst2, clr)
                vs=clr
            except:
                vs=np.random.uniform(0, 1, 3)
                doubleprinter(ax, lst, lst2, vs)
            # Define a new colour for the player if none was cached yet
            colorland[x]=vs
        finally:
            hlegs.append(Patch(facecolor=colorland[x], alpha=1.0, label=f"Player: {x}"))
    hlegs.append(mlines.Line2D([], [], color='#FFFFFF', marker='P', markerfacecolor='#000000', markersize=15, label='Castle'))
    hlegs.append(mlines.Line2D([], [], color='#FFFFFF', marker='o', markerfacecolor='#000000', markersize=15, label='Hero'))
    ax.legend(handles=hlegs, loc=1, facecolor='#FFFFFF', shadow=1.0, prop={'size': 12})
    # Black panel covering the area beyond the real map width (no-op while conn == 1).
    ax.fill_between([xmax, conn*xmax], [0, 0], [ymax, ymax], color='#000000')
    ax.set_xticks(ax.get_xticks()[ax.get_xticks()<=xmax])
# Function that creates/updates the rendered map image
def inserto_creato_mapo(engine, arg='n'):
    """Render the current game map to 'arda.png' next to this module.

    With arg == 'n' (the default) the map contents are drawn; any other value
    saves an empty figure of the same size.
    """
    figure, axes = plt.subplots(1, 1, figsize=(24, 18))
    if arg == 'n':
        map_maker(engine, axes)
    target = os.path.join(os.path.dirname(__file__), 'arda.png')
    plt.savefig(target, bbox_inches='tight')
    plt.close()
| JonothorDarry/PostgreProject | flaskk/alchlib.py | alchlib.py | py | 10,498 | python | en | code | 0 | github-code | 36 |
41810977534 | # Reid Nguyen | Number Game | 02/01/2023
import random
randNum = random.randint (1,100)
# NOTE(review): printing the answer at startup looks like a debug leftover.
print (randNum)
atemmpt = 0
play = True
guess = input("Welcome to Number Guesser, Try to Guess a Number Between 1 and 100 in as Few Tries as Posible: ")
inLoop = True
# NOTE(review): guess is a str and randNum an int, so this condition is
# always True; the outer loop never exits via its condition.  After the
# player declines a replay (play = False, inLoop = False) the program spins
# here forever — confirm and fix the loop conditions.
while guess != randNum:
    # NOTE(review): this validation runs only before (re-)entering the inner
    # loop; guesses typed inside the inner loop are never validated, so a
    # non-numeric "New Guess" crashes int(guess) below.
    if (str(guess).isdigit() == False):
        print("It Must Be a Number")
    elif int(guess)<1:
        print("The Number Must Be Between 1 and 100")
    elif int(guess)>100:
        print("The Number Must Be Between 1 and 100")
    while inLoop == True:
        if int(guess) > randNum:
            print(guess + " Is Too High")
            atemmpt += 1
            guess = input("New Guess:")
        elif int(guess) < randNum:
            print(guess + " Is Too Low")
            guess = input ("New Guess:")
            atemmpt += 1
        elif int(guess) == randNum:
            inLoop = False
            print ("You Guess the Number in " + str(atemmpt) + " tries")
            atemmpt += 1
            if input("Want to Play Again? (y/n): ") == "n":
                # NOTE(review): play is set but never read by either loop.
                play = False
                print("Thanks For Playing")
            else:
                # Reset state for a fresh round.
                randNum = random.randint (1,100)
                atemmpt = 0
                inLoop = True
                guess = input("Welcome to Number Guesser, Try to Guess a Number Between 1 and 100 in as Few Tries as Posible: ")
| Reid-Dzung/A-3-Programming-Portfolio | src/NumberGame.py | NumberGame.py | py | 1,224 | python | en | code | 0 | github-code | 36 |
31773538373 | import pygame
pygame.init()
# 800x600 window; tela = screen surface.
tela = pygame.display.set_mode((800, 600))
imgNave = pygame.image.load("Spacepack/Rocket.png")
imgNave = pygame.transform.scale(imgNave, (200,100))
imgUFO = pygame.image.load("Spacepack/UFOBoss.png")
imgUFO = pygame.transform.scale(imgUFO, (200,200))
rect_nave = imgNave.get_rect()
rect_ufo = imgUFO.get_rect()
# Initial UFO position (offset from the top-left corner).
posicao = (400, 300)
rect_ufo = rect_ufo.move(posicao)
clock = pygame.time.Clock()
# Pixels per frame for the player ship and the UFO.
velocidadeNave = 7
velocidadeUFO = 5
while True:
    # Move the UFO horizontally, bouncing (and flipping the sprite) at the edges.
    rect_ufo.move_ip(velocidadeUFO, 0)
    if rect_ufo.right > 800 or rect_ufo.left < 0:
        velocidadeUFO *= -1
        imgUFO = pygame.transform.flip(imgUFO, True, False)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
    # WASD keyboard movement for the player ship.
    tecla = pygame.key.get_pressed()
    if tecla[pygame.K_d]:
        rect_nave.move_ip(velocidadeNave, 0)
    if tecla[pygame.K_a]:
        rect_nave.move_ip(-velocidadeNave, 0)
    if tecla[pygame.K_w]:
        rect_nave.move_ip(0, -velocidadeNave)
    if tecla[pygame.K_s]:
        rect_nave.move_ip(0, velocidadeNave)
    # Collision with the UFO ends the game after a 2 s game-over screen.
    if rect_nave.colliderect(rect_ufo):
        tela.fill((255,0,0))
        fonte = pygame.font.SysFont("arial", 48)
        txtGameOver = fonte.render("GAME OVER!", True, (255,255,255))
        tela.blit(txtGameOver,(400, 300))
        pygame.display.update()
        pygame.time.delay(2000)
        pygame.quit()
        exit()
    # Redraw the frame.
    tela.fill((0,0,0))
    tela.blit(imgNave, rect_nave)
    tela.blit(imgUFO, rect_ufo)
    pygame.display.update()
    clock.tick(60)
27697116 | from enum import Enum
from typing import List
from datetime import datetime
from typing import Optional
class FileType(Enum):
    """Discriminator for the concrete File subclass; its value is stored in
    the serialized "type" field and dispatched on by File.from_dict_factory."""
    PDF = "pdf"
    LINK = "link"
    DIRECTORY = "directory"
    GITHUB = "github"
    GENERIC = "generic"
class File:
    """Base record for one entry in the virtual file system.

    Serialization is symmetric: to_dict() emits ISO-8601 timestamps and the
    FileType value; from_dict()/from_dict_factory() rebuild instances.
    """

    def __init__(
        self,
        id: str,
        name: str,
        type: FileType,
        parent_id: str,
        path: str,
        created_at: datetime,
        updated_at: datetime,
        tags: List[str],
        processed: bool,
        summary: Optional[str] = None,
        index_id: Optional[str] = None,
    ):
        self.id = id
        self.name = name
        self.type = type
        self.parent_id = parent_id
        self.path = path
        self.created_at = created_at
        self.updated_at = updated_at
        self.tags = tags
        self.processed = processed
        self.summary = summary
        self.index_id = index_id

    @staticmethod
    def from_dict_factory(data: dict):
        """Instantiate the concrete subclass selected by data['type']."""
        subclass_by_type = {
            FileType.DIRECTORY: Directory,
            FileType.PDF: PdfFile,
            FileType.LINK: LinkFile,
            FileType.GITHUB: GithubFile,
        }
        target = subclass_by_type.get(FileType(data.get("type")), File)
        return target.from_dict(data)

    def to_dict(self) -> dict:
        """Serialize to plain JSON-compatible types."""
        return {
            "id": self.id,
            "name": self.name,
            "type": self.type.value,
            "parent_id": self.parent_id,
            "path": self.path,
            "created_at": self.created_at.isoformat(),
            "updated_at": self.updated_at.isoformat(),
            "tags": self.tags,
            "processed": self.processed,
            "index_id": self.index_id,
            "summary": self.summary,
        }

    @classmethod
    def from_dict(cls, data: dict):
        """Inverse of to_dict()."""
        passthrough = {key: data[key]
                       for key in ("id", "name", "parent_id", "path", "tags", "processed")}
        return cls(
            type=FileType(data["type"]),
            created_at=datetime.fromisoformat(data["created_at"]),
            updated_at=datetime.fromisoformat(data["updated_at"]),
            index_id=data.get("index_id"),
            summary=data.get("summary"),
            **passthrough,
        )
class PdfFile(File):
    """File backed by a stored PDF; fs_id references the blob in storage."""

    def __init__(self, fs_id: Optional[str] = None, **kwargs):
        super().__init__(type=FileType.PDF, **kwargs)
        self.fs_id = fs_id

    def to_dict(self) -> dict:
        """Serialize like File, plus the fs_id storage reference."""
        payload = super().to_dict()
        payload["fs_id"] = self.fs_id
        return payload

    @classmethod
    def from_dict(cls, data: dict):
        """Inverse of to_dict() for PDF entries; fs_id may be absent."""
        passthrough = {key: data[key]
                       for key in ("id", "name", "parent_id", "path", "tags", "processed")}
        return cls(
            fs_id=data.get("fs_id"),
            created_at=datetime.fromisoformat(data["created_at"]),
            updated_at=datetime.fromisoformat(data["updated_at"]),
            index_id=data.get("index_id"),
            summary=data.get("summary"),
            **passthrough,
        )
class LinkFile(File):
    """A bookmarked URL stored in the file system."""

    def __init__(self, url: str, **kwargs):
        super().__init__(type=FileType.LINK, **kwargs)
        self.url = url

    def to_dict(self) -> dict:
        """Serialize like File, plus the bookmarked URL."""
        payload = super().to_dict()
        payload["url"] = self.url
        return payload

    @classmethod
    def from_dict(cls, data: dict):
        """Inverse of to_dict() for link entries."""
        passthrough = {key: data[key]
                       for key in ("id", "name", "parent_id", "path", "tags", "processed")}
        return cls(
            url=data["url"],
            created_at=datetime.fromisoformat(data["created_at"]),
            updated_at=datetime.fromisoformat(data["updated_at"]),
            index_id=data.get("index_id"),
            summary=data.get("summary"),
            **passthrough,
        )
class GithubFile(File):
    """A GitHub repository link stored in the file system."""

    def __init__(self, url: str, **kwargs):
        super().__init__(type=FileType.GITHUB, **kwargs)
        self.url = url

    def to_dict(self) -> dict:
        """Serialize like File, plus the repository URL."""
        payload = super().to_dict()
        payload["url"] = self.url
        return payload

    @classmethod
    def from_dict(cls, data: dict):
        """Inverse of to_dict() for GitHub entries."""
        passthrough = {key: data[key]
                       for key in ("id", "name", "parent_id", "path", "tags", "processed")}
        return cls(
            url=data["url"],
            created_at=datetime.fromisoformat(data["created_at"]),
            updated_at=datetime.fromisoformat(data["updated_at"]),
            index_id=data.get("index_id"),
            summary=data.get("summary"),
            **passthrough,
        )
class Directory(File):
    """Container node of the virtual file system.

    NOTE(review): unlike the other subclasses, this does not pin
    type=FileType.DIRECTORY in __init__ — callers must pass it.  Confirm
    whether that is intended.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
| Kitenite/llm-kb | server/src/datasource/file_system.py | file_system.py | py | 5,030 | python | en | code | 31 | github-code | 36 |
14076267942 | DATA_FILE = './data/day21.txt'
def get_allergens(line):
    """Extract the allergen names from the '(contains a, b)' suffix of a line."""
    start = line.index('contains') + 8
    end = line.index(')')
    compact = line[start:end].replace(' ', '')
    return compact.split(',')
def get_foods(line):
    """Return the set of ingredient words preceding the ' (' marker."""
    ingredients_part = line[:line.index(' (')]
    return set(ingredients_part.split(' '))
def get_allergen_map(lines):
    """Map each allergen to the intersection of the food sets it appeared with."""
    candidates = {}
    for line in lines:
        foods = get_foods(line)
        for allergen in get_allergens(line):
            if allergen in candidates:
                candidates[allergen] = candidates[allergen] & foods
            else:
                candidates[allergen] = foods
    return candidates
def get_foods_with_allergies(allergen_map):
    """Union of every food suspected of carrying some allergen."""
    suspected_sets = allergen_map.values()
    return set.union(*suspected_sets)
def get_all_foods(lines):
    """Count how many input lines each food appears on.

    Returns a mapping food -> appearance count.  Uses collections.Counter
    (a dict subclass, so callers keep plain-dict semantics) instead of the
    original manual membership-check/increment bookkeeping.
    """
    from collections import Counter
    counts = Counter()
    for line in lines:
        # get_foods returns a set, so each food counts once per line.
        counts.update(get_foods(line))
    return counts
def get_appearances(foods, appearance_map):
    """Sum the appearance counts of the given foods.

    Idiom fix: a generator expression with sum() replaces the manual
    accumulator loop.
    """
    return sum(appearance_map[food] for food in foods)
def single_value(all_values):
    """True when every collection in all_values has exactly one element.

    Idiom fix: all() with a generator replaces the manual early-return loop
    (an empty input is vacuously True, as before).
    """
    return all(len(values) == 1 for values in all_values)
def get_dangerous_foods(allergen_map):
    """Iteratively eliminate candidates until each allergen maps to one food.

    Mutates and returns allergen_map itself.
    """
    remaining = allergen_map
    while not single_value(remaining.values()):
        remove_singletons_from_other_allergens(remaining)
    return remaining
def remove_singletons_from_other_allergens(allergens):
    """One elimination pass over the allergen -> candidate-foods mapping.

    Any food already pinned to a single allergen is removed from every other
    allergen's candidate set (mutates *allergens* in place).

    Fix: read the lone element with next(iter(...)) instead of the original
    pop-then-re-add dance, which needlessly mutated the singleton set.
    """
    for allergen, foods in allergens.items():
        if len(foods) != 1:
            continue
        pinned_food = next(iter(foods))
        for other_allergen, candidates in allergens.items():
            if other_allergen != allergen and pinned_food in candidates:
                candidates.remove(pinned_food)
def get_sorted_foods_by_allergen(foods):
    """Join each allergen's remaining food, ordered alphabetically by allergen.

    Note: .pop() empties each singleton set as a side effect, matching the
    original behaviour.
    """
    ordered_allergens = sorted(foods)
    return ','.join(foods[name].pop() for name in ordered_allergens)
with open(DATA_FILE) as file:
    lines = file.readlines()
    all_foods = get_all_foods(lines)
    allergen_map = get_allergen_map(lines)
    foods_with_allergies = get_foods_with_allergies(allergen_map)
    # Part 1: count appearances of foods that can carry no allergen.
    foods_without_allergies = all_foods.keys() - foods_with_allergies
    appearances = get_appearances(foods_without_allergies, all_foods)
    print(f'Number of appearances of food without allergies: {appearances}')
    # Part 2: resolve each allergen to its unique food, sorted by allergen.
    dangerous_foods = get_dangerous_foods(allergen_map)
    sorted_dangerous_foods = get_sorted_foods_by_allergen(dangerous_foods)
    print(f'sorted_dangerous_foods: {sorted_dangerous_foods}')
| rnewstead1/advent-2020 | day21.py | day21.py | py | 2,810 | python | en | code | 0 | github-code | 36 |
6937535017 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["RotationModel"]
import numpy as np
from scipy.optimize import minimize
from scipy.linalg import cho_factor, cho_solve
import celerite
from celerite import modeling
from .pld import PLDModel
from .gp import get_simple_gp, get_rotation_gp
from .estimator import lomb_scargle_estimator, autocorr_estimator
class RotationModel(modeling.ModelSet):
    """Joint PLD (pixel-level decorrelation) + Gaussian-process model for
    estimating stellar rotation periods from pixel time series.

    t: times; F: per-pixel fluxes (time x pixel); yerr: flux uncertainty.
    Holds both a "simple" GP and a quasi-periodic "rotation" GP; the active
    one lives under the "gp" key of the underlying celerite ModelSet.
    NOTE(review): self.gp / self.pld attribute access below presumably comes
    from celerite.modeling.ModelSet exposing named sub-models — confirm.
    """
    def __init__(self, t, F, yerr, min_period=0.1, max_period=40.0,
                 lomb_scargle_kwargs=None, autocorr_kwargs=None,
                 **pld_kwargs):
        self.t = np.array(t)
        self.F = np.array(F)
        # SAP flux: sum over pixels at each time.
        self.fsap = np.sum(F, axis=1)
        self.yerr = yerr
        A = self.F / self.fsap[:, None]
        self.min_period = min_period
        self.max_period = max_period
        # Run 1st order PLD
        w = np.linalg.solve(np.dot(A.T, A), np.dot(A.T, self.fsap-1.0))
        self.fdet = self.fsap - np.dot(A, w)
        self.update_estimators(lomb_scargle_kwargs, autocorr_kwargs)
        # Set up the PLD model
        pld = PLDModel(self.t, self.F / self.fsap[:, None], **pld_kwargs)
        # Set up the GP model:
        self.simple_gp = get_simple_gp(self.t, self.fsap, yerr)
        self.rotation_gp = get_rotation_gp(self.t, self.fsap, yerr,
                                           self.lomb_scargle_period,
                                           min_period, max_period)
        super(RotationModel, self).__init__([("gp", self.simple_gp),
                                             ("pld", pld)])
        # Save the default parameters
        self.default_pld_vector = \
            pld.get_parameter_vector(include_frozen=True)
        self.default_simple_vector = \
            self.simple_gp.get_parameter_vector(include_frozen=True)
        self.default_rotation_vector = \
            self.rotation_gp.get_parameter_vector(include_frozen=True)
        # Set up an optimization cache
        self.model_cache = []
    def update_estimators(self, lomb_scargle_kwargs=None,
                          autocorr_kwargs=None):
        """Re-run the Lomb-Scargle and autocorrelation period estimators on
        the current detrended flux, caching the top-peak periods (falling
        back to max_period when no peak is found)."""
        # Estimate the periods
        if lomb_scargle_kwargs is None:
            lomb_scargle_kwargs = dict(filter_period=10.0)
        self.lomb_scargle_result = \
            lomb_scargle_estimator(self.t, self.fdet, self.yerr,
                                   self.min_period, self.max_period,
                                   **lomb_scargle_kwargs)
        peaks = self.lomb_scargle_result["peaks"]
        if len(peaks):
            self.lomb_scargle_period = peaks[0]["period"]
        else:
            self.lomb_scargle_period = self.max_period
        if autocorr_kwargs is None:
            autocorr_kwargs = {}
        self.autocorr_result = \
            autocorr_estimator(self.t, self.fdet, self.yerr,
                               self.min_period, self.max_period,
                               **autocorr_kwargs)
        peaks = self.autocorr_result["peaks"]
        if len(peaks):
            self.autocorr_period = peaks[0]["period"]
        else:
            self.autocorr_period = self.max_period
    def use_simple_gp(self):
        """Make the non-periodic GP the active "gp" sub-model."""
        self.models["gp"] = self.simple_gp
    def use_rotation_gp(self):
        """Make the quasi-periodic rotation GP the active "gp" sub-model."""
        self.models["gp"] = self.rotation_gp
    def get_weights(self):
        """Solve for the PLD basis weights, conditioned on the current GP,
        with a per-block ridge penalty exp(-log_lam) on the diagonal."""
        log_lams = self.pld.get_parameter_vector()
        A = self.pld.A
        fsap = self.fsap
        gp = self.gp
        alpha = np.dot(A.T, gp.apply_inverse(fsap - gp.mean.value)[:, 0])
        ATKinvA = np.dot(A.T, gp.apply_inverse(A))
        S = np.array(ATKinvA)
        dids = np.diag_indices_from(S)
        for bid, (s, f) in enumerate(self.pld.block_inds):
            S[(dids[0][s:f], dids[1][s:f])] += np.exp(-log_lams[bid])
        factor = cho_factor(S, overwrite_a=True)
        alpha -= np.dot(ATKinvA, cho_solve(factor, alpha))
        for bid, (s, f) in enumerate(self.pld.block_inds):
            alpha[s:f] *= np.exp(log_lams[bid])
        return alpha
    def get_pld_model(self):
        """Systematics prediction: PLD basis times the fitted weights."""
        return np.dot(self.pld.A, self.get_weights())
    def get_predictions(self):
        """Return (pld_prediction, gp_prediction_on_residuals)."""
        pld_pred = self.get_pld_model()
        gp_pred = self.gp.predict(self.fsap - pld_pred, return_cov=False)
        return pld_pred, gp_pred
    def log_likelihood(self):
        """Marginalized log-likelihood of the GP + PLD model (PLD weights
        analytically integrated out via the Woodbury/Cholesky identities).
        Returns -inf when any linear solve fails."""
        log_lams = self.pld.get_parameter_vector()
        A = self.pld.A
        fsap = self.fsap
        gp = self.gp
        r = fsap - gp.mean.value
        try:
            alpha = gp.apply_inverse(r)[:, 0]
        except celerite.solver.LinAlgError:
            return -np.inf
        value = np.dot(r, alpha)
        ATalpha = np.dot(A.T, alpha)
        try:
            KA = gp.apply_inverse(A)
        except celerite.solver.LinAlgError:
            return -np.inf
        S = np.dot(A.T, KA)
        dids = np.diag_indices_from(S)
        for bid, (s, f) in enumerate(self.pld.block_inds):
            S[(dids[0][s:f], dids[1][s:f])] += np.exp(-log_lams[bid])
        try:
            factor = cho_factor(S, overwrite_a=True)
            value -= np.dot(ATalpha, cho_solve(factor, ATalpha))
        except (np.linalg.LinAlgError, ValueError):
            return -np.inf
        # Penalty terms
        log_det = 2*np.sum(np.log(np.diag(factor[0])))
        log_det += np.sum(log_lams * self.pld.nblocks)
        log_det += gp.solver.log_determinant()
        return -0.5 * (value + log_det)
    def nll(self, params):
        """Negative log-likelihood for scipy.optimize; non-finite values are
        replaced by a huge jittered penalty so the optimizer keeps moving."""
        self.set_parameter_vector(params)
        ll = self.log_likelihood()
        if not np.isfinite(ll):
            ll = -1e10 + np.random.randn()
        return -ll
    @property
    def period(self):
        # Rotation period of the quasi-periodic kernel term.
        return np.exp(self.rotation_gp.kernel.get_parameter("terms[2]:log_P"))
    @period.setter
    def period(self, period):
        self.rotation_gp.kernel.set_parameter("terms[2]:log_P", np.log(period))
    def set_default(self):
        """Restore all sub-models to the parameter vectors saved at init."""
        self.pld.set_parameter_vector(self.default_pld_vector,
                                      include_frozen=True)
        self.simple_gp.set_parameter_vector(self.default_simple_vector,
                                            include_frozen=True)
        self.rotation_gp.set_parameter_vector(self.default_rotation_vector,
                                              include_frozen=True)
    def optimize(self, **kwargs):
        """Optimize all parameters jointly; refresh self.fdet with the new
        PLD-detrended flux and return the scipy result."""
        init = self.get_parameter_vector()
        bounds = self.get_parameter_bounds()
        soln = minimize(self.nll, init, bounds=bounds, **kwargs)
        self.set_parameter_vector(soln.x)
        pld_pred = self.get_pld_model()
        self.fdet = self.fsap - pld_pred
        return soln
    def gp_grad_nll(self, params):
        """(value, gradient) of the GP-only negative log-likelihood on the
        detrended flux; non-finite values become large random penalties."""
        self.gp.set_parameter_vector(params)
        gll = self.gp.grad_log_likelihood(self.fdet, quiet=True)
        if not np.isfinite(gll[0]):
            return (1e10 + np.random.randn(),
                    10000*np.random.randn(len(params)))
        return -gll[0], -gll[1]
    def optimize_gp(self, **kwargs):
        """Optimize only the active GP's parameters (with gradients)."""
        init = self.gp.get_parameter_vector()
        bounds = self.gp.get_parameter_bounds()
        soln = minimize(self.gp_grad_nll, init, bounds=bounds, jac=True,
                        **kwargs)
        self.gp.set_parameter_vector(soln.x)
        return soln
| dfm/rotate | rotate/model.py | model.py | py | 7,231 | python | en | code | 3 | github-code | 36 |
16558040325 | import network # Importar librerías necesarias
import socket
import time
import secrets # Librería con las credenciales de tu red Wi-Fi
from machine import Pin
# Pin assignment for the LED
led = Pin(15, Pin.OUT)
# Wi-Fi network configuration
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
# pm=0xa11140: presumably disables Wi-Fi power management on the Pico W — confirm
wlan.config(pm = 0xa11140)
wlan.connect(secrets.SSID, secrets.PASSWORD)
# HTML pages: %s is filled with the current LED state string.
# NOTE(review): the <style> blocks contain a stray rule line and duplicated
# properties — cosmetic only, left untouched here.
html_on = """<!DOCTYPE html>
<html>
<head> <title>Raspberry pi Pico W</title>
<style>html { font-family: Helvetica; display: inline-block; margin: 0px auto; text-align: center;}
.buttonRed { background-color: #d11d53; border: 2px solid #000000;; color: white; padding: 15px 32px; text-align: center;
text-decoration: none; display: font-size: 16px; margin: 4px 2px; cursor: pointer; }
text-decoration: none; font-size: 30px; margin: 2px; cursor: pointer;}
</style></head>
<body> <center><h1>Server on a Pico W</h1></center><br><br>
<form><center>
<center> <button class="buttonRed" name="Apagar" value="Off" formaction="/light/off" type="submit">Apagar LED </button>
<br><br>
<center><p>%s</p>
</center></form>
</body>
</html>
"""
html_off = """<!DOCTYPE html>
<html>
<head> <title>Raspberry pi Pico W</title>
<style>html { font-family: Helvetica; display: inline-block; margin: 0px auto; text-align: center;}
.button { background-color: #4CAF50; border: 2px solid #000000;; color: white; padding: 15px 32px; text-align: center;
text-decoration: none; display: font-size: 16px; margin: 4px 2px; cursor: pointer; }
text-decoration: none; font-size: 30px; margin: 2px; cursor: pointer;}
</style></head>
<body> <center><h1>Server on a Pico W</h1></center><br><br>
<form><center>
<center> <button class="button" name="Encender" value="On" formaction="/light/on" type="submit">Encender LED </button>
<br><br>
<center><p>%s</p>
</center></form>
</body>
</html>
"""
# Wait for the Wi-Fi connection to be established or to fail (max ~10 s)
max_wait = 10
while max_wait > 0:
    # status >= 3 means connected; negative values are failures.
    if wlan.status() < 0 or wlan.status() >= 3:
        break
    max_wait -= 1
    print('waiting for connection...')
    time.sleep(1)
# Error connecting to the Wi-Fi network
if wlan.status() != 3:
    raise RuntimeError('network connection failed')
else:
    print('connected')
    status = wlan.ifconfig()
    print( 'ip = ' + status[0] )
    print( 'Subnet = ' + status[1] )
    print( 'Gateway = ' + status[2] )
    print( 'DNS = ' + status[3] )
# Open socket
addr = socket.getaddrinfo('0.0.0.0', 80)[0][-1]
s = socket.socket()
s.bind(addr)
s.listen(1)
print('listening on', addr)
# Wait for connections to our page
while True:
    try:
        cl, addr = s.accept()
        print('client connected from', addr)
        request = cl.recv(1024)
        print(request)
        request = str(request)
        # find() == 6 matches the path right after "b'GET " in the
        # stringified request bytes — fragile; NOTE(review): confirm.
        led_on = request.find('/light/on')
        led_off = request.find('/light/off')
        print( 'led on = ' + str(led_on))
        print( 'led off = ' + str(led_off))
        if led_on == 6:
            print("led on")
            led.value(1)
            stateis = "Encendido"
            response = html_on % stateis
        if led_off == 6:
            print("led off")
            led.value(0)
            stateis = "Apagado"
            response = html_off % stateis
        # NOTE(review): a plain "GET /" leaves `response` unbound and raises
        # NameError, which the OSError handler below does not catch.
        cl.send('HTTP/1.0 200 OK\r\nContent-type: text/html\r\n\r\n')
        cl.send(response)
        cl.close()
    except OSError as e:
        cl.close()
        print('connection closed')
| LuisSkap/ServerRaspberryPiPICO | Led_1.py | Led_1.py | py | 3,618 | python | en | code | 1 | github-code | 36 |
7730783737 | import random
def rating_file(name):
    """Look up *name*'s saved score in rating.txt.

    Returns the stored points as a string (exactly as read from the file),
    or 0 when the player has no entry.  Fix: a missing rating file now also
    yields 0 instead of crashing at startup with FileNotFoundError.
    """
    try:
        with open('rating.txt', 'r') as file:
            for line in file:
                user, points = line.split()
                if user == name:
                    return points
    except FileNotFoundError:
        # First run: no ratings saved yet.
        pass
    return 0
def game(points, option):
    """Play rock-paper-scissors (or a custom cyclic variant) on stdin.

    points: starting score; option: comma-separated move names, or "" for
    the default paper/scissors/rock set.  '!exit' quits, '!rating' prints
    the current score and quits.

    NOTE(review): the score accumulated here is neither returned nor written
    back to rating.txt by this function — persistence, if any, must happen
    elsewhere.
    """
    if option == "":
        option = ['paper', 'scissors', 'rock']
    else:
        option = option.split(',')
    length = len(option)
    # Each move beats the half of the cycle "behind" it.
    half = (length - 1) / 2
    while True:
        user_move = input()
        computer_move = random.choice(option)
        if user_move == '!exit':
            print('Bye!')
            break
        elif user_move == '!rating':
            print(f'Your rating: {points}')
            break
        elif user_move not in option:
            print('Invalid input')
        else:
            user_move_id = option.index(user_move)
            computer_move_id = option.index(computer_move)
            # wins collects the indices the user's move defeats (a half-circle
            # of the move list, wrapping around) — TODO confirm the intended
            # rule at the boundaries.
            wins = []
            if user_move_id != computer_move_id:
                if user_move_id >= half:
                    wins += [i for i in range(user_move_id - int(half), user_move_id)]
                else:
                    start_point = half - user_move_id
                    if user_move_id - 1 >= 0:
                        wins += [i for i in range(0, user_move_id)]
                    wins += [i for i in range(length - int(start_point), length)]
                if computer_move_id in wins:
                    print(f'Well done. The computer chose {computer_move} and failed')
                    points += 100
                else:
                    print(f'Sorry, but the computer chose {computer_move}')
            else:
                print(f'There is a draw ({computer_move})')
                points += 50
def main():
    """Prompt for a player name and move set, then start the game loop."""
    player = input('Enter your name: ')
    print(f'Hello, {player}')
    stored_score = rating_file(player)
    tape = input('Paste your tape of game: ')
    print("Okay, let's start")
    game(int(stored_score), tape)
# Script entry point. NOTE(review): no `if __name__ == "__main__"` guard, so
# this also runs on import -- confirm that is acceptable for this script.
main()
| cubusgg/rock-paper-scissors | game.py | game.py | py | 1,957 | python | en | code | 0 | github-code | 36 |
# BUG FIX: the first line carried an extraction artifact ("8984579354 | ")
# fused onto it, which made the module syntactically invalid. Data unchanged.

# Strings that should map a scanned binary to glibc 2.31.
mapping_test_data = [
    {
        "product": "glibc",
        "version": "2.31",
        "version_strings": [
            "GLIBC 2.31",
            "The following command substitution is needed to make ldd work in SELinux",
            "environments where the RTLD might not have permission to write to the",
        ],
    }
]

# A real downloadable package known to contain glibc 2.28.
package_test_data = [
    {
        "url": "http://mirror.centos.org/centos/8/BaseOS/x86_64/os/Packages/",
        "package_name": "glibc-2.28-101.el8.i686.rpm",
        "product": "glibc",
        "version": "2.28",
    }
]
| chinvib66/cve-bin-tool | test/test_data/glibc.py | glibc.py | py | 555 | python | en | code | null | github-code | 36 |
42998550666 | from __future__ import annotations
#
# SSL wrap socket for PyOpenSSL.
# Mostly copied from
#
# https://github.com/shazow/urllib3/blob/master/urllib3/contrib/pyopenssl.py
#
# and added OCSP validator on the top.
import logging
import time
from functools import wraps
from inspect import getfullargspec as get_args
from socket import socket
from typing import Any
import certifi
import OpenSSL.SSL
from .constants import OCSPMode
from .errorcode import ER_OCSP_RESPONSE_CERT_STATUS_REVOKED
from .errors import OperationalError
from .vendored.urllib3 import connection as connection_
from .vendored.urllib3.contrib.pyopenssl import PyOpenSSLContext, WrappedSocket
from .vendored.urllib3.util import ssl_ as ssl_
DEFAULT_OCSP_MODE: OCSPMode = OCSPMode.FAIL_OPEN
FEATURE_OCSP_MODE: OCSPMode = DEFAULT_OCSP_MODE
"""
OCSP Response cache file name
"""
FEATURE_OCSP_RESPONSE_CACHE_FILE_NAME: str | None = None
log = logging.getLogger(__name__)
def inject_into_urllib3() -> None:
    """Monkey-patch urllib3 with PyOpenSSL-backed SSL-support and OCSP."""
    log.debug("Injecting ssl_wrap_socket_with_ocsp")
    setattr(connection_, "ssl_wrap_socket", ssl_wrap_socket_with_ocsp)
@wraps(ssl_.ssl_wrap_socket)
def ssl_wrap_socket_with_ocsp(*args: Any, **kwargs: Any) -> WrappedSocket:
    """Wrap urllib3's ``ssl_wrap_socket`` and run OCSP validation on top.

    Forwards *args*/*kwargs* to ``ssl_.ssl_wrap_socket``, forcing a default
    SSL context when the caller did not supply a PyOpenSSL one, then (unless
    OCSP is in INSECURE mode) validates the peer certificate's revocation
    status and raises OperationalError when validation fails.
    """
    # Hoist the (repeated) argspec lookup out of the three index computations.
    argspec = get_args(ssl_.ssl_wrap_socket).args

    # Extract server_hostname (positional or keyword).
    hostname_index = argspec.index("server_hostname")
    server_hostname = (
        args[hostname_index]
        if len(args) > hostname_index
        else kwargs.get("server_hostname", None)
    )
    # Remove context if present
    ssl_context_index = argspec.index("ssl_context")
    context_in_args = len(args) > ssl_context_index
    # BUG FIX: the context was previously read from args[hostname_index]
    # (the server_hostname slot), so a positionally-passed ssl_context was
    # never inspected and a real PyOpenSSLContext could be discarded.
    ssl_context = (
        args[ssl_context_index] if context_in_args else kwargs.get("ssl_context", None)
    )
    if not isinstance(ssl_context, PyOpenSSLContext):
        # Clear the foreign context so urllib3 creates a new default one.
        if context_in_args:
            new_args = list(args)
            new_args[ssl_context_index] = None
            args = tuple(new_args)
        else:
            # BUG FIX: pop() instead of del -- the key may be absent entirely.
            kwargs.pop("ssl_context", None)

    # Fix ca certs location
    ca_certs_index = argspec.index("ca_certs")
    ca_certs_in_args = len(args) > ca_certs_index
    if not ca_certs_in_args and not kwargs.get("ca_certs"):
        kwargs["ca_certs"] = certifi.where()

    ret = ssl_.ssl_wrap_socket(*args, **kwargs)

    log.debug(
        "OCSP Mode: %s, " "OCSP response cache file name: %s",
        FEATURE_OCSP_MODE.name,
        FEATURE_OCSP_RESPONSE_CACHE_FILE_NAME,
    )
    if FEATURE_OCSP_MODE != OCSPMode.INSECURE:
        from .ocsp_asn1crypto import SnowflakeOCSPAsn1Crypto as SFOCSP

        v = SFOCSP(
            ocsp_response_cache_uri=FEATURE_OCSP_RESPONSE_CACHE_FILE_NAME,
            use_fail_open=FEATURE_OCSP_MODE == OCSPMode.FAIL_OPEN,
        ).validate(server_hostname, ret.connection)
        if not v:
            raise OperationalError(
                msg=(
                    "The certificate is revoked or "
                    "could not be validated: hostname={}".format(server_hostname)
                ),
                errno=ER_OCSP_RESPONSE_CERT_STATUS_REVOKED,
            )
    else:
        log.info(
            "THIS CONNECTION IS IN INSECURE "
            "MODE. IT MEANS THE CERTIFICATE WILL BE "
            "VALIDATED BUT THE CERTIFICATE REVOCATION "
            "STATUS WILL NOT BE CHECKED."
        )

    return ret
def _openssl_connect(
    hostname: str, port: int = 443, max_retry: int = 20, timeout: int | None = None
) -> OpenSSL.SSL.Connection:
    """The OpenSSL connection without validating certificates.

    This is used to diagnose SSL issues. Retries up to *max_retry* times
    with capped exponential backoff (2s, 4s, ... up to 16s) and re-raises
    the last error when all attempts fail.
    """
    err = None
    sleeping_time = 1
    for _ in range(max_retry):
        client = socket()
        try:
            client.connect((hostname, port))
            context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
            if timeout is not None:
                context.set_timeout(timeout)
            client_ssl = OpenSSL.SSL.Connection(context, client)
            client_ssl.set_connect_state()
            client_ssl.set_tlsext_host_name(hostname.encode("utf-8"))
            client_ssl.do_handshake()
            return client_ssl
        except (
            OpenSSL.SSL.SysCallError,
            OSError,
        ) as ex:
            # BUG FIX: close the raw socket on failure; previously one file
            # descriptor leaked per failed attempt.
            client.close()
            err = ex
            sleeping_time = min(sleeping_time * 2, 16)
            time.sleep(sleeping_time)
    if err:
        raise err
| snowflakedb/snowflake-connector-python | src/snowflake/connector/ssl_wrap_socket.py | ssl_wrap_socket.py | py | 4,486 | python | en | code | 511 | github-code | 36 |
7209789987 | """Functionality related to listing and querying apps"""
import os
import subprocess
import logging
import time
import re
from typing import Optional
from bundle.bundle import Bundle, InvalidBundle
from binary.binary import Binary
from extern.tools import tool_named, call_sbpl
def all_apps(at: str = "/Applications", mas_only: bool = False, sandboxed_only: bool = False):
    """
    Yields filepaths of .app bundles in a target folder matching the filters.
    :param at: The base folder where to search for applications
    :param mas_only: Only yield applications from the Mac App Store
    :param sandboxed_only: Only yield sandboxed applications
    :return: Generator of application filepaths fulfilling the criteria
    """
    for candidate in os.listdir(at):
        if not candidate.endswith(".app"):
            continue
        full_path = os.path.join(at, candidate)
        try:
            bundle = Bundle.make(full_path)
            if mas_only and not bundle.is_mas_app():
                continue
            if sandboxed_only and not bundle.is_sandboxed():
                continue
            yield full_path
        except InvalidBundle:
            continue
def container_for_app(app):
    """
    Returns the container directory used by the application or None if the container does not exist.
    :param app: The app for which to find the container directory. Valid arguments are both
                a filepath to the application and a Bundle for that application
    :raises TypeError: if *app* is neither a Bundle nor a str
    :return: Filepath to the container or None, if the lookup failed.
    """
    # Handle code that already has a bundle for an app
    if isinstance(app, Bundle):
        app_bundle = app
    elif isinstance(app, str):
        try:
            app_bundle = Bundle.make(app)
        except InvalidBundle:
            return None
    else:
        # BUG FIX: previously any other type fell through with `app_bundle`
        # unbound, raising a confusing NameError below.
        raise TypeError(
            "app must be a Bundle or a filepath string, got {!r}".format(type(app))
        )

    bid = app_bundle.bundle_identifier(normalized=True)

    # Verify the container exists.
    container_path = os.path.join(os.path.expanduser("~/Library/Containers/"), bid)
    if not os.path.exists(container_path):
        return None

    # Also verify that the metadata file is present, else the container is invalid and of
    # no use to other code
    container_metadata = os.path.join(container_path, "Container.plist")
    if not os.path.exists(container_metadata):
        return None

    return container_path
def _entitlements_can_be_parsed(app_bundle: Bundle) -> bool:
    """
    Check whether an application's entitlements can be parsed by libsecinit.
    Only part of the process is mirrored here, namely the parsing of
    entitlements via xpc_create_from_plist, delegated to the bundled
    xpc_vuln_checker tool.
    :param app_bundle: Bundle for which to check whether the entitlements can be parsed
    :return: True, iff the entitlements of the main executable can be parsed, else False.
    """
    # An app without entitlements cannot fail entitlement validation.
    if not app_bundle.has_entitlements():
        return True

    raw_entitlements = Binary.get_entitlements(app_bundle.executable_path(), raw=True)
    status, _ = tool_named("xpc_vuln_checker")(input=raw_entitlements)
    return status != 1
def init_sandbox(app_bundle: Bundle, logger: logging.Logger, force_initialisation: bool = False) -> bool:
    """
    Initialises the sandbox for a particular app bundle.
    :param app_bundle: The App for which to initialise the App Sandbox
    :param logger: Logger object used to record failure cases
    :param force_initialisation: Whether to overwrite / start initialisation even if metadata
    exists that indicates the sandbox has already been initialised
    :return: Boolean value indicating whether the sandbox was successfully initialised
    (or was already initialised)
    """
    # Guarding against a few applications that ship with entitlements libsecinit cannot parse.
    if not _entitlements_can_be_parsed(app_bundle):
        return False

    # Super useful environment variable used by libsecinit. If this variable is set, the application
    # is terminated after its sandbox is initialised.
    init_sandbox_environ = {**os.environ, 'APP_SANDBOX_EXIT_AFTER_INIT': str(1)}

    # An existing container is taken as evidence the sandbox was already set up.
    app_container = container_for_app(app_bundle)
    if app_container is not None and not force_initialisation:
        if logger:
            logger.info("Container directory already existed. Skipping sandbox initialisation.")
        return True

    if logger:
        logger.info("Starting process {} to initialize sandbox.".format(app_bundle.executable_path()))

    # Output is discarded; the process exists only for its sandbox side effect.
    process = subprocess.Popen([app_bundle.executable_path()],
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL,
                               env=init_sandbox_environ)

    # Sandbox initialisation should be almost instant. If the application is still
    # running after a couple of seconds, the sandbox failed to initialise.
    # We use 10 seconds as an arbitrary cutoff time.
    try:
        process.wait(10)
    except subprocess.TimeoutExpired:
        process.kill()
        if logger:
            logger.error("Sandbox was not initialised successfully for executable at {}. Skipping.".format(
                app_bundle.executable_path())
            )
        return False

    # Check that there now is an appropriate container
    if container_for_app(app_bundle) is None:
        if logger:
            logger.info(
                "Sandbox initialisation for executable {} succeeded \
                    but no appropriate container metadata was created.".format(
                    app_bundle.executable_path()
                )
            )
        return False

    return True
def sandbox_status(app_bundle: Bundle, logger: logging.Logger) -> Optional[int]:
    """
    Launches the app's main executable, waits 10 seconds, then queries its
    sandbox state via the `sandbox_status` helper tool.
    :param app_bundle: Bundle whose main executable is probed
    :param logger: Logger used to record failure cases
    :return: The integer status reported by the tool, or None when the
             process exited early or the tool output could not be parsed.
    """
    proc = subprocess.Popen([app_bundle.executable_path()],
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)

    # Give the app time to come up; 10 seconds is an arbitrary cutoff.
    # If the process has already terminated, no status can be queried.
    time.sleep(10)

    pid = str(proc.pid)
    if proc.poll() is not None:
        logger.error("Process terminated early: {}".format(app_bundle.executable_path()))
        return None

    status_tool = tool_named("sandbox_status")
    _, raw_status = status_tool(pid)
    proc.kill()

    match = re.match(r'^Sandbox status for PID {} is (\d+)$'.format(pid),
                     raw_status.decode().strip())
    if match:
        return int(match.group(1))

    logger.error("`sandbox_status` did not return a status for executable at {}. Skipping.".format(
        app_bundle.executable_path())
    )
    return None
def run_process(executable, duration, stdout_file=subprocess.DEVNULL, stderr_file=subprocess.DEVNULL) -> int:
    """
    Executes a process, waits up to *duration* seconds, then kills it.
    :param executable: Filepath to executable to execute
    :param duration: Duration in seconds or None to let the executable run indefinitely.
    :param stdout_file: File object to write standard output to
    :param stderr_file: File object to write standard error to
    :return: The PID of the (possibly already terminated) process
    """
    proc = subprocess.Popen([executable], stdout=stdout_file, stderr=stderr_file)
    try:
        proc.wait(duration)
    except subprocess.TimeoutExpired:
        # Still running after the allotted time: terminate it ourselves.
        proc.kill()
    return proc.pid
def get_sandbox_rules(app_bundle, result_format: str = 'scheme', patch: bool = False):
    """
    Obtain the final sandbox ruleset for a target application, optionally
    patched so that all allow decisions are logged to the syslog.
    :param app_bundle: The bundle for which to obtain the sandbox ruleset
    :param result_format: The format to return. Supported are "scheme" and "json"
    :param patch: Whether to patch the resulting profile to log allow decisions.
    :return: Raw bytes of sandbox profile.
    """
    return call_sbpl(container_for_app(app_bundle),
                     result_format=result_format,
                     patch=patch)
| 0xbf00/maap | misc/app_utils.py | app_utils.py | py | 8,464 | python | en | code | 8 | github-code | 36 |
19751656745 | import requests
import json
import yaml
import os
from bs4 import BeautifulSoup
# set URLS for APIs
proPublica_DataTable_url = (
"https://api.propublica.org/congress/v1/bills/search.json?query={}&sort=date"
)
proPublica_bill_url = "https://api.propublica.org/congress/v1/116/bills/{}.json"
proPublica_SENATE_member_url = (
"https://api.propublica.org/congress/v1/members/{}/{}/current.json"
)
proPublica_SENATE_member_id_url = (
"https://api.propublica.org/congress/v1/members/{}.json"
)
proPublica_HOUSE_member_url = "https://api.propublica.org/congress/v1/members/{chamber}/{state}/{district}/current.json"
govtrack_bill_url = "https://www.govinfo.gov/link/bills/116/{}/{}?link-type=html"
def generate_datatable_JSON(api_key, topicQueryString):
    """Search ProPublica bills for a topic.

    :return: (raw response, JSON string of the first result's bill list)
    """
    url = proPublica_DataTable_url.format(topicQueryString)
    response = requests.get(url, headers={"X-API-KEY": api_key})
    bills = response.json()["results"][0]["bills"]
    return response, json.dumps(bills)
def generate_bill_data(api_key, bill_slug):
    """Fetch a single bill by its slug.

    :return: (raw response, parsed "results" payload)
    """
    url = proPublica_bill_url.format(bill_slug)
    response = requests.get(url, headers={"X-API-KEY": api_key})
    return response, response.json()["results"]
def generate_bill_fulltext(api_key, bill_type, bill_number):
    """Fetch a bill's full text HTML from GovInfo.

    :return: (raw response, text) -- text is the page HTML, or an error
             message when GovTrack reports a service error.
    BUG FIX: the error branch used to return a *bare string* while the
    success branch returned a 2-tuple, so callers unpacking the result
    crashed on the error path; both branches now return (response, text).
    NOTE(review): *api_key* is unused here -- kept for signature
    compatibility with the sibling functions.
    """
    govtrack_bill_url_formatted = govtrack_bill_url.format(bill_type, bill_number)
    response = requests.get(govtrack_bill_url_formatted)
    soup = BeautifulSoup(response.text, features="html.parser")
    soupTitle = soup.find("title")
    if soupTitle and "Service Error" in soupTitle.text:
        return response, "There was an error fetching the bill's full text. It could be this bill is too recent, or another error with GovTrack."
    return response, response.text
def get_contact_form_url(member_id):
    """Look up a member's contact-form URL from the bundled YAML files."""
    yaml_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", "yaml", "{}.yaml".format(member_id))
    )
    with open(yaml_path, "r") as stream:
        data = yaml.safe_load(stream)
    return data["contact_form"]["steps"][0]["visit"]
def get_members_by_state(api_key, member_state):
    """List current senators for a state, each annotated with a contact URL.

    :return: (raw response, list of member dicts with added "contact_url")
    """
    url = proPublica_SENATE_member_url.format("senate", member_state)
    response = requests.get(url, headers={"X-API-KEY": api_key})
    annotated = []
    for member in response.json()["results"]:
        member["contact_url"] = get_contact_form_url(member["id"])
        annotated.append(member)
    return response, annotated
def get_member_by_id(api_key, member_id):
    """Fetch a single member record, annotated with a contact-form URL.

    :return: (raw response, list of member dicts with added "contact_url")
    """
    url = proPublica_SENATE_member_id_url.format(member_id)
    response = requests.get(url, headers={"X-API-KEY": api_key})
    annotated = []
    for member in response.json()["results"]:
        member["contact_url"] = get_contact_form_url(member["id"])
        annotated.append(member)
    return response, annotated
| jon-behnken/reform-project | code/powertools/main.py | main.py | py | 3,180 | python | en | code | 0 | github-code | 36 |
class Solution:
    def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
        """Greedy full text justification (LeetCode 68).

        Packs as many words as fit per line, distributes extra spaces
        left-biased between words, and left-justifies the last line.
        """
        lines, line_words, char_count = [], [], 0
        for word in words:
            # Word no longer fits (words + one space per existing word):
            # flush the current line, fully justified.
            if char_count + len(word) + len(line_words) > maxWidth:
                if len(line_words) == 1:
                    lines.append(line_words[0] + ' ' * (maxWidth - char_count))
                else:
                    gaps = len(line_words) - 1
                    base, extra = divmod(maxWidth - char_count, gaps)
                    # The leftmost `extra` gaps get one additional space.
                    for k in range(extra):
                        line_words[k] += ' '
                    lines.append((' ' * base).join(line_words))
                line_words, char_count = [], 0
            line_words.append(word)
            char_count += len(word)
        # Last line: single spaces between words, padded on the right.
        last = ' '.join(line_words)
        lines.append(last + ' ' * (maxWidth - len(last)))
        return lines
| nango94213/Leetcode-solution | 68-text-justification/68-text-justification.py | 68-text-justification.py | py | 949 | python | en | code | 2 | github-code | 36 |
5049725299 | import configparser
import time
from operator import attrgetter
from pathlib import Path
from typing import Dict, List, Optional, Union
import numpy as np
import torch
import tensorrt_llm
import tensorrt_llm.logger as logger
from tensorrt_llm._utils import pad_vocab_size, str_dtype_to_np
from tensorrt_llm.mapping import Mapping
from tensorrt_llm.models import GPTJForCausalLM
from tensorrt_llm.models.quantized.quant import get_dummy_quant_scales
from tensorrt_llm.quantization import QuantMode
def get_scaling_factors(
    model_path: Union[str, Path],
    num_layers: int,
    quant_mode: Optional[QuantMode] = None,
) -> Optional[Dict[str, List[int]]]:
    """ Get the scaling factors for GPT-J model
    Returns a dictionary of per-layer scaling factors for the GPT-J model,
    loaded from a quantized-model .npz file.
    Args:
        model_path (str): Path to the quantized GPT-J model (.npz). If None,
            dummy scales are returned for all layers.
        num_layers (int): Number of transformer layers to collect scales for.
        quant_mode (QuantMode, optional): When it has FP8 KV cache enabled,
            a (non-calibrated) 'qkv_output' scale of 1.0 is appended per layer.
    Returns:
        dict: Dictionary of scaling factors, each value a list with one
        entry per layer.
    example:
        {
            'qkv_act': qkv_act_scale,
            'qkv_weights': qkv_weights_scale,
            'qkv_output' : qkv_outputs_scale,
            'dense_act': dense_act_scale,
            'dense_weights': dense_weights_scale,
            'fc_act': fc_act_scale,
            'fc_weights': fc_weights_scale,
            'proj_act': proj_act_scale,
            'proj_weights': proj_weights_scale,
        }
    """

    if model_path is None:
        logger.warning(f"--quantized_fp8_model_path not specified. "
                       f"Initialize quantization scales automatically.")
        return get_dummy_quant_scales(num_layers)
    weight_dict = np.load(model_path)

    # yapf: disable
    scaling_factor = {
        'qkv_act': [],
        'qkv_weights': [],
        'qkv_output': [],
        'dense_act': [],
        'dense_weights': [],
        'fc_act': [],
        'fc_weights': [],
        'proj_act': [],
        'proj_weights': [],
    }

    for layer in range(num_layers):
        # Fused QKV uses the max over the separate q/k/v calibration scales.
        scaling_factor['qkv_act'].append(max(
            weight_dict[f'_np:layers:{layer}:attention:qkv:q:activation_scaling_factor'].item(),
            weight_dict[f'_np:layers:{layer}:attention:qkv:k:activation_scaling_factor'].item(),
            weight_dict[f'_np:layers:{layer}:attention:qkv:v:activation_scaling_factor'].item()
            ))
        scaling_factor['qkv_weights'].append(max(
            weight_dict[f'_np:layers:{layer}:attention:qkv:q:weights_scaling_factor'].item(),
            weight_dict[f'_np:layers:{layer}:attention:qkv:k:weights_scaling_factor'].item(),
            weight_dict[f'_np:layers:{layer}:attention:qkv:v:weights_scaling_factor'].item()
            ))
        if quant_mode is not None and quant_mode.has_fp8_kv_cache():
            # Not calibrarting KV cache.
            scaling_factor['qkv_output'].append(1.0)
        scaling_factor['dense_act'].append(weight_dict[f'_np:layers:{layer}:attention:dense:activation_scaling_factor'].item())
        scaling_factor['dense_weights'].append(weight_dict[f'_np:layers:{layer}:attention:dense:weights_scaling_factor'].item())
        scaling_factor['fc_act'].append(weight_dict[f'_np:layers:{layer}:mlp:fc:activation_scaling_factor'].item())
        scaling_factor['fc_weights'].append(weight_dict[f'_np:layers:{layer}:mlp:fc:weights_scaling_factor'].item())
        scaling_factor['proj_act'].append(weight_dict[f'_np:layers:{layer}:mlp:proj:activation_scaling_factor'].item())
        scaling_factor['proj_weights'].append(weight_dict[f'_np:layers:{layer}:mlp:proj:weights_scaling_factor'].item())
    # yapf: enable
    for k, v in scaling_factor.items():
        assert len(v) == num_layers, \
            f'Expect scaling factor {k} of length {num_layers}, got {len(v)}'

    return scaling_factor
def gen_suffix(rank, use_smooth_quant, quant_per_channel):
    """Build the weight-file suffix for a rank/quantization combination.

    Plain: "<rank>.bin"; SmoothQuant prepends "int8." and, when
    per-channel scaling is on, "int8.col.".
    """
    pieces = []
    if use_smooth_quant:
        pieces.append("int8.")
        if quant_per_channel:
            pieces.append("col.")
    pieces.append(f"{rank}.bin")
    return "".join(pieces)
def extract_layer_idx(name):
    """Return the first all-digit dot-separated component of *name*, or None."""
    return next((part for part in name.split('.') if part.isdigit()), None)
def split(v, tp_size, idx, dim=0):
    """Return chunk *idx* of *v* split into *tp_size* equal parts.

    1-D arrays are always split along axis 0; 2-D arrays along *dim*.
    With tp_size == 1 the input is returned unchanged; other ranks
    yield None (matching the original behaviour).
    """
    if tp_size == 1:
        return v
    if v.ndim == 1:
        chunk = np.split(v, tp_size)[idx]
    elif v.ndim == 2:
        chunk = np.split(v, tp_size, axis=dim)[idx]
    else:
        return None
    return np.ascontiguousarray(chunk)
def parse_config(ini_file):
    """Read an FT-style config.ini and return the GPT hyperparameters.

    Returns a tuple in this order: (n_embd, n_head, n_layer, n_positions,
    vocab_size, do_layer_norm_before, hidden_act, rotary_pct, bias,
    inter_size, multi_query_mode, dtype, prompt_num_tasks,
    prompt_max_vocab_size). inter_size defaults to 4 * n_embd when
    'intermediate_size' is absent.
    """
    cfg = configparser.ConfigParser()
    cfg.read(ini_file)
    gpt = cfg['gpt']

    n_embd = gpt.getint('n_embd')
    n_head = gpt.getint('n_head')
    n_layer = gpt.getint('n_layer')
    n_positions = gpt.getint('n_positions')
    vocab_size = gpt.getint('vocab_size')
    do_layer_norm_before = gpt.getboolean('do_layer_norm_before', fallback=True)
    rotary_pct = gpt.getfloat('rotary_pct', fallback=0.0)
    hidden_act = gpt.get('activation_function')
    bias = gpt.getboolean('bias', fallback=True)
    dtype = gpt.get('storage_dtype', fallback='float32')

    inter_size = gpt.getint('intermediate_size', fallback=None)
    if inter_size is None:
        inter_size = 4 * n_embd

    multi_query_mode = gpt.getboolean('multi_query_mode', fallback=False)
    prompt_num_tasks = gpt.getint('prompt_num_tasks', fallback=0)
    prompt_max_vocab_size = gpt.getint('prompt_max_vocab_size', fallback=0)

    return n_embd, n_head, n_layer, n_positions, vocab_size, do_layer_norm_before, hidden_act, rotary_pct, bias, inter_size, multi_query_mode, dtype, prompt_num_tasks, prompt_max_vocab_size
def load_from_bin_gpt_j(tensorrt_llm_gpt_j: GPTJForCausalLM,
                        dir_path,
                        rank=0,
                        tensor_parallel=1,
                        dtype='float32',
                        use_parallel_embedding=False,
                        sharding_dim=0,
                        share_embedding_table=False,
                        scaling_factors=None):
    """Populate a TensorRT-LLM GPT-J model with weights from a binary dump.

    Reads per-tensor .bin files (plus config.ini) from *dir_path*, slices
    them for tensor-parallel *rank* of *tensor_parallel*, and assigns them
    to the corresponding parameters of *tensorrt_llm_gpt_j*. Handles plain
    fp16/fp32 weights as well as SmoothQuant, INT4/INT8 weight-only, INT8
    KV-cache, and FP8 quantization modes (selected via the model's
    quant_mode). *scaling_factors* (as produced by get_scaling_factors)
    supplies the FP8 scales when FP8 Q/DQ is enabled.
    """
    tensorrt_llm.logger.info('Loading weights from bin...')
    tik = time.time()

    quant_mode = getattr(tensorrt_llm_gpt_j, 'quant_mode', QuantMode(0))
    if quant_mode.is_int8_weight_only():
        plugin_weight_only_quant_type = torch.int8
    elif quant_mode.is_int4_weight_only():
        plugin_weight_only_quant_type = torch.quint4x2
    n_embd, n_head, n_layer, n_positions, vocab_size, do_layer_norm_before, hidden_act, rotary_pct, bias, inter_size, multi_query_mode, *_ = parse_config(
        Path(dir_path) / 'config.ini')
    np_dtype = str_dtype_to_np(dtype)

    # Read one named tensor file from the dump; returns None when absent.
    def fromfile(dir_path, name, shape=None, dtype=None):
        dtype = np_dtype if dtype is None else dtype
        p = dir_path + '/' + name
        if Path(p).exists():
            t = np.fromfile(p, dtype=dtype)
            if shape is not None:
                t = t.reshape(shape)
            return t
        return None

    # Load the SmoothQuant scale tensors for one quantized GEMM (either
    # per-token-dynamic or static activation scaling).
    def set_smoothquant_scale_factors(module,
                                      pre_scale_weight,
                                      dir_path,
                                      basename,
                                      shape,
                                      per_tok_dyn,
                                      per_channel,
                                      is_qkv=False,
                                      rank=None):
        suffix = "bin"
        if per_channel:
            if rank is not None:
                suffix = f"{rank}." + suffix
            suffix = "col." + suffix

        col_shape = shape if (per_channel or is_qkv) else [1, 1]
        if per_tok_dyn:
            if pre_scale_weight is not None:
                pre_scale_weight.value = np.array([1.0], dtype=np.float32)
            t = fromfile(dir_path, f"{basename}scale_w_quant_orig.{suffix}",
                         col_shape, np.float32)
            module.per_channel_scale.value = t
        else:
            t = fromfile(dir_path, f"{basename}scale_x_orig_quant.bin", [1],
                         np.float32)
            pre_scale_weight.value = t
            t = fromfile(dir_path, f"{basename}scale_y_accum_quant.{suffix}",
                         col_shape, np.float32)
            module.per_channel_scale.value = t
            t = fromfile(dir_path, f"{basename}scale_y_quant_orig.bin", [1, 1],
                         np.float32)
            module.act_scale.value = t

    # Do we use SmoothQuant?
    use_smooth_quant = quant_mode.has_act_and_weight_quant()
    # Do we use quantization per token?
    quant_per_token_dyn = quant_mode.has_per_token_dynamic_scaling()
    # Do we use quantization per channel?
    quant_per_channel = quant_mode.has_per_channel_scaling()

    # Do we use INT4/INT8 weight-only?
    use_weight_only = quant_mode.is_weight_only()

    # Int8 KV cache
    use_int8_kv_cache = quant_mode.has_int8_kv_cache()

    #Enable FP8 Gemm
    enable_fp8_qdq = quant_mode.has_fp8_qdq()

    # Reinterpret int8 SmoothQuant weight bytes as float32 for assignment.
    def sq_trick(x):
        return x.view(np.float32) if use_smooth_quant else x

    # Debug
    suffix = gen_suffix(rank, use_smooth_quant, quant_per_channel)
    # The type of weights.
    w_type = np_dtype if not use_smooth_quant else np.int8

    # pe = fromfile(dir_path, 'model.wpe.bin', [n_positions, n_embd])
    # if pe is not None:
    #     tensorrt_llm_gpt_j.embedding.position_embedding.weight.value = (pe)

    vocab_embedding_weight = fromfile(dir_path, 'model.wte.bin',
                                      [vocab_size, n_embd])
    if not use_parallel_embedding:
        tensorrt_llm_gpt_j.embedding.weight.value = vocab_embedding_weight
    else:
        if sharding_dim == 0:
            if vocab_size % tensor_parallel != 0:
                # padding
                vocab_size_padded = pad_vocab_size(
                    tensorrt_llm_gpt_j.embedding.num_embeddings,
                    tensor_parallel)
                pad_width = vocab_size_padded - vocab_size
                vocab_embedding_weight = np.pad(vocab_embedding_weight,
                                                ((0, pad_width), (0, 0)),
                                                'constant',
                                                constant_values=0)
        tensorrt_llm_gpt_j.embedding.weight.value = np.ascontiguousarray(
            split(vocab_embedding_weight,
                  tensor_parallel,
                  rank,
                  dim=sharding_dim))

    if do_layer_norm_before:
        tensorrt_llm_gpt_j.ln_f.bias.value = (fromfile(
            dir_path, 'model.final_layernorm.bias.bin'))
        tensorrt_llm_gpt_j.ln_f.weight.value = (fromfile(
            dir_path, 'model.final_layernorm.weight.bin'))

    # share input embedding
    if not share_embedding_table:
        lm_head_weight = fromfile(dir_path, 'model.lm_head.weight.bin',
                                  [vocab_size, n_embd])
        lm_head_bias = fromfile(dir_path, 'model.lm_head.bias.bin',
                                [vocab_size])
        if lm_head_weight is None:
            # Fall back to tying the LM head to the input embedding.
            lm_head_weight = fromfile(dir_path, 'model.wte.bin',
                                      [vocab_size, n_embd])
        if vocab_size % tensor_parallel != 0:
            # padding
            vocab_size_padded = tensorrt_llm_gpt_j.lm_head.out_features * tensor_parallel
            pad_width = vocab_size_padded - vocab_size
            lm_head_weight = np.pad(lm_head_weight, ((0, pad_width), (0, 0)),
                                    'constant',
                                    constant_values=0)
        tensorrt_llm_gpt_j.lm_head.weight.value = np.ascontiguousarray(
            split(lm_head_weight, tensor_parallel, rank))
        tensorrt_llm_gpt_j.lm_head.bias.value = np.ascontiguousarray(
            split(lm_head_bias, tensor_parallel, rank))
    # FP8 scaling factors are stored as plain float32 here.
    fake_fp8_sf_dt = np.float32
    # Per-layer weights: attention QKV + dense, then MLP fc + proj, plus the
    # quantization scale tensors required by the active quant mode.
    for i in range(n_layer):
        c_attn_out_dim = (3 * n_embd //
                          tensor_parallel) if not multi_query_mode else (
                              n_embd // tensor_parallel +
                              (n_embd // n_head) * 2)
        tensorrt_llm_gpt_j.layers[i].input_layernorm.weight.value = (fromfile(
            dir_path, 'model.layers.' + str(i) + '.input_layernorm.weight.bin'))
        tensorrt_llm_gpt_j.layers[i].input_layernorm.bias.value = (fromfile(
            dir_path, 'model.layers.' + str(i) + '.input_layernorm.bias.bin'))
        t = fromfile(
            dir_path, 'model.layers.' + str(i) +
            '.attention.query_key_value.weight.' + suffix,
            [n_embd, c_attn_out_dim], w_type)
        if t is not None:
            dst = tensorrt_llm_gpt_j.layers[i].attention.qkv.weight
            if use_smooth_quant:
                dst.value = sq_trick(
                    np.ascontiguousarray(np.transpose(t, [1, 0])))
                set_smoothquant_scale_factors(
                    tensorrt_llm_gpt_j.layers[i].attention.qkv,
                    tensorrt_llm_gpt_j.layers[i].input_layernorm.scale_to_int,
                    dir_path,
                    'model.layers.' + str(i) + '.attention.query_key_value.',
                    [1, c_attn_out_dim],
                    quant_per_token_dyn,
                    quant_per_channel,
                    rank=rank,
                    is_qkv=True)
            elif use_weight_only:
                processed_torch_weights, torch_weight_scales = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                    torch.tensor(t), plugin_weight_only_quant_type)
                dst.value = processed_torch_weights.numpy()
                scales = tensorrt_llm_gpt_j.layers[
                    i].attention.qkv.per_channel_scale
                scales.value = torch_weight_scales.numpy()
            else:
                dst.value = np.ascontiguousarray(np.transpose(t, [1, 0]))
        if enable_fp8_qdq:
            tensorrt_llm_gpt_j.layers[
                i].attention.qkv.activation_scaling_factor.value = np.array(
                    [scaling_factors['qkv_act'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].attention.qkv.weights_scaling_factor.value = np.array(
                    [scaling_factors['qkv_weights'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].attention.kv_orig_quant_scale.value = np.array(
                    [scaling_factors['qkv_output'][i]], dtype=np.float32)
            tensorrt_llm_gpt_j.layers[
                i].attention.kv_quant_orig_scale.value = np.array(
                    [1.0 / scaling_factors['qkv_output'][i]], dtype=np.float32)

        dst = tensorrt_llm_gpt_j.layers[i].attention.dense.weight
        t = fromfile(
            dir_path,
            'model.layers.' + str(i) + '.attention.dense.weight.' + suffix,
            [n_embd // tensor_parallel, n_embd], w_type)
        if use_smooth_quant:
            dst.value = sq_trick(np.ascontiguousarray(np.transpose(t, [1, 0])))
            dense_scale = getattr(tensorrt_llm_gpt_j.layers[i].attention,
                                  "quantization_scaling_factor", None)
            set_smoothquant_scale_factors(
                tensorrt_llm_gpt_j.layers[i].attention.dense, dense_scale,
                dir_path, 'model.layers.' + str(i) + '.attention.dense.',
                [1, n_embd], quant_per_token_dyn, quant_per_channel)
            # change it to the real smoother if dense layer is applied smooth quant
            tensorrt_llm_gpt_j.layers[
                i].attention.dense.smoother.value = np.ones(
                    [1, n_embd // tensor_parallel], dtype=np.float32)
        elif use_weight_only:
            processed_torch_weights, torch_weight_scales = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                torch.tensor(t), plugin_weight_only_quant_type)
            dst.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[
                i].attention.dense.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            dst.value = np.ascontiguousarray(np.transpose(t, [1, 0]))
        if enable_fp8_qdq:
            tensorrt_llm_gpt_j.layers[
                i].attention.dense.activation_scaling_factor.value = np.array(
                    [scaling_factors['dense_act'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].attention.dense.weights_scaling_factor.value = np.array(
                    [scaling_factors['dense_weights'][i]], dtype=fake_fp8_sf_dt)

        t = fromfile(
            dir_path,
            'model.layers.' + str(i) + '.mlp.dense_h_to_4h.weight.' + suffix,
            [n_embd, inter_size // tensor_parallel], w_type)
        if use_smooth_quant:
            tensorrt_llm_gpt_j.layers[i].mlp.fc.weight.value = sq_trick(
                np.ascontiguousarray(np.transpose(t, [1, 0])))
            set_smoothquant_scale_factors(
                tensorrt_llm_gpt_j.layers[i].mlp.fc,
                tensorrt_llm_gpt_j.layers[i].post_layernorm.scale_to_int,
                dir_path,
                'model.layers.' + str(i) + '.mlp.dense_h_to_4h.',
                [1, inter_size // tensor_parallel],
                quant_per_token_dyn,
                quant_per_channel,
                rank=rank)
        elif use_weight_only:
            dst = tensorrt_llm_gpt_j.layers[i].mlp.fc.weight
            processed_torch_weights, torch_weight_scales = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                torch.tensor(t), plugin_weight_only_quant_type)
            dst.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[i].mlp.fc.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            tensorrt_llm_gpt_j.layers[
                i].mlp.fc.weight.value = np.ascontiguousarray(
                    np.transpose(t, [1, 0]))
        if bias:
            tensorrt_llm_gpt_j.layers[i].mlp.fc.bias.value = fromfile(
                dir_path, 'model.layers.' + str(i) +
                '.mlp.dense_h_to_4h.bias.' + str(rank) + '.bin')
        if enable_fp8_qdq:
            tensorrt_llm_gpt_j.layers[
                i].mlp.fc.activation_scaling_factor.value = np.array(
                    [scaling_factors['fc_act'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].mlp.fc.weights_scaling_factor.value = np.array(
                    [scaling_factors['fc_weights'][i]], dtype=fake_fp8_sf_dt)

        t = fromfile(
            dir_path,
            'model.layers.' + str(i) + '.mlp.dense_4h_to_h.weight.' + suffix,
            [inter_size // tensor_parallel, n_embd], w_type)
        if use_smooth_quant:
            tensorrt_llm_gpt_j.layers[i].mlp.proj.weight.value = sq_trick(
                np.ascontiguousarray(np.transpose(t, [1, 0])))
            proj_scale = getattr(tensorrt_llm_gpt_j.layers[i].mlp,
                                 "quantization_scaling_factor", None)
            set_smoothquant_scale_factors(
                tensorrt_llm_gpt_j.layers[i].mlp.proj, proj_scale, dir_path,
                'model.layers.' + str(i) + '.mlp.dense_4h_to_h.', [1, n_embd],
                quant_per_token_dyn, quant_per_channel)
            # change it to the real smoother if proj layer is applied smooth quant
            tensorrt_llm_gpt_j.layers[i].mlp.proj.smoother.value = np.ones(
                [1, inter_size // tensor_parallel], dtype=np.float32)
        elif use_weight_only:
            dst = tensorrt_llm_gpt_j.layers[i].mlp.proj.weight
            processed_torch_weights, torch_weight_scales = torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                torch.tensor(t), plugin_weight_only_quant_type)
            dst.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[i].mlp.proj.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            tensorrt_llm_gpt_j.layers[i].mlp.proj.weight.value = (
                np.ascontiguousarray(np.transpose(t, [1, 0])))
        if bias:
            tensorrt_llm_gpt_j.layers[i].mlp.proj.bias.value = fromfile(
                dir_path,
                'model.layers.' + str(i) + '.mlp.dense_4h_to_h.bias.bin')

        if use_int8_kv_cache:
            t = fromfile(
                dir_path, 'model.layers.' + str(i) +
                '.attention.query_key_value.scale_y_quant_orig.bin', [1],
                np.float32)
            tensorrt_llm_gpt_j.layers[
                i].attention.kv_orig_quant_scale.value = 1.0 / t
            tensorrt_llm_gpt_j.layers[i].attention.kv_quant_orig_scale.value = t

        if enable_fp8_qdq:
            tensorrt_llm_gpt_j.layers[
                i].mlp.proj.activation_scaling_factor.value = np.array(
                    [scaling_factors['proj_act'][i]], dtype=fake_fp8_sf_dt)
            tensorrt_llm_gpt_j.layers[
                i].mlp.proj.weights_scaling_factor.value = np.array(
                    [scaling_factors['proj_weights'][i]], dtype=fake_fp8_sf_dt)

    tok = time.time()
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    tensorrt_llm.logger.info(f'Weights loaded. Total time: {t}')
def load_from_hf_gpt_j(tensorrt_llm_gpt_j: GPTJForCausalLM,
                       hf_gpt_j,
                       fp16=False,
                       scaling_factors=None):
    """Copy weights from a HuggingFace GPT-J model into a TRT-LLM model.

    Handles three export flavours: plain float (fp16/fp32), INT4/INT8
    weight-only quantization (via the fastertransformer symmetric
    quantizer), and FP8 scaling factors when ``scaling_factors`` is given.
    The target object is mutated in place; nothing is returned.
    """
    # HF state-dict suffixes, position-matched to the TRT-LLM attribute
    # paths in the list below (index 2 = fc weight, index 4 = proj weight).
    hf_model_gptj_block_names = [
        "ln_1.weight",
        "ln_1.bias",
        "mlp.fc_in.weight",
        "mlp.fc_in.bias",
        "mlp.fc_out.weight",
        "mlp.fc_out.bias",
    ]
    tensorrt_llm_model_gptj_block_names = [
        "input_layernorm.weight",
        "input_layernorm.bias",
        "mlp.fc.weight",
        "mlp.fc.bias",
        "mlp.proj.weight",
        "mlp.proj.bias",
    ]
    quant_mode = getattr(tensorrt_llm_gpt_j, 'quant_mode', QuantMode(0))
    # NOTE(review): plugin_weight_only_quant_type stays unbound when
    # quant_mode is neither int8 nor int4 weight-only; it is only read
    # under use_weight_only below, so that is currently safe — confirm.
    if quant_mode.is_int8_weight_only():
        plugin_weight_only_quant_type = torch.int8
    elif quant_mode.is_int4_weight_only():
        plugin_weight_only_quant_type = torch.quint4x2
    # Do we use INT4/INT8 weight-only?
    use_weight_only = quant_mode.is_weight_only()
    tensorrt_llm.logger.info('Loading weights from HF GPT-J...')
    tik = time.time()
    torch_dtype = torch.float16 if fp16 else torch.float32
    hf_gpt_j_state_dict = hf_gpt_j.state_dict()
    # Token embedding table.
    v = hf_gpt_j_state_dict.get('transformer.wte.weight')
    tensorrt_llm_gpt_j.embedding.weight.value = v.to(torch_dtype).cpu().numpy()
    n_layer = hf_gpt_j.config.n_layer
    for layer_idx in range(n_layer):
        prefix = "transformer.h." + str(layer_idx) + "."
        # LayerNorm and MLP tensors, copied by position-matched name lists.
        for idx, hf_attr in enumerate(hf_model_gptj_block_names):
            v = hf_gpt_j_state_dict.get(prefix + hf_attr)
            layer = attrgetter(tensorrt_llm_model_gptj_block_names[idx])(
                tensorrt_llm_gpt_j.layers[layer_idx])
            # FP8 scaling factors for the fc / proj linears, when provided.
            if idx == 2 and scaling_factors:
                tensorrt_llm_gpt_j.layers[
                    layer_idx].mlp.fc.activation_scaling_factor.value = np.array(
                        [scaling_factors['fc_act'][layer_idx]],
                        dtype=np.float32)
                tensorrt_llm_gpt_j.layers[
                    layer_idx].mlp.fc.weights_scaling_factor.value = np.array(
                        [scaling_factors['fc_weights'][layer_idx]],
                        dtype=np.float32)
            elif idx == 4 and scaling_factors:
                tensorrt_llm_gpt_j.layers[
                    layer_idx].mlp.proj.activation_scaling_factor.value = np.array(
                        [scaling_factors['proj_act'][layer_idx]],
                        dtype=np.float32)
                tensorrt_llm_gpt_j.layers[
                    layer_idx].mlp.proj.weights_scaling_factor.value = np.array(
                        [scaling_factors['proj_weights'][layer_idx]],
                        dtype=np.float32)
            # Weight-only path quantizes the two MLP weight matrices.
            if use_weight_only and (idx == 2 or idx == 4):
                processed_torch_weights, torch_weight_scales = \
                    torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                    v.transpose(0, 1).contiguous(), plugin_weight_only_quant_type
                )
                layer.value = processed_torch_weights.numpy()
                if idx == 2:
                    scales = tensorrt_llm_gpt_j.layers[
                        layer_idx].mlp.fc.per_channel_scale
                elif idx == 4:
                    scales = tensorrt_llm_gpt_j.layers[
                        layer_idx].mlp.proj.per_channel_scale
                scales.value = torch_weight_scales.numpy()
            else:
                setattr(layer, 'value', v.to(torch_dtype).cpu().numpy())
        # Attention QKV Linear
        # concatenate the Q, K, V layers weights.
        q_weights = hf_gpt_j_state_dict.get(prefix + "attn.q_proj.weight")
        k_weights = hf_gpt_j_state_dict.get(prefix + "attn.k_proj.weight")
        v_weights = hf_gpt_j_state_dict.get(prefix + "attn.v_proj.weight")
        qkv_weights = torch.cat((q_weights, k_weights, v_weights))
        layer = attrgetter("attention.qkv.weight")(
            tensorrt_llm_gpt_j.layers[layer_idx])
        if use_weight_only:
            processed_torch_weights, torch_weight_scales = \
                torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                qkv_weights.transpose(0, 1).contiguous(), plugin_weight_only_quant_type)
            layer.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[
                layer_idx].attention.qkv.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            setattr(layer, "value", qkv_weights.to(torch_dtype).cpu().numpy())
        if scaling_factors:
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.qkv.activation_scaling_factor.value = np.array(
                    [scaling_factors['qkv_act'][layer_idx]], dtype=np.float32)
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.qkv.weights_scaling_factor.value = np.array(
                    [scaling_factors['qkv_weights'][layer_idx]],
                    dtype=np.float32)
        # FP8 KV-cache scales: orig->quant is the factor, quant->orig its inverse.
        if quant_mode.has_fp8_kv_cache():
            if scaling_factors:
                tensorrt_llm_gpt_j.layers[
                    layer_idx].attention.kv_orig_quant_scale.value = np.array(
                        [scaling_factors['qkv_output'][layer_idx]],
                        dtype=np.float32)
                tensorrt_llm_gpt_j.layers[
                    layer_idx].attention.kv_quant_orig_scale.value = np.array(
                        [1.0 / scaling_factors['qkv_output'][layer_idx]],
                        dtype=np.float32)
        # Attention Dense (out_proj) Linear
        v = hf_gpt_j_state_dict.get(prefix + "attn.out_proj.weight")
        layer = attrgetter("attention.dense.weight")(
            tensorrt_llm_gpt_j.layers[layer_idx])
        if use_weight_only:
            processed_torch_weights, torch_weight_scales = \
                torch.ops.fastertransformer.symmetric_quantize_last_axis_of_batched_matrix(
                v.transpose(0, 1).contiguous(), plugin_weight_only_quant_type)
            layer.value = processed_torch_weights.numpy()
            scales = tensorrt_llm_gpt_j.layers[
                layer_idx].attention.dense.per_channel_scale
            scales.value = torch_weight_scales.numpy()
        else:
            setattr(layer, "value", v.to(torch_dtype).cpu().numpy())
        if scaling_factors:
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.dense.activation_scaling_factor.value = np.array(
                    [scaling_factors['dense_act'][layer_idx]], dtype=np.float32)
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.dense.weights_scaling_factor.value = np.array(
                    [scaling_factors['dense_weights'][layer_idx]],
                    dtype=np.float32)
    # Final LayerNorm and LM head (GPT-J's lm_head has a bias).
    v = hf_gpt_j_state_dict.get('transformer.ln_f.weight')
    tensorrt_llm_gpt_j.ln_f.weight.value = v.to(torch_dtype).cpu().numpy()
    v = hf_gpt_j_state_dict.get('transformer.ln_f.bias')
    tensorrt_llm_gpt_j.ln_f.bias.value = v.to(torch_dtype).cpu().numpy()
    v = hf_gpt_j_state_dict.get('lm_head.weight')
    tensorrt_llm_gpt_j.lm_head.weight.value = v.to(torch_dtype).cpu().numpy()
    v = hf_gpt_j_state_dict.get('lm_head.bias')
    tensorrt_llm_gpt_j.lm_head.bias.value = v.to(torch_dtype).cpu().numpy()
    tok = time.time()
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    tensorrt_llm.logger.info(f'Weights loaded. Total time: {t}')
def load_from_awq_gpt_j(tensorrt_llm_gpt_j: GPTJForCausalLM,
                        awq_gpt_j,
                        config,
                        mapping=Mapping(),
                        fp16=False,
                        group_size=128,
                        ft_model_dir=None):
    """Load AWQ (INT4 group-wise) quantized GPT-J weights into a TRT-LLM model.

    ``awq_gpt_j`` is a state dict carrying ``*.weight_quantizer._amax`` and
    ``*.input_quantizer._pre_quant_scale`` entries alongside the weights
    (an AWQ/quantizer export). Weights are re-smoothed, packed to INT4 and
    assigned in place together with their scales. ``ft_model_dir`` is only
    required when the quant mode enables an INT8 KV cache.
    """
    # AWQ state-dict suffixes, position-matched to the TRT-LLM attribute
    # paths below; the weight matrices themselves are handled separately.
    awq_gptj_block_names = [
        "ln_1.weight",
        "ln_1.bias",
        "mlp.fc_in.bias",
        "mlp.fc_out.bias",
    ]
    tensorrt_llm_model_gptj_block_names = [
        "input_layernorm.weight",
        "input_layernorm.bias",
        "mlp.fc.bias",
        "mlp.proj.bias",
    ]
    def fromfile(dir_path, name, shape=None, dtype=None):
        # Read a raw binary tensor dump; None when the file is missing.
        p = dir_path + '/' + name
        if Path(p).exists():
            t = np.fromfile(p, dtype=dtype)
            if shape is not None:
                t = t.reshape(shape)
            return t
        return None
    quant_mode = getattr(tensorrt_llm_gpt_j, 'quant_mode', QuantMode(0))
    # Int8 KV cache
    use_int8_kv_cache = quant_mode.has_int8_kv_cache()
    packer = torch.ops.fastertransformer.pack_int8_tensor_to_packed_int4
    preprocessor = torch.ops.fastertransformer.preprocess_weights_for_mixed_gemm
    tensorrt_llm.logger.info('Loading weights from AWQ GPT-J...')
    tik = time.time()
    torch_dtype = torch.float16 if fp16 else torch.float32
    def AWQ_quantize_pack_preprocess(weight, scale):
        # Quantize to signed 4-bit range [-8, 7], pack two nibbles per
        # byte, then preprocess for the mixed-GEMM kernel layout.
        scale = scale.repeat_interleave(group_size, dim=0)
        weight = weight / scale
        qweight_int8 = torch.clamp(torch.round(weight.cuda()).char(), -8, 7)
        int4_weight = packer(qweight_int8.cpu())
        int4_weight = preprocessor(int4_weight, torch.quint4x2)
        return int4_weight.view(torch.int8).cpu().numpy()
    def process_and_assign_weight(awq_gpt_j, mPrefix, mOp, tp_dim=0):
        # Slice the weight/amax for this TP rank along tp_dim, derive the
        # group-wise scale from amax, and assign qweight/scale/pre-scale.
        weight = awq_gpt_j[mPrefix + ".weight"].T.contiguous()
        [k, n] = weight.shape
        weight = weight.split(weight.shape[tp_dim] // mapping.tp_size,
                              dim=tp_dim)[mapping.tp_rank]
        amax = awq_gpt_j[mPrefix + ".weight_quantizer._amax"].reshape(
            (n, int(k / group_size))).T.contiguous()
        amax = amax.split(amax.shape[tp_dim] // mapping.tp_size,
                          dim=tp_dim)[mapping.tp_rank]
        pre_quant_scale = awq_gpt_j[
            mPrefix + ".input_quantizer._pre_quant_scale"].reshape((1, k))
        if tp_dim == 0:
            pre_quant_scale = pre_quant_scale.split(k // mapping.tp_size,
                                                    dim=1)[mapping.tp_rank]
        # amax / 8: INT4 symmetric range is [-8, 7].
        scale = amax / 8.0
        mOp.qweight.value = AWQ_quantize_pack_preprocess(weight, scale)
        mOp.scale.value = scale.to(torch_dtype).cpu().numpy()
        mOp.pre_quant_scale.value = pre_quant_scale.to(
            torch_dtype).cpu().numpy()
    def deSmooth(weight, pre_quant_scale):
        # Undo the per-input-channel smoothing applied at export time.
        [k, n] = weight.shape
        pre_quant_scale = pre_quant_scale.repeat(
            (n, 1)).transpose(1, 0).contiguous()
        weight = weight * pre_quant_scale
        return weight
    def reSmooth(weight, pre_quant_scale):
        # Apply a (new) per-input-channel smoothing scale.
        [k, n] = weight.shape
        pre_quant_scale = pre_quant_scale.repeat(
            (n, 1)).transpose(1, 0).contiguous()
        weight = weight / pre_quant_scale
        return weight
    def get_scale(weight):
        # Recompute group-wise amax/8 scales from the (re-smoothed) weight.
        weight = weight.T.contiguous()
        [n, k] = weight.shape
        weight = weight.reshape(n, int(k / group_size), group_size)
        weight = torch.abs(weight.reshape(-1, group_size))
        amax, idx = weight.max(1)
        amax = amax.reshape(n, int(k / group_size)).T.contiguous()
        return amax / 8
    def reSmooth_and_get_scale(weight, pre_quant_scale, avg_pre_quant_scale):
        # Q/K/V must share one pre-quant scale, so re-smooth each with the
        # averaged scale and recompute the quantization scales.
        weight = deSmooth(weight, pre_quant_scale)
        weight = reSmooth(weight, avg_pre_quant_scale)
        scale = get_scale(weight)
        return weight, scale
    def process_and_assign_qkv_weight(awq_gpt_j, prefix, mOp):
        # Fuse Q, K, V into a single column-parallel qkv linear.
        q_weight = awq_gpt_j[prefix + "attn.q_proj.weight"].T.contiguous()
        k_weight = awq_gpt_j[prefix + "attn.k_proj.weight"].T.contiguous()
        v_weight = awq_gpt_j[prefix + "attn.v_proj.weight"].T.contiguous()
        k = q_weight.shape[0]
        q_weight = q_weight.split(q_weight.shape[1] // mapping.tp_size,
                                  dim=1)[mapping.tp_rank]
        k_weight = k_weight.split(k_weight.shape[1] // mapping.tp_size,
                                  dim=1)[mapping.tp_rank]
        v_weight = v_weight.split(v_weight.shape[1] // mapping.tp_size,
                                  dim=1)[mapping.tp_rank]
        q_pre_quant_scale = awq_gpt_j[
            prefix + "attn.q_proj.input_quantizer._pre_quant_scale"].reshape(
                (1, k))
        k_pre_quant_scale = awq_gpt_j[
            prefix + "attn.k_proj.input_quantizer._pre_quant_scale"].reshape(
                (1, k))
        v_pre_quant_scale = awq_gpt_j[
            prefix + "attn.v_proj.input_quantizer._pre_quant_scale"].reshape(
                (1, k))
        # Shared input scale for the fused qkv: plain average of the three.
        qkv_pre_quant_scale = (q_pre_quant_scale + k_pre_quant_scale +
                               v_pre_quant_scale) / 3.0
        q_weight, q_scale = reSmooth_and_get_scale(q_weight, q_pre_quant_scale,
                                                   qkv_pre_quant_scale)
        k_weight, k_scale = reSmooth_and_get_scale(k_weight, k_pre_quant_scale,
                                                   qkv_pre_quant_scale)
        v_weight, v_scale = reSmooth_and_get_scale(v_weight, v_pre_quant_scale,
                                                   qkv_pre_quant_scale)
        qkv_weights = torch.cat((q_weight, k_weight, v_weight), dim=1)
        qkv_scale = torch.cat((q_scale, k_scale, v_scale), dim=1)
        mOp.pre_quant_scale.value = qkv_pre_quant_scale.to(
            torch_dtype).cpu().numpy()
        mOp.qweight.value = AWQ_quantize_pack_preprocess(qkv_weights, qkv_scale)
        mOp.scale.value = qkv_scale.to(torch_dtype).cpu().numpy()
    #check if we need to pad vocab
    # The lm_head GEMM requires the vocab dimension to be a multiple of 64.
    v = awq_gpt_j.get('transformer.wte.weight')
    [vocab_size, k] = v.shape
    pad_vocab = False
    pad_vocab_size = vocab_size
    if vocab_size % 64 != 0:
        pad_vocab = True
        pad_vocab_size = int((vocab_size + 63) / 64) * 64
    if pad_vocab:
        new_v = torch.zeros([pad_vocab_size, k])
        new_v[:vocab_size, :] = v
        v = new_v
    tensorrt_llm_gpt_j.embedding.weight.value = v.to(torch_dtype).cpu().numpy()
    n_layer = config["n_layer"]
    for layer_idx in range(n_layer):
        prefix = "transformer.h." + str(layer_idx) + "."
        tensorrt_llm.logger.info(f'Process weights in layer: {layer_idx}')
        for idx, awq_attr in enumerate(awq_gptj_block_names):
            v = awq_gpt_j[prefix + awq_attr]
            # NOTE(review): these two branches use ``mapping.rank`` while
            # every other split in this function uses ``mapping.tp_rank``;
            # confirm this is intentional (they coincide only without PP).
            if awq_attr == "mlp.fc_in.bias":
                v = v.split(v.shape[0] // mapping.tp_size, dim=0)[mapping.rank]
            elif awq_attr == "mlp.fc_out.bias":
                # Row-parallel bias: only rank 0 keeps a non-zero copy so
                # the all-reduce does not add it tp_size times.
                v = torch.zeros_like(v) if mapping.rank != 0 else v
            layer = attrgetter(tensorrt_llm_model_gptj_block_names[idx])(
                tensorrt_llm_gpt_j.layers[layer_idx])
            setattr(layer, 'value', v.to(torch_dtype).cpu().numpy())
        # Attention QKV Linear
        # concatenate the Q, K, V layers weights.
        process_and_assign_qkv_weight(
            awq_gpt_j, prefix,
            tensorrt_llm_gpt_j.layers[layer_idx].attention.qkv)
        # Attention Dense (out_proj) Linear
        mPrefix = prefix + "attn.out_proj"
        mOp = tensorrt_llm_gpt_j.layers[layer_idx].attention.dense
        process_and_assign_weight(awq_gpt_j, mPrefix, mOp, 0)
        # MLP Dense (mlp.fc) Linear
        mPrefix = prefix + "mlp.fc_in"
        mOp = tensorrt_llm_gpt_j.layers[layer_idx].mlp.fc
        process_and_assign_weight(awq_gpt_j, mPrefix, mOp, 1)
        # MLP Dense (mlp.proj) Linear
        mPrefix = prefix + "mlp.fc_out"
        mOp = tensorrt_llm_gpt_j.layers[layer_idx].mlp.proj
        process_and_assign_weight(awq_gpt_j, mPrefix, mOp, 0)
        if use_int8_kv_cache:
            assert ft_model_dir, "You must pass --ft_model_dir to tell TRT-LLM where to look for scales of INT8 kv cache."
            t = fromfile(
                ft_model_dir, 'model.layers.' + str(layer_idx) +
                '.attention.query_key_value.scale_y_quant_orig.bin', [1],
                np.float32)
            assert t is not None, f"{ft_model_dir} does not contain model.layers.{layer_idx}.attention.query_key_value.scale_y_quant_orig.bin"
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.kv_orig_quant_scale.value = 1.0 / t
            tensorrt_llm_gpt_j.layers[
                layer_idx].attention.kv_quant_orig_scale.value = t
    v = awq_gpt_j['transformer.ln_f.weight']
    tensorrt_llm_gpt_j.ln_f.weight.value = v.to(torch_dtype).cpu().numpy()
    v = awq_gpt_j['transformer.ln_f.bias']
    tensorrt_llm_gpt_j.ln_f.bias.value = v.to(torch_dtype).cpu().numpy()
    #lm_head
    if pad_vocab:
        # Padded vocab: pad weight/amax/bias to pad_vocab_size, then split
        # column-parallel; padded amax rows are 1.0 to avoid divide-by-zero.
        weight = awq_gpt_j['lm_head.weight']
        [vocab_size, k] = weight.shape
        new_weight = torch.zeros([pad_vocab_size, k])
        new_weight[:vocab_size, :] = weight
        new_weight = new_weight.T.contiguous()
        new_weight = new_weight.split(new_weight.shape[1] // mapping.tp_size,
                                      dim=1)[mapping.tp_rank]
        amax = awq_gpt_j['lm_head.weight_quantizer._amax'].reshape(
            [vocab_size, int(k / group_size)])
        new_amax = torch.ones([pad_vocab_size, int(k / group_size)])
        new_amax[:vocab_size, :] = amax
        new_amax = new_amax.T.contiguous()
        new_amax = new_amax.split(new_amax.shape[1] // mapping.tp_size,
                                  dim=1)[mapping.tp_rank]
        new_scale = new_amax / 8
        tensorrt_llm_gpt_j.lm_head.qweight.value = AWQ_quantize_pack_preprocess(
            new_weight, new_scale)
        tensorrt_llm_gpt_j.lm_head.scale.value = new_scale.to(
            torch_dtype).cpu().numpy()
        tensorrt_llm_gpt_j.lm_head.pre_quant_scale.value = awq_gpt_j[
            'lm_head.input_quantizer._pre_quant_scale'].to(
                torch_dtype).cpu().numpy()
        bias = awq_gpt_j['lm_head.bias']
        new_bias = torch.zeros([pad_vocab_size])
        new_bias[:vocab_size] = bias
        new_bias = new_bias.split(pad_vocab_size // mapping.tp_size,
                                  dim=0)[mapping.tp_rank]
        tensorrt_llm_gpt_j.lm_head.bias.value = new_bias.to(
            torch_dtype).cpu().numpy()
    else:
        mPrefix = "lm_head"
        mOp = tensorrt_llm_gpt_j.lm_head
        process_and_assign_weight(awq_gpt_j, mPrefix, mOp, 1)
        v = awq_gpt_j['lm_head.bias']
        tensorrt_llm_gpt_j.lm_head.bias.value = v.to(torch_dtype).cpu().numpy()
    tok = time.time()
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    tensorrt_llm.logger.info(f'Weights loaded. Total time: {t}')
| NVIDIA/TensorRT-LLM | examples/gptj/weight.py | weight.py | py | 40,329 | python | en | code | 3,328 | github-code | 36 |
8325185032 | import re
def getWholeIngreSet(recipe):
    """Collect recipe lines that look like measurable ingredients.

    Reads the file at *recipe* line by line.  Each line is stripped,
    truncated at the first comma, and kept only when it begins with an
    ASCII digit (e.g. ``'1 cup unsalted butter'``).  Returns the kept
    lines as a set.
    """
    ingredients = set()
    with open(recipe, 'r') as handle:
        for raw_line in handle:
            entry = raw_line.strip()
            if not entry:
                continue
            # Keep only the portion before the first comma (if any).
            entry = entry.partition(',')[0]
            if entry and entry[0] in '0123456789':
                ingredients.add(entry)
    return ingredients
def getCalculableSetAndDicts(wholeIngreSet):
    """Select the calculable ingredients from *wholeIngreSet*.

    An item is calculable when a known food/unit entry matches it, or when
    a known food appears together with 'ounce' or 'gram'.  Returns a
    two-element list: [set of matched items, list of the dict entries used].

    NOTE(review): ``dicts`` is not defined in this module — it is presumably
    a module-level table of ``{'food': ..., 'unit': ...}`` entries provided
    elsewhere; confirm before use.
    """
    calculableSet = set()
    dictsForCalculate = list()
    # BUG FIX: removed the dead ``unitChangedDicts = dictChangeUnit()``
    # call — ``dictChangeUnit`` is undefined and the result was unused,
    # so the call raised NameError before any matching could happen.
    for item in wholeIngreSet:
        for d in dicts:
            if d['food'].upper() in item.upper() and d['unit'] in item:
                calculableSet.add(item)
                dictsForCalculate.append(d)
            elif d['food'] in item and 'ounce' in item:
                calculableSet.add(item)
                dictsForCalculate.append(d)
            elif d['food'] in item and 'gram' in item:
                calculableSet.add(item)
                # BUG FIX: was ``dictsUsedForCalculate.append(d)`` — an
                # undefined name; the gram branch always raised NameError.
                dictsForCalculate.append(d)
    return [calculableSet, dictsForCalculate]
# Module-level pipeline step: split the full ingredient set into the
# calculable subset plus the dictionary entries that matched them.
# NOTE(review): ``wholeIngreSet`` is never assigned at module scope in this
# file, so importing this module raises NameError as written — presumably a
# ``wholeIngreSet = getWholeIngreSet(<recipe file>)`` call is missing; confirm.
setAndDicts = getCalculableSetAndDicts(wholeIngreSet)
calculableSet,dictsForCalculate = setAndDicts
def getNotCalculableSet(wholeIngreSet, calculableSet):
    """Return the ingredients that could not be matched for calculation."""
    return wholeIngreSet.difference(calculableSet)
| twu38/Calorie_Counter | IngredientHandle.py | IngredientHandle.py | py | 1,594 | python | en | code | 0 | github-code | 36 |
540233672 | import torch
import numpy as np
import encoding_tree
import time
import copy
def find(x, parent, dep):
    """Return the depth of node *x* in the tree encoded by *parent*.

    ``parent[x] == -1`` marks a root (depth 0).  Results are memoized in
    the mutable *dep* array, where a non-positive entry means
    "not computed yet".
    """
    if parent[x] == -1:
        # Roots always get depth 0 (recorded unconditionally).
        dep[x] = 0
    elif dep[x] <= 0:
        # Not cached yet: one deeper than the parent.
        dep[x] = find(parent[x], parent, dep) + 1
    return dep[x]
def get_tree(input_):
    """Build a k-level hierarchical encoding tree for a graph.

    *input_* is a ``(data, k)`` pair where ``data`` is a PyG-style graph.
    Returns ``(parent, dep)``: the parent index of every tree node and
    each node's depth, both as numpy arrays.
    """
    data, k = input_
    edge_list = data.edge_index.transpose(0, 1).numpy()
    graph = encoding_tree.Graph(edges=edge_list, n=data.num_nodes)
    tree = encoding_tree.Tree(G=graph)
    parent = np.array(tree.k_HCSE(k))
    # -1 marks "depth not computed yet"; find() fills the array in place.
    dep = np.full(parent.size, -1)
    for node in range(parent.size):
        dep[node] = find(node, parent, dep)
    return parent, dep
def graph2tree(input_):
    """Augment a graph ``data`` object with its k-level encoding-tree structure.

    *input_* is a ``(data, k)`` pair.  The function appends the tree's
    internal nodes to ``data.x`` (zero features), records per-depth node
    masks in ``data.layer_mask``, child->parent pooling index pairs in
    ``data['pool<d>']``, and concatenates the edge lists lifted to every
    tree level into ``data.edge_index``.  Returns the mutated ``data``.
    """
    data, k = input_
    parent, dep = get_tree((data, k))
    # Sort nodes by decreasing depth (leaves first) while keeping their id.
    dt = np.dtype([('dep', int), ('id', int)])
    node = [(-d, i) for i, d in enumerate(dep)]
    node = np.array(node, dtype=dt)
    node.sort(order='dep')
    data.num_edges = data.edge_index.shape[1]
    data.num_nodes = len(parent)
    # Internal tree nodes get zero feature rows appended after the leaves.
    data.x = torch.cat([data.x, torch.zeros(data.num_nodes - data.x.shape[0], data.x.shape[1])], dim=0)
    d = 0
    st, pn = 0, 0
    data.layer_mask = torch.zeros(k + 1, len(parent), dtype=torch.bool)
    # Sweep the depth-sorted nodes; each run of equal depth is one layer.
    # NOTE(review): this relies on nodes of equal depth occupying a
    # contiguous id range after sorting — confirmed only by construction
    # of ``parent`` in encoding_tree; verify if that module changes.
    for i in range(node.size):
        pn += 1
        if i + 1 == node.size or node[i][0] != node[i + 1][0]:
            data.layer_mask[d, st:pn] = True
            if i + 1 != node.size:
                # Child->parent pooling pairs for every non-root layer.
                t = torch.zeros(2, pn - st, dtype=torch.int64)
                for j in range(0, pn - st):
                    t[0, j], t[1, j] = j + st, parent[j + st]
                data['pool' + str(d)] = t
            d += 1
            st = pn
    # Lift the original edges level by level by mapping endpoints to parents.
    layer_edge = [data.edge_index]
    for i in range(k - 1):
        edge = copy.deepcopy(layer_edge[-1])
        edge = edge.reshape(-1)
        for j in range(edge.shape[0]):
            edge[j] = parent[edge[j]]
        edge = edge.reshape(2, -1)
        layer_edge.append(edge)
    data.edge_index = torch.cat(layer_edge, dim=1)
    return data
| zzq229-creator/entpool | data/graph2tree.py | graph2tree.py | py | 1,985 | python | en | code | 0 | github-code | 36 |
29620128812 | import mysql.connector
import json
# pip3 install mysql-connector-python
# 连接数据库
# MySQL connection settings.
# NOTE(review): credentials are hard-coded; consider moving them to an
# environment variable or a config file before sharing this script.
config = {
    'user': 'root',
    'password': 'xxx',
    'host': '192.168.137.129',
    'port': '3306',
    'database': 'db_example'
}
json_data = {}
# Load the first scraped company record from data.json.
with open('./data.json', 'r', encoding='utf8')as fp:
    json_data = json.load(fp)[0]
    print(json_data)
    fp.close()  # redundant: the with-block already closes the file
con = mysql.connector.connect(**config)
mycursor = con.cursor(buffered=True)
# Look up an existing company row by its social credit code.
val = (json_data["businessRegistration"]["socialCreditCode"],)
sql = "SELECT * FROM company where social_credit_code = %s "
print(sql % val)
mycursor.execute(sql, val)
data = mycursor.fetchone()  # fetchone() returns a single record (or None)
if data:
    print(data)
    # Company exists: refresh the base company record.
    updateVal = (json_data["companyName"], json_data["companyPhone"],
                 json_data["companyEmail"], json_data["officialWebsite"], json_data["companyAddress"], json_data["companyProfile"], data[0])
    updateSql = "UPDATE company SET company_name = %s, company_phone = %s, company_email = %s, official_website = %s, company_address = %s, company_profile = %s,update_at = now() WHERE id = %s ;"
    print(updateSql % updateVal)
    mycursor.execute(updateSql, updateVal)
    companyRegistration = json_data["businessRegistration"]
    # Strip the "万(元)" unit suffix and thousands separators.
    registeredCapital = companyRegistration["registeredCapital"].replace(
        "万(元)", "").replace(",", "")
    paidInCapital = companyRegistration["paidInCapital"]
    if '-' == paidInCapital:
        paidInCapital = None
    # Operating period is formatted "<begin> 至 <end>" ("至" = "to").
    operatingPeriod = companyRegistration["operatingPeriod"]
    operatingPeriodList = operatingPeriod.split("至")
    operatingPeriodBegin = operatingPeriodList[0].strip()
    operatingPeriodEnd = operatingPeriodList[1].strip()
    updateDetailVal = (companyRegistration["legalRepresentative"], companyRegistration["operatingStatus"], registeredCapital,
                       paidInCapital, companyRegistration["industry"], companyRegistration[
                           "socialCreditCode"], companyRegistration["taxpayerIdentificationNumber"],
                       companyRegistration["businessRegistrationNumber"], companyRegistration[
                           "organizationCode"], companyRegistration["registrationAuthority"],
                       companyRegistration["establishmentDate"], companyRegistration[
                           "enterpriseType"], operatingPeriodBegin, operatingPeriodEnd,
                       companyRegistration["administrativeDivisions"], companyRegistration[
                           "annualInspectionDate"], companyRegistration["registeredAddress"],
                       companyRegistration["businessScope"], data[0])
    updateDetailSql = "UPDATE db_example.company_registration SET legal_representative = %s, operating_status = %s, registered_capital = %s, paidIn_capital = %s, industry = %s, social_credit_code = %s, taxpayer_identification_number = %s, company_registration_number = %s, organization_code = %s, registration_authority = %s, establishment_date = %s, enterprise_type = %s, operating_period_begin = %s, operating_period_end = %s, administrative_divisions = %s, annualInspection_date = %s, registered_address = %s, business_scope = %s, update_at = now() WHERE company_id = %s;"
    print(updateDetailSql % updateDetailVal)
    company = mycursor.execute(updateDetailSql, updateDetailVal)
else:
    # No such company yet: insert the base record first.
    insertVal = (json_data["businessRegistration"]["socialCreditCode"], json_data["companyName"], json_data["companyPhone"],
                 json_data["companyEmail"], json_data["officialWebsite"], json_data["companyAddress"], json_data["companyProfile"],)
    insertSql = "INSERT INTO company (social_credit_code, company_name, company_phone, company_email, official_website, company_address, company_profile) VALUES (%s, %s, %s, %s, %s, %s, %s);"
    print(insertSql % insertVal)
    company = mycursor.execute(insertSql, insertVal)
    # Primary-key id of the row just inserted
    print(mycursor.lastrowid)
    companyRegistration = json_data["businessRegistration"]
    registeredCapital = companyRegistration["registeredCapital"].replace(
        "万(元)", "").replace(",", "")
    paidInCapital = companyRegistration["paidInCapital"]
    if '-' == paidInCapital:
        paidInCapital = None
    operatingPeriod = companyRegistration["operatingPeriod"]
    operatingPeriodList = operatingPeriod.split("至")
    operatingPeriodBegin = operatingPeriodList[0].strip()
    operatingPeriodEnd = operatingPeriodList[1].strip()
    insertDetailVal = (mycursor.lastrowid, companyRegistration["legalRepresentative"], companyRegistration["operatingStatus"], registeredCapital,
                       paidInCapital, companyRegistration["industry"], companyRegistration[
                           "socialCreditCode"], companyRegistration["taxpayerIdentificationNumber"],
                       companyRegistration["businessRegistrationNumber"], companyRegistration[
                           "organizationCode"], companyRegistration["registrationAuthority"],
                       companyRegistration["establishmentDate"], companyRegistration[
                           "enterpriseType"], operatingPeriodBegin, operatingPeriodEnd,
                       companyRegistration["administrativeDivisions"], companyRegistration[
                           "annualInspectionDate"], companyRegistration["registeredAddress"],
                       companyRegistration["businessScope"])
    insertDetailSql = "INSERT INTO company_registration (company_id, legal_representative, operating_status, registered_capital, paidIn_capital, industry, social_credit_code, taxpayer_identification_number, company_registration_number, organization_code, registration_authority, establishment_date, enterprise_type, operating_period_begin, operating_period_end, administrative_divisions, annualInspection_date, registered_address, business_scope) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"
    print(insertDetailSql % insertDetailVal)
    company = mycursor.execute(insertDetailSql, insertDetailVal)
# Commit both statements in one transaction.
# NOTE(review): mycursor and con are never closed; add mycursor.close()
# and con.close() (or a try/finally) when touching this script next.
con.commit()
| hua345/myBlog | python/mysql/index.py | index.py | py | 6,087 | python | en | code | 0 | github-code | 36 |
74128345063 | #!/usr/bin/python3
"""Error code #0"""
import urllib.request
import urllib.error
import sys
if __name__ == "__main__":
    # Exactly one command-line argument is expected: the URL to fetch.
    if len(sys.argv) != 2:
        sys.exit(1)
    target_url = sys.argv[1]
    try:
        # Print the response body decoded as UTF-8 on success.
        with urllib.request.urlopen(target_url) as response:
            print(response.read().decode('utf-8'))
    except urllib.error.HTTPError as http_err:
        # On an HTTP error status, report only the code.
        print("Error code: {}".format(http_err.code))
| GreenMoCh/alx-higher_level_programming | 0x11-python-network_1/3-error_code.py | 3-error_code.py | py | 441 | python | en | code | 0 | github-code | 36 |
17013470871 | import datetime
from components import line_bot_api
from utils import utils_database
import json
from linebot.models import (
TextSendMessage,
)
def get_event_info(event):
    """Flatten a LINE webhook message event into a plain dict.

    Group-related fields (group id/name, sender name/avatar) are looked up
    through ``line_bot_api`` only for group messages; otherwise they are
    empty strings.  ``image_set_id`` is the string ``'null'`` when the
    message is not part of an image set.
    """
    msg_json = event.message.as_json_dict()
    # LINE timestamps are in milliseconds since the epoch.
    moment = datetime.datetime.fromtimestamp(event.timestamp / 1000.0)
    full_stamp = moment.strftime("%Y-%m-%d %H:%M:%S")   # 0. date+time
    day_part = moment.strftime("%Y-%m-%d")              # 1. date
    clock_part = moment.strftime("%H:%M:%S")            # 2. time
    # 'A' before noon, 'P' after (HHMMSS compared numerically to 120000).
    session = 'A' if float(clock_part.replace(':', '')) < 12e4 else 'P'
    source_type = event.source.type
    user_id = event.source.user_id                      # 6. sender id
    if source_type == "group":
        group_id = event.source.group_id                # 4. group id
    else:
        group_id = ""
    if group_id != '':
        summary = line_bot_api.get_group_summary(group_id)
        group_name = summary.group_name                 # 5. group name
        profile = line_bot_api.get_group_member_profile(group_id, event.source.user_id)
        user_name = profile.display_name                # 7. sender display name
        user_img = profile.picture_url
    else:
        group_name = ""
        user_name = ""
        user_img = ""
    if "imageSet" in msg_json.keys():
        image_set_id = msg_json["imageSet"]["id"]
    else:
        image_set_id = 'null'
    return {
        "source_type": source_type,
        "datetime": full_stamp,
        "date": day_part,
        "time": clock_part,
        "session": session,
        "group_id": group_id,
        "group_name": group_name,
        "user_id": user_id,
        "user_name": user_name,
        "user_img": user_img,
        "msg_type": event.message.type,
        "msg_id": event.message.id,
        "image_set_id": image_set_id
    }
def get_img_count(img_event):
    """Determine how many images a LINE message event carries.

    A single image counts as 1.  For an image set, the count is the set's
    total once the whole set is recorded in the database, otherwise 0.
    Returns ``{"status": bool, "img_count": int}`` where ``status`` is True
    when there is something to process (non-zero count, or the set is
    already known to the database).
    """
    is_image_set = "imageSet" in img_event.message.as_json_dict()
    # BUG FIX: db_is_image_set was only assigned on the image-set branch;
    # on the single-image path the final ``or db_is_image_set`` survived
    # purely by boolean short-circuiting.  Initialize it explicitly.
    db_is_image_set = False
    if is_image_set:
        image_set = img_event.message.image_set
        db_is_image_set = utils_database.check_is_image_set_by_id(image_set.id)
        # Count the full set only once the DB has seen all its parts.
        count = image_set.total if db_is_image_set else 0
    else:
        count = 1
    return {
        "status": (count != 0) or db_is_image_set,
        "img_count": count,
    }
def get_user_info(event):
    """Fetch the LINE profile of the user who triggered *event*."""
    uid = event.source.user_id
    profile = line_bot_api.get_profile(uid)
    return {
        "user_id": uid,
        "display_name": profile.display_name,
        "picture_url": profile.picture_url,
    }
def update_group_name():
    """Refresh cached group names for all joined groups.

    Groups whose summary can no longer be fetched (the bot left, or the
    group was disbanded) are flagged in the database instead of aborting
    the sweep.  Always returns ``{"status": True}``.
    """
    for gid in utils_database.get_all_joined_groups():
        try:
            summary = line_bot_api.get_group_summary(gid)
            utils_database.update_group_name_by_group_id(
                group_id=gid, group_name=summary.group_name)
        except Exception:
            # Summary lookup failed: mark the group as disbanded.
            utils_database.set_disbanded_group_by_group_id(
                group_id=gid, note="已解散/disbanded")
    return {"status": True}
def linebot_send_text(reply_token, msg):
    """Reply to *reply_token* with a plain-text message.

    Failures are printed and swallowed so a bad token never crashes the
    webhook handler.
    """
    try:
        line_bot_api.reply_message(reply_token, TextSendMessage(text=msg))
    except Exception as exc:
        print("error: ", str(exc))
| jialiang8931/WRA06-Volunteer-LineBot | src/utils/utils_common.py | utils_common.py | py | 3,726 | python | en | code | 0 | github-code | 36 |
40890241642 |
"""
Python utils for RFC calls to SAP NetWeaver System
"""
import sys
# BUG FIX: the original compared the version *string* (sys.version < '2.4'),
# which misorders versions like '2.10' < '2.4'.  Compare the version tuple.
if sys.version_info < (2, 4):
    print('Wrong Python Version (must be >=2.4) !!!')
    sys.exit(1)
# load the native extensions
import nwsaprfcutil
import sapnwrfc.rfc
from struct import *
from string import *
import re
from types import *
#from copy import deepcopy
# Parameter types: RFC parameter direction codes used when copying
# function-call parameters (see FunctionCall.__init__).
IMPORT = 1
EXPORT = 2
CHANGING = 3
TABLES = 7
# Connection-config keys accepted from the YAML file / rfc_connect(cfg).
# NOTE(review): the gateway/server keys (gwhost, gwserv, tpname) are listed
# here but filtered out again in base.rfc_connect — confirm intent.
CONFIG_OK = ('ashost', 'sysnr', 'client', 'lang', 'user', 'passwd', 'gwhost', 'gwserv', 'tpname', 'lcheck')
# Default YAML configuration file, resolved relative to the working directory.
CONF_FILE = 'sap.yml'
class RFCException(Exception):
    """Error raised by the RFC wrapper layer.

    The offending value/message is kept on ``self.value``; ``str()``
    renders its ``repr``.
    """
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return "%r" % (self.value,)
class base(object):
    """
    Entry point of the package: loads the YAML configuration and opens
    RFC connections to an SAP NetWeaver system.
    """
    # Path of the YAML config file; class attribute so callers can override.
    config_location = CONF_FILE
    # Last-loaded configuration dict, shared by all rfc_connect() calls.
    configuration = {}
    @classmethod
    def load_config(cls):
        # REFACTOR: there is no need to depend on yaml
        # NOTE(review): yaml.load without an explicit Loader executes
        # arbitrary tags on untrusted files; prefer yaml.safe_load.
        import yaml
        cls.configuration = yaml.load(open(cls.config_location, 'rb').read())
        #cls.configuration = yaml.load(open(cls.config_location, 'rb').read())
        return cls.configuration
    @classmethod
    def rfc_connect(cls, cfg=None):
        """Open a connection using the loaded config, optionally overridden
        by the *cfg* dict; returns a ``connection`` wrapper."""
        config = {}
        # pass in the config load from config_location YAML file
        # NOTE(review): gwserv/gwhost/tpname are in CONFIG_OK yet excluded
        # here (only client-connection keys are forwarded) — confirm intent.
        for k, v in cls.configuration.items():
            if k in CONFIG_OK:
                if not k in ('gwserv', 'gwhost', 'tpname', 'loglevel'):
                    config[k] = str(v)
        # Overload the YAML file config with parameters passed to
        # rfc_connect
        if not cfg == None:
            if not type(cfg) == dict:
                raise RFCException("Config passed to rfc_connect must be a Dictionary object")
            for k, v in cfg.items():
                if k in CONFIG_OK:
                    if not k in ('gwserv', 'gwhost', 'tpname', 'loglevel'):
                        config[k] = str(v)
        #conn = sapnwrfcconn.new_connection(config)
        conn = nwsaprfcutil.Conn(config)
        c = connection(conn)
        return c
class connection:
    """Live RFC connection.

    Must not be created by the user — obtained from
    ``sapnwrfc.base.rfc_connect()``.  ``handle`` is the underlying
    ``nwsaprfcutil.Conn`` C-extension object and is reset to ``None``
    once the connection is closed.
    """
    def __init__(self, handle=None):
        self.handle = handle
    def connection_attributes(self):
        """Return the attributes of the open connection."""
        # BUG FIX (idiom): compare against None with ``is`` rather than
        # ``==`` (PEP 8 E711); same change in the two methods below.
        if self.handle is None:
            raise RFCException("Invalid handle (connection_attributes)\n")
        return self.handle.connection_attributes()
    def discover(self, name):
        """Look up the remote function *name*; return a FunctionDescriptor."""
        if self.handle is None:
            raise RFCException("Invalid handle (discover)\n")
        func = self.handle.function_lookup(name)
        f = FunctionDescriptor(func)
        return f
    def close(self):
        """Close the connection and invalidate this wrapper."""
        if self.handle is None:
            raise RFCException("Invalid handle (close)\n")
        rc = self.handle.close()
        self.handle = None
        return rc
class FunctionDescriptor:
    """Metadata wrapper for a remote RFC function.

    Produced by ``sapnwrfc.connection.discover()``; not meant to be
    instantiated directly by user code.
    """
    def __init__(self, handle=None):
        self.handle = handle
        self.name = handle.name
    def create_function_call(self):
        """Create and wrap a fresh, invocable function-call object."""
        return FunctionCall(self.handle.create_function_call())
class FunctionCall:
    """
    Invocable RFC function call — must not be created by the user;
    automatically generated by
    sapnwrfc.FunctionDescriptor.create_function_call().
    Parameter objects are reachable as attributes via __getattr__.
    """
    def __init__(self, handle=None):
        #sys.stderr.write("inside funccall python init\n")
        self.handle = handle
        self.name = self.handle.name
        # Re-wrap every parameter from the descriptor into a typed
        # sapnwrfc.rfc.* object, keyed by parameter name on the handle.
        # NOTE(review): ``self.function_descriptor`` is never assigned, so
        # it resolves through __getattr__ below and is presumably None at
        # this point — confirm that is what the rfc.* constructors expect.
        for k, v in self.handle.function_descriptor.parameters.items():
            # value: {'direction': 1, 'name': 'QUERY_TABLE', 'type': 0, 'len': 30, 'decimals': 0, 'ulen': 60}
            if v['direction'] == IMPORT:
                cpy = sapnwrfc.rfc.Import(self.function_descriptor, v['name'], v['type'], v['len'], v['ulen'], v['decimals'], None)
            elif v['direction'] == EXPORT:
                cpy = sapnwrfc.rfc.Export(self.function_descriptor, v['name'], v['type'], v['len'], v['ulen'], v['decimals'], None)
            elif v['direction'] == CHANGING:
                cpy = sapnwrfc.rfc.Changing(self.function_descriptor, v['name'], v['type'], v['len'], v['ulen'], v['decimals'], None)
            elif v['direction'] == TABLES:
                cpy = sapnwrfc.rfc.Table(self.function_descriptor, v['name'], v['type'], v['len'], v['ulen'], v['decimals'], None)
            else:
                raise RFCException("Unknown parameter type: %d\n" % v['direction'])
            self.handle.parameters[k] = cpy
    def __repr__(self):
        return "<FunctionCall %s instance at 0x%x>" % (self.name, id(self))
    def __getattr__(self, *args, **kwdargs):
        # Expose RFC parameters as attributes; unknown names yield None
        # rather than AttributeError.
        if args[0] in self.handle.parameters:
            return self.handle.parameters[args[0]]
        else:
            return None
    def __call__(self, *args, **kwdargs):
        # REFACTOR: This seems not to make too much sense here ;-)
        print("Hello!\n")
    def invoke(self):
        # Execute the remote call through the C-extension handle.
        return self.handle.invoke()
| piersharding/python-sapnwrfc | sapnwrfc/__init__.py | __init__.py | py | 5,163 | python | en | code | 26 | github-code | 36 |
def recur_fibo(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    Iterative implementation; the previous naive double recursion ran in
    exponential time while producing exactly the same values.
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def print_armstrong(lower, upper):
    """Print every Armstrong number in [lower, upper] inclusive.

    A number is an Armstrong number when the sum of its digits, each
    raised to the digit count, equals the number itself.
    """
    for candidate in range(lower, upper + 1):
        width = len(str(candidate))
        total, rest = 0, candidate
        while rest > 0:
            total += (rest % 10) ** width
            rest //= 10
        if total == candidate:
            print(candidate)
def find_fact(num):
    """Print the factorial of *num*, or an apology for negative input."""
    if num < 0:
        print("Sorry, factorial does not exist for negative numbers")
    elif num == 0:
        print("The factorial of 0 is 1")
    else:
        result = 1
        for factor in range(2, num + 1):
            result *= factor
        print("The factorial of", num, "is", result)
def check_armstrong(num):
    """Report whether *num* is an Armstrong number.

    BUG FIX: the exponent was hard-coded to 3, so the check was only valid
    for 3-digit numbers and disagreed with print_armstrong in this file
    (which raises digits to the digit count).  Now uses the digit count,
    matching print_armstrong for numbers of any length.
    """
    width = len(str(num))
    total, rest = 0, num
    while rest > 0:
        total += (rest % 10) ** width
        rest //= 10
    if num == total:
        print(num, "is an Armstrong number")
    else:
        print(num, "is not an Armstrong number")
def print_prime(lower, upper):
    """Print the primes in [lower, upper] inclusive via trial division."""
    for n in range(lower, upper + 1):
        # Primes are greater than 1.
        if n <= 1:
            continue
        for divisor in range(2, n):
            if n % divisor == 0:
                break
        else:
            # No divisor found: n is prime.
            print(n)
def check_prime(num):
    """Report whether ``num`` is a prime number."""
    if num > 1:
        # Trial division up to sqrt(num) is sufficient (free speed-up over
        # the original scan up to num - 1); output is unchanged.
        for i in range(2, int(num ** 0.5) + 1):
            if num % i == 0:
                print(num, "is not a prime number")
                break
        else:
            # for-else: no divisor found
            print(num, "is a prime number")
    else:
        print(num, "is not a prime number")
def digit_sum(num_1):
    """Print the sum of the decimal digits of ``num_1``.

    Fix: the original body referenced an undefined name ``num`` instead of
    the parameter ``num_1``, so every call raised NameError.
    """
    tot = 0
    num = num_1
    while num > 0:
        tot += num % 10
        num //= 10
    print("The total sum of digits is:", tot)
# Interactive driver: show the menu, read one selection and dispatch to the
# matching helper defined above.  Runs at import time by design (simple script).
# Fixes: the prompt now lists all 7 options (option 7 existed but was not
# offered), and the "form"/"Plese"/"Fibbonaci" typos in user-facing text.
print("Please select operation -\n"
      "1. Fibonacci Series\n"
      "2. Factorial\n"
      "3. Print all Armstrong Number\n"
      "4. Check whether Armstrong Number or not\n"
      "5. Print all prime number\n"
      "6. Check whether number is prime or not\n"
      "7. Sum of digits")
select = input("Select operations from 1, 2, 3, 4, 5, 6, 7: ")
if select == '1':
    num_1 = int(input("Enter the number of terms:"))
    if num_1 <= 0:
        print("Please enter a positive integer")
    else:
        print("Fibonacci sequence:")
        for i in range(num_1):
            print(recur_fibo(i))
elif select == '2':
    num_1 = int(input("Enter the number to find factorial:"))
    find_fact(num_1)
elif select == '3':
    low_1 = int(input("Enter a lower case value:"))
    upp_1 = int(input("Enter a upper case value:"))
    print_armstrong(low_1, upp_1)
elif select == '4':
    num_1 = int(input("Enter a number:"))
    check_armstrong(num_1)
elif select == '5':
    low_1 = int(input("Enter a lower case value:"))
    upp_1 = int(input("Enter a upper case value:"))
    print_prime(low_1, upp_1)
elif select == '6':
    num_1 = int(input("Enter a number:"))
    check_prime(num_1)
elif select == '7':
    num_1 = int(input("Enter the number:"))
    digit_sum(num_1)
else:
    print("Invalid input specified\n")
from os import makedirs
from os.path import join, dirname, isfile
from uuid import uuid4
from json import dumps
from logging import getLogger
from uchicagoldrtoolsuite import log_aware
from uchicagoldrtoolsuite.core.lib.bash_cmd import BashCommand
from uchicagoldrtoolsuite.core.lib.convenience import log_init_attempt, \
log_init_success
from ..ldritems.ldrpath import LDRPath
from ..ldritems.abc.ldritem import LDRItem
from ..ldritems.ldritemcopier import LDRItemCopier
from .abc.technicalmetadatacreator import TechnicalMetadataCreator
__author__ = "Brian Balsamo"
__email__ = "balsamo@uchicago.edu"
__company__ = "The University of Chicago Library"
__copyright__ = "Copyright University of Chicago, 2016"
__publication__ = ""
__version__ = "0.0.1dev"
log = getLogger(__name__)
class FITsCreator(TechnicalMetadataCreator):
    # TODO: Technical metadata creators probably need a go over
    # like the converters
    """
    A TechnicalMetadataCreator which runs a local FITs instance against the
    content of a MaterialSuite in order to generate a technical metadata entry
    """
    @log_aware(log)
    def __init__(self, materialsuite, working_dir, timeout=None,
                 data_transfer_obj={}):
        """
        Creates a new FITsCreator

        __Args__

        1. materialsuite (MaterialSuite): The materialsuite whose content to
            create the technical metadata for
        2. working_dir (str): A path to a directory where the techmd creator
            can write files

        __KWArgs__

        * timeout (int): A timeout (in seconds) after which the technical
            metadata creation process will fail out, if it hasn't finished
        * data_transfer_obj (dict): A dictionary for passing techmd creator
            specific configuration values into the class from a wrapper.
            Must contain 'fits_path' (path to the FITS executable).
        """
        # NOTE(review): the mutable default ``data_transfer_obj={}`` is only
        # read here, never mutated, so the shared-default pitfall does not
        # bite in practice.
        log_init_attempt(self, log, locals())
        super().__init__(materialsuite, working_dir, timeout)
        self.fits_path = data_transfer_obj.get('fits_path', None)
        if self.fits_path is None:
            raise ValueError('No fits_path specified in the data ' +
                             'transfer object!')
        log_init_success(self, log)

    @log_aware(log)
    def __repr__(self):
        # JSON-serialized attribute dump for stable, readable debug output.
        attr_dict = {
            'source_materialsuite': str(self.source_materialsuite),
            'working_dir': str(self.working_dir),
            'timeout': self.timeout
        }
        return "<FITsCreator {}>".format(dumps(attr_dict, sort_keys=True))

    @log_aware(log)
    def process(self):
        """
        runs a local FITs installation against the MaterialSuite's content

        Copies the PREMIS record and the content into working_dir, shells out
        to FITS with an optional timeout, records the outcome in the PREMIS
        record via handle_premis(), and cleans up the temporary content copy.
        """
        if not isinstance(self.get_source_materialsuite().get_premis(),
                          LDRItem):
            raise ValueError("All material suites must have a PREMIS record " +
                             "in order to generate technical metadata.")
        log.debug("Building FITS-ing environment")
        # Copy the PREMIS record to a uniquely-named scratch file.
        premis_file_path = join(self.working_dir, str(uuid4()))
        LDRItemCopier(
            self.get_source_materialsuite().get_premis(),
            LDRPath(premis_file_path)
        ).copy()
        # hacky fix for not setting the originalName in presforms during the
        # staging tearup in response to some filename encodings not being
        # interoperable on different operating systems. (OSX/BSD/Windows/Linux)
        original_name = uuid4().hex
        # NOTE(review): dirname() strips ``original_name`` again, so the
        # content is actually copied to working_dir/<hex> and ``original_name``
        # never appears on disk — confirm this is the intended "hacky fix".
        content_file_path = dirname(
            join(
                self.working_dir,
                uuid4().hex,
                original_name
            )
        )
        content_file_containing_dir_path = dirname(content_file_path)
        makedirs(content_file_containing_dir_path, exist_ok=True)
        original_holder = LDRPath(content_file_path)
        LDRItemCopier(
            self.get_source_materialsuite().get_content(),
            original_holder
        ).copy()
        # Run FITS, writing its XML output to another scratch file.
        fits_file_path = join(self.working_dir, uuid4().hex)
        cmd = BashCommand([self.fits_path, '-i', content_file_path,
                           '-o', fits_file_path])
        if self.get_timeout() is not None:
            cmd.set_timeout(self.get_timeout())
        log.debug(
            "Running FITS on file. Timeout: {}".format(str(self.get_timeout()))
        )
        cmd.run_command()
        cmd_data = cmd.get_data()
        # Success is judged by whether FITS produced an output file.
        if isfile(fits_file_path):
            success = True
            log.debug("FITS successfully created")
        else:
            success = False
            log.warn("FITS creation failed on {}".format(
                self.get_source_materialsuite().identifier)
            )
        self.handle_premis(cmd_data, self.get_source_materialsuite(),
                           "FITs", success, "fitsRecord", fits_file_path)
        log.debug("Cleaning up temporary file instantiation")
        original_holder.delete(final=True)
| uchicago-library/uchicagoldr-toolsuite | uchicagoldrtoolsuite/bit_level/lib/techmdcreators/fitscreator.py | fitscreator.py | py | 4,907 | python | en | code | 0 | github-code | 36 |
import sys
sys.path.append('path/to/SPFlow')
import numpy as np
import pandas as pd
from spn.structure.Base import Context
from spn.algorithms.oSLRAU import oSLRAU, oSLRAUParams
from spn.structure.leaves.parametric.Parametric import Gaussian, In_Latent
from spn.algorithms.LearningWrappers import learn_parametric
from spn.io.Graphics import plot_spn
from spn.algorithms.Inference import log_likelihood
from spn.algorithms.TransformStructure import Prune ,Prune_oSLRAU
def run_oSLRAU(dataset, update_after_no_min_batches, prune_after):
    """Learn an initial SPN on the first mini-batch of ``dataset`` and then
    update it online with oSLRAU, pruning once after ``prune_after`` batches.

    Prints test log-likelihood before and after the online updates and writes
    the initial/final SPN structure plots to PDF.
    """
    data = get_data(dataset)
    # Mean-impute NaNs column-wise before splitting.
    data = np.where(np.isnan(data), np.ma.array(data, mask=np.isnan(data)).mean(axis=0), data)
    from sklearn.model_selection import train_test_split
    train_data, test_data = train_test_split(data, test_size=0.33, random_state=42)

    # make first mini_batch from data
    mini_batch_size = 50
    first_mini_batch = data[0:mini_batch_size]

    n = first_mini_batch.shape[1]  # num of variables
    print(n)
    context = [Gaussian] * n
    ds_context = Context(parametric_types=context).add_domains(first_mini_batch)

    # Learn initial spn
    spn = learn_parametric(first_mini_batch, ds_context)
    plot_spn(spn, 'intitial_spn.pdf')
    print(np.mean(log_likelihood(spn, test_data)))

    oSLRAU_params = oSLRAUParams(mergebatch_threshold=128, corrthresh=0.1, mvmaxscope=1, equalweight=True,
                                 currVals=True)
    no_of_minibatches = int(data.shape[0] / mini_batch_size)

    # update using oSLRAU
    for i in range(1, no_of_minibatches):
        mini_batch = data[i * mini_batch_size: (i+1) * mini_batch_size]
        update_structure = False
        # NOTE(review): ``update_after_no_min_batches // i == 0`` is True for
        # EVERY i > update_after_no_min_batches, i.e. the structure is updated
        # on every mini-batch after that point.  If the intent was "every N
        # mini-batches", the condition should be
        # ``i % update_after_no_min_batches == 0`` — confirm.
        if update_after_no_min_batches//i == 0:
            print(i)
            update_structure = True
        spn = oSLRAU(spn, mini_batch, oSLRAU_params, update_structure)

        if i == prune_after:
            spn = Prune_oSLRAU(spn)

    print(np.mean(log_likelihood(spn, test_data)))
    plot_spn(spn, 'final_spn.pdf')
def get_data(dataset):
    """Load one of the supported CSV datasets as a float numpy array.

    Supported names: 'hh_power' (columns 2:6, comma-separated),
    'other_power' (comma-separated, last row dropped) and
    'wine_qual' (semicolon-separated, last row dropped).
    Returns None for an unknown dataset name, as the original did.

    Fix: ``DataFrame.convert_objects`` was deprecated and then removed from
    pandas; ``pd.to_numeric(errors='coerce')`` is the supported equivalent
    (non-numeric entries become NaN, as before).
    """
    csv_file_path_hh_power = 'path/to/file'
    csv_file_path_other_power = 'path/to/file'
    csv_file_path_wine_qual = 'path/to/file'

    def _load(file_path, sep, column_slice=None, drop_last=False):
        """Read a CSV, coerce every column to numeric and return float values."""
        df = pd.read_csv(file_path, sep=sep)
        if column_slice is not None:
            df = df.iloc[:, column_slice]
        df = df.apply(pd.to_numeric, errors='coerce')
        data = df.values
        if drop_last:
            # The source files carry a trailing junk row; drop it.
            data = data[0:-1]
        data = data.astype(float)
        print(data)
        return data

    if dataset == 'hh_power':
        return _load(csv_file_path_hh_power, ',', column_slice=slice(2, 6))
    elif dataset == 'other_power':
        return _load(csv_file_path_other_power, ',', drop_last=True)
    elif dataset == 'wine_qual':
        return _load(csv_file_path_wine_qual, ';', drop_last=True)
def main():
    """Entry point: run online oSLRAU learning on the wine-quality dataset."""
    run_oSLRAU(dataset='wine_qual',
               update_after_no_min_batches=15,
               prune_after=50)


if __name__ == "__main__":
    main()
| c0derzer0/oSLRAU_and_RSPN | oSLRAU_run.py | oSLRAU_run.py | py | 3,323 | python | en | code | 0 | github-code | 36 |
import sys
import os
from numpy.lib.arraysetops import isin
from argparse import ArgumentParser
sys.path.insert(1, './tendims')
sys.path.insert(2, './complexity')
sys.path.insert(3, './sentiment')
sys.path.insert(4, './empathy')
import logging
import json
import numpy as np
import wget
import pickle
import oyaml as yaml
from flask import Flask, request, redirect , jsonify, send_file, send_from_directory, safe_join, abort
from flask.json import JSONEncoder
from flask_cors import CORS
from flask_socketio import SocketIO, send, emit
import uuid
import pandas as pd
from cryptography.fernet import Fernet
from complexity import ComplexityClassifier
from sentiment import SentimentClassifier
from success import SuccessPredictor
from tendims import TenDimensionsClassifier
from empathy import empathy_processing
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from dh_encryption import DiffieHellman, decrypt_data, encrypt_data, decrypt_file, encrypt_file
import sys
import urllib
import urllib.request
from cryptography.fernet import Fernet
import base64
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
class CustomJSONEncoder(JSONEncoder):
    """Flask JSON encoder that understands numpy scalars and arrays."""

    def default(self, obj):
        # Convert numpy types into their plain-Python equivalents so that
        # jsonify can serialise model outputs; everything else falls through
        # to the base encoder (which raises TypeError).
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        return JSONEncoder.default(self, obj)
class Engine():
    """Loads the NLP models (complexity, ten-dimensions, sentiment), holds the
    per-client Diffie-Hellman key material and runs the registered models over
    decrypted request text."""

    class Models:
        # String identifiers used as keys in models_map and by the routes.
        All = "all"
        Sentiment = "sentiment"
        TenDims = "tendims"
        Success = "success"
        Complexity = "complexity"
        Empathy = "empathy"

    def register_model(self, model_name, model_fun):
        """Register the callable that computes stats for ``model_name``."""
        self.models_map[model_name] = model_fun

    def get_model_methods(self, model_name):
        """Return the stat function(s) for ``model_name``.

        NOTE(review): returns a list for Models.All but a single callable
        otherwise; calculate_stats() normalises this by wrapping non-lists.
        """
        fun_list = []
        if model_name == Engine.Models.All:
            fun_list = list(self.models_map.values())
        else:
            fun_list = self.models_map[model_name]
        return fun_list

    def __init__(self, logger, load_ten_dims=True):
        self.models_map = {}
        # ip address -> {"client": {...keys...}, "server": {...keys...}}
        self.ip_keys_dict = {}
        self.using_encryption = True
        self.no_key_error_msg = 'Connection is not secure, request a shared key first'
        self.wrong_key_error_msg = 'The shared key is not the same'
        self.dh = DiffieHellman()

        #### Complexity Models ####
        logger.info('Loading complexity models...')
        self.ic_model_file = 'complexity/models/Vocab+FullPOS_xbgoost.model'
        self.liwc_dictionary_file = 'complexity/data/LIWC2007_English100131.dic'
        self.model_complexity = ComplexityClassifier(self.ic_model_file, self.liwc_dictionary_file)
        self.register_model(Engine.Models.Complexity, self.get_complexity)
        logger.info('Complexity models loaded')
        #####################

        # #### Ten Dimensions Models ####
        if load_ten_dims:
            logger.info('Loading tenDims models...')
            self.models_dir = 'tendims/models/lstm_trained_models'
            self.embeddings_dir = 'tendims/embeddings' # change urls to embeddings dir
            self.success_model_file = 'tendims/models/meeting_success/xgboost_10dims_success_prediction_model_v0.81.dat'
            # Success is not available
            self.model_tendim = TenDimensionsClassifier(models_dir=self.models_dir, embeddings_dir=self.embeddings_dir)
            self.success_predictor = SuccessPredictor(self.success_model_file) # Sucess prediction
            self.register_model(Engine.Models.TenDims, self.get_ten_dims)
            logger.info('Tend dims models loaded')
        #####################

        # Empathy model loading is currently disabled; get_empathy() would
        # fail until these lines are re-enabled.
        # self.empathy_model_file = './empathy/models/Vocab+FullPOS+LIWCtrained_XGboost_model_99perc.pickle'
        # self.empathy_ic_model_file = './empathy/models/Vocab+FullPOS_xbgoost.pickle'
        # self.empathy_scorer = empathy_processing.EmpathyScorer(self.empathy_model_file, self.empathy_ic_model_file)
        # self.register_model(Engine.Models.Empathy, self.empathyIC_from_texts)
        #####################

        #### Sentiment Models ####
        logger.info('Loading sentiment model...')
        self.model_sentim = SentimentClassifier()
        self.register_model(Engine.Models.Sentiment, self.get_sentiment)
        logger.info('Sentiment models loaded')
        #####################

    def generate_keys(self, ip_address, logger):
        """Create fresh DH key pairs for both sides and remember them by ip."""
        self.ip_keys_dict[ip_address] = {}
        client_private_key, client_public_key = self.dh.get_private_key(), self.dh.gen_public_key()
        server_private_key, server_public_key = self.dh.get_private_key(), self.dh.gen_public_key()
        self.ip_keys_dict[ip_address]["client"] = {"private_key": client_private_key, "public_key": client_public_key}
        self.ip_keys_dict[ip_address]["server"] = {"private_key": server_private_key, "public_key": server_public_key}
        return {"private_key": client_private_key, "public_key": client_public_key, 'server_public_key':server_public_key}

    def generate_shared_keys(self, ip_address, local_private_key, remote_public_key, logger):
        """Derive and store the shared keys for ``ip_address``; returns the
        client-side shared key."""
        client_shared_key = DiffieHellman.gen_shared_key_static(local_private_key, remote_public_key)
        server_shared_key = DiffieHellman.gen_shared_key_static(self.ip_keys_dict[ip_address]["server"]["private_key"], self.ip_keys_dict[ip_address]["server"]["public_key"])
        self.ip_keys_dict[ip_address]["client"]["shared_key"] = client_shared_key
        self.ip_keys_dict[ip_address]["server"]["shared_key"] = server_shared_key
        return client_shared_key

    # https://dev.to/ruppysuppy/implementing-end-to-end-encryption-in-your-cross-platform-app-3a2k
    # https://dev.to/ruppysuppy/implementing-end-to-end-encryption-in-your-cross-platform-app-part-2-cgg
    def check_request_key(self, ip_address, logger):
        """Return (200, None) if a shared key exists for ip_address, else
        (400, error message)."""
        if ip_address not in self.ip_keys_dict:
            logger.error(self.no_key_error_msg)
            return 400, self.no_key_error_msg
        elif "shared_key" not in self.ip_keys_dict[ip_address]["server"] or "shared_key" not in self.ip_keys_dict[ip_address]["client"]:
            logger.error(self.wrong_key_error_msg)
            return 400, self.wrong_key_error_msg
        return 200, None

    def encrypt_decrypt_file(self, ip_address, folder, filename, logger, new_prefix="", decrypt=False):
        """Encrypt or decrypt ``folder/filename`` with the client's shared key.

        Writes the result to a temp file, removes the original, and returns
        (status_code, new_filename_or_error_text).
        """
        code, error_text = self.check_request_key(ip_address, logger)
        if code >= 400:
            return code, error_text
        try:
            with open(os.path.join(folder, filename), 'rb') as enc_file:
                file_data = enc_file.read()
            if self.using_encryption:
                temp_filename = new_prefix+'_temp_file_data.csv'
                client_shared_key = self.ip_keys_dict[ip_address]["client"]["shared_key"]
                if decrypt:
                    new_data = decrypt_file(file_data, client_shared_key)
                else:
                    new_data = encrypt_file(file_data, client_shared_key)
                with open(os.path.join(folder, temp_filename), 'wb') as dec_file:
                    dec_file.write(new_data)
                try:
                    os.remove(os.path.join(folder, filename))
                # NOTE(review): bare except silently swallows the removal
                # failure; the leftover file stays on disk.
                except:
                    print(f"Error removing file (unknown)")
                filename = temp_filename
                if decrypt:
                    logger.debug(f"\n\nReceived encrypted File, decrypted using {ip_address} key {client_shared_key}")
                else:
                    logger.debug(f"\n\nFile Encrypted using {ip_address} key {client_shared_key}")
            else:
                logger.debug(f"\n\n: Received non encrypted File from {ip_address}")
            return 200, filename
        except Exception as e:
            error_text = f"\n\n Something went wrong while decrypting/encrypting the file (unknown): {e}"
            logger.error(error_text)
            return 400, error_text

    def get_decrypted_text(self, ip_address, text, method, logger):
        """Decrypt request text with the client's shared key.

        Returns (200, text) on success or (4xx, error message).
        """
        code, error_text = self.check_request_key(ip_address, logger)
        if code >= 400:
            return code, error_text
        try:
            # NOTE(review): uses the module-level ``engine`` instead of
            # ``self`` — works because a single global Engine exists, but
            # should be ``self`` for correctness with multiple instances.
            if engine.using_encryption:
                client_shared_key = engine.ip_keys_dict[ip_address]["client"]["shared_key"]
                text = decrypt_data(text, client_shared_key)
                logger.debug(f"\n\n{method}: Received encrypted Text, decrypted using {ip_address} key {client_shared_key}: {text}")
            else:
                logger.debug(f"\n\n{method}: Received plain Text from {ip_address}: {text}")
            return 200, text
        except Exception as e:
            error_text = f"\n\n{method}: Something went wrong while getting the request's text {e}"
            logger.error(error_text)
            return 400, error_text

    def get_ten_dims(self, text, logger):
        """Score the ten social dimensions plus meeting-success probability.

        Returns zeros for every dimension when ten-dims loading is disabled.
        """
        if USE_TEN_DIMS:
            # you can give in input one string of text
            # dimensions = None extracts all dimensions
            # NOTE(review): uses the global ``engine`` rather than ``self``.
            tendim_scores = engine.model_tendim.compute_score(text, dimensions=None)
            success_probability = engine.success_predictor.predict_success(tendim_scores)
            tendim_scores['success'] = float(success_probability)
        else:
            tendim_scores = {'conflict': 0, 'fun': 0, 'identity': 0, 'knowledge': 0, 'power': 0, 'romance': 0, 'similarity': 0, 'status': 0, 'support': 0, 'trust': 0}
            tendim_scores['success'] = 0
        return tendim_scores

    def get_sentiment(self, text, logger):
        """Sentiment scores for ``text`` via the loaded sentiment classifier."""
        return self.model_sentim.get_sentiment(text)

    def get_complexity(self, text, logger):
        """Integrative-complexity scores for ``text``."""
        return self.model_complexity.get_complexity(text)

    def get_empathy(self, text, logger):
        """Empathy/IC averages for ``text``.

        NOTE(review): ``empathy_scorer`` is never created (its initialisation
        is commented out in __init__), so this raises AttributeError if called.
        """
        avg_empathy, avg_ic, scored_text_list = engine.empathy_scorer.empathyIC_from_texts(text)
        return {'Average_Empathy': avg_empathy , 'Average_IC':avg_ic}

    def calculate_stats(self, texts, text_ids, stat_method, logger):
        """Run each stat function over each (text, id) pair and merge the
        resulting dicts; returns one dict per input text."""
        if not isinstance(stat_method, list):
            stat_method = [stat_method]
        returnAll = []
        for txt, txt_id in zip(texts,text_ids):
            return_data = {}
            return_data["server_text_id"] = txt_id
            # return_data["server_text_data"] = str(txt)
            for stat_fun in stat_method:
                return_data.update(stat_fun(txt, logger))
            returnAll.append(return_data)
        return returnAll

    def call_model_from_text(self, ip_address, text, no_encryption, method, logger):
        """Decrypt (unless no_encryption) and score raw text; returns
        (results_or_error_dict, http_status)."""
        try:
            logger.debug(f"Text Getting decrypted text")
            if not isinstance(text, list):
                text = [text]
            retCode = 200
            if not no_encryption:
                retCode, text = engine.get_decrypted_text(ip_address, text, method, logger)
            if retCode == 200:
                text_id = range(0, len(text))
                ret = engine.calculate_stats(text, text_id, self.get_model_methods(method), logger)
                return ret, retCode
            else:
                error_msg = f"\n\nText {method}: Something went wrong while calculating {method} stats. Code: {retCode}"
                logger.error(f"Error {retCode}\n{error_msg}\n{text}")
                return {"message": error_msg, "error_info":text, "status": retCode}, retCode
        except Exception as e:
            logger.error(f"Exception in Text {method}:{e}")
            return {"message": f"Internal Server Error in Text {method}", "error_info":str(e), "status": 500}, 500

    def call_model_from_request(self, flask_request, method, logger):
        """Extract 'text'/'id' lists from a Flask form request, decrypt and
        score them; returns (results_or_error_dict, http_status)."""
        try:
            text = flask_request.form.getlist('text')
            if len(text) <= 0:
                text = [flask_request.form.get('text')]
            no_encryption = flask_request.form.get('no_encryption', False)
            retCode = 200
            if not no_encryption:
                retCode, text = engine.get_decrypted_text(flask_request.remote_addr, text, method, logger)
            text_id = flask_request.form.getlist('id')
            if len(text_id) <= 0:
                text_id = [flask_request.form.get('id')]
            logger.info(f"Text stats request from {flask_request.remote_addr}. Encrypted: {not no_encryption}. List len: {len(text)}")
            if retCode == 200:
                ret = engine.calculate_stats(text, text_id, self.get_model_methods(method), logger)
                return ret, retCode
            else:
                error_msg = f"\n\nRequest {method}: Something went wrong while calculating {method} stats. Code: {retCode}"
                logger.error(f"Error {retCode}\n{error_msg}\n{text}")
                return {"message": error_msg, "error_info":text, "status": retCode}, retCode
        except Exception as e:
            logger.error(f"Exception in Request {method}:{e}")
            return {"message": f"Internal Server Error in Request {method}", "error_info":str(e), "status": 500}, 500
def allowed_file(filename):
    """True when ``filename`` has an extension listed in ALLOWED_EXTENSIONS."""
    _, sep, ext = filename.rpartition('.')
    # rpartition yields an empty separator when there is no dot at all.
    return bool(sep) and ext.lower() in ALLOWED_EXTENSIONS
# ---- Module-level setup: CLI args, config, logging, Flask app and Engine ----
parser = ArgumentParser()
parser.add_argument('-c', nargs='?', const="config.yaml", type=str)
args = parser.parse_args()
config_filename = args.c
# config_filename = "config5000.yaml"
# NOTE(review): ``global`` at module level is a no-op, and the bare except
# below silently falls back to an empty config for ANY error (including a
# malformed YAML file); the open() handle is also never closed explicitly.
global config
try:
    config = yaml.safe_load(open(config_filename))
except:
    config = {}
# Configuration values with defaults.
UPLOAD_FOLDER = config.get("upload_folder", './uploaded_files/')
ALLOWED_EXTENSIONS = config.get("allowed_extensions", {'csv', 'txt', 'dat', 'json'})
IP = config.get("ip", "0.0.0.0")
PORT = config.get("port", 5000)
USE_TEN_DIMS = config.get("use_ten_dims", True)
LOG_FILENAME = config.get("log_filename", "flask_log.log")

app = Flask(__name__)
# Truncate the log file on startup.
with open(LOG_FILENAME, 'w'):
    pass
handler = logging.FileHandler(LOG_FILENAME) # Create the file logger
app.logger.addHandler(handler) # Add it to the built-in logger
app.logger.setLevel(logging.DEBUG) # Set the log level to debug
app.json_encoder = CustomJSONEncoder
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
socketio = SocketIO(app)
# Single global Engine instance used by all routes (and, internally, by some
# Engine methods themselves).
engine = Engine(app.logger, USE_TEN_DIMS)
@app.route("/request-keys", methods=["GET"])
def request_keys():
    """Generate a fresh Diffie-Hellman key set for the requesting client."""
    method = "Request Keys"
    try:
        keys_dict = engine.generate_keys(request.remote_addr, app.logger)
        keys_dict["status"] = 200
        return jsonify(keys_dict), 200
    except Exception as e:
        app.logger.error(f"Exception in {method}:{e}")
        return jsonify({"message": f"Internal Server Error in {method}", "error_info": str(e), "status": 500}), 500
@app.route("/request-shared-key", methods=["GET"])
def request_shared_key():
    """Derive and store the DH shared key from the client's supplied keys."""
    method = "Request Shared Key"
    try:
        ip_address = request.remote_addr
        try:
            local_private_key = request.args.get("local_private_key")
            remote_public_key = request.args.get("remote_public_key")
            client_shared_key = engine.generate_shared_keys(
                ip_address, local_private_key, remote_public_key, app.logger)
        except Exception as e:
            # Key derivation failed — bad or missing key material.
            return jsonify({"message": "Invalid shared key", "error_info": str(e), "status": 400}), 400
        return jsonify({"shared_key": client_shared_key, "status": 200}), 200
    except Exception as e:
        app.logger.error(f"Exception in {method}:{e}")
        return jsonify({"message": f"Internal Server Error in {method}", "error_info": str(e), "status": 500}), 500
@app.route("/getStats", methods=['POST'])
def getStats():
    """Run every registered model over the texts in the POSTed form data."""
    payload, status = engine.call_model_from_request(request, Engine.Models.All, app.logger)
    return jsonify(payload), status
@app.route("/getStatsFile", methods=['POST'])
def getStatsFile():
    """Accept an uploaded (optionally encrypted) CSV, score every row's text
    column with all models, and return the augmented CSV (re-encrypted when
    the request was encrypted)."""
    no_encryption = str(request.form.get('no_encryption')) != "False" # No clue why the boolean is returned as a string... But just in case I converted it to a string every time
    # check if the post request has the file part
    if 'file' not in request.files:
        return jsonify({"message": f"No file submitted", "error_info":f"No file submitted", "status": 400}), 400
    file = request.files['file']
    # If the user does not select a file, the browser submits an empty file without a filename.
    if file.filename == '':
        return redirect(request.url)
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        if not os.path.exists(UPLOAD_FOLDER):
            os.makedirs(UPLOAD_FOLDER)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        # Decrypt the uploaded file in place when the client encrypted it.
        if not no_encryption:
            code, filename = engine.encrypt_decrypt_file(request.remote_addr, UPLOAD_FOLDER, filename, app.logger, new_prefix="decrypted", decrypt=True)
        txt_col = request.form["txt_col_name"]
        amount = int(request.form.get("amount", 0))
        data_df = pd.read_csv(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        try:
            os.remove(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        except:
            print(f"Error removing file (unknown)")
        # remove file
        data_df["idx"] = range(0,len(data_df))
        app.logger.info(f"File stats request from {request.remote_addr}. Encrypted: {not no_encryption}. Row col: {txt_col}, limit: {amount}, rows: {len(data_df)}")
        output_filename = os.path.splitext(filename)[0]
        output_filename = UPLOAD_FOLDER+output_filename+"_pandas_res.csv"
        initialized = False
        for index, row in data_df.iterrows():
            # Score each row's text; text is already decrypted, hence True.
            ret_data, code = engine.call_model_from_text(request.remote_addr, str(row[txt_col]), True, Engine.Models.All, app.logger)
            if code == 200 :
                for key, value in ret_data[0].items():
                    # NOTE(review): ``type(value) == int or float`` is always
                    # truthy (it is ``(type(value) == int) or float``), so the
                    # default is always 0; also ``initialized`` flips after the
                    # FIRST key, so only one column is pre-created — pandas
                    # creates the rest implicitly via .at. Confirm intent.
                    if not initialized:
                        data_df[key] = 0 if type(value) == int or float else ""
                        initialized = True
                    data_df.at[index, key] = value
            else:
                app.logger.error(f"{ret_data}\t{index}\t{row[txt_col]}")
            if amount > 0 and index >= amount:
                break
        data_df.to_csv(output_filename)
        # Re-encrypt the result for encrypted sessions.
        if not no_encryption:
            code, output_filename = engine.encrypt_decrypt_file(request.remote_addr, UPLOAD_FOLDER, output_filename, app.logger, new_prefix="encrypted", decrypt=False)
        try:
            return send_file(output_filename, attachment_filename=output_filename+"_pandas_res.csv")
        except Exception as e:
            app.logger.error(f"Exception in files stats:{e}")
            return jsonify({"message": f"Internal Server Error in files stats", "error_info":str(e), "status": 500}), 500
@app.route("/tenDimensions", methods=['POST'])
def tenDimensions():
    """Score only the ten social dimensions for the POSTed texts."""
    payload, status = engine.call_model_from_request(request, Engine.Models.TenDims, app.logger)
    return jsonify(payload), status
@app.route("/complexity", methods=['POST'])
def complexity():
    """Score only integrative complexity for the POSTed texts."""
    payload, status = engine.call_model_from_request(request, Engine.Models.Complexity, app.logger)
    return jsonify(payload), status
@app.route("/sentiment", methods=['POST'])
def sentiment():
    """Score only sentiment for the POSTed texts."""
    payload, status = engine.call_model_from_request(request, Engine.Models.Sentiment, app.logger)
    return jsonify(payload), status
@app.route("/empathy", methods=['GET'])
def empathy():
    # NOTE(review): this endpoint actually runs the *sentiment* model
    # (Engine.Models.Sentiment), not the empathy one, and is registered with
    # GET while the sibling endpoints use POST (call_model_from_request reads
    # form data).  The empathy model registration is commented out in
    # Engine.__init__, so switching to Engine.Models.Empathy would currently
    # raise KeyError — confirm intended behavior before changing.
    ret_data, code = engine.call_model_from_request(request, Engine.Models.Sentiment, app.logger)
    return jsonify(ret_data), code
@socketio.on('json')
def handle_json(data):
    """Socket.IO JSON handler: log the payload and echo a stub response.

    Fix: the parameter was named ``json``, shadowing the stdlib ``json``
    module imported at the top of the file, so ``json.dumps`` was looked up
    on the received payload and raised AttributeError for dict payloads.
    """
    app.logger.info('received json: ' + str(data))
    # data = engine.call_model(data, "All", app.logger)
    send(json.dumps({"test": 0}), json=True)
@socketio.on('message')
def handle_message(message):
    """Socket.IO plain-message handler: log the message and echo it back."""
    app.logger.info('received message: ' + str(message))
    send(message)
if __name__ == '__main__':
    # Enable CORS and start the development server.
    CORS(app)
    # NOTE(review): app.run() blocks, so socketio.run(app) and the second
    # app.run() below only execute after the first server stops — they look
    # like leftovers from experimenting with runners; confirm which one is
    # intended (socketio.run is required for WebSocket support).
    app.run(host="0.0.0.0",port=5000,threaded=True)
    socketio.run(app)
    app.run()

# Production notes: run under gunicorn instead of the dev server.
# sudo nohup sudo gunicorn3 --workers 30 --timeout 0 --bind 0.0.0.0:5000 wsgi:app &
# sudo nohup sudo gunicorn3 --threads 100 --timeout 0 --bind 0.0.0.0:5000 wsgi:app &
# sudo pkill -P [PID]
# ps -ef | grep gun
#!/usr/bin/python3
# Load VEX operand signatures from unique-opsigs.csv into the AiOpSig table,
# translating each IRType enum name ("Ity_I32", ...) to its database id.
import sqlite3
from itertools import chain

conn = sqlite3.connect('vexdb.db')
curs = conn.cursor()

# Map "Ity_<btype><nbits>" enum names to their IRType row ids.
ItyToId=dict()
for row in curs.execute('SELECT id, "Ity_" || btype || nbits AS cenum FROM IRType'):
    ItyToId[row[1]]=row[0]

# Start from a clean AiOpSig table.
curs.execute('DELETE FROM AiOpSig')
conn.commit()

# Rounding-mode operands are stored as I32 in the type table.
ItyToId['ity_RMode']=ItyToId['Ity_I32']

with open('unique-opsigs.csv') as f:
    for line in f:
        # CSV row layout: nopds, ntypes, result type, operand types...
        fields=line.rstrip().split(',')
        n=int(fields[0])
        if n<2 or n>5:
            raise Exception("Invalid operand count.")
        u=int(fields[1])
        # Flag signatures whose first type slot is a rounding mode.
        r=False;
        if fields[2]=='ity_RMode':
            r=True
        values=chain((n, u, r), (int(ItyToId[x]) for x in fields[2:2+n]))
        # Build the INSERT statement with one opdN column per operand.
        i_stub='INSERT INTO AiOpSig(nopds, ntypes, rmode, res,opd1'
        v_stub=') VALUES (?,?,?,?,?'
        if n>=3:
            i_stub += ',opd2'
            v_stub += ',?'
        if n>=4:
            i_stub += ',opd3'
            v_stub += ',?'
        if n==5:
            i_stub += ',opd4'
            v_stub += ',?'
        try:
            curs.execute(i_stub + v_stub + ')', tuple(values))
        except Exception as e:
            # Report the failing row but keep loading the rest of the file.
            print(e)
            print(line)
conn.commit()
conn.close()
| EmmetCaulfield/valgrind | arinx/hacking/insert-opsigs.py | insert-opsigs.py | py | 1,236 | python | en | code | 0 | github-code | 36 |
import tensorflow as tf
import numpy as np
import cifar
def get_value(a):
    """Return the index of the first element equal to 1.0, or -1 if absent
    (decodes a one-hot label vector into its class index)."""
    for idx, val in enumerate(a):
        if val == 1.0:
            return idx
    return -1
# Train a small dense network on CIFAR-10 loaded via the local ``cifar`` helper.
m = cifar.cifar("/home/lr/workspace/python/ai/data/cifar-10-batches-py/","/home/lr/workspace/python/ai/model/cifar_cnn/model.ckpt")
m.open()
datas = m.next_datas(50000)
x_train,y_train = datas[0],datas[1]
print(x_train.shape)
print(y_train.shape)
# Scale pixel values to [0, 1].
x_train = x_train / 255.0
m.close()

# Convert one-hot label vectors to class indices for the sparse loss below.
y_train = np.array([get_value(y) for y in y_train])

# Fully-connected classifier over the flattened 32x32x3 images (3072 inputs).
model = tf.keras.models.Sequential([
  tf.keras.layers.InputLayer(input_shape=[3072]),
  tf.keras.layers.Dense(128, activation=tf.nn.relu),
  tf.keras.layers.Dense(64, activation=tf.nn.relu),
  tf.keras.layers.Dense(32, activation=tf.nn.relu),
  tf.keras.layers.Dense(16, activation=tf.nn.sigmoid),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
model.fit(x_train, y_train, epochs=1000,batch_size=100)
# NOTE(review): evaluation is on the training set — no held-out test split.
output = model.evaluate(x_train, y_train,batch_size=100)
print("the result is %s" %(output))
model.save('/home/lr/workspace/python/ai/nn/tfk/model/hello.2')
""" Store packages in GCS """
import io
import json
import logging
import os
import posixpath
from datetime import timedelta
from google.auth import compute_engine
from google.auth.transport import requests
from google.cloud import storage
from pyramid.settings import asbool
from pypicloud.models import Package
from .object_store import ObjectStoreStorage
LOG = logging.getLogger(__name__)
class GoogleCloudStorage(ObjectStoreStorage):
"""Storage backend that uses GCS"""
test = False
def __init__(
self,
request=None,
bucket_factory=None,
service_account_json_filename=None,
project_id=None,
use_iam_signer=False,
iam_signer_service_account_email=None,
**kwargs
):
super(GoogleCloudStorage, self).__init__(request=request, **kwargs)
self._bucket = None
self._bucket_factory = bucket_factory
self.use_iam_signer = use_iam_signer
self.iam_signer_service_account_email = iam_signer_service_account_email
if self.public_url:
raise NotImplementedError(
"GoogleCloudStorage backend does not yet support public URLs"
)
if self.sse:
raise NotImplementedError(
"GoogleCloudStorage backend does not yet support customized "
"server-side encryption"
)
    @classmethod
    def _subclass_specific_config(cls, settings, common_config):
        """Extract GCP-specific config settings: specifically, the path to
        the service account key file, and the project id. Both are
        optional.

        Raises if a key-file path is configured but missing on disk (unless
        running under tests), or if 'storage.bucket' is absent.  Falls back
        to the key file's client_email for the IAM signer account when it is
        not configured explicitly.
        """
        # Setting falls back to the standard GOOGLE_APPLICATION_CREDENTIALS
        # environment variable.
        service_account_json_filename = settings.get(
            "storage.gcp_service_account_json_filename"
        ) or os.getenv("GOOGLE_APPLICATION_CREDENTIALS")

        if (
            service_account_json_filename
            and not os.path.isfile(service_account_json_filename)
            and not cls.test
        ):
            raise Exception(
                "Service account json file not found at path {}".format(
                    service_account_json_filename
                )
            )

        bucket_name = settings.get("storage.bucket")
        if bucket_name is None:
            raise ValueError("You must specify the 'storage.bucket'")

        iam_signer_service_account_email = settings.get(
            "storage.iam_signer_service_account_email"
        )
        if iam_signer_service_account_email is None and service_account_json_filename:
            # Default the signer account to the key file's client_email.
            with io.open(service_account_json_filename, "r", encoding="utf-8") as ifile:
                credentials = json.load(ifile)
                iam_signer_service_account_email = credentials.get("client_email")

        return {
            "service_account_json_filename": service_account_json_filename,
            "project_id": settings.get("storage.gcp_project_id"),
            "use_iam_signer": asbool(settings.get("storage.gcp_use_iam_signer", False)),
            "iam_signer_service_account_email": iam_signer_service_account_email,
            # Deferred so the bucket (and any network call) is only created
            # on first use.
            "bucket_factory": lambda: cls.get_bucket(bucket_name, settings),
        }
    @classmethod
    def _get_storage_client(cls, settings):
        """Helper method for constructing a properly-configured GCS client
        object from the provided settings.

        Uses application-default credentials when no service-account JSON
        file is configured; otherwise builds the client from that file.
        """
        client_settings = cls._subclass_specific_config(settings, {})
        client_args = {}
        if client_settings["project_id"]:
            LOG.info("Using GCP project id `%s`", client_settings["project_id"])
            client_args["project"] = client_settings["project_id"]

        service_account_json_filename = client_settings.get(
            "service_account_json_filename"
        )
        if not service_account_json_filename:
            LOG.info("Creating GCS client without service account JSON file")
            client = storage.Client(**client_args)
        else:
            # _subclass_specific_config already validated existence, but the
            # check is repeated here for callers entering via this method.
            if not os.path.isfile(service_account_json_filename) and not cls.test:
                raise Exception(
                    "Service account JSON file not found at provided "
                    "path {}".format(service_account_json_filename)
                )

            LOG.info(
                "Creating GCS client from service account JSON file %s",
                service_account_json_filename,
            )
            client = storage.Client.from_service_account_json(
                service_account_json_filename, **client_args
            )

        return client
@classmethod
def get_bucket(cls, bucket_name, settings):
client = cls._get_storage_client(settings)
bucket = client.bucket(bucket_name)
if not bucket.exists():
bucket.location = settings.get("storage.region_name")
LOG.info(
"Creating GCS bucket %s in location %s", bucket_name, bucket.location
)
bucket.create()
return bucket
@classmethod
def package_from_object(cls, blob, factory):
"""Create a package from a GCS object"""
filename = posixpath.basename(blob.name)
if blob.metadata is None:
return None
name = blob.metadata.get("name")
version = blob.metadata.get("version")
if name is None or version is None:
return None
metadata = Package.read_metadata(blob.metadata)
return factory(
name, version, filename, blob.updated, path=blob.name, **metadata
)
@property
def bucket(self):
if self._bucket is None:
self._bucket = self._bucket_factory()
return self._bucket
def list(self, factory=Package):
blobs = self.bucket.list_blobs(prefix=self.bucket_prefix or None)
for blob in blobs:
pkg = self.package_from_object(blob, factory)
if pkg is not None:
# If we have a separate upload prefix, flag THIS package as being a fallback. Otherwise
# we don't know enough to differentiate.
pkg.origin = "fallback" if self.upload_prefix else None
yield pkg
# If we have an upload_prefix, now go back and process anything that matches.
if self.upload_prefix:
blobs = self.bucket.list_blobs(prefix=self.upload_prefix)
for blob in blobs:
pkg = self.package_from_object(blob, factory)
if pkg is not None:
# If we have a separate upload prefix, flag THIS package as being a fallback. Otherwise
# we don't know enough to differentiate.
pkg.origin = "upload"
yield pkg
def _generate_url(self, package):
"""Generate a signed url to the GCS file"""
blob = self._get_gcs_blob(package)
if self.use_iam_signer:
# Workaround for https://github.com/googleapis/google-auth-library-python/issues/50
signing_credentials = compute_engine.IDTokenCredentials(
requests.Request(),
"",
service_account_email=self.iam_signer_service_account_email,
)
else:
signing_credentials = None
return blob.generate_signed_url(
expiration=timedelta(seconds=self.expire_after),
credentials=signing_credentials,
version="v4",
)
def _get_gcs_blob(self, package):
"""Get a GCS blob object for the specified package"""
return self.bucket.blob(self.get_path(package))
def upload(self, package, datastream):
"""Upload the package to GCS"""
metadata = {"name": package.name, "version": package.version}
metadata.update(package.get_metadata())
blob = self._get_gcs_blob(package)
blob.metadata = metadata
blob.upload_from_file(datastream, predefined_acl=self.object_acl)
if self.storage_class is not None:
blob.update_storage_class(self.storage_class)
def delete(self, package):
"""Delete the package"""
blob = self._get_gcs_blob(package)
blob.delete()
| ambitioninc/pypicloud | pypicloud/storage/gcs.py | gcs.py | py | 8,167 | python | en | code | null | github-code | 36 |
18913829283 | import heapq
class Solution:
    def maximumRobots(
        self, charge_times: list[int], running_costs: list[int], budget: int
    ) -> int:
        """Return the max number of consecutive robots runnable within budget.

        Running k consecutive robots costs max(charge_times over the window)
        + k * sum(running_costs over the window). Sliding window with a
        max-heap over charge times, using lazy deletion of stale entries.

        Bug fix vs. the original: the original popped at most ONE heap entry
        per left-shift, and only when its index was exactly at the window
        edge. Buried entries with index < left could later surface as the
        heap top, so over_budget() read a charge time from OUTSIDE the
        window, over-shrank, and returned a too-small answer (e.g.
        charges=[10,100,3,3], costs=[1,1,1,1], budget=12 gave 1 instead
        of 2). We now prune every stale top whenever `left` advances.
        """
        n = len(charge_times)
        max_consecutive = 0
        sum_running_costs = 0
        # Max-heap of (-charge_time, index); entries with index < left are
        # stale and must never be consulted.
        max_charge_times: list[tuple[int, int]] = []
        left = 0

        def prune_stale() -> None:
            # Drop heap tops that fell out of the window, so the heap top is
            # always the true max charge time of [left, right] (or the heap
            # is empty when the window is empty).
            while max_charge_times and max_charge_times[0][1] < left:
                heapq.heappop(max_charge_times)

        def over_budget(right: int) -> bool:
            k = right - left + 1
            max_charge_time = -max_charge_times[0][0]
            return max_charge_time + sum_running_costs * k > budget

        for right in range(n):
            sum_running_costs += running_costs[right]
            heapq.heappush(max_charge_times, (-charge_times[right], right))
            # Shrink from the left while the current window exceeds budget.
            while max_charge_times and over_budget(right):
                sum_running_costs -= running_costs[left]
                left += 1
                prune_stale()
            max_consecutive = max(max_consecutive, right - left + 1)
        return max_consecutive
| lancelote/leetcode | src/maximum_number_of_robots_within_budget.py | maximum_number_of_robots_within_budget.py | py | 1,031 | python | en | code | 3 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.