max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
examples/pytorch/data_loading.py | neomatrix369/plz | 1 | 6618451 |
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
def create_loader(
        input_directory: str, batch_size: int, pin_memory: bool,
        is_training: bool) -> DataLoader:
    """Build a DataLoader over the MNIST dataset.

    Args:
        input_directory: Directory holding (or receiving) the MNIST files.
        batch_size: Number of samples per batch.
        pin_memory: Pin host memory for faster host-to-GPU transfer.
        is_training: Select the train split when True, the test split otherwise.

    Returns:
        A DataLoader yielding (image_tensor, label) batches.
    """
    return DataLoader(
        datasets.MNIST(
            input_directory,
            train=is_training,
            transform=transforms.ToTensor()),
        batch_size=batch_size,
        # Shuffle only while training; evaluation order should be deterministic
        # (the original shuffled unconditionally).
        shuffle=is_training,
        num_workers=1,
        pin_memory=pin_memory)
|
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
def create_loader(
input_directory: str, batch_size: int, pin_memory: bool,
is_training: bool):
return DataLoader(
datasets.MNIST(
input_directory,
train=is_training,
transform=transforms.ToTensor()),
batch_size=batch_size,
shuffle=True,
num_workers=1,
pin_memory=pin_memory)
| none | 1 | 2.834219 | 3 | |
14B-088/HI/analysis/HI_peak_stacking_analysis.py | e-koch/VLA_Lband | 1 | 6618452 | <gh_stars>1-10
'''
Analyze the outputs of HI_peak_stacking_feathered
(Only focusing on the feathered data).
'''
from pandas import DataFrame
import matplotlib.pyplot as p
import numpy as np
from spectral_cube import SpectralCube, Projection
import astropy.units as u
from astropy.io import fits
from cube_analysis.spectral_stacking_models import fit_hwhm
from paths import (fourteenB_HI_data_wGBT_path, fourteenB_wGBT_HI_file_dict,
allfigs_path, alltables_path)
from plotting_styles import default_figure, onecolumn_figure
# Compare properties of the stacked profiles.
# Bin the HI peak-temperature map into percentile ranges, then fit the HWHM
# profile model to the stacked spectrum of each bin for the three
# velocity-subtraction methods (rotation, centroid, peak velocity).
hi_peaktemp_hdu = fits.open(fourteenB_wGBT_HI_file_dict["PeakTemp"])[0]
hi_peaktemp = Projection.from_hdu(hi_peaktemp_hdu)
# Width of each percentile bin.
dperc = 5
unit = hi_peaktemp.unit
# Lower and upper brightness edges of every percentile bin, in map units.
inneredge = np.nanpercentile(hi_peaktemp, np.arange(0, 101, dperc)[:-1]) * unit
outeredge = np.nanpercentile(hi_peaktemp, np.arange(0, 101, dperc)[1:]) * unit
# Add something small to the 100th percentile so it is used
outeredge[-1] += 1e-3 * unit
wstring = "{}percentile".format(int(dperc))
sigma_noise = 2.8  # K
npix_beam = 41.
num_pix = np.load(fourteenB_HI_data_wGBT_path("stacked_spectra/peak_stacking_{}_num_pix.npy".format(wstring)))
rot_stack = SpectralCube.read(fourteenB_HI_data_wGBT_path("stacked_spectra/rotation_stacked_peak_{}.fits".format(wstring)))
cent_stack = SpectralCube.read(fourteenB_HI_data_wGBT_path("stacked_spectra/centroid_stacked_peak_{}.fits".format(wstring)))
peakvel_stack = SpectralCube.read(fourteenB_HI_data_wGBT_path("stacked_spectra/peakvel_stacked_peak_{}.fits".format(wstring)))
labels = ["rotsub", "centsub", "peaksub"]
param_names = ["sigma", "v_peak", "f_wings", "sigma_wing", "asymm", "kappa"]
# One array per (method, parameter) plus its lower/upper error limits.
# NOTE: the original initialized ``hi_params`` twice and created an unused
# ``hwhm_models`` dict; both redundancies are removed here.
hi_params = {}
for sub in labels:
    for name in param_names:
        par_name = "{0}_{1}".format(sub, name)
        par_lowlim = "{}_low_lim".format(par_name)
        par_uplim = "{}_up_lim".format(par_name)
        hi_params[par_name] = np.zeros_like(inneredge.value)
        hi_params[par_lowlim] = np.zeros_like(inneredge.value)
        hi_params[par_uplim] = np.zeros_like(inneredge.value)
for ctr, (r0, r1) in enumerate(zip(inneredge,
                                   outeredge)):
    print("On {0} of {1}".format(ctr + 1, len(inneredge)))
    hi_spectra = [rot_stack[:, ctr, 0],
                  cent_stack[:, ctr, 0],
                  peakvel_stack[:, ctr, 0]]
    for spectrum, label in zip(hi_spectra, labels):
        vels = spectrum.spectral_axis.to(u.km / u.s).value
        nbeams = num_pix[ctr] / npix_beam
        # Fit within +/- 80 km/s (the original comment said 60, but the
        # mask below has always been +/- 80).
        vel_mask = np.logical_and(vels >= -80, vels <= 80)
        parvals_hwhm, parerrs_hwhm, parnames_hwhm, g_HI_hwhm = \
            fit_hwhm(vels[vel_mask], spectrum.value[vel_mask],
                     sigma_noise=sigma_noise,
                     nbeams=nbeams, niters=100, interp_factor=1.)
        for idx, name in enumerate(parnames_hwhm):
            par_name = "{0}_{1}".format(label, name)
            hi_params[par_name][ctr] = parvals_hwhm[idx]
            hi_params["{}_low_lim".format(par_name)][ctr] = \
                np.abs(parerrs_hwhm[0, idx])
            hi_params["{}_up_lim".format(par_name)][ctr] = \
                np.abs(parerrs_hwhm[1, idx])
bin_names = ["{:.2f}-{:.2f} K".format(r0.value, r1.value)
             for r0, r1 in zip(inneredge, outeredge)]
bin_center = (0.5 * (inneredge + outeredge)).value
hi_params["bin_center"] = bin_center
hi_peak_fits = DataFrame(hi_params, index=bin_names)
hi_peak_fits.to_latex(alltables_path("hi_hwhm_totalprof_fits_peak_{}_feather.tex".format(wstring)))
hi_peak_fits.to_csv(fourteenB_HI_data_wGBT_path("tables/hi_hwhm_totalprof_fits_peak_{}_feather.csv".format(wstring),
                                                no_check=True))
# Let's plot some properties.
# from pandas import read_csv
# hi_peak_fits = read_csv(fourteenB_HI_data_wGBT_path("tables/hi_hwhm_totalprof_fits_peak_{}_feather.csv".format(wstring)), index_col=0)
onecolumn_figure()


def _plot_stacked_param(fits, param, ylabel, fname_base, symmetric=False):
    """Plot one fitted parameter vs. peak temperature for all three methods.

    Parameters
    ----------
    fits : pandas.DataFrame
        Fit table with '<method>_<param>' and error-limit columns.
    param : str
        Parameter suffix to plot (e.g. 'sigma', 'f_wings').
    ylabel : str
        Y-axis label for the parameter.
    fname_base : str
        File-name stem of the figure under stacked_profiles/.
    symmetric : bool
        If True, use twice the lower error limit as a symmetric error bar
        (matches the original sigma/v_peak figures); otherwise use the
        asymmetric [lower, upper] limits.
    """
    for method, label, fmt in [("rotsub", "Rotation\nSubtracted", '-D'),
                               ("centsub", "Centroid\nSubtracted", '--o'),
                               ("peaksub", "Peak Vel.\nSubtracted", '-.^')]:
        col = "{0}_{1}".format(method, param)
        if symmetric:
            # These errorbars looked small on the plot. Unsure if it is
            # treating it as single-sided or not. Doesn't really matter
            # in this case.
            yerr = fits["{}_low_lim".format(col)] * 2
        else:
            yerr = [fits["{}_low_lim".format(col)],
                    fits["{}_up_lim".format(col)]]
        p.errorbar(fits['bin_center'], fits[col], yerr=yerr,
                   label=label, fmt=fmt)
    p.legend(frameon=True)
    p.ylabel(ylabel)
    p.xlabel("Peak Temperature (K)")
    p.grid()
    p.tight_layout()
    p.savefig(allfigs_path("stacked_profiles/{}_feather.png".format(fname_base)))
    p.savefig(allfigs_path("stacked_profiles/{}_feather.pdf".format(fname_base)))
    p.close()


_plot_stacked_param(hi_peak_fits, "sigma", r"$\sigma_{\rm HWHM}$ (km/s)",
                    "hi_veldisp_peak_stackedfits", symmetric=True)
_plot_stacked_param(hi_peak_fits, "v_peak", "Centroid (km/s)",
                    "hi_vpeak_peak_stackedfits", symmetric=True)
_plot_stacked_param(hi_peak_fits, "f_wings", r"$f_{\rm wings}$",
                    "hi_fwings_peak_stackedfits")
_plot_stacked_param(hi_peak_fits, "asymm", "Asymmetry",
                    "hi_asymm_peak_stackedfits")
_plot_stacked_param(hi_peak_fits, "kappa", r"$\kappa$",
                    "hi_kappa_peak_stackedfits")
default_figure()
| '''
Analyze the outputs of HI_peak_stacking_feathered
(Only focusing on the feathered data).
'''
from pandas import DataFrame
import matplotlib.pyplot as p
import numpy as np
from spectral_cube import SpectralCube, Projection
import astropy.units as u
from astropy.io import fits
from cube_analysis.spectral_stacking_models import fit_hwhm
from paths import (fourteenB_HI_data_wGBT_path, fourteenB_wGBT_HI_file_dict,
allfigs_path, alltables_path)
from plotting_styles import default_figure, onecolumn_figure
# Compare properties of the stacked profiles
# Finally, fit Gaussian models and save the fit results
hi_peaktemp_hdu = fits.open(fourteenB_wGBT_HI_file_dict["PeakTemp"])[0]
hi_peaktemp = Projection.from_hdu(hi_peaktemp_hdu)
dperc = 5
unit = hi_peaktemp.unit
inneredge = np.nanpercentile(hi_peaktemp, np.arange(0, 101, dperc)[:-1]) * unit
outeredge = np.nanpercentile(hi_peaktemp, np.arange(0, 101, dperc)[1:]) * unit
# Add something small to the 100th percentile so it is used
outeredge[-1] += 1e-3 * unit
wstring = "{}percentile".format(int(dperc))
sigma_noise = 2.8 # K
npix_beam = 41.
num_pix = np.load(fourteenB_HI_data_wGBT_path("stacked_spectra/peak_stacking_{}_num_pix.npy".format(wstring)))
rot_stack = SpectralCube.read(fourteenB_HI_data_wGBT_path("stacked_spectra/rotation_stacked_peak_{}.fits".format(wstring)))
cent_stack = SpectralCube.read(fourteenB_HI_data_wGBT_path("stacked_spectra/centroid_stacked_peak_{}.fits".format(wstring)))
peakvel_stack = SpectralCube.read(fourteenB_HI_data_wGBT_path("stacked_spectra/peakvel_stacked_peak_{}.fits".format(wstring)))
hi_params = {}
hwhm_models = {}
labels = ["rotsub", "centsub", "peaksub"]
hi_params = {}
param_names = ["sigma", "v_peak", "f_wings", "sigma_wing", "asymm", "kappa"]
for sub in labels:
for name in param_names:
par_name = "{0}_{1}".format(sub, name)
par_lowlim = "{}_low_lim".format(par_name)
par_uplim = "{}_up_lim".format(par_name)
hi_params[par_name] = np.zeros_like(inneredge.value)
hi_params[par_lowlim] = np.zeros_like(inneredge.value)
hi_params[par_uplim] = np.zeros_like(inneredge.value)
for ctr, (r0, r1) in enumerate(zip(inneredge,
outeredge)):
print("On {0} of {1}".format(ctr + 1, len(inneredge)))
hi_spectra = [rot_stack[:, ctr, 0],
cent_stack[:, ctr, 0],
peakvel_stack[:, ctr, 0]]
for spectrum, label in zip(hi_spectra, labels):
vels = spectrum.spectral_axis.to(u.km / u.s).value
nbeams = num_pix[ctr] / npix_beam
# Fit +/- 60 km/s
vel_mask = np.logical_and(vels >= -80, vels <= 80)
parvals_hwhm, parerrs_hwhm, parnames_hwhm, g_HI_hwhm = \
fit_hwhm(vels[vel_mask], spectrum.value[vel_mask],
sigma_noise=sigma_noise,
nbeams=nbeams, niters=100, interp_factor=1.)
for idx, name in enumerate(parnames_hwhm):
par_name = "{0}_{1}".format(label, name)
hi_params[par_name][ctr] = parvals_hwhm[idx]
hi_params["{}_low_lim".format(par_name)][ctr] = \
np.abs(parerrs_hwhm[0, idx])
hi_params["{}_up_lim".format(par_name)][ctr] = \
np.abs(parerrs_hwhm[1, idx])
bin_names = ["{:.2f}-{:.2f} K".format(r0.value, r1.value)
for r0, r1 in zip(inneredge, outeredge)]
bin_center = (0.5 * (inneredge + outeredge)).value
hi_params["bin_center"] = bin_center
hi_peak_fits = DataFrame(hi_params, index=bin_names)
hi_peak_fits.to_latex(alltables_path("hi_hwhm_totalprof_fits_peak_{}_feather.tex".format(wstring)))
hi_peak_fits.to_csv(fourteenB_HI_data_wGBT_path("tables/hi_hwhm_totalprof_fits_peak_{}_feather.csv".format(wstring),
no_check=True))
# Let's plot some properties.
# from pandas import read_csv
# hi_peak_fits = read_csv(fourteenB_HI_data_wGBT_path("tables/hi_hwhm_totalprof_fits_peak_{}_feather.csv".format(wstring)), index_col=0)
onecolumn_figure()
# These errorbars looked small on the plot. Unsure if it is treating it as
# single-sided or not. Doesn't really matter in this case.
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['rotsub_sigma'],
yerr=hi_peak_fits['rotsub_sigma_low_lim'] * 2,
label="Rotation\nSubtracted", fmt='-D')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['centsub_sigma'],
yerr=hi_peak_fits['centsub_sigma_low_lim'] * 2,
label="Centroid\nSubtracted", fmt='--o')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['peaksub_sigma'],
yerr=hi_peak_fits['peaksub_sigma_low_lim'] * 2,
label="Peak Vel.\nSubtracted", fmt='-.^')
p.legend(frameon=True)
p.ylabel(r"$\sigma_{\rm HWHM}$ (km/s)")
p.xlabel("Peak Temperature (K)")
p.grid()
p.tight_layout()
p.savefig(allfigs_path("stacked_profiles/hi_veldisp_peak_stackedfits_feather.png"))
p.savefig(allfigs_path("stacked_profiles/hi_veldisp_peak_stackedfits_feather.pdf"))
p.close()
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['rotsub_v_peak'],
yerr=hi_peak_fits['rotsub_v_peak_low_lim'] * 2,
label="Rotation\nSubtracted", fmt='-D')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['centsub_v_peak'],
yerr=hi_peak_fits['centsub_v_peak_low_lim'] * 2,
label="Centroid\nSubtracted", fmt='--o')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['peaksub_v_peak'],
yerr=hi_peak_fits['peaksub_v_peak_low_lim'] * 2,
label="Peak Vel.\nSubtracted", fmt='-.^')
p.legend(frameon=True)
p.ylabel("Centroid (km/s)")
p.xlabel("Peak Temperature (K)")
p.grid()
p.tight_layout()
p.savefig(allfigs_path("stacked_profiles/hi_vpeak_peak_stackedfits_feather.png"))
p.savefig(allfigs_path("stacked_profiles/hi_vpeak_peak_stackedfits_feather.pdf"))
p.close()
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['rotsub_f_wings'],
yerr=[hi_peak_fits['rotsub_f_wings_low_lim'],
hi_peak_fits['rotsub_f_wings_up_lim']],
label="Rotation\nSubtracted", fmt='-D')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['centsub_f_wings'],
yerr=[hi_peak_fits['centsub_f_wings_low_lim'],
hi_peak_fits['centsub_f_wings_up_lim']],
label="Centroid\nSubtracted", fmt='--o')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['peaksub_f_wings'],
yerr=[hi_peak_fits['peaksub_f_wings_low_lim'],
hi_peak_fits['peaksub_f_wings_up_lim']],
label="Peak Vel.\nSubtracted", fmt='-.^')
p.legend(frameon=True)
p.ylabel(r"$f_{\rm wings}$")
p.xlabel("Peak Temperature (K)")
p.grid()
p.tight_layout()
p.savefig(allfigs_path("stacked_profiles/hi_fwings_peak_stackedfits_feather.png"))
p.savefig(allfigs_path("stacked_profiles/hi_fwings_peak_stackedfits_feather.pdf"))
p.close()
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['rotsub_asymm'],
yerr=[hi_peak_fits['rotsub_asymm_low_lim'],
hi_peak_fits['rotsub_asymm_up_lim']],
label="Rotation\nSubtracted", fmt='-D')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['centsub_asymm'],
yerr=[hi_peak_fits['centsub_asymm_low_lim'],
hi_peak_fits['centsub_asymm_up_lim']],
label="Centroid\nSubtracted", fmt='--o')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['peaksub_asymm'],
yerr=[hi_peak_fits['peaksub_asymm_low_lim'],
hi_peak_fits['peaksub_asymm_up_lim']],
label="Peak Vel.\nSubtracted", fmt='-.^')
p.legend(frameon=True)
p.ylabel(r"Asymmetry")
p.xlabel("Peak Temperature (K)")
p.grid()
p.tight_layout()
p.savefig(allfigs_path("stacked_profiles/hi_asymm_peak_stackedfits_feather.png"))
p.savefig(allfigs_path("stacked_profiles/hi_asymm_peak_stackedfits_feather.pdf"))
p.close()
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['rotsub_kappa'],
yerr=[hi_peak_fits['rotsub_kappa_low_lim'],
hi_peak_fits['rotsub_kappa_up_lim']],
label="Rotation\nSubtracted", fmt='-D')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['centsub_kappa'],
yerr=[hi_peak_fits['centsub_kappa_low_lim'],
hi_peak_fits['centsub_kappa_up_lim']],
label="Centroid\nSubtracted", fmt='--o')
p.errorbar(hi_peak_fits['bin_center'], hi_peak_fits['peaksub_kappa'],
yerr=[hi_peak_fits['peaksub_kappa_low_lim'],
hi_peak_fits['peaksub_kappa_up_lim']],
label="Peak Vel.\nSubtracted", fmt='-.^')
p.legend(frameon=True)
p.ylabel(r"$\kappa$")
p.xlabel("Peak Temperature (K)")
p.grid()
p.tight_layout()
p.savefig(allfigs_path("stacked_profiles/hi_kappa_peak_stackedfits_feather.png"))
p.savefig(allfigs_path("stacked_profiles/hi_kappa_peak_stackedfits_feather.pdf"))
p.close()
default_figure() | en | 0.846546 | Analyze the outputs of HI_peak_stacking_feathered (Only focusing on the feathered data). # Compare properties of the stacked profiles # Finally, fit Gaussian models and save the fit results # Add something small to the 100th percentile so it is used # K # Fit +/- 60 km/s # Let's plot some properties. # from pandas import read_csv # hi_peak_fits = read_csv(fourteenB_HI_data_wGBT_path("tables/hi_hwhm_totalprof_fits_peak_{}_feather.csv".format(wstring)), index_col=0) # These errorbars looked small on the plot. Unsure if it is treating it as # single-sided or not. Doesn't really matter in this case. | 2.462054 | 2 |
dhost/github/permissions.py | dhost-project/dhost | 0 | 6618453 | <filename>dhost/github/permissions.py
from rest_framework import permissions
from .utils import user_has_github_account
class HasGithubLinked(permissions.BasePermission):
    """Grant access only to authenticated users with a linked GitHub account."""

    def has_permission(self, request, view):
        # Anonymous users can never have a linked GitHub account, so both
        # conditions must hold.
        user = request.user
        return bool(user.is_authenticated and user_has_github_account(user))
| <filename>dhost/github/permissions.py
from rest_framework import permissions
from .utils import user_has_github_account
class HasGithubLinked(permissions.BasePermission):
def has_permission(self, request, view):
if request.user.is_authenticated:
# if the user has linked his account with Github
if user_has_github_account(request.user):
return True
return False
| en | 0.996589 | # if the user has linked his account with Github | 2.363984 | 2 |
desafio043.py | Darlingcris/Desafios-Python | 0 | 6618454 | <filename>desafio043.py
# Develop logic that reads a person's weight and height, computes their
# BMI (IMC) and shows their status according to the table below:
#   below 18.5:      underweight
#   18.5 to 25:      ideal weight
#   25 to 30:        overweight
#   30 to 40:        obese
#   above 40:        morbidly obese
# (Prompts and messages are intentionally left in Portuguese.)
idade=int(input("Quantos anos voce tem: "))
altura=float(input("Digite a sua altura: (m) "))
peso=float(input("Digite o seu peso: (kg) "))
# BMI = weight (kg) / height (m) squared.
imc=peso/(altura**2)
print("\nA sua idade e {} e o seu Indice de Massa Corporal (IMC) e {:.1f}.".format(idade,imc), end=" ")
# Children (<= 15) and seniors (>= 60) use different BMI reference tables,
# so only ages in between are classified here.
if idade<=15:
    print("Voce deve verificar o resultado na tabela de IMC INFANTIL.")
elif idade>=60:
    print("Voce deve verificar o resultado na tabela de IMC IDOSO.")
elif 15<idade<60:
    if imc<18.5:
        print("Voce esta ABAIXO do Peso.")
    elif imc<25:
        print("O seu Peso e IDEAL!")
    elif imc<30:
        print("ATENçAO! Voce esta com SOBREPESO!")
    elif imc<40:
        print("ATENçAO!! Voce esta OBESO!!")
    else:
        print("ATENçAO!!! OBESIDADE MORBIDA!!!")
print("\nSempre cuide da sua SAUDE!!!")
| <filename>desafio043.py
#Desenvolva uma logica que leia o peso
#e a altura de uma pessoa, calcule seu
#IMC e mostre seu status de acordo com
#a tabela abaixo:
#abaixo de 18.5: abaixo do peso
#entre 18.5 e 25: Peso ideal
#25 ate 30: Sobrepeso
#30 a 40: Obesidade
#acima de 40: obesidade morbida
idade=int(input("Quantos anos voce tem: "))
altura=float(input("Digite a sua altura: (m) "))
peso=float(input("Digite o seu peso: (kg) "))
imc=peso/(altura**2)
print("\nA sua idade e {} e o seu Indice de Massa Corporal (IMC) e {:.1f}.".format(idade,imc), end=" ")
if idade<=15:
print("Voce deve verificar o resultado na tabela de IMC INFANTIL.")
elif idade>=60:
print("Voce deve verificar o resultado na tabela de IMC IDOSO.")
elif 15<idade<60:
if imc<18.5:
print("Voce esta ABAIXO do Peso.")
elif imc<25:
print("O seu Peso e IDEAL!")
elif imc<30:
print("ATENçAO! Voce esta com SOBREPESO!")
elif imc<40:
print("ATENçAO!! Voce esta OBESO!!")
else:
print("ATENçAO!!! OBESIDADE MORBIDA!!!")
print("\nSempre cuide da sua SAUDE!!!")
| pt | 0.964927 | #Desenvolva uma logica que leia o peso #e a altura de uma pessoa, calcule seu #IMC e mostre seu status de acordo com #a tabela abaixo: #abaixo de 18.5: abaixo do peso #entre 18.5 e 25: Peso ideal #25 ate 30: Sobrepeso #30 a 40: Obesidade #acima de 40: obesidade morbida | 4.147891 | 4 |
utils.py | tabatahg/fun_with_regex | 0 | 6618455 | """utilities"""
import re
def isNumberRegex(value):
    """Return ``value`` converted to int if it is purely digits, else unchanged.

    Fix: the original raised ValueError on empty or whitespace-only input
    (the ``\\D`` search found nothing, then ``int('')`` failed); such input
    is now returned as-is. Signed strings like '-5' contain a non-digit and
    are NOT converted, matching the original behaviour.
    """
    stripped = value.strip()
    # A non-empty string with no non-digit character is a plain integer.
    if stripped and re.search(r'\D', stripped) is None:
        return int(stripped)
    return value
"""Test"""
# print(isNumberRegex(input("input something:")))
| """utilities"""
import re
def isNumberRegex(value):
not_integer = re.compile(r'\D')
number_check = not_integer.search(value.strip())
if number_check is None:
convert_number = int(value)
return convert_number
else:
return value
"""Test"""
# print(isNumberRegex(input("input something:")))
| en | 0.463682 | utilities Test # print(isNumberRegex(input("input something:"))) | 3.740503 | 4 |
fuzzy_ship_navigation/fuzzy_defines.py | gister9000/Combining-fuzzy-logic-neural-networks-and-genetic-algorithm | 0 | 6618456 | <filename>fuzzy_ship_navigation/fuzzy_defines.py<gh_stars>0
from domain import *
from fuzzy_sets import *
from relations import *
# Heuristically defined constants used for instantiating the fuzzy sets
# that drive ship navigation.
# Distance categories, in pixels.
distance_far = 160
distance_close = 70
distance_critical = 30
# Velocity categories, in pixels/second.
velocity_fast = 70
velocity_normal = 40
velocity_slow = 10
# How much the heading changes, in degrees/second.
direction_sharp = 10
direction_normal = 6
direction_minimal = 2
# Acceleration: how much speed changes, in pixels/second.
acceleration_big = 11
acceleration_small = 3
# Universe-of-discourse domains for each quantity.
# NOTE(review): the +1 over the natural maxima (1301, 501, 91, 36) suggests
# simple_domain's upper bound is exclusive -- verify in domain.py.
distance_domain = simple_domain(0, 1301)
velocity_domain = simple_domain(0, 501)
direction_domain = simple_domain(-90, 91)
acceleration_domain = simple_domain(-35, 36)
# Container of fuzzy-set factories for every navigation quantity.
# Membership shapes (presumably, from standard_fuzzy_sets naming -- verify):
#   l_function(a, b):        1 below a, falling to 0 at b  (low-end set)
#   gamma_function(a, b):    0 below a, rising to 1 at b   (high-end set)
#   lambda_function(a,b,c):  triangular set peaking at b   (middle set)
class action:
    # Fix: the originals were defined without ``self`` or ``@staticmethod``,
    # so calling them on an *instance* raised TypeError; ``@staticmethod``
    # keeps class-level calls working and enables instance-level calls.

    # DISTANCE sets.
    @staticmethod
    def get_distance_far():
        """Fuzzy set for 'far' distance (high-end)."""
        return calculated_fuzzy_set(distance_domain, standard_fuzzy_sets.gamma_function(distance_close, distance_far))

    @staticmethod
    def get_distance_close():
        """Fuzzy set for 'close' distance (middle)."""
        return calculated_fuzzy_set(distance_domain, standard_fuzzy_sets.lambda_function(distance_critical, distance_close, distance_far))

    @staticmethod
    def get_distance_critical():
        """Fuzzy set for 'critical' distance (low-end)."""
        return calculated_fuzzy_set(distance_domain, standard_fuzzy_sets.l_function(distance_critical, distance_close))

    # VELOCITY sets.
    @staticmethod
    def get_velocity_slow():
        """Fuzzy set for slow velocity (low-end)."""
        return calculated_fuzzy_set(velocity_domain, standard_fuzzy_sets.l_function(velocity_slow, velocity_normal))

    @staticmethod
    def get_velocity_normal():
        """Fuzzy set for normal velocity (middle)."""
        return calculated_fuzzy_set(velocity_domain, standard_fuzzy_sets.lambda_function(velocity_slow, velocity_normal, velocity_fast))

    @staticmethod
    def get_velocity_fast():
        """Fuzzy set for fast velocity (high-end).

        Fix: the original used l_function (a low-end shape), making slow
        speeds fully 'fast'; gamma_function matches the high-end pattern of
        get_distance_far and get_acceleration_strong.
        """
        return calculated_fuzzy_set(velocity_domain, standard_fuzzy_sets.gamma_function(velocity_normal, velocity_fast))

    # DIRECTION sets: three turn strengths per side plus 'straight'.
    # Positive angles are left turns, negative angles are right turns.
    @staticmethod
    def get_direction_sharp_left():
        """Fuzzy set for a sharp left turn (large positive angles)."""
        return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.gamma_function(direction_normal, direction_sharp))

    @staticmethod
    def get_direction_normal_left():
        """Fuzzy set for a normal left turn."""
        return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function(direction_minimal, direction_normal, direction_sharp))

    @staticmethod
    def get_direction_minimal_left():
        """Fuzzy set for a minimal left turn.

        Fix: the original peaked at direction_normal
        (lambda_function(0, direction_normal, direction_sharp)), which was
        not the mirror of get_direction_minimal_right; it now peaks at
        direction_minimal, mirroring the right-hand set.
        """
        return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function(0, direction_minimal, direction_normal))

    @staticmethod
    def get_direction_zero():
        """Fuzzy set for going straight (heading change near zero)."""
        return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function(0 - direction_minimal, 0, direction_minimal))

    @staticmethod
    def get_direction_sharp_right():
        """Fuzzy set for a sharp right turn (large negative angles)."""
        return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.l_function(0 - direction_sharp, 0 - direction_normal))

    @staticmethod
    def get_direction_normal_right():
        """Fuzzy set for a normal right turn."""
        return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function(0 - direction_sharp, 0 - direction_normal, 0 - direction_minimal))

    @staticmethod
    def get_direction_minimal_right():
        """Fuzzy set for a minimal right turn."""
        return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function(0 - direction_normal, 0 - direction_minimal, 0))

    # ACCELERATION sets: zero, small/strong acceleration, small/strong deceleration.
    @staticmethod
    def get_acceleration_zero():
        """Fuzzy set for (near-)zero acceleration."""
        return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.lambda_function(0 - acceleration_small, 0, acceleration_small))

    @staticmethod
    def get_decceleration_small():
        """Fuzzy set for small deceleration."""
        return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.lambda_function(0 - acceleration_big, 0 - acceleration_small, 0))

    @staticmethod
    def get_decceleration_strong():
        """Fuzzy set for strong deceleration (low-end of the domain)."""
        return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.l_function(0 - acceleration_big, 0 - acceleration_small))

    @staticmethod
    def get_acceleration_small():
        """Fuzzy set for small acceleration."""
        return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.lambda_function(0, acceleration_small, acceleration_big))

    @staticmethod
    def get_acceleration_strong():
        """Fuzzy set for strong acceleration (high-end of the domain)."""
        return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.gamma_function(acceleration_small, acceleration_big))
| <filename>fuzzy_ship_navigation/fuzzy_defines.py<gh_stars>0
from domain import *
from fuzzy_sets import *
from relations import *
# heuristically defined constants used for instantiating fuzzy sets
# used for navigating the ship
# distance categories in pixels
distance_far = 160
distance_close = 70
distance_critical = 30
# velocity_categories in pixels/second
velocity_fast = 70
velocity_normal = 40
velocity_slow = 10
# how much direction will change in degrees/second
direction_sharp = 10
direction_normal = 6
direction_minimal = 2
# acceleration defines, how much will speed increase in pixels/second
acceleration_big = 11
acceleration_small = 3
# distance domain
distance_domain = simple_domain(0, 1301)
# velocity domain
velocity_domain = simple_domain(0, 501)
# direction domain
direction_domain = simple_domain(-90, 91)
# acceleration domain
acceleration_domain = simple_domain(-35, 36)
# class that contains all possible actions used for navigating
class action:
# DISTANCES fuzz
# returns fuzzy_set which defines distance_far
def get_distance_far():
return calculated_fuzzy_set(distance_domain, standard_fuzzy_sets.gamma_function(distance_close, distance_far))
# returns fuzzy_set which defines distance_close
def get_distance_close():
return calculated_fuzzy_set(distance_domain, standard_fuzzy_sets.lambda_function(distance_critical, distance_close, distance_far))
# returns fuzzy_set which defines distance_critical
def get_distance_critical():
return calculated_fuzzy_set(distance_domain, standard_fuzzy_sets.l_function(distance_critical, distance_close))
# VELOCITY fuzz
# returns fuzzy_set which defines velocity_slow
def get_velocity_slow():
return calculated_fuzzy_set(velocity_domain, standard_fuzzy_sets.l_function(velocity_slow, velocity_normal))
# returns fuzzy_set which defines velocity_normal
def get_velocity_normal():
return calculated_fuzzy_set(velocity_domain, standard_fuzzy_sets.lambda_function(velocity_slow, velocity_normal, velocity_fast))
# returns fuzzy_set which defines velocity_fast
def get_velocity_fast():
return calculated_fuzzy_set(velocity_domain, standard_fuzzy_sets.l_function(velocity_normal, velocity_fast))
# DIRECTION fuzz
# options include 3 different turns to each side and straight direction (direction to zero)
# returns fuzzy_set which defines direction_sharp to the left
def get_direction_sharp_left():
return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.gamma_function(direction_normal, direction_sharp))
# returns fuzzy_set which defines direction_normal to the left
def get_direction_normal_left():
return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function(direction_minimal, direction_normal, direction_sharp))
# returns fuzzy_set which defines direction_normal to the left
def get_direction_minimal_left():
return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function(0, direction_normal, direction_sharp))
# returns fuzzy_set which defines direction to be zero
def get_direction_zero():
return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function( 0-direction_minimal, 0, direction_minimal))
# returns fuzzy_set which defines direction_sharp to the right
def get_direction_sharp_right():
return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.l_function(0-direction_sharp, 0-direction_normal))
# returns fuzzy_set which defines direction_normal to the right
def get_direction_normal_right():
return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function(0-direction_sharp, 0-direction_normal, 0-direction_minimal))
# returns fuzzy_set which defines direction_normal to the right
def get_direction_minimal_right():
return calculated_fuzzy_set(direction_domain, standard_fuzzy_sets.lambda_function(0-direction_normal, 0-direction_minimal, 0))
# ACCELERATION fuzz
# options include nullifying, small and big acceleration, small and big decceleration
# returns fuzzy_set which defines zero acceleration
def get_acceleration_zero():
return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.lambda_function(0-acceleration_small, 0, acceleration_small))
# returns fuzzy_set which defines small decceleration
def get_decceleration_small():
return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.lambda_function(0-acceleration_big, 0-acceleration_small, 0))
# returns fuzzy_set which defines strong decceleration
def get_decceleration_strong():
return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.l_function(0-acceleration_big, 0-acceleration_small))
# returns fuzzy_set which defines small acceleration
def get_acceleration_small():
return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.lambda_function(0, acceleration_small, acceleration_big))
# returns fuzzy_set which defines strong acceleration
def get_acceleration_strong():
return calculated_fuzzy_set(acceleration_domain, standard_fuzzy_sets.gamma_function(acceleration_small, acceleration_big))
| en | 0.746432 | # heuristically defined constants used for instantiating fuzzy sets # used for navigating the ship # distance categories in pixels # velocity_categories in pixels/second # how much direction will change in degrees/second # acceleration defines, how much will speed increase in pixels/second # distance domain # velocity domain # direction domain # acceleration domain # class that contains all possible actions used for navigating # DISTANCES fuzz # returns fuzzy_set which defines distance_far # returns fuzzy_set which defines distance_close # returns fuzzy_set which defines distance_critical # VELOCITY fuzz # returns fuzzy_set which defines velocity_slow # returns fuzzy_set which defines velocity_normal # returns fuzzy_set which defines velocity_fast # DIRECTION fuzz # options include 3 different turns to each side and straight direction (direction to zero) # returns fuzzy_set which defines direction_sharp to the left # returns fuzzy_set which defines direction_normal to the left # returns fuzzy_set which defines direction_normal to the left # returns fuzzy_set which defines direction to be zero # returns fuzzy_set which defines direction_sharp to the right # returns fuzzy_set which defines direction_normal to the right # returns fuzzy_set which defines direction_normal to the right # ACCELERATION fuzz # options include nullifying, small and big acceleration, small and big decceleration # returns fuzzy_set which defines zero acceleration # returns fuzzy_set which defines small decceleration # returns fuzzy_set which defines strong decceleration # returns fuzzy_set which defines small acceleration # returns fuzzy_set which defines strong acceleration | 2.93644 | 3 |
scripts/examples/Arduino/Nano-33-BLE-Sense/00-Board/blinky.py | elmagnificogi/openmv | 1,761 | 6618457 | <filename>scripts/examples/Arduino/Nano-33-BLE-Sense/00-Board/blinky.py
# Blinky example
import time
from board import LED
led_red = LED(1)
led_green = LED(2)
led_blue = LED(3)
led_yellow = LED(4)
while (True):
led_blue.on()
time.sleep_ms(250)
led_blue.off()
led_red.on()
time.sleep_ms(250)
led_red.off()
led_green.on()
time.sleep_ms(250)
led_green.off()
led_yellow.on()
time.sleep_ms(250)
led_yellow.off()
time.sleep_ms(500)
| <filename>scripts/examples/Arduino/Nano-33-BLE-Sense/00-Board/blinky.py
# Blinky example
import time
from board import LED
led_red = LED(1)
led_green = LED(2)
led_blue = LED(3)
led_yellow = LED(4)
while (True):
led_blue.on()
time.sleep_ms(250)
led_blue.off()
led_red.on()
time.sleep_ms(250)
led_red.off()
led_green.on()
time.sleep_ms(250)
led_green.off()
led_yellow.on()
time.sleep_ms(250)
led_yellow.off()
time.sleep_ms(500)
| en | 0.131746 | # Blinky example | 2.63926 | 3 |
dilatedAttention/functions.py | Qiuhao-Zhou/DilatedAttention | 0 | 6618458 | <filename>dilatedAttention/functions.py
import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.cuda.comm as comm
import torch.nn.functional as F
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load
import os, time
import functools
curr_dir = os.path.dirname(os.path.abspath(__file__))
_src_path = os.path.join(curr_dir, "src")
_build_path = os.path.join(curr_dir, "build")
os.makedirs(_build_path, exist_ok=True)
pyda = load(name="pyda",
extra_cflags=["-O3"],
build_directory=_build_path,
verbose=True,
sources = [os.path.join(_src_path, f) for f in [
"lib_da.cpp", "da.cu"
]],
extra_cuda_cflags=["--expt-extended-lambda"])
def _check_contiguous(*args):
if not all([mod is None or mod.is_contiguous() for mod in args]):
raise ValueError("Non-contiguous input")
class DA_Weight(autograd.Function):
@staticmethod
def forward(ctx, t, f):
# Save context
n, c, h, w = t.size()
size = (n, 9, h, w)
weight = torch.zeros(size, dtype=t.dtype, layout=t.layout, device=t.device)
pyda.da_forward_cuda(t, f, weight, 1)
# Output
ctx.save_for_backward(t, f)
return weight
@staticmethod
@once_differentiable
def backward(ctx, dw):
t, f = ctx.saved_tensors
dt = torch.zeros_like(t)
df = torch.zeros_like(f)
pyda.da_backward_cuda(dw.contiguous(), t, f, dt, df, 1)
_check_contiguous(dt, df)
return dt, df
class DA_Map(autograd.Function):
@staticmethod
def forward(ctx, weight, g):
# Save context
out = torch.zeros_like(g)
pyda.da_map_forward_cuda(weight, g, out, 1)
# Output
ctx.save_for_backward(weight, g)
return out
@staticmethod
@once_differentiable
def backward(ctx, dout):
weight, g = ctx.saved_tensors
dw = torch.zeros_like(weight)
dg = torch.zeros_like(g)
pyda.da_map_backward_cuda(dout.contiguous(), weight, g, dw, dg, 1)
_check_contiguous(dw, dg)
return dw, dg
da_weight = DA_Weight.apply
da_map = DA_Map.apply
class DilatedAttention(nn.Module):
""" Dilated Attention Module"""
def __init__(self,in_dim):
super(DilatedAttention,self).__init__()
self.chanel_in = in_dim
self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)
#self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, a, b, c):
#proj_query = self.query_conv(x)
#proj_key = self.key_conv(x)
#proj_value = self.value_conv(x)
energy = da_weight(a, b)
self.attention = energy
attention = F.softmax(energy, 1)
out = da_map(attention, c)
#out = self.gamma*out + x
return out
__all__ = ["DilatedAttention", "da_weight", "da_map"]
if __name__ == "__main__":
ca = DilatedAttention(256).cuda()
x = torch.zeros(1, 3, 6, 6).cuda() + 1
y = torch.zeros(1, 3, 6, 6).cuda() + 2
z = torch.zeros(1, 3, 6, 6).cuda() + 3
out = ca(x, y, z)
print (out)
print(ca.attention.permute((0,2,3,1)))
| <filename>dilatedAttention/functions.py
import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.cuda.comm as comm
import torch.nn.functional as F
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load
import os, time
import functools
curr_dir = os.path.dirname(os.path.abspath(__file__))
_src_path = os.path.join(curr_dir, "src")
_build_path = os.path.join(curr_dir, "build")
os.makedirs(_build_path, exist_ok=True)
pyda = load(name="pyda",
extra_cflags=["-O3"],
build_directory=_build_path,
verbose=True,
sources = [os.path.join(_src_path, f) for f in [
"lib_da.cpp", "da.cu"
]],
extra_cuda_cflags=["--expt-extended-lambda"])
def _check_contiguous(*args):
if not all([mod is None or mod.is_contiguous() for mod in args]):
raise ValueError("Non-contiguous input")
class DA_Weight(autograd.Function):
@staticmethod
def forward(ctx, t, f):
# Save context
n, c, h, w = t.size()
size = (n, 9, h, w)
weight = torch.zeros(size, dtype=t.dtype, layout=t.layout, device=t.device)
pyda.da_forward_cuda(t, f, weight, 1)
# Output
ctx.save_for_backward(t, f)
return weight
@staticmethod
@once_differentiable
def backward(ctx, dw):
t, f = ctx.saved_tensors
dt = torch.zeros_like(t)
df = torch.zeros_like(f)
pyda.da_backward_cuda(dw.contiguous(), t, f, dt, df, 1)
_check_contiguous(dt, df)
return dt, df
class DA_Map(autograd.Function):
@staticmethod
def forward(ctx, weight, g):
# Save context
out = torch.zeros_like(g)
pyda.da_map_forward_cuda(weight, g, out, 1)
# Output
ctx.save_for_backward(weight, g)
return out
@staticmethod
@once_differentiable
def backward(ctx, dout):
weight, g = ctx.saved_tensors
dw = torch.zeros_like(weight)
dg = torch.zeros_like(g)
pyda.da_map_backward_cuda(dout.contiguous(), weight, g, dw, dg, 1)
_check_contiguous(dw, dg)
return dw, dg
da_weight = DA_Weight.apply
da_map = DA_Map.apply
class DilatedAttention(nn.Module):
""" Dilated Attention Module"""
def __init__(self,in_dim):
super(DilatedAttention,self).__init__()
self.chanel_in = in_dim
self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)
#self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, a, b, c):
#proj_query = self.query_conv(x)
#proj_key = self.key_conv(x)
#proj_value = self.value_conv(x)
energy = da_weight(a, b)
self.attention = energy
attention = F.softmax(energy, 1)
out = da_map(attention, c)
#out = self.gamma*out + x
return out
__all__ = ["DilatedAttention", "da_weight", "da_map"]
if __name__ == "__main__":
ca = DilatedAttention(256).cuda()
x = torch.zeros(1, 3, 6, 6).cuda() + 1
y = torch.zeros(1, 3, 6, 6).cuda() + 2
z = torch.zeros(1, 3, 6, 6).cuda() + 3
out = ca(x, y, z)
print (out)
print(ca.attention.permute((0,2,3,1)))
| en | 0.175882 | # Save context # Output # Save context # Output Dilated Attention Module #self.gamma = nn.Parameter(torch.zeros(1)) #proj_query = self.query_conv(x) #proj_key = self.key_conv(x) #proj_value = self.value_conv(x) #out = self.gamma*out + x | 2.074207 | 2 |
symposion/schedule/migrations/0005_slot_override_rowspan.py | pyohio/symposion | 0 | 6618459 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-07-01 06:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_schedule', '0004_auto_20180630_0140'),
]
operations = [
migrations.AddField(
model_name='slot',
name='override_rowspan',
field=models.IntegerField(blank=True, null=True),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-07-01 06:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_schedule', '0004_auto_20180630_0140'),
]
operations = [
migrations.AddField(
model_name='slot',
name='override_rowspan',
field=models.IntegerField(blank=True, null=True),
),
]
| en | 0.563925 | # -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-07-01 06:46 | 1.388939 | 1 |
src/chrome/evaluate/collectscripts.py | Ultra-Seven/newStream | 0 | 6618460 | import json
import os
import bsddb3
branches = ["bgwte", "fpyz", "gal", "gr2547_sh3266", "lw2666_az2407"]
custom_files = ["kf.js", "poly_predict.js", "regression.min.js", "serverrequest.js"]
scripts = [
"""
rm -rf /tmp/stream;
""",
"""
cd /tmp;
git clone <EMAIL>:cudbg/stream.git;
cd -;
""",
"""
cd /tmp/stream;
git pull;
git checkout master;
cd -;
cp /tmp/stream/src/chrome/server/js/ktm.js ./js/;
cp /tmp/stream/src/chrome/server/js/predict.js ./js/;
cp /tmp/stream/src/chrome/server/js/dist.js ./js/;
"""
]
def copy_from_branches(branches):
for script in scripts:
try:
os.system(script)
except Exception as e:
print e
for branch in branches:
print branch
os.system("""
cd /tmp/stream;
git checkout %s;
git pull;
cd -;
cp /tmp/stream/src/chrome/server/js/evaluator.js ./js/evaluator_%s.js;
cp /tmp/stream/src/chrome/server/js/predict.js ./js/predict_%s.js;
""" % (branch, branch, branch))
for cfname in custom_files:
try:
os.system("cp /tmp/stream/src/chrome/server/js/%s ./js/%s;" % (cfname, cfname))
except e:
pass
for fname in os.listdir("/tmp/stream/src/chrome/server/"):
if fname.endswith(".bdb"):
os.system("""
cp /tmp/stream/src/chrome/server/%s ./data/%s_%s
""" % (fname, branch, fname))
#os.system("git checkout predeval")
def combine_traces(branches):
# go through all the bdb files in data/ and merge into a single json file
trace_keys = ["xs", "ys", "ts", "actions"]
all_traces = []
for fname in os.listdir("./data"):
if fname.endswith(".bdb"):
db = bsddb3.hashopen(os.path.join("./data", fname))
for key in db:
try:
trace = json.loads(db[key])
trace = map(list,zip(*map(trace.get, trace_keys)))
all_traces.append(trace)
except Exception as e:
pass
print "flushing %s traces" % len(all_traces)
with file("./data/alltraces.json", "w") as f:
json.dump(all_traces, f)
copy_from_branches(branches)
combine_traces(branches)
| import json
import os
import bsddb3
branches = ["bgwte", "fpyz", "gal", "gr2547_sh3266", "lw2666_az2407"]
custom_files = ["kf.js", "poly_predict.js", "regression.min.js", "serverrequest.js"]
scripts = [
"""
rm -rf /tmp/stream;
""",
"""
cd /tmp;
git clone <EMAIL>:cudbg/stream.git;
cd -;
""",
"""
cd /tmp/stream;
git pull;
git checkout master;
cd -;
cp /tmp/stream/src/chrome/server/js/ktm.js ./js/;
cp /tmp/stream/src/chrome/server/js/predict.js ./js/;
cp /tmp/stream/src/chrome/server/js/dist.js ./js/;
"""
]
def copy_from_branches(branches):
for script in scripts:
try:
os.system(script)
except Exception as e:
print e
for branch in branches:
print branch
os.system("""
cd /tmp/stream;
git checkout %s;
git pull;
cd -;
cp /tmp/stream/src/chrome/server/js/evaluator.js ./js/evaluator_%s.js;
cp /tmp/stream/src/chrome/server/js/predict.js ./js/predict_%s.js;
""" % (branch, branch, branch))
for cfname in custom_files:
try:
os.system("cp /tmp/stream/src/chrome/server/js/%s ./js/%s;" % (cfname, cfname))
except e:
pass
for fname in os.listdir("/tmp/stream/src/chrome/server/"):
if fname.endswith(".bdb"):
os.system("""
cp /tmp/stream/src/chrome/server/%s ./data/%s_%s
""" % (fname, branch, fname))
#os.system("git checkout predeval")
def combine_traces(branches):
# go through all the bdb files in data/ and merge into a single json file
trace_keys = ["xs", "ys", "ts", "actions"]
all_traces = []
for fname in os.listdir("./data"):
if fname.endswith(".bdb"):
db = bsddb3.hashopen(os.path.join("./data", fname))
for key in db:
try:
trace = json.loads(db[key])
trace = map(list,zip(*map(trace.get, trace_keys)))
all_traces.append(trace)
except Exception as e:
pass
print "flushing %s traces" % len(all_traces)
with file("./data/alltraces.json", "w") as f:
json.dump(all_traces, f)
copy_from_branches(branches)
combine_traces(branches)
| en | 0.549417 | rm -rf /tmp/stream; cd /tmp; git clone <EMAIL>:cudbg/stream.git; cd -; cd /tmp/stream; git pull; git checkout master; cd -; cp /tmp/stream/src/chrome/server/js/ktm.js ./js/; cp /tmp/stream/src/chrome/server/js/predict.js ./js/; cp /tmp/stream/src/chrome/server/js/dist.js ./js/; cd /tmp/stream; git checkout %s; git pull; cd -; cp /tmp/stream/src/chrome/server/js/evaluator.js ./js/evaluator_%s.js; cp /tmp/stream/src/chrome/server/js/predict.js ./js/predict_%s.js; cp /tmp/stream/src/chrome/server/%s ./data/%s_%s #os.system("git checkout predeval") # go through all the bdb files in data/ and merge into a single json file | 2.179768 | 2 |
dnstap_receiver/outputs/output_file.py | ExaneServerTeam/dnstap-receiver | 0 | 6618461 | import logging
import logging.handlers
import sys
clogger = logging.getLogger("dnstap_receiver.console")
file_logger = logging.getLogger("dnstap_receiver.output.file")
from dnstap_receiver.outputs import transform
def checking_conf(cfg):
"""validate the config"""
clogger.debug("Output handler: file")
return True
def setup_logger(cfg):
"""setup loggers"""
logfmt = '%(message)s'
max_bytes = int(cfg["file-max-size"].split('M')[0]) * 1024 * 1024
file_logger.setLevel(logging.INFO)
file_logger.propagate = False
lh = logging.handlers.RotatingFileHandler(
cfg["file"],
maxBytes=max_bytes,
backupCount=cfg["file-count"]
)
lh.setLevel(logging.INFO)
lh.setFormatter(logging.Formatter(logfmt))
file_logger.addHandler(lh)
async def handle(output_cfg, queue, metrics):
"""stdout output handler"""
# init output logger
setup_logger(output_cfg)
while True:
# read item from queue
tapmsg = await queue.get()
# convert dnstap message
msg = transform.convert_dnstap(fmt=output_cfg["format"], tapmsg=tapmsg)
# print to stdout
file_logger.info(msg.decode())
# all done
queue.task_done() | import logging
import logging.handlers
import sys
clogger = logging.getLogger("dnstap_receiver.console")
file_logger = logging.getLogger("dnstap_receiver.output.file")
from dnstap_receiver.outputs import transform
def checking_conf(cfg):
"""validate the config"""
clogger.debug("Output handler: file")
return True
def setup_logger(cfg):
"""setup loggers"""
logfmt = '%(message)s'
max_bytes = int(cfg["file-max-size"].split('M')[0]) * 1024 * 1024
file_logger.setLevel(logging.INFO)
file_logger.propagate = False
lh = logging.handlers.RotatingFileHandler(
cfg["file"],
maxBytes=max_bytes,
backupCount=cfg["file-count"]
)
lh.setLevel(logging.INFO)
lh.setFormatter(logging.Formatter(logfmt))
file_logger.addHandler(lh)
async def handle(output_cfg, queue, metrics):
"""stdout output handler"""
# init output logger
setup_logger(output_cfg)
while True:
# read item from queue
tapmsg = await queue.get()
# convert dnstap message
msg = transform.convert_dnstap(fmt=output_cfg["format"], tapmsg=tapmsg)
# print to stdout
file_logger.info(msg.decode())
# all done
queue.task_done() | en | 0.375648 | validate the config setup loggers stdout output handler # init output logger # read item from queue # convert dnstap message # print to stdout # all done | 2.434279 | 2 |
agent/txt_proxy.py | doncat99/proxy_pool | 0 | 6618462 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, print_function
import re
import logging
import os
import sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from agent import Agent
from util.webRequest import WebRequest
logger = logging.getLogger(__name__)
@Agent.register
class TxtProxy(Agent):
def __init__(self):
self.re_ip_port_pattern = re.compile(r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):([\d]{1,5})")
self.txt_list = [
# 'http://api.xicidaili.com/free2016.txt',
'http://static.fatezero.org/tmp/proxy.txt',
'http://pubproxy.com/api/proxy?limit=20&format=txt&type=http',
'http://comp0.ru/downloads/proxylist.txt',
'http://www.proxylists.net/http_highanon.txt',
'http://www.proxylists.net/http.txt',
'http://ab57.ru/downloads/proxylist.txt',
'https://www.rmccurdy.com/scripts/proxy/good.txt'
]
def extract_proxy(self):
for url in self.txt_list:
try:
rp = WebRequest().get(url, timeout=10)
re_ip_port_result = self.re_ip_port_pattern.findall(rp.text)
if not re_ip_port_result:
raise Exception("empty")
for host, port in re_ip_port_result:
yield f'{host}:{port}'
except:
pass
if __name__ == '__main__':
p = Agent.proxies[0]()
for proxy in p.extract_proxy():
print(proxy)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, print_function
import re
import logging
import os
import sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from agent import Agent
from util.webRequest import WebRequest
logger = logging.getLogger(__name__)
@Agent.register
class TxtProxy(Agent):
def __init__(self):
self.re_ip_port_pattern = re.compile(r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):([\d]{1,5})")
self.txt_list = [
# 'http://api.xicidaili.com/free2016.txt',
'http://static.fatezero.org/tmp/proxy.txt',
'http://pubproxy.com/api/proxy?limit=20&format=txt&type=http',
'http://comp0.ru/downloads/proxylist.txt',
'http://www.proxylists.net/http_highanon.txt',
'http://www.proxylists.net/http.txt',
'http://ab57.ru/downloads/proxylist.txt',
'https://www.rmccurdy.com/scripts/proxy/good.txt'
]
def extract_proxy(self):
for url in self.txt_list:
try:
rp = WebRequest().get(url, timeout=10)
re_ip_port_result = self.re_ip_port_pattern.findall(rp.text)
if not re_ip_port_result:
raise Exception("empty")
for host, port in re_ip_port_result:
yield f'{host}:{port}'
except:
pass
if __name__ == '__main__':
p = Agent.proxies[0]()
for proxy in p.extract_proxy():
print(proxy)
| zh | 0.145667 | #! /usr/bin/env python # -*- coding: utf-8 -*- # 'http://api.xicidaili.com/free2016.txt', | 2.32419 | 2 |
mongoapi/hotline_database/hotline_db.py | 133794m3r/i_am_not_forgotten | 0 | 6618463 | <filename>mongoapi/hotline_database/hotline_db.py<gh_stars>0
from flask_mongoengine import MongoEngine
hotline_db = MongoEngine()
def hotline_initialize_db(app):
hotline_db.init_app(app) | <filename>mongoapi/hotline_database/hotline_db.py<gh_stars>0
from flask_mongoengine import MongoEngine
hotline_db = MongoEngine()
def hotline_initialize_db(app):
hotline_db.init_app(app) | none | 1 | 1.587749 | 2 | |
firstfit.py | fernandosutter/Alocacao_Particao | 0 | 6618464 | # First-fit
# memoria - Variavel que representa a memoria nos casos
# programa - Qual programa será inserido na memoria
# tamanho - Qual o tamanho do programa a ser inserido na memoria.
def Firstfit(memoria, programa, tamanho):
vazio = 0
for i in range(0, 10):
if memoria[i] == '' and tamanho == 1:
memoria[i] = programa
break
if memoria[i] == '':
vazio += 1
if memoria[i] != '':
vazio = 0
if vazio == tamanho:
posini = (i+1) - vazio
for j in range(posini, i+1):
memoria[j] = programa
break
else:
print("Não é possível alocar o programa " + programa + " na memória!!!")
return memoria
| # First-fit
# memoria - Variavel que representa a memoria nos casos
# programa - Qual programa será inserido na memoria
# tamanho - Qual o tamanho do programa a ser inserido na memoria.
def Firstfit(memoria, programa, tamanho):
vazio = 0
for i in range(0, 10):
if memoria[i] == '' and tamanho == 1:
memoria[i] = programa
break
if memoria[i] == '':
vazio += 1
if memoria[i] != '':
vazio = 0
if vazio == tamanho:
posini = (i+1) - vazio
for j in range(posini, i+1):
memoria[j] = programa
break
else:
print("Não é possível alocar o programa " + programa + " na memória!!!")
return memoria
| pt | 0.626813 | # First-fit # memoria - Variavel que representa a memoria nos casos # programa - Qual programa será inserido na memoria # tamanho - Qual o tamanho do programa a ser inserido na memoria. | 3.49311 | 3 |
example_dags/natstats_postcodes.py | ministryofjustice/analytics-platform-airflow-example-dags | 2 | 6618465 | <filename>example_dags/natstats_postcodes.py
from airflow.utils.dates import days_ago
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.models import DAG
from datetime import datetime, timedelta
log = LoggingMixin().log
SCRAPER_IMAGE = "quay.io/mojanalytics/airflow_natstats_postcodes:latest"
SCRAPER_IAM_ROLE = "airflow_natstats_postcodes"
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
args = {"owner": "Robin",
"start_date": days_ago(0), # No point in backfilling/catchup as only latest data is available
"retries": 2,
"retry_delay": timedelta(minutes=120),
"email": ["<EMAIL>"]}
dag = DAG(
dag_id="natstats_postcodes",
default_args=args,
schedule_interval='@daily',
)
# https://github.com/apache/incubator-airflow/blob/5a3f39913739998ca2e9a17d0f1d10fccb840d36/airflow/contrib/operators/kubernetes_pod_operator.py#L129
download = KubernetesPodOperator(
namespace="airflow",
image=SCRAPER_IMAGE,
image_pull_policy='Always',
cmds=["bash", "-c"],
arguments=["python -u main.py --task=download"],
labels={"foo": "bar"},
name="airflow-test-pod",
in_cluster=True,
task_id="natstats_postcodes_download",
get_logs=True,
dag=dag,
annotations={"iam.amazonaws.com/role": SCRAPER_IAM_ROLE},
)
process = KubernetesPodOperator(
namespace="airflow",
image=SCRAPER_IMAGE,
image_pull_policy='Always',
cmds=["bash", "-c"],
arguments=["python -u main.py --task=process"],
labels={"foo": "bar"},
name="airflow-test-pod",
in_cluster=True,
task_id="natstats_postcodes_process",
get_logs=True,
dag=dag,
annotations={"iam.amazonaws.com/role": SCRAPER_IAM_ROLE},
)
curate = KubernetesPodOperator(
namespace="airflow",
image=SCRAPER_IMAGE,
image_pull_policy='Always',
cmds=["bash", "-c"],
arguments=["python -u main.py --task=curate"],
labels={"foo": "bar"},
name="airflow-test-pod",
in_cluster=True,
task_id="natstats_postcodes_curate",
get_logs=True,
dag=dag,
annotations={"iam.amazonaws.com/role": SCRAPER_IAM_ROLE},
)
download >> process >> curate
| <filename>example_dags/natstats_postcodes.py
from airflow.utils.dates import days_ago
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.models import DAG
from datetime import datetime, timedelta
log = LoggingMixin().log
SCRAPER_IMAGE = "quay.io/mojanalytics/airflow_natstats_postcodes:latest"
SCRAPER_IAM_ROLE = "airflow_natstats_postcodes"
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
args = {"owner": "Robin",
"start_date": days_ago(0), # No point in backfilling/catchup as only latest data is available
"retries": 2,
"retry_delay": timedelta(minutes=120),
"email": ["<EMAIL>"]}
dag = DAG(
dag_id="natstats_postcodes",
default_args=args,
schedule_interval='@daily',
)
# https://github.com/apache/incubator-airflow/blob/5a3f39913739998ca2e9a17d0f1d10fccb840d36/airflow/contrib/operators/kubernetes_pod_operator.py#L129
download = KubernetesPodOperator(
namespace="airflow",
image=SCRAPER_IMAGE,
image_pull_policy='Always',
cmds=["bash", "-c"],
arguments=["python -u main.py --task=download"],
labels={"foo": "bar"},
name="airflow-test-pod",
in_cluster=True,
task_id="natstats_postcodes_download",
get_logs=True,
dag=dag,
annotations={"iam.amazonaws.com/role": SCRAPER_IAM_ROLE},
)
process = KubernetesPodOperator(
namespace="airflow",
image=SCRAPER_IMAGE,
image_pull_policy='Always',
cmds=["bash", "-c"],
arguments=["python -u main.py --task=process"],
labels={"foo": "bar"},
name="airflow-test-pod",
in_cluster=True,
task_id="natstats_postcodes_process",
get_logs=True,
dag=dag,
annotations={"iam.amazonaws.com/role": SCRAPER_IAM_ROLE},
)
curate = KubernetesPodOperator(
namespace="airflow",
image=SCRAPER_IMAGE,
image_pull_policy='Always',
cmds=["bash", "-c"],
arguments=["python -u main.py --task=curate"],
labels={"foo": "bar"},
name="airflow-test-pod",
in_cluster=True,
task_id="natstats_postcodes_curate",
get_logs=True,
dag=dag,
annotations={"iam.amazonaws.com/role": SCRAPER_IAM_ROLE},
)
download >> process >> curate
| en | 0.707114 | # No point in backfilling/catchup as only latest data is available # https://github.com/apache/incubator-airflow/blob/5a3f39913739998ca2e9a17d0f1d10fccb840d36/airflow/contrib/operators/kubernetes_pod_operator.py#L129 | 2.152624 | 2 |
AlgoExpert/linked_lists/mergeLinkedLists.py | Muzque/Leetcode | 1 | 6618466 | <filename>AlgoExpert/linked_lists/mergeLinkedLists.py
# This is an input class. Do not edit.
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
def get_minor_node(node1, node2):
if node1 is None:
return node2, node1, node2.next
if node2 is None:
return node1, node1.next, node2
if node1.value < node2.value:
return node1, node1.next, node2
else:
return node2, node1, node2.next
def mergeLinkedLists(headOne, headTwo):
head, headOne, headTwo = get_minor_node(headOne, headTwo)
node = head
while headOne or headTwo:
n, headOne, headTwo = get_minor_node(headOne, headTwo)
node.next = n
node = n
return head
| <filename>AlgoExpert/linked_lists/mergeLinkedLists.py
# This is an input class. Do not edit.
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
def get_minor_node(node1, node2):
if node1 is None:
return node2, node1, node2.next
if node2 is None:
return node1, node1.next, node2
if node1.value < node2.value:
return node1, node1.next, node2
else:
return node2, node1, node2.next
def mergeLinkedLists(headOne, headTwo):
head, headOne, headTwo = get_minor_node(headOne, headTwo)
node = head
while headOne or headTwo:
n, headOne, headTwo = get_minor_node(headOne, headTwo)
node.next = n
node = n
return head
| en | 0.615383 | # This is an input class. Do not edit. | 3.804494 | 4 |
notebooks/scripts/make_clades.py | blab/cartography | 1 | 6618467 | <reponame>blab/cartography
import argparse
from augur.utils import write_json
import Bio.SeqIO
from collections import OrderedDict
import pandas as pd
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--metadata", help="a decompressed tsv metadata file that can be read into pandas")
parser.add_argument("--sequences", help="a file to intersect with the metadata to filter clade file")
parser.add_argument("--output", help="a clades.json file to be used by the KDE plots")
parser.add_argument("--col-name", help="cluster data from embedding and assign labels given via HDBSCAN")
args = parser.parse_args()
if args.sequences is not None:
sequences_by_name = OrderedDict()
for sequence in Bio.SeqIO.parse(args.sequences, "fasta"):
sequences_by_name[sequence.id] = str(sequence.seq)
sequence_names_val = list(sequences_by_name.keys())
print(len(sequence_names_val))
metadata_df = pd.read_csv(args.metadata, sep="\t", index_col=0)
if args.sequences is not None:
metadata_df = metadata_df.loc[sequence_names_val]
print(metadata_df)
metadata_df.rename(columns={args.col_name:"clade_membership"}, inplace=True)
clades_df = metadata_df[["clade_membership"]]
if args.output is not None:
clades_dict = clades_df.transpose().to_dict()
write_json({"nodes": clades_dict}, args.output) | import argparse
from augur.utils import write_json
import Bio.SeqIO
from collections import OrderedDict
import pandas as pd
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--metadata", help="a decompressed tsv metadata file that can be read into pandas")
parser.add_argument("--sequences", help="a file to intersect with the metadata to filter clade file")
parser.add_argument("--output", help="a clades.json file to be used by the KDE plots")
parser.add_argument("--col-name", help="cluster data from embedding and assign labels given via HDBSCAN")
args = parser.parse_args()
if args.sequences is not None:
sequences_by_name = OrderedDict()
for sequence in Bio.SeqIO.parse(args.sequences, "fasta"):
sequences_by_name[sequence.id] = str(sequence.seq)
sequence_names_val = list(sequences_by_name.keys())
print(len(sequence_names_val))
metadata_df = pd.read_csv(args.metadata, sep="\t", index_col=0)
if args.sequences is not None:
metadata_df = metadata_df.loc[sequence_names_val]
print(metadata_df)
metadata_df.rename(columns={args.col_name:"clade_membership"}, inplace=True)
clades_df = metadata_df[["clade_membership"]]
if args.output is not None:
clades_dict = clades_df.transpose().to_dict()
write_json({"nodes": clades_dict}, args.output) | none | 1 | 2.844599 | 3 | |
caracara/common/sorting.py | CrowdStrike/falconpy-tools | 2 | 6618468 | """Caracara Policies: Sorting Options."""
SORT_ASC = "precedence.asc"
SORT_DESC = "precedence.desc"
SORTING_OPTIONS = [
SORT_ASC,
SORT_DESC,
]
| """Caracara Policies: Sorting Options."""
SORT_ASC = "precedence.asc"
SORT_DESC = "precedence.desc"
SORTING_OPTIONS = [
SORT_ASC,
SORT_DESC,
]
| en | 0.507572 | Caracara Policies: Sorting Options. | 1.110791 | 1 |
python/tools/numbers/largest_swap.py | xanderyzwich/Playground | 1 | 6618469 | """
Largest Swap
Write a function that takes a two-digit number and determines if it's the largest of two possible digit swaps.
"""
from unittest import TestCase
def largest_swap(input_int):
# return input_int >= int(str(input_int)[::-1])
return int(str(input_int)[0]) >= int(str(input_int)[1])
class TestLargestSwap(TestCase):
def test_one(self):
assert largest_swap(27) is False
assert largest_swap(43) is True
def test_two(self):
assert largest_swap(14) is False
assert largest_swap(53) is True
assert largest_swap(99) is True
| """
Largest Swap
Write a function that takes a two-digit number and determines if it's the largest of two possible digit swaps.
"""
from unittest import TestCase
def largest_swap(input_int):
# return input_int >= int(str(input_int)[::-1])
return int(str(input_int)[0]) >= int(str(input_int)[1])
class TestLargestSwap(TestCase):
def test_one(self):
assert largest_swap(27) is False
assert largest_swap(43) is True
def test_two(self):
assert largest_swap(14) is False
assert largest_swap(53) is True
assert largest_swap(99) is True
| en | 0.584872 | Largest Swap
Write a function that takes a two-digit number and determines if it's the largest of two possible digit swaps. # return input_int >= int(str(input_int)[::-1]) | 3.943229 | 4 |
diamandas/userpanel/admin.py | bailey-ann/diamanda | 0 | 6618470 | # -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext as _
from diamandas.userpanel.models import *
| # -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext as _
from diamandas.userpanel.models import *
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.09247 | 1 |
test/issue170-pwm.py | adafruit/adafruit-beaglebone-io-python | 305 | 6618471 | <gh_stars>100-1000
import Adafruit_BBIO.PWM as PWM
PWM.start("P9_14", 50, 2000, 1)
PWM.cleanup()
PWM.start("P9_14", 50, 2000, 0)
PWM.cleanup()
| import Adafruit_BBIO.PWM as PWM
PWM.start("P9_14", 50, 2000, 1)
PWM.cleanup()
PWM.start("P9_14", 50, 2000, 0)
PWM.cleanup() | none | 1 | 1.588111 | 2 | |
Course/migrations/0004_auto_20210917_1320.py | Ryize/CourseMC | 2 | 6618472 | # Generated by Django 3.2.7 on 2021-09-17 13:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Course', '0003_auto_20210917_1312'),
]
operations = [
migrations.AddField(
model_name='student',
name='password',
field=models.CharField(default=96563426, max_length=128, verbose_name='Пароль'),
),
migrations.AlterField(
model_name='student',
name='groups',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Course.learngroup', verbose_name='Группа обучения'),
),
]
| # Generated by Django 3.2.7 on 2021-09-17 13:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Course', '0003_auto_20210917_1312'),
]
operations = [
migrations.AddField(
model_name='student',
name='password',
field=models.CharField(default=96563426, max_length=128, verbose_name='Пароль'),
),
migrations.AlterField(
model_name='student',
name='groups',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Course.learngroup', verbose_name='Группа обучения'),
),
]
| en | 0.839639 | # Generated by Django 3.2.7 on 2021-09-17 13:20 | 1.503081 | 2 |
data.py | GMvandeVen/pytorch-deep-generative-replay | 4 | 6618473 | import copy
import math
from torchvision import datasets, transforms
from torchvision.transforms import ImageOps
from torch.utils.data import ConcatDataset
def _permutate_image_pixels(image, permutation):
'''Permutate the pixels of an image according to [permutation].
[image] 3D-tensor containing the image
[permutation] <ndarray> of pixel-indeces in their new order
'''
if permutation is None:
return image
c, h, w = image.size()
# NOTE: this doesn't preserve the pixels per channel!
# (e.g., a pixel from the red channel can end up in the green channel)
# image = image.view(-1, c)
# image = image[permutation, :]
# image = image.view(c, h, w)
# the code below permutates per channel (same permutation for each channel)
image = image.view(c, -1)
image = image[:, permutation]
image = image.view(c, h, w)
return image
def _colorize_grayscale_image(image):
'''Transform [image] from one channel to 3 (identical) channels.'''
return ImageOps.colorize(image, (0, 0, 0), (255, 255, 255))
def get_dataset(name, train=True, download=True, permutation=None, capacity=None, data_dir='./datasets'):
data_name = 'mnist' if name=='mnist-color' else name
dataset_class = AVAILABLE_DATASETS[data_name]
dataset_transform = transforms.Compose([
*AVAILABLE_TRANSFORMS[name],
transforms.Lambda(lambda x: _permutate_image_pixels(x, permutation)),
])
if data_name=='svhn':
dataset = dataset_class('{dir}/{name}'.format(dir=data_dir, name=data_name),
split="train" if train else "test", download=download, transform=dataset_transform,
target_transform=transforms.Compose(AVAILABLE_DATASETS['svhn-target']))
else:
dataset = dataset_class('{dir}/{name}'.format(dir=data_dir, name=data_name), train=train,
download=download, transform=dataset_transform)
# if dataset is (possibly) not large enough, create copies until it is.
if capacity is not None and len(dataset) < capacity:
return ConcatDataset([
copy.deepcopy(dataset) for _ in
range(math.ceil(capacity / len(dataset)))
])
else:
return dataset
# specify available data-sets.
AVAILABLE_DATASETS = {
'mnist': datasets.MNIST,
'cifar10': datasets.CIFAR10,
'cifar100': datasets.CIFAR100,
'svhn': datasets.SVHN,
}
# specify available transforms.
AVAILABLE_TRANSFORMS = {
'mnist': [
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Pad(2),
transforms.ToTensor(),
],
'mnist-color': [
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Lambda(lambda x: _colorize_grayscale_image(x)),
transforms.Pad(2),
transforms.ToTensor(),
],
'cifar10': [
transforms.ToTensor(),
],
'cifar100': [
transforms.ToTensor(),
],
'svhn': [
transforms.ToTensor(),
],
'svhn-target': [
transforms.Lambda(lambda y: y % 10),
],
}
# specify configurations of available data-sets.
DATASET_CONFIGS = {
'mnist': {'size': 32, 'channels': 1, 'classes': 10},
'mnist-color': {'size': 32, 'channels': 3, 'classes': 10},
'cifar10': {'size': 32, 'channels': 3, 'classes': 10},
'cifar100': {'size': 32, 'channels': 3, 'classes': 100},
'svhn': {'size': 32, 'channels': 3, 'classes': 10},
}
| import copy
import math
from torchvision import datasets, transforms
from torchvision.transforms import ImageOps
from torch.utils.data import ConcatDataset
def _permutate_image_pixels(image, permutation):
'''Permutate the pixels of an image according to [permutation].
[image] 3D-tensor containing the image
[permutation] <ndarray> of pixel-indeces in their new order
'''
if permutation is None:
return image
c, h, w = image.size()
# NOTE: this doesn't preserve the pixels per channel!
# (e.g., a pixel from the red channel can end up in the green channel)
# image = image.view(-1, c)
# image = image[permutation, :]
# image = image.view(c, h, w)
# the code below permutates per channel (same permutation for each channel)
image = image.view(c, -1)
image = image[:, permutation]
image = image.view(c, h, w)
return image
def _colorize_grayscale_image(image):
'''Transform [image] from one channel to 3 (identical) channels.'''
return ImageOps.colorize(image, (0, 0, 0), (255, 255, 255))
def get_dataset(name, train=True, download=True, permutation=None, capacity=None, data_dir='./datasets'):
data_name = 'mnist' if name=='mnist-color' else name
dataset_class = AVAILABLE_DATASETS[data_name]
dataset_transform = transforms.Compose([
*AVAILABLE_TRANSFORMS[name],
transforms.Lambda(lambda x: _permutate_image_pixels(x, permutation)),
])
if data_name=='svhn':
dataset = dataset_class('{dir}/{name}'.format(dir=data_dir, name=data_name),
split="train" if train else "test", download=download, transform=dataset_transform,
target_transform=transforms.Compose(AVAILABLE_DATASETS['svhn-target']))
else:
dataset = dataset_class('{dir}/{name}'.format(dir=data_dir, name=data_name), train=train,
download=download, transform=dataset_transform)
# if dataset is (possibly) not large enough, create copies until it is.
if capacity is not None and len(dataset) < capacity:
return ConcatDataset([
copy.deepcopy(dataset) for _ in
range(math.ceil(capacity / len(dataset)))
])
else:
return dataset
# specify available data-sets.
AVAILABLE_DATASETS = {
'mnist': datasets.MNIST,
'cifar10': datasets.CIFAR10,
'cifar100': datasets.CIFAR100,
'svhn': datasets.SVHN,
}
# specify available transforms.
AVAILABLE_TRANSFORMS = {
'mnist': [
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Pad(2),
transforms.ToTensor(),
],
'mnist-color': [
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Lambda(lambda x: _colorize_grayscale_image(x)),
transforms.Pad(2),
transforms.ToTensor(),
],
'cifar10': [
transforms.ToTensor(),
],
'cifar100': [
transforms.ToTensor(),
],
'svhn': [
transforms.ToTensor(),
],
'svhn-target': [
transforms.Lambda(lambda y: y % 10),
],
}
# specify configurations of available data-sets.
DATASET_CONFIGS = {
'mnist': {'size': 32, 'channels': 1, 'classes': 10},
'mnist-color': {'size': 32, 'channels': 3, 'classes': 10},
'cifar10': {'size': 32, 'channels': 3, 'classes': 10},
'cifar100': {'size': 32, 'channels': 3, 'classes': 100},
'svhn': {'size': 32, 'channels': 3, 'classes': 10},
}
| en | 0.743409 | Permutate the pixels of an image according to [permutation]. [image] 3D-tensor containing the image [permutation] <ndarray> of pixel-indeces in their new order # NOTE: this doesn't preserve the pixels per channel! # (e.g., a pixel from the red channel can end up in the green channel) # image = image.view(-1, c) # image = image[permutation, :] # image = image.view(c, h, w) # the code below permutates per channel (same permutation for each channel) Transform [image] from one channel to 3 (identical) channels. # if dataset is (possibly) not large enough, create copies until it is. # specify available data-sets. # specify available transforms. # specify configurations of available data-sets. | 3.085808 | 3 |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-008/ph-8.15-startwith-method-with-logic.py | shihab4t/Books-Code | 0 | 6618474 | name = input("Input a name: ")
if name.startswith("Mr."):
print("Dear Sir")
| name = input("Input a name: ")
if name.startswith("Mr."):
print("Dear Sir")
| none | 1 | 3.51087 | 4 | |
heightChecker.py | hazardinho/LeetcodeSolutions | 1 | 6618475 | def heightChecker(self, heights: List[int]) -> int:
sortedH = sorted(heights)
r = 0
for i in range(len(heights)):
if(heights[i] != sortedH[i]):
r+=1
return r | def heightChecker(self, heights: List[int]) -> int:
sortedH = sorted(heights)
r = 0
for i in range(len(heights)):
if(heights[i] != sortedH[i]):
r+=1
return r | none | 1 | 3.57549 | 4 | |
Tree/GenericTree/CodingNinjas/Lecture/Traversal/Preorder_Travesal.py | prash-kr-meena/GoogleR | 0 | 6618476 | <gh_stars>0
from Tree.GenericTree.GenericTree import GenericTree
from Utils.Array import input_array
# First Root, then children
def preorder_traversal(root) -> None:
if root is None:
return
print(root.data, end=" ")
for child in root.children:
preorder_traversal(child)
if __name__ == '__main__':
array = input_array()
tree_root = GenericTree.single_line_input(array)
preorder_traversal(tree_root)
"""
10 3 20 30 40 2 40 50 0 0 0 0
10
20 30 40
40 50
"""
| from Tree.GenericTree.GenericTree import GenericTree
from Utils.Array import input_array
# First Root, then children
def preorder_traversal(root) -> None:
if root is None:
return
print(root.data, end=" ")
for child in root.children:
preorder_traversal(child)
if __name__ == '__main__':
array = input_array()
tree_root = GenericTree.single_line_input(array)
preorder_traversal(tree_root)
"""
10 3 20 30 40 2 40 50 0 0 0 0
10
20 30 40
40 50
""" | en | 0.486454 | # First Root, then children 10 3 20 30 40 2 40 50 0 0 0 0 10 20 30 40 40 50 | 3.280088 | 3 |
_modules/elasticsearcharbe.py | picturae/salt-modules | 0 | 6618477 | # -*- coding: utf-8 -*-
'''
Connection module for Elasticsearch
notice: early state, etc.
:depends: elasticsearch
'''
# TODO
# * improve error/ exception handling
# * implement update methods?
from __future__ import absolute_import
# Import Python libs
import logging
log = logging.getLogger(__name__)
# Import third party libs
try:
import elasticsearch
logging.getLogger('elasticsearch').setLevel(logging.CRITICAL)
HAS_ELASTICSEARCH = True
except ImportError:
HAS_ELASTICSEARCH = False
from salt.ext.six import string_types
def __virtual__():
'''
Only load if elasticsearch libraries exist.
'''
if not HAS_ELASTICSEARCH:
return False
return True
def _get_instance(hosts, profile):
'''
Return the elasticsearch instance
'''
if profile:
if isinstance(profile, string_types):
_profile = __salt__['config.option'](profile)
elif isinstance(profile, dict):
_profile = profile
if _profile:
hosts = _profile.get('host')
if not hosts:
hosts = _profile.get('hosts')
if isinstance(hosts, string_types):
hosts = [hosts]
return elasticsearch.Elasticsearch(hosts)
def alias_create(indices, alias, hosts=None, body=None, profile='elasticsearch'):
'''
Create an alias for a specific index/indices
CLI example::
salt myminion elasticsearch.alias_create testindex_v1 testindex
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.put_alias(index=indices, name=alias, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def alias_delete(indices, aliases, hosts=None, body=None, profile='elasticsearch'):
'''
Delete an alias of an index
CLI example::
salt myminion elasticsearch.alias_delete testindex_v1 testindex
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.delete_alias(index=indices, name=aliases)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def alias_exists(aliases, indices=None, hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given alias exists
CLI example::
salt myminion elasticsearch.alias_exists testindex
'''
es = _get_instance(hosts, profile)
try:
if es.indices.exists_alias(name=aliases, index=indices):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.exceptions.ConnectionError:
# TODO log error
return None
return None
def alias_get(indices=None, aliases=None, hosts=None, profile='elasticsearch'):
'''
Check for the existence of an alias and if it exists, return it
CLI example::
salt myminion elasticsearch.alias_get testindex
'''
es = _get_instance(hosts, profile)
try:
ret = es.indices.get_alias(index=indices, name=aliases) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def document_create(index, doc_type, body=None, hosts=None, profile='elasticsearch'):
'''
Create a document in a specified index
CLI example::
salt myminion elasticsearch.document_create testindex doctype1 '{}'
'''
es = _get_instance(hosts, profile)
try:
result = es.index(index=index, doc_type=doc_type, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def document_delete(index, doc_type, id, hosts=None, profile='elasticsearch'):
'''
Delete a document from an index
CLI example::
salt myminion elasticsearch.document_delete testindex doctype1 AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
if not index_exists(index=index):
return True
else:
result = es.delete(index=index, doc_type=doc_type, id=id)
if result.get('found', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def document_exists(index, id, doc_type='_all', hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given document exists
CLI example::
salt myminion elasticsearch.document_exists testindex AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
if es.exists(index=index, id=id, doc_type=doc_type):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.exceptions.ConnectionError:
# TODO log error
return None
return None
def document_get(index, id, doc_type='_all', hosts=None, profile='elasticsearch'):
'''
Check for the existence of a document and if it exists, return it
CLI example::
salt myminion elasticsearch.document_get testindex AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
ret = es.get(index=index, id=id, doc_type=doc_type) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_create(index, body=None, hosts=None, profile='elasticsearch'):
'''
Create an index
CLI example::
salt myminion elasticsearch.index_create testindex
'''
es = _get_instance(hosts, profile)
try:
if index_exists(index):
return True
else:
result = es.indices.create(index=index, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_delete(index, hosts=None, profile='elasticsearch'):
'''
Delete an index
CLI example::
salt myminion elasticsearch.index_delete testindex
'''
es = _get_instance(hosts, profile)
try:
if not index_exists(index=index):
return True
else:
result = es.indices.delete(index=index)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_exists(index, hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given index exists
CLI example::
salt myminion elasticsearch.index_exists testindex
'''
es = _get_instance(hosts, profile)
try:
if not isinstance(index, list):
index = [index]
if es.indices.exists(index=index):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.exceptions.ConnectionError:
# TODO log error
return None
return None
def index_get(index, hosts=None, profile='elasticsearch'):
'''
Check for the existence of an index and if it exists, return it
CLI example::
salt myminion elasticsearch.index_get testindex
'''
es = _get_instance(hosts, profile)
try:
if index_exists(index):
ret = es.indices.get(index=index) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def mapping_create(index, doc_type, body, hosts=None, profile='elasticsearch'):
'''
Create a mapping in a given index
CLI example::
salt myminion elasticsearch.mapping_create testindex user '{ "user" : { "properties" : { "message" : {"type" : "string", "store" : true } } } }'
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.put_mapping(index=index, doc_type=doc_type, body=body) # TODO error handling
return mapping_get(index, doc_type)
except elasticsearch.exceptions.NotFoundError:
return None
return None
def mapping_delete(index, doc_type, hosts=None, profile='elasticsearch'):
'''
Delete a mapping (type) along with its data
CLI example::
salt myminion elasticsearch.mapping_delete testindex user
'''
es = _get_instance(hosts, profile)
try:
# TODO check if mapping exists, add method mapping_exists()
result = es.indices.delete_mapping(index=index, doc_type=doc_type)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def mapping_get(index, doc_type, hosts=None, profile='elasticsearch'):
'''
Retrieve mapping definition of index or index/type
CLI example::
salt myminion elasticsearch.mapping_get testindex user
'''
es = _get_instance(hosts, profile)
try:
ret = es.indices.get_mapping(index=index, doc_type=doc_type) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_create(name, body, hosts=None, profile='elasticsearch'):
'''
Create an index template
CLI example::
salt myminion elasticsearch.index_template_create testindex_templ '{ "template": "logstash-*", "order": 1, "settings": { "number_of_shards": 1 } }'
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.put_template(name=name, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_delete(name, hosts=None, profile='elasticsearch'):
'''
Delete an index template (type) along with its data
CLI example::
salt myminion elasticsearch.index_template_delete testindex_templ user
'''
es = _get_instance(hosts, profile)
try:
# TODO check if template exists, add method template_exists() ?
result = es.indices.delete_template(name=name)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_exists(name, hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given index template exists
CLI example::
salt myminion elasticsearch.index_template_exists testindex_templ
'''
es = _get_instance(hosts, profile)
try:
if es.indices.exists_template(name=name):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_get(name, hosts=None, profile='elasticsearch'):
'''
Retrieve template definition of index or index/type
CLI example::
salt myminion elasticsearch.index_template_get testindex_templ user
'''
es = _get_instance(hosts, profile)
try:
ret = es.indices.get_template(name=name) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
| # -*- coding: utf-8 -*-
'''
Connection module for Elasticsearch
notice: early state, etc.
:depends: elasticsearch
'''
# TODO
# * improve error/ exception handling
# * implement update methods?
from __future__ import absolute_import
# Import Python libs
import logging
log = logging.getLogger(__name__)
# Import third party libs
try:
import elasticsearch
logging.getLogger('elasticsearch').setLevel(logging.CRITICAL)
HAS_ELASTICSEARCH = True
except ImportError:
HAS_ELASTICSEARCH = False
from salt.ext.six import string_types
def __virtual__():
'''
Only load if elasticsearch libraries exist.
'''
if not HAS_ELASTICSEARCH:
return False
return True
def _get_instance(hosts, profile):
'''
Return the elasticsearch instance
'''
if profile:
if isinstance(profile, string_types):
_profile = __salt__['config.option'](profile)
elif isinstance(profile, dict):
_profile = profile
if _profile:
hosts = _profile.get('host')
if not hosts:
hosts = _profile.get('hosts')
if isinstance(hosts, string_types):
hosts = [hosts]
return elasticsearch.Elasticsearch(hosts)
def alias_create(indices, alias, hosts=None, body=None, profile='elasticsearch'):
'''
Create an alias for a specific index/indices
CLI example::
salt myminion elasticsearch.alias_create testindex_v1 testindex
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.put_alias(index=indices, name=alias, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def alias_delete(indices, aliases, hosts=None, body=None, profile='elasticsearch'):
'''
Delete an alias of an index
CLI example::
salt myminion elasticsearch.alias_delete testindex_v1 testindex
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.delete_alias(index=indices, name=aliases)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def alias_exists(aliases, indices=None, hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given alias exists
CLI example::
salt myminion elasticsearch.alias_exists testindex
'''
es = _get_instance(hosts, profile)
try:
if es.indices.exists_alias(name=aliases, index=indices):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.exceptions.ConnectionError:
# TODO log error
return None
return None
def alias_get(indices=None, aliases=None, hosts=None, profile='elasticsearch'):
'''
Check for the existence of an alias and if it exists, return it
CLI example::
salt myminion elasticsearch.alias_get testindex
'''
es = _get_instance(hosts, profile)
try:
ret = es.indices.get_alias(index=indices, name=aliases) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def document_create(index, doc_type, body=None, hosts=None, profile='elasticsearch'):
'''
Create a document in a specified index
CLI example::
salt myminion elasticsearch.document_create testindex doctype1 '{}'
'''
es = _get_instance(hosts, profile)
try:
result = es.index(index=index, doc_type=doc_type, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def document_delete(index, doc_type, id, hosts=None, profile='elasticsearch'):
'''
Delete a document from an index
CLI example::
salt myminion elasticsearch.document_delete testindex doctype1 AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
if not index_exists(index=index):
return True
else:
result = es.delete(index=index, doc_type=doc_type, id=id)
if result.get('found', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def document_exists(index, id, doc_type='_all', hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given document exists
CLI example::
salt myminion elasticsearch.document_exists testindex AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
if es.exists(index=index, id=id, doc_type=doc_type):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.exceptions.ConnectionError:
# TODO log error
return None
return None
def document_get(index, id, doc_type='_all', hosts=None, profile='elasticsearch'):
'''
Check for the existence of a document and if it exists, return it
CLI example::
salt myminion elasticsearch.document_get testindex AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
ret = es.get(index=index, id=id, doc_type=doc_type) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_create(index, body=None, hosts=None, profile='elasticsearch'):
'''
Create an index
CLI example::
salt myminion elasticsearch.index_create testindex
'''
es = _get_instance(hosts, profile)
try:
if index_exists(index):
return True
else:
result = es.indices.create(index=index, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_delete(index, hosts=None, profile='elasticsearch'):
'''
Delete an index
CLI example::
salt myminion elasticsearch.index_delete testindex
'''
es = _get_instance(hosts, profile)
try:
if not index_exists(index=index):
return True
else:
result = es.indices.delete(index=index)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_exists(index, hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given index exists
CLI example::
salt myminion elasticsearch.index_exists testindex
'''
es = _get_instance(hosts, profile)
try:
if not isinstance(index, list):
index = [index]
if es.indices.exists(index=index):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.exceptions.ConnectionError:
# TODO log error
return None
return None
def index_get(index, hosts=None, profile='elasticsearch'):
'''
Check for the existence of an index and if it exists, return it
CLI example::
salt myminion elasticsearch.index_get testindex
'''
es = _get_instance(hosts, profile)
try:
if index_exists(index):
ret = es.indices.get(index=index) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def mapping_create(index, doc_type, body, hosts=None, profile='elasticsearch'):
'''
Create a mapping in a given index
CLI example::
salt myminion elasticsearch.mapping_create testindex user '{ "user" : { "properties" : { "message" : {"type" : "string", "store" : true } } } }'
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.put_mapping(index=index, doc_type=doc_type, body=body) # TODO error handling
return mapping_get(index, doc_type)
except elasticsearch.exceptions.NotFoundError:
return None
return None
def mapping_delete(index, doc_type, hosts=None, profile='elasticsearch'):
'''
Delete a mapping (type) along with its data
CLI example::
salt myminion elasticsearch.mapping_delete testindex user
'''
es = _get_instance(hosts, profile)
try:
# TODO check if mapping exists, add method mapping_exists()
result = es.indices.delete_mapping(index=index, doc_type=doc_type)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def mapping_get(index, doc_type, hosts=None, profile='elasticsearch'):
'''
Retrieve mapping definition of index or index/type
CLI example::
salt myminion elasticsearch.mapping_get testindex user
'''
es = _get_instance(hosts, profile)
try:
ret = es.indices.get_mapping(index=index, doc_type=doc_type) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_create(name, body, hosts=None, profile='elasticsearch'):
'''
Create an index template
CLI example::
salt myminion elasticsearch.index_template_create testindex_templ '{ "template": "logstash-*", "order": 1, "settings": { "number_of_shards": 1 } }'
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.put_template(name=name, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_delete(name, hosts=None, profile='elasticsearch'):
'''
Delete an index template (type) along with its data
CLI example::
salt myminion elasticsearch.index_template_delete testindex_templ user
'''
es = _get_instance(hosts, profile)
try:
# TODO check if template exists, add method template_exists() ?
result = es.indices.delete_template(name=name)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_exists(name, hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given index template exists
CLI example::
salt myminion elasticsearch.index_template_exists testindex_templ
'''
es = _get_instance(hosts, profile)
try:
if es.indices.exists_template(name=name):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_get(name, hosts=None, profile='elasticsearch'):
'''
Retrieve template definition of index or index/type
CLI example::
salt myminion elasticsearch.index_template_get testindex_templ user
'''
es = _get_instance(hosts, profile)
try:
ret = es.indices.get_template(name=name) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
| en | 0.404146 | # -*- coding: utf-8 -*- Connection module for Elasticsearch notice: early state, etc. :depends: elasticsearch # TODO # * improve error/ exception handling # * implement update methods? # Import Python libs # Import third party libs Only load if elasticsearch libraries exist. Return the elasticsearch instance Create an alias for a specific index/indices CLI example:: salt myminion elasticsearch.alias_create testindex_v1 testindex # TODO error handling Delete an alias of an index CLI example:: salt myminion elasticsearch.alias_delete testindex_v1 testindex # TODO error handling Return a boolean indicating whether given alias exists CLI example:: salt myminion elasticsearch.alias_exists testindex # TODO log error Check for the existence of an alias and if it exists, return it CLI example:: salt myminion elasticsearch.alias_get testindex # TODO error handling Create a document in a specified index CLI example:: salt myminion elasticsearch.document_create testindex doctype1 '{}' # TODO error handling Delete a document from an index CLI example:: salt myminion elasticsearch.document_delete testindex doctype1 AUx-384m0Bug_8U80wQZ # TODO error handling Return a boolean indicating whether given document exists CLI example:: salt myminion elasticsearch.document_exists testindex AUx-384m0Bug_8U80wQZ # TODO log error Check for the existence of a document and if it exists, return it CLI example:: salt myminion elasticsearch.document_get testindex AUx-384m0Bug_8U80wQZ # TODO error handling Create an index CLI example:: salt myminion elasticsearch.index_create testindex # TODO error handling Delete an index CLI example:: salt myminion elasticsearch.index_delete testindex # TODO error handling Return a boolean indicating whether given index exists CLI example:: salt myminion elasticsearch.index_exists testindex # TODO log error Check for the existence of an index and if it exists, return it CLI example:: salt myminion elasticsearch.index_get testindex # TODO 
error handling Create a mapping in a given index CLI example:: salt myminion elasticsearch.mapping_create testindex user '{ "user" : { "properties" : { "message" : {"type" : "string", "store" : true } } } }' # TODO error handling Delete a mapping (type) along with its data CLI example:: salt myminion elasticsearch.mapping_delete testindex user # TODO check if mapping exists, add method mapping_exists() # TODO error handling Retrieve mapping definition of index or index/type CLI example:: salt myminion elasticsearch.mapping_get testindex user # TODO error handling Create an index template CLI example:: salt myminion elasticsearch.index_template_create testindex_templ '{ "template": "logstash-*", "order": 1, "settings": { "number_of_shards": 1 } }' # TODO error handling Delete an index template (type) along with its data CLI example:: salt myminion elasticsearch.index_template_delete testindex_templ user # TODO check if template exists, add method template_exists() ? # TODO error handling Return a boolean indicating whether given index template exists CLI example:: salt myminion elasticsearch.index_template_exists testindex_templ Retrieve template definition of index or index/type CLI example:: salt myminion elasticsearch.index_template_get testindex_templ user # TODO error handling | 2.142654 | 2 |
stoked/hydrodynamics.py | johnaparker/stoked | 1 | 6618478 | <reponame>johnaparker/stoked<filename>stoked/hydrodynamics.py
import numpy as np
class interface:
"""A no-slip interface"""
def __init__(self, z=0):
"""
Arguments:
z z-position of the interface
"""
self.z = z
def levi_civita():
"""return the levi-civita symbol"""
eijk = np.zeros((3, 3, 3), dtype=float)
eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1
eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1
return eijk
def particle_wall_self_mobility(position, interface, viscosity, radius):
    """
    Construct the particle wall self-mobility matrix for a single particle

    Arguments:
        position[3]    position of particle
        interface      interface object
        viscosity      dynamic viscosity µ of surrounding fluid
        radius         particle radius
    """
    # Particle-wall gap measured in units of the particle radius.
    h = (position[2] - interface.z)/radius

    # Stokes drag coefficients of an isolated sphere.
    gamma_T = 6*np.pi*viscosity*radius
    gamma_R = 6*np.pi*viscosity*radius**3

    # Wall corrections, expanded in inverse powers of the reduced height h;
    # the in-plane (xx, yy) and normal (zz) components differ.
    trans_par = (9/h - 2/h**3 + 1/h**5)/(16*gamma_T)
    trans_perp = (9/h - 4/h**3 + 1/h**5)/(8*gamma_T)
    rot_par = 15/(64*gamma_R*h**3)
    rot_perp = 3/(32*gamma_R*h**3)

    # Sector layout: [0,0] translation-translation, [1,1] rotation-rotation;
    # the off-diagonal TR/RT sectors are left zero.
    M = np.zeros([2, 2, 3, 3], dtype=float)
    M[0, 0] = np.diag([trans_par, trans_par, trans_perp])
    M[1, 1] = np.diag([rot_par, rot_par, rot_perp])
    return M
def grand_mobility_matrix(position, drag_T, drag_R, viscosity):
    """
    Construct the grand mobility matrix for a given cluster

    Arguments:
        position[N,3]    position of N particles
        drag_T[N,3,3]    3 by 3 translational drag tensors of N particles
        drag_R[N,3,3]    3 by 3 rotational drag tensors of N particles
        viscosity        dynamic viscosity µ of surrounding fluid

    Returns:
        [6N, 6N] float array; after the final reshape the first 3N
        rows/columns are the translational sector, the last 3N the
        rotational sector.
    """
    Nparticles = len(position)
    # Axes 0 and 2 pick the sector (0 = translation, 1 = rotation);
    # axes 1 and 3 run over the 3N particle coordinates.
    M = np.zeros([2, 3*Nparticles, 2, 3*Nparticles], dtype=float)

    ### block-diagonal components
    # Self terms: each particle's own 3x3 tensors on the block diagonal.
    # NOTE(review): the drag tensors are inserted unchanged -- presumably the
    # caller supplies self-mobilities (inverse drags) here; confirm at call
    # site, since the name "drag" suggests the inverse quantity.
    for i in range(Nparticles):
        idx = np.s_[0,3*i:3*i+3,0,3*i:3*i+3]
        M[idx] = drag_T[i]
        idx = np.s_[1,3*i:3*i+3,1,3*i:3*i+3]
        M[idx] = drag_R[i]

    ### Off block-diagonal components
    # Pair couplings share the 1/(8*pi*mu) prefactor; each decays with a
    # different power of the pair separation r_ij (TT ~ 1/r, RR ~ 1/r^3,
    # RT and TR ~ 1/r^2).
    factor = 1/(8*np.pi*viscosity)
    eps = levi_civita()
    for i in range(Nparticles):
        for j in range(i+1, Nparticles):
            r_ijx = position[i] - position[j]
            r_ij = np.linalg.norm(r_ijx)
            I = np.identity(3, dtype=float)
            # T: outer product of the unit separation vector (projector
            # onto the pair axis).
            T = np.outer(r_ijx, r_ijx)/r_ij**2
            # K: antisymmetric cross-product matrix built from the unit
            # separation via the Levi-Civita symbol.
            K = np.einsum('ijk,k->ij', eps, r_ijx)/r_ij
            ### TT coupling
            idx = np.s_[0,3*i:3*i+3,0,3*j:3*j+3]
            M[idx] = factor/r_ij*(I + T)
            idx2 = np.s_[0,3*j:3*j+3,0,3*i:3*i+3]
            M[idx2] = M[idx]  # symmetric under particle exchange
            ### RR coupling
            idx = np.s_[1,3*i:3*i+3,1,3*j:3*j+3]
            M[idx] = factor/(2*r_ij**3)*(3*T - I)
            idx2 = np.s_[1,3*j:3*j+3,1,3*i:3*i+3]
            M[idx2] = M[idx]  # also exchange-symmetric
            ### RT coupling
            idx = np.s_[1,3*i:3*i+3,0,3*j:3*j+3]
            M[idx] = -factor/r_ij**2*(K)
            idx2 = np.s_[1,3*j:3*j+3,0,3*i:3*i+3]
            M[idx2] = -M[idx]  # sign flips with the direction of r_ij
            ### TR coupling
            # Transpose sector of RT, with the matching sign convention.
            idx3 = np.s_[0,3*i:3*i+3,1,3*j:3*j+3]
            M[idx3] = -M[idx]
            idx4 = np.s_[0,3*j:3*j+3,1,3*i:3*i+3]
            M[idx4] = -M[idx2]

    return M.reshape([6*Nparticles, 6*Nparticles])
| import numpy as np
class interface:
"""A no-slip interface"""
def __init__(self, z=0):
"""
Arguments:
z z-position of the interface
"""
self.z = z
def levi_civita():
"""return the levi-civita symbol"""
eijk = np.zeros((3, 3, 3), dtype=float)
eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1
eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1
return eijk
def particle_wall_self_mobility(position, interface, viscosity, radius):
"""
Construct the particle wall self-mobility matrix for a single particle
Arguments:
position[3] position of particle
interface interface object
viscosity dynamic viscosity µ of surrounding fluid
radius particle radius
"""
M = np.zeros([2, 2, 3, 3], dtype=float)
h = (position[2] - interface.z)/radius
gamma_T = 6*np.pi*viscosity*radius
gamma_R = 6*np.pi*viscosity*radius**3
a = 1/(16*gamma_T)*(9/h - 2/h**3 + 1/h**5)
b = 1/(8*gamma_T)*(9/h - 4/h**3 + 1/h**5)
M[0,0] = np.diag([a,a,b])
a = 15/(64*gamma_R)*(1/h**3)
b = 3/(32*gamma_R)*(1/h**3)
M[1,1] = np.diag([a,a,b])
return M
def grand_mobility_matrix(position, drag_T, drag_R, viscosity):
"""
Construct the grand mobility matrix for a given cluster
Arguments:
position[N,3] position of N particles
drag_T[N,3,3] 3 by 3 translational drag tensors of N particles
drag_R[N,3,3] 3 by 3 rotational drag tensors of N particles
viscosity dynamic viscosity µ of surrounding fluid
"""
Nparticles = len(position)
M = np.zeros([2, 3*Nparticles, 2, 3*Nparticles], dtype=float)
### block-diagonal components
for i in range(Nparticles):
idx = np.s_[0,3*i:3*i+3,0,3*i:3*i+3]
M[idx] = drag_T[i]
idx = np.s_[1,3*i:3*i+3,1,3*i:3*i+3]
M[idx] = drag_R[i]
### Off block-diagonal components
factor = 1/(8*np.pi*viscosity)
eps = levi_civita()
for i in range(Nparticles):
for j in range(i+1, Nparticles):
r_ijx = position[i] - position[j]
r_ij = np.linalg.norm(r_ijx)
I = np.identity(3, dtype=float)
T = np.outer(r_ijx, r_ijx)/r_ij**2
K = np.einsum('ijk,k->ij', eps, r_ijx)/r_ij
### TT coupling
idx = np.s_[0,3*i:3*i+3,0,3*j:3*j+3]
M[idx] = factor/r_ij*(I + T)
idx2 = np.s_[0,3*j:3*j+3,0,3*i:3*i+3]
M[idx2] = M[idx]
### RR coupling
idx = np.s_[1,3*i:3*i+3,1,3*j:3*j+3]
M[idx] = factor/(2*r_ij**3)*(3*T - I)
idx2 = np.s_[1,3*j:3*j+3,1,3*i:3*i+3]
M[idx2] = M[idx]
### RT coupling
idx = np.s_[1,3*i:3*i+3,0,3*j:3*j+3]
M[idx] = -factor/r_ij**2*(K)
idx2 = np.s_[1,3*j:3*j+3,0,3*i:3*i+3]
M[idx2] = -M[idx]
### TR coupling
idx3 = np.s_[0,3*i:3*i+3,1,3*j:3*j+3]
M[idx3] = -M[idx]
idx4 = np.s_[0,3*j:3*j+3,1,3*i:3*i+3]
M[idx4] = -M[idx2]
return M.reshape([6*Nparticles, 6*Nparticles]) | en | 0.660479 | A no-slip interface Arguments: z z-position of the interface return the levi-civita symbol Construct the particle wall self-mobility matrix for a single particle Arguments: position[3] position of particle interface interface object viscosity dynamic viscosity µ of surrounding fluid radius particle radius Construct the grand mobility matrix for a given cluster Arguments: position[N,3] position of N particles drag_T[N,3,3] 3 by 3 translational drag tensors of N particles drag_R[N,3,3] 3 by 3 rotational drag tensors of N particles viscosity dynamic viscosity µ of surrounding fluid ### block-diagonal components ### Off block-diagonal components ### TT coupling ### RR coupling ### RT coupling ### TR coupling | 3.255872 | 3 |
tests/test_core_htype.py | EticaAI/HXL-Data-Science-file-formats | 3 | 6618479 | # import hxlm.core.base
from hxlm.core.htype.data import (
textDataHtype,
emailDataHtype,
numberDataHtype,
urlDataHtype,
phoneDataHtype,
dateDataHtype
)
def test_textDataHtype():
    """A text htype keeps the exact string it was constructed with."""
    sample = textDataHtype(value="Lorem ipsum")
    assert "Lorem ipsum" == sample.value
def test_numberDataHtype():
    """A number htype keeps the exact numeric value it was given."""
    # TODO: maybe test type? And if input was string?
    sample = numberDataHtype(value=3.14159265358979323)
    assert 3.14159265358979323 == sample.value
def test_urlDataHtype():
    """A URL htype keeps the exact URL string it was given."""
    sample = urlDataHtype(value="https://example.org")
    assert "https://example.org" == sample.value
def test_emailDataHtype():
    """An email htype keeps the exact address string it was given."""
    sample = emailDataHtype(value="<EMAIL>")
    assert "<EMAIL>" == sample.value
def test_phoneDataHtype():
    """A phone htype keeps the exact phone-number string it was given."""
    sample = phoneDataHtype(value="+55 51 99999-9999")
    assert "+55 51 99999-9999" == sample.value
def test_dateDataHtype():
    """A date htype keeps the exact date string it was given (no parsing)."""
    sample = dateDataHtype(value="25/01/1986")
    assert "25/01/1986" == sample.value
| # import hxlm.core.base
from hxlm.core.htype.data import (
textDataHtype,
emailDataHtype,
numberDataHtype,
urlDataHtype,
phoneDataHtype,
dateDataHtype
)
def test_textDataHtype():
example1 = textDataHtype(value="Lorem ipsum")
assert example1.value == "Lorem ipsum"
def test_numberDataHtype():
# TODO: maybe test type? And if input was string?
example1 = numberDataHtype(value=3.14159265358979323)
assert example1.value == 3.14159265358979323
def test_urlDataHtype():
example1 = urlDataHtype(value="https://example.org")
assert example1.value == "https://example.org"
def test_emailDataHtype():
example1 = emailDataHtype(value="<EMAIL>")
assert example1.value == "<EMAIL>"
def test_phoneDataHtype():
example1 = phoneDataHtype(value="+55 51 99999-9999")
assert example1.value == "+55 51 99999-9999"
def test_dateDataHtype():
example1 = dateDataHtype(value="25/01/1986")
assert example1.value == "25/01/1986"
| en | 0.882384 | # import hxlm.core.base # TODO: maybe test type? And if input was string? | 2.595411 | 3 |
mc_states/tests/unit/grains/makina_grains_tests.py | makinacorpus/makina-states | 18 | 6618480 | <filename>mc_states/tests/unit/grains/makina_grains_tests.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import division
import copy
import textwrap
import subprocess
import sys
import os
import unittest
import StringIO
import mc_states.api
from .. import base
import contextlib
from mock import MagicMock, patch, mock_open
rt1 = textwrap.dedent('''
Kernel IP routing table
Destination Gateway Genmask Flags MSS Window irtt Iface
0.0.0.0 172.16.58.3 0.0.0.0 UG 0 0 0 br0
10.0.3.0 0.0.0.0 255.255.255.0 U 0 0 0 lxcbr0
10.5.0.0 0.0.0.0 255.255.0.0 U 0 0 0 lxcbr1
10.90.0.0 10.90.48.65 255.254.0.0 UG 0 0 0 eth1
10.90.48.64 0.0.0.0 255.255.255.192 U 0 0 0 eth1
192.168.122.0 0.0.0.0 255.255.255.0 U 0 0 0 virbr0
172.16.17.32 0.0.0.0 255.255.255.0 U 0 0 0 br0
''')
class TestCase(base.GrainsCase):
    def docker(self):
        """Invoke the grain module's private _is_docker() helper and return its result."""
        return self.get_private('makina_grains._is_docker')()
    @property
    def grains(self):
        """Recompute and return the makina grains dict via the public entry point."""
        return self._('makina_grains.get_makina_grains')()
def test_pg(self):
with contextlib.nested(
patch(
'os.path.exists', MagicMock(return_value=False)
),
patch(
'os.listdir', MagicMock(return_value=0)
)
):
fun = self.get_private('makina_grains._pgsql_vers')
ret = fun()
self.assertEquals(ret['details'], {})
self.assertEquals(ret['global'], {})
def do_(path):
if path in [
'/var/lib/postgresql/9.0/main/postmaster.pid'
]:
return True
return False
with contextlib.nested(
patch(
'os.path.exists', MagicMock(side_effect=do_)
),
patch(
'os.listdir', MagicMock(return_value=0)
)
):
fun = self.get_private('makina_grains._pgsql_vers')
ret = fun()
self.assertEquals(ret['global'], {'9.0': True})
self.assertEquals(ret['details'],
{'9.0': {'has_data': False, 'running': True}})
def do_(path):
if path in [
'/var/lib/postgresql/9.0/main/postmaster.pid',
'/var/lib/postgresql/9.0/main/base',
'/var/lib/postgresql/9.0/main/globalbase'
]:
return True
return False
with contextlib.nested(
patch(
'os.path.exists', MagicMock(side_effect=do_)
),
patch(
'os.listdir', MagicMock(return_value=0)
)
):
fun = self.get_private('makina_grains._pgsql_vers')
ret = fun()
self.assertEquals(ret['global'], {'9.0': True})
self.assertEquals(ret['details'],
{'9.0': {'has_data': False, 'running': True}})
def do_(path):
if path in [
'/var/lib/postgresql/9.0/main/postmaster.pid',
'/var/lib/postgresql/9.0/main/base',
'/var/lib/postgresql/9.0/main/globalbase'
]:
return True
return False
with contextlib.nested(
patch(
'os.path.exists', MagicMock(side_effect=do_)
),
patch(
'os.listdir', MagicMock(return_value=3)
)
):
fun = self.get_private('makina_grains._pgsql_vers')
ret = fun()
self.assertEquals(ret['global'], {'9.0': True})
self.assertEquals(ret['details'],
{'9.0': {'has_data': True, 'running': True}})
    def test_devhostnum(self):
        """Outside a devhost, _devhost_num() should return an empty string."""
        fun = self.get_private('makina_grains._devhost_num')
        self.assertEqual(fun(), '')
def test_is_systemd(self):
fun = self.get_private('makina_grains._is_systemd')
with patch(
'os.path.exists', MagicMock(return_value=False)
):
with patch(
'os.readlink', MagicMock(return_value='foo')
):
self.assertFalse(fun())
with patch(
'os.readlink', MagicMock(return_value='/lib/systemd/systemd')
):
self.assertTrue(fun())
with patch(
'os.readlink', MagicMock(return_value='foo')
):
with patch(
'os.path.exists', MagicMock(return_value=True)
):
with patch(
'os.listdir', MagicMock(return_value=[1, 2, 3, 4, 5])
):
self.assertTrue(fun())
with patch(
'os.listdir', MagicMock(return_value=[1, 2, 3])
):
self.assertFalse(fun())
with patch(
'os.path.exists', MagicMock(side_effect=OSError)
):
with patch(
'os.readlink', MagicMock(side_effect=OSError)
):
self.assertTrue(fun() is False)
    def test_is_devhost(self):
        """_is_devhost() mirrors whether _devhost_num() yields a number."""
        fun = self.get_private('makina_grains._is_devhost')
        # Patch the sibling helper on the module that owns the function
        # under test, so _is_devhost sees the mocked value.
        mod = sys.modules[fun.__module__]
        # Empty devhost number -> not a devhost.
        with patch.object(
            mod, '_devhost_num', MagicMock(return_value='')
        ):
            self.assertFalse(fun())
        # Any non-empty number -> devhost.
        with patch.object(
            mod, '_devhost_num', MagicMock(return_value='2')
        ):
            self.assertTrue(fun())
def test_is_docker(self):
def raise_(*a):
raise IOError()
wopen = mock_open(read_data='foo')
gopen = mock_open(read_data='docker')
noopen = MagicMock(side_effect=raise_)
with self.patch(
grains={'makina.docker': False},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
with patch('__builtin__.open', noopen):
with patch("os.path.exists", return_value=False):
ret4 = copy.deepcopy(self.docker())
with patch(
"os.path.exists", return_value=True
):
ret5 = copy.deepcopy(self.docker())
with patch('__builtin__.open', gopen):
ret3 = copy.deepcopy(self.docker())
with patch('__builtin__.open', wopen):
ret6 = copy.deepcopy(self.docker())
with self.patch(
grains={'makina.docker': True},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
ret1 = copy.deepcopy(self.docker())
self.assertFalse(ret4)
self.assertTrue(ret5)
self.assertFalse(ret6)
self.assertTrue(ret3)
self.assertTrue(ret1)
def test_is_container(self):
fun = self.get_private('makina_grains._is_container')
mod = sys.modules[fun.__module__]
with contextlib.nested(
patch.object(
mod, '_is_docker', MagicMock(return_value=True)
),
patch.object(
mod, '_is_lxc', MagicMock(return_value=True)
)
):
self.assertTrue(fun())
with contextlib.nested(
patch.object(
mod, '_is_docker', MagicMock(return_value=False)
),
patch.object(
mod, '_is_lxc', MagicMock(return_value=True)
)
):
self.assertTrue(fun())
with contextlib.nested(
patch.object(
mod, '_is_docker', MagicMock(return_value=True)
),
patch.object(
mod, '_is_lxc', MagicMock(return_value=False)
)
):
self.assertTrue(fun())
with contextlib.nested(
patch.object(
mod, '_is_docker', MagicMock(return_value=False)
),
patch.object(
mod, '_is_lxc', MagicMock(return_value=False)
)
):
self.assertFalse(fun())
def test_routes(self):
class obj:
stdout = StringIO.StringIO(rt1)
with patch.object(subprocess, 'Popen', return_value=obj):
fun = self.get_private('makina_grains._routes')
ret = fun()
self.assertEqual(
ret,
([{'flags': 'UG',
'gateway': '172.16.58.3',
'genmask': '0.0.0.0',
'iface': 'br0',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.255.0',
'iface': 'lxcbr0',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.0.0',
'iface': 'lxcbr1',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'UG',
'gateway': '10.90.48.65',
'genmask': '255.254.0.0',
'iface': 'eth1',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.255.192',
'iface': 'eth1',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.255.0',
'iface': 'virbr0',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.255.0',
'iface': 'br0',
'irtt': '0',
'mss': '0',
'window': '0'}],
{'flags': 'UG',
'gateway': '172.16.58.3',
'genmask': '0.0.0.0',
'iface': 'br0',
'irtt': '0',
'mss': '0',
'window': '0'},
'172.16.58.3'))
def test_is_lxc(self):
def raise_(*a):
raise IOError()
wopen = mock_open(read_data='foo')
gopen = mock_open(read_data=':cpu:/a')
g1open = mock_open(read_data=':cpuset:/a')
agopen = mock_open(read_data=':cpu:/')
ag1open = mock_open(read_data=':cpuset:/')
noopen = MagicMock(side_effect=raise_)
fun = self.get_private('makina_grains._is_lxc')
mod = sys.modules[fun.__module__]
with self.patch(
grains={'makina.lxc': None},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
with patch.object(
mod, '_is_docker', MagicMock(return_value=True)
):
ret4 = fun()
with patch('__builtin__.open', wopen):
reta = fun()
with patch('__builtin__.open', gopen):
retb = fun()
with patch.object(
mod, '_is_docker', MagicMock(return_value=False)
):
with patch('__builtin__.open', wopen):
ret5 = fun()
with patch('__builtin__.open', noopen):
ret6 = fun()
with patch('__builtin__.open', g1open):
ret7 = fun()
with patch('__builtin__.open', gopen):
ret8 = fun()
with patch('__builtin__.open', ag1open):
ret11 = fun()
with patch('__builtin__.open', agopen):
ret12 = fun()
with self.patch(
grains={'makina.lxc': True},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
ret1 = copy.deepcopy(self.grains)
with self.patch(
grains={'makina.lxc': True},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
with patch.object(
mod, '_is_docker', MagicMock(return_value=False)
):
ret14 = fun()
with self.patch(
grains={'makina.lxc': False},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
with patch.object(
mod, '_is_docker', MagicMock(return_value=False)
):
ret15 = fun()
self.assertFalse(ret4)
self.assertFalse(ret5)
self.assertFalse(ret6)
self.assertFalse(ret11)
self.assertFalse(ret12)
self.assertFalse(ret15)
self.assertTrue(ret1)
self.assertTrue(ret7)
self.assertTrue(ret8)
self.assertFalse(reta)
self.assertFalse(retb)
self.assertTrue(ret14)
if __name__ == '__main__':
unittest.main()
# vim:set et sts=4 ts=4 tw=80:
| <filename>mc_states/tests/unit/grains/makina_grains_tests.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import division
import copy
import textwrap
import subprocess
import sys
import os
import unittest
import StringIO
import mc_states.api
from .. import base
import contextlib
from mock import MagicMock, patch, mock_open
rt1 = textwrap.dedent('''
Kernel IP routing table
Destination Gateway Genmask Flags MSS Window irtt Iface
0.0.0.0 172.16.58.3 0.0.0.0 UG 0 0 0 br0
10.0.3.0 0.0.0.0 255.255.255.0 U 0 0 0 lxcbr0
10.5.0.0 0.0.0.0 255.255.0.0 U 0 0 0 lxcbr1
10.90.0.0 10.90.48.65 255.254.0.0 UG 0 0 0 eth1
10.90.48.64 0.0.0.0 255.255.255.192 U 0 0 0 eth1
192.168.122.0 0.0.0.0 255.255.255.0 U 0 0 0 virbr0
172.16.17.32 0.0.0.0 255.255.255.0 U 0 0 0 br0
''')
class TestCase(base.GrainsCase):
def docker(self):
return self.get_private('makina_grains._is_docker')()
@property
def grains(self):
return self._('makina_grains.get_makina_grains')()
def test_pg(self):
with contextlib.nested(
patch(
'os.path.exists', MagicMock(return_value=False)
),
patch(
'os.listdir', MagicMock(return_value=0)
)
):
fun = self.get_private('makina_grains._pgsql_vers')
ret = fun()
self.assertEquals(ret['details'], {})
self.assertEquals(ret['global'], {})
def do_(path):
if path in [
'/var/lib/postgresql/9.0/main/postmaster.pid'
]:
return True
return False
with contextlib.nested(
patch(
'os.path.exists', MagicMock(side_effect=do_)
),
patch(
'os.listdir', MagicMock(return_value=0)
)
):
fun = self.get_private('makina_grains._pgsql_vers')
ret = fun()
self.assertEquals(ret['global'], {'9.0': True})
self.assertEquals(ret['details'],
{'9.0': {'has_data': False, 'running': True}})
def do_(path):
if path in [
'/var/lib/postgresql/9.0/main/postmaster.pid',
'/var/lib/postgresql/9.0/main/base',
'/var/lib/postgresql/9.0/main/globalbase'
]:
return True
return False
with contextlib.nested(
patch(
'os.path.exists', MagicMock(side_effect=do_)
),
patch(
'os.listdir', MagicMock(return_value=0)
)
):
fun = self.get_private('makina_grains._pgsql_vers')
ret = fun()
self.assertEquals(ret['global'], {'9.0': True})
self.assertEquals(ret['details'],
{'9.0': {'has_data': False, 'running': True}})
def do_(path):
if path in [
'/var/lib/postgresql/9.0/main/postmaster.pid',
'/var/lib/postgresql/9.0/main/base',
'/var/lib/postgresql/9.0/main/globalbase'
]:
return True
return False
with contextlib.nested(
patch(
'os.path.exists', MagicMock(side_effect=do_)
),
patch(
'os.listdir', MagicMock(return_value=3)
)
):
fun = self.get_private('makina_grains._pgsql_vers')
ret = fun()
self.assertEquals(ret['global'], {'9.0': True})
self.assertEquals(ret['details'],
{'9.0': {'has_data': True, 'running': True}})
def test_devhostnum(self):
fun = self.get_private('makina_grains._devhost_num')
self.assertEqual(fun(), '')
def test_is_systemd(self):
fun = self.get_private('makina_grains._is_systemd')
with patch(
'os.path.exists', MagicMock(return_value=False)
):
with patch(
'os.readlink', MagicMock(return_value='foo')
):
self.assertFalse(fun())
with patch(
'os.readlink', MagicMock(return_value='/lib/systemd/systemd')
):
self.assertTrue(fun())
with patch(
'os.readlink', MagicMock(return_value='foo')
):
with patch(
'os.path.exists', MagicMock(return_value=True)
):
with patch(
'os.listdir', MagicMock(return_value=[1, 2, 3, 4, 5])
):
self.assertTrue(fun())
with patch(
'os.listdir', MagicMock(return_value=[1, 2, 3])
):
self.assertFalse(fun())
with patch(
'os.path.exists', MagicMock(side_effect=OSError)
):
with patch(
'os.readlink', MagicMock(side_effect=OSError)
):
self.assertTrue(fun() is False)
def test_is_devhost(self):
fun = self.get_private('makina_grains._is_devhost')
mod = sys.modules[fun.__module__]
with patch.object(
mod, '_devhost_num', MagicMock(return_value='')
):
self.assertFalse(fun())
with patch.object(
mod, '_devhost_num', MagicMock(return_value='2')
):
self.assertTrue(fun())
def test_is_docker(self):
def raise_(*a):
raise IOError()
wopen = mock_open(read_data='foo')
gopen = mock_open(read_data='docker')
noopen = MagicMock(side_effect=raise_)
with self.patch(
grains={'makina.docker': False},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
with patch('__builtin__.open', noopen):
with patch("os.path.exists", return_value=False):
ret4 = copy.deepcopy(self.docker())
with patch(
"os.path.exists", return_value=True
):
ret5 = copy.deepcopy(self.docker())
with patch('__builtin__.open', gopen):
ret3 = copy.deepcopy(self.docker())
with patch('__builtin__.open', wopen):
ret6 = copy.deepcopy(self.docker())
with self.patch(
grains={'makina.docker': True},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
ret1 = copy.deepcopy(self.docker())
self.assertFalse(ret4)
self.assertTrue(ret5)
self.assertFalse(ret6)
self.assertTrue(ret3)
self.assertTrue(ret1)
def test_is_container(self):
fun = self.get_private('makina_grains._is_container')
mod = sys.modules[fun.__module__]
with contextlib.nested(
patch.object(
mod, '_is_docker', MagicMock(return_value=True)
),
patch.object(
mod, '_is_lxc', MagicMock(return_value=True)
)
):
self.assertTrue(fun())
with contextlib.nested(
patch.object(
mod, '_is_docker', MagicMock(return_value=False)
),
patch.object(
mod, '_is_lxc', MagicMock(return_value=True)
)
):
self.assertTrue(fun())
with contextlib.nested(
patch.object(
mod, '_is_docker', MagicMock(return_value=True)
),
patch.object(
mod, '_is_lxc', MagicMock(return_value=False)
)
):
self.assertTrue(fun())
with contextlib.nested(
patch.object(
mod, '_is_docker', MagicMock(return_value=False)
),
patch.object(
mod, '_is_lxc', MagicMock(return_value=False)
)
):
self.assertFalse(fun())
def test_routes(self):
class obj:
stdout = StringIO.StringIO(rt1)
with patch.object(subprocess, 'Popen', return_value=obj):
fun = self.get_private('makina_grains._routes')
ret = fun()
self.assertEqual(
ret,
([{'flags': 'UG',
'gateway': '172.16.58.3',
'genmask': '0.0.0.0',
'iface': 'br0',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.255.0',
'iface': 'lxcbr0',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.0.0',
'iface': 'lxcbr1',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'UG',
'gateway': '10.90.48.65',
'genmask': '255.254.0.0',
'iface': 'eth1',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.255.192',
'iface': 'eth1',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.255.0',
'iface': 'virbr0',
'irtt': '0',
'mss': '0',
'window': '0'},
{'flags': 'U',
'gateway': '0.0.0.0',
'genmask': '255.255.255.0',
'iface': 'br0',
'irtt': '0',
'mss': '0',
'window': '0'}],
{'flags': 'UG',
'gateway': '172.16.58.3',
'genmask': '0.0.0.0',
'iface': 'br0',
'irtt': '0',
'mss': '0',
'window': '0'},
'172.16.58.3'))
def test_is_lxc(self):
def raise_(*a):
raise IOError()
wopen = mock_open(read_data='foo')
gopen = mock_open(read_data=':cpu:/a')
g1open = mock_open(read_data=':cpuset:/a')
agopen = mock_open(read_data=':cpu:/')
ag1open = mock_open(read_data=':cpuset:/')
noopen = MagicMock(side_effect=raise_)
fun = self.get_private('makina_grains._is_lxc')
mod = sys.modules[fun.__module__]
with self.patch(
grains={'makina.lxc': None},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
with patch.object(
mod, '_is_docker', MagicMock(return_value=True)
):
ret4 = fun()
with patch('__builtin__.open', wopen):
reta = fun()
with patch('__builtin__.open', gopen):
retb = fun()
with patch.object(
mod, '_is_docker', MagicMock(return_value=False)
):
with patch('__builtin__.open', wopen):
ret5 = fun()
with patch('__builtin__.open', noopen):
ret6 = fun()
with patch('__builtin__.open', g1open):
ret7 = fun()
with patch('__builtin__.open', gopen):
ret8 = fun()
with patch('__builtin__.open', ag1open):
ret11 = fun()
with patch('__builtin__.open', agopen):
ret12 = fun()
with self.patch(
grains={'makina.lxc': True},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
ret1 = copy.deepcopy(self.grains)
with self.patch(
grains={'makina.lxc': True},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
with patch.object(
mod, '_is_docker', MagicMock(return_value=False)
):
ret14 = fun()
with self.patch(
grains={'makina.lxc': False},
filtered=['mc.*'],
kinds=['grains', 'modules']
):
with patch.object(
mod, '_is_docker', MagicMock(return_value=False)
):
ret15 = fun()
self.assertFalse(ret4)
self.assertFalse(ret5)
self.assertFalse(ret6)
self.assertFalse(ret11)
self.assertFalse(ret12)
self.assertFalse(ret15)
self.assertTrue(ret1)
self.assertTrue(ret7)
self.assertTrue(ret8)
self.assertFalse(reta)
self.assertFalse(retb)
self.assertTrue(ret14)
if __name__ == '__main__':
unittest.main()
# vim:set et sts=4 ts=4 tw=80:
| en | 0.241575 | #!/usr/bin/env python # -*- coding: utf-8 -*- Kernel IP routing table Destination Gateway Genmask Flags MSS Window irtt Iface 0.0.0.0 172.16.58.3 0.0.0.0 UG 0 0 0 br0 10.0.3.0 0.0.0.0 255.255.255.0 U 0 0 0 lxcbr0 10.5.0.0 0.0.0.0 255.255.0.0 U 0 0 0 lxcbr1 10.90.0.0 10.90.48.65 255.254.0.0 UG 0 0 0 eth1 10.90.48.64 0.0.0.0 255.255.255.192 U 0 0 0 eth1 192.168.122.0 0.0.0.0 255.255.255.0 U 0 0 0 virbr0 172.16.17.32 0.0.0.0 255.255.255.0 U 0 0 0 br0 # vim:set et sts=4 ts=4 tw=80: | 1.950768 | 2 |
finddupimg.py | lancelotj/finddupimg | 0 | 6618481 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import json
import imagehash
import argparse
import collections
from operator import itemgetter
from collections import defaultdict
from PIL import Image, ExifTags
def print_err(content):
    """Write *content* (plus a newline) to stderr, keeping stdout free for results."""
    sys.stderr.write('%s\n' % content)
def get_file_size(file_name):
    """Return the size of *file_name* in bytes."""
    return os.stat(file_name).st_size
def get_image_size(img):
    """Return the pixel dimensions of *img* as a 'W x H' string."""
    width, height = img.size
    return '{} x {}'.format(width, height)
def is_image(thefile,
             extensions=frozenset(('.jpg', '.jpeg', '.gif', '.png', '.tiff'))):
    """Return True if *thefile* looks like a non-hidden image file.

    Arguments:
        thefile     file name to test (extension matching is case-insensitive)
        extensions  set of lower-case extensions (with leading dot) accepted
                    as images; defaults to the formats this tool scans.

    Names whose stem starts with '.' (hidden files) are always rejected.
    The default set is a frozenset built once, instead of a set literal
    re-created on every call as before.
    """
    stem, ext = os.path.splitext(thefile)
    return not stem.startswith('.') and ext.lower() in extensions
def walk_images(path):
    """Yield a duplicate-detection record for every image file under *path*.

    Each yielded dict carries the perceptual hash ('hash'), the file's
    'path', its byte 'size', and its 'image_size' string.  Hidden
    directories are pruned from the walk.
    """
    for root, dirs, files in os.walk(path):
        # In-place prune so os.walk never descends into hidden directories.
        dirs[:] = filter(lambda d: not d.startswith('.'), dirs)
        for fname in files:
            if is_image(fname):
                # NOTE(review): rebinds the *path* parameter; harmless since
                # os.walk was already called, but the shadowing is easy to
                # misread.
                path = os.path.join(root, fname)
                with Image.open(path) as img:
                    dup_info = {
                        # Perceptual hash: visually similar images yield
                        # similar/equal hashes, unlike a byte checksum.
                        'hash': str(imagehash.phash(img)),
                        'path': path,
                        'size': get_file_size(path),
                        'image_size': get_image_size(img),
                    }
                    yield dup_info
def main(args):
    """Scan every directory in args.dirs, hash each image, and dump a JSON
    mapping of perceptual hash -> list of matching files to args.output.

    Files sharing a hash are likely duplicates.  Hash groups are ordered by
    how many files they contain (largest groups last); within a group files
    are sorted largest-first so the best-quality copy comes first.

    Removed the unused ``dup_count`` local from the original.
    """
    total = 0
    # Seed the table from a previous run's JSON output, if provided.
    if args.existing:
        src_dict = defaultdict(list, json.load(args.existing))
    else:
        src_dict = defaultdict(list)
    try:
        for path in args.dirs:
            for dup_info in walk_images(path):
                total += 1
                print_err('Processing %s.' % dup_info['path'])
                src_dict[dup_info['hash']].append({
                    'path': dup_info['path'],
                    'size': dup_info['size'],
                    'image_size': dup_info['image_size'],
                })
    except KeyboardInterrupt:
        # Ctrl-C stops the scan but still writes what was collected so far.
        pass
    print_err('\n%d files processed.' % total)
    output = collections.OrderedDict((
        info[0], sorted(info[1], key=itemgetter('size'), reverse=True)
    ) for info in sorted(
        src_dict.items(),
        key=lambda d: len(d[1])))
    print(json.dumps(output, indent=2), file=args.output)
if __name__ == '__main__':
    # CLI entry point: parse arguments and hand off to main().
    parser = argparse.ArgumentParser(
        description=(
            'Looking up a directory to see if there are duplicated file.'))
    parser.add_argument('dirs', nargs='+', help='Target directories')
    # NOTE(review): declared with store_false (so it defaults to True and the
    # flag turns it off) and is never read by main() -- confirm intent.
    parser.add_argument(
        '-v', '--verbose', action='store_false', help='More information')
    # Optional JSON from a previous run; seeds this run's hash table.
    parser.add_argument(
        '-e', '--existing', type=argparse.FileType('r'),
        default=None, help='Use this as existing hash table.')
    parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help='output')
    main(parser.parse_args())
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import json
import imagehash
import argparse
import collections
from operator import itemgetter
from collections import defaultdict
from PIL import Image, ExifTags
def print_err(content):
print(content, file=sys.stderr)
def get_file_size(file_name):
return os.path.getsize(file_name)
def get_image_size(img):
return "%s x %s" % img.size
def is_image(thefile):
fname, ext = os.path.splitext(thefile)
return not fname.startswith('.') and ext.lower() in set(['.jpg', '.jpeg', '.gif', '.png', '.tiff'])
def walk_images(path):
for root, dirs, files in os.walk(path):
dirs[:] = filter(lambda d: not d.startswith('.'), dirs)
for fname in files:
if is_image(fname):
path = os.path.join(root, fname)
with Image.open(path) as img:
dup_info = {
'hash': str(imagehash.phash(img)),
'path': path,
'size': get_file_size(path),
'image_size': get_image_size(img),
}
yield dup_info
def main(args):
dup_count = 0
total = 0
if args.existing:
src_dict = defaultdict(list, json.load(args.existing))
else:
src_dict = defaultdict(list)
try:
for path in args.dirs:
for dup_info in walk_images(path):
total += 1
print_err('Processing %s.' % dup_info['path'])
src_dict[dup_info['hash']].append({
'path': dup_info['path'],
'size': dup_info['size'],
'image_size': dup_info['image_size'],
})
except KeyboardInterrupt:
pass
print_err('\n%d files processed.' % total)
output = collections.OrderedDict((
info[0], sorted(info[1], key=itemgetter('size'), reverse=True)
) for info in sorted(
src_dict.items(),
key=lambda d: len(d[1])))
print(json.dumps(output, indent=2), file=args.output)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=(
'Looking up a directory to see if there are duplicated file.'))
parser.add_argument('dirs', nargs='+', help='Target directories')
parser.add_argument(
'-v', '--verbose', action='store_false', help='More information')
parser.add_argument(
'-e', '--existing', type=argparse.FileType('r'),
default=None, help='Use this as existing hash table.')
parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help='output')
main(parser.parse_args())
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.731603 | 3 |
pytest/test_errors.py | ashtul/RedisGears | 0 | 6618482 | from RLTest import Env
import time
def getConnectionByEnv(env):
    """Return a redis connection suited to the current test environment.

    On an OSS cluster the gears cluster topology is refreshed first and a
    cluster-aware connection is returned; otherwise a plain connection.
    """
    if env.env == 'oss-cluster':
        env.broadcast('rg.refreshcluster')
        return env.envRunner.getClusterConnection()
    return env.getConnection()
class testGenericErrors:
    """Errors raised while parsing/building a gear (before any execution)
    must be reported back to the client as command errors."""
    def __init__(self):
        self.env = Env()
    def testInvalidSyntax(self):
        # A script that is not valid Python is rejected at parse time.
        self.env.expect('rg.pyexecute', '1defs + GearsBuilder().notexists()').error().contains("invalid syntax")
    def testScriptError(self):
        # Valid Python that fails at build time surfaces the AttributeError.
        self.env.expect('rg.pyexecute', 'GearsBuilder().notexists()').error().equal("'GearsBuilder' object has no attribute 'notexists'")
    def testBuilderCreationWithUnexistingReader(self):
        # Unknown reader names are rejected when the builder runs.
        self.env.expect('rg.pyexecute', 'GB("unexists").accumulate(lambda a, x: 1 + (a if a else 0)).run()').error().contains('reader are not exists')
class testStepsErrors:
    """A Python error raised inside any pipeline step must be collected as
    an execution error instead of aborting the gear run.  Every test calls
    an undefined name (notexists) inside one step and then checks that the
    reply's error slot (res[1]) reports at least one error."""
    def __init__(self):
        self.env = Env()
        conn = getConnectionByEnv(self.env)
        # Seed two keys so the default KeysReader has records to process.
        conn.execute_command('set', 'x', '1')
        conn.execute_command('set', 'y', '1')
    def testForEachError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().foreach(lambda x: notexists(x)).collect().run()')
        self.assertLessEqual(1, res[1])
    def testGroupByError(self):
        # Error inside the reducer of groupby.
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().groupby(lambda x: "str", lambda a, x, k: notexists(x)).collect().run()')
        self.assertLessEqual(1, res[1])
    def testBatchGroupByError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().batchgroupby(lambda x: "str", lambda x, k: notexists(x)).collect().run()')
        self.assertLessEqual(1, res[1])
    def testExtractorError(self):
        # Error inside the key-extractor of groupby.
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().groupby(lambda x: notexists(x), lambda a, x, k: 1).collect().run()')
        self.assertLessEqual(1, res[1])
    def testAccumulateError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().accumulate(lambda a, x: notexists(a, x)).collect().run()')
        self.assertLessEqual(1, res[1])
    def testMapError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().map(lambda x: notexists(x)).collect().run()')
        self.assertLessEqual(1, res[1])
    def testFlatMapError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().flatmap(lambda x: notexists(x)).collect().run()')
        self.assertLessEqual(1, res[1])
    def testFilterError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().filter(lambda x: notexists(x)).collect().run()')
        self.assertLessEqual(1, res[1])
    def testRepartitionError(self):
        # Two repartition steps so the error surfaces on each shard move.
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().repartition(lambda x: notexists(x)).repartition(lambda x: notexists(x)).collect().run()')
        self.assertLessEqual(1, res[1])
class testStepsWrongArgs:
def __init__(self):
self.env = Env()
def testRegisterWithWrongRegexType(self):
self.env.expect('rg.pyexecute', 'GB().register(1)').error().contains('regex argument must be a string')
def testRegisterWithWrongEventKeysTypesList(self):
self.env.expect('rg.pyexecute', 'GB().register(regex="*", eventTypes=1)').error().contains('object is not iterable')
self.env.expect('rg.pyexecute', 'GB().register(regex="*", keyTypes=1)').error().contains('object is not iterable')
self.env.expect('rg.pyexecute', 'GB().register(regex="*", eventTypes=[1, 2, 3])').error().contains('type is not string')
self.env.expect('rg.pyexecute', 'GB().register(regex="*", keyTypes=[1, 2, 3])').error().contains('type is not string')
def testGearsBuilderWithWrongBuilderArgType(self):
self.env.expect('rg.pyexecute', 'GB(1).run()').error().contains('reader argument must be a string')
def testExecuteWithWrongCommandArgType(self):
self.env.expect('rg.pyexecute', 'execute(1)').error().contains('the given command must be a string')
def testTimeEventWithWrongCallbackArg(self):
self.env.expect('rg.pyexecute', 'registerTE(2, 2)').error().contains('callback must be a function')
def testTimeEventWithWrongTimeArg(self):
self.env.expect('rg.pyexecute', 'registerTE("2", lambda x: str(x))').error().contains('time argument must be a long')
def testMapWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().map(1, 2).run()').error().contains('wrong number of args')
self.env.expect('rg.pyexecute', 'GB().map(1).run()').error().contains('argument must be a function')
def testFilterWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().filter(1, 2).run()').error().contains('wrong number of args')
self.env.expect('rg.pyexecute', 'GB().filter(1).run()').error().contains('argument must be a function')
def testGroupByWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().groupby(1, 2, 3).run()').error().contains('wrong number of args')
self.env.expect('rg.pyexecute', 'GB().groupby(1, 2).run()').error().contains('argument must be a function')
def testBatchGroupByWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().batchgroupby(1, 2, 3).run()').error().contains('wrong number of args')
self.env.expect('rg.pyexecute', 'GB().batchgroupby(1, 2).run()').error().contains('argument must be a function')
def testCollectWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().collect(1, 2, 3).run()').error().contains('wrong number of args')
def testForEachWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().foreach(1, 2).run()').error().contains('wrong number of args')
self.env.expect('rg.pyexecute', 'GB().foreach(1).run()').error().contains('argument must be a function')
def testRepartitionWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().repartition(1, 2).run()').error().contains('wrong number of args')
self.env.expect('rg.pyexecute', 'GB().repartition(1).run()').error().contains('argument must be a function')
def testLimitWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().limit().run()').error().contains('wrong number of args')
self.env.expect('rg.pyexecute', 'GB().limit(1, 2, 3).run()').error().contains('wrong number of args')
self.env.expect('rg.pyexecute', 'GB().limit("awdwada").run()').error().contains('argument must be a number')
self.env.expect('rg.pyexecute', 'GB().limit(1, "kakaka").run()').error().contains('argument must be a number')
def testAccumulateWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().accumulate(1, 2).run()').error().contains('wrong number of args')
self.env.expect('rg.pyexecute', 'GB().accumulate(1).run()').error().contains('argument must be a function')
def testAvgWrongArgs(self):
self.env.expect('rg.pyexecute', 'GB().avg(1).run()').error().contains('argument must be a function')
def testPyReaderWithWrongArgument(self):
self.env.expect('rg.pyexecute', 'GB("PythonReader").run("*")').error().contains('pyreader argument must be a functio')
self.env.expect('rg.pyexecute', 'GB("PythonReader").run()').error().contains('pyreader argument must be a functio')
self.env.expect('rg.pyexecute', 'GB("PythonReader", "*").run()').error().contains('pyreader argument must be a functio')
self.env.expect('rg.pyexecute', 'GB("PythonReader", ShardReaderCallback).run("*")').error().contains('pyreader argument must be a functio')
class testGetExecutionErrorReporting:
def __init__(self):
self.env = Env()
conn = getConnectionByEnv(self.env)
conn.execute_command('set', '0', 'falsE')
conn.execute_command('set', '1', 'truE')
conn.execute_command('set', '', 'mebbE')
def testErrorShouldBeReportedWithTracebackAttempted(self):
self.env.cmd('RG.CONFIGSET', 'PythonAttemptTraceback', 1)
id = self.env.cmd('RG.PYEXECUTE', 'GearsBuilder().repartition(lambda x: notexists(x)).repartition(lambda x: notexists(x)).collect().run()', 'UNBLOCKING')
time.sleep(1)
res = self.env.cmd('RG.GETEXECUTION', id)
errors = res[0][3][9]
for error in errors:
self.env.assertContains("name \'notexists\' is not defined", error)
self.env.cmd('RG.DROPEXECUTION', id)
def testErrorShouldBeReportedWithTracebackNotAttempted(self):
self.env.cmd('RG.CONFIGSET', 'PythonAttemptTraceback', 0)
id = self.env.cmd('RG.PYEXECUTE', 'GearsBuilder().repartition(lambda x: notexists(x)).repartition(lambda x: notexists(x)).collect().run()', 'UNBLOCKING')
time.sleep(1)
res = self.env.cmd('RG.GETEXECUTION', id)
errors = res[0][3][9]
for error in errors:
self.env.assertContains("name 'notexists' is not defined", error)
self.env.cmd('RG.DROPEXECUTION', id)
self.env.cmd('RG.CONFIGSET', 'PythonAttemptTraceback', 1)
| from RLTest import Env
import time
def getConnectionByEnv(env):
    """Return a Redis connection appropriate for the RLTest environment.

    For an OSS cluster, first make RedisGears refresh its cluster view,
    then hand back a cluster-aware connection; otherwise return a plain
    single-instance connection.
    """
    if env.env == 'oss-cluster':
        env.broadcast('rg.refreshcluster')
        return env.envRunner.getClusterConnection()
    return env.getConnection()
class testGenericErrors:
    """Errors raised while parsing/building a Gears script (before it runs)."""
    def __init__(self):
        self.env = Env()
    def testInvalidSyntax(self):
        # A syntactically invalid Python script must be rejected up front.
        self.env.expect('rg.pyexecute', '1defs + GearsBuilder().notexists()').error().contains("invalid syntax")
    def testScriptError(self):
        # Valid syntax, but an unknown builder attribute fails at build time.
        self.env.expect('rg.pyexecute', 'GearsBuilder().notexists()').error().equal("'GearsBuilder' object has no attribute 'notexists'")
    def testBuilderCreationWithUnexistingReader(self):
        # Creating a builder with an unregistered reader name is an error.
        self.env.expect('rg.pyexecute', 'GB("unexists").accumulate(lambda a, x: 1 + (a if a else 0)).run()').error().contains('reader are not exists')
class testStepsErrors:
    """Each pipeline step must report per-record errors when its callback raises.

    Every test runs a pipeline whose callback calls the undefined name
    ``notexists``; the reply's error counter (``res[1]``) must be >= 1.
    """
    def __init__(self):
        self.env = Env()
        conn = getConnectionByEnv(self.env)
        # Seed two keys so the default KeysReader has records to feed the steps.
        conn.execute_command('set', 'x', '1')
        conn.execute_command('set', 'y', '1')
    def testForEachError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().foreach(lambda x: notexists(x)).collect().run()')
        self.env.assertLessEqual(1, res[1])
    def testGroupByError(self):
        # Error raised inside the reducer callback.
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().groupby(lambda x: "str", lambda a, x, k: notexists(x)).collect().run()')
        self.env.assertLessEqual(1, res[1])
    def testBatchGroupByError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().batchgroupby(lambda x: "str", lambda x, k: notexists(x)).collect().run()')
        self.env.assertLessEqual(1, res[1])
    def testExtractorError(self):
        # Error raised inside the key-extractor callback.
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().groupby(lambda x: notexists(x), lambda a, x, k: 1).collect().run()')
        self.env.assertLessEqual(1, res[1])
    def testAccumulateError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().accumulate(lambda a, x: notexists(a, x)).collect().run()')
        self.env.assertLessEqual(1, res[1])
    def testMapError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().map(lambda x: notexists(x)).collect().run()')
        self.env.assertLessEqual(1, res[1])
    def testFlatMapError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().flatmap(lambda x: notexists(x)).collect().run()')
        self.env.assertLessEqual(1, res[1])
    def testFilterError(self):
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().filter(lambda x: notexists(x)).collect().run()')
        self.env.assertLessEqual(1, res[1])
    def testRepartitionError(self):
        # Two repartitions so the failure happens on both shuffle passes.
        res = self.env.cmd('rg.pyexecute', 'GearsBuilder().repartition(lambda x: notexists(x)).repartition(lambda x: notexists(x)).collect().run()')
        self.env.assertLessEqual(1, res[1])
class testStepsWrongArgs:
    """Argument-validation errors: each builder/step must reject wrong arity
    or wrongly-typed arguments with a descriptive message."""
    def __init__(self):
        self.env = Env()
    def testRegisterWithWrongRegexType(self):
        self.env.expect('rg.pyexecute', 'GB().register(1)').error().contains('regex argument must be a string')
    def testRegisterWithWrongEventKeysTypesList(self):
        # eventTypes/keyTypes must be iterables of strings.
        self.env.expect('rg.pyexecute', 'GB().register(regex="*", eventTypes=1)').error().contains('object is not iterable')
        self.env.expect('rg.pyexecute', 'GB().register(regex="*", keyTypes=1)').error().contains('object is not iterable')
        self.env.expect('rg.pyexecute', 'GB().register(regex="*", eventTypes=[1, 2, 3])').error().contains('type is not string')
        self.env.expect('rg.pyexecute', 'GB().register(regex="*", keyTypes=[1, 2, 3])').error().contains('type is not string')
    def testGearsBuilderWithWrongBuilderArgType(self):
        self.env.expect('rg.pyexecute', 'GB(1).run()').error().contains('reader argument must be a string')
    def testExecuteWithWrongCommandArgType(self):
        self.env.expect('rg.pyexecute', 'execute(1)').error().contains('the given command must be a string')
    def testTimeEventWithWrongCallbackArg(self):
        self.env.expect('rg.pyexecute', 'registerTE(2, 2)').error().contains('callback must be a function')
    def testTimeEventWithWrongTimeArg(self):
        self.env.expect('rg.pyexecute', 'registerTE("2", lambda x: str(x))').error().contains('time argument must be a long')
    def testMapWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().map(1, 2).run()').error().contains('wrong number of args')
        self.env.expect('rg.pyexecute', 'GB().map(1).run()').error().contains('argument must be a function')
    def testFilterWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().filter(1, 2).run()').error().contains('wrong number of args')
        self.env.expect('rg.pyexecute', 'GB().filter(1).run()').error().contains('argument must be a function')
    def testGroupByWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().groupby(1, 2, 3).run()').error().contains('wrong number of args')
        self.env.expect('rg.pyexecute', 'GB().groupby(1, 2).run()').error().contains('argument must be a function')
    def testBatchGroupByWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().batchgroupby(1, 2, 3).run()').error().contains('wrong number of args')
        self.env.expect('rg.pyexecute', 'GB().batchgroupby(1, 2).run()').error().contains('argument must be a function')
    def testCollectWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().collect(1, 2, 3).run()').error().contains('wrong number of args')
    def testForEachWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().foreach(1, 2).run()').error().contains('wrong number of args')
        self.env.expect('rg.pyexecute', 'GB().foreach(1).run()').error().contains('argument must be a function')
    def testRepartitionWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().repartition(1, 2).run()').error().contains('wrong number of args')
        self.env.expect('rg.pyexecute', 'GB().repartition(1).run()').error().contains('argument must be a function')
    def testLimitWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().limit().run()').error().contains('wrong number of args')
        self.env.expect('rg.pyexecute', 'GB().limit(1, 2, 3).run()').error().contains('wrong number of args')
        self.env.expect('rg.pyexecute', 'GB().limit("awdwada").run()').error().contains('argument must be a number')
        self.env.expect('rg.pyexecute', 'GB().limit(1, "kakaka").run()').error().contains('argument must be a number')
    def testAccumulateWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().accumulate(1, 2).run()').error().contains('wrong number of args')
        self.env.expect('rg.pyexecute', 'GB().accumulate(1).run()').error().contains('argument must be a function')
    def testAvgWrongArgs(self):
        self.env.expect('rg.pyexecute', 'GB().avg(1).run()').error().contains('argument must be a function')
    def testPyReaderWithWrongArgument(self):
        # NOTE: 'functio' is a deliberate prefix match on the server message.
        self.env.expect('rg.pyexecute', 'GB("PythonReader").run("*")').error().contains('pyreader argument must be a functio')
        self.env.expect('rg.pyexecute', 'GB("PythonReader").run()').error().contains('pyreader argument must be a functio')
        self.env.expect('rg.pyexecute', 'GB("PythonReader", "*").run()').error().contains('pyreader argument must be a functio')
        self.env.expect('rg.pyexecute', 'GB("PythonReader", ShardReaderCallback).run("*")').error().contains('pyreader argument must be a functio')
class testGetExecutionErrorReporting:
    """RG.GETEXECUTION must expose per-record errors, both with and without
    Python traceback collection enabled."""
    def __init__(self):
        self.env = Env()
        conn = getConnectionByEnv(self.env)
        # Seed keys so the execution has records that will all fail.
        conn.execute_command('set', '0', 'falsE')
        conn.execute_command('set', '1', 'truE')
        conn.execute_command('set', '', 'mebbE')
    def testErrorShouldBeReportedWithTracebackAttempted(self):
        self.env.cmd('RG.CONFIGSET', 'PythonAttemptTraceback', 1)
        # NOTE(review): ``id`` shadows the builtin; kept as-is (doc-only pass).
        id = self.env.cmd('RG.PYEXECUTE', 'GearsBuilder().repartition(lambda x: notexists(x)).repartition(lambda x: notexists(x)).collect().run()', 'UNBLOCKING')
        time.sleep(1)  # give the UNBLOCKING execution time to finish -- NOTE(review): fixed sleep is race-prone
        res = self.env.cmd('RG.GETEXECUTION', id)
        # Errors list inside the execution-plan reply; presumably the field
        # at this position is the step's error array -- verify against the
        # RG.GETEXECUTION reply format if it changes.
        errors = res[0][3][9]
        for error in errors:
            self.env.assertContains("name \'notexists\' is not defined", error)
        self.env.cmd('RG.DROPEXECUTION', id)
    def testErrorShouldBeReportedWithTracebackNotAttempted(self):
        self.env.cmd('RG.CONFIGSET', 'PythonAttemptTraceback', 0)
        id = self.env.cmd('RG.PYEXECUTE', 'GearsBuilder().repartition(lambda x: notexists(x)).repartition(lambda x: notexists(x)).collect().run()', 'UNBLOCKING')
        time.sleep(1)
        res = self.env.cmd('RG.GETEXECUTION', id)
        errors = res[0][3][9]
        for error in errors:
            self.env.assertContains("name 'notexists' is not defined", error)
        self.env.cmd('RG.DROPEXECUTION', id)
        # Restore the default so later tests see tracebacks again.
        self.env.cmd('RG.CONFIGSET', 'PythonAttemptTraceback', 1)
| none | 1 | 2.220273 | 2 | |
data.py | wcode-wzx/chinese_ocr | 0 | 6618483 | <filename>data.py
import os
file_dir = r"C:/Users\\thyme\\Desktop\\加密图片分类备份\\test\\一"
i = 1
a = os.walk(file_dir)
b = None
for root, dirs, files in os.walk(file_dir):
print(i)
i += 1
print(root) #当前目录路径
#print(dirs) #当前路径下所有子目录
#print(files) #当前路径下所有非目录子文件
print(b)
# name = ['一', '七', '三', '上', '下', '不', '中', '九', '了', '二', '五', '低', '保', '光', '八', '公', '六', '养', '内', '冷', '副', '加', '动', '十', '只', '右', '启', '呢', '味', '和', '响', '四', '地', '坏', '坐', '外', '多', '大', '好', '孩', '实', '小', '少', '左', '开', '当', '很', '得', '性', '手', '排', '控', '无', '是', '更', '有', '机', '来', '档', '比', '油', '泥', '灯', '电', '的', '皮', '盘', '真', '着', '短', '矮', '硬', '空', '级', '耗', '自', '路', '身', '软', '过', '近', '远', '里', '量', '长', '门', '问', '雨', '音', '高']
# for i in range(0,len(name)):
# print(i)
# oldname = "E:\\vsProject\\YOLOv5\\chinese_ocr\\test\\"+str(name[i])
# newname = "E:\\vsProject\\YOLOv5\\chinese_ocr\\test\\"+str(i)
# print(oldname,newname)
# os.rename(oldname,newname)
| <filename>data.py
# Ad-hoc script: walk an image-classification directory tree and print
# progress (an incrementing counter and each directory visited).
import os
# Directory of classified images (raw string keeps the doubled backslashes
# literal; mixes '/' and '\\' separators as originally written).
file_dir = r"C:/Users\\thyme\\Desktop\\加密图片分类备份\\test\\一"
i = 1
a = os.walk(file_dir)  # NOTE(review): unused generator; os.walk is called again below
b = None
for root, dirs, files in os.walk(file_dir):
    print(i)
    i += 1
    print(root)  # current directory path
    # print(dirs)   # all sub-directories under the current path
    # print(files)  # all non-directory files under the current path
print(b)
# Commented-out one-off: rename per-character class folders to numeric ids.
# name = ['一', '七', '三', '上', '下', '不', '中', '九', '了', '二', '五', '低', '保', '光', '八', '公', '六', '养', '内', '冷', '副', '加', '动', '十', '只', '右', '启', '呢', '味', '和', '响', '四', '地', '坏', '坐', '外', '多', '大', '好', '孩', '实', '小', '少', '左', '开', '当', '很', '得', '性', '手', '排', '控', '无', '是', '更', '有', '机', '来', '档', '比', '油', '泥', '灯', '电', '的', '皮', '盘', '真', '着', '短', '矮', '硬', '空', '级', '耗', '自', '路', '身', '软', '过', '近', '远', '里', '量', '长', '门', '问', '雨', '音', '高']
# for i in range(0,len(name)):
#     print(i)
#     oldname = "E:\\vsProject\\YOLOv5\\chinese_ocr\\test\\"+str(name[i])
#     newname = "E:\\vsProject\\YOLOv5\\chinese_ocr\\test\\"+str(i)
#     print(oldname,newname)
#     os.rename(oldname,newname)
| zh | 0.18645 | #当前目录路径 #print(dirs) #当前路径下所有子目录 #print(files) #当前路径下所有非目录子文件 # name = ['一', '七', '三', '上', '下', '不', '中', '九', '了', '二', '五', '低', '保', '光', '八', '公', '六', '养', '内', '冷', '副', '加', '动', '十', '只', '右', '启', '呢', '味', '和', '响', '四', '地', '坏', '坐', '外', '多', '大', '好', '孩', '实', '小', '少', '左', '开', '当', '很', '得', '性', '手', '排', '控', '无', '是', '更', '有', '机', '来', '档', '比', '油', '泥', '灯', '电', '的', '皮', '盘', '真', '着', '短', '矮', '硬', '空', '级', '耗', '自', '路', '身', '软', '过', '近', '远', '里', '量', '长', '门', '问', '雨', '音', '高'] # for i in range(0,len(name)): # print(i) # oldname = "E:\\vsProject\\YOLOv5\\chinese_ocr\\test\\"+str(name[i]) # newname = "E:\\vsProject\\YOLOv5\\chinese_ocr\\test\\"+str(i) # print(oldname,newname) # os.rename(oldname,newname) | 2.794455 | 3 |
pybayes/wrappers/__init__.py | strohel/PyBayes | 66 | 6618484 | """Wrappers to ease dual (interpreted & compiled) mode development"""
| """Wrappers to ease dual (interpreted & compiled) mode development"""
| en | 0.858408 | Wrappers to ease dual (interpreted & compiled) mode development | 0.85065 | 1 |
kelte/ui/modifier.py | brianbruggeman/rl | 0 | 6618485 | <filename>kelte/ui/modifier.py
from dataclasses import dataclass
import tcod as tdl
@dataclass()
class KeyboardModifiers:
    """State of every keyboard modifier key, convertible to and from an SDL
    key-modifier bitmask via ``sdl_mod``.

    The side-agnostic properties (``control``, ``shift``, ``alt``, ``meta``)
    read True when either side is held and write both sides at once.
    """

    # ------------------------------------------------------------------
    # Control
    # ------------------------------------------------------------------
    left_control: bool = False
    right_control: bool = False

    @property
    def control(self):
        return self.left_control or self.right_control

    @control.setter
    def control(self, value):
        self.left_control = self.right_control = value

    # ------------------------------------------------------------------
    # Shift
    # ------------------------------------------------------------------
    left_shift: bool = False
    right_shift: bool = False

    @property
    def shift(self):
        return self.left_shift or self.right_shift

    @shift.setter
    def shift(self, value):
        self.left_shift = self.right_shift = value

    # ------------------------------------------------------------------
    # Alt
    # ------------------------------------------------------------------
    right_alt: bool = False
    left_alt: bool = False

    @property
    def alt(self):
        return self.left_alt or self.right_alt

    @alt.setter
    def alt(self, value):
        self.left_alt = self.right_alt = value

    # ------------------------------------------------------------------
    # Meta
    # ------------------------------------------------------------------
    left_meta: bool = False
    right_meta: bool = False

    @property
    def meta(self):
        return self.left_meta or self.right_meta

    @meta.setter
    def meta(self, value):
        self.left_meta = self.right_meta = value

    # ------------------------------------------------------------------
    # Extras (lock/mode keys)
    # ------------------------------------------------------------------
    num_key: bool = False
    caps_key: bool = False
    mode_key: bool = False

    @property
    def sdl_mod(self) -> int:
        """Build the SDL key-modifier bitmask for the current state.

        See: https://wiki.libsdl.org/SDL_Keymod

        FIX: the previous implementation ANDed each ``KMOD_*`` flag with a
        bool (``flag & True == flag & 1``), which zeroed every flag except
        ``KMOD_LSHIFT``; flags must be OR-ed in when their key is held.  It
        also referenced the misspelled ``KMOD_RCTL`` (the setter below
        already used the correct ``KMOD_RCTRL``).
        """
        mod = 0
        for flag, pressed in (
            (tdl.lib.KMOD_LSHIFT, self.left_shift),
            (tdl.lib.KMOD_RSHIFT, self.right_shift),
            (tdl.lib.KMOD_LCTRL, self.left_control),
            (tdl.lib.KMOD_RCTRL, self.right_control),
            (tdl.lib.KMOD_LALT, self.left_alt),
            (tdl.lib.KMOD_RALT, self.right_alt),
            (tdl.lib.KMOD_LGUI, self.left_meta),
            (tdl.lib.KMOD_RGUI, self.right_meta),
            (tdl.lib.KMOD_NUM, self.num_key),
            (tdl.lib.KMOD_CAPS, self.caps_key),
            (tdl.lib.KMOD_MODE, self.mode_key),
        ):
            if pressed:
                mod |= flag
        return mod

    @sdl_mod.setter
    def sdl_mod(self, value):
        """Decompose an SDL key-modifier bitmask into the boolean fields."""
        self.left_shift = bool(tdl.lib.KMOD_LSHIFT & value)
        self.right_shift = bool(tdl.lib.KMOD_RSHIFT & value)
        self.left_control = bool(tdl.lib.KMOD_LCTRL & value)
        self.right_control = bool(tdl.lib.KMOD_RCTRL & value)
        self.left_alt = bool(tdl.lib.KMOD_LALT & value)
        self.right_alt = bool(tdl.lib.KMOD_RALT & value)
        self.left_meta = bool(tdl.lib.KMOD_LGUI & value)
        self.right_meta = bool(tdl.lib.KMOD_RGUI & value)
        self.num_key = bool(tdl.lib.KMOD_NUM & value)
        self.caps_key = bool(tdl.lib.KMOD_CAPS & value)
        self.mode_key = bool(tdl.lib.KMOD_MODE & value)

    def __bool__(self):
        """True when any modifier or lock key is active."""
        return bool(
            self.shift or self.alt or self.control or self.meta
            or self.caps_key or self.num_key or self.mode_key
        )

    def __eq__(self, other):
        """Equality on effective (side-agnostic) modifier state."""
        return (self.shift == other.shift
                and self.control == other.control
                and self.alt == other.alt
                and self.meta == other.meta
                and self.num_key == other.num_key
                and self.caps_key == other.caps_key
                and self.mode_key == other.mode_key)

    def __hash__(self):
        # FIX: the previous chain of conditional expressions parsed as one
        # nested ternary and could only ever yield a single value, not the
        # intended bitmask.  Build one bit per effective modifier instead,
        # consistent with __eq__ (which also ignores key side).
        bits = (self.shift, self.alt, self.control, self.meta,
                self.num_key, self.caps_key, self.mode_key)
        return sum(1 << i for i, bit in enumerate(bits) if bit)

    def __str__(self):
        """'+'-joined names of active modifiers, e.g. ``"CONTROL+ALT"``."""
        parts = []
        if self.shift:
            parts.append("SHIFT")
        if self.control:
            parts.append("CONTROL")
        if self.alt:
            parts.append("ALT")
        if self.meta:
            parts.append("META")
        if self.num_key:
            parts.append("NUM")
        if self.caps_key:
            parts.append("CAPS")
        if self.mode_key:
            parts.append("MODE")
        return "+".join(parts)
@dataclass()
class MouseModifier:
    """Per-button mouse state: which buttons are currently held and which
    were just released."""
    # ------------------------------------------------------------------
    # Buttons (NOTE(review): the original banner here said "Meta",
    # apparently copy-pasted from KeyboardModifiers)
    # ------------------------------------------------------------------
    left_button_held: bool = False
    right_button_held: bool = False
    middle_button_held: bool = False
    left_button_released: bool = False
    right_button_released: bool = False
    middle_button_released: bool = False
| <filename>kelte/ui/modifier.py
from dataclasses import dataclass
import tcod as tdl
@dataclass()
class KeyboardModifiers:
# ------------------------------------------------------------------
# Control
# ------------------------------------------------------------------
left_control: bool = False
right_control: bool = False
@property
def control(self):
return self.left_control or self.right_control
@control.setter
def control(self, value):
self.left_control = self.right_control = value
# ------------------------------------------------------------------
# Shift
# ------------------------------------------------------------------
left_shift: bool = False
right_shift: bool = False
@property
def shift(self):
return self.left_shift or self.right_shift
@shift.setter
def shift(self, value):
self.left_shift = self.right_shift = value
# ------------------------------------------------------------------
# Alt
# ------------------------------------------------------------------
right_alt: bool = False
left_alt: bool = False
@property
def alt(self):
return self.left_alt or self.right_alt
@alt.setter
def alt(self, value):
self.left_alt = self.right_alt = value
# ------------------------------------------------------------------
# Meta
# ------------------------------------------------------------------
left_meta: bool = False
right_meta: bool = False
@property
def meta(self):
return self.left_meta or self.right_meta
@meta.setter
def meta(self, value):
self.left_meta = self.right_meta = value
# ------------------------------------------------------------------
# Extras
# ------------------------------------------------------------------
num_key: bool = False
caps_key: bool = False
mode_key: bool = False
@property
def sdl_mod(self) -> int:
# See: https://wiki.libsdl.org/SDL_Keymod
mod = (
tdl.lib.KMOD_LSHIFT & self.left_shift
| tdl.lib.KMOD_RSHIFT & self.right_shift
| tdl.lib.KMOD_LCTRL & self.left_control
| tdl.lib.KMOD_RCTL & self.right_control
| tdl.lib.KMOD_LALT & self.left_alt
| tdl.lib.KMOD_RALT & self.right_alt
| tdl.lib.KMOD_LGUI & self.left_meta
| tdl.lib.KMOD_RGUI & self.right_meta
| tdl.lib.KMOD_NUM & self.num_key
| tdl.lib.KMOD_CAPS & self.caps_key
| tdl.lib.KMOD_MODE & self.mode_key
)
return mod
@sdl_mod.setter
def sdl_mod(self, value):
self.left_shift = bool(tdl.lib.KMOD_LSHIFT & value)
self.right_shift = bool(tdl.lib.KMOD_RSHIFT & value)
self.left_control = bool(tdl.lib.KMOD_LCTRL & value)
self.right_control = bool(tdl.lib.KMOD_RCTRL & value)
self.left_alt = bool(tdl.lib.KMOD_LALT & value)
self.right_alt = bool(tdl.lib.KMOD_RALT & value)
self.left_meta = bool(tdl.lib.KMOD_LGUI & value)
self.right_meta = bool(tdl.lib.KMOD_RGUI & value)
self.num_key = bool(tdl.lib.KMOD_NUM & value)
self.caps_key = bool(tdl.lib.KMOD_CAPS & value)
self.mode_key = bool(tdl.lib.KMOD_MODE & value)
def __bool__(self):
if (
self.shift
or self.alt
or self.control
or self.meta
or self.caps_key
or self.num_key
or self.mode_key
):
return True
return False
def __eq__(self, other):
if (self.shift == other.shift
and self.control == other.control
and self.alt == other.alt
and self.meta == other.meta
and self.num_key == other.num_key
and self.caps_key == other.caps_key
and self.mode_key == other.mode_key):
return True
return False
def __hash__(self):
number = (
1 << 0
if self.shift
else 0 + 1 << 1
if self.alt
else 0 + 1 << 2
if self.control
else 0 + 1 << 3
if self.meta
else 0 + 1 << 4
if self.num_key
else 0 + 1 << 5
if self.caps_key
else 0 + 1 << 6
if self.mode_key
else 0
)
return number
def __str__(self):
string = []
if self.shift:
string.append("SHIFT")
if self.control:
string.append("CONTROL")
if self.alt:
string.append("ALT")
if self.meta:
string.append("META")
if self.num_key:
string.append("NUM")
if self.caps_key:
string.append("CAPS")
if self.mode_key:
string.append("MODE")
return "+".join(string)
@dataclass()
class MouseModifier:
# ------------------------------------------------------------------
# Meta
# ------------------------------------------------------------------
left_button_held: bool = False
right_button_held: bool = False
middle_button_held: bool = False
left_button_released: bool = False
right_button_released: bool = False
middle_button_released: bool = False
| en | 0.143863 | # ------------------------------------------------------------------ # Control # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Shift # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Alt # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Meta # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Extras # ------------------------------------------------------------------ # See: https://wiki.libsdl.org/SDL_Keymod # ------------------------------------------------------------------ # Meta # ------------------------------------------------------------------ | 2.387546 | 2 |
molo/core/tests/test_media.py | Ishma59/molo | 25 | 6618486 | from django.core.files.base import ContentFile
from django.test import TestCase, Client
from six import b
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import MoloMedia, SiteLanguageRelation, Main, Languages
class MultimediaViewTest(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
main = Main.objects.all().first()
self.language_setting = Languages.objects.create(
site_id=main.get_site().pk)
self.english = SiteLanguageRelation.objects.create(
language_setting=self.language_setting,
locale='en',
is_active=True)
self.client = Client()
def add_media(self, media_type):
fake_file = ContentFile(b("media"))
fake_file.name = 'media.mp3'
self.media = MoloMedia.objects.create(
title="Test Media", file=fake_file, duration=100,
type=media_type, feature_in_homepage=True)
def test_audio_media(self):
self.add_media('audio')
response = self.client.get('/')
self.assertContains(
response,
'<div><audio controls><source src="{0}"'
'type="audio/mpeg">Click here to download'
'<a href="{0}">{1}</a></audio></div>'
.format(self.media.file.url, self.media.title),
html=True)
def test_video_media(self):
self.add_media('video')
response = self.client.get('/')
self.assertContains(
response,
'<video width="320" height="240" controls>'
'<source src=' + self.media.file.url + ' type="video/mp4">'
'Your browser does not support the video tag.'
'</video>', html=True)
| from django.core.files.base import ContentFile
from django.test import TestCase, Client
from six import b
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import MoloMedia, SiteLanguageRelation, Main, Languages
class MultimediaViewTest(TestCase, MoloTestCaseMixin):
    """Homepage must render featured MoloMedia as <audio>/<video> markup."""
    def setUp(self):
        # Build the Molo site tree and attach an active English language.
        self.mk_main()
        main = Main.objects.all().first()
        self.language_setting = Languages.objects.create(
            site_id=main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en',
            is_active=True)
        self.client = Client()
    def add_media(self, media_type):
        """Create a homepage-featured MoloMedia of the given type ('audio'/'video')."""
        fake_file = ContentFile(b("media"))
        # NOTE(review): the '.mp3' name is reused for the video fixture too.
        fake_file.name = 'media.mp3'
        self.media = MoloMedia.objects.create(
            title="Test Media", file=fake_file, duration=100,
            type=media_type, feature_in_homepage=True)
    def test_audio_media(self):
        self.add_media('audio')
        response = self.client.get('/')
        # html=True compares parsed HTML, so attribute order is irrelevant.
        self.assertContains(
            response,
            '<div><audio controls><source src="{0}"'
            'type="audio/mpeg">Click here to download'
            '<a href="{0}">{1}</a></audio></div>'
            .format(self.media.file.url, self.media.title),
            html=True)
    def test_video_media(self):
        self.add_media('video')
        response = self.client.get('/')
        self.assertContains(
            response,
            '<video width="320" height="240" controls>'
            '<source src=' + self.media.file.url + ' type="video/mp4">'
            'Your browser does not support the video tag.'
            '</video>', html=True)
| none | 1 | 2.035325 | 2 | |
zfnweb/info/views.py | jokerwho/newzf | 60 | 6618487 | <filename>zfnweb/info/views.py
import datetime
import os
import time
import traceback
import json
import requests
import openpyxl
from bs4 import BeautifulSoup
from api import GetInfo, Login, PLogin, Personal, Infos, Search
from django.utils.encoding import escape_uri_path
from django.http import HttpResponse, JsonResponse, FileResponse
from info.models import Students, Teachers
from mp.models import Config
from openpyxl.styles import Font, colors, Alignment
# Load deployment settings once at import time; base_url is the root URL of
# the upstream educational-administration (zhengfang) system.
with open('config.json', mode='r', encoding='utf-8') as f:
    config = json.loads(f.read())
base_url = config["base_url"]
def index(request):
    """Placeholder landing view for the info app; confirms routing works."""
    return HttpResponse('info_index here')
def calSex(id):
    """Map the 17th digit of an 18-digit Chinese citizen ID to a sex code.

    Args:
        id: ID-card number string (at least 17 characters; the parameter
            name shadows the builtin but is kept for caller compatibility).

    Returns:
        int: 1 for an odd digit (male), 2 for an even digit (female).
    """
    sex_digit = int(id[16:17])
    return 2 if sex_digit % 2 == 0 else 1
def diffList(list1, list2):
    """Return the elements of ``list1`` not present in ``list2``.

    Order and duplicates from ``list1`` are preserved.  Membership testing
    uses a set (O(1) per element) when ``list2``'s elements are hashable;
    it falls back to linear scans for unhashable elements (e.g. dicts) so
    the original semantics are kept for every input.
    """
    try:
        excluded = set(list2)
    except TypeError:
        excluded = list2  # unhashable elements: keep the original O(n*m) scan
    return [x for x in list1 if x not in excluded]
def mywarn(text, desp, xh, pswd):
    """Build the JSON error response for a failed upstream call and, when a
    ServerChan push endpoint is configured, forward the warning to it.

    Args:
        text: short error title; if it contains "错误" the user is told to
            retry, otherwise to revisit the course-notice page to refresh
            cookies.
        desp: longer description for the ServerChan push.
        xh: student number, appended to the push body.
        pswd: password, appended to the push body.  NOTE(review): pushing a
            plaintext password to an external service is a security smell.

    Returns:
        HttpResponse carrying a JSON ``{'err': ...}`` payload.

    Fixes vs. the original: removed the no-op ``text = text`` assignment and
    merged the two identical return branches.
    """
    ServerChan = config["ServerChan"]
    errData = {'err':text+',请返回重试'} if "错误" in text else {'err':text+',建议访问一下“课程通知”以便刷新cookies'}
    if ServerChan != "none":
        # NOTE(review): parameters are concatenated unescaped; this assumes
        # the configured URL already ends with '?' or '&' -- confirm.
        requests.get(ServerChan + 'text=' + text + '&desp=' + desp + '\n' + str(xh) + '\n' + str(pswd))
    return HttpResponse(json.dumps(errData, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def cacheData(xh, filename):
    """Load a student's cached JSON blob; return None on any cache miss.

    Cache layout: ``data/<first two digits of xh>/<xh>/<filename>.json``.

    Args:
        xh: student number (any value convertible with ``str``).
        filename: blob name without the ``.json`` suffix.

    Returns:
        The parsed JSON object, or None when the file is missing or the
        cached payload is an error record (has a truthy ``"err"`` key).
    """
    docurl = 'data/' + str(xh)[0:2] + '/' + str(xh) + '/'
    fileurl = docurl + str(filename) + '.json'
    # Keep the original side effect of creating the per-student directory
    # so a later newData() write succeeds; exist_ok flattens the old
    # exists/makedirs branching.
    os.makedirs(docurl, exist_ok=True)
    if not os.path.exists(fileurl):
        return None
    with open(fileurl, mode='r', encoding='utf-8') as o:
        result = json.loads(o.read())
    # A cached error payload counts as a miss so the caller refetches it.
    if result.get("err"):
        return None
    return result
def newData(xh, filename, content):
    """Write (overwrite) a student's cached JSON blob.

    Cache layout: ``data/<first two digits of xh>/<xh>/<filename>.json``.

    Args:
        xh: student number.
        filename: blob name without the ``.json`` suffix.
        content: already-serialized JSON string to store verbatim.

    The original if/else performed the identical write in both branches;
    ``os.makedirs(..., exist_ok=True)`` collapses it (and the stale
    commented-out variant was dropped).
    """
    docurl = 'data/' + str(xh)[0:2] + '/' + str(xh) + '/'
    fileurl = docurl + str(filename) + '.json'
    os.makedirs(docurl, exist_ok=True)
    with open(fileurl, mode='w', encoding='utf-8') as n:
        n.write(content)
def writeLog(content):
    """Append one line to today's log file under ``mylogs/``.

    A new day's file is seeded with a Chinese header line first.

    Fix: the original crashed with FileNotFoundError when the ``mylogs``
    directory did not exist yet; it is now created on demand.
    """
    date = datetime.datetime.now().strftime('%Y-%m-%d')
    os.makedirs('mylogs', exist_ok=True)
    filename = 'mylogs/' + date + '.log'
    if not os.path.exists(filename):
        with open(filename, mode='w', encoding='utf-8') as n:
            n.write('【%s】的日志记录' % date)
    with open(filename, mode='a', encoding='utf-8') as log:
        log.write('\n%s' % content)
def login_pages_set(xh):
    """Fetch a fresh login page from the upstream system and cache it.

    The result (cookies, tokens, captcha data) is stored as the student's
    'Storage' JSON blob via newData().
    """
    lgn = Login(base_url=base_url)
    storage = lgn.login_page()
    filename = ('Storage')
    newData(xh, filename, json.dumps(storage, ensure_ascii=False))
def login_pages_get(xh):
    """Return the cached login-page storage for ``xh`` (None when absent
    or when the cache recorded an error)."""
    return cacheData(xh, 'Storage')
def get_kaptcha_net(request):
    """HTTP endpoint: refresh the cached login page for the student in
    ``request.GET['xh']`` and return its captcha payload as JSON."""
    xh = request.GET.get("xh")
    login_pages_set(xh)
    kaptcha = login_pages_get(xh)["kaptcha"]
    return HttpResponse(json.dumps({'kaptcha':kaptcha}, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def get_kaptcha(xh):
    """Return a JSON HttpResponse carrying a fresh captcha for ``xh``,
    or a maintenance-mode error when the system is flagged as down."""
    myconfig = Config.objects.all().first()
    if myconfig.maintenance:
        payload = {'err':'教务系统出错维护中,请静待教务系统恢复正常!'}
        return HttpResponse(json.dumps(payload, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    login_pages_set(xh)
    storage = login_pages_get(xh)
    return HttpResponse(json.dumps({'kaptcha':storage["kaptcha"]}, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def update_cookies(request):
    """Re-login a known student to refresh their JWXT session cookies.

    Expects POST fields: xh (student id), pswd (password), kaptcha
    (captcha answer, only used when captcha login is enabled).  On
    success the new JSESSIONID/route are persisted on the Students row
    and the student's cached personal info ('Pinfo') is rewritten.

    NOTE(review): mixed return type — in captcha mode this returns a
    JSON HttpResponse, otherwise it returns the raw cookie jar for the
    caller to reuse directly; callers must match myconfig.isKaptcha.
    """
    myconfig = Config.objects.all().first()
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    try:
        xh = request.POST.get("xh")
        pswd = request.POST.get("pswd")
        kaptcha = request.POST.get("kaptcha")
        stu = Students.objects.get(studentId=int(xh))
        refreshTimes = int(stu.refreshTimes)
        startTime = time.time()
        content = ('【%s】[%s]更新cookies' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
        writeLog(content)
        # print('原cookies:')
        # print('{JSESSIONID:%s,route:%s}' % (stu.JSESSIONID,stu.route))
        lgn = Login(base_url=base_url)
        if myconfig.isKaptcha:
            # Captcha flow needs the cached login-page state (cookies/tokens/RSA n,e).
            storage = login_pages_get(xh)
            if storage is None:
                return get_kaptcha(xh)
            lgn.login_kaptcha(storage["cookies"],xh, pswd,storage["tokens"],storage["n"],storage["e"],kaptcha)
        else:
            lgn.login(xh, pswd)
        if lgn.runcode == 1:
            # runcode 1: login succeeded.
            cookies = lgn.cookies
            # person = GetInfo(base_url=base_url, cookies=cookies)
            NJSESSIONID = requests.utils.dict_from_cookiejar(cookies)["JSESSIONID"]
            if myconfig.isKaptcha:
                # In captcha mode the 'route' cookie comes from the cached login page.
                nroute = storage["cookies"]["route"]
            else:
                nroute = requests.utils.dict_from_cookiejar(cookies)["route"]
            ncookies = requests.utils.cookiejar_from_dict({"JSESSIONID":NJSESSIONID,"route":nroute})
            updateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            refreshTimes += 1
            Students.objects.filter(studentId=int(xh)).update(JSESSIONID=NJSESSIONID, route=nroute,
                                                              refreshTimes=refreshTimes, updateTime=updateTime)
            endTime = time.time()
            spendTime = endTime - startTime
            # print('新cookies:')
            content = ('【%s】更新cookies成功,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), spendTime))
            writeLog(content)
            person = GetInfo(base_url=base_url, cookies=ncookies)
            pinfo = person.get_pinfo()
            if stu.email == "无":
                # Backfill the email once real personal info becomes available.
                Students.objects.filter(studentId=int(xh)).update(email=pinfo["email"])
            # print(pinfo)
            filename = ('Pinfo')
            newData(xh, filename, json.dumps(pinfo, ensure_ascii=False))
            # print(requests.utils.dict_from_cookiejar(cookies))
            if myconfig.isKaptcha:
                return HttpResponse(json.dumps({'success':'更新cookies成功'}, ensure_ascii=False),
                                    content_type="application/json,charset=utf-8")
            else:
                return cookies
        elif lgn.runcode == 4:
            # runcode 4: captcha answer was wrong.
            return HttpResponse(json.dumps({'err':'验证码错误'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            # Any other runcode: network/token failure — log and tell the client to retry.
            content = ('【%s】[%s]更新cookies时网络或其他错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'网络或token问题,请返回重试'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
    except Exception as e:
        if str(e) == "'NoneType' object has no attribute 'get'":
            # JWXT returned nothing parsable — treat as the upstream system being down.
            return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        # if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
        #     return update_cookies(xh, pswd)
        else:
            traceback.print_exc()
            return mywarn("更新cookies未知错误",str(e),xh,pswd)
def writeToExcel(json,saveUrl):
    """Render a class grade sheet into an .xlsx workbook at ``saveUrl``.

    ``json`` is a dict with keys "lastCourses" (course names, used as
    column headers from column 3 on) and "res" (one entry per student:
    {"xh", "name", "grades": [{"n": course name, "g": grade}]}).
    NOTE(review): the first parameter shadows the imported ``json``
    module inside this function — do not call json.* in this scope.
    """
    lastCourses = json["lastCourses"]
    res = json["res"]
    excel = openpyxl.Workbook()
    sheet1 = excel.create_sheet('sheet1', index=0)
    # Header row: column 1 = student id, column 2 = name, then one column per course.
    sheet1.cell(row=1,column=1,value="学号").alignment = Alignment(horizontal='center', vertical='center')
    sheet1.cell(row=1,column=2,value="姓名").alignment = Alignment(horizontal='center', vertical='center')
    sheet1.column_dimensions['A'].width = 15
    for c in range(0,len(lastCourses)):
        sheet1.cell(row=1, column=c + 3, value=lastCourses[c]).alignment = Alignment(horizontal='center', vertical='center')
        # sheet1.column_dimensions[chr(67+c)].width = 8
    # One row per student; grades are matched to their course column by name.
    for items in range(0,len(res)):
        sheet1.cell(row=items+2,column=1,value=res[items]["xh"]).alignment = Alignment(horizontal='center', vertical='center')
        sheet1.cell(row=items+2,column=2,value=res[items]["name"]).alignment = Alignment(horizontal='center', vertical='center')
        for n in range(0,len(res[items]["grades"])):
            for cs in range(0,len(lastCourses)):
                if res[items]["grades"][n]["n"] == lastCourses[cs]:
                    try:
                        # Prefer a numeric cell; fall back to the raw string for non-numeric grades.
                        sheet1.cell(row=items+2,column=cs+3,value=int(res[items]["grades"][n]["g"])).alignment = Alignment(horizontal='center', vertical='center')
                    except:
                        sheet1.cell(row=items+2,column=cs+3,value=res[items]["grades"][n]["g"]).alignment = Alignment(horizontal='center', vertical='center')
    # Footer: a merged explanatory note plus a merged generation timestamp.
    sheet1.merge_cells(start_row=len(res)+2, start_column=1, end_row=len(res)+5, end_column=6)
    sheet1.cell(row=len(res)+2,column=1,value="1.表中数据来源须该班同学使用“西院助手”小程序访问并刷新该学期成绩\n2.留空为该同学还未刷新到最新,未使用小程序不会显示该同学行\n3.该表成绩为教务系统获取成绩,真实有效").alignment = Alignment(horizontal='center', vertical='center')
    sheet1.merge_cells(start_row=len(res)+2, start_column=7, end_row=len(res)+5, end_column=10)
    sheet1.cell(row=len(res)+2,column=7,value="生成时间:%s" % time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))).alignment = Alignment(horizontal='center', vertical='center')
    excel.save(saveUrl)
def get_pinfo(request):
    """Login endpoint: authenticate against JWXT and return the student's
    personal info as JSON.

    POST fields: xh (student id), pswd (password), kaptcha (captcha
    answer, used only when captcha login is enabled).  Two near-identical
    branches: one for students already in the DB (updates cookies and
    refresh count), one for first-time logins (creates the Students row).
    On success the personal info is cached under 'Pinfo' and returned.
    May also proxy to another API instance (apichange) or short-circuit
    in maintenance mode.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward the request verbatim to the configured peer API.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd"),
            'kaptcha':request.POST.get("kaptcha")
        }
        res = requests.post(url=myconfig.otherapi+"/info/pinfo",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["loginbad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法请求登录,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
            kaptcha = request.POST.get("kaptcha")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if Students.objects.filter(studentId=int(xh)):
            # Known student: refresh their session and bump the refresh counter.
            stu = Students.objects.get(studentId=int(xh))
            refreshTimes = int(stu.refreshTimes)
            try:
                startTime = time.time()
                lgn = Login(base_url=base_url)
                if myconfig.isKaptcha:
                    # Captcha flow needs the cached login-page state (cookies/tokens/RSA n,e).
                    storage = login_pages_get(xh)
                    if storage is None:
                        return get_kaptcha(xh)
                    lgn.login_kaptcha(storage["cookies"],xh, pswd,storage["tokens"],storage["n"],storage["e"],kaptcha)
                else:
                    lgn.login(xh, pswd)
                if lgn.runcode == 1:
                    # runcode 1: login succeeded — extract the session cookies.
                    cookies = lgn.cookies
                    JSESSIONID = requests.utils.dict_from_cookiejar(cookies)["JSESSIONID"]
                    if myconfig.isKaptcha:
                        route = storage["cookies"]["route"]
                    else:
                        route = requests.utils.dict_from_cookiejar(cookies)["route"]
                    ncookies = requests.utils.cookiejar_from_dict({"JSESSIONID":JSESSIONID,"route":route})
                    person = GetInfo(base_url=base_url, cookies=ncookies)
                    pinfo = person.get_pinfo()
                    if pinfo.get("idNumber")[-6:] == pswd:
                        # Password equals last 6 digits of the ID number: still the
                        # default password — force the student to complete setup first.
                        return HttpResponse(json.dumps({'err':"新生或专升本同学请在教务系统(jwxt.xcc.edu.cn)完善信息并审核且修改密码后登陆小程序!"}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    if pinfo.get('err'):
                        if pinfo.get('err') == "Connect Timeout":
                            return mywarn("登录超时","",xh,pswd)
                        else:
                            # NOTE(review): returns the raw dict, not an HttpResponse — verify callers.
                            return pinfo
                    refreshTimes += 1
                    updateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    Students.objects.filter(studentId=int(xh)).update(JSESSIONID=JSESSIONID, route=route,
                                                                      refreshTimes=refreshTimes, updateTime=updateTime)
                    endTime = time.time()
                    spendTime = endTime - startTime
                    print('【%s】登录了' % pinfo["name"])
                    content = ('【%s】[%s]第%d次访问登录了,耗时%.2fs' % (
                        datetime.datetime.now().strftime('%H:%M:%S'), pinfo["name"], refreshTimes, spendTime))
                    writeLog(content)
                    filename = ('Pinfo')
                    newData(xh, filename, json.dumps(pinfo, ensure_ascii=False))
                    return HttpResponse(json.dumps(pinfo, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif lgn.runcode == 4:
                    # runcode 4: wrong captcha.
                    return HttpResponse(json.dumps({'err':'验证码错误'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif lgn.runcode == 2:
                    # runcode 2: bad credentials.
                    content = ('【%s】[%s]在登录时学号或者密码错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    return HttpResponse(json.dumps({'err':'学号或者密码错误'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]在登录时网络或其它错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    return HttpResponse(json.dumps({'err':'网络或token问题,请返回重试'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
            except Exception as e:
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # return get_pinfo(request)
                    return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]登录时出错' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    traceback.print_exc()
                    return mywarn("登录未知错误",str(e),xh,pswd)
        else:
            # First-time login: same flow, but create the Students row afterwards.
            try:
                startTime = time.time()
                lgn = Login(base_url=base_url)
                if myconfig.isKaptcha:
                    storage = login_pages_get(xh)
                    if storage is None:
                        return get_kaptcha(xh)
                    lgn.login_kaptcha(storage["cookies"],xh, pswd,storage["tokens"],storage["n"],storage["e"],kaptcha)
                else:
                    lgn.login(xh, pswd)
                if lgn.runcode == 1:
                    cookies = lgn.cookies
                    JSESSIONID = requests.utils.dict_from_cookiejar(cookies)["JSESSIONID"]
                    if myconfig.isKaptcha:
                        route = storage["cookies"]["route"]
                    else:
                        route = requests.utils.dict_from_cookiejar(cookies)["route"]
                    ncookies = requests.utils.cookiejar_from_dict({"JSESSIONID":JSESSIONID,"route":route})
                    person = GetInfo(base_url=base_url, cookies=ncookies)
                    pinfo = person.get_pinfo()
                    if pinfo.get("idNumber")[-6:] == pswd:
                        return HttpResponse(json.dumps({'err':"新生或专升本同学请在教务系统(jwxt.xcc.edu.cn)完善信息并审核且修改密码后登陆小程序!"}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    if pinfo.get('err'):
                        if pinfo.get('err') == "Connect Timeout":
                            return mywarn("登录超时","",xh,pswd)
                        else:
                            return pinfo
                    updateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    # Persist the new student with sex derived from the ID number.
                    newstu = Students.create(int(pinfo["studentId"]), pinfo["name"], calSex(pinfo["idNumber"]), pinfo["collegeName"],
                                             pinfo["majorName"], pinfo["className"], pinfo["phoneNumber"],
                                             pinfo["birthDay"], pinfo["graduationSchool"], pinfo["domicile"],
                                             pinfo["email"], pinfo["national"], pinfo["idNumber"],
                                             JSESSIONID, route, updateTime)
                    newstu.save()
                    endTime = time.time()
                    spendTime = endTime - startTime
                    print('【%s】第一次登录' % pinfo["name"])
                    content = ('【%s】[%s]第一次登录,耗时%.2fs' % (
                        datetime.datetime.now().strftime('%H:%M:%S'), pinfo["name"], spendTime))
                    writeLog(content)
                    filename = ('Pinfo')
                    newData(xh, filename, json.dumps(pinfo, ensure_ascii=False))
                    return HttpResponse(json.dumps(pinfo, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif lgn.runcode == 4:
                    return HttpResponse(json.dumps({'err':'验证码错误'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif lgn.runcode == 2:
                    content = ('【%s】[%s]在第一次登录时学号或者密码错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    return HttpResponse(json.dumps({'err':'学号或者密码错误'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]在第一次登录时网络或其它错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    return HttpResponse(json.dumps({'err':'网络或token问题,请返回重试'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
            except Exception as e:
                # print(e)
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # return get_pinfo(request)
                    return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]第一次登录时出错' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    if str(e) == "'NoneType' object has no attribute 'get'":
                        return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    traceback.print_exc()
                    return mywarn("登录未知错误",str(e),xh,pswd)
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def refresh_class(request):
    """Endpoint: re-fetch the student's current class name from JWXT and
    persist it when it changed.

    POST fields: xh, pswd.  Uses the stored session cookies; on failure
    falls back to a captcha re-login (isKaptcha) or a direct cookie
    refresh, then retries once.  Returns a JSON success/err payload.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward to the configured peer API.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd")
        }
        res = requests.post(url=myconfig.otherapi+"/info/refreshclass",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["loginbad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法请求登录,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            content = ('【%s】[%s]未登录更新班级信息' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
            try:
                startTime = time.time()
                print('【%s】更新了班级信息' % stu.name)
                # Rebuild the requests cookie jar from the stored session fields.
                JSESSIONID = str(stu.JSESSIONID)
                route = str(stu.route)
                cookies_dict = {
                    'JSESSIONID': JSESSIONID,
                    'route': route
                }
                cookies = requests.utils.cookiejar_from_dict(cookies_dict)
                person = GetInfo(base_url=base_url, cookies=cookies)
                nowClass = person.get_now_class()
                try:
                    # nowClass may be an err-dict instead of a class-name string.
                    if nowClass.get('err'):
                        if nowClass.get('err') == "Connect Timeout":
                            return mywarn("更新班级超时","",xh,pswd)
                except:
                    pass
                if stu.className == nowClass:
                    return HttpResponse(json.dumps({'err':"你的班级并未发生变化~"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                Students.objects.filter(studentId=int(xh)).update(className=nowClass)
                endTime = time.time()
                spendTime = endTime - startTime
                content = ('【%s】[%s]更新了班级信息,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
                writeLog(content)
                return HttpResponse(json.dumps({'success':"你已成功变更到【"+ nowClass + "】!",'class':nowClass}, ensure_ascii=False), content_type="application/json,charset=utf-8")
            except Exception as e:
                content = ('【%s】[%s]更新班级信息出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                writeLog(content)
                if str(e) == "'NoneType' object has no attribute 'get'":
                    return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # Transient connection failure — retry the whole request once.
                    return refresh_class(request)
                if 'Expecting value' not in str(e):
                    traceback.print_exc()
                    return mywarn("更新班级错误",str(e),xh,pswd)
                # 'Expecting value' means the session expired (HTML instead of JSON):
                # re-authenticate and retry.
                if myconfig.isKaptcha:
                    return get_kaptcha(xh)
                else:
                    sta = update_cookies(request)
                    person = GetInfo(base_url=base_url, cookies=sta)
                    nowClass = person.get_now_class()
                    if stu.className == nowClass:
                        return HttpResponse(json.dumps({'err':"你的班级并未发生变化~"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                    Students.objects.filter(studentId=int(xh)).update(className=nowClass)
                    return HttpResponse(json.dumps({'success':"你已成功变更到【"+ nowClass + "】!",'class':nowClass}, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def get_message(request):
    """Endpoint: fetch the student's JWXT notification messages as JSON.

    POST fields: xh, pswd.  Uses the stored session cookies; on a
    session-expired parse error it re-authenticates (captcha flow or
    cookie refresh) and retries once.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward to the configured peer API.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd")
        }
        res = requests.post(url=myconfig.otherapi+"/info/message",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["jwxtbad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法访问(可能是学校机房断电或断网所致),小程序暂时无法登录和更新,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            content = ('【%s】[%s]未登录访问消息' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
            try:
                startTime = time.time()
                # print('【%s】查看了消息' % stu.name)
                # Rebuild the requests cookie jar from the stored session fields.
                JSESSIONID = str(stu.JSESSIONID)
                route = str(stu.route)
                cookies_dict = {
                    'JSESSIONID': JSESSIONID,
                    'route': route
                }
                cookies = requests.utils.cookiejar_from_dict(cookies_dict)
                person = GetInfo(base_url=base_url, cookies=cookies)
                message = person.get_message()
                endTime = time.time()
                spendTime = endTime - startTime
                # content = ('【%s】[%s]访问了消息,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
                # writeLog(content)
                return HttpResponse(json.dumps(message, ensure_ascii=False), content_type="application/json,charset=utf-8")
            except Exception as e:
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # return get_message(request)
                    return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]访问消息出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                    writeLog(content)
                    if str(e) == 'Expecting value: line 1 column 1 (char 0)':
                        # Whole response unparsable — JWXT itself is down.
                        return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    if str(e) != 'Expecting value: line 6 column 1 (char 11)':
                        traceback.print_exc()
                        return mywarn("消息请求错误",str(e),xh,pswd)
                    # This specific parse error means the session expired:
                    # re-authenticate and retry once.
                    if myconfig.isKaptcha:
                        return get_kaptcha(xh)
                    else:
                        sta = update_cookies(request)
                        person = GetInfo(base_url=base_url, cookies=sta)
                        message = person.get_message()
                        return HttpResponse(json.dumps(message, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def get_study(request):
    """Endpoint: fetch the student's academic progress ('study') summary.

    POST fields: xh, pswd, refresh ("no" = serve the cached 'Study'
    payload when available).  Updates the student's stored GPA, rewrites
    the 'Study' cache, and returns the payload as JSON.  On a timeout or
    session-expired error it re-authenticates and retries once.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward to the configured peer API.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd"),
            'refresh':request.POST.get("refresh")
        }
        res = requests.post(url=myconfig.otherapi+"/info/study",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["studybad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法请求学业,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
            refresh = request.POST.get("refresh")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            content = ('【%s】[%s]未登录访问学业情况' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
            if refresh == "no":
                # Serve the cached payload when the client did not ask for a refresh.
                filename = ('Study')
                cache = cacheData(xh, filename)
                if cache is not None:
                    # print('cache')
                    print('【%s】查看了学业缓存' % stu.name)
                    return HttpResponse(json.dumps(cache, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    pass
            try:
                startTime = time.time()
                print('【%s】查看了学业情况' % stu.name)
                # Rebuild the requests cookie jar from the stored session fields.
                JSESSIONID = str(stu.JSESSIONID)
                route = str(stu.route)
                cookies_dict = {
                    'JSESSIONID': JSESSIONID,
                    'route': route
                }
                cookies = requests.utils.cookiejar_from_dict(cookies_dict)
                person = GetInfo(base_url=base_url, cookies=cookies)
                study = person.get_study(xh)
                if study.get("err") == 'Connect Timeout':
                    # Session likely expired: re-authenticate and retry once.
                    if myconfig.isKaptcha:
                        return get_kaptcha(xh)
                    else:
                        sta = update_cookies(request)
                        person = GetInfo(base_url=base_url, cookies=sta)
                        study = person.get_study(xh)
                    # NOTE(review): this ternary condition is always true (str() never
                    # returns None), so the "init" fallback is unreachable — confirm intent.
                    gpa = str(study["gpa"]) if str(study["gpa"]) !="" or str(study["gpa"]) is not None else "init"
                    Students.objects.filter(studentId=int(xh)).update(gpa=gpa)
                    filename = ('Study')
                    newData(xh, filename, json.dumps(study, ensure_ascii=False))
                    return HttpResponse(json.dumps(study, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                endTime = time.time()
                spendTime = endTime - startTime
                content = ('【%s】[%s]访问了学业情况,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
                writeLog(content)
                gpa = str(study["gpa"]) if str(study["gpa"]) !="" or str(study["gpa"]) is not None else "init"
                Students.objects.filter(studentId=int(xh)).update(gpa=gpa)
                filename = ('Study')
                newData(xh, filename, json.dumps(study, ensure_ascii=False))
                return HttpResponse(json.dumps(study, ensure_ascii=False), content_type="application/json,charset=utf-8")
            except Exception as e:
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # return get_study(request)
                    return HttpResponse(json.dumps({'err':'更新出现问题,请待教务系统修复'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif "list index out of range" in str(e) and int(xh[0:2]) >= int(myconfig.nGrade[2:4]):
                    # Newer cohorts may simply have no study data yet.
                    return HttpResponse(json.dumps({'err':'暂无学业信息或请先刷新“我的成绩”后访问'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]访问学业情况出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                    writeLog(content)
                    if str(e) != 'list index out of range':
                        traceback.print_exc()
                        return mywarn("学业请求错误",str(e),xh,pswd)
                    # 'list index out of range' for older cohorts: session expired —
                    # re-authenticate and retry once.
                    if myconfig.isKaptcha:
                        return get_kaptcha(xh)
                    else:
                        sta = update_cookies(request)
                        person = GetInfo(base_url=base_url, cookies=sta)
                        study = person.get_study(xh)
                        gpa = str(study["gpa"]) if str(study["gpa"]) !="" or str(study["gpa"]) is not None else "init"
                        Students.objects.filter(studentId=int(xh)).update(gpa=gpa)
                        filename = ('Study')
                        newData(xh, filename, json.dumps(study, ensure_ascii=False))
                        return HttpResponse(json.dumps(study, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def get_grade(request):
    """Endpoint: fetch the student's grades for one academic year/term.

    POST fields: xh, pswd, year, term, refresh ("no" = may serve the
    cached 'Grades-<year><term>' payload).  Updates the stored GPA,
    rewrites the per-term grade cache, and returns the payload as JSON.
    On a session-expired parse error it re-authenticates and retries.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward to the configured peer API.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd"),
            'year':request.POST.get("year"),
            'term':request.POST.get("term"),
            'refresh':request.POST.get("refresh")
        }
        res = requests.post(url=myconfig.otherapi,data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["gradebad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法请求成绩,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
            year = request.POST.get("year")
            term = request.POST.get("term")
            refresh = request.POST.get("refresh")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            content = ('【%s】[%s]未登录访问成绩' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
            if refresh == "no":
                # Serve the cached per-term payload when no refresh is requested.
                filename = ('Grades-%s%s' % (str(year), str(term)))
                cache = cacheData(xh, filename)
                if cache is not None:
                    # print('cache')
                    # NOTE(review): isLast is defined but never called, and its body
                    # appears truncated/mis-indented — looks like dead/abandoned code
                    # meant to force a refresh for the current term; confirm and remove.
                    def isLast(ny,nt,y,t):
                        ny = (myconfig.nGrade)[0:4]
                        nt = (myconfig.nGrade)[4:5]
                        if str(year) == ny:
                            pass
                        else:
                            if int(nt)-1 == 0 and int(term)==2:
                                pass
                            else:
                    print('【%s】查看了%s-%s的成绩缓存' % (stu.name, year, term))
                    return HttpResponse(json.dumps(cache, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    pass
            try:
                startTime = time.time()
                print('【%s】查看了%s-%s的成绩' % (stu.name, year, term))
                # Rebuild the requests cookie jar from the stored session fields.
                JSESSIONID = str(stu.JSESSIONID)
                route = str(stu.route)
                cookies_dict = {
                    'JSESSIONID': JSESSIONID,
                    'route': route
                }
                cookies = requests.utils.cookiejar_from_dict(cookies_dict)
                person = GetInfo(base_url=base_url, cookies=cookies)
                grade = person.get_grade(year, term)
                if grade.get("err"):
                    if grade.get("err") == "Connect Timeout":
                        # update_cookies(xh, pswd)
                        # return mywarn("成绩超时","",xh,pswd)
                        return get_kaptcha(xh)
                    elif grade.get("err") == "No Data":
                        # Distinguish "student has no grades at all" (new cohorts)
                        # from "no grades for this particular term yet".
                        if int(xh[0:2]) > int(myconfig.nGrade[2:4]):
                            return HttpResponse(json.dumps({'err':"当前你还没有任何成绩信息"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                        else:
                            return HttpResponse(json.dumps({'err':"还没有" + year+"-"+term + "学期的成绩,点击顶栏也看看以前的吧~"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                    elif grade.get("err") == "Error Term":
                        return HttpResponse(json.dumps({'err':"网络问题,请重新访问请求课程"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                # NOTE(review): this condition is always true (the or-branch can't fail),
                # so the "init" fallback is unreachable — confirm intent.
                Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa") if grade.get("gpa")!="" or grade.get("gpa") is not None else "init")
                endTime = time.time()
                spendTime = endTime - startTime
                content = ('【%s】[%s]访问了%s-%s的成绩,耗时%.2fs' % (
                    datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime))
                writeLog(content)
                filename = ('Grades-%s%s' % (str(year), str(term)))
                newData(xh, filename, json.dumps(grade, ensure_ascii=False))
                # print('write')
                return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
            except Exception as e:
                # print(e)
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # return get_grade(request)
                    return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]访问成绩出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                    writeLog(content)
                    if str(e) == 'Expecting value: line 1 column 1 (char 0)':
                        # Whole response unparsable — JWXT itself is down.
                        return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    if str(e) != 'Expecting value: line 3 column 1 (char 4)':
                        traceback.print_exc()
                        return mywarn("成绩请求错误",str(e),xh,pswd)
                    # This specific parse error means the session expired:
                    # re-authenticate and retry once.
                    if myconfig.isKaptcha:
                        return get_kaptcha(xh)
                    else:
                        sta = update_cookies(request)
                        person = GetInfo(base_url=base_url, cookies=sta)
                        grade = person.get_grade(year, term)
                        if grade.get("gpa") == "" or grade.get("gpa") is None:
                            return HttpResponse(json.dumps({'err':'平均学分绩点获取失败,请重试~'}, ensure_ascii=False),
                                                content_type="application/json,charset=utf-8")
                        Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa"))
                        filename = ('Grades-%s%s' % (str(year), str(term)))
                        newData(xh, filename, json.dumps(grade, ensure_ascii=False))
                        return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
# def get_grade2(request):
# myconfig = Config.objects.all().first()
# if myconfig.apichange:
# data = {
# 'xh':request.POST.get("xh"),
# 'pswd':request.POST.get("pswd"),
# 'year':request.POST.get("year"),
# 'term':request.POST.get("term"),
# 'refresh':request.POST.get("refresh")
# }
# res = requests.post(url=myconfig.otherapi+"/info/grade",data=data)
# return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# if myconfig.maintenance:
# return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# # if mpconfig["gradebad"]:
# # return HttpResponse(json.dumps({'err':'当前教务系统无法请求成绩,请待学校修复!'}, ensure_ascii=False),
# # content_type="application/json,charset=utf-8")
# if request.method == 'POST':
# if request.POST:
# xh = request.POST.get("xh")
# pswd = request.POST.get("pswd")
# year = request.POST.get("year")
# term = request.POST.get("term")
# refresh = request.POST.get("refresh")
# else:
# return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# if not Students.objects.filter(studentId=int(xh)):
# content = ('【%s】[%s]未登录访问成绩' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
# writeLog(content)
# return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# else:
# stu = Students.objects.get(studentId=int(xh))
# if refresh == "no":
# filename = ('GradesN-%s%s' % (str(year), str(term)))
# cache = cacheData(xh, filename)
# if cache is not None:
# # print('cache')
# print('【%s】查看了%s-%s的成绩缓存' % (stu.name, year, term))
# return HttpResponse(json.dumps(cache, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# else:
# pass
# try:
# startTime = time.time()
# print('【%s】查看了%s-%s的成绩' % (stu.name, year, term))
# JSESSIONID = str(stu.JSESSIONID)
# route = str(stu.route)
# cookies_dict = {
# 'JSESSIONID': JSESSIONID,
# 'route': route
# }
# cookies = requests.utils.cookiejar_from_dict(cookies_dict)
# person = GetInfo(base_url=base_url, cookies=cookies)
# grade = person.get_grade2(year, term)
# if grade.get("err") == "请求超时,鉴于教务系统特色,已帮你尝试重新登录,重试几次,还不行请麻烦你自行重新登录,或者在关于里面反馈!当然,也可能是教务系统挂了~":
# update_cookies(xh, pswd)
# return HttpResponse(json.dumps({'err':grade.get("err")}, ensure_ascii=False), content_type="application/json,charset=utf-8")
# if grade.get("err") == "看起来你这学期好像还没有出成绩,点击顶栏也看看以前的吧~":
# return HttpResponse(json.dumps({'err':grade.get("err")}, ensure_ascii=False), content_type="application/json,charset=utf-8")
# Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa"))
# endTime = time.time()
# spendTime = endTime - startTime
# content = ('【%s】[%s]访问了%s-%s的成绩,耗时%.2fs' % (
# datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime))
# writeLog(content)
# filename = ('GradesN-%s%s' % (str(year), str(term)))
# newData(xh, filename, json.dumps(grade, ensure_ascii=False))
# # print('write')
# return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
# except Exception as e:
# # print(e)
# if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# # return get_grade2(request)
# return HttpResponse(json.dumps({'err':'更新出现问题,请待教务系统修复'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# else:
# content = ('【%s】[%s]访问成绩出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
# writeLog(content)
# if str(e) == 'Expecting value: line 1 column 1 (char 0)':
# return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# if str(e) != 'Expecting value: line 3 column 1 (char 4)':
# traceback.print_exc()
# return mywarn("成绩请求错误",str(e),xh,pswd)
# sta = update_cookies(xh, pswd)
# person = GetInfo(base_url=base_url, cookies=sta)
# grade = person.get_grade2(year, term)
# if grade.get("gpa") == "" or grade.get("gpa") is None:
# return HttpResponse(json.dumps({'err':'平均学分绩点获取失败,请重试~'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa"))
# filename = ('GradesN-%s%s' % (str(year), str(term)))
# newData(xh, filename, json.dumps(grade, ensure_ascii=False))
# return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
# else:
# return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
def get_schedule(request):
    """Return a student's course schedule for the given year/term as JSON.

    POST fields: xh (student id), pswd, year, term, refresh.
    refresh == "no" serves the on-disk cache when present; otherwise the
    upstream system is queried with the stored session cookies and the
    result is cached via newData().  On session expiry the view re-logins
    (or asks for a captcha) and retries once.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward the whole request to the alternate API host.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd"),
            'year':request.POST.get("year"),
            'term':request.POST.get("term"),
            'refresh':request.POST.get("refresh")
        }
        res = requests.post(url=myconfig.otherapi+"/info/schedule",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        # Global maintenance switch: reject every request with a notice.
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["schedulebad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法请求课表,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
            year = request.POST.get("year")
            term = request.POST.get("term")
            refresh = request.POST.get("refresh")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            # Unknown student id -> never logged in through the app.
            content = ('【%s】[%s]未登录访问课程' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
        if refresh == "no":
            # Client allows cached data: serve the cached schedule file if any.
            filename = ('Schedules-%s%s' % (str(year), str(term)))
            cache = cacheData(xh, filename)
            if cache is not None:
                # print('cache')
                print('【%s】查看了%s-%s的课表缓存' % (stu.name, year, term))
                return HttpResponse(json.dumps(cache, ensure_ascii=False),
                                    content_type="application/json,charset=utf-8")
            else:
                pass
        try:
            startTime = time.time()
            print('【%s】查看了%s-%s的课程' % (stu.name, year, term))
            # Rebuild the requests cookie jar from the session persisted at login.
            JSESSIONID = str(stu.JSESSIONID)
            route = str(stu.route)
            cookies_dict = {
                'JSESSIONID': JSESSIONID,
                'route': route
            }
            cookies = requests.utils.cookiejar_from_dict(cookies_dict)
            person = GetInfo(base_url=base_url, cookies=cookies)
            schedule = person.get_schedule(year, term)
            if schedule.get('err'):
                if schedule.get('err') == "Connect Timeout":
                    return mywarn("更新课程超时","",xh,pswd)
                elif schedule.get('err') == "Error Term":
                    return HttpResponse(json.dumps({'err':"网络问题,请重新访问请求课程"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
            endTime = time.time()
            spendTime = endTime - startTime
            content = ('【%s】[%s]访问了%s-%s的课程,耗时%.2fs' % (
                datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime))
            writeLog(content)
            # Persist so that later refresh == "no" requests hit the cache.
            filename = ('Schedules-%s%s' % (str(year), str(term)))
            newData(xh, filename, json.dumps(schedule, ensure_ascii=False))
            # print('write')
            return HttpResponse(json.dumps(schedule, ensure_ascii=False), content_type="application/json,charset=utf-8")
        except Exception as e:
            if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                # return get_schedule(request)
                return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
                                    content_type="application/json,charset=utf-8")
            else:
                content = ('【%s】[%s]访问课程出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                writeLog(content)
                if str(e) == 'Expecting value: line 1 column 1 (char 0)':
                    return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                if str(e) != 'Expecting value: line 3 column 1 (char 4)':
                    traceback.print_exc()
                    return mywarn("课程请求错误",str(e),xh,pswd)
                # Remaining case: session expired; re-login (or request captcha)
                # and retry once with the fresh cookies.
                if myconfig.isKaptcha:
                    return get_kaptcha(xh)
                else:
                    sta = update_cookies(request)
                    person = GetInfo(base_url=base_url, cookies=sta)
                    schedule = person.get_schedule(year, term)
                    filename = ('Schedules-%s%s' % (str(year), str(term)))
                    newData(xh, filename, json.dumps(schedule, ensure_ascii=False))
                    return HttpResponse(json.dumps(schedule, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def joinDetail(request):
    """Summarize registered users grouped by college, major or class.

    GET param ``type`` selects the grouping: "college", "major" or "class".
    Returns JSON with the total user count, the number of distinct groups
    and per-group member counts sorted descending.

    Fix: the original had three near-identical branches and raised
    NameError (undefined ``res``) for any other ``type`` value; unknown
    types now get an explicit error response.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward to the alternate API host.
        res = requests.get(url=myconfig.otherapi+"/info/joindetail?type=" + request.GET.get("type"))
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    group_type = request.GET.get("type")
    allUsers = Students.objects.filter().all().count()
    # Map the requested grouping onto the Students model field; the JSON
    # keys ("collegeName"/"collegeNum", ...) match the original responses.
    fields = {'college': 'collegeName', 'major': 'majorName', 'class': 'className'}
    field = fields.get(group_type)
    if field is None:
        return HttpResponse(json.dumps({'err': 'type参数错误'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    groups = Students.objects.values(field).distinct().order_by(field)
    detail = [{
        field: i[field],
        group_type + 'Num': Students.objects.filter(**{field: i[field]}).count()
    } for i in groups]
    ndetail = sorted(detail, key=lambda keys: keys[group_type + 'Num'], reverse=True)
    res = {
        'allUsers': allUsers,
        group_type + 'Num': int(groups.count()),
        'detail': ndetail
    }
    return HttpResponse(json.dumps(res, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def get_position(request):
    """Return a student's GPA rank within their major cohort and class.

    GET param ``xh`` is the student id.  majorCount/classCount are 1-based
    ranks; nMajorCount/nClassCount count peers whose GPA is still "init"
    (never fetched) and are excluded from ranking.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward to the alternate API host.
        res = requests.get(url=myconfig.otherapi+"/info/position?xh=" + request.GET.get("xh"))
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    #print(request)
    xh = request.GET.get("xh")
    if xh is None:
        return HttpResponse(json.dumps({'err':'参数不全'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if not Students.objects.filter(studentId=int(xh)):
        return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    else:
        stu = Students.objects.get(studentId=int(xh))
        majorName = stu.majorName
        className = stu.className
        # Major cohort = same major AND same entrance year (first 2 digits of xh).
        majorNum = Students.objects.filter(majorName=majorName,studentId__startswith=int(xh[0:2])).all().count()
        classNum = Students.objects.filter(className=className).all().count()
        if stu.gpa == "init":
            # GPA never fetched for this student; rank cannot be computed.
            gpa = "init"
            return HttpResponse(json.dumps({'gpa': gpa,'majorCount':0,'classCount':0,'majorNum':majorNum,'classNum':classNum,'nMajorCount':"init",'nClassCount':"init"}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            gpa = float(stu.gpa)
        majorCount = 1
        classCount = 1
        nMajorCount = 0
        nClassCount = 0
        # NOTE(review): gpa is stored as a string (compared to "init",
        # converted with float()), so order_by('-gpa') presumably sorts
        # lexicographically; the loops re-compare numerically and stop at
        # the first peer whose GPA is not higher — verify ordering is
        # reliable for multi-digit GPAs.
        for m in Students.objects.filter(majorName=majorName).all().order_by('-gpa'):
            if m.gpa == "init" and str(m.studentId)[0:2] == xh[0:2]:
                nMajorCount += 1
            elif m.gpa == "init" or str(m.studentId)[0:2] != xh[0:2]:
                pass
            elif gpa >= float(m.gpa):
                break
            else:
                majorCount += 1
        for c in Students.objects.filter(className=className).all().order_by('-gpa'):
            if c.gpa == "init":
                nClassCount += 1
            elif gpa >= float(c.gpa):
                break
            else:
                classCount += 1
        return HttpResponse(json.dumps({'gpa': str(gpa),'majorCount':majorCount,'nMajorCount':nMajorCount,'nClassCount':nClassCount,'classCount':classCount,'majorNum':majorNum,'classNum':classNum}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def _teacher_search(tname):
    """Look up teachers whose name contains *tname*; returns a list of dicts."""
    return [{
        'name': s.name,
        'collegeName': s.collegeName,
        'title': s.title,
        'phoneNumber': s.phoneNumber
    } for s in Teachers.objects.filter(name__contains=tname).order_by('name')]


def searchTeacher(request):
    """Search the teacher directory, rate-limited per student per day.

    Accepts GET or POST with ``xh`` (student id) and ``tname`` (name part).
    Ordinary students get 2 lookups per day, class monitors 4; a lookup
    only consumes quota when it returns at least one result.  The quota is
    stored on the student as ``searchTimes = "YYYY-MM-DD,<remaining>"``.

    Fix: the lookup/serialization logic was triplicated; it now lives in
    the `_teacher_search` helper.  Behaviour is unchanged, including that
    the first search of a new day does not decrement the fresh quota.
    """
    myconfig = Config.objects.all().first()
    if request.method == "GET":
        xh = request.GET.get("xh")
        tname = request.GET.get("tname")
        if myconfig.apichange:
            res = requests.get(url=myconfig.otherapi+"/info/steacher?xh=" + request.GET.get("xh") + "&tname=" + request.GET.get("tname"))
            return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
    elif request.method == "POST":
        xh = request.POST.get("xh")
        tname = request.POST.get("tname")
        if myconfig.apichange:
            data = {
                'xh':request.POST.get("xh"),
                'tname':request.POST.get("tname")
            }
            res = requests.post(url=myconfig.otherapi+"/info/steacher",data=data)
            return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
    if xh is None or tname is None:
        return HttpResponse(json.dumps({'err': '参数不全'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if not Students.objects.filter(studentId=int(xh)):
        return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    date = datetime.datetime.now().strftime('%Y-%m-%d')
    stu = Students.objects.filter(studentId=int(xh))
    thisStu = Students.objects.get(studentId=int(xh))
    lastTime = thisStu.searchTimes.split(',')[0]
    remainTimes = thisStu.searchTimes.split(',')[1]
    if lastTime == date:
        # Same day: honour the remaining quota.
        if remainTimes == '0':
            return HttpResponse(json.dumps({'err': '同学,你今天的查询次数已满哦~'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        searchList = _teacher_search(tname)
        content = ('【%s】%s学号查询[%s]' % (datetime.datetime.now().strftime('%H:%M:%S'), xh, tname))
        writeLog(content)
        if len(searchList) != 0:
            # Only successful searches consume quota.
            nremainTimes = int(remainTimes) - 1
            stu.update(searchTimes=lastTime+','+str(nremainTimes))
        else:
            nremainTimes = int(remainTimes)
        return HttpResponse(json.dumps({'count': len(searchList),'result':searchList,'times':nremainTimes}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    else:
        # New day: reset the quota (monitors get a larger allowance), then
        # search without decrementing — matches the historical behaviour.
        nremainTimes = '4' if thisStu.classMonitor == 1 else '2'
        stu.update(searchTimes=date + ',' + nremainTimes)
        searchList = _teacher_search(tname)
        content = ('【%s】%s学号查询[%s]' % (datetime.datetime.now().strftime('%H:%M:%S'), xh, tname))
        writeLog(content)
        return HttpResponse(json.dumps({'count': len(searchList),'result':searchList,'times':int(nremainTimes)}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def searchExcept(request):
    """Forward a teacher-directory correction/feedback to the maintainer.

    Pushes the reporter's id, the teacher name/college and the feedback
    text through the configured ServerChan webhook.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward to the alternate API host.
        data = {
            'xh':request.POST.get("xh"),
            'tname':request.POST.get("tname"),
            'collegeName':request.POST.get("collegeName"),
            'content':request.POST.get("content")
        }
        res = requests.post(url=myconfig.otherapi+"/info/scallback",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    xh = request.POST.get("xh")
    tname = request.POST.get("tname")
    # NOTE(review): this reads POST key "college" while the proxy branch
    # above forwards "collegeName" — one of the two is likely wrong;
    # confirm which key the client actually sends.
    collegeName = request.POST.get("college")
    content = request.POST.get("content")
    ServerChan = config["ServerChan"]
    text = "黄页反馈"
    if ServerChan == "none":
        # No webhook configured: feedback cannot be delivered.
        return HttpResponse(json.dumps({'err':'反馈失败,管理员未打开反馈接口'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    else:
        requests.get(ServerChan + 'text=' + text + '&desp=' + str(xh) + '\n' + str(tname) + str(collegeName) + '\n' + str(content))
        return HttpResponse(json.dumps({'msg':'反馈成功'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def classGrades(request):
    """Build and download an Excel sheet of one class's cached grades.

    GET params: className, yt ("<year><term>", e.g. "20191").  Uses the
    most recently updated classmate's cached grade file to determine the
    course-column order, merges every classmate's cached grades, writes
    the sheet via writeToExcel() and streams it back as an attachment.
    Only grades from normal exams of non-elective courses are included.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward to the alternate API host.
        res = requests.get(url=myconfig.otherapi+"/info/classgrades?className=" + request.GET.get("className") + "&yt=" + request.GET.get("yt"))
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    className = request.GET.get("className")
    yt = request.GET.get("yt")
    # year/term are parsed but not used below; kept for reference.
    year = yt[0:4]
    term = yt[4:5]
    studentIdList = []
    for i in Students.objects.filter(className=className).order_by("studentId"):
        studentIdList.append(i.studentId)
    res = []
    lastCourses = []
    try:
        # The course-column layout comes from the classmate whose cache is
        # freshest; fall back to the second-freshest when the file is
        # missing or unreadable.
        lastStu = Students.objects.filter(className=className).order_by("-updateTime")[0].studentId
        with open('data/' + str(lastStu)[0:2] + '/' + str(lastStu) + '/Grades-' + yt + '.json') as l:
            lastReq = json.loads(l.read())
            for course in lastReq.get("course"):
                if course.get("courseNature") != "通识教育任选" and course.get("courseNature") != "无" and course.get("gradeNature") == "正常考试":
                    lastCourses.append(course.get("courseTitle"))
    except:
        lastStu = Students.objects.filter(className=className).order_by("-updateTime")[1].studentId
        with open('data/' + str(lastStu)[0:2] + '/' + str(lastStu) + '/Grades-' + yt + '.json') as l:
            lastReq = json.loads(l.read())
            for course in lastReq.get("course"):
                if course.get("courseNature") != "通识教育任选" and course.get("courseNature") != "无" and course.get("gradeNature") == "正常考试":
                    lastCourses.append(course.get("courseTitle"))
    for stu in studentIdList:
        nowUrl = 'data/' + str(stu)[0:2] + '/' + str(stu) + '/Grades-' + yt + '.json'
        try:
            with open(nowUrl,mode='r',encoding='UTF-8') as f:
                stuReq = json.loads(f.read())
                stuRes = {
                    'name':stuReq.get("name"),
                    'xh':stuReq.get("studentId"),
                    'grades':[{
                        'n':item.get("courseTitle"),
                        'g':item.get("grade")
                    }for item in stuReq["course"] if item.get("courseNature") != "通识教育任选" and item.get("courseNature") != "无" and item.get("gradeNature") == "正常考试"]
                }
                res.append(stuRes)
        except:
            # No cached file for this classmate: emit an empty-grade row.
            res.append({'name':Students.objects.get(studentId=int(str(stu))).name,'xh':str(stu),'grades':[]})
    result = {'lastCourses':lastCourses,'res':res}
    writeToExcel(result,'data/classes/'+className+'.xlsx')
    try:
        file = open('data/classes/'+className+'.xlsx', 'rb')
    except:
        return HttpResponse(json.dumps({'error': "文件不存在"}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # Stream the workbook as a download with a URL-escaped UTF-8 filename.
    response = FileResponse(file)
    response['Content-Type'] = 'application/octet-stream'
    response["Content-Disposition"] = "attachment; filename*=UTF-8''{}".format(escape_uri_path(className)+'.xlsx')
    return response
def book_search(request):
    """Search the library catalogue; proxies type/content/page to Search()."""
    search_type = request.GET.get("type")
    keyword = request.GET.get("content")
    page_no = request.GET.get("page")
    payload = Search().search_book(search_type, keyword, page_no)
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def book_detail(request):
    """Fetch detail for one catalogue record identified by its marc id."""
    marc_id = request.GET.get("marc")
    payload = Search().book_detail(marc_id)
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_info(request):
    """Personal library account info (logs in with library credentials)."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).get_info()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_list(request):
    """Currently borrowed books for the given library account."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).book_list()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_hist(request):
    """Borrowing history for the given library account."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).book_hist()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_paylist(request):
    """Fee/fine summary for the given library account."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).paylist()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_paydetail(request):
    """Itemized fee/fine details for the given library account."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).paydetail()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def school_card(request):
    """Campus-card transaction history, one page at a time."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    page_no = request.POST.get("page")
    session_cookies = PLogin().plogin(account, password)
    payload = Infos(session_cookies).school_card(page_no)
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def financial(request):
    """Financial (tuition/fees) records, one page at a time."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    page_no = request.POST.get("page")
    session_cookies = PLogin().plogin(account, password)
    payload = Infos(session_cookies).financial(page_no)
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def award(request):
    """Scrape the scholarship/award lookup site for a keyword.

    Accepts ``keyword`` via POST or GET, scrapes the third-party result
    page and returns a JSON list of award records, or an ``err`` object
    when the site reports no match.

    Fixes: the original shadowed the builtin ``list`` and reused the loop
    variable ``items`` for its own result dict; locals are renamed.
    """
    if request.method == "POST":
        keyword = request.POST.get("keyword")
    else:
        keyword = request.GET.get("keyword")
    url = "http://xcctw.cn/app/index.php?keyword=" + keyword + "&i=2&c=entry&a=site&do=fm&m=yoby_cha&rid=13"
    res = requests.get(url=url)
    soup = BeautifulSoup(res.text,'lxml')
    # The site shows a weui message box when nothing matched.
    if soup.find(class_="weui-msgbox"):
        return HttpResponse(json.dumps({'err':"没有查询到结果"}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    cells = soup.find_all(class_="weui-cell__bd")
    result = []
    for cell in cells:
        fields = cell.find_all(class_="f16")
        # Each field is "<label>:<value>"; the slice drops the fixed-width label.
        result.append({
            'name': (fields[0].get_text()[3:]).strip(),
            'studentId': (fields[1].get_text()[3:]).strip(),
            'college': (fields[2].get_text()[5:]).strip(),
            'major': (fields[3].get_text()[3:]).strip(),
            'detail': (fields[4].get_text()[5:]).strip(),
            'number': (fields[5].get_text()[5:]).strip()
        })
    return HttpResponse(json.dumps(result, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def get_maps(request):
    """Fun-facts endpoint: how many other users share attributes with *xh*.

    Counts users sharing the same name, birthday (full date and month+day),
    graduation school and domicile — overall and within the student's own
    class.  Each count subtracts 1 to exclude the student themself.
    """
    if request.method == "GET":
        xh = request.GET.get("xh")
    elif request.method == "POST":
        xh = request.POST.get("xh")
    allIn = Students.objects.all().count()
    thisStu = Students.objects.get(studentId=int(xh))
    # assumes birthDay is "YYYY-MM-DD": [5:] keeps the "MM-DD" part — TODO confirm
    thisStuBirthDayAndMonth = (thisStu.birthDay)[5:]
    names = Students.objects.filter(name=thisStu.name).count() - 1
    birthDay = Students.objects.filter(birthDay=thisStu.birthDay).count() - 1
    birthDayAndMonth = Students.objects.filter(birthDay__contains=thisStuBirthDayAndMonth).count() - 1
    classBirthDay = Students.objects.filter(className=thisStu.className,birthDay=thisStu.birthDay).count() - 1
    classBirthDayAndMonth = Students.objects.filter(className=thisStu.className,birthDay__contains=thisStuBirthDayAndMonth).count() - 1
    graduationSchool = Students.objects.filter(graduationSchool=thisStu.graduationSchool).count() - 1
    classGraduationSchool = Students.objects.filter(className=thisStu.className,graduationSchool=thisStu.graduationSchool).count() - 1
    domicile = Students.objects.filter(domicile=thisStu.domicile).count() - 1
    classDomicile = Students.objects.filter(className=thisStu.className,domicile=thisStu.domicile).count() - 1
    res = {
        'allIn': allIn,
        'name': names,
        'birthDay': birthDay,
        'birthDayAndMonth': birthDayAndMonth,
        'classBirthDay': classBirthDay,
        'classBirthDayAndMonth': classBirthDayAndMonth,
        'graduationSchool': graduationSchool,
        'classGraduationSchool': classGraduationSchool,
        'domicile': domicile,
        'places':thisStu.domicile,
        'classDomicile': classDomicile
    }
    return HttpResponse(json.dumps(res, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def isMonitor(request):
    """Report whether the given student id belongs to a class monitor."""
    xh = request.GET.get("xh")
    if not Students.objects.filter(studentId=int(xh)):
        return HttpResponse(json.dumps({"err":"没有这个同学"}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    thisStu = Students.objects.get(studentId=int(xh))
    payload = {"code": 200, "monitor": thisStu.classMonitor == 1}
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def freetime(request):
    """Return a student's free (or occupied) class sections for one week.

    GET params: xh (student id, required); term and weeks default to the
    configured current term/week; mode "1" (default) returns the FREE
    sections per weekday, any other mode returns the occupied ones.
    Reads the cached schedule JSON written by get_schedule().

    Improvement: the five-way elif chain over courseWeekday is replaced
    by a dispatch dict; weekend entries are still ignored.
    """
    myconfig = Config.objects.all().first()
    xh = request.GET.get("xh")
    term = request.GET.get("term") if request.GET.get("term") is not None else myconfig.nSchedule
    weeks = request.GET.get("weeks") if request.GET.get("weeks") is not None else myconfig.nowweek
    mode = request.GET.get("mode") if request.GET.get("mode") is not None else "1"
    datafile = 'data/' + xh[0:2] + "/" + xh + "/" + "Schedules-" + term + ".json"
    fullSections = [1,2,3,4,5,6,7,8,9,10,11,12]
    if not os.path.exists(datafile):
        return HttpResponse(json.dumps({"err":"原因:1.该同学没有使用“西院助手”小程序。2.没有在小程序请求过该学期课程信息。3.还未到该学期"}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    with open(datafile,mode='r',encoding='UTF-8') as f:
        schedule_data = json.loads(f.read())
    # Map the schedule's numeric weekday strings onto response keys;
    # "6"/"7" (weekend) entries have no key and are skipped, as before.
    daykeys = {"1": "Mon", "2": "Tue", "3": "Wed", "4": "Thu", "5": "Fri"}
    res = {"Mon":[],"Tue":[],"Wed":[],"Thu":[],"Fri":[]}
    week_no = int(weeks)
    for item in schedule_data["normalCourse"]:
        key = daykeys.get(item["courseWeekday"])
        if key is not None and week_no in item["includeWeeks"]:
            res[key].extend(item["includeSection"])
    if mode == "1":
        # mode "1": report the complement — the sections with no class.
        for key in res:
            res[key] = diffList(fullSections, res[key])
    return HttpResponse(json.dumps(res, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
import datetime
import os
import time
import traceback
import json
import requests
import openpyxl
from bs4 import BeautifulSoup
from api import GetInfo, Login, PLogin, Personal, Infos, Search
from django.utils.encoding import escape_uri_path
from django.http import HttpResponse, JsonResponse, FileResponse
from info.models import Students, Teachers
from mp.models import Config
from openpyxl.styles import Font, colors, Alignment
# Load the global app configuration once at import time; base_url points
# at the upstream educational-administration system.
with open('config.json', mode='r', encoding='utf-8') as f:
    config = json.loads(f.read())
base_url = config["base_url"]
def index(request):
    """Plain-text health-check endpoint for the info app."""
    return HttpResponse('info_index here')
def calSex(id):
sexNum = id[16:17]
if int(sexNum)%2==0:
return 2
else:
return 1
def diffList(list1,list2):
    """Return the elements of list1 absent from list2, preserving order."""
    return list(filter(lambda item: item not in list2, list1))
def mywarn(text, desp, xh, pswd):
    """Build the user-facing error JSON and optionally push an alert.

    When a ServerChan webhook is configured (not "none"), a notification
    containing the detail, student id and password is fired first.
    """
    push_url = config["ServerChan"]
    if "错误" in text:
        errData = {'err':text+',请返回重试'}
    else:
        errData = {'err':text+',建议访问一下“课程通知”以便刷新cookies'}
    if push_url != "none":
        # Fire-and-forget notification to the maintainer.
        requests.get(push_url + 'text=' + text + '&desp=' + desp + '\n' + str(xh) + '\n' + str(pswd))
    return HttpResponse(json.dumps(errData, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def cacheData(xh, filename):
    """Load a previously cached JSON payload for a student, or None.

    Returns None (and creates the student's data directory on first use)
    when nothing usable is cached; cached error payloads are ignored.
    """
    docurl = 'data/' + str(xh)[0:2] + '/' + str(xh) + '/'
    fileurl = docurl + str(filename) + '.json'
    if not os.path.exists(docurl):
        # First request for this student: prepare the directory, no cache yet.
        os.makedirs(docurl)
        return None
    if not os.path.exists(fileurl):
        return None
    with open(fileurl, mode='r', encoding='utf-8') as o:
        result = json.loads(o.read())
    # Never serve a cached error response.
    return None if result.get("err") else result
def newData(xh, filename, content):
    """Write (or overwrite) a cached JSON payload for a student.

    Creates the per-student data directory on demand.  The original's two
    branches performed the identical write; they are collapsed into one
    path via makedirs(exist_ok=True).
    """
    docurl = 'data/' + str(xh)[0:2] + '/' + str(xh) + '/'
    fileurl = docurl + str(filename) + '.json'
    os.makedirs(docurl, exist_ok=True)
    with open(fileurl, mode='w', encoding='utf-8') as n:
        n.write(content)
def writeLog(content):
    """Append one line to today's log file under mylogs/.

    Creates the dated file with a header line on first use each day.
    Robustness fix: the original crashed with FileNotFoundError when the
    mylogs/ directory did not exist yet.
    """
    date = datetime.datetime.now().strftime('%Y-%m-%d')
    os.makedirs('mylogs', exist_ok=True)
    filename = 'mylogs/' + date + '.log'
    if not os.path.exists(filename):
        with open(filename, mode='w', encoding='utf-8') as n:
            n.write('【%s】的日志记录' % date)
    with open(filename, mode='a', encoding='utf-8') as l:
        l.write('\n%s' % content)
def login_pages_set(xh):
    """Fetch a fresh login page (cookies/tokens/captcha) and cache it as 'Storage'."""
    storage = Login(base_url=base_url).login_page()
    newData(xh, 'Storage', json.dumps(storage, ensure_ascii=False))
def login_pages_get(xh):
    """Return the cached login-page 'Storage' payload for *xh*, or None."""
    return cacheData(xh, 'Storage')
def get_kaptcha_net(request):
    """Force-refresh the login page for ``xh`` and return its captcha data."""
    xh = request.GET.get("xh")
    login_pages_set(xh)
    storage = login_pages_get(xh)
    return HttpResponse(json.dumps({'kaptcha': storage["kaptcha"]}, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def get_kaptcha(xh):
    """Refresh the login page for *xh* and respond with its captcha,
    unless the system is flagged as under maintenance."""
    myconfig = Config.objects.all().first()
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    login_pages_set(xh)
    storage = login_pages_get(xh)
    return HttpResponse(json.dumps({'kaptcha': storage["kaptcha"]}, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def update_cookies(request):
    """Re-login to the educational system and refresh a student's session.

    Reads xh/pswd (and the captcha answer when captcha mode is on) from
    POST, performs the login, stores the new JSESSIONID/route cookies on
    the student row, refreshes the cached personal-info file, and returns
    either the new cookie jar (non-captcha mode, for immediate reuse by
    the caller) or an HttpResponse.
    """
    myconfig = Config.objects.all().first()
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    try:
        xh = request.POST.get("xh")
        pswd = request.POST.get("pswd")
        kaptcha = request.POST.get("kaptcha")
        stu = Students.objects.get(studentId=int(xh))
        refreshTimes = int(stu.refreshTimes)
        startTime = time.time()
        content = ('【%s】[%s]更新cookies' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
        writeLog(content)
        # print('原cookies:')
        # print('{JSESSIONID:%s,route:%s}' % (stu.JSESSIONID,stu.route))
        lgn = Login(base_url=base_url)
        if myconfig.isKaptcha:
            # Captcha mode: reuse the login-page tokens cached by get_kaptcha();
            # if they are missing, issue a fresh captcha first.
            storage = login_pages_get(xh)
            if storage is None:
                return get_kaptcha(xh)
            lgn.login_kaptcha(storage["cookies"],xh, pswd,storage["tokens"],storage["n"],storage["e"],kaptcha)
        else:
            lgn.login(xh, pswd)
        if lgn.runcode == 1:
            # runcode 1: login succeeded — persist the fresh session cookies.
            cookies = lgn.cookies
            # person = GetInfo(base_url=base_url, cookies=cookies)
            NJSESSIONID = requests.utils.dict_from_cookiejar(cookies)["JSESSIONID"]
            if myconfig.isKaptcha:
                nroute = storage["cookies"]["route"]
            else:
                nroute = requests.utils.dict_from_cookiejar(cookies)["route"]
            ncookies = requests.utils.cookiejar_from_dict({"JSESSIONID":NJSESSIONID,"route":nroute})
            updateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            refreshTimes += 1
            Students.objects.filter(studentId=int(xh)).update(JSESSIONID=NJSESSIONID, route=nroute,
                                                              refreshTimes=refreshTimes, updateTime=updateTime)
            endTime = time.time()
            spendTime = endTime - startTime
            # print('新cookies:')
            content = ('【%s】更新cookies成功,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), spendTime))
            writeLog(content)
            # Refresh the cached personal-info file with the new session.
            person = GetInfo(base_url=base_url, cookies=ncookies)
            pinfo = person.get_pinfo()
            if stu.email == "无":
                Students.objects.filter(studentId=int(xh)).update(email=pinfo["email"])
            # print(pinfo)
            filename = ('Pinfo')
            newData(xh, filename, json.dumps(pinfo, ensure_ascii=False))
            # print(requests.utils.dict_from_cookiejar(cookies))
            if myconfig.isKaptcha:
                return HttpResponse(json.dumps({'success':'更新cookies成功'}, ensure_ascii=False),
                                    content_type="application/json,charset=utf-8")
            else:
                # Non-captcha callers expect the raw cookie jar for reuse.
                return cookies
        elif lgn.runcode == 4:
            # runcode 4: wrong captcha answer.
            return HttpResponse(json.dumps({'err':'验证码错误'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            content = ('【%s】[%s]更新cookies时网络或其他错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'网络或token问题,请返回重试'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
    except Exception as e:
        if str(e) == "'NoneType' object has no attribute 'get'":
            return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        # if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
        #     return update_cookies(xh, pswd)
        else:
            traceback.print_exc()
            return mywarn("更新cookies未知错误",str(e),xh,pswd)
def writeToExcel(json,saveUrl):
    """Render the classGrades() payload into an .xlsx workbook at *saveUrl*.

    Layout: header row (学号, 姓名, one column per course in lastCourses),
    one row per student with grades matched to their course column, then a
    merged footer with usage notes and a generation timestamp.

    NOTE(review): the parameter named ``json`` shadows the imported json
    module inside this function (the module is not used here, so this is
    harmless but confusing).
    """
    lastCourses = json["lastCourses"]
    res = json["res"]
    excel = openpyxl.Workbook()
    sheet1 = excel.create_sheet('sheet1', index=0)
    # Header row: student id, name, then one column per course title.
    sheet1.cell(row=1,column=1,value="学号").alignment = Alignment(horizontal='center', vertical='center')
    sheet1.cell(row=1,column=2,value="姓名").alignment = Alignment(horizontal='center', vertical='center')
    sheet1.column_dimensions['A'].width = 15
    for c in range(0,len(lastCourses)):
        sheet1.cell(row=1, column=c + 3, value=lastCourses[c]).alignment = Alignment(horizontal='center', vertical='center')
        # sheet1.column_dimensions[chr(67+c)].width = 8
    for items in range(0,len(res)):
        sheet1.cell(row=items+2,column=1,value=res[items]["xh"]).alignment = Alignment(horizontal='center', vertical='center')
        sheet1.cell(row=items+2,column=2,value=res[items]["name"]).alignment = Alignment(horizontal='center', vertical='center')
        for n in range(0,len(res[items]["grades"])):
            for cs in range(0,len(lastCourses)):
                if res[items]["grades"][n]["n"] == lastCourses[cs]:
                    try:
                        # Prefer numeric cells so Excel can sort/aggregate.
                        sheet1.cell(row=items+2,column=cs+3,value=int(res[items]["grades"][n]["g"])).alignment = Alignment(horizontal='center', vertical='center')
                    except:
                        # Fall back to the raw string when the grade is not an integer.
                        sheet1.cell(row=items+2,column=cs+3,value=res[items]["grades"][n]["g"]).alignment = Alignment(horizontal='center', vertical='center')
    # Footer: usage notes and generation timestamp in merged cells.
    sheet1.merge_cells(start_row=len(res)+2, start_column=1, end_row=len(res)+5, end_column=6)
    sheet1.cell(row=len(res)+2,column=1,value="1.表中数据来源须该班同学使用“西院助手”小程序访问并刷新该学期成绩\n2.留空为该同学还未刷新到最新,未使用小程序不会显示该同学行\n3.该表成绩为教务系统获取成绩,真实有效").alignment = Alignment(horizontal='center', vertical='center')
    sheet1.merge_cells(start_row=len(res)+2, start_column=7, end_row=len(res)+5, end_column=10)
    sheet1.cell(row=len(res)+2,column=7,value="生成时间:%s" % time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))).alignment = Alignment(horizontal='center', vertical='center')
    excel.save(saveUrl)
def get_pinfo(request):
    """Log a student into the academic system and return their profile JSON.

    POST params: ``xh`` (student id), ``pswd`` (password), ``kaptcha``
    (captcha answer, only meaningful when ``myconfig.isKaptcha`` is set).

    On a successful first login a ``Students`` row is created; for a
    returning user the stored session cookies (JSESSIONID/route) and the
    refresh counter are updated.  The profile JSON is also cached via
    ``newData`` under the 'Pinfo' filename.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Traffic has been switched to a secondary API deployment: proxy verbatim.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd"),
            'kaptcha':request.POST.get("kaptcha")
        }
        res = requests.post(url=myconfig.otherapi+"/info/pinfo",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        # Upstream academic system flagged as down for maintenance.
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["loginbad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法请求登录,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
            kaptcha = request.POST.get("kaptcha")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if Students.objects.filter(studentId=int(xh)):
            # Returning user: re-login, refresh stored cookies and counters.
            stu = Students.objects.get(studentId=int(xh))
            refreshTimes = int(stu.refreshTimes)
            try:
                startTime = time.time()
                lgn = Login(base_url=base_url)
                if myconfig.isKaptcha:
                    # Captcha flow: reuse the login-page state fetched earlier for
                    # this student (cookies, CSRF tokens, RSA modulus n / exponent e).
                    storage = login_pages_get(xh)
                    if storage is None:
                        # No pending login-page state: issue a fresh captcha first.
                        return get_kaptcha(xh)
                    lgn.login_kaptcha(storage["cookies"],xh, pswd,storage["tokens"],storage["n"],storage["e"],kaptcha)
                else:
                    lgn.login(xh, pswd)
                if lgn.runcode == 1:
                    # runcode 1 == login succeeded.
                    cookies = lgn.cookies
                    JSESSIONID = requests.utils.dict_from_cookiejar(cookies)["JSESSIONID"]
                    if myconfig.isKaptcha:
                        route = storage["cookies"]["route"]
                    else:
                        route = requests.utils.dict_from_cookiejar(cookies)["route"]
                    ncookies = requests.utils.cookiejar_from_dict({"JSESSIONID":JSESSIONID,"route":route})
                    person = GetInfo(base_url=base_url, cookies=ncookies)
                    pinfo = person.get_pinfo()
                    if pinfo.get("idNumber")[-6:] == pswd:
                        # Password still equals the default (last 6 digits of the id
                        # number): make the user change it upstream before using the app.
                        return HttpResponse(json.dumps({'err':"新生或专升本同学请在教务系统(jwxt.xcc.edu.cn)完善信息并审核且修改密码后登陆小程序!"}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    if pinfo.get('err'):
                        if pinfo.get('err') == "Connect Timeout":
                            return mywarn("登录超时","",xh,pswd)
                        else:
                            return pinfo
                    refreshTimes += 1
                    updateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    Students.objects.filter(studentId=int(xh)).update(JSESSIONID=JSESSIONID, route=route,
                                                                      refreshTimes=refreshTimes, updateTime=updateTime)
                    endTime = time.time()
                    spendTime = endTime - startTime
                    print('【%s】登录了' % pinfo["name"])
                    content = ('【%s】[%s]第%d次访问登录了,耗时%.2fs' % (
                        datetime.datetime.now().strftime('%H:%M:%S'), pinfo["name"], refreshTimes, spendTime))
                    writeLog(content)
                    filename = ('Pinfo')
                    newData(xh, filename, json.dumps(pinfo, ensure_ascii=False))
                    return HttpResponse(json.dumps(pinfo, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif lgn.runcode == 4:
                    # runcode 4 == wrong captcha.
                    return HttpResponse(json.dumps({'err':'验证码错误'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif lgn.runcode == 2:
                    # runcode 2 == bad credentials.
                    content = ('【%s】[%s]在登录时学号或者密码错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    return HttpResponse(json.dumps({'err':'学号或者密码错误'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    # Any other runcode: network / token failure.
                    content = ('【%s】[%s]在登录时网络或其它错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    return HttpResponse(json.dumps({'err':'网络或token问题,请返回重试'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
            except Exception as e:
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # Transient connection reset: ask the client to retry.
                    # return get_pinfo(request)
                    return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]登录时出错' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    traceback.print_exc()
                    return mywarn("登录未知错误",str(e),xh,pswd)
        else:
            # First-ever login: same flow, but create the Students row afterwards.
            try:
                startTime = time.time()
                lgn = Login(base_url=base_url)
                if myconfig.isKaptcha:
                    storage = login_pages_get(xh)
                    if storage is None:
                        return get_kaptcha(xh)
                    lgn.login_kaptcha(storage["cookies"],xh, pswd,storage["tokens"],storage["n"],storage["e"],kaptcha)
                else:
                    lgn.login(xh, pswd)
                if lgn.runcode == 1:
                    cookies = lgn.cookies
                    JSESSIONID = requests.utils.dict_from_cookiejar(cookies)["JSESSIONID"]
                    if myconfig.isKaptcha:
                        route = storage["cookies"]["route"]
                    else:
                        route = requests.utils.dict_from_cookiejar(cookies)["route"]
                    ncookies = requests.utils.cookiejar_from_dict({"JSESSIONID":JSESSIONID,"route":route})
                    person = GetInfo(base_url=base_url, cookies=ncookies)
                    pinfo = person.get_pinfo()
                    if pinfo.get("idNumber")[-6:] == pswd:
                        return HttpResponse(json.dumps({'err':"新生或专升本同学请在教务系统(jwxt.xcc.edu.cn)完善信息并审核且修改密码后登陆小程序!"}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    if pinfo.get('err'):
                        if pinfo.get('err') == "Connect Timeout":
                            return mywarn("登录超时","",xh,pswd)
                        else:
                            return pinfo
                    updateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    # Persist the newly scraped profile as a Students row.
                    newstu = Students.create(int(pinfo["studentId"]), pinfo["name"], calSex(pinfo["idNumber"]), pinfo["collegeName"],
                                             pinfo["majorName"], pinfo["className"], pinfo["phoneNumber"],
                                             pinfo["birthDay"], pinfo["graduationSchool"], pinfo["domicile"],
                                             pinfo["email"], pinfo["national"], pinfo["idNumber"],
                                             JSESSIONID, route, updateTime)
                    newstu.save()
                    endTime = time.time()
                    spendTime = endTime - startTime
                    print('【%s】第一次登录' % pinfo["name"])
                    content = ('【%s】[%s]第一次登录,耗时%.2fs' % (
                        datetime.datetime.now().strftime('%H:%M:%S'), pinfo["name"], spendTime))
                    writeLog(content)
                    filename = ('Pinfo')
                    newData(xh, filename, json.dumps(pinfo, ensure_ascii=False))
                    return HttpResponse(json.dumps(pinfo, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif lgn.runcode == 4:
                    return HttpResponse(json.dumps({'err':'验证码错误'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif lgn.runcode == 2:
                    content = ('【%s】[%s]在第一次登录时学号或者密码错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    return HttpResponse(json.dumps({'err':'学号或者密码错误'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]在第一次登录时网络或其它错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    return HttpResponse(json.dumps({'err':'网络或token问题,请返回重试'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
            except Exception as e:
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # return get_pinfo(request)
                    return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]第一次登录时出错' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
                    writeLog(content)
                    if str(e) == "'NoneType' object has no attribute 'get'":
                        # pinfo came back as None: the upstream system is down.
                        return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    traceback.print_exc()
                    return mywarn("登录未知错误",str(e),xh,pswd)
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def refresh_class(request):
    """Re-fetch the student's current class name and store it if it changed.

    POST params: ``xh`` (student id), ``pswd`` (password, used only by the
    re-login fallback).  Uses the session cookies saved at login time.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy to the secondary API deployment when the admin switch is on.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd")
        }
        res = requests.post(url=myconfig.otherapi+"/info/refreshclass",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["loginbad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法请求登录,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            content = ('【%s】[%s]未登录更新班级信息' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
            try:
                startTime = time.time()
                print('【%s】更新了班级信息' % stu.name)
                # Rebuild the requests cookie jar from the stored session.
                JSESSIONID = str(stu.JSESSIONID)
                route = str(stu.route)
                cookies_dict = {
                    'JSESSIONID': JSESSIONID,
                    'route': route
                }
                cookies = requests.utils.cookiejar_from_dict(cookies_dict)
                person = GetInfo(base_url=base_url, cookies=cookies)
                nowClass = person.get_now_class()
                # On success nowClass is used as a plain string below; a dict
                # with an 'err' key signals a scrape failure.  The inner
                # try/except swallows the AttributeError a str raises on .get().
                try:
                    if nowClass.get('err'):
                        if nowClass.get('err') == "Connect Timeout":
                            return mywarn("更新班级超时","",xh,pswd)
                except:
                    pass
                if stu.className == nowClass:
                    return HttpResponse(json.dumps({'err':"你的班级并未发生变化~"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                Students.objects.filter(studentId=int(xh)).update(className=nowClass)
                endTime = time.time()
                spendTime = endTime - startTime
                content = ('【%s】[%s]更新了班级信息,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
                writeLog(content)
                return HttpResponse(json.dumps({'success':"你已成功变更到【"+ nowClass + "】!",'class':nowClass}, ensure_ascii=False), content_type="application/json,charset=utf-8")
            except Exception as e:
                content = ('【%s】[%s]更新班级信息出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                writeLog(content)
                if str(e) == "'NoneType' object has no attribute 'get'":
                    # Scraper returned None: the upstream system is down.
                    return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # Transient reset: retry the whole request once via recursion.
                    return refresh_class(request)
                if 'Expecting value' not in str(e):
                    traceback.print_exc()
                    return mywarn("更新班级错误",str(e),xh,pswd)
                # A JSON decode failure ('Expecting value ...') is treated as an
                # expired session: re-login and retry once.
                if myconfig.isKaptcha:
                    return get_kaptcha(xh)
                else:
                    sta = update_cookies(request)
                    person = GetInfo(base_url=base_url, cookies=sta)
                    nowClass = person.get_now_class()
                    if stu.className == nowClass:
                        return HttpResponse(json.dumps({'err':"你的班级并未发生变化~"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                    Students.objects.filter(studentId=int(xh)).update(className=nowClass)
                    return HttpResponse(json.dumps({'success':"你已成功变更到【"+ nowClass + "】!",'class':nowClass}, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def get_message(request):
    """Return the student's academic-system notifications as JSON.

    POST params: ``xh`` (student id), ``pswd`` (password, used only by the
    re-login fallback).  Reuses the session cookies stored at login; the
    per-access log lines are commented out in this handler.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy to the secondary API deployment when the admin switch is on.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd")
        }
        res = requests.post(url=myconfig.otherapi+"/info/message",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["jwxtbad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法访问(可能是学校机房断电或断网所致),小程序暂时无法登录和更新,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            content = ('【%s】[%s]未登录访问消息' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
            try:
                startTime = time.time()
                # print('【%s】查看了消息' % stu.name)
                # Rebuild the requests cookie jar from the stored session.
                JSESSIONID = str(stu.JSESSIONID)
                route = str(stu.route)
                cookies_dict = {
                    'JSESSIONID': JSESSIONID,
                    'route': route
                }
                cookies = requests.utils.cookiejar_from_dict(cookies_dict)
                person = GetInfo(base_url=base_url, cookies=cookies)
                message = person.get_message()
                endTime = time.time()
                spendTime = endTime - startTime
                # Access logging left disabled here — presumably too chatty; confirm.
                # content = ('【%s】[%s]访问了消息,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
                # writeLog(content)
                return HttpResponse(json.dumps(message, ensure_ascii=False), content_type="application/json,charset=utf-8")
            except Exception as e:
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # Transient reset: ask the client to simply retry.
                    # return get_message(request)
                    return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]访问消息出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                    writeLog(content)
                    if str(e) == 'Expecting value: line 1 column 1 (char 0)':
                        # Upstream returned non-JSON: the system is down.
                        return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    if str(e) != 'Expecting value: line 6 column 1 (char 11)':
                        traceback.print_exc()
                        return mywarn("消息请求错误",str(e),xh,pswd)
                    # This specific decode error is treated as an expired session:
                    # re-login and retry once.
                    if myconfig.isKaptcha:
                        return get_kaptcha(xh)
                    else:
                        sta = update_cookies(request)
                        person = GetInfo(base_url=base_url, cookies=sta)
                        message = person.get_message()
                        return HttpResponse(json.dumps(message, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def _save_study_result(xh, study):
    """Persist the GPA from a study payload and cache the payload itself.

    Stores the sentinel "init" when the upstream GPA is empty or missing.
    (The original inline condition used `or` — `str(x) is not None` is always
    True — which made the "init" fallback unreachable; fixed here.)
    """
    gpa = str(study["gpa"]) if study["gpa"] not in ("", None) else "init"
    Students.objects.filter(studentId=int(xh)).update(gpa=gpa)
    newData(xh, 'Study', json.dumps(study, ensure_ascii=False))

def get_study(request):
    """Return the student's academic-progress summary ("学业情况") as JSON.

    POST params:
        xh      -- student id
        pswd    -- password (used only by the re-login fallback)
        refresh -- "no" => serve the cached copy when one exists

    Mirrors the other handlers in this module: optional proxying to a
    secondary API, maintenance short-circuit, per-student cache, and a
    cookie-expiry fallback via get_kaptcha()/update_cookies().
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Traffic switched to a secondary API deployment: proxy verbatim.
        data = {
            'xh': request.POST.get("xh"),
            'pswd': request.POST.get("pswd"),
            'refresh': request.POST.get("refresh")
        }
        res = requests.post(url=myconfig.otherapi + "/info/study", data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err': '教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
            refresh = request.POST.get("refresh")
        else:
            return HttpResponse(json.dumps({'err': '请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            content = ('【%s】[%s]未登录访问学业情况' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err': '还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
            if refresh == "no":
                # Client accepts a cached copy: serve it when present.
                cache = cacheData(xh, 'Study')
                if cache is not None:
                    print('【%s】查看了学业缓存' % stu.name)
                    return HttpResponse(json.dumps(cache, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
            try:
                startTime = time.time()
                print('【%s】查看了学业情况' % stu.name)
                # Reuse the session cookies stored at login time.
                cookies = requests.utils.cookiejar_from_dict({
                    'JSESSIONID': str(stu.JSESSIONID),
                    'route': str(stu.route)
                })
                person = GetInfo(base_url=base_url, cookies=cookies)
                study = person.get_study(xh)
                if study.get("err") == 'Connect Timeout':
                    # Stored cookies are stale: force a fresh login, then retry once.
                    if myconfig.isKaptcha:
                        return get_kaptcha(xh)
                    else:
                        sta = update_cookies(request)
                        person = GetInfo(base_url=base_url, cookies=sta)
                        study = person.get_study(xh)
                        _save_study_result(xh, study)
                        return HttpResponse(json.dumps(study, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                endTime = time.time()
                spendTime = endTime - startTime
                content = ('【%s】[%s]访问了学业情况,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
                writeLog(content)
                _save_study_result(xh, study)
                return HttpResponse(json.dumps(study, ensure_ascii=False), content_type="application/json,charset=utf-8")
            except Exception as e:
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # Transient reset: ask the client to retry later.
                    return HttpResponse(json.dumps({'err': '更新出现问题,请待教务系统修复'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                elif "list index out of range" in str(e) and int(xh[0:2]) >= int(myconfig.nGrade[2:4]):
                    # Students at/after the current-grade cutoff simply have no data yet.
                    return HttpResponse(json.dumps({'err': '暂无学业信息或请先刷新“我的成绩”后访问'}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]访问学业情况出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                    writeLog(content)
                    if str(e) != 'list index out of range':
                        traceback.print_exc()
                        return mywarn("学业请求错误", str(e), xh, pswd)
                    # 'list index out of range' for older students is treated as an
                    # expired session: re-login and retry once.
                    if myconfig.isKaptcha:
                        return get_kaptcha(xh)
                    else:
                        sta = update_cookies(request)
                        person = GetInfo(base_url=base_url, cookies=sta)
                        study = person.get_study(xh)
                        _save_study_result(xh, study)
                        return HttpResponse(json.dumps(study, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err': '请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def get_grade(request):
    """Return one term's grades as JSON.

    POST params:
        xh, pswd -- student id / password (password used by fallbacks)
        year, term -- academic year (e.g. "2020") and term ("1"/"2")
        refresh -- "no" => a cached copy is acceptable for finalized terms

    Fixes over the previous revision:
      * The cache branch defined an inner function (`isLast`) holding the
        serve-from-cache logic but never called it, so the cache was never
        used; the intended guard is now inlined.
      * The GPA-store expression used `or` where `and` semantics were
        intended, making the "init" fallback unreachable.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        data = {
            'xh': request.POST.get("xh"),
            'pswd': request.POST.get("pswd"),
            'year': request.POST.get("year"),
            'term': request.POST.get("term"),
            'refresh': request.POST.get("refresh")
        }
        # NOTE(review): unlike the other proxied handlers this posts to the
        # API root (no "/info/grade" suffix) — kept as-is; confirm against
        # the secondary deployment's routing.
        res = requests.post(url=myconfig.otherapi, data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err': '教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
            year = request.POST.get("year")
            term = request.POST.get("term")
            refresh = request.POST.get("refresh")
        else:
            return HttpResponse(json.dumps({'err': '请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            content = ('【%s】[%s]未登录访问成绩' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err': '还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
            if refresh == "no":
                filename = ('Grades-%s%s' % (str(year), str(term)))
                cache = cacheData(xh, filename)
                if cache is not None:
                    # Serve the cache only for terms whose grades are final:
                    # never for the current academic year, and not for term 2
                    # when the current term is 1 (those grades may still change).
                    cur_year = (myconfig.nGrade)[0:4]
                    cur_term = (myconfig.nGrade)[4:5]
                    if str(year) == cur_year:
                        pass
                    elif int(cur_term) - 1 == 0 and int(term) == 2:
                        pass
                    else:
                        print('【%s】查看了%s-%s的成绩缓存' % (stu.name, year, term))
                        return HttpResponse(json.dumps(cache, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
            try:
                startTime = time.time()
                print('【%s】查看了%s-%s的成绩' % (stu.name, year, term))
                # Reuse the session cookies stored at login time.
                cookies = requests.utils.cookiejar_from_dict({
                    'JSESSIONID': str(stu.JSESSIONID),
                    'route': str(stu.route)
                })
                person = GetInfo(base_url=base_url, cookies=cookies)
                grade = person.get_grade(year, term)
                if grade.get("err"):
                    if grade.get("err") == "Connect Timeout":
                        # Session expired: make the client solve a captcha and re-login.
                        return get_kaptcha(xh)
                    elif grade.get("err") == "No Data":
                        if int(xh[0:2]) > int(myconfig.nGrade[2:4]):
                            return HttpResponse(json.dumps({'err': "当前你还没有任何成绩信息"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                        else:
                            return HttpResponse(json.dumps({'err': "还没有" + year + "-" + term + "学期的成绩,点击顶栏也看看以前的吧~"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                    elif grade.get("err") == "Error Term":
                        return HttpResponse(json.dumps({'err': "网络问题,请重新访问请求课程"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                # Store the GPA; "init" marks an empty/missing upstream value.
                Students.objects.filter(studentId=int(xh)).update(gpa=grade.get("gpa") if grade.get("gpa") not in ("", None) else "init")
                endTime = time.time()
                spendTime = endTime - startTime
                content = ('【%s】[%s]访问了%s-%s的成绩,耗时%.2fs' % (
                    datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime))
                writeLog(content)
                filename = ('Grades-%s%s' % (str(year), str(term)))
                newData(xh, filename, json.dumps(grade, ensure_ascii=False))
                return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
            except Exception as e:
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # Transient reset: ask the client to simply retry.
                    return HttpResponse(json.dumps({'err': "请重新刷新一下"}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]访问成绩出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                    writeLog(content)
                    if str(e) == 'Expecting value: line 1 column 1 (char 0)':
                        # Upstream returned non-JSON: the system is down.
                        return HttpResponse(json.dumps({'err': '教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    if str(e) != 'Expecting value: line 3 column 1 (char 4)':
                        traceback.print_exc()
                        return mywarn("成绩请求错误", str(e), xh, pswd)
                    # This specific decode error is treated as an expired session:
                    # re-login and retry once.
                    if myconfig.isKaptcha:
                        return get_kaptcha(xh)
                    else:
                        sta = update_cookies(request)
                        person = GetInfo(base_url=base_url, cookies=sta)
                        grade = person.get_grade(year, term)
                        if grade.get("gpa") == "" or grade.get("gpa") is None:
                            return HttpResponse(json.dumps({'err': '平均学分绩点获取失败,请重试~'}, ensure_ascii=False),
                                                content_type="application/json,charset=utf-8")
                        Students.objects.filter(studentId=int(xh)).update(gpa=grade.get("gpa"))
                        filename = ('Grades-%s%s' % (str(year), str(term)))
                        newData(xh, filename, json.dumps(grade, ensure_ascii=False))
                        return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err': '请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
# def get_grade2(request):
# myconfig = Config.objects.all().first()
# if myconfig.apichange:
# data = {
# 'xh':request.POST.get("xh"),
# 'pswd':request.POST.get("pswd"),
# 'year':request.POST.get("year"),
# 'term':request.POST.get("term"),
# 'refresh':request.POST.get("refresh")
# }
# res = requests.post(url=myconfig.otherapi+"/info/grade",data=data)
# return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# if myconfig.maintenance:
# return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# # if mpconfig["gradebad"]:
# # return HttpResponse(json.dumps({'err':'当前教务系统无法请求成绩,请待学校修复!'}, ensure_ascii=False),
# # content_type="application/json,charset=utf-8")
# if request.method == 'POST':
# if request.POST:
# xh = request.POST.get("xh")
# pswd = request.POST.get("pswd")
# year = request.POST.get("year")
# term = request.POST.get("term")
# refresh = request.POST.get("refresh")
# else:
# return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# if not Students.objects.filter(studentId=int(xh)):
# content = ('【%s】[%s]未登录访问成绩' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
# writeLog(content)
# return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# else:
# stu = Students.objects.get(studentId=int(xh))
# if refresh == "no":
# filename = ('GradesN-%s%s' % (str(year), str(term)))
# cache = cacheData(xh, filename)
# if cache is not None:
# # print('cache')
# print('【%s】查看了%s-%s的成绩缓存' % (stu.name, year, term))
# return HttpResponse(json.dumps(cache, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# else:
# pass
# try:
# startTime = time.time()
# print('【%s】查看了%s-%s的成绩' % (stu.name, year, term))
# JSESSIONID = str(stu.JSESSIONID)
# route = str(stu.route)
# cookies_dict = {
# 'JSESSIONID': JSESSIONID,
# 'route': route
# }
# cookies = requests.utils.cookiejar_from_dict(cookies_dict)
# person = GetInfo(base_url=base_url, cookies=cookies)
# grade = person.get_grade2(year, term)
# if grade.get("err") == "请求超时,鉴于教务系统特色,已帮你尝试重新登录,重试几次,还不行请麻烦你自行重新登录,或者在关于里面反馈!当然,也可能是教务系统挂了~":
# update_cookies(xh, pswd)
# return HttpResponse(json.dumps({'err':grade.get("err")}, ensure_ascii=False), content_type="application/json,charset=utf-8")
# if grade.get("err") == "看起来你这学期好像还没有出成绩,点击顶栏也看看以前的吧~":
# return HttpResponse(json.dumps({'err':grade.get("err")}, ensure_ascii=False), content_type="application/json,charset=utf-8")
# Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa"))
# endTime = time.time()
# spendTime = endTime - startTime
# content = ('【%s】[%s]访问了%s-%s的成绩,耗时%.2fs' % (
# datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime))
# writeLog(content)
# filename = ('GradesN-%s%s' % (str(year), str(term)))
# newData(xh, filename, json.dumps(grade, ensure_ascii=False))
# # print('write')
# return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
# except Exception as e:
# # print(e)
# if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# # return get_grade2(request)
# return HttpResponse(json.dumps({'err':'更新出现问题,请待教务系统修复'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# else:
# content = ('【%s】[%s]访问成绩出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
# writeLog(content)
# if str(e) == 'Expecting value: line 1 column 1 (char 0)':
# return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# if str(e) != 'Expecting value: line 3 column 1 (char 4)':
# traceback.print_exc()
# return mywarn("成绩请求错误",str(e),xh,pswd)
# sta = update_cookies(xh, pswd)
# person = GetInfo(base_url=base_url, cookies=sta)
# grade = person.get_grade2(year, term)
# if grade.get("gpa") == "" or grade.get("gpa") is None:
# return HttpResponse(json.dumps({'err':'平均学分绩点获取失败,请重试~'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa"))
# filename = ('GradesN-%s%s' % (str(year), str(term)))
# newData(xh, filename, json.dumps(grade, ensure_ascii=False))
# return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
# else:
# return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
def get_schedule(request):
    """Return one term's timetable as JSON.

    POST params: ``xh`` (student id), ``pswd`` (password, used only by the
    re-login fallback), ``year``, ``term``, and ``refresh`` ("no" => serve
    the cached copy when one exists).
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy to the secondary API deployment when the admin switch is on.
        data = {
            'xh':request.POST.get("xh"),
            'pswd':request.POST.get("pswd"),
            'year':request.POST.get("year"),
            'term':request.POST.get("term"),
            'refresh':request.POST.get("refresh")
        }
        res = requests.post(url=myconfig.otherapi+"/info/schedule",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if myconfig.maintenance:
        return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # if mpconfig["schedulebad"]:
    #     return HttpResponse(json.dumps({'err':'当前教务系统无法请求课表,请待学校修复!'}, ensure_ascii=False),
    #                         content_type="application/json,charset=utf-8")
    if request.method == 'POST':
        if request.POST:
            xh = request.POST.get("xh")
            pswd = request.POST.get("pswd")
            year = request.POST.get("year")
            term = request.POST.get("term")
            refresh = request.POST.get("refresh")
        else:
            return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        if not Students.objects.filter(studentId=int(xh)):
            content = ('【%s】[%s]未登录访问课程' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
            writeLog(content)
            return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        else:
            stu = Students.objects.get(studentId=int(xh))
            if refresh == "no":
                # Client accepts a cached timetable: serve it when present.
                filename = ('Schedules-%s%s' % (str(year), str(term)))
                cache = cacheData(xh, filename)
                if cache is not None:
                    print('【%s】查看了%s-%s的课表缓存' % (stu.name, year, term))
                    return HttpResponse(json.dumps(cache, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    pass
            try:
                startTime = time.time()
                print('【%s】查看了%s-%s的课程' % (stu.name, year, term))
                # Rebuild the requests cookie jar from the stored session.
                JSESSIONID = str(stu.JSESSIONID)
                route = str(stu.route)
                cookies_dict = {
                    'JSESSIONID': JSESSIONID,
                    'route': route
                }
                cookies = requests.utils.cookiejar_from_dict(cookies_dict)
                person = GetInfo(base_url=base_url, cookies=cookies)
                schedule = person.get_schedule(year, term)
                if schedule.get('err'):
                    if schedule.get('err') == "Connect Timeout":
                        return mywarn("更新课程超时","",xh,pswd)
                    elif schedule.get('err') == "Error Term":
                        return HttpResponse(json.dumps({'err':"网络问题,请重新访问请求课程"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                endTime = time.time()
                spendTime = endTime - startTime
                content = ('【%s】[%s]访问了%s-%s的课程,耗时%.2fs' % (
                    datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime))
                writeLog(content)
                filename = ('Schedules-%s%s' % (str(year), str(term)))
                newData(xh, filename, json.dumps(schedule, ensure_ascii=False))
                return HttpResponse(json.dumps(schedule, ensure_ascii=False), content_type="application/json,charset=utf-8")
            except Exception as e:
                if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
                    # Transient reset: ask the client to simply retry.
                    return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
                                        content_type="application/json,charset=utf-8")
                else:
                    content = ('【%s】[%s]访问课程出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
                    writeLog(content)
                    if str(e) == 'Expecting value: line 1 column 1 (char 0)':
                        # Upstream returned non-JSON: the system is down.
                        return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
                                            content_type="application/json,charset=utf-8")
                    if str(e) != 'Expecting value: line 3 column 1 (char 4)':
                        traceback.print_exc()
                        return mywarn("课程请求错误",str(e),xh,pswd)
                    # This specific decode error is treated as an expired session:
                    # re-login and retry once.
                    if myconfig.isKaptcha:
                        return get_kaptcha(xh)
                    else:
                        sta = update_cookies(request)
                        person = GetInfo(base_url=base_url, cookies=sta)
                        schedule = person.get_schedule(year, term)
                        filename = ('Schedules-%s%s' % (str(year), str(term)))
                        newData(xh, filename, json.dumps(schedule, ensure_ascii=False))
                        return HttpResponse(json.dumps(schedule, ensure_ascii=False), content_type="application/json,charset=utf-8")
    else:
        return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
def joinDetail(request):
    """Return user-distribution statistics grouped by college, major or class.

    GET param ``type`` selects the grouping: 'college', 'major' or 'class'.
    Response: {'allUsers': N, '<group>Num': distinct-group count,
               'detail': [{'<group>Name': ..., '<group>Num': ...}, ...]}
    sorted by member count, descending.

    Fix: previously an unrecognized ``type`` left ``res`` unassigned and the
    final return raised UnboundLocalError (HTTP 500); now it returns an
    explicit error payload.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy to the secondary API deployment when the admin switch is on.
        res = requests.get(url=myconfig.otherapi+"/info/joindetail?type=" + request.GET.get("type"))
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # Map the requested grouping onto (model field, count JSON key);
    # the field name doubles as the name key in each detail entry.
    field_map = {
        'college': ('collegeName', 'collegeNum'),
        'major': ('majorName', 'majorNum'),
        'class': ('className', 'classNum'),
    }
    group_type = request.GET.get("type")  # avoid shadowing builtin `type`
    if group_type not in field_map:
        return HttpResponse(json.dumps({'err': '未知的统计类型'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    field, num_key = field_map[group_type]
    allUsers = Students.objects.filter().all().count()
    distinct_groups = Students.objects.values(field).distinct().order_by(field)
    detail = [{
        field: row[field],
        num_key: Students.objects.filter(**{field: row[field]}).count()
    } for row in distinct_groups]
    res = {
        'allUsers': allUsers,
        num_key: int(distinct_groups.count()),
        'detail': sorted(detail, key=lambda entry: entry[num_key], reverse=True)
    }
    return HttpResponse(json.dumps(res, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def get_position(request):
    """Return the student's GPA and GPA rank within their major cohort and class.

    GET param ``xh`` (student id).  GPA is stored on the model as a string;
    the literal "init" means it has not been computed yet.  Ranks are
    1-based; peers whose GPA is still "init" are tallied separately in
    nMajorCount / nClassCount so the client can show how many are unranked.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward unchanged to the alternate API.
        res = requests.get(url=myconfig.otherapi+"/info/position?xh=" + request.GET.get("xh"))
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
    #print(request)
    xh = request.GET.get("xh")
    if xh is None:
        return HttpResponse(json.dumps({'err':'参数不全'}, ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
    if not Students.objects.filter(studentId=int(xh)):
        return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
    else:
        stu = Students.objects.get(studentId=int(xh))
        majorName = stu.majorName
        className = stu.className
        # Cohort size: same major AND same two-digit student-id prefix
        # (presumably the entry year — TODO confirm the id scheme).
        majorNum = Students.objects.filter(majorName=majorName,studentId__startswith=int(xh[0:2])).all().count()
        classNum = Students.objects.filter(className=className).all().count()
        if stu.gpa == "init":
            # GPA not computed yet: rank fields are placeholders.
            gpa = "init"
            return HttpResponse(json.dumps({'gpa': gpa,'majorCount':0,'classCount':0,'majorNum':majorNum,'classNum':classNum,'nMajorCount':"init",'nClassCount':"init"}, ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
        else:
            gpa = float(stu.gpa)
            majorCount = 1      # 1-based rank within the major cohort
            classCount = 1      # 1-based rank within the class
            nMajorCount = 0     # cohort members with no GPA yet
            nClassCount = 0     # classmates with no GPA yet
            # Walk peers from highest GPA down; stop at the first peer we
            # are not behind.  NOTE(review): '-gpa' orders the *string*
            # column, so the early break relies on the stored format
            # sorting like the numeric value — confirm.
            for m in Students.objects.filter(majorName=majorName).all().order_by('-gpa'):
                if m.gpa == "init" and str(m.studentId)[0:2] == xh[0:2]:
                    nMajorCount += 1
                elif m.gpa == "init" or str(m.studentId)[0:2] != xh[0:2]:
                    pass  # other intake years don't affect this ranking
                elif gpa >= float(m.gpa):
                    break
                else:
                    majorCount += 1
            for c in Students.objects.filter(className=className).all().order_by('-gpa'):
                if c.gpa == "init":
                    nClassCount += 1
                elif gpa >= float(c.gpa):
                    break
                else:
                    classCount += 1
            return HttpResponse(json.dumps({'gpa': str(gpa),'majorCount':majorCount,'nMajorCount':nMajorCount,'nClassCount':nClassCount,'classCount':classCount,'majorNum':majorNum,'classNum':classNum}, ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
def _teacher_matches(tname):
    """Build the phonebook result list for a (partial) teacher-name match."""
    return [{
        'name': s.name,
        'collegeName': s.collegeName,
        'title': s.title,
        'phoneNumber': s.phoneNumber
    } for s in Teachers.objects.filter(name__contains=tname).order_by('name')]


def searchTeacher(request):
    """Rate-limited teacher phonebook search.

    Accepts GET or POST with ``xh`` (student id) and ``tname`` (name
    substring).  Each student has a daily quota stored on the model as
    ``searchTimes`` = "<YYYY-MM-DD>,<remaining>"; class monitors get 4
    searches per day, everyone else 2.  Only a search that returns
    results consumes one unit.  When the alternate API is configured the
    request is proxied there unchanged.
    """
    myconfig = Config.objects.all().first()
    if request.method == "GET":
        xh = request.GET.get("xh")
        tname = request.GET.get("tname")
        if myconfig.apichange:
            res = requests.get(url=myconfig.otherapi + "/info/steacher?xh=" + request.GET.get("xh") + "&tname=" + request.GET.get("tname"))
            return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
    elif request.method == "POST":
        xh = request.POST.get("xh")
        tname = request.POST.get("tname")
        if myconfig.apichange:
            data = {
                'xh': request.POST.get("xh"),
                'tname': request.POST.get("tname")
            }
            res = requests.post(url=myconfig.otherapi + "/info/steacher", data=data)
            return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
    if xh is None or tname is None:
        return HttpResponse(json.dumps({'err': '参数不全'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if not Students.objects.filter(studentId=int(xh)):
        return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    date = datetime.datetime.now().strftime('%Y-%m-%d')
    stu = Students.objects.filter(studentId=int(xh))
    thisStu = Students.objects.get(studentId=int(xh))
    lastTime = thisStu.searchTimes.split(',')[0]
    remainTimes = thisStu.searchTimes.split(',')[1]
    if lastTime == date:
        # Same day: enforce the remaining quota.
        if remainTimes == '0':
            return HttpResponse(json.dumps({'err': '同学,你今天的查询次数已满哦~'}, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
        searchList = _teacher_matches(tname)
        writeLog('【%s】%s学号查询[%s]' % (datetime.datetime.now().strftime('%H:%M:%S'), xh, tname))
        if len(searchList) != 0:
            nremainTimes = int(remainTimes) - 1  # only successful searches consume quota
            stu.update(searchTimes=lastTime + ',' + str(nremainTimes))
        else:
            nremainTimes = int(remainTimes)
        return HttpResponse(json.dumps({'count': len(searchList), 'result': searchList, 'times': nremainTimes}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    # First search of a new day: reset the quota (monitors get more), then
    # run the search.  This first search is not deducted — matches the
    # original behaviour.
    nremainTimes = '4' if thisStu.classMonitor == 1 else '2'
    stu.update(searchTimes=date + ',' + nremainTimes)
    searchList = _teacher_matches(tname)
    writeLog('【%s】%s学号查询[%s]' % (datetime.datetime.now().strftime('%H:%M:%S'), xh, tname))
    return HttpResponse(json.dumps({'count': len(searchList), 'result': searchList, 'times': int(nremainTimes)}, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def searchExcept(request):
    """Forward a phonebook correction/feedback report to the admin via ServerChan.

    POST params: xh, tname, college, content.  When the alternate API is
    enabled the report is proxied there instead.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        data = {
            'xh':request.POST.get("xh"),
            'tname':request.POST.get("tname"),
            # NOTE(review): the proxy payload reads POST key "collegeName",
            # but the local branch below reads "college" — confirm which key
            # the client actually sends; one of the two likely gets None.
            'collegeName':request.POST.get("collegeName"),
            'content':request.POST.get("content")
        }
        res = requests.post(url=myconfig.otherapi+"/info/scallback",data=data)
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
    xh = request.POST.get("xh")
    tname = request.POST.get("tname")
    collegeName = request.POST.get("college")
    content = request.POST.get("content")
    # ServerChan push key comes from the module-level ``config`` dict, not
    # the DB Config row used above.
    ServerChan = config["ServerChan"]
    text = "黄页反馈"
    if ServerChan == "none":
        # Feedback disabled by configuration.
        return HttpResponse(json.dumps({'err':'反馈失败,管理员未打开反馈接口'}, ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
    else:
        # Fire-and-forget push; the response body is not checked.
        requests.get(ServerChan + 'text=' + text + '&desp=' + str(xh) + '\n' + str(tname) + str(collegeName) + '\n' + str(content))
        return HttpResponse(json.dumps({'msg':'反馈成功'}, ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
def classGrades(request):
    """Export a whole class's term grades as an .xlsx download.

    GET params: ``className`` and ``yt`` ("<year><term>", e.g. "20191").
    Builds the course-column list from the most recently updated student's
    cached grade file, collects every classmate's cached grades, writes an
    Excel workbook under data/classes/ and streams it back as an attachment.
    """
    myconfig = Config.objects.all().first()
    if myconfig.apichange:
        # Proxy mode: forward unchanged to the alternate API.
        res = requests.get(url=myconfig.otherapi+"/info/classgrades?className=" + request.GET.get("className") + "&yt=" + request.GET.get("yt"))
        return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
    className = request.GET.get("className")
    yt = request.GET.get("yt")
    year = yt[0:4]   # NOTE: year/term are parsed but never used below
    term = yt[4:5]
    studentIdList = []
    for i in Students.objects.filter(className=className).order_by("studentId"):
        studentIdList.append(i.studentId)
    res = []
    lastCourses = []   # column headers: graded courses of a reference student
    try:
        # Use the most recently updated classmate's cache as the reference
        # course list; skip electives ("通识教育任选"/"无") and non-regular exams.
        lastStu = Students.objects.filter(className=className).order_by("-updateTime")[0].studentId
        with open('data/' + str(lastStu)[0:2] + '/' + str(lastStu) + '/Grades-' + yt + '.json') as l:
            lastReq = json.loads(l.read())
            for course in lastReq.get("course"):
                if course.get("courseNature") != "通识教育任选" and course.get("courseNature") != "无" and course.get("gradeNature") == "正常考试":
                    lastCourses.append(course.get("courseTitle"))
    except:
        # Reference student's cache missing/unreadable: fall back to the
        # second most recently updated classmate.  NOTE(review): bare
        # except also hides an IndexError when the class has < 2 students.
        lastStu = Students.objects.filter(className=className).order_by("-updateTime")[1].studentId
        with open('data/' + str(lastStu)[0:2] + '/' + str(lastStu) + '/Grades-' + yt + '.json') as l:
            lastReq = json.loads(l.read())
            for course in lastReq.get("course"):
                if course.get("courseNature") != "通识教育任选" and course.get("courseNature") != "无" and course.get("gradeNature") == "正常考试":
                    lastCourses.append(course.get("courseTitle"))
    for stu in studentIdList:
        nowUrl = 'data/' + str(stu)[0:2] + '/' + str(stu) + '/Grades-' + yt + '.json'
        try:
            with open(nowUrl,mode='r',encoding='UTF-8') as f:
                stuReq = json.loads(f.read())
                stuRes = {
                    'name':stuReq.get("name"),
                    'xh':stuReq.get("studentId"),
                    'grades':[{
                        'n':item.get("courseTitle"),
                        'g':item.get("grade")
                    }for item in stuReq["course"] if item.get("courseNature") != "通识教育任选" and item.get("courseNature") != "无" and item.get("gradeNature") == "正常考试"]
                }
                res.append(stuRes)
        except:
            # Student has no cached grade file for this term: emit an empty row.
            res.append({'name':Students.objects.get(studentId=int(str(stu))).name,'xh':str(stu),'grades':[]})
    result = {'lastCourses':lastCourses,'res':res}
    writeToExcel(result,'data/classes/'+className+'.xlsx')
    try:
        file = open('data/classes/'+className+'.xlsx', 'rb')
    except:
        return HttpResponse(json.dumps({'error': "文件不存在"}, ensure_ascii=False),
                    content_type="application/json,charset=utf-8")
    # FileResponse takes ownership of the handle and closes it when done.
    response = FileResponse(file)
    response['Content-Type'] = 'application/octet-stream'
    response["Content-Disposition"] = "attachment; filename*=UTF-8''{}".format(escape_uri_path(className)+'.xlsx')
    return response
def book_search(request):
    """Proxy a library catalogue search (GET: type, content, page) to the Search helper."""
    query_type = request.GET.get("type")
    query_content = request.GET.get("content")
    query_page = request.GET.get("page")
    payload = Search().search_book(query_type, query_content, query_page)
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def book_detail(request):
    """Look up a single catalogue record by its MARC id (GET: marc)."""
    marc_id = request.GET.get("marc")
    payload = Search().book_detail(marc_id)
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_info(request):
    """Log in to the library portal (POST: xh, ppswd) and return account info."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).get_info()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_list(request):
    """Log in to the library portal (POST: xh, ppswd) and return currently borrowed books."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).book_list()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_hist(request):
    """Log in to the library portal (POST: xh, ppswd) and return borrowing history."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).book_hist()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_paylist(request):
    """Log in to the library portal (POST: xh, ppswd) and return the fee list."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).paylist()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def library_paydetail(request):
    """Log in to the library portal (POST: xh, ppswd) and return fee details."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    session_cookies = PLogin().login(account, password)
    payload = Personal(session_cookies).paydetail()
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def school_card(request):
    """Log in to the campus portal (POST: xh, ppswd) and return a page of campus-card transactions."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    page_no = request.POST.get("page")
    portal_cookies = PLogin().plogin(account, password)
    payload = Infos(portal_cookies).school_card(page_no)
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def financial(request):
    """Log in to the campus portal (POST: xh, ppswd) and return a page of financial-aid records."""
    account = request.POST.get("xh")
    password = request.POST.get("ppswd")
    page_no = request.POST.get("page")
    portal_cookies = PLogin().plogin(account, password)
    payload = Infos(portal_cookies).financial(page_no)
    return HttpResponse(json.dumps(payload, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def award(request):
    """Scrape scholarship/award records for ``keyword`` from the xcctw.cn mini-site.

    Accepts GET or POST param ``keyword``.  Each result card is parsed
    positionally from its six ``.f16`` text fields (label prefixes of 3 or
    5 characters are stripped).  Returns a JSON list, or an error object
    when the site shows its "no results" message box.
    """
    if request.method == "POST":
        keyword = request.POST.get("keyword")
    else:
        keyword = request.GET.get("keyword")
    # NOTE(review): keyword is interpolated unencoded; non-ASCII or '&'
    # in the query may need URL-quoting — confirm against the site.
    url = ("http://xcctw.cn/app/index.php?keyword=" + keyword +
           "&i=2&c=entry&a=site&do=fm&m=yoby_cha&rid=13")
    res = requests.get(url=url)
    soup = BeautifulSoup(res.text, 'lxml')
    if soup.find(class_="weui-msgbox"):
        return HttpResponse(json.dumps({'err': "没有查询到结果"}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    cells = soup.find_all(class_="weui-cell__bd")  # was ``list``: shadowed the builtin
    result = []
    for cell in cells:  # original rebound its loop variable ``items`` to the dict
        fields = cell.find_all(class_="f16")  # look up once instead of six times
        result.append({
            'name': (fields[0].get_text()[3:]).strip(),
            'studentId': (fields[1].get_text()[3:]).strip(),
            'college': (fields[2].get_text()[5:]).strip(),
            'major': (fields[3].get_text()[3:]).strip(),
            'detail': (fields[4].get_text()[5:]).strip(),
            'number': (fields[5].get_text()[5:]).strip()
        })
    return HttpResponse(json.dumps(result, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def get_maps(request):
    """Fun-facts endpoint: how many *other* students share this student's
    name, birthday (full and month/day), high school and hometown —
    overall and within their class.  Param ``xh`` via GET or POST."""
    if request.method == "GET":
        xh = request.GET.get("xh")
    elif request.method == "POST":
        xh = request.POST.get("xh")
    total = Students.objects.all().count()
    me = Students.objects.get(studentId=int(xh))
    month_day = me.birthDay[5:]  # "MM-DD" slice of "YYYY-MM-DD"

    def others(**filters):
        # Count matching students, excluding the requester themself.
        return Students.objects.filter(**filters).count() - 1

    res = {
        'allIn': total,
        'name': others(name=me.name),
        'birthDay': others(birthDay=me.birthDay),
        'birthDayAndMonth': others(birthDay__contains=month_day),
        'classBirthDay': others(className=me.className, birthDay=me.birthDay),
        'classBirthDayAndMonth': others(className=me.className, birthDay__contains=month_day),
        'graduationSchool': others(graduationSchool=me.graduationSchool),
        'classGraduationSchool': others(className=me.className, graduationSchool=me.graduationSchool),
        'domicile': others(domicile=me.domicile),
        'places': me.domicile,
        'classDomicile': others(className=me.className, domicile=me.domicile)
    }
    return HttpResponse(json.dumps(res, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def isMonitor(request):
    """Report whether the student identified by GET param ``xh`` is a class monitor."""
    xh = request.GET.get("xh")
    if xh is None or not xh.isdigit():
        # BUG FIX: int(None)/int("abc") previously raised and produced a 500.
        return HttpResponse(json.dumps({"err": "参数不全"}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    if Students.objects.filter(studentId=int(xh)):
        thisStu = Students.objects.get(studentId=int(xh))
        # classMonitor is stored as 1/0 on the model.
        res = {"code": 200, "monitor": thisStu.classMonitor == 1}
        return HttpResponse(json.dumps(res, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    return HttpResponse(json.dumps({"err": "没有这个同学"}, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
def freetime(request):
    """Return a student's free (mode "1") or occupied (other modes) class
    periods per weekday for a given week.

    GET params: ``xh`` (required), ``term``/``weeks``/``mode`` (optional,
    defaulting to the config's current term/week and mode "1").  Reads the
    cached schedule JSON written by the schedule endpoint; periods are
    1-12 per day, Mon-Fri.
    """
    myconfig = Config.objects.all().first()
    xh = request.GET.get("xh")
    term = request.GET.get("term") if request.GET.get("term") is not None else myconfig.nSchedule
    weeks = request.GET.get("weeks") if request.GET.get("weeks") is not None else myconfig.nowweek
    mode = request.GET.get("mode") if request.GET.get("mode") is not None else "1"
    datafile = 'data/' + xh[0:2] + "/" + xh + "/" + "Schedules-" + term + ".json"
    fullSections = [1,2,3,4,5,6,7,8,9,10,11,12]
    if not os.path.exists(datafile):
        return HttpResponse(json.dumps({"err":"原因:1.该同学没有使用“西院助手”小程序。2.没有在小程序请求过该学期课程信息。3.还未到该学期"}, ensure_ascii=False),
                            content_type="application/json,charset=utf-8")
    with open(datafile, mode='r', encoding='UTF-8') as f:
        schedule_data = json.loads(f.read())
    # Map the schedule's weekday codes to response keys (was a five-way
    # copy-pasted if/elif chain).
    day_keys = {"1": "Mon", "2": "Tue", "3": "Wed", "4": "Thu", "5": "Fri"}
    res = {"Mon": [], "Tue": [], "Wed": [], "Thu": [], "Fri": []}
    week_no = int(weeks)  # hoisted: was re-converted for every course
    for item in schedule_data["normalCourse"]:
        day = day_keys.get(item["courseWeekday"])
        if day is not None and week_no in item["includeWeeks"]:
            res[day].extend(item["includeSection"])
    if mode == "1":
        # Invert: report the periods NOT occupied by a course.
        for day in res:
            res[day] = diffList(fullSections, res[day])
    return HttpResponse(json.dumps(res, ensure_ascii=False),
                        content_type="application/json,charset=utf-8")
'year':request.POST.get("year"), # 'term':request.POST.get("term"), # 'refresh':request.POST.get("refresh") # } # res = requests.post(url=myconfig.otherapi+"/info/grade",data=data) # return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False), # content_type="application/json,charset=utf-8") # if myconfig.maintenance: # return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False), # content_type="application/json,charset=utf-8") # # if mpconfig["gradebad"]: # # return HttpResponse(json.dumps({'err':'当前教务系统无法请求成绩,请待学校修复!'}, ensure_ascii=False), # # content_type="application/json,charset=utf-8") # if request.method == 'POST': # if request.POST: # xh = request.POST.get("xh") # pswd = request.POST.get("pswd") # year = request.POST.get("year") # term = request.POST.get("term") # refresh = request.POST.get("refresh") # else: # return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False), # content_type="application/json,charset=utf-8") # if not Students.objects.filter(studentId=int(xh)): # content = ('【%s】[%s]未登录访问成绩' % (datetime.datetime.now().strftime('%H:%M:%S'), xh)) # writeLog(content) # return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False), # content_type="application/json,charset=utf-8") # else: # stu = Students.objects.get(studentId=int(xh)) # if refresh == "no": # filename = ('GradesN-%s%s' % (str(year), str(term))) # cache = cacheData(xh, filename) # if cache is not None: # # print('cache') # print('【%s】查看了%s-%s的成绩缓存' % (stu.name, year, term)) # return HttpResponse(json.dumps(cache, ensure_ascii=False), # content_type="application/json,charset=utf-8") # else: # pass # try: # startTime = time.time() # print('【%s】查看了%s-%s的成绩' % (stu.name, year, term)) # JSESSIONID = str(stu.JSESSIONID) # route = str(stu.route) # cookies_dict = { # 'JSESSIONID': JSESSIONID, # 'route': route # } # cookies = requests.utils.cookiejar_from_dict(cookies_dict) # person = GetInfo(base_url=base_url, cookies=cookies) # 
grade = person.get_grade2(year, term) # if grade.get("err") == "请求超时,鉴于教务系统特色,已帮你尝试重新登录,重试几次,还不行请麻烦你自行重新登录,或者在关于里面反馈!当然,也可能是教务系统挂了~": # update_cookies(xh, pswd) # return HttpResponse(json.dumps({'err':grade.get("err")}, ensure_ascii=False), content_type="application/json,charset=utf-8") # if grade.get("err") == "看起来你这学期好像还没有出成绩,点击顶栏也看看以前的吧~": # return HttpResponse(json.dumps({'err':grade.get("err")}, ensure_ascii=False), content_type="application/json,charset=utf-8") # Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa")) # endTime = time.time() # spendTime = endTime - startTime # content = ('【%s】[%s]访问了%s-%s的成绩,耗时%.2fs' % ( # datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime)) # writeLog(content) # filename = ('GradesN-%s%s' % (str(year), str(term))) # newData(xh, filename, json.dumps(grade, ensure_ascii=False)) # # print('write') # return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8") # except Exception as e: # # print(e) # if "Connection broken" in str(e) or 'ECONNRESET' in str(e): # # return get_grade2(request) # return HttpResponse(json.dumps({'err':'更新出现问题,请待教务系统修复'}, ensure_ascii=False), # content_type="application/json,charset=utf-8") # else: # content = ('【%s】[%s]访问成绩出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name)) # writeLog(content) # if str(e) == 'Expecting value: line 1 column 1 (char 0)': # return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False), # content_type="application/json,charset=utf-8") # if str(e) != 'Expecting value: line 3 column 1 (char 4)': # traceback.print_exc() # return mywarn("成绩请求错误",str(e),xh,pswd) # sta = update_cookies(xh, pswd) # person = GetInfo(base_url=base_url, cookies=sta) # grade = person.get_grade2(year, term) # if grade.get("gpa") == "" or grade.get("gpa") is None: # return HttpResponse(json.dumps({'err':'平均学分绩点获取失败,请重试~'}, ensure_ascii=False), # 
content_type="application/json,charset=utf-8") # Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa")) # filename = ('GradesN-%s%s' % (str(year), str(term))) # newData(xh, filename, json.dumps(grade, ensure_ascii=False)) # return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8") # else: # return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False), # content_type="application/json,charset=utf-8") # if mpconfig["schedulebad"]: # return HttpResponse(json.dumps({'err':'当前教务系统无法请求课表,请待学校修复!'}, ensure_ascii=False), # content_type="application/json,charset=utf-8") # print('cache') # print('write') # return get_schedule(request) #print(request) | 2.178896 | 2 |
hosting/app.py | thesunRider/Lasec | 0 | 6618488 | from flask import Flask
app = Flask(__name__)

# Shared in-memory state for the route handlers below.
intruder_detected = False
# BUG FIX: this flag was named ``device_status``, which the view
# ``def device_status()`` below rebound at import time — the boolean was
# lost and the view returned the function object itself (not a valid
# Flask response).  Renamed so flag and view no longer collide.
device_enabled = True


@app.route("/register_intruder")
def register_intruder():
    """Sensor side: raise the intruder flag."""
    global intruder_detected
    print("Registered intruder")
    intruder_detected = True
    return '{"status":"ok"}'


@app.route("/get_intruder")
def get_intruder():
    """App side: read and clear the intruder flag (one-shot)."""
    global intruder_detected
    print("Called from android app")
    intruder_return = intruder_detected
    intruder_detected = False
    return '{"status":"' + str(intruder_return) + '"}'


@app.route("/device_status")
def device_status():
    """Report whether the device is currently enabled, as JSON like the other routes."""
    return '{"status":"' + str(device_enabled) + '"}'


@app.route("/device_on")
def device_on():
    """Enable the device."""
    global device_enabled
    device_enabled = True
    return '{"status":"ok"}'


@app.route("/device_off")
def device_off():
    """Disable the device."""
    global device_enabled
    device_enabled = False
    return '{"status":"ok"}'
app = Flask(__name__)

# Shared in-memory state for the route handlers below.
intruder_detected = False
# NOTE(review): this name is rebound by the view function ``device_status``
# defined below, so the boolean flag is lost as soon as the module loads.
device_status = True

@app.route("/register_intruder")
def register_intruder():
    # Sensor side: raise the intruder flag.
    global intruder_detected
    print("Registered intruder")
    intruder_detected = True
    return '{"status":"ok"}'

@app.route("/get_intruder")
def get_intruder():
    # App side: read and clear the intruder flag (one-shot).
    global intruder_detected
    print("Called from android app")
    intruder_return = intruder_detected
    intruder_detected = False
    return '{"status":"' + str(intruder_return) + '"}'

@app.route("/device_status")
def device_status():
    # NOTE(review): ``device_status`` here now refers to this very function
    # (the def rebound the module-level flag), so this returns the function
    # object — not a valid Flask response.  Rename either the flag or the
    # view to fix.
    global device_status
    return device_status

@app.route("/device_on")
def device_on():
    # Sets the (shadowed — see note above) device flag to True.
    global device_status
    device_status = True
    return '{"status":"ok"}'

@app.route("/device_off")
def device_off():
    # Sets the (shadowed — see note above) device flag to False.
    global device_status
    device_status = False
    return '{"status":"ok"}'
Rankcard/__init__.py | akoses/Python-discord-rankcard | 0 | 6618489 | """
Module for Rankcard
"""
from .Main import *
| """
Module for Rankcard
"""
from .Main import *
| en | 0.324556 | Module for Rankcard | 0.962614 | 1 |
chalicelib/ncaaf_espn.py | joshcvt/resetter | 2 | 6618490 | <reponame>joshcvt/resetter<filename>chalicelib/ncaaf_espn.py
#!/usr/bin/env python
import urllib.request, urllib.error, urllib.parse, json, traceback, time
from datetime import datetime, timedelta
from .reset_lib import joinOr, sentenceCap, NoGameException, NoTeamException, toOrdinal
from .ncaa_espn_lib import ncaaNickDict, displayOverrides, iaa, validFbSet
SCOREBOARD_ROOT_URL = "http://site.api.espn.com/apis/site/v2/sports/football/college-football/scoreboard"
# start with this to get weeks, then customize for this week and full scoreboard
#http://site.api.espn.com/apis/site/v2/sports/football/college-football/scoreboard?week=4&groups=80&limit=388&1577314600
# global for caching
__MOD = {}
# cache time for scoreboard
CACHE_INTERVAL = timedelta(minutes=1)
def get_scoreboard(file=None, iaa=False, debug=False):
    """Get the current week's scoreboard from ESPN, or from ``file`` for testing.

    Looks up the current week in the season calendar, then fetches the
    full FBS (or FCS when ``iaa`` is True) scoreboard for that week.

    Raises NoGameException on HTTP 404 (season is probably over).
    """
    FBS_GROUPS = "80"
    FCS_GROUPS = "81"
    SB_FORMAT_TAIL = '?week=%s&groups=%s&limit=388&%s'
    if file:
        print("Using scoreboard from file: " + file)
        with open(file) as f:
            return json.load(f)
    if debug:
        print("Root: " + SCOREBOARD_ROOT_URL)
    scoreboardWeekUrl = "unconstructed"  # shown in the 404 message if we fail early
    try:
        # First call: season calendar, to find which week we are in.
        with urllib.request.urlopen(SCOREBOARD_ROOT_URL) as fh:
            sb = json.load(fh)
        now = datetime.now()
        for week in sb['leagues'][0]['calendar'][0]['entries']:
            if datetime.strptime(week['endDate'], '%Y-%m-%dT%H:%MZ') > now:
                weekValue = week['value']
                break
        groups = FCS_GROUPS if iaa else FBS_GROUPS
        scoreboardWeekUrl = SCOREBOARD_ROOT_URL + SB_FORMAT_TAIL % (
            str(weekValue), groups, now.timestamp().__str__())
        if debug:
            print("URL: " + scoreboardWeekUrl)
        # Second call: the actual week scoreboard.
        with urllib.request.urlopen(scoreboardWeekUrl) as fh:
            sb = json.load(fh)
    except urllib.error.HTTPError as e:
        if e.code == 404:
            raise NoGameException("Scoreboard HTTP 404. This probably means the season is over. Root = " + SCOREBOARD_ROOT_URL + ", week " + scoreboardWeekUrl + "\n")
        raise
    # BUG FIX: the original had ``finally: fh.close()`` which (a) raised a
    # NameError masking the real error if the first urlopen failed before
    # ``fh`` was bound, and (b) redundantly closed a handle the ``with``
    # blocks already close.  The ``except Exception as e: raise e`` clause
    # was also dropped — it only rewrote tracebacks.
    return sb
def find_game(sb, team):
    """Return the first scoreboard event matching ``team``, or None."""
    return next((event for event in sb['events'] if test_game(event, team)), None)
def test_game(game,team):
"""Broken out so we can test for all kinds of variations once we build the variation list."""
return (team.lower() in [game["competitions"][0]["competitors"][0]["team"]["location"].lower(),
game["competitions"][0]["competitors"][1]["team"]["location"].lower(),
game["competitions"][0]["competitors"][0]["team"]["displayName"].lower(),
game["competitions"][0]["competitors"][1]["team"]["displayName"].lower(),
game["competitions"][0]["competitors"][0]["team"]["abbreviation"].lower(),
game["competitions"][0]["competitors"][1]["team"]["abbreviation"].lower()])
def game_loc(game):
    """Human-readable location fragment, e.g. "in Columbus".
    (Stadium + city for neutral-site games would be a future improvement.)"""
    city = game["competitions"][0]["venue"]["address"]["city"]
    return "in " + city
def rank_name(team):
    """Team location prefixed with its poll rank ("#5 Georgia").
    A curatedRank of 99 means unranked — no prefix."""
    name = team["team"]["location"]
    rank = team["curatedRank"]["current"]
    if rank == 99:
        return name
    return "#" + str(rank) + " " + name
def scoreline(game):
    """Return "Leader LS, Trailer TS" with ranked team names.
    On a tie the second-listed competitor is shown first (away-first
    convention, since the home team is competitors[0])."""
    first = game["competitions"][0]["competitors"][0]
    second = game["competitions"][0]["competitors"][1]
    if int(first["score"]) > int(second["score"]):
        leader, trailer = first, second
    else:
        leader, trailer = second, first
    return "%s %s, %s %s" % (rank_name(leader), leader["score"].strip(),
                             rank_name(trailer), trailer["score"].strip())
def spaceday(game, sayToday=False):
    """Return " <Weekday>" for a game not starting today, else "" (or
    " today" when ``sayToday`` is True).

    The scoreboard start time is UTC ("...Z"); it is shifted into local
    time before comparing dates.
    """
    now, utcnow = datetime.now(), datetime.utcnow()
    # BUG FIX: the original used (utcnow - now).seconds, which mangles the
    # offset in timezones ahead of UTC — a negative timedelta normalizes
    # to days=-1 plus a large positive .seconds.  total_seconds() keeps
    # the sign.
    utcdiff = (utcnow - now).total_seconds()
    startLocal = datetime.strptime(game['competitions'][0]['startDate'],
                                   "%Y-%m-%dT%H:%MZ") - timedelta(seconds=utcdiff)
    if startLocal.date() == now.date():
        return ' today' if sayToday else ''
    return ' ' + startLocal.strftime("%A")
def status(game):
    """Build a one-sentence status line for a game: final score, scheduled
    kickoff, or in-progress score with clock/quarter.  Returns None when
    ``game`` is None.
    """
    if game is None:
        return None
    statusnode = game["competitions"][0]["status"]
    if statusnode["type"]["name"] == "STATUS_FINAL":
        status = "Final " + game_loc(game) + ", " + scoreline(game)
        if statusnode["type"]["detail"].endswith("OT)"):
            # e.g. "Final/2OT" — keep the overtime tag from the detail string.
            status += statusnode["type"]["detail"].split("/")[1]
        status += "."
    elif statusnode["type"]["name"] == "STATUS_SCHEDULED":
        # competitors[1] is the away team, [0] the home team.
        status = rank_name(game["competitions"][0]['competitors'][1]) + " plays " + rank_name(game["competitions"][0]['competitors'][0]) + " at " + game["status"]["type"]["shortDetail"].split(' - ')[1] + spaceday(game) + " " + game_loc(game) + "."
    else:
        # In progress (or any other state): lead with the score line, then
        # describe the game clock.
        status = scoreline(game)
        if statusnode["type"]["name"] == "STATUS_HALFTIME":
            status += " at halftime "
        elif statusnode["type"]["name"] == "STATUS_IN_PROGRESS" and statusnode["type"]["detail"].endswith("OT"):
            status += " in " + statusnode["type"]["detail"] + " "
        elif (statusnode["type"]["name"] == "STATUS_END_PERIOD") or ((statusnode["type"]["name"] == "STATUS_IN_PROGRESS") and (statusnode["displayClock"].strip() == "0:00")):
            status += ", end of the " + toOrdinal(statusnode["period"]) + " quarter "
        elif (statusnode["type"]["name"] == "STATUS_IN_PROGRESS") and (statusnode["displayClock"].strip() == "15:00"):
            status += ", start of the " + toOrdinal(statusnode["period"]) + " quarter "
        elif statusnode["type"]["name"] == "STATUS_IN_PROGRESS":
            status += ", " + statusnode["displayClock"].strip() + " to go in the " + toOrdinal(statusnode["period"]) + " quarter "
        else:  # unknown state: just dump its name
            status += ", " + statusnode["type"]["name"] + ' '
        status += game_loc(game) + "."
    # Dead ``if 0:`` block removed: it referenced a "gameState" schema that
    # does not exist in the ESPN feed this module parses.
    return sentenceCap(status)
def get(team,forceReload=False,debug=False,file=None):
    """Return a status sentence for *team*'s game this week.

    The FBS scoreboard is cached in the module-global __MOD and refreshed
    when forceReload is set, when CACHE_INTERVAL has elapsed, or when the
    *file* testing hook changes.  I-AA (FCS) teams always trigger a fresh
    fetch.  Nicknames are resolved through ncaaNickDict; an ambiguous
    nickname (list value) returns a "please choose" prompt instead.

    Raises:
        NoTeamException: *team* is not a recognized team.
        NoGameException: no game for the team on this week's scoreboard.
    """
    global __MOD
    tkey = team.lower().strip()
    if debug:
        print("tkey: " + tkey + ", ", end="")
    if (tkey in iaa) or (tkey in ncaaNickDict and ncaaNickDict[tkey] in iaa):
        # we're going to be lazy about caching and just always reload for I-AA games
        if debug:
            print ("I-AA load: ", end="")
        sb = get_scoreboard(iaa=True,debug=debug)
    elif tkey not in validFbSet:
        raise NoTeamException(tkey + " is not a valid team.")
    else: # main I-A schedule cycle
        # Refresh the cached scoreboard when forced, missing, stale, or when
        # it was loaded from a different test *file* than requested.
        if forceReload \
           or ("ncaafsb" not in __MOD) \
           or (("ncaafsbdt" in __MOD) and (datetime.utcnow() - __MOD["ncaafsbdt"] > CACHE_INTERVAL)) \
           or (("ncaafsb" in __MOD) and (("ncaaffile" not in __MOD) or (file != __MOD["ncaaffile"]))):
            if debug:
                print ("fresh load: ", end="")
            __MOD["ncaaffile"] = file
            __MOD["ncaafsb"] = get_scoreboard(debug=debug,file=file)
            __MOD["ncaafsbdt"] = datetime.utcnow()
        else:
            if debug:
                print ("cached: ", end="")
            pass
        sb = __MOD["ncaafsb"]
    game = find_game(sb,team)
    if game:
        return status(game)
    elif (tkey in ncaaNickDict):
        # Try again treating *team* as a nickname; a list value means the
        # nickname is shared by several schools (ambiguous).
        if (ncaaNickDict[tkey].__class__ == list):
            return "For " + team + ", please choose " + joinOr(ncaaNickDict[tkey]) + "."
        else:
            game = find_game(sb,ncaaNickDict[tkey])
            if game:
                return status(game)
    # fallthru
    ret = "No game this week for " + team
    if ret[-1] != ".":
        ret += "."
    raise NoGameException(ret)
#!/usr/bin/env python
import urllib.request, urllib.error, urllib.parse, json, traceback, time
from datetime import datetime, timedelta
from .reset_lib import joinOr, sentenceCap, NoGameException, NoTeamException, toOrdinal
from .ncaa_espn_lib import ncaaNickDict, displayOverrides, iaa, validFbSet
SCOREBOARD_ROOT_URL = "http://site.api.espn.com/apis/site/v2/sports/football/college-football/scoreboard"
# start with this to get weeks, then customize for this week and full scoreboard
#http://site.api.espn.com/apis/site/v2/sports/football/college-football/scoreboard?week=4&groups=80&limit=388&1577314600
# global for caching
__MOD = {}
# cache time for scoreboard
CACHE_INTERVAL = timedelta(minutes=1)
def get_scoreboard(file=None,iaa=False,debug=False):
    """Get scoreboard from site, or from file if specified for testing.

    Args:
        file: path to a local JSON scoreboard (testing hook); skips the network.
        iaa: when True, request the FCS (I-AA) group instead of FBS.
        debug: print the URLs as they are fetched.

    Returns:
        dict: the parsed scoreboard JSON.

    Raises:
        NoGameException: on HTTP 404 (the season is probably over).
        urllib.error.HTTPError: on any other HTTP failure.
    """
    FBS_GROUPS = "80"
    FCS_GROUPS = "81"
    SB_FORMAT_TAIL = '?week=%s&groups=%s&limit=388&%s'
    if file:
        print ("Using scoreboard from file: " + file)
        with open(file) as f:
            sb = json.load(f)
        return sb
    if debug:
        print("Root: " + SCOREBOARD_ROOT_URL)
    scoreboardWeekUrl = "unconstructed"  # placeholder for error reporting below
    try:
        # Hit the root scoreboard first to learn the season calendar, then
        # pick the first week whose end date is still in the future.
        with urllib.request.urlopen(SCOREBOARD_ROOT_URL) as fh:
            sb = json.load(fh)
        now = datetime.now()
        for week in sb['leagues'][0]['calendar'][0]['entries']:
            if datetime.strptime(week['endDate'],'%Y-%m-%dT%H:%MZ') > now:
                weekValue = week['value']
                break
        groups = FCS_GROUPS if iaa else FBS_GROUPS
        scoreboardWeekUrl = SCOREBOARD_ROOT_URL + SB_FORMAT_TAIL % (str(weekValue), groups, now.timestamp().__str__())
        if debug:
            print("URL: " + scoreboardWeekUrl)
        # Fetch the full scoreboard for that week/group.
        with urllib.request.urlopen(scoreboardWeekUrl) as fh:
            sb = json.load(fh)
    except urllib.error.HTTPError as e:
        if e.code == 404:
            raise NoGameException("Scoreboard HTTP 404. This probably means the season is over. Root = " + SCOREBOARD_ROOT_URL + ", week " + scoreboardWeekUrl + "\n")
        else:
            raise
    # NOTE(review): the old "finally: fh.close()" was removed -- the "with"
    # blocks already close the handles, and fh was unbound (masking the real
    # error with a NameError) when the failure happened before urlopen.
    # The no-op "except Exception as e: raise e" was removed as well.
    return sb
def find_game(sb,team):
"""Passed scoreboard dict and team string, get game."""
for event in sb['events']:
if test_game(event,team):
return event
return None
def test_game(game,team):
"""Broken out so we can test for all kinds of variations once we build the variation list."""
return (team.lower() in [game["competitions"][0]["competitors"][0]["team"]["location"].lower(),
game["competitions"][0]["competitors"][1]["team"]["location"].lower(),
game["competitions"][0]["competitors"][0]["team"]["displayName"].lower(),
game["competitions"][0]["competitors"][1]["team"]["displayName"].lower(),
game["competitions"][0]["competitors"][0]["team"]["abbreviation"].lower(),
game["competitions"][0]["competitors"][1]["team"]["abbreviation"].lower()])
def game_loc(game):
    """Return the location phrase "in <city>" for the game's venue."""
    city = game["competitions"][0]["venue"]["address"]["city"]
    return "in " + city
# TODO: for neutral-site games it would be nicer to name the stadium too.
def rank_name(team):
    """Team location name, prefixed with "#<rank> " when ranked.

    The feed reports 99 for "unranked", in which case the bare name is used.
    """
    name = team["team"]["location"]
    rank = team["curatedRank"]['current']
    if rank == 99:
        return name
    return "#%s %s" % (rank, name)

def scoreline(game):
    """Format "<leader> <score>, <trailer> <score>" for the game.

    The higher score is listed first; on a tie the second competitor
    (away team, by feed convention) leads.
    """
    competitors = game["competitions"][0]["competitors"]
    first, second = competitors[0], competitors[1]
    if int(first["score"]) > int(second["score"]):
        leader, trailer = first, second
    else:
        leader, trailer = second, first
    return "%s %s, %s %s" % (rank_name(leader), leader["score"].strip(),
                             rank_name(trailer), trailer["score"].strip())
def spaceday(game, sayToday=False):
    """Leading-space phrase for the kickoff day.

    Returns '' (or ' today' when *sayToday*) for games starting today in
    local time, otherwise ' <Weekday>'.
    """
    now = datetime.now()
    utcdiff = (datetime.utcnow() - now).seconds  # crude UTC->local offset
    start_utc = datetime.strptime(game['competitions'][0]['startDate'],
                                  "%Y-%m-%dT%H:%MZ")
    start_local = start_utc - timedelta(seconds=utcdiff)
    if start_local.date() != now.date():
        return ' ' + start_local.strftime("%A")
    return ' today' if sayToday else ''
def status(game):
    """Compose a one-sentence status line for *game* (None -> None).

    Covers finals (with overtime tag), scheduled games, and in-progress
    states: halftime, overtime, end/start of a quarter, or a running clock.
    """
    if game == None:
        return None
    statusnode = game["competitions"][0]["status"]
    if statusnode["type"]["name"] == "STATUS_FINAL":
        status = "Final " + game_loc(game) + ", " + scoreline(game)
        # detail looks like "Final/OT)" / "Final/2OT)" -- append the OT tag
        if statusnode["type"]["detail"].endswith("OT)"):
            status += statusnode["type"]["detail"].split("/")[1]
        status += "."
    elif statusnode["type"]["name"] == "STATUS_SCHEDULED":
        # "<away> plays <home> at <time><day> in <city>."
        status = rank_name(game["competitions"][0]['competitors'][1]) + " plays " + rank_name(game["competitions"][0]['competitors'][0]) + " at " + game["status"]["type"]["shortDetail"].split(' - ')[1] + spaceday(game) + " " + game_loc(game) + "."
    else:
        status = scoreline(game)
        if statusnode["type"]["name"] == "STATUS_HALFTIME":
            status += " at halftime "
        elif statusnode["type"]["name"] == "STATUS_IN_PROGRESS" and statusnode["type"]["detail"].endswith("OT"):
            status += " in " + statusnode["type"]["detail"] + " "
        elif (statusnode["type"]["name"] == "STATUS_END_PERIOD") or ((statusnode["type"]["name"] == "STATUS_IN_PROGRESS") and (statusnode["displayClock"].strip() == "0:00")):
            status += ", end of the " + toOrdinal(statusnode["period"]) + " quarter "
        elif (statusnode["type"]["name"] == "STATUS_IN_PROGRESS") and (statusnode["displayClock"].strip() == "15:00"):
            status += ", start of the " + toOrdinal(statusnode["period"]) + " quarter "
        elif statusnode["type"]["name"] == "STATUS_IN_PROGRESS":
            status += ", " + statusnode["displayClock"].strip() + " to go in the " + toOrdinal(statusnode["period"]) + " quarter "
        else: # unknown state: just dump the raw name
            status += ", " + statusnode["type"]["name"] + ' '
        status += game_loc(game) + "."
    # NOTE(review): removed an unreachable "if 0:" block that referenced keys
    # ("gameState", "away", "home") this ESPN feed does not produce.
    return sentenceCap(status)
def get(team,forceReload=False,debug=False,file=None):
    """Return a status sentence for *team*'s game this week.

    The FBS scoreboard is cached in the module-global __MOD and refreshed
    when forceReload is set, when CACHE_INTERVAL has elapsed, or when the
    *file* testing hook changes.  I-AA (FCS) teams always trigger a fresh
    fetch.  Nicknames are resolved through ncaaNickDict; an ambiguous
    nickname (list value) returns a "please choose" prompt instead.

    Raises:
        NoTeamException: *team* is not a recognized team.
        NoGameException: no game for the team on this week's scoreboard.
    """
    global __MOD
    tkey = team.lower().strip()
    if debug:
        print("tkey: " + tkey + ", ", end="")
    if (tkey in iaa) or (tkey in ncaaNickDict and ncaaNickDict[tkey] in iaa):
        # we're going to be lazy about caching and just always reload for I-AA games
        if debug:
            print ("I-AA load: ", end="")
        sb = get_scoreboard(iaa=True,debug=debug)
    elif tkey not in validFbSet:
        raise NoTeamException(tkey + " is not a valid team.")
    else: # main I-A schedule cycle
        # Refresh the cached scoreboard when forced, missing, stale, or when
        # it was loaded from a different test *file* than requested.
        if forceReload \
           or ("ncaafsb" not in __MOD) \
           or (("ncaafsbdt" in __MOD) and (datetime.utcnow() - __MOD["ncaafsbdt"] > CACHE_INTERVAL)) \
           or (("ncaafsb" in __MOD) and (("ncaaffile" not in __MOD) or (file != __MOD["ncaaffile"]))):
            if debug:
                print ("fresh load: ", end="")
            __MOD["ncaaffile"] = file
            __MOD["ncaafsb"] = get_scoreboard(debug=debug,file=file)
            __MOD["ncaafsbdt"] = datetime.utcnow()
        else:
            if debug:
                print ("cached: ", end="")
            pass
        sb = __MOD["ncaafsb"]
    game = find_game(sb,team)
    if game:
        return status(game)
    elif (tkey in ncaaNickDict):
        # Try again treating *team* as a nickname; a list value means the
        # nickname is shared by several schools (ambiguous).
        if (ncaaNickDict[tkey].__class__ == list):
            return "For " + team + ", please choose " + joinOr(ncaaNickDict[tkey]) + "."
        else:
            game = find_game(sb,ncaaNickDict[tkey])
            if game:
                return status(game)
    # fallthru
    ret = "No game this week for " + team
    if ret[-1] != ".":
        ret += "."
raise NoGameException(ret) | en | 0.846053 | #!/usr/bin/env python # start with this to get weeks, then customize for this week and full scoreboard #http://site.api.espn.com/apis/site/v2/sports/football/college-football/scoreboard?week=4&groups=80&limit=388&1577314600 # global for caching # cache time for scoreboard Get scoreboard from site, or from file if specified for testing. # scoreboardWeekUrl = SCOREBOARD_ROOT_URL + "?week=" + str(weekValue) + "&groups=" + FBS_GROUPS + "&limit=388&" + now.timestamp().__str__() Passed scoreboard dict and team string, get game. Broken out so we can test for all kinds of variations once we build the variation list. # probably want to get stadium and city for neutral-site games #return # could also be displayName which is full name #if pref.lower() in displayOverrides: pref = displayOverrides[raw.lower()] # flip home first if they're leading, otherwise away-first convention if it's tied # just dump it # we're going to be lazy about caching and just always reload for I-AA games # main I-A schedule cycle # fallthru | 2.842952 | 3 |
mediabrowser.py | nickw444/MediaBrowser | 0 | 6618491 | <filename>mediabrowser.py
import os
from flask import Flask, render_template, send_file, request, after_this_request, redirect, url_for, safe_join
import re
from config import MAX_FOLDER_DL_SIZE_BYTES, IGNORE_FILES, ROOT_PATHS
app = Flask(__name__)
def get_size(start_path):
    """Total size in bytes of every file under *start_path* (recursive)."""
    return sum(
        os.path.getsize(os.path.join(dirpath, name))
        for dirpath, _dirnames, filenames in os.walk(start_path)
        for name in filenames
    )
import zipfile
def zipdir(path, ziph):
    """Add every file under *path* to the open ZipFile handle *ziph*.

    Archive names are made relative by stripping *path* from each walk root.
    """
    for root, _dirs, filenames in os.walk(path):
        rel_root = root.replace(path, '')
        for name in filenames:
            full = os.path.join(root, name)
            ziph.write(full, arcname=os.path.join(rel_root, name))
@app.route('/')
def index():
    """Render the landing page listing the configured media root paths."""
    return render_template('index.html', items=ROOT_PATHS)
@app.route('/<int:id>/<path:path>')
@app.route('/<int:id>/')
def browse(id, path=''):
    """Browse one media root: list a directory, serve a file, or (with
    ?download) stream a zip archive of a directory.

    Args:
        id: index into ROOT_PATHS selecting the share root.
        path: path relative to that root (sanitised via safe_join).
    """
    path = path.replace('../', '')  # belt-and-braces; safe_join also guards
    real_path = safe_join(ROOT_PATHS[id].path, path)
    items = {
        'dirs': [],
        'files': [],
    }
    if os.path.isfile(real_path):
        # Plain file: stream it (as an attachment when ?download is present).
        return send_file(real_path,
                         as_attachment=request.args.get('download'))
    if request.args.get('download'):
        # Directory download: zip it, send it, delete the temp zip afterwards.
        folder_size = get_size(real_path)
        if folder_size > MAX_FOLDER_DL_SIZE_BYTES:
            print("TOO LARGE YO")
            # NOTE(review): fixed the previous message literal, which
            # contained a stray quote + line continuation ("{} ''bytes").
            return "Folder too large. Exceeds maximum dl of {} bytes".format(
                MAX_FOLDER_DL_SIZE_BYTES)
        print("Request for DL")
        zipfilename = 'static/zips/{}.zip'.format(
            os.path.basename(os.path.dirname(real_path))
        )
        zipf = zipfile.ZipFile(zipfilename, 'w')
        zipdir(real_path, zipf)
        zipf.close()

        @after_this_request
        def after(r):
            # Clean up the temporary archive once the response is sent.
            os.unlink(zipfilename)
            print("Done!")
            return r

        return send_file(
            zipfilename,
            attachment_filename=os.path.basename(os.path.dirname(real_path)))
        # NOTE(review): removed unreachable 'return "DL"' that followed here.
    # Directory listing: split entries into dirs and files, skipping names
    # matching the IGNORE_FILES pattern.
    for f in os.listdir(real_path):
        if re.match(IGNORE_FILES, f):
            continue
        rel = os.path.join(path, f)
        if os.path.isdir(os.path.join(real_path, f)):
            items['dirs'].append((f, rel + '/'))
        else:
            items['files'].append((f, rel))
    return render_template('browse.html', id=id, items=items)
    # NOTE(review): removed unreachable trailing 'return "lel"'.
if __name__ == '__main__':
    import sys
    # "python mediabrowser.py meinheld" serves with the meinheld WSGI server;
    # any other invocation runs Flask's built-in debug server.
    # Both listen on all interfaces, port 8080.
    if len(sys.argv) > 1 and sys.argv[1] == 'meinheld':
        from meinheld import server
        server.listen(("0.0.0.0", 8080))
        server.run(app)
    else:
        app.debug = True
        app.run(host="0.0.0.0", port=8080)
| <filename>mediabrowser.py
import os
from flask import Flask, render_template, send_file, request, after_this_request, redirect, url_for, safe_join
import re
from config import MAX_FOLDER_DL_SIZE_BYTES, IGNORE_FILES, ROOT_PATHS
app = Flask(__name__)
def get_size(start_path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
import zipfile
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(
os.path.join(root, file),
arcname=os.path.join(root.replace(path, ''), file)
)
@app.route('/')
def index():
return render_template('index.html', items=ROOT_PATHS)
@app.route('/<int:id>/<path:path>')
@app.route('/<int:id>/')
def browse(id, path=''):
path = path.replace('../', '')
real_path = safe_join(ROOT_PATHS[id].path, path)
items = {
'dirs': [],
'files': [],
}
if os.path.isfile(real_path):
# If it's a file, send it.
return send_file(real_path,
as_attachment=request.args.get('download'))
else:
if request.args.get('download'):
folder_size = get_size(real_path)
if folder_size > MAX_FOLDER_DL_SIZE_BYTES:
print("TOO LARGE YO")
return "Folder too large. Exceeds maximum dl of {} '\
'bytes".format(MAX_FOLDER_DL_SIZE_BYTES)
print("Request for DL")
zipfilename = 'static/zips/{}.zip'.format(
os.path.basename(os.path.dirname(real_path))
)
zipf = zipfile.ZipFile(zipfilename, 'w')
zipdir(real_path, zipf)
zipf.close()
@after_this_request
def after(r):
os.unlink(zipfilename)
print("Done!")
return r
return send_file(zipfilename,
attachment_filename=os.path.basename(os.path.dirname(real_path)))
return "DL"
else:
for f in os.listdir(real_path):
if not re.match(IGNORE_FILES, f):
if os.path.isdir(os.path.join(real_path, f)):
item = (f, os.path.join(path, f) + '/')
items['dirs'].append(item)
else:
item = (f, os.path.join(path, f))
items['files'].append(item)
return render_template('browse.html', id=id, items=items)
return "lel"
if __name__ == '__main__':
import sys
if len(sys.argv) > 1 and sys.argv[1] == 'meinheld':
from meinheld import server
server.listen(("0.0.0.0", 8080))
server.run(app)
else:
app.debug = True
app.run(host="0.0.0.0", port=8080)
| en | 0.968938 | # ziph is zipfile handle # If it's a file, send it. | 2.724571 | 3 |
codes/tests/binance/rl_common.py | bluebibi/trade | 2 | 6618492 | <reponame>bluebibi/trade<gh_stars>1-10
from tensortrade.actions import DiscreteActionStrategy
from tensortrade.features import FeaturePipeline
from tensortrade.features.scalers import MinMaxNormalizer
from tensortrade.features.stationarity import FractionalDifference
from tensortrade.rewards import SimpleProfitStrategy
# Shared TensorTrade configuration used by the Binance RL tests.
timeframe = '1h'  # candle interval
symbol = 'ETH/BTC'  # traded pair
base_instrument = 'BTC'  # currency profits are measured in
# each ohlcv candle is a list of [ timestamp, open, high, low, close, volume ]
# Min-max scale features in place.
normalize = MinMaxNormalizer(inplace=True)
# Fractional differencing (order 0.6) nudges the series toward stationarity
# while keeping some long-run memory.
difference = FractionalDifference(
    difference_order=0.6,
    inplace=True
)
feature_pipeline = FeaturePipeline(steps=[normalize, difference])
reward_strategy = SimpleProfitStrategy()
# 20 discrete actions over the ETH/BTC instrument.
action_strategy = DiscreteActionStrategy(n_actions=20, instrument_symbol='ETH/BTC')
| from tensortrade.actions import DiscreteActionStrategy
from tensortrade.features import FeaturePipeline
from tensortrade.features.scalers import MinMaxNormalizer
from tensortrade.features.stationarity import FractionalDifference
from tensortrade.rewards import SimpleProfitStrategy
timeframe = '1h'
symbol = 'ETH/BTC'
base_instrument = 'BTC'
# each ohlcv candle is a list of [ timestamp, open, high, low, close, volume ]
normalize = MinMaxNormalizer(inplace=True)
difference = FractionalDifference(
difference_order=0.6,
inplace=True
)
feature_pipeline = FeaturePipeline(steps=[normalize, difference])
reward_strategy = SimpleProfitStrategy()
action_strategy = DiscreteActionStrategy(n_actions=20, instrument_symbol='ETH/BTC') | en | 0.920224 | # each ohlcv candle is a list of [ timestamp, open, high, low, close, volume ] | 2.238369 | 2 |
src/com/inductiveautomation/ignition/common/messages/__init__.py | ignition-api/jython | 0 | 6618493 | __all__ = ["MessageInterface", "MessageReceiver", "UIResponse"]
from abc import ABCMeta, abstractmethod
from java.lang import Object
class MessageInterface(ABCMeta):
    """Stub of Ignition's MessageInterface: protocol-based message passing.

    NOTE(review): inherits from ABCMeta directly (making this class itself a
    metaclass), matching the stubbing style used elsewhere in this package
    rather than ``metaclass=ABCMeta``.
    """
    @abstractmethod
    def addMessageReceiver(cls, protocol, rcv):
        """Register *rcv* to receive messages sent on *protocol* (stub)."""
        pass
    @abstractmethod
    def sendCall(cls, protocol, scope, msg):
        """Send a call message *msg* on *protocol* within *scope* (stub)."""
        pass
    @abstractmethod
    def sendMessage(cls, protocol, scope, msg):
        """Send message *msg* on *protocol* within *scope* (stub)."""
        pass
class MessageReceiver(ABCMeta):
    """Stub of Ignition's MessageReceiver: callback side of sendCall."""
    @abstractmethod
    def receiveCall(cls, msg):
        """Handle an incoming call message *msg* (stub)."""
        pass
class UIResponse(Object):
    """Stub of Ignition's UIResponse: collects localized info/warn/error
    messages produced while handling a UI request.

    All methods are no-op stubs in this mock implementation.
    """
    def __init__(self, locale):
        # Locale used to localize response messages.
        self.locale = locale
    def attempt(self, method):
        """Attempt *method*, presumably capturing failures (stub)."""
        pass
    def error(self, message, args):
        """Record an error message (stub)."""
        pass
    def getErrors(self):
        """Return the accumulated error messages (stub)."""
        pass
    def getInfos(self):
        """Return the accumulated info messages (stub)."""
        pass
    def getLocale(self):
        """Return the response locale (stub)."""
        pass
    def getWarns(self):
        """Return the accumulated warning messages (stub)."""
        pass
    def info(self, message, args):
        """Record an info message (stub)."""
        pass
    def warn(self, message, args):
        """Record a warning message (stub)."""
        pass
    def wrap(self, locale, fx):
        """Wrap *fx* for *locale*; semantics per Ignition SDK (stub)."""
        pass
| __all__ = ["MessageInterface", "MessageReceiver", "UIResponse"]
from abc import ABCMeta, abstractmethod
from java.lang import Object
class MessageInterface(ABCMeta):
@abstractmethod
def addMessageReceiver(cls, protocol, rcv):
pass
@abstractmethod
def sendCall(cls, protocol, scope, msg):
pass
@abstractmethod
def sendMessage(cls, protocol, scope, msg):
pass
class MessageReceiver(ABCMeta):
@abstractmethod
def receiveCall(cls, msg):
pass
class UIResponse(Object):
def __init__(self, locale):
self.locale = locale
def attempt(self, method):
pass
def error(self, message, args):
pass
def getErrors(self):
pass
def getInfos(self):
pass
def getLocale(self):
pass
def getWarns(self):
pass
def info(self, message, args):
pass
def warn(self, message, args):
pass
def wrap(self, locale, fx):
pass
| none | 1 | 2.576623 | 3 | |
2018_1st/Q1.py | IT-SeanWANG/CodeJam | 0 | 6618494 | <reponame>IT-SeanWANG/CodeJam<gh_stars>0
#! /usr/bin/env python
# coding: utf-8
# python version: 2.7.9
__author__ = 'seanwa'
# main function
# Reads two lines s and t, then prints the "extra" character of t.
# Assumes t contains the characters of s plus one additional character
# (TODO confirm against the contest problem statement).
# After sorting both, the first index where they differ is the added
# character; if no index within len(s) differs, the extra character must
# be t's last (largest) one.
s = list(raw_input())
s.sort()
t = list(raw_input())
t.sort()
r = 0
# r stays 0 while no mismatch is found (the int sentinel can never equal
# a one-character string from the input)
for i in range(len(s)):
    if s[i] != t[i]:
        r = t[i]
        break
if r == 0:
    print t[-1]
else:
    print r
| #! /usr/bin/env python
# coding: utf-8
# python version: 2.7.9
__author__ = 'seanwa'
# main function
s = list(raw_input())
s.sort()
t = list(raw_input())
t.sort()
r = 0
for i in range(len(s)):
if s[i] != t[i]:
r = t[i]
break
if r == 0:
print t[-1]
else:
print r | en | 0.351874 | #! /usr/bin/env python # coding: utf-8 # python version: 2.7.9 # main function | 3.507237 | 4 |
screensaver/__init__.py | todbot/circuitpython_screensaver | 8 | 6618495 |
# screensaver.py -- screensavers for CircuitPython
# 17 Aug 2021 - @todbot
#
import time, random
import board, displayio
import adafruit_imageload
# Random color helper: prefer rainbowio's colorwheel (always-vivid hues);
# fall back to an arbitrary 24-bit RGB value when rainbowio is unavailable.
try:
    import rainbowio
    def randcolor(): return rainbowio.colorwheel(random.randint(0,255))
except ImportError:
    def randcolor(): return random.randint(0,0xffffff) # not as good but passable
# dvdlogo! currently our main screensaver
def screensaver_dvdlogo(display=board.DISPLAY, should_exit_func=None):
    """Bounce the DVD logo around *display*, recoloring it on every wall hit.

    Runs until *should_exit_func* (if given) returns truthy.
    """
    LOGO_W = 70  # sprite bitmap is 70x70 pixels
    bitmap, palette = adafruit_imageload.load("/screensaver/dvdlogo_70.bmp")
    palette.make_transparent(0)
    display.auto_refresh = False  # we drive refreshes manually below
    root = displayio.Group()      # top-level group holding everything
    display.show(root)
    logo = displayio.TileGrid(bitmap, pixel_shader=palette)
    root.append(logo)
    half = LOGO_W // 2  # integer half-width, used for edge detection
    pos_x, pos_y = display.width / 2, display.height / 2  # start centered
    vel_x = display.width / 100   # initial velocity that looks right
    vel_y = display.height / 150
    while True:
        if should_exit_func is not None and should_exit_func():
            return
        # advance by the current velocity
        pos_x += vel_x
        pos_y += vel_y
        # (pos_x, pos_y) is the sprite center, so compare center +/- half
        # against the screen edges; a bounce flips that velocity component
        # and picks a fresh logo color.
        if pos_x - half < 0 or pos_x + half > display.width:
            vel_x = -vel_x
            palette[1] = randcolor()
        if pos_y - half < 0 or pos_y + half > display.height:
            vel_y = -vel_y
            palette[1] = randcolor()
        # TileGrid coordinates are top-left based; convert center -> corner.
        logo.x = int(pos_x - half)
        logo.y = int(pos_y - half)
        # refresh + short sleep gives ~20-24 FPS on FunHouse (ESP32-S2 TFT);
        # display.refresh(target_frames_per_second=...) was visibly jerkier.
        display.refresh()
        time.sleep(0.01)
# flying toasters!
def screensaver_flyingtoasters(display=board.DISPLAY, should_exit_func=None,
                               num_toasters=2, num_toasts=3):
    """Classic "flying toasters": toasters and toast drift right-to-left,
    wrapping back to the right edge when they leave the screen.

    Runs until *should_exit_func* (if given) returns truthy.
    """
    sprite_w = 48  # width of the sprites
    sprite1_fname = "/screensaver/toast_48.bmp"
    sprite2_fname = "/screensaver/toaster_48.bmp"
    sprite2_tile_count = 4  # animation frames on the toaster sprite sheet
    display.auto_refresh = False  # only update display on display.refresh()
    screen = displayio.Group()    # group that holds everything
    display.show(screen)
    sprite1, sprite1_pal = adafruit_imageload.load(sprite1_fname)
    sprite1_pal.make_transparent(0)
    sprite2, sprite2_pal = adafruit_imageload.load(sprite2_fname)
    sprite2_pal.make_transparent(0)
    sprite_hw = sprite_w // 2  # half-width, used to center the tilegrids

    class Sprite:
        """A moving tilegrid with a velocity and optional frame animation."""

        def __init__(self, tg, x, y, vx, vy, tile_count=1, anim_speed=0):
            self.tg = tg
            self.x, self.y = x, y
            self.vx, self.vy = vx, vy
            self.tile_count = tile_count
            self.anim_speed = anim_speed  # seconds between animation frames
            self.last_time = time.monotonic()

        def update_pos(self):
            self.x = self.x + self.vx
            self.y = self.y + self.vy
            # TileGrids are top-left referenced, so subtract the half-width
            # and convert to integer pixels.
            self.tg.x = int(self.x - sprite_hw)
            self.tg.y = int(self.y - sprite_hw)

        def next_tile(self):
            # Advance this sprite's own frame (no-op for single-tile toast).
            # BUG FIX(review): previously mutated the enclosing loop variable
            # "toaster" instead of self; it only worked by coincidence of the
            # call site.
            if self.tile_count == 1:
                return
            if time.monotonic() - self.last_time > self.anim_speed:
                self.last_time = time.monotonic()
                self.tg[0] = (self.tg[0] + 1) % self.tile_count

    toasts = []
    for _ in range(num_toasts):
        x, y = random.randint(0, display.width), random.randint(0, display.height)
        vx, vy = -1.4 - random.uniform(0, 0.8), 1  # drift left, slightly down
        tg = displayio.TileGrid(sprite1, pixel_shader=sprite1_pal)
        toasts.append(Sprite(tg, x, y, vx, vy, 1))
        screen.append(tg)

    toasters = []
    for _ in range(num_toasters):
        x, y = random.randint(0, display.width), random.randint(0, display.height)
        vx, vy = -1.3 - random.random(), 1  # drift left, slightly down
        tg = displayio.TileGrid(sprite2, pixel_shader=sprite2_pal,
                                width=1, height=1,
                                tile_width=sprite_w, tile_height=sprite_w)
        sprite = Sprite(tg, x, y, vx, vy,
                        tile_count=sprite2_tile_count, anim_speed=0.1)
        sprite.tg[0] = random.randint(0, sprite2_tile_count - 1)  # randomize anim
        toasters.append(sprite)
        screen.append(tg)

    # (unused "flap_time" variable removed)
    while True:
        if should_exit_func is not None and should_exit_func():
            return
        for toast in toasts:
            toast.update_pos()
            if toast.x < 0 or toast.y > display.height:
                # Re-enter from the right edge at a random top-half height.
                toast.x = display.width
                toast.y = random.randint(0, display.height) / 2
        for toaster in toasters:
            toaster.update_pos()
            toaster.next_tile()
            if toaster.x < 0 or toaster.y > display.height:
                toaster.x = display.width
                toaster.y = random.randint(0, display.height) / 2
                toaster.tg[0] = random.randint(0, sprite2_tile_count - 1)
        # ~20-24 FPS on FunHouse (ESP32-S2 240x240 SPI TFT)
        display.refresh()
        time.sleep(0.01)
# boingball! amiga bouncing ball
def screensaver_boingball(display=board.DISPLAY, should_exit_func=None,
                          bg_fname=None):
    """Amiga "Boing Ball": a spinning checkered ball bouncing under gravity.

    Args:
        display: displayio display to draw on (defaults to the board display).
        should_exit_func: optional zero-arg callable; when it returns truthy
            the saver exits.
        bg_fname: optional BMP path drawn behind the ball.
    """
    sprite_scale = 2
    if display.height < 150: sprite_scale = 1  # small screens get the 1x ball
    sprite_w = 32 # width of the sprite to create
    sprite_fname="/screensaver/boingball_32.bmp"
    sprite_tile_count = 18  # rotation frames in the sprite sheet
    display.auto_refresh = False # only update display on display.refresh()
    screen = displayio.Group() # group that holds everything
    display.show(screen) # add main group to display
    # get background image, if there is one
    if bg_fname is not None:
        bg_img, bg_pal = adafruit_imageload.load(bg_fname)
        screen.append(displayio.TileGrid(bg_img, pixel_shader=bg_pal))
    sprite,sprite_pal = adafruit_imageload.load(sprite_fname)
    sprite_pal.make_transparent(0)
    sprite_pal.make_transparent(1)
    sprite_tg = displayio.TileGrid(sprite, pixel_shader=sprite_pal,
                                   width=1, height=1,
                                   tile_width=sprite_w, tile_height=sprite_w)
    # NOTE: "sprite" is rebound here, from the loaded bitmap to the scaling
    # Group that wraps the TileGrid.
    sprite = displayio.Group(scale=sprite_scale)
    sprite.append(sprite_tg)
    screen.append(sprite)
    x, y = display.width/2, display.height/2 # starting position, middle of screen
    vx,vy = display.width / 55, display.height / 80 # initial velocity
    sprite_hw = sprite_w//2 * sprite_scale # integer half-width for bounce detection
    g = 0.25 # our gravity acceleration
    tile_inc = 1 # which way we play the sprite animation tiles
    last_tile_time = time.monotonic()
    while True:
        if should_exit_func is not None and should_exit_func(): return
        # update our position based on our velocity
        x,y = x + vx, y + vy
        # update our velocity based on acceleration
        vy = vy + g
        # a bounce changes the polarity of the velocity
        if x - sprite_hw < 0 or x + sprite_hw > display.width:
            vx = -vx # bounce!
            tile_inc = - tile_inc # change ball "spinning" direction
        if y + sprite_hw > display.height:
            vy = -(vy - g) # bounce! (and remove gravity we added before)
        # TileGrids are top-left referenced, so subtract that off
        # and convert to integer pixel x,y before setting tilegrid xy
        sprite.x = int(x - sprite_hw)
        sprite.y = int(y - sprite_hw)
        # do the animation
        if time.monotonic() - last_tile_time > 0.01:
            last_tile_time = time.monotonic()
            # get first thing in group (only thing), assume it's a TileGrid
            # then access first space (only gridspace)
            sprite[0][0] = (sprite[0][0] + tile_inc) % sprite_tile_count
        # this gives framerate of 20-24 FPS on FunHouse (ESP32S2 240x240 SPI TFT)
        display.refresh(); time.sleep(0.01)
|
# screensaver.py -- screensavers for CircuitPython
# 17 Aug 2021 - @todbot
#
import time, random
import board, displayio
import adafruit_imageload
try:
import rainbowio
def randcolor(): return rainbowio.colorwheel(random.randint(0,255))
except ImportError:
def randcolor(): return random.randint(0,0xffffff) # not as good but passable
# dvdlogo! currently our main screensaver
def screensaver_dvdlogo(display=board.DISPLAY, should_exit_func=None):
sprite_w = 70 # width of the sprite to create
sprite_fname="/screensaver/dvdlogo_70.bmp"
display.auto_refresh = False # only update display on display.refresh()
screen = displayio.Group() # group that holds everything
display.show(screen) # add main group to display
sprite1,sprite1_pal = adafruit_imageload.load(sprite_fname)
sprite1_pal.make_transparent(0)
sprite1_tg = displayio.TileGrid(sprite1, pixel_shader=sprite1_pal)
screen.append(sprite1_tg)
x, y = display.width/2, display.height/2 # starting position, middle of screen
vx,vy = display.width / 100, display.height / 150 # initial velocity that seems cool
sprite_hw = sprite_w//2 # integer half-width of our sprite, for bounce detection
while True:
if should_exit_func is not None and should_exit_func(): return
# update our position based on our velocity
x,y = x + vx, y + vy
# x,y is centered on our sprite, so to check bounds
# add in half-width to get at edges
# a bounce just changes the polarity of the velocity
if x - sprite_hw < 0 or x + sprite_hw > display.width:
vx = -vx # bounce!
sprite1_pal[1] = randcolor() # rainbowio.colorwheel(random.randint(0,255))
if y - sprite_hw < 0 or y + sprite_hw > display.height:
vy = -vy # bounce!
sprite1_pal[1] = randcolor() # rainbowio.colorwheel(random.randint(0,255))
# TileGrids are top-left referenced, so subtract that off
# and convert to integer pixel x,y before setting tilegrid xy
sprite1_tg.x = int(x - sprite_hw)
sprite1_tg.y = int(y - sprite_hw)
# this gives framerate of 20-24 FPS on FunHouse (ESP32S2 240x240 SPI TFT)
display.refresh(); time.sleep(0.01)
# whereas this is jerky: every other frame 11 FPS & 0 FPS, at 20 FPS rate
#display.refresh(target_frames_per_second=20, minimum_frames_per_second=0)
# flying toasters!
def screensaver_flyingtoasters(display=board.DISPLAY, should_exit_func=None,
num_toasters=2, num_toasts=3):
sprite_w = 48 # width of the sprites
sprite1_fname="/screensaver/toast_48.bmp"
sprite2_fname="/screensaver/toaster_48.bmp"
sprite2_tile_count = 4
display.auto_refresh = False # only update display on display.refresh()
screen = displayio.Group() # group that holds everything
display.show(screen) # add main group to display
sprite1,sprite1_pal = adafruit_imageload.load(sprite1_fname)
sprite1_pal.make_transparent(0)
sprite2,sprite2_pal = adafruit_imageload.load(sprite2_fname)
sprite2_pal.make_transparent(0)
sprite_hw = sprite_w//2 # integer half-width of our sprite, for bounce detection
class Sprite:
def __init__(self, tg, x,y, vx,vy, tile_count=1, anim_speed=0):
self.tg = tg
self.x,self.y = x,y
self.vx,self.vy = vx,vy
self.tile_count = tile_count
self.anim_speed = anim_speed
self.last_time = time.monotonic()
def update_pos(self):
self.x = self.x + self.vx
self.y = self.y + self.vy
# TileGrids are top-left referenced, so subtract that off
# and convert to integer pixel x,y before setting tilegrid xy
self.tg.x = int(self.x - sprite_hw)
self.tg.y = int(self.y - sprite_hw)
def next_tile(self):
if self.tile_count == 1: return
if time.monotonic() - self.last_time > self.anim_speed:
self.last_time = time.monotonic()
tilenum = (toaster.tg[0] + 1) % toaster.tile_count
toaster.tg[0] = tilenum
toasts = []
for i in range(num_toasts):
x,y = random.randint(0,display.width), random.randint(0,display.height)
vx,vy = -1.4 - random.uniform(0,0.8), 1 # standard toast velocity direction
tg = displayio.TileGrid(sprite1, pixel_shader=sprite1_pal)
sprite = Sprite(tg, x,y, vx,vy, 1)
toasts.append( sprite )
screen.append(tg)
toasters = []
for i in range(num_toasters):
x,y = random.randint(0,display.width), random.randint(0,display.height)
vx,vy = -1.3 - random.random(), 1 # standard toast velocity direction
tg = displayio.TileGrid(sprite2, pixel_shader=sprite2_pal,
width=1, height=1,
tile_width=sprite_w, tile_height=sprite_w)
sprite = Sprite(tg, x,y, vx,vy, tile_count=sprite2_tile_count, anim_speed=0.1)
sprite.tg[0] = random.randint(0, sprite2_tile_count-1) # randomize anim sequence
toasters.append(sprite)
screen.append(tg)
flap_time = time.monotonic()
while True:
if should_exit_func is not None and should_exit_func(): return
# update our position based on our velocity
for toast in toasts:
toast.update_pos()
if toast.x < 0 or toast.y > display.height:
toast.x = display.width
toast.y = random.randint(0,display.height)/2
for toaster in toasters:
toaster.update_pos()
toaster.next_tile()
if toaster.x < 0 or toaster.y > display.height:
toaster.x = display.width
toaster.y = random.randint(0,display.height)/2
toaster.tg[0] = random.randint(0, sprite2_tile_count-1)
# this gives framerate of 20-24 FPS on FunHouse (ESP32S2 240x240 SPI TFT)
display.refresh(); time.sleep(0.01)
# boingball! amiga bouncing ball
def screensaver_boingball(display=board.DISPLAY, should_exit_func=None,
                          bg_fname=None):
    """Amiga-style bouncing "Boing" ball screensaver.

    Animates a spinning ball that falls under gravity and bounces off the
    display edges, optionally over a background image (``bg_fname``).
    Runs forever unless ``should_exit_func()`` returns truthy.
    """
    # Small displays get an unscaled ball; larger ones double it.
    ball_scale = 1 if display.height < 150 else 2
    ball_w = 32                                  # tile width/height of the ball sprite
    ball_fname = "/screensaver/boingball_32.bmp"
    ball_tile_count = 18                         # frames in the spin animation

    display.auto_refresh = False  # only update display on display.refresh()
    screen = displayio.Group()    # group that holds everything
    display.show(screen)          # add main group to display

    # Optional backdrop behind the ball.
    if bg_fname is not None:
        bg_img, bg_pal = adafruit_imageload.load(bg_fname)
        screen.append(displayio.TileGrid(bg_img, pixel_shader=bg_pal))

    ball_img, ball_pal = adafruit_imageload.load(ball_fname)
    ball_pal.make_transparent(0)
    ball_pal.make_transparent(1)
    ball_tg = displayio.TileGrid(ball_img, pixel_shader=ball_pal,
                                 width=1, height=1,
                                 tile_width=ball_w, tile_height=ball_w)
    ball = displayio.Group(scale=ball_scale)
    ball.append(ball_tg)
    screen.append(ball)

    # Kinematics: start centered with a velocity proportional to screen size.
    pos_x, pos_y = display.width / 2, display.height / 2
    vel_x, vel_y = display.width / 55, display.height / 80
    half_w = ball_w // 2 * ball_scale  # integer half-width for bounce detection
    gravity = 0.25                     # downward acceleration per tick
    spin_dir = 1                       # direction the tile animation plays (+1/-1)
    last_frame_time = time.monotonic()

    while True:
        if should_exit_func is not None and should_exit_func():
            return
        # Integrate position, then apply gravity to the vertical speed.
        pos_x += vel_x
        pos_y += vel_y
        vel_y += gravity
        # Side walls: reflect horizontally and reverse the spin direction.
        if pos_x - half_w < 0 or pos_x + half_w > display.width:
            vel_x = -vel_x
            spin_dir = -spin_dir
        # Floor: reflect vertically, undoing the gravity added this tick.
        if pos_y + half_w > display.height:
            vel_y = -(vel_y - gravity)
        # TileGrids are top-left referenced, so subtract the half-width
        # and convert to integer pixels before positioning.
        ball.x = int(pos_x - half_w)
        ball.y = int(pos_y - half_w)
        # Advance the spin animation frame at most every 10 ms.
        if time.monotonic() - last_frame_time > 0.01:
            last_frame_time = time.monotonic()
            ball_tg[0] = (ball_tg[0] + spin_dir) % ball_tile_count
        # this gives framerate of 20-24 FPS on FunHouse (ESP32S2 240x240 SPI TFT)
        display.refresh()
        time.sleep(0.01)
| en | 0.739237 | # screensaver.py -- screensavers for CircuitPython # 17 Aug 2021 - @todbot # # not as good but passable # dvdlogo! currently our main screensaver # width of the sprite to create # only update display on display.refresh() # group that holds everything # add main group to display # starting position, middle of screen # initial velocity that seems cool # integer half-width of our sprite, for bounce detection # update our position based on our velocity # x,y is centered on our sprite, so to check bounds # add in half-width to get at edges # a bounce just changes the polarity of the velocity # bounce! # rainbowio.colorwheel(random.randint(0,255)) # bounce! # rainbowio.colorwheel(random.randint(0,255)) # TileGrids are top-left referenced, so subtract that off # and convert to integer pixel x,y before setting tilegrid xy # this gives framerate of 20-24 FPS on FunHouse (ESP32S2 240x240 SPI TFT) # whereas this is jerky: every other frame 11 FPS & 0 FPS, at 20 FPS rate #display.refresh(target_frames_per_second=20, minimum_frames_per_second=0) # flying toasters! # width of the sprites # only update display on display.refresh() # group that holds everything # add main group to display # integer half-width of our sprite, for bounce detection # TileGrids are top-left referenced, so subtract that off # and convert to integer pixel x,y before setting tilegrid xy # standard toast velocity direction # standard toast velocity direction # randomize anim sequence # update our position based on our velocity # this gives framerate of 20-24 FPS on FunHouse (ESP32S2 240x240 SPI TFT) # boingball! 
amiga bouncing ball # width of the sprite to create # only update display on display.refresh() # group that holds everything # add main group to display # get background image, if there is one # starting position, middle of screen # initial velocity # integer half-width for bounce detection # our gravity acceleration # which way we play the sprite animation tiles # update our position based on our velocity # update our velocity based on acceleration # a bounce changes the polarity of the velocity # bounce! # change ball "spinning" direction # bounce! (and remove gravity we added before) # TileGrids are top-left referenced, so subtract that off # and convert to integer pixel x,y before setting tilegrid xy # do the animation # get first thing in group (only thing), assume it's a TileGrid # then access first space (only gridspace) # this gives framerate of 20-24 FPS on FunHouse (ESP32S2 240x240 SPI TFT) | 3.211475 | 3 |
bin/virtualribosomev2/ncbi_genetic_codes.py | gitter-badger/vAMPirus | 10 | 6618496 | ncbi_gc_table = """
--**************************************************************************
-- This is the NCBI genetic code table
-- Initial base data set from <NAME> while at PIR International
-- Addition of Eubacterial and Alternative Yeast by J.Ostell at NCBI
-- Base 1-3 of each codon have been added as comments to facilitate
-- readability at the suggestion of <NAME>, EMBL
-- Later additions by Taxonomy Group staff at NCBI
--
-- Version 3.8
-- Added GTG start to Echinoderm mitochondrial code, code 9
--
-- Version 3.7
-- Added code 23 Thraustochytrium mitochondrial code
-- formerly OGMP code 93
-- submitted by <NAME>, Ph.D.
--
-- Version 3.6
-- Added code 22 TAG-Leu, TCA-stop
-- found in mitochondrial DNA of Scenedesmus obliquus
-- submitted by <NAME>, Ph.D.
-- Organelle Genome Megasequencing Program, Univ Montreal
--
-- Version 3.5
-- Added code 21, Trematode Mitochondrial
-- (as deduced from: Garey & Wolstenholme,1989; Ohama et al, 1990)
-- Added code 16, Chlorophycean Mitochondrial
-- (TAG can translated to Leucine instaed to STOP in chlorophyceans
-- and fungi)
--
-- Version 3.4
-- Added CTG,TTG as allowed alternate start codons in Standard code.
-- Prats et al. 1989, Hann et al. 1992
--
-- Version 3.3 - 10/13/95
-- Added alternate intiation codon ATC to code 5
-- based on complete mitochondrial genome of honeybee
-- Crozier and Crozier (1993)
--
-- Version 3.2 - 6/24/95
-- Code Comments
-- 10 Alternative Ciliate Macronuclear renamed to Euplotid Macro...
-- 15 Bleharisma Macro.. code added
-- 5 Invertebrate Mito.. GTG allowed as alternate initiator
-- 11 Eubacterial renamed to Bacterial as most alternate starts
-- have been found in Achea
--
--
-- Version 3.1 - 1995
-- Updated as per <NAME> at NCBI
-- Complete documentation in NCBI toolkit documentation
-- Note: 2 genetic codes have been deleted
--
-- Old id Use id - Notes
--
-- id 7 id 4 - Kinetoplast code now merged in code id 4
-- id 8 id 1 - all plant chloroplast differences due to RNA edit
--
--*************************************************************************
Genetic-code-table ::= {
{
name "Standard" ,
name "SGC0" ,
id 1 ,
ncbieaa "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "---M---------------M---------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Vertebrate Mitochondrial" ,
name "SGC1" ,
id 2 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG",
sncbieaa "--------------------------------MMMM---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Yeast Mitochondrial" ,
name "SGC2" ,
id 3 ,
ncbieaa "FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "----------------------------------MM----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Mold Mitochondrial; Protozoan Mitochondrial; Coelenterate
Mitochondrial; Mycoplasma; Spiroplasma" ,
name "SGC3" ,
id 4 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "--MM---------------M------------MMMM---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Invertebrate Mitochondrial" ,
name "SGC4" ,
id 5 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG",
sncbieaa "---M----------------------------MMMM---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Ciliate Nuclear; Dasycladacean Nuclear; Hexamita Nuclear" ,
name "SGC5" ,
id 6 ,
ncbieaa "FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Echinoderm Mitochondrial" ,
name "SGC8" ,
id 9 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Euplotid Nuclear" ,
name "SGC9" ,
id 10 ,
ncbieaa "FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Bacterial and Plant Plastid" ,
id 11 ,
ncbieaa "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "---M---------------M------------MMMM---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Alternative Yeast Nuclear" ,
id 12 ,
ncbieaa "FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-------------------M---------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Ascidian Mitochondrial" ,
id 13 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Flatworm Mitochondrial" ,
id 14 ,
ncbieaa "FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Blepharisma Macronuclear" ,
id 15 ,
ncbieaa "FFLLSSSSYY*QCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Chlorophycean Mitochondrial" ,
id 16 ,
ncbieaa "FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Trematode Mitochondrial" ,
id 21 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Scenedesmus obliquus mitochondrial" ,
id 22 ,
ncbieaa "FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Thraustochytrium mitochondrial code" ,
id 23 ,
ncbieaa "FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "--------------------------------M--M---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
}
}
""" | ncbi_gc_table = """
--**************************************************************************
-- This is the NCBI genetic code table
-- Initial base data set from <NAME> while at PIR International
-- Addition of Eubacterial and Alternative Yeast by J.Ostell at NCBI
-- Base 1-3 of each codon have been added as comments to facilitate
-- readability at the suggestion of <NAME>, EMBL
-- Later additions by Taxonomy Group staff at NCBI
--
-- Version 3.8
-- Added GTG start to Echinoderm mitochondrial code, code 9
--
-- Version 3.7
-- Added code 23 Thraustochytrium mitochondrial code
-- formerly OGMP code 93
-- submitted by <NAME>, Ph.D.
--
-- Version 3.6
-- Added code 22 TAG-Leu, TCA-stop
-- found in mitochondrial DNA of Scenedesmus obliquus
-- submitted by <NAME>, Ph.D.
-- Organelle Genome Megasequencing Program, Univ Montreal
--
-- Version 3.5
-- Added code 21, Trematode Mitochondrial
-- (as deduced from: Garey & Wolstenholme,1989; Ohama et al, 1990)
-- Added code 16, Chlorophycean Mitochondrial
-- (TAG can translated to Leucine instaed to STOP in chlorophyceans
-- and fungi)
--
-- Version 3.4
-- Added CTG,TTG as allowed alternate start codons in Standard code.
-- Prats et al. 1989, Hann et al. 1992
--
-- Version 3.3 - 10/13/95
-- Added alternate intiation codon ATC to code 5
-- based on complete mitochondrial genome of honeybee
-- Crozier and Crozier (1993)
--
-- Version 3.2 - 6/24/95
-- Code Comments
-- 10 Alternative Ciliate Macronuclear renamed to Euplotid Macro...
-- 15 Bleharisma Macro.. code added
-- 5 Invertebrate Mito.. GTG allowed as alternate initiator
-- 11 Eubacterial renamed to Bacterial as most alternate starts
-- have been found in Achea
--
--
-- Version 3.1 - 1995
-- Updated as per <NAME> at NCBI
-- Complete documentation in NCBI toolkit documentation
-- Note: 2 genetic codes have been deleted
--
-- Old id Use id - Notes
--
-- id 7 id 4 - Kinetoplast code now merged in code id 4
-- id 8 id 1 - all plant chloroplast differences due to RNA edit
--
--*************************************************************************
Genetic-code-table ::= {
{
name "Standard" ,
name "SGC0" ,
id 1 ,
ncbieaa "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "---M---------------M---------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Vertebrate Mitochondrial" ,
name "SGC1" ,
id 2 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG",
sncbieaa "--------------------------------MMMM---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Yeast Mitochondrial" ,
name "SGC2" ,
id 3 ,
ncbieaa "FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "----------------------------------MM----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Mold Mitochondrial; Protozoan Mitochondrial; Coelenterate
Mitochondrial; Mycoplasma; Spiroplasma" ,
name "SGC3" ,
id 4 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "--MM---------------M------------MMMM---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Invertebrate Mitochondrial" ,
name "SGC4" ,
id 5 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG",
sncbieaa "---M----------------------------MMMM---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Ciliate Nuclear; Dasycladacean Nuclear; Hexamita Nuclear" ,
name "SGC5" ,
id 6 ,
ncbieaa "FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Echinoderm Mitochondrial" ,
name "SGC8" ,
id 9 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Euplotid Nuclear" ,
name "SGC9" ,
id 10 ,
ncbieaa "FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Bacterial and Plant Plastid" ,
id 11 ,
ncbieaa "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "---M---------------M------------MMMM---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Alternative Yeast Nuclear" ,
id 12 ,
ncbieaa "FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-------------------M---------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Ascidian Mitochondrial" ,
id 13 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
},
{
name "Flatworm Mitochondrial" ,
id 14 ,
ncbieaa "FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Blepharisma Macronuclear" ,
id 15 ,
ncbieaa "FFLLSSSSYY*QCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Chlorophycean Mitochondrial" ,
id 16 ,
ncbieaa "FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Trematode Mitochondrial" ,
id 21 ,
ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Scenedesmus obliquus mitochondrial" ,
id 22 ,
ncbieaa "FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "-----------------------------------M----------------------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
} ,
{
name "Thraustochytrium mitochondrial code" ,
id 23 ,
ncbieaa "FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG",
sncbieaa "--------------------------------M--M---------------M------------"
-- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG
-- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG
-- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG
}
}
""" | en | 0.417158 | --************************************************************************** -- This is the NCBI genetic code table -- Initial base data set from <NAME> while at PIR International -- Addition of Eubacterial and Alternative Yeast by J.Ostell at NCBI -- Base 1-3 of each codon have been added as comments to facilitate -- readability at the suggestion of <NAME>, EMBL -- Later additions by Taxonomy Group staff at NCBI -- -- Version 3.8 -- Added GTG start to Echinoderm mitochondrial code, code 9 -- -- Version 3.7 -- Added code 23 Thraustochytrium mitochondrial code -- formerly OGMP code 93 -- submitted by <NAME>, Ph.D. -- -- Version 3.6 -- Added code 22 TAG-Leu, TCA-stop -- found in mitochondrial DNA of Scenedesmus obliquus -- submitted by <NAME>, Ph.D. -- Organelle Genome Megasequencing Program, Univ Montreal -- -- Version 3.5 -- Added code 21, Trematode Mitochondrial -- (as deduced from: Garey & Wolstenholme,1989; Ohama et al, 1990) -- Added code 16, Chlorophycean Mitochondrial -- (TAG can translated to Leucine instaed to STOP in chlorophyceans -- and fungi) -- -- Version 3.4 -- Added CTG,TTG as allowed alternate start codons in Standard code. -- Prats et al. 1989, Hann et al. 1992 -- -- Version 3.3 - 10/13/95 -- Added alternate intiation codon ATC to code 5 -- based on complete mitochondrial genome of honeybee -- Crozier and Crozier (1993) -- -- Version 3.2 - 6/24/95 -- Code Comments -- 10 Alternative Ciliate Macronuclear renamed to Euplotid Macro... -- 15 Bleharisma Macro.. code added -- 5 Invertebrate Mito.. 
GTG allowed as alternate initiator -- 11 Eubacterial renamed to Bacterial as most alternate starts -- have been found in Achea -- -- -- Version 3.1 - 1995 -- Updated as per <NAME> at NCBI -- Complete documentation in NCBI toolkit documentation -- Note: 2 genetic codes have been deleted -- -- Old id Use id - Notes -- -- id 7 id 4 - Kinetoplast code now merged in code id 4 -- id 8 id 1 - all plant chloroplast differences due to RNA edit -- --************************************************************************* Genetic-code-table ::= { { name "Standard" , name "SGC0" , id 1 , ncbieaa "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "---M---------------M---------------M----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Vertebrate Mitochondrial" , name "SGC1" , id 2 , ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG", sncbieaa "--------------------------------MMMM---------------M------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Yeast Mitochondrial" , name "SGC2" , id 3 , ncbieaa "FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "----------------------------------MM----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Mold Mitochondrial; Protozoan Mitochondrial; Coelenterate Mitochondrial; Mycoplasma; Spiroplasma" , name "SGC3" , id 4 , ncbieaa 
"FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "--MM---------------M------------MMMM---------------M------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Invertebrate Mitochondrial" , name "SGC4" , id 5 , ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG", sncbieaa "---M----------------------------MMMM---------------M------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Ciliate Nuclear; Dasycladacean Nuclear; Hexamita Nuclear" , name "SGC5" , id 6 , ncbieaa "FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "-----------------------------------M----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Echinoderm Mitochondrial" , name "SGC8" , id 9 , ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG", sncbieaa "-----------------------------------M---------------M------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Euplotid Nuclear" , name "SGC9" , id 10 , ncbieaa "FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "-----------------------------------M----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 
TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Bacterial and Plant Plastid" , id 11 , ncbieaa "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "---M---------------M------------MMMM---------------M------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Alternative Yeast Nuclear" , id 12 , ncbieaa "FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "-------------------M---------------M----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Ascidian Mitochondrial" , id 13 , ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG", sncbieaa "-----------------------------------M----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG }, { name "Flatworm Mitochondrial" , id 14 , ncbieaa "FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG", sncbieaa "-----------------------------------M----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG } , { name "Blepharisma Macronuclear" , id 15 , ncbieaa "FFLLSSSSYY*QCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa 
"-----------------------------------M----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG } , { name "Chlorophycean Mitochondrial" , id 16 , ncbieaa "FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "-----------------------------------M----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG } , { name "Trematode Mitochondrial" , id 21 , ncbieaa "FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG", sncbieaa "-----------------------------------M---------------M------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG } , { name "Scenedesmus obliquus mitochondrial" , id 22 , ncbieaa "FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "-----------------------------------M----------------------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG } , { name "Thraustochytrium mitochondrial code" , id 23 , ncbieaa "FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG", sncbieaa "--------------------------------M--M---------------M------------" -- Base1 TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG -- Base2 TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG -- Base3 TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG 
} } | 1.412858 | 1 |
drl4pdp/tasks/pdp.py | temur-kh/pdp-drl-project | 7 | 6618497 | <gh_stars>1-10
"""Defines the main task for the PDP.
The PDP is defined by the following traits:
1. Each city has a demand in [1, 9], which must be serviced by the vehicle
2. Each vehicle has a capacity (depends on problem), and must visit all cities
3. When the vehicle load is 0, it __must__ return to the depot to refill
"""
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.autograd import Variable
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class VehicleRoutingDataset(Dataset):
    """Synthetic vehicle-routing problem instances.

    Each sample exposes:
      * ``static``  -- (2, input_size + 1) node coordinates; column 0 is the depot.
      * ``dynamic`` -- (2, input_size + 1) per-node [load, demand] state.
      * the depot coordinates, used as the tour's start location.
    """

    def __init__(self, num_samples, input_size, max_load=20, max_demand=9,
                 seed=None):
        super(VehicleRoutingDataset, self).__init__()

        if max_load < max_demand:
            raise ValueError(':param max_load: must be > max_demand')

        # Seed both RNGs so a dataset can be reproduced exactly.
        if seed is None:
            seed = np.random.randint(1234567890)
        np.random.seed(seed)
        torch.manual_seed(seed)

        self.num_samples = num_samples
        self.max_load = max_load
        self.max_demand = max_demand

        # Node 0 of every sample is the depot; the rest are customers.
        self.static = torch.rand((num_samples, 2, input_size + 1))

        # The vehicle load is tracked as a fraction of capacity in [0, 1] so
        # the network never sees large magnitudes; tours start fully loaded.
        state_shape = (num_samples, 1, input_size + 1)
        loads = torch.full(state_shape, 1.)

        # Integer demands drawn from [1, max_demand], then rescaled by the
        # vehicle capacity (e.g. load=10, max_demand=30 -> demands in (0, 3]).
        demands = torch.randint(1, max_demand + 1, state_shape, dtype=torch.float)
        demands = demands / float(max_load)
        demands[:, 0, 0] = 0  # the depot itself demands nothing
        self.dynamic = torch.tensor(np.concatenate((loads, demands), axis=1),
                                    dtype=torch.float)

    def __len__(self):
        """Number of problem instances in the dataset."""
        return self.num_samples

    def __getitem__(self, idx):
        """Return ``(static, dynamic, start_loc)`` for sample ``idx``."""
        return (self.static[idx], self.dynamic[idx], self.static[idx, :, 0:1])
def update_mask(mask, dynamic, chosen_idx=None):
    """Updates the mask used to hide non-valid states.

    Parameters
    ----------
    mask : torch.Tensor
        Previous step's mask (unused; the mask is recomputed from scratch).
    dynamic : torch.Tensor of size (batch_size, num_feats, seq_len)
        Row 0 holds the vehicle load, row 1 the per-node demands.
    chosen_idx : torch.LongTensor of size (batch_size,)
        Node selected at the previous decoding step; index 0 is the depot.

    Returns
    -------
    torch.FloatTensor of size (batch_size, seq_len)
        1 where a node may be visited next, 0 where it may not.
    """
    loads = dynamic.data[:, 0]  # (batch_size, seq_len)
    demands = dynamic.data[:, 1]  # (batch_size, seq_len)

    # If no demand is left anywhere in the batch, every tour can end: mask
    # out everything. Note the depot's "demand" entry is 0 initially and
    # <= 0 afterwards (update_dynamic stores the refill amount there as a
    # negative number), so only customer demands can be positive.
    if demands.eq(0).all():
        return demands * 0.

    # Otherwise a node is reachable if it has non-zero demand that fits in
    # the remaining load.
    new_mask = demands.ne(0) * demands.lt(loads)

    # Avoid traveling to the depot back-to-back: the depot is only an option
    # for samples whose previous choice was a city.
    repeat_home = chosen_idx.ne(0)

    if repeat_home.any():
        new_mask[repeat_home.nonzero(), 0] = 1.
    if (~repeat_home).any():
        new_mask[(~repeat_home).nonzero(), 0] = 0.

    # ... unless the vehicle is empty or the sample has no customer demand
    # left: then it must idle at the depot while the rest of the minibatch
    # finishes decoding.
    has_no_load = loads[:, 0].eq(0).float()
    has_no_demand = demands[:, 1:].sum(1).eq(0).float()

    combined = (has_no_load + has_no_demand).gt(0)
    if combined.any():
        new_mask[combined.nonzero(), 0] = 1.
        new_mask[combined.nonzero(), 1:] = 0.

    return new_mask.float()
def update_dynamic(dynamic, chosen_idx):
    """Return the updated (load, demand) state after visiting ``chosen_idx``.

    Parameters
    ----------
    dynamic : torch.Tensor of shape (batch, 2, seq_len)
        Row 0 holds the vehicle load (broadcast over all nodes), row 1 the
        per-node demands.
    chosen_idx : torch.LongTensor of shape (batch,)
        Node selected for each sample; index 0 is the depot.

    Returns
    -------
    torch.Tensor
        A detached copy of the new dynamic state on the same device.
    """
    # Visiting a city and returning to the depot are handled differently.
    visit = chosen_idx.ne(0)
    depot = chosen_idx.eq(0)

    # Clone so the autograd graph of ``dynamic`` is left untouched.
    all_loads = dynamic[:, 0].clone()
    all_demands = dynamic[:, 1].clone()

    load = torch.gather(all_loads, 1, chosen_idx.unsqueeze(1))
    demand = torch.gather(all_demands, 1, chosen_idx.unsqueeze(1))

    # Across the minibatch - if we've chosen to visit a city, satisfy as
    # much of its demand as the current load allows.
    if visit.any():
        new_load = torch.clamp(load - demand, min=0)
        new_demand = torch.clamp(demand - load, min=0)

        # Broadcast the load to all nodes, but update demand separately.
        visit_idx = visit.nonzero().squeeze()
        all_loads[visit_idx] = new_load[visit_idx]
        all_demands[visit_idx, chosen_idx[visit_idx]] = new_demand[visit_idx].view(-1)
        # The depot's "demand" slot tracks how much refill the vehicle needs
        # (a non-positive number).
        all_demands[visit_idx, 0] = -1. + new_load[visit_idx].view(-1)

    # Returning to the depot refills the vehicle and clears the marker.
    if depot.any():
        all_loads[depot.nonzero().squeeze()] = 1.
        all_demands[depot.nonzero().squeeze(), 0] = 0.

    tensor = torch.cat((all_loads.unsqueeze(1), all_demands.unsqueeze(1)), 1)
    # clone().detach() replaces the deprecated torch.tensor(tensor.data, ...)
    # idiom: same values and device, detached from the autograd graph.
    return tensor.clone().detach()
def reward(static, tour_indices):
    """Total Euclidean tour length for each sample in the batch.

    The tour is closed by prepending and appending the depot (node 0); the
    extra depot->depot hop contributes zero distance.
    """
    # Look up the visited coordinates: (batch, tour_len, 2).
    idx = tour_indices.unsqueeze(1).expand(-1, static.size(1), -1)
    tour = torch.gather(static.data, 2, idx).permute(0, 2, 1)

    # Close the loop at the depot on both ends.
    depot = static.data[:, :, 0].unsqueeze(1)
    path = torch.cat((depot, tour, depot), dim=1)

    # Sum the lengths of consecutive segments.
    segment_lengths = (path[:, 1:] - path[:, :-1]).pow(2).sum(dim=2).sqrt()
    return segment_lengths.sum(1)
def render(static, tour_indices, save_path):
    """Plot the decoded tours and save the figure to ``save_path``.

    Draws up to a 3x3 grid of solutions (one per subplot) when the batch is
    large enough, otherwise a single plot. Each depot-to-depot subtour gets
    its own colour; cities are red dots and the depot a black star.

    Fix: leftover debug ``print`` statements removed (they flooded stdout
    with tensors on every render call).
    """
    plt.close('all')

    # Use a 3x3 grid only when there are enough tours to fill it.
    num_plots = 3 if int(np.sqrt(len(tour_indices))) >= 3 else 1

    _, axes = plt.subplots(nrows=num_plots, ncols=num_plots,
                           sharex='col', sharey='row')
    if num_plots == 1:
        axes = [[axes]]
    axes = [a for ax in axes for a in ax]

    for i, ax in enumerate(axes):
        # Convert the indices back into a tour of coordinates.
        idx = tour_indices[i]
        if len(idx.size()) == 1:
            idx = idx.unsqueeze(0)
        idx = idx.expand(static.size(1), -1)
        data = torch.gather(static[i].data, 1, idx).cpu().numpy()

        # Close the tour at the depot on both ends.
        start = static[i, :, 0].cpu().data.numpy()
        x = np.hstack((start[0], data[0], start[0]))
        y = np.hstack((start[1], data[1], start[1]))

        # Assign each subtour a different colour & label in order traveled.
        idx = np.hstack((0, tour_indices[i].cpu().numpy().flatten(), 0))
        where = np.where(idx == 0)[0]

        for j in range(len(where) - 1):
            low = where[j]
            high = where[j + 1]
            if low + 1 == high:
                # Two consecutive depot visits draw nothing.
                continue
            ax.plot(x[low: high + 1], y[low: high + 1], zorder=1, label=j)

        ax.legend(loc="upper right", fontsize=3, framealpha=0.5)
        ax.scatter(x, y, s=4, c='r', zorder=2)
        ax.scatter(x[0], y[0], s=20, c='k', marker='*', zorder=3)
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)

    plt.tight_layout()
    plt.savefig(save_path, bbox_inches='tight', dpi=400)
| """Defines the main task for the PDP.
The PDP is defined by the following traits:
1. Each city has a demand in [1, 9], which must be serviced by the vehicle
2. Each vehicle has a capacity (depends on problem), the must visit all cities
3. When the vehicle load is 0, it __must__ return to the depot to refill
"""
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.autograd import Variable
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class VehicleRoutingDataset(Dataset):
def __init__(self, num_samples, input_size, max_load=20, max_demand=9,
seed=None):
super(VehicleRoutingDataset, self).__init__()
if max_load < max_demand:
raise ValueError(':param max_load: must be > max_demand')
if seed is None:
seed = np.random.randint(1234567890)
np.random.seed(seed)
torch.manual_seed(seed)
self.num_samples = num_samples
self.max_load = max_load
self.max_demand = max_demand
# Depot location will be the first node in each
locations = torch.rand((num_samples, 2, input_size + 1))
self.static = locations
# All states will broadcast the drivers current load
# Note that we only use a load between [0, 1] to prevent large
# numbers entering the neural network
dynamic_shape = (num_samples, 1, input_size + 1)
loads = torch.full(dynamic_shape, 1.)
# All states will have their own intrinsic demand in [1, max_demand),
# then scaled by the maximum load. E.g. if load=10 and max_demand=30,
# demands will be scaled to the range (0, 3)
demands = torch.randint(1, max_demand + 1, dynamic_shape, dtype=torch.float)
demands = demands / float(max_load)
demands[:, 0, 0] = 0 # depot starts with a demand of 0
self.dynamic = torch.tensor(np.concatenate((loads, demands), axis=1), dtype=torch.float)
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
# (static, dynamic, start_loc)
return (self.static[idx], self.dynamic[idx], self.static[idx, :, 0:1])
def update_mask(mask, dynamic, chosen_idx=None):
"""Updates the mask used to hide non-valid states.
Parameters
----------
dynamic: torch.autograd.Variable of size (1, num_feats, seq_len)
"""
# Convert floating point to integers for calculations
loads = dynamic.data[:, 0] # (batch_size, seq_len)
demands = dynamic.data[:, 1] # (batch_size, seq_len)
# If there is no positive demand left, we can end the tour.
# Note that the first node is the depot, which always has a negative demand
if demands.eq(0).all():
return demands * 0.
# Otherwise, we can choose to go anywhere where demand is > 0
new_mask = demands.ne(0) * demands.lt(loads)
# We should avoid traveling to the depot back-to-back
repeat_home = chosen_idx.ne(0)
if repeat_home.any():
new_mask[repeat_home.nonzero(), 0] = 1.
if (~repeat_home).any():
new_mask[(~repeat_home).nonzero(), 0] = 0.
# ... unless we're waiting for all other samples in a minibatch to finish
has_no_load = loads[:, 0].eq(0).float()
has_no_demand = demands[:, 1:].sum(1).eq(0).float()
combined = (has_no_load + has_no_demand).gt(0)
if combined.any():
new_mask[combined.nonzero(), 0] = 1.
new_mask[combined.nonzero(), 1:] = 0.
return new_mask.float()
def update_dynamic(dynamic, chosen_idx):
"""Updates the (load, demand) dataset values."""
# Update the dynamic elements differently for if we visit depot vs. a city
visit = chosen_idx.ne(0)
depot = chosen_idx.eq(0)
# Clone the dynamic variable so we don't mess up graph
all_loads = dynamic[:, 0].clone()
all_demands = dynamic[:, 1].clone()
load = torch.gather(all_loads, 1, chosen_idx.unsqueeze(1))
demand = torch.gather(all_demands, 1, chosen_idx.unsqueeze(1))
# Across the minibatch - if we've chosen to visit a city, try to satisfy
# as much demand as possible
if visit.any():
new_load = torch.clamp(load - demand, min=0)
new_demand = torch.clamp(demand - load, min=0)
# Broadcast the load to all nodes, but update demand seperately
visit_idx = visit.nonzero().squeeze()
all_loads[visit_idx] = new_load[visit_idx]
all_demands[visit_idx, chosen_idx[visit_idx]] = new_demand[visit_idx].view(-1)
all_demands[visit_idx, 0] = -1. + new_load[visit_idx].view(-1)
# Return to depot to fill vehicle load
if depot.any():
all_loads[depot.nonzero().squeeze()] = 1.
all_demands[depot.nonzero().squeeze(), 0] = 0.
tensor = torch.cat((all_loads.unsqueeze(1), all_demands.unsqueeze(1)), 1)
return torch.tensor(tensor.data, device=dynamic.device)
def reward(static, tour_indices):
"""
Euclidean distance between all cities / nodes given by tour_indices
"""
# Convert the indices back into a tour
idx = tour_indices.unsqueeze(1).expand(-1, static.size(1), -1)
tour = torch.gather(static.data, 2, idx).permute(0, 2, 1)
# Ensure we're always returning to the depot - note the extra concat
# won't add any extra loss, as the euclidean distance between consecutive
# points is 0
start = static.data[:, :, 0].unsqueeze(1)
y = torch.cat((start, tour, start), dim=1)
# Euclidean distance between each consecutive point
tour_len = torch.sqrt(torch.sum(torch.pow(y[:, :-1] - y[:, 1:], 2), dim=2))
return tour_len.sum(1)
def render(static, tour_indices, save_path):
"""Plots the found solution."""
plt.close('all')
print('static_shape', static.shape)
print('tour_indices_shape', tour_indices.shape)
num_plots = 3 if int(np.sqrt(len(tour_indices))) >= 3 else 1
_, axes = plt.subplots(nrows=num_plots, ncols=num_plots,
sharex='col', sharey='row')
if num_plots == 1:
axes = [[axes]]
axes = [a for ax in axes for a in ax]
for i, ax in enumerate(axes):
# Convert the indices back into a tour
idx = tour_indices[i]
print('idx0', idx)
if len(idx.size()) == 1:
idx = idx.unsqueeze(0)
print('idx1', idx)
idx = idx.expand(static.size(1), -1)
print('idx2', idx)
data = torch.gather(static[i].data, 1, idx).cpu().numpy()
print('data', data)
start = static[i, :, 0].cpu().data.numpy()
x = np.hstack((start[0], data[0], start[0]))
y = np.hstack((start[1], data[1], start[1]))
print('x', x)
print('y', y)
# Assign each subtour a different colour & label in order traveled
idx = np.hstack((0, tour_indices[i].cpu().numpy().flatten(), 0))
print('idx3', idx)
where = np.where(idx == 0)[0]
print('where', where)
for j in range(len(where) - 1):
low = where[j]
high = where[j + 1]
if low + 1 == high:
continue
ax.plot(x[low: high + 1], y[low: high + 1], zorder=1, label=j)
ax.legend(loc="upper right", fontsize=3, framealpha=0.5)
ax.scatter(x, y, s=4, c='r', zorder=2)
ax.scatter(x[0], y[0], s=20, c='k', marker='*', zorder=3)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.tight_layout()
plt.savefig(save_path, bbox_inches='tight', dpi=400) | en | 0.889683 | Defines the main task for the PDP. The PDP is defined by the following traits: 1. Each city has a demand in [1, 9], which must be serviced by the vehicle 2. Each vehicle has a capacity (depends on problem), the must visit all cities 3. When the vehicle load is 0, it __must__ return to the depot to refill # Depot location will be the first node in each # All states will broadcast the drivers current load # Note that we only use a load between [0, 1] to prevent large # numbers entering the neural network # All states will have their own intrinsic demand in [1, max_demand), # then scaled by the maximum load. E.g. if load=10 and max_demand=30, # demands will be scaled to the range (0, 3) # depot starts with a demand of 0 # (static, dynamic, start_loc) Updates the mask used to hide non-valid states. Parameters ---------- dynamic: torch.autograd.Variable of size (1, num_feats, seq_len) # Convert floating point to integers for calculations # (batch_size, seq_len) # (batch_size, seq_len) # If there is no positive demand left, we can end the tour. # Note that the first node is the depot, which always has a negative demand # Otherwise, we can choose to go anywhere where demand is > 0 # We should avoid traveling to the depot back-to-back # ... unless we're waiting for all other samples in a minibatch to finish Updates the (load, demand) dataset values. # Update the dynamic elements differently for if we visit depot vs. 
a city # Clone the dynamic variable so we don't mess up graph # Across the minibatch - if we've chosen to visit a city, try to satisfy # as much demand as possible # Broadcast the load to all nodes, but update demand seperately # Return to depot to fill vehicle load Euclidean distance between all cities / nodes given by tour_indices # Convert the indices back into a tour # Ensure we're always returning to the depot - note the extra concat # won't add any extra loss, as the euclidean distance between consecutive # points is 0 # Euclidean distance between each consecutive point Plots the found solution. # Convert the indices back into a tour # Assign each subtour a different colour & label in order traveled | 3.337962 | 3 |
bootstrap/sync.py | lebenasa/dotfiles | 0 | 6618498 | #!/usr/bin/env python3
"""
Sync dotfiles to taget directory.
"""
| #!/usr/bin/env python3
"""
Sync dotfiles to taget directory.
"""
| en | 0.434217 | #!/usr/bin/env python3 Sync dotfiles to taget directory. | 1.036094 | 1 |
app/profiles/models.py | taha20181/share-and-colab | 0 | 6618499 | <reponame>taha20181/share-and-colab<filename>app/profiles/models.py
from flask import Flask, session
from flask_pymongo import PyMongo
import json
from bson import json_util
from bson.json_util import dumps
from bson.objectid import ObjectId
import bcrypt
# Custom imports
from app import *
from app import mongo
class Users:
    """Data-access helpers for user documents in the ``users`` collection."""

    def addNewuser(self, newuser):
        """Insert a freshly registered user document into MongoDB."""
        document = {
            "first name": newuser['first_name'],
            "last name": newuser['last_name'],
            "email": newuser['email'],
            # "gender": newuser['gender'],
            "username": newuser['username'],
            "password": newuser['password'],
            "account created": newuser['acc_created'],
            "blog count": newuser['blog count'],
        }
        mongo.db.users.insert_one(document)

    def addPersonalInfo(self, info):
        """Merge profile details into the logged-in user's document.

        The target user is identified by the session's ``EMAIL`` key.
        """
        profile_fields = {
            'first name': info['first name'],
            'last name': info['last name'],
            'occupation': info['occupation'],
            'company': info['company'],
            "github": info['github'],
            "linkedin": info['linkedin'],
            'country': info['country'],
            'skills': info['skills'],
            'about_me': info['about_me'],
        }
        print(session['EMAIL'])
        mongo.db.users.update_one({'email': session['EMAIL']},
                                  {'$set': profile_fields})

    def findUser(self, email, password):
        """Check credentials against the stored bcrypt hash.

        Returns the username on success, -1 for a wrong password, and 0 when
        no account exists for ``email``.
        """
        record = mongo.db.users.find_one({"email": email}, {"_id": 0})
        if record is None:
            return 0
        if bcrypt.checkpw(password.encode('utf-8'), record["password"]):
            return record["username"]
        return -1

    def getUser(self, email):
        """Fetch the full user document for ``email``."""
        return mongo.db.users.find_one({'email': email})
class Data :
    """Data-access helpers for the ``skills`` collection."""

    def getSkills(self):
        # NOTE(review): {'_id': 0} is passed as the *filter* argument here,
        # so this matches documents whose _id equals 0. It was probably
        # intended as a projection (the second argument) -- confirm against
        # callers before changing.
        skills = mongo.db.skills.find({'_id':0})
        print(skills)
        return skills

    def addSkills(self,new_skill):
        # Earlier approaches kept for reference:
        # mongo.db.users.insert_one({})
        # mongo.db.skills.update({},{'$push':{'skills':new_skill}},upsert=True)
        # Looks up documents whose 'skills' array contains an element
        # matching new_skill (the result is printed on the next line of the
        # original file).
        a = list(mongo.db.skills.find( {},{ 'skills': { '$elemMatch': new_skill } } ))
print("A : ",a) | from flask import Flask, session
from flask_pymongo import PyMongo
import json
from bson import json_util
from bson.json_util import dumps
from bson.objectid import ObjectId
import bcrypt
# Custom imports
from app import *
from app import mongo
class Users:
def addNewuser(self,newuser):
user = {
"first name": newuser['first_name'],
"last name": newuser['last_name'],
"email": newuser['email'],
# "gender": newuser['gender'],
"username": newuser['username'],
"password": newuser['password'],
"account created": newuser['acc_created'],
"blog count": newuser['blog count']
}
mongo.db.users.insert_one(user)
def addPersonalInfo(self,info):
user_info = {
'first name' : info['first name'],
'last name' : info['last name'],
'occupation' : info['occupation'],
'company' : info['company'],
"github" : info['github'],
"linkedin" : info['linkedin'],
'country' : info['country'],
'skills' : info['skills'],
'about_me' : info['about_me']
}
print(session['EMAIL'])
mongo.db.users.update_one({'email':session['EMAIL']},{'$set':user_info})
def findUser(self,email,password):
found = mongo.db.users.find_one({"email":email},{"_id":0})
if found is not None:
if bcrypt.checkpw(password.encode('utf-8'), found["password"]):
# print("FOUND : ",found["username"])
return found["username"]
else:
return -1
else:
return 0
def getUser(self,email):
user = mongo.db.users.find_one({'email': email})
return user
class Data :
def getSkills(self):
skills = mongo.db.skills.find({'_id':0})
print(skills)
return skills
def addSkills(self,new_skill):
# mongo.db.users.insert_one({})
# mongo.db.skills.update({},{'$push':{'skills':new_skill}},upsert=True)
a = list(mongo.db.skills.find( {},{ 'skills': { '$elemMatch': new_skill } } ))
print("A : ",a) | en | 0.271733 | # Custom imports # "gender": newuser['gender'], # print("FOUND : ",found["username"]) # mongo.db.users.insert_one({}) # mongo.db.skills.update({},{'$push':{'skills':new_skill}},upsert=True) | 2.963532 | 3 |
ilrdc/core/__init__.py | Retr0327/ilrdc-downloader | 0 | 6618500 | from .story import StoryDownloader
from .grammar import GrammarDownloader
from .vocabulary import VocabularyDownloader
| from .story import StoryDownloader
from .grammar import GrammarDownloader
from .vocabulary import VocabularyDownloader
| none | 1 | 1.031446 | 1 | |
TeamT2_ARC2017_src/t2_robot_vision/src/dist_embed3.py | warehouse-picking-automation-challenges/Team_T2 | 2 | 6618501 | <filename>TeamT2_ARC2017_src/t2_robot_vision/src/dist_embed3.py
# -*- coding: utf-8 -*-
"""
# Software License Agreement (BSD License)
#
# Copyright (c) 2017, Toshiba Corporation,
# Toshiba Infrastructure Systems & Solutions Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Toshiba Corporation, nor the Toshiba
# Infrastructure Systems & Solutions Corporation, nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class DistEmbed(function.Function):
    """Distance Embedding loss function.

    Penalizes the difference between the Euclidean distance of two embedded
    vectors ``zi``, ``zj`` and a target distance ``dij``. Target distances
    below ``d_th`` are matched exactly (absolute error); targets at or above
    ``d_th`` only require the embedding distance to reach ``dij`` (hinge).
    """

    def __init__(self, d_th):
        # d_th: threshold separating the exact-match regime from the
        # one-sided hinge regime.
        if d_th <= 0:
            raise ValueError('out_th should be positive value.')
        self.d_th = d_th

    def check_type_forward(self, in_types):
        # Expect three float32 inputs: zi and zj of identical shape (N, dim)
        # and a length-N vector of target distances.
        type_check.expect(in_types.size() == 3)
        zi, zj, dist = in_types
        type_check.expect(
            zi.dtype == numpy.float32,
            zj.dtype == numpy.float32,
            dist.dtype == numpy.float32,
            zi.shape == zj.shape,
            zi.shape[0] == dist.shape[0],
            zi.shape[0] > 0
        )

    def forward(self, inputs):
        # Per-pair loss:
        #   | dij - ||zi-zj|| |        if dij <  d_th
        #   max(0, dij - ||zi-zj||)    if dij >= d_th
        # The output is the mean over the N pairs in the batch.
        xp = cuda.get_array_module(*inputs)
        zi,zj,dij = inputs
        N=zi.shape[0]
        d_of_zi_zj = xp.linalg.norm(zi-zj,axis=1)
        ### loss for pairs with dij < d_th (L1 between target and actual)
        isInTh = dij<self.d_th
        lossAll = xp.linalg.norm( (dij - d_of_zi_zj) * isInTh, ord=1)
        ### loss for pairs with dij >= d_th (one-sided hinge)
        lossAll += xp.sum( (1-isInTh) * xp.maximum(dij-d_of_zi_zj, 0) )
        loss=lossAll/N
        return xp.array(loss, dtype=xp.float32),

    def backward(self, inputs, grad_outputs):
        xp = cuda.get_array_module(*inputs)
        zi,zj,dij = inputs
        dE_dLoss, = grad_outputs  # upstream gradient (expected to be 1)
        sa=zi-zj
        d_of_zi_zj=xp.linalg.norm(sa,axis=1)
        d_of_zi_zj=xp.maximum(d_of_zi_zj,1e-8) # avoid division by zero
        d_of_zi_zj=d_of_zi_zj[:,xp.newaxis] # reshape to a column vector
        dij=dij[:,xp.newaxis] # reshape to a column vector
        A=(d_of_zi_zj<dij)
        C=(dij<self.d_th)
        # sign factor used below:
        #   +1, if dij < d_th < ||zi-zj||
        #   +1, if dij < ||zi-zj|| < d_th
        #    0, if d_th < dij < ||zi-zj||   (hinge inactive)
        #   -1, if d_th < ||zi-zj|| < dij
        #   -1, if ||zi-zj|| < dij < d_th
        #   -1, if ||zi-zj|| < d_th < dij
        sign = -1*A + (1-A)*C
        dLoss_dzi = sign*sa/d_of_zi_zj
        dE_dzi = (dE_dLoss*dLoss_dzi).astype(xp.float32)
        # Gradient w.r.t. zj is the negation; dij receives no gradient.
        return dE_dzi, -dE_dzi, None
def dist_embed(zi, zj, dij, d_th=15.):
    """Compute the distance-embedding loss (see :class:`DistEmbed`)."""
    loss_fn = DistEmbed(d_th)
    return loss_fn(zi, zj, dij)
| <filename>TeamT2_ARC2017_src/t2_robot_vision/src/dist_embed3.py
# -*- coding: utf-8 -*-
"""
# Software License Agreement (BSD License)
#
# Copyright (c) 2017, Toshiba Corporation,
# Toshiba Infrastructure Systems & Solutions Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Toshiba Corporation, nor the Toshiba
# Infrastructure Systems & Solutions Corporation, nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class DistEmbed(function.Function):
"""Distance Embedding loss function."""
def __init__(self, d_th):
if d_th <= 0:
raise ValueError('out_th should be positive value.')
self.d_th = d_th
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
zi, zj, dist = in_types
type_check.expect(
zi.dtype == numpy.float32,
zj.dtype == numpy.float32,
dist.dtype == numpy.float32,
zi.shape == zj.shape,
zi.shape[0] == dist.shape[0],
zi.shape[0] > 0
)
def forward(self, inputs):
# data1つ分のloss
# = ┌ | dij - ||zi-zj|| | if dij < d_th
# └ max(0,dij - ||zi-zj||) if dij > d_th
#data数の分平均したものを出力する
xp = cuda.get_array_module(*inputs)
zi,zj,dij = inputs
N=zi.shape[0]
d_of_zi_zj = xp.linalg.norm(zi-zj,axis=1)
### dij<d_thの時のloss
isInTh = dij<self.d_th
lossAll = xp.linalg.norm( (dij - d_of_zi_zj) * isInTh, ord=1)
### dij>d_thの時のloss
lossAll += xp.sum( (1-isInTh) * xp.maximum(dij-d_of_zi_zj, 0) )
loss=lossAll/N
return xp.array(loss, dtype=xp.float32),
def backward(self, inputs, grad_outputs):
xp = cuda.get_array_module(*inputs)
zi,zj,dij = inputs
dE_dLoss, = grad_outputs #この値は1のはず
sa=zi-zj
d_of_zi_zj=xp.linalg.norm(sa,axis=1)
d_of_zi_zj=xp.maximum(d_of_zi_zj,1e-8) # avoid division by zero
d_of_zi_zj=d_of_zi_zj[:,xp.newaxis] #縦ベクトル化
dij=dij[:,xp.newaxis] #縦ベクトル化
A=(d_of_zi_zj<dij)
C=(dij<self.d_th)
#signの値
# +1, if dij < d_th < ||zi-zj||
# +1, if dij < ||zi-zj||< d_th
# 0, if d_th < dij < ||zi-zj||
# -1, if d_th < ||zi-zj||< dij
# -1, if ||zi-zj||< dij < d_th
# -1, if ||zi-zj||< d_th < dij
sign = -1*A + (1-A)*C
dLoss_dzi = sign*sa/d_of_zi_zj
dE_dzi = (dE_dLoss*dLoss_dzi).astype(xp.float32)
return dE_dzi, -dE_dzi, None
def dist_embed(zi,zj,dij, d_th=15.):
"""Computes Distance embedding loss."""
return DistEmbed(d_th)(zi,zj,dij)
| en | 0.56182 | # -*- coding: utf-8 -*- # Software License Agreement (BSD License) # # Copyright (c) 2017, Toshiba Corporation, # Toshiba Infrastructure Systems & Solutions Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the Toshiba Corporation, nor the Toshiba # Infrastructure Systems & Solutions Corporation, nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. Distance Embedding loss function. 
# data1つ分のloss # = ┌ | dij - ||zi-zj|| | if dij < d_th # └ max(0,dij - ||zi-zj||) if dij > d_th #data数の分平均したものを出力する ### dij<d_thの時のloss ### dij>d_thの時のloss #この値は1のはず # avoid division by zero #縦ベクトル化 #縦ベクトル化 #signの値 # +1, if dij < d_th < ||zi-zj|| # +1, if dij < ||zi-zj||< d_th # 0, if d_th < dij < ||zi-zj|| # -1, if d_th < ||zi-zj||< dij # -1, if ||zi-zj||< dij < d_th # -1, if ||zi-zj||< d_th < dij Computes Distance embedding loss. | 1.30577 | 1 |
1_beginner/chapter5/solutions/echo_enhanced.py | code4tomorrow/Python | 4 | 6618502 | # Echo Enhanced
# Write a program that continuously prompts the user
# to enter a message, and echoes that message
# back to the user (prints it). If the message
# is 'q', the program should end. If the message is
# 'c', the program shouldn't echo anything but instead
# prompt the user to enter another message to echo.
# See a demo of the program here: https://youtu.be/Rb5LUiXzAcU
print("Enter a message, 'c' to cancel an echo, or 'q' to quit.")
while True:
message = input("Message: ")
if message == "q":
break # quit
elif message == "c":
continue # cancel echo
else:
print(message) # echo message
| # Echo Enhanced
# Write a program that continuously prompts the user
# to enter a message, and echoes that message
# back to the user (prints it). If the message
# is 'q', the program should end. If the message is
# 'c', the program shouldn't echo anything but instead
# prompt the user to enter another message to echo.
# See a demo of the program here: https://youtu.be/Rb5LUiXzAcU
print("Enter a message, 'c' to cancel an echo, or 'q' to quit.")
while True:
message = input("Message: ")
if message == "q":
break # quit
elif message == "c":
continue # cancel echo
else:
print(message) # echo message
| en | 0.775673 | # Echo Enhanced # Write a program that continuously prompts the user # to enter a message, and echoes that message # back to the user (prints it). If the message # is 'q', the program should end. If the message is # 'c', the program shouldn't echo anything but instead # prompt the user to enter another message to echo. # See a demo of the program here: https://youtu.be/Rb5LUiXzAcU # quit # cancel echo # echo message | 4.331403 | 4 |
datasets/bbbc-021/scripts/apply-tags.py | aws-samples/bioimage-search | 4 | 6618503 | import sys
import argparse
import boto3
from pathlib import Path
import bbbc021common as bb
s3c = boto3.client('s3')
sys.path.insert(0, "../../../cli/bioims/src")
import bioims
parser = argparse.ArgumentParser()
parser.add_argument('--bbbc021-bucket', type=str, required=True, help='bbbc021 bucket')
parser.add_argument('--bioims-resource-bucket', type=str, required=True, help='resource bucket')
parser.add_argument('--embeddingName', type=str, required=True, help='embedding name')
args = parser.parse_args()
BBBC021_BUCKET = args.bbbc021_bucket
BIOIMS_INPUT_BUCKET = args.bioims_resource_bucket
EMBEDDING = args.embeddingName
image_df, moa_df = bb.Bbbc021PlateInfoByDF.getDataFrames(BBBC021_BUCKET)
compound_moa_map = bb.Bbbc021PlateInfoByDF.getCompoundMoaMapFromDf(moa_df)
# We need to go from imageId->ImageSourceId->compound->moa
# 'Image_FileName_DAPI[:-4]' serves as the ImageSourceId
sourceCompoundMap={}
for i in range(len(image_df.index)):
r = image_df.iloc[i]
imageSourceId = r['Image_FileName_DAPI'][:-4]
compound = r['Image_Metadata_Compound']
sourceCompoundMap[imageSourceId]=compound
bbbc021ImageCount = len(image_df.index)
print("BBBC-021 image count={}".format(bbbc021ImageCount))
#imagesRemovedByCompound={}
moaDict={}
i=0
for k, v in compound_moa_map.items():
print("i={} key={} value={}".format(i,k,v))
moaDict[v]=True
# removedList = []
# imagesRemovedByCompound[k]=removedList
i+=1
imageClient = bioims.client('image-management')
trainingConfigurationClient = bioims.client('training-configuration')
tagClient = bioims.client('tag')
embeddingInfo = trainingConfigurationClient.getEmbeddingInfo(EMBEDDING)
print(embeddingInfo)
width = embeddingInfo['inputWidth']
height = embeddingInfo['inputHeight']
depth = embeddingInfo['inputDepth']
channels = embeddingInfo['inputChannels']
print("list compatible plates: width={} height={} depth={} channels={}".format(width, height, depth, channels))
plateList = imageClient.listCompatiblePlates(width, height, depth, channels)
pl=len(plateList)
print("found {} compatible plates".format(pl))
tagList = tagClient.getAllTags()
tagIdMap={}
for tagInfo in tagList:
print("{} {}".format(tagInfo['id'], tagInfo['tagValue']))
tagIdMap[tagInfo['tagValue']] = tagInfo['id']
def cleanLabel(label):
c1 = "".join(label.split())
c2 = c1.replace('/','-')
return c2
def getBatchTagFromPlateSourceId(psi):
ca = psi.split('_')
return "batch:" + ca[0]
for i, pi in enumerate(plateList):
plateId = pi['plateId']
print("Plate {} {}".format(i, plateId))
imageList = imageClient.getImagesByPlateId(plateId)
for imageItem in imageList:
image = imageItem['Item']
imageId = image['imageId']
imageSourceId = image['imageSourceId']
tagList = []
if 'plateSourceId' in image:
plateSourceId = image['plateSourceId']
batchTag = getBatchTagFromPlateSourceId(plateSourceId)
batchTagId = tagIdMap[batchTag]
tagList.append(batchTagId)
if imageSourceId in sourceCompoundMap:
imageCompound = cleanLabel(sourceCompoundMap[imageSourceId])
compoundTag = "compound:" + imageCompound
if compoundTag in tagIdMap:
compoundId = tagIdMap[compoundTag]
print("{} {} {}".format(imageId, compoundTag, compoundId))
tagList.append(compoundId)
if 'trainCategory' in image and 'trainLabel' in image:
trainCategory = image['trainCategory']
trainLabel = image['trainLabel']
if trainCategory=='moa' and trainLabel in moaDict:
moa = cleanLabel(trainLabel)
moaTag = "moa:" + moa
moaId = tagIdMap[moaTag]
print("{} {} {}".format(imageId, moaTag, moaId))
tagList.append(moaId)
if len(tagList)>0:
imageClient.updateImageTags(imageId, tagList)
| import sys
import argparse
import boto3
from pathlib import Path
import bbbc021common as bb
s3c = boto3.client('s3')
sys.path.insert(0, "../../../cli/bioims/src")
import bioims
parser = argparse.ArgumentParser()
parser.add_argument('--bbbc021-bucket', type=str, required=True, help='bbbc021 bucket')
parser.add_argument('--bioims-resource-bucket', type=str, required=True, help='resource bucket')
parser.add_argument('--embeddingName', type=str, required=True, help='embedding name')
args = parser.parse_args()
BBBC021_BUCKET = args.bbbc021_bucket
BIOIMS_INPUT_BUCKET = args.bioims_resource_bucket
EMBEDDING = args.embeddingName
image_df, moa_df = bb.Bbbc021PlateInfoByDF.getDataFrames(BBBC021_BUCKET)
compound_moa_map = bb.Bbbc021PlateInfoByDF.getCompoundMoaMapFromDf(moa_df)
# We need to go from imageId->ImageSourceId->compound->moa
# 'Image_FileName_DAPI[:-4]' serves as the ImageSourceId
sourceCompoundMap={}
for i in range(len(image_df.index)):
r = image_df.iloc[i]
imageSourceId = r['Image_FileName_DAPI'][:-4]
compound = r['Image_Metadata_Compound']
sourceCompoundMap[imageSourceId]=compound
bbbc021ImageCount = len(image_df.index)
print("BBBC-021 image count={}".format(bbbc021ImageCount))
#imagesRemovedByCompound={}
moaDict={}
i=0
for k, v in compound_moa_map.items():
print("i={} key={} value={}".format(i,k,v))
moaDict[v]=True
# removedList = []
# imagesRemovedByCompound[k]=removedList
i+=1
imageClient = bioims.client('image-management')
trainingConfigurationClient = bioims.client('training-configuration')
tagClient = bioims.client('tag')
embeddingInfo = trainingConfigurationClient.getEmbeddingInfo(EMBEDDING)
print(embeddingInfo)
width = embeddingInfo['inputWidth']
height = embeddingInfo['inputHeight']
depth = embeddingInfo['inputDepth']
channels = embeddingInfo['inputChannels']
print("list compatible plates: width={} height={} depth={} channels={}".format(width, height, depth, channels))
plateList = imageClient.listCompatiblePlates(width, height, depth, channels)
pl=len(plateList)
print("found {} compatible plates".format(pl))
tagList = tagClient.getAllTags()
tagIdMap={}
for tagInfo in tagList:
print("{} {}".format(tagInfo['id'], tagInfo['tagValue']))
tagIdMap[tagInfo['tagValue']] = tagInfo['id']
def cleanLabel(label):
c1 = "".join(label.split())
c2 = c1.replace('/','-')
return c2
def getBatchTagFromPlateSourceId(psi):
ca = psi.split('_')
return "batch:" + ca[0]
for i, pi in enumerate(plateList):
plateId = pi['plateId']
print("Plate {} {}".format(i, plateId))
imageList = imageClient.getImagesByPlateId(plateId)
for imageItem in imageList:
image = imageItem['Item']
imageId = image['imageId']
imageSourceId = image['imageSourceId']
tagList = []
if 'plateSourceId' in image:
plateSourceId = image['plateSourceId']
batchTag = getBatchTagFromPlateSourceId(plateSourceId)
batchTagId = tagIdMap[batchTag]
tagList.append(batchTagId)
if imageSourceId in sourceCompoundMap:
imageCompound = cleanLabel(sourceCompoundMap[imageSourceId])
compoundTag = "compound:" + imageCompound
if compoundTag in tagIdMap:
compoundId = tagIdMap[compoundTag]
print("{} {} {}".format(imageId, compoundTag, compoundId))
tagList.append(compoundId)
if 'trainCategory' in image and 'trainLabel' in image:
trainCategory = image['trainCategory']
trainLabel = image['trainLabel']
if trainCategory=='moa' and trainLabel in moaDict:
moa = cleanLabel(trainLabel)
moaTag = "moa:" + moa
moaId = tagIdMap[moaTag]
print("{} {} {}".format(imageId, moaTag, moaId))
tagList.append(moaId)
if len(tagList)>0:
imageClient.updateImageTags(imageId, tagList)
| en | 0.751586 | # We need to go from imageId->ImageSourceId->compound->moa # 'Image_FileName_DAPI[:-4]' serves as the ImageSourceId #imagesRemovedByCompound={} # removedList = [] # imagesRemovedByCompound[k]=removedList | 2.108356 | 2 |
gpi/convert_mm_to_json.py | katieefrey/glass-plates-inventory | 0 | 6618504 | <reponame>katieefrey/glass-plates-inventory
# standard lib packages
import sys
import os
import re
import os.path
import json
import csv
import datetime
from astropy import units as u
from astropy.coordinates import SkyCoord
def convertData():
records = []
fp = open("mariamitchell_data.txt", "r", encoding="utf-8")
mmap = (fp.read()).splitlines()
for row in mmap:
data = row.split(",")
print (data)
try:
dates = data[5].split("_")
x = datetime.datetime(1900+int(dates[0]), int(dates[1]), int(dates[2]))
thedate = x.strftime('%B %d, %Y')
except:
thedate = ""
if "." in data[2]:
pieces = data[2].split(".")
mins = pieces[0]
secs = pieces[1]*60
coords = SkyCoord(str(data[1]+":"+pieces[0]+":"+pieces[1]+" 0"), unit=(u.hourangle, u.deg))
else:
coords = SkyCoord(str(data[1]+":"+data[2]+":00 0"), unit=(u.hourangle, u.deg))
decira = coords.ra.deg
#decidec = coords.dec.deg
if data[3] != "":
deg = float(data[3])
else:
deg = None
if data[6] == "":
jd = None
elif data[6][0:2] == "24":
jd = float(data[6])
else:
jd = float("24"+data[6])
newrecord = {
"identifier" : data[0],
"archive": "mmoapc",
"obs_info" : {
"instrument" : "7.5-inch Cooke/Clark refractor",
"observatory" : "Maria Mitchell Observatory"
},
}
plate_info = {}
exposure_info = [
{
"number": 0,
"ra" : data[1]+":"+data[2]+":00",
"ra_deg" : decira,
"dec" : deg,
"dec_deg" : deg
}
]
if data[7] != "":
plate_info["emulsion"] = data[7]
if data[8] != "":
plate_info["notes"] = data[8]
if data[5] != "":
exposure_info[0]["calendar_date"] = thedate
if data[6] != "":
exposure_info[0]["jd2000"] = jd
if data[4] != "":
exposure_info[0]["duration"] = {
"value" : data[4],
"unit" : "min",
}
if plate_info != {}:
newrecord["plate_info"] = plate_info
if exposure_info != {}:
newrecord["exposure_info"] = exposure_info
records.append(newrecord)
with open('data_mm.json', 'w', encoding="utf-8") as f:
json.dump(records, f, ensure_ascii=False)
if __name__ == "__main__":
convertData()
| # standard lib packages
import sys
import os
import re
import os.path
import json
import csv
import datetime
from astropy import units as u
from astropy.coordinates import SkyCoord
def convertData():
records = []
fp = open("mariamitchell_data.txt", "r", encoding="utf-8")
mmap = (fp.read()).splitlines()
for row in mmap:
data = row.split(",")
print (data)
try:
dates = data[5].split("_")
x = datetime.datetime(1900+int(dates[0]), int(dates[1]), int(dates[2]))
thedate = x.strftime('%B %d, %Y')
except:
thedate = ""
if "." in data[2]:
pieces = data[2].split(".")
mins = pieces[0]
secs = pieces[1]*60
coords = SkyCoord(str(data[1]+":"+pieces[0]+":"+pieces[1]+" 0"), unit=(u.hourangle, u.deg))
else:
coords = SkyCoord(str(data[1]+":"+data[2]+":00 0"), unit=(u.hourangle, u.deg))
decira = coords.ra.deg
#decidec = coords.dec.deg
if data[3] != "":
deg = float(data[3])
else:
deg = None
if data[6] == "":
jd = None
elif data[6][0:2] == "24":
jd = float(data[6])
else:
jd = float("24"+data[6])
newrecord = {
"identifier" : data[0],
"archive": "mmoapc",
"obs_info" : {
"instrument" : "7.5-inch Cooke/Clark refractor",
"observatory" : "Maria Mitchell Observatory"
},
}
plate_info = {}
exposure_info = [
{
"number": 0,
"ra" : data[1]+":"+data[2]+":00",
"ra_deg" : decira,
"dec" : deg,
"dec_deg" : deg
}
]
if data[7] != "":
plate_info["emulsion"] = data[7]
if data[8] != "":
plate_info["notes"] = data[8]
if data[5] != "":
exposure_info[0]["calendar_date"] = thedate
if data[6] != "":
exposure_info[0]["jd2000"] = jd
if data[4] != "":
exposure_info[0]["duration"] = {
"value" : data[4],
"unit" : "min",
}
if plate_info != {}:
newrecord["plate_info"] = plate_info
if exposure_info != {}:
newrecord["exposure_info"] = exposure_info
records.append(newrecord)
with open('data_mm.json', 'w', encoding="utf-8") as f:
json.dump(records, f, ensure_ascii=False)
if __name__ == "__main__":
convertData() | en | 0.468019 | # standard lib packages #decidec = coords.dec.deg | 2.760552 | 3 |
Model/UserProfile.py | SWEN5236F19/EmergenSeat | 1 | 6618505 | <reponame>SWEN5236F19/EmergenSeat<filename>Model/UserProfile.py<gh_stars>1-10
from Model.CarSeat import CarSeat
class UserProfile:
def __init__(self, email, first_name, last_name, password):
self.email = email
self.password = <PASSWORD>(password)
self.first_name = first_name
self.last_name = last_name
self.car_seats = []
def __get__(self, instance, owner):
return instance
def __delete__(self, instance):
assert isinstance(instance, UserProfile)
del instance
def add_car_seat(self, car_seat):
self.car_seats.append(car_seat)
return car_seat
def delete_car_seat(self, serial_number):
if self.car_seats.__contains__(serial_number):
index = self.car_seats.index(serial_number)
self.car_seats.remove(serial_number)
def print_user_profile(self):
print("Email: " + self.email)
for car_seat in self.car_seats:
car_seat.print_car_seat()
def to_json(self):
profile = {"email": self.email,
"first_name": self.first_name,
"last_name": self.last_name,
"password": <PASSWORD>,
"car_seats": []}
for car_seat in self.car_seats:
profile["car_seats"].append(car_seat.to_json())
return profile
| from Model.CarSeat import CarSeat
class UserProfile:
def __init__(self, email, first_name, last_name, password):
self.email = email
self.password = <PASSWORD>(password)
self.first_name = first_name
self.last_name = last_name
self.car_seats = []
def __get__(self, instance, owner):
return instance
def __delete__(self, instance):
assert isinstance(instance, UserProfile)
del instance
def add_car_seat(self, car_seat):
self.car_seats.append(car_seat)
return car_seat
def delete_car_seat(self, serial_number):
if self.car_seats.__contains__(serial_number):
index = self.car_seats.index(serial_number)
self.car_seats.remove(serial_number)
def print_user_profile(self):
print("Email: " + self.email)
for car_seat in self.car_seats:
car_seat.print_car_seat()
def to_json(self):
profile = {"email": self.email,
"first_name": self.first_name,
"last_name": self.last_name,
"password": <PASSWORD>,
"car_seats": []}
for car_seat in self.car_seats:
profile["car_seats"].append(car_seat.to_json())
return profile | none | 1 | 3.098377 | 3 | |
experiments/1_Sampling_Naive_Likelihood_OC-SVM_DAE_BINet/april/fs.py | Business-Process-Analytics/AnomalyDetection | 0 | 6618506 | <reponame>Business-Process-Analytics/AnomalyDetection
# Copyright 2018 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from pathlib import Path
import arrow
# Base
ROOT_DIR = Path(__file__).parent.parent
# Base directories
OUT_DIR = ROOT_DIR / '.out' # For anything that is being generated
RES_DIR = ROOT_DIR / '.res' # For resources shipped with the repository
CACHE_DIR = OUT_DIR / '.cache' # Used to cache event logs, results, etc.
# Resources
PROCESS_MODEL_DIR = RES_DIR / 'process_models' # Randomly generated process models from PLG2
BPIC_DIR = RES_DIR / 'bpic' # BPIC logs in XES format
# Output
EVENTLOG_DIR = OUT_DIR / 'eventlogs' # For generated event logs
MODEL_DIR = OUT_DIR / 'models' # For anomaly detection models
PLOT_DIR = OUT_DIR / 'plots' # For plots
# Cache
EVENTLOG_CACHE_DIR = CACHE_DIR / 'eventlogs' # For caching datasets so the event log does not always have to be loaded
RESULT_DIR = CACHE_DIR / 'results' # For caching anomaly detection results
# Config
CONFIG_DIR = ROOT_DIR / '.config'
# Database
DATABASE_FILE = OUT_DIR / 'april.db'
# Extensions
MODEL_EXT = '.model'
RESULT_EXT = '.result'
# Misc
DATE_FORMAT = 'YYYYMMDD-HHmmss.SSSSSS'
def generate():
"""Generate directories."""
dirs = [
ROOT_DIR,
OUT_DIR,
RES_DIR,
CACHE_DIR,
RESULT_DIR,
EVENTLOG_CACHE_DIR,
MODEL_DIR,
PROCESS_MODEL_DIR,
EVENTLOG_DIR,
BPIC_DIR,
PLOT_DIR
]
for d in dirs:
if not d.exists():
d.mkdir()
def split_eventlog_name(name):
try:
s = name.split('-')
model = s[0]
p = float(s[1])
id = int(s[2])
except Exception:
model = None
p = None
id = None
return model, p, id
def split_model_name(name):
try:
s = name.split('_')
event_log_name = s[0]
ad = s[1]
date = arrow.get(s[2], DATE_FORMAT)
except Exception as e:
event_log_name = None
ad = None
date = None
return event_log_name, ad, date
class File(object):
ext = None
def __init__(self, path):
if not isinstance(path, Path):
path = Path(path)
self.path = path
self.file = self.path.name
self.name = self.path.stem
self.str_path = str(path)
def remove(self):
import os
if self.path.exists():
os.remove(self.path)
class EventLogFile(File):
def __init__(self, path):
if not isinstance(path, Path):
path = Path(path)
if '.json' not in path.suffixes:
path = Path(str(path) + '.json.gz')
if not path.is_absolute():
path = EVENTLOG_DIR / path.name
super(EventLogFile, self).__init__(path)
if len(self.path.suffixes) > 1:
self.name = Path(self.path.stem).stem
self.model, self.p, self.id = split_eventlog_name(self.name)
print('3 aprit-EventLogFile-init:self.model, self.p, self.id=', self.model, self.p, self.id)
@property
def cache_file(self):
print('5 april-fs-EventLogFile-cache_file:', EVENTLOG_CACHE_DIR / (self.name + '.pkl.gz'))
return EVENTLOG_CACHE_DIR / (self.name + '.pkl.gz')
class ModelFile(File):
ext = MODEL_EXT
def __init__(self, path):
if not isinstance(path, Path):
path = Path(path)
if path.suffix != self.ext:
path = Path(str(path) + self.ext)
if not path.is_absolute():
path = MODEL_DIR / path.name
super(ModelFile, self).__init__(path)
self.event_log_name, self.ad, self.date = split_model_name(self.name)
self.model, self.p, self.id = split_eventlog_name(self.event_log_name)
@property
def result_file(self):
return RESULT_DIR / (self.name + RESULT_EXT)
class ResultFile(File):
ext = RESULT_EXT
@property
def model_file(self):
return MODEL_DIR / (self.name + MODEL_EXT)
def get_event_log_files(path=None):
if path is None:
path = EVENTLOG_DIR
for f in path.glob('*.json*'):
yield EventLogFile(f)
def get_model_files(path=None):
if path is None:
path = MODEL_DIR
for f in path.glob(f'*{MODEL_EXT}'):
yield ModelFile(f)
def get_result_files(path=None):
if path is None:
path = RESULT_DIR
for f in path.glob(f'*{RESULT_EXT}'):
yield ResultFile(f)
def get_process_model_files(path=None):
if path is None:
path = PROCESS_MODEL_DIR
for f in path.glob('*.plg'):
yield f.stem
| # Copyright 2018 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from pathlib import Path
import arrow
# Base
ROOT_DIR = Path(__file__).parent.parent
# Base directories
OUT_DIR = ROOT_DIR / '.out' # For anything that is being generated
RES_DIR = ROOT_DIR / '.res' # For resources shipped with the repository
CACHE_DIR = OUT_DIR / '.cache' # Used to cache event logs, results, etc.
# Resources
PROCESS_MODEL_DIR = RES_DIR / 'process_models' # Randomly generated process models from PLG2
BPIC_DIR = RES_DIR / 'bpic' # BPIC logs in XES format
# Output
EVENTLOG_DIR = OUT_DIR / 'eventlogs' # For generated event logs
MODEL_DIR = OUT_DIR / 'models' # For anomaly detection models
PLOT_DIR = OUT_DIR / 'plots' # For plots
# Cache
EVENTLOG_CACHE_DIR = CACHE_DIR / 'eventlogs' # For caching datasets so the event log does not always have to be loaded
RESULT_DIR = CACHE_DIR / 'results' # For caching anomaly detection results
# Config
CONFIG_DIR = ROOT_DIR / '.config'
# Database
DATABASE_FILE = OUT_DIR / 'april.db'
# Extensions
MODEL_EXT = '.model'
RESULT_EXT = '.result'
# Misc
DATE_FORMAT = 'YYYYMMDD-HHmmss.SSSSSS'
def generate():
"""Generate directories."""
dirs = [
ROOT_DIR,
OUT_DIR,
RES_DIR,
CACHE_DIR,
RESULT_DIR,
EVENTLOG_CACHE_DIR,
MODEL_DIR,
PROCESS_MODEL_DIR,
EVENTLOG_DIR,
BPIC_DIR,
PLOT_DIR
]
for d in dirs:
if not d.exists():
d.mkdir()
def split_eventlog_name(name):
try:
s = name.split('-')
model = s[0]
p = float(s[1])
id = int(s[2])
except Exception:
model = None
p = None
id = None
return model, p, id
def split_model_name(name):
try:
s = name.split('_')
event_log_name = s[0]
ad = s[1]
date = arrow.get(s[2], DATE_FORMAT)
except Exception as e:
event_log_name = None
ad = None
date = None
return event_log_name, ad, date
class File(object):
ext = None
def __init__(self, path):
if not isinstance(path, Path):
path = Path(path)
self.path = path
self.file = self.path.name
self.name = self.path.stem
self.str_path = str(path)
def remove(self):
import os
if self.path.exists():
os.remove(self.path)
class EventLogFile(File):
def __init__(self, path):
if not isinstance(path, Path):
path = Path(path)
if '.json' not in path.suffixes:
path = Path(str(path) + '.json.gz')
if not path.is_absolute():
path = EVENTLOG_DIR / path.name
super(EventLogFile, self).__init__(path)
if len(self.path.suffixes) > 1:
self.name = Path(self.path.stem).stem
self.model, self.p, self.id = split_eventlog_name(self.name)
print('3 aprit-EventLogFile-init:self.model, self.p, self.id=', self.model, self.p, self.id)
@property
def cache_file(self):
print('5 april-fs-EventLogFile-cache_file:', EVENTLOG_CACHE_DIR / (self.name + '.pkl.gz'))
return EVENTLOG_CACHE_DIR / (self.name + '.pkl.gz')
class ModelFile(File):
ext = MODEL_EXT
def __init__(self, path):
if not isinstance(path, Path):
path = Path(path)
if path.suffix != self.ext:
path = Path(str(path) + self.ext)
if not path.is_absolute():
path = MODEL_DIR / path.name
super(ModelFile, self).__init__(path)
self.event_log_name, self.ad, self.date = split_model_name(self.name)
self.model, self.p, self.id = split_eventlog_name(self.event_log_name)
@property
def result_file(self):
return RESULT_DIR / (self.name + RESULT_EXT)
class ResultFile(File):
ext = RESULT_EXT
@property
def model_file(self):
return MODEL_DIR / (self.name + MODEL_EXT)
def get_event_log_files(path=None):
if path is None:
path = EVENTLOG_DIR
for f in path.glob('*.json*'):
yield EventLogFile(f)
def get_model_files(path=None):
if path is None:
path = MODEL_DIR
for f in path.glob(f'*{MODEL_EXT}'):
yield ModelFile(f)
def get_result_files(path=None):
if path is None:
path = RESULT_DIR
for f in path.glob(f'*{RESULT_EXT}'):
yield ResultFile(f)
def get_process_model_files(path=None):
if path is None:
path = PROCESS_MODEL_DIR
for f in path.glob('*.plg'):
yield f.stem | en | 0.847372 | # Copyright 2018 <NAME> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ============================================================================== # Base # Base directories # For anything that is being generated # For resources shipped with the repository # Used to cache event logs, results, etc. # Resources # Randomly generated process models from PLG2 # BPIC logs in XES format # Output # For generated event logs # For anomaly detection models # For plots # Cache # For caching datasets so the event log does not always have to be loaded # For caching anomaly detection results # Config # Database # Extensions # Misc Generate directories. | 1.607771 | 2 |
runScrape.py | awesome-archive/ReadableWebProxy | 0 | 6618507 | <filename>runScrape.py
#!flask/bin/python
if __name__ == "__main__":
import logSetup
import logging
logSetup.initLogging()
# logSetup.initLogging(logging.WARNING)
# Shut up fucking annoying psycopg2 vomit every exec.
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module='psycopg2')
# This HAS to be included before the app, to prevent circular dependencies.
# import WebMirror.runtime_engines
import common.RunManager
import WebMirror.rules
import WebMirror.Runner
import WebMirror.UrlUpserter
import RawArchiver.RawRunner
import RawArchiver.RawUrlUpserter
import common.stuck
import common.process
import Misc.ls_open_file_handles
import common.redis
from settings import NO_PROCESSES
from settings import RAW_NO_PROCESSES
from settings import MAX_DB_SESSIONS
def go():
largv = [tmp.lower() for tmp in sys.argv]
rules = WebMirror.rules.load_rules()
runner = common.RunManager.Crawler(main_thread_count=NO_PROCESSES, raw_thread_count=RAW_NO_PROCESSES)
if "raw" in largv:
common.process.name_process("raw fetcher management thread")
print("RAW Scrape!")
RawArchiver.RawUrlUpserter.check_init_func()
if not "noreset" in largv:
print("Resetting any in-progress downloads.")
RawArchiver.RawUrlUpserter.resetRawInProgress()
else:
print("Not resetting in-progress downloads.")
RawArchiver.RawUrlUpserter.initializeRawStartUrls()
runner.run_raw()
else:
common.process.name_process("fetcher management thread")
if not "noreset" in largv:
print("Resetting any in-progress downloads.")
WebMirror.UrlUpserter.resetInProgress()
else:
print("Not resetting in-progress downloads.")
WebMirror.UrlUpserter.initializeStartUrls(rules)
runner.run()
print("Main runner returned!")
# print("Thread halted. App exiting.")
if __name__ == "__main__":
import sys
largv = [tmp.lower() for tmp in sys.argv]
if "scheduler" in sys.argv:
print("Please use runScheduler.py instead!")
sys.exit(1)
else:
started = False
if not started:
started = True
go()
| <filename>runScrape.py
#!flask/bin/python
if __name__ == "__main__":
import logSetup
import logging
logSetup.initLogging()
# logSetup.initLogging(logging.WARNING)
# Shut up fucking annoying psycopg2 vomit every exec.
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module='psycopg2')
# This HAS to be included before the app, to prevent circular dependencies.
# import WebMirror.runtime_engines
import common.RunManager
import WebMirror.rules
import WebMirror.Runner
import WebMirror.UrlUpserter
import RawArchiver.RawRunner
import RawArchiver.RawUrlUpserter
import common.stuck
import common.process
import Misc.ls_open_file_handles
import common.redis
from settings import NO_PROCESSES
from settings import RAW_NO_PROCESSES
from settings import MAX_DB_SESSIONS
def go():
largv = [tmp.lower() for tmp in sys.argv]
rules = WebMirror.rules.load_rules()
runner = common.RunManager.Crawler(main_thread_count=NO_PROCESSES, raw_thread_count=RAW_NO_PROCESSES)
if "raw" in largv:
common.process.name_process("raw fetcher management thread")
print("RAW Scrape!")
RawArchiver.RawUrlUpserter.check_init_func()
if not "noreset" in largv:
print("Resetting any in-progress downloads.")
RawArchiver.RawUrlUpserter.resetRawInProgress()
else:
print("Not resetting in-progress downloads.")
RawArchiver.RawUrlUpserter.initializeRawStartUrls()
runner.run_raw()
else:
common.process.name_process("fetcher management thread")
if not "noreset" in largv:
print("Resetting any in-progress downloads.")
WebMirror.UrlUpserter.resetInProgress()
else:
print("Not resetting in-progress downloads.")
WebMirror.UrlUpserter.initializeStartUrls(rules)
runner.run()
print("Main runner returned!")
# print("Thread halted. App exiting.")
if __name__ == "__main__":
import sys
largv = [tmp.lower() for tmp in sys.argv]
if "scheduler" in sys.argv:
print("Please use runScheduler.py instead!")
sys.exit(1)
else:
started = False
if not started:
started = True
go()
| en | 0.681148 | #!flask/bin/python # logSetup.initLogging(logging.WARNING) # Shut up fucking annoying psycopg2 vomit every exec. # This HAS to be included before the app, to prevent circular dependencies. # import WebMirror.runtime_engines # print("Thread halted. App exiting.") | 2.282872 | 2 |
ALGORITHM/hmp_curiosity/trajectory.py | Harold0/hmp | 0 | 6618508 | <reponame>Harold0/hmp
# cython: language_level=3
import numpy as np
from .foundation import AlgorithmConfig
import copy
from UTILS.colorful import *
from UTILS.tensor_ops import __hash__
def _flatten_helper(T, N, _tensor):
return _tensor.view(T * N, *_tensor.size()[2:])
'''
轨迹
'''
class trajectory():
def __init__(self, traj_limit, env_id):
self.readonly_lock = False
self.traj_limit = traj_limit
self.time_pointer = 0
self.n_frame_clip = -1
self.key_dict = []
self.env_id = env_id
self.done_cut_tail = False
def remember(self, key, content):
assert not self.readonly_lock
if not (key in self.key_dict) and (content is not None):
assert isinstance(content, np.ndarray) or isinstance(content, float), (key, content.__class__)
assert self.time_pointer == 0, key
tensor_size = ((self.traj_limit,) + tuple(content.shape))
set_item = np.zeros(shape=tensor_size, dtype=content.dtype)
set_item[:] = np.nan if np.issubdtype(content.dtype, np.floating) else 0
setattr(self, key, set_item)
self.key_dict.append(key)
getattr(self, key)[self.time_pointer] = content
elif (key in self.key_dict) and (content is not None):
getattr(self, key)[self.time_pointer] = content
else:
pass # do nothing
def time_shift(self):
assert self.time_pointer < self.traj_limit
self.time_pointer += 1
def get_most_freq_pattern(self): # get_hyper_reward(self):
self.readonly_lock = True
n_frame = self.time_pointer
if not self.done_cut_tail:
self.done_cut_tail = True
# clip tail
for key in self.key_dict:
set_item = getattr(self, key)[:n_frame]
setattr(self, key, set_item)
# 根据这个轨迹上的NaN,删除所有无效时间点
# before clip NaN, push reward forward
reference_track = getattr(self, 'value_R')
reward = getattr(self, 'reward')
p_invalid = np.isnan(reference_track).squeeze()
p_valid = ~p_invalid
assert ~p_invalid[0]
for i in reversed(range(n_frame)):
if p_invalid[i] and i != 0 : # invalid, push reward forward
reward[i-1] += reward[i]
reward[i] = np.nan
# clip NaN
for key in self.key_dict:
set_item = getattr(self, key)
setattr(self, key, set_item[p_valid])
reward_key = 'reward'
reward = getattr(self, reward_key)
assert not np.isnan(reward).any()
# new finalize
def finalize(self, hyper_reward=None):
if hyper_reward is not None:
assert self.finalize
self.readonly_lock = True
n_frame = self.time_pointer
assert self.done_cut_tail
assert hyper_reward is not None
self.copy_track(origin_key='reward', new_key='h_reward')
h_rewards = getattr(self, 'h_reward')
# if self.env_id == 0: print(getattr(self, 'h_reward'), getattr(self, 'g_actions'))
assert not np.isnan(h_rewards[-1])
h_rewards[-1] += hyper_reward # reward fusion
self.gae_finalize_return(reward_key='h_reward', value_key='value_R', new_return_name='return_R')
self.gae_finalize_return(reward_key='reward', value_key='value_L', new_return_name='return_L')
def clip_reward_track(self, reward_key, n_frame_clip):
reward = getattr(self, reward_key)
reward_tail = reward[n_frame_clip:].sum()
reward[n_frame_clip-1] += reward_tail
set_item = reward[:n_frame_clip]
setattr(self, reward_key, set_item)
#return getattr(self, reward_key)
def copy_track(self, origin_key, new_key):
if hasattr(self, origin_key):
origin_handle = getattr(self, origin_key)
setattr(self, new_key, origin_handle.copy())
new_handle = getattr(self, new_key)
self.key_dict.append(new_key)
#return origin_handle, new_handle
else:
real_key_list = [real_key for real_key in self.__dict__ if (origin_key+'>' in real_key)]
assert len(real_key_list)>0
for real_key in real_key_list:
mainkey, subkey = real_key.split('>')
self.copy_track(real_key, (new_key+'>'+subkey))
#return
def gae_finalize_return(self, reward_key, value_key, new_return_name):
gamma = AlgorithmConfig.gamma # ------- gae parameters -------
tau = AlgorithmConfig.tau
# ------- -------------- -------
rewards = getattr(self, reward_key)
value = getattr(self, value_key)
length = rewards.shape[0]
assert rewards.shape[0]==value.shape[0]
gae = 0
# initalize two more tracks
setattr(self, new_return_name, np.zeros_like(value))
self.key_dict.append(new_return_name)
returns = getattr(self, new_return_name)
for step in reversed(range(length)):
if step==(length-1): # 最后一帧
value_preds_delta = rewards[step] - value[step]
gae = value_preds_delta
else:
value_preds_delta = rewards[step] + gamma * value[step + 1] - value[step]
gae = value_preds_delta + gamma * tau * gae
returns[step] = gae + value[step]
def calculate_sample_entropy(samples):
key = []
freq = []
n_sample = len(samples)
for s in samples:
if s not in key:
key.append(s)
freq.append(1)
else:
i = key.index(s)
freq[i] += 1
entropy = 0.0
for j,f in enumerate(freq):
freq[j] /= n_sample
entropy += -freq[j] * np.log(freq[j])
# print亮红(key)
# print亮红(freq)
return entropy
class TrajPoolManager(object):
    """Finalizes pools of completed trajectories before training.

    The entropy-based hyper-reward machinery is currently disabled:
    ``absorb_finalize_pool`` always fuses a hyper reward of 0.
    """
    def __init__(self, n_pool):
        self.n_pool = n_pool            # capacity hint; not enforced here
        self.hyper_reward = []
        self.traj_pool_index = []
        self.cnt = 0                    # number of pools absorbed so far
        self.clip_entropy_max = 4       # entropy clip bound (inactive while coef is 0)
        self.entropy_coef = 0           # entropy bonus weight (disabled)
    def absorb_finalize_pool(self, pool):
        """Post-process every finished trajectory in *pool*.

        First pass clips tails / invalid frames; second pass fuses a
        (currently zero) hyper reward into the terminal step and computes
        GAE returns. Returns the list of training tasks to run.
        """
        for traj in pool:
            traj.get_most_freq_pattern()
        h_reward = 0  # entropy bonus disabled (entropy_coef == 0)
        for traj in pool:
            traj.finalize(hyper_reward=h_reward)
        self.cnt += 1
        return ['train_R']
'''
轨迹池管理
'''
class BatchTrajManager():
    """Collects per-step fragments from a vectorized environment into
    per-env ``trajectory`` buffers, pools finished trajectories, and
    triggers training once enough of them have accumulated.
    """
    templete = {
        # exam that trajectory have at least following things
        "on-policy": ['skip', 'obs', 'actions', 'reward', 'done', 'value', 'actionLogProbs'],
        "off-policy": ['skip', 'obs', 'actions', 'reward', 'done'],
    }
    def __init__(self, n_env, traj_limit, templete, trainer_hook):
        # trainer_hook(traj_pool, task) is invoked from train_and_clear_traj_pool().
        self.trainer_hook = trainer_hook
        self.n_env = n_env
        self.traj_limit = traj_limit
        self.train_traj_needed = AlgorithmConfig.train_traj_needed
        self.upper_training_epoch = AlgorithmConfig.upper_training_epoch
        self.live_trajs = [trajectory(self.traj_limit, env_id=i) for i in range(n_env)]
        self.live_traj_frame = [0 for _ in range(self.n_env)]  # frames written per env
        self.traj_pool = []            # finished trajectories awaiting training
        self.registered_keys = []      # keys seen so far (integrity check)
        self._traj_lock_buf = None     # half-finished fragment awaiting its unlock hook
        self.pool_manager = TrajPoolManager(n_pool=self.upper_training_epoch)
        self.patience = 1e3            # remaining budget for the integrity check
        self.update_cnt = 0
    def update(self, traj_frag, index):
        """Write a fragment for the subset of envs listed in *index*
        (fragment arrays are indexed by position j, envs by index[j])."""
        assert traj_frag is not None
        for j, env_i in enumerate(index):
            traj_handle = self.live_trajs[env_i]
            for key in traj_frag:
                if traj_frag[key] is None:
                    assert False, key
                if isinstance(traj_frag[key], dict): # two-level dict: flatten its sub-keys
                    for sub_key in traj_frag[key]:
                        content = traj_frag[key][sub_key][j]
                        traj_handle.remember(key + ">" + sub_key, content)
                else:
                    content = traj_frag[key][j]
                    traj_handle.remember(key, content)
            self.live_traj_frame[env_i] += 1
            traj_handle.time_shift()
        return
    # main entry point
    def feed_traj(self, traj_frag, require_hook=False):
        """Feed one time step of data for all envs.

        With require_hook=True the fragment is incomplete (no 'done' /
        'reward' yet); it is buffered and a hook is returned that the
        caller must invoke with the missing fields before the next feed.
        """
        assert self._traj_lock_buf is None
        # an unlock hook must be exected before new trajectory feed in
        if require_hook: # the traj_frag is not intact, lock up traj_frag, wait for more
            assert 'done' not in traj_frag
            assert 'reward' not in traj_frag
            self._traj_lock_buf = traj_frag
            return self._unlock_hook
        else:
            assert 'done' in traj_frag
            assert 'skip' in traj_frag
            self.__batch_update(traj_frag=traj_frag)
    def _unlock_hook(self, traj_frag):
        """Second half of a hooked feed: merge the buffered fragment and commit."""
        assert self._traj_lock_buf is not None
        traj_frag.update(self._traj_lock_buf)
        self._traj_lock_buf = None
        assert 'done' in traj_frag
        assert 'skip' in traj_frag
        self.__batch_update(traj_frag=traj_frag)
    def ___check_integraty(self, traj_frag):
        """Assert every key ever fed keeps appearing (first ~1000 calls only)."""
        # can not alway waste time checking this
        if self.patience < 0: return
        self.patience -= 1
        for key in traj_frag:
            if key not in self.registered_keys:
                self.registered_keys.append(key)
        for key in self.registered_keys:
            assert key in traj_frag, ('this key sometimes disappears from the traj_frag:', key)
    def __batch_update(self, traj_frag):
        """Distribute one committed fragment to the live per-env buffers.

        Envs flagged by 'skip' receive nothing (and consume no fragment
        slot); envs flagged 'done' are moved to the pool and replaced with
        a fresh trajectory.
        """
        self.___check_integraty(traj_frag)
        done = traj_frag['done']; traj_frag.pop('done')
        skip = traj_frag['skip']; traj_frag.pop('skip')
        # single bool to list bool
        if isinstance(done, bool): done = [done for i in range(self.n_env)]
        if isinstance(skip, bool): skip = [skip for i in range(self.n_env)]
        # feed
        cnt = 0  # position inside the fragment arrays (skipped envs excluded)
        for env_i, env_done, skip_this in zip(range(self.n_env), done, skip):
            if skip_this: continue
            # otherwise
            frag_index = cnt; cnt += 1
            env_index = env_i
            traj_handle = self.live_trajs[env_index]
            for key in traj_frag:
                if traj_frag[key] is None:
                    traj_handle.remember(key, None)
                elif isinstance(traj_frag[key], dict): # two-level dict: flatten its sub-keys
                    for sub_key in traj_frag[key]:
                        content = traj_frag[key][sub_key][frag_index]
                        traj_handle.remember( "".join((key , ">" , sub_key)), content )
                else:
                    content = traj_frag[key][frag_index]
                    traj_handle.remember(key, content)
            self.live_traj_frame[env_index] += 1
            traj_handle.time_shift()
            if env_done:
                self.traj_pool.append(traj_handle)
                self.live_trajs[env_index] = trajectory(self.traj_limit, env_id=env_index)
                self.live_traj_frame[env_index] = 0
    def get_traj_frame(self):
        """Return the per-env count of frames in the live trajectories."""
        return self.live_traj_frame
    def train_and_clear_traj_pool(self):
        """Finalize the pooled trajectories, run the trainer hook for each
        task, then empty the pool. Returns the new update counter."""
        print('do update %d'%self.update_cnt)
        current_task_l = self.pool_manager.absorb_finalize_pool(pool=self.traj_pool)
        for current_task in current_task_l:
            ppo_update_cnt = self.trainer_hook(self.traj_pool, current_task)
        self.traj_pool = []
        self.update_cnt += 1
        # assert ppo_update_cnt == self.update_cnt
        return self.update_cnt
def can_exec_training(self):
if len(self.traj_pool) >= self.train_traj_needed:
return True
else:
return False | # cython: language_level=3
import numpy as np
from .foundation import AlgorithmConfig
import copy
from UTILS.colorful import *
from UTILS.tensor_ops import __hash__
def _flatten_helper(T, N, _tensor):
    """Merge the leading time (T) and batch (N) dims of *_tensor* into one.

    Returns a view of shape (T*N, *rest); assumes ``_tensor`` is a torch
    tensor whose first two dimensions are exactly T and N.
    """
    return _tensor.view(T * N, *_tensor.size()[2:])
'''
轨迹
'''
class trajectory():
    """Per-environment rollout buffer.

    Tracks (one per key in ``key_dict``) are pre-allocated numpy arrays of
    length ``traj_limit``; ``time_pointer`` is the index of the frame being
    written. Floating tracks are NaN-initialised so frames that were never
    written can later be detected and dropped.
    """
    def __init__(self, traj_limit, env_id):
        self.readonly_lock = False   # set once post-processing starts; blocks remember()
        self.traj_limit = traj_limit # maximum number of frames
        self.time_pointer = 0        # next frame index to write
        self.n_frame_clip = -1
        self.key_dict = []           # names of registered tracks
        self.env_id = env_id
        self.done_cut_tail = False   # True once the unused tail has been clipped
    def remember(self, key, content):
        """Write *content* into track *key* at the current time_pointer.

        New tracks may only be registered on the very first frame
        (time_pointer == 0); the full (traj_limit, *content.shape) array is
        allocated then. ``content is None`` is a silent no-op.
        """
        assert not self.readonly_lock
        if not (key in self.key_dict) and (content is not None):
            # First time this key is seen: allocate the whole track.
            # NOTE(review): a plain float would break tuple(content.shape)
            # below -- presumably only np.ndarray reaches this branch; confirm.
            assert isinstance(content, np.ndarray) or isinstance(content, float), (key, content.__class__)
            assert self.time_pointer == 0, key
            tensor_size = ((self.traj_limit,) + tuple(content.shape))
            set_item = np.zeros(shape=tensor_size, dtype=content.dtype)
            # NaN marks "never written" frames on floating tracks.
            set_item[:] = np.nan if np.issubdtype(content.dtype, np.floating) else 0
            setattr(self, key, set_item)
            self.key_dict.append(key)
            getattr(self, key)[self.time_pointer] = content
        elif (key in self.key_dict) and (content is not None):
            getattr(self, key)[self.time_pointer] = content
        else:
            pass # do nothing (content is None)
    def time_shift(self):
        """Advance the write cursor by one frame."""
        assert self.time_pointer < self.traj_limit
        self.time_pointer += 1
    def get_most_freq_pattern(self): # get_hyper_reward(self):
        """First post-processing pass (runs once, guarded by done_cut_tail).

        Clips every track to the frames actually written, pushes the reward
        of NaN-marked (invalid) frames onto the previous frame, then removes
        those frames from all tracks. Locks the buffer read-only.
        """
        self.readonly_lock = True
        n_frame = self.time_pointer
        if not self.done_cut_tail:
            self.done_cut_tail = True
            # clip tail: keep only the frames that were written
            for key in self.key_dict:
                set_item = getattr(self, key)[:n_frame]
                setattr(self, key, set_item)
            # Use the NaNs on this trajectory to drop all invalid time steps.
            # before clip NaN, push reward forward
            reference_track = getattr(self, 'value_R')
            reward = getattr(self, 'reward')
            p_invalid = np.isnan(reference_track).squeeze()
            p_valid = ~p_invalid
            assert ~p_invalid[0]
            for i in reversed(range(n_frame)):
                if p_invalid[i] and i != 0 : # invalid, push reward forward
                    reward[i-1] += reward[i]
                    reward[i] = np.nan
            # clip NaN: keep only valid frames on every track
            for key in self.key_dict:
                set_item = getattr(self, key)
                setattr(self, key, set_item[p_valid])
            reward_key = 'reward'
            reward = getattr(self, reward_key)
            assert not np.isnan(reward).any()
    # new finalize
    def finalize(self, hyper_reward=None):
        """Second pass: fuse *hyper_reward* into the terminal reward of a
        copied 'h_reward' track and compute GAE returns for both heads
        ('return_R' from h_reward/value_R, 'return_L' from reward/value_L).
        Requires get_most_freq_pattern() to have run first.
        """
        if hyper_reward is not None:
            assert self.finalize  # NOTE(review): always true (asserts the bound method)
        self.readonly_lock = True
        n_frame = self.time_pointer
        assert self.done_cut_tail
        assert hyper_reward is not None
        self.copy_track(origin_key='reward', new_key='h_reward')
        h_rewards = getattr(self, 'h_reward')
        # if self.env_id == 0: print(getattr(self, 'h_reward'), getattr(self, 'g_actions'))
        assert not np.isnan(h_rewards[-1])
        h_rewards[-1] += hyper_reward # reward fusion
        self.gae_finalize_return(reward_key='h_reward', value_key='value_R', new_return_name='return_R')
        self.gae_finalize_return(reward_key='reward', value_key='value_L', new_return_name='return_L')
    def clip_reward_track(self, reward_key, n_frame_clip):
        """Truncate a reward track to *n_frame_clip* frames, folding the
        clipped tail's summed reward into the new final frame."""
        reward = getattr(self, reward_key)
        reward_tail = reward[n_frame_clip:].sum()
        reward[n_frame_clip-1] += reward_tail
        set_item = reward[:n_frame_clip]
        setattr(self, reward_key, set_item)
        #return getattr(self, reward_key)
    def copy_track(self, origin_key, new_key):
        """Duplicate the track *origin_key* under *new_key*; nested tracks
        named '<origin_key>><sub>' are copied recursively."""
        if hasattr(self, origin_key):
            origin_handle = getattr(self, origin_key)
            # copy() so in-place edits (e.g. reward fusion) keep the source intact
            setattr(self, new_key, origin_handle.copy())
            new_handle = getattr(self, new_key)
            self.key_dict.append(new_key)
            #return origin_handle, new_handle
        else:
            real_key_list = [real_key for real_key in self.__dict__ if (origin_key+'>' in real_key)]
            assert len(real_key_list)>0
            for real_key in real_key_list:
                mainkey, subkey = real_key.split('>')  # mainkey is unused
                self.copy_track(real_key, (new_key+'>'+subkey))
            #return
    def gae_finalize_return(self, reward_key, value_key, new_return_name):
        """Compute GAE(lambda) returns over (reward_key, value_key) and
        store advantage + value as the new track *new_return_name*.
        The terminal frame is bootstrapped with successor value 0.
        """
        gamma = AlgorithmConfig.gamma # ------- gae parameters -------
        tau = AlgorithmConfig.tau
        # ------- -------------- -------
        rewards = getattr(self, reward_key)
        value = getattr(self, value_key)
        length = rewards.shape[0]
        assert rewards.shape[0]==value.shape[0]
        gae = 0
        # initalize two more tracks
        setattr(self, new_return_name, np.zeros_like(value))
        self.key_dict.append(new_return_name)
        returns = getattr(self, new_return_name)
        for step in reversed(range(length)):
            if step==(length-1): # last frame: nothing to bootstrap from
                value_preds_delta = rewards[step] - value[step]
                gae = value_preds_delta
            else:
                value_preds_delta = rewards[step] + gamma * value[step + 1] - value[step]
                gae = value_preds_delta + gamma * tau * gae
            returns[step] = gae + value[step]
def calculate_sample_entropy(samples):
    """Return the Shannon entropy (natural log) of the empirical
    distribution of *samples*.

    Parameters
    ----------
    samples : sequence of hashable
        Observed pattern samples; may be empty (entropy 0.0).

    Returns
    -------
    float
        ``-sum(p * ln(p))`` over the relative frequencies ``p`` of the
        distinct values in *samples*.
    """
    from collections import Counter  # local import: file does not import collections
    n_sample = len(samples)
    if n_sample == 0:
        return 0.0
    # Counter counts in O(n); the original list.index() scan was O(n^2).
    entropy = 0.0
    for count in Counter(samples).values():
        p = count / n_sample
        entropy += -p * np.log(p)
    return entropy
class TrajPoolManager(object):
    """Finalizes pools of completed trajectories before training.

    The entropy-based hyper-reward machinery is currently disabled:
    ``absorb_finalize_pool`` always fuses a hyper reward of 0.
    """
    def __init__(self, n_pool):
        self.n_pool = n_pool            # capacity hint; not enforced here
        self.hyper_reward = []
        self.traj_pool_index = []
        self.cnt = 0                    # number of pools absorbed so far
        self.clip_entropy_max = 4       # entropy clip bound (inactive while coef is 0)
        self.entropy_coef = 0           # entropy bonus weight (disabled)
    def absorb_finalize_pool(self, pool):
        """Post-process every finished trajectory in *pool*.

        First pass clips tails / invalid frames; second pass fuses a
        (currently zero) hyper reward into the terminal step and computes
        GAE returns. Returns the list of training tasks to run.
        """
        for traj in pool:
            traj.get_most_freq_pattern()
        h_reward = 0  # entropy bonus disabled (entropy_coef == 0)
        for traj in pool:
            traj.finalize(hyper_reward=h_reward)
        self.cnt += 1
        return ['train_R']
'''
轨迹池管理
'''
class BatchTrajManager():
    """Collects per-step fragments from a vectorized environment into
    per-env ``trajectory`` buffers, pools finished trajectories, and
    triggers training once enough of them have accumulated.
    """
    templete = {
        # exam that trajectory have at least following things
        "on-policy": ['skip', 'obs', 'actions', 'reward', 'done', 'value', 'actionLogProbs'],
        "off-policy": ['skip', 'obs', 'actions', 'reward', 'done'],
    }
    def __init__(self, n_env, traj_limit, templete, trainer_hook):
        # trainer_hook(traj_pool, task) is invoked from train_and_clear_traj_pool().
        self.trainer_hook = trainer_hook
        self.n_env = n_env
        self.traj_limit = traj_limit
        self.train_traj_needed = AlgorithmConfig.train_traj_needed
        self.upper_training_epoch = AlgorithmConfig.upper_training_epoch
        self.live_trajs = [trajectory(self.traj_limit, env_id=i) for i in range(n_env)]
        self.live_traj_frame = [0 for _ in range(self.n_env)]  # frames written per env
        self.traj_pool = []            # finished trajectories awaiting training
        self.registered_keys = []      # keys seen so far (integrity check)
        self._traj_lock_buf = None     # half-finished fragment awaiting its unlock hook
        self.pool_manager = TrajPoolManager(n_pool=self.upper_training_epoch)
        self.patience = 1e3            # remaining budget for the integrity check
        self.update_cnt = 0
    def update(self, traj_frag, index):
        """Write a fragment for the subset of envs listed in *index*
        (fragment arrays are indexed by position j, envs by index[j])."""
        assert traj_frag is not None
        for j, env_i in enumerate(index):
            traj_handle = self.live_trajs[env_i]
            for key in traj_frag:
                if traj_frag[key] is None:
                    assert False, key
                if isinstance(traj_frag[key], dict): # two-level dict: flatten its sub-keys
                    for sub_key in traj_frag[key]:
                        content = traj_frag[key][sub_key][j]
                        traj_handle.remember(key + ">" + sub_key, content)
                else:
                    content = traj_frag[key][j]
                    traj_handle.remember(key, content)
            self.live_traj_frame[env_i] += 1
            traj_handle.time_shift()
        return
    # main entry point
    def feed_traj(self, traj_frag, require_hook=False):
        """Feed one time step of data for all envs.

        With require_hook=True the fragment is incomplete (no 'done' /
        'reward' yet); it is buffered and a hook is returned that the
        caller must invoke with the missing fields before the next feed.
        """
        assert self._traj_lock_buf is None
        # an unlock hook must be exected before new trajectory feed in
        if require_hook: # the traj_frag is not intact, lock up traj_frag, wait for more
            assert 'done' not in traj_frag
            assert 'reward' not in traj_frag
            self._traj_lock_buf = traj_frag
            return self._unlock_hook
        else:
            assert 'done' in traj_frag
            assert 'skip' in traj_frag
            self.__batch_update(traj_frag=traj_frag)
    def _unlock_hook(self, traj_frag):
        """Second half of a hooked feed: merge the buffered fragment and commit."""
        assert self._traj_lock_buf is not None
        traj_frag.update(self._traj_lock_buf)
        self._traj_lock_buf = None
        assert 'done' in traj_frag
        assert 'skip' in traj_frag
        self.__batch_update(traj_frag=traj_frag)
    def ___check_integraty(self, traj_frag):
        """Assert every key ever fed keeps appearing (first ~1000 calls only)."""
        # can not alway waste time checking this
        if self.patience < 0: return
        self.patience -= 1
        for key in traj_frag:
            if key not in self.registered_keys:
                self.registered_keys.append(key)
        for key in self.registered_keys:
            assert key in traj_frag, ('this key sometimes disappears from the traj_frag:', key)
    def __batch_update(self, traj_frag):
        """Distribute one committed fragment to the live per-env buffers.

        Envs flagged by 'skip' receive nothing (and consume no fragment
        slot); envs flagged 'done' are moved to the pool and replaced with
        a fresh trajectory.
        """
        self.___check_integraty(traj_frag)
        done = traj_frag['done']; traj_frag.pop('done')
        skip = traj_frag['skip']; traj_frag.pop('skip')
        # single bool to list bool
        if isinstance(done, bool): done = [done for i in range(self.n_env)]
        if isinstance(skip, bool): skip = [skip for i in range(self.n_env)]
        # feed
        cnt = 0  # position inside the fragment arrays (skipped envs excluded)
        for env_i, env_done, skip_this in zip(range(self.n_env), done, skip):
            if skip_this: continue
            # otherwise
            frag_index = cnt; cnt += 1
            env_index = env_i
            traj_handle = self.live_trajs[env_index]
            for key in traj_frag:
                if traj_frag[key] is None:
                    traj_handle.remember(key, None)
                elif isinstance(traj_frag[key], dict): # two-level dict: flatten its sub-keys
                    for sub_key in traj_frag[key]:
                        content = traj_frag[key][sub_key][frag_index]
                        traj_handle.remember( "".join((key , ">" , sub_key)), content )
                else:
                    content = traj_frag[key][frag_index]
                    traj_handle.remember(key, content)
            self.live_traj_frame[env_index] += 1
            traj_handle.time_shift()
            if env_done:
                self.traj_pool.append(traj_handle)
                self.live_trajs[env_index] = trajectory(self.traj_limit, env_id=env_index)
                self.live_traj_frame[env_index] = 0
    def get_traj_frame(self):
        """Return the per-env count of frames in the live trajectories."""
        return self.live_traj_frame
    def train_and_clear_traj_pool(self):
        """Finalize the pooled trajectories, run the trainer hook for each
        task, then empty the pool. Returns the new update counter."""
        print('do update %d'%self.update_cnt)
        current_task_l = self.pool_manager.absorb_finalize_pool(pool=self.traj_pool)
        for current_task in current_task_l:
            ppo_update_cnt = self.trainer_hook(self.traj_pool, current_task)
        self.traj_pool = []
        self.update_cnt += 1
        # assert ppo_update_cnt == self.update_cnt
        return self.update_cnt
def can_exec_training(self):
if len(self.traj_pool) >= self.train_traj_needed:
return True
else:
return False | en | 0.359081 | # cython: language_level=3 轨迹 # do nothing # get_hyper_reward(self): # clip tail # 根据这个轨迹上的NaN,删除所有无效时间点 # before clip NaN, push reward forward # invalid, push reward forward # clip NaN # new finalize # if self.env_id == 0: print(getattr(self, 'h_reward'), getattr(self, 'g_actions')) # reward fusion #return getattr(self, reward_key) #return origin_handle, new_handle #return # ------- gae parameters ------- # ------- -------------- ------- # initalize two more tracks # 最后一帧 # print亮红(key) # print亮红(freq) # self.traj_pool_history = [] # self.traj_pool_history.append(pool) # OOM # h_reward = np.array([]).mean() # pattern_entropy = calculate_sample_entropy(pattern) # print亮绿('entropy:%.3f'%pattern_entropy) # h_reward = min(self.clip_entropy_max, pattern_entropy)*self.entropy_coef # print亮绿('h_reward:%.3f'%h_reward) # task = ['train_L'] 轨迹池管理 # exam that trajectory have at least following things # 如果是二重字典,特殊处理 # 函数入口 # an unlock hook must be exected before new trajectory feed in # the traj_frag is not intact, lock up traj_frag, wait for more # can not alway waste time checking this # single bool to list bool # feed # otherwise # 如果是二重字典,特殊处理 # assert ppo_update_cnt == self.update_cnt | 2.052966 | 2 |
facepy/__init__.py | princearora111/facepy | 0 | 6618509 | <reponame>princearora111/facepy
from facepy.exceptions import FacepyError
from facepy.graph_api import GraphAPI
from facepy.signed_request import SignedRequest
from facepy.utils import get_application_access_token, get_extended_access_token
__all__ = [
'FacepyError',
'GraphAPI',
'SignedRequest',
'get_application_access_token',
'get_extended_access_token',
]
| from facepy.exceptions import FacepyError
from facepy.graph_api import GraphAPI
from facepy.signed_request import SignedRequest
from facepy.utils import get_application_access_token, get_extended_access_token
__all__ = [
'FacepyError',
'GraphAPI',
'SignedRequest',
'get_application_access_token',
'get_extended_access_token',
] | none | 1 | 1.373902 | 1 | |
scripts/fix_ecconfig.py | pagopa/pagopa-canone-unico | 0 | 6618510 | '''
This script fixes wrong IBAN
'''
import argparse
from azure.data.tables import TableServiceClient
from azure.core.credentials import AzureNamedKeyCredential

parser = argparse.ArgumentParser(description='Tool to fix wrong IBANs stored in Azure table storage', prog='fix_ecconfig.py')
parser.add_argument('--account-key', metavar='ACCOUNT_KEY', type=str, nargs='?',
                    help='Azure account name (default: local connection string)')
parser.add_argument('--table-name', metavar='TABLE_NAME', type=str, nargs='?',
                    help='Azure table name (default: ecconfig)')
parser.add_argument('--env', metavar='env', type=str, nargs='?',
                    help='Azure subscription (default: local)')
args = parser.parse_args()

env = args.env or "local"
# BUG FIX: the fallback key literal was missing its closing quote, which made
# this script a syntax error. Keep a placeholder; pass --account-key in practice.
account_key = args.account_key or "<KEY>"
if env == "local":
    # Azurite / Azure Storage Emulator well-known account.
    account_name = "devstoreaccount1"
    endpoint = "http://127.0.0.1:10002/{}".format(account_name)
    table_name = args.table_name or "ecconfig"
else:
    account_name = "pagopa{}canoneunicosa".format(env[0])
    table_name = args.table_name or "pagopa{}canoneunicosaecconfigtable".format(env[0])
    endpoint = "https://{}.table.core.windows.net/".format(account_name)
# BUG FIX: print() was given a single list, so sep="|" had no effect;
# pass the values as separate arguments instead.
print(env, account_name, endpoint, table_name, sep="|")

credential = AzureNamedKeyCredential(account_name, account_key)
with TableServiceClient(endpoint=endpoint, credential=credential) as service:
    table = service.get_table_client(table_name=table_name)
    for entity in table.list_entities():
        # A valid Italian IBAN is exactly 27 characters; longer values
        # contain stray spaces that must be stripped.
        if len(entity["Iban"]) > 27:
            print(entity)
            entity["Iban"] = entity["Iban"].replace(" ", "")
            table.update_entity(entity=entity)
| '''
This script fixes wrong IBAN
'''
import argparse
from azure.data.tables import TableServiceClient
from azure.core.credentials import AzureNamedKeyCredential

parser = argparse.ArgumentParser(description='Tool to fix wrong IBANs stored in Azure table storage', prog='fix_ecconfig.py')
parser.add_argument('--account-key', metavar='ACCOUNT_KEY', type=str, nargs='?',
                    help='Azure account name (default: local connection string)')
parser.add_argument('--table-name', metavar='TABLE_NAME', type=str, nargs='?',
                    help='Azure table name (default: ecconfig)')
parser.add_argument('--env', metavar='env', type=str, nargs='?',
                    help='Azure subscription (default: local)')
args = parser.parse_args()

env = args.env or "local"
# BUG FIX: the fallback key literal was missing its closing quote, which made
# this script a syntax error. Keep a placeholder; pass --account-key in practice.
account_key = args.account_key or "<KEY>"
if env == "local":
    # Azurite / Azure Storage Emulator well-known account.
    account_name = "devstoreaccount1"
    endpoint = "http://127.0.0.1:10002/{}".format(account_name)
    table_name = args.table_name or "ecconfig"
else:
    account_name = "pagopa{}canoneunicosa".format(env[0])
    table_name = args.table_name or "pagopa{}canoneunicosaecconfigtable".format(env[0])
    endpoint = "https://{}.table.core.windows.net/".format(account_name)
# BUG FIX: print() was given a single list, so sep="|" had no effect;
# pass the values as separate arguments instead.
print(env, account_name, endpoint, table_name, sep="|")

credential = AzureNamedKeyCredential(account_name, account_key)
with TableServiceClient(endpoint=endpoint, credential=credential) as service:
    table = service.get_table_client(table_name=table_name)
    for entity in table.list_entities():
        # A valid Italian IBAN is exactly 27 characters; longer values
        # contain stray spaces that must be stripped.
        if len(entity["Iban"]) > 27:
            print(entity)
            entity["Iban"] = entity["Iban"].replace(" ", "")
            table.update_entity(entity=entity)
| en | 0.6378 | This script fixes wrong IBAN | 2.305287 | 2 |
DynamicHistory.py | yogesh7132/DynamicHistory | 0 | 6618511 | <reponame>yogesh7132/DynamicHistory
import sqlite3

# Chrome's history DB; Chrome must be closed or the file will be locked.
conn = sqlite3.connect(r"C:\Users\yy\AppData\Local\Google\Chrome\User Data\Default\History")
cursor = conn.cursor()
search_value = "github"  # substring to purge from the browsing history

# FIX: bind the LIKE pattern as a parameter. The old string concatenation
# broke on quotes in search_value and was SQL-injection-prone; the DELETE
# below was already parameterized, so this makes the two queries consistent.
id_lst = []
for row in cursor.execute("select id,url from urls where url like ?",
                          ('%' + search_value + '%',)):
    print(row)
    id_lst.append((row[0],))  # also stops shadowing the builtin `id`
cursor.executemany('Delete from urls where id=?', id_lst)
conn.commit()
conn.close()
| import sqlite3
# Chrome's history DB; Chrome must be closed or the file will be locked.
conn = sqlite3.connect(r"C:\Users\yy\AppData\Local\Google\Chrome\User Data\Default\History")
cursor = conn.cursor()
search_value = "github"  # substring to purge from the browsing history

# FIX: bind the LIKE pattern as a parameter. The old string concatenation
# broke on quotes in search_value and was SQL-injection-prone; the DELETE
# below was already parameterized, so this makes the two queries consistent.
id_lst = []
for row in cursor.execute("select id,url from urls where url like ?",
                          ('%' + search_value + '%',)):
    print(row)
    id_lst.append((row[0],))  # also stops shadowing the builtin `id`
cursor.executemany('Delete from urls where id=?', id_lst)
conn.commit()
conn.close() | en | 0.468886 | #Search Value | 3.251432 | 3 |
cnns/nnlib/robustness/show_fft_same_scale_heatmap.py | anonymous-user-commits/perturb-net | 1 | 6618512 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os

# Heat-map extent (pixels of the FFT map to show).
# FIX: removed the dead lim_x/lim_y = 224 assignments that were
# immediately overwritten; only the 10x10 window was ever used.
lim_x = 10
lim_y = 10

dir_path = os.path.dirname(os.path.realpath(__file__))
output_path = os.path.join(dir_path, "original_fft.csv.npy")
original_fft = np.load(output_path)

cmap_type = "custom"   # "custom" (fixed-scale seismic) or "standard"
vmin_heatmap = -6      # shared color scale so separate plots are comparable
vmax_heatmap = 10
labels = "Text"  # "None" or "Text"
if cmap_type == "custom":
    # FIX: dropped the unused hand-built `cdict` colormap definition
    # (its LinearSegmentedColormap call was already commented out).
    x = np.arange(0, lim_x, 1.)
    y = np.arange(0, lim_y, 1.)
    X, Y = np.meshgrid(x, y)
elif cmap_type == "standard":
    # https://matplotlib.org/tutorials/colors/colormaps.html
    cmap = 'OrRd'
    interpolation = 'nearest'
else:
    raise Exception(f"Unknown type of the cmap: {cmap_type}.")

# Re-save unchanged (kept from the original; refreshes the file on disk).
np.save(output_path, original_fft)
original_fft = original_fft[:lim_y, :lim_x]

if cmap_type == "standard":
    plt.imshow(original_fft, cmap=cmap,
               interpolation=interpolation)
    heatmap_legend = plt.pcolor(original_fft)
    plt.colorbar(heatmap_legend)
elif cmap_type == "custom":
    fig, ax = plt.subplots()
    cax = ax.matshow(original_fft, cmap='seismic', vmin=vmin_heatmap,
                     vmax=vmax_heatmap)
    fig.colorbar(cax)
    if labels == "Text":
        # Annotate every cell with its value.
        for (i, j), z in np.ndenumerate(original_fft):
            ax.text(j, i, '{:0.1f}'.format(z), ha='center', va='center')

channel = 0
plt.ylabel("fft-ed\nchannel " + str(channel))
plt.show(block=True)
| import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os

# Heat-map extent (pixels of the FFT map to show).
# FIX: removed the dead lim_x/lim_y = 224 assignments that were
# immediately overwritten; only the 10x10 window was ever used.
lim_x = 10
lim_y = 10

dir_path = os.path.dirname(os.path.realpath(__file__))
output_path = os.path.join(dir_path, "original_fft.csv.npy")
original_fft = np.load(output_path)

cmap_type = "custom"   # "custom" (fixed-scale seismic) or "standard"
vmin_heatmap = -6      # shared color scale so separate plots are comparable
vmax_heatmap = 10
labels = "Text"  # "None" or "Text"
if cmap_type == "custom":
    # FIX: dropped the unused hand-built `cdict` colormap definition
    # (its LinearSegmentedColormap call was already commented out).
    x = np.arange(0, lim_x, 1.)
    y = np.arange(0, lim_y, 1.)
    X, Y = np.meshgrid(x, y)
elif cmap_type == "standard":
    # https://matplotlib.org/tutorials/colors/colormaps.html
    cmap = 'OrRd'
    interpolation = 'nearest'
else:
    raise Exception(f"Unknown type of the cmap: {cmap_type}.")

# Re-save unchanged (kept from the original; refreshes the file on disk).
np.save(output_path, original_fft)
original_fft = original_fft[:lim_y, :lim_x]

if cmap_type == "standard":
    plt.imshow(original_fft, cmap=cmap,
               interpolation=interpolation)
    heatmap_legend = plt.pcolor(original_fft)
    plt.colorbar(heatmap_legend)
elif cmap_type == "custom":
    fig, ax = plt.subplots()
    cax = ax.matshow(original_fft, cmap='seismic', vmin=vmin_heatmap,
                     vmax=vmax_heatmap)
    fig.colorbar(cax)
    if labels == "Text":
        # Annotate every cell with its value.
        for (i, j), z in np.ndenumerate(original_fft):
            ax.text(j, i, '{:0.1f}'.format(z), ha='center', va='center')

channel = 0
plt.ylabel("fft-ed\nchannel " + str(channel))
plt.show(block=True)
| en | 0.445299 | # "None" or "Text # setting for the heat map # cdict = { # 'red': ((0.0, 0.25, .25), (0.02, .59, .59), (1., 1., 1.)), # 'green': ((0.0, 0.0, 0.0), (0.02, .45, .45), (1., .97, .97)), # 'blue': ((0.0, 1.0, 1.0), (0.02, .75, .75), (1., 0.45, 0.45)) # } # cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 1024) # cmap = "hot" # cmap = "YlGnBu" # cmap = 'PuBu_r' # cmap = "seismic" # cmap_type = 'OrRd' # https://matplotlib.org/tutorials/colors/colormaps.html # cmap = 'hot' # cmap = 'rainbow' # cmap = 'seismic' # cmap = 'terrain' # go back to the original print size # np.set_printoptions(threshold=options['threshold']) # plt.pcolor(X, Y, original_fft, cmap=cmap, vmin=vmin_heatmap, # vmax=vmax_heatmap) # plt.colorbar() # plt.axis('off') | 2.482386 | 2 |
pyunity/values/texture.py | pyunity/pyunity | 158 | 6618513 | __all__ = ["Material", "Color", "RGB", "HSV"]
import colorsys
class Material:
    """
    Holds the rendering data for a material.

    Attributes
    ----------
    color : Color
        An albedo tint.
    texture : Texture2D
        Optional texture mapped onto the mesh of a MeshRenderer
        (``None`` means untextured).

    """

    def __init__(self, color, texture=None):
        self.texture = texture
        self.color = color
class Color:
    """Base class for color representations."""

    def to_string(self):
        """Serialize this color via its ``__str__`` form."""
        return str(self)

    @staticmethod
    def from_string(string):
        """Parse a repr-style string such as ``"RGB(1, 2, 3)"``.

        Returns an RGB or HSV instance, or ``None`` for any other prefix.
        """
        values = string[4:-1].split(", ")
        if string.startswith("RGB"):
            return RGB(*map(int, values))
        if string.startswith("HSV"):
            return HSV(*map(int, values))
class RGB(Color):
"""
A class to represent an RGB color.
Parameters
----------
r : int
Red value (0-255)
g : int
Green value (0-255)
b : int
Blue value (0-255)
"""
def __truediv__(self, other):
a, b, c = tuple(self)
return a / other, b / other, c / other
def __mul__(self, other):
a, b, c = tuple(self)
return a * other, b * other, c * other
def __init__(self, r, g, b):
self.r = r
self.g = g
self.b = b
def __list__(self):
return [self.r, self.g, self.b]
def __iter__(self):
yield self.r
yield self.g
yield self.b
def __repr__(self):
return "RGB(%d, %d, %d)" % tuple(self)
def __str__(self):
return "RGB(%d, %d, %d)" % tuple(self)
def to_rgb(self):
return self
def to_hsv(self):
return HSV.from_rgb(self.r, self.g, self.b)
@staticmethod
def from_hsv(h, s, v):
r, g, b = colorsys.hsv_to_rgb(h / 360, s / 100, v / 100)
return RGB(int(r * 255), int(g * 255), int(b * 255))
class HSV(Color):
"""
A class to represent a HSV color.
Parameters
----------
h : int
Hue (0-360)
s : int
Saturation (0-100)
v : int
Value (0-100)
"""
def __init__(self, h, s, v):
self.h = h
self.s = s
self.v = v
def __list__(self):
return [self.h, self.s, self.v]
def __iter__(self):
yield self.h
yield self.s
yield self.v
def __repr__(self):
return "HSV(%d, %d, %d)" % tuple(self)
def __str__(self):
return "HSV(%d, %d, %d)" % tuple(self)
def to_rgb(self):
return RGB.from_hsv(self.h, self.s, self.v)
def to_hsv(self):
return self
@staticmethod
def from_rgb(r, g, b):
h, s, v = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)
return HSV(int(h * 360), int(s * 100), int(v * 100))
| __all__ = ["Material", "Color", "RGB", "HSV"]
import colorsys
class Material:
    """
    Holds the rendering data for a material.

    Attributes
    ----------
    color : Color
        An albedo tint.
    texture : Texture2D
        Optional texture mapped onto the mesh of a MeshRenderer
        (``None`` means untextured).

    """

    def __init__(self, color, texture=None):
        self.texture = texture
        self.color = color
class Color:
    """Base class for color representations."""

    def to_string(self):
        """Serialize this color via its ``__str__`` form."""
        return str(self)

    @staticmethod
    def from_string(string):
        """Parse a repr-style string such as ``"RGB(1, 2, 3)"``.

        Returns an RGB or HSV instance, or ``None`` for any other prefix.
        """
        values = string[4:-1].split(", ")
        if string.startswith("RGB"):
            return RGB(*map(int, values))
        if string.startswith("HSV"):
            return HSV(*map(int, values))
class RGB(Color):
"""
A class to represent an RGB color.
Parameters
----------
r : int
Red value (0-255)
g : int
Green value (0-255)
b : int
Blue value (0-255)
"""
def __truediv__(self, other):
a, b, c = tuple(self)
return a / other, b / other, c / other
def __mul__(self, other):
a, b, c = tuple(self)
return a * other, b * other, c * other
def __init__(self, r, g, b):
self.r = r
self.g = g
self.b = b
def __list__(self):
return [self.r, self.g, self.b]
def __iter__(self):
yield self.r
yield self.g
yield self.b
def __repr__(self):
return "RGB(%d, %d, %d)" % tuple(self)
def __str__(self):
return "RGB(%d, %d, %d)" % tuple(self)
def to_rgb(self):
return self
def to_hsv(self):
return HSV.from_rgb(self.r, self.g, self.b)
@staticmethod
def from_hsv(h, s, v):
r, g, b = colorsys.hsv_to_rgb(h / 360, s / 100, v / 100)
return RGB(int(r * 255), int(g * 255), int(b * 255))
class HSV(Color):
"""
A class to represent a HSV color.
Parameters
----------
h : int
Hue (0-360)
s : int
Saturation (0-100)
v : int
Value (0-100)
"""
def __init__(self, h, s, v):
self.h = h
self.s = s
self.v = v
def __list__(self):
return [self.h, self.s, self.v]
def __iter__(self):
yield self.h
yield self.s
yield self.v
def __repr__(self):
return "HSV(%d, %d, %d)" % tuple(self)
def __str__(self):
return "HSV(%d, %d, %d)" % tuple(self)
def to_rgb(self):
return RGB.from_hsv(self.h, self.s, self.v)
def to_hsv(self):
return self
@staticmethod
def from_rgb(r, g, b):
h, s, v = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)
return HSV(int(h * 360), int(s * 100), int(v * 100))
| en | 0.262116 | Class to hold data on a material. Attributes ---------- color : Color An albedo tint. texture : Texture2D A texture to map onto the mesh provided by a MeshRenderer A class to represent an RGB color. Parameters ---------- r : int Red value (0-255) g : int Green value (0-255) b : int Blue value (0-255) A class to represent a HSV color. Parameters ---------- h : int Hue (0-360) s : int Saturation (0-100) v : int Value (0-100) | 3.362994 | 3 |
projetinhos/ex#80a.py | dani-fn/Projetinhos_Python | 0 | 6618514 | lista = [1, 5, 2, 1, 3, 4, 6]
# enumerate() demo: pairs each element with its index.
for index, teste in enumerate(lista):
    print(index, teste)
# RUN (note: Python 3 prints "0 1" etc., not tuples):
# (0, 1)
# (1, 5)
# (2, 2)
# (3, 1)
# (4, 3)
# (5, 4)
# (6, 6)
# str.replace demo: strings are immutable, so rebind the result.
a = '4, 5, 6, 7, 8'
a = a.replace('4', '1')
print(a)
# Insertion-sort on input: read 5 integers and keep `lista` sorted
# ascending as each value arrives.
lista = []
for pos in range(0, 5):
    digitado = int(input('Digite um valor: '))  # prompt: "type a value"
    # Fast path: first value, or >= current maximum -> append at the end.
    if pos == 0 or digitado >= lista[-1]:
        lista.append(digitado)
        print('Adicionado ao final da lista...')
    else:
        # Linear scan for the first element greater than the new value.
        # NOTE: `pos` is reused as the scan index here (shadows the loop
        # variable); harmless because `for` reassigns it next iteration.
        pos = 0
        while pos < len(lista):
            if digitado < lista[pos]:
                lista.insert(pos, digitado)
                print(f'Adicionado na posição [{pos}]...')
                break
            pos += 1
print('-' * 32)
print(f'Os valores digitados em ordem foram: {lista}')
| lista = [1, 5, 2, 1, 3, 4, 6]
# enumerate() demo: pairs each element with its index.
for index, teste in enumerate(lista):
    print(index, teste)
# RUN (note: Python 3 prints "0 1" etc., not tuples):
# (0, 1)
# (1, 5)
# (2, 2)
# (3, 1)
# (4, 3)
# (5, 4)
# (6, 6)
# str.replace demo: strings are immutable, so rebind the result.
a = '4, 5, 6, 7, 8'
a = a.replace('4', '1')
print(a)
# Insertion-sort on input: read 5 integers and keep `lista` sorted
# ascending as each value arrives.
lista = []
for pos in range(0, 5):
    digitado = int(input('Digite um valor: '))  # prompt: "type a value"
    # Fast path: first value, or >= current maximum -> append at the end.
    if pos == 0 or digitado >= lista[-1]:
        lista.append(digitado)
        print('Adicionado ao final da lista...')
    else:
        # Linear scan for the first element greater than the new value.
        # NOTE: `pos` is reused as the scan index here (shadows the loop
        # variable); harmless because `for` reassigns it next iteration.
        pos = 0
        while pos < len(lista):
            if digitado < lista[pos]:
                lista.insert(pos, digitado)
                print(f'Adicionado na posição [{pos}]...')
                break
            pos += 1
print('-' * 32)
print(f'Os valores digitados em ordem foram: {lista}')
| en | 0.654758 | # RUN: # (0, 1) # (1, 5) # (2, 2) # (3, 1) # (4, 3) # (5, 4) # (6, 6) | 3.768204 | 4 |
main.py | cympfh/island | 1 | 6618515 | import collections
import logging
import random
from typing import List, Optional, Tuple
import implicit
from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, RedirectResponse
from rich.logging import RichHandler
from scipy.sparse import lil_matrix
from island.database import RDB, RecordDB, ReviewDB, WorkDB
from island.staff.model import StaffModel
logger = logging.getLogger("uvicorn")
class Matrix:
"""Matrix-decompositionable"""
def __init__(self):
"""Initialize as Empty"""
self.rows = []
self.cols = []
self.row_id = dict()
self.col_id = dict()
self.data = dict()
def insert(self, row: int, col: int, val: float):
"""Insert a value
Parameters
----------
row
workId
col
userId
val
reviewed?
"""
if row not in self.row_id:
self.rows.append(row)
self.row_id[row] = len(self.row_id)
assert self.rows[self.row_id[row]] == row
if col not in self.col_id:
self.cols.append(col)
self.col_id[col] = len(self.col_id)
assert self.cols[self.col_id[col]] == col
i = self.row_id[row]
j = self.col_id[col]
self.data[(i, j)] = val
def decomposition(self, factors: int):
"""Fitting"""
X = lil_matrix((len(self.rows), len(self.cols)))
for pos, val in self.data.items():
X[pos] = val
fact = implicit.als.AlternatingLeastSquares(factors=factors, iterations=10)
fact.fit(item_users=X.tocoo(), show_progress=False)
self.fact = fact
def stat(self):
"""Debug"""
logger.info(
f"Size: {len(self.rows)} x {len(self.cols)} = {len(self.rows) * len(self.cols)}"
)
logger.info(
f"{len(self.data)} cells have non-zero values (density={len(self.data) / len(self.rows) / len(self.cols)})"
)
def recommend(self, likes: List[int], n: int) -> List[Tuple[int, float]]:
"""Run Recommendation
Parameters
----------
likes
List of work_id
n
num of returns
Returns
-------
List of (work_id and score)
"""
user_items = lil_matrix((1, len(self.rows)))
for work_id in likes:
if work_id in self.row_id:
i = self.row_id[work_id]
user_items[(0, i)] = 2.0
recommend_items = self.fact.recommend(
0,
user_items.tocsr(),
n,
filter_already_liked_items=True,
recalculate_user=True,
)
return [(self.rows[int(i)], float(score)) for i, score in recommend_items]
class Recommendation:
"""Recommendation has a Matrix"""
def __init__(
self,
dataset: RDB,
limit_anime: int,
limit_user: int,
):
"""init
Parameters
----------
dataset
RDB of Record(work_id, user_id, rating)
This is reviews or records.
limit_anime
sub limit of freq of anime
limit_user
sub limit of freq of user
"""
logger.info("Initializing a Recommender for %s", dataset.table)
titles = dict() # work_id -> title
images = dict() # work_id -> ImageUrl
for work_id, title, image, _dt in WorkDB():
titles[work_id] = title
images[work_id] = image
rows = [] # List of (work_id, user_id, rating)
count_anime = collections.defaultdict(int) # work_id -> count
count_user = collections.defaultdict(int) # user_id -> count
def rate(rating: str) -> float:
if rating == "bad":
return -1
if rating == "good":
return 1
if rating == "great":
return 4
return 0.5
for _id, user_id, work_id, rating, _dt in dataset:
count_anime[work_id] += 1
count_user[user_id] += 1
if rating is None:
continue
rows.append((work_id, user_id, rate(rating)))
mat = Matrix()
for work_id, user_id, ratevalue in rows:
if count_anime[work_id] < limit_anime:
continue
if count_user[user_id] < limit_user:
continue
mat.insert(work_id, user_id, ratevalue)
mat.stat()
mat.decomposition(factors=200)
self.mat = mat
self.titles = titles
self.images = images
self.test()
def isknown(self, work_id: int) -> bool:
"""Known Anime?"""
return work_id in self.mat.row_id
def title(self, work_id: int) -> Optional[str]:
"""Anime Title"""
return self.titles.get(work_id, None)
def image(self, work_id: int) -> str:
"""Anime Image Url"""
return self.images.get(work_id, None)
def sample_animes(self, n: int) -> List[int]:
"""Returns List of random work_id"""
return random.sample(self.mat.rows, n)
def similar_items(self, work_id: int, n: int) -> List[Tuple[int, float]]:
"""Similar animes
Returns
-------
List of (work_id: int, score: float)
"""
if not self.isknown(work_id):
return []
i = self.mat.row_id[work_id]
similars = self.mat.fact.similar_items(i, n + 1)
return [
(self.mat.rows[int(j)], float(score))
for j, score in similars
if int(j) != i
][:n]
def __call__(self, likes: List[int], n: int) -> List[Tuple[int, float]]:
"""Recommend"""
if not any(self.isknown(work_id) for work_id in likes):
return []
return self.mat.recommend(likes, n)
def test(self):
"""Self Testing"""
random.seed(42)
sample_user_indices = random.sample(list(range(len(self.mat.cols))), 200)
# collect likes
likes = collections.defaultdict(list)
for (work_id, user_idx), rating in self.mat.data.items():
if user_idx not in sample_user_indices:
continue
if rating < 0:
continue
work_id = self.mat.rows[work_id]
likes[user_idx].append(work_id)
# testing
acc1 = 0
acc5 = 0
acc10 = 0
acc20 = 0
num = 0
for _ in range(5):
for user_idx in sample_user_indices:
if len(likes[user_idx]) < 3:
continue
ans = random.choice(likes[user_idx]) # pseudo answer
likes[user_idx].remove(ans) # pseudo input
pred = self.mat.recommend(likes[user_idx], 20)
num += 1
if ans in [pair[0] for pair in pred[:1]]:
acc1 += 1
if ans in [pair[0] for pair in pred[:5]]:
acc5 += 1
if ans in [pair[0] for pair in pred[:10]]:
acc10 += 1
if ans in [pair[0] for pair in pred[:20]]:
acc20 += 1
logger.info(f"Acc@1 = { acc1 / num }")
logger.info(f"Acc@5 = { acc5 / num }")
logger.info(f"Acc@10 = { acc10 / num }")
logger.info(f"Acc@20 = { acc20 / num }")
class MixRecommendation:
"""Wrapper of Multiple Recommendations"""
def __init__(self):
"""Init child recommenders"""
self.children = [
Recommendation(ReviewDB(), limit_anime=5, limit_user=5),
Recommendation(RecordDB(), limit_anime=5, limit_user=3),
]
def sample_animes(self, n: int) -> List[int]:
"""Returns List of work_id"""
i = random.randrange(len(self.children))
return random.sample(self.children[i].mat.rows, n)
def title(self, work_id: int) -> Optional[str]:
"""anime title"""
for child in self.children:
t = child.title(work_id)
if t:
return t
def image(self, work_id: int) -> Optional[str]:
"""image url"""
for child in self.children:
t = child.image(work_id)
if t:
return t
def __call__(self, likes: List[int], n: int) -> List[Tuple[int, float]]:
"""Mixture of recommend of children"""
items = sum([child(likes, n) for child in self.children], [])
items.sort(key=lambda item: item[1], reverse=True)
used = set()
ret = []
for work_id, score in items:
if work_id in used:
continue
used.add(work_id)
ret.append((work_id, score))
return ret[:n]
def isknown(self, work_id: int) -> bool:
"""is-known by any children"""
for child in self.children:
if child.isknown(work_id):
return True
return False
def similar_items(self, work_id: int, n: int) -> List[Tuple[int, float]]:
"""Mixture of similar_items of children"""
items = sum([child.similar_items(work_id, n) for child in self.children], [])
items.sort(key=lambda item: item[1], reverse=True)
used = set()
ret = []
for work_id, score in items:
if work_id in used:
continue
used.add(work_id)
ret.append((work_id, score))
return ret[:n]
recommender = MixRecommendation()
works = recommender.sample_animes(20)
staff_model = StaffModel()
logger.info("Launching a Web Server")
app = FastAPI()
origins = [
"http://cympfh.cc",
"http://s.cympfh.cc",
"http://localhost",
"http://localhost:8080",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
logger.info("Ready")
@app.get("/anime/api/info")
async def anime_info(work_id: int):
"""Returns Info"""
if not recommender.isknown(work_id):
raise HTTPException(status_code=404, detail="Item not found")
relatives_watch = recommender.similar_items(work_id, 5)
relatives_staff = [
(work_id, score)
for (work_id, score) in staff_model.similar_items(work_id, 10)
if recommender.isknown(work_id)
][:5]
return {
"workId": work_id,
"title": recommender.title(work_id),
"image": recommender.image(work_id),
"relatives_watch": [
{
"workId": work_id,
"title": recommender.title(work_id),
"score": float(score),
}
for work_id, score in relatives_watch
],
"relatives_staff": [
{
"workId": work_id,
"title": recommender.title(work_id),
"score": float(score),
}
for work_id, score in relatives_staff
],
}
@app.get("/anime/api/recommend")
async def recommend(likes: List[int] = Query(None)):
"""Recommendation from user's likes
Parameters
----------
likes
List of workId
"""
if likes is None:
works = recommender.sample_animes(20)
return {
"items": [
{
"workId": work_id,
"title": recommender.title(work_id),
"image": recommender.image(work_id),
}
for work_id in works
]
}
recommend_items = recommender(likes, 20)
return {
"items": [
{
"workId": work_id,
"title": recommender.title(work_id),
"image": recommender.image(work_id),
"score": float(score),
}
for work_id, score in recommend_items
],
"source": {
"likes": [
{"workId": work_id, "title": recommender.title(work_id)}
for work_id in likes
]
},
}
@app.get("/anime/recommend", response_class=HTMLResponse)
async def index_recommend():
"""Recommendation Page"""
with open("./templates/recommend.html", "rt") as f:
return f.read()
@app.get("/anime/random", response_class=RedirectResponse)
async def index_random():
"""Redirect to Random /anime/{work_id}"""
work_id = recommender.sample_animes(1)[0]
return RedirectResponse(f"/anime/{work_id}")
@app.get("/anime/{work_id}", response_class=HTMLResponse)
async def index_anime_graph(work_id: int):
"""Index for Each Anime"""
if not recommender.isknown(work_id):
raise HTTPException(status_code=404, detail="Item not found")
with open("./templates/anime.html", "rt") as f:
return f.read()
@app.get("/", response_class=RedirectResponse)
async def index():
"""Redirect to /anime"""
return RedirectResponse("/anime")
@app.get("/anime", response_class=HTMLResponse)
async def index_anime():
"""Index of All"""
with open("./templates/index.html", "rt") as f:
return f.read()
| import collections
import logging
import random
from typing import List, Optional, Tuple
import implicit
from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, RedirectResponse
from rich.logging import RichHandler
from scipy.sparse import lil_matrix
from island.database import RDB, RecordDB, ReviewDB, WorkDB
from island.staff.model import StaffModel
logger = logging.getLogger("uvicorn")
class Matrix:
"""Matrix-decompositionable"""
def __init__(self):
"""Initialize as Empty"""
self.rows = []
self.cols = []
self.row_id = dict()
self.col_id = dict()
self.data = dict()
def insert(self, row: int, col: int, val: float):
"""Insert a value
Parameters
----------
row
workId
col
userId
val
reviewed?
"""
if row not in self.row_id:
self.rows.append(row)
self.row_id[row] = len(self.row_id)
assert self.rows[self.row_id[row]] == row
if col not in self.col_id:
self.cols.append(col)
self.col_id[col] = len(self.col_id)
assert self.cols[self.col_id[col]] == col
i = self.row_id[row]
j = self.col_id[col]
self.data[(i, j)] = val
def decomposition(self, factors: int):
"""Fitting"""
X = lil_matrix((len(self.rows), len(self.cols)))
for pos, val in self.data.items():
X[pos] = val
fact = implicit.als.AlternatingLeastSquares(factors=factors, iterations=10)
fact.fit(item_users=X.tocoo(), show_progress=False)
self.fact = fact
def stat(self):
"""Debug"""
logger.info(
f"Size: {len(self.rows)} x {len(self.cols)} = {len(self.rows) * len(self.cols)}"
)
logger.info(
f"{len(self.data)} cells have non-zero values (density={len(self.data) / len(self.rows) / len(self.cols)})"
)
def recommend(self, likes: List[int], n: int) -> List[Tuple[int, float]]:
"""Run Recommendation
Parameters
----------
likes
List of work_id
n
num of returns
Returns
-------
List of (work_id and score)
"""
user_items = lil_matrix((1, len(self.rows)))
for work_id in likes:
if work_id in self.row_id:
i = self.row_id[work_id]
user_items[(0, i)] = 2.0
recommend_items = self.fact.recommend(
0,
user_items.tocsr(),
n,
filter_already_liked_items=True,
recalculate_user=True,
)
return [(self.rows[int(i)], float(score)) for i, score in recommend_items]
class Recommendation:
"""Recommendation has a Matrix"""
def __init__(
self,
dataset: RDB,
limit_anime: int,
limit_user: int,
):
"""init
Parameters
----------
dataset
RDB of Record(work_id, user_id, rating)
This is reviews or records.
limit_anime
sub limit of freq of anime
limit_user
sub limit of freq of user
"""
logger.info("Initializing a Recommender for %s", dataset.table)
titles = dict() # work_id -> title
images = dict() # work_id -> ImageUrl
for work_id, title, image, _dt in WorkDB():
titles[work_id] = title
images[work_id] = image
rows = [] # List of (work_id, user_id, rating)
count_anime = collections.defaultdict(int) # work_id -> count
count_user = collections.defaultdict(int) # user_id -> count
def rate(rating: str) -> float:
if rating == "bad":
return -1
if rating == "good":
return 1
if rating == "great":
return 4
return 0.5
for _id, user_id, work_id, rating, _dt in dataset:
count_anime[work_id] += 1
count_user[user_id] += 1
if rating is None:
continue
rows.append((work_id, user_id, rate(rating)))
mat = Matrix()
for work_id, user_id, ratevalue in rows:
if count_anime[work_id] < limit_anime:
continue
if count_user[user_id] < limit_user:
continue
mat.insert(work_id, user_id, ratevalue)
mat.stat()
mat.decomposition(factors=200)
self.mat = mat
self.titles = titles
self.images = images
self.test()
def isknown(self, work_id: int) -> bool:
"""Known Anime?"""
return work_id in self.mat.row_id
def title(self, work_id: int) -> Optional[str]:
"""Anime Title"""
return self.titles.get(work_id, None)
def image(self, work_id: int) -> str:
"""Anime Image Url"""
return self.images.get(work_id, None)
def sample_animes(self, n: int) -> List[int]:
"""Returns List of random work_id"""
return random.sample(self.mat.rows, n)
def similar_items(self, work_id: int, n: int) -> List[Tuple[int, float]]:
"""Similar animes
Returns
-------
List of (work_id: int, score: float)
"""
if not self.isknown(work_id):
return []
i = self.mat.row_id[work_id]
similars = self.mat.fact.similar_items(i, n + 1)
return [
(self.mat.rows[int(j)], float(score))
for j, score in similars
if int(j) != i
][:n]
def __call__(self, likes: List[int], n: int) -> List[Tuple[int, float]]:
"""Recommend"""
if not any(self.isknown(work_id) for work_id in likes):
return []
return self.mat.recommend(likes, n)
def test(self):
"""Self Testing"""
random.seed(42)
sample_user_indices = random.sample(list(range(len(self.mat.cols))), 200)
# collect likes
likes = collections.defaultdict(list)
for (work_id, user_idx), rating in self.mat.data.items():
if user_idx not in sample_user_indices:
continue
if rating < 0:
continue
work_id = self.mat.rows[work_id]
likes[user_idx].append(work_id)
# testing
acc1 = 0
acc5 = 0
acc10 = 0
acc20 = 0
num = 0
for _ in range(5):
for user_idx in sample_user_indices:
if len(likes[user_idx]) < 3:
continue
ans = random.choice(likes[user_idx]) # pseudo answer
likes[user_idx].remove(ans) # pseudo input
pred = self.mat.recommend(likes[user_idx], 20)
num += 1
if ans in [pair[0] for pair in pred[:1]]:
acc1 += 1
if ans in [pair[0] for pair in pred[:5]]:
acc5 += 1
if ans in [pair[0] for pair in pred[:10]]:
acc10 += 1
if ans in [pair[0] for pair in pred[:20]]:
acc20 += 1
logger.info(f"Acc@1 = { acc1 / num }")
logger.info(f"Acc@5 = { acc5 / num }")
logger.info(f"Acc@10 = { acc10 / num }")
logger.info(f"Acc@20 = { acc20 / num }")
class MixRecommendation:
"""Wrapper of Multiple Recommendations"""
def __init__(self):
"""Init child recommenders"""
self.children = [
Recommendation(ReviewDB(), limit_anime=5, limit_user=5),
Recommendation(RecordDB(), limit_anime=5, limit_user=3),
]
def sample_animes(self, n: int) -> List[int]:
"""Returns List of work_id"""
i = random.randrange(len(self.children))
return random.sample(self.children[i].mat.rows, n)
def title(self, work_id: int) -> Optional[str]:
"""anime title"""
for child in self.children:
t = child.title(work_id)
if t:
return t
def image(self, work_id: int) -> Optional[str]:
"""image url"""
for child in self.children:
t = child.image(work_id)
if t:
return t
def __call__(self, likes: List[int], n: int) -> List[Tuple[int, float]]:
"""Mixture of recommend of children"""
items = sum([child(likes, n) for child in self.children], [])
items.sort(key=lambda item: item[1], reverse=True)
used = set()
ret = []
for work_id, score in items:
if work_id in used:
continue
used.add(work_id)
ret.append((work_id, score))
return ret[:n]
def isknown(self, work_id: int) -> bool:
"""is-known by any children"""
for child in self.children:
if child.isknown(work_id):
return True
return False
def similar_items(self, work_id: int, n: int) -> List[Tuple[int, float]]:
"""Mixture of similar_items of children"""
items = sum([child.similar_items(work_id, n) for child in self.children], [])
items.sort(key=lambda item: item[1], reverse=True)
used = set()
ret = []
for work_id, score in items:
if work_id in used:
continue
used.add(work_id)
ret.append((work_id, score))
return ret[:n]
recommender = MixRecommendation()
works = recommender.sample_animes(20)
staff_model = StaffModel()
logger.info("Launching a Web Server")
app = FastAPI()
origins = [
"http://cympfh.cc",
"http://s.cympfh.cc",
"http://localhost",
"http://localhost:8080",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
logger.info("Ready")
@app.get("/anime/api/info")
async def anime_info(work_id: int):
"""Returns Info"""
if not recommender.isknown(work_id):
raise HTTPException(status_code=404, detail="Item not found")
relatives_watch = recommender.similar_items(work_id, 5)
relatives_staff = [
(work_id, score)
for (work_id, score) in staff_model.similar_items(work_id, 10)
if recommender.isknown(work_id)
][:5]
return {
"workId": work_id,
"title": recommender.title(work_id),
"image": recommender.image(work_id),
"relatives_watch": [
{
"workId": work_id,
"title": recommender.title(work_id),
"score": float(score),
}
for work_id, score in relatives_watch
],
"relatives_staff": [
{
"workId": work_id,
"title": recommender.title(work_id),
"score": float(score),
}
for work_id, score in relatives_staff
],
}
@app.get("/anime/api/recommend")
async def recommend(likes: List[int] = Query(None)):
"""Recommendation from user's likes
Parameters
----------
likes
List of workId
"""
if likes is None:
works = recommender.sample_animes(20)
return {
"items": [
{
"workId": work_id,
"title": recommender.title(work_id),
"image": recommender.image(work_id),
}
for work_id in works
]
}
recommend_items = recommender(likes, 20)
return {
"items": [
{
"workId": work_id,
"title": recommender.title(work_id),
"image": recommender.image(work_id),
"score": float(score),
}
for work_id, score in recommend_items
],
"source": {
"likes": [
{"workId": work_id, "title": recommender.title(work_id)}
for work_id in likes
]
},
}
@app.get("/anime/recommend", response_class=HTMLResponse)
async def index_recommend():
"""Recommendation Page"""
with open("./templates/recommend.html", "rt") as f:
return f.read()
@app.get("/anime/random", response_class=RedirectResponse)
async def index_random():
"""Redirect to Random /anime/{work_id}"""
work_id = recommender.sample_animes(1)[0]
return RedirectResponse(f"/anime/{work_id}")
@app.get("/anime/{work_id}", response_class=HTMLResponse)
async def index_anime_graph(work_id: int):
"""Index for Each Anime"""
if not recommender.isknown(work_id):
raise HTTPException(status_code=404, detail="Item not found")
with open("./templates/anime.html", "rt") as f:
return f.read()
@app.get("/", response_class=RedirectResponse)
async def index():
"""Redirect to /anime"""
return RedirectResponse("/anime")
@app.get("/anime", response_class=HTMLResponse)
async def index_anime():
"""Index of All"""
with open("./templates/index.html", "rt") as f:
return f.read()
| en | 0.752841 | Matrix-decompositionable Initialize as Empty Insert a value Parameters ---------- row workId col userId val reviewed? Fitting Debug Run Recommendation Parameters ---------- likes List of work_id n num of returns Returns ------- List of (work_id and score) Recommendation has a Matrix init Parameters ---------- dataset RDB of Record(work_id, user_id, rating) This is reviews or records. limit_anime sub limit of freq of anime limit_user sub limit of freq of user # work_id -> title # work_id -> ImageUrl # List of (work_id, user_id, rating) # work_id -> count # user_id -> count Known Anime? Anime Title Anime Image Url Returns List of random work_id Similar animes Returns ------- List of (work_id: int, score: float) Recommend Self Testing # collect likes # testing # pseudo answer # pseudo input Wrapper of Multiple Recommendations Init child recommenders Returns List of work_id anime title image url Mixture of recommend of children is-known by any children Mixture of similar_items of children Returns Info Recommendation from user's likes Parameters ---------- likes List of workId Recommendation Page Redirect to Random /anime/{work_id} Index for Each Anime Redirect to /anime Index of All | 2.217627 | 2 |
gui/gui.py | ken-fu/db_viewer | 0 | 6618516 | <filename>gui/gui.py
# -*- coding: utf-8 -*-
import sys
import sqlite3
import re
import configparser
import pyperclip
from PyQt5.QtWidgets import QWidget, QPushButton, QLabel, QComboBox, QTextEdit
from PyQt5.QtWidgets import QLineEdit, QAbstractItemView, QCheckBox, QMessageBox
from PyQt5.Qt import Qt
from file_manager.sql_manager import SqlManager
from file_manager.translate import translater
from file_manager.folder_check import get_all_database
from gui.base_widgets import MyTableModel, MainTreeView
from gui.sub_widgets import TagEditSubWin, CommentSubWin, OutputSubWin
class MainWidget(QWidget):
    """Main window of the database viewer.

    Loads paper records from a per-journal SQLite database, shows them in a
    tree view, and lets the user filter by publish-date range, keyword and
    tag, inspect/translate the selected entry, copy its DOI, and open
    sub-windows for tag editing, comments and output.

    Internal row layout (``paper_list`` / ``paper_view_list`` tuples):
    (add_date, tag_id, comment_flag, title, abstract, first_author,
    research_group, doi, publish_date, comment).
    """

    def __init__(self):
        super().__init__()
        self.resize(1080, 720)
        self.move(100, 100)
        self.setWindowTitle('Database Viewer')
        self.import_tag()
        self.create_tree()
        self.create_filter_widgets()
        self.create_widgets()
        # paper_list: every row of the loaded database.
        # paper_view_list (set in import_database/filter_check): the
        # filtered subset currently shown in the tree.
        self.paper_list = []
        self.headers = ["Add Date", "T", "C", "Title"]
        self.show()

    def create_tree(self):
        '''Create the main tree widget (read-only paper list).'''
        self.main_tree = MainTreeView(self)
        self.main_tree.move(10, 50)
        self.main_tree.setFixedSize(430, 600)
        self.main_tree.clicked.connect(self.update_text)
        # The tree is display-only; editing happens in the text boxes.
        self.main_tree.setEditTriggers(QAbstractItemView.NoEditTriggers)

    def create_filter_widgets(self):
        '''Create the filtering widgets (date range, keyword, tag).

        Each filter has a checkbox; toggling any checkbox re-runs
        filter_check so the tree updates immediately.
        '''
        # --- publish-date range filter -------------------------------
        self.label_dr = QLabel("Publish Date Range", self)
        self.label_dr.move(450, 15)
        self.combobox_y = QComboBox(self)
        self.combobox_y.addItems(["2015", "2016", "2017", "2018",
                                  "2019", "2020", "2021", "2022", "2023"])
        self.combobox_y.move(600, 10)
        self.combobox_y.setFixedWidth(80)
        self.combobox_m = QComboBox(self)
        for temp_mon in range(1, 13):
            self.combobox_m.addItem(str(temp_mon))
        self.combobox_m.move(670, 10)
        self.combobox_m.setFixedWidth(60)
        self.combobox_d = QComboBox(self)
        for temp_day in range(1, 32):
            self.combobox_d.addItem(str(temp_day))
        self.combobox_d.move(720, 10)
        self.combobox_d.setFixedWidth(60)
        self.label_combo = QLabel("~", self)
        self.label_combo.move(780, 15)
        self.combobox_2y = QComboBox(self)
        self.combobox_2y.addItems(["2015", "2016", "2017", "2018",
                                   "2019", "2020", "2021", "2022", "2023"])
        self.combobox_2y.move(800, 10)
        self.combobox_2y.setFixedWidth(80)
        self.combobox_2m = QComboBox(self)
        for temp_mon in range(1, 13):
            self.combobox_2m.addItem(str(temp_mon))
        self.combobox_2m.move(870, 10)
        self.combobox_2m.setFixedWidth(60)
        self.combobox_2d = QComboBox(self)
        for temp_day in range(1, 32):
            self.combobox_2d.addItem(str(temp_day))
        self.combobox_2d.move(920, 10)
        self.combobox_2d.setFixedWidth(60)
        self.checkbox_dr = QCheckBox("", self)
        self.checkbox_dr.move(1005, 15)
        self.checkbox_dr.stateChanged.connect(self.filter_check)
        # --- keyword filter ------------------------------------------
        self.label_search = QLabel("Keyword Search", self)
        self.label_search.move(450, 50)
        self.textbox_search = QLineEdit(self)
        self.textbox_search.setFixedWidth(400)
        self.textbox_search.move(600, 50)
        self.checkbox_search = QCheckBox("", self)
        self.checkbox_search.move(1005, 50)
        self.checkbox_search.stateChanged.connect(self.filter_check)
        # --- tag filter ----------------------------------------------
        self.label_tagfilter = QLabel("Tag Filter", self)
        self.label_tagfilter.move(450, 85)
        self.combobox_tagfilter = QComboBox(self)
        self.combobox_tagfilter.addItems(self.tag_list)
        self.combobox_tagfilter.move(600, 80)
        self.combobox_tagfilter.setFixedWidth(150)
        self.checkbox_tagfilter = QCheckBox("", self)
        self.checkbox_tagfilter.move(1005, 85)
        self.checkbox_tagfilter.stateChanged.connect(self.filter_check)

    def create_widgets(self):
        '''Create the remaining widgets of the main window.'''
        # Journal selector; "----" is the no-selection placeholder.
        self.article_list = get_all_database()
        self.combobox_article = QComboBox(self)
        self.combobox_article.addItem("----")
        self.combobox_article.addItems(self.article_list)
        self.combobox_article.move(15, 10)
        self.combobox_article.activated[str].connect(self.import_database)
        self.pbutton_output = QPushButton("Output", self)
        self.pbutton_output.move(220, 10)
        self.pbutton_output.clicked.connect(self.create_output_sub_win)
        self.pbutton_tagedit = QPushButton("Tag Edit", self)
        self.pbutton_tagedit.move(320, 10)
        self.pbutton_tagedit.clicked.connect(self.create_tag_edit_sub_win)
        self.label_title = QLabel("Title", self)
        self.label_title.move(450, 130)
        self.textbox_title = QTextEdit(self)
        self.textbox_title.move(450, 150)
        self.textbox_title.setFixedSize(600, 42)
        self.label_fa = QLabel("First Auther", self)
        self.label_fa.move(450, 200)
        self.textbox_fa = QLineEdit(self)
        self.textbox_fa.move(450, 220)
        self.textbox_fa.setFixedWidth(420)
        self.label_pd = QLabel("Publish Date", self)
        self.label_pd.move(900, 200)
        self.textbox_pd = QLineEdit(self)
        self.textbox_pd.move(900, 220)
        self.textbox_pd.setFixedWidth(150)
        self.label_rg = QLabel("Research Group", self)
        self.label_rg.move(450, 250)
        self.textbox_rg = QTextEdit(self)
        self.textbox_rg.move(450, 270)
        self.textbox_rg.setFixedSize(600, 60)
        self.label_doi = QLabel("DOI", self)
        self.label_doi.move(450, 350)
        self.textbox_doi = QLineEdit(self)
        self.textbox_doi.move(450, 370)
        self.textbox_doi.setFixedWidth(330)
        self.pbutton_doi = QPushButton("Copy", self)
        self.pbutton_doi.move(550, 335)
        self.pbutton_doi.clicked.connect(self.doi_copy)
        self.label_tag = QLabel("Tag", self)
        self.label_tag.move(800, 350)
        self.textbox_tag = QLineEdit(self)
        self.textbox_tag.move(800, 370)
        self.textbox_tag.setFixedWidth(120)
        self.pbutton_tag = QPushButton("Write", self)
        self.pbutton_tag.move(920, 335)
        self.pbutton_tag.clicked.connect(self.tag_write)
        self.combobox_tag = QComboBox(self)
        self.combobox_tag.addItems(self.tag_list)
        self.combobox_tag.move(920, 365)
        self.combobox_tag.setFixedWidth(150)
        self.label_abst = QLabel("Abstract", self)
        self.label_abst.move(450, 410)
        self.textbox_abst = QTextEdit(self)
        self.textbox_abst.move(450, 430)
        self.textbox_abst.setFixedSize(600, 220)
        self.pbutton_comment = QPushButton("Comment", self)
        self.pbutton_comment.move(450, 660)
        self.comment_data = ''
        self.pbutton_comment.clicked.connect(self.create_comment_sub_win)
        self.pbutton_trans = QPushButton("En -> Jp", self)
        self.pbutton_trans.move(550, 400)
        self.pbutton_trans.clicked.connect(self.abst_translate)
        self.pbutton_title_trans = QPushButton("En -> Jp", self)
        self.pbutton_title_trans.move(550, 120)
        self.pbutton_title_trans.clicked.connect(self.title_translate)

    # Update the tree according to the selected journal
    # paper_view_list is for display and is rewritten by filtering
    def set_tree(self):
        '''Refresh the main tree from self.paper_view_list.

        When the (possibly filtered) view list is empty, notify the user
        and fall back to showing the full, unfiltered paper list.
        '''
        if not self.paper_view_list:
            self.message_box = QMessageBox.information(
                self, "", "No data found", QMessageBox.Close)
            # Fall back to the full list ONLY when the filter matched
            # nothing; resetting unconditionally (as before) silently
            # undid every filter applied by filter_check.
            self.paper_view_list = self.paper_list[:]
        self.model = MyTableModel(self.paper_view_list, self.headers)
        self.main_tree.setModel(self.model)
        self.main_tree.setColumnWidth(0, 90)
        self.main_tree.setColumnWidth(1, 30)
        self.main_tree.setColumnWidth(2, 5)
        self.main_tree.setColumnWidth(3, 270)
        # Columns 4-9 carry the full record (abstract, DOI, comment, ...)
        # for update_text but are not shown in the tree.
        self.main_tree.hideColumn(4)
        self.main_tree.hideColumn(5)
        self.main_tree.hideColumn(6)
        self.main_tree.hideColumn(7)
        self.main_tree.hideColumn(8)
        self.main_tree.hideColumn(9)

    def import_tag(self):
        '''Load tag definitions from config/tag_config.ini.

        Creates the file with a default "Tag" section when it is missing.
        Fills self.tag_list with "id:name" strings (e.g. "00:Tag").
        '''
        try:
            file = open('config/tag_config.ini', 'r')
            file.close()
        except FileNotFoundError:
            # First run: create the config with one default tag.
            file = open('config/tag_config.ini', 'a+')
            file.write('[Tag]\n')
            file.write('00 = Tag\n')
            file.close()
        self.conf_parser = configparser.ConfigParser()
        self.conf_parser.read('config/tag_config.ini')
        self.item_list = list(self.conf_parser['Tag'])
        self.tag_list = []
        for item in self.item_list:
            self.tag_list.append(item + ":" + self.conf_parser['Tag'][item])

    def import_database(self, article_name):
        '''Load the paper database for the selected journal.

        Parameters
        ----------
        article_name : str
            Database base name under database/; "----" means no selection.
        '''
        if article_name == '----':
            return
        self.db_data = sqlite3.connect('database/' + article_name + '.db')
        self.db_data.execute("PRAGMA foreign_keys = 1")
        sql = "select * from data_set"
        self.paper_list = []
        # Newest first; DB columns appear to be (title, abst, first_author,
        # group, doi, publish_date, add_date, tag, comment) -- TODO confirm
        # against the schema writer.
        for row in list(self.db_data.execute(sql))[::-1]:
            self.temp_comment_data = ''
            if re.sub(r'\s', '', row[8]) != '':
                # '*' marks rows that carry a non-blank comment.
                self.temp_comment_data = '*'
            row_out = (row[6], row[7], self.temp_comment_data, row[0],
                       row[1], row[2], row[3], row[4], row[5], row[8])
            self.paper_list.append(row_out)
        self.paper_view_list = self.paper_list[:]
        self.set_tree()

    def update_text(self):
        '''Fill the detail text boxes from the selected tree row.'''
        index = self.main_tree.selectedIndexes()[0]
        temp_data = self.model.display_data(index)
        self.textbox_title.setText(temp_data[3])
        self.textbox_abst.setText(temp_data[4])
        self.textbox_fa.setText(temp_data[5])
        self.textbox_rg.setText(temp_data[6])
        self.textbox_doi.setText(temp_data[7])
        self.textbox_pd.setText(temp_data[8])
        # '00' is the untagged placeholder; only show real tag names.
        if(temp_data[1].zfill(2) != '00'):
            self.textbox_tag.setText(self.conf_parser['Tag'][temp_data[1].zfill(2)])
        self.comment_data = temp_data[9]

    def reset_text(self):
        '''Clear all detail text boxes and the cached comment.'''
        self.textbox_title.clear()
        self.textbox_abst.clear()
        self.textbox_fa.clear()
        self.textbox_rg.clear()
        self.textbox_doi.clear()
        self.textbox_pd.clear()
        self.textbox_tag.clear()
        self.comment_data = ''

    def title_translate(self):
        '''Translate the title text box in place (English -> Japanese).'''
        title_text = self.textbox_title.toPlainText()
        title_jp = translater(title_text)
        self.textbox_title.setText(title_jp)

    def abst_translate(self):
        '''Translate the abstract text box in place (English -> Japanese).'''
        abst_text = self.textbox_abst.toPlainText()
        abst_jp = translater(abst_text)
        self.textbox_abst.setText(abst_jp)

    def doi_copy(self):
        '''Copy the DOI text box content to the system clipboard.'''
        pyperclip.copy(self.textbox_doi.text())

    # Write tag information of selected articles in database
    def tag_write(self):
        '''Write the selected tag to the current article's database row,
        then reload and re-apply the active filters.'''
        self.sql_m = SqlManager(self.combobox_article.currentText() + '.db')
        # Tag combobox items look like "id:name"; store only the id.
        self.sql_m.write_tag_data(self.combobox_tag.currentText().split(':')[
            0], re.sub(r'\s', '', self.textbox_doi.text()))
        self.import_database(self.combobox_article.currentText())
        self.filter_check()

    def filter_by_keyword(self):
        '''Keep only rows whose title or abstract contains the keyword
        (case-insensitive). Operates on self.paper_view_list in place.'''
        # Find out whether there is a list to filter
        try:
            self.paper_view_list
        except AttributeError:
            return
        filter_words = re.sub(r'\s', '', self.textbox_search.text())
        if filter_words == '':
            return
        self.paper_temp_list = self.paper_view_list[:]
        self.paper_view_list = []
        for row in self.paper_temp_list:
            # Determine whether the title or abst contains keywords
            if (filter_words.lower() in row[3].lower()
                    or filter_words.lower() in row[4].lower()):
                self.paper_view_list.append(row)

    def filter_by_date_range(self):
        '''Keep only rows whose publish date falls inside the selected
        range (inclusive). Operates on self.paper_view_list in place.'''
        try:
            self.paper_view_list
        except AttributeError:
            return
        # Dates compared as zero-padded YYYYMMDD integers.
        self.startdate = int(self.combobox_y.currentText(
        ) + self.combobox_m.currentText().zfill(2) + self.combobox_d.currentText().zfill(2))
        self.enddate = int(self.combobox_2y.currentText(
        ) + self.combobox_2m.currentText().zfill(2) + self.combobox_2d.currentText().zfill(2))
        self.paper_temp_list = self.paper_view_list[:]
        self.paper_view_list = []
        for row in self.paper_temp_list:
            # Determine whether it is within the specified period
            p_date_list = row[8].split('-')
            p_date = p_date_list[0] + \
                p_date_list[1].zfill(2) + p_date_list[2].zfill(2)
            if (int(p_date) >= self.startdate and int(p_date) <= self.enddate):
                self.paper_view_list.append(row)

    def filter_by_tag(self):
        '''Keep only rows whose tag id matches the selected tag.
        Operates on self.paper_view_list in place.'''
        try:
            self.paper_view_list
        except AttributeError:
            return
        self.paper_temp_list = self.paper_view_list[:]
        self.paper_view_list = []
        for row in self.paper_temp_list:
            # Check that the selected tag matches the item in the list
            if(row[1].replace('-', '').zfill(2) == self.combobox_tagfilter.currentText().split(':')[0]):
                self.paper_view_list.append(row)

    def filter_check(self):
        '''Re-apply all checked filters to the full list, then redraw.'''
        self.paper_view_list = self.paper_list[:]
        if self.checkbox_search.checkState() == Qt.Checked:
            self.filter_by_keyword()
        if self.checkbox_dr.checkState() == Qt.Checked:
            self.filter_by_date_range()
        if self.checkbox_tagfilter.checkState() == Qt.Checked:
            self.filter_by_tag()
        self.set_tree()

    def create_tag_edit_sub_win(self):
        '''Open the tag-edit sub window and refresh the tag comboboxes.'''
        self.tagedit_sw = TagEditSubWin(self)
        self.tagedit_sw.show()
        self.import_tag()
        self.combobox_tag.clear()
        self.combobox_tag.addItems(self.tag_list)
        self.combobox_tagfilter.clear()
        self.combobox_tagfilter.addItems(self.tag_list)

    def create_output_sub_win(self):
        '''Open the output sub window for the currently displayed rows.'''
        self.out_sw = OutputSubWin(self.paper_view_list, self)
        self.out_sw.show()

    def create_comment_sub_win(self):
        '''Open the comment sub window for the selected article, then
        reload the database and re-apply the active filters.'''
        temp_article = self.combobox_article.currentText()
        temp_doi = self.textbox_doi.text()
        self.comment_sw = CommentSubWin(
            temp_article, self.comment_data, temp_doi, self)
        self.comment_sw.show()
        self.import_database(temp_article)
        self.set_tree()
        self.reset_text()
        self.filter_check()
| <filename>gui/gui.py
# -*- coding: utf-8 -*-
import sys
import sqlite3
import re
import configparser
import pyperclip
from PyQt5.QtWidgets import QWidget, QPushButton, QLabel, QComboBox, QTextEdit
from PyQt5.QtWidgets import QLineEdit, QAbstractItemView, QCheckBox, QMessageBox
from PyQt5.Qt import Qt
from file_manager.sql_manager import SqlManager
from file_manager.translate import translater
from file_manager.folder_check import get_all_database
from gui.base_widgets import MyTableModel, MainTreeView
from gui.sub_widgets import TagEditSubWin, CommentSubWin, OutputSubWin
class MainWidget(QWidget):
def __init__(self):
super().__init__()
self.resize(1080, 720)
self.move(100, 100)
self.setWindowTitle('Database Viewer')
self.import_tag()
self.create_tree()
self.create_filter_widgets()
self.create_widgets()
self.paper_list = []
self.headers = ["Add Date", "T", "C", "Title"]
self.show()
def create_tree(self):
'''create main tree widget'''
self.main_tree = MainTreeView(self)
self.main_tree.move(10, 50)
self.main_tree.setFixedSize(430, 600)
self.main_tree.clicked.connect(self.update_text)
self.main_tree.setEditTriggers(QAbstractItemView.NoEditTriggers)
def create_filter_widgets(self):
'''create filtering related widgets'''
self.label_dr = QLabel("Publish Date Range", self)
self.label_dr.move(450, 15)
self.combobox_y = QComboBox(self)
self.combobox_y.addItems(["2015", "2016", "2017", "2018",
"2019", "2020", "2021", "2022", "2023"])
self.combobox_y.move(600, 10)
self.combobox_y.setFixedWidth(80)
self.combobox_m = QComboBox(self)
for temp_mon in range(1, 13):
self.combobox_m.addItem(str(temp_mon))
self.combobox_m.move(670, 10)
self.combobox_m.setFixedWidth(60)
self.combobox_d = QComboBox(self)
for temp_day in range(1, 32):
self.combobox_d.addItem(str(temp_day))
self.combobox_d.move(720, 10)
self.combobox_d.setFixedWidth(60)
self.label_combo = QLabel("~", self)
self.label_combo.move(780, 15)
self.combobox_2y = QComboBox(self)
self.combobox_2y.addItems(["2015", "2016", "2017", "2018",
"2019", "2020", "2021", "2022", "2023"])
self.combobox_2y.move(800, 10)
self.combobox_2y.setFixedWidth(80)
self.combobox_2m = QComboBox(self)
for temp_mon in range(1, 13):
self.combobox_2m.addItem(str(temp_mon))
self.combobox_2m.move(870, 10)
self.combobox_2m.setFixedWidth(60)
self.combobox_2d = QComboBox(self)
for temp_day in range(1, 32):
self.combobox_2d.addItem(str(temp_day))
self.combobox_2d.move(920, 10)
self.combobox_2d.setFixedWidth(60)
self.checkbox_dr = QCheckBox("", self)
self.checkbox_dr.move(1005, 15)
self.checkbox_dr.stateChanged.connect(self.filter_check)
self.label_search = QLabel("Keyword Search", self)
self.label_search.move(450, 50)
self.textbox_search = QLineEdit(self)
self.textbox_search.setFixedWidth(400)
self.textbox_search.move(600, 50)
self.checkbox_search = QCheckBox("", self)
self.checkbox_search.move(1005, 50)
self.checkbox_search.stateChanged.connect(self.filter_check)
self.label_tagfilter = QLabel("Tag Filter", self)
self.label_tagfilter.move(450, 85)
self.combobox_tagfilter = QComboBox(self)
self.combobox_tagfilter.addItems(self.tag_list)
self.combobox_tagfilter.move(600, 80)
self.combobox_tagfilter.setFixedWidth(150)
self.checkbox_tagfilter = QCheckBox("", self)
self.checkbox_tagfilter.move(1005, 85)
self.checkbox_tagfilter.stateChanged.connect(self.filter_check)
def create_widgets(self):
'''create widgets on main window'''
self.article_list = get_all_database()
self.combobox_article = QComboBox(self)
self.combobox_article.addItem("----")
self.combobox_article.addItems(self.article_list)
self.combobox_article.move(15, 10)
self.combobox_article.activated[str].connect(self.import_database)
self.pbutton_output = QPushButton("Output", self)
self.pbutton_output.move(220, 10)
self.pbutton_output.clicked.connect(self.create_output_sub_win)
self.pbutton_tagedit = QPushButton("Tag Edit", self)
self.pbutton_tagedit.move(320, 10)
self.pbutton_tagedit.clicked.connect(self.create_tag_edit_sub_win)
self.label_title = QLabel("Title", self)
self.label_title.move(450, 130)
self.textbox_title = QTextEdit(self)
self.textbox_title.move(450, 150)
self.textbox_title.setFixedSize(600, 42)
self.label_fa = QLabel("First Auther", self)
self.label_fa.move(450, 200)
self.textbox_fa = QLineEdit(self)
self.textbox_fa.move(450, 220)
self.textbox_fa.setFixedWidth(420)
self.label_pd = QLabel("Publish Date", self)
self.label_pd.move(900, 200)
self.textbox_pd = QLineEdit(self)
self.textbox_pd.move(900, 220)
self.textbox_pd.setFixedWidth(150)
self.label_rg = QLabel("Research Group", self)
self.label_rg.move(450, 250)
self.textbox_rg = QTextEdit(self)
self.textbox_rg.move(450, 270)
self.textbox_rg.setFixedSize(600, 60)
self.label_doi = QLabel("DOI", self)
self.label_doi.move(450, 350)
self.textbox_doi = QLineEdit(self)
self.textbox_doi.move(450, 370)
self.textbox_doi.setFixedWidth(330)
self.pbutton_doi = QPushButton("Copy", self)
self.pbutton_doi.move(550, 335)
self.pbutton_doi.clicked.connect(self.doi_copy)
self.label_tag = QLabel("Tag", self)
self.label_tag.move(800, 350)
self.textbox_tag = QLineEdit(self)
self.textbox_tag.move(800, 370)
self.textbox_tag.setFixedWidth(120)
self.pbutton_tag = QPushButton("Write", self)
self.pbutton_tag.move(920, 335)
self.pbutton_tag.clicked.connect(self.tag_write)
self.combobox_tag = QComboBox(self)
self.combobox_tag.addItems(self.tag_list)
self.combobox_tag.move(920, 365)
self.combobox_tag.setFixedWidth(150)
self.label_abst = QLabel("Abstract", self)
self.label_abst.move(450, 410)
self.textbox_abst = QTextEdit(self)
self.textbox_abst.move(450, 430)
self.textbox_abst.setFixedSize(600, 220)
self.pbutton_comment = QPushButton("Comment", self)
self.pbutton_comment.move(450, 660)
self.comment_data = ''
self.pbutton_comment.clicked.connect(self.create_comment_sub_win)
self.pbutton_trans = QPushButton("En -> Jp", self)
self.pbutton_trans.move(550, 400)
self.pbutton_trans.clicked.connect(self.abst_translate)
self.pbutton_title_trans = QPushButton("En -> Jp", self)
self.pbutton_title_trans.move(550, 120)
self.pbutton_title_trans.clicked.connect(self.title_translate)
# Update the tree according to the selected journal
# paper_view_list is for display and is rewritten by filtering
def set_tree(self):
'''initialization main tree'''
if(self.paper_view_list==[]):
self.message_box = QMessageBox.information(
self, "", "No data found", QMessageBox.Close)
self.paper_view_list = self.paper_list[:]
self.model = MyTableModel(self.paper_view_list, self.headers)
self.main_tree.setModel(self.model)
self.main_tree.setColumnWidth(0, 90)
self.main_tree.setColumnWidth(1, 30)
self.main_tree.setColumnWidth(2, 5)
self.main_tree.setColumnWidth(3, 270)
self.main_tree.hideColumn(4)
self.main_tree.hideColumn(5)
self.main_tree.hideColumn(6)
self.main_tree.hideColumn(7)
self.main_tree.hideColumn(8)
self.main_tree.hideColumn(9)
def import_tag(self):
'''import tag data from tag_config.ini'''
while True:
try:
file = open('config/tag_config.ini', 'r')
file.close()
break
except FileNotFoundError:
file = open('config/tag_config.ini', 'a+')
file.write('[Tag]\n')
file.write('00 = Tag\n')
file.close()
break
self.conf_parser = configparser.ConfigParser()
self.conf_parser.read('config/tag_config.ini')
self.item_list = list(self.conf_parser['Tag'])
self.tag_list = []
for item in self.item_list:
self.tag_list.append(item+":"+self.conf_parser['Tag'][item])
def import_database(self, article_name):
'''import paper database'''
if article_name == '----':
return
self.db_data = sqlite3.connect('database/' + article_name + '.db')
self.db_data.execute("PRAGMA foreign_keys = 1")
sql = "select * from data_set"
self.paper_list = []
for row in list(self.db_data.execute(sql))[::-1]:
self.temp_comment_data = ''
if re.sub('\s', '', row[8]) != '':
self.temp_comment_data = '*'
row_out = (row[6], row[7], self.temp_comment_data, row[0],
row[1], row[2], row[3], row[4], row[5], row[8])
self.paper_list.append(row_out)
self.paper_view_list = self.paper_list[:]
self.set_tree()
def update_text(self):
'''update textbox of main window'''
index = self.main_tree.selectedIndexes()[0]
temp_data = self.model.display_data(index)
self.textbox_title.setText(temp_data[3])
self.textbox_abst.setText(temp_data[4])
self.textbox_fa.setText(temp_data[5])
self.textbox_rg.setText(temp_data[6])
self.textbox_doi.setText(temp_data[7])
self.textbox_pd.setText(temp_data[8])
if(temp_data[1].zfill(2) != '00'):
self.textbox_tag.setText(self.conf_parser['Tag'][temp_data[1].zfill(2)])
self.comment_data = temp_data[9]
def reset_text(self):
'''reset textbox of main window'''
self.textbox_title.clear()
self.textbox_abst.clear()
self.textbox_fa.clear()
self.textbox_rg.clear()
self.textbox_doi.clear()
self.textbox_pd.clear()
self.textbox_tag.clear()
self.comment_data = ''
def title_translate(self):
'''translate (en -> jp) title'''
title_text = self.textbox_title.toPlainText()
title_jp = translater(title_text)
self.textbox_title.setText(title_jp)
def abst_translate(self):
'''translate (en -> jp) abst'''
abst_text = self.textbox_abst.toPlainText()
abst_jp = translater(abst_text)
self.textbox_abst.setText(abst_jp)
def doi_copy(self):
'''copy doi text box'''
pyperclip.copy(self.textbox_doi.text())
# Write tag information of selected articles in database
def tag_write(self):
'''write tag data to database'''
self.sql_m = SqlManager(self.combobox_article.currentText()+'.db')
self.sql_m.write_tag_data(self.combobox_tag.currentText().split(':')[
0], re.sub('\s', '', self.textbox_doi.text()))
self.import_database(self.combobox_article.currentText())
self.filter_check()
def filter_by_keyword(self):
'''filtering by keyword of textbox'''
# Find out whether there is a list to filter
try:
self.paper_view_list
except AttributeError:
return
filter_words = self.textbox_search.text()
filter_words = re.sub('\s', '', filter_words)
if(filter_words == ''):
return
self.paper_temp_list = self.paper_view_list[:]
self.paper_view_list = []
for row in self.paper_temp_list:
# Determine whether the title or abst contains keywords
if (filter_words.lower() in row[3].lower() or filter_words.lower() in row[4].lower()):
self.paper_view_list.append(row)
def filter_by_date_range(self):
'''filtering by date range of combobox'''
try:
self.paper_view_list
except AttributeError:
return
self.startdate = int(self.combobox_y.currentText(
) + self.combobox_m.currentText().zfill(2) + self.combobox_d.currentText().zfill(2))
self.enddate = int(self.combobox_2y.currentText(
) + self.combobox_2m.currentText().zfill(2) + self.combobox_2d.currentText().zfill(2))
self.paper_temp_list = self.paper_view_list[:]
self.paper_view_list = []
for row in self.paper_temp_list:
# Determine whether it is within the specified period
p_date_list = row[8].split('-')
p_date = p_date_list[0] + \
p_date_list[1].zfill(2) + p_date_list[2].zfill(2)
if (int(p_date) >= self.startdate and int(p_date) <= self.enddate):
self.paper_view_list.append(row)
def filter_by_tag(self):
'''filtering by tag of combobox'''
try:
self.paper_view_list
except AttributeError:
return
self.paper_temp_list = self.paper_view_list[:]
self.paper_view_list = []
for row in self.paper_temp_list:
# Check that the selected tag matches the item in the list
if(row[1].replace('-', '').zfill(2) == self.combobox_tagfilter.currentText().split(':')[0]):
self.paper_view_list.append(row)
def filter_check(self):
'''filtering by keyword, date range, tag'''
self.paper_view_list = self.paper_list[:]
if self.checkbox_search.checkState() == Qt.Checked:
self.filter_by_keyword()
if self.checkbox_dr.checkState() == Qt.Checked:
self.filter_by_date_range()
if self.checkbox_tagfilter.checkState() == Qt.Checked:
self.filter_by_tag()
self.set_tree()
def create_tag_edit_sub_win(self):
'''create tag edit sub window'''
self.tagedit_sw = TagEditSubWin(self)
self.tagedit_sw.show()
self.import_tag()
self.combobox_tag.clear()
self.combobox_tag.addItems(self.tag_list)
self.combobox_tagfilter.clear()
self.combobox_tagfilter.addItems(self.tag_list)
def create_output_sub_win(self):
'''create output sub window'''
self.out_sw = OutputSubWin(self.paper_view_list, self)
self.out_sw.show()
def create_comment_sub_win(self):
'''create comment sub window'''
temp_article = self.combobox_article.currentText()
temp_doi = self.textbox_doi.text()
self.comment_sw = CommentSubWin(
temp_article, self.comment_data, temp_doi, self)
self.comment_sw.show()
self.import_database(temp_article)
self.set_tree()
self.reset_text()
self.filter_check()
| en | 0.596007 | # -*- coding: utf-8 -*- create main tree widget create filtering related widgets create widgets on main window # Update the tree according to the selected journal # paper_view_list is for display and is rewritten by filtering initialization main tree import tag data from tag_config.ini import paper database update textbox of main window reset textbox of main window translate (en -> jp) title translate (en -> jp) abst copy doi text box # Write tag information of selected articles in database write tag data to database filtering by keyword of textbox # Find out whether there is a list to filter # Determine whether the title or abst contains keywords filtering by date range of combobox # Determine whether it is within the specified period filtering by tag of combobox # Check that the selected tag matches the item in the list filtering by keyword, date range, tag create tag edit sub window create output sub window create comment sub window | 2.260157 | 2 |
pyf/_Dumper.py | snoopyjc/pythonizer | 1 | 6618517 | <reponame>snoopyjc/pythonizer<gh_stars>1-10
_init_package('Data.Dumper')
Data.Dumper.Indent_v = 2
Data.Dumper.Trailingcomma_v = False
Data.Dumper.Purity_v = 0
Data.Dumper.Pad_v = ''
Data.Dumper.Varname_v = "VAR"
Data.Dumper.Useqq_v = 0
Data.Dumper.Terse_v = False
Data.Dumper.Freezer_v = ''
Data.Dumper.Toaster_v = ''
Data.Dumper.Deepcopy_v = 0
Data.Dumper.Quotekeys_v = 1
Data.Dumper.Bless_v = 'bless'
Data.Dumper.Pair_v = ':'
Data.Dumper.Maxdepth_v = 0
Data.Dumper.Maxrecurse_v = 1000
Data.Dumper.Useperl_v = 0
Data.Dumper.Sortkeys_v = 0
Data.Dumper.Deparse_v = False
Data.Dumper.Sparseseen_v = False
def _Dumper(*args):
"""Implementation of Data::Dumper"""
result = []
pp = pprint.PrettyPrinter(indent=Data.Dumper.Indent_v,
depth=None if Data.Dumper.Maxdepth_v==0 else Data.Dumper.Maxdepth_v,
compact=Data.Dumper.Terse_v,
sort_dicts=Data.Dumper.Sortkeys_v)
for i, arg in enumerate(args, start=1):
if Data.Dumper.Terse_v:
result.append(f"{Data.Dumper.Pad_v}" + pp.pformat(arg))
else:
result.append(f"{Data.Dumper.Pad_v}{Data.Dumper.Varname_v}{i} = " + pp.pformat(arg))
spacer = " " if Data.Dumper.Indent_v == 0 else "\n"
return spacer.join(result)
| _init_package('Data.Dumper')
Data.Dumper.Indent_v = 2
Data.Dumper.Trailingcomma_v = False
Data.Dumper.Purity_v = 0
Data.Dumper.Pad_v = ''
Data.Dumper.Varname_v = "VAR"
Data.Dumper.Useqq_v = 0
Data.Dumper.Terse_v = False
Data.Dumper.Freezer_v = ''
Data.Dumper.Toaster_v = ''
Data.Dumper.Deepcopy_v = 0
Data.Dumper.Quotekeys_v = 1
Data.Dumper.Bless_v = 'bless'
Data.Dumper.Pair_v = ':'
Data.Dumper.Maxdepth_v = 0
Data.Dumper.Maxrecurse_v = 1000
Data.Dumper.Useperl_v = 0
Data.Dumper.Sortkeys_v = 0
Data.Dumper.Deparse_v = False
Data.Dumper.Sparseseen_v = False
def _Dumper(*args):
"""Implementation of Data::Dumper"""
result = []
pp = pprint.PrettyPrinter(indent=Data.Dumper.Indent_v,
depth=None if Data.Dumper.Maxdepth_v==0 else Data.Dumper.Maxdepth_v,
compact=Data.Dumper.Terse_v,
sort_dicts=Data.Dumper.Sortkeys_v)
for i, arg in enumerate(args, start=1):
if Data.Dumper.Terse_v:
result.append(f"{Data.Dumper.Pad_v}" + pp.pformat(arg))
else:
result.append(f"{Data.Dumper.Pad_v}{Data.Dumper.Varname_v}{i} = " + pp.pformat(arg))
spacer = " " if Data.Dumper.Indent_v == 0 else "\n"
return spacer.join(result) | en | 0.540402 | Implementation of Data::Dumper | 2.214118 | 2 |
validate_blockchain.py | isidharthrai/Blockchain-Simulation-using-Python | 1 | 6618518 | <reponame>isidharthrai/Blockchain-Simulation-using-Python
import hashlib
s = hashlib.sha256()
from block import Block
import addBlock as add
def validate_blockchain(blockchain):
for i in range (1,len(blockchain)):
current_block = blockchain[i]
previous_block = blockchain[i-1]
#current hash and calulated hash
if ( current_block.hash != current_block.hash_block()):
print ("Invalid Stage 1 error for Block {}".format(current_block.index))
return False
#previous hash validation
if ( current_block.previous_hash != previous_block.hash):
print ("Invalid Stage 2 error for Block {}".format(current_block.index))
return False
print("Valid at all stages")
return True
validate_blockchain(add.blockchain)
| import hashlib
s = hashlib.sha256()
from block import Block
import addBlock as add
def validate_blockchain(blockchain):
for i in range (1,len(blockchain)):
current_block = blockchain[i]
previous_block = blockchain[i-1]
#current hash and calulated hash
if ( current_block.hash != current_block.hash_block()):
print ("Invalid Stage 1 error for Block {}".format(current_block.index))
return False
#previous hash validation
if ( current_block.previous_hash != previous_block.hash):
print ("Invalid Stage 2 error for Block {}".format(current_block.index))
return False
print("Valid at all stages")
return True
validate_blockchain(add.blockchain) | en | 0.632535 | #current hash and calulated hash #previous hash validation | 3.507188 | 4 |
gunlink/core/errors.py | Brijeshkrishna/gunlink | 0 | 6618519 | class lookupError(Exception):
def __init__(self):
super(Exception, self).__init__("It is not a IPV4 or IPv6 address")
class tinyUrlError(Exception):
def __init__(self):
super(Exception, self).__init__("Unable to shorten the URL ")
| class lookupError(Exception):
def __init__(self):
super(Exception, self).__init__("It is not a IPV4 or IPv6 address")
class tinyUrlError(Exception):
def __init__(self):
super(Exception, self).__init__("Unable to shorten the URL ")
| none | 1 | 2.769058 | 3 | |
slack_webhook.py | GregoryWiltshire/airflow-slack-webhook | 0 | 6618520 | import boto3
from os import environ
from airflow.contrib.operators.slack_webhook_operator import SlackWebhookOperator
secret_id = environ['SLACK_WEBHOOK_SECRET_ID']
client = boto3.client('secretsmanager')
slack_webhook_url = client.get_secret_value(SecretId=secret_id)['SecretString']
def send_message(context, msg):
message = SlackWebhookOperator(
webhook_token=slack_webhook_url,
message=msg,
task_id=context['task_instance'].task_id
)
return message.execute(context=context)
def failure_callback(context):
owners = str(context['dag'].owner).split(',')
ats = ''.join([f'<@{owner}> ' for owner in owners])
msg = f"""Task Failed.\n
Dag: {context['task_instance'].dag_id}
Task: {context['task_instance'].task_id}
Execution Time: {context['execution_date']}
<{context['task_instance'].log_url}|View Log>
{ats}
"""
send_message(context, msg)
| import boto3
from os import environ
from airflow.contrib.operators.slack_webhook_operator import SlackWebhookOperator
secret_id = environ['SLACK_WEBHOOK_SECRET_ID']
client = boto3.client('secretsmanager')
slack_webhook_url = client.get_secret_value(SecretId=secret_id)['SecretString']
def send_message(context, msg):
message = SlackWebhookOperator(
webhook_token=slack_webhook_url,
message=msg,
task_id=context['task_instance'].task_id
)
return message.execute(context=context)
def failure_callback(context):
owners = str(context['dag'].owner).split(',')
ats = ''.join([f'<@{owner}> ' for owner in owners])
msg = f"""Task Failed.\n
Dag: {context['task_instance'].dag_id}
Task: {context['task_instance'].task_id}
Execution Time: {context['execution_date']}
<{context['task_instance'].log_url}|View Log>
{ats}
"""
send_message(context, msg)
| en | 0.252067 | Task Failed.\n Dag: {context['task_instance'].dag_id} Task: {context['task_instance'].task_id} Execution Time: {context['execution_date']} <{context['task_instance'].log_url}|View Log> {ats} | 2.070374 | 2 |
nautobot_circuit_maintenance/migrations/0003_improve_rawnotification.py | nautobot/nautobot-plugin-circuit-maintenance | 18 | 6618521 | <filename>nautobot_circuit_maintenance/migrations/0003_improve_rawnotification.py
# Generated by Django 3.1.10 on 2021-06-10 09:15
from django.db import migrations, models
import django.db.models.deletion
def migrate_source(apps, schema_editor):
"""Migrate from old text Source to new reference to Notification Source."""
RawNotificationModel = apps.get_model("nautobot_circuit_maintenance", "RawNotification")
NotificationSourceModel = apps.get_model("nautobot_circuit_maintenance", "NotificationSource")
for raw_notification in RawNotificationModel.objects.all():
raw_notification.source = NotificationSourceModel.objects.get(name=raw_notification.source_old)
raw_notification.save()
class Migration(migrations.Migration):
dependencies = [
("nautobot_circuit_maintenance", "0002_notification_secrets_out_of_db"),
]
operations = [
migrations.AlterField(
model_name="rawnotification",
name="sender",
field=models.CharField(blank=True, default="", max_length=200, null=True),
),
migrations.RenameField(
model_name="rawnotification",
old_name="source",
new_name="source_old",
),
migrations.AddField(
model_name="rawnotification",
name="source",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="nautobot_circuit_maintenance.notificationsource",
),
),
migrations.RunPython(migrate_source),
migrations.RemoveField(
model_name="rawnotification",
name="source_old",
),
]
| <filename>nautobot_circuit_maintenance/migrations/0003_improve_rawnotification.py
# Generated by Django 3.1.10 on 2021-06-10 09:15
from django.db import migrations, models
import django.db.models.deletion
def migrate_source(apps, schema_editor):
"""Migrate from old text Source to new reference to Notification Source."""
RawNotificationModel = apps.get_model("nautobot_circuit_maintenance", "RawNotification")
NotificationSourceModel = apps.get_model("nautobot_circuit_maintenance", "NotificationSource")
for raw_notification in RawNotificationModel.objects.all():
raw_notification.source = NotificationSourceModel.objects.get(name=raw_notification.source_old)
raw_notification.save()
class Migration(migrations.Migration):
dependencies = [
("nautobot_circuit_maintenance", "0002_notification_secrets_out_of_db"),
]
operations = [
migrations.AlterField(
model_name="rawnotification",
name="sender",
field=models.CharField(blank=True, default="", max_length=200, null=True),
),
migrations.RenameField(
model_name="rawnotification",
old_name="source",
new_name="source_old",
),
migrations.AddField(
model_name="rawnotification",
name="source",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="nautobot_circuit_maintenance.notificationsource",
),
),
migrations.RunPython(migrate_source),
migrations.RemoveField(
model_name="rawnotification",
name="source_old",
),
]
| en | 0.841383 | # Generated by Django 3.1.10 on 2021-06-10 09:15 Migrate from old text Source to new reference to Notification Source. | 1.932741 | 2 |
yepes/model_mixins/logged.py | samuelmaudo/yepes | 0 | 6618522 | <filename>yepes/model_mixins/logged.py
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
__all__ = ('Logged', )
class Logged(models.Model):
creation_date = models.DateTimeField(
auto_now_add=True,
db_index=True,
editable=False,
verbose_name=_('Creation Date'))
last_modified = models.DateTimeField(
auto_now=True,
db_index=True,
editable=False,
verbose_name=_('Last Modified'))
class Meta:
abstract = True
def save(self, **kwargs):
update_fields = kwargs.get('update_fields')
if update_fields is not None:
update_fields = set(update_fields)
update_fields.add('last_modified')
kwargs['update_fields'] = update_fields
super(Logged, self).save(**kwargs)
| <filename>yepes/model_mixins/logged.py
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
__all__ = ('Logged', )
class Logged(models.Model):
creation_date = models.DateTimeField(
auto_now_add=True,
db_index=True,
editable=False,
verbose_name=_('Creation Date'))
last_modified = models.DateTimeField(
auto_now=True,
db_index=True,
editable=False,
verbose_name=_('Last Modified'))
class Meta:
abstract = True
def save(self, **kwargs):
update_fields = kwargs.get('update_fields')
if update_fields is not None:
update_fields = set(update_fields)
update_fields.add('last_modified')
kwargs['update_fields'] = update_fields
super(Logged, self).save(**kwargs)
| en | 0.736017 | # -*- coding:utf-8 -*- | 2.278734 | 2 |
test/08_StateChannel_test.disabled.py | tohrnii/RYO | 78 | 6618523 | <reponame>tohrnii/RYO<gh_stars>10-100
import pytest
import asyncio
from fixtures.account import account_factory
from utils import Signer
# admin, user, user
NUM_SIGNING_ACCOUNTS = 4
# How long a channel offer persists ('time-units')
OFFER_DURATION = 20
@pytest.fixture(scope='module')
def event_loop():
return asyncio.new_event_loop()
@pytest.fixture(scope='module')
async def game_factory(account_factory):
(starknet, accounts, signers) = account_factory
CONTROLLER_ADDRESS = 34567
channels = await starknet.deploy(
source="contracts/08_StateChannel.cairo",
constructor_calldata=[CONTROLLER_ADDRESS])
return starknet, accounts, signers, channels
@pytest.fixture(scope='module')
@pytest.mark.asyncio
@pytest.mark.parametrize('account_factory', [dict(num_signers=NUM_SIGNING_ACCOUNTS)], indirect=True)
async def test_channel_open(game_factory):
starknet, accounts, signers, channels = game_factory
user_1_signer = signers[1]
user_2_signer = signers[2]
user_3_signer = signers[3]
user_1 = accounts[1]
user_2 = accounts[2]
user_3 = accounts[3]
# User signals availability and submits a pubkey for the channel.
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_1_signer.public_key])
res = await channels.status_of_player(user_1.contract_address).call()
assert res.result.game_key == user_1_signer.public_key
assert res.result.queue_len == 1
assert res.result.index_in_queue == 0
c = res.result.channel_details
assert c.id == 0 # Empty channel has zero ID.
assert c.addresses == (0, 0)
res = await channels.read_queue_length().call()
assert res.result.length == 1
# Second user signals availability and is matched.
await user_2_signer.send_transaction(
account=user_2,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_2_signer.public_key])
res = await channels.read_queue_length().call()
assert res.result.length == 0
res = await channels.status_of_player(user_2.contract_address).call()
assert res.result.game_key == user_2_signer.public_key
assert res.result.queue_len == 0
assert res.result.index_in_queue == 0
c = res.result.channel_details
assert c.id == 1 # First channel has id==1.
assert c.opened_at_block == 1
assert c.last_challenged_at_block == 1
# User 2 opens channel so is recorded at index 0 in the channel.
assert c.addresses[0] == user_2.contract_address
assert c.addresses[1] == user_1.contract_address
assert c.balance == (100, 100)
assert c.initial_channel_data == 987654321
print("Passed: Open a channel.")
return starknet, accounts, signers, channels
@pytest.mark.asyncio
@pytest.mark.parametrize('account_factory', [dict(num_signers=NUM_SIGNING_ACCOUNTS)], indirect=True)
async def test_final_move_submission(test_channel_open):
_, accounts, signers, channels = test_channel_open
user_1_signer = signers[1]
user_2_signer = signers[2]
user_3_signer = signers[3]
user_1 = accounts[1]
user_2 = accounts[2]
user_3 = accounts[3]
# Create and array representing a move
# TODO
# - Look at array_to_move_struct() and create a definitive order for
# how a Move is best represented as an array.
# - Make an array here
# - Test it by calling manual_state_update()
# Sign the array
# E.g.., Signer(move_array)
# Pass it to the other player
# Have them verify the signature/conditions
# Repeat the process N times
# Submit final move.
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='submit_final_move',
calldata=[move, hash, sig_r, sig_s])
# E.g., movement of assets to winner, record events as reportcard.
res = await channels.status_of_player(user_1.contract_address).call()
assert res.result.game_key == 0
assert res.result.queue_len == 0
assert res.result.index_in_queue == 0
c = res.result.channel_details
# Assert c is empty.
# assert balances are changed.
# assert report card administered.
@pytest.mark.asyncio
@pytest.mark.parametrize('account_factory', [dict(num_signers=NUM_SIGNING_ACCOUNTS)], indirect=True)
async def test_close_channel(test_channel_open):
_, accounts, signers, channels = test_channel_open
user_1_signer = signers[1]
user_2_signer = signers[2]
user_3_signer = signers[3]
user_1 = accounts[1]
user_2 = accounts[2]
user_3 = accounts[3]
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='submit_final_move',
calldata=[OFFER_DURATION, user_1_signer.public_key])
# TODO: Implement channel closure logic.
# E.g., movement of assets to winner, record events as reportcard.
res = await channels.status_of_player(user_1.contract_address).call()
assert res.result.game_key == 0
assert res.result.queue_len == 0
assert res.result.index_in_queue == 0
c = res.result.channel_details
# Assert c is empty.
# assert balances are changed.
# assert report card administered.
@pytest.mark.asyncio
@pytest.mark.parametrize('account_factory', [dict(num_signers=NUM_SIGNING_ACCOUNTS)], indirect=True)
async def test_queue_function(game_factory):
_, accounts, signers, channels = game_factory
user_1_signer = signers[1]
user_2_signer = signers[2]
user_3_signer = signers[3]
user_1 = accounts[1]
user_2 = accounts[2]
user_3 = accounts[3]
# User signals availability and submits a pubkey for the channel.
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_1_signer.public_key])
res = await channels.read_queue_length().call()
assert res.result.length == 1
assert res.result.player_at_index_0 == user_1.contract_address
# User 1 cannot rejoin queue.
try:
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_1_signer.public_key])
except Exception as e:
print(f'\nPassed: Prevent queue re-entry.')
# Second user signals availability and is matched.
await user_2_signer.send_transaction(
account=user_2,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_2_signer.public_key])
# User 2 matches, channel should open and queue length reduces.
res = await channels.read_queue_length().call()
assert res.result.length == 0
# User 1 cannot rejoin queue now they are in a channel.
try:
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_1_signer.public_key])
except Exception as e:
print(f'\nPassed: Prevent queue entry once in channel.')
# Third user signals availability and is matched.
await user_3_signer.send_transaction(
account=user_3,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_3_signer.public_key])
# User 3 enters queue.
res = await channels.read_queue_length().call()
assert res.result.length == 1
res = await channels.status_of_player(user_3.contract_address).call()
assert res.result.game_key == user_3_signer.public_key
assert res.result.queue_len == 1
assert res.result.index_in_queue == 0 | import pytest
import asyncio
from fixtures.account import account_factory
from utils import Signer
# admin, user, user
NUM_SIGNING_ACCOUNTS = 4
# How long a channel offer persists ('time-units')
OFFER_DURATION = 20
@pytest.fixture(scope='module')
def event_loop():
return asyncio.new_event_loop()
@pytest.fixture(scope='module')
async def game_factory(account_factory):
(starknet, accounts, signers) = account_factory
CONTROLLER_ADDRESS = 34567
channels = await starknet.deploy(
source="contracts/08_StateChannel.cairo",
constructor_calldata=[CONTROLLER_ADDRESS])
return starknet, accounts, signers, channels
@pytest.fixture(scope='module')
@pytest.mark.asyncio
@pytest.mark.parametrize('account_factory', [dict(num_signers=NUM_SIGNING_ACCOUNTS)], indirect=True)
async def test_channel_open(game_factory):
starknet, accounts, signers, channels = game_factory
user_1_signer = signers[1]
user_2_signer = signers[2]
user_3_signer = signers[3]
user_1 = accounts[1]
user_2 = accounts[2]
user_3 = accounts[3]
# User signals availability and submits a pubkey for the channel.
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_1_signer.public_key])
res = await channels.status_of_player(user_1.contract_address).call()
assert res.result.game_key == user_1_signer.public_key
assert res.result.queue_len == 1
assert res.result.index_in_queue == 0
c = res.result.channel_details
assert c.id == 0 # Empty channel has zero ID.
assert c.addresses == (0, 0)
res = await channels.read_queue_length().call()
assert res.result.length == 1
# Second user signals availability and is matched.
await user_2_signer.send_transaction(
account=user_2,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_2_signer.public_key])
res = await channels.read_queue_length().call()
assert res.result.length == 0
res = await channels.status_of_player(user_2.contract_address).call()
assert res.result.game_key == user_2_signer.public_key
assert res.result.queue_len == 0
assert res.result.index_in_queue == 0
c = res.result.channel_details
assert c.id == 1 # First channel has id==1.
assert c.opened_at_block == 1
assert c.last_challenged_at_block == 1
# User 2 opens channel so is recorded at index 0 in the channel.
assert c.addresses[0] == user_2.contract_address
assert c.addresses[1] == user_1.contract_address
assert c.balance == (100, 100)
assert c.initial_channel_data == 987654321
print("Passed: Open a channel.")
return starknet, accounts, signers, channels
@pytest.mark.asyncio
@pytest.mark.parametrize('account_factory', [dict(num_signers=NUM_SIGNING_ACCOUNTS)], indirect=True)
async def test_final_move_submission(test_channel_open):
_, accounts, signers, channels = test_channel_open
user_1_signer = signers[1]
user_2_signer = signers[2]
user_3_signer = signers[3]
user_1 = accounts[1]
user_2 = accounts[2]
user_3 = accounts[3]
# Create and array representing a move
# TODO
# - Look at array_to_move_struct() and create a definitive order for
# how a Move is best represented as an array.
# - Make an array here
# - Test it by calling manual_state_update()
# Sign the array
# E.g.., Signer(move_array)
# Pass it to the other player
# Have them verify the signature/conditions
# Repeat the process N times
# Submit final move.
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='submit_final_move',
calldata=[move, hash, sig_r, sig_s])
# E.g., movement of assets to winner, record events as reportcard.
res = await channels.status_of_player(user_1.contract_address).call()
assert res.result.game_key == 0
assert res.result.queue_len == 0
assert res.result.index_in_queue == 0
c = res.result.channel_details
# Assert c is empty.
# assert balances are changed.
# assert report card administered.
@pytest.mark.asyncio
@pytest.mark.parametrize('account_factory', [dict(num_signers=NUM_SIGNING_ACCOUNTS)], indirect=True)
async def test_close_channel(test_channel_open):
_, accounts, signers, channels = test_channel_open
user_1_signer = signers[1]
user_2_signer = signers[2]
user_3_signer = signers[3]
user_1 = accounts[1]
user_2 = accounts[2]
user_3 = accounts[3]
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='submit_final_move',
calldata=[OFFER_DURATION, user_1_signer.public_key])
# TODO: Implement channel closure logic.
# E.g., movement of assets to winner, record events as reportcard.
res = await channels.status_of_player(user_1.contract_address).call()
assert res.result.game_key == 0
assert res.result.queue_len == 0
assert res.result.index_in_queue == 0
c = res.result.channel_details
# Assert c is empty.
# assert balances are changed.
# assert report card administered.
@pytest.mark.asyncio
@pytest.mark.parametrize('account_factory', [dict(num_signers=NUM_SIGNING_ACCOUNTS)], indirect=True)
async def test_queue_function(game_factory):
_, accounts, signers, channels = game_factory
user_1_signer = signers[1]
user_2_signer = signers[2]
user_3_signer = signers[3]
user_1 = accounts[1]
user_2 = accounts[2]
user_3 = accounts[3]
# User signals availability and submits a pubkey for the channel.
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_1_signer.public_key])
res = await channels.read_queue_length().call()
assert res.result.length == 1
assert res.result.player_at_index_0 == user_1.contract_address
# User 1 cannot rejoin queue.
try:
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_1_signer.public_key])
except Exception as e:
print(f'\nPassed: Prevent queue re-entry.')
# Second user signals availability and is matched.
await user_2_signer.send_transaction(
account=user_2,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_2_signer.public_key])
# User 2 matches, channel should open and queue length reduces.
res = await channels.read_queue_length().call()
assert res.result.length == 0
# User 1 cannot rejoin queue now they are in a channel.
try:
await user_1_signer.send_transaction(
account=user_1,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_1_signer.public_key])
except Exception as e:
print(f'\nPassed: Prevent queue entry once in channel.')
# Third user signals availability and is matched.
await user_3_signer.send_transaction(
account=user_3,
to=channels.contract_address,
selector_name='signal_available',
calldata=[OFFER_DURATION, user_3_signer.public_key])
# User 3 enters queue.
res = await channels.read_queue_length().call()
assert res.result.length == 1
res = await channels.status_of_player(user_3.contract_address).call()
assert res.result.game_key == user_3_signer.public_key
assert res.result.queue_len == 1
assert res.result.index_in_queue == 0 | en | 0.907037 | # admin, user, user # How long a channel offer persists ('time-units') # User signals availability and submits a pubkey for the channel. # Empty channel has zero ID. # Second user signals availability and is matched. # First channel has id==1. # User 2 opens channel so is recorded at index 0 in the channel. # Create and array representing a move # TODO # - Look at array_to_move_struct() and create a definitive order for # how a Move is best represented as an array. # - Make an array here # - Test it by calling manual_state_update() # Sign the array # E.g.., Signer(move_array) # Pass it to the other player # Have them verify the signature/conditions # Repeat the process N times # Submit final move. # E.g., movement of assets to winner, record events as reportcard. # Assert c is empty. # assert balances are changed. # assert report card administered. # TODO: Implement channel closure logic. # E.g., movement of assets to winner, record events as reportcard. # Assert c is empty. # assert balances are changed. # assert report card administered. # User signals availability and submits a pubkey for the channel. # User 1 cannot rejoin queue. # Second user signals availability and is matched. # User 2 matches, channel should open and queue length reduces. # User 1 cannot rejoin queue now they are in a channel. # Third user signals availability and is matched. # User 3 enters queue. | 1.975285 | 2 |
archi/views.py | yabirgb/archs-arrows | 1 | 6618524 | from django.shortcuts import render
import json, random, requests, os, inspect
from .models import Recipe, Update, Category
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse
# Create your views here.
path = "/path/to/folder" #!!!!!!!!!!!!!!!!!
base = """#!/bin/bash
clear
if [ $(tput colors) ]; then # Checks if terminal supports colors
red="\e[31m"
green="\e[32m"
endcolor="\e[39m"
fi
echo ====================
echo "We are not responsible for any damages that may possibly occur while using Arrow"
echo ====================
echo " "
sleep 2
sudo -s <<ARROW
# Update pacman
echo "Updating pacman (may take a while)"
(
pacman -Syy
) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor";
"""
def join(packages, update, tag):
commit = """""" + base
for i in packages:
f = Recipe.objects.get(package_name = i)
js = f.json
if type(js["command"]) == list:
commit += "echo 'Installing {}\n'".format(js["name"])
for z in js["command"]:
commit += "( \n"
commit += z + " --needed --noconfirm" + "\n"
commit *= """
) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor";\n"""
else:
commit += "echo 'Installing {}'\n".format(js["name"])
commit += """( \n{}\n) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor"; \n""".format(js["command"] + " --noconfirm --needed")
if update:
commit += """echo "Upgrading old packages"\n(\npacman -Syu \n) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor";\n"""
commit += """ARROW\nexit 0"""
print(path + "file.sh")
with open(path + tag +".sh", "w") as f:
f.write(commit)
f.close
from dateutil import parser
import random
from django.template import RequestContext
def main(request):
print path
categories = Category.objects.all()
objects = Recipe.objects.all()
results = {}
dic = "abcdefghijklmnopqrstuvwxyz123456890ABCDEFGHIJKLMNOPQRSTUVWXYZ"
to_install = []
if request.method=='POST':
for i in request.POST:
if request.POST[i] == "on":
a,b,c,d,e,f = random.randint(0,60), random.randint(0,60), random.randint(0,60),random.randint(0,60), random.randint(0,60), random.randint(0,60)
tag = dic[a] + dic[b] + dic[c] + dic[d] + dic[e] + dic[f]
to_install.append(i)
results["tag"] = tag
join(to_install, True, tag)
objects = Recipe.objects.all()
results["apps"] = objects
results["categories"] = categories
return render_to_response("arrow/home.html", results, context_instance=RequestContext(request))
#http://stackoverflow.com/questions/2681338/django-serving-a-download-in-a-generic-view
def file_download(request, filename):
#song = Song.objects.get(id=song_id)
try:
fsock = open(path + '%s.sh' % filename, 'r')
response = HttpResponse(fsock, content_type='text')
response['Content-Disposition'] = "attachment; filename= %s.sh" % filename
return response
except:
return HttpResponse("File not found")
| from django.shortcuts import render
import json, random, requests, os, inspect
from .models import Recipe, Update, Category
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse
# Create your views here.
path = "/path/to/folder" #!!!!!!!!!!!!!!!!!
base = """#!/bin/bash
clear
if [ $(tput colors) ]; then # Checks if terminal supports colors
red="\e[31m"
green="\e[32m"
endcolor="\e[39m"
fi
echo ====================
echo "We are not responsible for any damages that may possibly occur while using Arrow"
echo ====================
echo " "
sleep 2
sudo -s <<ARROW
# Update pacman
echo "Updating pacman (may take a while)"
(
pacman -Syy
) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor";
"""
def join(packages, update, tag):
commit = """""" + base
for i in packages:
f = Recipe.objects.get(package_name = i)
js = f.json
if type(js["command"]) == list:
commit += "echo 'Installing {}\n'".format(js["name"])
for z in js["command"]:
commit += "( \n"
commit += z + " --needed --noconfirm" + "\n"
commit *= """
) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor";\n"""
else:
commit += "echo 'Installing {}'\n".format(js["name"])
commit += """( \n{}\n) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor"; \n""".format(js["command"] + " --noconfirm --needed")
if update:
commit += """echo "Upgrading old packages"\n(\npacman -Syu \n) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor";\n"""
commit += """ARROW\nexit 0"""
print(path + "file.sh")
with open(path + tag +".sh", "w") as f:
f.write(commit)
f.close
from dateutil import parser
import random
from django.template import RequestContext
def main(request):
print path
categories = Category.objects.all()
objects = Recipe.objects.all()
results = {}
dic = "abcdefghijklmnopqrstuvwxyz123456890ABCDEFGHIJKLMNOPQRSTUVWXYZ"
to_install = []
if request.method=='POST':
for i in request.POST:
if request.POST[i] == "on":
a,b,c,d,e,f = random.randint(0,60), random.randint(0,60), random.randint(0,60),random.randint(0,60), random.randint(0,60), random.randint(0,60)
tag = dic[a] + dic[b] + dic[c] + dic[d] + dic[e] + dic[f]
to_install.append(i)
results["tag"] = tag
join(to_install, True, tag)
objects = Recipe.objects.all()
results["apps"] = objects
results["categories"] = categories
return render_to_response("arrow/home.html", results, context_instance=RequestContext(request))
#http://stackoverflow.com/questions/2681338/django-serving-a-download-in-a-generic-view
def file_download(request, filename):
#song = Song.objects.get(id=song_id)
try:
fsock = open(path + '%s.sh' % filename, 'r')
response = HttpResponse(fsock, content_type='text')
response['Content-Disposition'] = "attachment; filename= %s.sh" % filename
return response
except:
return HttpResponse("File not found")
| en | 0.456512 | # Create your views here. #!!!!!!!!!!!!!!!!! #!/bin/bash clear if [ $(tput colors) ]; then # Checks if terminal supports colors red="\e[31m" green="\e[32m" endcolor="\e[39m" fi echo ==================== echo "We are not responsible for any damages that may possibly occur while using Arrow" echo ==================== echo " " sleep 2 sudo -s <<ARROW # Update pacman echo "Updating pacman (may take a while)" ( pacman -Syy ) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor"; ) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor";\n ( \n{}\n) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor"; \n echo "Upgrading old packages"\n(\npacman -Syu \n) &> /dev/null && echo -e "$green OK $endcolor" || echo -e "$red FAILED $endcolor";\n ARROW\nexit 0 #http://stackoverflow.com/questions/2681338/django-serving-a-download-in-a-generic-view #song = Song.objects.get(id=song_id) | 2.296801 | 2 |
padre/tests/test_process_utils.py | krislindgren/padre | 0 | 6618525 | <reponame>krislindgren/padre<filename>padre/tests/test_process_utils.py
from testtools import TestCase
from padre import process_utils as pu
class ProcessUtilsTest(TestCase):
def test_run(self):
r = pu.run(['bash', '-c', 'exit 0'])
r.raise_for_status()
self.assertEqual(r.exit_code, 0)
def test_run_capture(self):
r = pu.run(['bash', '-c', 'echo "hi"'],
stdout=pu.PIPE, stderr=pu.PIPE)
r.raise_for_status()
self.assertNotEqual("", r.stdout)
def test_run_bad(self):
r = pu.run(["bash", "-c", 'exit 1'], stdout=pu.PIPE, stderr=pu.PIPE)
self.assertRaises(pu.ProcessExecutionError, r.raise_for_status)
self.assertEqual(r.exit_code, 1)
| from testtools import TestCase
from padre import process_utils as pu
class ProcessUtilsTest(TestCase):
def test_run(self):
r = pu.run(['bash', '-c', 'exit 0'])
r.raise_for_status()
self.assertEqual(r.exit_code, 0)
def test_run_capture(self):
r = pu.run(['bash', '-c', 'echo "hi"'],
stdout=pu.PIPE, stderr=pu.PIPE)
r.raise_for_status()
self.assertNotEqual("", r.stdout)
def test_run_bad(self):
r = pu.run(["bash", "-c", 'exit 1'], stdout=pu.PIPE, stderr=pu.PIPE)
self.assertRaises(pu.ProcessExecutionError, r.raise_for_status)
self.assertEqual(r.exit_code, 1) | none | 1 | 2.478119 | 2 | |
Python/Dynamic_Programming/fibonacci_best.py | belikesayantan/DSA | 1 | 6618526 | <filename>Python/Dynamic_Programming/fibonacci_best.py
# Calculating Nth Fibonacci Number (Best Method)
# Time -> O(N)
# Space -> O(3)
def fibonacci_best(n: int) -> int:
val1, val2 = 0, 1
for _ in range(n):
val3 = 0
val2 += val1
val3 += val1
val1 = val2
val2 = val3
print(val1)
if __name__ == '__main__':
fibonacci_best(10)
| <filename>Python/Dynamic_Programming/fibonacci_best.py
# Calculating Nth Fibonacci Number (Best Method)
# Time -> O(N)
# Space -> O(3)
def fibonacci_best(n: int) -> int:
val1, val2 = 0, 1
for _ in range(n):
val3 = 0
val2 += val1
val3 += val1
val1 = val2
val2 = val3
print(val1)
if __name__ == '__main__':
fibonacci_best(10)
| en | 0.5606 | # Calculating Nth Fibonacci Number (Best Method) # Time -> O(N) # Space -> O(3) | 4.266778 | 4 |
dumpdata.py | udoewich/jvcprojectortools | 12 | 6618527 | <reponame>udoewich/jvcprojectortools
#!/usr/bin/env python3
"""Dump formatted data with limited number of items per line"""
import itertools
def dumpdata(prefix, formatstr, data, limit=32):
"""Dump formatted data with limited number of items per line"""
i = iter(data)
line = list(itertools.islice(i, limit))
if not line:
print(prefix, 'No data')
while line:
print(prefix, ' '.join(formatstr.format(c) for c in line))
line = list(itertools.islice(i, limit))
prefix = ' ' * len(prefix)
if __name__ == "__main__":
dumpdata('test 1-50:', '{:2d}', range(50), limit=10)
dumpdata('test no data:', '{:2d}', range(0), limit=10)
| #!/usr/bin/env python3
"""Dump formatted data with limited number of items per line"""
import itertools
def dumpdata(prefix, formatstr, data, limit=32):
"""Dump formatted data with limited number of items per line"""
i = iter(data)
line = list(itertools.islice(i, limit))
if not line:
print(prefix, 'No data')
while line:
print(prefix, ' '.join(formatstr.format(c) for c in line))
line = list(itertools.islice(i, limit))
prefix = ' ' * len(prefix)
if __name__ == "__main__":
dumpdata('test 1-50:', '{:2d}', range(50), limit=10)
dumpdata('test no data:', '{:2d}', range(0), limit=10) | en | 0.620966 | #!/usr/bin/env python3 Dump formatted data with limited number of items per line Dump formatted data with limited number of items per line | 3.410976 | 3 |
bin/get_ig.py | elleryq/oh-my-home | 0 | 6618528 | <reponame>elleryq/oh-my-home<filename>bin/get_ig.py<gh_stars>0
#!/usr/bin/env python
import os
import sys
import requests
from urlparse import urlparse
from pyquery import PyQuery
def get_ig(url):
resp = requests.get(url)
# print(req.content)
pq = PyQuery(resp.content)
img = pq('meta[property="og:image"]')
img_url = img.attr("content")
if not img_url:
print("og:image not found.")
return
pr = urlparse(img_url)
filename = os.path.basename(pr.path)
with open(filename, "wb") as fout:
resp = requests.get(img_url, stream=True)
if not resp.ok:
print("Download fail.")
return
for block in resp.iter_content(1024):
fout.write(block)
def main():
for url in sys.argv[1:]:
get_ig(url)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
import os
import sys
import requests
from urlparse import urlparse
from pyquery import PyQuery
def get_ig(url):
resp = requests.get(url)
# print(req.content)
pq = PyQuery(resp.content)
img = pq('meta[property="og:image"]')
img_url = img.attr("content")
if not img_url:
print("og:image not found.")
return
pr = urlparse(img_url)
filename = os.path.basename(pr.path)
with open(filename, "wb") as fout:
resp = requests.get(img_url, stream=True)
if not resp.ok:
print("Download fail.")
return
for block in resp.iter_content(1024):
fout.write(block)
def main():
for url in sys.argv[1:]:
get_ig(url)
if __name__ == "__main__":
main() | en | 0.523474 | #!/usr/bin/env python # print(req.content) | 2.946378 | 3 |
gym_round_bot/envs/test_model.py | robotsthatdream/gym-round_bot | 2 | 6618529 | <filename>gym_round_bot/envs/test_model.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" <NAME>
ISIR - CNRS / Sorbonne Université
02/2018
Small script for testing and understanding the model and windows (no gym env involved here)
"""
import round_bot_model
import round_bot_window
if __name__ == '__main__':
#world_name = 'square_1wall'
world_name = 'square'
world = {'name':world_name,'size':[45,45]}
winsize=[600,600]
model = round_bot_model.Model(world=world,texture='colours',distractors=False)
window = round_bot_window.MainWindow(
model,
#global_pov=(0,20,0),
global_pov=False,
perspective=True,
interactive=True,
width=winsize[0],
height=winsize[1],
caption='Round bot in '+world['name']+' world',
resizable=False,
visible=True,
)
secwindow = round_bot_window.SecondaryWindow(
model,
global_pov=True,#None,
perspective=False,
width=winsize[0],
height=winsize[1],
caption='Observation window '+world['name'],
visible=True
)
window.add_follower(secwindow)
window.start()
| <filename>gym_round_bot/envs/test_model.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" <NAME>
ISIR - CNRS / Sorbonne Université
02/2018
Small script for testing and understanding the model and windows (no gym env involved here)
"""
import round_bot_model
import round_bot_window
if __name__ == '__main__':
#world_name = 'square_1wall'
world_name = 'square'
world = {'name':world_name,'size':[45,45]}
winsize=[600,600]
model = round_bot_model.Model(world=world,texture='colours',distractors=False)
window = round_bot_window.MainWindow(
model,
#global_pov=(0,20,0),
global_pov=False,
perspective=True,
interactive=True,
width=winsize[0],
height=winsize[1],
caption='Round bot in '+world['name']+' world',
resizable=False,
visible=True,
)
secwindow = round_bot_window.SecondaryWindow(
model,
global_pov=True,#None,
perspective=False,
width=winsize[0],
height=winsize[1],
caption='Observation window '+world['name'],
visible=True
)
window.add_follower(secwindow)
window.start()
| en | 0.614704 | #!/usr/bin/python # -*- coding: utf-8 -*- <NAME> ISIR - CNRS / Sorbonne Université 02/2018 Small script for testing and understanding the model and windows (no gym env involved here) #world_name = 'square_1wall' #global_pov=(0,20,0), #None, | 2.292629 | 2 |
openet/ssebop/image.py | spizwhiz/openet-ssebop-beta | 2 | 6618530 | <reponame>spizwhiz/openet-ssebop-beta<gh_stars>1-10
import datetime
import pprint
import ee
from . import utils
import openet.core.common as common
# TODO: import utils from common
# import openet.core.utils as utils
def lazy_property(fn):
    """Decorator that turns a method into a lazily-evaluated property.

    The wrapped method runs at most once per instance; the result is
    cached on the instance under a mangled attribute name and returned
    directly on every subsequent access.

    https://stevenloria.com/lazy-properties/
    """
    cache_attr = '_lazy_' + fn.__name__

    def getter(self):
        # Compute and stash the value on the first access only (EAFP)
        try:
            return getattr(self, cache_attr)
        except AttributeError:
            value = fn(self)
            setattr(self, cache_attr, value)
            return value

    return property(getter)
class Image():
"""Earth Engine based SSEBop Image"""
def __init__(
self, image,
etr_source=None,
etr_band=None,
etr_factor=1.0,
dt_source='DAYMET_MEDIAN_V1',
elev_source='SRTM',
tcorr_source='IMAGE',
tmax_source='TOPOWX_MEDIAN_V0',
elr_flag=False,
tdiff_threshold=15,
dt_min=6,
dt_max=25,
):
"""Construct a generic SSEBop Image
Parameters
----------
image : ee.Image
A "prepped" SSEBop input image.
Image must have bands "ndvi" and "lst".
Image must have 'system:index' and 'system:time_start' properties.
etr_source : str, float, optional
Reference ET source (the default is 'IDAHO_EPSCOR/GRIDMET').
etr_band : str, optional
Reference ET band name (the default is 'etr').
etr_factor : float, optional
Reference ET scaling factor (the default is 1.0).
dt_source : {'DAYMET_MEDIAN_V0', 'DAYMET_MEDIAN_V1', or float}, optional
dT source keyword (the default is 'DAYMET_MEDIAN_V1').
elev_source : {'ASSET', 'GTOPO', 'NED', 'SRTM', or float}, optional
Elevation source keyword (the default is 'SRTM').
tcorr_source : {'FEATURE', 'FEATURE_MONTHLY', 'FEATURE_ANNUAL',
'IMAGE', 'IMAGE_DAILY', 'IMAGE_MONTHLY',
'IMAGE_ANNUAL', 'IMAGE_DEFAULT', or float}, optional
Tcorr source keyword (the default is 'IMAGE').
tmax_source : {'CIMIS', 'DAYMET', 'GRIDMET', 'CIMIS_MEDIAN_V1',
'DAYMET_MEDIAN_V1', 'GRIDMET_MEDIAN_V1',
'TOPOWX_MEDIAN_V0', or float}, optional
Maximum air temperature source (the default is 'TOPOWX_MEDIAN_V0').
elr_flag : bool, str, optional
If True, apply Elevation Lapse Rate (ELR) adjustment
(the default is False).
tdiff_threshold : float, optional
Cloud mask buffer using Tdiff [K] (the default is 15).
Pixels with (Tmax - LST) > Tdiff threshold will be masked.
dt_min : float, optional
Minimum allowable dT [K] (the default is 6).
dt_max : float, optional
Maximum allowable dT [K] (the default is 25).
Notes
-----
Input image must have a Landsat style 'system:index' in order to
lookup Tcorr value from table asset. (i.e. LC08_043033_20150805)
"""
self.image = ee.Image(image)
# Set as "lazy_property" below in order to return custom properties
# self.lst = self.image.select('lst')
# self.ndvi = self.image.select('ndvi')
# Copy system properties
self._id = self.image.get('system:id')
self._index = self.image.get('system:index')
self._time_start = self.image.get('system:time_start')
self._properties = {
'system:index': self._index,
'system:time_start': self._time_start,
'image_id': self._id,
}
# Build SCENE_ID from the (possibly merged) system:index
scene_id = ee.List(ee.String(self._index).split('_')).slice(-3)
self._scene_id = ee.String(scene_id.get(0)).cat('_')\
.cat(ee.String(scene_id.get(1))).cat('_')\
.cat(ee.String(scene_id.get(2)))
# Build WRS2_TILE from the scene_id
self._wrs2_tile = ee.String('p').cat(self._scene_id.slice(5, 8))\
.cat('r').cat(self._scene_id.slice(8, 11))
# Set server side date/time properties using the 'system:time_start'
self._date = ee.Date(self._time_start)
self._year = ee.Number(self._date.get('year'))
self._month = ee.Number(self._date.get('month'))
self._start_date = ee.Date(utils.date_to_time_0utc(self._date))
self._end_date = self._start_date.advance(1, 'day')
self._doy = ee.Number(self._date.getRelative('day', 'year')).add(1).int()
self._cycle_day = self._start_date.difference(
ee.Date.fromYMD(1970, 1, 3), 'day').mod(8).add(1).int()
#
self.etr_source = etr_source
self.etr_band = etr_band
self.etr_factor = etr_factor
# Model input parameters
self._dt_source = dt_source
self._elev_source = elev_source
self._tcorr_source = tcorr_source
self._tmax_source = tmax_source
self._elr_flag = elr_flag
self._tdiff_threshold = float(tdiff_threshold)
self._dt_min = float(dt_min)
self._dt_max = float(dt_max)
# Convert elr_flag from string to bool if necessary
if type(self._elr_flag) is str:
if self._elr_flag.upper() in ['TRUE']:
self._elr_flag = True
elif self._elr_flag.upper() in ['FALSE']:
self._elr_flag = False
else:
raise ValueError('elr_flag "{}" could not be interpreted as '
'bool'.format(self._elr_flag))
def calculate(self, variables=['et', 'etr', 'etf']):
"""Return a multiband image of calculated variables
Parameters
----------
variables : list
Returns
-------
ee.Image
"""
output_images = []
for v in variables:
if v.lower() == 'et':
output_images.append(self.et)
elif v.lower() == 'etf':
output_images.append(self.etf)
elif v.lower() == 'etr':
output_images.append(self.etr)
elif v.lower() == 'lst':
output_images.append(self.lst)
elif v.lower() == 'mask':
output_images.append(self.mask)
elif v.lower() == 'ndvi':
output_images.append(self.ndvi)
# elif v.lower() == 'qa':
# output_images.append(self.qa)
elif v.lower() == 'quality':
output_images.append(self.quality)
elif v.lower() == 'time':
output_images.append(self.time)
else:
raise ValueError('unsupported variable: {}'.format(v))
return ee.Image(output_images).set(self._properties)
@lazy_property
def lst(self):
"""Return land surface temperature (LST) image"""
return self.image.select(['lst']).set(self._properties).double()
@lazy_property
def ndvi(self):
"""Return NDVI image"""
return self.image.select(['ndvi']).set(self._properties).double()
    @lazy_property
    def etf(self):
        """Compute SSEBop ETf for a single image

        Returns
        -------
        ee.Image

        Notes
        -----
        Apply Tdiff cloud mask buffer (mask values of 0 are set to nodata)

        """
        # Get input images and ancillary data needed to compute SSEBop ETf
        lst = ee.Image(self.lst)
        tcorr, tcorr_index = self._tcorr
        tmax = ee.Image(self._tmax)
        dt = ee.Image(self._dt)

        # Adjust air temperature based on elevation (Elevation Lapse Rate)
        if self._elr_flag:
            tmax = ee.Image(self._lapse_adjust(tmax, ee.Image(self._elev)))

        # Compute SSEBop ETf
        # ETf = 1 when LST equals the cold reference (Tmax * Tcorr) and
        # ETf = 0 when LST equals the hot reference (Tmax * Tcorr + dT)
        etf = lst.expression(
            '(lst * (-1) + tmax * tcorr + dt) / dt',
            {'tmax': tmax, 'dt': dt, 'lst': lst, 'tcorr': tcorr})
        # Mask ETf >= 1.3, clamp remaining values to [0, 1.05], then apply
        # the Tdiff cloud buffer (mask pixels with Tmax - LST > threshold)
        etf = etf.updateMask(etf.lt(1.3))\
            .clamp(0, 1.05)\
            .updateMask(tmax.subtract(lst).lte(self._tdiff_threshold))\
            .set(self._properties).rename(['etf']).double()

        # Don't set TCORR and INDEX properties for IMAGE Tcorr sources
        # (presumably because per-pixel image based Tcorr has no single
        # scalar value to record -- confirm against _tcorr implementation)
        if (type(self._tcorr_source) is str and
                'IMAGE' not in self._tcorr_source.upper()):
            etf = etf.set({'tcorr': tcorr, 'tcorr_index': tcorr_index})

        return etf
    @lazy_property
    def etr(self):
        """Compute reference ET for the image date

        Returns
        -------
        ee.Image
            Reference ET scaled by ``self.etr_factor`` and mapped onto the
            input image pixels (same projection/geometry as the input).

        Raises
        ------
        ValueError
            If ``self.etr_source`` is not a number or a string
            (interpreted as an image collection ID).

        """
        if utils.is_number(self.etr_source):
            # Interpret numbers as constant images
            # CGM - Should we use the ee_types here instead?
            #   i.e. ee.ee_types.isNumber(self.etr_source)
            etr_img = ee.Image.constant(self.etr_source)
        elif type(self.etr_source) is str:
            # Assume a string source is an image collection ID (not an image ID)
            # Select the single daily image overlapping the scene date
            etr_img = ee.Image(
                ee.ImageCollection(self.etr_source)\
                    .filterDate(self._start_date, self._end_date)\
                    .select([self.etr_band])\
                    .first())
        # elif type(self.etr_source) is list:
        #     # Interpret as list of image collection IDs to composite/mosaic
        #     #   i.e. Spatial CIMIS and GRIDMET
        #     # CGM - Need to check the order of the collections
        #     etr_coll = ee.ImageCollection([])
        #     for coll_id in self.etr_source:
        #         coll = ee.ImageCollection(coll_id)\
        #             .select([self.etr_band])\
        #             .filterDate(self.start_date, self.end_date)
        #         etr_img = etr_coll.merge(coll)
        #     etr_img = etr_coll.mosaic()
        # elif isinstance(self.etr_source, computedobject.ComputedObject):
        #     # Interpret computed objects as image collections
        #     etr_coll = ee.ImageCollection(self.etr_source)\
        #         .select([self.etr_band])\
        #         .filterDate(self.start_date, self.end_date)
        else:
            raise ValueError('unsupported etr_source: {}'.format(
                self.etr_source))

        # Map ETr values directly to the input (i.e. Landsat) image pixels
        # The benefit of this is the ETr image is now in the same crs as the
        #   input image.  Not all models may want this though.
        # CGM - Should the output band name match the input ETr band name?
        # (self.ndvi.multiply(0) is a zero image on the input grid)
        return self.ndvi.multiply(0).add(etr_img)\
            .multiply(self.etr_factor)\
            .rename(['etr']).set(self._properties)
@lazy_property
def et(self):
"""Compute actual ET as fraction of reference times reference"""
return self.etf.multiply(self.etr)\
.rename(['et']).set(self._properties).double()
@lazy_property
def mask(self):
"""Mask of all active pixels (based on the final etf)"""
return self.etf.multiply(0).add(1).updateMask(1)\
.rename(['mask']).set(self._properties).uint8()
    @lazy_property
    def quality(self):
        """Set quality to 1 for all active pixels (for now)"""
        # NOTE(review): tcorr/tcorr_index are not used in the returned image,
        #   but reading self._tcorr here appears to trigger the tcorr_source
        #   validation (it can raise for unsupported sources) -- confirm
        #   before removing this line.
        tcorr, tcorr_index = self._tcorr
        return self.mask\
            .rename(['quality']).set(self._properties)
@lazy_property
def time(self):
"""Return an image of the 0 UTC time (in milliseconds)"""
return self.mask\
.double().multiply(0).add(utils.date_to_time_0utc(self._date))\
.rename(['time']).set(self._properties)
# return ee.Image.constant(utils.date_to_time_0utc(self._date))\
# .double().rename(['time']).set(self._properties)
@lazy_property
def _dt(self):
"""
Returns
-------
ee.Image
Raises
------
ValueError
If `self._dt_source` is not supported.
"""
if utils.is_number(self._dt_source):
dt_img = ee.Image.constant(float(self._dt_source))
elif self._dt_source.upper() == 'DAYMET_MEDIAN_V0':
dt_coll = ee.ImageCollection('projects/usgs-ssebop/dt/daymet_median_v0')\
.filter(ee.Filter.calendarRange(self._doy, self._doy, 'day_of_year'))
dt_img = ee.Image(dt_coll.first())
elif self._dt_source.upper() == 'DAYMET_MEDIAN_V1':
dt_coll = ee.ImageCollection('projects/usgs-ssebop/dt/daymet_median_v1')\
.filter(ee.Filter.calendarRange(self._doy, self._doy, 'day_of_year'))
dt_img = ee.Image(dt_coll.first())
else:
raise ValueError('Invalid dt_source: {}\n'.format(self._dt_source))
return dt_img.clamp(self._dt_min, self._dt_max).rename('dt')
@lazy_property
def _elev(self):
"""
Returns
-------
ee.Image
Raises
------
ValueError
If `self._elev_source` is not supported.
"""
if utils.is_number(self._elev_source):
elev_image = ee.Image.constant(float(self._elev_source))
elif self._elev_source.upper() == 'ASSET':
elev_image = ee.Image('projects/usgs-ssebop/srtm_1km')
elif self._elev_source.upper() == 'GTOPO':
elev_image = ee.Image('USGS/GTOPO30')
elif self._elev_source.upper() == 'NED':
elev_image = ee.Image('USGS/NED')
elif self._elev_source.upper() == 'SRTM':
elev_image = ee.Image('USGS/SRTMGL1_003')
elif (self._elev_source.lower().startswith('projects/') or
self._elev_source.lower().startswith('users/')):
elev_image = ee.Image(self._elev_source)
else:
raise ValueError('Unsupported elev_source: {}\n'.format(
self._elev_source))
return elev_image.select([0], ['elev'])
    @lazy_property
    def _tcorr(self):
        """Get Tcorr from pre-computed assets for each Tmax source

        Returns
        -------
        tuple
            (tcorr, tcorr_index) as ee.Number objects for constant/FEATURE
            sources, or as ee.Image objects for IMAGE sources.

        Raises
        ------
        ValueError
            If `self._tcorr_source` or the `self._tmax_source` keyword is
            not supported.

        Notes
        -----
        Tcorr Index values indicate which level of Tcorr was used
          0 - Scene specific Tcorr
          1 - Mean monthly Tcorr per WRS2 tile
          2 - Mean annual Tcorr per WRS2 tile
              Annuals don't exist for feature Tcorr assets (yet)
          3 - Default Tcorr
          4 - User defined Tcorr
        """
        # month_field = ee.String('M').cat(ee.Number(self.month).format('%02d'))

        # Case 1: user supplied a constant Tcorr value (index 4)
        if utils.is_number(self._tcorr_source):
            tcorr = ee.Number(float(self._tcorr_source))
            tcorr_index = ee.Number(4)
            return tcorr, tcorr_index

        # Case 2: Tcorr from pre-computed feature collections, falling back
        #   from scene -> month -> default via the INDEX sort below
        # DEADBEEF - Leaving 'SCENE' checking to be backwards compatible (for now)
        elif ('FEATURE' in self._tcorr_source.upper() or
                self._tcorr_source.upper() == 'SCENE'):
            # Lookup Tcorr collections by keyword value
            scene_coll_dict = {
                'CIMIS': 'projects/usgs-ssebop/tcorr/cimis_scene',
                'DAYMET': 'projects/usgs-ssebop/tcorr/daymet_scene',
                'GRIDMET': 'projects/usgs-ssebop/tcorr/gridmet_scene',
                # 'TOPOWX': 'projects/usgs-ssebop/tcorr/topowx_scene',
                'CIMIS_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/cimis_median_v1_scene',
                'DAYMET_MEDIAN_V0': 'projects/usgs-ssebop/tcorr/daymet_median_v0_scene',
                'DAYMET_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/daymet_median_v1_scene',
                'GRIDMET_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/gridmet_median_v1_scene',
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr/topowx_median_v0_scene',
                'TOPOWX_MEDIAN_V0B': 'projects/usgs-ssebop/tcorr/topowx_median_v0b_scene',
            }
            month_coll_dict = {
                'CIMIS': 'projects/usgs-ssebop/tcorr/cimis_monthly',
                'DAYMET': 'projects/usgs-ssebop/tcorr/daymet_monthly',
                'GRIDMET': 'projects/usgs-ssebop/tcorr/gridmet_monthly',
                # 'TOPOWX': 'projects/usgs-ssebop/tcorr/topowx_monthly',
                'CIMIS_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/cimis_median_v1_monthly',
                'DAYMET_MEDIAN_V0': 'projects/usgs-ssebop/tcorr/daymet_median_v0_monthly',
                'DAYMET_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/daymet_median_v1_monthly',
                'GRIDMET_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/gridmet_median_v1_monthly',
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr/topowx_median_v0_monthly',
                'TOPOWX_MEDIAN_V0B': 'projects/usgs-ssebop/tcorr/topowx_median_v0b_monthly',
            }
            # annual_coll_dict = {}
            default_value_dict = {
                'CIMIS': 0.978,
                'DAYMET': 0.978,
                'GRIDMET': 0.978,
                'TOPOWX': 0.978,
                'CIMIS_MEDIAN_V1': 0.978,
                'DAYMET_MEDIAN_V0': 0.978,
                'DAYMET_MEDIAN_V1': 0.978,
                'GRIDMET_MEDIAN_V1': 0.978,
                'TOPOWX_MEDIAN_V0': 0.978,
                'TOPOWX_MEDIAN_V0B': 0.978,
            }

            # Check Tmax source value
            tmax_key = self._tmax_source.upper()
            if tmax_key not in default_value_dict.keys():
                raise ValueError(
                    '\nInvalid tmax_source for tcorr: {} / {}\n'.format(
                        self._tcorr_source, self._tmax_source))

            # Client-side fallback feature carrying the default Tcorr (index 3)
            default_coll = ee.FeatureCollection([
                ee.Feature(None, {'INDEX': 3, 'TCORR': default_value_dict[tmax_key]})])
            month_coll = ee.FeatureCollection(month_coll_dict[tmax_key])\
                .filterMetadata('WRS2_TILE', 'equals', self._wrs2_tile)\
                .filterMetadata('MONTH', 'equals', self._month)
            if self._tcorr_source.upper() in ['FEATURE', 'SCENE']:
                scene_coll = ee.FeatureCollection(scene_coll_dict[tmax_key])\
                    .filterMetadata('SCENE_ID', 'equals', self._scene_id)
                tcorr_coll = ee.FeatureCollection(
                    default_coll.merge(month_coll).merge(scene_coll)).sort('INDEX')
            elif 'MONTH' in self._tcorr_source.upper():
                tcorr_coll = ee.FeatureCollection(
                    default_coll.merge(month_coll)).sort('INDEX')
            else:
                raise ValueError(
                    'Invalid tcorr_source: {} / {}\n'.format(
                        self._tcorr_source, self._tmax_source))

            # Sorting by INDEX and taking the first feature selects the most
            #   specific Tcorr that exists (scene before month before default)
            tcorr_ftr = ee.Feature(tcorr_coll.first())
            tcorr = ee.Number(tcorr_ftr.get('TCORR'))
            tcorr_index = ee.Number(tcorr_ftr.get('INDEX'))
            return tcorr, tcorr_index

        # Case 3: Tcorr from pre-computed image assets
        elif 'IMAGE' in self._tcorr_source.upper():
            # Lookup Tcorr collections by keyword value
            daily_dict = {
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr_image/topowx_median_v0_daily'
            }
            month_dict = {
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr_image/topowx_median_v0_monthly',
            }
            annual_dict = {
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr_image/topowx_median_v0_annual',
            }
            default_dict = {
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr_image/topowx_median_v0_default'
            }

            # Check Tmax source value
            tmax_key = self._tmax_source.upper()
            if tmax_key not in default_dict.keys():
                raise ValueError(
                    '\nInvalid tmax_source: {} / {}\n'.format(
                        self._tcorr_source, self._tmax_source))
            default_img = ee.Image(default_dict[tmax_key])
            # Fully masked image used to pad empty collections before mosaic()
            mask_img = default_img.updateMask(0)

            if (self._tcorr_source.upper() == 'IMAGE' or
                    'DAILY' in self._tcorr_source.upper()):
                daily_coll = ee.ImageCollection(daily_dict[tmax_key])\
                    .filterDate(self._start_date, self._end_date)\
                    .select(['tcorr'])
                daily_coll = daily_coll.merge(ee.ImageCollection(mask_img))
                daily_img = ee.Image(daily_coll.mosaic())
                # .filterMetadata('DATE', 'equals', self._date)
            if (self._tcorr_source.upper() == 'IMAGE' or
                    'MONTH' in self._tcorr_source.upper()):
                month_coll = ee.ImageCollection(month_dict[tmax_key])\
                    .filterMetadata('CYCLE_DAY', 'equals', self._cycle_day)\
                    .filterMetadata('MONTH', 'equals', self._month)\
                    .select(['tcorr'])
                month_coll = month_coll.merge(ee.ImageCollection(mask_img))
                month_img = ee.Image(month_coll.mosaic())
            if (self._tcorr_source.upper() == 'IMAGE' or
                    'ANNUAL' in self._tcorr_source.upper()):
                annual_coll = ee.ImageCollection(annual_dict[tmax_key])\
                    .filterMetadata('CYCLE_DAY', 'equals', self._cycle_day)\
                    .select(['tcorr'])
                annual_coll = annual_coll.merge(ee.ImageCollection(mask_img))
                annual_img = ee.Image(annual_coll.mosaic())

            if self._tcorr_source.upper() == 'IMAGE':
                # Composite Tcorr images to ensure that a value is returned
                #   (even if the daily image doesn't exist)
                # Band 1 carries the index (0=daily, 1=month, 2=annual, 3=default);
                #   mosaic() keeps the last unmasked image in the list (daily first)
                composite_coll = ee.ImageCollection([
                    default_img.addBands(default_img.multiply(0).add(3).uint8()),
                    annual_img.addBands(annual_img.multiply(0).add(2).uint8()),
                    month_img.addBands(month_img.multiply(0).add(1).uint8()),
                    daily_img.addBands(daily_img.multiply(0).uint8())])
                composite_img = composite_coll.mosaic()
                tcorr_img = composite_img.select([0], ['tcorr'])
                index_img = composite_img.select([1], ['index'])
            elif 'DAILY' in self._tcorr_source.upper():
                tcorr_img = daily_img
                index_img = daily_img.multiply(0).uint8()
            elif 'MONTH' in self._tcorr_source.upper():
                tcorr_img = month_img
                index_img = month_img.multiply(0).add(1).uint8()
            elif 'ANNUAL' in self._tcorr_source.upper():
                tcorr_img = annual_img
                index_img = annual_img.multiply(0).add(2).uint8()
            elif 'DEFAULT' in self._tcorr_source.upper():
                tcorr_img = default_img
                index_img = default_img.multiply(0).add(3).uint8()
            else:
                raise ValueError(
                    'Invalid tcorr_source: {} / {}\n'.format(
                        self._tcorr_source, self._tmax_source))

            return tcorr_img, index_img.rename(['index'])

        else:
            raise ValueError('Unsupported tcorr_source: {}\n'.format(
                self._tcorr_source))
    @lazy_property
    def _tmax(self):
        """Fall back on median Tmax if daily image does not exist

        Returns
        -------
        ee.Image
            Maximum air temperature image with 'TMAX_SOURCE' and
            'TMAX_VERSION' properties set.

        Raises
        ------
        ValueError
            If `self._tmax_source` is not supported.
        """
        doy_filter = ee.Filter.calendarRange(self._doy, self._doy, 'day_of_year')
        date_today = datetime.datetime.today().strftime('%Y-%m-%d')

        if utils.is_number(self._tmax_source):
            # Interpret numbers as constant Tmax images
            tmax_image = ee.Image.constant(float(self._tmax_source))\
                .rename(['tmax'])\
                .set('TMAX_VERSION', 'CUSTOM_{}'.format(self._tmax_source))
        elif self._tmax_source.upper() == 'CIMIS':
            # Daily CIMIS Tmax with a median-asset fallback (selected server
            #   side below when the daily collection is empty)
            daily_coll = ee.ImageCollection('projects/climate-engine/cimis/daily')\
                .filterDate(self._start_date, self._end_date)\
                .select(['Tx'], ['tmax']).map(utils.c_to_k)
            daily_image = ee.Image(daily_coll.first())\
                .set('TMAX_VERSION', date_today)

            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/cimis_{}'.format(median_version))
            median_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)

            tmax_image = ee.Image(ee.Algorithms.If(
                daily_coll.size().gt(0), daily_image, median_image))
        elif self._tmax_source.upper() == 'DAYMET':
            # DAYMET does not include Dec 31st on leap years
            # Adding one extra date to end date to avoid errors
            daily_coll = ee.ImageCollection('NASA/ORNL/DAYMET_V3')\
                .filterDate(self._start_date, self._end_date.advance(1, 'day'))\
                .select(['tmax']).map(utils.c_to_k)
            daily_image = ee.Image(daily_coll.first())\
                .set('TMAX_VERSION', date_today)

            median_version = 'median_v0'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/daymet_{}'.format(median_version))
            median_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)

            tmax_image = ee.Image(ee.Algorithms.If(
                daily_coll.size().gt(0), daily_image, median_image))
        elif self._tmax_source.upper() == 'GRIDMET':
            # GRIDMET 'tmmx' is already in Kelvin (no c_to_k conversion)
            daily_coll = ee.ImageCollection('IDAHO_EPSCOR/GRIDMET')\
                .filterDate(self._start_date, self._end_date)\
                .select(['tmmx'], ['tmax'])
            daily_image = ee.Image(daily_coll.first())\
                .set('TMAX_VERSION', date_today)

            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/gridmet_{}'.format(median_version))
            median_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)

            tmax_image = ee.Image(ee.Algorithms.If(
                daily_coll.size().gt(0), daily_image, median_image))
        # elif self.tmax_source.upper() == 'TOPOWX':
        #     daily_coll = ee.ImageCollection('X')\
        #         .filterDate(self.start_date, self.end_date)\
        #         .select(['tmmx'], ['tmax'])
        #     daily_image = ee.Image(daily_coll.first())\
        #         .set('TMAX_VERSION', date_today)
        #
        #     median_version = 'median_v1'
        #     median_coll = ee.ImageCollection(
        #         'projects/usgs-ssebop/tmax/topowx_{}'.format(median_version))
        #     median_image = ee.Image(median_coll.filter(doy_filter).first())\
        #         .set('TMAX_VERSION', median_version)
        #
        #     tmax_image = ee.Image(ee.Algorithms.If(
        #         daily_coll.size().gt(0), daily_image, median_image))
        elif self._tmax_source.upper() == 'CIMIS_MEDIAN_V1':
            # Median-only sources (no daily fallback logic needed)
            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/cimis_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        elif self._tmax_source.upper() == 'DAYMET_MEDIAN_V0':
            median_version = 'median_v0'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/daymet_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        elif self._tmax_source.upper() == 'DAYMET_MEDIAN_V1':
            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/daymet_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        elif self._tmax_source.upper() == 'GRIDMET_MEDIAN_V1':
            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/gridmet_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        elif self._tmax_source.upper() == 'TOPOWX_MEDIAN_V0':
            median_version = 'median_v0'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/topowx_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        # elif self.tmax_source.upper() == 'TOPOWX_MEDIAN_V1':
        #     median_version = 'median_v1'
        #     median_coll = ee.ImageCollection(
        #         'projects/usgs-ssebop/tmax/topowx_{}'.format(median_version))
        #     tmax_image = ee.Image(median_coll.filter(doy_filter).first())
        else:
            raise ValueError('Unsupported tmax_source: {}\n'.format(
                self._tmax_source))

        return ee.Image(tmax_image.set('TMAX_SOURCE', self._tmax_source))
@classmethod
def from_image_id(cls, image_id, **kwargs):
"""Constructs an SSEBop Image instance from an image ID
Parameters
----------
image_id : str
An earth engine image ID.
(i.e. 'LANDSAT/LC08/C01/T1_SR/LC08_044033_20170716')
kwargs
Keyword arguments to pass through to model init.
Returns
-------
new instance of Image class
"""
# DEADBEEF - Should the supported image collection IDs and helper
# function mappings be set in a property or method of the Image class?
collection_methods = {
'LANDSAT/LC08/C01/T1_RT_TOA': 'from_landsat_c1_toa',
'LANDSAT/LE07/C01/T1_RT_TOA': 'from_landsat_c1_toa',
'LANDSAT/LC08/C01/T1_TOA': 'from_landsat_c1_toa',
'LANDSAT/LE07/C01/T1_TOA': 'from_landsat_c1_toa',
'LANDSAT/LT05/C01/T1_TOA': 'from_landsat_c1_toa',
# 'LANDSAT/LT04/C01/T1_TOA': 'from_landsat_c1_toa',
'LANDSAT/LC08/C01/T1_SR': 'from_landsat_c1_sr',
'LANDSAT/LE07/C01/T1_SR': 'from_landsat_c1_sr',
'LANDSAT/LT05/C01/T1_SR': 'from_landsat_c1_sr',
# 'LANDSAT/LT04/C01/T1_SR': 'from_landsat_c1_sr',
}
try:
method_name = collection_methods[image_id.rsplit('/', 1)[0]]
except KeyError:
raise ValueError('unsupported collection ID: {}'.format(image_id))
except Exception as e:
raise Exception('unhandled exception: {}'.format(e))
method = getattr(Image, method_name)
return method(ee.Image(image_id), **kwargs)
@classmethod
def from_landsat_c1_toa(cls, toa_image, cloudmask_args={}, **kwargs):
"""Returns a SSEBop Image instance from a Landsat Collection 1 TOA image
Parameters
----------
toa_image : ee.Image
A raw Landsat Collection 1 TOA image.
cloudmask_args : dict
keyword arguments to pass through to cloud mask function
kwargs : dict
Keyword arguments to pass through to Image init function
Returns
-------
Image
"""
toa_image = ee.Image(toa_image)
# Use the SPACECRAFT_ID property identify each Landsat type
spacecraft_id = ee.String(toa_image.get('SPACECRAFT_ID'))
# Rename bands to generic names
# Rename thermal band "k" coefficients to generic names
input_bands = ee.Dictionary({
# 'LANDSAT_4': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'BQA'],
'LANDSAT_5': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'BQA'],
'LANDSAT_7': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6_VCID_1',
'BQA'],
'LANDSAT_8': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'BQA']})
output_bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'lst',
'BQA']
k1 = ee.Dictionary({
# 'LANDSAT_4': 'K1_CONSTANT_BAND_6',
'LANDSAT_5': 'K1_CONSTANT_BAND_6',
'LANDSAT_7': 'K1_CONSTANT_BAND_6_VCID_1',
'LANDSAT_8': 'K1_CONSTANT_BAND_10'})
k2 = ee.Dictionary({
# 'LANDSAT_4': 'K2_CONSTANT_BAND_6',
'LANDSAT_5': 'K2_CONSTANT_BAND_6',
'LANDSAT_7': 'K2_CONSTANT_BAND_6_VCID_1',
'LANDSAT_8': 'K2_CONSTANT_BAND_10'})
prep_image = toa_image\
.select(input_bands.get(spacecraft_id), output_bands)\
.set('k1_constant', ee.Number(toa_image.get(k1.get(spacecraft_id))))\
.set('k2_constant', ee.Number(toa_image.get(k2.get(spacecraft_id))))
# Build the input image
input_image = ee.Image([cls._lst(prep_image), cls._ndvi(prep_image)])
# Apply the cloud mask and add properties
input_image = input_image\
.updateMask(common.landsat_c1_toa_cloud_mask(
toa_image, **cloudmask_args))\
.set({
'system:index': toa_image.get('system:index'),
'system:time_start': toa_image.get('system:time_start'),
'system:id': toa_image.get('system:id'),
})
# Instantiate the class
return cls(ee.Image(input_image), **kwargs)
    @classmethod
    def from_landsat_c1_sr(cls, sr_image, **kwargs):
        """Returns a SSEBop Image instance from a Landsat Collection 1 SR image

        Parameters
        ----------
        sr_image : ee.Image
            A raw Landsat Collection 1 SR image.
        kwargs : dict
            Keyword arguments to pass through to Image init function

        Returns
        -------
        Image

        """
        sr_image = ee.Image(sr_image)

        # Use the SATELLITE property identify each Landsat type
        spacecraft_id = ee.String(sr_image.get('SATELLITE'))

        # Rename bands to generic names
        # Rename thermal band "k" coefficients to generic names
        input_bands = ee.Dictionary({
            'LANDSAT_5': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'pixel_qa'],
            'LANDSAT_7': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'pixel_qa'],
            'LANDSAT_8': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10',
                          'pixel_qa']})
        output_bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'lst',
                        'pixel_qa']
        # TODO: Follow up with Simon about adding K1/K2 to SR collection
        # Hardcode values for now
        # (SR assets don't carry the K1/K2 thermal properties, so the
        #   sensor-specific constants are embedded here)
        k1 = ee.Dictionary({
            # 'LANDSAT_4': 607.76,
            'LANDSAT_5': 607.76, 'LANDSAT_7': 666.09, 'LANDSAT_8': 774.8853})
        k2 = ee.Dictionary({
            # 'LANDSAT_4': 1260.56,
            'LANDSAT_5': 1260.56, 'LANDSAT_7': 1282.71, 'LANDSAT_8': 1321.0789})
        prep_image = sr_image\
            .select(input_bands.get(spacecraft_id), output_bands)\
            .set('k1_constant', ee.Number(k1.get(spacecraft_id)))\
            .set('k2_constant', ee.Number(k2.get(spacecraft_id)))
        # k1 = ee.Dictionary({
        #     # 'LANDSAT_4': 'K1_CONSTANT_BAND_6',
        #     'LANDSAT_5': 'K1_CONSTANT_BAND_6',
        #     'LANDSAT_7': 'K1_CONSTANT_BAND_6_VCID_1',
        #     'LANDSAT_8': 'K1_CONSTANT_BAND_10'})
        # k2 = ee.Dictionary({
        #     # 'LANDSAT_4': 'K2_CONSTANT_BAND_6',
        #     'LANDSAT_5': 'K2_CONSTANT_BAND_6',
        #     'LANDSAT_7': 'K2_CONSTANT_BAND_6_VCID_1',
        #     'LANDSAT_8': 'K2_CONSTANT_BAND_10'})
        # prep_image = sr_image\
        #     .select(input_bands.get(spacecraft_id), output_bands)\
        #     .set('k1_constant', ee.Number(sr_image.get(k1.get(spacecraft_id))))\
        #     .set('k2_constant', ee.Number(sr_image.get(k2.get(spacecraft_id))))

        # Build the input image
        input_image = ee.Image([cls._lst(prep_image), cls._ndvi(prep_image)])

        # Apply the cloud mask and add properties
        input_image = input_image\
            .updateMask(common.landsat_c1_sr_cloud_mask(sr_image))\
            .set({
                'system:index': sr_image.get('system:index'),
                'system:time_start': sr_image.get('system:time_start'),
                'system:id': sr_image.get('system:id'),
            })

        # Instantiate the class
        return cls(input_image, **kwargs)
@staticmethod
def _ndvi(toa_image):
"""Compute NDVI
Parameters
----------
toa_image : ee.Image
Renamed TOA image with 'nir' and 'red bands.
Returns
-------
ee.Image
"""
return ee.Image(toa_image).normalizedDifference(['nir', 'red'])\
.rename(['ndvi'])
    @staticmethod
    def _lst(toa_image):
        """Compute emissivity corrected land surface temperature (LST)
        from brightness temperature.

        Parameters
        ----------
        toa_image : ee.Image
            Renamed TOA image with 'red', 'nir', and 'lst' bands.
            Image must also have 'k1_constant' and 'k2_constant' properties.

        Returns
        -------
        ee.Image
            Emissivity corrected LST in a band named 'lst'.

        Notes
        -----
        The corrected radiation coefficients were derived from a small number
        of scenes in southern Idaho [Allen2007] and may not be appropriate for
        other areas.

        References
        ----------
        .. [Allen2007] <NAME>, <NAME>, <NAME> (2007),
           Satellite-Based Energy Balance for Mapping Evapotranspiration with
           Internalized Calibration (METRIC) Model,
           Journal of Irrigation and Drainage Engineering, Vol 133(4),
           http://dx.doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)

        """
        # Get properties from image
        k1 = ee.Number(ee.Image(toa_image).get('k1_constant'))
        k2 = ee.Number(ee.Image(toa_image).get('k2_constant'))

        ts_brightness = ee.Image(toa_image).select(['lst'])
        emissivity = Image._emissivity(toa_image)

        # First back out radiance from brightness temperature
        # Then recalculate emissivity corrected Ts
        thermal_rad_toa = ts_brightness.expression(
            'k1 / (exp(k2 / ts_brightness) - 1)',
            {'ts_brightness': ts_brightness, 'k1': k1, 'k2': k2})

        # Correct the at-sensor radiance for atmosphere and sky irradiance
        # tnb = 0.866   # narrow band transmissivity of air
        # rp = 0.91     # path radiance
        # rsky = 1.32   # narrow band clear sky downward thermal radiation
        rc = thermal_rad_toa.expression(
            '((thermal_rad_toa - rp) / tnb) - ((1. - emiss) * rsky)',
            {
                'thermal_rad_toa': thermal_rad_toa,
                'emiss': emissivity,
                'rp': 0.91, 'tnb': 0.866, 'rsky': 1.32})
        # Invert back to temperature using the corrected radiance
        lst = rc.expression(
            'k2 / log(emiss * k1 / rc + 1)',
            {'emiss': emissivity, 'rc': rc, 'k1': k1, 'k2': k2})

        return lst.rename(['lst'])
    @staticmethod
    def _emissivity(toa_image):
        """Compute emissivity as a function of NDVI

        Parameters
        ----------
        toa_image : ee.Image
            Renamed TOA image with 'nir' and 'red' bands (passed to _ndvi).

        Returns
        -------
        ee.Image
            Emissivity in a band named 'emissivity', clamped to
            [0.977, 0.99].

        """
        ndvi = Image._ndvi(toa_image)
        # Fractional vegetation cover term (only applied for 0.2<=NDVI<=0.5
        #   via the where() call below)
        Pv = ndvi.expression(
            '((ndvi - 0.2) / 0.3) ** 2', {'ndvi': ndvi})
        # ndviRangevalue = ndvi_image.where(
        #     ndvi_image.gte(0.2).And(ndvi_image.lte(0.5)), ndvi_image)
        # Pv = ndviRangevalue.expression(
        #     '(((ndviRangevalue - 0.2)/0.3)**2',{'ndviRangevalue':ndviRangevalue})

        # Assuming typical Soil Emissivity of 0.97 and Veg Emissivity of 0.99
        #   and shape Factor mean value of 0.553
        dE = Pv.expression(
            '(((1 - 0.97) * (1 - Pv)) * (0.55 * 0.99))', {'Pv': Pv})
        RangeEmiss = dE.expression(
            '((0.99 * Pv) + (0.97 * (1 - Pv)) + dE)', {'Pv': Pv, 'dE': dE})

        # RangeEmiss = 0.989 # dE.expression(
        #     '((0.99*Pv)+(0.97 *(1-Pv))+dE)',{'Pv':Pv, 'dE':dE})

        # Piecewise emissivity by NDVI range; where() calls are applied in
        #   order, so a later condition overrides an earlier overlapping one
        emissivity = ndvi\
            .where(ndvi.lt(0), 0.985)\
            .where(ndvi.gte(0).And(ndvi.lt(0.2)), 0.977)\
            .where(ndvi.gt(0.5), 0.99)\
            .where(ndvi.gte(0.2).And(ndvi.lte(0.5)), RangeEmiss)
        emissivity = emissivity.clamp(0.977, 0.99)

        return emissivity.select([0], ['emissivity'])
@staticmethod
def _lapse_adjust(temperature, elev, lapse_threshold=1500):
"""Compute Elevation Lapse Rate (ELR) adjusted temperature
Parameters
----------
temperature : ee.Image
Temperature [K].
elev : ee.Image
Elevation [m].
lapse_threshold : float
Minimum elevation to adjust temperature [m] (the default is 1500).
Returns
-------
ee.Image of adjusted temperature
"""
elr_adjust = ee.Image(temperature).expression(
'(temperature - (0.003 * (elev - threshold)))',
{
'temperature': temperature, 'elev': elev,
'threshold': lapse_threshold
})
return ee.Image(temperature).where(elev.gt(lapse_threshold), elr_adjust)
@lazy_property
def tcorr_image(self):
"""Compute Tcorr for the current image
Apply Tdiff cloud mask buffer (mask values of 0 are set to nodata)
"""
lst = ee.Image(self.lst)
ndvi = ee.Image(self.ndvi)
tmax = ee.Image(self._tmax)
# Compute tcorr
tcorr = lst.divide(tmax)
# Remove low LST and low NDVI
tcorr_mask = lst.gt(270).And(ndvi.gt(0.7))
# Filter extreme Tdiff values
tdiff = tmax.subtract(lst)
tcorr_mask = tcorr_mask.And(
tdiff.gt(0).And(tdiff.lte(self._tdiff_threshold)))
return tcorr.updateMask(tcorr_mask).rename(['tcorr'])\
.set({'system:index': self._index,
'system:time_start': self._time_start,
'TMAX_SOURCE': tmax.get('TMAX_SOURCE'),
'TMAX_VERSION': tmax.get('TMAX_VERSION')})
    @lazy_property
    def tcorr_stats(self):
        """Compute the Tcorr 5th percentile and count statistics

        Returns
        -------
        ee.Dictionary
            Output of reduceRegion with the percentile and count reducers.
        """
        # Reduce on the native grid of the first input band
        image_proj = self.image.select([0]).projection()
        image_crs = image_proj.crs()
        image_geo = ee.List(ee.Dictionary(
            ee.Algorithms.Describe(image_proj)).get('transform'))
        # image_shape = ee.List(ee.Dictionary(ee.List(ee.Dictionary(
        #     ee.Algorithms.Describe(self.image)).get('bands')).get(0)).get('dimensions'))
        # print(image_shape.getInfo())
        # print(image_crs.getInfo())
        # print(image_geo.getInfo())
        return ee.Image(self.tcorr_image).reduceRegion(
            reducer=ee.Reducer.percentile([5]).combine(ee.Reducer.count(), '', True),
            crs=image_crs,
            crsTransform=image_geo,
            # Buffer the footprint slightly so edge pixels are included
            geometry=ee.Image(self.image).geometry().buffer(1000),
            bestEffort=False,
            maxPixels=2*10000*10000,
            tileScale=1)
| import datetime
import pprint
import ee
from . import utils
import openet.core.common as common
# TODO: import utils from common
# import openet.core.utils as utils
def lazy_property(fn):
    """Decorator that makes a property lazy-evaluated

    The wrapped function is run at most once per instance; its result is
    cached on the instance under a private attribute and returned on every
    subsequent access.

    https://stevenloria.com/lazy-properties/
    """
    cache_name = '_lazy_' + fn.__name__

    @property
    def wrapper(self):
        try:
            return getattr(self, cache_name)
        except AttributeError:
            value = fn(self)
            setattr(self, cache_name, value)
            return value
    return wrapper
class Image():
"""Earth Engine based SSEBop Image"""
def __init__(
self, image,
etr_source=None,
etr_band=None,
etr_factor=1.0,
dt_source='DAYMET_MEDIAN_V1',
elev_source='SRTM',
tcorr_source='IMAGE',
tmax_source='TOPOWX_MEDIAN_V0',
elr_flag=False,
tdiff_threshold=15,
dt_min=6,
dt_max=25,
):
"""Construct a generic SSEBop Image
Parameters
----------
image : ee.Image
A "prepped" SSEBop input image.
Image must have bands "ndvi" and "lst".
Image must have 'system:index' and 'system:time_start' properties.
etr_source : str, float, optional
Reference ET source (the default is 'IDAHO_EPSCOR/GRIDMET').
etr_band : str, optional
Reference ET band name (the default is 'etr').
etr_factor : float, optional
Reference ET scaling factor (the default is 1.0).
dt_source : {'DAYMET_MEDIAN_V0', 'DAYMET_MEDIAN_V1', or float}, optional
dT source keyword (the default is 'DAYMET_MEDIAN_V1').
elev_source : {'ASSET', 'GTOPO', 'NED', 'SRTM', or float}, optional
Elevation source keyword (the default is 'SRTM').
tcorr_source : {'FEATURE', 'FEATURE_MONTHLY', 'FEATURE_ANNUAL',
'IMAGE', 'IMAGE_DAILY', 'IMAGE_MONTHLY',
'IMAGE_ANNUAL', 'IMAGE_DEFAULT', or float}, optional
Tcorr source keyword (the default is 'IMAGE').
tmax_source : {'CIMIS', 'DAYMET', 'GRIDMET', 'CIMIS_MEDIAN_V1',
'DAYMET_MEDIAN_V1', 'GRIDMET_MEDIAN_V1',
'TOPOWX_MEDIAN_V0', or float}, optional
Maximum air temperature source (the default is 'TOPOWX_MEDIAN_V0').
elr_flag : bool, str, optional
If True, apply Elevation Lapse Rate (ELR) adjustment
(the default is False).
tdiff_threshold : float, optional
Cloud mask buffer using Tdiff [K] (the default is 15).
Pixels with (Tmax - LST) > Tdiff threshold will be masked.
dt_min : float, optional
Minimum allowable dT [K] (the default is 6).
dt_max : float, optional
Maximum allowable dT [K] (the default is 25).
Notes
-----
Input image must have a Landsat style 'system:index' in order to
lookup Tcorr value from table asset. (i.e. LC08_043033_20150805)
"""
self.image = ee.Image(image)
# Set as "lazy_property" below in order to return custom properties
# self.lst = self.image.select('lst')
# self.ndvi = self.image.select('ndvi')
# Copy system properties
self._id = self.image.get('system:id')
self._index = self.image.get('system:index')
self._time_start = self.image.get('system:time_start')
self._properties = {
'system:index': self._index,
'system:time_start': self._time_start,
'image_id': self._id,
}
# Build SCENE_ID from the (possibly merged) system:index
scene_id = ee.List(ee.String(self._index).split('_')).slice(-3)
self._scene_id = ee.String(scene_id.get(0)).cat('_')\
.cat(ee.String(scene_id.get(1))).cat('_')\
.cat(ee.String(scene_id.get(2)))
# Build WRS2_TILE from the scene_id
self._wrs2_tile = ee.String('p').cat(self._scene_id.slice(5, 8))\
.cat('r').cat(self._scene_id.slice(8, 11))
# Set server side date/time properties using the 'system:time_start'
self._date = ee.Date(self._time_start)
self._year = ee.Number(self._date.get('year'))
self._month = ee.Number(self._date.get('month'))
self._start_date = ee.Date(utils.date_to_time_0utc(self._date))
self._end_date = self._start_date.advance(1, 'day')
self._doy = ee.Number(self._date.getRelative('day', 'year')).add(1).int()
self._cycle_day = self._start_date.difference(
ee.Date.fromYMD(1970, 1, 3), 'day').mod(8).add(1).int()
#
self.etr_source = etr_source
self.etr_band = etr_band
self.etr_factor = etr_factor
# Model input parameters
self._dt_source = dt_source
self._elev_source = elev_source
self._tcorr_source = tcorr_source
self._tmax_source = tmax_source
self._elr_flag = elr_flag
self._tdiff_threshold = float(tdiff_threshold)
self._dt_min = float(dt_min)
self._dt_max = float(dt_max)
# Convert elr_flag from string to bool if necessary
if type(self._elr_flag) is str:
if self._elr_flag.upper() in ['TRUE']:
self._elr_flag = True
elif self._elr_flag.upper() in ['FALSE']:
self._elr_flag = False
else:
raise ValueError('elr_flag "{}" could not be interpreted as '
'bool'.format(self._elr_flag))
def calculate(self, variables=['et', 'etr', 'etf']):
"""Return a multiband image of calculated variables
Parameters
----------
variables : list
Returns
-------
ee.Image
"""
output_images = []
for v in variables:
if v.lower() == 'et':
output_images.append(self.et)
elif v.lower() == 'etf':
output_images.append(self.etf)
elif v.lower() == 'etr':
output_images.append(self.etr)
elif v.lower() == 'lst':
output_images.append(self.lst)
elif v.lower() == 'mask':
output_images.append(self.mask)
elif v.lower() == 'ndvi':
output_images.append(self.ndvi)
# elif v.lower() == 'qa':
# output_images.append(self.qa)
elif v.lower() == 'quality':
output_images.append(self.quality)
elif v.lower() == 'time':
output_images.append(self.time)
else:
raise ValueError('unsupported variable: {}'.format(v))
return ee.Image(output_images).set(self._properties)
@lazy_property
def lst(self):
"""Return land surface temperature (LST) image"""
return self.image.select(['lst']).set(self._properties).double()
@lazy_property
def ndvi(self):
"""Return NDVI image"""
return self.image.select(['ndvi']).set(self._properties).double()
@lazy_property
def etf(self):
"""Compute SSEBop ETf for a single image
Returns
-------
ee.Image
Notes
-----
Apply Tdiff cloud mask buffer (mask values of 0 are set to nodata)
"""
# Get input images and ancillary data needed to compute SSEBop ETf
lst = ee.Image(self.lst)
tcorr, tcorr_index = self._tcorr
tmax = ee.Image(self._tmax)
dt = ee.Image(self._dt)
# Adjust air temperature based on elevation (Elevation Lapse Rate)
if self._elr_flag:
tmax = ee.Image(self._lapse_adjust(tmax, ee.Image(self._elev)))
# Compute SSEBop ETf
etf = lst.expression(
'(lst * (-1) + tmax * tcorr + dt) / dt',
{'tmax': tmax, 'dt': dt, 'lst': lst, 'tcorr': tcorr})
etf = etf.updateMask(etf.lt(1.3))\
.clamp(0, 1.05)\
.updateMask(tmax.subtract(lst).lte(self._tdiff_threshold))\
.set(self._properties).rename(['etf']).double()
# Don't set TCORR and INDEX properties for IMAGE Tcorr sources
if (type(self._tcorr_source) is str and
'IMAGE' not in self._tcorr_source.upper()):
etf = etf.set({'tcorr': tcorr, 'tcorr_index': tcorr_index})
return etf
@lazy_property
def etr(self):
"""Compute reference ET for the image date"""
if utils.is_number(self.etr_source):
# Interpret numbers as constant images
# CGM - Should we use the ee_types here instead?
# i.e. ee.ee_types.isNumber(self.etr_source)
etr_img = ee.Image.constant(self.etr_source)
elif type(self.etr_source) is str:
# Assume a string source is an image collection ID (not an image ID)
etr_img = ee.Image(
ee.ImageCollection(self.etr_source)\
.filterDate(self._start_date, self._end_date)\
.select([self.etr_band])\
.first())
# elif type(self.etr_source) is list:
# # Interpret as list of image collection IDs to composite/mosaic
# # i.e. Spatial CIMIS and GRIDMET
# # CGM - Need to check the order of the collections
# etr_coll = ee.ImageCollection([])
# for coll_id in self.etr_source:
# coll = ee.ImageCollection(coll_id)\
# .select([self.etr_band])\
# .filterDate(self.start_date, self.end_date)
# etr_img = etr_coll.merge(coll)
# etr_img = etr_coll.mosaic()
# elif isinstance(self.etr_source, computedobject.ComputedObject):
# # Interpret computed objects as image collections
# etr_coll = ee.ImageCollection(self.etr_source)\
# .select([self.etr_band])\
# .filterDate(self.start_date, self.end_date)
else:
raise ValueError('unsupported etr_source: {}'.format(
self.etr_source))
# Map ETr values directly to the input (i.e. Landsat) image pixels
# The benefit of this is the ETr image is now in the same crs as the
# input image. Not all models may want this though.
# CGM - Should the output band name match the input ETr band name?
return self.ndvi.multiply(0).add(etr_img)\
.multiply(self.etr_factor)\
.rename(['etr']).set(self._properties)
@lazy_property
def et(self):
"""Compute actual ET as fraction of reference times reference"""
return self.etf.multiply(self.etr)\
.rename(['et']).set(self._properties).double()
@lazy_property
def mask(self):
"""Mask of all active pixels (based on the final etf)"""
return self.etf.multiply(0).add(1).updateMask(1)\
.rename(['mask']).set(self._properties).uint8()
@lazy_property
def quality(self):
"""Set quality to 1 for all active pixels (for now)"""
tcorr, tcorr_index = self._tcorr
return self.mask\
.rename(['quality']).set(self._properties)
@lazy_property
def time(self):
"""Return an image of the 0 UTC time (in milliseconds)"""
return self.mask\
.double().multiply(0).add(utils.date_to_time_0utc(self._date))\
.rename(['time']).set(self._properties)
# return ee.Image.constant(utils.date_to_time_0utc(self._date))\
# .double().rename(['time']).set(self._properties)
@lazy_property
def _dt(self):
"""
Returns
-------
ee.Image
Raises
------
ValueError
If `self._dt_source` is not supported.
"""
if utils.is_number(self._dt_source):
dt_img = ee.Image.constant(float(self._dt_source))
elif self._dt_source.upper() == 'DAYMET_MEDIAN_V0':
dt_coll = ee.ImageCollection('projects/usgs-ssebop/dt/daymet_median_v0')\
.filter(ee.Filter.calendarRange(self._doy, self._doy, 'day_of_year'))
dt_img = ee.Image(dt_coll.first())
elif self._dt_source.upper() == 'DAYMET_MEDIAN_V1':
dt_coll = ee.ImageCollection('projects/usgs-ssebop/dt/daymet_median_v1')\
.filter(ee.Filter.calendarRange(self._doy, self._doy, 'day_of_year'))
dt_img = ee.Image(dt_coll.first())
else:
raise ValueError('Invalid dt_source: {}\n'.format(self._dt_source))
return dt_img.clamp(self._dt_min, self._dt_max).rename('dt')
@lazy_property
def _elev(self):
"""
Returns
-------
ee.Image
Raises
------
ValueError
If `self._elev_source` is not supported.
"""
if utils.is_number(self._elev_source):
elev_image = ee.Image.constant(float(self._elev_source))
elif self._elev_source.upper() == 'ASSET':
elev_image = ee.Image('projects/usgs-ssebop/srtm_1km')
elif self._elev_source.upper() == 'GTOPO':
elev_image = ee.Image('USGS/GTOPO30')
elif self._elev_source.upper() == 'NED':
elev_image = ee.Image('USGS/NED')
elif self._elev_source.upper() == 'SRTM':
elev_image = ee.Image('USGS/SRTMGL1_003')
elif (self._elev_source.lower().startswith('projects/') or
self._elev_source.lower().startswith('users/')):
elev_image = ee.Image(self._elev_source)
else:
raise ValueError('Unsupported elev_source: {}\n'.format(
self._elev_source))
return elev_image.select([0], ['elev'])
    @lazy_property
    def _tcorr(self):
        """Get Tcorr from pre-computed assets for each Tmax source

        Returns
        -------
        tuple
            For numeric and FEATURE/SCENE sources: (ee.Number tcorr,
            ee.Number index).  For IMAGE sources: (ee.Image tcorr,
            ee.Image index).

        Raises
        ------
        ValueError
            If `self._tcorr_source` is not supported.

        Notes
        -----
        Tcorr Index values indicate which level of Tcorr was used
          0 - Scene specific Tcorr
          1 - Mean monthly Tcorr per WRS2 tile
          2 - Mean annual Tcorr per WRS2 tile
              Annuals don't exist for feature Tcorr assets (yet)
          3 - Default Tcorr
          4 - User defined Tcorr
        """
        # month_field = ee.String('M').cat(ee.Number(self.month).format('%02d'))

        # A numeric tcorr_source is used directly as a constant (index 4)
        if utils.is_number(self._tcorr_source):
            tcorr = ee.Number(float(self._tcorr_source))
            tcorr_index = ee.Number(4)
            return tcorr, tcorr_index

        # DEADBEEF - Leaving 'SCENE' checking to be backwards compatible (for now)
        elif ('FEATURE' in self._tcorr_source.upper() or
                self._tcorr_source.upper() == 'SCENE'):
            # Lookup Tcorr feature collections by Tmax keyword value
            scene_coll_dict = {
                'CIMIS': 'projects/usgs-ssebop/tcorr/cimis_scene',
                'DAYMET': 'projects/usgs-ssebop/tcorr/daymet_scene',
                'GRIDMET': 'projects/usgs-ssebop/tcorr/gridmet_scene',
                # 'TOPOWX': 'projects/usgs-ssebop/tcorr/topowx_scene',
                'CIMIS_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/cimis_median_v1_scene',
                'DAYMET_MEDIAN_V0': 'projects/usgs-ssebop/tcorr/daymet_median_v0_scene',
                'DAYMET_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/daymet_median_v1_scene',
                'GRIDMET_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/gridmet_median_v1_scene',
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr/topowx_median_v0_scene',
                'TOPOWX_MEDIAN_V0B': 'projects/usgs-ssebop/tcorr/topowx_median_v0b_scene',
            }
            month_coll_dict = {
                'CIMIS': 'projects/usgs-ssebop/tcorr/cimis_monthly',
                'DAYMET': 'projects/usgs-ssebop/tcorr/daymet_monthly',
                'GRIDMET': 'projects/usgs-ssebop/tcorr/gridmet_monthly',
                # 'TOPOWX': 'projects/usgs-ssebop/tcorr/topowx_monthly',
                'CIMIS_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/cimis_median_v1_monthly',
                'DAYMET_MEDIAN_V0': 'projects/usgs-ssebop/tcorr/daymet_median_v0_monthly',
                'DAYMET_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/daymet_median_v1_monthly',
                'GRIDMET_MEDIAN_V1': 'projects/usgs-ssebop/tcorr/gridmet_median_v1_monthly',
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr/topowx_median_v0_monthly',
                'TOPOWX_MEDIAN_V0B': 'projects/usgs-ssebop/tcorr/topowx_median_v0b_monthly',
            }
            # annual_coll_dict = {}
            # Fallback Tcorr value (index 3) per Tmax keyword
            default_value_dict = {
                'CIMIS': 0.978,
                'DAYMET': 0.978,
                'GRIDMET': 0.978,
                'TOPOWX': 0.978,
                'CIMIS_MEDIAN_V1': 0.978,
                'DAYMET_MEDIAN_V0': 0.978,
                'DAYMET_MEDIAN_V1': 0.978,
                'GRIDMET_MEDIAN_V1': 0.978,
                'TOPOWX_MEDIAN_V0': 0.978,
                'TOPOWX_MEDIAN_V0B': 0.978,
            }

            # Check Tmax source value
            tmax_key = self._tmax_source.upper()
            if tmax_key not in default_value_dict.keys():
                raise ValueError(
                    '\nInvalid tmax_source for tcorr: {} / {}\n'.format(
                        self._tcorr_source, self._tmax_source))

            default_coll = ee.FeatureCollection([
                ee.Feature(None, {'INDEX': 3, 'TCORR': default_value_dict[tmax_key]})])
            # Monthly features are matched on WRS2 tile and calendar month
            month_coll = ee.FeatureCollection(month_coll_dict[tmax_key])\
                .filterMetadata('WRS2_TILE', 'equals', self._wrs2_tile)\
                .filterMetadata('MONTH', 'equals', self._month)
            if self._tcorr_source.upper() in ['FEATURE', 'SCENE']:
                # Scene features are matched on the exact SCENE_ID
                scene_coll = ee.FeatureCollection(scene_coll_dict[tmax_key])\
                    .filterMetadata('SCENE_ID', 'equals', self._scene_id)
                tcorr_coll = ee.FeatureCollection(
                    default_coll.merge(month_coll).merge(scene_coll)).sort('INDEX')
            elif 'MONTH' in self._tcorr_source.upper():
                tcorr_coll = ee.FeatureCollection(
                    default_coll.merge(month_coll)).sort('INDEX')
            else:
                raise ValueError(
                    'Invalid tcorr_source: {} / {}\n'.format(
                        self._tcorr_source, self._tmax_source))

            # Sorting by INDEX (ascending) makes the first feature the most
            # specific Tcorr that exists (scene, then month, then default)
            tcorr_ftr = ee.Feature(tcorr_coll.first())
            tcorr = ee.Number(tcorr_ftr.get('TCORR'))
            tcorr_index = ee.Number(tcorr_ftr.get('INDEX'))
            return tcorr, tcorr_index

        elif 'IMAGE' in self._tcorr_source.upper():
            # Lookup Tcorr image collections by Tmax keyword value
            daily_dict = {
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr_image/topowx_median_v0_daily'
            }
            month_dict = {
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr_image/topowx_median_v0_monthly',
            }
            annual_dict = {
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr_image/topowx_median_v0_annual',
            }
            default_dict = {
                'TOPOWX_MEDIAN_V0': 'projects/usgs-ssebop/tcorr_image/topowx_median_v0_default'
            }

            # Check Tmax source value
            tmax_key = self._tmax_source.upper()
            if tmax_key not in default_dict.keys():
                raise ValueError(
                    '\nInvalid tmax_source: {} / {}\n'.format(
                        self._tcorr_source, self._tmax_source))

            default_img = ee.Image(default_dict[tmax_key])
            # Fully masked image used to pad each collection so that
            # mosaic() always has at least one image to work with
            mask_img = default_img.updateMask(0)

            if (self._tcorr_source.upper() == 'IMAGE' or
                    'DAILY' in self._tcorr_source.upper()):
                daily_coll = ee.ImageCollection(daily_dict[tmax_key])\
                    .filterDate(self._start_date, self._end_date)\
                    .select(['tcorr'])
                daily_coll = daily_coll.merge(ee.ImageCollection(mask_img))
                daily_img = ee.Image(daily_coll.mosaic())
                # .filterMetadata('DATE', 'equals', self._date)
            if (self._tcorr_source.upper() == 'IMAGE' or
                    'MONTH' in self._tcorr_source.upper()):
                month_coll = ee.ImageCollection(month_dict[tmax_key])\
                    .filterMetadata('CYCLE_DAY', 'equals', self._cycle_day)\
                    .filterMetadata('MONTH', 'equals', self._month)\
                    .select(['tcorr'])
                month_coll = month_coll.merge(ee.ImageCollection(mask_img))
                month_img = ee.Image(month_coll.mosaic())
            if (self._tcorr_source.upper() == 'IMAGE' or
                    'ANNUAL' in self._tcorr_source.upper()):
                annual_coll = ee.ImageCollection(annual_dict[tmax_key])\
                    .filterMetadata('CYCLE_DAY', 'equals', self._cycle_day)\
                    .select(['tcorr'])
                annual_coll = annual_coll.merge(ee.ImageCollection(mask_img))
                annual_img = ee.Image(annual_coll.mosaic())

            if self._tcorr_source.upper() == 'IMAGE':
                # Composite Tcorr images to ensure that a value is returned
                # (even if the daily image doesn't exist).  Band 0 is the
                # Tcorr value and band 1 is the index of the level used.
                composite_coll = ee.ImageCollection([
                    default_img.addBands(default_img.multiply(0).add(3).uint8()),
                    annual_img.addBands(annual_img.multiply(0).add(2).uint8()),
                    month_img.addBands(month_img.multiply(0).add(1).uint8()),
                    daily_img.addBands(daily_img.multiply(0).uint8())])
                composite_img = composite_coll.mosaic()
                tcorr_img = composite_img.select([0], ['tcorr'])
                index_img = composite_img.select([1], ['index'])
            elif 'DAILY' in self._tcorr_source.upper():
                tcorr_img = daily_img
                index_img = daily_img.multiply(0).uint8()
            elif 'MONTH' in self._tcorr_source.upper():
                tcorr_img = month_img
                index_img = month_img.multiply(0).add(1).uint8()
            elif 'ANNUAL' in self._tcorr_source.upper():
                tcorr_img = annual_img
                index_img = annual_img.multiply(0).add(2).uint8()
            elif 'DEFAULT' in self._tcorr_source.upper():
                tcorr_img = default_img
                index_img = default_img.multiply(0).add(3).uint8()
            else:
                raise ValueError(
                    'Invalid tcorr_source: {} / {}\n'.format(
                        self._tcorr_source, self._tmax_source))

            return tcorr_img, index_img.rename(['index'])

        else:
            raise ValueError('Unsupported tcorr_source: {}\n'.format(
                self._tcorr_source))
    @lazy_property
    def _tmax(self):
        """Fall back on median Tmax if daily image does not exist

        Returns
        -------
        ee.Image
            Single band 'tmax' image with TMAX_SOURCE and TMAX_VERSION
            properties set.

        Raises
        ------
        ValueError
            If `self._tmax_source` is not supported.
        """
        doy_filter = ee.Filter.calendarRange(self._doy, self._doy, 'day_of_year')
        # Client-side date string used as the version tag for daily images
        date_today = datetime.datetime.today().strftime('%Y-%m-%d')

        if utils.is_number(self._tmax_source):
            # A numeric source is used as a constant Tmax image
            tmax_image = ee.Image.constant(float(self._tmax_source))\
                .rename(['tmax'])\
                .set('TMAX_VERSION', 'CUSTOM_{}'.format(self._tmax_source))
        elif self._tmax_source.upper() == 'CIMIS':
            # CIMIS daily Tmax ('Tx') is in Celsius and is converted to Kelvin
            daily_coll = ee.ImageCollection('projects/climate-engine/cimis/daily')\
                .filterDate(self._start_date, self._end_date)\
                .select(['Tx'], ['tmax']).map(utils.c_to_k)
            daily_image = ee.Image(daily_coll.first())\
                .set('TMAX_VERSION', date_today)

            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/cimis_{}'.format(median_version))
            median_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)

            # Fall back on the DOY median image if no daily image exists
            tmax_image = ee.Image(ee.Algorithms.If(
                daily_coll.size().gt(0), daily_image, median_image))
        elif self._tmax_source.upper() == 'DAYMET':
            # DAYMET does not include Dec 31st on leap years
            # Adding one extra date to end date to avoid errors
            # DAYMET tmax is in Celsius and is converted to Kelvin
            daily_coll = ee.ImageCollection('NASA/ORNL/DAYMET_V3')\
                .filterDate(self._start_date, self._end_date.advance(1, 'day'))\
                .select(['tmax']).map(utils.c_to_k)
            daily_image = ee.Image(daily_coll.first())\
                .set('TMAX_VERSION', date_today)

            median_version = 'median_v0'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/daymet_{}'.format(median_version))
            median_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)

            # Fall back on the DOY median image if no daily image exists
            tmax_image = ee.Image(ee.Algorithms.If(
                daily_coll.size().gt(0), daily_image, median_image))
        elif self._tmax_source.upper() == 'GRIDMET':
            # GRIDMET 'tmmx' is already in Kelvin (no conversion applied)
            daily_coll = ee.ImageCollection('IDAHO_EPSCOR/GRIDMET')\
                .filterDate(self._start_date, self._end_date)\
                .select(['tmmx'], ['tmax'])
            daily_image = ee.Image(daily_coll.first())\
                .set('TMAX_VERSION', date_today)

            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/gridmet_{}'.format(median_version))
            median_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)

            # Fall back on the DOY median image if no daily image exists
            tmax_image = ee.Image(ee.Algorithms.If(
                daily_coll.size().gt(0), daily_image, median_image))
        # elif self.tmax_source.upper() == 'TOPOWX':
        #     daily_coll = ee.ImageCollection('X')\
        #         .filterDate(self.start_date, self.end_date)\
        #         .select(['tmmx'], ['tmax'])
        #     daily_image = ee.Image(daily_coll.first())\
        #         .set('TMAX_VERSION', date_today)
        #
        #     median_version = 'median_v1'
        #     median_coll = ee.ImageCollection(
        #         'projects/usgs-ssebop/tmax/topowx_{}'.format(median_version))
        #     median_image = ee.Image(median_coll.filter(doy_filter).first())\
        #         .set('TMAX_VERSION', median_version)
        #
        #     tmax_image = ee.Image(ee.Algorithms.If(
        #         daily_coll.size().gt(0), daily_image, median_image))

        # The *_MEDIAN_* sources always use the DOY median image (no daily)
        elif self._tmax_source.upper() == 'CIMIS_MEDIAN_V1':
            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/cimis_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        elif self._tmax_source.upper() == 'DAYMET_MEDIAN_V0':
            median_version = 'median_v0'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/daymet_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        elif self._tmax_source.upper() == 'DAYMET_MEDIAN_V1':
            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/daymet_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        elif self._tmax_source.upper() == 'GRIDMET_MEDIAN_V1':
            median_version = 'median_v1'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/gridmet_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        elif self._tmax_source.upper() == 'TOPOWX_MEDIAN_V0':
            median_version = 'median_v0'
            median_coll = ee.ImageCollection(
                'projects/usgs-ssebop/tmax/topowx_{}'.format(median_version))
            tmax_image = ee.Image(median_coll.filter(doy_filter).first())\
                .set('TMAX_VERSION', median_version)
        # elif self.tmax_source.upper() == 'TOPOWX_MEDIAN_V1':
        #     median_version = 'median_v1'
        #     median_coll = ee.ImageCollection(
        #         'projects/usgs-ssebop/tmax/topowx_{}'.format(median_version))
        #     tmax_image = ee.Image(median_coll.filter(doy_filter).first())
        else:
            raise ValueError('Unsupported tmax_source: {}\n'.format(
                self._tmax_source))

        return ee.Image(tmax_image.set('TMAX_SOURCE', self._tmax_source))
@classmethod
def from_image_id(cls, image_id, **kwargs):
"""Constructs an SSEBop Image instance from an image ID
Parameters
----------
image_id : str
An earth engine image ID.
(i.e. 'LANDSAT/LC08/C01/T1_SR/LC08_044033_20170716')
kwargs
Keyword arguments to pass through to model init.
Returns
-------
new instance of Image class
"""
# DEADBEEF - Should the supported image collection IDs and helper
# function mappings be set in a property or method of the Image class?
collection_methods = {
'LANDSAT/LC08/C01/T1_RT_TOA': 'from_landsat_c1_toa',
'LANDSAT/LE07/C01/T1_RT_TOA': 'from_landsat_c1_toa',
'LANDSAT/LC08/C01/T1_TOA': 'from_landsat_c1_toa',
'LANDSAT/LE07/C01/T1_TOA': 'from_landsat_c1_toa',
'LANDSAT/LT05/C01/T1_TOA': 'from_landsat_c1_toa',
# 'LANDSAT/LT04/C01/T1_TOA': 'from_landsat_c1_toa',
'LANDSAT/LC08/C01/T1_SR': 'from_landsat_c1_sr',
'LANDSAT/LE07/C01/T1_SR': 'from_landsat_c1_sr',
'LANDSAT/LT05/C01/T1_SR': 'from_landsat_c1_sr',
# 'LANDSAT/LT04/C01/T1_SR': 'from_landsat_c1_sr',
}
try:
method_name = collection_methods[image_id.rsplit('/', 1)[0]]
except KeyError:
raise ValueError('unsupported collection ID: {}'.format(image_id))
except Exception as e:
raise Exception('unhandled exception: {}'.format(e))
method = getattr(Image, method_name)
return method(ee.Image(image_id), **kwargs)
@classmethod
def from_landsat_c1_toa(cls, toa_image, cloudmask_args={}, **kwargs):
"""Returns a SSEBop Image instance from a Landsat Collection 1 TOA image
Parameters
----------
toa_image : ee.Image
A raw Landsat Collection 1 TOA image.
cloudmask_args : dict
keyword arguments to pass through to cloud mask function
kwargs : dict
Keyword arguments to pass through to Image init function
Returns
-------
Image
"""
toa_image = ee.Image(toa_image)
# Use the SPACECRAFT_ID property identify each Landsat type
spacecraft_id = ee.String(toa_image.get('SPACECRAFT_ID'))
# Rename bands to generic names
# Rename thermal band "k" coefficients to generic names
input_bands = ee.Dictionary({
# 'LANDSAT_4': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'BQA'],
'LANDSAT_5': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'BQA'],
'LANDSAT_7': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6_VCID_1',
'BQA'],
'LANDSAT_8': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'BQA']})
output_bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'lst',
'BQA']
k1 = ee.Dictionary({
# 'LANDSAT_4': 'K1_CONSTANT_BAND_6',
'LANDSAT_5': 'K1_CONSTANT_BAND_6',
'LANDSAT_7': 'K1_CONSTANT_BAND_6_VCID_1',
'LANDSAT_8': 'K1_CONSTANT_BAND_10'})
k2 = ee.Dictionary({
# 'LANDSAT_4': 'K2_CONSTANT_BAND_6',
'LANDSAT_5': 'K2_CONSTANT_BAND_6',
'LANDSAT_7': 'K2_CONSTANT_BAND_6_VCID_1',
'LANDSAT_8': 'K2_CONSTANT_BAND_10'})
prep_image = toa_image\
.select(input_bands.get(spacecraft_id), output_bands)\
.set('k1_constant', ee.Number(toa_image.get(k1.get(spacecraft_id))))\
.set('k2_constant', ee.Number(toa_image.get(k2.get(spacecraft_id))))
# Build the input image
input_image = ee.Image([cls._lst(prep_image), cls._ndvi(prep_image)])
# Apply the cloud mask and add properties
input_image = input_image\
.updateMask(common.landsat_c1_toa_cloud_mask(
toa_image, **cloudmask_args))\
.set({
'system:index': toa_image.get('system:index'),
'system:time_start': toa_image.get('system:time_start'),
'system:id': toa_image.get('system:id'),
})
# Instantiate the class
return cls(ee.Image(input_image), **kwargs)
    @classmethod
    def from_landsat_c1_sr(cls, sr_image, **kwargs):
        """Returns a SSEBop Image instance from a Landsat Collection 1 SR image

        Parameters
        ----------
        sr_image : ee.Image
            A raw Landsat Collection 1 SR image.
        kwargs : dict
            Keyword arguments to pass through to Image init function

        Returns
        -------
        Image

        """
        sr_image = ee.Image(sr_image)

        # Use the SATELLITE property identify each Landsat type
        spacecraft_id = ee.String(sr_image.get('SATELLITE'))

        # Rename bands to generic names
        # Rename thermal band "k" coefficients to generic names
        input_bands = ee.Dictionary({
            'LANDSAT_5': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'pixel_qa'],
            'LANDSAT_7': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'pixel_qa'],
            'LANDSAT_8': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10',
                          'pixel_qa']})
        output_bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'lst',
                        'pixel_qa']
        # TODO: Follow up with Simon about adding K1/K2 to SR collection
        # Hardcode values for now (unlike the TOA constructor, which reads
        # K1/K2 from the image metadata properties)
        k1 = ee.Dictionary({
            # 'LANDSAT_4': 607.76,
            'LANDSAT_5': 607.76, 'LANDSAT_7': 666.09, 'LANDSAT_8': 774.8853})
        k2 = ee.Dictionary({
            # 'LANDSAT_4': 1260.56,
            'LANDSAT_5': 1260.56, 'LANDSAT_7': 1282.71, 'LANDSAT_8': 1321.0789})
        prep_image = sr_image\
            .select(input_bands.get(spacecraft_id), output_bands)\
            .set('k1_constant', ee.Number(k1.get(spacecraft_id)))\
            .set('k2_constant', ee.Number(k2.get(spacecraft_id)))
        # k1 = ee.Dictionary({
        #     # 'LANDSAT_4': 'K1_CONSTANT_BAND_6',
        #     'LANDSAT_5': 'K1_CONSTANT_BAND_6',
        #     'LANDSAT_7': 'K1_CONSTANT_BAND_6_VCID_1',
        #     'LANDSAT_8': 'K1_CONSTANT_BAND_10'})
        # k2 = ee.Dictionary({
        #     # 'LANDSAT_4': 'K2_CONSTANT_BAND_6',
        #     'LANDSAT_5': 'K2_CONSTANT_BAND_6',
        #     'LANDSAT_7': 'K2_CONSTANT_BAND_6_VCID_1',
        #     'LANDSAT_8': 'K2_CONSTANT_BAND_10'})
        # prep_image = sr_image\
        #     .select(input_bands.get(spacecraft_id), output_bands)\
        #     .set('k1_constant', ee.Number(sr_image.get(k1.get(spacecraft_id))))\
        #     .set('k2_constant', ee.Number(sr_image.get(k2.get(spacecraft_id))))

        # Build the input image
        input_image = ee.Image([cls._lst(prep_image), cls._ndvi(prep_image)])

        # Apply the cloud mask and add properties
        input_image = input_image\
            .updateMask(common.landsat_c1_sr_cloud_mask(sr_image))\
            .set({
                'system:index': sr_image.get('system:index'),
                'system:time_start': sr_image.get('system:time_start'),
                'system:id': sr_image.get('system:id'),
            })

        # Instantiate the class
        return cls(input_image, **kwargs)
@staticmethod
def _ndvi(toa_image):
"""Compute NDVI
Parameters
----------
toa_image : ee.Image
Renamed TOA image with 'nir' and 'red bands.
Returns
-------
ee.Image
"""
return ee.Image(toa_image).normalizedDifference(['nir', 'red'])\
.rename(['ndvi'])
    @staticmethod
    def _lst(toa_image):
        """Compute emissivity corrected land surface temperature (LST)
        from brightness temperature.

        Parameters
        ----------
        toa_image : ee.Image
            Renamed TOA image with 'red', 'nir', and 'lst' bands.
            Image must also have 'k1_constant' and 'k2_constant' properties.

        Returns
        -------
        ee.Image
            Single band image named 'lst' [K].

        Notes
        -----
        The corrected radiation coefficients were derived from a small number
        of scenes in southern Idaho [Allen2007] and may not be appropriate for
        other areas.

        References
        ----------
        .. [Allen2007] R. Allen, M. Tasumi, R. Trezza (2007),
            Satellite-Based Energy Balance for Mapping Evapotranspiration with
            Internalized Calibration (METRIC) Model,
            Journal of Irrigation and Drainage Engineering, Vol 133(4),
            http://dx.doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)

        """
        # Get properties from image
        k1 = ee.Number(ee.Image(toa_image).get('k1_constant'))
        k2 = ee.Number(ee.Image(toa_image).get('k2_constant'))

        # The 'lst' input band holds the at-sensor brightness temperature
        ts_brightness = ee.Image(toa_image).select(['lst'])
        emissivity = Image._emissivity(toa_image)

        # First back out radiance from brightness temperature
        # Then recalculate emissivity corrected Ts
        # (inverse Planck relation using the per-sensor K1/K2 constants)
        thermal_rad_toa = ts_brightness.expression(
            'k1 / (exp(k2 / ts_brightness) - 1)',
            {'ts_brightness': ts_brightness, 'k1': k1, 'k2': k2})

        # tnb = 0.866   # narrow band transmissivity of air
        # rp = 0.91     # path radiance
        # rsky = 1.32   # narrow band clear sky downward thermal radiation
        # Correct the at-sensor radiance for path effects and sky radiation
        rc = thermal_rad_toa.expression(
            '((thermal_rad_toa - rp) / tnb) - ((1. - emiss) * rsky)',
            {
                'thermal_rad_toa': thermal_rad_toa,
                'emiss': emissivity,
                'rp': 0.91, 'tnb': 0.866, 'rsky': 1.32})
        # Invert Planck again with the emissivity applied to get surface Ts
        lst = rc.expression(
            'k2 / log(emiss * k1 / rc + 1)',
            {'emiss': emissivity, 'rc': rc, 'k1': k1, 'k2': k2})

        return lst.rename(['lst'])
    @staticmethod
    def _emissivity(toa_image):
        """Compute emissivity as a function of NDVI

        Parameters
        ----------
        toa_image : ee.Image
            Renamed image with 'nir' and 'red' bands (passed to _ndvi).

        Returns
        -------
        ee.Image
            Single band image named 'emissivity', clamped to [0.977, 0.99].

        """
        ndvi = Image._ndvi(toa_image)
        # Fractional vegetation cover estimated from NDVI
        Pv = ndvi.expression(
            '((ndvi - 0.2) / 0.3) ** 2', {'ndvi': ndvi})
        # ndviRangevalue = ndvi_image.where(
        #     ndvi_image.gte(0.2).And(ndvi_image.lte(0.5)), ndvi_image)
        # Pv = ndviRangevalue.expression(
        #     '(((ndviRangevalue - 0.2)/0.3)**2',{'ndviRangevalue':ndviRangevalue})

        # Assuming typical Soil Emissivity of 0.97 and Veg Emissivity of 0.99
        #   and shape Factor mean value of 0.553
        dE = Pv.expression(
            '(((1 - 0.97) * (1 - Pv)) * (0.55 * 0.99))', {'Pv': Pv})
        # Mixed soil/vegetation emissivity for the 0.2 <= NDVI <= 0.5 range
        RangeEmiss = dE.expression(
            '((0.99 * Pv) + (0.97 * (1 - Pv)) + dE)', {'Pv': Pv, 'dE': dE})

        # RangeEmiss = 0.989 # dE.expression(
        #     '((0.99*Pv)+(0.97 *(1-Pv))+dE)',{'Pv':Pv, 'dE':dE})

        # Piecewise assignment by NDVI range, then clamped to valid bounds
        emissivity = ndvi\
            .where(ndvi.lt(0), 0.985)\
            .where(ndvi.gte(0).And(ndvi.lt(0.2)), 0.977)\
            .where(ndvi.gt(0.5), 0.99)\
            .where(ndvi.gte(0.2).And(ndvi.lte(0.5)), RangeEmiss)
        emissivity = emissivity.clamp(0.977, 0.99)

        return emissivity.select([0], ['emissivity'])
@staticmethod
def _lapse_adjust(temperature, elev, lapse_threshold=1500):
"""Compute Elevation Lapse Rate (ELR) adjusted temperature
Parameters
----------
temperature : ee.Image
Temperature [K].
elev : ee.Image
Elevation [m].
lapse_threshold : float
Minimum elevation to adjust temperature [m] (the default is 1500).
Returns
-------
ee.Image of adjusted temperature
"""
elr_adjust = ee.Image(temperature).expression(
'(temperature - (0.003 * (elev - threshold)))',
{
'temperature': temperature, 'elev': elev,
'threshold': lapse_threshold
})
return ee.Image(temperature).where(elev.gt(lapse_threshold), elr_adjust)
@lazy_property
def tcorr_image(self):
"""Compute Tcorr for the current image
Apply Tdiff cloud mask buffer (mask values of 0 are set to nodata)
"""
lst = ee.Image(self.lst)
ndvi = ee.Image(self.ndvi)
tmax = ee.Image(self._tmax)
# Compute tcorr
tcorr = lst.divide(tmax)
# Remove low LST and low NDVI
tcorr_mask = lst.gt(270).And(ndvi.gt(0.7))
# Filter extreme Tdiff values
tdiff = tmax.subtract(lst)
tcorr_mask = tcorr_mask.And(
tdiff.gt(0).And(tdiff.lte(self._tdiff_threshold)))
return tcorr.updateMask(tcorr_mask).rename(['tcorr'])\
.set({'system:index': self._index,
'system:time_start': self._time_start,
'TMAX_SOURCE': tmax.get('TMAX_SOURCE'),
'TMAX_VERSION': tmax.get('TMAX_VERSION')})
    @lazy_property
    def tcorr_stats(self):
        """Compute the Tcorr 5th percentile and count statistics

        Returns
        -------
        Server side dictionary from reduceRegion with the combined
        percentile/count reducer outputs for the 'tcorr' band.
        """
        # Reduce in the input image's native projection and transform so the
        # statistics are computed on the original (i.e. Landsat) pixels
        image_proj = self.image.select([0]).projection()
        image_crs = image_proj.crs()
        image_geo = ee.List(ee.Dictionary(
            ee.Algorithms.Describe(image_proj)).get('transform'))
        # image_shape = ee.List(ee.Dictionary(ee.List(ee.Dictionary(
        #     ee.Algorithms.Describe(self.image)).get('bands')).get(0)).get('dimensions'))
        # print(image_shape.getInfo())
        # print(image_crs.getInfo())
        # print(image_geo.getInfo())

        # Geometry is buffered by 1000 m; sharedInputs=True lets the
        # percentile and count reducers share the same input band
        return ee.Image(self.tcorr_image).reduceRegion(
            reducer=ee.Reducer.percentile([5]).combine(ee.Reducer.count(), '', True),
            crs=image_crs,
            crsTransform=image_geo,
            geometry=ee.Image(self.image).geometry().buffer(1000),
            bestEffort=False,
            maxPixels=2*10000*10000,
            tileScale=1)
LC08_043033_20150805) # Set as "lazy_property" below in order to return custom properties # self.lst = self.image.select('lst') # self.ndvi = self.image.select('ndvi') # Copy system properties # Build SCENE_ID from the (possibly merged) system:index # Build WRS2_TILE from the scene_id # Set server side date/time properties using the 'system:time_start' # # Model input parameters # Convert elr_flag from string to bool if necessary Return a multiband image of calculated variables Parameters ---------- variables : list Returns ------- ee.Image # elif v.lower() == 'qa': # output_images.append(self.qa) Return land surface temperature (LST) image Return NDVI image Compute SSEBop ETf for a single image Returns ------- ee.Image Notes ----- Apply Tdiff cloud mask buffer (mask values of 0 are set to nodata) # Get input images and ancillary data needed to compute SSEBop ETf # Adjust air temperature based on elevation (Elevation Lapse Rate) # Compute SSEBop ETf # Don't set TCORR and INDEX properties for IMAGE Tcorr sources Compute reference ET for the image date # Interpret numbers as constant images # CGM - Should we use the ee_types here instead? # i.e. ee.ee_types.isNumber(self.etr_source) # Assume a string source is an image collection ID (not an image ID) # elif type(self.etr_source) is list: # # Interpret as list of image collection IDs to composite/mosaic # # i.e. Spatial CIMIS and GRIDMET # # CGM - Need to check the order of the collections # etr_coll = ee.ImageCollection([]) # for coll_id in self.etr_source: # coll = ee.ImageCollection(coll_id)\ # .select([self.etr_band])\ # .filterDate(self.start_date, self.end_date) # etr_img = etr_coll.merge(coll) # etr_img = etr_coll.mosaic() # elif isinstance(self.etr_source, computedobject.ComputedObject): # # Interpret computed objects as image collections # etr_coll = ee.ImageCollection(self.etr_source)\ # .select([self.etr_band])\ # .filterDate(self.start_date, self.end_date) # Map ETr values directly to the input (i.e. 
Landsat) image pixels # The benefit of this is the ETr image is now in the same crs as the # input image. Not all models may want this though. # CGM - Should the output band name match the input ETr band name? Compute actual ET as fraction of reference times reference Mask of all active pixels (based on the final etf) Set quality to 1 for all active pixels (for now) Return an image of the 0 UTC time (in milliseconds) # return ee.Image.constant(utils.date_to_time_0utc(self._date))\ # .double().rename(['time']).set(self._properties) Returns ------- ee.Image Raises ------ ValueError If `self._dt_source` is not supported. Returns ------- ee.Image Raises ------ ValueError If `self._elev_source` is not supported. Get Tcorr from pre-computed assets for each Tmax source Returns ------- Raises ------ ValueError If `self._tcorr_source` is not supported. Notes ----- Tcorr Index values indicate which level of Tcorr was used 0 - Scene specific Tcorr 1 - Mean monthly Tcorr per WRS2 tile 2 - Mean annual Tcorr per WRS2 tile Annuals don't exist for feature Tcorr assets (yet) 3 - Default Tcorr 4 - User defined Tcorr # month_field = ee.String('M').cat(ee.Number(self.month).format('%02d')) # DEADBEEF - Leaving 'SCENE' checking to be backwards compatible (for now) # Lookup Tcorr collections by keyword value # 'TOPOWX': 'projects/usgs-ssebop/tcorr/topowx_scene', # 'TOPOWX': 'projects/usgs-ssebop/tcorr/topowx_monthly', # annual_coll_dict = {} # Check Tmax source value # Lookup Tcorr collections by keyword value # Check Tmax source value # .filterMetadata('DATE', 'equals', self._date) # Composite Tcorr images to ensure that a value is returned # (even if the daily image doesn't exist) Fall back on median Tmax if daily image does not exist Returns ------- ee.Image Raises ------ ValueError If `self._tmax_source` is not supported. 
# DAYMET does not include Dec 31st on leap years # Adding one extra date to end date to avoid errors # elif self.tmax_source.upper() == 'TOPOWX': # daily_coll = ee.ImageCollection('X')\ # .filterDate(self.start_date, self.end_date)\ # .select(['tmmx'], ['tmax']) # daily_image = ee.Image(daily_coll.first())\ # .set('TMAX_VERSION', date_today) # # median_version = 'median_v1' # median_coll = ee.ImageCollection( # 'projects/usgs-ssebop/tmax/topowx_{}'.format(median_version)) # median_image = ee.Image(median_coll.filter(doy_filter).first())\ # .set('TMAX_VERSION', median_version) # # tmax_image = ee.Image(ee.Algorithms.If( # daily_coll.size().gt(0), daily_image, median_image)) # elif self.tmax_source.upper() == 'TOPOWX_MEDIAN_V1': # median_version = 'median_v1' # median_coll = ee.ImageCollection( # 'projects/usgs-ssebop/tmax/topowx_{}'.format(median_version)) # tmax_image = ee.Image(median_coll.filter(doy_filter).first()) Constructs an SSEBop Image instance from an image ID Parameters ---------- image_id : str An earth engine image ID. (i.e. 'LANDSAT/LC08/C01/T1_SR/LC08_044033_20170716') kwargs Keyword arguments to pass through to model init. Returns ------- new instance of Image class # DEADBEEF - Should the supported image collection IDs and helper # function mappings be set in a property or method of the Image class? # 'LANDSAT/LT04/C01/T1_TOA': 'from_landsat_c1_toa', # 'LANDSAT/LT04/C01/T1_SR': 'from_landsat_c1_sr', Returns a SSEBop Image instance from a Landsat Collection 1 TOA image Parameters ---------- toa_image : ee.Image A raw Landsat Collection 1 TOA image. 
cloudmask_args : dict keyword arguments to pass through to cloud mask function kwargs : dict Keyword arguments to pass through to Image init function Returns ------- Image # Use the SPACECRAFT_ID property identify each Landsat type # Rename bands to generic names # Rename thermal band "k" coefficients to generic names # 'LANDSAT_4': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'BQA'], # 'LANDSAT_4': 'K1_CONSTANT_BAND_6', # 'LANDSAT_4': 'K2_CONSTANT_BAND_6', # Build the input image # Apply the cloud mask and add properties # Instantiate the class Returns a SSEBop Image instance from a Landsat Collection 1 SR image Parameters ---------- sr_image : ee.Image A raw Landsat Collection 1 SR image. Returns ------- Image # Use the SATELLITE property identify each Landsat type # Rename bands to generic names # Rename thermal band "k" coefficients to generic names # TODO: Follow up with Simon about adding K1/K2 to SR collection # Hardcode values for now # 'LANDSAT_4': 607.76, # 'LANDSAT_4': 1260.56, # k1 = ee.Dictionary({ # # 'LANDSAT_4': 'K1_CONSTANT_BAND_6', # 'LANDSAT_5': 'K1_CONSTANT_BAND_6', # 'LANDSAT_7': 'K1_CONSTANT_BAND_6_VCID_1', # 'LANDSAT_8': 'K1_CONSTANT_BAND_10'}) # k2 = ee.Dictionary({ # # 'LANDSAT_4': 'K2_CONSTANT_BAND_6', # 'LANDSAT_5': 'K2_CONSTANT_BAND_6', # 'LANDSAT_7': 'K2_CONSTANT_BAND_6_VCID_1', # 'LANDSAT_8': 'K2_CONSTANT_BAND_10'}) # prep_image = sr_image\ # .select(input_bands.get(spacecraft_id), output_bands)\ # .set('k1_constant', ee.Number(sr_image.get(k1.get(spacecraft_id))))\ # .set('k2_constant', ee.Number(sr_image.get(k2.get(spacecraft_id)))) # Build the input image # Apply the cloud mask and add properties # Instantiate the class Compute NDVI Parameters ---------- toa_image : ee.Image Renamed TOA image with 'nir' and 'red bands. Returns ------- ee.Image Compute emissivity corrected land surface temperature (LST) from brightness temperature. Parameters ---------- toa_image : ee.Image Renamed TOA image with 'red', 'nir', and 'lst' bands. 
Image must also have 'k1_constant' and 'k2_constant' properties. Returns ------- ee.Image Notes ----- The corrected radiation coefficients were derived from a small number of scenes in southern Idaho [Allen2007] and may not be appropriate for other areas. References ---------- .. [Allen2007] <NAME>, <NAME>, <NAME> (2007), Satellite-Based Energy Balance for Mapping Evapotranspiration with Internalized Calibration (METRIC) Model, Journal of Irrigation and Drainage Engineering, Vol 133(4), http://dx.doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380) # Get properties from image # First back out radiance from brightness temperature # Then recalculate emissivity corrected Ts # tnb = 0.866 # narrow band transmissivity of air # rp = 0.91 # path radiance # rsky = 1.32 # narrow band clear sky downward thermal radiation Compute emissivity as a function of NDVI Parameters ---------- toa_image : ee.Image Returns ------- ee.Image # ndviRangevalue = ndvi_image.where( # ndvi_image.gte(0.2).And(ndvi_image.lte(0.5)), ndvi_image) # Pv = ndviRangevalue.expression( # '(((ndviRangevalue - 0.2)/0.3)**2',{'ndviRangevalue':ndviRangevalue}) # Assuming typical Soil Emissivity of 0.97 and Veg Emissivity of 0.99 # and shape Factor mean value of 0.553 # RangeEmiss = 0.989 # dE.expression( # '((0.99*Pv)+(0.97 *(1-Pv))+dE)',{'Pv':Pv, 'dE':dE}) Compute Elevation Lapse Rate (ELR) adjusted temperature Parameters ---------- temperature : ee.Image Temperature [K]. elev : ee.Image Elevation [m]. lapse_threshold : float Minimum elevation to adjust temperature [m] (the default is 1500). 
Returns ------- ee.Image of adjusted temperature Compute Tcorr for the current image Apply Tdiff cloud mask buffer (mask values of 0 are set to nodata) # Compute tcorr # Remove low LST and low NDVI # Filter extreme Tdiff values Compute the Tcorr 5th percentile and count statistics # image_shape = ee.List(ee.Dictionary(ee.List(ee.Dictionary( # ee.Algorithms.Describe(self.image)).get('bands')).get(0)).get('dimensions')) # print(image_shape.getInfo()) # print(image_crs.getInfo()) # print(image_geo.getInfo()) | 2.24324 | 2 |
app_blue_points/webse/statistics/routes.py | mariobp-NHH/Sustainable_Energy_Web1_v2 | 0 | 6618531 | <filename>app_blue_points/webse/statistics/routes.py<gh_stars>0
import os
import secrets
import json
from datetime import timedelta, datetime
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort, jsonify, Blueprint
from webse import app, db, bcrypt
from webse.models import User, Moduls, Announcement, Chat, Emissions
from flask_login import login_user, current_user, logout_user, login_required
statistics = Blueprint('statistics', __name__)
##################################
#### Block 11. Statistics ####
##################################
@statistics.route('/statistics', methods=['GET', 'POST'])
@login_required
def statistics_main():
    """Render the statistics landing page.

    Shows the current user's entries whose module title is the placeholder
    '---', newest first; correct/incorrect totals start at zero here.
    """
    placeholder_entries = (
        Moduls.query
        .filter_by(author=current_user)
        .filter(Moduls.title_mo.is_('---'))
        .order_by(Moduls.date_exercise.desc())
        .all()
    )
    return render_template(
        'statistics/statistics.html',
        entries=placeholder_entries,
        correct=0,
        incorrect=0,
    )
@statistics.route('/statistics/se_ch1', methods=['GET', 'POST'])
@login_required
def statistics_se_ch1():
    """Render statistics for 'Sustainable Energy' Chapter 1 (Frame).

    Lists the current user's answers for this chapter (question_option 50)
    and reports how many were answered correctly vs. incorrectly.
    """
    # SQLAlchemy queries are immutable, so one base filter chain can be
    # extended per statistic without side effects.
    # NOTE(review): `.is_(50)` emits an SQL `IS` comparison against a
    # non-NULL literal; it filters the same rows, but `== 50` is the
    # conventional, portable form.
    base = (
        Moduls.query
        .filter_by(author=current_user)
        .filter(Moduls.title_mo.is_('Sustainable Energy'))
        .filter(Moduls.title_ch.is_('Chapter 1. Frame'))
        .filter(Moduls.question_option.is_(50))
        .order_by(Moduls.question_num.asc())
    )
    entries = base.all()
    incorrect = base.filter(Moduls.question_result.is_(0)).count()
    correct = base.filter(Moduls.question_result.is_(1)).count()
    # NOTE(review): this flash looks copy-pasted from an answer-submission
    # view; consider dropping it on a read-only statistics page.
    flash('Your answer has been submitted!', 'success')
    return render_template('statistics/statistics_se_ch1.html',
                           entries=entries, correct=correct, incorrect=incorrect)
@statistics.route('/statistics/se_ch2', methods=['GET', 'POST'])
@login_required
def statistics_se_ch2():
    """Render statistics for 'Sustainable Energy' Chapter 2.

    Lists the current user's answers for this chapter (question_option 50)
    and reports how many were answered correctly vs. incorrectly.
    """
    # One immutable base query, extended per statistic (see se_ch1).
    base = (
        Moduls.query
        .filter_by(author=current_user)
        .filter(Moduls.title_mo.is_('Sustainable Energy'))
        .filter(Moduls.title_ch.is_('Ch2. Ecological Footprint and Biocapacity'))
        .filter(Moduls.question_option.is_(50))
        .order_by(Moduls.question_num.asc())
    )
    entries = base.all()
    incorrect = base.filter(Moduls.question_result.is_(0)).count()
    correct = base.filter(Moduls.question_result.is_(1)).count()
    # NOTE(review): flash text likely copy-pasted from a submission view.
    flash('Your answer has been submitted!', 'success')
    return render_template('statistics/statistics_se_ch2.html',
                           entries=entries, correct=correct, incorrect=incorrect)
@statistics.route('/statistics/se_ch3', methods=['GET', 'POST'])
@login_required
def statistics_se_ch3():
    """Render statistics for 'Sustainable Energy' Chapter 3.

    Lists the current user's answers for this chapter (question_option 50)
    and reports how many were answered correctly vs. incorrectly.
    """
    # One immutable base query, extended per statistic (see se_ch1).
    base = (
        Moduls.query
        .filter_by(author=current_user)
        .filter(Moduls.title_mo.is_('Sustainable Energy'))
        .filter(Moduls.title_ch.is_('Ch3. Human Development for the Anthropocene'))
        .filter(Moduls.question_option.is_(50))
        .order_by(Moduls.question_num.asc())
    )
    entries = base.all()
    incorrect = base.filter(Moduls.question_result.is_(0)).count()
    correct = base.filter(Moduls.question_result.is_(1)).count()
    # NOTE(review): flash text likely copy-pasted from a submission view.
    flash('Your answer has been submitted!', 'success')
    return render_template('statistics/statistics_se_ch3.html',
                           entries=entries, correct=correct, incorrect=incorrect)
import os
import secrets
import json
from datetime import timedelta, datetime
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort, jsonify, Blueprint
from webse import app, db, bcrypt
from webse.models import User, Moduls, Announcement, Chat, Emissions
from flask_login import login_user, current_user, logout_user, login_required
statistics = Blueprint('statistics', __name__)
##################################
#### Block 11. Statistics ####
##################################
@statistics.route('/statistics', methods=['GET', 'POST'])
@login_required
def statistics_main():
entries = Moduls.query.filter_by(author=current_user).filter(Moduls.title_mo.is_('---')).order_by(Moduls.date_exercise.desc()).all()
return render_template('statistics/statistics.html',entries=entries, correct=0, incorrect=0)
@statistics.route('/statistics/se_ch1', methods=['GET', 'POST'])
@login_required
def statistics_se_ch1():
entries = Moduls.query.filter_by(author=current_user). \
filter(Moduls.title_mo.is_('Sustainable Energy')). \
filter(Moduls.title_ch.is_('Chapter 1. Frame')). \
filter(Moduls.question_option.is_(50)). \
order_by(Moduls.question_num.asc()).all()
incorrect = Moduls.query.filter_by(author=current_user). \
filter(Moduls.question_result.is_(0)). \
filter(Moduls.title_mo.is_('Sustainable Energy')). \
filter(Moduls.title_ch.is_('Chapter 1. Frame')). \
filter(Moduls.question_option.is_(50)). \
order_by(Moduls.question_num.asc()).count()
correct = Moduls.query.filter_by(author=current_user). \
filter(Moduls.question_result.is_(1)). \
filter(Moduls.title_mo.is_('Sustainable Energy')). \
filter(Moduls.title_ch.is_('Chapter 1. Frame')). \
filter(Moduls.question_option.is_(50)). \
order_by(Moduls.question_num.asc()).count()
flash('Your answer has been submitted!', 'success')
return render_template('statistics/statistics_se_ch1.html', entries=entries, correct=correct, incorrect=incorrect)
@statistics.route('/statistics/se_ch2', methods=['GET', 'POST'])
@login_required
def statistics_se_ch2():
entries = Moduls.query.filter_by(author=current_user). \
filter(Moduls.title_mo.is_('Sustainable Energy')). \
filter(Moduls.title_ch.is_('Ch2. Ecological Footprint and Biocapacity')). \
filter(Moduls.question_option.is_(50)). \
order_by(Moduls.question_num.asc()).all()
incorrect = Moduls.query.filter_by(author=current_user). \
filter(Moduls.question_result.is_(0)). \
filter(Moduls.title_mo.is_('Sustainable Energy')). \
filter(Moduls.title_ch.is_('Ch2. Ecological Footprint and Biocapacity')). \
filter(Moduls.question_option.is_(50)). \
order_by(Moduls.question_num.asc()).count()
correct = Moduls.query.filter_by(author=current_user). \
filter(Moduls.question_result.is_(1)). \
filter(Moduls.title_mo.is_('Sustainable Energy')). \
filter(Moduls.title_ch.is_('Ch2. Ecological Footprint and Biocapacity')). \
filter(Moduls.question_option.is_(50)). \
order_by(Moduls.question_num.asc()).count()
flash('Your answer has been submitted!', 'success')
return render_template('statistics/statistics_se_ch2.html', entries=entries, correct=correct, incorrect=incorrect)
@statistics.route('/statistics/se_ch3', methods=['GET', 'POST'])
@login_required
def statistics_se_ch3():
entries = Moduls.query.filter_by(author=current_user). \
filter(Moduls.title_mo.is_('Sustainable Energy')). \
filter(Moduls.title_ch.is_('Ch3. Human Development for the Anthropocene')). \
filter(Moduls.question_option.is_(50)). \
order_by(Moduls.question_num.asc()).all()
incorrect = Moduls.query.filter_by(author=current_user). \
filter(Moduls.question_result.is_(0)). \
filter(Moduls.title_mo.is_('Sustainable Energy')). \
filter(Moduls.title_ch.is_('Ch3. Human Development for the Anthropocene')). \
filter(Moduls.question_option.is_(50)). \
order_by(Moduls.question_num.asc()).count()
correct = Moduls.query.filter_by(author=current_user). \
filter(Moduls.question_result.is_(1)). \
filter(Moduls.title_mo.is_('Sustainable Energy')). \
filter(Moduls.title_ch.is_('Ch3. Human Development for the Anthropocene')). \
filter(Moduls.question_option.is_(50)). \
order_by(Moduls.question_num.asc()).count()
flash('Your answer has been submitted!', 'success')
return render_template('statistics/statistics_se_ch3.html', entries=entries, correct=correct, incorrect=incorrect) | de | 0.849904 | ################################## #### Block 11. Statistics #### ################################## | 1.999894 | 2 |
utils/start_server.py | TheSavageTeddy/RoxBot | 0 | 6618532 | <gh_stars>0
import sys
import subprocess
import requests
def start_server():
    # Launch the Spigot Minecraft server (a blocking Java process) from the
    # BedWars server directory; Popen returns immediately, so both children
    # run concurrently with this script.
    minecraft_server = subprocess.Popen(["java", "-jar", "spigot-1.8.8-R0.1-SNAPSHOT-latest.jar"], cwd="/home/ronan/Server/BedWarsOnly")
    # Expose the default Minecraft port (25565) through an ngrok TCP tunnel
    # in the Australian region so players can connect over the internet.
    # NOTE(review): neither handle is waited on or returned, so the caller
    # cannot shut the processes down — confirm this is intentional.
    ngrok = subprocess.Popen(["./ngrok", "tcp", "-region", "au", "25565"], cwd="/home/ronan/")
def get_ip():
    """Return the public TCP address of the first ngrok tunnel.

    Queries the local ngrok API (equivalent to:
    curl --silent http://127.0.0.1:4040/api/tunnels | jq '.tunnels[0].public_url')
    and strips the 'tcp://' scheme prefix from the reported URL.
    """
    tunnels = requests.get(url="http://127.0.0.1:4040/api/tunnels").json()
    public_url = tunnels["tunnels"][0]["public_url"]
    return public_url.replace("tcp://", "")
if __name__ == "__main__":
    # NOTE(review): only get_ip() runs here and its return value is
    # discarded; start_server() is never invoked, so this assumes the
    # server and ngrok are already running — confirm this is intentional.
    get_ip()
import subprocess
import requests
def start_server():
minecraft_server = subprocess.Popen(["java", "-jar", "spigot-1.8.8-R0.1-SNAPSHOT-latest.jar"], cwd="/home/ronan/Server/BedWarsOnly")
ngrok = subprocess.Popen(["./ngrok", "tcp", "-region", "au", "25565"], cwd="/home/ronan/")
def get_ip():
#curl --silent http://127.0.0.1:4040/api/tunnels | jq '.tunnels[0].public_url'
resp = requests.get(url="http://127.0.0.1:4040/api/tunnels")
dictResp = resp.json()
return dictResp["tunnels"][0]["public_url"].replace("tcp://", "")
if __name__ == "__main__":
get_ip() | zh | 0.208557 | #curl --silent http://127.0.0.1:4040/api/tunnels | jq '.tunnels[0].public_url' | 2.69519 | 3 |
applications/physics/cosmology/ExaGAN/DistConvGAN.py | ekmixon/lbann | 0 | 6618533 | <filename>applications/physics/cosmology/ExaGAN/DistConvGAN.py<gh_stars>0
import lbann
import lbann.modules.base
import lbann.models.resnet
def list2str(l):
    """Join the items of *l* into a single space-separated string."""
    return ' '.join(str(item) for item in l)
class ConvBNRelu(lbann.modules.Module):
    """Convolution -> Batch normalization -> ReLU

    Adapted from ResNets. Assumes image data in NCDHW format.
    """

    def __init__(self, out_channels, kernel_size, stride, padding,
                 use_bn, bn_zero_init, bn_statistics_group_size,
                 activation, name,
                 conv_weights):
        """Initialize ConvBNRelu module.

        Args:
            out_channels (int): Number of output channels, i.e. number
                of convolution filters.
            kernel_size (int): Size of convolution kernel.
            stride (int): Convolution stride.
            padding (int): Convolution padding.
            use_bn (bool): Whether or not batch normalization layers are used.
            bn_zero_init (bool): Zero-initialize batch normalization
                scale.
            bn_statistics_group_size (int): Aggregation size for batch
                normalization statistics.
            activation (lbann.Layer): The activation function.
            name (str): Module name.
            conv_weights (lbann.Weights): Pre-defined weights.
        """
        super().__init__()
        self.name = name
        # Counts forward() calls so each instantiation gets a unique layer name.
        self.instance = 0
        self.bn_statistics_group_size = bn_statistics_group_size
        self.activation = activation
        self.use_bn = use_bn
        self.conv_weights = conv_weights

        # Initialize convolution.  bias=False: when BN is used its bias term
        # makes a convolution bias redundant.
        self.conv = lbann.modules.Convolution3dModule(
            out_channels, kernel_size,
            stride=stride, padding=padding,
            bias=False, weights=self.conv_weights,
            name=self.name + '_conv')

        # Initialize batch normalization weights (scale optionally
        # zero-initialized, bias always zero-initialized).
        if self.use_bn:
            bn_scale_init = 0.0 if bn_zero_init else 1.0
            bn_scale = lbann.Weights(
                initializer=lbann.ConstantInitializer(value=bn_scale_init),
                name=self.name + '_bn_scale')
            bn_bias = lbann.Weights(
                initializer=lbann.ConstantInitializer(value=0.0),
                name=self.name + '_bn_bias')
            self.bn_weights = [bn_scale, bn_bias]

    def forward(self, x):
        """Apply conv (-> BN) (-> activation) to *x* and return the layer."""
        self.instance += 1
        layer = self.conv(x)
        if self.use_bn:
            # BN weights are shared across instances; only the layer name
            # is made unique per forward() call.
            layer = lbann.BatchNormalization(
                layer, weights=self.bn_weights,
                statistics_group_size=self.bn_statistics_group_size,
                decay=0.999,
                name='{0}_bn_instance{1}'.format(
                    self.name, self.instance))
        if self.activation:
            layer = self.activation(
                layer,
                name='{0}_activation_instance{1}'.format(
                    self.name, self.instance))
        return layer
class Deconvolution3dModule(lbann.modules.ConvolutionModule):
    """Basic block for 3D deconvolutional neural networks.

    Applies a deconvolution and a nonlinear activation function.
    This is a wrapper class for ConvolutionModule that fixes the spatial
    rank to 3 and forces transpose=True.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(3, transpose=True, *args, **kwargs)
class Exa3DGAN(lbann.modules.Module):
    """DCGAN-style 3D GAN (generator plus two weight-sharing discriminators).

    forward() wires three discriminator instances: one on real volumes, one
    on gradient-stopped generated volumes, and a frozen copy (d2) used for
    the adversarial generator loss; d1's weights are copied into d2 by a
    replace-weights callback configured outside this module.
    """

    global_count = 0  # Static counter, used for default names

    def __init__(self, input_width, input_channel, name=None):
        """Build the generator and discriminator sub-modules.

        Args:
            input_width (int): Cube edge length; must be 128, 256 or 512.
            input_channel (int): Number of channels in the input volume.
            name (str): Optional module name.
        """
        self.instance = 0
        self.name = (name if name
                     else 'Exa3DGAN{0}'.format(Exa3DGAN.global_count))

        convbnrelu = ConvBNRelu
        fc = lbann.modules.FullyConnectedModule
        conv = lbann.modules.Convolution3dModule
        bn_stats_grp_sz = -1  #0 global, 1 local

        self.input_width = input_width
        self.input_channel = input_channel

        assert self.input_width in [128, 256, 512]
        w = [input_width]*3
        w.insert(0, input_channel)
        self.input_dims = w
        # Four stride-2 discriminator convolutions shrink each spatial dim
        # by 16x, so the flattened activation size is 512 * (W/16)^3.
        # Previously hard-coded as '262144' (= 512*8^3), which is only
        # correct for input_width == 128 even though the assert above
        # admits 256 and 512 as well.
        self.d_flat_dim = 512 * (input_width // 16) ** 3

        print("INPUT W C DIM ", self.input_width, " ", self.input_channel, " ", self.input_dims, " ", list2str(self.input_dims))

        # All weights drawn from N(0, 0.02); conv should ideally use a
        # truncated normal (not available here).
        self.inits = {'dense': lbann.NormalInitializer(mean=0, standard_deviation=0.02),
                      'conv': lbann.NormalInitializer(mean=0, standard_deviation=0.02),
                      'convT': lbann.NormalInitializer(mean=0, standard_deviation=0.02)}

        # Discriminator: four stride-2 conv blocks (64->128->256->512
        # channels) followed by a single-logit dense layer.
        d_channels = [64, 128, 256, 512]
        self.d1_conv = [convbnrelu(d_channels[i], 2, 2, 0, False, bn_stats_grp_sz, False,
                                   name=self.name+'_disc1_conv'+str(i),
                                   activation=lbann.LeakyRelu,
                                   conv_weights=[lbann.Weights(initializer=self.inits['conv'])])
                        for i in range(len(d_channels))]
        self.d1_fc = fc(1, name=self.name+'_disc1_fc',
                        weights=[lbann.Weights(initializer=self.inits['dense'])])
        # Stacked discriminator: this copy will be frozen (no optimizer);
        # layers must be named so the replace-weights callback can find them.
        self.d2_conv = [convbnrelu(d_channels[i], 2, 2, 0, False, bn_stats_grp_sz, False,
                                   name=self.name+'_disc2_conv'+str(i),
                                   activation=lbann.LeakyRelu,
                                   conv_weights=[lbann.Weights(initializer=self.inits['conv'])])
                        for i in range(len(d_channels))]
        self.d2_fc = fc(1, name=self.name+'_disc2_fc',
                        weights=[lbann.Weights(initializer=self.inits['dense'])])

        # Generator: dense projection to a 512x8x8x8 seed volume, then four
        # stride-2 transposed convolutions (8 * 2^4 = 128 output edge).
        # NOTE(review): the generator always emits 128^3 volumes even
        # though the discriminators now accept 256/512 — confirm intent
        # before training at larger input widths.
        self.g_fc1 = fc(512*8*8*8, name=self.name+'_gen_fc1',
                        weights=[lbann.Weights(initializer=self.inits['dense'])])
        g_channels = [256, 128, 64]
        self.g_convT = [conv(g_channels[i], 2, stride=2, padding=0, transpose=True,
                             weights=[lbann.Weights(initializer=self.inits['convT'])])
                        for i in range(len(g_channels))]
        self.g_convT3 = conv(input_channel, 2, stride=2, padding=0, activation=lbann.Tanh,
                             name='gen_img', transpose=True,
                             weights=[lbann.Weights(initializer=self.inits['convT'])])

    def forward(self, img, z):
        """Wire one GAN step; returns (d1_real, d1_fake, d_adv, gen_img)."""
        d1_real = self.forward_discriminator1(img)  # instance 1
        gen_img = self.forward_generator(z)
        # StopGradient: the discriminator loss must not update the generator.
        d1_fake = self.forward_discriminator1(lbann.StopGradient(gen_img))  # instance 2
        d_adv = self.forward_discriminator2(gen_img)  # instance 3, frozen copy
        # d1 instances share weights; d1's weights are copied to d_adv
        # (replace-weights callback) and d_adv stays frozen.
        return d1_real, d1_fake, d_adv, gen_img

    def forward_discriminator1(self, y):
        """Trainable discriminator: 4 LeakyReLU conv blocks + dense logit."""
        y = lbann.Reshape(y, dims=list2str(self.input_dims))
        x = lbann.LeakyRelu(self.d1_conv[0](y), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d1_conv[1](x), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d1_conv[2](x), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d1_conv[3](x), negative_slope=0.2)
        # Flatten size derived from input_width instead of a hard-coded
        # '262144' (the author's @todo: infer from conv shape).
        return self.d1_fc(lbann.Reshape(x, dims=str(self.d_flat_dim)))

    def forward_discriminator2(self, y):
        """Frozen discriminator copy used for the adversarial generator loss."""
        y = lbann.Reshape(y, dims=list2str(self.input_dims))
        x = lbann.LeakyRelu(self.d2_conv[0](y), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d2_conv[1](x), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d2_conv[2](x), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d2_conv[3](x), negative_slope=0.2)
        # Flatten size derived from input_width (see forward_discriminator1).
        return self.d2_fc(lbann.Reshape(x, dims=str(self.d_flat_dim), name='d2_out_reshape'))

    def forward_generator(self, z):
        """Generator: z -> dense 512*8^3 seed -> 4 transposed convs -> tanh volume."""
        x = lbann.Relu(self.g_fc1(z))
        x = lbann.Reshape(x, dims='512 8 8 8', name='gen_zin_reshape')  # channel-first seed
        x = lbann.Relu(self.g_convT[0](x))
        x = lbann.Relu(self.g_convT[1](x))
        x = lbann.Relu(self.g_convT[2](x))
        return self.g_convT3(x)
| <filename>applications/physics/cosmology/ExaGAN/DistConvGAN.py<gh_stars>0
import lbann
import lbann.modules.base
import lbann.models.resnet
def list2str(l):
return ' '.join([str(i) for i in l])
class ConvBNRelu(lbann.modules.Module):
"""Convolution -> Batch normalization -> ReLU
Adapted from ResNets. Assumes image data in NCDHW format.
"""
def __init__(self, out_channels, kernel_size, stride, padding,
use_bn, bn_zero_init, bn_statistics_group_size,
activation, name,
conv_weights):
"""Initialize ConvBNRelu module.
Args:
out_channels (int): Number of output channels, i.e. number
of convolution filters.
kernel_size (int): Size of convolution kernel.
stride (int): Convolution stride.
padding (int): Convolution padding.
use_bn (bool): Whether or not batch normalization layers are used.
bn_zero_init (bool): Zero-initialize batch normalization
scale.
bn_statistics_group_size (int): Aggregation size for batch
normalization statistics.
activation (lbann.Layer): The activation function.
name (str): Module name.
conv_weights (lbann.Weights): Pre-defined weights.
"""
super().__init__()
self.name = name
self.instance = 0
self.bn_statistics_group_size = bn_statistics_group_size
self.activation = activation
self.use_bn = use_bn
self.conv_weights = conv_weights
# Initialize convolution
self.conv = lbann.modules.Convolution3dModule(
out_channels, kernel_size,
stride=stride, padding=padding,
bias=False, weights=self.conv_weights,
name=self.name + '_conv')
# Initialize batch normalization
if self.use_bn:
bn_scale_init = 0.0 if bn_zero_init else 1.0
bn_scale = lbann.Weights(
initializer=lbann.ConstantInitializer(value=bn_scale_init),
name=self.name + '_bn_scale')
bn_bias = lbann.Weights(
initializer=lbann.ConstantInitializer(value=0.0),
name=self.name + '_bn_bias')
self.bn_weights = [bn_scale, bn_bias]
def forward(self, x):
self.instance += 1
layer = self.conv(x)
if self.use_bn:
layer = lbann.BatchNormalization(
layer, weights=self.bn_weights,
statistics_group_size=self.bn_statistics_group_size,
decay=0.999,
name='{0}_bn_instance{1}'.format(
self.name, self.instance))
if self.activation:
layer = self.activation(
layer,
name='{0}_activation_instance{1}'.format(
self.name, self.instance))
return layer
class Deconvolution3dModule(lbann.modules.ConvolutionModule):
"""Basic block for 3D deconvolutional neural networks.
Applies a deconvolution and a nonlinear activation function.
This is a wrapper class for ConvolutionModule.
"""
def __init__(self, *args, **kwargs):
super().__init__(3, transpose=True, *args, **kwargs)
class Exa3DGAN(lbann.modules.Module):
global_count = 0 # Static counter, used for default names
def __init__(self, input_width, input_channel, name=None):
self.instance = 0
self.name = (name if name
else 'Exa3DGAN{0}'.format(Exa3DGAN.global_count))
convbnrelu = ConvBNRelu
fc = lbann.modules.FullyConnectedModule
conv = lbann.modules.Convolution3dModule
bn_stats_grp_sz = -1 #0 global, 1 local
self.input_width = input_width
self.input_channel = input_channel
assert self.input_width in [128, 256, 512]
w = [input_width]*3
w.insert(0,input_channel)
self.input_dims = w
print("INPUT W C DIM ", self.input_width, " ", self.input_channel, " ", self.input_dims , " ", list2str(self.input_dims))
#last_conv_dim = [512,8,8,8]
#Use Glorot for conv?
#initializer=lbann.GlorotUniformInitializer())]
self.inits = {'dense': lbann.NormalInitializer(mean=0,standard_deviation=0.02),
'conv': lbann.NormalInitializer(mean=0,standard_deviation=0.02), #should be truncated Normal
'convT':lbann.NormalInitializer(mean=0,standard_deviation=0.02)}
#Discriminator
d_channels = [64,128,256,512]
self.d1_conv = [convbnrelu(d_channels[i], 2, 2, 0, False, bn_stats_grp_sz, False,
name=self.name+'_disc1_conv'+str(i),
activation=lbann.LeakyRelu,
conv_weights=[lbann.Weights(initializer=self.inits['conv'])])
for i in range(len(d_channels))]
self.d1_fc = fc(1,name=self.name+'_disc1_fc',
weights=[lbann.Weights(initializer=self.inits['dense'])])
#stacked_discriminator, this will be frozen, no optimizer,
#layer has to be named for callback
self.d2_conv = [convbnrelu(d_channels[i], 2, 2, 0, False, bn_stats_grp_sz, False,
name=self.name+'_disc2_conv'+str(i),
activation=lbann.LeakyRelu,
conv_weights=[lbann.Weights(initializer=self.inits['conv'])])
for i in range(len(d_channels))]
self.d2_fc = fc(1,name=self.name+'_disc2_fc',
weights=[lbann.Weights(initializer=self.inits['dense'])])
#Generator
#3D=512*8*8*8, 2D== 512*4*4
self.g_fc1 = fc(512*8*8*8,name=self.name+'_gen_fc1',
weights=[lbann.Weights(initializer=self.inits['dense'])])
g_channels = [256,128,64]
self.g_convT = [conv(g_channels[i], 2, stride=2, padding=0, transpose=True,
weights=[lbann.Weights(initializer=self.inits['convT'])])
for i in range(len(g_channels))]
self.g_convT3 = conv(input_channel, 2, stride=2, padding=0, activation=lbann.Tanh,name='gen_img',transpose=True,
weights=[lbann.Weights(initializer=self.inits['convT'])])
def forward(self, img, z):
#description
d1_real = self.forward_discriminator1(img) #instance1
gen_img = self.forward_generator(z)
d1_fake = self.forward_discriminator1(lbann.StopGradient(gen_img)) #instance2
d_adv = self.forward_discriminator2(gen_img) #instance 3 //need to freeze
#d1s share weights, d1_w is copied to d_adv (through replace weight callback) and freeze
return d1_real, d1_fake, d_adv,gen_img
def forward_discriminator1(self,y):
y = lbann.Reshape(y, dims=list2str(self.input_dims))
x = lbann.LeakyRelu(self.d1_conv[0](y), negative_slope=0.2)
x = lbann.LeakyRelu(self.d1_conv[1](x), negative_slope=0.2)
x = lbann.LeakyRelu(self.d1_conv[2](x), negative_slope=0.2)
x = lbann.LeakyRelu(self.d1_conv[3](x), negative_slope=0.2)
#@todo, get rid of reshape, infer from conv shape
#return self.d1_fc(lbann.Reshape(x,dims='32768',device='CPU'))
return self.d1_fc(lbann.Reshape(x,dims='262144'))
def forward_discriminator2(self,y):
y = lbann.Reshape(y, dims=list2str(self.input_dims))
x = lbann.LeakyRelu(self.d2_conv[0](y), negative_slope=0.2)
x = lbann.LeakyRelu(self.d2_conv[1](x), negative_slope=0.2)
x = lbann.LeakyRelu(self.d2_conv[2](x), negative_slope=0.2)
x = lbann.LeakyRelu(self.d2_conv[3](x), negative_slope=0.2)
#return self.d2_fc(lbann.Reshape(x,dims='32768',name='d2_out_reshape', device='CPU'))
#@todo, get rid of reshape, infer from conv shape
return self.d2_fc(lbann.Reshape(x,dims='262144',name='d2_out_reshape'))
def forward_generator(self,z):
#x = lbann.Relu(lbann.BatchNormalization(self.g_fc1(z),decay=0.9,scale_init=1.0,epsilon=1e-5))
x = lbann.Relu(self.g_fc1(z))
#x = lbann.Reshape(x, dims='512 8 8') #channel first
x = lbann.Reshape(x, dims='512 8 8 8',name='gen_zin_reshape') #new
#x = lbann.Relu(lbann.BatchNormalization(self.g_convT[0](x),decay=0.9,scale_init=1.0,epsilon=1e-5))
#x = lbann.Relu(lbann.BatchNormalization(self.g_convT[1](x),decay=0.9,scale_init=1.0,epsilon=1e-5))
#x = lbann.Relu(lbann.BatchNormalization(self.g_convT[2](x),decay=0.9,scale_init=1.0,epsilon=1e-5))
x = lbann.Relu(self.g_convT[0](x))
x = lbann.Relu(self.g_convT[1](x))
x = lbann.Relu(self.g_convT[2](x))
return self.g_convT3(x)
| en | 0.521192 | Convolution -> Batch normalization -> ReLU Adapted from ResNets. Assumes image data in NCDHW format. Initialize ConvBNRelu module. Args: out_channels (int): Number of output channels, i.e. number of convolution filters. kernel_size (int): Size of convolution kernel. stride (int): Convolution stride. padding (int): Convolution padding. use_bn (bool): Whether or not batch normalization layers are used. bn_zero_init (bool): Zero-initialize batch normalization scale. bn_statistics_group_size (int): Aggregation size for batch normalization statistics. activation (lbann.Layer): The activation function. name (str): Module name. conv_weights (lbann.Weights): Pre-defined weights. # Initialize convolution # Initialize batch normalization Basic block for 3D deconvolutional neural networks. Applies a deconvolution and a nonlinear activation function. This is a wrapper class for ConvolutionModule. # Static counter, used for default names #0 global, 1 local #last_conv_dim = [512,8,8,8] #Use Glorot for conv? 
#initializer=lbann.GlorotUniformInitializer())] #should be truncated Normal #Discriminator #stacked_discriminator, this will be frozen, no optimizer, #layer has to be named for callback #Generator #3D=512*8*8*8, 2D== 512*4*4 #description #instance1 #instance2 #instance 3 //need to freeze #d1s share weights, d1_w is copied to d_adv (through replace weight callback) and freeze #@todo, get rid of reshape, infer from conv shape #return self.d1_fc(lbann.Reshape(x,dims='32768',device='CPU')) #return self.d2_fc(lbann.Reshape(x,dims='32768',name='d2_out_reshape', device='CPU')) #@todo, get rid of reshape, infer from conv shape #x = lbann.Relu(lbann.BatchNormalization(self.g_fc1(z),decay=0.9,scale_init=1.0,epsilon=1e-5)) #x = lbann.Reshape(x, dims='512 8 8') #channel first #new #x = lbann.Relu(lbann.BatchNormalization(self.g_convT[0](x),decay=0.9,scale_init=1.0,epsilon=1e-5)) #x = lbann.Relu(lbann.BatchNormalization(self.g_convT[1](x),decay=0.9,scale_init=1.0,epsilon=1e-5)) #x = lbann.Relu(lbann.BatchNormalization(self.g_convT[2](x),decay=0.9,scale_init=1.0,epsilon=1e-5)) | 2.529088 | 3 |
testdata/gen_alu_tests.py | racerxdl/biggateboy | 17 | 6618534 | #!/usr/bin/env python3
# ALU flag register bit positions (matches the Game Boy F-register layout:
# Z at bit 3, N/Sub at bit 2, H at bit 1, C at bit 0 of this nibble).
FlagZero = 1 << 3
FlagSub = 1 << 2
FlagHalfCarry = 1 << 1
FlagCarry = 1 << 0

# ALU Operations
# Opcode encodings for the ALU under test: 0x00-0x07 are the 8-bit
# arithmetic/logic ops, 0x10-0x17 rotate/accumulator ops, 0x24-0x27
# shifts and nibble swap, 0x30/0x40/0x50 bit test/reset/set, and 0x60
# the 16-bit add.
OpADD = 0x00
OpADC = 0x01
OpSUB = 0x02
OpSBC = 0x03
OpAND = 0x04
OpXOR = 0x05
OpOR = 0x06
OpCP = 0x07

OpRLC = 0x10
OpRRC = 0x11
OpRL = 0x12
OpRR = 0x13
OpDAA = 0x14
OpCPL = 0x15
OpSCF = 0x16
OpCCF = 0x17

OpSLA = 0x24
OpSRA = 0x25
OpSRL = 0x26
OpSWAP = 0x27

OpBIT = 0x30
OpRES = 0x40
OpSET = 0x50

OpADD16 = 0x60
def Test(op, x=0, y=0, f=0, o=0, fresult=0):
    """Build one ALU test vector.

    op:      ALU opcode selector (one of the Op* constants)
    x, y:    operand values
    f:       input flags nibble
    o:       expected result value
    fresult: expected output flags nibble
    """
    return dict(op=op, x=x, y=y, f=f, o=o, fresult=fresult)
# Hand-written vectors for the arithmetic/logic ops, followed by
# generated vectors for the bit-manipulation ops (BIT/RES/SET).
ALUTests = [
    # ADD
    Test(OpADD, x=1, y=2, f=0, o=3, fresult=0),                                         # [ 0] no carry, no half carry
    Test(OpADD, x=15, y=2, f=0, o=17, fresult=FlagHalfCarry),                           # [ 1] half carry only
    Test(OpADD, x=65535, y=2, f=0, o=1, fresult=FlagCarry | FlagHalfCarry),             # [ 2] carry + half carry
    Test(OpADD, x=65535, y=1, f=0, o=0, fresult=FlagZero | FlagCarry | FlagHalfCarry),  # [ 3] carry + half carry + zero
    # SUB
    Test(OpSUB, x=1, y=2, f=0, o=65535, fresult=FlagCarry | FlagHalfCarry | FlagSub),   # [ 4] borrow wraps
    Test(OpSUB, x=16, y=2, f=0, o=14, fresult=FlagHalfCarry | FlagSub),                 # [ 5] half borrow only
    Test(OpSUB, x=65535, y=2, f=0, o=65533, fresult=FlagSub),                           # [ 6] no borrow
    Test(OpSUB, x=1, y=1, f=0, o=0, fresult=FlagZero | FlagSub),                        # [ 7] zero
    # ADC — carry-in variants follow the plain ones
    Test(OpADC, x=1, y=2, f=0, o=3, fresult=0),                                         # [ 8]
    Test(OpADC, x=15, y=2, f=0, o=17, fresult=FlagHalfCarry),                           # [ 9]
    Test(OpADC, x=65535, y=2, f=0, o=1, fresult=FlagCarry | FlagHalfCarry),             # [10]
    Test(OpADC, x=1, y=2, f=1, o=4, fresult=0),                                         # [11] carry in
    Test(OpADC, x=13, y=2, f=1, o=16, fresult=FlagHalfCarry),                           # [12] carry in
    Test(OpADC, x=65535, y=2, f=1, o=2, fresult=FlagCarry | FlagHalfCarry),             # [13] carry in + out
    Test(OpADC, x=65535, y=0, f=1, o=0, fresult=FlagZero | FlagCarry | FlagHalfCarry),  # [14] carry in + out + zero
    # SBC
    Test(OpSBC, x=1, y=2, f=0, o=65535, fresult=FlagCarry | FlagHalfCarry | FlagSub),   # [15]
    Test(OpSBC, x=16, y=2, f=0, o=14, fresult=FlagHalfCarry | FlagSub),                 # [16]
    Test(OpSBC, x=65535, y=2, f=0, o=65533, fresult=FlagSub),                           # [17]
    Test(OpSBC, x=1, y=2, f=1, o=65534, fresult=FlagCarry | FlagHalfCarry | FlagSub),   # [18] carry in
    Test(OpSBC, x=16, y=2, f=1, o=13, fresult=FlagHalfCarry | FlagSub),                 # [19] carry in
    Test(OpSBC, x=65535, y=2, f=1, o=65532, fresult=FlagSub),                           # [20] carry in
    Test(OpSBC, x=1, y=0, f=1, o=0, fresult=FlagSub | FlagZero),                        # [21] carry in, zero
    # OR — expected results show only the low byte is kept (65535|0 -> 255)
    Test(OpOR, x=0, y=0, f=0, o=0, fresult=FlagZero),                                   # [22] zero
    Test(OpOR, x=65535, y=0, f=0, o=255, fresult=0),                                    # [23]
    Test(OpOR, x=0, y=65535, f=0, o=255, fresult=0),                                    # [24]
    Test(OpOR, x=0, y=65280, f=0, o=0, fresult=FlagZero),                               # [25]
    Test(OpOR, x=65280, y=0, f=0, o=0, fresult=FlagZero),                               # [26]
]
# BIT n: result mirrors x; Zero is set when bit n of x is clear.
# x = 170 = 0b10101010, so even bit indices read as zero.
for bit in range(8):
    ALUTests.extend([
        Test(OpBIT + bit, x=255, y=0, f=0, o=255, fresult=FlagHalfCarry),
        Test(OpBIT + bit, x=0, y=0, f=0, o=0, fresult=FlagZero | FlagHalfCarry),
        Test(OpBIT + bit, x=170, y=0, f=0, o=170,
             fresult=(FlagZero | FlagHalfCarry) if bit % 2 == 0 else FlagHalfCarry),
    ])
# RES n clears bit n of 0xFF; SET n sets bit n of 0x00; expected flags all clear.
for bit in range(8):
    ALUTests.append(Test(OpRES + bit, x=255, y=0, f=0, o=0xFF & ~(1 << bit), fresult=0))
for bit in range(8):
    ALUTests.append(Test(OpSET + bit, x=0, y=0, f=0, o=1 << bit, fresult=0))
def PackTest(op, x, y, f, o, fresult):
    """
    Serialize one test vector as a 64-character ASCII bit string:

        OP(8), X(16), Y(16), F(4), O(16), FResult(4) == Total(64)

    NOTE: the previous docstring claimed OP(5) plus 3 padding bits, but the
    format specs below have always packed an 8-bit opcode and no padding —
    the docstring was stale, not the code.
    """
    # This doesnt need to be fast, so build the bit string piece by piece.
    fields = (
        (op,      "08b"),   # Operation    [ 8 bits ]
        (x,       "016b"),  # Operand X    [16 bits ]
        (y,       "016b"),  # Operand Y    [16 bits ]
        (f,       "04b"),   # Input flags  [ 4 bits ]
        (o,       "016b"),  # Result       [16 bits ]
        (fresult, "04b"),   # Result flags [ 4 bits ]
    )
    packed = "".join(format(value, spec) for value, spec in fields)
    return packed.encode("ascii")
f = open("alu_tests.mem", "wb")
for i in range(len(ALUTests)):
test = ALUTests[i]
f.write(PackTest(**test))
f.write(b"\r\n")
print("Number of tests: %d" % len(ALUTests)) | #!/usr/bin/env python3
FlagZero = 1 << 3
FlagSub = 1 << 2
FlagHalfCarry = 1 << 1
FlagCarry = 1 << 0
# ALU Operations
OpADD = 0x00
OpADC = 0x01
OpSUB = 0x02
OpSBC = 0x03
OpAND = 0x04
OpXOR = 0x05
OpOR = 0x06
OpCP = 0x07
OpRLC = 0x10
OpRRC = 0x11
OpRL = 0x12
OpRR = 0x13
OpDAA = 0x14
OpCPL = 0x15
OpSCF = 0x16
OpCCF = 0x17
OpSLA = 0x24
OpSRA = 0x25
OpSRL = 0x26
OpSWAP = 0x27
OpBIT = 0x30
OpRES = 0x40
OpSET = 0x50
OpADD16 = 0x60
def Test(op, x=0, y=0, f=0, o=0, fresult=0):
return {"op":op,"x": x,"y": y,"f":f,"o": o,"fresult" : fresult}
ALUTests = [
# ADD
Test(OpADD, x= 1, y= 2, f= 0, o= 3, fresult=0), # [ 0] No Carry, No Half Carry
Test(OpADD, x= 15, y= 2, f= 0, o= 17, fresult=FlagHalfCarry), # [ 1] No Carry, Half Carry
Test(OpADD, x=65535, y= 2, f= 0, o= 1, fresult=FlagCarry | FlagHalfCarry), # [ 2] Carry, Half Carry
Test(OpADD, x=65535, y= 1, f= 0, o= 0, fresult=FlagZero | FlagCarry | FlagHalfCarry), # [ 3] Carry, Half Carry, Zero
# SUB
Test(OpSUB, x= 1, y= 2, f= 0, o= 65535, fresult=FlagCarry | FlagHalfCarry | FlagSub), # [ 4] Carry, Half Carry
Test(OpSUB, x= 16, y= 2, f= 0, o= 14, fresult=FlagHalfCarry| FlagSub), # [ 5] No Carry, Half Carry
Test(OpSUB, x=65535, y= 2, f= 0, o= 65533, fresult=FlagSub), # [ 6] No Carry, No Half Carry
Test(OpSUB, x= 1, y= 1, f= 0, o= 0, fresult=FlagZero | FlagSub), # [ 7] Zero
# ADC
Test(OpADC, x= 1, y= 2, f= 0, o= 3, fresult=0), # [ 8] No Carry Input, No Carry Output, No Half Carry
Test(OpADC, x= 15, y= 2, f= 0, o= 17, fresult=FlagHalfCarry), # [ 9] No Carry Input, No Carry Output, Half Carry
Test(OpADC, x=65535, y= 2, f= 0, o= 1, fresult=FlagCarry | FlagHalfCarry), # [ 10] No Carry Input, Carry Output, Half Carry
Test(OpADC, x= 1, y= 2, f= 1, o= 4, fresult=0), # [ 11] Carry Input, No Carry Output, No Half Carry
Test(OpADC, x= 13, y= 2, f= 1, o= 16, fresult=FlagHalfCarry), # [ 12] Carry Input, No Carry Output, Half Carry
Test(OpADC, x=65535, y= 2, f= 1, o= 2, fresult=FlagCarry | FlagHalfCarry), # [ 13] Carry Input, Carry Output, Half Carry
Test(OpADC, x=65535, y= 0, f= 1, o= 0, fresult=FlagZero | FlagCarry | FlagHalfCarry), # [ 14] Carry Input, Carry Output, Half Carry Zero
# SBC
Test(OpSBC, x= 1, y= 2, f= 0, o= 65535, fresult=FlagCarry | FlagHalfCarry | FlagSub), # [ 15] No Carry Input, No Carry Output, No Half Carry
Test(OpSBC, x= 16, y= 2, f= 0, o= 14, fresult=FlagHalfCarry | FlagSub), # [ 16] No Carry Input, No Carry Output, Half Carry
Test(OpSBC, x=65535, y= 2, f= 0, o= 65533, fresult=FlagSub), # [ 17] No Carry Input, Carry Output, Half Carry
Test(OpSBC, x= 1, y= 2, f= 1, o= 65534, fresult=FlagCarry | FlagHalfCarry | FlagSub), # [ 18] Carry Input, No Carry Output, No Half Carry
Test(OpSBC, x= 16, y= 2, f= 1, o= 13, fresult=FlagHalfCarry | FlagSub), # [ 19] Carry Input, No Carry Output, Half Carry
Test(OpSBC, x=65535, y= 2, f= 1, o= 65532, fresult=FlagSub), # [ 20] Carry Input
Test(OpSBC, x= 1, y= 0, f= 1, o= 0, fresult=FlagSub | FlagZero), # [ 21] Carry Input, Zero
# OR
Test(OpOR, x= 0, y= 0, f= 0, o= 0, fresult=FlagZero), # [ 22] Zero
Test(OpOR, x=65535, y= 0, f= 0, o= 255, fresult=0), # [ 23]
Test(OpOR, x= 0, y=65535, f= 0, o= 255, fresult=0), # [ 24]
Test(OpOR, x= 0, y=65280, f= 0, o= 0, fresult=FlagZero), # [ 25]
Test(OpOR, x=65280, y= 0, f= 0, o= 0, fresult=FlagZero), # [ 26]
]
for i in range(8):
ALUTests.append( Test(OpBIT + i, x= 255, y= 0, f= 0, o= 255, fresult=FlagHalfCarry))
ALUTests.append( Test(OpBIT + i, x= 0, y= 0, f= 0, o= 0, fresult=FlagZero|FlagHalfCarry))
if i % 2 == 0:
ALUTests.append(Test(OpBIT + i, x= 170, y= 0, f= 0, o= 170, fresult=FlagZero|FlagHalfCarry))
else:
ALUTests.append(Test(OpBIT + i, x= 170, y= 0, f= 0, o= 170, fresult=FlagHalfCarry))
for i in range(8):
result = 0xFF & (~(1 << i))
ALUTests.append( Test(OpRES + i, x= 255, y= 0, f= 0, o= result, fresult=0))
for i in range(8):
result = 1 << i
ALUTests.append( Test(OpSET + i, x= 0, y= 0, f= 0, o= result, fresult=0))
def PackTest(op, x, y, f, o, fresult):
'''
OP(5), X(16), Y(16), F(4), O(16), FResult(4), Padding(3) == Total(64)
'''
# This doesnt need to be fast, so fuck it
packedString = ""
packedString += format(op , "08b" ) # Operation [ 8 bits ]
packedString += format(x , "016b") # Operator X [ 16 bits ]
packedString += format(y , "016b") # Operator Y [ 16 bits ]
packedString += format(f , "04b" ) # Input Flag [ 4 bits ]
packedString += format(o , "016b") # Result [ 16 bits ]
packedString += format(fresult, "04b" ) # Result Flag [ 4 bits ]
return packedString.encode("ascii")
f = open("alu_tests.mem", "wb")
for i in range(len(ALUTests)):
test = ALUTests[i]
f.write(PackTest(**test))
f.write(b"\r\n")
print("Number of tests: %d" % len(ALUTests)) | en | 0.56419 | #!/usr/bin/env python3 # ALU Operations # ADD # [ 0] No Carry, No Half Carry # [ 1] No Carry, Half Carry # [ 2] Carry, Half Carry # [ 3] Carry, Half Carry, Zero # SUB # [ 4] Carry, Half Carry # [ 5] No Carry, Half Carry # [ 6] No Carry, No Half Carry # [ 7] Zero # ADC # [ 8] No Carry Input, No Carry Output, No Half Carry # [ 9] No Carry Input, No Carry Output, Half Carry # [ 10] No Carry Input, Carry Output, Half Carry # [ 11] Carry Input, No Carry Output, No Half Carry # [ 12] Carry Input, No Carry Output, Half Carry # [ 13] Carry Input, Carry Output, Half Carry # [ 14] Carry Input, Carry Output, Half Carry Zero # SBC # [ 15] No Carry Input, No Carry Output, No Half Carry # [ 16] No Carry Input, No Carry Output, Half Carry # [ 17] No Carry Input, Carry Output, Half Carry # [ 18] Carry Input, No Carry Output, No Half Carry # [ 19] Carry Input, No Carry Output, Half Carry # [ 20] Carry Input # [ 21] Carry Input, Zero # OR # [ 22] Zero # [ 23] # [ 24] # [ 25] # [ 26] OP(5), X(16), Y(16), F(4), O(16), FResult(4), Padding(3) == Total(64) # This doesnt need to be fast, so fuck it # Operation [ 8 bits ] # Operator X [ 16 bits ] # Operator Y [ 16 bits ] # Input Flag [ 4 bits ] # Result [ 16 bits ] # Result Flag [ 4 bits ] | 2.529042 | 3 |
odoo-13.0/doc/_extensions/autojsdoc/parser/jsdoc.py | VaibhavBhujade/Blockchain-ERP-interoperability | 0 | 6618535 | # -*- coding: utf-8 -*-
import re
import collections
import pyjsdoc
def strip_stars(doc_comment):
    """
    Version of jsdoc.strip_stars which always removes 1 space after * if
    one is available.

    ``doc_comment`` is the full comment text including the leading ``/**``
    and trailing ``*/`` delimiters; they are sliced off before the leading
    stars of each line are stripped.
    """
    # Raw string: the original non-raw '\n\s*?\*[\t ]?' relied on Python
    # passing the unknown escape '\s' through unchanged, which raises a
    # SyntaxWarning (eventually an error) on modern Python.
    return re.sub(r'\n\s*?\*[\t ]?', '\n', doc_comment[3:-2]).strip()
class ParamDoc(pyjsdoc.ParamDoc):
    """
    Replace ParamDoc because FunctionDoc doesn't properly handle optional
    params or default values (TODO: or compounds) if guessed_params is used
    => augment paramdoc with "required" and "default" items to clean up name
    """
    def __init__(self, text):
        super(ParamDoc, self).__init__(text)
        # param name and doc can be separated by - or :, strip it
        self.doc = self.doc.strip().lstrip('-:').lstrip()
        # optional: True when the JSDoc text marked the param as [name]
        self.optional = False
        # default: text after '=' when the JSDoc text used name=default
        self.default = None
        # there may not be a space between the param name and the :, in which
        # case the : gets attached to the name, strip *again*
        # TODO: formal @param/@property parser to handle this crap properly once and for all
        self.name = self.name.strip().rstrip(':')
        # JSDoc spells optional parameters as "[name]" or "[name=default]"
        if self.name.startswith('['):
            self.name = self.name.strip('[]')
            self.optional = True
        if '=' in self.name:
            # split on the *last* '=' so defaults containing '=' keep most of it
            self.name, self.default = self.name.rsplit('=', 1)
    def to_dict(self):
        # extend the upstream dict with the two attributes added above
        d = super(ParamDoc, self).to_dict()
        d['optional'] = self.optional
        d['default'] = self.default
        return d
# Monkeypatch pyjsdoc so every ParamDoc it creates internally uses this
# augmented subclass.
pyjsdoc.ParamDoc = ParamDoc
class CommentDoc(pyjsdoc.CommentDoc):
    """Base class for all doc objects in this module.

    Adds unified name resolution on top of pyjsdoc's CommentDoc: subclasses
    set ``namekey`` to the @tag that names them (e.g. 'class', 'module').
    """
    # Sentinel: a fresh object() can never be a key in a parsed comment, so
    # for the base class the name lookup falls through to 'name' /
    # 'guessed_name' (assumes pyjsdoc's __getitem__ yields a falsy value for
    # missing keys — TODO confirm).
    namekey = object()
    is_constructor = False
    @property
    def name(self):
        # explicit tag (namekey) wins, then @name, then the parser's guess
        return self[self.namekey] or self['name'] or self['guessed_name']
    def set_name(self, name):
        # not great...
        if name != '<exports>':
            self.parsed['guessed_name'] = name
    @property
    def is_private(self):
        return 'private' in self.parsed
    def to_dict(self):
        d = super(CommentDoc, self).to_dict()
        d['name'] = self.name
        return d
    # don't resolve already resolved docs (e.g. a literal dict being
    # include-ed in two different classes because I don't even care anymore
    def become(self, modules):
        return self
class PropertyDoc(CommentDoc):
    """Documentation for a single (namespace or class) property."""
    @classmethod
    def from_param(cls, s, sourcemodule=None):
        """Build a PropertyDoc from @param/@property tag text ``s``."""
        fields = ParamDoc(s).to_dict()
        fields['sourcemodule'] = sourcemodule
        return cls(fields)
    @property
    def type(self):
        # tag text stores types as "{Type}"; expose them without the braces
        return self['type'].strip('{}')
    def to_dict(self):
        out = super(PropertyDoc, self).to_dict()
        out.update(type=self.type, is_private=self.is_private)
        return out
class InstanceDoc(CommentDoc):
    """Documentation for an instance of a class; 'cls' names the class."""
    @property
    def cls(self):
        return self['cls']
    def to_dict(self):
        d = super(InstanceDoc, self).to_dict()
        d['cls'] = self.cls
        return d
class LiteralDoc(CommentDoc):
    """Documentation for a literal value (bool/number/string/...)."""
    @property
    def type(self):
        # an explicit @type tag always wins
        if self['type']:
            return self['type']
        # otherwise derive the JS type name from the Python value's exact
        # type; anything unrecognised maps to the empty string
        names = {bool: 'Boolean', float: 'Number', type(u''): 'String'}
        return names.get(type(self['value']), '')
    @property
    def value(self):
        return self['value']
    def to_dict(self):
        d = super(LiteralDoc, self).to_dict()
        d['type'] = self.type
        d['value'] = self.value
        return d
class FunctionDoc(CommentDoc):
    """Documentation for a function, named by the @function tag."""
    type = 'Function'
    namekey = 'function'
    @property
    def is_constructor(self):
        # by convention the `init` method acts as the class constructor
        return self.name == 'init'
    @property
    def params(self):
        tag_texts = self.get_as_list('param')
        # turns out guessed_params is *almost* (?) always set to a list,
        # if empty list of guessed params fall back to @param tags
        if not self['guessed_params']:
            # only get "primary" params (no "." in name)
            return [
                p for p in map(ParamDoc, tag_texts)
                if '.' not in p.name
            ]
        else:
            param_dict = {}
            for text in tag_texts:
                param = ParamDoc(text)
                param_dict[param.name] = param
            # '{} ' + name yields "{} foo": an untyped, undocumented param
            # for any guessed name that has no matching @param tag
            return [param_dict.get(name) or ParamDoc('{} ' + name)
                    for name in self.get('guessed_params')]
    @property
    def return_val(self):
        ret = self.get('return') or self.get('returns')
        # renamed from `type`: the original shadowed the builtin
        ret_type = self.get('type')
        if '{' in ret and '}' in ret:
            if '} ' not in ret:
                # Ensure that name is empty
                ret = re.sub(r'\}\s*', '} ', ret)
            return ParamDoc(ret)
        if ret and ret_type:
            return ParamDoc('{%s} %s' % (ret_type, ret))
        return ParamDoc(ret)
    def to_dict(self):
        d = super(FunctionDoc, self).to_dict()
        d['name'] = self.name
        d['params'] = [param.to_dict() for param in self.params]
        d['return_val'] = self.return_val.to_dict()
        return d
class NSDoc(CommentDoc):
    """Documentation for a namespace: a named, ordered bag of members."""
    namekey = 'namespace'
    def __init__(self, parsed_comment):
        super(NSDoc, self).__init__(parsed_comment)
        # insertion-ordered so properties render in declaration order
        self.members = collections.OrderedDict()
    def add_member(self, name, member):
        """
        :type name: str
        :type member: CommentDoc
        """
        member.set_name(name)
        self.members[name] = member
    @property
    def properties(self):
        # explicit @property tags take precedence over collected members
        if self.get('property'):
            return [
                (p.name, p)
                for p in (
                    PropertyDoc.from_param(p, self['sourcemodule'])
                    for p in self.get_as_list('property')
                )
            ]
        # fall back to members added via add_member(), then to any
        # pre-parsed '_members' stashed in the comment dict
        return list(self.members.items()) or self['_members'] or []
    def has_property(self, name):
        return self.get_property(name) is not None
    def get_property(self, name):
        # first property whose name matches, or None
        return next((p for n, p in self.properties if n == name), None)
    def to_dict(self):
        d = super(NSDoc, self).to_dict()
        d['properties'] = [(n, p.to_dict()) for n, p in self.properties]
        return d
class MixinDoc(NSDoc):
    """Documentation for a @mixin: behaves exactly like a namespace."""
    namekey = 'mixin'
class ModuleDoc(NSDoc):
    """Documentation for an AMD module (presumably one per source file —
    confirm against the surrounding extension)."""
    namekey = 'module'
    def __init__(self, parsed_comment):
        super(ModuleDoc, self).__init__(parsed_comment)
        #: callbacks to run with the modules mapping once every module is resolved
        self._post_process = []
    def post_process(self, modules):
        """Run all queued callbacks with the fully-resolved modules mapping."""
        for callback in self._post_process:
            callback(modules)
    @property
    def module(self):
        return self  # lol
    @property
    def dependencies(self):
        """
        Returns the immediate dependencies of a module (only those explicitly
        declared/used).
        """
        return self.get('dependency', None) or set()
    @property
    def exports(self):
        """
        Returns the actual item exported from the AMD module, can be a
        namespace, a class, a function, an instance, ...
        """
        return self.get_property('<exports>')
    def to_dict(self):
        # local renamed from `vars`, which shadowed the builtin
        d = super(ModuleDoc, self).to_dict()
        d['dependencies'] = self.dependencies
        d['exports'] = self.exports
        return d
    def __str__(self):
        s = super().__str__()
        if self['sourcefile']:
            s += " in file " + self['sourcefile']
        return s
class ClassDoc(NSDoc):
    """Documentation for a class: a namespace plus ctor/superclass/mixins."""
    namekey = 'class'
    @property
    def constructor(self):
        # by convention the constructor is the `init` method
        return self.get_property('init')
    @property
    def superclass(self):
        return self['extends'] or self['base']
    def get_property(self, method_name):
        # `extend` exists on every class even when undocumented, so
        # synthesize its doc on the fly
        if method_name == 'extend':
            return FunctionDoc({
                'doc': 'Create subclass for %s' % self.name,
                'guessed_function': 'extend',
            })
        # FIXME: should ideally be a proxy namespace
        if method_name == 'prototype':
            return self
        # own properties first, then walk up the superclass chain
        return super(ClassDoc, self).get_property(method_name)\
            or (self.superclass and self.superclass.get_property(method_name))
    @property
    def mixins(self):
        # names listed in @mixes tags
        return self.get_as_list('mixes')
    def to_dict(self):
        d = super(ClassDoc, self).to_dict()
        d['mixins'] = self.mixins
        return d
# Sentinel default (appears unused in this file chunk — presumably kept for
# external callers; verify before removing).
DEFAULT = object()
class UnknownNS(NSDoc):
    """Placeholder namespace for names referenced but never documented;
    property lookups fabricate nested UnknownNS objects so traversal of a
    dotted path never fails."""
    params = () # TODO: log warning when (somehow) trying to access / document an unknown object as ctor?
    def get_property(self, name):
        # fabricate a child namespace with a dotted name when unknown
        return super(UnknownNS, self).get_property(name) or \
            UnknownNS({'name': '{}.{}'.format(self.name, name)})
    def __getitem__(self, item):
        # route property-like lookups through get_property so they also
        # fabricate placeholders
        if self._probably_not_property(item):
            return super().__getitem__(item)
        return self.get_property(item)
    def _probably_not_property(self, item):
        # heuristic: machinery keys, private/guessed keys and anything
        # actually present in the parsed comment go to the base lookup
        return (
            not isinstance(item, str)
            or item in (self.namekey, 'name', 'params')
            or item.startswith(('_', 'guessed_'))
            or item in self.parsed
        )
class Unknown(CommentDoc):
    """Marker doc for objects whose definition could not be resolved;
    remembers the source expression it came from for diagnostics."""
    @classmethod
    def from_(cls, source):
        """Return a factory producing Unknown docs tagged with ``source``."""
        def build(parsed):
            doc = cls(parsed)
            doc.parsed['source'] = source
            return doc
        return build
    @property
    def name(self):
        return '%s %s' % (self['name'], self['source'])
    @property
    def type(self):
        return "Unknown"
    def get_property(self, p):
        # derive a nested Unknown carrying this object's name as its source
        derived = dict(self.parsed)
        derived['source'] = self.name
        derived['name'] = p + '<'
        return Unknown(derived)
def parse_comments(comments, doctype=None):
    """Parse a list of comment dicts (each with a 'value' key) into a doc
    object.

    ``doctype`` may pre-select the doc class from the AST node the comment
    was attached to ('FunctionExpression' / 'ObjectExpression'); explicit
    @tags in the comment text can still override it via guess().
    """
    # find last comment which starts with a * (i.e. a /** jsdoc block)
    docstring = next((
        c['value']
        for c in reversed(comments or [])
        if c['value'].startswith(u'*')
    ), None) or u""
    # \n prefix necessary otherwise parse_comment fails to take first
    # block comment parser strips delimiters, but strip_stars fails without
    # them
    extract = '\n' + strip_stars('/*' + docstring + '\n*/')
    parsed = pyjsdoc.parse_comment(extract, u'')
    if doctype == 'FunctionExpression':
        doctype = FunctionDoc
    elif doctype == 'ObjectExpression' or doctype is None:
        doctype = guess
    if doctype is guess:
        return doctype(parsed)
    # in case a specific doctype is given, allow overriding it anyway
    return guess(parsed, default=doctype)
def guess(parsed, default=UnknownNS):
    """Pick the doc class for a parsed comment from its @tags.

    The first matching tag wins, in the same precedence order as before;
    ``default`` is used when no known tag is present.
    """
    dispatch = (
        ('class', ClassDoc),
        ('function', FunctionDoc),
        ('mixin', MixinDoc),
        ('namespace', NSDoc),
        ('module', ModuleDoc),
        ('type', PropertyDoc),
    )
    for tag, doc_cls in dispatch:
        if tag in parsed:
            return doc_cls(parsed)
    return default(parsed)
| # -*- coding: utf-8 -*-
import re
import collections
import pyjsdoc
def strip_stars(doc_comment):
"""
Version of jsdoc.strip_stars which always removes 1 space after * if
one is available.
"""
return re.sub('\n\s*?\*[\t ]?', '\n', doc_comment[3:-2]).strip()
class ParamDoc(pyjsdoc.ParamDoc):
"""
Replace ParamDoc because FunctionDoc doesn't properly handle optional
params or default values (TODO: or compounds) if guessed_params is used
=> augment paramdoc with "required" and "default" items to clean up name
"""
def __init__(self, text):
super(ParamDoc, self).__init__(text)
# param name and doc can be separated by - or :, strip it
self.doc = self.doc.strip().lstrip('-:').lstrip()
self.optional = False
self.default = None
# there may not be a space between the param name and the :, in which
# case the : gets attached to the name, strip *again*
# TODO: formal @param/@property parser to handle this crap properly once and for all
self.name = self.name.strip().rstrip(':')
if self.name.startswith('['):
self.name = self.name.strip('[]')
self.optional = True
if '=' in self.name:
self.name, self.default = self.name.rsplit('=', 1)
def to_dict(self):
d = super(ParamDoc, self).to_dict()
d['optional'] = self.optional
d['default'] = self.default
return d
pyjsdoc.ParamDoc = ParamDoc
class CommentDoc(pyjsdoc.CommentDoc):
namekey = object()
is_constructor = False
@property
def name(self):
return self[self.namekey] or self['name'] or self['guessed_name']
def set_name(self, name):
# not great...
if name != '<exports>':
self.parsed['guessed_name'] = name
@property
def is_private(self):
return 'private' in self.parsed
def to_dict(self):
d = super(CommentDoc, self).to_dict()
d['name'] = self.name
return d
# don't resolve already resolved docs (e.g. a literal dict being
# include-ed in two different classes because I don't even care anymore
def become(self, modules):
return self
class PropertyDoc(CommentDoc):
@classmethod
def from_param(cls, s, sourcemodule=None):
parsed = ParamDoc(s).to_dict()
parsed['sourcemodule'] = sourcemodule
return cls(parsed)
@property
def type(self):
return self['type'].strip('{}')
def to_dict(self):
d = super(PropertyDoc, self).to_dict()
d['type'] = self.type
d['is_private'] = self.is_private
return d
class InstanceDoc(CommentDoc):
@property
def cls(self):
return self['cls']
def to_dict(self):
return dict(super(InstanceDoc, self).to_dict(), cls=self.cls)
class LiteralDoc(CommentDoc):
@property
def type(self):
if self['type']:
return self['type']
valtype = type(self['value'])
if valtype is bool:
return 'Boolean'
elif valtype is float:
return 'Number'
elif valtype is type(u''):
return 'String'
return ''
@property
def value(self):
return self['value']
def to_dict(self):
d = super(LiteralDoc, self).to_dict()
d['type'] = self.type
d['value'] = self.value
return d
class FunctionDoc(CommentDoc):
type = 'Function'
namekey = 'function'
@property
def is_constructor(self):
return self.name == 'init'
@property
def params(self):
tag_texts = self.get_as_list('param')
# turns out guessed_params is *almost* (?) always set to a list,
# if empty list of guessed params fall back to @params
if not self['guessed_params']:
# only get "primary" params (no "." in name)
return [
p for p in map(ParamDoc, tag_texts)
if '.' not in p.name
]
else:
param_dict = {}
for text in tag_texts:
param = ParamDoc(text)
param_dict[param.name] = param
return [param_dict.get(name) or ParamDoc('{} ' + name)
for name in self.get('guessed_params')]
@property
def return_val(self):
ret = self.get('return') or self.get('returns')
type = self.get('type')
if '{' in ret and '}' in ret:
if not '} ' in ret:
# Ensure that name is empty
ret = re.sub(r'\}\s*', '} ', ret)
return ParamDoc(ret)
if ret and type:
return ParamDoc('{%s} %s' % (type, ret))
return ParamDoc(ret)
def to_dict(self):
d = super(FunctionDoc, self).to_dict()
d['name'] = self.name
d['params'] = [param.to_dict() for param in self.params]
d['return_val']= self.return_val.to_dict()
return d
class NSDoc(CommentDoc):
namekey = 'namespace'
def __init__(self, parsed_comment):
super(NSDoc, self).__init__(parsed_comment)
self.members = collections.OrderedDict()
def add_member(self, name, member):
"""
:type name: str
:type member: CommentDoc
"""
member.set_name(name)
self.members[name] = member
@property
def properties(self):
if self.get('property'):
return [
(p.name, p)
for p in (
PropertyDoc.from_param(p, self['sourcemodule'])
for p in self.get_as_list('property')
)
]
return list(self.members.items()) or self['_members'] or []
def has_property(self, name):
return self.get_property(name) is not None
def get_property(self, name):
return next((p for n, p in self.properties if n == name), None)
def to_dict(self):
d = super(NSDoc, self).to_dict()
d['properties'] = [(n, p.to_dict()) for n, p in self.properties]
return d
class MixinDoc(NSDoc):
namekey = 'mixin'
class ModuleDoc(NSDoc):
namekey = 'module'
def __init__(self, parsed_comment):
super(ModuleDoc, self).__init__(parsed_comment)
#: callbacks to run with the modules mapping once every module is resolved
self._post_process = []
def post_process(self, modules):
for callback in self._post_process:
callback(modules)
@property
def module(self):
return self # lol
@property
def dependencies(self):
"""
Returns the immediate dependencies of a module (only those explicitly
declared/used).
"""
return self.get('dependency', None) or set()
@property
def exports(self):
"""
Returns the actual item exported from the AMD module, can be a
namespace, a class, a function, an instance, ...
"""
return self.get_property('<exports>')
def to_dict(self):
vars = super(ModuleDoc, self).to_dict()
vars['dependencies'] = self.dependencies
vars['exports'] = self.exports
return vars
def __str__(self):
s = super().__str__()
if self['sourcefile']:
s += " in file " + self['sourcefile']
return s
class ClassDoc(NSDoc):
namekey = 'class'
@property
def constructor(self):
return self.get_property('init')
@property
def superclass(self):
return self['extends'] or self['base']
def get_property(self, method_name):
if method_name == 'extend':
return FunctionDoc({
'doc': 'Create subclass for %s' % self.name,
'guessed_function': 'extend',
})
# FIXME: should ideally be a proxy namespace
if method_name == 'prototype':
return self
return super(ClassDoc, self).get_property(method_name)\
or (self.superclass and self.superclass.get_property(method_name))
@property
def mixins(self):
return self.get_as_list('mixes')
def to_dict(self):
d = super(ClassDoc, self).to_dict()
d['mixins'] = self.mixins
return d
DEFAULT = object()
class UnknownNS(NSDoc):
params = () # TODO: log warning when (somehow) trying to access / document an unknown object as ctor?
def get_property(self, name):
return super(UnknownNS, self).get_property(name) or \
UnknownNS({'name': '{}.{}'.format(self.name, name)})
def __getitem__(self, item):
if self._probably_not_property(item):
return super().__getitem__(item)
return self.get_property(item)
def _probably_not_property(self, item):
return (
not isinstance(item, str)
or item in (self.namekey, 'name', 'params')
or item.startswith(('_', 'guessed_'))
or item in self.parsed
)
class Unknown(CommentDoc):
@classmethod
def from_(cls, source):
def builder(parsed):
inst = cls(parsed)
inst.parsed['source'] = source
return inst
return builder
@property
def name(self):
return self['name'] + ' ' + self['source']
@property
def type(self):
return "Unknown"
def get_property(self, p):
return Unknown(dict(self.parsed, source=self.name, name=p + '<'))
def parse_comments(comments, doctype=None):
# find last comment which starts with a *
docstring = next((
c['value']
for c in reversed(comments or [])
if c['value'].startswith(u'*')
), None) or u""
# \n prefix necessary otherwise parse_comment fails to take first
# block comment parser strips delimiters, but strip_stars fails without
# them
extract = '\n' + strip_stars('/*' + docstring + '\n*/')
parsed = pyjsdoc.parse_comment(extract, u'')
if doctype == 'FunctionExpression':
doctype = FunctionDoc
elif doctype == 'ObjectExpression' or doctype is None:
doctype = guess
if doctype is guess:
return doctype(parsed)
# in case a specific doctype is given, allow overriding it anyway
return guess(parsed, default=doctype)
def guess(parsed, default=UnknownNS):
if 'class' in parsed:
return ClassDoc(parsed)
if 'function' in parsed:
return FunctionDoc(parsed)
if 'mixin' in parsed:
return MixinDoc(parsed)
if 'namespace' in parsed:
return NSDoc(parsed)
if 'module' in parsed:
return ModuleDoc(parsed)
if 'type' in parsed:
return PropertyDoc(parsed)
return default(parsed)
| en | 0.801372 | # -*- coding: utf-8 -*- Version of jsdoc.strip_stars which always removes 1 space after * if one is available. Replace ParamDoc because FunctionDoc doesn't properly handle optional params or default values (TODO: or compounds) if guessed_params is used => augment paramdoc with "required" and "default" items to clean up name # param name and doc can be separated by - or :, strip it # there may not be a space between the param name and the :, in which # case the : gets attached to the name, strip *again* # TODO: formal @param/@property parser to handle this crap properly once and for all # not great... # don't resolve already resolved docs (e.g. a literal dict being # include-ed in two different classes because I don't even care anymore # turns out guessed_params is *almost* (?) always set to a list, # if empty list of guessed params fall back to @params # only get "primary" params (no "." in name) # Ensure that name is empty :type name: str :type member: CommentDoc #: callbacks to run with the modules mapping once every module is resolved # lol Returns the immediate dependencies of a module (only those explicitly declared/used). Returns the actual item exported from the AMD module, can be a namespace, a class, a function, an instance, ... # FIXME: should ideally be a proxy namespace # TODO: log warning when (somehow) trying to access / document an unknown object as ctor? # find last comment which starts with a * # \n prefix necessary otherwise parse_comment fails to take first # block comment parser strips delimiters, but strip_stars fails without # them # in case a specific doctype is given, allow overriding it anyway | 3.02285 | 3 |
src/nebulo/gql/resolve/resolvers/asynchronous.py | olirice/nebulo | 76 | 6618536 | <reponame>olirice/nebulo<gh_stars>10-100
from __future__ import annotations
import typing
from flupy import flu
from nebulo.config import Config
from nebulo.gql.alias import FunctionPayloadType, MutationPayloadType, ObjectType, ResolveInfo, ScalarType
from nebulo.gql.parse_info import parse_resolve_info
from nebulo.gql.relay.node_interface import NodeIdStructure, to_node_id_sql
from nebulo.gql.resolve.resolvers.claims import build_claims
from nebulo.gql.resolve.transpile.mutation_builder import build_mutation
from nebulo.gql.resolve.transpile.query_builder import sql_builder, sql_finalize
from nebulo.sql.table_base import TableProtocol
from sqlalchemy import literal_column, select
async def async_resolver(_, info: ResolveInfo, **kwargs) -> typing.Any:
    """Awaitable GraphQL entrypoint resolver.

    Expects:
        info.context['engine'] to contain an sqlalchemy.ext.asyncio.AsyncEngine
        info.context['default_role'] / ['jwt_claims'] for per-transaction claims

    Transpiles the parsed GraphQL query into SQL, executes it in a single
    transaction, and returns the JSON-shaped result (also stashed on the
    context under 'result').
    """
    context = info.context
    engine = context["engine"]
    default_role = context["default_role"]
    jwt_claims = context["jwt_claims"]
    tree = parse_resolve_info(info)
    async with engine.begin() as trans:
        # Set claims for transaction (e.g. role/JWT settings visible to
        # row-level security within this transaction)
        if jwt_claims or default_role:
            claims_stmt = build_claims(jwt_claims, default_role)
            await trans.execute(claims_stmt)
        result: typing.Dict[str, typing.Any]
        # Branch 1: call to an exposed SQL function
        if isinstance(tree.return_type, FunctionPayloadType):
            sql_function = tree.return_type.sql_function
            # positional args in input order, minus the relay bookkeeping key
            function_args = [val for key, val in tree.args["input"].items() if key != "clientMutationId"]
            func_call = sql_function.to_executable(function_args)
            # Function returning table row
            if isinstance(sql_function.return_sqla_type, TableProtocol):
                # Unpack the table row to columns
                return_sqla_model = sql_function.return_sqla_type
                core_table = return_sqla_model.__table__
                func_alias = func_call.alias("named_alias")
                stmt = select([literal_column(c.name).label(c.name) for c in core_table.c]).select_from(func_alias)  # type: ignore
                stmt_alias = stmt.alias()
                node_id_stmt = select([to_node_id_sql(return_sqla_model, stmt_alias).label("nodeId")]).select_from(stmt_alias)  # type: ignore
                ((row,),) = await trans.execute(node_id_stmt)
                node_id = NodeIdStructure.from_dict(row)
                # Add nodeId to AST and query
                query_tree = next(iter([x for x in tree.fields if x.name == "result"]), None)
                if query_tree is not None:
                    query_tree.args["nodeId"] = node_id
                    base_query = sql_builder(query_tree)
                    query = sql_finalize(query_tree.alias, base_query)
                    ((stmt_result,),) = await trans.execute(query)
                else:
                    # caller did not request the row back
                    stmt_result = {}
            else:
                # scalar-returning function: single labeled column
                stmt = select([func_call.label("result")])
                # NOTE(review): single-row unpack here assumes the mapping
                # row supports ** below — confirm against the SQLAlchemy
                # version in use
                (stmt_result,) = await trans.execute(stmt)
            maybe_mutation_id = tree.args["input"].get("clientMutationId")
            mutation_id_alias = next(
                iter([x.alias for x in tree.fields if x.name == "clientMutationId"]),
                "clientMutationId",
            )
            result = {tree.alias: {**stmt_result, **{mutation_id_alias: maybe_mutation_id}}}
        # Branch 2: relay-style insert/update/delete mutation
        elif isinstance(tree.return_type, MutationPayloadType):
            stmt = build_mutation(tree)
            ((row,),) = await trans.execute(stmt)
            node_id = NodeIdStructure.from_dict(row)
            maybe_mutation_id = tree.args["input"].get("clientMutationId")
            mutation_id_alias = next(
                iter([x.alias for x in tree.fields if x.name == "clientMutationId"]),
                "clientMutationId",
            )
            node_id_alias = next(iter([x.alias for x in tree.fields if x.name == "nodeId"]), "nodeId")
            output_row_name: str = Config.table_name_mapper(tree.return_type.sqla_model)
            query_tree = next(iter([x for x in tree.fields if x.name == output_row_name]), None)
            sql_result = {}
            if query_tree:
                # Set the nodeid of the newly created record as an arg
                query_tree.args["nodeId"] = node_id
                base_query = sql_builder(query_tree)
                query = sql_finalize(query_tree.alias, base_query)
                ((sql_result,),) = await trans.execute(query)
            result = {
                tree.alias: {**sql_result, mutation_id_alias: maybe_mutation_id},
                mutation_id_alias: maybe_mutation_id,
                node_id_alias: node_id,
            }
        # Branch 3: plain query (object or scalar)
        elif isinstance(tree.return_type, (ObjectType, ScalarType)):
            base_query = sql_builder(tree)
            query = sql_finalize(tree.name, base_query)
            ((query_json_result,),) = await trans.execute(query)
            if isinstance(tree.return_type, ScalarType):
                # If its a scalar, unwrap the top level name
                result = flu(query_json_result.values()).first(None)
            else:
                result = query_json_result
        else:
            raise Exception("sql builder could not handle return type")
    # Stash result on context to enable dumb resolvers to not fail
    context["result"] = result
    return result
| from __future__ import annotations
import typing
from flupy import flu
from nebulo.config import Config
from nebulo.gql.alias import FunctionPayloadType, MutationPayloadType, ObjectType, ResolveInfo, ScalarType
from nebulo.gql.parse_info import parse_resolve_info
from nebulo.gql.relay.node_interface import NodeIdStructure, to_node_id_sql
from nebulo.gql.resolve.resolvers.claims import build_claims
from nebulo.gql.resolve.transpile.mutation_builder import build_mutation
from nebulo.gql.resolve.transpile.query_builder import sql_builder, sql_finalize
from nebulo.sql.table_base import TableProtocol
from sqlalchemy import literal_column, select
async def async_resolver(_, info: ResolveInfo, **kwargs) -> typing.Any:
    """Awaitable GraphQL Entrypoint resolver
    Expects:
        info.context['engine'] to contain an sqlalchemy.ext.asyncio.AsyncEngine

    Transpiles the parsed GraphQL resolve tree into SQL, executes it inside a
    single transaction, and returns the JSON-shaped result. The three branches
    below handle the three supported return types: SQL function payloads,
    relay mutation payloads, and plain queries (object/scalar).
    """
    context = info.context
    engine = context["engine"]
    default_role = context["default_role"]
    jwt_claims = context["jwt_claims"]
    # Parse the GraphQL resolve info into nebulo's internal query tree.
    tree = parse_resolve_info(info)
    async with engine.begin() as trans:
        # Set claims for transaction (role / JWT claims applied before any SQL)
        if jwt_claims or default_role:
            claims_stmt = build_claims(jwt_claims, default_role)
            await trans.execute(claims_stmt)
        result: typing.Dict[str, typing.Any]
        if isinstance(tree.return_type, FunctionPayloadType):
            # Call of a database function exposed through GraphQL.
            sql_function = tree.return_type.sql_function
            # clientMutationId is relay bookkeeping, not a function argument.
            function_args = [val for key, val in tree.args["input"].items() if key != "clientMutationId"]
            func_call = sql_function.to_executable(function_args)
            # Function returning table row
            if isinstance(sql_function.return_sqla_type, TableProtocol):
                # Unpack the table row to columns
                return_sqla_model = sql_function.return_sqla_type
                core_table = return_sqla_model.__table__
                func_alias = func_call.alias("named_alias")
                stmt = select([literal_column(c.name).label(c.name) for c in core_table.c]).select_from(func_alias)  # type: ignore
                stmt_alias = stmt.alias()
                # Resolve the returned row to its globally unique relay node id.
                node_id_stmt = select([to_node_id_sql(return_sqla_model, stmt_alias).label("nodeId")]).select_from(stmt_alias)  # type: ignore
                ((row,),) = await trans.execute(node_id_stmt)
                node_id = NodeIdStructure.from_dict(row)
                # Add nodeId to AST and query
                query_tree = next(iter([x for x in tree.fields if x.name == "result"]), None)
                if query_tree is not None:
                    query_tree.args["nodeId"] = node_id
                    base_query = sql_builder(query_tree)
                    query = sql_finalize(query_tree.alias, base_query)
                    ((stmt_result,),) = await trans.execute(query)
                else:
                    # Caller did not request the "result" payload field.
                    stmt_result = {}
            else:
                # Scalar-returning function: select the bare result.
                stmt = select([func_call.label("result")])
                (stmt_result,) = await trans.execute(stmt)
            # Echo clientMutationId back under whatever alias was requested.
            maybe_mutation_id = tree.args["input"].get("clientMutationId")
            mutation_id_alias = next(
                iter([x.alias for x in tree.fields if x.name == "clientMutationId"]),
                "clientMutationId",
            )
            result = {tree.alias: {**stmt_result, **{mutation_id_alias: maybe_mutation_id}}}
        elif isinstance(tree.return_type, MutationPayloadType):
            # Relay mutation (insert/update/delete) built from the input args.
            stmt = build_mutation(tree)
            ((row,),) = await trans.execute(stmt)
            node_id = NodeIdStructure.from_dict(row)
            maybe_mutation_id = tree.args["input"].get("clientMutationId")
            mutation_id_alias = next(
                iter([x.alias for x in tree.fields if x.name == "clientMutationId"]),
                "clientMutationId",
            )
            node_id_alias = next(iter([x.alias for x in tree.fields if x.name == "nodeId"]), "nodeId")
            output_row_name: str = Config.table_name_mapper(tree.return_type.sqla_model)
            query_tree = next(iter([x for x in tree.fields if x.name == output_row_name]), None)
            sql_result = {}
            if query_tree:
                # Set the nodeid of the newly created record as an arg
                query_tree.args["nodeId"] = node_id
                base_query = sql_builder(query_tree)
                query = sql_finalize(query_tree.alias, base_query)
                ((sql_result,),) = await trans.execute(query)
            result = {
                tree.alias: {**sql_result, mutation_id_alias: maybe_mutation_id},
                mutation_id_alias: maybe_mutation_id,
                node_id_alias: node_id,
            }
        elif isinstance(tree.return_type, (ObjectType, ScalarType)):
            # Plain read query: transpile the whole tree into one SQL statement.
            base_query = sql_builder(tree)
            query = sql_finalize(tree.name, base_query)
            ((query_json_result,),) = await trans.execute(query)
            if isinstance(tree.return_type, ScalarType):
                # If its a scalar, unwrap the top level name
                result = flu(query_json_result.values()).first(None)
            else:
                result = query_json_result
        else:
            raise Exception("sql builder could not handle return type")
        # Stash result on context to enable dumb resolvers to not fail
        context["result"] = result
        return result
growler/urls.py | abi-ba-hacka/Ser-Veza | 0 | 6618537 | """Growler URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
from rest_framework import routers
from django.conf import settings
from django.conf.urls.static import static
from api import views
from rest_framework import viewsets
from rest_framework.response import Response
admin.site.site_header = 'Growler Mania Admin'
class SettingsViewSet(viewsets.GenericViewSet):
    # Read-only endpoint exposing the whitelisted Django settings
    # (settings.EXPORTED_SETTINGS) to API clients at /api/v1/settings/.
    def list(self, request, *args, **kwargs):
        """Return the exported settings mapping as the list response body."""
        return Response(settings.EXPORTED_SETTINGS)
router = routers.DefaultRouter()
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+ — confirm
# the pinned djangorestframework version still accepts this keyword.
router.register(r'settings', SettingsViewSet, base_name='settings')
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api/v1/', include(router.urls)),
    url(r'^$', views.index, name='refill_index'),
    url(r'^refill/$', views.index, name='refill_index'),
    url(r'^refill/(?P<refill_id>[0-9a-zA-Z_-]+)/$', views.show, name='refill_show'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| """Growler URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
from rest_framework import routers
from django.conf import settings
from django.conf.urls.static import static
from api import views
from rest_framework import viewsets
from rest_framework.response import Response
admin.site.site_header = 'Growler Mania Admin'
class SettingsViewSet(viewsets.GenericViewSet):
def list(self, request, *args, **kwargs):
return Response(settings.EXPORTED_SETTINGS)
router = routers.DefaultRouter()
router.register(r'settings', SettingsViewSet, base_name='settings')
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/v1/', include(router.urls)),
url(r'^$', views.index, name='refill_index'),
url(r'^refill/$', views.index, name='refill_index'),
url(r'^refill/(?P<refill_id>[0-9a-zA-Z_-]+)/$', views.show, name='refill_show'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| en | 0.643428 | Growler URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) # Wire up our API using automatic URL routing. # Additionally, we include login URLs for the browsable API. | 2.261802 | 2 |
yang_et_al/nnattack/models/keras_model.py | wagner-group/geoadex | 4 | 6618538 | import itertools
import threading
from cleverhans.attacks import ProjectedGradientDescent, FastGradientMethod
from cleverhans.utils_keras import KerasModelWrapper
from cleverhans.loss import CrossEntropy
from cleverhans.train import train
from cleverhans.utils_tf import initialize_uninitialized_global_variables
import tensorflow as tf
#import tensorflow.keras as keras
#from tensorflow.keras.models import Model
#from tensorflow.keras.layers import Dense, Input
#from tensorflow.keras.optimizers import Adam, Nadam
#from tensorflow.keras.regularizers import l2
#from tensorflow.keras.models import clone_model
import keras
from keras.models import Model, clone_model
from keras.layers import Dense, Input
from keras.optimizers import Adam, Nadam
from keras.regularizers import l2
import numpy as np
from sklearn.base import BaseEstimator
#from sklearn.linear_model import LogisticRegression
from .robust_nn.eps_separation import find_eps_separated_set
def get_adversarial_acc_metric(model, fgsm, fgsm_params):
    """Build a Keras metric reporting accuracy on adversarial examples.

    The returned callable regenerates adversarial inputs with ``fgsm`` on
    every evaluation and measures categorical accuracy of ``model`` on them.
    """
    def adv_acc(y, _):
        # Craft adversarial inputs from the model's symbolic input tensor,
        # then freeze them so no gradient flows back through the attack.
        adv_inputs = tf.stop_gradient(
            fgsm.generate(model.get_input_at(0), **fgsm_params)
        )
        return keras.metrics.categorical_accuracy(y, model(adv_inputs))
    return adv_acc
def get_adversarial_loss(model, fgsm, fgsm_params):
    """Build a loss averaging clean and adversarial cross-entropy 50/50."""
    def adv_loss(y, preds):
        # Cross-entropy on the unperturbed batch.
        clean_xent = keras.losses.categorical_crossentropy(y, preds)
        # Attack the symbolic input; treat the perturbation as a constant.
        adv_inputs = tf.stop_gradient(
            fgsm.generate(model.get_input_at(0), **fgsm_params)
        )
        adv_xent = keras.losses.categorical_crossentropy(y, model(adv_inputs))
        return 0.5 * clean_xent + 0.5 * adv_xent
    return adv_loss
def logistic_regression(input_x, input_shape, n_classes, l2_weight=0.0, **kwargs):
    """Multinomial logistic regression (single softmax layer) as a Keras Model.

    Returns ``(model, preprocess_fn)``; no preprocessing is required, so the
    second element is always ``None``.
    """
    in_layer = Input(shape=input_shape, tensor=input_x)
    out_layer = Dense(
        n_classes,
        activation='softmax',
        kernel_regularizer=l2(l2_weight),
    )(in_layer)
    return Model(inputs=[in_layer], outputs=[out_layer]), None
def mlp(input_x, input_shape, n_classes, l2_weight=0.0, **kwargs):
    """One-hidden-layer (128 ReLU units) MLP classifier as a Keras Model.

    Returns ``(model, preprocess_fn)``; the second element is always ``None``.
    """
    in_layer = Input(shape=input_shape, tensor=input_x)
    hidden = Dense(128, activation='relu',
                   kernel_regularizer=l2(l2_weight))(in_layer)
    out_layer = Dense(n_classes, activation='softmax',
                      kernel_regularizer=l2(l2_weight))(hidden)
    return Model(inputs=[in_layer], outputs=[out_layer]), None
class KerasModel(BaseEstimator):
    """Scikit-learn-style wrapper around a small Keras classifier.

    Supports plain training (``train_type=None``), adversarial training with
    cleverhans PGD (``train_type='adv'``), and adversarial pruning of the
    training set (``train_type='advPruning'``).
    """

    def __init__(self, lbl_enc, n_features, n_classes, sess,
                 learning_rate=1e-3, batch_size=128, epochs=20, optimizer='adam',
                 l2_weight=1e-5, architecture='arch_001', random_state=None,
                 attacker=None, callbacks=None, train_type: str = None, eps: float = 0.1,
                 ord=np.inf, eps_list=None):
        # Bind the caller-provided TF session so all Keras ops run in it.
        keras.backend.set_session(sess)
        self.n_features = n_features
        self.n_classes = n_classes
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.architecture = architecture
        self.epochs = epochs
        self.lbl_enc = lbl_enc  # one-hot label encoder (sklearn-style)
        self.optimizer_name = optimizer
        # NOTE(review): any optimizer name other than 'nadam'/'adam' leaves
        # self.optimizer unset and later compile() calls would fail — confirm
        # callers only pass these two values.
        if optimizer == 'nadam':
            self.optimizer = Nadam()
        elif optimizer == 'adam':
            self.optimizer = Adam(lr=self.learning_rate)
        self.l2_weight = l2_weight
        self.callbacks=callbacks
        self.loss = 'categorical_crossentropy'
        self.random_state = random_state
        self.train_type = train_type
        input_shape = tuple(n_features)
        # Look up the architecture factory (e.g. ``mlp``) by name in module
        # globals; it returns (model, optional preprocessing function).
        model, self.preprocess_fn = globals()[self.architecture](
            None, input_shape, n_classes, self.l2_weight)
        #model.summary()
        self.model = model
        ### Attack ####
        # eps_list is normalized here but not stored; presumably kept for
        # interface compatibility with other attack wrappers — TODO confirm.
        if eps_list is None:
            eps_list = [e*0.01 for e in range(100)]
        else:
            eps_list = [e for e in eps_list]
        self.sess = sess
        self.eps = eps    # perturbation budget for adversarial training
        self.ord = ord    # norm of the perturbation ball (np.inf, 2, ...)
        ###############

    def fit(self, X, y, sample_weight=None):
        """Train the model according to ``self.train_type``.

        'adv'        -> PGD adversarial training (mixed clean/adv loss)
        'advPruning' -> prune an eps-separated subset, then train normally
        None         -> plain categorical cross-entropy training
        """
        # NOTE(review): this guard is a no-op (`pass`); looks like leftover
        # scaffolding from an earlier refactor.
        if self.train_type is not None:
            pass
        if self.train_type == 'adv':
            #self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[])
            #Y = self.lbl_enc.transform(y.reshape(-1, 1))
            #initialize_uninitialized_global_variables(self.sess)
            #input_generator = InputGenerator(X, Y, sample_weight,
            #    attacker=self.attacker, shuffle=True, batch_size=self.batch_size,
            #    random_state=self.random_state)
            #self.model.fit_generator(
            #    input_generator,
            #    steps_per_epoch=((X.shape[0]*2 - 1) // self.batch_size) + 1,
            #    epochs=self.epochs,
            #    verbose=1,
            #)
            #######################################
            #Y = self.lbl_enc.transform(y.reshape(-1, 1))
            #train_params = {
            #    'init_all': True,
            #    'rng': self.random_state,
            #    'nb_epochs': self.epochs,
            #    'batch_size': self.batch_size,
            #    'learning_rate': self.learning_rate,
            #    'optimizor': tf.train.RMSPropOptimizer,
            #}
            #wrap = KerasModelWrapper(self.model)
            #pgd = ProjectedGradientDescent(wrap, sess=self.sess, nb_iter=20)
            #pgd_params = {'eps': self.eps}
            ##attack = pgd.generate(x, y=y, **pgd_params)
            #def attack(x):
            #    return pgd.generate(x, **pgd_params)
            #loss = CrossEntropy(wrap, smoothing=0.1, attack=attack)
            #def evaluate():
            #    #print("XDDD %f", self.sess.run(loss))
            #    print('Test accuracy on legitimate examples: %0.4f' % self.score(X, y))
            #train(self.sess, loss, X.astype(np.float32), Y.astype(np.float32),
            #      args=train_params, evaluate=evaluate)
            ######################################
            # Active path: wrap the model for cleverhans and train with a
            # 50/50 clean/adversarial cross-entropy loss (see
            # get_adversarial_loss above).
            Y = self.lbl_enc.transform(y.reshape(-1, 1))
            wrap_2 = KerasModelWrapper(self.model)
            fgsm_2 = ProjectedGradientDescent(wrap_2, sess=self.sess)
            # Force the model's graph to be built before compiling.
            self.model(self.model.input)
            fgsm_params = {'eps': self.eps}
            # Use a loss function based on legitimate and adversarial examples
            adv_loss_2 = get_adversarial_loss(self.model, fgsm_2, fgsm_params)
            adv_acc_metric_2 = get_adversarial_acc_metric(self.model, fgsm_2, fgsm_params)
            self.model.compile(
                #optimizer=keras.optimizers.Adam(self.learning_rate),
                optimizer=keras.optimizers.Nadam(),
                loss=adv_loss_2,
                metrics=['accuracy', adv_acc_metric_2]
            )
            self.model.fit(X, Y,
                batch_size=self.batch_size,
                epochs=self.epochs,
                verbose=2,
                sample_weight=sample_weight,
            )
            # Log training accuracy on the clean data.
            print((self.model.predict(X).argmax(1) == y).mean())
            self.augX, self.augy = None, None
        elif self.train_type == 'advPruning':
            # Map labels {0,1} -> {-1,+1}, keep only an eps/2-separated
            # subset of the data, then train normally on that subset.
            y = y.astype(int)*2-1
            self.augX, self.augy = find_eps_separated_set(
                X, self.eps/2, y, ord=self.ord)
            self.augy = (self.augy+1)//2
            self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[])
            Y = self.lbl_enc.transform(self.augy.reshape(-1, 1))
            self.model.fit(self.augX, Y, batch_size=self.batch_size, verbose=0,
                    epochs=self.epochs, sample_weight=sample_weight)
            print("number of augX", np.shape(self.augX), len(self.augy))
        elif self.train_type is None:
            # Plain supervised training on one-hot labels.
            self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[])
            Y = self.lbl_enc.transform(y.reshape(-1, 1))
            self.model.fit(X, Y, batch_size=self.batch_size, verbose=0,
                    epochs=self.epochs, sample_weight=sample_weight)
        else:
            raise ValueError("Not supported train type: %s", self.train_type)

    def predict(self, X):
        """Return hard class predictions (argmax over softmax outputs)."""
        X = np.asarray(X)
        if self.preprocess_fn is not None:
            X = self.preprocess_fn(X)
        pred = self.model.predict(X)
        return pred.argmax(1)
        #return self.lbl_enc.inverse_transform(pred).reshape(-1)

    def predict_proba(self, X):
        """Return class probabilities.

        NOTE(review): hstack((1-pred, pred)) doubles the column count; this
        only matches sklearn's predict_proba contract when the model emits a
        single probability column — confirm intended usage for n_classes > 1.
        """
        X = np.asarray(X)
        if self.preprocess_fn is not None:
            X = self.preprocess_fn(X)
        pred = self.model.predict(X)
        return np.hstack((1-pred, pred))

    def score(self, X, y):
        """Return mean accuracy of hard predictions against labels ``y``."""
        pred = self.predict(X)
        return (pred == y).mean()

    def _get_pert(self, X, Y, eps:float, model, ord):
        """Return PGD perturbations (adv_x - x) for batch ``X`` at budget ``eps``."""
        x = tf.placeholder(tf.float32, shape=([None] + list(self.n_features)))
        y = tf.placeholder(tf.float32, shape=(None, self.n_classes))
        wrap = KerasModelWrapper(model)
        pgd = ProjectedGradientDescent(wrap, sess=self.sess)
        # For very small budgets, shrink the per-step size so PGD still
        # takes multiple useful steps inside the ball.
        if eps >= 0.05:
            adv_x = pgd.generate(x, y=y, eps=eps, ord=ord)
        else:
            adv_x = pgd.generate(x, y=y, eps=eps, eps_iter=eps/5, ord=ord)
        adv_x = tf.stop_gradient(adv_x)
        ret = adv_x - x
        return ret.eval(feed_dict={x: X, y: Y}, session=self.sess)

    def perturb(self, X, y, eps=0.1):
        """Compute adversarial perturbations for ``X``.

        ``eps`` may be a single float (returns one perturbation array) or a
        list of floats (returns one array per budget).
        """
        # Accept either raw labels (1-D) or an already one-hot matrix.
        if len(y.shape) == 1:
            Y = self.lbl_enc.transform(y.reshape(-1, 1))
        else:
            Y = y
        #Y[:, 0], Y[:, 1] = Y[:, 1], Y[:, 0]
        if isinstance(eps, list):
            rret = []
            for ep in eps:
                rret.append(self._get_pert(X, Y, ep, self.model, self.ord))
            return rret
        elif isinstance(eps, float):
            ret = self._get_pert(X, Y, eps, self.model, self.ord)
        else:
            raise ValueError
        return ret
class InputGenerator(object):
    """Endless minibatch generator, optionally augmenting each batch with
    adversarial examples produced by ``attacker``.

    Thread-safe in the sense that concurrent ``next()`` calls serialize
    access to the shared index generator via a lock (the Keras
    ``fit_generator`` worker pattern).
    """

    def __init__(self, X, Y=None, sample_weight=None, attacker=None,
                 shuffle=False, batch_size=256, eps: float = 0.1, random_state=None):
        self.X = X
        self.Y = Y
        self.lock = threading.Lock()
        if random_state is None:
            random_state = np.random.RandomState()
        if attacker is not None:
            # assume its a multiple of 2
            # Half the clean batch size so clean + adversarial halves
            # together fill the requested batch size.
            batch_size = batch_size // 2
        self.index_generator = self._flow_index(X.shape[0], batch_size, shuffle,
                                                random_state)
        self.attacker = attacker
        self.sample_weight = sample_weight
        self.eps = eps  # perturbation budget passed to attacker.perturb

    def __iter__(self):
        return self

    def __next__(self, *args, **kwargs):
        # Python 3 iterator protocol delegates to next().
        return self.next(*args, **kwargs)

    def _flow_index(self, n, batch_size, shuffle, random_state):
        """Yield (epoch, index_slice) pairs forever, reshuffling per epoch."""
        index = np.arange(n)
        for epoch_i in itertools.count():
            if shuffle:
                random_state.shuffle(index)
            for batch_start in range(0, n, batch_size):
                batch_end = min(batch_start + batch_size, n)
                yield epoch_i, index[batch_start: batch_end]

    def next(self):
        """Return the next batch: X, (Y), (sample_weight)."""
        # The lock only guards the shared index generator; batch assembly
        # below operates on local variables.
        with self.lock:
            _, index_array = next(self.index_generator)
        batch_X = self.X[index_array]
        if self.Y is None:
            return batch_X
        else:
            batch_Y = self.Y[index_array]
        if self.attacker is not None:
            # Append an adversarially-perturbed copy of the clean half.
            adv_X = batch_X + self.attacker.perturb(batch_X, batch_Y, eps=self.eps)
            batch_X = np.concatenate((batch_X, adv_X), axis=0)
        if self.sample_weight is not None:
            batch_weight = self.sample_weight[index_array]
            if self.attacker is not None:
                # Duplicate labels/weights to match the doubled batch.
                batch_Y = np.concatenate((batch_Y, batch_Y), axis=0)
                batch_weight = np.concatenate((batch_weight, batch_weight), axis=0)
            return batch_X, batch_Y, batch_weight
        else:
            if self.attacker is not None:
                batch_Y = np.concatenate((batch_Y, batch_Y), axis=0)
            return batch_X, batch_Y
| import itertools
import threading
from cleverhans.attacks import ProjectedGradientDescent, FastGradientMethod
from cleverhans.utils_keras import KerasModelWrapper
from cleverhans.loss import CrossEntropy
from cleverhans.train import train
from cleverhans.utils_tf import initialize_uninitialized_global_variables
import tensorflow as tf
#import tensorflow.keras as keras
#from tensorflow.keras.models import Model
#from tensorflow.keras.layers import Dense, Input
#from tensorflow.keras.optimizers import Adam, Nadam
#from tensorflow.keras.regularizers import l2
#from tensorflow.keras.models import clone_model
import keras
from keras.models import Model, clone_model
from keras.layers import Dense, Input
from keras.optimizers import Adam, Nadam
from keras.regularizers import l2
import numpy as np
from sklearn.base import BaseEstimator
#from sklearn.linear_model import LogisticRegression
from .robust_nn.eps_separation import find_eps_separated_set
def get_adversarial_acc_metric(model, fgsm, fgsm_params):
def adv_acc(y, _):
# Generate adversarial examples
#x_adv = fgsm.generate(model.input, **fgsm_params)
x_adv = fgsm.generate(model.get_input_at(0), **fgsm_params)
# Consider the attack to be constant
x_adv = tf.stop_gradient(x_adv)
# Accuracy on the adversarial examples
preds_adv = model(x_adv)
return keras.metrics.categorical_accuracy(y, preds_adv)
return adv_acc
def get_adversarial_loss(model, fgsm, fgsm_params):
def adv_loss(y, preds):
# Cross-entropy on the legitimate examples
cross_ent = keras.losses.categorical_crossentropy(y, preds)
# Generate adversarial examples
#x_adv = fgsm.generate(model.input, **fgsm_params)
x_adv = fgsm.generate(model.get_input_at(0), **fgsm_params)
# Consider the attack to be constant
x_adv = tf.stop_gradient(x_adv)
# Cross-entropy on the adversarial examples
preds_adv = model(x_adv)
cross_ent_adv = keras.losses.categorical_crossentropy(y, preds_adv)
return 0.5 * cross_ent + 0.5 * cross_ent_adv
return adv_loss
def logistic_regression(input_x, input_shape, n_classes, l2_weight=0.0, **kwargs):
inputs = Input(shape=input_shape, tensor=input_x)
x = Dense(n_classes, activation='softmax', kernel_regularizer=l2(l2_weight))(inputs)
return Model(inputs=[inputs], outputs=[x]), None
def mlp(input_x, input_shape, n_classes, l2_weight=0.0, **kwargs):
inputs = Input(shape=input_shape, tensor=input_x)
x = Dense(128, activation='relu', kernel_regularizer=l2(l2_weight))(inputs)
x = Dense(n_classes, activation='softmax', kernel_regularizer=l2(l2_weight))(x)
return Model(inputs=[inputs], outputs=[x]), None
class KerasModel(BaseEstimator):
def __init__(self, lbl_enc, n_features, n_classes, sess,
learning_rate=1e-3, batch_size=128, epochs=20, optimizer='adam',
l2_weight=1e-5, architecture='arch_001', random_state=None,
attacker=None, callbacks=None, train_type:str=None, eps:float=0.1,
ord=np.inf, eps_list=None):
keras.backend.set_session(sess)
self.n_features = n_features
self.n_classes = n_classes
self.batch_size = batch_size
self.learning_rate = learning_rate
self.architecture = architecture
self.epochs = epochs
self.lbl_enc = lbl_enc
self.optimizer_name = optimizer
if optimizer == 'nadam':
self.optimizer = Nadam()
elif optimizer == 'adam':
self.optimizer = Adam(lr=self.learning_rate)
self.l2_weight = l2_weight
self.callbacks=callbacks
self.loss = 'categorical_crossentropy'
self.random_state = random_state
self.train_type = train_type
input_shape = tuple(n_features)
model, self.preprocess_fn = globals()[self.architecture](
None, input_shape, n_classes, self.l2_weight)
#model.summary()
self.model = model
### Attack ####
if eps_list is None:
eps_list = [e*0.01 for e in range(100)]
else:
eps_list = [e for e in eps_list]
self.sess = sess
self.eps = eps
self.ord = ord
###############
def fit(self, X, y, sample_weight=None):
if self.train_type is not None:
pass
if self.train_type == 'adv':
#self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[])
#Y = self.lbl_enc.transform(y.reshape(-1, 1))
#initialize_uninitialized_global_variables(self.sess)
#input_generator = InputGenerator(X, Y, sample_weight,
# attacker=self.attacker, shuffle=True, batch_size=self.batch_size,
# random_state=self.random_state)
#self.model.fit_generator(
# input_generator,
# steps_per_epoch=((X.shape[0]*2 - 1) // self.batch_size) + 1,
# epochs=self.epochs,
# verbose=1,
#)
#######################################
#Y = self.lbl_enc.transform(y.reshape(-1, 1))
#train_params = {
# 'init_all': True,
# 'rng': self.random_state,
# 'nb_epochs': self.epochs,
# 'batch_size': self.batch_size,
# 'learning_rate': self.learning_rate,
# 'optimizor': tf.train.RMSPropOptimizer,
#}
#wrap = KerasModelWrapper(self.model)
#pgd = ProjectedGradientDescent(wrap, sess=self.sess, nb_iter=20)
#pgd_params = {'eps': self.eps}
##attack = pgd.generate(x, y=y, **pgd_params)
#def attack(x):
# return pgd.generate(x, **pgd_params)
#loss = CrossEntropy(wrap, smoothing=0.1, attack=attack)
#def evaluate():
# #print("XDDD %f", self.sess.run(loss))
# print('Test accuracy on legitimate examples: %0.4f' % self.score(X, y))
#train(self.sess, loss, X.astype(np.float32), Y.astype(np.float32),
# args=train_params, evaluate=evaluate)
######################################
Y = self.lbl_enc.transform(y.reshape(-1, 1))
wrap_2 = KerasModelWrapper(self.model)
fgsm_2 = ProjectedGradientDescent(wrap_2, sess=self.sess)
self.model(self.model.input)
fgsm_params = {'eps': self.eps}
# Use a loss function based on legitimate and adversarial examples
adv_loss_2 = get_adversarial_loss(self.model, fgsm_2, fgsm_params)
adv_acc_metric_2 = get_adversarial_acc_metric(self.model, fgsm_2, fgsm_params)
self.model.compile(
#optimizer=keras.optimizers.Adam(self.learning_rate),
optimizer=keras.optimizers.Nadam(),
loss=adv_loss_2,
metrics=['accuracy', adv_acc_metric_2]
)
self.model.fit(X, Y,
batch_size=self.batch_size,
epochs=self.epochs,
verbose=2,
sample_weight=sample_weight,
)
print((self.model.predict(X).argmax(1) == y).mean())
self.augX, self.augy = None, None
elif self.train_type == 'advPruning':
y = y.astype(int)*2-1
self.augX, self.augy = find_eps_separated_set(
X, self.eps/2, y, ord=self.ord)
self.augy = (self.augy+1)//2
self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[])
Y = self.lbl_enc.transform(self.augy.reshape(-1, 1))
self.model.fit(self.augX, Y, batch_size=self.batch_size, verbose=0,
epochs=self.epochs, sample_weight=sample_weight)
print("number of augX", np.shape(self.augX), len(self.augy))
elif self.train_type is None:
self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[])
Y = self.lbl_enc.transform(y.reshape(-1, 1))
self.model.fit(X, Y, batch_size=self.batch_size, verbose=0,
epochs=self.epochs, sample_weight=sample_weight)
else:
raise ValueError("Not supported train type: %s", self.train_type)
def predict(self, X):
X = np.asarray(X)
if self.preprocess_fn is not None:
X = self.preprocess_fn(X)
pred = self.model.predict(X)
return pred.argmax(1)
#return self.lbl_enc.inverse_transform(pred).reshape(-1)
def predict_proba(self, X):
X = np.asarray(X)
if self.preprocess_fn is not None:
X = self.preprocess_fn(X)
pred = self.model.predict(X)
return np.hstack((1-pred, pred))
def score(self, X, y):
pred = self.predict(X)
return (pred == y).mean()
def _get_pert(self, X, Y, eps:float, model, ord):
x = tf.placeholder(tf.float32, shape=([None] + list(self.n_features)))
y = tf.placeholder(tf.float32, shape=(None, self.n_classes))
wrap = KerasModelWrapper(model)
pgd = ProjectedGradientDescent(wrap, sess=self.sess)
if eps >= 0.05:
adv_x = pgd.generate(x, y=y, eps=eps, ord=ord)
else:
adv_x = pgd.generate(x, y=y, eps=eps, eps_iter=eps/5, ord=ord)
adv_x = tf.stop_gradient(adv_x)
ret = adv_x - x
return ret.eval(feed_dict={x: X, y: Y}, session=self.sess)
def perturb(self, X, y, eps=0.1):
if len(y.shape) == 1:
Y = self.lbl_enc.transform(y.reshape(-1, 1))
else:
Y = y
#Y[:, 0], Y[:, 1] = Y[:, 1], Y[:, 0]
if isinstance(eps, list):
rret = []
for ep in eps:
rret.append(self._get_pert(X, Y, ep, self.model, self.ord))
return rret
elif isinstance(eps, float):
ret = self._get_pert(X, Y, eps, self.model, self.ord)
else:
raise ValueError
return ret
class InputGenerator(object):
def __init__(self, X, Y=None, sample_weight=None, attacker=None,
shuffle=False, batch_size=256, eps:float=0.1, random_state=None):
self.X = X
self.Y = Y
self.lock = threading.Lock()
if random_state is None:
random_state = np.random.RandomState()
if attacker is not None:
# assume its a multiple of 2
batch_size = batch_size // 2
self.index_generator = self._flow_index(X.shape[0], batch_size, shuffle,
random_state)
self.attacker = attacker
self.sample_weight = sample_weight
self.eps = eps
def __iter__(self):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _flow_index(self, n, batch_size, shuffle, random_state):
index = np.arange(n)
for epoch_i in itertools.count():
if shuffle:
random_state.shuffle(index)
for batch_start in range(0, n, batch_size):
batch_end = min(batch_start + batch_size, n)
yield epoch_i, index[batch_start: batch_end]
def next(self):
with self.lock:
_, index_array = next(self.index_generator)
batch_X = self.X[index_array]
if self.Y is None:
return batch_X
else:
batch_Y = self.Y[index_array]
if self.attacker is not None:
adv_X = batch_X + self.attacker.perturb(batch_X, batch_Y, eps=self.eps)
batch_X = np.concatenate((batch_X, adv_X), axis=0)
if self.sample_weight is not None:
batch_weight = self.sample_weight[index_array]
if self.attacker is not None:
batch_Y = np.concatenate((batch_Y, batch_Y), axis=0)
batch_weight = np.concatenate((batch_weight, batch_weight), axis=0)
return batch_X, batch_Y, batch_weight
else:
if self.attacker is not None:
batch_Y = np.concatenate((batch_Y, batch_Y), axis=0)
return batch_X, batch_Y
| en | 0.323551 | #import tensorflow.keras as keras #from tensorflow.keras.models import Model #from tensorflow.keras.layers import Dense, Input #from tensorflow.keras.optimizers import Adam, Nadam #from tensorflow.keras.regularizers import l2 #from tensorflow.keras.models import clone_model #from sklearn.linear_model import LogisticRegression # Generate adversarial examples #x_adv = fgsm.generate(model.input, **fgsm_params) # Consider the attack to be constant # Accuracy on the adversarial examples # Cross-entropy on the legitimate examples # Generate adversarial examples #x_adv = fgsm.generate(model.input, **fgsm_params) # Consider the attack to be constant # Cross-entropy on the adversarial examples #model.summary() ### Attack #### ############### #self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[]) #Y = self.lbl_enc.transform(y.reshape(-1, 1)) #initialize_uninitialized_global_variables(self.sess) #input_generator = InputGenerator(X, Y, sample_weight, # attacker=self.attacker, shuffle=True, batch_size=self.batch_size, # random_state=self.random_state) #self.model.fit_generator( # input_generator, # steps_per_epoch=((X.shape[0]*2 - 1) // self.batch_size) + 1, # epochs=self.epochs, # verbose=1, #) ####################################### #Y = self.lbl_enc.transform(y.reshape(-1, 1)) #train_params = { # 'init_all': True, # 'rng': self.random_state, # 'nb_epochs': self.epochs, # 'batch_size': self.batch_size, # 'learning_rate': self.learning_rate, # 'optimizor': tf.train.RMSPropOptimizer, #} #wrap = KerasModelWrapper(self.model) #pgd = ProjectedGradientDescent(wrap, sess=self.sess, nb_iter=20) #pgd_params = {'eps': self.eps} ##attack = pgd.generate(x, y=y, **pgd_params) #def attack(x): # return pgd.generate(x, **pgd_params) #loss = CrossEntropy(wrap, smoothing=0.1, attack=attack) #def evaluate(): # #print("XDDD %f", self.sess.run(loss)) # print('Test accuracy on legitimate examples: %0.4f' % self.score(X, y)) #train(self.sess, loss, 
X.astype(np.float32), Y.astype(np.float32), # args=train_params, evaluate=evaluate) ###################################### # Use a loss function based on legitimate and adversarial examples #optimizer=keras.optimizers.Adam(self.learning_rate), #return self.lbl_enc.inverse_transform(pred).reshape(-1) #Y[:, 0], Y[:, 1] = Y[:, 1], Y[:, 0] # assume its a multiple of 2 | 2.298816 | 2 |
tests/test_state_lattice_planner.py | ryuichiueda/PythonRobotics | 1 | 6618539 | <reponame>ryuichiueda/PythonRobotics
import conftest # Add root path to sys.path
from PathPlanning.StateLatticePlanner import state_lattice_planner as m
from PathPlanning.ModelPredictiveTrajectoryGenerator \
import model_predictive_trajectory_generator as m2
def test1():
    """Smoke-test the state lattice planner demo with animation disabled."""
    m.show_animation = False
    m2.show_animation = False
    m.main()
| import conftest # Add root path to sys.path
from PathPlanning.StateLatticePlanner import state_lattice_planner as m
from PathPlanning.ModelPredictiveTrajectoryGenerator \
import model_predictive_trajectory_generator as m2
def test1():
m.show_animation = False
m2.show_animation = False
m.main() | en | 0.557969 | # Add root path to sys.path | 1.555894 | 2 |
app/network_services/forms.py | 5genesis/Portal | 1 | 6618540 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, SelectField
from wtforms.validators import DataRequired
class BaseNsForm(FlaskForm):
    """Fields common to the network-service create/edit forms."""
    name = StringField('Name', validators=[DataRequired()])
    description = TextAreaField('Description')
    public = SelectField('Visibility', choices=[('Public', 'Public'), ('Private', 'Private')])
class NewNsForm(BaseNsForm):
    """Creation form: the shared fields plus a single submit button."""

    create = SubmitField('Create')
class EditNsForm(BaseNsForm):
    """Edit form: shared fields plus one SubmitField per editor action.

    Each button is a separate SubmitField so the view can tell which one
    was pressed; they are grouped below by the artifact they act on.
    """

    update = SubmitField('Update')

    # VNFD (VNF descriptor) actions
    preloadVnfd = SubmitField('Pre-load')
    selectVnfd = SubmitField('Add')

    # VIM actions
    preloadVim = SubmitField('Pre-load')
    onboardVim = SubmitField('Onboard')
    deleteVim = SubmitField('Delete')
    selectVim = SubmitField('Select')

    # NSD (network-service descriptor) actions
    preloadNsd = SubmitField('Pre-load')
    onboardNsd = SubmitField('Onboard')
    deleteNsd = SubmitField('Delete')
    selectNsd = SubmitField('Select')

    # Form-level actions
    closeAction = SubmitField('Commit')
    cancelAction = SubmitField('Cancel')
| from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, SelectField
from wtforms.validators import DataRequired
class BaseNsForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
description = TextAreaField('Description')
public = SelectField('Visibility', choices=[('Public', 'Public'), ('Private', 'Private')])
class NewNsForm(BaseNsForm):
create = SubmitField('Create')
class EditNsForm(BaseNsForm):
update = SubmitField('Update')
preloadVnfd = SubmitField('Pre-load')
selectVnfd = SubmitField('Add')
preloadVim = SubmitField('Pre-load')
onboardVim = SubmitField('Onboard')
deleteVim = SubmitField('Delete')
selectVim = SubmitField('Select')
preloadNsd = SubmitField('Pre-load')
onboardNsd = SubmitField('Onboard')
deleteNsd = SubmitField('Delete')
selectNsd = SubmitField('Select')
closeAction = SubmitField('Commit')
cancelAction = SubmitField('Cancel')
| none | 1 | 2.456207 | 2 | |
futoin/cid/tool/jfrogtool.py | futoin/citool | 13 | 6618541 | #
# Copyright 2015-2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..runenvtool import RunEnvTool
from .curltoolmixin import CurlToolMixIn
class jfrogTool(CurlToolMixIn, RunEnvTool):
    """JFrog: Command Line Interface for Artifactory and Bintray
    Home: https://www.jfrog.com/confluence/display/CLI/JFrog+CLI
    """
    __slots__ = ()

    def _installTool(self, env):
        """Install the jfrog CLI.

        On macOS it is installed through Homebrew; elsewhere the static
        binary is downloaded with curl into ``env['jfrogDir']``.
        """
        ospath = self._ospath
        os = self._os

        if self._detect.isMacOS():
            self._install.brew('jfrog-cli-go')
            return

        dst_dir = env['jfrogDir']
        get_url = env['jfrogGet']
        jfrog_bin = ospath.join(dst_dir, 'jfrog')

        if not ospath.exists(dst_dir):
            os.makedirs(dst_dir)

        self._callCurl(env, [get_url, '-o', jfrog_bin])

        # rwxr-xr-x: executable by everyone, writable only by the owner.
        stat = self._ext.stat
        os.chmod(jfrog_bin, stat.S_IRWXU | stat.S_IRGRP |
                 stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

    def _updateTool(self, env):
        """Update by re-installing (Homebrew handles updates on macOS)."""
        if self._detect.isMacOS():
            return

        self.uninstallTool(env)
        self._installTool(env)

    def uninstallTool(self, env):
        """Remove the downloaded binary (no-op on macOS / Homebrew)."""
        if self._detect.isMacOS():
            return

        jfrog_bin = env['jfrogBin']

        if self._ospath.exists(jfrog_bin):
            self._os.remove(jfrog_bin)

        self._have_tool = False

    def envNames(self):
        """Environment variable names this tool reads/writes."""
        return ['jfrogDir', 'jfrogBin', 'jfrogGet']

    def initEnv(self, env):
        """Resolve install dir and download URL, then detect the tool."""
        bin_dir = env.setdefault('jfrogDir', env['binDir'])
        pkg = None
        detect = self._detect

        # No download package on macOS (Homebrew is used instead).
        if detect.isMacOS():
            pass
        elif detect.isAMD64():
            pkg = 'jfrog-cli-linux-amd64'
        else:
            pkg = 'jfrog-cli-linux-386'

        if pkg:
            env.setdefault(
                'jfrogGet',
                'https://api.bintray.com/content/jfrog/jfrog-cli-go/$latest/{0}/jfrog?bt_package={0}'.format(
                    pkg)
            )

        self._pathutil.addBinPath(bin_dir)
        super(jfrogTool, self).initEnv(env)

        if self._have_tool:
            env['jfrogDir'] = self._ospath.dirname(env['jfrogBin'])
| #
# Copyright 2015-2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..runenvtool import RunEnvTool
from .curltoolmixin import CurlToolMixIn
class jfrogTool(CurlToolMixIn, RunEnvTool):
"""JFrog: Command Line Interface for Artifactory and Bintray
Home: https://www.jfrog.com/confluence/display/CLI/JFrog+CLI
"""
__slots__ = ()
def _installTool(self, env):
ospath = self._ospath
os = self._os
if self._detect.isMacOS():
self._install.brew('jfrog-cli-go')
return
dst_dir = env['jfrogDir']
get_url = env['jfrogGet']
jfrog_bin = ospath.join(dst_dir, 'jfrog')
if not ospath.exists(dst_dir):
os.makedirs(dst_dir)
self._callCurl(env, [get_url, '-o', jfrog_bin])
stat = self._ext.stat
os.chmod(jfrog_bin, stat.S_IRWXU | stat.S_IRGRP |
stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
def _updateTool(self, env):
if self._detect.isMacOS():
return
self.uninstallTool(env)
self._installTool(env)
def uninstallTool(self, env):
if self._detect.isMacOS():
return
jfrog_bin = env['jfrogBin']
if self._ospath.exists(jfrog_bin):
self._os.remove(jfrog_bin)
self._have_tool = False
def envNames(self):
return ['jfrogDir', 'jfrogBin', 'jfrogGet']
def initEnv(self, env):
bin_dir = env.setdefault('jfrogDir', env['binDir'])
pkg = None
url_base = 'https://api.bintray.com/content/jfrog/jfrog-cli-go/$latest'
detect = self._detect
if detect.isMacOS():
pass
elif detect.isAMD64():
pkg = 'jfrog-cli-linux-amd64'
else:
pkg = 'jfrog-cli-linux-386'
if pkg:
env.setdefault(
'jfrogGet',
'https://api.bintray.com/content/jfrog/jfrog-cli-go/$latest/{0}/jfrog?bt_package={0}'.format(
pkg)
)
self._pathutil.addBinPath(bin_dir)
super(jfrogTool, self).initEnv(env)
if self._have_tool:
env['jfrogDir'] = self._ospath.dirname(env['jfrogBin'])
| en | 0.823422 | # # Copyright 2015-2020 <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # JFrog: Command Line Interface for Artifactory and Bintray Home: https://www.jfrog.com/confluence/display/CLI/JFrog+CLI | 1.806794 | 2 |
exercicio410.py | profnssorg/henriqueJoner1 | 0 | 6618542 | """
Descrição: Este programa calcula a sua fatura de energia elétrica
Autor:<NAME>
Versão:0.0.1
Data:25/11/2018
"""
# Variable initialisation
consumo = 0
tipo = 0
fatura = 0
preco = 0

# Data input; the type code is normalised so "i"/"r"/"c" (and stray
# whitespace) are accepted as well as the documented upper-case codes.
tipo = input("Informe o tipo de estabelecimento: I para industrial, R para residencial e C para comercial. ").strip().upper()
consumo = float(input("Informe o consumo em kWh: "))

# Processing: pick the price per kWh for the establishment type; each
# type has a higher rate above its own consumption threshold.
if tipo == "I":
    preco = 0.55
    if consumo > 5000:
        preco = 0.6
elif tipo == "R":
    preco = 0.4
    if consumo > 500:
        preco = 0.65
elif tipo == "C":
    preco = 0.55
    if consumo > 1000:
        preco = 0.60
else:
    # Unknown code: warn and zero everything so the bill comes out as 0.
    print("CÓDIGO INVÁLIDO! CÓDIGO INVÁLIDO! CÓDIGO INVÁLIDO! CÓDIGO INVÁLIDO!")
    preco = 0
    consumo = 0

fatura = preco * consumo

# Data output
print("O seu consumo foi de %5.2f kWh, a sua classificação é %s e por isso sua fatura será de R$ %5.2f!" % (consumo, tipo, fatura))
| """
Descrição: Este programa calcula a sua fatura de energia elétrica
Autor:<NAME>
Versão:0.0.1
Data:25/11/2018
"""
#Inicialização de variáveis
consumo = 0
tipo = 0
fatura = 0
preco = 0
#Entrada de dados
tipo = input("Informe o tipo de estabelecimento: I para industrial, R para residencial e C para comercial. ")
consumo = float(input("Informe o consumo em kWh: "))
preco = 0.4
#Processamento de dados
if tipo == "I":
preco = 0.55
if consumo > 5000:
preco = 0.6
elif tipo == "R":
preco = 0.4
if consumo > 500:
preco = 0.65
elif tipo == "C":
preco = 0.55
if consumo > 1000:
preco = 0.60
else:
print("CÓDIGO INVÁLIDO! CÓDIGO INVÁLIDO! CÓDIGO INVÁLIDO! CÓDIGO INVÁLIDO!")
preco = 0
consumo = 0
fatura = preco * consumo
#Saída de dados
print("O seu consumo foi de %5.2f kWh, a sua classificação é %s e por isso sua fatura será de R$ %5.2f!" % (consumo, tipo, fatura))
| pt | 0.994593 | Descrição: Este programa calcula a sua fatura de energia elétrica Autor:<NAME> Versão:0.0.1 Data:25/11/2018 #Inicialização de variáveis #Entrada de dados #Processamento de dados #Saída de dados | 3.907141 | 4 |
eos/automation/lib/python/community/eos/config.py | CrazyIvan359/eos | 0 | 6618543 | """
Eos Lighting
Config value loader
"""
# Copyright (c) 2020 Eos Lighting contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from community.eos import log
from community.eos.constants import *
from community.eos import constants
import sys, copy, collections
__all__ = ["load"]
def _get_conf_value(name, valid_types=None, default=None):
    """Gets ``name`` from configuration.

    Returns ``default`` if not present or not one of types in ``valid_types``
    """
    # importing here so we can reload each time and catch any updates the user may have made
    try:
        try:
            # ``reload`` is a builtin on Python 2 / Jython; on Python 3 it
            # lives in importlib, so fall back gracefully in either case.
            from importlib import reload
        except ImportError:
            pass
        import configuration
        reload(configuration)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed while loading the configuration module.
        return default

    if hasattr(configuration, name):
        value = getattr(configuration, name)
        if valid_types is None or isinstance(value, valid_types):
            log.debug(
                "Got '{name}': '{value}' from configuration".format(
                    name=name, value=value
                )
            )
            return value
        else:
            log.error(
                "Configuration value for '{name}' is type '{type}', must be one of {valid_types}".format(
                    name=name, type=type(value), valid_types=valid_types
                )
            )
            return default
    else:
        log.debug(
            "No value for '{name}' specified in configuration, using default '{value}'".format(
                name=name, value=default
            )
        )
        return default
def update_dict(d, u):
    """
    Recursively update dict ``d`` with dict ``u`` and return ``d``.

    Nested mappings are merged key-by-key; any non-mapping value in ``u``
    (or a non-mapping value already in ``d``) simply overwrites.
    """
    try:
        # ``Mapping`` moved to collections.abc (the plain ``collections``
        # alias was removed in Python 3.10); keep a Py2/Jython fallback.
        from collections.abc import Mapping
    except ImportError:  # Python 2 / Jython
        from collections import Mapping

    for key in u:
        current = d.get(key, {})
        if not isinstance(current, Mapping):
            # Existing value is a leaf: overwrite it outright.
            d[key] = u[key]
        elif isinstance(u[key], Mapping):
            # Both sides are mappings: merge recursively.
            d[key] = update_dict(current, u[key])
        else:
            d[key] = u[key]
    return d
def load():
    """(Re)load user configuration into this module's globals.

    Reads each supported key from the user's ``configuration`` module via
    ``_get_conf_value`` and publishes the result as an attribute of this
    module, so other Eos modules can read e.g. ``config.master_group_name``.
    """
    # Bind values onto the module object itself.
    this = sys.modules[__name__]
    this.master_group_name = _get_conf_value(CONF_KEY_MASTER_GROUP, str, "")
    this.scene_item_prefix = _get_conf_value(CONF_KEY_SCENE_PREFIX, str, "")
    this.scene_item_suffix = _get_conf_value(CONF_KEY_SCENE_SUFFIX, str, "")
    this.reinit_item_name = _get_conf_value(CONF_KEY_REINIT_ITEM, str, "")
    this.log_trace = _get_conf_value(CONF_KEY_LOG_TRACE, None, False)
    # Merge the user's overrides into a deep copy of the built-in defaults
    # so constants._global_settings itself is never mutated.
    this.global_settings = update_dict(
        copy.deepcopy(constants._global_settings),
        _get_conf_value(CONF_KEY_GLOBAL_SETTINGS, dict, {}),
    )
| """
Eos Lighting
Config value loader
"""
# Copyright (c) 2020 Eos Lighting contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from community.eos import log
from community.eos.constants import *
from community.eos import constants
import sys, copy, collections
__all__ = ["load"]
def _get_conf_value(name, valid_types=None, default=None):
"""Gets ``name`` from configuration.
Returns ``default`` if not present or not one of types in ``valid_types``
"""
# importing here so we can reload each time and catch any updates the user may have made
try:
import configuration
reload(configuration)
except:
return default
if hasattr(configuration, name):
value = getattr(configuration, name)
if valid_types is None or isinstance(value, valid_types):
log.debug(
"Got '{name}': '{value}' from configuration".format(
name=name, value=value
)
)
return value
else:
log.error(
"Configuration value for '{name}' is type '{type}', must be one of {valid_types}".format(
name=name, type=type(value), valid_types=valid_types
)
)
return default
else:
log.debug(
"No value for '{name}' specified in configuration, using default '{value}'".format(
name=name, value=default
)
)
return default
def update_dict(d, u):
"""
Recursively update dict ``d`` with dict ``u``
"""
for k in u:
dv = d.get(k, {})
if not isinstance(dv, collections.Mapping):
d[k] = u[k]
elif isinstance(u[k], collections.Mapping):
d[k] = update_dict(dv, u[k])
else:
d[k] = u[k]
return d
def load():
this = sys.modules[__name__]
this.master_group_name = _get_conf_value(CONF_KEY_MASTER_GROUP, str, "")
this.scene_item_prefix = _get_conf_value(CONF_KEY_SCENE_PREFIX, str, "")
this.scene_item_suffix = _get_conf_value(CONF_KEY_SCENE_SUFFIX, str, "")
this.reinit_item_name = _get_conf_value(CONF_KEY_REINIT_ITEM, str, "")
this.log_trace = _get_conf_value(CONF_KEY_LOG_TRACE, None, False)
this.global_settings = update_dict(
copy.deepcopy(constants._global_settings),
_get_conf_value(CONF_KEY_GLOBAL_SETTINGS, dict, {}),
)
| en | 0.769064 | Eos Lighting Config value loader # Copyright (c) 2020 Eos Lighting contributors # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. Gets ``name`` from configuration. Returns ``default`` if not present or not one of types in ``valid_types`` # importing here so we can reload each time and catch any updates the user may have made Recursively update dict ``d`` with dict ``u`` | 1.87287 | 2 |
ESTACIO/EX ESTACIO 03.py | gnabaes/Exe-Python | 0 | 6618544 | '''ATRIBUTOS DE UM ARQUIVO'''
# Demo of file-object attributes: open read-only, inspect, close, re-inspect.
arquivo = open('dados1.txt','r')
print('nome do arquivo: ', arquivo.name)   # file name as passed to open()
print('modo do arquivo: ', arquivo.mode)   # open mode ('r')
print('Arquivo Fechado', arquivo.closed)   # False while the file is open
arquivo.close()
print('arquivo fechado ', arquivo.closed)  # True after close()
| '''ATRIBUTOS DE UM ARQUIVO'''
arquivo = open('dados1.txt','r')
print('nome do arquivo: ', arquivo.name)
print('modo do arquivo: ', arquivo.mode)
print('Arquivo Fechado', arquivo.closed)
arquivo.close()
print('arquivo fechado ', arquivo.closed)
| es | 0.424615 | ATRIBUTOS DE UM ARQUIVO | 3.193632 | 3 |
examples/state_machine_examples/uart_triggered_state_change.py | ckarageorgkaneen/pybpod-api | 1 | 6618545 | <reponame>ckarageorgkaneen/pybpod-api<gh_stars>1-10
# !/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Example adapted from <NAME>' original version on Sanworks Bpod repository
"""
from pybpodapi.protocol import Bpod, StateMachine
"""
Run this protocol now
"""
# Connect to the Bpod device.
my_bpod = Bpod()

sma = StateMachine(my_bpod)

# State 1: drive PWM1 at full brightness and wait for a UART event.
sma.add_state(
    state_name='Port1Light',
    state_timer=0,
    state_change_conditions={Bpod.Events.Serial2_3: 'Port2Light'},  # Go to Port2Light when byte 0x3 arrives on UART port 2
    output_actions=[(Bpod.OutputChannels.PWM1, 255)])

# State 2: drive PWM2 at full brightness; the Tup (timer-up) condition with
# a 0 s timer should end the state machine as soon as it is entered --
# NOTE(review): confirm Tup timing semantics against the Bpod docs.
sma.add_state(
    state_name='Port2Light',
    state_timer=0,
    state_change_conditions={Bpod.Events.Tup: 'exit'},
    output_actions=[(Bpod.OutputChannels.PWM2, 255)])

# Upload the state machine to the device, then run it.
my_bpod.send_state_machine(sma)

my_bpod.run_state_machine(sma)

print("Current trial info: ", my_bpod.session.current_trial)

# Release the connection to the device.
my_bpod.close()
# -*- coding: utf-8 -*-
"""
Example adapted from <NAME>' original version on Sanworks Bpod repository
"""
from pybpodapi.protocol import Bpod, StateMachine
"""
Run this protocol now
"""
my_bpod = Bpod()
sma = StateMachine(my_bpod)
sma.add_state(
state_name='Port1Light',
state_timer=0,
state_change_conditions={Bpod.Events.Serial2_3: 'Port2Light'}, # Go to Port2Light when byte 0x3 arrives on UART port 2
output_actions=[(Bpod.OutputChannels.PWM1, 255)])
sma.add_state(
state_name='Port2Light',
state_timer=0,
state_change_conditions={Bpod.Events.Tup: 'exit'},
output_actions=[(Bpod.OutputChannels.PWM2, 255)])
my_bpod.send_state_machine(sma)
my_bpod.run_state_machine(sma)
print("Current trial info: ", my_bpod.session.current_trial)
my_bpod.close() | en | 0.712815 | # !/usr/bin/python3 # -*- coding: utf-8 -*- Example adapted from <NAME>' original version on Sanworks Bpod repository Run this protocol now # Go to Port2Light when byte 0x3 arrives on UART port 2 | 2.456212 | 2 |
rabbitai/models/schedules.py | psbsgic/rabbitai | 0 | 6618546 | """Models for scheduled execution of jobs"""
import enum
from typing import Optional, Type
from flask_appbuilder import Model
from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship, RelationshipProperty
from rabbitai import security_manager
from rabbitai.models.alerts import Alert
from rabbitai.models.helpers import AuditMixinNullable, ImportExportMixin
metadata = Model.metadata # pylint: disable=no-member
class ScheduleType(str, enum.Enum):
    """Kinds of objects a schedule can target."""

    slice = "slice"
    dashboard = "dashboard"
    alert = "alert"
class EmailDeliveryType(str, enum.Enum):
    """How report content is delivered in the email body."""

    attachment = "Attachment"
    inline = "Inline"
class SliceEmailReportFormat(str, enum.Enum):
    """Content format for a slice (chart) email report."""

    visualization = "Visualization"
    data = "Raw data"
class EmailSchedule:
    """Schedules for emailing slices / dashboards.

    Mixin with the columns shared by the concrete *EmailSchedule models;
    the user columns are built with ``declared_attr`` so each subclass
    gets its own foreign key and a backref named after its table.
    """

    __tablename__ = "email_schedules"
    id = Column(Integer, primary_key=True)
    active = Column(Boolean, default=True, index=True)
    crontab = Column(String(50))  # schedule expressed as a crontab string

    @declared_attr
    def user_id(self) -> int:
        # Deferred so the FK column is created per concrete subclass.
        return Column(Integer, ForeignKey("ab_user.id"))

    @declared_attr
    def user(self) -> RelationshipProperty:
        # backref takes the subclass's __tablename__, e.g. user.slice_email_schedules.
        return relationship(
            security_manager.user_model,
            backref=self.__tablename__,
            foreign_keys=[self.user_id],
        )

    recipients = Column(Text)
    slack_channel = Column(Text)
    deliver_as_group = Column(Boolean, default=False)
    delivery_type = Column(Enum(EmailDeliveryType))
class DashboardEmailSchedule(
    Model, AuditMixinNullable, ImportExportMixin, EmailSchedule
):
    """Email schedule targeting a whole dashboard."""

    __tablename__ = "dashboard_email_schedules"
    dashboard_id = Column(Integer, ForeignKey("dashboards.id"))
    dashboard = relationship(
        "Dashboard", backref="email_schedules", foreign_keys=[dashboard_id]
    )
class SliceEmailSchedule(Model, AuditMixinNullable, ImportExportMixin, EmailSchedule):
    """Email schedule targeting a single slice (chart)."""

    __tablename__ = "slice_email_schedules"
    slice_id = Column(Integer, ForeignKey("slices.id"))
    slice = relationship("Slice", backref="email_schedules", foreign_keys=[slice_id])
    # Whether the email carries the rendered chart or its raw data.
    email_format = Column(Enum(SliceEmailReportFormat))
def get_scheduler_model(report_type: str) -> Optional[Type[EmailSchedule]]:
    """Return the scheduler model class for ``report_type``, or None."""
    dispatch = (
        (ScheduleType.dashboard, DashboardEmailSchedule),
        (ScheduleType.slice, SliceEmailSchedule),
        (ScheduleType.alert, Alert),
    )
    for schedule_type, model in dispatch:
        if report_type == schedule_type:
            return model
    return None
| """Models for scheduled execution of jobs"""
import enum
from typing import Optional, Type
from flask_appbuilder import Model
from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship, RelationshipProperty
from rabbitai import security_manager
from rabbitai.models.alerts import Alert
from rabbitai.models.helpers import AuditMixinNullable, ImportExportMixin
metadata = Model.metadata # pylint: disable=no-member
class ScheduleType(str, enum.Enum):
slice = "slice"
dashboard = "dashboard"
alert = "alert"
class EmailDeliveryType(str, enum.Enum):
attachment = "Attachment"
inline = "Inline"
class SliceEmailReportFormat(str, enum.Enum):
visualization = "Visualization"
data = "Raw data"
class EmailSchedule:
"""Schedules for emailing slices / dashboards"""
__tablename__ = "email_schedules"
id = Column(Integer, primary_key=True)
active = Column(Boolean, default=True, index=True)
crontab = Column(String(50))
@declared_attr
def user_id(self) -> int:
return Column(Integer, ForeignKey("ab_user.id"))
@declared_attr
def user(self) -> RelationshipProperty:
return relationship(
security_manager.user_model,
backref=self.__tablename__,
foreign_keys=[self.user_id],
)
recipients = Column(Text)
slack_channel = Column(Text)
deliver_as_group = Column(Boolean, default=False)
delivery_type = Column(Enum(EmailDeliveryType))
class DashboardEmailSchedule(
Model, AuditMixinNullable, ImportExportMixin, EmailSchedule
):
__tablename__ = "dashboard_email_schedules"
dashboard_id = Column(Integer, ForeignKey("dashboards.id"))
dashboard = relationship(
"Dashboard", backref="email_schedules", foreign_keys=[dashboard_id]
)
class SliceEmailSchedule(Model, AuditMixinNullable, ImportExportMixin, EmailSchedule):
__tablename__ = "slice_email_schedules"
slice_id = Column(Integer, ForeignKey("slices.id"))
slice = relationship("Slice", backref="email_schedules", foreign_keys=[slice_id])
email_format = Column(Enum(SliceEmailReportFormat))
def get_scheduler_model(report_type: str) -> Optional[Type[EmailSchedule]]:
if report_type == ScheduleType.dashboard:
return DashboardEmailSchedule
if report_type == ScheduleType.slice:
return SliceEmailSchedule
if report_type == ScheduleType.alert:
return Alert
return None
| en | 0.761544 | Models for scheduled execution of jobs # pylint: disable=no-member Schedules for emailing slices / dashboards | 2.297478 | 2 |
src/av2/map/pedestrian_crossing.py | jhonykaesemodel/av2-api | 26 | 6618547 | <filename>src/av2/map/pedestrian_crossing.py
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Class representing a pedestrian crossing (crosswalk)."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, Tuple
import numpy as np
from av2.map.map_primitives import Polyline
from av2.utils.typing import NDArrayFloat
@dataclass
class PedestrianCrossing:
    """Represents a pedestrian crossing (i.e. crosswalk) as two edges along its principal axis.

    Both lines should be pointing in nominally the same direction and a pedestrian is expected to
    move either roughly parallel to both lines or anti-parallel to both lines.

    Args:
        id: unique identifier of this pedestrian crossing.
        edge1: 3d polyline representing one edge of the crosswalk, with 2 waypoints.
        edge2: 3d polyline representing the other edge of the crosswalk, with 2 waypoints.
    """

    id: int
    edge1: Polyline
    edge2: Polyline

    def get_edges_2d(self) -> Tuple[NDArrayFloat, NDArrayFloat]:
        """Retrieve the two principal edges of the crosswalk, in 2d.

        Returns:
            edge1: array of shape (2,2), a 2d polyline representing one edge of the crosswalk, with 2 waypoints.
            edge2: array of shape (2,2), a 2d polyline representing the other edge of the crosswalk, with 2 waypoints.
        """
        xy1 = self.edge1.xyz[:, :2]
        xy2 = self.edge2.xyz[:, :2]
        return (xy1, xy2)

    def __eq__(self, other: object) -> bool:
        """Check if two pedestrian crossing objects are equal, up to a tolerance."""
        if not isinstance(other, PedestrianCrossing):
            return False
        edge1_close = np.allclose(self.edge1.xyz, other.edge1.xyz)
        edge2_close = np.allclose(self.edge2.xyz, other.edge2.xyz)
        return edge1_close and edge2_close

    @classmethod
    def from_dict(cls, json_data: Dict[str, Any]) -> PedestrianCrossing:
        """Generate a PedestrianCrossing object from a dictionary read from JSON data."""
        return cls(
            id=json_data["id"],
            edge1=Polyline.from_json_data(json_data["edge1"]),
            edge2=Polyline.from_json_data(json_data["edge2"]),
        )

    @property
    def polygon(self) -> NDArrayFloat:
        """Return the vertices of the polygon representing the pedestrian crossing.

        Returns:
            array of shape (N,3) representing vertices. The first and last vertex that are provided are identical.
        """
        v0, v1 = self.edge1.xyz
        v2, v3 = self.edge2.xyz
        # Walk edge1 forward, edge2 backward, and close the loop at v0.
        return np.array([v0, v1, v3, v2, v0])
| <filename>src/av2/map/pedestrian_crossing.py
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Class representing a pedestrian crossing (crosswalk)."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, Tuple
import numpy as np
from av2.map.map_primitives import Polyline
from av2.utils.typing import NDArrayFloat
@dataclass
class PedestrianCrossing:
"""Represents a pedestrian crossing (i.e. crosswalk) as two edges along its principal axis.
Both lines should be pointing in nominally the same direction and a pedestrian is expected to
move either roughly parallel to both lines or anti-parallel to both lines.
Args:
id: unique identifier of this pedestrian crossing.
edge1: 3d polyline representing one edge of the crosswalk, with 2 waypoints.
edge2: 3d polyline representing the other edge of the crosswalk, with 2 waypoints.
"""
id: int
edge1: Polyline
edge2: Polyline
def get_edges_2d(self) -> Tuple[NDArrayFloat, NDArrayFloat]:
"""Retrieve the two principal edges of the crosswalk, in 2d.
Returns:
edge1: array of shape (2,2), a 2d polyline representing one edge of the crosswalk, with 2 waypoints.
edge2: array of shape (2,2), a 2d polyline representing the other edge of the crosswalk, with 2 waypoints.
"""
return (self.edge1.xyz[:, :2], self.edge2.xyz[:, :2])
def __eq__(self, other: object) -> bool:
"""Check if two pedestrian crossing objects are equal, up to a tolerance."""
if not isinstance(other, PedestrianCrossing):
return False
return np.allclose(self.edge1.xyz, other.edge1.xyz) and np.allclose(self.edge2.xyz, other.edge2.xyz)
@classmethod
def from_dict(cls, json_data: Dict[str, Any]) -> PedestrianCrossing:
"""Generate a PedestrianCrossing object from a dictionary read from JSON data."""
edge1 = Polyline.from_json_data(json_data["edge1"])
edge2 = Polyline.from_json_data(json_data["edge2"])
return PedestrianCrossing(id=json_data["id"], edge1=edge1, edge2=edge2)
@property
def polygon(self) -> NDArrayFloat:
"""Return the vertices of the polygon representing the pedestrian crossing.
Returns:
array of shape (N,3) representing vertices. The first and last vertex that are provided are identical.
"""
v0, v1 = self.edge1.xyz
v2, v3 = self.edge2.xyz
return np.array([v0, v1, v3, v2, v0])
| en | 0.903101 | # <Copyright 2022, Argo AI, LLC. Released under the MIT license.> Class representing a pedestrian crossing (crosswalk). Represents a pedestrian crossing (i.e. crosswalk) as two edges along its principal axis. Both lines should be pointing in nominally the same direction and a pedestrian is expected to move either roughly parallel to both lines or anti-parallel to both lines. Args: id: unique identifier of this pedestrian crossing. edge1: 3d polyline representing one edge of the crosswalk, with 2 waypoints. edge2: 3d polyline representing the other edge of the crosswalk, with 2 waypoints. Retrieve the two principal edges of the crosswalk, in 2d. Returns: edge1: array of shape (2,2), a 2d polyline representing one edge of the crosswalk, with 2 waypoints. edge2: array of shape (2,2), a 2d polyline representing the other edge of the crosswalk, with 2 waypoints. Check if two pedestrian crossing objects are equal, up to a tolerance. Generate a PedestrianCrossing object from a dictionary read from JSON data. Return the vertices of the polygon representing the pedestrian crossing. Returns: array of shape (N,3) representing vertices. The first and last vertex that are provided are identical. | 3.413987 | 3 |
src/prime_number.py | baggakunal/learning-python | 0 | 6618548 | <reponame>baggakunal/learning-python<filename>src/prime_number.py
from math import sqrt
def is_prime(num: int) -> bool:
    """Return True iff ``num`` is a prime number (trial division up to sqrt)."""
    if num < 2:
        return False
    upper = int(sqrt(num)) + 1
    return all(num % candidate != 0 for candidate in range(2, upper))
def main():
    """Print the list of all primes in the range [0, 100]."""
    primes = [n for n in range(101) if is_prime(n)]
    print(primes)


if __name__ == '__main__':
    main()
| from math import sqrt
def is_prime(num: int) -> bool:
if num < 2:
return False
for i in range(2, int(sqrt(num)) + 1):
if num % i == 0:
return False
return True
def main():
print([n for n in range(101) if is_prime(n)])
if __name__ == '__main__':
main() | none | 1 | 3.907286 | 4 | |
app/domain/entities.py | globocom/enforcement | 7 | 6618549 | <reponame>globocom/enforcement
from typing import Dict, List
from pydantic import BaseModel
class Cluster(BaseModel):
    """Identity and connection data for a single cluster."""

    name: str
    url: str
    token: str  # presumably the credential used against ``url`` -- confirm
    id: str
    # pydantic deep-copies field defaults, so a shared dict() here is safe.
    additional_data: dict = dict()
class Helm(BaseModel):
    """Helm options for an enforcement: a flat name -> value parameter map."""

    parameters: Dict[str, str] = None
class RancherSource(BaseModel):
    """Cluster-selection options for a Rancher source; all fields optional."""

    filters: Dict[str, str] = None
    labels: Dict[str, str] = None
    ignore: List[str] = None  # names/ids to exclude, presumably -- confirm
class EnforcementSource(BaseModel):
    """Where cluster information comes from: Rancher options and/or a secret name."""

    rancher: RancherSource = None
    secretName: str = None
class Enforcement(BaseModel):
    """One application to enforce: a repo (plus optional path) deployed to a namespace."""

    name: str
    repo: str
    path: str = None  # optional sub-path within the repo
    namespace: str = "default"
    helm: Helm = None  # optional Helm parameter overrides
    labels: dict = None
class TriggerConfig(BaseModel):
    """A single trigger: endpoint to call plus a timeout (units not stated here -- confirm)."""

    endpoint: str
    timeout: int = 5
class TriggersConfig(BaseModel):
    """Optional hooks fired around installation."""

    beforeInstall: TriggerConfig = None
    afterInstall: TriggerConfig = None
class ClusterRule(BaseModel):
    """Top-level rule: enforcements to apply, their source, and optional triggers."""

    enforcements: List[Enforcement]
    source: EnforcementSource
    triggers: TriggersConfig = None
class ClusterRuleStatus(BaseModel):
    """Status reported for a rule: clusters seen and install error messages."""

    clusters: List[dict] = []  # safe: pydantic copies defaults per instance
    install_errors: List[str] = []
class Secret(BaseModel):
    """Credential pair (token + url) loaded from a secret."""

    token: str
    url: str
| from typing import Dict, List
from pydantic import BaseModel
class Cluster(BaseModel):
name: str
url: str
token: str
id: str
additional_data: dict = dict()
class Helm(BaseModel):
parameters: Dict[str, str] = None
class RancherSource(BaseModel):
filters: Dict[str, str] = None
labels: Dict[str, str] = None
ignore: List[str] = None
class EnforcementSource(BaseModel):
rancher: RancherSource = None
secretName: str = None
class Enforcement(BaseModel):
name: str
repo: str
path: str = None
namespace: str = "default"
helm: Helm = None
labels: dict = None
class TriggerConfig(BaseModel):
endpoint: str
timeout: int = 5
class TriggersConfig(BaseModel):
beforeInstall: TriggerConfig = None
afterInstall: TriggerConfig = None
class ClusterRule(BaseModel):
enforcements: List[Enforcement]
source: EnforcementSource
triggers: TriggersConfig = None
class ClusterRuleStatus(BaseModel):
clusters: List[dict] = []
install_errors: List[str] = []
class Secret(BaseModel):
token: str
url: str | none | 1 | 2.254152 | 2 | |
efficient_charCRNN/model/net.py | jaeminkim87/nlp | 11 | 6618550 | <gh_stars>10-100
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.ops import Flatten, Permute
from gluonnlp import Vocab
class EfficientCharCRNN(nn.Module):
    """Character-level CNN + BiLSTM text classifier.

    Two conv/max-pool stages shorten the character sequence, a bidirectional
    LSTM summarises it, and a linear layer maps the concatenated final hidden
    states of both directions to class scores.
    """

    def __init__(self, args, vocab, word_dropout_ratio: float = .5):
        """
        Args:
            args: object providing ``word_dim`` (embedding size) and
                ``classes`` (number of output classes).
            vocab: vocabulary exposing ``__len__``, ``padding_token`` and
                ``to_indices`` (gluonnlp-style).
            word_dropout_ratio: probability used for word dropout during
                training; also passed as the LSTM ``dropout`` rate.
        """
        super(EfficientCharCRNN, self).__init__()
        self._dim = args.word_dim
        self._word_dropout_ratio = word_dropout_ratio
        padding_idx = vocab.to_indices(vocab.padding_token)
        self._embedding = nn.Embedding(len(vocab), self._dim, padding_idx)
        self._conv = nn.Conv1d(in_channels=self._dim, out_channels=128,
                               kernel_size=5, stride=1, padding=1)
        self._conv1 = nn.Conv1d(in_channels=128, out_channels=128,
                                kernel_size=3, stride=1, padding=1)
        self._maxpool = nn.MaxPool1d(2, stride=2)
        self._maxpool1 = nn.MaxPool1d(2, stride=2)
        self._dropout = nn.Dropout()
        self._bilstm = nn.LSTM(128, 128, dropout=self._word_dropout_ratio,
                               batch_first=True, bidirectional=True)
        self._fc = nn.Linear(256, args.classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Score a batch of token-index sequences; returns (batch, classes) logits."""
        if self.training:
            # Word dropout: replace a random fraction of token indices by 0.
            # NOTE(review): 0 is assumed to be the padding index here -- confirm,
            # since the embedding's padding index comes from the vocab.
            drop_mask = x.bernoulli(self._word_dropout_ratio)
            x = torch.where(drop_mask == 1, torch.tensor(0).to(x.device), x)
        # (batch, seq, dim) -> (batch, dim, seq) as required by Conv1d.
        emb = self._embedding(x).permute(0, 2, 1)
        h = self._maxpool(F.relu(self._conv(emb)))
        h = self._maxpool1(F.relu(self._conv1(h)))
        # Back to (batch, seq', channels) for the batch-first LSTM.
        h = h.permute(0, 2, 1)
        _, state = self._bilstm(h)
        # Concatenate the final hidden state of each direction -> (batch, 256).
        feature = torch.cat([*state[0]], dim=1)
        return self._fc(self._dropout(feature))
| import torch
import torch.nn as nn
import torch.nn.functional as F
from model.ops import Flatten, Permute
from gluonnlp import Vocab
class EfficientCharCRNN(nn.Module):
def __init__(self, args, vocab, word_dropout_ratio: float = .5):
super(EfficientCharCRNN, self).__init__()
self._dim = args.word_dim
self._word_dropout_ratio = word_dropout_ratio
self._embedding = nn.Embedding(len(vocab), self._dim, vocab.to_indices(vocab.padding_token))
self._conv = nn.Conv1d(in_channels=self._dim, out_channels=128, kernel_size=5, stride=1, padding=1)
self._conv1 = nn.Conv1d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1)
self._maxpool = nn.MaxPool1d(2, stride=2)
self._maxpool1 = nn.MaxPool1d(2, stride=2)
self._dropout = nn.Dropout()
self._bilstm = nn.LSTM(128, 128, dropout=self._word_dropout_ratio, batch_first=True, bidirectional=True)
self._fc = nn.Linear(256, args.classes)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.training:
m = x.bernoulli(self._word_dropout_ratio)
x = torch.where(m == 1, torch.tensor(0).to(x.device), x)
embedding = self._embedding(x).permute(0, 2, 1)
r = self._conv(embedding)
r = F.relu(r)
r = self._maxpool(r)
r = self._conv1(r)
r = F.relu(r)
r = self._maxpool1(r)
r = r.permute(0, 2, 1)
_, r = self._bilstm(r)
feature = torch.cat([*r[0]], dim=1)
r = self._dropout(feature)
score = self._fc(r)
return score | none | 1 | 2.422082 | 2 |