index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
41,990 | shuohan/espreso | refs/heads/master | /isbi/get_brain_pics.py | #!/usr/bin/env python
import nibabel as nib
import numpy as np
from pathlib import Path
from PIL import Image
from image_processing_3d import quantile_scale
from scipy.ndimage import zoom
filename = '20121_02_FLAIRPre_2D.nii'
obj = nib.load(filename)
data = obj.get_fdata(dtype=np.float32)
zooms = obj.header.get_zooms()
data = quantile_scale(data, upper_pct=0.99, upper_th=1) * 255
print(data.shape)
th_slice_ind = 135
in_slice_ind = 40
factor = zooms[2] / zooms[0]
ysize = 119
xsize = 90
# ysize = 200
# xsize = 200
xstart = 25
ystart = 40
in_im = data[:, :, in_slice_ind].astype(np.uint8).T
in_im = in_im[xstart : xstart + xsize, ystart : ystart + ysize]
xstart = 10
ystart = 10
th_im = data[th_slice_ind, :, :].astype(np.uint8).T
th_im = th_im[::-1, :]
th_im = zoom(th_im, (factor, 1), order=0, prefilter=False)
th_im = th_im[xstart : xstart + xsize, ystart : ystart + ysize]
in_im = Image.fromarray(in_im)
in_im.save('flair_in.png')
th_im = Image.fromarray(th_im)
th_im.save('flair_th.png')
filename = '20208_03_T1Post.nii'
obj = nib.load(filename)
data = obj.get_fdata(dtype=np.float32)
data = quantile_scale(data, upper_pct=0.99, upper_th=1) * 255
zooms = obj.header.get_zooms()
factor = zooms[2] / zooms[0]
in_slice_ind = 30
xstart = 35
ystart = 40
in_im = data[:, :, in_slice_ind].astype(np.uint8).T
in_im = in_im[xstart : xstart + xsize, ystart : ystart + ysize]
xstart = 30
ystart = 20
th_im = data[th_slice_ind, :, :].astype(np.uint8).T
th_im = th_im[::-1, :]
th_im = zoom(th_im, (factor, 1), order=0, prefilter=False)
th_im = th_im[xstart : xstart + xsize, ystart : ystart + ysize]
in_im = Image.fromarray(in_im)
in_im.save('t1_in.png')
th_im = Image.fromarray(th_im)
th_im.save('t1_th.png')
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
41,991 | shuohan/espreso | refs/heads/master | /setup.py | import os
from setuptools import setup, find_packages
from glob import glob
scripts = glob('scripts/*')
version = '0.1.0'
setup(name='ssp',
version=version,
description='Slice selection profile estimation in 2D MRI',
author='Shuo Han',
url='https://github.com/shuohan/ssp',
author_email='shan50@jhu.edu',
scripts=scripts,
license='MIT',
packages=['ssp']
)
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
41,992 | shuohan/espreso | refs/heads/master | /isbi/get_brain_profiles.py | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import matplotlib
from psf_est.utils import calc_fwhm
def plot(est, filename):
font = {'size': 8}
matplotlib.rc('font', **font)
est_hm = np.max(est) / 2
est_fwhm, est_left, est_right = calc_fwhm(est)
dpi = 100
figx = 168
figy = 120
figl = 0.20
figr = 0.01
figb = 0.20
figt = 0.05
position = [figl, figb, 1 - figl - figr, 1 - figb - figt]
fig = plt.figure(figsize=(figx/dpi, figy/dpi), dpi=dpi)
ax = fig.add_subplot(111, position=position)
plt.plot(est, '-', color='tab:blue')
plt.plot([est_left, est_right], [est_hm] * 2, '--o', color='tab:blue',
markersize=5)
tl = est_right + 1.5
plt.text(tl, est_hm, '%.2f' % est_fwhm, color='tab:blue',
va='center')
plt.xticks(np.arange(0, len(est), 2))
plt.yticks([0, 0.1, 0.2, 0.3])
plt.ylim([-0.02, 0.41])
plt.savefig(filename)
if __name__ == '__main__':
est_filename = '20121_02_FLAIRPre_2D_kernel.npy'
est = np.load(est_filename).squeeze()
plot(est, '20121_02_FLAIRPre_2D_kernel.pdf')
est_filename = '20208_03_T1Post_kernel.npy'
est = np.load(est_filename).squeeze()
plot(est, '20208_03_T1Post_kernel.pdf')
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
41,993 | shuohan/espreso | refs/heads/master | /tests/test_interp.py | #!/usr/bin/env python
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
# data = torch.arange(20)[None, None, ...].float()
# output = F.interpolate(data, scale_factor=0.5, mode='linear').squeeze()
# print(data)
# print(output)
data = plt.imread('lena.png')[:256, :256, 0]
data = torch.tensor(data, requires_grad=True).float()
fft = torch.rfft(data, 1, onesided=False)
fft = torch.roll(fft, shifts=fft.size(1)//2, dims=1)
fft = F.pad(fft, (0, 0, 50, 50))
new_data = torch.roll(fft, shifts=(fft.size(0)//2, fft.size(1)//2), dims=(0, 1))  # shift each dim by its own half size
new_data = torch.irfft(new_data, 1)
# numpy_fft = np.fft.fftshift(np.fft.fft2(data.detach().numpy()))
# numpy_fft =np.fft.fft2(data.detach().numpy())
# assert np.allclose(np.real(numpy_fft), fft[:, :, 0].detach().numpy())
# diff = np.abs(np.real(numpy_fft) - fft[:, :, 1].detach().numpy())
# diff = diff / np.abs(np.real(numpy_fft))
# print(np.max(diff), np.min(diff))
# plt.imshow(np.abs(np.real(numpy_fft) - fft[:, :, 0].detach().numpy()))
# loss = torch.sum(new_data)
# loss.backward()
# print(new_data.shape)
print(fft.shape)
fft_mag = torch.sqrt(fft[..., 0] ** 2 + fft[..., 1] ** 2)
fft_mag = fft_mag.detach().cpu().numpy().squeeze()
plt.imshow(np.log(fft_mag + 1))
plt.gcf().savefig('fft.png')
#
# plt.figure()
# plt.subplot(1, 2, 1)
# plt.imshow(data.detach().numpy(), cmap='gray')
# plt.subplot(1, 2, 2)
# plt.imshow(new_data.detach().numpy(), cmap='gray')
# plt.gcf().savefig('fft_image.png')
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
41,994 | shuohan/espreso | refs/heads/master | /tests/test_fwhm.py | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from ssp.utils import calc_fwhm
from lr_simu.kernel import create_gaussian_kernel
def test_fwhm():
dirname = Path('results_fwhm')
dirname.mkdir(exist_ok=True)
kernel = create_gaussian_kernel(1 / 8)
fwhm, left, right = calc_fwhm(kernel)
print(fwhm, left, right)
assert np.round(fwhm) == 8
plt.plot(kernel)
plt.plot([left] * 2, [0, np.max(kernel)], 'k')
plt.plot([right] * 2, [0, np.max(kernel)], 'k')
plt.plot([0, len(kernel) - 1], [np.max(kernel) / 2] * 2, 'k')
plt.gcf().savefig(dirname.joinpath('kernel.png'))
if __name__ == '__main__':
test_fwhm()
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
41,995 | shuohan/espreso | refs/heads/master | /isbi/calc_phantom_fwhm.py | #!/usr/bin/env python
import numpy as np
import pandas as pd
import nibabel as nib
from pathlib import Path
from collections import OrderedDict
from psf_est.utils import calc_fwhm
est_dirname = '../tests/results_isbi2021_phantom_final'
est_pattern = 'phantom_%smm_%smm_smooth-1.0/kernel/avg_epoch-30000.npy'
fwhm = ['2', '4']
scale = {'2': ['gapn0p5', 'gap0', 'gap0p5', 'gap1', 'gap2'],
'4': ['gapn2', 'gapn1', 'gap0', 'gap1', 'gap2',]}
df = list()
for f in fwhm:
for s in scale[f]:
est_filename = Path(est_dirname, est_pattern % (f, s))
est = np.load(est_filename).squeeze()
est_fwhm = calc_fwhm(est)[0]
tab = OrderedDict([('thick', f),
('scale', s),
                            ('fwhm', '%.2f' % est_fwhm)])
df.append(tab)
df = pd.DataFrame(df).T
print(df)
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
41,996 | shuohan/espreso | refs/heads/master | /tests/compare_interp.py | #!/usr/bin/env python
import numpy as np
import torch
from PIL import Image
import matplotlib.pyplot as plt
from scipy.interpolate import interp2d
from scipy.ndimage import zoom
from torch.nn.functional import interpolate
scale = 1 / 4
# data = np.array(Image.open('lena.png').convert('L')).astype(np.float32) / 255
data = np.zeros((256, 256), dtype=np.float32)
for i in range(256):
for j in range(256):
if np.sqrt((i - 128) ** 2 + (j - 128) ** 2) < 64:
data[i, j] = 1
data_torch = torch.tensor(data)[None, None, ...]
# scipy_interp2d_data = interp2d
scipy_zoom_filter = zoom(data, scale, order=3, prefilter=True, mode='nearest')
scipy_zoom_nofilter = zoom(data, scale, order=3, prefilter=False, mode='nearest')
torch_interp = interpolate(data_torch, size=scipy_zoom_filter.shape,
mode='bicubic', align_corners=True)
torch_interp = torch_interp.detach().numpy().squeeze()
print(scipy_zoom_filter.shape, torch_interp.shape)
print('no filter diff', np.sum(np.abs(scipy_zoom_nofilter - torch_interp)))
print('filter diff', np.sum(np.abs(scipy_zoom_filter - torch_interp)))
plt.subplot(2, 3, 1)
plt.imshow(scipy_zoom_nofilter, cmap='gray')
plt.title('scipy zoom no filter')
plt.subplot(2, 3, 2)
plt.imshow(scipy_zoom_filter, cmap='gray')
plt.title('scipy zoom filter')
plt.subplot(2, 3, 3)
plt.imshow(torch_interp, cmap='gray')
plt.title('torch interp')
plt.subplot(2, 3, 5)
plt.imshow(np.abs(scipy_zoom_filter - scipy_zoom_nofilter), cmap='gray')
plt.subplot(2, 3, 6)
plt.imshow(np.abs(torch_interp - scipy_zoom_nofilter), cmap='gray')
# plt.gcf().savefig('compare_interp.png')
plt.figure()
plt.plot(scipy_zoom_filter[:, 32])
plt.plot(scipy_zoom_nofilter[:, 32])
plt.plot(torch_interp[:, 32])
plt.legend(['filter', 'nofilter', 'torch'])
plt.show()
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
41,997 | shuohan/espreso | refs/heads/master | /tests/test_kernel.py | #!/usr/bin/env python
import matplotlib.pyplot as plt
from pathlib import Path
from ssp.network import KernelNet1d
def test_kernel():
dirname = Path('results_kernel')
dirname.mkdir(exist_ok=True)
filename = dirname.joinpath('kernel.png')
net = KernelNet1d().cuda()
assert net.impulse.shape == (1, 1, 25)
assert str(net.impulse.device) == 'cuda:0'
kernel = net.calc_kernel().kernel
assert kernel.shape == (1, 1, 13)
kernel = kernel.cpu().detach().numpy().squeeze()
fig = plt.figure()
plt.plot(kernel)
plt.grid(True)
fig.savefig(filename)
if __name__ == '__main__':
test_kernel()
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
41,998 | shuohan/espreso | refs/heads/master | /isbi/get_simu_pics.py | #!/usr/bin/env python
import nibabel as nib
import numpy as np
from pathlib import Path
from PIL import Image
from image_processing_3d import quantile_scale
from scipy.ndimage import zoom
dirname = '/data/phantom/simu'
basename = 'SUPERRES-ADNIPHANTOM_20200711_PHANTOM-T2-TSE-3D-CORONAL-PRE-ACQ1-01mm_resampled_type-gauss_fwhm-8p0_scale-0p25_len-13.nii'
filename = Path(dirname, basename)
obj = nib.load(filename)
data = obj.get_fdata(dtype=np.float32)
data = quantile_scale(data, upper_pct=0.99, upper_th=1) * 255
th_slice_ind = 128
in_slice_ind = 22
factor = 4
ysize = 119
xsize = 60
xstart = 60
ystart = 40
in_im = data[:, :, in_slice_ind].astype(np.uint8).T
in_im = in_im[xstart : xstart + xsize, ystart : ystart + ysize]
xstart = 80
ystart = 40
th_im = data[th_slice_ind, :, :].astype(np.uint8).T
th_im = zoom(th_im, (factor, 1), order=0, prefilter=False)
th_im = th_im[xstart : xstart + xsize, ystart : ystart + ysize]
in_im = Image.fromarray(in_im)
in_im.save('gauss_in.png')
th_im = Image.fromarray(th_im)
th_im.save('gauss_th.png')
dirname = '/data/phantom/simu'
basename = 'SUPERRES-ADNIPHANTOM_20200711_PHANTOM-T2-TSE-3D-CORONAL-PRE-ACQ1-01mm_resampled_type-rect_fwhm-9p0_scale-0p5_len-13.nii'
filename = Path(dirname, basename)
obj = nib.load(filename)
data = obj.get_fdata(dtype=np.float32)
data = quantile_scale(data, upper_pct=0.99, upper_th=1) * 255
factor = 2
in_slice_ind = 45
xstart = 60
ystart = 40
in_im = data[:, :, in_slice_ind].astype(np.uint8).T
in_im = in_im[xstart : xstart + xsize, ystart : ystart + ysize]
xstart = 80
ystart = 40
th_im = data[th_slice_ind, :, :].astype(np.uint8).T
th_im = zoom(th_im, (factor, 1), order=0, prefilter=False)
th_im = th_im[xstart : xstart + xsize, ystart : ystart + ysize]
in_im = Image.fromarray(in_im)
in_im.save('rect_in.png')
th_im = Image.fromarray(th_im)
th_im.save('rect_th.png')
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
41,999 | shuohan/espreso | refs/heads/master | /ssp/loss.py | import torch
import numpy as np
import torch.nn.functional as F
class GANLoss(torch.nn.Module):
r"""Loss of the original GAN with cross entropy.
For the discriminator :math:`D`, this loss minimizes the binary cross
entropy with logits:
.. math::
l = - \mathrm{mean}_x (y \ln(\sigma(D(x)))
+ (1 - y) \ln(1 - \sigma(D(x)))),
where :math:`\sigma` is the sigmoid function. If
:math:`x \in \mathrm{\{truth\}}`, we have :math:`y = 1` and
.. math::
l = - \mathrm{mean}_x \ln(\sigma(D(x))).
If :math:`x \in \mathrm{\{generated\}}`, i.e. :math:`\exists z` s.t.
:math:`x = G(z)` where `G` is the generator, we have :math:`y = 0` and
.. math::
l = - \mathrm{mean}_x \ln(1 - \sigma(D(G(z)))).
    Combining the two terms above gives the loss for the discriminator.
    For the generator :math:`G`, this loss minimizes the binary cross entropy
    of the same form with :math:`y = 1`, i.e. it minimizes
    .. math::
        l = - \mathrm{mean}_x \ln(\sigma(D(G(z)))).
    This is the modified (non-saturating) GAN loss; the original formulation
    instead minimizes :math:`l = \mathrm{mean}_x \ln(1 - \sigma(D(G(z))))`.
"""
def __init__(self):
super().__init__()
def forward(self, x, is_real):
target = torch.ones_like(x) if is_real else torch.zeros_like(x)
loss = F.binary_cross_entropy_with_logits(x, target)
return loss
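# Minimal usage sketch (illustrative only; the logit shapes below are
# assumptions, not taken from this repository). The discriminator loss sums a
# real-target term and a fake-target term, while the generator reuses the same
# criterion with is_real=True, giving the non-saturating form described above.
def _gan_loss_example():
    criterion = GANLoss()
    logits_real = torch.randn(4, 1)  # hypothetical D outputs on real patches
    logits_fake = torch.randn(4, 1)  # hypothetical D outputs on G(z)
    d_loss = criterion(logits_real, True) + criterion(logits_fake, False)
    g_loss = criterion(logits_fake, True)
    return d_loss, g_loss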
class SmoothnessLoss(torch.nn.Module):
r"""L2 norm of derivative.
This loss minimizes
.. math::
        l = \lVert \nabla k \rVert_2,
where :math:`k` is the kernel, to encourage smoothness.
"""
def forward(self, kernel):
device = kernel.device
operator = torch.tensor([1, -1], dtype=torch.float32, device=device)
operator = operator[None, None, ..., None]
derivative = F.conv2d(kernel, operator)
loss = torch.sqrt(torch.sum(derivative ** 2))
return loss
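# Sketch of the expected input layout (inferred from the conv2d call above, so
# treat the shape as an assumption): the kernel enters as (batch, 1, length, 1)
# and the (1, 1, 2, 1) finite-difference operator slides along the length axis.
def _smoothness_loss_example():
    kernel = torch.softmax(torch.randn(1, 1, 21, 1), dim=2)  # sums to 1 along length
    return SmoothnessLoss()(kernel)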
class CenterLoss(torch.nn.Module):
r"""Penalizes off-center.
    This loss minimizes the difference between the center of mass of the kernel
    and the center of the vector:
    .. math::
        l = \left(\sum_x k(x) x - C \right) ^ 2,
    where :math:`k(x)` is the kernel, :math:`x` is the vector index, and
    :math:`C` is the center of the vector. The kernel is assumed to sum to 1.
"""
def __init__(self, kernel_length):
super().__init__()
self.kernel_length = kernel_length
center = torch.tensor(self.kernel_length // 2, dtype=torch.float32)
self.register_buffer('center', center)
locs = torch.arange(self.kernel_length, dtype=torch.float32)
self.register_buffer('locs', locs)
def forward(self, kernel):
kernel_center = torch.sum(kernel.squeeze() * self.locs)
loss = F.mse_loss(kernel_center, self.center)
return loss
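# Quick sanity sketch (illustrative): a delta kernel placed exactly at
# kernel_length // 2 yields zero center loss, and any shift away from the
# middle is penalized quadratically.
def _center_loss_example():
    length = 21
    kernel = torch.zeros(1, 1, length, 1)
    kernel[0, 0, length // 2, 0] = 1.0  # unit mass at the center index
    return CenterLoss(length)(kernel)   # expected to be ~0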
class BoundaryLoss(torch.nn.Module):
r"""Penalizes non-zero values at kernel boundary.
This loss minimizes the weighted sum:
.. math::
l = \sum_x | m(x) k(x) |
    where :math:`m` is a binary mask that is one at the two outermost samples
    on each side of the kernel and zero elsewhere (an earlier inverted-Gaussian
    mask is kept commented out in the code below).
"""
def __init__(self, kernel_length):
super().__init__()
self.kernel_length = kernel_length
        mask = self._create_penalty_mask().float()
self.register_buffer('mask', mask[None, None, ..., None])
def _create_penalty_mask(self):
# center = self.kernel_length // 2
# locs = np.arange(self.kernel_length) - center
# mask = np.exp(-locs ** 2 / (2 * self.kernel_length ** 2))
# mask = 1 - mask / np.max(mask)
# margin = (self.kernel_length - center) // 2 - 2
mask = torch.ones(self.kernel_length).float()
mask[2:-2] = 0
return mask
def forward(self, kernel):
return torch.sum(torch.abs(kernel * self.mask))
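# Sketch (illustrative): with the binary mask built above, only the two
# outermost samples on each side of the kernel contribute, so a kernel whose
# tails are already zero incurs no boundary penalty.
def _boundary_loss_example():
    length = 21
    kernel = torch.zeros(1, 1, length, 1)
    kernel[0, 0, length // 2, 0] = 1.0   # all mass in the interior
    return BoundaryLoss(length)(kernel)  # expected to be 0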
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
42,000 | shuohan/espreso | refs/heads/master | /ssp/config.py | """Handles the configuration of this algorithm.
"""
from singleton_config import Config as Config_
class Config(Config_):
"""The algorithm configuration.
Attributes:
kn_num_channels (int): The number of channels for
:class:`psf_est.network.KernelNet`.
kn_kernel_sizes (list[int]): The kernel sizes for each of
:class:`psf_est.network.KernelNet` conv weights.
kernel_length (int): The length of the kernel to estimate.
kernel_avg_beta (float): The kernel averaging beta for
:class:`psf_est.network.KernelNet`.
lrd_num_convs (int): The number of convolutions in
:class:`psf_est.network.LowResDiscriminator`.
lrd_num_channels (int): The number of channels in the first conv of
:class:`psf_est.network.LowResDiscriminator`.
lrelu_neg_slope (float): The negative slope of leaky ReLU in
:class:`psf_est.network.LowResDiscriminator`.
"""
def __init__(self):
super().__init__()
self.add_config('kn_num_channels', 1024)
self.add_config('kn_num_linears', 4)
self.add_config('kn_update_step', 1)
self.add_config('lrd_update_step', 1)
self.add_config('kernel_avg_beta', 0.99)
self.add_config('kernel_length', 21)
self.add_config('lrd_num_convs', 5)
self.add_config('lrd_num_channels', 64)
self.add_config('lrelu_neg_slope', 0.1)
self.add_config('patch_size', 20)
self.add_config('scale_factor', 1)
self.add_config('batch_size', 32)
self.add_config('num_epochs', 10000)
self.add_config('num_init_epochs', 0)
self.add_config('smoothness_loss_weight', 1e-5)
self.add_config('center_loss_weight', 1)
self.add_config('boundary_loss_weight', 10)
self.add_config('weight_decay', 0)
self.add_config('image_save_step', 100)
self.add_config('eval_step', 100)
self.add_config('image_save_zoom', 1)
self.add_config('interp_mode', 'bicubic')
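# Usage sketch (hedged: based only on how this repository itself uses Config,
# e.g. in scripts/train2d.py; singleton_config internals are not assumed here):
def _config_example():
    config = Config()                      # shared singleton instance
    config.kernel_length = 13              # override an existing default
    config.add_config('note', 'demo run')  # register a brand-new key
    return config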
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
42,001 | shuohan/espreso | refs/heads/master | /scripts/train2d.py | #!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help='Input image.')
parser.add_argument('-o', '--output', help='Output directory.')
parser.add_argument('-bs', '--batch-size', default=32, type=int,
help='The number of samples per mini-batch.')
parser.add_argument('-s', '--scale-factor', default=None, type=float,
help='Super resolution scale factor.')
parser.add_argument('-e', '--num-epochs', default=10000, type=int,
help='The number of epochs (iterations).')
parser.add_argument('-iss', '--image-save-step', default=100, type=int,
help='The image saving step.')
parser.add_argument('-k', '--true-kernel', default=None)
parser.add_argument('-l', '--kernel-length', default=21, type=int)
parser.add_argument('-na', '--no-aug', action='store_true')
parser.add_argument('-w', '--num-workers', default=0, type=int)
parser.add_argument('-z', '--z-axis', default=2, type=int)
parser.add_argument('-isz', '--image-save-zoom', default=1, type=int)
parser.add_argument('-sw', '--smoothness-loss-weight', default=0, type=float)
parser.add_argument('-bw', '--boundary-loss-weight', default=10, type=float)
parser.add_argument('-b', '--kernel-avg-beta', default=0.99, type=float)
args = parser.parse_args()
import os
import nibabel as nib
import numpy as np
from pathlib import Path
from torch.optim import Adam
from torch.utils.data import DataLoader, WeightedRandomSampler
import warnings
from sssrlib.patches import Patches, PatchesOr
from sssrlib.transform import create_rot_flip
from ssp.config import Config
from ssp.train import TrainerHRtoLR, KernelSaver, KernelEvaluator
from ssp.network import KernelNet2d, LowResDiscriminator2d
from ssp.utils import calc_patch_size
from pytorch_trainer.log import DataQueue, EpochPrinter, EpochLogger
from pytorch_trainer.save import ImageSaver
warnings.filterwarnings("ignore")
args.output = Path(args.output)
args.output.mkdir(parents=True, exist_ok=True)
im_output = args.output.joinpath('patches')
kernel_output = args.output.joinpath('kernel')
log_output = args.output.joinpath('loss.csv')
eval_log_output = args.output.joinpath('eval_loss.csv')
config_output = args.output.joinpath('config.json')
xy = [0, 1, 2]
xy.remove(args.z_axis)
obj = nib.load(args.input)
image = obj.get_fdata(dtype=np.float32)
if args.scale_factor is None:
zooms = obj.header.get_zooms()
args.scale_factor = float(zooms[args.z_axis] / zooms[xy[0]])
# if zooms[xy[0]] != zooms[xy[1]] and not args.no_aug:
# raise RuntimeError('The resolutions of x and y are different.')
if args.scale_factor < 1:
    raise RuntimeError('Scale factor should be greater than or equal to 1.')
config = Config()
nz = image.shape[args.z_axis]
hr_ps, lr_ps = calc_patch_size(config.patch_size, args.scale_factor, nz)
for key, value in args.__dict__.items():
if hasattr(config, key):
setattr(config, key, value)
config.add_config('input_image', os.path.abspath(str(args.input)))
config.add_config('output_dirname', os.path.abspath(str(args.output)))
kn = KernelNet2d().cuda()
lrd = LowResDiscriminator2d().cuda()
kn_optim = Adam(kn.parameters(), lr=1e-4, betas=(0.5, 0.999),
weight_decay=config.weight_decay)
lrd_optim = Adam(lrd.parameters(), lr=1e-4, betas=(0.5, 0.999))
hr_ps = hr_ps + kn.input_size_reduced
config.add_config('hr_patch_size', hr_ps)
config.add_config('lr_patch_size', lr_ps)
print(config)
config.save_json(config_output)
print(kn)
print(lrd)
print(kn_optim)
print(lrd_optim)
transforms = [] if args.no_aug else create_rot_flip()
hr_patches = Patches(image, (hr_ps, hr_ps, 1), transforms=transforms,
x=xy[0], y=xy[1], z=args.z_axis).cuda()
hr_loader = hr_patches.get_dataloader(config.batch_size, args.num_workers)
if args.no_aug:
lr_patches = Patches(image, (lr_ps, hr_ps, 1), x=args.z_axis, y=xy[1], z=xy[0]).cuda()
else:
lr_patches_xy = Patches(image, (lr_ps, hr_ps, 1), x=args.z_axis, y=xy[1], z=xy[0]).cuda()
lr_patches_yx = Patches(image, (lr_ps, hr_ps, 1), x=args.z_axis, y=xy[0], z=xy[1]).cuda()
lr_patches = PatchesOr(lr_patches_xy, lr_patches_yx)
lr_loader = lr_patches.get_dataloader(config.batch_size, args.num_workers)
print('HR patches')
print('----------')
print(hr_patches)
print()
print('LR patches')
print('----------')
print(lr_patches)
trainer = TrainerHRtoLR(kn, lrd, kn_optim, lrd_optim, hr_loader, lr_loader)
queue = DataQueue(['kn_gan_loss', 'smoothness_loss', 'center_loss',
'boundary_loss', 'kn_tot_loss', 'lrd_tot_loss'])
printer = EpochPrinter(print_sep=False)
logger = EpochLogger(log_output)
attrs = ['lr', 'hr', 'blur', 'alias']
im_saver = ImageSaver(im_output, attrs=attrs, step=config.image_save_step,
file_struct='epoch/sample', save_type='png_norm',
save_init=False, prefix='patch',
zoom=config.image_save_zoom, ordered=True)
attrs = ['lrd_pred_real', 'lrd_pred_fake', 'lrd_pred_kn']
pred_saver = ImageSaver(im_output, attrs=attrs, step=config.image_save_step,
file_struct='epoch/sample', save_type='png',
image_type='sigmoid', save_init=False, prefix='lrd',
zoom=config.image_save_zoom, ordered=True)
kernel_saver = KernelSaver(kernel_output, step=config.image_save_step,
save_init=True)
if args.true_kernel is not None:
true_kernel = np.load(args.true_kernel)
evaluator = KernelEvaluator(true_kernel, config.kernel_length).cuda()
eval_queue = DataQueue(['mae'])
eval_printer = EpochPrinter(print_sep=False)
eval_logger = EpochLogger(eval_log_output)
eval_queue.register(eval_printer)
eval_queue.register(eval_logger)
evaluator.register(eval_queue)
trainer.register(evaluator)
queue.register(printer)
queue.register(logger)
trainer.register(queue)
trainer.register(im_saver)
trainer.register(pred_saver)
trainer.register(kernel_saver)
trainer.train()
| {"/tests/test_loss.py": ["/ssp/loss.py"], "/ssp/train.py": ["/ssp/config.py", "/ssp/loss.py", "/ssp/utils.py"], "/tests/test_networks.py": ["/ssp/network.py"], "/scripts/compare_kernel.py": ["/ssp/utils.py"], "/ssp/network.py": ["/ssp/config.py"], "/tests/test_fwhm.py": ["/ssp/utils.py"], "/tests/test_kernel.py": ["/ssp/network.py"], "/scripts/train2d.py": ["/ssp/config.py", "/ssp/train.py", "/ssp/network.py", "/ssp/utils.py"]} |
42,003 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/spiders/TestSpider.py | import scrapy,re,json,uuid
from scrapy.selector import Selector
from MacDonaldAnalyse.items import CommunityItem
import urllib
class TestSpider(scrapy.Spider):
name = 'Test'
handle_httpstatus_list = [404]
allowed_domains = ['map.baidu.com','fang.com','leju.com','anjuke.com','sina.com.cn',
'soufun.com','github.com']
url = 'http://api.map.baidu.com/place/v2/search?query=麦当劳&output=json&ak=LI5syaP0yQLSsgDXdPkRXb0rMnEZBhOx&page_num=0&scope=2'
baiduUrl = 'http://api.map.baidu.com/place/v2/search?query=%E5%B0%8F%E5%8C%BA&location=23.1527,113.262&radius=1000&output=json&ak=LI5syaP0yQLSsgDXdPkRXb0rMnEZBhOx&scope=2&radius_limit=true&page_size=20&page_num=0'
communityUrl = 'http://api.map.baidu.com/place/v2/search?query=小区&location={0},{1}' \
'&radius={2}&output=json&ak={3}&scope=2&page_num={4}&page_size=20' \
'&radius_limit=true'
index = 0
info = {}
ak = 'LI5syaP0yQLSsgDXdPkRXb0rMnEZBhOx'
start_urls = [communityUrl]
def parse(self, response):
request = scrapy.Request(self.communityUrl.format(23.1279, 113.374
, 1000, self.ak, 0), callback=self.parseCommunity)
request.meta['index'] = 0
yield request
def parseCommunity(self, response):
        # print('community', urllib.parse.unquote(response.url))
# print()
result = json.loads(response.body)
resultList = result.get('results', '')
if '' != resultList and len(resultList) > 0:
for result in resultList:
item = CommunityItem()
item['macDonaldId'] = 14
item['communityName'] = result['name']
item['communitylat'] = result['location']['lat']
item['communitylng'] = result['location']['lng']
item['communityAddress'] = result['address']
item['communityUid'] = result['uid']
item['communityDistance'] = result['detail_info']['distance']
item['communityBaiduDetail'] = result['detail_info'].get('detail_url', '')
item['communityOtherDetail'] = ''
item['communityPrice'] = result['detail_info'].get('price', None)
item['communityTotal'] = None
item['belong_mac'] = -1
item['type'] = 0
if item['communityBaiduDetail'] != '' and not self.info.get(item['communityUid'], None):
item['belong_mac'] = str(uuid.uuid1())[:23].replace('-', '')
self.info[item['communityUid']] = {'uuid': item['belong_mac'],
'communityOtherDetail': item['communityOtherDetail'],
'communityPrice': item['communityPrice'],
'communityTotal': item['communityTotal']}
detail = scrapy.Request(item['communityBaiduDetail'], callback=self.parseDetail)
detail.meta['item'] = item
yield detail
elif item['communityBaiduDetail'] != '':
temp = self.info[item['communityUid']]
item['communityOtherDetail'] = temp['communityOtherDetail']
item['communityPrice'] = temp['communityPrice']
item['communityTotal'] = temp['communityTotal']
item['belong_mac'] = temp['uuid']
item['type'] = 1
yield item
index = response.meta['index'] + 1
if index <= 5:
request = scrapy.Request(self.communityUrl.format(23.1279, 113.374
, 1000, self.ak,index),callback=self.parseCommunity)
request.meta['index'] = index
yield request
def parseDetail(self,response):
item = response.meta['item']
sel = Selector(response)
ahrefs = sel.xpath("//div[@class='partnernav']//a[@class='from']")
if len(ahrefs) <= 0:
yield item
else:
other = scrapy.Request(urllib.parse.unquote(ahrefs[0].re(r'url=([^&]*)')[0]), callback=self.parseOther)
other.meta['item'] = item
yield other
def parseOther(self,response):
item = response.meta['item']
print(urllib.parse.unquote(response.url), response.status)
if response.status != 404:
otherUrl = urllib.parse.unquote(response.url)
item['communityOtherDetail'] = otherUrl
sel = Selector(response)
if 'fang.com' in otherUrl or 'soufun.com' in otherUrl:
self.setItem(sel.xpath("//div[@class='Rbiginfo']") \
.xpath("//span[@class='prib']//text()").extract(), item, 'communityPrice')
self.setItem(sel.xpath("//div[@class='Rinfolist']") \
.re(r'<strong>房屋总数</strong>([^</li>]*)'), item, 'communityTotal')
elif 'anjuke.com' in otherUrl:
html = response.body.decode('utf-8')
index = html.find('comm_midprice')
                item['communityPrice'] = re.sub(r'\D', '', html[index + 16:index + 25])
self.setItem(sel.xpath("//dd[@class='other-dd']")[1] \
.xpath(".//text()").extract(), item, 'communityTotal')
elif 'gz.esf.leju.com' in otherUrl or 'sina.com.cn' in otherUrl:
self.setItem(sel.xpath("//ul[@class='com-details-t0']//li"
"[@class='t1']//span[@class='s2']//text()").extract(), item, 'communityPrice')
item['communityTotal'] = sel.xpath("//div[@class='panelB']//td")[2] \
.xpath('.//text()').extract()[1]
self.info[item['communityUid']] = {'uuid': item['belong_mac'],
'communityOtherDetail': item['communityOtherDetail'],
'communityPrice': item['communityPrice'],
'communityTotal': item['communityTotal']}
yield item
def setItem(self, temp, item, name):
if len(temp) > 0:
            item[name] = re.sub(r'\D', '', temp[0])
| {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,004 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/spiders/CommunitySpider.py | import scrapy,json,urllib,MySQLdb,re,logging, uuid
from scrapy.selector import Selector
from MacDonaldAnalyse.items import CommunityItem
class CommunitySpider(scrapy.Spider):
name = 'Community'
handle_httpstatus_list = [404]
allowed_domains = ['map.baidu.com','fang.com','leju.com','anjuke.com','sina.com.cn',
'soufun.com']
ak = 'LI5syaP0yQLSsgDXdPkRXb0rMnEZBhOx'
radius = 1000
url = 'http://api.map.baidu.com/place/v2/search?query=小区&location={0},{1}' \
'&radius={2}&output=json&ak={3}&scope=2&page_num={4}&page_size=20' \
'&radius_limit=true'
start_urls = [url]
def __init__(self):
self.conn = MySQLdb.connect(
host='localhost',
db='macdonald',
user='root',
passwd='',
charset='utf8',
use_unicode=True,
)
self.cursor = self.conn.cursor()
sql = 'select id,lat,lng from macdonalditem'
self.cursor.execute(sql)
self.macdonalds = self.cursor.fetchall()
self.cursor.close()
self.conn.close()
self.log = [{'logname':'community','filename':'loggerCommunity.log','format':'%(asctime)s %(levelname)-8s: %(message)s'},
{'logname': 'done', 'filename': 'done.log','format':''}]
self.loggers = ['','']
for i in range(len(self.log)):
self.loggers[i] = logging.getLogger(self.log[i]['logname'])
handler = logging.FileHandler(self.log[i]['filename'])
handler.setFormatter(logging.Formatter(self.log[i]['format']))
self.loggers[i].setLevel(logging.INFO)
self.loggers[i].addHandler(handler)
self.info = {}
def parse(self, response):
done = []
with open(self.log[1]['filename'],'r') as f:
for line in f:
done.append(line.strip())
for temp in self.macdonalds:
if str(temp[0]) in done:
continue
request = scrapy.Request(self.url.format(temp[1], temp[2]
, self.radius, self.ak, 0), callback=self.parseCommunity)
request.meta['macdonald'] = temp
request.meta['index'] = 0
yield request
def parseCommunity(self, response):
result = json.loads(response.body)
resultList = result.get('results', '')
macdonald = response.meta['macdonald']
        # print('community', macdonald[0], response.meta['index'])
if '' != resultList and len(resultList) > 0:
for result in resultList:
item = CommunityItem()
item['macDonaldId'] = macdonald[0]
item['communityName'] = result['name']
item['communitylat'] = result['location']['lat']
item['communitylng'] = result['location']['lng']
item['communityAddress'] = result['address']
item['communityUid'] = result['uid']
item['communityDistance'] = result['detail_info']['distance']
item['communityBaiduDetail'] = result['detail_info'].get('detail_url', '')
item['communityOtherDetail'] = ''
item['communityPrice'] = result['detail_info'].get('price', None)
item['communityTotal'] = None
item['belong_mac'] = -1
item['type'] = 0
if item['communityBaiduDetail'] != '' and not self.info.get(item['communityUid'], None):
item['belong_mac'] = str(uuid.uuid1())[:23].replace('-','')
self.info[item['communityUid']] = {'uuid': item['belong_mac'],
'communityOtherDetail': item['communityOtherDetail'],
'communityPrice': item['communityPrice'],
'communityTotal': item['communityTotal']}
detail = scrapy.Request(item['communityBaiduDetail'], callback=self.parseDetail)
detail.meta['item'] = item
yield detail
elif item['communityBaiduDetail'] != '':
temp = self.info[item['communityUid']]
item['communityOtherDetail'] = temp['communityOtherDetail']
item['communityPrice'] = temp['communityPrice']
item['communityTotal'] = temp['communityTotal']
item['belong_mac'] = temp['uuid']
item['type'] = 1
yield item
index = response.meta['index'] + 1
if index <= 5:
request = scrapy.Request(self.url.format(macdonald[1], macdonald[2]
, self.radius, self.ak, index), callback=self.parseCommunity)
request.meta['macdonald'] = macdonald
request.meta['index'] = index
yield request
else:
self.loggers[0].info('%s is end', macdonald[0])
self.loggers[1].info('%s', macdonald[0])
else:
self.loggers[0].info('%s is end',macdonald[0])
self.loggers[1].info('%s', macdonald[0])
def parseDetail(self, response):
item = response.meta['item']
sel = Selector(response)
ahrefs = sel.xpath("//div[@class='partnernav']//a[@class='from']")
if len(ahrefs) <= 0:
yield item
else:
other = scrapy.Request(urllib.parse.unquote(ahrefs[0].re(r'url=([^&]*)')[0]),
callback=self.parseOther)
other.meta['item'] = item
yield other
def parseOther(self, response):
item = response.meta['item']
print(urllib.parse.unquote(response.url), response.status)
if response.status != 404:
otherUrl = urllib.parse.unquote(response.url)
item['communityOtherDetail'] = otherUrl
sel = Selector(response)
if 'fang.com' in otherUrl or 'soufun.com' in otherUrl:
self.setItem(sel.xpath("//div[@class='Rbiginfo']") \
.xpath("//span[@class='prib']//text()").extract(), item, 'communityPrice')
self.setItem(sel.xpath("//div[@class='Rinfolist']") \
.re(r'<strong>房屋总数</strong>([^</li>]*)'), item, 'communityTotal')
elif 'anjuke.com' in otherUrl:
html = response.body.decode('utf-8')
index = html.find('comm_midprice')
                item['communityPrice'] = re.sub(r'\D', '', html[index + 16:index + 25])
self.setItem(sel.xpath("//dd[@class='other-dd']")[1] \
.xpath(".//text()").extract(), item, 'communityTotal')
elif 'gz.esf.leju.com' in otherUrl or 'sina.com.cn' in otherUrl:
self.setItem(sel.xpath("//ul[@class='com-details-t0']//li"
"[@class='t1']//span[@class='s2']//text()").extract(), item, 'communityPrice')
                item['communityTotal'] = re.sub(r'\D', '', sel.xpath("//div[@class='panelB']//td")[2] \
.xpath('.//text()').extract()[1])
self.info[item['communityUid']] = {'uuid': item['belong_mac'],
'communityOtherDetail': item['communityOtherDetail'],
'communityPrice': item['communityPrice'],
'communityTotal': item['communityTotal']}
yield item
def setItem(self, temp, item, name):
if len(temp) > 0:
            item[name] = re.sub(r'\D', '', temp[0])
| {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,005 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/spiders/FoodSpider.py | import scrapy,json,urllib,MySQLdb
from MacDonaldAnalyse.items import BaiduFoodItem
class FoodSpider(scrapy.Spider):
name = 'Food'
allowed_domains = ['map.baidu.com']
ak = 'LI5syaP0yQLSsgDXdPkRXb0rMnEZBhOx'
radius = 1000
url = 'http://api.map.baidu.com/place/v2/search?query=美食&location={0},{1}' \
'&radius={2}&output=json&ak={3}&scope=2&page_num={4}&page_size=20' \
'&radius_limit=true'
start_urls = [url]
def __init__(self):
self.conn = MySQLdb.connect(
host='localhost',
db='macdonald',
user='root',
passwd='',
charset='utf8',
use_unicode=True,
)
self.cursor = self.conn.cursor()
sql = 'select id,lat,lng from macdonalditem'
self.cursor.execute(sql)
self.macdonalds = self.cursor.fetchall()
self.cursor.close()
self.conn.close()
def parse(self, response):
for temp in self.macdonalds:
request = scrapy.Request(self.url.format(temp[1], temp[2]
, self.radius, self.ak, 0), callback=self.parseFood)
request.meta['macdonald'] = temp
request.meta['index'] = 0
yield request
def parseFood(self, response):
        print('food', urllib.parse.unquote(response.url))
print()
result = json.loads(response.body)
resultList = result.get('results', '')
macdonald = response.meta['macdonald']
if '' != resultList and len(resultList) > 0:
for result in resultList:
item = BaiduFoodItem()
item['macDonaldId'] = macdonald[0]
item['businessName'] = result['name']
item['businesslat'] = result['location']['lat']
item['businesslng'] = result['location']['lng']
item['businessAddress'] = result['address']
item['businessUid'] = result['uid']
item['businessDistance'] = result['detail_info']['distance']
item['businessDetail'] = result['detail_info'].get('detail_url', '')
item['businessPrice'] = result['detail_info'].get('price', None)
item['businessOverall_rating'] = result['detail_info'].get('overall_rating', None)
yield item
index = response.meta['index'] + 1
request = scrapy.Request(self.url.format(macdonald[1], macdonald[2]
, self.radius, self.ak, index), callback=self.parseFood)
request.meta['macdonald'] = macdonald
request.meta['index'] = index
yield request
| {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,006 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/spiders/MacDonaldSpider.py | import scrapy,json,urllib
from MacDonaldAnalyse.items import MacDonaldItem
class MacDonaldSpider(scrapy.Spider):
name = 'MacDonald'
allowed_domains = ['map.baidu.com']
ak = 'LI5syaP0yQLSsgDXdPkRXb0rMnEZBhOx'
index = 0
distance = 1000
    url = 'http://api.map.baidu.com/place/v2/search?query=麦当劳&tag=美食&region=广州' \
'&output=json&ak={0}&page_num={1}&scope=2&page_size=20&city_limit=true'
start_urls = [url]
def parse(self, response):
result = scrapy.Request(self.url.format(self.ak, self.index), callback=self.parseMacDonald)
yield result
def parseMacDonald(self, response):
        print("McDonald's", urllib.parse.unquote(response.url))
print()
result = json.loads(response.body)
resultList = result.get('results', '')
if '' != resultList and len(resultList) > 0:
for result in resultList:
item = MacDonaldItem()
item['name'] = result['name']
item['lat'] = result['location']['lat']
item['lng'] = result['location']['lng']
item['address'] = result['address']
item['uid'] = result['uid']
item['detail_url'] = result['detail_info'].get('detail_url', '')
item['price'] = result['detail_info'].get('price', None)
item['overall_rating'] = result['detail_info'].get('overall_rating', None)
yield item
self.index = self.index + 1
yield scrapy.Request(self.url.format(self.ak, self.index), callback=self.parseMacDonald) | {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,007 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/items.py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScrapytestItem(scrapy.Item):
# define the fields for your item here like:
name = scrapy.Field()
list = scrapy.Field()
set = scrapy.Field()
class FangchengItem(scrapy.Item):
title = scrapy.Field()
radius = scrapy.Field()
number = scrapy.Field()
class BaiduTrafficItem(scrapy.Item):
    macDonaldId = scrapy.Field()  # id of the parent McDonald's branch
    trafficName = scrapy.Field()  # name of the transit facility
    trafficlat = scrapy.Field()   # latitude
    trafficlng = scrapy.Field()   # longitude
    trafficLine = scrapy.Field()  # transit line information
    trafficuid = scrapy.Field()   # facility uid
    distance = scrapy.Field()     # distance from the center point
class MacDonaldItem(scrapy.Item):
    name = scrapy.Field()            # name
    lat = scrapy.Field()             # latitude
    lng = scrapy.Field()             # longitude
    address = scrapy.Field()         # full address
    uid = scrapy.Field()             # uid
    detail_url = scrapy.Field()      # detail URL
    price = scrapy.Field()           # average price
    overall_rating = scrapy.Field()  # average rating
    # BaiduSpider assigns these two keys; a scrapy.Item raises KeyError on
    # assignment to an undeclared field, so they must be declared here
    traffic = scrapy.Field()         # nearby transit facilities
    parking = scrapy.Field()         # nearby parking lots
class ParkingLotItem(scrapy.Item):
    macDonaldId = scrapy.Field()         # id of the parent McDonald's branch
    parkingLotname = scrapy.Field()      # parking lot name
    parkingLotlat = scrapy.Field()       # latitude
    parkingLotlng = scrapy.Field()       # longitude
    parkingLotAdress = scrapy.Field()    # full address
    parkingLotUid = scrapy.Field()       # uid
    parkingLotDistance = scrapy.Field()  # distance from the center point
class Business_District_Item(scrapy.Item):
    # data about shops in the business district around a McDonald's branch
    business_district_name = scrapy.Field()  # business district name
    shop_name = scrapy.Field()               # shop name
    shop_mean_price = scrapy.Field()         # average spend per person
    shop_review_num = scrapy.Field()         # number of reviews
    shop_rank_stars = scrapy.Field()         # star rating
    shop_tag = scrapy.Field()                # tag
    shop_addr = scrapy.Field()               # full address
class BaiduFoodItem(scrapy.Item):
    macDonaldId = scrapy.Field()             # id of the parent McDonald's branch
    businessName = scrapy.Field()            # shop name
    businesslat = scrapy.Field()             # shop latitude
    businesslng = scrapy.Field()             # shop longitude
    businessAddress = scrapy.Field()         # shop address
    businessUid = scrapy.Field()             # shop uid
    businessDistance = scrapy.Field()        # distance from the center point
    businessDetail = scrapy.Field()          # shop detail URL
    businessPrice = scrapy.Field()           # average food price
    businessOverall_rating = scrapy.Field()  # average food rating
class SchoolItem(scrapy.Item):
    macDonaldId = scrapy.Field()     # id of the parent McDonald's branch
    schoolName = scrapy.Field()      # school name
    schoollat = scrapy.Field()       # school latitude
    schoollng = scrapy.Field()       # school longitude
    schoolAddress = scrapy.Field()   # school address
    schoolUid = scrapy.Field()       # school uid
    schoolDistance = scrapy.Field()  # distance from the center point
    schoolDetail = scrapy.Field()    # school detail URL
    type = scrapy.Field()            # school type
    total = scrapy.Field()           # school enrollment
class CommunityItem(scrapy.Item):
    macDonaldId = scrapy.Field()           # id of the parent McDonald's branch
    communityName = scrapy.Field()         # residential community name
    communitylat = scrapy.Field()          # latitude
    communitylng = scrapy.Field()          # longitude
    communityAddress = scrapy.Field()      # address
    communityUid = scrapy.Field()          # uid
    communityDistance = scrapy.Field()     # distance from the center point
    communityBaiduDetail = scrapy.Field()  # detail URL on Baidu
    communityOtherDetail = scrapy.Field()  # detail URL on other sites
    communityPrice = scrapy.Field()        # average price
    communityTotal = scrapy.Field()        # total number of households
    belong_mac = scrapy.Field()            # uuid shared by duplicate records of one community
type = scrapy.Field() | {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,008 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/spiders/Dianping_Business_District.py | from scrapy import Request
from scrapy.spiders import Spider
from MacDonaldAnalyse.items import Business_District_Item
import random  # intended for randomly rotating the User-Agent (currently unused)
import json
class BusinessDistrictShop(Spider):
name = "BusinessDistrictShop"
def start_requests(self):
        with open('DaZhongDianPing_json.txt') as f:
            district_list = f.readlines()  # list of all business-district records
        #print(district_list)
        # command-line debugging snippet:
        # from scrapy.shell import inspect_response
        # inspect_response(response, self)
        for line in district_list:
            district_link = json.loads(line)
            cur_url = district_link['McDonalds_tag_href']
            # parse() expects the district name in response.meta; the JSON key
            # used below is an assumption about DaZhongDianPing_json.txt's schema
            yield Request(cur_url,
                          meta={'business_district_name': district_link.get('business_district_name', '')})
def parse(self, response):
        # command-line debugging snippet:
        # from scrapy.shell import inspect_response
        # inspect_response(response, self)
        shops = response.xpath('.//div[@id="McDonald-all-list"]/ul/li')
        for shop in shops:
            item = Business_District_Item()  # fresh item per shop so yielded items do not share state
            item['business_district_name'] = response.meta['business_district_name']  # business district name
            item['shop_name'] = shop.xpath('.//div[@class="tit"]/a/@title').extract_first()  # shop name
            item['shop_mean_price'] = shop.xpath('.//div[@class="comment"]/a[@class="mean-price"]/b/text()').extract_first()  # average spend per person
            item['shop_review_num'] = shop.xpath('.//div[@class="comment"]/a[@class="review-num"]/b/text()').extract_first()  # number of reviews
            item['shop_rank_stars'] = shop.xpath('.//div[@class="comment"]/span/@title').extract_first()  # star rating
            item['shop_tag'] = shop.xpath('.//div[@class="tag-addr"]/a[last()]/@href').extract_first()  # hyperlink behind the tag
            item['shop_addr'] = shop.xpath('.//div[@class="tag-addr"]/span[@class="addr"]/text()').extract_first()  # full address
yield item
| {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,009 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/spiders/FangchengSpider.py | import scrapy
from scrapy.selector import Selector
from MacDonaldAnalyse.items import FangchengItem
class FangchengSpider(scrapy.Spider):
name = 'Fangcheng'
allowed_domains = ['fangcheng.cn']
def start_requests(self):
urls = []
for i in range(1,6):
if i != 2:
for j in (3,5):
urls.append(scrapy.Request('http://www.fangcheng.cn/details/slot?population=%s'
'&distance=%s&id=2&mall_id=362'%(i,j)))
return urls
def parse(self, response):
sel = Selector(response)
item = FangchengItem()
item['title'] = sel.xpath('//select[@class="population"]').re(r'<option.*?selected>(.*)</option>')
item['radius'] = sel.xpath('//select[@class="distance"]').re(r'<option.*?selected>(.*)</option>')
        # use a relative query so only <p> nodes inside this div are collected
        p_list = sel.xpath("//div[@class='detail_buss_around_info']").xpath('.//p')
        number = ''
        for i in range(len(p_list)):
            em = p_list[i].xpath('.//em')
for j in range(len(em)):
parent = em[j].xpath('..//text()').extract()
number = number + parent[0] + parent[1] + parent[2]
if j != len(em)-1:
number = number + ','
else:
number = number + '.'
item['number'] = number
return item
| {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,010 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/pipelines.py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs, MySQLdb, json, traceback
from MacDonaldAnalyse.items import MacDonaldItem\
, BaiduTrafficItem, BaiduFoodItem, ParkingLotItem, SchoolItem, CommunityItem
import MySQLdb.cursors
class ScrapytestPipeline(object):
def process_item(self, item, spider):
return item
class JsonWithEncodingTutorialPipeline(object):
def __init__(self):
self.file = codecs.open('MacDonaldAnalyse.json', 'w', encoding='utf-8')
def process_item(self, item, spider):
line = json.dumps(dict(item), ensure_ascii=False) + '\n\n'
self.file.write(line)
return item
def spider_closed(self, spider):
self.file.close()
class MysqlTutorialPipeline(object):
def __init__(self):
self.conn = MySQLdb.connect(
host='localhost',
db='macdonald',
user='root', # replace with you user name
passwd='', # replace with you password
charset='utf8',
use_unicode=True,
)
def process_item(self, item, spider):
self.conn.ping(True)
try:
cursor = self.conn.cursor()
if isinstance(item, MacDonaldItem):
sql = """
insert into macdonalditem(name,lat,lng,address,uid,detail_url,price,overall_rating)
values (%s,%s,%s,%s,%s,%s,%s,%s);
"""
cursor.execute(sql, (item["name"], item["lat"], item["lng"], item["address"],
item["uid"], item["detail_url"], item["price"], item["overall_rating"]))
self.conn.commit()
elif isinstance(item,BaiduTrafficItem):
sql = """
insert into trafficitem(macDonaldId,trafficName,trafficlat,trafficlng
,trafficLine,trafficuid,distance) values (%s,%s,%s,%s,%s,%s,%s);
"""
cursor.execute(sql, (item['macDonaldId'], item['trafficName'], item['trafficlat']
, item['trafficlng'], item['trafficLine'], item['trafficuid'], item['distance']))
self.conn.commit()
elif isinstance(item,ParkingLotItem):
sql = """
insert into parkinglotitem(macDonaldId,parkingLotname,parkingLotlat,parkingLotlng
,parkingLotAdress,parkingLotUid,parkingLotDistance) values (%s,%s,%s,%s,%s,%s,%s);
"""
cursor.execute(sql, (item['macDonaldId'], item['parkingLotname'], item['parkingLotlat']
, item['parkingLotlng'], item['parkingLotAdress'], item['parkingLotUid'], item['parkingLotDistance']))
self.conn.commit()
elif isinstance(item,BaiduFoodItem):
sql = """
insert into fooditem(macDonaldId,businessName,businesslat,businesslng
,businessAddress,businessUid,businessDistance,businessDetail
,businessPrice,businessOverall_rating) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);
"""
cursor.execute(sql, (item['macDonaldId'], item['businessName'], item['businesslat']
, item['businesslng'], item['businessAddress'], item['businessUid']
, item['businessDistance'],item['businessDetail'], item['businessPrice']
, item['businessOverall_rating']))
self.conn.commit()
elif isinstance(item, SchoolItem):
sql = """
insert into schoolitem(macDonaldId,scohoolName,schoollat,schoollng
,schoolAddress,schooldUid,distance,schoolUrl,type,total)
values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);
"""
cursor.execute(sql, (item['macDonaldId'], item['schoolName'], item['schoollat']
, item['schoollng'], item['schoolAddress'], item['schoolUid']
, item['schoolDistance'],item['schoolDetail'], item['type']
, item['total']))
self.conn.commit()
elif isinstance(item, CommunityItem):
sql = """
insert into communityitem(macDonaldId,communityName,communitylat,communitylng
,communityAddress,communityUid,communityDistance,communityBaiduDetail,
communityOtherDetail,communityPrice,communityTotal,belong_mac,type)
values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);
"""
cursor.execute(sql, (item['macDonaldId'], item['communityName'], item['communitylat']
, item['communitylng'], item['communityAddress'], item['communityUid']
, item['communityDistance'], item['communityBaiduDetail'], item['communityOtherDetail']
, item['communityPrice'], item['communityTotal'], item['belong_mac']
, item['type']))
self.conn.commit()
except Exception:
print(traceback.format_exc())
self.conn.rollback()
finally:
cursor.close()
def spider_closed(self, spider):
pass | {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,011 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/spiders/BaiduSpider.py | import scrapy,json,re,urllib
from scrapy.selector import Selector
from MacDonaldAnalyse.items import MacDonaldItem,BaiduTrafficItem,ParkingLotItem
class BaiduSpider(scrapy.Spider):
name = 'baidu'
allowed_domains = ['map.baidu.com']
ak = 'LI5syaP0yQLSsgDXdPkRXb0rMnEZBhOx'
index = 0
distance = 1000
    url = 'http://api.map.baidu.com/place/v2/search?query=麦当劳&tag=美食&region=广州' \
'&output=json&ak={0}&page_num={1}&scope=2&page_size=20'
urlTraffic = 'http://api.map.baidu.com/place/v2/search?query={0}&location={1},{2}' \
'&radius={3}&output=json&ak={4}&scope=2&page_num={5}&page_size=20' \
'&radius_limit=true'
start_urls = [url]
def parse(self, response):
result = scrapy.Request(self.url.format(self.ak,self.index),callback=self.parseBaidu)
yield result
def parseBaidu(self,response):
print('麦当劳',urllib.parse.unquote(response.url))
print()
result = json.loads(response.body)
resultList = result.get('results','')
if '' != resultList and len(resultList) > 0:
for result in resultList:
item = MacDonaldItem()
item['name'] = result['name']
item['lat'] = result['location']['lat']
item['lng'] = result['location']['lng']
item['address'] = result['address']
item['uid'] = result['uid']
item['detail_url'] = result['detail_info'].get('detail_url','')
item['price'] = result['detail_info'].get('price','')
item['overall_rating'] = result['detail_info'].get('overall_rating','')
item['traffic'] = []
item['parking'] = []
yield self.nextRequest(item,'地铁站$公交站',0)
            # Only request the next page while the current page returned results;
            # otherwise the page counter would keep incrementing forever.
            self.index = self.index + 1
            yield scrapy.Request(self.url.format(self.ak, self.index), callback=self.parseBaidu)
def parseTraffic(self,response):
print(response.meta['keyword'],urllib.parse.unquote(response.url))
print()
jsonTraffic = json.loads(response.body)
baidu = response.meta['item']
trafficResult = jsonTraffic.get('results','')
keyword = response.meta['keyword']
if '' != trafficResult and len(trafficResult) > 0:
hasNext = True
for result in trafficResult:
if result['detail_info']['distance'] <= self.distance:
if keyword == '地铁站$公交站':
self.setBaiduTrafficItem(result,baidu)
else:
self.setParkingLotItem(result,baidu)
else:
hasNext = False
break
if hasNext:
yield self.nextRequest(baidu, keyword, response.meta['nextIndex'] + 1)
elif keyword == '地铁站$公交站':
yield self.nextRequest(baidu,'停车场',0)
else:
yield baidu
elif keyword == '地铁站$公交站':
yield self.nextRequest(baidu,'停车场',0)
else:
yield baidu
def nextRequest(self,baidu,keyword,nextIndex):
request = scrapy.Request(self.urlTraffic.format(keyword, baidu['lat'], baidu['lng']
, self.distance, self.ak, nextIndex), callback=self.parseTraffic)
request.meta['keyword'] = keyword
request.meta['nextIndex'] = nextIndex
request.meta['item'] = baidu
return request
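    # Request-chaining note: for each McDonald's item, nextRequest first pages
    # through nearby transit stops ('地铁站$公交站'), then parking lots ('停车场');
    # the item itself is only yielded once both keyword searches are exhausted.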
def setBaiduTrafficItem(self,result,baidu):
item = BaiduTrafficItem()
item['trafficName'] = result['name']
item['trafficlat'] = result['location']['lat']
item['trafficlng'] = result['location']['lng']
item['trafficLine'] = result['address']
item['trafficuid'] = result['uid']
item['distance'] = result['detail_info']['distance']
baidu['traffic'].append(dict(item))
def setParkingLotItem(self,result,baidu):
item = ParkingLotItem()
item['parkingLotname'] = result['name']
item['parkingLotlat'] = result['location']['lat']
item['parkingLotlng'] = result['location']['lng']
item['parkingLotAdress'] = result['address']
item['parkingLotUid'] = result['uid']
item['parkingLotDistance'] = result['detail_info']['distance']
baidu['parking'].append(dict(item)) | {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,012 | KkLearner/MacDonaldAnalyse | refs/heads/master | /MacDonaldAnalyse/spiders/SchoolSpider.py | import scrapy,json,urllib,MySQLdb
from MacDonaldAnalyse.items import SchoolItem
class SchoolSpider(scrapy.Spider):
name = 'School'
allowed_domains = ['map.baidu.com']
ak = 'LI5syaP0yQLSsgDXdPkRXb0rMnEZBhOx'
radius = 1000
tag = {"教育培训;高等院校": 0, "教育培训;中学": 1,
"教育培训;小学": 2, "教育培训;幼儿园": 3,
"教育培训;成人教育": 4, "教育培训;亲子教育":5 ,
"教育培训;特殊教育学校": 6, "教育培训;留学中介机构": 7,
"教育培训;科研机构": 8, "教育培训;培训机构": 9,
"教育培训;图书馆": 10, "教育培训;科技馆": 11
}
url = 'http://api.map.baidu.com/place/v2/search?query=学校&location={0},{1}' \
'&radius={2}&output=json&ak={3}&scope=2&page_num={4}&page_size=20' \
'&radius_limit=true'
start_urls = [url]
def __init__(self):
self.conn = MySQLdb.connect(
host='localhost',
db='macdonald',
user='root',
passwd='',
charset='utf8',
use_unicode=True,
)
self.cursor = self.conn.cursor()
sql = 'select id,lat,lng from macdonalditem'
self.cursor.execute(sql)
self.macdonalds = self.cursor.fetchall()
self.cursor.close()
self.conn.close()
def parse(self, response):
for temp in self.macdonalds:
request = scrapy.Request(self.url.format(temp[1], temp[2]
, self.radius, self.ak, 0), callback=self.parseSchool)
request.meta['macdonald'] = temp
request.meta['index'] = 0
yield request
def parseSchool(self, response):
print('学校', urllib.parse.unquote(response.url))
print()
result = json.loads(response.body)
resultList = result.get('results', '')
macdonald = response.meta['macdonald']
if '' != resultList and len(resultList) > 0:
for result in resultList:
item = SchoolItem()
item['macDonaldId'] = macdonald[0]
item['schoolName'] = result['name']
item['schoollat'] = result['location']['lat']
item['schoollng'] = result['location']['lng']
item['schoolAddress'] = result['address']
item['schoolUid'] = result['uid']
item['schoolDistance'] = result['detail_info']['distance']
item['schoolDetail'] = result['detail_info'].get('detail_url', '')
item['type'] = self.tag.get(result['detail_info']['tag'], None)
item['total'] = None
yield item
index = response.meta['index'] + 1
if index <= 4:
request = scrapy.Request(self.url.format(macdonald[1], macdonald[2]
, self.radius, self.ak, index), callback=self.parseSchool)
request.meta['macdonald'] = macdonald
request.meta['index'] = index
yield request
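        # page_num is capped at 4 above, so at most 5 pages (100 places) are
        # fetched per McDonald's location.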
| {"/MacDonaldAnalyse/spiders/TestSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/CommunitySpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FoodSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/MacDonaldSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/Dianping_Business_District.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/FangchengSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/pipelines.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/BaiduSpider.py": ["/MacDonaldAnalyse/items.py"], "/MacDonaldAnalyse/spiders/SchoolSpider.py": ["/MacDonaldAnalyse/items.py"]} |
42,024 | ueslialmeida/renu | refs/heads/master | /linkrepo/admin.py | from django.contrib import admin
from .models import Link
class LinkController(admin.ModelAdmin):
list_display = ["__str__", "create", "updated"]
search_fields = ('title',)
class Meta:
model = Link
# Register your models here.
admin.site.register(Link, LinkController)
| {"/linkrepo/admin.py": ["/linkrepo/models.py"]} |
42,025 | ueslialmeida/renu | refs/heads/master | /linkrepo/models.py | from django.db import models
# Create your models here.
class Link(models.Model):
title = models.CharField(max_length=256, blank=False, null=False)
notes = models.TextField(blank=True, null=True)
url = models.URLField(blank=True, null=True)
create = models.DateTimeField(auto_now_add=True, auto_now=False)
updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __str__(self):
return self.title
| {"/linkrepo/admin.py": ["/linkrepo/models.py"]} |
42,035 | Esenbahar/Django | refs/heads/master | /books/urls.py |
from django.urls import path
from . import views
urlpatterns = [
    path('',views.index, name='index'), # if a request comes to the home page, open the index view.
    # The routes below are handled under the main URL.
path('books',views.books, name='books'),
path('authors',views.authors, name='authors'),
path('authordetails/<int:authorId>',views.authorDetails, name='authordetails')
] | {"/books/views.py": ["/books/models.py"]} |
42,036 | Esenbahar/Django | refs/heads/master | /books/models.py | from django.db import models
# Create your models here.
class Author(models.Model):
def __str__(self):
        return self.name # return the author's name directly instead of the default object representation.
name = models.CharField(max_length=50)
created = models.DateTimeField('date created')
    # This defines the Author table; to push it to the database,
    # add 'books.apps.BooksConfig' to INSTALLED_APPS in settings.py.
class Book(models.Model):
def __str__(self):
        return self.name # return the book's name directly instead of the default object representation.
name = models.CharField(max_length=50)
created = models.DateTimeField('date created')
    author = models.ForeignKey(Author, on_delete=models.CASCADE) # if an author is ever deleted, all of that author's books are deleted from the database as well.
price = models.DecimalField(decimal_places=2, max_digits=4, null=True)
    # There is a one-to-many relationship between Author and Book here (see the sketch below).
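    # A minimal ORM sketch of that relation (the sample values are illustrative):
    #
    #     from django.utils import timezone
    #     author = Author.objects.create(name='Orhan Pamuk', created=timezone.now())
    #     Book.objects.create(name='Kar', created=timezone.now(), author=author, price=42.50)
    #     author.book_set.count()  # -> 1, and deleting the author cascades to the book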
| {"/books/views.py": ["/books/models.py"]} |
42,037 | Esenbahar/Django | refs/heads/master | /books/views.py | from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from .models import Author
from django.http import Http404
# Create your views here.
# The user sends a request; those requests are handled here, responding via
# HttpResponse (imported above).
def index(request):
return HttpResponse("Anasayfa") #index sayfasına bir request istek gelirse yanıt olarak Anasayfa dönsün.
#şimdi indexi kabul eetiğimizi belirttiğimiz bir url e ihtiyacımız var.bunun için urls.py dosyası oluştururuz.
def authors(request):
template = loader.get_template('authors.html')
context = {
'authors_list' : Author.objects.all()
}
return HttpResponse(template.render(context,request))
def books(request):
return HttpResponse("Kitaplar")
def authorDetails(request, authorId):
try :
context = {
'author_detail' : Author.objects.get(pk=authorId)
}
except Author.DoesNotExist:
raise Http404("Yazar bulunamadı")
template = loader.get_template('authorDetail.html')
return HttpResponse(template.render(context,request))
| {"/books/views.py": ["/books/models.py"]} |
42,038 | doruktiktiklar/sadedegel | refs/heads/master | /tests/test_bertcluster_summ.py | # pylint: skip-file
import numpy as np
import pytest
@pytest.mark.skip()
def test_bert_cluster_model():
tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-turkish-cased") # noqa: F821
model = BertModel.from_pretrained("dbmdz/bert-base-turkish-cased", output_hidden_states=True) # noqa: F821
summarizer = AnnotatedExtractiveSummarizer(tokenizer, model, k=4, layers=[11], random_state=42, # noqa: E126 F821
doEval=True)
jfile = '../work/Labeled/bitmemis-evlilikler-375881_labeled.json'
summary = summarizer.summarize(jfile)
assert np.unique(summary == np.array(
['Kadın ve erkek evliliklerini bitirirler, bazen kadın, bazen erkek bazen de aynı anda boşanma kararı alırlar.',
('Boşanmanın üzerinden biraz zaman geçince taraflardan biri (çoğu zaman erkek) eski eşinin yeni '
'bir hayata başlamasına, yeni insanlarla görüşme ihtimaline bile dayanamaz.'),
'Basit olarak diyebiliriz ki aynı doğadaki gibi dişi, hayatta kalma konusunda daha başarılıdır.',
('Boşanmanıza rağmen eski eşinize ‘Eski’ derken hala diliniz sürçüyorsa, zorlanıyorsanız, bitmemiş hesaplar, '
'öyküler, eskiye dair takılıp kalmalar varsa, kendinizi onun evininin önünde buluyorsanız, '
'evinin ışığını kontrol ediyor, içeride olanları merak ediyorsanız, kapısını çalıyorsanız, '
'orada kalmak istiyorsanız, hayatına müdahale etmek istiyorsanız, kıskanıyorsanız, boşanmanın '
'üzerinden yıllar geçmesine rağmen hayatınıza yeni birini almayı reddediyorsanız ya da alamıyorsanız, '
'buna benzer süreçler yaşıyorsanız gerçekte boşanmamışsınız anlamına gelebilir.')],
dtype='<U559'))[0]
@pytest.mark.skip()
def test_bert_cluster_scorer():
tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-turkish-cased") # noqa: F821
model = BertModel.from_pretrained("dbmdz/bert-base-turkish-cased", output_hidden_states=True) # noqa: F821
summarizer = AnnotatedExtractiveSummarizer(tokenizer, model, k=4, layers=[11], random_state=42, # noqa: E126 F821
doEval=True)
jfile = '../work/Labeled/bitmemis-evlilikler-375881_labeled.json'
_ = summarizer.summarize(jfile)
assert summarizer.score().astype(np.float16) == np.float16(0.2433)
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,039 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/dataset/_core.py | import glob
from os.path import dirname, join, expanduser, basename, splitext
from loguru import logger
import numpy as np
import re
from enum import Enum
from .util import safe_json_load, safe_read
from pathlib import Path
class CorpusTypeEnum(Enum):
RAW = ('raw', '*.txt')
SENTENCE = ('sents', '*.json')
ANNOTATED = ('annotated', '*.json')
def __init__(self, dir, pattern):
self.dir = dir
self.pattern = pattern
def file_paths(corpus_type: CorpusTypeEnum = CorpusTypeEnum.RAW, noext=False, use_basename=False, base_path=None):
if base_path is None:
base_path = dirname(__file__)
    if isinstance(corpus_type, CorpusTypeEnum):
search_pattern = Path(base_path).expanduser() / corpus_type.dir
logger.debug(f"Search pattern for {corpus_type}: {search_pattern}")
files = search_pattern.glob(corpus_type.pattern)
else:
raise ValueError(f"Ensure that corpus_type is a one of raw, sentence or annotated")
if use_basename:
if noext:
return sorted([splitext(basename(fn))[0] for fn in files])
else:
return sorted([basename(fn) for fn in files])
else:
return sorted(files)
def cleaner(doc: str):
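    """Strip the crawler's "<day> <month> <year> PAYLAŞ yorum yaz" boilerplate from a raw document.

    A tiny illustration (the input string is hypothetical):

    >>> cleaner('12 Mayıs 2020 PAYLAŞ yorum yaz')
    ''
    """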
return re.sub(r'\d+\s+[a-zA-ZŞşğĞüÜıİ]+\s+\d{4}\s+PAYLAŞ\s+yorum\s+yaz(\s+a)?', '', doc, flags=re.I)
def load_raw_corpus(return_iter: bool = True, base_path=None, clean=True):
"""Load corpus of sample news tokenized into sentences.
Examples
--------
>>> from sadedegel.dataset import load_raw_corpus
>>> sents = load_raw_corpus(return_iter=False)
>>> type(sents[0])
<class 'str'>
"""
if base_path is None:
base_path = dirname(__file__)
search_pattern = join(expanduser(base_path), 'raw', '*.txt')
logger.debug("Search path {}".format(search_pattern))
files = sorted(glob.glob(search_pattern))
if return_iter:
if clean:
return (cleaner(safe_read(file)) for file in files)
else:
return (safe_read(file) for file in files)
else:
if clean:
return [cleaner(safe_read(file)) for file in files]
else:
return [safe_read(file) for file in files]
def load_sentence_corpus(return_iter: bool = True, base_path=None):
"""Load corpus of sample news tokenized into sentences.
Examples
--------
>>> from sadedegel.dataset import load_sentence_corpus
>>> sents = load_sentence_corpus(return_iter=False)
>>> type(sents[0])
<class 'dict'>
>>> len(sents[0]['sentences'])
62
"""
if base_path is None:
base_path = dirname(__file__)
search_pattern = join(expanduser(base_path), 'sents', '*.json')
logger.debug("Search path {}".format(search_pattern))
files = sorted(glob.glob(search_pattern))
if return_iter:
return map(safe_json_load, files)
else:
return [safe_json_load(file) for file in files]
def load_annotated_corpus(return_iter: bool = True, base_path=None):
"""Load corpus of sample news tokenized into sentences and scored based on human annotation"""
    files = file_paths(CorpusTypeEnum.ANNOTATED, base_path=base_path)
def to_dict(d):
return dict(sentences=[s['content'] for s in d['sentences']],
relevance=np.array([s['deletedInRound'] for s in d['sentences']]))
if return_iter:
return map(to_dict,
map(safe_json_load, files))
else:
return [to_dict(safe_json_load(file)) for file in files]
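# A quick shape check in the doctest style of the loaders above:
#
#     >>> docs = load_annotated_corpus(return_iter=False)
#     >>> sorted(docs[0].keys())
#     ['relevance', 'sentences']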
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,040 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/dataset/extended/_core.py | from os.path import expanduser, getsize
from pathlib import Path
import json
from typing import Iterator, Tuple
import glob
import click
__download_message__ = """Ensure that you have properly downloaded extended corpus using
python -m sadedegel.dataset.extended download --access-key xxx --secret-key xxxx
Unfortunately due to data licensing issues we could not share data publicly.
Get in touch with sadedegel team to obtain a download key.
"""
def check_directory_structure(path: str) -> bool:
if not Path(expanduser(path)).exists():
click.secho(f"{path} not found.\n", fg="red")
click.secho(__download_message__, fg="red")
return False
elif not (Path(expanduser(path)) / 'extended' / 'raw').exists():
click.secho(f"raw directory in {path} not found.\n", fg="red")
click.secho(__download_message__, fg="red")
return False
elif not (Path(expanduser(path)) / 'extended' / 'sents').exists():
click.secho(f"sents directory in {path} not found.\n", fg="red")
click.secho(__download_message__, fg="red")
return False
else:
return True
def raw_stats(data_home: str) -> Tuple[int, int]:
n, sz = 0, 0
for f in glob.glob(str((Path(expanduser(data_home)) / 'extended' / 'raw' / '*' / '*.txt').absolute())):
n += 1
sz += getsize(f)
return n, sz
def sents_stats(data_home: str) -> Tuple[int, int]:
n, sz = 0, 0
for f in glob.glob(str((Path(expanduser(data_home)) / 'extended' / 'sents' / '*' / '*.json').absolute())):
n += 1
sz += getsize(f)
return n, sz
def load_extended_metadata(data_home="~/.sadedegel_data"):
if check_directory_structure(data_home):
raw_count, raw_bytes = raw_stats(data_home)
sents_count, sents_bytes = sents_stats(data_home)
return dict(count=dict(raw=raw_count, sents=sents_count), byte=dict(raw=raw_bytes, sents=sents_bytes))
else:
return None
def load_extended_raw_corpus(data_home="~/.sadedegel_data") -> Iterator[str]:
if check_directory_structure(data_home):
for f in glob.glob(str((Path(expanduser(data_home)) / 'extended' / 'raw' / '*' / '*.txt').absolute())):
with open(f) as fp:
yield fp.read()
else:
return None
def load_extended_sents_corpus(data_home="~/.sadedegel_data") -> Iterator[dict]:
if check_directory_structure(data_home):
for f in glob.glob(str((Path(expanduser(data_home)) / 'extended' / 'sents' / '*' / '*.json').absolute())):
with open(f) as fp:
yield json.load(fp)
else:
return None
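# A minimal usage sketch (assumes the extended corpus was downloaded to the
# default ~/.sadedegel_data directory, as described in __download_message__):
#
#     meta = load_extended_metadata()
#     print(meta['count']['raw'], 'raw documents')
#     for doc in load_extended_sents_corpus():
#         print(len(doc['sentences']))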
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,041 | doruktiktiklar/sadedegel | refs/heads/master | /tests/summarizer/test_tags.py | from .context import Rouge1Summarizer
from .context import KMeansSummarizer, AutoKMeansSummarizer, DecomposedKMeansSummarizer
from .context import RandomSummarizer, PositionSummarizer, LengthSummarizer, BandSummarizer
def test_baseline_tags():
rand = RandomSummarizer()
pos = PositionSummarizer()
length = LengthSummarizer()
band = BandSummarizer()
assert "baseline" in rand
assert "baseline" in pos
assert "baseline" in length
assert "baseline" in band
def test_cluster_tags():
km = KMeansSummarizer()
autokm = AutoKMeansSummarizer()
decomkm = DecomposedKMeansSummarizer()
assert "cluster" in km
assert "cluster" in autokm
assert "cluster" in decomkm
def test_ss_tags():
rouge1 = Rouge1Summarizer()
assert "self-supervised" in rouge1
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,042 | doruktiktiklar/sadedegel | refs/heads/master | /tests/datasets/context.py | import sys
from pathlib import Path
sys.path.insert(0, (Path(__file__) / '..' / '..').absolute())
from sadedegel.dataset import load_raw_corpus, load_sentence_corpus,load_annotated_corpus # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset.extended import load_extended_metadata, load_extended_sents_corpus, load_extended_raw_corpus # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset import util # noqa # pylint: disable=unused-import, wrong-import-position
from sadedegel.dataset import file_paths, CorpusTypeEnum # noqa # pylint: disable=unused-import, wrong-import-position
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,043 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/ml/sbd.py | from os.path import dirname
from pathlib import Path
from sklearn.ensemble import RandomForestClassifier # type: ignore
from sklearn.feature_extraction import DictVectorizer # type: ignore
from sklearn.pipeline import Pipeline # type: ignore
from joblib import dump, load # type: ignore
from loguru import logger
def create_model():
"""Creates a new sbd model detector."""
return Pipeline([('feat', DictVectorizer()), ('dt', RandomForestClassifier())])
def save_model(model, name="sbd.pickle"):
model_file = (Path(dirname(__file__)) / 'model' / name).absolute()
dump(model, model_file)
def load_model(name="sbd.pickle"):
model_file = (Path(dirname(__file__)) / 'model' / name).absolute()
logger.info(f"Loading sbd model from {model_file}")
return load(model_file)
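# Round-trip sketch (mirrors tests/test_sbd_ml.py; `features` is a list of
# per-span feature dicts and `y` the matching end-of-sentence labels):
#
#     model = create_model()
#     model.fit(features, y)
#     save_model(model, 'sbd.test.pickle')
#     model = load_model('sbd.test.pickle')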
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,044 | doruktiktiklar/sadedegel | refs/heads/master | /tests/test_metrics.py | from pytest import approx, raises
from .context import rouge1_score
def test_rouge1_recall():
assert rouge1_score(["big", "cat", "on", "bed"], ["My", "little", "cat", "is", "on", "bed", "!"],
metric="recall") == approx(3 / 4)
def test_rouge1_precision():
assert rouge1_score(["big", "cat", "on", "bed"], ["My", "little", "cat", "is", "on", "bed", "!"],
metric="precision") == approx(3 / 7)
def test_rouge_f1():
recall = 3 / 4
precision = 3 / 7
expected_f1 = (2 * recall * precision) / (recall + precision)
assert rouge1_score(["big", "cat", "on", "bed"], ["My", "little", "cat", "is", "on", "bed", "!"],
metric="f1") == approx(expected_f1)
# Test Case: https://www.freecodecamp.org/news/what-is-rouge-and-how-it-works-for-evaluation-of-summaries-e059fb8ac840/
def test_rouge1_recall_2():
assert rouge1_score(["the", "cat", "was", "under", "the", "bed"],
["the", "cat", "was", "found", "under", "the", "bed"],
metric="recall") == 1.
def test_rouge1_precision_2():
assert rouge1_score(["the", "cat", "was", "under", "the", "bed"],
["the", "cat", "was", "found", "under", "the", "bed"],
metric="precision") == approx(6 / 7)
def test_rouge_f1_2():
recall = 1.
precision = 6 / 7
expected_f1 = (2 * recall * precision) / (recall + precision)
assert rouge1_score(["the", "cat", "was", "under", "the", "bed"],
["the", "cat", "was", "found", "under", "the", "bed"],
metric="f1") == approx(expected_f1)
def test_rouge1_empty_ycand():
assert rouge1_score([], ["test"], metric="f1") == 0.0
def test_rouge1_empty_yref():
assert rouge1_score(["test"], [], metric="f1") == 0.0
def test_rouge1_empty_all():
assert rouge1_score([], [], metric="f1") == 0.0
def test_rouge1_no_common():
assert rouge1_score(["the", "cat"], ["a", "dog"], metric="f1") == 0.0
def test_raise_for_metric():
with raises(ValueError):
assert rouge1_score(["the", "cat"], ["a", "dog"], metric="intercluster-distance") == 0.0
def test_raise_for_input_type():
with raises(ValueError, match="should be of list type.$"):
assert rouge1_score(iter(["the", "cat"]), ["a", "dog"], metric="f1") == 0.0
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,045 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/dataset/extended/__init__.py | from ._core import load_extended_metadata, load_extended_raw_corpus, load_extended_sents_corpus
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,046 | doruktiktiklar/sadedegel | refs/heads/master | /tests/server/context.py | import sys
from pathlib import Path
sys.path.insert(0, (Path(__file__) / '..' / '..').absolute())
from sadedegel.server.__main__ import app # noqa # pylint: disable=unused-import, wrong-import-position
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,047 | doruktiktiklar/sadedegel | refs/heads/master | /tests/datasets/test_filepath.py | import pytest
from pathlib import Path
from .context import file_paths, CorpusTypeEnum
def test_corpustype_enum():
with pytest.raises(ValueError):
file_paths("my_flumsy_corpus_type")
def test_corpus_equality():
fp_raw = file_paths(CorpusTypeEnum.RAW, use_basename=True, noext=True)
fp_sent = file_paths(CorpusTypeEnum.SENTENCE, use_basename=True, noext=True)
assert fp_raw == fp_sent
@pytest.mark.parametrize("corpus_type", [CorpusTypeEnum.RAW, CorpusTypeEnum.SENTENCE, CorpusTypeEnum.ANNOTATED])
def test_corpus_basename(corpus_type):
files = file_paths(corpus_type, use_basename=True, noext=False)
assert all((Path(file).name == file for file in files))
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,048 | doruktiktiklar/sadedegel | refs/heads/master | /tests/test_sbd_ml.py | from sklearn.feature_extraction import FeatureHasher
from sklearn.linear_model import PassiveAggressiveClassifier as PA
from sklearn.pipeline import Pipeline
from .context import load_raw_corpus, load_sentence_corpus, Doc, flatten, is_eos, create_model, load_model, save_model
def test_span_feature_hashed():
raw_corpus = load_raw_corpus()
features = flatten([[span.span_features() for span in Doc(raw).spans] for raw in raw_corpus])
hasher = FeatureHasher()
X = hasher.transform(features)
assert X.shape[1] == hasher.n_features
def test_model_train_explicit():
raw_corpus = load_raw_corpus(False)
sent_corpus = load_sentence_corpus(False)
features = flatten([[span.span_features() for span in Doc(raw).spans] for raw in raw_corpus])
y = flatten(
[[is_eos(span, sent['sentences']) for span in Doc(raw).spans] for raw, sent in zip(raw_corpus, sent_corpus)])
assert len(features) == len(y)
pipeline = Pipeline([('hasher', FeatureHasher()), ('pa', PA())])
pipeline.fit(features, y)
def test_model_train_implicit():
raw_corpus = load_raw_corpus(False)
sent_corpus = load_sentence_corpus(False)
features = flatten([[span.span_features() for span in Doc(raw).spans] for raw in raw_corpus])
y = flatten(
[[is_eos(span, sent['sentences']) for span in Doc(raw).spans] for raw, sent in zip(raw_corpus, sent_corpus)])
assert len(features) == len(y)
sbd_model = create_model()
sbd_model.fit(features, y)
save_model(sbd_model, "sbd.test.pickle")
del sbd_model
_ = load_model("sbd.test.pickle")
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,049 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/summarize/__main__.py | from collections import defaultdict
from math import ceil
from typing import List, Union
from tqdm import tqdm # type: ignore
import click
from tabulate import tabulate
import warnings
import numpy as np # type: ignore
from sklearn.metrics import ndcg_score # type: ignore
from sadedegel.dataset import load_annotated_corpus
from sadedegel.summarize import RandomSummarizer, PositionSummarizer, Rouge1Summarizer, KMeansSummarizer, \
AutoKMeansSummarizer, \
DecomposedKMeansSummarizer, LengthSummarizer
from sadedegel import Sentences, Doc
from sadedegel import tokenizer_context
SUMMARIZERS = [('Random Summarizer', RandomSummarizer()), ('FirstK Summarizer', PositionSummarizer()),
('LastK Summarizer', PositionSummarizer('last')), ('Rouge1 Summarizer (f1)', Rouge1Summarizer()),
('Rouge1 Summarizer (precision)', Rouge1Summarizer('precision')),
('Rouge1 Summarizer (recall)', Rouge1Summarizer('recall')),
               ('Length Summarizer (token)', LengthSummarizer('token')),
               ('Length Summarizer (char)', LengthSummarizer('char')),
('KMeans Summarizer', KMeansSummarizer()),
('AutoKMeans Summarizer', AutoKMeansSummarizer()),
('DecomposedKMeans Summarizer', DecomposedKMeansSummarizer())]
def to_sentence_list(sents: List[str]) -> List[Sentences]:
    sentence_list: List[Sentences] = []
    for i, sent in enumerate(sents):
        sentence_list.append(Sentences(i, sent, sentence_list))
    return sentence_list
@click.group(help="SadedeGel summarizer commandline")
def cli():
pass
@cli.command()
@click.option("-f", "--table-format", default="github")
@click.option("-t", "--tag", default=["extractive"], multiple=True)
@click.option("-d", "--debug", default=False)
def evaluate(table_format, tag, debug):
"""Evaluate all summarizers in sadedeGel"""
if not debug:
warnings.filterwarnings("ignore")
anno = load_annotated_corpus(False)
summarizers = [summ for summ in SUMMARIZERS if any(_tag in summ[1] for _tag in tag)]
scores = defaultdict(list)
for word_tokenizer in tqdm(['simple', 'bert'], unit=" word-tokenizer"):
with tokenizer_context(word_tokenizer):
for name, summarizer in tqdm(summarizers, unit=" method"):
# skip simple tokenizer for clustering models
if "cluster" in summarizer and word_tokenizer == "simple":
continue
for doc in tqdm(anno, unit=" doc", desc=f"Evaluating {name}"):
y_true = [doc['relevance']]
d = Doc.from_sentences(doc['sentences'])
y_pred = [summarizer.predict(d.sents)]
score_10 = ndcg_score(y_true, y_pred, k=ceil(len(doc['sentences']) * 0.1))
score_50 = ndcg_score(y_true, y_pred, k=ceil(len(doc['sentences']) * 0.5))
score_80 = ndcg_score(y_true, y_pred, k=ceil(len(doc['sentences']) * 0.8))
scores[f"{name} - {word_tokenizer}"].append((score_10, score_50, score_80))
    table = [[algo, np.array([s[0] for s in score_list]).mean(), np.array([s[1] for s in score_list]).mean(),
              np.array([s[2] for s in score_list]).mean()] for
             algo, score_list in scores.items()]
# TODO: Sample weight of instances.
print(
tabulate(table, headers=['Method & Tokenizer', 'ndcg(k=0.1)', 'ndcg(k=0.5)', 'ndcg(k=0.8)'],
tablefmt=table_format,
floatfmt=".4f"))
if debug:
click.echo(np.array(table).shape)
if __name__ == '__main__':
cli()
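# Example invocation (assuming sadedegel is installed):
#
#     python -m sadedegel.summarize evaluate --table-format github -t extractive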
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,050 | doruktiktiklar/sadedegel | refs/heads/master | /tests/summarizer/test_cluster.py | from pytest import warns
import pytest
from .context import KMeansSummarizer, AutoKMeansSummarizer, DecomposedKMeansSummarizer, Doc, SimpleTokenizer, \
BertTokenizer, tokenizer_context
@pytest.mark.parametrize("normalized", [True, False])
@pytest.mark.parametrize("tokenizer", [SimpleTokenizer.__name__, BertTokenizer.__name__])
@pytest.mark.parametrize("method", [KMeansSummarizer, AutoKMeansSummarizer, DecomposedKMeansSummarizer])
def test_kmeans(normalized, tokenizer, method):
with tokenizer_context(tokenizer):
d = Doc('ali topu tut. oya ip atla. ahmet topu at.')
if tokenizer == SimpleTokenizer.__name__:
with warns(UserWarning, match="Changing tokenizer to"):
assert len(method(normalize=normalized).predict(d)) == 3
else:
assert len(method(normalize=normalized).predict(d)) == 3
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,051 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/tokenize/_sent.py | from abc import ABCMeta, abstractmethod
from typing import List
import re
import nltk
alphabets = "([A-Za-z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = r"(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
class SentencesTokenizer(object, metaclass=ABCMeta):
def __init__(self):
pass
def __call__(self, doc: str) -> List[str]:
return self._split(doc)
@abstractmethod
def _split(self, text):
pass
class RegexpSentenceTokenizer(SentencesTokenizer):
def _split(self, text: str) -> List[str]:
text = " " + text + " "
text = text.replace(r"\n", " ")
text = re.sub(prefixes, "\\1<prd>", text)
text = re.sub(websites, "<prd>\\1", text)
if "Ph.D" in text:
text = text.replace("Ph.D.", "Ph<prd>D<prd>")
text = re.sub(r"\s" + alphabets + "[.] ", " \\1<prd> ", text)
text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]", "\\1<prd>\\2<prd>\\3<prd>", text)
text = re.sub(alphabets + "[.]" + alphabets + "[.]", "\\1<prd>\\2<prd>", text)
text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
text = re.sub(" " + alphabets + "[.]", " \\1<prd>", text)
if "”" in text:
text = text.replace(".”", "”.")
if "\"" in text:
text = text.replace(".\"", "\".")
if "!" in text:
text = text.replace("!\"", "\"!")
if "?" in text:
text = text.replace("?\"", "\"?")
text = text.replace(".", ".<stop>")
text = text.replace("?", "?<stop>")
text = text.replace("!", "!<stop>")
text = text.replace("<prd>", ".")
sentences = text.split("<stop>")
sentences = sentences[:-1]
sentences = [s.strip() for s in sentences if len(s) > 1]
return sentences
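# A quick illustration of the rule-based splitter (hypothetical input):
#
#     >>> RegexpSentenceTokenizer()("Ali eve gitti. Hava çok güzeldi!")
#     ['Ali eve gitti.', 'Hava çok güzeldi!']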
class NLTKPunctTokenizer(SentencesTokenizer):
def __init__(self):
super().__init__()
self.sent_detector = nltk.data.load('tokenizers/punkt/turkish.pickle')
def _split(self, text: str) -> List[str]:
return self.sent_detector.tokenize(text)
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,052 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/tokenize/__init__.py | from ._sent import RegexpSentenceTokenizer, NLTKPunctTokenizer # noqa: F401
from ..bblock.util import tr_lower, tr_upper, __tr_upper__, __tr_lower__ # noqa: F401
# These re-exports are kept for backward compatibility
from .. import Doc, Sentences  # noqa: F401
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,053 | doruktiktiklar/sadedegel | refs/heads/master | /work/_summ.py | # pylint: skip-file
import json
from typing import List, Any
from sklearn.cluster import KMeans
from sklearn.metrics import euclidean_distances
from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
class JsonFileTokenizer():
def __init__(self, jsonFile, tokenizer, forEval=False):
self.jsonFile = jsonFile
self.tokenizer = tokenizer
self.labels = None
self.forEval = forEval
self.len_text = None
self.max_len = 0
self.sentences = None
self.tokenized_sentences = []
self.token_ids = []
self.token_segments = []
self.tensor_pairs = []
self.padded_sequence_tensor = None
self.segments_tensor = None
def read_json(self):
with open(self.jsonFile, 'rb') as f:
jfile = json.load(f)
sents = [sentence['content'] for sentence in jfile['sentences']]
if self.forEval:
self.labels = np.array([sentence['deletedInRound'] for sentence in jfile['sentences']])
self.sentences = sents
self.len_text = len(sents)
return sents
"""
Add special tokens for BERT
"""
@staticmethod
def add_special_tokens(sentence):
sentence = '[CLS] ' + sentence + ' [SEP]'
return sentence
"""
Tokenize Single Sentence
"""
def tokenize_single_sentence(self, sentence):
sentenceForBert = JsonFileTokenizer.add_special_tokens(sentence)
token_list = self.tokenizer.tokenize(sentenceForBert)
return token_list
"""
Tokenize Sentences by Iterating over them
"""
def tokenize_text(self):
self.read_json()
for sent in self.sentences:
tokenized_sentence = self.tokenize_single_sentence(sent)
if len(tokenized_sentence) > self.max_len:
self.max_len = len(tokenized_sentence)
self.tokenized_sentences.append(tokenized_sentence)
return self.tokenized_sentences
def token_to_ids(self, output=False):
self.tokenize_text()
for token_list in self.tokenized_sentences:
id_list = self.tokenizer.convert_tokens_to_ids(token_list)
segment_list = [1] * len(id_list)
self.token_ids.append(id_list)
self.token_segments.append(segment_list)
if output:
return self.token_ids
def prepare_for_single_inference(self, output=False):
self.clear_state()
self.token_to_ids()
assert len(self.token_ids) == len(self.token_segments)
for tokens, segments in zip(self.token_ids, self.token_segments):
token_tensor = torch.tensor([tokens])
segment_tensor = torch.tensor([segments])
self.tensor_pairs.append((token_tensor, segment_tensor))
if output:
return self.tensor_pairs
def prepare_for_batch_inference(self):
self.prepare_for_single_inference()
token_tensor_list = [x[0].T for x in self.tensor_pairs]
self.padded_sequence_tensor = pad_sequence(token_tensor_list).T
        self.segments_tensor = torch.ones(self.padded_sequence_tensor.shape, dtype=torch.long)  # segment ids must be an integer tensor
return self.padded_sequence_tensor, self.segments_tensor
def clear_state(self):
self.len_text = None
self.max_len = 0
self.sentences = None
self.tokenized_sentences = []
self.token_ids = []
self.token_segments = []
self.tensor_pairs = []
self.padded_sequence_tensor = None
self.segments_tensor = None
def select_layer(bertOut: tuple, layers: List[int], return_cls: bool) -> np.ndarray:
"""
Selects and averages layers from BERT output
Parameters:
bertOut: tuple
Tuple containing output of 12 intermediate layers after feeding a document.
layers: List of integers
List that contains which layer to choose. max = 11, min = 0.
return_cls: bool
Whether to use CLS token embedding as sentence embedding instead of averaging token embeddings.
Returns:
numpy.ndarray (n_sentences, embedding_size) Embedding size if default to 768.
"""
n_layers = len(layers)
n_sentences = bertOut[0].shape[0]
n_tokens = bertOut[0].shape[1]
assert min(layers) > -1
assert max(layers) < 12
if return_cls:
cls_matrix = np.zeros((n_layers, n_sentences, 768))
l_ix = 0
for l, layer in enumerate(bertOut): # noqa
if l not in layers:
continue
else:
l_ix = l_ix + 1
for s, sentence in enumerate(layer):
cls_tensor = sentence[0].numpy()
cls_matrix[l_ix - 1, s, :] = cls_tensor
layer_mean_cls = np.mean(cls_matrix, axis=0)
return layer_mean_cls
else:
        token_matrix = np.zeros((n_layers, n_sentences, n_tokens - 2, 768))
        l_ix = 0  # initialize once, so each selected layer gets its own slot
        for l, layer in enumerate(bertOut):  # noqa
            if l not in layers:
                continue
            else:
                l_ix = l_ix + 1
            for s, sentence in enumerate(layer):
                for t, token in enumerate(sentence[1:-1]):  # Exclude [CLS] and [SEP] embeddings
                    token_matrix[l_ix - 1, s, t, :] = token.numpy()  # use the sliced token, not sentence[t]
tokenwise_mean = np.mean(token_matrix, axis=2)
layer_mean_token = np.mean(tokenwise_mean, axis=0)
return layer_mean_token
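# A minimal sketch of how select_layer is meant to be called (variable names
# are placeholders; assumes a HuggingFace BertModel built with
# output_hidden_states=True, mirroring AnnotatedExtractiveSummarizer.summarize):
#
#     with torch.no_grad():
#         outputs = model(token_ids, token_type_ids=segments)
#     hidden_states = outputs[2][1:]  # the 12 encoder layer outputs
#     embeddings = select_layer(hidden_states, layers=[11], return_cls=False)
#     # embeddings.shape == (n_sentences, 768)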
class NDCG():
def __init__(self, k):
self.k = k
self.max_score = None
self.summary_score = None
def __call__(self, labels, summ_index):
self.max_score = np.sum(sorted(labels)[::-1][:self.k])
self.summary_score = np.sum(labels[summ_index])
return self.summary_score / self.max_score
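# Worked example (hypothetical relevance labels): with k=2, labels [3, 1, 2, 0]
# give max_score = 3 + 2 = 5; a summary picking indices [0, 1] scores
# 3 + 1 = 4, so the returned ratio is 4 / 5 = 0.8.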
class ClusterEmbeddings(BaseEstimator, TransformerMixin):
def __init__(self, k, random_state=None):
super(ClusterEmbeddings, self).__init__()
self.k = k
self.random_state = random_state
self.cluster_centers = None
self.selected_sentence_indices = []
def fit_transform(self, X):
self._X = X
assert self._X.shape[1] == 768
kmeans = KMeans(n_clusters=self.k, random_state=self.random_state)
kmeans.fit_transform(X)
self.cluster_centers = kmeans.cluster_centers_
cluster_df = pd.DataFrame(self.cluster_centers)
euc_dist = euclidean_distances(self._X, cluster_df)
for centroid in range(euc_dist.shape[1]):
self.selected_sentence_indices.append(np.argmin(euc_dist[:, centroid]))
return sorted(self.selected_sentence_indices)
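# Selection rule: for each of the k cluster centroids, the sentence whose
# embedding lies closest to it in Euclidean distance is picked; the indices
# are returned in ascending document order as the extractive summary.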
class AnnotatedExtractiveSummarizer():
"""Run summarization and score on annotated data
"""
def __init__(self, tokenizer, model, k=4, layers=[11], use_CLS_token=False, doEval=True, random_state=None,
verbose=False):
super(AnnotatedExtractiveSummarizer, self).__init__()
self.tokenizer = tokenizer
self.doEval = doEval
self.model = model
self.layers = layers
self.use_CLS_token = use_CLS_token
self.k = k
self.random_state = random_state
self.verbose = verbose
def summarize(self, jsonPath):
self._jsonTokenizer = JsonFileTokenizer(jsonPath, self.tokenizer, forEval=self.doEval)
self._tokens, self._segments = self._jsonTokenizer.prepare_for_batch_inference()
self.model.eval()
if self.verbose:
print('Generating Embeddings...')
with torch.no_grad():
            outputs = self.model(self._tokens[0], token_type_ids=self._segments[0])  # pass segments as token_type_ids, not positionally (attention_mask)
self._twelve_layers = outputs[2][1:]
        self._sentence_embeddings = select_layer(self._twelve_layers, self.layers, return_cls=self.use_CLS_token)
self._cluster_model = ClusterEmbeddings(self.k, self.random_state)
self._selected_indices = self._cluster_model.fit_transform(self._sentence_embeddings)
selected_sentences = np.array(self._jsonTokenizer.sentences)[self._selected_indices]
return selected_sentences
def score(self):
if self.doEval:
ndcg_score = NDCG(k=self.k)
self._score = ndcg_score(self._jsonTokenizer.labels, self._selected_indices)
return self._score
else:
raise Exception("Not in evaluation mode")
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,054 | doruktiktiklar/sadedegel | refs/heads/master | /tests/test_config.py | from .context import get_all_configs, describe_config
def test_all_configs():
assert isinstance(get_all_configs(), dict)
def test_describe_config_str():
assert isinstance(describe_config('word_tokenizer'), str)
def test_describe_config_print(capsys):
describe_config('word_tokenizer', True)
captured = capsys.readouterr()
assert 'Change the default word tokenizer used by sadedegel' in captured.out or \
'Change the default word tokenizer used by sadedegel' in captured.err
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,055 | doruktiktiklar/sadedegel | refs/heads/master | /tests/datasets/test_dataset_loaders.py | from .context import load_raw_corpus, load_sentence_corpus, load_annotated_corpus
import pytest
raw_corpus_parameters = [(load_raw_corpus, True, 98),
(load_raw_corpus, False, 98)]
sentence_including_corpus_parameters = [(load_sentence_corpus, True, 98),
(load_sentence_corpus, False, 98),
(load_annotated_corpus, True, 96),
(load_annotated_corpus, False, 96)]
corpus_parameters = raw_corpus_parameters + sentence_including_corpus_parameters
@pytest.mark.parametrize("loader, return_iter, expected_count", corpus_parameters)
def test_corpus_size(loader, return_iter, expected_count):
docs = loader(return_iter=return_iter)
assert sum(1 for _ in docs) == expected_count
if not return_iter:
assert len(docs) == expected_count
@pytest.mark.parametrize("loader, return_iter, expected_count", sentence_including_corpus_parameters)
def test_sentence_including_corpus_integrity(loader, return_iter, expected_count):
docs = loader(return_iter=return_iter)
assert all((('sentences' in doc) for doc in docs))
if loader == load_annotated_corpus:
assert all((('relevance' in doc) for doc in docs))
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,056 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/ml/__init__.py | from .sbd import create_model, load_model, save_model
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,057 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/dataset/util.py | import warnings
import json
def safe_read(file: str):
try:
with open(file) as fp:
return fp.read()
    except Exception:
warnings.warn(f"Error in reading {file}", UserWarning)
raise
def safe_json_load(file: str):
try:
return json.loads(safe_read(file))
    except Exception:
warnings.warn(f"JSON Decoding error for {file}", UserWarning)
raise
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,058 | doruktiktiklar/sadedegel | refs/heads/master | /scripts/merger.py | from pathlib import Path
from typing import Iterable
from os.path import basename, splitext
import sys
from difflib import context_diff
import click
import numpy as np
from sadedegel.dataset._core import safe_json_load
from sadedegel.dataset import load_sentence_corpus, file_paths
def file_diff(i1: Iterable, i2: Iterable):
l1, l2 = list(i1), list(i2)
if len(l1) != len(l2):
click.secho(f"Iterable sizes are not equal {len(l1)} != {len(l2)}")
s1, s2 = set(l1), set(l2)
if len(s1) != len(s2):
click.secho(f"Set sizes are not equal {len(s1)} != {len(s2)}")
for e1 in list(s1):
if e1 not in s2:
click.secho(f"{e1} in I1 but not in I2")
for e2 in list(s2):
if e2 not in s1:
click.secho(f"{e2} in I2 but not in I1")
@click.command()
def cli():
sents = load_sentence_corpus(False)
fns = [splitext(basename(fp))[0] for fp in file_paths()]
reference = dict((fn, sent['sentences']) for fn, sent in zip(fns, sents))
for fn in fns:
anno_path = Path('sadedegel/work/Labeled') / f"{fn}_labeled.json"
if anno_path.exists():
anno = safe_json_load(anno_path)
anno_sents = [s['content'] for s in anno['sentences']]
_ = np.array([s['deletedInRound'] for s in anno['sentences']])
refe_sents = reference[fn]
if refe_sents != anno_sents:
click.secho(f"Mismatch in number of sentences for document {fn}", fg="red")
diff = context_diff(refe_sents, anno_sents)
click.secho('\n'.join(diff), fg="red")
sys.exit(1)
else:
click.secho(f"MATCH: {fn}", fg="green")
else:
click.secho(f"Annotated corpus member {anno_path} not found.", fg="red")
if __name__ == '__main__':
cli()
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,059 | doruktiktiklar/sadedegel | refs/heads/master | /tests/datasets/test_util.py | import pytest
from json.decoder import JSONDecodeError
from .context import util
def test_safe_file_open_failed():
with pytest.warns(UserWarning, match=r'Error in reading \w+$'), pytest.raises(FileNotFoundError):
_ = util.safe_read("NoSuchFile")
@pytest.fixture(scope="session")
def corrupted_json(tmpdir_factory):
fn = tmpdir_factory.mktemp("data").join("CorruptedJson.json")
with open(fn, 'w') as wp:
print('["name:"Jack"]', file=wp)
return fn
def test_safe_json_load_failed(corrupted_json):
with pytest.warns(UserWarning, match=fr'JSON Decoding error for {corrupted_json}$'), pytest.raises(
JSONDecodeError):
_ = util.safe_json_load(corrupted_json)
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,060 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/summarize/rouge.py | from typing import List
import numpy as np # type: ignore
from ._base import ExtractiveSummarizer
from ..bblock import Sentences
class Rouge1Summarizer(ExtractiveSummarizer):
"""Assign a higher importance score based on ROUGE1 score of the sentences within the document.
metric : {'f1', 'precision','recall'}, default='f1'
Metric to be used for ROUGE1 computation.
normalize : bool, optional (default=True)
If ``False``, return a raw score vector.
Otherwise, return L2 normalized score vector.
"""
tags = ExtractiveSummarizer.tags + ['self-supervised', 'ml']
def __init__(self, metric='f1', normalize=True):
super().__init__(normalize)
if metric not in ['f1', 'precision', 'recall']:
raise ValueError(f"mode should be one of 'f1', 'precision','recall'")
self.metric = metric
def _predict(self, sentences: List[Sentences]):
return np.array([sent.rouge1(self.metric) for sent in sentences])
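# Illustrative sketch (not part of the original module): scoring the sentences of
# a document. Building a Doc and iterating it into Sentences is an assumption
# about sadedegel's usual API; _predict itself is shown above.
def _example_rouge1_scores():
    from sadedegel.bblock import Doc  # import path assumed for this sketch
    doc = Doc("Ali ata bak. Ali okula git.")
    summarizer = Rouge1Summarizer(metric='recall', normalize=False)
    return summarizer._predict(list(doc))  # raw per-sentence ROUGE1 recall scores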
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,061 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/bblock/__init__.py | from .doc import Doc, Sentences
from .word_tokenizer import BertTokenizer, SimpleTokenizer, WordTokenizer
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,062 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/config.py | __all__ = ['set_config', 'get_config', 'describe_config', 'get_all_configs']
from typing import Any
from functools import wraps
from collections import namedtuple
from contextlib import contextmanager
import warnings
from .bblock.doc import Sentences
Configuration = namedtuple("Configuration", "config, description, valid_values")
configs = {
"word_tokenizer": Configuration(config="word_tokenizer",
description="Change the default word tokenizer used by sadedegel",
valid_values=None)
}
def check_config(f):
@wraps(f)
def wrapper(*args, **kwds):
config = args[0]
if config not in configs:
raise Exception((f"{config} is not a valid configuration for sadegel."
"Use sadedegel.get_all_configs() to access list of valid configurations."))
return f(*args, **kwds)
return wrapper
def check_value(f):
@wraps(f)
def wrapper(*args, **kwds):
config, value = args[0], args[1]
cfg = configs.get(config, None)
if cfg:
            if cfg.valid_values is not None and value not in cfg.valid_values:
raise Exception(
f"{value} is not a valid value for {config}. Choose one of {', '.join(cfg.valid_values)}")
else:
raise Exception((f"{config} is not a valid configuration for sadegel."
"Use sadedegel.get_all_configs() to access list of valid configurations."))
return f(*args, **kwds)
return wrapper
@check_config
def set_config(config: str, value: Any):
if config == "word_tokenizer":
Sentences.set_word_tokenizer(value)
@contextmanager
def tokenizer_context(tokenizer_name, warning=False):
current = Sentences.tokenizer.__name__
if warning and current != tokenizer_name:
warnings.warn(f"Changing tokenizer to {tokenizer_name}")
try:
set_config("word_tokenizer", tokenizer_name)
yield
finally:
set_config("word_tokenizer", current)
@check_config
def get_config(config: str): # pylint: disable=inconsistent-return-statements
if config == "word_tokenizer":
return Sentences.tokenizer.__name__
@check_config
def describe_config(config: str, print_desc=False): # pylint: disable=inconsistent-return-statements
if configs[config].valid_values is not None:
valid_values_fragment = "\n\nValid values are\n" + "\n".join(configs[config].valid_values)
else:
valid_values_fragment = ""
config_doc = f"{configs[config].description}{valid_values_fragment}"
if print_desc:
print(config_doc)
else:
return config_doc
def get_all_configs():
return configs
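# Illustrative sketch (not part of the original module): the public config API.
def _example_config_api():
    print(get_config('word_tokenizer'))      # name of the current tokenizer
    describe_config('word_tokenizer', True)  # print the description
    set_config('word_tokenizer', 'SimpleTokenizer')  # tokenizer name assumed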
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,063 | doruktiktiklar/sadedegel | refs/heads/master | /sadedegel/metrics/__init__.py | from ._score import rouge1_score
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,064 | doruktiktiklar/sadedegel | refs/heads/master | /tests/server/test_basics.py | from fastapi.testclient import TestClient
import pytest
from .context import app
client = TestClient(app)
def test_random_summarizer_req():
response = client.post("/api/summarizer/random", json=dict(
doc="Kapıyı aç Veysel Efendi! Mahmut Hoca'nın emriyle Uganda Cumhurbaşkanı'nı karşılamaya gidiyoruz.", wpm=1,
duration=1))
assert response.status_code == 200
assert 'sentences' in response.json()
def test_firstk_summarizer_req():
response = client.post("/api/summarizer/random", json=dict(
doc="Kapıyı aç Veysel Efendi! Mahmut Hoca'nın emriyle Uganda Cumhurbaşkanı'nı karşılamaya gidiyoruz.", wpm=1,
duration=1))
assert response.status_code == 200
assert 'sentences' in response.json()
def test_random_summarizer_wpm0():
response = client.post("/api/summarizer/random", json=dict(
doc="Kapıyı aç Veysel Efendi! Mahmut Hoca'nın emriyle Uganda Cumhurbaşkanı'nı karşılamaya gidiyoruz.", wpm=0,
duration=1))
assert response.status_code == 200
assert 'sentences' in response.json()
def test_random_summarizer_duration0():
response = client.post("/api/summarizer/random", json=dict(
doc="Kapıyı aç Veysel Efendi! Mahmut Hoca'nın emriyle Uganda Cumhurbaşkanı'nı karşılamaya gidiyoruz.", wpm=150,
duration=0))
assert response.status_code == 200
assert 'sentences' in response.json()
def test_random_summarizer_wpm0_duration0():
response = client.post("/api/summarizer/random", json=dict(
doc="Kapıyı aç Veysel Efendi! Mahmut Hoca'nın emriyle Uganda Cumhurbaşkanı'nı karşılamaya gidiyoruz.", wpm=0,
duration=0))
assert response.status_code == 200
assert 'sentences' in response.json()
def test_random_summarizer_nosentence():
response = client.post("/api/summarizer/random", json=dict(
doc="Kapıyı aç Veysel Efendi", wpm=0,
duration=0))
assert response.status_code == 200
assert 'sentences' in response.json()
assert len(response.json()['sentences']) == 1
@pytest.mark.parametrize("summarizer, ", ['random', 'rouge1', 'firstk'])
def test_in_order(summarizer):
doc = "aa aa aa. bb bb cc. aa bb."
sents = ['aa aa aa.', 'bb bb cc.', 'aa bb.']
response = client.post(f"/api/summarizer/{summarizer}", json=dict(
doc=doc, wpm=4,
duration=3))
assert response.status_code == 200 and 'sentences' in response.json()
indexes = [sents.index(s) for s in response.json()['sentences']]
assert indexes == sorted(indexes)
testdata = [('/api/summarizer/random', 'https://www.hurriyet.com.tr', 'POST'),
('/', 'https://www.hurriyet.com.tr', 'GET'),
('/api/summarizer/random', 'http://0.0.0.0:8000/sadedegel', 'POST'),
('/api/summarizer/random', 'https://www.milliyet.com.tr', 'GET'),
('/api/summarizer/random', 'https://www.sozcu.com.tr', 'GET')]
@pytest.mark.parametrize("url, origin, method", testdata)
def test_CORS(url, origin, method):
response = client.options(url, headers={"Origin": origin,
'Access-Control-Request-Method': method})
assert response.status_code == 200
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,065 | doruktiktiklar/sadedegel | refs/heads/master | /tests/test_dataset_loaders.py | import pytest
from .context import load_raw_corpus, load_sentence_corpus
__corpus_length__ = 98
# MOVED to datasets/test_dataset_loaders
@pytest.mark.skip()
def test_load_raw_iter():
doc_iter = load_raw_corpus(return_iter=True)
assert sum(1 for _ in doc_iter) == __corpus_length__
@pytest.mark.skip()
def test_load_raw_list():
doc_list = load_raw_corpus(return_iter=False)
assert len(doc_list) == __corpus_length__
@pytest.mark.skip()
def test_load_sent_iter():
doc_iter = load_sentence_corpus(return_iter=True)
assert sum(1 for _ in doc_iter) == __corpus_length__
@pytest.mark.skip()
def test_load_sent_list():
doc_list = load_sentence_corpus(return_iter=False)
assert len(doc_list) == __corpus_length__
@pytest.mark.skip()
def test_proper_dictionary():
doc_list = load_sentence_corpus(return_iter=False)
assert all((('sentences' in doc) for doc in doc_list))
| {"/sadedegel/dataset/_core.py": ["/sadedegel/dataset/util.py"], "/tests/datasets/context.py": ["/sadedegel/dataset/extended/__init__.py"], "/sadedegel/dataset/extended/__init__.py": ["/sadedegel/dataset/extended/_core.py"], "/tests/datasets/test_filepath.py": ["/tests/datasets/context.py"], "/sadedegel/tokenize/__init__.py": ["/sadedegel/tokenize/_sent.py"], "/tests/datasets/test_dataset_loaders.py": ["/tests/datasets/context.py"], "/sadedegel/ml/__init__.py": ["/sadedegel/ml/sbd.py"], "/scripts/merger.py": ["/sadedegel/dataset/_core.py"], "/tests/datasets/test_util.py": ["/tests/datasets/context.py"], "/sadedegel/summarize/rouge.py": ["/sadedegel/bblock/__init__.py"], "/tests/server/test_basics.py": ["/tests/server/context.py"]} |
42,069 | jkimbo/pyinfra | refs/heads/develop | /tests/test_api.py | # pyinfra
# File: tests/test_api.py
# Desc: tests for the pyinfra API
from unittest import TestCase
from socket import gaierror, error as socket_error
from mock import patch, mock_open
from paramiko import SSHException, AuthenticationException
# Patch in paramiko fake classes
from pyinfra.api import ssh
from .paramiko_util import (
FakeSSHClient, FakeSFTPClient, FakeRSAKey,
FakeAgentRequestHandler
)
ssh.SSHClient = FakeSSHClient
ssh.SFTPClient = FakeSFTPClient
ssh.RSAKey = FakeRSAKey
ssh.AgentRequestHandler = FakeAgentRequestHandler
from pyinfra.api import Inventory, Config, State
from pyinfra.api.ssh import connect_all, connect
from pyinfra.api.operation import add_op
from pyinfra.api.operations import run_ops
from pyinfra.api.exceptions import PyinfraError
from pyinfra.modules import files, server
from .util import create_host
def make_inventory(hosts=('somehost', 'anotherhost'), **kwargs):
return Inventory(
(hosts, {}),
test_group=([
'somehost'
], {
'group_data': 'hello world'
}),
ssh_user='vagrant',
ssh_key='test',
**kwargs
)
def make_config(FAIL_PERCENT=0, TIMEOUT=1, **kwargs):
return Config(
FAIL_PERCENT=FAIL_PERCENT,
TIMEOUT=TIMEOUT,
**kwargs
)
class TestApi(TestCase):
def test_inventory_creation(self):
inventory = make_inventory()
# Get a host
host = inventory['somehost']
self.assertEqual(host.data.ssh_user, 'vagrant')
# Check our group data
self.assertEqual(
inventory.get_groups_data(['test_group']).dict(),
{
'group_data': 'hello world'
}
)
def test_connect_all(self):
inventory = make_inventory()
state = State(inventory, make_config())
connect_all(state)
self.assertEqual(len(inventory.connected_hosts), 2)
def test_connect_all_password(self):
inventory = make_inventory(ssh_password='test')
state = State(inventory, make_config())
connect_all(state)
self.assertEqual(len(inventory.connected_hosts), 2)
def test_connect_exceptions_fail(self):
for exception in (
AuthenticationException, SSHException,
gaierror, socket_error
):
host = create_host(name='nowt', data={
                'ssh_hostname': exception
})
self.assertEqual(connect(host), None)
def test_fail_percent(self):
inventory = make_inventory(('somehost', SSHException))
state = State(inventory, make_config())
with self.assertRaises(PyinfraError):
connect_all(state)
def test_basic_op(self):
state = State(make_inventory(), make_config())
connect_all(state)
add_op(
state, files.file,
'/var/log/pyinfra.log',
user='pyinfra',
group='pyinfra',
mode='644',
sudo=True
)
run_ops(state)
def test_file_op(self):
state = State(make_inventory(), make_config())
connect_all(state)
# Test normal
with patch('pyinfra.modules.files.open', mock_open(read_data='test!'), create=True):
add_op(
state, files.put,
'files/file.txt',
'/home/vagrant/file.txt'
)
# And with sudo
with patch('pyinfra.modules.files.open', mock_open(read_data='test!'), create=True):
add_op(
state, files.put,
'files/file.txt',
'/home/vagrant/file.txt',
sudo=True,
sudo_user='pyinfra'
)
run_ops(state)
def test_run_ops(self):
state = State(make_inventory(), make_config())
connect_all(state)
add_op(
state, server.shell,
'echo "hello world"'
)
run_ops(state)
def test_run_ops_serial(self):
state = State(make_inventory(), make_config())
connect_all(state)
add_op(
state, server.shell,
'echo "hello world"'
)
run_ops(state, serial=True)
def test_run_ops_no_wait(self):
state = State(make_inventory(), make_config())
connect_all(state)
add_op(
state, server.shell,
'echo "hello world"'
)
run_ops(state, no_wait=True)
| {"/tests/test_api.py": ["/pyinfra/api/ssh.py"], "/pyinfra/api/state.py": ["/pyinfra/api/util.py"], "/pyinfra/api/ssh.py": ["/pyinfra/api/util.py"]} |
42,070 | jkimbo/pyinfra | refs/heads/develop | /pyinfra/api/state.py | # pyinfra
# File: pyinfra/api/state.py
# Desc: class that represents the current pyinfra.state
from __future__ import division, unicode_literals, print_function
from uuid import uuid4
from inspect import getargspec
import six
from gevent.pool import Pool
from pyinfra import logger
from .config import Config
# from .facts import get_facts
from .util import sha1_hash
from .exceptions import PyinfraError
class PipelineFacts(object):
def __init__(self, state):
self.state = state
def __enter__(self):
self.state.pipelining = True
self.state.ops_to_pipeline = []
self.state.facts_to_pipeline = {}
def __exit__(self, type_, value, traceback):
self.state.pipelining = False
# Get pipelined facts!
# for name, args in six.iteritems(self.state.facts_to_pipeline):
# get_facts(self.state, name, pipeline_args=args)
# Actually build our ops
for (host_name, func, args, kwargs) in self.state.ops_to_pipeline:
logger.debug(
'Replaying op: {0}, args={1}, kwargs={2}'.format(func, args, kwargs)
)
func(self.state, self.state.inventory[host_name], *args, **kwargs)
def process(self, func, decorated_func, args, kwargs):
pipeline_facts = getattr(decorated_func, 'pipeline_facts', None)
if pipeline_facts:
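            # the op function's first two parameters are state & host; drop them
            # below so `index` lines up with the user-supplied positional arguments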
func_args = list(getargspec(func).args)
func_args = func_args[2:]
for fact_name, arg_name in six.iteritems(pipeline_facts):
index = func_args.index(arg_name)
                if len(args) > index:
fact_arg = args[index]
else:
fact_arg = kwargs.get(arg_name)
if fact_arg:
# Get the sudo/sudo_user state, because facts are uniquely hashed
# using their name, command and sudo/sudo_user.
sudo = kwargs.get('sudo', self.state.config.SUDO)
sudo_user = kwargs.get('sudo_user', self.state.config.SUDO_USER)
self.state.facts_to_pipeline.setdefault(
(fact_name, sudo, sudo_user), set()
).add(fact_arg)
class State(object):
'''
Manages state for a pyinfra deploy.
'''
inventory = None # a pyinfra.api.Inventory which stores all our pyinfra.api.Host's
config = None # a pyinfra.api.Config
pool = None # main gevent pool
in_op = False # whether we are in an @operation (so inner ops aren't wrapped)
# Current op args tuple (sudo, sudo_user, ignore_errors) for use w/facts
current_op_meta = None
# Flag for pipelining mode
pipelining = False
# Flags for printing
print_output = False # print output from the actual deploy (-v)
print_fact_info = False # log fact gathering as INFO > DEBUG (-v)
print_fact_output = False # print output from facts (-vv)
print_lines = False # print blank lines between operations (always in CLI)
# Used in CLI
deploy_dir = None # base directory for locating files/templates/etc
active = True # used to disable operation calls when scanning deploy.py for config
def __init__(self, inventory, config=None):
# Connection storage
self.ssh_connections = {}
self.sftp_connections = {}
# Facts storage
self.facts = {}
self.fact_locks = {}
# Work on all hosts
self.limit_hosts = []
if config is None:
config = Config()
if not config.PARALLEL:
config.PARALLEL = len(inventory)
# Setup greenlet pools
self.pool = Pool(config.PARALLEL)
self.fact_pool = Pool(config.PARALLEL)
# Assign inventory/config
self.inventory = inventory
self.config = config
# Assign self to inventory & config
inventory.state = config.state = self
# Host tracking
self.active_hosts = set()
self.connected_hosts = set()
hostnames = [host.name for host in inventory]
# Op basics
self.op_order = [] # list of operation hashes
self.op_meta = {} # maps operation hash -> names/etc
self.ops_run = set() # list of ops which have been started/run
# Op dict for each host
self.ops = {
hostname: {}
for hostname in hostnames
}
# Meta dict for each host
self.meta = {
hostname: {
'ops': 0, # one function call in a deploy file
'commands': 0, # actual # of commands to run
'latest_op_hash': None
}
for hostname in hostnames
}
# Results dict for each host
self.results = {
hostname: {
'ops': 0, # success_ops + failed ops w/ignore_errors
'success_ops': 0,
'error_ops': 0,
'commands': 0
}
for hostname in hostnames
}
# Pipeline facts context manager attached to self
self.pipeline_facts = PipelineFacts(self)
def fail_hosts(self, hosts_to_fail):
# Remove the failed hosts
self.inventory.active_hosts -= hosts_to_fail
# Check we're not above the fail percent
active_hosts = self.inventory.active_hosts
if self.config.FAIL_PERCENT is not None:
percent_failed = (1 - len(active_hosts) / len(self.inventory)) * 100
if percent_failed > self.config.FAIL_PERCENT:
raise PyinfraError('Over {0}% of hosts failed'.format(
self.config.FAIL_PERCENT
))
# No hosts left!
if not active_hosts:
raise PyinfraError('No hosts remaining!')
def get_temp_filename(self, hash_key=None):
'''
Generate a temporary filename for this deploy.
'''
if not hash_key:
hash_key = str(uuid4())
temp_filename = sha1_hash(hash_key)
return '{0}/{1}'.format(self.config.TEMP_DIR, temp_filename)
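    # Illustrative examples (not part of the original class): the same hash_key
    # always maps to the same path, a missing hash_key gives a random one.
    #   state.get_temp_filename('/etc/nginx.conf')  -> '<TEMP_DIR>/<sha1 of key>'
    #   state.get_temp_filename()                   -> '<TEMP_DIR>/<sha1 of a uuid4>'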
| {"/tests/test_api.py": ["/pyinfra/api/ssh.py"], "/pyinfra/api/state.py": ["/pyinfra/api/util.py"], "/pyinfra/api/ssh.py": ["/pyinfra/api/util.py"]} |
42,071 | jkimbo/pyinfra | refs/heads/develop | /pyinfra/api/util.py | # pyinfra
# File: pyinfra/api/util.py
# Desc: utility functions
from __future__ import division, unicode_literals, print_function
import re
from hashlib import sha1
from copy import deepcopy
from imp import load_source
from types import FunctionType
import six
from jinja2 import Template
from .attrs import AttrBase
BLOCKSIZE = 65536
# Template cache
TEMPLATES = {}
def exec_file(filename, return_locals=False):
'''
    Execute a Python file and optionally return its attributes as a dict.
'''
module_name = '_pyinfra_{0}'.format(filename.replace('.', '_'))
module = load_source(module_name, filename)
if return_locals:
return {
key: getattr(module, key)
for key in dir(module)
}
def get_template(filename_or_string, is_string=False):
'''
Gets a jinja2 ``Template`` object for the input filename or string, with caching
based on the filename of the template, or the SHA1 of the input string.
'''
if is_string:
# Cache against sha1 of the template
cache_key = sha1_hash(filename_or_string)
# Set the input string as our template
template_string = filename_or_string
else:
        # Load template data into memory, closing the file handle afterwards
        with open(filename_or_string) as file_io:
            template_string = file_io.read()
# Cache against filename
cache_key = filename_or_string
if cache_key in TEMPLATES:
return TEMPLATES[cache_key]
TEMPLATES[cache_key] = Template(template_string, keep_trailing_newline=True)
return TEMPLATES[cache_key]
def underscore(name):
'''
Transform CamelCase -> snake_case.
'''
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def sha1_hash(string):
'''
Return the SHA1 of the input string.
'''
hasher = sha1()
hasher.update(string.encode())
return hasher.hexdigest()
def make_command(command, env=None, sudo=False, sudo_user=None):
'''
Builds a shell command with various kwargs.
'''
# Use env & build our actual command
if env:
env_string = ' '.join([
'{0}={1}'.format(key, value)
for key, value in six.iteritems(env)
])
command = '{0} {1}'.format(env_string, command)
# Escape "'s
command = command.replace("'", "\\'")
# No sudo, just sh wrap the command
if not sudo:
command = "sh -c '{0}'".format(command)
# Otherwise, work out sudo
else:
# Sudo with a user, then sh
if sudo_user:
command = "sudo -H -u {0} -S sh -c '{1}'".format(sudo_user, command)
# Sudo then sh
else:
command = "sudo -H -S sh -c '{0}'".format(command)
return command
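# Minimal sketch (not part of the original module) of the strings make_command
# builds; each assertion follows directly from the branches above.
def _example_make_command():
    assert make_command('apt update') == "sh -c 'apt update'"
    assert make_command('apt update', sudo=True) == "sudo -H -S sh -c 'apt update'"
    assert make_command('whoami', sudo=True, sudo_user='pyinfra') == \
        "sudo -H -u pyinfra -S sh -c 'whoami'"
    assert make_command('echo hi', env={'FOO': 'bar'}) == "sh -c 'FOO=bar echo hi'"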
def get_arg_value(state, host, arg):
'''
Runs string arguments through the jinja2 templating system with a state and host. Used
to avoid string formatting in deploy operations which result in one operation per
host/variable. By parsing the commands after we generate the ``op_hash``, multiple
command variations can fall under one op.
'''
if isinstance(arg, six.string_types):
template = get_template(arg, is_string=True)
data = {
'host': host,
'inventory': state.inventory
}
return template.render(data)
elif isinstance(arg, list):
return [get_arg_value(state, host, value) for value in arg]
elif isinstance(arg, dict):
return {
key: get_arg_value(state, host, value)
for key, value in six.iteritems(arg)
}
return arg
def get_arg_name(arg):
'''
Returns the name or value of an argument as passed into an operation. Will use pyinfra
attr key where available, and function names instead of references. See attrs.py for a
more in-depth description.
'''
return (
arg.pyinfra_attr_key
if isinstance(arg, AttrBase)
else arg.__name__
if isinstance(arg, FunctionType)
else arg
)
def make_hash(obj):
'''
Make a hash from an arbitrary nested dictionary, list, tuple or set, used to generate
    IDs for operations based on their name & arguments.
'''
if type(obj) in (set, tuple, list):
return hash(tuple([make_hash(e) for e in obj]))
elif not isinstance(obj, dict):
return hash(obj)
new_obj = deepcopy(obj)
for k, v in new_obj.items():
new_obj[k] = make_hash(v)
return hash(tuple(set(new_obj.items())))
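# Minimal sketch (not part of the original module): dict key order does not
# affect make_hash within a process, because items are hashed as a set.
def _example_make_hash():
    a = make_hash({'name': 'deploy', 'args': ('x', 1), 'sudo': True})
    b = make_hash({'sudo': True, 'name': 'deploy', 'args': ('x', 1)})
    assert a == b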
def get_file_sha1(filename):
'''
Calculates the SHA1 of a file or file object using a buffer to handle larger files.
'''
# If we have a read attribute, just use the object as-is
if hasattr(filename, 'read'):
file_io = filename
# Otherwise, assume a filename and open it up
else:
file_io = open(filename)
# Ensure we're at the start of the file
file_io.seek(0)
buff = file_io.read(BLOCKSIZE)
hasher = sha1()
while len(buff) > 0:
hasher.update(buff.encode())
buff = file_io.read(BLOCKSIZE)
return hasher.hexdigest()
def read_buffer(buff, print_output=False, print_func=None):
'''
Reads a file-like buffer object into lines and optionally prints the output.
'''
out = []
for line in buff:
# Handle local Popen shells returning list of bytes, not strings
if not isinstance(line, six.text_type):
line = line.decode('utf-8')
line = line.strip()
out.append(line)
if print_output:
if print_func:
print(print_func(line))
else:
print(line)
return out
| {"/tests/test_api.py": ["/pyinfra/api/ssh.py"], "/pyinfra/api/state.py": ["/pyinfra/api/util.py"], "/pyinfra/api/ssh.py": ["/pyinfra/api/util.py"]} |
42,072 | jkimbo/pyinfra | refs/heads/develop | /pyinfra/api/ssh.py | # pyinfra
# File: pyinfra/api/ssh.py
# Desc: handle all SSH related stuff
from __future__ import division, unicode_literals, print_function
from os import path
from socket import (
gaierror,
error as socket_error, timeout as timeout_error
)
import six
import gevent
from termcolor import colored
from paramiko.agent import AgentRequestHandler
from paramiko import (
SSHClient, SFTPClient, RSAKey,
MissingHostKeyPolicy, SSHException, AuthenticationException
)
from pyinfra import logger
from pyinfra.api.util import read_buffer, make_command
def connect(host, **kwargs):
'''
    Connect to a single host. Returns the SSH client if successful. Stateless by design so
can be run in parallel.
'''
logger.debug('Connecting to: {0} ({1})'.format(host.name, kwargs))
name = host.name
hostname = host.data.ssh_hostname or name
try:
# Create new client & connect to the host
client = SSHClient()
client.set_missing_host_key_policy(MissingHostKeyPolicy())
client.connect(hostname, **kwargs)
        # Enable SSH agent forwarding
session = client.get_transport().open_session()
AgentRequestHandler(session)
# Log
logger.info('[{0}] {1}'.format(
colored(name, attrs=['bold']),
colored('Connected', 'green')
))
return client
except AuthenticationException as e:
logger.error('Auth error on: {0}, {1}'.format(name, e))
except SSHException as e:
logger.error('SSH error on: {0}, {1}'.format(name, e))
except gaierror:
if hostname != name:
logger.error('Could not resolve {0} host: {1}'.format(name, hostname))
else:
logger.error('Could not resolve {0}'.format(name))
except socket_error as e:
logger.error('Could not connect: {0}:{1}, {2}'.format(
name, kwargs.get('port', 22), e)
)
except EOFError as e:
logger.error('EOF error connecting to {0}: {1}'.format(name, e))
def connect_all(state):
'''
Connect to all the configured servers in parallel. Reads/writes state.inventory.
Args:
state (``pyinfra.api.State`` obj): the state containing an inventory to connect to
'''
greenlets = {}
for host in state.inventory:
kwargs = {
'username': host.data.ssh_user,
'port': host.data.ssh_port or 22,
'timeout': state.config.TIMEOUT,
# At this point we're assuming a password/key are provided
'allow_agent': False,
'look_for_keys': False
}
# Password auth (boo!)
if host.data.ssh_password:
kwargs['password'] = host.data.ssh_password
# Key auth!
elif host.data.ssh_key:
ssh_key_filenames = [
# Global from executed directory
path.expanduser(host.data.ssh_key)
]
# Relative to the deploy
if state.deploy_dir:
ssh_key_filenames.append(
path.join(state.deploy_dir, host.data.ssh_key)
)
for filename in ssh_key_filenames:
if path.isfile(filename):
kwargs['pkey'] = RSAKey.from_private_key_file(
filename=filename,
password=host.data.ssh_key_password
)
break
# No key or password, so let's have paramiko look for SSH agents and user keys
else:
kwargs['allow_agent'] = True
kwargs['look_for_keys'] = True
greenlets[host.name] = state.pool.spawn(connect, host, **kwargs)
gevent.wait(greenlets.values())
# Get/set the results
failed_hosts = set()
connected_hosts = set()
for name, greenlet in six.iteritems(greenlets):
client = greenlet.get()
if not client:
failed_hosts.add(name)
else:
state.ssh_connections[name] = client
connected_hosts.add(name)
# Add connected hosts to inventory
state.inventory.connected_hosts = connected_hosts
# Add all the hosts as active
state.inventory.active_hosts = set(greenlets.keys())
# Remove those that failed, triggering FAIL_PERCENT check
state.fail_hosts(failed_hosts)
def run_shell_command(
state, hostname, command,
sudo=False, sudo_user=None, env=None, timeout=None, print_output=False
):
'''
Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
        env (dict): environment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (channel, stdout, stderr)
    Channel is a Paramiko channel object, mainly used for its ``.exit_status`` attribute.
stdout and stderr are both lists of strings from each buffer.
'''
print_prefix = '[{0}] '.format(colored(hostname, attrs=['bold']))
if env is None:
env = {}
logger.debug('Running command on {0}: "{1}"'.format(hostname, command))
logger.debug('Command sudo?: {0}, sudo user: {1}, env: {2}'.format(
sudo, sudo_user, env
))
command = make_command(command, env=env, sudo=sudo, sudo_user=sudo_user)
if print_output:
print('{0}>>> {1}'.format(print_prefix, command))
# Get the connection for this hostname
connection = state.ssh_connections[hostname]
# Run it! Get stdout, stderr & the underlying channel
_, stdout_buffer, stderr_buffer = connection.exec_command(command)
channel = stdout_buffer.channel
# Iterate through outputs to get an exit status and generate desired list output,
# done in two greenlets so stdout isn't printed before stderr. Not attached to
# state.pool to avoid blocking it with 2x n-hosts greenlets.
stdout_reader = gevent.spawn(
read_buffer, stdout_buffer,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(print_prefix, line)
)
stderr_reader = gevent.spawn(
read_buffer, stderr_buffer,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(print_prefix, colored(line, 'red'))
)
# Wait on output, with our timeout (or None)
greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)
# Timeout doesn't raise an exception, but gevent.wait returns the greenlets which did
# complete. So if both haven't completed, we kill them and fail with a timeout.
if len(greenlets) != 2:
stdout_reader.kill()
stderr_reader.kill()
raise timeout_error()
stdout = stdout_reader.get()
stderr = stderr_reader.get()
return channel, stdout, stderr
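# Illustrative sketch (not part of the original module), assuming a State whose
# hosts are already connected (see connect_all above):
#
#   channel, stdout, stderr = run_shell_command(
#       state, 'somehost', 'uname -a', sudo=True, print_output=True)
#   if channel.exit_status != 0:
#       logger.error('\n'.join(stderr))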
def _get_sftp_connection(state, hostname):
# SFTP connections aren't *required* for deploys, so we create them on-demand
if hostname in state.sftp_connections:
return state.sftp_connections[hostname]
ssh_connection = state.ssh_connections[hostname]
transport = ssh_connection.get_transport()
client = SFTPClient.from_transport(transport)
state.sftp_connections[hostname] = client
return client
def _put_file(state, hostname, file_io, remote_location):
# Ensure we're at the start of the file
file_io.seek(0)
# Upload it via SFTP
sftp = _get_sftp_connection(state, hostname)
sftp.putfo(file_io, remote_location)
def put_file(
state, hostname, file_io, remote_file,
sudo=False, sudo_user=None, print_output=False
):
'''
    Upload file-like objects to the specified host using SFTP. Supports uploading files with sudo
by uploading to a temporary directory then moving & chowning.
'''
print_prefix = '[{0}] '.format(colored(hostname, attrs=['bold']))
if not sudo:
_put_file(state, hostname, file_io, remote_file)
else:
# sudo is a little more complicated, as you can only sftp with the SSH user
# connected, so upload to tmp and copy/chown w/sudo
# Get temp file location
temp_file = state.get_temp_filename(remote_file)
_put_file(state, hostname, file_io, temp_file)
# Execute run_shell_command w/sudo to mv/chown it
command = 'mv {0} {1}'.format(temp_file, remote_file)
if sudo_user:
command = '{0} && chown {1} {2}'.format(command, sudo_user, remote_file)
channel, _, stderr = run_shell_command(
state, hostname, command,
sudo=sudo, sudo_user=sudo_user,
print_output=print_output
)
if channel.exit_status > 0:
logger.error('File error: {0}'.format('\n'.join(stderr)))
return False
if print_output:
print('{0}file uploaded: {1}'.format(print_prefix, remote_file))
| {"/tests/test_api.py": ["/pyinfra/api/ssh.py"], "/pyinfra/api/state.py": ["/pyinfra/api/util.py"], "/pyinfra/api/ssh.py": ["/pyinfra/api/util.py"]} |
42,079 | mlindysay/Boxlog | refs/heads/master | /main/admin.py | from django.contrib import admin
from .models import Box, Item
admin.site.register(Box)
admin.site.register(Item) | {"/main/admin.py": ["/main/models.py"]} |
42,080 | mlindysay/Boxlog | refs/heads/master | /main/models.py | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import timezone
class Box(models.Model):
box_id = models.CharField(max_length=200)
box_label = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.box_label
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
class Item(models.Model):
box = models.ForeignKey(Box, on_delete=models.CASCADE)
item_name = models.CharField(max_length=200)
item_description = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.item_name
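# Illustrative ORM sketch (not part of the original module), e.g. from
# `python manage.py shell`; item_set is Django's default reverse accessor
# for the ForeignKey above:
#
#   from django.utils import timezone
#   box = Box.objects.create(box_id='BX-001', box_label='Kitchen', pub_date=timezone.now())
#   box.item_set.create(item_name='Mug', item_description='Blue ceramic mug')
#   box.was_published_recently()  # True for a box published within the last day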
| {"/main/admin.py": ["/main/models.py"]} |
42,081 | mlindysay/Boxlog | refs/heads/master | /main/urls.py | from django.conf.urls import url
from . import views
app_name = 'main'
urlpatterns = [
url(r'^$', views.index),
url(r'^id/(\w+)/$', views.viewBox),
url(r'^updateBox/$', views.updateBox),
url(r'^addItem/$', views.addItem),
url(r'^label/(\w+)/$', views.genLabel),
#url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
#url(r'^$', views.IndexView.as_view(), name='index'),
#url(r'^recent/$', views.RecentView.as_view(), name='recent'),
#url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
#url(r'^(?P<box_id>[0-9]+)/vote/$', views.vote, name='vote'),
]
| {"/main/admin.py": ["/main/models.py"]} |
42,124 | gegealf/mon-site-marchand | refs/heads/master | /modele.py | import sqlite3
ma_base_de_donnees = "bdd_site_marchand"
socket = sqlite3.connect(ma_base_de_donnees)
request = socket.cursor()
# create the tables:
request.execute(
"""
CREATE TABLE IF NOT EXISTS utilisateurs(
id_utilisateur INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
email TEXT NOT NULL UNIQUE,
mdp TEXT NOT NULL,
nom TEXT NOT NULL,
prenom TEXT NOT NULL,
tel INT NOT NULL,
numero_voie TEXT,
nom_voie TEXT NOT NULL,
code_postal INT NOT NULL,
ville TEXT NOT NULL
)
"""
)
request.execute(
"""
CREATE TABLE IF NOT EXISTS administrateurs(
email TEXT NOT NULL PRIMARY KEY UNIQUE,
mdp TEXT NOT NULL
)
"""
)
request.execute(
"""
CREATE TABLE IF NOT EXISTS ventes(
numero_vente INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
date_vente DATE NOT NULL,
montant_vente NUM NOT NULL,
email TEXT,
FOREIGN KEY (email) REFERENCES utilisateurs(email)
)
"""
)
request.execute(
"""
CREATE TABLE IF NOT EXISTS produits(
numero_produit INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
prix_produit_unite NUM NOT NULL,
categorie TEXT NOT NULL,
commentaire TEXT NOT NULL,
lien_photo TEXT NOT NULL,
en_stock NUMERIC DEFAULT 1,
reapprovisionnement_en_cours NUMERIC DEFAULT 0,
baisse_de_prix NUMERIC DEFAULT 0,
nouveaute NUMERIC DEFAULT 1
)
"""
)
request.execute(
"""
CREATE TABLE IF NOT EXISTS produits_vendus(
numero_produit_vendu INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
quantite_produit INT NOT NULL,
numero_vente INTEGER,
numero_produit INTEGER,
FOREIGN KEY (numero_vente) REFERENCES ventes(numero_vente),
FOREIGN KEY (numero_produit) REFERENCES produits(numero_produit)
)
"""
)
# create my admin account:
request.execute(
"""
INSERT INTO administrateurs
SELECT 'admin@admin.gege', '46d67f3083f7c097922e45295137d48e0827ca3484bb27749cbeca5743906203'
WHERE NOT EXISTS (SELECT * FROM administrateurs WHERE email = 'admin@admin.gege')
"""
)
# create my user account:
request.execute(
"""
INSERT INTO utilisateurs (email, mdp, nom, prenom, tel, numero_voie, nom_voie, code_postal, ville)
SELECT 'gege@gege.com', '46d67f3083f7c097922e45295137d48e0827ca3484bb27749cbeca5743906203',
'alf', 'gege', 0600000000, 2, 'rue machinchose', 75011, 'paris'
WHERE NOT EXISTS (SELECT * FROM utilisateurs WHERE email = 'gege@gege.com')
"""
)
'''
# test ajout d'un produit dans la base de données:
request.execute(
"""
INSERT INTO produits (prix_produit_unite, categorie , commentaire , lien_photo, en_stock,
reapprovisionnement_en_cours, baisse_de_prix, nouveaute)
VALUES
('3.20', 'Clés USB', "Quand tu fais le calcul, je suis mon meilleur modèle car c'est un très, très gros travail puisque the final conclusion of the spirit is perfection C'est pour ça que j'ai fait des films avec des replicants.",
'images/4go_noname_cle_usb2.jpg', 1, 0, 0, 0),
('10.99', 'Cartes mémoire', "Mesdames, messieurs, la crise actuelle nous assure à toutes et à tous les moyens d'aller dans le sens d'un processus allant vers plus d'égalité.",
'images/2go_sandisk_sdcard.png', 1, 0, 0, 1),
('10.99', 'Nouveautés et baisses de prix', "Mesdames, messieurs, la crise actuelle nous assure à toutes et à tous les moyens d'aller dans le sens d'un processus allant vers plus d'égalité.",
'images/2go_sandisk_sdcard.png', 1, 0, 0, 1),
('14.99', 'Cartes mémoire', "Le passage latin classique qui ne vieillit jamais, apprécie autant (ou aussi peu) le lorem ipsum que vous pouvez manipuler avec notre générateur de texte de remplissage facile à utiliser.", 'images/16go_sandisk_sdhc_card.png', 0, 1, 0, 0),
('16.99', 'Cartes mémoire', "Si vous n'avez pas vu Game of Thrones, allez le voir tout de suite. Si vous avez alors, vous comprendrez totalement pourquoi ce générateur de lorem ipsum sur le thème de Hodor est tout simplement génial.",
'images/32go_sandisk_sdhc.jpg', 1, 0, 0, 0),
('17.00', 'Cartes mémoire', "Le Hipster Ipsum est une version artisanale et artisanale du petit générateur classique de lipsem ipsum, qui donnera à vos mocks une touche bleue.",
'images/64go_noname_sdxc.jpg', 1, 0, 15, 0),
('17.00', 'Nouveautés et baisses de prix', "Le Hipster Ipsum est une version artisanale et artisanale du petit générateur classique de lipsem ipsum, qui donnera à vos mocks une touche bleue.",
'images/64go_noname_sdxc.jpg', 1, 0, 15, 0),
('32.50', 'Cartes mémoire', "Si vous ne lisez pas Twitter, les nouvelles, ou si vous ne pouvez pas obtenir assez de l'oraison légendaire de l'apprenti hôte, essayez ce générateur Trump lorem ipsum pour la taille.",
'images/128go_sandisk_extreme.png', 1, 0, 0, 0),
('42.70', 'Cartes mémoire', "Comme votre lorem ipsum extra croustillant? Ensuite, Bacon Ipsum est le générateur de texte d'espace réservé pour vous. Le côté des oeufs et des hashbrowns est facultatif, mais recommandé.",
'images/128go_sandisk_sdxc_extremepro.jpg', 1, 0, 10, 0),
('42.70', 'Nouveautés et baisses de prix', "Comme votre lorem ipsum extra croustillant? Ensuite, Bacon Ipsum est le générateur de texte d'espace réservé pour vous. Le côté des oeufs et des hashbrowns est facultatif, mais recommandé.",
'images/128go_sandisk_sdxc_extremepro.jpg', 1, 0, 10, 0),
('12.99', 'Clés USB', "Soulevez votre conception des morts avec une armée de Zombie Ipsum, texte de remplissage effrayant qui ne mourra pas. Essayez le lorem ipsum des morts-vivants si vous osez...",
'images/32go_sandisk_cle_usb3.webp', 1, 0, 0, 0),
('7.99', 'Clés USB', "Explorez les contrées lointaines de la galaxie avec ce générateur de texte fictif sur le thème de l'espace, avec des citations de classiques TV comme Star Trek et de vrais astronautes eux-mêmes.",
'images/16go_dtse_cle_usb2.webp', 0, 1, 0, 0),
('45.12', 'Clés USB', "Tu vois, premièrement, il faut se recréer... pour recréer... a better you et ça, c'est très dur, et, et, et... c'est très facile en même temps. C'est pour ça que j'ai fait des films avec des replicants.",
'images/128go_sandisk_cle_usb3.webp', 1, 0, 0, 1),
('45.12', 'Nouveautés et baisses de prix', "Tu vois, premièrement, il faut se recréer... pour recréer... a better you et ça, c'est très dur, et, et, et... c'est très facile en même temps. C'est pour ça que j'ai fait des films avec des replicants.",
'images/128go_sandisk_cle_usb3.webp', 1, 0, 0, 1),
('32.45', 'SSD', "Riche en fibres et bon pour votre cœur, Veggie Ipsum livre le texte le plus organique, cueilli à la main, lorem ipsum placeholder à votre porte (ou navigateur... je suppose).",
'images/120go_samsung_ssd.webp', 1, 0, 0, 0),
('59.99', 'SSD', "Sentez comme un vrai meathead dans vos maquettes avec Bro Ipsum, spécialisé dans un lorem ipsum composé de phrases-clés tendrement déployées dans la plus terne des conversations.",
'images/240go_crucial_ssd.webp', 1, 0, 0, 0),
('38.55', 'HDD', "You see, je suis mon meilleur modèle car il faut se recréer... pour recréer... a better you et je ne cherche pas ici à mettre un point ! Donc on n'est jamais seul spirituellement !",
'images/1to_maxtor_hdd.jfif', 1, 0, 0, 0),
('62.00', 'HDD', "Très chers compatriotes, vous le savez et je vous le redit que la prise de conscience de nos dirigeants a pour conséquence obligatoire l'urgente nécessité d'un processus allant vers plus d'égalité.",
'images/2to_wd-hdd.jfif', 1, 0, 0, 0),
('22.99', 'HDD', "Je me souviens en fait, après il faut s'intégrer tout ça dans les environnements et il faut se recréer... pour recréer... a better you parce que spirituellement, on est tous ensemble, ok ?",
'images/500go_samsung_hdd.jfif', 0, 1, 0, 1),
('59.20', 'HDD', "Ces feuilles de lettrage pourraient être frottées sur n'importe où et ont été rapidement adoptés par les graphistes, les imprimeurs, les architectes et les annonceurs pour leur aspect professionnel et la facilité d'utilisation.",
'images/3to_noname_hdd.jfif', 1, 0, 50, 0),
('59.20', 'Nouveautés et baisses de prix', "Ces feuilles de lettrage pourraient être frottées sur n'importe où et ont été rapidement adoptés par les graphistes, les imprimeurs, les architectes et les annonceurs pour leur aspect professionnel et la facilité d'utilisation.",
'images/3to_noname_hdd.jfif', 1, 0, 50, 0),
('9.99', 'RAM', "Tu comprends, je sais que, grâce à ma propre vérité c'est un très, très gros travail et c'est très, très beau d'avoir son propre moi-même ! Ça respire le meuble de Provence, hein ?",
'images/2go_kingston_ram.jfif', 0, 1, 0, 0),
('7.10', 'RAM', "Loin, très loin, au delà des monts Mots, à mille lieues des pays Voyellie et Consonnia, demeurent les Bolos Bolos. Ils vivent en retrait, à Bourg-en-Lettres, sur les côtes de la Sémantique, un vaste océan de langues.",
'images/1go_noname_ram.jfif', 1, 0, 0, 0),
('89.99', 'RAM', "La liberté ne tiens qu'à un fils et c'est pourquoi je tiens à vous dire que la crise actuelle doit s'intégrer à la finalisation globale d'un rappel des droits fondamentaux de notre pays",
'images/8gox4_gskill_ram.jfif', 1, 0, 25, 0),
('89.99', 'Nouveautés et baisses de prix', "La liberté ne tiens qu'à un fils et c'est pourquoi je tiens à vous dire que la crise actuelle doit s'intégrer à la finalisation globale d'un rappel des droits fondamentaux de notre pays",
'images/8gox4_gskill_ram.jfif', 1, 0, 25, 0)
"""
)
'''
socket.commit()
class MaBaseDeDonnees:
def __init__(self):
ma_base_de_donnees = "bdd_site_marchand" # file containing the SQLite Database
self.socket = sqlite3.connect(ma_base_de_donnees) # creating connection to the database
# creating a cursor that will contain the SQL queries to execute
self.request = self.socket.cursor()
def verifier_si_compte_utilisateur_existe_deja(self, email, mdp_hashe):
""" vérifier si le compte avec cet email et mot de passe existe dans la base de données """
self.request.execute(
"""SELECT count(*) FROM utilisateurs
WHERE email = '{}' AND mdp = '{}' """.format(email, mdp_hashe)
)
data = self.request.fetchone()[0]
if data == 0:
return "faux"
else:
return "vrai"
def verifier_si_compte_administrateur_existe_deja(self, email, mdp_hashe):
""" vérifier si le compte avec cet email et mot de passe existe dans la base de données """
self.request.execute(
"""SELECT count(*) FROM administrateurs
WHERE email = '{}' AND mdp = '{}' """.format(email, mdp_hashe)
)
data = self.request.fetchone()[0]
if data == 0:
return "faux"
else:
return "vrai"
def trouver_nom_prenom_utilisateur(self, email, mdp_hashe):
""" """
self.request.execute(
"""SELECT nom, prenom FROM utilisateurs
WHERE email = '{}' AND mdp = '{}' """.format(email, mdp_hashe)
)
data = self.request.fetchone()
return data[1] + " " + data[0][0].upper() + "."
def trouver_id_utilisateur(self, email, mdp_hashe):
""" """
self.request.execute(
"""SELECT id_utilisateur FROM utilisateurs
WHERE email = '{}' AND mdp = '{}' """.format(email, mdp_hashe)
)
data = self.request.fetchone()[0]
print(data)
return data
def verifier_email(self, email_utilisateur):
""" """
self.request.execute(
"""SELECT count(*) FROM utilisateurs
WHERE email = '{}' """.format(email_utilisateur)
)
data = self.request.fetchone()[0]
if data == 0:
return False
else:
return True
def ajouter_utilisateur(self, utilisateur):
""" """
self.request.execute(
"""INSERT INTO utilisateurs (email, mdp, nom, prenom, tel, numero_voie, nom_voie,
code_postal, ville) VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')""".format(
utilisateur[0], utilisateur[1], utilisateur[2],
utilisateur[3], utilisateur[4], utilisateur[5],
utilisateur[6], utilisateur[7], utilisateur[8])
)
self.socket.commit()
def recuperer_liste_produits(self, liste_categories):
""" """
data = {}
for i in range(0, len(liste_categories)):
self.request.execute(
"""SELECT prix_produit_unite, commentaire, lien_photo, en_stock,
reapprovisionnement_en_cours, baisse_de_prix, nouveaute, numero_produit
from produits WHERE categorie = '{}' """.format(liste_categories[i])
)
data[liste_categories[i]] = self.request.fetchall()
return data
def recuperer_produit(self, numero_produit):
""" """
self.request.execute(
"""SELECT * FROM produits
WHERE numero_produit = '{}' """.format(numero_produit)
)
return self.request.fetchall()[0]
def verifier_numero_produit(self, numero_produit):
""" """
self.request.execute(
"""SELECT numero_produit FROM produits """
)
data = self.request.fetchall()
if numero_produit > len(data):
return False
return True
def mauvaise_requete(self):
""" """
self.request.execute(
"""SELECT numero_prod FROM produits """
)
return 'rien'
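# Illustrative sketch (not part of the original module): the typical read path
# used by the controller; 'HDD' and 'SSD' are categories from the seed data.
def _exemple_utilisation():
    db = MaBaseDeDonnees()
    produits = db.recuperer_liste_produits(['HDD', 'SSD'])
    premier = db.recuperer_produit(1) if db.verifier_numero_produit(1) else None
    return produits, premier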
| {"/main.py": ["/controleur.py"], "/controleur.py": ["/modele.py"]} |
42,125 | gegealf/mon-site-marchand | refs/heads/master | /main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, render_template # pip install flask
from os import path
import logging.config
import logging
# logging configuration file:
log_file_path = path.join(path.dirname(path.abspath(__file__)), 'log.config')
logging.config.fileConfig(log_file_path)
log = logging.getLogger(__name__)  # logger for the current module
app = Flask(__name__)
app.config['SECRET_KEY'] = "tandiS_quE_leS_crachatS_rougeS"
@app.errorhandler(404)
def erreur_404(e):
log.error(e)
return render_template('erreur_404.html')
@app.errorhandler(500)
def erreur_500(e):
log.error(e)
return render_template('erreur_500.html')
if __name__ == "__main__": # python main.py
import controleur
app.add_url_rule('/', 'page_d_accueil', view_func=controleur.page_d_accueil)
app.add_url_rule('/page_d_authentification', 'page_d_authentification', methods=['GET', 'POST'],
view_func=controleur.page_d_authentification)
app.add_url_rule('/page_administrateur', 'page_administrateur', view_func=controleur.page_administrateur)
app.add_url_rule('/page_creation_compte_utilisateur', 'page_creation_compte_utilisateur', methods=['GET', 'POST'],
view_func=controleur.page_creation_compte_utilisateur)
app.add_url_rule('/deconnexion', 'deconnexion', view_func=controleur.deconnexion)
app.add_url_rule('/page_fiche_produit/<int:numero_produit>', 'page_fiche_produit',
view_func=controleur.page_fiche_produit)
app.add_url_rule('/page_d_accueil/ajouter_au_panier/<int:numero_produit>', 'page_d_accueil/ajouter_au_panier',
view_func=controleur.ajouter_au_panier)
app.add_url_rule('/page_fiche_produit/<int:numero_produit>/ajouter_au_panier',
'page_fiche_produit/ajouter_au_panier', view_func=controleur.ajouter_au_panier)
app.add_url_rule('/page_panier', 'page_panier', view_func=controleur.page_panier)
app.add_url_rule('/page_panier/<int:numero_produit>/supprimer_du_panier', 'page_panier/supprimer_du_panier',
view_func=controleur.supprimer_du_panier)
app.add_url_rule('/page_d_erreur', 'page_d_erreur', view_func=controleur.page_d_erreur)
app.add_url_rule('/test_500_html', 'test_500_html', view_func=controleur.test_500_html)
app.add_url_rule('/test_500_serveur', 'test_500_serveur', view_func=controleur.test_500_serveur)
try:
        log.info('starting the application')
        app.config['SESSION_COOKIE_DOMAIN'] = '127.0.0.1'
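        # cert.pem / key.pem are a local self-signed pair; they can be generated
        # with, for example:
        #   openssl req -x509 -newkey rsa:4096 -nodes -keyout key.pem -out cert.pem -days 365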
        app.run(ssl_context=('cert.pem', 'key.pem'), host='127.0.0.1', port=8000)  # run the application locally
# app.run(host='0.0.0.0', port=4001)
        log.info('application stopped normally')
except Exception as ex:
        log.exception('the application stopped because of an unexpected error')
| {"/main.py": ["/controleur.py"], "/controleur.py": ["/modele.py"]} |
42,126 | gegealf/mon-site-marchand | refs/heads/master | /controleur.py | from flask import render_template, session, request, redirect, url_for, abort
import logging
import hashlib
import re
from modele import MaBaseDeDonnees as MBDD
log = logging.getLogger(__name__)
def page_d_accueil():
""" """
liste_categories = _recuperer_categories()
liste_produits = _recuperer_liste_produits()
session['page_precedente'] = []
if not session.get('vous_etes_loggue'):
        log.debug('home page accessed WITHOUT authentication')
return render_template("page_d_accueil.html", message="",
liste_categories=liste_categories, lenc=len(liste_categories),
liste_produits=liste_produits
)
    log.debug('home page accessed WITH authentication')
message1 = "bienvenue"
message2 = message1 + " " + session.get('utilisateur')
return render_template("page_d_accueil.html", message1=message1, message2=message2,
liste_categories=liste_categories, lenc=len(liste_categories),
liste_produits=liste_produits, nombre_produits_dans_panier=len(session.get('panier'))
)
def page_d_authentification():
""" """
log.debug('connexion à la page d\'authentification')
message_d_erreur = None
    pp = session.get('page_precedente') or []
    pp.append(_redirect_url())
    session['page_precedente'] = pp
if request.method == 'POST':
mdp_utilisateur = request.form['mot_de_passe']
email_utilisateur = request.form['email']
compte_utilisateur_valide = _verifier_le_compte(email_utilisateur, mdp_utilisateur)
if compte_utilisateur_valide != "vrai":
session['vous_etes_loggue'] = False
if compte_utilisateur_valide == "accès_admin":
session['vous_etes_loggue'] = 'jesuisadminpastoi'
return redirect(url_for('page_administrateur'))
            log.debug('authentication failed')
message_d_erreur = 'erreur lors de l\'authentification, veuillez recommencer'
else:
            log.debug('redirecting to the home/product page after authentication')
session['vous_etes_loggue'] = True
session['panier'] = []
return redirect(session.get('page_precedente')[0])
return render_template('page_d_authentification.html', message_d_erreur=message_d_erreur)
def deconnexion():
""" """
log.debug('deconnexion du compte effectuée et panier vidé')
session['vous_etes_loggue'] = False
session['panier'] = []
return redirect(_redirect_url())
def page_administrateur():
""" """
if session.get('vous_etes_loggue') != 'jesuisadminpastoi':
log.error('tentative de connexion à la page administrateur sans autorisation')
return render_template('page_d_erreur.html')
log.debug('connexion à la page administrateur')
message = "bienvenue"
return render_template('page_administrateur.html', message=message)
def page_creation_compte_utilisateur():
""" """
log.debug('connexion à la page de création de compte utilisateur')
if request.method == 'POST':
email_utilisateur = request.form['email']
mdp_utilisateur = request.form['mot_de_passe']
nom_utilisateur = request.form['nom']
prenom_utilisateur = request.form['prenom']
numero_de_telephone_utilisateur = request.form['numero_de_telephone']
numero_de_voie = request.form['numero_de_voie']
nom_de_voie = request.form['nom_de_voie']
code_postal = request.form['code_postal']
ville = request.form['ville']
mdp_hashe = __hashage_mdp__(mdp_utilisateur)
utilisateur = [
email_utilisateur,
mdp_hashe,
nom_utilisateur,
prenom_utilisateur,
numero_de_telephone_utilisateur,
numero_de_voie,
nom_de_voie,
code_postal,
ville
]
if _verifier_format_email(email_utilisateur) and _verifier_format_mdp(mdp_utilisateur) and \
_verifier_format_donnees(utilisateur):
log.debug('formats de l\'adresse mail, du mot de passe et des données valides')
db = MBDD()
if not db.verifier_email(email_utilisateur):
log.debug('ajout du compte utilisateur à la base de données')
db = MBDD()
db.ajouter_utilisateur(utilisateur)
message = "votre compte à bien été enregistré: "
return render_template('page_creation_compte_utilisateur.html', message=message)
log.debug('erreur lors de la validation del\'email: déjà utilisé')
message_d_erreur = "erreur lors de la validation, veuillez recommencer"
return render_template('page_creation_compte_utilisateur.html', message_d_erreur=message_d_erreur)
return render_template('page_creation_compte_utilisateur.html')
def page_fiche_produit(numero_produit):
""" """
session['page_precedente'] = []
db = MBDD()
if not db.verifier_numero_produit(numero_produit):
log.error('tentative de connexion à une fiche produit inexistante')
return render_template('page_d_erreur.html')
produit = db.recuperer_produit(numero_produit)
infos_produit = produit[4].split('_', 2)
infos = [
infos_produit[2].split('.', 1)[0].replace('_', ' '),
infos_produit[0].split('/', 1)[1],
infos_produit[1]
]
if not session.get('vous_etes_loggue'):
log.debug('connexion SANS authentification à la fiche du produit avec le numero: %s', numero_produit)
return render_template('page_fiche_produit.html', numero_produit=numero_produit, produit=produit,
message="", infos=infos
)
log.debug('connexion AVEC authentification à la fiche du produit avec le numero: %s', numero_produit)
message1 = "bienvenue"
message2 = message1 + " " + session.get('utilisateur')
return render_template('page_fiche_produit.html', numero_produit=numero_produit, produit=produit,
message1=message1, message2=message2, infos=infos,
nombre_produits_dans_panier=len(session.get('panier'))
)
def page_panier():
""" """
if not session.get('vous_etes_loggue'):
log.error('tentative de connexion SANS authentification à la page panier')
return render_template('page_d_erreur.html')
db = MBDD()
liste_produits = {}
for numero_produit in session.get('panier'):
        # TODO: handle the case where the same product appears several times
liste_produits[numero_produit] = db.recuperer_produit(numero_produit)
log.debug('accès à la page panier avec les produit numero: %s dans le panier', session['panier'])
message1 = "bienvenue"
message2 = message1 + " " + session.get('utilisateur')
message3 = "panier"
return render_template('page_panier.html', liste_produits=liste_produits, message1=message1, message2=message2,
message3=message3
)
def ajouter_au_panier(numero_produit):
""" """
if not session.get('vous_etes_loggue'):
return redirect(_redirect_url())
log.debug('ajout du produit numero: %s dans le panier', numero_produit)
session['panier'] += [numero_produit]
return redirect(_redirect_url())
def supprimer_du_panier(numero_produit):
""" """
if not session.get('vous_etes_loggue'):
return render_template('page_d_erreur.html')
log.debug('suppression du produit numero: %s dans le panier', numero_produit)
ma_nouvelle_liste = session.get('panier')
ma_nouvelle_liste.remove(numero_produit)
session['panier'] = ma_nouvelle_liste
if len(session.get('panier')) == 0:
return redirect(url_for('page_d_accueil'))
return redirect(_redirect_url())
def page_d_erreur():
return render_template('page_d_erreur.html')
def test_500_html():
""" """
render_template('page_qui_n_existe_pas.html')
def test_500_serveur():
""" """
db = MBDD()
r = db.mauvaise_requete()
return r
def __hashage_mdp__(mot_de_passe_en_clair):
""" création d'un mot de passe hashé """
log.debug('hashage du mot de passe')
a = bytes(mot_de_passe_en_clair, "utf-8")
mdp_hashe = hashlib.sha256(a).hexdigest()
return mdp_hashe
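# Illustration (not part of the original file): hashing is deterministic, so
# __hashage_mdp__('Secret#123') always returns the same 64-character
# hexadecimal SHA-256 digest, which is what gets stored and compared at login.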
def _verifier_le_compte(email_utilisateur, mdp_utilisateur):
""" appel des méthodes de classe MaBaseDeDonnees permettant de vérifier le compte avec email/mot de passe """
log.debug('verification email et mot de passe')
db = MBDD()
mdp_hashe = __hashage_mdp__(mdp_utilisateur)
access1 = db.verifier_si_compte_utilisateur_existe_deja(email_utilisateur, mdp_hashe)
if access1 == "vrai":
session['utilisateur'] = db.trouver_nom_prenom_utilisateur(email_utilisateur, mdp_hashe)
return "vrai"
else:
access2 = db.verifier_si_compte_administrateur_existe_deja(email_utilisateur, mdp_hashe)
if access2 == "vrai":
return "accès_admin"
return "faux"
def _verifier_format_email(email_utilisateur):
""" """
log.debug('vérification du format de l\'adresse mail')
if (re.search("^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$", email_utilisateur)):
return True
log.debug('erreur de format de l\'adresse mail')
return False
def _verifier_format_mdp(mdp_utilisateur):
log.debug('vérification du format du mdp')
if (
re.search("^(?=.*?[A-Z])(?=(.*[a-z]){1,})(?=(.*[\d]){1,})(?=(.*[\W]){1,})(?!.*\s).{8,14}$",
mdp_utilisateur)):
return True
log.debug('erreur de format du mdp')
return False
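# Illustration (not part of the original file): the regex above accepts
# passwords of 8-14 characters containing at least one uppercase letter,
# one lowercase letter, one digit and one special character, with no
# whitespace, e.g. _verifier_format_mdp('Abcdef1!') -> True while
# _verifier_format_mdp('abcdef1!') -> False (no uppercase letter).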
def _verifier_format_donnees(utilisateur):
""" """
log.debug('vérification du format des données')
if utilisateur[2] and utilisateur[3] and utilisateur[4] and utilisateur[4].isdigit() \
and (len(utilisateur[4]) == 10 or len(utilisateur[4]) == 13) \
and utilisateur[6] and utilisateur[7] and utilisateur[8]:
return True
log.debug('erreur au niveau des données utilisateurs')
return False
def _recuperer_categories():
""" ici on peut définir pour l'ensemble de l'application, les onglets et
leur ordre d'apparition dans la page d'accueil """
liste_categories = ['Nouveautés et baisses de prix', 'Cartes mémoire', 'Clés USB', 'SSD', 'HDD', 'RAM']
return liste_categories
def _recuperer_liste_produits():
""" fournie tous les produits de la table sous la forme d'un dictionnaire,
avec en clé la catégorie et en valeur la liste des produits de cette catégorie,
pour permettre l'affichage par onglets catégories dans la page d'accueil
"""
db = MBDD()
liste_categories = _recuperer_categories()
return db.recuperer_liste_produits(liste_categories)
def _redirect_url():
return request.args.get('next') or request.referrer or url_for('page_d_accueil')
| {"/main.py": ["/controleur.py"], "/controleur.py": ["/modele.py"]} |
42,139 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/views.py | from django.http import HttpResponse, Http404
from django.db.models import Q
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from django.conf import settings
from django.core.mail import send_mail
from .models import Colegio, Pagina_inicio, Red_social, Secciones_educativas, Telefono, Noticia
from .serializers import ColegioSerializer, PaginaSerializer, RedSerializer, SeccionSerializer, TelefonoSerializer, NoticiaSerializer
class ColegioViewSet(APIView):
def get_object(self, id):
try:
return Colegio.objects.get(id_colegio = id)
except Colegio.DoesNotExist:
raise Http404
def get(self, request, id, format=None):
colegioObj = self.get_object(id)
serializer = ColegioSerializer(colegioObj)
return Response(serializer.data)
class PaginaViewSet(APIView):
def get(self, request, format=None):
paginaObj = Pagina_inicio.objects.all()
serializer = PaginaSerializer(paginaObj, many=True)
return Response(serializer.data)
class RedViewSet(APIView):
def get(self, request, format=None):
redObj = Red_social.objects.all()
serializer = RedSerializer(redObj, many=True)
return Response(serializer.data)
class SeccionViewSet(APIView):
def get(self, request, format=None):
seccionObj = Secciones_educativas.objects.all()
serializer = SeccionSerializer(seccionObj, many=True)
return Response(serializer.data)
class TelefonoViewSet(APIView):
def get(self, request, format=None):
telefonoObj = Telefono.objects.all()
serializer = TelefonoSerializer(telefonoObj, many=True)
return Response(serializer.data)
class NoticiaViewSet(APIView):
def get(self, request, format=None):
noticiaObj = Noticia.objects.all()
serializer = NoticiaSerializer(noticiaObj, many=True)
return Response(serializer.data)
class EnviarCorreo(APIView):
def post(self, request, format=None):
print(request.data)
subject = 'Página de Contacto Institución Educativa Provincia de Los Ríos'
nombre = request.data['nombre']
correo = request.data['correo']
texto = request.data['mensaje']
message = 'La persona con nombre: ' + nombre +'\n'\
'Con correo: ' + correo +'\n'\
'Le acaba de enviar el siguiente mensaje: ' + texto
email_from = settings.EMAIL_HOST_USER
recipient_list = [correo, ]
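        # Note (illustration, not in the original file): recipient_list is the
        # visitor's own address here; a contact form would more typically
        # notify the institution's inbox as well.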
send_mail(subject, message, email_from, recipient_list)
data = { 'resp': True}
return Response(data, status=status.HTTP_201_CREATED) | {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,140 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/admin.py | from django.contrib import admin
from .models import Colegio, Pagina_inicio, Red_social, Secciones_educativas, Telefono, Noticia
modelos = [ Colegio, Pagina_inicio, Red_social, Secciones_educativas, Telefono, Noticia ]
# Register your models here.
admin.site.register(modelos) | {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,141 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/models.py | from django.db import models
class Colegio(models.Model):
id_colegio = models.AutoField(primary_key = True, unique=True)
nombre = models.CharField(max_length= 100)
direccion = models.CharField(max_length=100)
email = models.EmailField(max_length=100, default=None, null=True, blank=True)
logo = models.ImageField(upload_to='imagen', max_length=200, null=True, blank=True)
provincia = models.CharField(max_length= 50)
canton = models.CharField(max_length= 50)
parroquia = models.CharField(max_length= 10)
tipo_educacion = models.CharField(max_length= 50)
tipo_unidad = models.CharField(max_length= 50)
zona = models.CharField(max_length= 100)
regimen_escolar = models.CharField(max_length= 100)
educacion = models.CharField(max_length= 100)
nivel_educativo = models.CharField(max_length= 50)
modalidad = models.CharField(max_length= 50)
num_estudiantes = models.IntegerField()
num_docentes = models.IntegerField()
codigo_amie = models.CharField(max_length= 50)
class Telefono(models.Model):
id_telefono = models.AutoField(primary_key = True, unique=True)
numero = models.CharField(max_length= 15)
id_colegio = models.ForeignKey(Colegio, on_delete=models.PROTECT, null=True, blank=True)
class Red_social(models.Model):
id_red = models.AutoField(primary_key = True, unique=True)
nombre = models.CharField(max_length= 15)
link = models.CharField(max_length= 200)
id_colegio = models.ForeignKey(Colegio, on_delete=models.PROTECT, null=True, blank=True)
class Pagina_inicio(models.Model):
id_pagina = models.AutoField(primary_key = True, unique=True)
imagen = models.ImageField(upload_to='imagen', max_length=200, null=True, blank=True)
id_colegio = models.ForeignKey(Colegio, on_delete=models.PROTECT, null=True, blank=True)
class Secciones_educativas(models.Model):
id_seccion = models.AutoField(primary_key = True, unique=True)
titulo = models.CharField(max_length= 200, null=True, blank=True)
jornada = models.CharField(max_length= 10, null=True, blank=True)
horario = models.CharField(max_length= 100, null=True, blank=True)
texto = models.CharField(max_length= 500)
imagen = models.ImageField(upload_to='imagen', max_length=200, null=True, blank=True)
id_colegio = models.ForeignKey(Colegio, on_delete=models.PROTECT, null=True, blank=True)
class Noticia(models.Model):
id_noticia = models.AutoField(primary_key = True, unique=True)
titulo = models.CharField(max_length=250, null=True, blank=True)
texto = models.CharField(max_length= 1000)
imagen = models.ImageField(upload_to='imagen', max_length=200, null=True, blank=True)
id_colegio = models.ForeignKey(Colegio, on_delete=models.PROTECT, null=True, blank=True) | {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,142 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/apps.py | from django.apps import AppConfig
class ServiciosInstitucionConfig(AppConfig):
name = 'servicios_institucion'
| {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,143 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/migrations/0002_secciones_educativas_titulo.py | # Generated by Django 3.1 on 2020-10-16 21:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('servicios_institucion', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='secciones_educativas',
name='titulo',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,144 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/urls.py | from django.urls import path
from django.conf.urls import include
from rest_framework.urlpatterns import format_suffix_patterns
from servicios_institucion import views
urlpatterns = [
path('get_colegio/<str:id>/', views.ColegioViewSet.as_view()),
path('get_telefono/', views.TelefonoViewSet.as_view()),
path('get_redes/', views.RedViewSet.as_view()),
path('get_pagina/', views.PaginaViewSet.as_view()),
path('get_noticia/', views.NoticiaViewSet.as_view()),
path('get_secciones/', views.SeccionViewSet.as_view()),
path('enviar_correo/', views.EnviarCorreo.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns) | {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,145 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/migrations/0003_auto_20201016_1636.py | # Generated by Django 3.1 on 2020-10-16 21:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('servicios_institucion', '0002_secciones_educativas_titulo'),
]
operations = [
migrations.AddField(
model_name='secciones_educativas',
name='horario',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='secciones_educativas',
name='jornada',
field=models.CharField(blank=True, max_length=10, null=True),
),
]
| {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,146 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/serializers.py | from .models import Colegio, Pagina_inicio, Red_social, Secciones_educativas, Telefono, Noticia
from rest_framework import serializers
class ColegioSerializer(serializers.ModelSerializer):
class Meta:
model = Colegio
fields = '__all__'
class PaginaSerializer(serializers.ModelSerializer):
class Meta:
model = Pagina_inicio
fields = '__all__'
class NoticiaSerializer(serializers.ModelSerializer):
class Meta:
model = Noticia
fields = '__all__'
class RedSerializer(serializers.ModelSerializer):
class Meta:
model = Red_social
fields = '__all__'
class TelefonoSerializer(serializers.ModelSerializer):
class Meta:
model = Telefono
fields = '__all__'
class SeccionSerializer(serializers.ModelSerializer):
class Meta:
model = Secciones_educativas
fields = '__all__' | {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,147 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/migrations/0004_noticia_titulo.py | # Generated by Django 3.1 on 2020-10-16 22:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('servicios_institucion', '0003_auto_20201016_1636'),
]
operations = [
migrations.AddField(
model_name='noticia',
name='titulo',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,148 | bryantutiven2/instituto_angular | refs/heads/main | /backend/servicios_institucion/migrations/0001_initial.py | # Generated by Django 3.1 on 2020-10-15 19:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Colegio',
fields=[
('id_colegio', models.AutoField(primary_key=True, serialize=False, unique=True)),
('nombre', models.CharField(max_length=100)),
('direccion', models.CharField(max_length=100)),
('email', models.EmailField(blank=True, default=None, max_length=100, null=True)),
('logo', models.ImageField(blank=True, max_length=200, null=True, upload_to='imagen')),
('provincia', models.CharField(max_length=50)),
('canton', models.CharField(max_length=50)),
('parroquia', models.CharField(max_length=10)),
('tipo_educacion', models.CharField(max_length=50)),
('tipo_unidad', models.CharField(max_length=50)),
('zona', models.CharField(max_length=100)),
('regimen_escolar', models.CharField(max_length=100)),
('educacion', models.CharField(max_length=100)),
('nivel_educativo', models.CharField(max_length=50)),
('modalidad', models.CharField(max_length=50)),
('num_estudiantes', models.IntegerField()),
('num_docentes', models.IntegerField()),
('codigo_amie', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Telefono',
fields=[
('id_telefono', models.AutoField(primary_key=True, serialize=False, unique=True)),
('numero', models.CharField(max_length=15)),
('id_colegio', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='servicios_institucion.colegio')),
],
),
migrations.CreateModel(
name='Secciones_educativas',
fields=[
('id_seccion', models.AutoField(primary_key=True, serialize=False, unique=True)),
('texto', models.CharField(max_length=500)),
('imagen', models.ImageField(blank=True, max_length=200, null=True, upload_to='imagen')),
('id_colegio', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='servicios_institucion.colegio')),
],
),
migrations.CreateModel(
name='Red_social',
fields=[
('id_red', models.AutoField(primary_key=True, serialize=False, unique=True)),
('nombre', models.CharField(max_length=15)),
('link', models.CharField(max_length=200)),
('id_colegio', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='servicios_institucion.colegio')),
],
),
migrations.CreateModel(
name='Pagina_inicio',
fields=[
('id_pagina', models.AutoField(primary_key=True, serialize=False, unique=True)),
('imagen', models.ImageField(blank=True, max_length=200, null=True, upload_to='imagen')),
('id_colegio', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='servicios_institucion.colegio')),
],
),
migrations.CreateModel(
name='Noticia',
fields=[
('id_noticia', models.AutoField(primary_key=True, serialize=False, unique=True)),
('texto', models.CharField(max_length=1000)),
('imagen', models.ImageField(blank=True, max_length=200, null=True, upload_to='imagen')),
('id_colegio', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='servicios_institucion.colegio')),
],
),
]
| {"/backend/servicios_institucion/views.py": ["/backend/servicios_institucion/models.py", "/backend/servicios_institucion/serializers.py"], "/backend/servicios_institucion/admin.py": ["/backend/servicios_institucion/models.py"], "/backend/servicios_institucion/serializers.py": ["/backend/servicios_institucion/models.py"]} |
42,152 | raselTarikul/simulation | refs/heads/master | /src/gridsingularity/apps/simulation/models.py | from django.db import models
from gridsingularity.apps.base.models import BaseModel
# Create your models here.
class SimulationResult(BaseModel):
""" This Class represent the simulation result """
active = models.DecimalField(max_digits=5, decimal_places=2)
reactive = models.DecimalField(max_digits=5, decimal_places=2)
def __str__(self):
return str(self.active) + ' ' + str(self.reactive)
| {"/src/gridsingularity/apps/simulation/serializers.py": ["/src/gridsingularity/apps/simulation/models.py"], "/src/gridsingularity/apps/simulation/urls.py": ["/src/gridsingularity/apps/simulation/views.py"], "/src/gridsingularity/apps/simulation/views.py": ["/src/gridsingularity/apps/simulation/models.py", "/src/gridsingularity/apps/simulation/serializers.py"]} |
42,153 | raselTarikul/simulation | refs/heads/master | /src/gridsingularity/apps/simulation/serializers.py | from rest_framework import serializers
from .models import SimulationResult
class SimulationResultSerializer(serializers.ModelSerializer):
class Meta:
model = SimulationResult
fields = ('id', 'active', 'reactive')
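# Usage sketch (illustration, not part of the original file):
# serializer = SimulationResultSerializer(data={'active': 0.5, 'reactive': 0.2})
# serializer.is_valid()  # -> True
# serializer.save()      # persists a SimulationResult row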
| {"/src/gridsingularity/apps/simulation/serializers.py": ["/src/gridsingularity/apps/simulation/models.py"], "/src/gridsingularity/apps/simulation/urls.py": ["/src/gridsingularity/apps/simulation/views.py"], "/src/gridsingularity/apps/simulation/views.py": ["/src/gridsingularity/apps/simulation/models.py", "/src/gridsingularity/apps/simulation/serializers.py"]} |
42,154 | raselTarikul/simulation | refs/heads/master | /src/gridsingularity/apps/simulation/urls.py | from django.urls import path
from .views import CreateSimulation, GetSimulation, GetSimulationActive, GetSimulationReActive
urlpatterns = [
path('simulations/', CreateSimulation.as_view(),
name='create-simulation'),
path('simulations/<int:id>/', GetSimulation.as_view(),
name='get-simulation'),
path('simulations/<int:id>/active/', GetSimulationActive.as_view(),
name='get-simulation-active'),
path('simulations/<int:id>/reactive/', GetSimulationReActive.as_view(),
name='get-simulation-reactive'),
]
| {"/src/gridsingularity/apps/simulation/serializers.py": ["/src/gridsingularity/apps/simulation/models.py"], "/src/gridsingularity/apps/simulation/urls.py": ["/src/gridsingularity/apps/simulation/views.py"], "/src/gridsingularity/apps/simulation/views.py": ["/src/gridsingularity/apps/simulation/models.py", "/src/gridsingularity/apps/simulation/serializers.py"]} |
42,155 | raselTarikul/simulation | refs/heads/master | /src/gridsingularity/exceptions/exceptions.py | from rest_framework.exceptions import APIException
class NotFoundException(APIException):
status_code = 404
| {"/src/gridsingularity/apps/simulation/serializers.py": ["/src/gridsingularity/apps/simulation/models.py"], "/src/gridsingularity/apps/simulation/urls.py": ["/src/gridsingularity/apps/simulation/views.py"], "/src/gridsingularity/apps/simulation/views.py": ["/src/gridsingularity/apps/simulation/models.py", "/src/gridsingularity/apps/simulation/serializers.py"]} |
42,156 | raselTarikul/simulation | refs/heads/master | /src/gridsingularity/apps/simulation/tests.py | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from gridsingularity.apps.simulation.models import SimulationResult
class CreateSimulationTest(APITestCase):
"""
    Test create simulation
"""
    def test_create_simulation(self):
"""
Test if we get 200 response on post
"""
url = reverse('create-simulation')
data = {}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(SimulationResult.objects.count(), 1)
class GetSimulationTest(APITestCase):
"""
Test get Simulation
"""
def setUp(self):
SimulationResult.objects.create(active=0.50, reactive=0.2)
    def test_get_simulation(self):
        """
        Test that the object is fetched by id successfully
"""
response = self.client.get('/api/v1/simulations/1/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['active'], '0.50')
self.assertEqual(response.data['reactive'], '0.20')
    def test_get_simulation_404(self):
response = self.client.get('/api/v1/simulations/2/', format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class GetSimulationActiveTest(APITestCase):
"""
Test get Simulation
"""
def setUp(self):
SimulationResult.objects.create(active=0.50, reactive=0.2)
    def test_get_simulation(self):
        """
        Test that the object is fetched by id successfully
"""
response = self.client.get('/api/v1/simulations/1/active/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['active'], '0.50')
    def test_get_simulation_404(self):
response = self.client.get('/api/v1/simulations/2/active/', format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class GetSimulationReActiveTest(APITestCase):
"""
Test get Simulation
"""
def setUp(self):
SimulationResult.objects.create(active=0.50, reactive=0.2)
    def test_get_simulation(self):
        """
        Test that the object is fetched by id successfully
"""
response = self.client.get('/api/v1/simulations/1/reactive/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['reactive'], '0.20')
    def test_get_simulation_404(self):
response = self.client.get('/api/v1/simulations/2/reactive/', format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| {"/src/gridsingularity/apps/simulation/serializers.py": ["/src/gridsingularity/apps/simulation/models.py"], "/src/gridsingularity/apps/simulation/urls.py": ["/src/gridsingularity/apps/simulation/views.py"], "/src/gridsingularity/apps/simulation/views.py": ["/src/gridsingularity/apps/simulation/models.py", "/src/gridsingularity/apps/simulation/serializers.py"]} |
42,157 | raselTarikul/simulation | refs/heads/master | /src/gridsingularity/apps/simulation/views.py | from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.core.exceptions import ObjectDoesNotExist
from gridsingularity.utils import sim
from gridsingularity.exceptions.exceptions import NotFoundException
from .models import SimulationResult
from .serializers import SimulationResultSerializer
class CreateSimulation(APIView):
"""
Api view for Create simulation
"""
def post(self, request):
"""
        Accept the request and start a simulation
Parameters:
request : request with the payloads
Returns:
response: Return a json object
"""
active, reactive = sim.run_simulation()
serializer = SimulationResultSerializer(data={'active': active, 'reactive': reactive})
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_406_NOT_ACCEPTABLE)
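    # Illustration (assumed shape, consistent with this repo's tests):
    # POST /api/v1/simulations/ responds with JSON such as
    # {"id": 1, "active": "0.50", "reactive": "0.20"}.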
class GetSimulation(APIView):
"""
Api view for Get simulation result
"""
def get(self, request, id):
"""
Return simulation result
Returns:
response: Return a json object
"""
        # Fetch the simulation by id
try:
simulation = SimulationResult.objects.get(id=id)
serializer = SimulationResultSerializer(simulation)
return Response(serializer.data)
except ObjectDoesNotExist:
raise NotFoundException('Simulation not found')
class GetSimulationActive(APIView):
"""
Api view for Get simulation result active only
"""
def get(self, request, id):
"""
        Return the simulation result, active component only
Returns:
response: Return a json object
"""
        # Fetch the simulation by id
try:
simulation = SimulationResult.objects.get(id=id)
serializer = SimulationResultSerializer(simulation)
return Response({'active': serializer.data['active'] })
except ObjectDoesNotExist:
raise NotFoundException('Simulation not found')
class GetSimulationReActive(APIView):
"""
Api view for Get simulation result reactive only
"""
def get(self, request, id):
"""
        Return the simulation result, reactive component only
Returns:
response: Return a json object
"""
        # Fetch the simulation by id
try:
simulation = SimulationResult.objects.get(id=id)
serializer = SimulationResultSerializer(simulation)
return Response({'reactive': serializer.data['reactive'] })
except ObjectDoesNotExist:
raise NotFoundException('Simulation not found')
| {"/src/gridsingularity/apps/simulation/serializers.py": ["/src/gridsingularity/apps/simulation/models.py"], "/src/gridsingularity/apps/simulation/urls.py": ["/src/gridsingularity/apps/simulation/views.py"], "/src/gridsingularity/apps/simulation/views.py": ["/src/gridsingularity/apps/simulation/models.py", "/src/gridsingularity/apps/simulation/serializers.py"]} |
42,158 | raselTarikul/simulation | refs/heads/master | /src/gridsingularity/apps/base/models.py | from django.db import models
# Create your models here.
class BaseModel(models.Model):
"""Abstract model that defines a set of fields which are common to all
models"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
| {"/src/gridsingularity/apps/simulation/serializers.py": ["/src/gridsingularity/apps/simulation/models.py"], "/src/gridsingularity/apps/simulation/urls.py": ["/src/gridsingularity/apps/simulation/views.py"], "/src/gridsingularity/apps/simulation/views.py": ["/src/gridsingularity/apps/simulation/models.py", "/src/gridsingularity/apps/simulation/serializers.py"]} |
42,170 | IvanRmz/FSDI114 | refs/heads/master | /assignment2.py |
def reverseStr(string):
length = len(string)
if length == 1:
return string
return string[length-1] + reverseStr(string[0:length-1])
def productOfItems(values):
    # For each index, multiply together every element except the one at it (O(n^2)).
    lengthOfList = len(values)
    newList = []
    for i in range(lengthOfList):
        newList.append(1)
        for j in range(lengthOfList):
            if i == j:
                continue
            else:
                newList[i] *= values[j]
    return newList
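# Illustrative O(n) alternative (not part of the original assignment): instead
# of the nested loop above, build prefix products in one pass and multiply in
# suffix products in a second pass.
def productOfItemsLinear(values):
    length = len(values)
    result = [1] * length
    prefix = 1
    for i in range(length):
        # result[i] holds the product of values[0..i-1]
        result[i] = prefix
        prefix *= values[i]
    suffix = 1
    for i in range(length - 1, -1, -1):
        # multiply in the product of values[i+1..]
        result[i] *= suffix
        suffix *= values[i]
    return result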
if __name__ == "__main__":
print(reverseStr("Ivan"))
print(productOfItems([1,2,3,4])) | {"/main.py": ["/MyQueue.py"]} |
42,171 | IvanRmz/FSDI114 | refs/heads/master | /assignment1.py | def areAnagrams(string_one, string_two):
if len(string_one) != len(string_two):
return False
for character in string_one:
if character not in string_two:
break
string_two = string_two.replace(character, "", 1)
return len(string_two) == 0
if __name__ == "__main__":
str1 = "cars"
str2 = "scar"
print(f'Are anagrams ( {str1} - {str2} )? = {areAnagrams(str1,str2)}') | {"/main.py": ["/MyQueue.py"]} |
42,172 | IvanRmz/FSDI114 | refs/heads/master | /MyQueue.py | class Queue:
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def enqueue(self,item):
self.items.insert(0, item)
def dequeue(self):
return self.items.pop()
def size(self):
return len(self.items)
class Queue2Stacks:
def __init__(self):
self.stack1 = []
self.stack2 = []
def enqueue(self,element):
self.stack1.append(element)
def dequeue(self):
if not self.stack2:
while self.stack1:
self.stack2.append(self.stack1.pop())
        if len(self.stack2) == 0:
            return "The queue is empty"
return self.stack2.pop()
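# Usage sketch (illustration, not part of the original file): each element
# moves between the two stacks at most once, so dequeue is amortized O(1).
# q = Queue2Stacks()
# for i in range(3):
#     q.enqueue(i)
# q.dequeue()  # -> 0 (FIFO order preserved)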
| {"/main.py": ["/MyQueue.py"]} |
42,173 | IvanRmz/FSDI114 | refs/heads/master | /MyLinkedList.py | class Node:
def __init__(self, data):
self.data = data
self.next = None
class DLNode(Node):
    # Doubly linked node: extends Node with a prev pointer.
    def __init__(self, data):
        self.prev = None
        super().__init__(data)
class LinkedList:
def __init__(self):
self.head = None
    def add(self, data, prev_node=None, position=None):
        new_node = Node(data)
        if not self.head:
            self.head = new_node
            return True
        if prev_node:
            # Insert right after the given node.
            temp = prev_node.next
            prev_node.next = new_node
            new_node.next = temp
            return True
        if position is not None:
            # Insert at the given index; index 0 becomes the new head.
            if position == 0:
                new_node.next = self.head
                self.head = new_node
                return True
            prev = self.find_index(index=position - 1)
            new_node.next = prev.next
            prev.next = new_node
            return True
        # Default: append at the tail.
        last = self.find_index()
        last.next = new_node
        return True
    def find_index(self, index=None):
        # Return the node at `index`, or the last node when index is None.
        last = self.head
        counter = 0
        while last.next:
            if counter == index:
                break
            last = last.next
            counter += 1
        return last
    def removeNode(self, data=None, position=None, prev_node=None):
        if not self.head or (not data and not position and not prev_node):
            return False
        last = self.head
        if prev_node:
            # Unlink the node that follows prev_node.
            temp = prev_node.next
            next_node = temp.next
            prev_node.next = next_node
            return True
        if data:
            if last.data == data:
                # Removing the head: advance the head pointer itself.
                self.head = last.next
                return True
            while last.next and last.next.data != data:
                last = last.next
            if last.next and last.next.data == data:
                last.next = last.next.next
                return True
            else:
                return False
        if position:
            counter = 0
            while last.next and counter < position-1:
                last = last.next
                counter+=1
            if last.next and counter == position-1:
                last.next = last.next.next
                return True
            return False
def printLinkedList(self):
print("List")
list = ''
temp = self.head
while temp.next:
list += f'{temp.data} -> '
temp = temp.next
list += f'{temp.data}'
print(list)
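# Usage sketch (illustration, not part of the original file):
# ll = LinkedList()
# for value in (1, 2, 3):
#     ll.add(value)          # appends: 1 -> 2 -> 3
# ll.add(0, position=0)      # new head: 0 -> 1 -> 2 -> 3
# ll.removeNode(data=2)      # 0 -> 1 -> 3
# ll.printLinkedList()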
class DLList:
def __init__(self):
self.head = None
self.tail = None
def add(self, data):
new_node = DLNode(data)
if not self.head:
self.head = new_node
self.tail = new_node
return True
self.tail.next = new_node
new_node.prev = self.tail
self.tail = new_node
return True
def insert_after(self, prev_node, data):
new_node = DLNode(data)
temp = prev_node.next
prev_node.next = new_node
new_node.prev = prev_node
if temp:
new_node.next = temp
temp.prev = new_node
if not temp:
self.tail = new_node
return True
    def remove(self, data):
        if not self.head:
            return False
        node = self.head
        while node.next and node.data != data:
            node = node.next
        if node.data == data:
            prev_node = node.prev
            next_node = node.next
            if prev_node:
                prev_node.next = next_node
            else:
                # Removing the head: move the head pointer forward.
                self.head = next_node
            if next_node:
                next_node.prev = prev_node
            else:
                # Removing the tail: move the tail pointer back.
                self.tail = prev_node
            return True
        return False
def printDLL(self):
print("DLL")
list = ''
temp = self.head
while temp.next:
list += f'{temp.data} <-> '
temp = temp.next
list += f'{temp.data}'
print(list) | {"/main.py": ["/MyQueue.py"]} |
42,174 | IvanRmz/FSDI114 | refs/heads/master | /main.py | from MyStack import Stack
from MyQueue import Queue2Stacks
def reverseStr(string):
stack = Stack()
for character in string:
stack.push(character)
newStr = ''
while stack.size() > 0:
newStr += stack.pop()
return newStr
specialCase = {
'[' : ']',
']' : '[',
'(' : ')',
')' : '(',
'{' : '}',
'}' : '{',
}
def balance_check(s):
    # Stack-based bracket matching: push openers, pop and compare on closers.
    # (A mirror-comparison approach misjudges inputs such as "[]()".)
    if len(s) == 0 or len(s)%2 != 0:
        return False
    stack = Stack()
    for char in s:
        if char in '([{':
            stack.push(char)
        elif char in ')]}':
            # specialCase maps each closer to its matching opener
            if stack.size() == 0 or stack.pop() != specialCase[char]:
                return False
    return stack.size() == 0
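# Examples (illustration): balance_check("[(dd)]") -> True,
# balance_check("[]()") -> True, balance_check("(]") -> False,
# balance_check("(((") -> False (odd length / unmatched openers).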
def test():
q = Queue2Stacks()
for i in range(5):
q.enqueue(i)
for i in range(5):
print(q.dequeue())
def fact(n):
if n == 0:
return 1
return n * fact(n-1)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
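# Illustrative variant (not part of the original file): the naive fib above
# recomputes subproblems exponentially; caching results makes it linear.
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_memo(n):
    if n < 2:
        return n
    return fib_memo(n - 1) + fib_memo(n - 2)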
if __name__ == "__main__":
# string = "Ivan"
# print(f'Reverse ({string}) = {reverseStr("Ivan")}')
# stringToBalanceCheck = "[(dd)]"
# print(f'String to check balance ({stringToBalanceCheck}) = {balance_check(stringToBalanceCheck)}')
# test()
n = 5
print(f'Factorial of {n} = {fact(n)}')
print(f'Fibonacii of {n} = {fib(n)}') | {"/main.py": ["/MyQueue.py"]} |
42,176 | fs6/ha_cloud_music | refs/heads/master | /custom_components/ha_cloud_music/api_voice.py | import re
class ApiVoice():
def __init__(self, hass, api_music):
self.hass = hass
self.api_music = api_music
async def text_event(self,event):
hass = self.hass
_text = event.data.get('text')
        #_log_info('listening to the voice assistant text: ' + _text)
        # "I want to hear xxx's songs" (我想听xxx的歌)
pattern = re.compile(r"我想听(.+)的歌")
singer = pattern.findall(_text)
if len(singer) == 1:
            # Now playing xxx's songs
singerName = singer[0]
            # Search the singer's hot songs and play them
await self.api_music.play_singer_hotsong(singerName)
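            # Illustration: for _text = '我想听周杰伦的歌' the regex above captures
            # ['周杰伦'], so that singer's hot songs are searched and played.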
        # "Play radio station xxxx" (播放电台 xxxx)
if _text.find('播放电台') == 0:
_name = _text.split('播放电台')[1]
await self.api_music.play_dj_hotsong(_name)
        # "Play playlist xxxx" (播放歌单 xxxx)
if _text.find('播放歌单') == 0:
_name = _text.split('播放歌单')[1]
await self.api_music.play_list_hotsong(_name)
        # "Play song xxxx" (播放歌曲 xxxx)
if _text.find('播放歌曲') == 0:
_name = _text.split('播放歌曲')[1]
await self.api_music.play_song(_name)
        # Parse playback-control commands
if '下一曲' == _text:
await hass.services.async_call('media_player', 'media_next_track', {'entity_id': 'media_player.ha_cloud_music'})
elif '上一曲' == _text:
await hass.services.async_call('media_player', 'media_previous_track', {'entity_id': 'media_player.ha_cloud_music'})
elif '播放音乐' == _text:
await hass.services.async_call('media_player', 'media_play', {'entity_id': 'media_player.ha_cloud_music'})
elif '暂停音乐' == _text:
await hass.services.async_call('media_player', 'media_pause', {'entity_id': 'media_player.ha_cloud_music'}) | {"/custom_components/ha_cloud_music/media_player.py": ["/custom_components/ha_cloud_music/api_const.py", "/custom_components/ha_cloud_music/api_media.py", "/custom_components/ha_cloud_music/api_voice.py", "/custom_components/ha_cloud_music/api_tts.py"], "/custom_components/ha_cloud_music/api_media.py": ["/custom_components/ha_cloud_music/api_vlc.py"], "/custom_components/ha_cloud_music/api_tts.py": ["/custom_components/ha_cloud_music/api_const.py"]} |
42,177 | fs6/ha_cloud_music | refs/heads/master | /custom_components/ha_cloud_music/media_player.py | import json
import os
import logging
import voluptuous as vol
import requests
import time
import datetime
import random
import re
import urllib.parse
import uuid
import math
import base64
import asyncio
################### API definitions ###################
# Constants
from .api_const import DOMAIN, VERSION, ROOT_PATH, TrueOrFalse, write_config_file, read_config_file
# NetEase Cloud Music API
from .api_music import ApiMusic
# Gateway view
from .api_view import ApiView
# Media API
from .api_media import ApiMedia
# Voice API
from .api_voice import ApiVoice
# TTS API
from .api_tts import ApiTTS
################### API definitions ###################
from homeassistant.helpers.entity import Entity
from homeassistant.helpers import config_validation as cv, intent
from homeassistant.helpers.event import track_time_interval, async_call_later
from homeassistant.components.http import HomeAssistantView
import aiohttp
from aiohttp import web
from aiohttp.web import FileResponse
from typing import Optional
from homeassistant.helpers.state import AsyncTrackStates
from urllib.request import urlopen, quote
from homeassistant.core import Event
from homeassistant.components.media_player import (
MediaPlayerDevice)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,MEDIA_TYPE_URL, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_ON, SUPPORT_TURN_OFF,
SUPPORT_PLAY_MEDIA, SUPPORT_STOP, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_SELECT_SOURCE, SUPPORT_CLEAR_PLAYLIST, SUPPORT_STOP,
SUPPORT_SELECT_SOUND_MODE, SUPPORT_SHUFFLE_SET, SUPPORT_SEEK, SUPPORT_VOLUME_STEP)
from homeassistant.const import (
CONF_NAME, STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_OFF, STATE_UNAVAILABLE, EVENT_HOMEASSISTANT_STOP)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.helpers import discovery, device_registry as dr
_LOGGER = logging.getLogger(__name__)
################### Media player ##########################
SUPPORT_VLC = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_STOP | SUPPORT_SELECT_SOUND_MODE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_PLAY_MEDIA | SUPPORT_PLAY | SUPPORT_STOP | SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK | SUPPORT_SELECT_SOURCE | SUPPORT_CLEAR_PLAYLIST | \
SUPPORT_SHUFFLE_SET | SUPPORT_SEEK | SUPPORT_VOLUME_STEP
# Timer interval
TIME_BETWEEN_UPDATES = datetime.timedelta(seconds=1)
################### Media player ##########################
def setup_platform(hass, config, add_entities, discovery_info=None):
    ################### System configuration ###################
    # Sidebar title and icon
    sidebar_title = config.get("sidebar_title", "云音乐")
    sidebar_icon = config.get("sidebar_icon","mdi:music")
    # NetEase Cloud Music user ID
    uid = str(config.get("uid", ''))
    # Username and password
    user = str(config.get("user", ''))
    password = str(config.get("password", ''))
    # Display mode (full screen: fullscreen)
    show_mode = config.get("show_mode", "default")
    # NetEase Cloud Music API base URL
    api_url = str(config.get("api_url", '')).strip('/')
    # TTS-related configuration
    tts_before_message = config.get("tts_before_message", '')
    tts_after_message = config.get("tts_after_message", '')
    tts_mode = config.get("tts_mode", 4)
    #### Enable/disable switches ####
    # Whether to enable the voice-text handler (enabled by default)
    is_voice = config.get('is_voice', True)
    # Whether to enable notifications (enabled by default)
    is_notify = config.get('is_notify', True)
    # Whether to enable debug mode (enabled by default)
    is_debug = config.get('is_debug', True)
    # Validate the configuration
    if api_url == '':
        _LOGGER.error("检测到未配置api_url参数!")
        return
    ################### System configuration ###################
    ################### Entity definition ###################
    # Player instance
    mp = MediaPlayer(hass)
    mp.api_media = ApiMedia(mp, {
        # Whether to send notifications
        'is_notify': is_notify,
        # Whether to log debug output
        'is_debug': is_debug,
        '_LOGGER': _LOGGER
    })
mp.api_tts = ApiTTS(mp,{
'tts_before_message': tts_before_message,
'tts_after_message': tts_after_message,
'tts_mode': tts_mode
})
mp.api_music = ApiMusic(mp, {
'api_url': api_url,
'uid': uid,
'user': user,
'password': password
})
    # Log in
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
loop = asyncio.get_event_loop()
loop.run_until_complete(mp.api_music.login())
hass.data[DOMAIN] = mp
    # Add the entity
add_entities([mp])
    ################### Entity definition ###################
    ################### Register static directory and API gateway ###################
    local = hass.config.path("custom_components/ha_cloud_music/dist")
    if os.path.isdir(local):
        # Register the static directory
        hass.http.register_static_path(ROOT_PATH, local, False)
        # Register the gateway API
        hass.http.register_view(ApiView)
        # Add the state card
        hass.components.frontend.add_extra_js_url(hass, ROOT_PATH + '/data/more-info-ha_cloud_music.js')
        # Register the sidebar panel
hass.components.frontend.async_register_built_in_panel(
"iframe",
sidebar_title,
sidebar_icon,
DOMAIN,
{"url": ROOT_PATH + "/index.html?ver=" + VERSION
+ "&show_mode=" + show_mode
+ "&uid=" + mp.api_music.uid},
require_admin=True
)
    ################### Register static directory and API gateway ###################
    ################### Register services ###################
    # Register service: load a playlist
    hass.services.register(DOMAIN, 'load', mp.load_songlist)
    # Register service: pick a song
    hass.services.register(DOMAIN, 'pick', mp.pick_song)
    # Register service: configuration
    hass.services.register(DOMAIN, 'config', mp.config)
    # Register service: TTS
    hass.services.register(DOMAIN, 'tts', mp.api_tts.speak)
    hass.services.register(DOMAIN, 'tts_clear', mp.api_tts.clear)
    # Listen for text from the voice assistant
    if is_voice == True:
        _ApiVoice = ApiVoice(hass, mp.api_music)
        hass.bus.listen('ha_voice_text_event', _ApiVoice.text_event)
    ################### Register services ###################
    # Log plugin information
_LOGGER.info('''
-------------------------------------------------------------------
ha_cloud_music云音乐插件【作者QQ:635147515】
版本:''' + VERSION + '''
介绍:这是一个网易云音乐的HomeAssistant播放器插件
项目地址:https://github.com/shaonianzhentan/ha_cloud_music
配置信息:
API_URL:''' + api_url + '''
内置VLC播放器:''' + TrueOrFalse(mp.api_media.supported_vlc, '支持', '不支持') + '''
侧边栏名称:''' + sidebar_title + '''
侧边栏图标:''' + sidebar_icon + '''
显示模式:''' + TrueOrFalse(show_mode == 'fullscreen', '全局模式', '默认模式') + '''
用户ID:''' + mp.api_music.uid + '''
-------------------------------------------------------------------''')
return True
################### Media player ##########################
class MediaPlayer(MediaPlayerDevice):
"""Representation of a vlc player."""
def __init__(self, hass):
"""Initialize the vlc device."""
self._hass = hass
self.music_playlist = None
self.music_index = 0
self._name = DOMAIN
self._media_image_url = None
self._media_url = None
self._media_title = None
self._media_name = None
self._media_artist = None
self._media_album_name = None
self._volume = None
self._state = STATE_IDLE
self._source_list = None
self._source = None
self._sound_mode_list = None
self._sound_mode = None
        # Play mode (0: loop list, 1: sequential, 2: shuffle, 3: repeat one)
self._play_mode = 0
self._media_playlist = None
self._media_position_updated_at = None
self._media_position = 0
self._media_duration = None
        # Error counter
self.error_count = 0
self.loading = False
        # Timer action counter
self.next_count = 0
self._media = None
        # Whether the timer is enabled
self._timer_enable = True
self._notify = True
        # Timer
track_time_interval(hass, self.interval, TIME_BETWEEN_UPDATES)
        # Read the saved config file
music_playlist = read_config_file('music_playlist.json')
if music_playlist != None:
self._media_playlist = json.dumps(music_playlist)
self.music_playlist = music_playlist
def interval(self, now):
        # If currently playing, accumulate progress (the timer is set to 1s, but for some reason it fires every 2s)
if self._media != None:
            # Built-in player logic
if self._sound_mode == "内置VLC播放器":
if self._timer_enable == True:
                    # If the built-in player's state is off, playback has finished
if (self._source_list != None and len(self._source_list) > 0
and self._media.state == STATE_OFF
and self.next_count > 0):
self.next_count = -15
self.media_end_next()
                    # Increment the counter
self.next_count += 1
if self.next_count > 100:
self.next_count = 100
                # Read the current position
self._media_position = int(self._media.attributes['media_position'])
else:
self.api_media.debug('当前时间:%s,当前进度:%s,总进度:%s', self._media_position_updated_at, self._media_position, self.media_duration)
self.api_media.debug('源播放器状态 %s,云音乐状态:%s', self._media.state, self._state)
                # Next-track logic for players that expose no position
if self._timer_enable == True:
                    # If the progress bar has finished, go to the next track
                    # After skipping, no further action for 15 seconds
if (self._source_list != None
and len(self._source_list) > 0
and self.media_duration > 3
and self.next_count > 0):
                        # MPD next-track logic
if self.player_type == "mpd":
_isEnd = self.media_duration - self.media_position <= 3
if _isEnd == True:
self.next_count = -15
                                # Stop first, then play
self._hass.services.call('media_player', 'media_stop', {"entity_id": self._sound_mode}, True)
self.api_media.log('MPD播放器更新 下一曲')
self.media_end_next()
else:
                            # If total duration minus current position is under 11, go to the next track (11 is the next update interval)
_isEnd = self.media_duration - self.media_position <= 11
                            # When progress has run out, next track
if _isEnd == True:
self.next_count = -15
self.api_media.log('【播放器更新-下一曲】总时长:%s,当前进度:%s', self.media_duration, self.media_position)
self.media_end_next()
                    # Increment the counter
self.next_count += 1
if self.next_count > 100:
self.next_count = 100
self.update()
                # If the source exposes a position, use it
if 'media_position' in self._media.attributes:
                    # Check whether this is a Kodi player
if self.player_type == "kodi":
self._hass.services.call('homeassistant', 'update_entity', {"entity_id": self._sound_mode})
if 'media_position' in self._media.attributes:
self._media_position = int(self._media.attributes['media_position']) + 5
else:
                        # Work around MPD's odd position format
_media_position = self._media.attributes['media_position']
                        # If the position is a string containing a colon
if isinstance(_media_position, str) and ':' in _media_position:
arr = _media_position.split(':')
self._media_position = int(arr[0]) + 11
else:
self._media_position = int(self._media.attributes['media_position'])
                # If currently playing, accumulate the position locally...
elif self._state == STATE_PLAYING and self._media_position_updated_at != None:
_media_position = self._media_position
_today = (now - self._media_position_updated_at)
_seconds = _today.seconds + _today.microseconds / 1000000.0
self.api_media.debug('当前相差的秒:%s', _seconds)
self._media_position += _seconds
                # Update the position timestamp
self._media_position_updated_at = now
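    # Worked example (illustration): with the player in STATE_PLAYING and
    # ~2.0s elapsed since the previous tick, _media_position advances by
    # ~2.0 even when the source player reports no position of its own.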
def update(self):
"""Get the latest details from the device."""
if self._sound_mode == None:
self.init_sound_mode()
return False
        # Refresh if the player list has changed
self.update_sound_mode_list()
        # Using the built-in VLC player
if self._sound_mode == "内置VLC播放器":
self.api_media.init_vlc_player()
else:
self.api_media.release_vlc_player()
        # Get the source player
self._media = self._hass.states.get(self._sound_mode)
        # If the states differ, refresh the source player
if self._state != self._media.state:
self._hass.services.call('homeassistant', 'update_entity', {"entity_id": self._sound_mode})
self._hass.services.call('homeassistant', 'update_entity', {"entity_id": 'media_player.'+DOMAIN})
self._media_duration = self.media_duration
self._state = self._media.state
return True
@property
def state_attributes(self):
"""Return the state attributes."""
attr = super().state_attributes
attr.update({'custom_ui_more_info': 'more-info-ha_cloud_music'})
return attr
    # Determine the type of the linked source player
@property
def player_type(self):
if self._media != None:
attr = self._media.attributes
if 'supported_features' in attr:
supported_features = attr['supported_features']
if supported_features == 54847:
return "kodi"
elif ('media_position' not in attr or 'media_duration' not in attr):
                    # No position or no duration: treat it as MPD
return "mpd"
    # Whether the built-in player is in use
@property
def is_vlc(self):
return self._sound_mode == "内置VLC播放器"
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def registry_name(self):
"""返回实体的friendly_name属性."""
return '网易云音乐'
@property
def app_id(self):
"""ID of the current running app."""
return self._name
@property
def app_name(self):
"""Name of the current running app."""
return '网易云音乐'
@property
def media_image_url(self):
"""当前播放的音乐封面地址."""
if self._media_image_url != None:
return self._media_image_url + "?param=500y500"
return self._media_image_url
@property
def media_image_remotely_accessible(self) -> bool:
"""图片远程访问"""
return True
@property
def source_list(self):
"""Return the name of the device."""
return self._source_list
@property
def source(self):
"""Return the name of the device."""
return self._source
@property
def sound_mode_list(self):
"""Return the name of the device."""
return self._sound_mode_list
@property
def sound_mode(self):
"""Return the name of the device."""
return self._sound_mode
@property
def media_album_name(self):
"""专辑名称."""
return self._media_album_name
@property
def media_playlist(self):
"""当前播放列表"""
return self._media_playlist
@property
def media_title(self):
"""歌曲名称."""
return self._media_title
@property
def media_artist(self):
"""歌手"""
return self._media_artist
@property
def state(self):
"""Return the state of the device."""
        # If the state is off, report idle
if self._state == STATE_OFF or self._state == STATE_UNAVAILABLE:
return STATE_IDLE
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._media == None:
return None
if 'volume_level' in self._media.attributes:
return self._media.attributes['volume_level']
return 1
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
if self._media == None:
return None
if 'is_volume_muted' in self._media.attributes:
return self._media.attributes['is_volume_muted']
return False
@property
def shuffle(self):
"""随机播放开关."""
return self._play_mode == 2
@property
def media_season(self):
"""播放模式(没有找到属性,所以使用这个)"""
if self._play_mode == 1:
return '顺序播放'
elif self._play_mode == 2:
return '随机播放'
elif self._play_mode == 3:
return '单曲循环'
else:
return '列表循环'
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_VLC
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._media == None:
return None
attr = self._media.attributes
if 'media_duration' in attr:
return int(attr['media_duration'])
        # If the track has neither a duration nor a position, use the playlist entry
if ('media_duration' not in attr and 'media_position' not in attr
and self.music_playlist != None and len(self.music_playlist) > 0 and self.music_index >= 0):
music_info = self.music_playlist[self.music_index]
return int(music_info['duration'])
        # Handle MPD's odd position format
if ':' in attr['media_position']:
return int(attr['media_position'].split(':')[1])
return 0
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._media == None:
return None
return self._media_position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
if self._media == None:
return None
if 'media_position_updated_at' in self._media.attributes:
return self._media.attributes['media_position_updated_at']
return self._media_position_updated_at
def set_shuffle(self, shuffle):
"""禁用/启用 随机模式."""
if shuffle:
self._play_mode = 2
else:
self._play_mode = 0
def media_seek(self, position):
"""将媒体设置到特定位置."""
self.api_media.log('【设置播放位置】:%s', position)
self.call('media_seek', {"position": position})
def mute_volume(self, mute):
"""静音."""
self.call('volume_mute', {"is_volume_muted": mute})
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.api_media.log('【设置音量】:%s', volume)
self.call('volume_set', {"volume": volume})
def media_play(self):
"""Send play command."""
self.call('media_play')
self._state = STATE_PLAYING
def media_pause(self):
"""Send pause command."""
self.call('media_pause')
self._state = STATE_PAUSED
def media_stop(self):
"""Send stop command."""
self.call('media_stop')
self._state = STATE_IDLE
def play_media(self, media_type, media_id, **kwargs):
"""Play media from a URL or file."""
self.api_media.log('【播放列表类型】:%s', media_type)
if media_type == MEDIA_TYPE_MUSIC:
self._timer_enable = False
url = media_id
elif media_type == 'music_load':
self.music_index = int(media_id)
music_info = self.music_playlist[self.music_index]
url = self.get_url(music_info)
elif media_type == MEDIA_TYPE_URL:
self.api_media.log('加载播放列表链接:%s', media_id)
res = requests.get(media_id)
play_list = res.json()
self._media_playlist = play_list
self.music_playlist = play_list
music_info = self.music_playlist[0]
url = self.get_url(music_info)
            # Build the source list
source_list = []
for index in range(len(self.music_playlist)):
music_info = self.music_playlist[index]
source_list.append(str(index + 1) + '.' + music_info['song'] + ' - ' + music_info['singer'])
self._source_list = source_list
            # Reset the source player
self.media_stop()
self.api_media.log('绑定数据源:%s', self._source_list)
elif media_type == 'music_playlist':
self.api_media.log('初始化播放列表')
            # If it's already a list, use it directly
if isinstance(media_id, list):
self._media_playlist = json.dumps(media_id)
self.music_playlist = media_id
else:
dict = json.loads(media_id)
self._media_playlist = dict['list']
self.music_playlist = json.loads(self._media_playlist)
self.music_index = dict['index']
            # Save the playlist to a local config file
write_config_file('music_playlist.json', self.music_playlist)
music_info = self.music_playlist[self.music_index]
url = self.get_url(music_info)
            # Build the source list
source_list = []
for index in range(len(self.music_playlist)):
music_info = self.music_playlist[index]
source_list.append(str(index + 1) + '.' + music_info['song'] + ' - ' + music_info['singer'])
self._source_list = source_list
            # Reset the source player
self.media_stop()
            # Prevent an automatic next-track action
self.next_count = -15
self._timer_enable = True
else:
_LOGGER.error(
"不受支持的媒体类型 %s",media_type)
return
self.api_media.log('【当前播放音乐】【%s】:【%s】' , self._media_name, url)
        # Defaults to music; switched to video when specified
play_type = "music"
try:
if 'media_type' in music_info and music_info['media_type'] == 'video':
play_type = "video"
            # If there is no URL, skip to the next track (stop after 3 errors)
            # For cloud-music playlists, also skip when the format is neither mp3 nor m4a
elif url == None or (media_type == 'music_load' and url.find(".mp3") < 0 and url.find('.m4a') < 0):
self.api_media.notification("没有找到【" + self._media_name + "】的播放链接,自动为您跳到下一首", "load_song_url")
self.error_count = self.error_count + 1
if self.error_count < 3:
self.media_next_track()
return
else:
self.api_media.notification("正在播放【" + self._media_name + "】", "load_song_url")
except Exception as e:
print('这是一个正常的错误:', e)
        # Reset the error counter
self.error_count = 0
        # Reset playback progress
self._media_position = 0
self._media_position_updated_at = None
        # Play the music
self._media_url = url
self.call('play_media', {"url": url,"type": play_type})
# 音乐结束自动下一曲
def media_end_next(self):
playlist_count = len(self.music_playlist) - 1
# In sequential mode, stop after the last track
if self._play_mode == 1 and self.music_index >= playlist_count:
return
# In single-track repeat mode, step the index back so the same track replays
if self._play_mode == 3:
self.music_index = self.music_index - 1
# In shuffle mode, pick a random index each time
elif self._play_mode == 2:
# The index is incremented again by media_next_track below
self.music_index = random.randint(0, playlist_count)
self.media_next_track()
def media_next_track(self):
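# Advance the playlist index, reset the auto-next countdown, and load the track.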
self.music_index = self.music_index + 1
self.api_media.log('【下一曲】:%s', self.music_index)
self.next_count = -15
self.music_load()
def media_previous_track(self):
self.music_index = self.music_index - 1
self.api_media.log('【上一曲】:%s', self.music_index)
self.music_load()
def select_source(self, source):
self.api_media.log('【选择音乐】:%s', source)
# Select and play
self._state = STATE_IDLE
self.music_index = self._source_list.index(source)
self.play_media('music_load', self.music_index)
def select_sound_mode(self, sound_mode):
self._sound_mode = sound_mode
self._state = STATE_IDLE
write_config_file('sound_mode.json', {'state': self._sound_mode})
self.api_media.log('【选择源播放器】:%s', sound_mode)
def clear_playlist(self):
self.api_media.log('【重置播放器】')
self.music_playlist = None
self.music_index = 0
self._media_title = None
self._media_name = None
self._source_list = None
self._media_album_name = None
self._source = None
self._shuffle = False
self._media_image_url = None
self._media_artist = None
self._media_playlist = None
self._media_position_updated_at = None
self._media_position = 0
self._media_duration = None
self.media_stop()
# Turn off the player
def turn_off(self):
self.clear_playlist()
# Refresh the player list
def update_sound_mode_list(self):
entity_list = self._hass.states.entity_ids('media_player')
if len(entity_list) != len(self._sound_mode_list):
self.init_sound_mode()
# Read the previously saved source player
def init_sound_mode(self):
sound_mode = None
res = read_config_file('sound_mode.json')
if res is not None:
sound_mode = res['state']
# Filter out this cloud-music player itself
entity_list = self._hass.states.entity_ids('media_player')
filter_list = filter(lambda x: x.count('media_player.' + DOMAIN) == 0, entity_list)
_list = list(filter_list)
if self.api_media.supported_vlc == True:
_list.insert(0, "内置VLC播放器")
self._sound_mode_list = _list
# If the saved player is the built-in VLC player, load it directly
if sound_mode == "内置VLC播放器":
self._sound_mode = "内置VLC播放器"
self.api_media.init_vlc_player()
return
if len(self._sound_mode_list) > 0:
# Check whether a stored value exists
if sound_mode != None and self._sound_mode_list.count(sound_mode) == 1:
self._sound_mode = sound_mode
elif self.api_media.supported_vlc == True:
self._sound_mode = "内置VLC播放器"
self.api_media.init_vlc_player()
else:
self._sound_mode = self._sound_mode_list[0]
elif self.api_media.supported_vlc == True:
self._sound_mode = "内置VLC播放器"
self.api_media.init_vlc_player()
#self.api_media.log(self._sound_mode_list)
def get_url(self, music_info):
self._media_name = music_info['song'] + ' - ' + music_info['singer']
self._source = str(self.music_index + 1) + '.' + self._media_name
# Song title
self._media_title = music_info['song']
# Artist
self._media_artist = music_info['singer']
# Set the cover image
if 'image' in music_info:
self._media_image_url = music_info['image']
# Set the album name
if 'album' in music_info:
self._media_album_name = music_info['album']
# If a type was supplied, handle it accordingly
if 'type' in music_info:
if music_info['type'] == 'url':
# A directly playable audio URL was supplied
return music_info['url']
elif music_info['type'] == 'djradio' or music_info['type'] == 'cloud':
# A NetEase radio (djradio) or cloud track was supplied
return self.api_music.get_song_url(music_info['id'])
url = self.api_music.get_redirect_url(music_info['url'])
# If there is no URL, fall back to a Migu search
if url is None:
return self.api_music.migu_search(music_info['song'], music_info['singer'])
return url
def call(self, action, info = None):
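# Build the service payload and dispatch the action to the selected source player (built-in VLC or a media_player entity).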
_dict = {"entity_id": self._sound_mode}
if info != None:
if 'url' in info:
_dict['media_content_id'] = info['url']
if 'type' in info:
_dict['media_content_type'] = info['type']
if 'volume' in info:
_dict['volume_level'] = info['volume']
if 'position' in info:
_dict['seek_position'] = info['position']
# For MPD players, assign the position directly
if self.player_type == "mpd":
self._media_position = info['position']
if 'is_volume_muted' in info:
_dict['is_volume_muted'] = info['is_volume_muted']
# Invoke the service
self.api_media.log('【调用服务[' + str(self._sound_mode) + ']】%s:%s', action, _dict)
if self._sound_mode == "内置VLC播放器":
if action == "play_media":
self._media.load(info['url'])
elif action == "media_pause":
self._media.pause()
elif action == "media_play":
self._media.play()
elif action == "volume_set":
self._media.volume_set(info['volume'])
elif action == "media_seek":
self._media.seek(info['position'])
elif action == "volume_mute":
self._media.mute_volume(info['is_volume_muted'])
# After the action, force an update of this player entity
if action != "play_media":
self._hass.services.call('homeassistant', 'update_entity', {"entity_id": 'media_player.'+DOMAIN})
else:
self._hass.services.call('media_player', action, _dict)
self._hass.services.call('homeassistant', 'update_entity', {"entity_id": self._sound_mode})
self._hass.services.call('homeassistant', 'update_entity', {"entity_id": 'media_player.'+DOMAIN})
def music_load(self):
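# Load the track at the current index, wrapping the index at both ends of the playlist.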
if self.music_playlist is None:
self.api_media.log('【结束播放,没有播放列表】')
return
self._timer_enable = True
playlist_count = len(self.music_playlist)
if self.music_index >= playlist_count:
self.music_index = 0
elif self.music_index < 0:
self.music_index = playlist_count - 1
self.play_media('music_load', self.music_index)
# Set the play mode
def set_play_mode(self, _mode):
mode_names = ['列表循环', '顺序播放', '随机播放', '单曲循环']
mode_list = [0, 1, 2, 3]
if mode_list.count(_mode) == 0:
_mode = 0
self._play_mode = _mode
self.api_media.log('【设置播放模式】:%s', mode_names[_mode])
######### Services ##############
def config(self, call):
_obj = call.data
self.api_media.log('【调用内置服务】 %s', _obj)
# Set the play mode
if 'play_mode' in _obj:
self.set_play_mode(_obj['play_mode'])
# Set the TTS voice mode
if 'tts_mode' in _obj:
mode_list = [1, 2, 3, 4]
_mode = _obj['tts_mode']
if mode_list.count(_mode) == 0:
_mode = 4
self.api_tts.tts_mode = _mode
self.api_media.notification('设置TTS声音模式:' + str(_mode), 'config')
# Enable/disable notifications
if 'is_notify' in _obj:
is_notify = bool(_obj['is_notify'])
_str = TrueOrFalse(is_notify, '启用通知', '禁用通知')
# If notifications were disabled, enable them temporarily so this message is shown
if self.api_media.is_notify == False:
self.api_media.is_notify = True
self.api_media.notification(_str, 'config')
self.api_media.is_notify = is_notify
# Enable/disable debug logging
if 'is_debug' in _obj:
self.api_media.is_debug = bool(_obj['is_debug'])
self.api_media.notification(TrueOrFalse(self.api_media.is_debug, '启用日志', '禁用日志'), 'config')
# Load a playlist
def load_songlist(self, call):
list_index = 0
# If both id and type were supplied, use the newest service logic
if 'id' in call.data and 'type' in call.data:
_id = call.data['id']
if call.data['type'] == 'playlist':
_type = "playlist"
elif call.data['type'] == 'djradio':
_type = "djradio"
elif call.data['type'] == 'ximalaya':
_type = "ximalaya"
else:
self.api_media.notification("加载播放列表:type参数错误", "load_songlist")
return "type参数错误"
elif 'id' in call.data:
_id = call.data['id']
_type = "playlist"
elif 'rid' in call.data:
_id = call.data['rid']
_type = "djradio"
else:
# Neither id nor rid was supplied; bail out before _id/_type are used unbound below
self.api_media.notification("加载播放列表:缺少id参数", "load_songlist")
return "缺少id参数"
# Backward compatible with the old format
if 'list_index' in call.data:
list_index = int(call.data['list_index']) - 1
# Newer parameter name
if 'index' in call.data:
list_index = int(call.data['index']) - 1
if self.loading == True:
self.api_media.notification("正在加载歌单,请勿重复调用服务", "load_songlist")
return
self.loading = True
try:
if _type == "playlist":
self.api_media.log("【加载歌单列表】,ID:%s", _id)
# Fetch the playlist
obj = self.api_music.music_playlist(_id)
if obj != None and len(obj['list']) > 0:
_newlist = obj['list']
if list_index < 0 or list_index >= len(_newlist):
list_index = 0
self.music_index = list_index
self.play_media('music_playlist', _newlist)
self.api_media.notification("正在播放歌单【"+obj['name']+"】", "load_songlist")
else:
# Show a notification here
self.api_media.notification("没有找到id为【"+_id+"】的歌单信息", "load_songlist")
elif _type == "djradio":
self.api_media.log("【加载电台列表】,ID:%s", _id)
# Fetch the playlist
offset = 0
if list_index >= 50:
offset = math.floor((list_index + 1) / 50)
# Take the remainder within the page
list_index = list_index % 50
_list = self.api_music.djradio_playlist(_id, offset, 50)
if len(_list) > 0:
self.music_index = list_index
self.play_media('music_playlist', _list)
self.api_media.notification("正在播放专辑【" + _list[0]['album'] + "】", "load_songlist")
else:
self.api_media.notification("没有找到id为【"+_id+"】的电台信息", "load_songlist")
elif _type == 'ximalaya':
self.api_media.log("【加载喜马拉雅专辑列表】,ID:%s", _id)
# Which track on the page to play
music_index = list_index % 50
# Which page to fetch
list_index = math.floor(list_index / 50) + 1
_list = self.api_music.ximalaya_playlist(_id, list_index, 50)
if len(_list) > 0:
self.music_index = music_index
self.play_media('music_playlist', _list)
self.api_media.notification("正在播放专辑【" + _list[0]['album'] + "】", "load_songlist")
else:
self.api_media.notification("没有找到id为【"+_id+"】的专辑信息", "load_songlist")
except Exception as e:
self.api_media.log(e)
self.api_media.notification("加载歌单的时候出现了异常", "load_songlist")
finally:
# Reset the loading flag here
self.loading = False
# Request a single song by name
async def pick_song(self, call):
if 'name' in call.data:
_name = call.data['name']
self.api_media.log("【单曲点歌】,歌名:%s", _name)
await self.api_music.play_song(_name)
################### Media player ########################## | {"/custom_components/ha_cloud_music/media_player.py": ["/custom_components/ha_cloud_music/api_const.py", "/custom_components/ha_cloud_music/api_media.py", "/custom_components/ha_cloud_music/api_voice.py", "/custom_components/ha_cloud_music/api_tts.py"], "/custom_components/ha_cloud_music/api_media.py": ["/custom_components/ha_cloud_music/api_vlc.py"], "/custom_components/ha_cloud_music/api_tts.py": ["/custom_components/ha_cloud_music/api_const.py"]} |
42,178 | fs6/ha_cloud_music | refs/heads/master | /custom_components/ha_cloud_music/api_media.py | # Built-in VLC player
from .api_vlc import VlcPlayer
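# Helper that wraps notifications, debug logging and the optional built-in VLC backend.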
class ApiMedia():
def __init__(self, media, cfg):
self.hass = media._hass
self.media = media
self.is_notify = bool(cfg['is_notify'])
self.is_debug = bool(cfg['is_debug'])
self._LOGGER = cfg['_LOGGER']
# Whether VLC is supported
self._supported_vlc = None
###################### Built-in VLC player methods ######################
# Check whether VLC is supported
@property
def supported_vlc(self):
"""判断是否支持vlc模块."""
if self._supported_vlc != None:
return self._supported_vlc
try:
# Try importing vlc; if this raises, VLC is not supported
import vlc
instance = vlc.Instance()
instance.media_player_new()
instance.release()
self._supported_vlc = True
return True
except Exception as e:
self._supported_vlc = False
return False
# Initialize the built-in VLC player
def init_vlc_player(self):
try:
if self.media._media == None or hasattr(self.media._media, 'ha_cloud_music') == False:
self.media._media = VlcPlayer()
except Exception as e:
print("【初始化内置VLC播放器】出现错误", e)
# Release the vlc objects
def release_vlc_player(self):
if self.media._media != None and hasattr(self.media._media, 'ha_cloud_music') == True:
self.media._media.release()
###################### Built-in VLC player methods ######################
# Notification helper
def notification(self, message, type):
if self.is_notify == True:
self.hass.services.call('persistent_notification', 'create',
{"message": message,
"title": "云音乐",
"notification_id":
"ha-cloud-music-" + type})
###################### Debug logging ######################
# Log
def log(self, *args):
if self.is_debug == True:
self._LOGGER.info(*args)
# Debug log
def debug(self, *args):
self._LOGGER.debug(*args)
###################### Debug logging ######################
| {"/custom_components/ha_cloud_music/media_player.py": ["/custom_components/ha_cloud_music/api_const.py", "/custom_components/ha_cloud_music/api_media.py", "/custom_components/ha_cloud_music/api_voice.py", "/custom_components/ha_cloud_music/api_tts.py"], "/custom_components/ha_cloud_music/api_media.py": ["/custom_components/ha_cloud_music/api_vlc.py"], "/custom_components/ha_cloud_music/api_tts.py": ["/custom_components/ha_cloud_music/api_const.py"]} |
42,179 | fs6/ha_cloud_music | refs/heads/master | /custom_components/ha_cloud_music/api_const.py | import os, json
DOMAIN = 'ha_cloud_music'
VERSION = '2.4.1'
DOMAIN_API = '/' + DOMAIN + '-api'
ROOT_PATH = '/' + DOMAIN + '-local/' + VERSION
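# Tiny ternary helper: return trueStr when val is truthy, otherwise falseStr.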
def TrueOrFalse(val, trueStr, falseStr):
if val:
return trueStr
return falseStr
# Get the path of a config cache file
def get_config_path(name):
return os.path.join(os.path.dirname(__file__), 'dist/cache/' + name).replace('\\','/')
# Read a config file
def read_config_file(name):
fn = get_config_path(name)
if os.path.isfile(fn):
with open(fn,'r',encoding='utf-8') as f:
content = json.load(f)
return content
return None
# Write a config file
def write_config_file(name, obj):
with open(get_config_path(name),'w',encoding='utf-8') as f:
json.dump(obj,f,ensure_ascii=False) | {"/custom_components/ha_cloud_music/media_player.py": ["/custom_components/ha_cloud_music/api_const.py", "/custom_components/ha_cloud_music/api_media.py", "/custom_components/ha_cloud_music/api_voice.py", "/custom_components/ha_cloud_music/api_tts.py"], "/custom_components/ha_cloud_music/api_media.py": ["/custom_components/ha_cloud_music/api_vlc.py"], "/custom_components/ha_cloud_music/api_tts.py": ["/custom_components/ha_cloud_music/api_const.py"]} |
42,180 | fs6/ha_cloud_music | refs/heads/master | /custom_components/ha_cloud_music/binary_sensor.py | """
Purpose: determine whether the current date is a holiday.
Configuration:
binary_sensor:
- platform: ha_cloud_music
"""
import logging
import time
import requests
import json
from datetime import datetime, timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, WEEKDAYS
from homeassistant.components.binary_sensor import BinarySensorDevice
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "节假日"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Workday sensor."""
sensor_name = config.get(CONF_NAME)
_sensor = IsHolidaySensor(sensor_name)
_LOGGER.info('''
--------------------------------------------
节假日传感器 安装成功
今天是:'''+ _sensor.today +'''
--------------------------------------------
''')
add_entities(
[_sensor],
True,
)
class IsHolidaySensor(BinarySensorDevice):
"""Implementation of a Workday sensor."""
def __init__(self, name):
"""Initialize the Workday sensor."""
self._name = name
self._state = False
self._today = None
# Avoid (inauspicious activities for the day)
self._avoid = None
# Suit (auspicious activities for the day)
self._suit = None
self._holiday_name = None
self._week = None
# Five elements (wuxing)
self._wuxing = None
self._suici = None
self._nongli = None
self._gongli = None
self._fu = None
self._xi = None
self._cai = None
self._shengxiao = None
self._xingzuo = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the device."""
return self._state
@property
def today(self):
localtime = time.localtime(time.time())
return "{}-{}-{}".format(localtime.tm_year,localtime.tm_mon,localtime.tm_mday)
@property
def state_attributes(self):
"""Return the attributes of the entity."""
return {
'忌': self._avoid,
'宜': self._suit,
'今日': self.today,
'星期': self._week,
'农历': self._nongli,
'公历': self._gongli,
'生肖': self._shengxiao,
'星座': self._xingzuo,
'五行': self._wuxing,
'岁次': self._suici,
'财神方位': self._cai,
'喜神方位': self._xi,
'福神方位': self._fu,
'假日名称': self._holiday_name,
}
# Fetch detailed almanac information
def get_details(self, _date):
try:
localtime = time.localtime(_date)
self._week = ['一','二','三','四','五','六','日'][localtime.tm_wday]
_a = time.strftime("%Y/%m/%Y%m%d", localtime)
# http://www.nongli.cn/rili/api/app/god/2019/01/20190101.js
res = requests.get('http://www.nongli.cn/rili/api/app/god/'+_a+'.js')
r = str(res.content, encoding='utf-8')
_r = parse('json:'+r)
_obj = _r['html']
self._wuxing = _obj['wuxing'].strip('"')
# Lunar calendar
_nongli = _obj['nongli'].strip('"').split(' ')
self._nongli = _nongli[0]
self._shengxiao = _nongli[1]
# Gregorian calendar
_gongli = _obj['gongli'].strip('"').split(' ')
self._gongli = _gongli[0]
self._xingzuo = _gongli[1]
self._suici = _obj['suici'].strip('"')
self._cai = _obj['cai'].strip('"')
self._xi = _obj['xi'].strip('"')
self._fu = _obj['fu'].strip('"')
except Exception as e:
print(e)
# Date formatting (string to timestamp)
def date_format(self, _date):
return time.mktime(time.strptime(_date,"%Y-%m-%d"))
# Check whether the list contains a holiday
def findHoliday(self, today, _list):
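# Returns True for an official day off (status '1'), False for a make-up workday (status '2'), None if the date is not in the list.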
if (len(list(filter(lambda x: x['date'] == today and x['status'] == '1', _list))) > 0):
return True
# Status '2' means this weekend day is a scheduled make-up workday
if (len(list(filter(lambda x: x['date'] == today and x['status'] == '2', _list))) > 0):
return False
return None
# Determine whether the given day is a holiday
def is_holiday(self, _date):
# Get the current date
localtime = time.localtime(_date)
ym = "{}年{}月".format(localtime.tm_year,localtime.tm_mon)
today = "{}-{}-{}".format(localtime.tm_year,localtime.tm_mon,localtime.tm_mday)
#today = '2019-10-1'
#_LOGGER.info('获取当前日期: %s',today)
# Fetch the Baidu calendar
res = requests.get('https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query=' + ym
+ '&co=&resource_id=6018&t=1573873782858&ie=utf8&oe=gbk&cb=op_aladdin_callback&format=json&tn=baidu&cb=&_=1573873715796')
r = res.json()
obj = r['data'][0]
# Get the suit/avoid entries
_almanac = list(filter(lambda x: x['date'] == today, obj['almanac']))
if len(_almanac) > 0:
self._avoid = _almanac[0]['avoid']
self._suit = _almanac[0]['suit']
# Check whether it is in the list of Chinese traditional holidays
_list = list(filter(lambda x: x['startday'] == today, obj['holidaylist']))
if (len(_list) > 0):
self._holiday_name = _list[0]['name']
return True
# Check whether it is in a consecutive traditional-holiday list
if 'holiday' in obj:
_holiday = obj['holiday']
if isinstance(_holiday,list):
for item in _holiday:
_result = self.findHoliday(today, item['list'])
if _result != None:
self._holiday_name = item['name']
return _result
elif isinstance(_holiday,dict):
_result = self.findHoliday(today, _holiday['list'])
if _result != None:
self._holiday_name = _holiday['name']
return _result
# Check whether it is a weekend
if localtime.tm_wday == 5:
self._holiday_name = '周六'
return True
if localtime.tm_wday == 6:
self._holiday_name = '周日'
return True
return False
async def async_update(self):
"""判断是否节假日,获取当天详细信息."""
# if self._today != self.today:
now = time.time()
self.get_details(now)
# Reset the holiday name
self._holiday_name = None
self._state = self.is_holiday(now)
self._today = self.today
# -------------- String to JSON -----------------------
def skip_ws(txt, pos):
while pos < len(txt) and txt[pos].isspace():
pos += 1
return pos
def parse_str(txt, pos, allow_ws=False, delimiter=(',',':','}',']')):  # tuple default avoids a shared mutable default
while pos < len(txt):
if not allow_ws and txt[pos].isspace():
break
if txt[pos] in delimiter:
break
pos += 1
return pos
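# Recursively parse a relaxed (unquoted) JSON-like object literal starting at the opening brace.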
def parse_obj(txt, pos):
obj = dict()
while True:
pos = skip_ws(txt, pos+1)
end = parse_str(txt, pos, True, [':'])
if end >= len(txt):
raise ValueError("unexpected end when parsing object key")
key = txt[pos:end].strip()
pos = skip_ws(txt, end+1)
if pos >= len(txt):
raise ValueError("unexpected end when parsing object value")
if txt[pos] == '[':
value, pos = parse_array(txt, pos)
elif txt[pos] == '{':
value, pos = parse_obj(txt, pos)
else:
end = parse_str(txt, pos, True, [',','}'])
if end >= len(txt):
raise ValueError("unexpected end when parsing object value")
value = txt[pos:end].strip()
pos = end
obj[key] = value
pos = skip_ws(txt, pos)
if pos >= len(txt):
raise ValueError("unexpected end when object value finish")
if txt[pos] == '}':
return obj, pos+1
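# Recursively parse a relaxed JSON-like array literal starting at the opening bracket.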
def parse_array(txt, pos):
array = list()
while True:
pos = skip_ws(txt, pos+1)
if pos >= len(txt):
raise ValueError("unexpected end when parsing array item")
if txt[pos] == '[':
value, pos = parse_array(txt, pos)
elif txt[pos] == '{':
value, pos = parse_obj(txt, pos)
else:
end = parse_str(txt, pos, True, [',',']'])
if end >= len(txt):
raise ValueError("unexpected end when parsing array item")
value = txt[pos:end].strip()
pos = end
array.append(value)
pos = skip_ws(txt, pos)
if pos >= len(txt):
raise ValueError("unexpected end when array item finish")
if txt[pos] == ']':
return array, pos+1
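# Entry point: accept a payload with a 'json...:' prefix and parse the root object or array after the first colon.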
def parse(txt):
if txt.startswith('json'):
pos = txt.find(':')
if pos != -1:
pos = skip_ws(txt, pos+1)
if txt[pos] == '{':
obj, pos = parse_obj(txt, pos)
return obj
elif txt[pos] == '[':
array, pos = parse_array(txt, pos)
return array
raise ValueError("format error when parsing root") | {"/custom_components/ha_cloud_music/media_player.py": ["/custom_components/ha_cloud_music/api_const.py", "/custom_components/ha_cloud_music/api_media.py", "/custom_components/ha_cloud_music/api_voice.py", "/custom_components/ha_cloud_music/api_tts.py"], "/custom_components/ha_cloud_music/api_media.py": ["/custom_components/ha_cloud_music/api_vlc.py"], "/custom_components/ha_cloud_music/api_tts.py": ["/custom_components/ha_cloud_music/api_const.py"]} |
42,181 | fs6/ha_cloud_music | refs/heads/master | /custom_components/ha_cloud_music/api_tts.py | import os, hashlib, asyncio, threading, time, requests
from mutagen.mp3 import MP3
from urllib.request import urlopen, quote, urlretrieve
from homeassistant.helpers import template
from homeassistant.const import (STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_OFF, STATE_UNAVAILABLE)
session = requests.session()
from .api_const import ROOT_PATH
from .api_config import ApiConfig
class ApiTTS():
def __init__(self, media, cfg):
self.hass = media._hass
self.media = media
self.media_position = None
self.media_url = None
self.thread = None
self.tts_before_message = cfg['tts_before_message']
self.tts_after_message = cfg['tts_after_message']
tts_mode = cfg['tts_mode']
if [1, 2, 3, 4].count(tts_mode) == 0:
tts_mode = 4
self.tts_mode = tts_mode
self.api_config = ApiConfig(os.path.join(os.path.dirname(__file__), 'dist/cache'))
def log(self, name,value):
self.media.api_media.log('【文本转语音】%s:%s',name,value)
# Run the TTS logic asynchronously (in a worker thread)
def async_tts(self, text):
# If something is currently playing, pause it and save the playback position
if self.media._media != None and self.media._state == STATE_PLAYING:
self.media.media_pause()
self.media_position = self.media._media_position
self.media_url = self.media._media_url
# Play the given text
self.play_url(text)
# Restore the previous playback at the saved position
if self.media_url != None:
self.log('恢复当前播放URL', self.media_url)
self.hass.services.call('media_player', 'play_media', {
'entity_id': 'media_player.ha_cloud_music',
'media_content_id': self.media_url,
'media_content_type': 'music'
})
time.sleep(2)
self.log('恢复当前进度', self.media_position)
self.hass.services.call('media_player', 'media_seek', {
'entity_id': 'media_player.ha_cloud_music',
'seek_position': self.media_position
})
# Re-enable automatic next track
self.media._timer_enable = True
self.media_url = None
# Fetch the speech audio URL and play it
def play_url(self, text):
# Build the file name
f_name = self.api_config.md5(text + str(self.tts_mode)) + ".mp3"
# Create the cache directory
_dir = self.api_config.get_path('tts')
self.api_config.mkdir(_dir)
# Build the cache file path
ob_name = _dir + '/' + f_name
self.log('本地文件路径', ob_name)
# If the file does not exist yet, download it
if os.path.isfile(ob_name) == False:
session.get('https://ai.baidu.com/tech/speech/tts')
res = session.post('https://ai.baidu.com/aidemo',{
'type': 'tns',
'spd': 5,
'pit': 5,
'vol': 5,
'per': self.tts_mode,
'tex': text
})
r = res.json()
if r['errno'] == 0:
base64_data = r['data'].replace('data:audio/x-mpeg;base64,','')
self.api_config.base64_to_file(base64_data, ob_name)
else:
# If the download did not happen, wait one second
time.sleep(1)
# Build the local playback URL
local_url = self.hass.config.api.base_url + ROOT_PATH + '/cache/tts/' + f_name
self.log('本地URL', local_url)
self.hass.services.call('media_player', 'play_media', {
'entity_id': 'media_player.ha_cloud_music',
'media_content_id': local_url,
'media_content_type': 'music'
})
# Measure the audio duration and block until playback should have finished
audio = MP3(ob_name)
self.log('音频时长', audio.info.length)
time.sleep(audio.info.length + 4)
async def speak(self, call):
try:
text = call.data['message']
# Render the message template
tpl = template.Template(text, self.hass)
text = self.tts_before_message + tpl.async_render(None) + self.tts_after_message
self.log('解析后的内容', text)
if self.thread != None:
self.thread.join()
self.thread = threading.Thread(target=self.async_tts, args=(text,))
self.thread.start()
except Exception as ex:
self.log('出现异常', ex)
# Clear the TTS cache
async def clear(self, call):
try:
_path = self.api_config.get_path('tts')
self.api_config.delete(_path)
except Exception as ex:
self.log('出现异常', ex) | {"/custom_components/ha_cloud_music/media_player.py": ["/custom_components/ha_cloud_music/api_const.py", "/custom_components/ha_cloud_music/api_media.py", "/custom_components/ha_cloud_music/api_voice.py", "/custom_components/ha_cloud_music/api_tts.py"], "/custom_components/ha_cloud_music/api_media.py": ["/custom_components/ha_cloud_music/api_vlc.py"], "/custom_components/ha_cloud_music/api_tts.py": ["/custom_components/ha_cloud_music/api_const.py"]} |
42,182 | fs6/ha_cloud_music | refs/heads/master | /custom_components/ha_cloud_music/api_vlc.py | import datetime
from homeassistant.const import (STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_OFF, STATE_UNAVAILABLE)
################### Built-in VLC player ##########################
class VlcPlayer():
def __init__(self):
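# Wrap python-vlc: create an Instance/MediaPlayer pair and subscribe to the end-reached and position-changed events.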
import vlc
self.vlc = vlc
self._instance = vlc.Instance()
self._vlc = self._instance.media_player_new()
self.state = STATE_IDLE
self.attributes = {
"volume_level": 1,
"is_volume_muted": False,
"media_duration": 0,
"media_position_updated_at": None,
"media_position": 0,
}
self.ha_cloud_music = True
self._event_manager = self._vlc.event_manager()
self._event_manager.event_attach(vlc.EventType.MediaPlayerEndReached, self.end)
self._event_manager.event_attach(vlc.EventType.MediaPlayerPositionChanged, self.update)
def release(self):
self._vlc.release()
self._instance.release()
def end(self, event):
self.state = STATE_OFF
def update(self, event):
try:
status = self._vlc.get_state()
if status == self.vlc.State.Playing:
self.state = STATE_PLAYING
elif status == self.vlc.State.Paused:
self.state = STATE_PAUSED
else:
self.state = STATE_IDLE
media_duration = self._vlc.get_length() / 1000
self.attributes['media_duration'] = media_duration
self.attributes['media_position'] = self._vlc.get_position() * media_duration
self.attributes['media_position_updated_at'] = datetime.datetime.now()
self.attributes['volume_level'] = self._vlc.audio_get_volume() / 100
self.attributes['is_volume_muted'] = (self._vlc.audio_get_mute() == 1)
except Exception as e:
print(e)
def load(self, url):
self._vlc.set_media(self._instance.media_new(url))
self._vlc.play()
self.state = STATE_PLAYING
def play(self):
if self._vlc.is_playing() == False:
self._vlc.play()
self.state = STATE_PLAYING
def pause(self):
if self._vlc.is_playing() == True:
self._vlc.pause()
self.state = STATE_PAUSED
def volume_set(self, volume_level):
self.attributes['volume_level'] = volume_level
self._vlc.audio_set_volume(int(volume_level * 100))
# Seek to a position
def seek(self, position):
self.attributes['media_position'] = position
track_length = self._vlc.get_length()/1000
self._vlc.set_position(position/track_length)
# Mute
def mute_volume(self, mute):
self.attributes['is_volume_muted'] = mute
self._vlc.audio_set_mute(mute) | {"/custom_components/ha_cloud_music/media_player.py": ["/custom_components/ha_cloud_music/api_const.py", "/custom_components/ha_cloud_music/api_media.py", "/custom_components/ha_cloud_music/api_voice.py", "/custom_components/ha_cloud_music/api_tts.py"], "/custom_components/ha_cloud_music/api_media.py": ["/custom_components/ha_cloud_music/api_vlc.py"], "/custom_components/ha_cloud_music/api_tts.py": ["/custom_components/ha_cloud_music/api_const.py"]} |
42,198 | sunitsai/2userDjango | refs/heads/master | /app/views.py | from django.shortcuts import render
from .models import *
from random import randint
from .utils import *
# Create your views here.
def RegistrationPage(request):
return render(request,"app/register.html")
def LoginPage(request):
return render(request,"app/login.html")
def ImagePage(request):
return render(request,"app/image.html")
def JsPage(request):
return render(request,"app/jsDemo.html")
def RegisterUser(request):
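# Handle the doctor registration form: check for an existing account, create User and Doctor rows, and email an OTP.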
try:
if request.POST['role']=="doctor":
role = request.POST['role']
firstname = request.POST['firstname']
lastname = request.POST['lastname']
password = request.POST['password']
confirmpassword = request.POST['confirmpassword']
gender = request.POST['gender']
email = request.POST['email']
speciality = request.POST['speciality']
dateofbirth = request.POST['birthdate']
city = request.POST['city']
mobile = str(request.POST['phone'])
user = User.objects.filter(email=email)
if user:
# This is our first validation on server side
message = "User already Exist"
return render(request,"app/register.html",{'message':message})
else:
if password==confirmpassword:
otp = randint(10000,99999)
newuser = User.objects.create(role=role,email=email,password=password,otp=otp)
newdoctor = Doctor.objects.create(user_id=newuser,firstname=firstname,lastname=lastname,gender=gender,speciality=speciality,birthdate=dateofbirth,
city=city,mobile=mobile)
email_subject = "Account Verification"
sendmail(email_subject,"mail_template",email,{'name':firstname,'otp':otp})
return render(request,"app/login.html")
else:
message = "Password doesnot match"
return render(request,"app/register.html",{'message':message})
except Exception as e1:
print("Registration Exception -------------->",e1)
# Return a response on failure so the view never returns None
return render(request,"app/register.html",{'message':"Registration failed, please try again"})
def LoginUser(request):
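# Authenticate a doctor against the master User table and store profile data in the session.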
if request.POST['role']=="doctor":
email = request.POST['email']
password = request.POST['password']
user = User.objects.filter(email=email).first() # Master Table Data; filter().first() returns None instead of raising DoesNotExist, so the else branch below is reachable
if user:
if user.password==password and user.role=="doctor":
doctor = Doctor.objects.get(user_id=user) # Child Table Data
request.session['firstname'] = doctor.firstname
request.session['id'] = user.id
request.session['email'] = user.email
request.session['lastname'] = doctor.lastname
return render(request,"app/home.html")
else:
message = "Password and role doesnot match"
return render(request,"app/login.html",{'message':message})
else:
message = "User doesnot exist"
return render(request,"app/login.html",{'message':message})
else:
# Patient login flow is not implemented yet
print("Patient Area")
return render(request,"app/login.html",{'message':"Patient login is not supported yet"})
def ImageUpload(request):
image = request.FILES['image']
newimg = FileUpload.objects.create(image=image)
message ="File Uploaded"
return render(request,"app/image.html",{'message':message})
def FetchFile(request):
all_img = FileUpload.objects.all()
return render(request,"app/image.html",{'all_img':all_img}) | {"/app/views.py": ["/app/models.py"]} |
42,199 | sunitsai/2userDjango | refs/heads/master | /app/urls.py |
from django.urls import path,include
from . import views
urlpatterns = [
path("",views.RegistrationPage,name="registerpage"),
path("registeruser/",views.RegisterUser,name="registeruser"),
path("loginpage/",views.LoginPage,name="loginpage"),
path("loginuser/",views.LoginUser,name="loginuser"),
path("imagepage/",views.ImagePage,name="imagepage"),
path("upload/",views.ImageUpload,name="upload"),
path("fetchdata/",views.FetchFile,name="fetch"),
path("jspage/",views.JsPage,name="jspage"),
]
| {"/app/views.py": ["/app/models.py"]} |
42,200 | sunitsai/2userDjango | refs/heads/master | /app/models.py | from django.db import models
# Create your models here.
class User(models.Model):
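# Master account table shared by both roles; role distinguishes doctor from patient.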
email = models.EmailField(max_length=50)
password = models.CharField(max_length=50)
otp = models.IntegerField()
role = models.CharField(max_length=50)
is_verified = models.BooleanField(default=False)
is_created = models.DateTimeField(auto_now_add=True)  # auto_now_add so the creation time is set once, not on every save
is_updated = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
class Doctor(models.Model):
user_id = models.ForeignKey(User, on_delete = models.CASCADE)
firstname = models.CharField(max_length=50)
lastname = models.CharField(max_length=50)
qualification = models.CharField(max_length=100, blank= True)
speciality = models.CharField(max_length = 100)
mobile = models.CharField(max_length = 10)
clinic = models.CharField(max_length= 100,blank = True)
address = models.CharField(max_length= 500, blank= True)
city = models.CharField(max_length = 50)
state = models.CharField(max_length = 50, blank= True)
gender = models.CharField(max_length= 10)
birthdate = models.DateField()
location = models.CharField(max_length= 30, blank= True)
about_doc = models.CharField(max_length= 100, blank= True)
profile_pic=models.FileField(upload_to='app/img/',default='doc_male.png')
class Patient(models.Model):
user_id = models.ForeignKey(User, on_delete = models.CASCADE)
firstname = models.CharField(max_length=50)
lastname = models.CharField(max_length=50)
mobile = models.CharField(max_length = 10)
address = models.CharField(max_length= 500, blank = True)
city = models.CharField(max_length = 50)
state = models.CharField(max_length = 50, blank = True)
gender = models.CharField(max_length= 10)
birthdate = models.DateField()
#updated patient profile
blood_group=models.CharField(max_length=10,blank= True)
blood_presure=models.CharField(max_length=10,blank= True)
sugar=models.CharField(max_length=10,blank= True)
Haemoglobin=models.CharField(max_length=10,blank= True)
profile_pic=models.FileField(upload_to='app/img/',default='patient_icon.png')
class FileUpload(models.Model):
image = models.ImageField(upload_to="img/") | {"/app/views.py": ["/app/models.py"]} |
42,203 | vicenteneto/foursquare-api-sample | refs/heads/master | /foursquare_sample_util/constants.py | class Settings:
CONFIG_SUFFIX = 'Config'
VIEWS = 'views'
| {"/manage.py": ["/foursquare_sample/app.py"], "/foursquare_sample/app.py": ["/foursquare_sample_util/constants.py"]} |
42,204 | vicenteneto/foursquare-api-sample | refs/heads/master | /manage.py | from flask_script import Manager, Server
from foursquare_sample.app import create_app
manager = Manager(create_app)
manager.add_option('-c', '--config', default='Development', required=False, dest='config')
manager.add_command('runserver', Server(threaded=True))
if __name__ == '__main__':
manager.run()
| {"/manage.py": ["/foursquare_sample/app.py"], "/foursquare_sample/app.py": ["/foursquare_sample_util/constants.py"]} |
42,205 | vicenteneto/foursquare-api-sample | refs/heads/master | /foursquare_sample/app.py | from importlib import import_module
from flask import Flask
from foursquare_sample import settings
from foursquare_sample.settings import VIEWS
from foursquare_sample_util.constants import Settings
def create_app(config):
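# Application factory: resolve the '<config>Config' class from settings and apply it to the app.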
app = Flask(__name__)
if config is not None:
configuration = getattr(settings, config + Settings.CONFIG_SUFFIX)
app.config.from_object(configuration)
# Register the view blueprints declared in settings.VIEWS (assumed intent: the helper below is otherwise never called)
__register_blueprints(app)
return app
def __register_blueprints(app):
views = [import_module('%s.%s' % (module_name, Settings.VIEWS)) for module_name in VIEWS]
for view in views:
app.register_blueprint(view.blueprint)
| {"/manage.py": ["/foursquare_sample/app.py"], "/foursquare_sample/app.py": ["/foursquare_sample_util/constants.py"]} |