seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
75171241702 | from setuptools import setup, find_packages
import sys, os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
NEWS = open(os.path.join(here, 'NEWS.txt')).read()
version = '0.1'
install_requires = [
# List your project dependencies here.
# For more details, see:
# http://packages.python.org/distribute/setuptools.html#declaring-dependencies
"scipy>=1.4",
"seaborn>=0.10",
"statsmodels>=0.11",
"matplotlib>=3.2",
"numpy>=1.18",
"pandas>=1.0"
]
setup(name='starplot',
version=version,
description="Create barplots or boxplots with significant level annotations.",
long_description=README + '\n\n' + NEWS,
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
],
keywords='python numpy scipy statsmodels matplotlib seaborn t-test f-test mann-whitney non-parametric boxplots barplots significance-stars',
author='yufongpeng',
author_email='sciphypar@gmail.com',
url='https://github.com/yufongpeng/starplot',
license='',
packages=find_packages('src'),
package_dir = {'': 'src'},include_package_data=True,
zip_safe=False,
install_requires=install_requires,
entry_points={
'console_scripts':
['starplot=starplot:main']
}
)
| yufongpeng/starplot | setup.py | setup.py | py | 1,336 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
14231636802 | #!/usr/bin/env python3
import sys
import mrcfile
args = sys.argv
from IsoNet.util.filter import maxmask,stdmask
import numpy as np
#import cupy as cp
import os
def make_mask_dir(tomo_dir,mask_dir,side = 8, density_percentage=30,std_percentage=1,surface=None):
tomo_list = ["{}/{}".format(tomo_dir,f) for f in os.listdir(tomo_dir)]
try:
os.makedirs(mask_dir)
except FileExistsError:
import shutil
shutil.rmtree(mask_dir)
os.makedirs(mask_dir)
mask_list = ["{}/{}_mask.mrc".format(mask_dir,f.split('.')[0]) for f in os.listdir(tomo_dir)]
for i,tomo in enumerate(tomo_list):
print('tomo and mask',tomo, mask_list[i])
make_mask(tomo, mask_list[i],side = side,density_percentage=density_percentage,std_percentage=std_percentage,surface=surface)
def make_mask(tomo_path, mask_name, mask_boundary = None, side = 5, density_percentage=50., std_percentage=50., surface=None):
from scipy.ndimage.filters import gaussian_filter
from skimage.transform import resize
with mrcfile.open(tomo_path, permissive=True) as n:
header_input = n.header
#print(header_input)
pixel_size = n.voxel_size
tomo = n.data.astype(np.float32)
sp=np.array(tomo.shape)
sp2 = sp//2
bintomo = resize(tomo,sp2,anti_aliasing=True)
gauss = gaussian_filter(bintomo, side/2)
if density_percentage <=99.8:
mask1 = maxmask(gauss,side=side, percentile=density_percentage)
else:
mask1 = np.ones(sp2)
if std_percentage <=99.8:
mask2 = stdmask(gauss,side=side, threshold=std_percentage)
else:
mask2 = np.ones(sp2)
out_mask_bin = np.multiply(mask1,mask2)
if mask_boundary is not None:
from IsoNet.util.filter import boundary_mask
mask3 = boundary_mask(bintomo, mask_boundary)
out_mask_bin = np.multiply(out_mask_bin, mask3)
if (surface is not None) and surface < 1:
for i in range(int(surface*sp2[0])):
out_mask_bin[i] = 0
for i in range(int((1-surface)*sp2[0]),sp2[0]):
out_mask_bin[i] = 0
out_mask = np.zeros(sp)
out_mask[0:-1:2,0:-1:2,0:-1:2] = out_mask_bin
out_mask[0:-1:2,0:-1:2,1::2] = out_mask_bin
out_mask[0:-1:2,1::2,0:-1:2] = out_mask_bin
out_mask[0:-1:2,1::2,1::2] = out_mask_bin
out_mask[1::2,0:-1:2,0:-1:2] = out_mask_bin
out_mask[1::2,0:-1:2,1::2] = out_mask_bin
out_mask[1::2,1::2,0:-1:2] = out_mask_bin
out_mask[1::2,1::2,1::2] = out_mask_bin
out_mask = (out_mask>0.5).astype(np.uint8)
with mrcfile.new(mask_name,overwrite=True) as n:
n.set_data(out_mask)
n.header.extra2 = header_input.extra2
n.header.origin = header_input.origin
n.header.nversion = header_input.nversion
n.voxel_size = pixel_size
#print(n.header)
# with mrcfile.new('./test_mask1.rec',overwrite=True) as n:
# n.set_data(mask1.astype(np.float32))
# with mrcfile.new('./test_mask2.rec',overwrite=True) as n:
# n.set_data(mask2.astype(np.float32))
if __name__ == "__main__":
# the first arg is tomo name the second is mask name
make_mask(args[1],args[2])
| IsoNet-cryoET/IsoNet | bin/make_mask.py | make_mask.py | py | 3,191 | python | en | code | 49 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number"... |
15593721054 | import copy
import math
import numpy as np
import random as rd
from typing import List
class Layer:
def __init__(self, size: int, next_size: int):
self.size = size
self.neurons = np.zeros((size,))
self.biases = np.zeros((size,))
self.weights = np.zeros((size, next_size))
class Point:
x: int
y: int
type: int
def __init__(self, x, y, type) -> None:
self.x = x
self.y = y
self.type = type
class NeuralNetwork:
learningRate: float
layers: List[Layer]
def __init__(self, learningRate, sizes) -> None:
self.learningRate = learningRate
self.layers = copy.copy(sizes)
self.epoch = 0
for i, _ in enumerate(sizes):
next_size = 0
if (i < len(sizes) - 1):
next_size = sizes[i + 1]
self.layers[i] = Layer(sizes[i], next_size)
for j in range(sizes[i]):
self.layers[i].biases[j] = rd.random() * 2.0 - 1.0
for k in range(next_size):
self.layers[i].weights[j][k] = rd.random() * 2.0 - 1.0
def sigmoid(self, x):
return 1 / (1 + math.exp(-x))
def dsigmoid(self, y):
return y * (1 - y)
def feed_forfard(self, inputs: List[float]):
layers = self.layers
for i, v in enumerate(inputs):
layers[0].neurons[i] = v
for i in range(1, len(layers)):
l = layers[i-1]
l1 = layers[i]
for j in range(l1.size):
l1.neurons[j] = 0
for k in range(l.size):
l1.neurons[j] += l.neurons[k] * l.weights[k][j]
l1.neurons[j] += l1.biases[j]
l1.neurons[j] = self.sigmoid(l1.neurons[j])
return layers[len(layers)-1].neurons
def back_propagation(self, targets: List[float]):
layers = self.layers
errors = np.zeros((layers[len(layers)-1].size,))
for i in range(layers[len(layers)-1].size):
errors[i] = targets[i] - layers[len(layers)-1].neurons[i]
for k in range(len(layers)-2, -1, -1):
l = layers[k]
l1 = layers[k+1]
errors_next = np.zeros((l.size,))
gradients = np.zeros((l1.size,))
for i in range(l1.size):
gradients[i] = errors[i] * \
self.dsigmoid(layers[k + 1].neurons[i])
gradients[i] *= self.learningRate
deltas = np.zeros((l1.size, l.size))
for i in range(l1.size):
for j in range(l.size):
deltas[i][j] = gradients[i] * l.neurons[j]
for i in range(l.size):
errors_next[i] = 0
for j in range(l1.size):
errors_next[i] += l.weights[i][j] * errors[j]
errors = np.zeros((l.size,))
for i, v in enumerate(errors_next):
errors[i] = v
weights_new = np.zeros((len(l.weights), len(l.weights[0])))
for i in range(l1.size):
for j in range(l.size):
weights_new[j][i] = l.weights[j][i] + deltas[i][j]
l.weights = weights_new
for i in range(l1.size):
l1.biases[i] += gradients[i]
return None
| vbalabin/mp_practical | pscripts/neural.py | neural.py | py | 3,323 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": ... |
20474535065 | import torch
import torch.nn as nn
import numpy as np
from dataset_windows import SatelliteSet, flatten_batch_data, standardize_data
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
from torchvision import models
import torch.nn.functional as F
from tqdm import tqdm
import h5py
from PIL import Image
import matplotlib.pyplot as plt
# def createDeepLabv3(outputchannels=1):
# """DeepLabv3 class with custom head
# Args:
# outputchannels (int, optional): The number of output channels in your dataset masks. Defaults to 1.
# Returns:
# model: Returns the DeepLabv3 model with the ResNet101 backbone.
# """
# model = models.segmentation.deeplabv3_resnet101(pretrained=True, progress=True)
# model.conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
# model.classifier = DeepLabHead(2048, outputchannels)
# # Set the model in training mode
# model.eval()
# return model
#
#
# class MyModel(nn.Module):
# def __init__(self):
# super(MyModel, self).__init__()
#
# image_modules = list(createDeepLabv3(32).children())[:-1]
# self.layer1 = nn.Conv2d(4, 3, kernel_size=3, stride=1, padding=1)
# self.model = nn.Sequential(*image_modules)
#
# def forward(self, tensor):
# a = self.layer1(tensor)
# # a = torch.tensor(a, dtype=torch.float64)
# a = self.model(a.double())
#
# return a
class ResNet_101(nn.Module):
"""Load pretrained model resnet101"""
def __init__(self, in_channels=4, conv1_out=64):
super(ResNet_101, self).__init__()
backbone = models.resnet101(pretrained=True)
# Change the input channels from 3 to 4 and set the same weight for NIR as R
pretrained_conv1 = backbone.conv1.weight.clone()
self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.conv1.weight[:, :3] = pretrained_conv1
self.conv1.weight[:, -1] = pretrained_conv1[:, 0]
self.bn1 = backbone.bn1
self.relu = nn.ReLU(inplace=True)
self.maxpool = backbone.maxpool
self.layer1 = backbone.layer1
self.layer2 = backbone.layer2
self.layer3 = backbone.layer3
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
return x
class ASSP(nn.Module):
def __init__(self, in_channels, out_channels=256):
super(ASSP, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_channels, out_channels, 1, padding=0, dilation=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=6,
dilation=6, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=12,
dilation=12, bias=False)
self.bn3 = nn.BatchNorm2d(out_channels)
self.conv4 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=18,
dilation=18, bias=False)
self.bn4 = nn.BatchNorm2d(out_channels)
self.conv5 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0,
dilation=1, bias=False)
self.bn5 = nn.BatchNorm2d(out_channels)
self.convf = nn.Conv2d(in_channels=out_channels * 5, out_channels=out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=False)
self.bnf = nn.BatchNorm2d(out_channels)
self.adapool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x1 = self.conv1(x)
x1 = self.bn1(x1)
x1 = self.relu(x1)
x2 = self.conv2(x)
x2 = self.bn2(x2)
x2 = self.relu(x2)
x3 = self.conv3(x)
x3 = self.bn3(x3)
x3 = self.relu(x3)
x4 = self.conv4(x)
x4 = self.bn4(x4)
x4 = self.relu(x4)
x5 = self.adapool(x)
x5 = self.conv5(x5)
x5 = self.bn5(x5)
x5 = self.relu(x5)
x5 = F.interpolate(x5, size=tuple(x4.shape[-2:]), mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1) # channels first
x = self.convf(x)
x = self.bnf(x)
x = self.relu(x)
return x
class Deeplabv3Resnet101(nn.Module):
"""Consturct Deeplabv3_Resnet101"""
def __init__(self, nc=2, input_channel=4):
super(Deeplabv3Resnet101, self).__init__()
self.nc = nc
self.backbone = ResNet_101(input_channel)
self.assp = ASSP(in_channels=1024)
self.out1 = nn.Sequential(nn.Conv2d(in_channels=256, out_channels=256, kernel_size=1, stride=1), nn.ReLU())
self.dropout1 = nn.Dropout(0.5)
self.up4 = nn.Upsample(scale_factor=4)
self.up2 = nn.Upsample(scale_factor=2)
self.conv1x1 = nn.Sequential(nn.Conv2d(1024, 256, 1, bias=False), nn.ReLU())
self.conv3x3 = nn.Sequential(nn.Conv2d(512, self.nc, 1), nn.ReLU())
self.dec_conv = nn.Sequential(nn.Conv2d(256, 256, 3, padding=1), nn.ReLU())
def forward(self, x):
x = self.backbone(x)
out1 = self.assp(x)
out1 = self.out1(out1)
out1 = self.dropout1(out1)
out1 = self.up4(out1)
# print(out1.shape)
dec = self.conv1x1(x)
dec = self.dec_conv(dec)
dec = self.up4(dec)
concat = torch.cat((out1, dec), dim=1)
out = self.conv3x3(concat)
out = self.up4(out)
return out
if __name__ == "__main__":
# input_tensor = torch.rand(4, 4, 128, 128) # batch_size,input_channel,input_h,input_w
# print(input_tensor)
# model = Deeplabv3Resnet101(nc=32, input_channel=4)
# out = model(input_tensor)
# print(out.shape)
# Parameters for loading data
TRAIN_PATH = '../data/data_test_rgbReduced_delBlankRotations_Standardized.hdf5'
IMAGE_NUM = 2
WINDOWSIZE = 224
# Sampling data
SAMPLE = True
SAMPLESIZE = 0.5
# Batch size for train dataloader
BATCH_SIZE = 128
# Output features
OUTPUT_FEATURES = 32
# Set seed, so that results can be reproduced
np.random.seed(2021)
# Loading data
print("Loading data...")
train_dset = SatelliteSet(TRAIN_PATH, IMAGE_NUM, WINDOWSIZE)
print(f"Original dataset contains {len(train_dset)} samples")
# Sampling
if SAMPLE:
print("Sampling...")
#sampling = np.random.randint(len(train_dset), size=round(len(train_dset) * SAMPLESIZE)
length = round(len(train_dset) * SAMPLESIZE)
sampling = list(range(1*length, 2*length))
train_dset = torch.utils.data.Subset(train_dset, sampling)
print(f"TRAIN dataset contains {len(train_dset)} windows")
# Create dataloader
train_loader = torch.utils.data.DataLoader(train_dset,
batch_size=BATCH_SIZE,
num_workers=0,
shuffle=False)
# Initialize models
print("Loading model...")
model = Deeplabv3Resnet101(nc=OUTPUT_FEATURES, input_channel=4)
with h5py.File("P:\pf\pfstud\II_jingyli\data_test_features_c32_pic2.hdf5", "a") as f:
x_features = np.zeros((len(train_dset), OUTPUT_FEATURES, WINDOWSIZE, WINDOWSIZE))
y_gt = np.zeros((len(train_dset), WINDOWSIZE, WINDOWSIZE))
i = 0
for x, y in tqdm(train_loader):
print(x.shape)
print(y.shape)
x_out = model(x)
del x
print(x_out.shape)
x_out = x_out.detach().numpy()
x_features[i*BATCH_SIZE:(i+1)*BATCH_SIZE] = x_out
del x_out
y = y.detach().numpy()
y_gt[i*BATCH_SIZE:(i+1)*BATCH_SIZE] = y
del y
i += 1
_ = f.create_dataset("Features", data=x_features)
del _
_ = f.create_dataset("GT", data=y_gt)
del _
# img = Image.open('../1.jpg')
#
# import torchvision.transforms as T
#
# trf = T.Compose([T.Resize(256),
# T.CenterCrop(224),
# T.ToTensor()])
# inp = trf(img).unsqueeze(0)
# print(inp.shape)
#
# model = createDeepLabv3(24)
# out = model(inp)["out"]
# print(out.shape)
| jingyan-li/Vege_Height_Regression | feature_extraction/DeepLabv3_ResNet101.py | DeepLabv3_ResNet101.py | py | 8,660 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torchvision.models.resnet101",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torchvisio... |
42774121207 | from asyncio.locks import Lock
from typing import Union
import markovify
import asyncio
class InitializationError(Exception):
"""Error thrown when the model is not initialized"""
pass
class MarkovHandler:
"""
Manages the state of the internal markov model
"""
def __init__(self):
self._markov = None
self._lock = Lock()
async def add(self, text: str) -> None:
"""
Add a list of strings to the markov model's corpus
"""
async with self._lock:
if self._markov is None:
self._markov = await asyncio.get_event_loop().run_in_executor(
None, markovify.Text, text
)
else:
new_model = await asyncio.get_event_loop().run_in_executor(
None, markovify.Text, text
)
self._markov = await asyncio.get_event_loop().run_in_executor(
None, markovify.utils.combine, (new_model, self._markov)
)
async def generate(self) -> Union[str, None]:
"""
Generate sentences using the markov model
"""
if self._markov is not None:
return await asyncio.get_event_loop().run_in_executor(
None, self._markov.make_sentence
)
else:
raise InitializationError("Model not initialized")
async def info(self) -> dict:
"""
Returns some stats about the state of the markov model
"""
async def reset(self):
"""
Reset the internal markov model
"""
self._markov = markovify.NewlineText()
| anmolw/markov-generator-service | markovhandler.py | markovhandler.py | py | 1,669 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "asyncio.locks.Lock",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "markovify.Text",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "asyncio... |
13292259369 | from statistics import mode
from sklearn.feature_extraction import image
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import os
import random
import pandas as pd
from utils.Metric import compute_meandice_multilabel
from utils.Visualize import plot_whole_imgs
from utils.Utils import concat_bg
from utils.Data import convert_label_to_brats
random_seed = 2022
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def remove_input(tensor, axis=0, mode='zeros'):
"""Remove a single input modality
input tensor: B x C x H x W
ouptut tensor: B x C x H x W
"""
output_tensor = torch.zeros_like(tensor)
single_size = (tensor.size(0), *tensor.size()[2:])
for i in range(output_tensor.size(1)):
if i == axis:
if mode == 'zeros':
output_tensor[:, i] = torch.zeros(single_size)
elif mode == 'random':
output_tensor[:, i] = torch.randn(single_size)
else:
output_tensor[:, i] = tensor[:, i]
return output_tensor
if __name__ == "__main__":
from utils.Device import gpu_setting, model_dataparallel
from utils.Model import load_model_weights
from utils.Data import load_dataloader
from utils.Uncertainty import get_dropout_uncertainty
from model.unetr import UNETR
from tqdm import tqdm
import monai.transforms as tf
sigmoid = nn.Sigmoid()
# Model setting
amp = True
device, multi_gpu = gpu_setting()
unetr = UNETR(in_channels=4,
out_channels=4,
img_size=240,
feature_size=8,
dropout_rate=0.3,
hidden_size=64,
num_heads=4,
mlp_dim=128,
pos_embed='conv',
norm_name='instance',
spatial_dims=2).to(device)
model_weights = os.path.join("./result/exps/unetr-merge-4layer-withaug", "best.pth")
unetr = load_model_weights(unetr, model_weights, dp=False)
unetr = model_dataparallel(unetr, multi_gpu)
test_loader_params = dict(
batch_size=8,
shuffle=False
)
# validation and test transforms
val_transforms = tf.Compose([
tf.LoadImaged(reader="NibabelReader", keys=['image', 'label']),
# tf.AsDiscreted(keys=['label'], threshold_values=True),
tf.EnsureTyped(keys=["image", "label"]),
tf.ToTensord(keys=['image', 'label']),
tf.ToDeviced(keys=["image", "label"], device=device)
])
# Logging directories
img_save_dir = os.path.join("./asset", "input_removed")
os.makedirs(img_save_dir, exist_ok=True)
root_dir = "/cluster/projects/mcintoshgroup/BraTs2020/data_monai/"
# Test Dataloder
test_dataloader = load_dataloader(root_dir, "test", val_transforms, test_loader_params)
# Image seqs
images_seqs = ["T1", "T1Gd", "T2", "FLAIR"]
tumors_names = ["TC", "WT", "ET"]
model_dice = list()
# replace_modes = ["random", "zeros"]
replace_modes = ["zeros"]
models = [unetr]
for replace_mode in replace_modes:
for model_idx, model in enumerate(models):
dice_dict = {
"original": 0.,
"T1": 0.,
"T1Gd": 0.,
"T2": 0.,
"FLAIR": 0.
}
total_num_imgs = 0
model.eval()
with torch.no_grad():
for batch_idx, batch in tqdm(enumerate(test_dataloader)):
inputs, labels = batch["image"], convert_label_to_brats(concat_bg(batch["label"])) # use all four mods
labels_np = labels.detach().cpu().numpy()
inputs = inputs.to(device)
total_num_imgs += inputs.size(0)
# remove
outputs = torch.where(sigmoid(model(inputs)) > 0.5, 1, 0)
original_dice = compute_meandice_multilabel(outputs, labels, include_background=False) * inputs.size(0)
dice_dict["original"] += float(original_dice.data)
if float(original_dice / inputs.size(0)) > 0.75 and (labels_np[:, 1:].sum(1).sum() > 10000):
for i in range(4): # Iterate over image sequences
removed_input = remove_input(inputs, i, mode=replace_mode)
removed_outputs = torch.where(sigmoid(model(removed_input)) > 0.5, 1, 0)
input_np = inputs[:, i].detach().cpu().numpy()
out_np = outputs[:, 1:].sum(1).detach().cpu().numpy()
label_np = labels_np[:, 1:].sum(1)
removed_out_np = removed_outputs[:, 1:].sum(1).detach().cpu().numpy()
plot_whole_imgs(input_np,
os.path.join(img_save_dir, f"inputs-{images_seqs[i]}-{replace_mode}-{batch_idx}.jpg"),
num_cols=2)
img_to_vis = np.concatenate([label_np, out_np, removed_out_np], -1)
plot_whole_imgs(img_to_vis,
os.path.join(img_save_dir, f"outputs-{images_seqs[i]}_removed-{replace_mode}-{batch_idx}.jpg"),
num_cols=2)
if batch_idx == 192:
break
for key, val in dice_dict.items():
dice_dict[key] = val / total_num_imgs
model_dice.append(list(dice_dict.values()))
| SWKoreaBME/brats2020 | UNet_transformer/remove_input.py | remove_input.py | py | 6,135 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.manual_seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.m... |
39445267791 | # -*- coding: utf-8 -*-
#
"""
Utility functions related to input/output.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from io import open # pylint: disable=redefined-builtin
import os
#import logging
import importlib
import yaml
import tensorflow as tf
from tensorflow import gfile
as_text = tf.compat.as_text
__all__ = [
"write_paired_text",
"load_config_single",
"load_config"
]
#def get_tf_logger(fname,
# verbosity=tf.logging.INFO,
# to_stdio=False,
# stdio_verbosity=None):
# """Creates TF logger that allows to specify log filename and whether to
# print to stdio at the same time.
#
# Args:
# fname (str): The log filename.
# verbosity: The threshold for what messages will be logged. Default is
# `INFO`. Other options include `DEBUG`, `ERROR`, `FATAL`, and `WARN`.
# See :tf_main:`tf.logging <logging>`.
# to_stdio (bool): Whether to print messages to stdio at the same time.
# stido_verbosity (optional): The verbosity level when printing to stdio.
# If `None` (default), the level is set to be the same as
# :attr:`verbosity`. Ignored if :attr:`to_stdio` is False.
#
# Returns:
# The TF logger.
# """
def _load_config_python(fname):
config = {}
config_module = importlib.import_module(fname.rstrip('.py'))
for key in dir(config_module):
if not (key.startswith('__') and key.endswith('__')):
config[key] = getattr(config_module, key)
return config
def _load_config_yaml(fname):
with gfile.GFile(fname) as config_file:
config = yaml.load(config_file)
return config
def load_config_single(fname, config=None):
"""Loads config from a single file.
The config file can be either a Python file (with suffix '.py')
or a YAML file. If the filename is not suffixed with '.py', the file is
parsed as YAML.
Args:
fname (str): The config file name.
config (dict, optional): A config dict to which new configurations are
added. If `None`, a new config dict is created.
Returns:
A `dict` of configurations.
"""
if fname.endswith('.py'):
new_config = _load_config_python(fname)
else:
new_config = _load_config_yaml(fname)
if config is None:
config = new_config
else:
for key, value in new_config.items():
if key in config:
if isinstance(config[key], dict):
config[key].update(value)
else:
config[key] = value
else:
config[key] = value
return config
def load_config(config_path, config=None):
"""Loads configs from (possibly multiple) file(s).
Args:
config_path: Paths to configuration files. This can be a `list` of
config file names, or a path to a directory in which all files
are loaded, or a string of multiple file names separated by commas.
config (dict, optional): A config dict to which new configurations are
added. If `None`, a new config dict is created.
Returns:
A `dict` of configurations.
"""
fnames = []
if isinstance(config_path, (list, tuple)):
fnames = list(config_path)
elif gfile.IsDirectory(config_path):
for fname in gfile.ListDirectory(config_path):
fname = os.path.join(config_path, fname)
if not gfile.IsDirectory(fname):
fnames.append(fname)
else:
for fname in config_path.split(","):
fname = fname.strip()
if not fname:
continue
fnames.append(fname)
if config is None:
config = {}
for fname in fnames:
config = load_config_single(fname, config)
return config
def write_paired_text(src, tgt, fname, append=False, mode='h', sep='\t'):
"""Writes paired text to a file.
Args:
src: A list (or array) of `str` source text.
ttg: A list (or array) of `str` target text.
fname (str): The output filename.
append (bool): Whether appending to the end of the file if exists.
mode (str): The mode of writing, with the following options:
- :attr:`'h'`: The "horizontal" mode. Each source target pair is \
written in one line, intervened with :attr:`sep`, e.g.,
source_1 target_1
source_2 target_2
- :attr:`'v'`: The "vertical" mode. Each source target pair is \
written in two consecutive lines, e.g,
source_1
target_1
source_2
target_2
- :attr:`'s'`: The "separate" mode. Each source target pair is \
written in corresponding lines of two files named \
as :attr:`fname`.src and :attr:`fname`.tgt, respectively.
sep (str): The string intervening between source and target. Used
when :attr:`mode`='h'.
Returns:
The fileanme(s). If :attr:`mode`=='h' or :attr:`mode`=='v', returns
:attr:`fname`. If :attr:`mode`=='s', returns a list of filenames
`[':attr:`fname`.src', ':attr:`fname`.tgt']`.
"""
fmode = 'a' if append else 'w'
if mode == 's':
fn_src = '{}.src'.format(fname)
fn_tgt = '{}.tgt'.format(fname)
with open(fn_src, fmode, encoding='utf-8') as fs:
fs.write(as_text('\n'.join(src)))
fs.write('\n')
with open(fn_tgt, fmode, encoding='utf-8') as ft:
ft.write(as_text('\n'.join(tgt)))
ft.write('\n')
return fn_src, fn_tgt
else:
with open(fname, fmode, encoding='utf-8') as f:
for s, t in zip(src, tgt):
if mode == 'h':
text = '{}{}{}\n'.format(as_text(s), sep, as_text(t))
f.write(as_text(text))
elif mode == 'v':
text = '{}\n{}\n'.format(as_text(s), as_text(t))
f.write(as_text(text))
else:
raise ValueError('Unknown mode: {}'.format(mode))
return fname
| VegB/Text_Infilling | texar/utils/utils_io.py | utils_io.py | py | 6,380 | python | en | code | 26 | github-code | 36 | [
{
"api_name": "tensorflow.compat",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "importlib.import_module",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.gfile.GFile",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": ... |
23836096348 | # 1. Реализовать скрипт, в котором должна быть предусмотрена функция расчёта заработной платы сотрудника.
# Используйте в нём формулу: (выработка в часах*ставка в час) + премия. Во время выполнения расчёта для конкретных
# значений необходимо запускать скрипт с параметрами.
from sys import argv
script_name, work_hours, rate_per_hour, premium = argv
print("Название скрипта", script_name)
print("Отработано часов", work_hours)
print("Ставка в час", rate_per_hour)
print("Премия", premium)
print("ИТОГО: ", int(work_hours) * int(rate_per_hour) + int(premium))
# 2. Представлен список чисел. Необходимо вывести элементы исходного списка, значения которых больше предыдущего элемента.
# Подсказка: элементы, удовлетворяющие условию, оформить в виде списка. Для его формирования используйте генератор.
# Пример исходного списка: [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55].
# Результат: [12, 44, 4, 10, 78, 123].
control_list = [1, 2, 50, 1, 300, 600, 1, 12, 6, 50]
print(f' Контрольный список: {control_list}')
end_list = [control_list[index] for index in range(1, len(control_list)) if control_list[index] > control_list[index-1]]
print("Значение больше предыдущего: ", end_list)
# попробую изменить условие на меньше чем предыдущий:
control_list = [1, 2, 50, 1, 300, 600, 1, 12, 6, 50]
#print(f' Контрольный список: {control_list}')
end_list_min = [control_list[index] for index in range(1, len(control_list)) if control_list[index] < control_list[index-1]]
print("Значение меньше предыдущего: ", end_list_min)
# Контрольный список: [1, 2, 50, 1, 300, 600, 1, 12, 6, 50]
# Значение больше предыдущего: [2, 50, 300, 600, 12, 50]
# Значение меньше предыдущего: [1, 1, 6]
# 3. Для чисел в пределах от 20 до 240 найти числа, кратные 20 или 21. Решите задание в одну строку.
# Подсказка: используйте функцию range() и генератор.
x = [el for el in range(20, 241) if el % 20 == 0 or el % 21 == 0]
print(x)
# Для чистоты эксперимента заменим другими значениями:
# x = [el for el in range(20, 501) if el % 17 == 0 or el % 57 == 0]
# print(x)
# 4. Представлен список чисел. Определите элементы списка, не имеющие повторений. Сформируйте итоговый массив чисел,
# соответствующих требованию. Элементы выведите в порядке их следования в исходном списке. Для выполнения задания
# обязательно используйте генератор.
# Пример исходного списка: [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11].
# Результат: [23, 1, 3, 10, 4, 11]
from random import randint
generat = [randint(55, 60) for i in range(20)]
print(f'Сгенерированный список: {generat}')
itog = [el for el in generat if generat.count(el) == 1]
print(f'Не повторяются следующие: {itog}')
#print("При генерации повторилось- ", len(generat)-len(itog))
# Сгенерированный список: [59, 57, 58, 58, 59, 59, 60, 58, 60, 55, 55, 57, 58, 55, 60, 56, 57, 57, 56, 56]
# Не повторяются следующие: []
# При генерации повторилось- 20
# 5. Реализовать формирование списка, используя функцию range() и возможности генератора. В список должны войти чётные
# числа от 100 до 1000 (включая границы). Нужно получить результат вычисления произведения всех элементов списка.
# Подсказка: использовать функцию reduce().
from functools import reduce
generat = [i for i in range(100, 1001, 2)]
print(f'Сгенерированный список: {generat}')
itog = reduce(lambda x, y: x*y, generat)
print(itog)
# 6. Реализовать два небольших скрипта:
# итератор, генерирующий целые числа, начиная с указанного;
# итератор, повторяющий элементы некоторого списка, определённого заранее. Подсказка: используйте функцию count() и
# cycle() модуля itertools. Обратите внимание, что создаваемый цикл не должен быть бесконечным. Предусмотрите условие его
# завершения.
# #### Например, в первом задании выводим целые числа, начиная с 3. При достижении числа 10 — завершаем цикл.
# # Вторым пунктом необходимо предусмотреть условие, при котором повторение элементов списка прекратится.
from itertools import count, cycle
import sys
# первая часть
start_from = 10
def integer_generator(start_from):
for element in count(start_from):
if element > start_from+5:
break
yield element
abc = []
for element in integer_generator(1):
print(element)
abc.append(element)
print(abc)
# Вторая часть
x = "135"
x_count = 0
abc = []
for i in cycle(x):
if i == x[0]:
x_count += 1
if x_count < 3:
print(i)
abc.append(i)
else:
break
print(abc)
# 7. Реализовать генератор с помощью функции с ключевым словом yield, создающим очередное значение. При вызове функции
# должен создаваться объект-генератор. Функция вызывается следующим образом: for el in fact(n). Она отвечает за получение
# факториала числа. В цикле нужно выводить только первые n чисел, начиная с 1! и до n!.
# Подсказка: факториал числа n — произведение чисел от 1 до n. Например, факториал четырёх 4! = 1 * 2 * 3 * 4 = 24.
from functools import reduce
from itertools import count
from math import factorial
def fact(n):
    """Generate the running factorials 1!, 2!, ..., n! (empty for n < 1)."""
    product = 1
    for k in range(1, n + 1):
        product *= k
        yield product
abc = []
for i in fact(10):
print(i)
abc.append(i)
print(abc)
| AndrewSus/Python_Study-I | lesson-04.py | lesson-04.py | py | 7,609 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "itertools.count",
"line_n... |
5223935084 | from env import *
# import os
import re
import sys
import argparse
from os.path import join
from tools import *
import logging
from core.api import set_api_logger
from core.chat import ChatBot, Turn, set_chat_logger
import gradio as gr
from prompts.dialogue import *
args: argparse.Namespace = None
bot: ChatBot = None
# Global Hyper Parameters
no_long_term_memory = False
naive_memory = False
embed_summary = False
translation_map = {}
def summarize_embed_one_turn(bot: ChatBot, dialogue_text, dialogue_text_with_index):
    """Summarize one dialogue turn and compute its retrieval embedding.

    Args:
        bot: chat backend providing `ask` (summarization) and `vectorize` (embedding).
        dialogue_text: the turn's user/assistant text without a turn index.
        dialogue_text_with_index: the same text prefixed with its turn number.

    Returns:
        Tuple of (summarization, embedding).
    """
    global embed_summary
    # Pick the summarization prompt that matches the turn's language.
    lang2template = {
        LANG_EN: en_turn_summarization_prompt,
        LANG_ZH: zh_turn_summarization_prompt
    }
    tmp = choose_language_template(lang2template, dialogue_text)
    input_text = tmp.format(input=dialogue_text)
    logger.info(f'turn summarization input_text: \n\n{input_text}')
    # If the original text is short, keep it verbatim as the summary.
    summarization = dialogue_text_with_index
    if get_token_count_davinci(input_text) > 300:
        logger.info(f'current turn text token count > 300, summarize !\n\n')
        summarization = bot.ask(input_text)
        logger.info(f'Summarization is:\n\n{summarization}\n\n')
    else:
        logger.info(f'Raw content is short, keep raw content as summarization:\n\n{summarization}\n\n')
    # Embed either the summary or the full indexed text, controlled by the
    # module-level `embed_summary` flag.
    if embed_summary:
        embedding = bot.vectorize(summarization)
    else:
        embedding = bot.vectorize(dialogue_text_with_index)
    return summarization, embedding
def get_concat_input(user_str, pre_sre, hist_str=None):
    """Assemble the model prompt from the user input, the previous turn and
    optionally retrieved long-term memory.

    Args:
        user_str: current user utterance (also used to pick the language template).
        pre_sre: text of the immediately preceding turn.
        hist_str: retrieved history text; when falsy, the no-history template is used.

    Returns:
        The filled-in prompt string.
    """
    lang2template = {
        LANG_EN: en_no_history_agent_prompt,
        LANG_ZH: zh_no_history_agent_prompt
    }
    templates_no_hist = choose_language_template(lang2template, user_str)
    lang2template = {
        LANG_EN: en_history_agent_prompt,
        LANG_ZH: zh_history_agent_prompt
    }
    templates_hist = choose_language_template(lang2template, user_str)
    if hist_str:
        input_text = templates_hist.format(history_turn_text=hist_str, pre_turn_text=pre_sre, input=user_str)
    else:
        input_text = templates_no_hist.format(pre_turn_text=pre_sre, input=user_str)
    return input_text
def check_key_file(key_file):
    """Abort the program when the api-key file is missing."""
    if os.path.exists(key_file):
        return
    print(f'[{key_file}] not found! Please put your apikey in the txt file.')
    sys.exit(-1)
def get_first_prompt(user_text, model_name):
    """Build the opening prompt; turbo models take the raw user text as-is."""
    if model_name in [ENGINE_TURBO]:
        return user_text
    templates = {
        LANG_EN: en_start_prompt,
        LANG_ZH: zh_start_prompt
    }
    template = choose_language_template(templates, user_text)
    return template.format(input=user_text)
def check_string_format(input_str):
    """Return True when the string carries non-empty 'filename: ...'
    and 'dial_id: ...' fields separated by '; '."""
    text = input_str.strip()
    if 'filename' not in text or 'dial_id' not in text:
        return False
    found = {'filename': False, 'dial_id': False}
    for part in text.split('; '):
        for key in ('filename', 'dial_id'):
            if key in part:
                if part.split(': ')[1]:
                    found[key] = True
                break
    return found['filename'] and found['dial_id']
def extract_values(input_str):
    """Parse 'filename: X; dial_id: Y' into the tuple (X, Y).

    Returns False when either marker is absent from the string.
    """
    if 'filename' not in input_str or 'dial_id' not in input_str:
        return False
    values = {'filename': None, 'dial_id': None}
    for chunk in input_str.split('; '):
        if 'filename' in chunk:
            values['filename'] = chunk.split(': ')[1]
        elif 'dial_id' in chunk:
            values['dial_id'] = chunk.split(': ')[1]
    return values['filename'], values['dial_id']
def replace_code(s: str) -> str:
    """Collapse a fenced ``` code block in *s* into a short placeholder.

    Finds the first and last ``` fences and replaces everything between
    them (fences included) with a placeholder; strings without a fence
    are returned unchanged.
    """
    start_index = s.find("```")
    end_index = s.rfind("```")
    if start_index != -1 and end_index != -1:
        # Skip past the closing fence itself.  Bug fix: the original
        # clamped to len(s) - 1, which left a stray backtick whenever
        # the closing fence ended the string.
        end_index = min(end_index + 3, len(s))
        s = s[:start_index] + "Ommit Code Here ..." + s[end_index:]
    return s
def load_history_dialogue(filename, dial_id):
    """Return the 'dialogue' list of the item with id *dial_id* in the JSON file.

    Raises:
        ValueError: when no item carries the requested id.
    """
    data = load_json_file(filename)
    for item in data:
        if dial_id == item['id']:
            return item['dialogue']
    # Bug fix: the original message lacked the f-prefix and printed the
    # literal text "{dial_id}".
    raise ValueError(f'Invalid dial_id: {dial_id}')
def initialize_bot_and_dial(dialogues, dial_id):
    """Load a flat [user, assistant, user, assistant, ...] dialogue into the bot.

    Each user+assistant pair is summarized/embedded and pushed into the
    bot's turn history; the returned list is the gradio chat history of
    (user_display, assistant_display) tuples, optionally augmented with a
    translation from the module-level `translation_map`.
    """
    history = []
    turn_idx = 0
    history.append(('请输入待标注的对话ID', dial_id))
    total = len(dialogues) // 2
    # Walk the flat list two utterances at a time (user, assistant).
    for i in range(0, len(dialogues), 2):
        turn_idx += 1
        if i+1 < len(dialogues):
            user_text = dialogues[i]
            user_text_display = user_text
            # Append the translated text (with code blocks collapsed) when available.
            if translation_map and translation_map.get(user_text, None):
                zh_text = translation_map.get(user_text)
                zh_text = replace_code(zh_text)
                user_text_display += f"\n\n{zh_text}"
            # '__' renders as markdown emphasis in gradio; neutralize it.
            user_text_display = user_text_display.replace('__', 'prefix_')
            # user_text = dialogues[i].replace('\\n', '\n')
            assistant_text = dialogues[i+1]
            assistant_text_display = assistant_text
            if translation_map and translation_map.get(assistant_text, None):
                zh_text = translation_map.get(assistant_text)
                zh_text = replace_code(zh_text)
                assistant_text_display += f"\n\n{zh_text}"
            assistant_text_display = assistant_text_display.replace('__', 'prefix_')
            # assistant_text = dialogues[i+1].replace('\\n', '\n')
            cur = (replace_newline(user_text_display), replace_newline(assistant_text_display))
            # cur = (user_text_display, assistant_text_display)
            history.append(cur)
            # Build the turn text in the dialogue's own language.
            cur_text_without_index = '用户:{}\n\n助手:{}'.format(user_text, assistant_text)
            cur_text_with_index = '[第{}轮]\n\n用户:{}\n\n助手:{}'.format(turn_idx, user_text, assistant_text)
            if detect_language(user_text+assistant_text) == LANG_EN:
                cur_text_without_index = 'User: {}\n\nAssistant: {}'.format(user_text, assistant_text)
                cur_text_with_index = '[Turn {}]\n\nUser: {}\n\nAssistant: {}'.format(turn_idx, user_text, assistant_text)
            print(f"loading progress : {turn_idx} / {total}, {cur_text_with_index[:200]} ...\n")
            # Summarize + embed the turn and record it in the bot's memory.
            summary, embedding = summarize_embed_one_turn(bot, cur_text_without_index, cur_text_with_index)
            cur_turn = Turn(user_input=user_text, system_response=assistant_text, user_sys_text=cur_text_with_index, summ=summary, embedding=embedding)
            bot.add_turn_history(turn = cur_turn)
    return history
def my_chatbot(user_input, history):
    """Gradio callback handling one user message.

    Recognizes the reset/export/rollback chat commands and the
    "filename: ...; dial_id: ..." dialogue-loading syntax; otherwise
    builds a prompt (optionally augmented with retrieved long-term
    memory) and queries the bot.

    Returns:
        (history, history) — gradio expects the chat state twice.
    """
    global no_long_term_memory
    global naive_memory
    history = history or []
    user_input = user_input.strip()
    my_history = list(sum(history, ()))
    COMMAND_RETURN = '命令已成功执行!'
    # --- chat-control commands -------------------------------------
    if user_input in ['清空', 'reset']:
        # history.append((user_input, COMMAND_RETURN))
        history = []
        bot.clear_history()
        logger.info(f'[User Command]: {user_input} {COMMAND_RETURN}')
        return history, history
    elif user_input in ['导出', 'export']:
        # history.append((user_input, COMMAND_RETURN))
        bot.export_history()
        logger.info(f'[User Command]: {user_input} {COMMAND_RETURN}')
        return history, history
    elif user_input in ['回退', '回滚', 'roll back']:
        history.pop()
        bot.roll_back()
        logger.info(f'[User Command]: {user_input} {COMMAND_RETURN}')
        return history, history
    elif check_string_format(user_input):
        # "filename: ...; dial_id: ..." → bulk-load a recorded dialogue.
        filename, dial_id = extract_values(user_input)
        dialogues = load_history_dialogue(filename, dial_id)
        history = initialize_bot_and_dial(dialogues, dial_id)
        return history, history
    # --- normal turn ------------------------------------------------
    len_hist = len(bot.history)
    cur_turn_index = len_hist + 1
    if len_hist == 0:
        concat_input = get_first_prompt(user_input, args.model_name)
    else:
        retrieve = None
        if no_long_term_memory:
            pass
        elif cur_turn_index > 2:
            # Retrieval only kicks in from the third turn onward.
            if naive_memory:
                retrieve = bot.get_related_turn(user_input, k=args.similar_top_k, naive=True)
            else:
                retrieve = bot.get_related_turn(user_input, k=args.similar_top_k)
        else:
            pass
        logger.info(f"no_long_term_memory: {no_long_term_memory}")
        logger.info(f"retrieve: \n{retrieve}\n")
        concat_input = get_concat_input(user_input, bot.get_turn_for_previous(), hist_str=retrieve)
    logger.info(f'\n--------------\n[第{cur_turn_index}轮] concat_input:\n\n{concat_input}\n--------------\n')
    try:
        rsp: str = bot.ask(concat_input)
    except Exception as e:
        # Surface backend failures to the UI instead of crashing gradio.
        logger.error(f'ERROR: \n\n{e}')
        rsp = 'System error, please check log file for details.'
        history.append((user_input, rsp))
        return history, history
    system_text = rsp.strip()
    logger.info(f'\n--------------\n[第{cur_turn_index}轮] system_text:\n\n{system_text}\n--------------\n')
    my_history.append(user_input)
    output = system_text
    output_display = replace_newline(output)
    history.append((user_input, output_display))
    return history, history
if __name__ == '__main__':
parser = argparse.ArgumentParser()
model_choices = [ENGINE_DAVINCI_003, ENGINE_TURBO]
parser.add_argument("--apikey_file", type=str, default="./config/apikey.txt")
parser.add_argument("--model_name", type=str, default=ENGINE_DAVINCI_003, choices=model_choices)
parser.add_argument("--logfile", type=str, default="./logs/load_dialogue_log.txt")
parser.add_argument("--translation_file", type=str, default=None)
parser.add_argument("--no_long_term_memory", action='store_true', help='do not use long-term memory, default False')
parser.add_argument("--naive_memory", action='store_true', help='naive concat topk memory and concate history')
parser.add_argument("--embed_summary", action='store_true', help='use summary embedding for each turn')
# embed_summary
parser.add_argument("--similar_top_k", type=int, default=6)
args = parser.parse_args()
check_key_file(args.apikey_file)
log_path = args.logfile
makedirs(log_path)
# 配置日志记录
logger = logging.getLogger('dialogue_logger')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('【%(asctime)s - %(levelname)s】 - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
file_handler = logging.FileHandler(log_path, encoding='utf-8')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
set_chat_logger(logger)
set_api_logger(logger)
logger.info('\n\n\n')
logger.info('#################################')
logger.info('#################################')
logger.info('#################################')
logger.info('\n\n\n')
logger.info(f"args: \n\n{args}\n")
stamp = datetime2str()
# print(stamp)
if args.translation_file:
translation_map = load_json_file(args.translation_file)
bot = ChatBot(model_name=args.model_name)
# whether use scm for history memory
no_long_term_memory = True if args.no_long_term_memory else False
naive_memory = True if args.naive_memory else False
embed_summary = True if args.embed_summary else False
with gr.Blocks() as demo:
gr.Markdown(f"<h1><center>Long Dialogue Chatbot ({args.model_name}) for test</center></h1>")
chatbot = gr.Chatbot()
state = gr.State()
txt = gr.Textbox(show_label=False, placeholder="Ask me a question and press enter.").style(container=False)
txt.submit(my_chatbot, inputs=[txt, state], outputs=[chatbot, state])
demo.launch(share = False) | wbbeyourself/SCM4LLMs | dialogue_test.py | dialogue_test.py | py | 11,654 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "argparse.Namespace",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "core.chat.ChatBot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "core.chat.ChatBot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "os.path.p... |
31804320199 | # /usr/bin/python3.6
# -*- coding:utf-8 -*-
import collections
class TreeNode(object):
    """Minimal binary-tree node: a value plus optional children."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    """LeetCode 958: check whether a binary tree is complete."""

    def isCompleteTree(self, root):
        """
        :type root: TreeNode
        :rtype: bool

        Labels every node with its heap-style index within its level
        (root = 0, children of v are 2v and 2v+1) and groups labels per
        depth.  The tree is complete iff every level but the deepest is
        full and the deepest level's labels are the run 0..k-1 (left-packed).
        """
        if root is None:
            # An empty tree is trivially complete (the original crashed on
            # max() of an empty dict here).
            return True
        m = collections.defaultdict(list)
        def dfs(node, val, depth):
            if node:
                m[depth].append(val)
                dfs(node.left, val << 1, depth + 1)
                dfs(node.right, (val << 1) + 1, depth + 1)
        dfs(root, 0, 1)
        deepest = max(m.keys())
        for depth in range(1, deepest):
            # Bug fix: a full level `depth` holds 2**(depth-1) nodes; the
            # original compared against 1 << depth (twice too many), so
            # every tree deeper than one level was rejected.
            if len(m[depth]) != (1 << (depth - 1)):
                return False
        last = m[deepest]
        # Distinct labels summing to the triangular number must be 0..k-1.
        return sum(last) == (len(last) - 1) * len(last) / 2
def main():
    # Placeholder driver: instantiate the solver; no test cases are run here.
    s = Solution()
main()
| bobcaoge/my-code | python/leetcode/958_Check_Completeness_of_a_Binary_Tree.py | 958_Check_Completeness_of_a_Binary_Tree.py | py | 852 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 19,
"usage_type": "call"
}
] |
1794733837 | from flask import Flask, request
from flask import jsonify
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def hello():
    """Echo endpoint: logs the request and answers with a greeting JSON."""
    method = request.method
    incoming = request.get_json()
    print(method)
    print(incoming)
    return jsonify({
        "text": "Hola Mundo",
        "tipo_peticion": method
    })
@app.route('/webhook', methods=['GET','POST'])
def webhook():
    """Facebook-style webhook: GET verifies the token, POST receives messages.

    Bug fix: the original POST branch returned None, which makes Flask
    raise "view function did not return a response"; it now acknowledges
    the event with a 200 response.
    """
    tipo_de_peticion = request.method
    if tipo_de_peticion == 'GET':
        if request.args.get('hub.verify_token') == 'PALABRA_SECRETA':
            return request.args.get('hub.challenge')
        else:
            return "NECESITAS INGRESAR EL TOKEN DE VERIFICACION :D"
    elif tipo_de_peticion == 'POST':
        print("LLEGÓ UN MENSAJE NUEVO")
        mensaje = request.get_json()
        print(mensaje)
        return "EVENT_RECEIVED", 200
if __name__ == '__main__':
app.run()
| JoseAngelChepo/curso-chatbot | app.py | app.py | py | 932 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "flask.request.get_js... |
14359223498 | from datasets import load_conll2003_en
from conll_dictorizer import CoNLLDictorizer
from dictionizer import dictionize
from sklearn.feature_extraction import DictVectorizer
from keras.preprocessing.sequence import pad_sequences
from keras import models, layers
from keras.utils import to_categorical
from keras.layers import LSTM, Bidirectional, SimpleRNN, Dense
import numpy as np
def build_sequences(dic):
X, Y = [], []
for sentence in dic:
x, y = [], []
for word in sentence:
x.append(word['form'].lower())
y.append(word['ner'])
X.append(x)
Y.append(y)
return X,Y
def vocabulary(train_dict, dictionizer):
vocabulary_words = []
for dic in train_dict:
for d in dic:
vocabulary_words.append(d['form'].lower())
for d in dictionizer:
vocabulary_words.append(d)
return sorted(list(set(vocabulary_words)))
def to_index(seq, idx):
tmp_seq = []
for s in seq:
s_idx = []
for l in s:
#Get the value, if not in word_idx => 0 else value
if l in idx:
s_idx.append(idx[l])
else:
s_idx.append(0)
tmp_seq.append(s_idx)
return tmp_seq
if __name__ == "__main__":
train_sentences, dev_sentences, test_sentences, column_names = load_conll2003_en()
conll_dict = CoNLLDictorizer(column_names, col_sep=' +')
train_dict = conll_dict.transform(train_sentences)
test_dict = conll_dict.transform(test_sentences)
X, Y = build_sequences(train_dict)
dic = dictionize('glove.6B.100d.txt')
vocabulary = vocabulary(train_dict, dic)
#tmp_y = []
#for y in Y:
# for y_i in y:
# tmp_y.append(y_i)
tmp_y = sorted(list(set([ner for sentence in Y for ner in sentence])))
rev_word_idx = dict(enumerate(vocabulary, start=2))
word_idx = {v: k for k, v in rev_word_idx.items()}
rev_ner_id = dict(enumerate(tmp_y, start=2))
ner_idx = {v: k for k, v in rev_ner_id.items()}
nb_classes = len(tmp_y)
print(nb_classes)
M = len(vocabulary) + 2
#print(M)
N = 100
matrix = np.random.rand(M, N)
for word in vocabulary:
if word in dic.keys():
matrix[word_idx[word]] = dic[word]
#dict_vect = DictVectorizer(sparse=False)
dev_dict = conll_dict.transform(dev_sentences)
X_dev, Y_dev = build_sequences(dev_dict)
X_dev_i = to_index(X_dev, word_idx)
Y_dev_i = to_index(Y_dev, ner_idx)
X_dev_pad = pad_sequences(X_dev_i)
Y_dev_pad = pad_sequences(Y_dev_i)
Y_dev_cat = to_categorical(Y_dev_pad, num_classes=nb_classes + 2)
#Ta X och gör om till index-värden
X_idx = to_index(X, word_idx)
Y_idx = to_index(Y, ner_idx)
padded_x = pad_sequences(X_idx, maxlen=150)
padded_y = pad_sequences(Y_idx, maxlen=150)
y_train = to_categorical(padded_y, num_classes=nb_classes + 2)
model = models.Sequential()
model.add(layers.Embedding(
M,
N,
mask_zero=True,
input_length=None
))
model.layers[0].set_weights([matrix])
model.layers[0].trainable = False
#model.add(SimpleRNN(100, return_sequences=True))
model.add(layers.Dropout(0.25))
model.add(Bidirectional(LSTM(100, recurrent_dropout=0.25, return_sequences=True)))
model.add(layers.Dropout(0.25))
model.add(Bidirectional(LSTM(100, recurrent_dropout=0.25, return_sequences=True)))
model.add(layers.Dropout(0.25))
#model.add(Dense(512, activation='relu'))
#model.add(layers.Dropout(0.25))
model.add(Dense(nb_classes + 2, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
model.summary()
model.fit(padded_x, y_train, epochs=10, batch_size=128, validation_data=(X_dev_pad, Y_dev_cat))
X_test, Y_test = build_sequences(test_dict)
X_test_idx = to_index(X_test, word_idx)
Y_test_idx = to_index(Y_test, ner_idx)
#print('X[0] test idx', X_test_idx[0])
#print('Y[0] test idx', Y_test_idx[0])
X_test_padded = pad_sequences(X_test_idx)
Y_test_padded = pad_sequences(Y_test_idx)
#print('X[0] test idx passed', X_test_padded[0])
#print('Y[0] test idx padded', Y_test_padded[0])
Y_test_padded_vectorized = to_categorical(Y_test_padded, num_classes=nb_classes + 2)
#print('Y[0] test idx padded vectorized', Y_test_padded_vectorized[0])
print(X_test_padded.shape)
print(Y_test_padded_vectorized.shape)
test_loss, test_acc = model.evaluate(X_test_padded, Y_test_padded_vectorized)
#print('Loss:', test_loss)
#print('Accuracy:', test_acc)
print('X_test', X_test[0])
print('X_test_padded', X_test_padded[0])
corpus_ner_predictions = model.predict(X_test_padded)
print('Y_test', Y_test[0])
print('Y_test_padded', Y_test_padded[0])
print('predictions', corpus_ner_predictions[0])
ner_pred_num = []
for sent_nbr, sent_ner_predictions in enumerate(corpus_ner_predictions):
ner_pred_num += [sent_ner_predictions[-len(X_test[sent_nbr]):]]
print(ner_pred_num[:2])
ner_pred = []
for sentence in ner_pred_num:
ner_pred_idx = list(map(np.argmax, sentence))
ner_pred_cat = list(map(rev_ner_id.get, ner_pred_idx))
ner_pred += [ner_pred_cat]
result = open("result_ltsm_no_rnn.txt", "w+")
for id_s, sentence in enumerate(X_test):
for id_w, word in enumerate(sentence):
result.write(f"{word} {Y_test[id_s][id_w]} {ner_pred[id_s][id_w]}\n")
result.close() | niklashedstrom/EDAN95 | lab4/index_builder.py | index_builder.py | py | 5,607 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datasets.load_conll2003_en",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "conll_dictorizer.CoNLLDictorizer",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "dictionizer.dictionize",
"line_number": 56,
"usage_type": "call"
},
{
... |
4406266697 | from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import os
import py.twitter_credentials
import sys
class TwitterAuthenticator:
    """Builds a tweepy OAuth handler from the credentials module."""

    def twitter_authenticate(self):
        """Return an OAuthHandler carrying both app and access tokens."""
        handler = OAuthHandler(py.twitter_credentials.CONSUMER_KEY, py.twitter_credentials.CONSUMER_SECRET)
        handler.set_access_token(py.twitter_credentials.ACCESS_TOKEN, py.twitter_credentials.ACCESS_TOKEN_SECRET)
        return handler
class TwitterListener(StreamListener):
    """Stream listener that appends up to max_tweets raw tweets to a file."""

    def __init__(self, fetched_tweets_filename, max_tweets):
        self.fetched_tweets_filename = fetched_tweets_filename
        self.counter = 0
        self.max_tweets = max_tweets
        # Start from a clean output file.  Bug fix: the original bare
        # `except: pass` hid every failure (even KeyboardInterrupt);
        # only "file not there / not removable" errors are expected here.
        try:
            os.remove(self.fetched_tweets_filename)
        except OSError:
            pass

    def on_data(self, data):
        """Persist one raw tweet; returning False stops the stream."""
        self.counter += 1
        if self.counter > self.max_tweets:
            return False
        print("(" + str(self.counter) + "/" + str(self.max_tweets) + ")")
        try:
            #print(data)
            with open(self.fetched_tweets_filename, "a", newline='\n') as fp:
                fp.write(data)
            return True
        except Exception as e:
            # Narrowed from BaseException so Ctrl-C still interrupts.
            print("Error: %s\n" % str(e))
            return False

    def on_error(self, status):
        # 420 == rate limited; disconnect instead of hammering the API.
        if status == 420:
            return False
        print(status)
class TwitterStreamer:
    """Small facade around tweepy's Stream, filtered by hashtag list."""

    def __init__(self):
        self.twitterAuthenticator = TwitterAuthenticator()

    def stream_tweets(self, fetched_tweets_filename, hashtag_list, max_tweets):
        """Open a filtered stream and dump up to max_tweets into the file."""
        auth = self.twitterAuthenticator.twitter_authenticate()
        tweet_sink = TwitterListener(fetched_tweets_filename, max_tweets)
        Stream(auth, tweet_sink).filter(track = hashtag_list)
if __name__ == '__main__':
tweets_filename = "tweets.json"
hashtags = ['fifa', 'messi', 'ronaldo', 'fifawc', 'world cup']
myTwitterStreamer = TwitterStreamer()
try:
myTwitterStreamer.stream_tweets(tweets_filename, hashtags, 5)
except:
print("Connection Error")
#twitterClient = TwitterClient('evilhag')
#print(twitterClient.get_user_timeline_tweets(1))
| mukul29/TwiXtract | src/py/tweepy_streamer.py | tweepy_streamer.py | py | 1,911 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tweepy.OAuthHandler",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "py.twitter_credentials.twitter_credentials",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "py.twitter_credentials",
"line_number": 12,
"usage_type": "name"
},... |
15255951210 | from celery import Celery
import mysql.connector
import pandas as pd
from celery import shared_task
from snippets.models import SnippetHistory
from testproject.settings import DB_CONFIG
from datetime import datetime
app = Celery('tasks', broker='redis://localhost')
@app.task
def add(x, y):
    """Celery task that returns the sum of its two arguments."""
    total = x + y
    return total
@shared_task
def ImportFromXLSX(excel_file):
    """Import rows (code, serial_num, value) from every sheet of an Excel
    file into MySQL, recording rows whose code already exists.

    Returns the list of duplicate rows as dicts.
    """
    xl = pd.ExcelFile(excel_file)
    print(xl.sheet_names)
    cnx = None
    try:
        cnx = mysql.connector.connect(host=DB_CONFIG["HOST"], user=DB_CONFIG["USER"],
                                      password=DB_CONFIG["PASSWORD"], database=DB_CONFIG["DATABASE"])
    except mysql.connector.Error as err:
        # NOTE(review): on connect failure cnx stays None and the
        # cnx.cursor() call below raises AttributeError — confirm whether
        # the task should abort here instead.
        print('mysql connect err=={}', err)
    params =["code", "serial_num", "value"]
    params_str = ",".join(params)
    duplicates = []
    for sheet_name in xl.sheet_names:
        df = xl.parse(sheet_name, header=None)
        for val in df.values:
            cursor = cnx.cursor()
            # NOTE(review): the SQL is built with str.format from
            # spreadsheet cell values — SQL-injection prone; cell values
            # should be bound as query parameters.
            insert_sql = "INSERT INTO {}({}) SELECT '{}', '{}', '{}' WHERE NOT EXISTS(SELECT 1 FROM {} " \
                         "WHERE {}='{}')".format(DB_CONFIG["TABLE"], params_str, val[0], val[1], val[2],
                                                 DB_CONFIG["TABLE"], "code", val[0])
            cursor.execute(insert_sql)
            if cursor.rowcount == 0:
                # Row already present: log it to SnippetHistory and report it.
                SnippetHistory.objects.create(code=val[0], serial_num=val[1], date_time=datetime.now())
                duplicate = {"code": val[0], "serial_num": val[1], "value" : val[2]}
                duplicates.append(duplicate)
            else:
                cnx.commit()
            cursor.close()
return duplicates | crazy-djactor/amazon_for_test | snippets/tasks.py | tasks.py | py | 1,664 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "celery.Celery",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.ExcelFile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "mysql... |
28924145111 | from typing import List
class Solution:
    """LeetCode 121: best time to buy and sell stock (single transaction)."""

    def maxProfit(self, prices: List[int]) -> int:
        """Return the largest prices[j] - prices[i] with j > i, or 0.

        Single O(n) pass tracking the cheapest price seen so far.  The
        original also kept an unused `maximum` tracker and float 1e9
        sentinels; both are removed.
        """
        profit = 0
        lowest = None  # cheapest price seen so far
        for price in prices:
            if lowest is None or price < lowest:
                lowest = price
            elif price - lowest > profit:
                profit = price - lowest
        return profit
| GuSangmo/BOJ_practice | Leetcode/121.bestTimetoBuyandSellStock.py | 121.bestTimetoBuyandSellStock.py | py | 544 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 3,
"usage_type": "name"
}
] |
37656686792 | import sqlite3
def create_table():
    """Ensure the avinogradov(nimi, vanus) table exists."""
    connection = sqlite3.connect('avinogradov.db')
    try:
        connection.execute('CREATE TABLE IF NOT EXISTS avinogradov(nimi TEXT, vanus INTEGER)')
        connection.commit()
    finally:
        connection.close()
def auto_lisamine():
    """Prompt for a car listing and insert it into the cars table.

    Bug fixes vs. the original: the DB connection was never opened
    (`conn` was undefined), the prompts stored answers in `name`/`surname`
    while the INSERT read `first_name`/`last_name` (NameError), and the
    `cars` table was never created anywhere.
    """
    first_name = input("Sisesta nimi: ")
    last_name = input("Sisesta perekonnanimi: ")
    email = input("Sisesta e-posti aadress: ")
    car_make = input("Sisesta auto mark: ")
    car_model = input("Sisesta auto mudel: ")
    car_year = int(input("Sisesta auto aasta: "))
    price = int(input("Sisesta auto hind: "))
    conn = sqlite3.connect('avinogradov.db')
    try:
        conn.execute("""
            CREATE TABLE IF NOT EXISTS cars(
                first_name TEXT, last_name TEXT, email TEXT,
                car_make TEXT, car_model TEXT, car_year INTEGER, car_price INTEGER)
            """)
        conn.execute("""
            INSERT INTO cars (first_name, last_name, email, car_make, car_model, car_year, car_price)
            VALUES (?, ?, ?, ?, ?, ?, ?)
            """, (first_name, last_name, email, car_make, car_model, car_year, price))
        conn.commit()
    finally:
        conn.close()
    print("Auto lisatud edukalt!")
def lisa_kasutaja():
    """Prompt for a name and an age and insert them into avinogradov."""
    nimi = input('Sisestage kasutaja nimi: ')
    vanus = int(input('Sisestage kasutaja vanus: '))  # raises ValueError on non-numeric input
    conn = sqlite3.connect('avinogradov.db')
    c = conn.cursor()
    c.execute('INSERT INTO avinogradov VALUES(?,?)', (nimi, vanus))
    conn.commit()
    conn.close()
def auto_otsimine():
    """List up to 20 cars with year < 2000, oldest first.

    NOTE(review): create_table() defines avinogradov(nimi, vanus); the
    car_year column queried here does not exist in that schema, so
    sqlite3 raises OperationalError — confirm which table was intended.
    """
    # Connect to the database
    conn = sqlite3.connect('avinogradov.db')
    c = conn.cursor()
    # SQL query
    c.execute("SELECT * FROM avinogradov WHERE car_year < 2000 ORDER BY car_year ASC LIMIT 20;")
    result = c.fetchall()
    for results in result:
        print(results)
    conn.close()
def loe_avinogradov():
    """Print every row of the avinogradov table."""
    print("valisid 2")
    connection = sqlite3.connect('avinogradov.db')
    cursor = connection.cursor()
    for row in cursor.execute('SELECT * FROM avinogradov'):
        print(row)
    connection.close()
def otsi_kasutajat():
    """Search users by name and print the matching rows.

    Bug fix: the table is created as avinogradov(nimi, vanus); the
    original filtered on a non-existent first_name column, which raises
    sqlite3.OperationalError.
    """
    nimi = input('Sisestage kasutaja nimi: ')
    conn = sqlite3.connect('avinogradov.db')
    c = conn.cursor()
    c.execute('SELECT * FROM avinogradov WHERE nimi = ?', (nimi,))
    tulemused = c.fetchall()
    for tulemus in tulemused:
        print(tulemus)
    conn.close()
def kustuta_kasutaja():
    """Delete all users with the given name.

    Bug fix: same schema mismatch as otsi_kasutajat — the table column
    is nimi, not first_name.
    """
    nimi = input('Sisestage kasutaja nimi: ')
    conn = sqlite3.connect('avinogradov.db')
    c = conn.cursor()
    c.execute('DELETE FROM avinogradov WHERE nimi = ?', (nimi,))
    conn.commit()
    conn.close()
def valikud():
    """Print the interactive menu."""
    menu = (
        'Valige, mida soovite teha:',
        '1. Lisa kasutaja',
        '2. Loetle kõik avinogradov',
        '3. Otsi kasutajat',
        '4. Kustuta kasutaja',
        '5. adnmed in one l line',
        '6. Autod of "2000"',
        '7. Välju programmist',
    )
    for line in menu:
        print(line)
def main():
    """Interactive menu loop: dispatch the chosen action until '7' quits."""
    create_table()
    while True:
        valikud()
        valik = input('Sisestage valiku number: ')
        if valik == '1':
            lisa_kasutaja()
        elif valik == '2':
            loe_avinogradov()
        elif valik == '3':
            otsi_kasutajat()
        elif valik == '4':
            kustuta_kasutaja()
        elif valik == '5':
            auto_lisamine()
        elif valik == '6':
            auto_otsimine()
        elif valik == '7':
            # Exit the program.
            break
        else:
            print('Vale valik. Palun proovige uuesti.')
| ArtjomVinogradov/sqlite3 | sqlite3-main/sqlite3-main/sqlite-dll-win64-x64-3410200-20230508T093537Z-001/sqlite-dll-win64-x64-3410200/sqlite-tools-win32-x86-3410200/h3.py | h3.py | py | 3,324 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
... |
71088718504 | from django.urls import path
from . import views
# URL routes (purposes inferred from the view names — verify against views.py).
urlpatterns = [
    path('user/', views.UserAPI.as_view()),              # user account API
    path('login/', views.KakaoLogin.as_view()),          # Kakao login
    path('kakaopay/', views.kakaopay),                   # start a KakaoPay payment
    path('kakaopay/approval/', views.kakaopay_approval), # payment approval callback
    path('kakaopay/info/', views.kakaopay_info),         # payment info lookup
    path('kakaopay/refund/', views.kakaopay_refund),     # payment refund
    path('subscribes/', views.SubscribeInfo.as_view())   # subscription info
]
| epser93/Narang_Norang | backend/accounts/urls.py | urls.py | py | 413 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
8738194119 | import random
import numpy as np
import pandas as pd
from tqdm import tqdm
from os import mkdir
from os.path import join, exists
from pytorch_transformers import RobertaTokenizer
max_length = 100
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
SOS_ID = tokenizer.encode('<s>')[0]
EOS_ID = tokenizer.encode('</s>')[0]
def get_old_random_idx(read_path):
    """Recover per-dialogue turn offsets from a previously written encoded.txt.

    Each line's last tab-separated field is "start,target"; the returned
    list holds target - start for every line, in file order.
    """
    random_idx = []
    print('Getting the old random indices...')
    with open(read_path, 'r') as f:
        lines = f.read().splitlines()
        for line in tqdm(lines):
            s, t = line.split('\t')[-1].split(',')
            random_idx.append(int(t) - int(s))
    return random_idx
def create_dataset(csv_file_path, write_path, cascading = True, old_random_idx_path = None):
if not cascading:
old_random_idx = get_old_random_idx(old_random_idx_path)
print(set(old_random_idx), len(old_random_idx))
print('Reading from {}...'.format(csv_file_path))
df = pd.read_csv(csv_file_path)
print('DataFrame shape:', df.shape)
if not exists(write_path):
mkdir(write_path)
f_enc = open(join(write_path, 'encoded.txt'), 'w')
f_uttr = open(join(write_path, 'uttrs.txt'), 'w')
uttr_emots = []
N_rows = df.shape[0]
current_dialog_id = -1
current_dialog = []
current_dialog_emots = []
uttr_index = 0
N_examples = 0
final_dialog_ids = set()
for i in tqdm(range(N_rows)):
dialog_id = df.iloc[i]['dialogue_id']
uttr = df.iloc[i]['text']
emot = df.iloc[i,-42:-1].values.astype(np.float32)
if dialog_id != current_dialog_id:
if len(current_dialog) >= 2:
even_idx = [x * 2 + 1 for x in range(len(current_dialog) // 2)][:2]
random_idx = random.choice(even_idx)
uttr_ids = tokenizer.encode(current_dialog[0])
inp_ids = [SOS_ID] + uttr_ids + [EOS_ID]
inp_seg_ids = [0] * (len(uttr_ids) + 2)
break_point = len(current_dialog)
for k in range(1, len(current_dialog)):
uttr_ids = tokenizer.encode(current_dialog[k])
tar_ids = [SOS_ID] + uttr_ids + [EOS_ID]
tar_seg_ids = [k % 2] * (len(uttr_ids) + 2)
if len(inp_ids) <= max_length and len(tar_ids) - 1 <= max_length:
inp_str = ','.join([str(x) for x in inp_ids])
inp_seg_str = ','.join([str(x) for x in inp_seg_ids])
tar_str = ','.join([str(x) for x in tar_ids])
tar_seg_str = ','.join([str(x) for x in tar_seg_ids])
if not cascading:
if current_dialog_id not in final_dialog_ids and k == old_random_idx[N_examples]:
f_enc.write('{}\t{}\t{}\t{}\t{},{}\n'.format(
inp_str, inp_seg_str, tar_str, tar_seg_str, uttr_index, uttr_index + k))
final_dialog_ids.add(current_dialog_id)
N_examples += 1
else:
f_enc.write('{}\t{}\t{}\t{}\t{},{}\n'.format(
inp_str, inp_seg_str, tar_str, tar_seg_str, uttr_index, uttr_index + k))
N_examples += 1
else:
break_point = k
break
inp_ids += ([EOS_ID] + uttr_ids + [EOS_ID])
inp_seg_ids += tar_seg_ids
if break_point > 1:
uttr_index += break_point
for k in range(break_point):
f_uttr.write('{} | {}\n'.format(current_dialog_id, current_dialog[k]))
uttr_emots.append(current_dialog_emots[k])
current_dialog = [uttr]
current_dialog_emots = [emot]
current_dialog_id = dialog_id
else:
current_dialog.append(uttr)
current_dialog_emots.append(emot)
print(N_examples)
if len(current_dialog) >= 2:
even_idx = [x * 2 + 1 for x in range(len(current_dialog) // 2)][:2]
random_idx = random.choice(even_idx)
uttr_ids = tokenizer.encode(current_dialog[0])
inp_ids = [SOS_ID] + uttr_ids + [EOS_ID]
inp_seg_ids = [0] * (len(uttr_ids) + 2)
break_point = len(current_dialog)
for k in range(1, len(current_dialog)):
uttr_ids = tokenizer.encode(current_dialog[k])
tar_ids = [SOS_ID] + uttr_ids + [EOS_ID]
tar_seg_ids = [k % 2] * (len(uttr_ids) + 2)
if len(inp_ids) <= max_length and len(tar_ids) - 1 <= max_length:
inp_str = ','.join([str(x) for x in inp_ids])
inp_seg_str = ','.join([str(x) for x in inp_seg_ids])
tar_str = ','.join([str(x) for x in tar_ids])
tar_seg_str = ','.join([str(x) for x in tar_seg_ids])
if not cascading:
if current_dialog_id not in final_dialog_ids and k == old_random_idx[N_examples]:
f_enc.write('{}\t{}\t{}\t{}\t{},{}\n'.format(
inp_str, inp_seg_str, tar_str, tar_seg_str, uttr_index, uttr_index + k))
final_dialog_ids.add(current_dialog_id)
N_examples += 1
else:
f_enc.write('{}\t{}\t{}\t{}\t{},{}\n'.format(
inp_str, inp_seg_str, tar_str, tar_seg_str, uttr_index, uttr_index + k))
N_examples += 1
else:
break_point = k
break
inp_ids += ([EOS_ID] + uttr_ids + [EOS_ID])
inp_seg_ids += tar_seg_ids
if break_point > 1:
uttr_index += break_point
for k in range(break_point):
f_uttr.write('{} | {}\n'.format(current_dialog_id, current_dialog[k]))
uttr_emots.append(current_dialog_emots[k])
print('Number of examples:', N_examples)
np.save(join(write_path, 'uttr_emots.npy'), np.array(uttr_emots))
f_enc.close()
f_uttr.close()
# create_dataset('os_emobert/train.csv', 'os_emobert/train')
# create_dataset('os_emobert/valid.csv', 'os_emobert/valid')
# create_dataset('os_emobert/test.csv', 'os_emobert/test')
create_dataset('os_emobert/test.csv', 'os_emobert/test_human', cascading = False, old_random_idx_path = 'os/test_human/encoded.txt')
# create_dataset('osed_emobert/train.csv', 'osed_emobert/train')
# create_dataset('osed_emobert/valid.csv', 'osed_emobert/valid')
# create_dataset('osed_emobert/test.csv', 'osed_emobert/test')
# create_dataset('osed_emobert/test.csv', 'osed_emobert/test_human', cascading = False, old_random_idx_path = 'osed/test_human/encoded.txt')
| yehchunhung/EPIMEED | datasets/encode_os_yubo.py | encode_os_yubo.py | py | 6,874 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pytorch_transformers.RobertaTokenizer.from_pretrained",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytorch_transformers.RobertaTokenizer",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 19,
"usage_type": ... |
9134097621 | #!/usr/bin/python
import argparse
def find_max_profit(prices):
max_profit = float("-inf") # start with neg. infinity (account for least worse loss)
for p in range(1,len(prices)):
profit = prices[p] - min(prices[:p])
if profit > max_profit:
max_profit = profit
return max_profit
if __name__ == '__main__':
# This is just some code to accept inputs from the command line
parser = argparse.ArgumentParser(description='Find max profit from prices.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer price')
args = parser.parse_args()
print("A profit of ${profit} can be made from the stock prices {prices}.".format(profit=find_max_profit(args.integers), prices=args.integers))
| Tclack88/Lambda | CS/CS-2-Algorithms/2-algorithms/stock_prices/stock_prices.py | stock_prices.py | py | 772 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
}
] |
71575293223 | #-*- encoding:utf-8 -*-
#written by: Jiao Zhongxiao
import os
import shutil
import xml.etree.ElementTree as ET
import io
import binascii
import sys
from threading import Thread
from ftplib import FTP
#import subprocess
os.chdir( os.path.split( sys.argv[0] )[0] )
#config----------------------------------------------------------
publishDir = "D:\\" #缓存目录
devDir = "E:\\workspace\\TowerFacebookDev\\branches\\TD_Branch_001\\release\\flash"
assets = ".json,.jpg,.png,.swf,.xml,.mp3"
#ver_config为必须
#bin-debug, bin-release目录的文件只会加上版本号后缀
#其它目录文件的会根据算法生成CDN版本后缀
#(CRC32(文件内容)& 0xFFFFFFFF)的32进制表示
publishDirs = "ver_config,bin-debug,release"
#end config------------------------------------------------------
ver_config = "ver_config"
projectDir = os.getcwd() + "/../"
newFile = 0
print( "欢迎使用Tower版本发布工具" )
print( "使用前请确保已编辑了此文件做好了相关配置" )
#检查运行环境
#print( os.environ["PATH"] )
version = ""
def checkVersion():
if len(version) > 0:
return True
return False
while True:
version = input( "请输入这次的版本号后 <Enter 回车> 继续:\n" )
if checkVersion():
break
#version = "13"
#1、更新SVN-------------------------------------------------------
print( "1、更新SVN" )
def checkToolPath( tool ):
environ = os.environ["PATH"].split( ";" )
for possiblePath in environ:
possiblePath = os.path.join( possiblePath, tool )
#print( possiblePath )
if os.path.exists( possiblePath ):
return True;
return False;
hasSVN = checkToolPath( "svn.exe" )
def updateSVN():
if hasSVN:
os.system( "svn update " + projectDir )
os.system( "svn update " + devDir )
else:
print( "没有SVN工具或没有添加到环境变量" )
input( "请手动更新SVN,完成后 <Enter 回车> 继续" )
updateSVN();
#2、编译新版本-----------------------------------------------------
print( "2、编译新版本" )
hasFlexSDK = checkToolPath( "mxmlc.exe" )
def publishRelease():
if hasFlexSDK:
os.system( "publish.bat" )
else:
print( "没有Flex工具或没有添加到环境变量" )
input( "请手动导出发行版,完成后 <Enter 回车> 继续" )
publishRelease()
#清理bin-debug目录
def clearDir( dirPath ):
print( "清理目录:" + dirPath )
for file in os.listdir( dirPath ):
filepath = os.path.join( dirPath, file )
if os.path.isdir( filepath ) and filepath.find( ".svn" ) == -1:
os.system( "rd /s /q " + filepath )
clearDir( os.path.abspath( os.path.join( projectDir, "bin-debug" ) ) )
clearDir( os.path.abspath( os.path.join( projectDir, "release/swffile/towerDefenseFight" ) ) )
#重新生成缓存目录
topDir = os.path.join( publishDir, str(version) )
os.system( "rd /s /q " + topDir )
os.mkdir( topDir )
#Copy所有文件
def copyFiles():
dirs = publishDirs.split( "," )
for dir in dirs:
print( "Now working in:" + dir )
fixDir = os.path.join( projectDir, dir )
for rootDir, assetsDirs, files in os.walk( fixDir ):
for file in files:
ext = os.path.splitext( file )[1]
#print( ext )
if ext != "" and assets.find( ext ) != -1:
if dir == "bin-debug" or dir == "bin-release":
#
shutil.copy( os.path.join( rootDir, file ), topDir )
else:
relPath = os.path.relpath( os.path.join( rootDir, file ), projectDir )
print( "原始:" + relPath )
relPath = os.path.join( topDir, relPath )
print( "新的:" + relPath )
fileDir = os.path.dirname( relPath )
print( "FileDir:" + fileDir )
if not os.path.exists( fileDir ):
os.makedirs( fileDir )
shutil.copyfile( os.path.join( rootDir, file ), relPath )
print( "Copy " + os.path.join( rootDir, file ) + " 完成" )
copyFiles()
def addVersionFix():
global newFile
files = os.listdir( topDir )
for file in files:
oldFile = os.path.join( topDir, file )
print( oldFile + "---" + str(os.path.isfile( oldFile )) )
if os.path.isfile( oldFile ):
ext = os.path.splitext( file )[1]
newFile = oldFile.replace( ext, "_" + str(version) + ext )
os.replace( oldFile, newFile )
print( "重命名:" + oldFile + "<>" + newFile )
#加版本号
curDir = os.path.join( topDir, ver_config )
files = os.listdir( curDir )
for file in files:
oldFile = os.path.join( curDir, file )
newFile = os.path.join( curDir, file )
print( oldFile + "---" + str(os.path.isfile( oldFile )) )
if os.path.isfile( oldFile ):
ext = os.path.splitext( file )[1]
newFile = oldFile.replace( ext, "_" + str(version) + ext )
os.replace( oldFile, newFile )
print( "重命名:" + oldFile + "<>" + newFile )
addVersionFix()
#打版本
arr32 = [ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", ]
def getcrc32( crc ):
crc32 = []
while crc >= 0:
res = crc % 32
crc32.append( arr32[res] )
if crc <= 0 or crc == res:
break
else:
crc = int((crc - res) / 32)
crc32.reverse()
return "".join( crc32 )
def packVersion():
global newFile
configfile = newFile
config = ET.parse( newFile )
root = config.getroot()
for folderElement in root:
folder = folderElement.get( "folder" )
for item in folderElement:
file = os.path.join( topDir, folder + item.text )
# if not os.path.exists( file ):
# if item.get( "id" ) == "magicUpgradePanel":
# iitem = root.find( "./swffile/item[@id='magicResearchPanel']" )
# item.set( "size", iitem.get( "size" ) )
# item.text = iitem.text
# continue
if os.path.exists( file ):
ext = os.path.splitext( file )[1]
print( "Processing:" + file )
fio = io.FileIO( file, "r" )
fdata = fio.readall()
fio.close()
crc = binascii.crc32( fdata ) & 0xFFFFFFFF
crc = getcrc32( crc )
item.set( "size", str(os.path.getsize( file )) )
item.text = item.text.replace( ext, "_" + crc + ext )
newFile = file.replace( ext, "_" + crc + ext )
os.replace( file, newFile )
config.write( configfile )
packVersion()
#复制到DEV目录
def copyToDev():
for rootDir, assetsDirs, files in os.walk( topDir ):
for file in files:
print( "Copying:" + file )
if os.path.isfile( os.path.join( rootDir, file ) ):
relPath = os.path.relpath( os.path.join( rootDir, file ), topDir )
print( "原始:" + relPath )
relPath = os.path.join( devDir, relPath )
print( "新的:" + relPath )
fileDir = os.path.dirname( relPath )
print( "FileDir:" + fileDir )
if not os.path.exists( fileDir ):
os.makedirs( fileDir )
shutil.copyfile( os.path.join( rootDir, file ), relPath )
print( "Copy " + os.path.join( rootDir, file ) + " 完成" )
copyToDev()
#提交SVN
def commitSVN():
print( "提交SVN----------------------------------------------" )
if hasSVN:
os.chdir( devDir )
os.system( "svn add * --force" )
msg = input( "请输入SVN日志:\n" )
if ( len(msg) <= 1 ):
msg = "版本更新,提交测试!"
os.system( "svn commit -m " + msg )
else:
input( "请手动提交SVN后 <Enter 回车> 继续" )
commitSVN()
#上传FTP
print( "上传FTP----------------------------------------------" )
ftp = FTP( "172.17.0.78", "www", "uiEd53do64" )
#ftp = FTP( "127.0.0.1", "jiaox99" )
ftp.cwd( "dev-fb-td.shinezoneapp.com/web/dev_branch/flash" )
def sendFile( allFiles ):
while len(allFiles) > 0:
uploadfile = allFiles.pop( 0 )
print( "Uploading:" + uploadfile )
relPath = os.path.relpath( uploadfile, topDir ).replace( "\\", "/" )
fileDir = os.path.dirname( relPath )
if len(fileDir) > 0:
try:
ftp.mkd( fileDir )
except:
print( "Dir is ok!" )
curUploadingFile = open( uploadfile, 'rb' )
ftp.storbinary( "STOR " + relPath, curUploadingFile )
def collectFiles():
allFiles = []
for rootDir, assetsDirs, files in os.walk( topDir ):
for file in files:
filePath = os.path.join( topDir, os.path.join( rootDir, file ) )
allFiles.append( filePath )
return allFiles
sendFile( collectFiles() )
input( "Well done!版本发布完成! <Enter 回车> 退出" ) | edwardandy/kylinProject | KylinGame/py/towerPublishTool.py | towerPublishTool.py | py | 8,123 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 13,... |
29101112137 | import numpy
from scipy import stats
speed = [99, 86, 87, 88, 111, 86, 103, 87, 94, 78, 77, 85, 86]
ages = [5, 31, 43, 48, 50, 41, 7, 11, 15, 39, 80, 82, 32, 2, 8, 6, 25, 36, 27, 61, 31]
mean = numpy.mean(speed)
median = numpy.median(speed)
mode = stats.mode(speed)
std = numpy.std(speed)
var = numpy.var(speed)
percentile = numpy.percentile(ages, 75)
print(mean)
print(median)
print(mode)
print(std)
print(var)
print(percentile)
| maxyvisser/Python-projects | ML/ML intro.py | ML intro.py | py | 452 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.mean",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scipy.stats.mode",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number"... |
4827725138 | #!/usr/bin/env python3
from pynput import keyboard
from pynput.keyboard import Key, Controller
import pyperclip
import sys
f=''
usekey=''
autoenter=False
def on_press(key):
global usekey
if key == Key.esc:
f.close()
return False # stop listener
if usekey=='':
usekey=key
print('Will use: '+str(usekey))
elif key==usekey:
print('UseKey pressed: ' + str(key))
line = f.readline()
if not line:
print('Reached EOF')
f.close()
return False
else:
pyperclip.copy(line.strip())
print(pyperclip.paste())
try:
keyboardController = Controller()
keyboardController.press(Key.ctrl.value)
keyboardController.press('v')
keyboardController.release('v')
keyboardController.release(Key.ctrl.value)
if autoenter:
keyboardController.press(Key.enter.value)
keyboardController.release(Key.enter.value)
except:
print('failed to press crtl-v')
def main(filename, enter: ('makes an enter after insert', 'flag', 'e')):
"Insert line by line of a file into a GUI by just pressing a key"
global f
global autoenter
autoenter=enter
f=open(filename,"r")
print('Define the Key to use by pressing it:')
listener = keyboard.Listener(on_press=on_press)
listener.start() # start to listen on a separate thread
listener.join() # remove if main thread is polling self.keys
if __name__ == '__main__':
import plac; plac.call(main)
| boba2fett/ShitCollection | python/insFiles/insFilesCLI.py | insFilesCLI.py | py | 1,670 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pynput.keyboard.Key.esc",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pynput.keyboard.Key",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pyperclip.copy",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pyper... |
347715655 | from scipy.sparse.linalg import eigs, eigsh
import utils.utils as utils
import sys
import numpy as np
import time
import math
from sklearn import mixture
sys.path.append("..")
from utils.laplacian import calLaplacianMatrix
from sklearn.cluster import KMeans
def run_ES_SCOREplus(W, k, c=0.1):
start = time.time()
r = k + 1
n = len(W)
Degree = np.sum(W, axis=1)
# D = np.diag(Degree)
delta = c * max(Degree)
# I = np.identity(len(Degree))
d = 1. / np.sqrt(np.add(delta, Degree))
# d = 1. / np.add(delta, Degree)
# D^(-1/2) L D^(-1/2)
sqrtMatrix = np.diag(d)
L = np.dot(np.dot(sqrtMatrix, W), sqrtMatrix)
eig_val, eig_vect = eigsh(L, r, which='LM')
tao = 0.2
ratio = eig_val[k] / eig_val[k - 1]
F = np.dot(eig_vect[:, :r], np.diag(eig_val[: r]))
if ratio < 1 + tao:
F = F[:, :]
for i in range(r - 1):
F[:, i] = np.multiply(eig_vect[:, i], 1. / eig_vect[:, r - 1])
temp = (eig_val[0] - eig_val[1]) / eig_val[1]
# print(temp)
if temp < c:
F = F[:, 1:(r-1)]
# sp_kmeans = KMeans(n_clusters=k).fit(F)
sp_kmeans = mixture.BayesianGaussianMixture(n_components=k + 1, covariance_type='full').fit(F)
else:
F = F[:, :r - 1]
for i in range(r - 1):
F[:, i] = np.multiply(eig_vect[:, i], 1. / eig_vect[:, r - 1])
temp = (eig_val[0] - eig_val[1]) / eig_val[1]
# print(temp)
if temp < c:
F = F[:, 1:(r - 1)]
# sp_kmeans = KMeans(n_clusters=k).fit(F)
sp_kmeans = mixture.BayesianGaussianMixture(n_components=k + 1, covariance_type='full').fit(F)
# print(ratio, 1 + tao)
end = time.time()
# print(p, max(l)-min(l)+1)
# return sp_kmeans.labels_, end - start
return sp_kmeans.predict(F), end - start
| yz24/RBF-SCORE | rbf-score/ES_SCOREplus.py | ES_SCOREplus.py | py | 1,894 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": ... |
25469063444 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 18:04:51 2018
@author: 우람
"""
# Example 1: Multi-class classification
#
# Classify stocks into deciles on their returns
# Features: past 3, 6, 12 month returns
# y: class label (0, ..., 9) based on the future 1 month return.
import numpy as np
import pandas as pd
import keras
from keras import regularizers
from keras.layers import Input, Dense, Dropout
from keras.models import Model
import pickle
import os
import tensorflow as tf
os.chdir('C:\\Users\\우람\\Desktop\\kaist\\3차학기\\알고\\practice')
#############################################
# Get data
#############################################
sample_universe = pickle.load(open("data/sample1.pickle", "rb"))
sess=tf.Session()
sess.run(tf.global_variables_initializer())
x = {}
y = {}
ret1m = {}
for m in range(36):
current_sample = sample_universe[m]
x[m] = current_sample.loc[:, ['ret3', 'ret6', 'ret12']]
y[m] = current_sample.loc[:, 'label']
ret1m[m] = current_sample.loc[:, 'target_ret_1']
# Split the sample
# Training set
x_tra = np.concatenate([v for k, v in x.items() if k < 12])
y_tra = np.concatenate([v for k, v in y.items() if k < 12])
# Validation set
x_val = np.concatenate([v for k, v in x.items() if k >= 12 if k < 24])
y_val = np.concatenate([v for k, v in y.items() if k >= 12 if k < 24])
# Test set
x_tes = np.concatenate([v for k, v in x.items() if k >= 24 if k < 36])
y_tes = np.concatenate([v for k, v in y.items() if k >= 24 if k < 36])
#############################################
# Train the model
#############################################
# Model building
num_layer = 5
num_neuron = [64, 32, 16, 8, 4]
activation = 'relu'
optimizer = 'adam'
dropout_rate = 0
l1_norm = 0
num_class = 10
input = Input(shape=(x_tra.shape[1],)) #input layer 구성
hidden = input
for i in range(num_layer): #hidden layer를 하나씩 생성해서 쌓아주기
#num_neuron 몇개의 뉴런을 만들건지
#l1 regularization = Lasso.. 오버피팅을 피하기 위한 방법. 하나의 인풋에 너무 많은 웨이트를 주지 않기 위함
hidden = Dense(num_neuron[i], activation=activation, kernel_regularizer=regularizers.l1(l1_norm))(hidden)
hidden = Dropout(dropout_rate)(hidden)
output = Dense(num_class, activation='softmax')(hidden) #output에선 softmax를 써야한다.
model = Model(inputs=input, outputs=output)
model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.summary()
# Normalize input
x_mean = x_tra.mean(axis=0)
x_std = x_tra.std(axis=0) #x_val이든, x_tes이든 정규화할 때 평균과 표준편차는 x_tra로 한다!! 실제론 미래의 mean과 std를 모르니까'''
x_tra = (x_tra - x_mean) / x_std
x_val = (x_val - x_mean) / x_std
x_tes = (x_tes - x_mean) / x_std
# Fit the model: early stopping based on validation loss
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0, patience=2, verbose=2)
model.fit(x_tra, y_tra, validation_data=(x_val, y_val), epochs=25,
batch_size=32, callbacks=[early_stop], verbose=2) #메모리 낭비를 막기위해서 배치사이즈는 보통 2의 배수로 한다..
# Evaluate
loss_tra, acc_tra = model.evaluate(x_tra, y_tra, verbose=0)
loss_val, acc_val = model.evaluate(x_val, y_val, verbose=0)
loss_tes, acc_tes = model.evaluate(x_tes, y_tes, verbose=0)
stat_performance = pd.DataFrame({'Train':[loss_tra, acc_tra], 'Valid':[loss_val, acc_val], 'Test':[loss_tes, acc_tes]} ,index=['loss', 'acc'])
print(stat_performance)
# Test set class returns
class_return = np.zeros([12,10])
class_count = np.zeros([12,10]) #매 월 몇 개의 주식이 들어가 있는지
for m in range(24,36):
x_m = (x[m] - x_mean) / x_std #normalize!
class_prob = model.predict(x_m)
pre_class = class_prob.argmax(axis=-1)
for cls in range(num_class):
idx = pre_class == cls
if any(idx):
class_return[m-24][cls] = np.mean(ret1m[m].values[pre_class == cls])
class_count[m-24][cls] = np.sum([pre_class == cls])
print(pd.DataFrame(class_count))
print(pd.DataFrame(class_return))
r_mean = np.mean(class_return, axis=0)
r_std = np.std(class_return, axis=0)
financial_performance = pd.DataFrame([r_mean, r_std, r_mean/r_std], index=['mean','std','sr'])
print(financial_performance)
tf.reset_default_graph()
| KWOOR/Python-Algorithm | exercise1.py | exercise1.py | py | 4,384 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_i... |
24925094334 | import sys
import itertools
import copy
input = sys.stdin.readline
n = int(input())
A = list(map(int, input().strip().split()))
b = list(map(int, input().strip().split()))
B = []
for i in range(4):
for j in range(b[i]):
B.append(i)
B = list(itertools.permutations(B, n-1))
result = []
for i in range(len(B)):
C = copy.copy(A)
for j in range(1, n):
if B[i][j-1] == 0:
C[j] = C[j-1] + C[j]
elif B[i][j-1] == 1:
C[j] = C[j-1] - C[j]
elif B[i][j-1] == 2:
C[j] = C[j-1] * C[j]
elif B[i][j-1] == 3:
C[j] = int(C[j-1] / C[j])
result.append(C[-1])
print(max(result))
print(min(result))
| pla2n/python_practice | python/backjoon/14888_연산자 끼워넣기.py | 14888_연산자 끼워넣기.py | py | 685 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "itertools.permutations",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 18,
"usage_type": "call"
}
] |
31530311323 | import cv2
# ^ Must be installed via pip
import os
import re
# ^ Should be there by default idk why it didnt work
import pytesseract
from PIL import Image
def optimize_image_for_ocr(image_path, lang='eng', image_dpi=300, image_format='png', whitelist = None, blacklist = None):
# Load the image
img = cv2.imread(image_path)
# Preprocess the image
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img, 1)
img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Save the preprocessed image
preprocessed_image_path = f"{image_path}_preprocessed.{image_format}"
cv2.imwrite(preprocessed_image_path, img)
# Load the preprocessed image using PIL
pil_image = Image.open(preprocessed_image_path)
# Set the image DPI
pil_image = pil_image.resize((pil_image.width * image_dpi // 72, pil_image.height * image_dpi // 72))
# Specify the language of the text
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
# Specify whitelist + blacklist
if whitelist is not None:
whitelist_option = f"tessedit_char_whitelist={whitelist}"
else:
whitelist_option = ''
if blacklist is not None:
blacklist_option = f"tessedit_char_blacklist={blacklist}"
else:
blacklist_option = ''
custom_config = f'-l {lang} --dpi {image_dpi} {whitelist_option} {blacklist_option}'
# Extract text from the image
text = pytesseract.image_to_string(pil_image, config=custom_config)
# Remove the preprocessed image file
os.remove(preprocessed_image_path)
return text
# testing
# ingredient_list = optimize_image_for_ocr(r"sem2\brownine_mix.png", whitelist='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ()[],')
# print(ingredient_list)
def parse_ingredients(text):
# Remove parentheses and their contents
text = re.sub(r'\([^)]*\)', '', text)
# Replace newline characters with spaces
text = text.replace('\n', ' ')
# Remove paranthesis again in case any were left over
text = text.replace('(','').replace(')','')
# Split ingredients by commas
ingredients = text.split(',')
# Remove leading and trailing whitespace from each ingredient
ingredients = [ingredient.strip() for ingredient in ingredients]
# Remove any empty ingredients
ingredients = [ingredient for ingredient in ingredients if ingredient]
return ingredients
# testing
# print(parse_ingredients(ingredient_list))
| WorcestershireSample/FIA-Project | Tesseract/OCRfunc.py | OCRfunc.py | py | 2,641 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.medianBlur",
... |
33359438698 | import datetime
from typing import Optional
from zoneinfo import ZoneInfo
from fastapi import HTTPException
from passlib import pwd
from sqlalchemy import extract, or_, and_, func, Float, text, desc
from sqlalchemy.orm import aliased
import app.auth as auth
import app.models as models
import app.schemas as schemas
from app.database import SessionLocal
def get_plant_by_client(db: SessionLocal, client_id: int):
return db.query(models.Plant).filter(models.Plant.client_id == client_id).all()
def get_machine_by_plant(db: SessionLocal, plant_id: int):
return db.query(models.Machine).filter(models.Machine.plant_id == plant_id).order_by(models.Machine.code).all()
def create_machine(db: SessionLocal, machine: schemas.MachineCreate):
db_machine = models.Machine(date_created=datetime.datetime.now(ZoneInfo("Europe/Rome")), name=machine.name,
code=machine.code,
brand=machine.brand, model=machine.model, serial_number=machine.serial_number,
production_year=machine.production_year, cost_center=machine.cost_center,
description=machine.description, plant_id=machine.plant_id,
robotic_island=machine.robotic_island)
db.add(db_machine)
db.commit()
db.refresh(db_machine)
return db_machine
def get_plants(db: SessionLocal):
return db.query(models.Plant, models.Client).join(models.Client,
models.Plant.client_id == models.Client.id).order_by(
models.Plant.id).all()
def get_machines(db: SessionLocal, sort: Optional[str] = None, limit: Optional[int] = None,
order: Optional[str] = None, q: Optional[str] = None):
query = db.query(models.Machine, models.Plant, models.Client).join(models.Plant,
models.Machine.plant_id == models.Plant.id).join(
models.Client, models.Plant.client_id == models.Client.id)
if order:
if order == 'desc':
query = query.order_by(desc(text(sort)))
else:
query = query.order_by(text(sort))
if not order:
query = query.order_by(models.Machine.code)
if q:
query = query.filter(or_(models.Machine.name.ilike(f"%{q}%"),
models.Machine.code.ilike(f"%{q}%"),
models.Machine.brand.ilike(f"%{q}%"),
models.Machine.production_year.ilike(f"%{q}%"),
models.Machine.cost_center.ilike(f"%{q}%"),
models.Plant.city.ilike(f"%{q}%"),
models.Plant.address.ilike(f"%{q}%"),
models.Client.name.ilike(f"%{q}%")))
return query.limit(limit).all()
def get_reports(db: SessionLocal, user_id: Optional[int] = None, limit: Optional[int] = None):
query = db.query(
models.Report,
models.Commission.id.label("commission_id"),
models.Commission.code.label("commission_code"),
models.Machine.id.label("machine_id"),
models.Machine.name.label("machine_name"),
models.Machine.brand.label("machine_brand"),
models.Machine.code.label("machine_code"),
models.Machine.cost_center.label("cost_center"),
models.User.id.label("operator_id"),
models.User.first_name,
models.User.last_name,
models.Client.id.label("client_id"),
models.Client.name.label("client_name"),
models.Plant.id.label("plant_id"),
models.Plant.city.label("plant_city"),
models.Plant.address.label("plant_address")
).select_from(models.Report).outerjoin(models.Commission,
and_(models.Report.type == "commission",
models.Report.work_id == models.Commission.id)).outerjoin(
models.Machine, and_(models.Report.type == "machine",
models.Report.work_id == models.Machine.id)).join(models.User,
models.Report.operator_id == models.
User.id).outerjoin(
models.Plant, and_(models.Report.type == "machine", models.Machine.plant_id == models.Plant.id)).join(
models.Client,
or_(models.Commission.client_id == models.Client.id,
and_(models.Plant.client_id == models.Client.id,
models.Report.type == "machine")))
if user_id:
query = query.filter(models.Report.operator_id == user_id)
return query.order_by(models.Report.date.desc()).limit(limit).all()
def get_report_by_id(db: SessionLocal, report_id: int):
supervisor = aliased(models.User)
return db.query(
models.Report,
models.Commission.id.label("commission_id"),
models.Commission.code.label("commission_code"),
models.Commission.description.label("commission_description"),
models.Machine.id.label("machine_id"),
models.Machine.name.label("machine_name"),
models.Machine.brand.label("machine_brand"),
models.Machine.code.label("machine_code"),
models.Machine.cost_center.label("cost_center"),
models.User.id.label("operator_id"),
models.User.first_name,
models.User.last_name,
models.Client.id.label("client_id"),
models.Client.name.label("client_name"),
models.Client.city.label("client_city"),
models.Plant.id.label("plant_id"),
models.Plant.name.label("plant_name"),
models.Plant.city.label("plant_city"),
models.Plant.address.label("plant_address"),
supervisor.id.label("supervisor_id"),
supervisor.first_name.label("supervisor_first_name"),
supervisor.last_name.label("supervisor_last_name")
).select_from(models.Report).outerjoin(
models.Commission,
and_(models.Report.type == "commission", models.Report.work_id == models.Commission.id)).outerjoin(
models.Machine, and_(models.Report.type == "machine", models.Report.work_id == models.Machine.id)).join(
models.User, models.Report.operator_id == models.User.id).outerjoin(
models.Plant, models.Machine.plant_id == models.Plant.id
).join(
models.Client,
or_(models.Plant.client_id == models.Client.id, models.Commission.client_id == models.Client.id)
).join(supervisor, models.Report.supervisor_id == supervisor.id
).filter(models.Report.id == report_id).first()
def get_months(db: SessionLocal, user_id: Optional[int] = None, client_id: Optional[int] = None):
query = db.query(models.Report.date)
if user_id:
query = query.filter(models.Report.operator_id == user_id)
if client_id:
query = query.filter(models.Client.id == client_id)
query = query.group_by(models.Report.date).order_by(models.Report.date)
dates = query.all()
return sorted(set([datetime.datetime.strftime(date[0], "%m/%Y") for date in dates]))
def get_monthly_reports(db: SessionLocal, month: Optional[str] = '0', user_id: Optional[int] = 0,
client_id: Optional[int] = 0,
plant_id: Optional[int] = 0, work_id: Optional[int] = 0):
supervisor = aliased(models.User)
query = db.query(
models.Report,
models.Commission.id.label("commission_id"),
models.Commission.code.label("commission_code"),
models.Commission.description.label("commission_description"),
models.Machine.id.label("machine_id"),
models.Machine.name.label("machine_name"),
models.Machine.brand.label("machine_brand"),
models.Machine.code.label("machine_code"),
models.Machine.cost_center.label("cost_center"),
models.User.id.label("operator_id"),
models.User.first_name,
models.User.last_name,
models.Client.id.label("client_id"),
models.Client.name.label("client_name"),
models.Plant.id.label("plant_id"),
models.Plant.name.label("plant_name"),
models.Plant.city.label("plant_city"),
models.Plant.address.label("plant_address"),
supervisor.id.label("supervisor_id"),
supervisor.first_name.label("supervisor_first_name"),
supervisor.last_name.label("supervisor_last_name")
).select_from(models.Report).outerjoin(
models.Commission,
and_(models.Report.type == "commission", models.Report.work_id == models.Commission.id)
).outerjoin(
models.Machine,
and_(models.Report.type == "machine", models.Report.work_id == models.Machine.id)
).join(models.User, models.Report.operator_id == models.User.id).outerjoin(
models.Plant, models.Machine.plant_id == models.Plant.id
).join(
models.Client,
or_(models.Plant.client_id == models.Client.id, models.Commission.client_id == models.Client.id)
).join(supervisor, models.Report.supervisor_id == supervisor.id)
if month != '0':
start_date = datetime.datetime.strptime(month, "%m/%Y").date()
query = query.filter(
extract('month', models.Report.date) == start_date.month,
extract('year', models.Report.date) == start_date.year
)
if user_id:
query = query.filter(models.Report.operator_id == user_id)
if client_id:
query = query.filter(models.Client.id == client_id)
if plant_id == 0:
query = query.filter(models.Report.type == "machine")
if plant_id != 0:
query = query.filter(models.Plant.id == plant_id)
if work_id:
query = query.filter(models.Report.work_id == work_id)
return query.order_by(models.Report.date).all()
def get_interval_reports(db: SessionLocal, start_date: Optional[str] = None, end_date: Optional[str] = None,
user_id: Optional[int] = 0,
client_id: Optional[int] = 0,
plant_id: Optional[int] = 0, work_id: Optional[int] = 0):
supervisor = aliased(models.User)
query = db.query(
models.Report,
models.Commission.id.label("commission_id"),
models.Commission.code.label("commission_code"),
models.Commission.description.label("commission_description"),
models.Machine.id.label("machine_id"),
models.Machine.name.label("machine_name"),
models.Machine.brand.label("machine_brand"),
models.Machine.code.label("machine_code"),
models.Machine.cost_center.label("cost_center"),
models.User.id.label("operator_id"),
models.User.first_name,
models.User.last_name,
models.Client.id.label("client_id"),
models.Client.name.label("client_name"),
models.Plant.id.label("plant_id"),
models.Plant.name.label("plant_name"),
models.Plant.city.label("plant_city"),
models.Plant.address.label("plant_address"),
supervisor.id.label("supervisor_id"),
supervisor.first_name.label("supervisor_first_name"),
supervisor.last_name.label("supervisor_last_name")
).select_from(models.Report).outerjoin(
models.Commission,
and_(models.Report.type == "commission", models.Report.work_id == models.Commission.id)
).outerjoin(
models.Machine,
and_(models.Report.type == "machine", models.Report.work_id == models.Machine.id)
).join(models.User, models.Report.operator_id == models.User.id).outerjoin(
models.Plant, models.Machine.plant_id == models.Plant.id
).join(
models.Client,
or_(models.Plant.client_id == models.Client.id, models.Commission.client_id == models.Client.id)
).join(supervisor, models.Report.supervisor_id == supervisor.id)
if start_date != '' and end_date != '':
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
query = query.filter(models.Report.date >= start_date_dt,
models.Report.date <= end_date_dt)
else:
if start_date != '':
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
query = query.filter(models.Report.date >= start_date_dt)
if end_date != '':
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
query = query.filter(models.Report.date <= end_date_dt)
if user_id:
query = query.filter(models.Report.operator_id == user_id)
if client_id:
query = query.filter(models.Client.id == client_id)
if plant_id == 0:
query = query.filter(models.Report.type == "machine")
if plant_id != 0:
query = query.filter(models.Plant.id == plant_id)
if work_id:
query = query.filter(models.Report.work_id == work_id)
return query.order_by(models.Report.date).all()
def get_monthly_commission_reports(db: SessionLocal, month: str, user_id: Optional[int] = None,
                                   client_id: Optional[int] = None, work_id: Optional[int] = None):
    """List commission reports for one month ("%m/%Y"; '0' means all months),
    optionally narrowed by operator, client and commission id.

    Each row bundles the Report with commission, operator, client and
    supervisor columns (supervisor is a second alias of User).
    """
    supervisor = aliased(models.User)
    query = db.query(
        models.Report,
        models.Commission.id.label("commission_id"),
        models.Commission.code.label("commission_code"),
        models.Commission.description.label("commission_description"),
        models.User.id.label("operator_id"),
        models.User.first_name,
        models.User.last_name,
        models.Client.id.label("client_id"),
        models.Client.name.label("client_name"),
        supervisor.id.label("supervisor_id"),
        supervisor.first_name.label("supervisor_first_name"),
        supervisor.last_name.label("supervisor_last_name")
    ).select_from(models.Report).join(
        models.Commission,
        and_(models.Report.type == "commission", models.Report.work_id == models.Commission.id)
    ).join(models.User, models.Report.operator_id == models.User.id).join(
        models.Client, models.Commission.client_id == models.Client.id
    ).join(supervisor, models.Report.supervisor_id == supervisor.id)
    # month == '0' disables the month filter; otherwise match month and year.
    if month != '0':
        start_date = datetime.datetime.strptime(month, "%m/%Y").date()
        query = query.filter(
            extract('month', models.Report.date) == start_date.month,
            extract('year', models.Report.date) == start_date.year
        )
    if user_id:
        query = query.filter(models.Report.operator_id == user_id)
    if client_id:
        query = query.filter(models.Client.id == client_id)
    if work_id:
        query = query.filter(models.Report.work_id == work_id)
    return query.order_by(models.Report.date).all()
def get_interval_commission_reports(db: SessionLocal, start_date: Optional[str] = None, end_date: Optional[str] = None,
                                    user_id: Optional[int] = None,
                                    client_id: Optional[int] = None, work_id: Optional[int] = None):
    """List commission reports within [start_date, end_date] ("%Y-%m-%d"),
    optionally narrowed by operator, client and commission id.

    Empty-string or None bounds are skipped; the previous ``!= ''`` checks
    raised TypeError in strptime when the declared None defaults were used.
    """
    supervisor = aliased(models.User)
    query = db.query(
        models.Report,
        models.Commission.id.label("commission_id"),
        models.Commission.code.label("commission_code"),
        models.Commission.description.label("commission_description"),
        models.User.id.label("operator_id"),
        models.User.first_name,
        models.User.last_name,
        models.Client.id.label("client_id"),
        models.Client.name.label("client_name"),
        supervisor.id.label("supervisor_id"),
        supervisor.first_name.label("supervisor_first_name"),
        supervisor.last_name.label("supervisor_last_name")
    ).select_from(models.Report).join(
        models.Commission,
        and_(models.Report.type == "commission", models.Report.work_id == models.Commission.id)
    ).join(models.User, models.Report.operator_id == models.User.id).join(
        models.Client, models.Commission.client_id == models.Client.id
    ).join(supervisor, models.Report.supervisor_id == supervisor.id)
    # Truthiness covers both '' (sent by the UI) and the None defaults; the
    # two bounds are independent conjunctive filters, so apply each on its own.
    if start_date:
        query = query.filter(models.Report.date >= datetime.datetime.strptime(start_date, "%Y-%m-%d"))
    if end_date:
        query = query.filter(models.Report.date <= datetime.datetime.strptime(end_date, "%Y-%m-%d"))
    if user_id:
        query = query.filter(models.Report.operator_id == user_id)
    if client_id:
        query = query.filter(models.Client.id == client_id)
    if work_id:
        query = query.filter(models.Report.work_id == work_id)
    return query.order_by(models.Report.date).all()
def get_daily_hours_in_month(db: SessionLocal, month: str, user_id: int):
    """Return one entry per calendar day of *month* ("%m/%Y") with the total
    reported hours and report count for *user_id* (zeroes on empty days)."""
    start_date = datetime.datetime.strptime(month, "%m/%Y").date()
    # Last day of the month: jump past the end, snap to day 1, step back one.
    end_date = (start_date.replace(day=1) + datetime.timedelta(days=32)).replace(day=1) - datetime.timedelta(days=1)
    query = db.query(
        func.date_trunc('day', models.Report.date).label('day'),
        func.sum(func.cast(models.Report.intervention_duration, Float)).label('hours'),
        func.count().label('count')
    ).filter(
        models.Report.date >= start_date,
        models.Report.date < end_date + datetime.timedelta(days=1),
        models.Report.operator_id == user_id
    ).group_by(
        func.date_trunc('day', models.Report.date)
    ).order_by(
        func.date_trunc('day', models.Report.date)
    )
    # Index aggregated rows by date for O(1) lookup instead of rescanning the
    # whole result list for every day of the month (was O(days * rows)).
    by_day = {item.day.date(): item for item in query.all()}
    result = []
    current_date = start_date
    while current_date <= end_date:
        item = by_day.get(current_date)
        result.append({
            'date': current_date.strftime('%d/%m/%Y'),
            'hours': item.hours if item else 0,
            'count': item.count if item else 0,
        })
        current_date += datetime.timedelta(days=1)
    return result
def edit_report(db: SessionLocal, report_id: int, report: schemas.ReportCreate, user_id: int):
    """Update an existing report in place and return it.

    Raises HTTPException 404 when the report does not exist. The previous
    ``({"detail": "Errore"}, 400)`` tuple return was serialised by FastAPI as
    a 200 response body, so callers never saw an error status.
    """
    db_report = db.query(models.Report).filter(models.Report.id == report_id).first()
    if not db_report:
        raise HTTPException(status_code=404, detail="Intervento non trovato")
    db_report.type = report.type
    db_report.date = report.date
    db_report.intervention_duration = report.intervention_duration
    db_report.intervention_type = report.intervention_type
    db_report.intervention_location = report.intervention_location
    db_report.work_id = report.work_id
    db_report.supervisor_id = report.supervisor_id
    db_report.description = report.description
    db_report.notes = report.notes
    db_report.trip_kms = report.trip_kms
    db_report.cost = report.cost
    # The editor becomes the report's operator.
    db_report.operator_id = user_id
    db.commit()
    return db_report
def edit_client(db: SessionLocal, client_id: int, client: schemas.ClientCreate):
    """Update an existing client in place and return it.

    Raises HTTPException 404 when the client does not exist (the previous
    tuple return was sent back by FastAPI as a 200 body).
    """
    db_client = db.query(models.Client).filter(models.Client.id == client_id).first()
    if not db_client:
        raise HTTPException(status_code=404, detail="Cliente non trovato")
    db_client.name = client.name
    db_client.city = client.city
    db_client.address = client.address
    db_client.email = client.email
    db_client.contact = client.contact
    db_client.phone_number = client.phone_number
    db_client.province = client.province
    db_client.cap = client.cap
    db.commit()
    return db_client
def edit_commission(db: SessionLocal, commission_id: int, commission: schemas.CommissionCreate):
    """Update an existing commission in place and return it.

    Raises HTTPException 404 when the commission does not exist (the previous
    tuple return was sent back by FastAPI as a 200 body).
    """
    db_commission = db.query(models.Commission).filter(models.Commission.id == commission_id).first()
    if not db_commission:
        raise HTTPException(status_code=404, detail="Commessa non trovata")
    db_commission.client_id = commission.client_id
    db_commission.code = commission.code
    db_commission.description = commission.description
    db_commission.open = commission.open
    db.commit()
    return db_commission
def edit_plant(db: SessionLocal, plant_id: int, plant: schemas.PlantCreate):
    """Update an existing plant in place and return it.

    Raises HTTPException 404 when the plant does not exist (the previous
    tuple return was sent back by FastAPI as a 200 body).
    """
    db_plant = db.query(models.Plant).filter(models.Plant.id == plant_id).first()
    if not db_plant:
        raise HTTPException(status_code=404, detail="Stabilimento non trovato")
    db_plant.client_id = plant.client_id
    db_plant.name = plant.name
    db_plant.city = plant.city
    db_plant.address = plant.address
    db_plant.email = plant.email
    db_plant.contact = plant.contact
    db_plant.phone_number = plant.phone_number
    db_plant.province = plant.province
    db_plant.cap = plant.cap
    db.commit()
    return db_plant
def edit_machine(db: SessionLocal, machine_id: int, machine: schemas.MachineCreate):
    """Update an existing machine in place and return it.

    Raises HTTPException 404 when the machine does not exist (the previous
    tuple return was sent back by FastAPI as a 200 body).
    """
    db_machine = db.query(models.Machine).filter(models.Machine.id == machine_id).first()
    if not db_machine:
        raise HTTPException(status_code=404, detail="Macchina non trovata")
    db_machine.plant_id = machine.plant_id
    db_machine.robotic_island = machine.robotic_island
    db_machine.code = machine.code
    db_machine.name = machine.name
    db_machine.brand = machine.brand
    db_machine.model = machine.model
    db_machine.serial_number = machine.serial_number
    db_machine.production_year = machine.production_year
    db_machine.cost_center = machine.cost_center
    db_machine.description = machine.description
    db.commit()
    return db_machine
def get_user_by_id(db: SessionLocal, user_id: int):
    """Fetch one user joined with its role name and client name/city."""
    query = db.query(
        models.User,
        models.Role.name.label('role'),
        models.Client.name.label('client_name'),
        models.Client.city.label('client_city'),
    )
    query = query.join(models.Role, models.User.role_id == models.Role.id)
    query = query.join(models.Client, models.User.client_id == models.Client.id)
    return query.filter(models.User.id == user_id).first()
def get_client_by_id(db: SessionLocal, client_id: int):
    """Return the client with the given primary key, or None."""
    query = db.query(models.Client).filter(models.Client.id == client_id)
    return query.first()
def get_plant_by_id(db: SessionLocal, plant_id: int):
    """Fetch one plant together with its client row."""
    query = db.query(models.Plant, models.Client)
    query = query.filter(models.Plant.id == plant_id)
    return query.join(models.Client, models.Plant.client_id == models.Client.id).first()
def get_commission_by_id(db: SessionLocal, commission_id: int):
    """Fetch one commission together with its client row."""
    query = db.query(models.Commission, models.Client)
    query = query.filter(models.Commission.id == commission_id)
    return query.join(models.Client, models.Commission.client_id == models.Client.id).first()
def get_machine_by_id(db: SessionLocal, machine_id: int):
    """Fetch one machine together with its plant and the plant's client."""
    query = db.query(models.Machine, models.Plant, models.Client)
    query = query.filter(models.Machine.id == machine_id)
    query = query.join(models.Plant, models.Machine.plant_id == models.Plant.id)
    return query.join(models.Client, models.Plant.client_id == models.Client.id).first()
def create_user(db: SessionLocal, user: schemas.UserCreate):
    """Create a new user; 400 when the username or email is already taken.

    When no password is supplied a temporary one is generated.
    """
    if db.query(models.User).filter(models.User.username == user.username).first():
        raise HTTPException(status_code=400, detail="Username già registrato")
    if db.query(models.User).filter(models.User.email == user.email).first():
        raise HTTPException(status_code=400, detail="Email già registrata")
    tmp_password = user.password or pwd.genword()
    hashed_password = auth.get_password_hash(tmp_password)
    # NOTE(review): temp_password stores the clear-text password — confirm
    # this is intentional (reset_password in this module does the same).
    db_user = models.User(
        first_name=user.first_name,
        last_name=user.last_name,
        email=user.email,
        phone_number=user.phone_number,
        username=user.username,
        role_id=user.role_id,
        client_id=user.client_id,
        temp_password=tmp_password,
        password=hashed_password,
    )
    db.add(db_user)
    db.commit()
    db.refresh(db_user)
    return db_user
def delete_user(db: SessionLocal, user_id: int, current_user_id: int):
    """Delete a user unless it is protected or still referenced by reports.

    Blocked cases (403): the built-in admin (id 1), the caller itself, and any
    user that appears as operator or supervisor on a report. Raises 404 when
    the user does not exist.
    """
    user = db.query(models.User).get(user_id)
    # Fix: the original queried models.User while filtering on models.Report
    # columns, which emits an implicit cross join and matches whenever ANY
    # report row satisfies the predicate. Query Report directly instead.
    has_reports = (
        db.query(models.Report).filter(models.Report.operator_id == user_id).first() is not None
        or db.query(models.Report).filter(models.Report.supervisor_id == user_id).first() is not None
    )
    if user_id == 1 or user_id == current_user_id or has_reports:
        raise HTTPException(status_code=403, detail="Non puoi eliminare questo utente")
    if not user:
        raise HTTPException(status_code=404, detail="Utente non trovato")
    db.delete(user)
    db.commit()
    return {"detail": "Utente eliminato"}
def delete_client(db: SessionLocal, client_id: int):
    """Delete a client; 404 if missing, 400 while commissions reference it."""
    target = db.query(models.Client).get(client_id)
    if target is None:
        raise HTTPException(status_code=404, detail="Cliente non trovato")
    # NOTE(review): only commissions are checked — plants referencing this
    # client are not; confirm whether that is intentional.
    in_use = db.query(models.Commission).filter(models.Commission.client_id == client_id).first()
    if in_use:
        raise HTTPException(status_code=400, detail="Non puoi eliminare questo cliente")
    db.delete(target)
    db.commit()
    return {"detail": "Cliente eliminato"}
def delete_commission(db: SessionLocal, commission_id: int):
    """Delete a commission; 404 if missing, 400 while reports reference it."""
    target = db.query(models.Commission).get(commission_id)
    if target is None:
        raise HTTPException(status_code=404, detail="Commessa non trovata")
    referenced = db.query(models.Report).filter(
        models.Report.work_id == commission_id,
        models.Report.type == 'commission'
    ).first()
    if referenced:
        raise HTTPException(status_code=400, detail="Non puoi eliminare questa commessa")
    db.delete(target)
    db.commit()
    return {"detail": "Commessa eliminata"}
def delete_machine(db: SessionLocal, machine_id: int):
    """Delete a machine; 404 if missing, 400 while reports reference it."""
    target = db.query(models.Machine).get(machine_id)
    if target is None:
        raise HTTPException(status_code=404, detail="Macchina non trovata")
    referenced = db.query(models.Report).filter(
        models.Report.work_id == machine_id,
        models.Report.type == 'machine'
    ).first()
    if referenced:
        raise HTTPException(status_code=400, detail="Non puoi eliminare questa macchina")
    db.delete(target)
    db.commit()
    return {"detail": "Macchina eliminata"}
def delete_plant(db: SessionLocal, plant_id: int):
    """Delete a plant; 404 if missing, 400 while machines belong to it."""
    target = db.query(models.Plant).get(plant_id)
    if target is None:
        raise HTTPException(status_code=404, detail="Stabilimento non trovato")
    has_machines = db.query(models.Machine).filter(models.Machine.plant_id == plant_id).first()
    if has_machines:
        raise HTTPException(status_code=400, detail="Non puoi eliminare questo stabilimento")
    db.delete(target)
    db.commit()
    return {"detail": "Stabilimento eliminato"}
def delete_report(db: SessionLocal, report_id: int, user_id: int):
    """Delete a report; only its operator or a role-1 user may do so."""
    target = db.query(models.Report).get(report_id)
    requester = db.query(models.User).get(user_id)
    if target is None:
        raise HTTPException(status_code=404, detail="Intervento non trovato")
    if target.operator_id != user_id and requester.role_id != 1:
        raise HTTPException(status_code=403, detail="Non sei autorizzato a eliminare questo intervento")
    db.delete(target)
    db.commit()
    return {"detail": "Intervento eliminato"}
def create_report(db: SessionLocal, report: schemas.ReportCreate, user_id: int):
    """Persist a new report for *user_id*; empty kms/cost default to "0.0".

    Normalises locally instead of mutating the caller's schema object, which
    the previous version altered as a side effect.
    """
    trip_kms = report.trip_kms if report.trip_kms != '' else '0.0'
    cost = report.cost if report.cost != '' else '0.0'
    db_report = models.Report(
        date=report.date,
        intervention_duration=report.intervention_duration,
        intervention_type=report.intervention_type,
        type=report.type,
        intervention_location=report.intervention_location,
        work_id=report.work_id,
        description=report.description,
        supervisor_id=report.supervisor_id,
        notes=report.notes,
        trip_kms=trip_kms,
        cost=cost,
        operator_id=user_id,
        date_created=datetime.datetime.now(ZoneInfo("Europe/Rome")),
    )
    db.add(db_report)
    db.commit()
    db.refresh(db_report)
    return db_report
def create_commission(db: SessionLocal, commission: schemas.CommissionCreate):
    """Create an open commission; 400 when the code is already taken."""
    duplicate = db.query(models.Commission).filter(models.Commission.code == commission.code).first()
    if duplicate:
        raise HTTPException(status_code=400, detail="Codice commessa già registrato")
    new_commission = models.Commission(
        date_created=datetime.datetime.now(ZoneInfo("Europe/Rome")),
        code=commission.code,
        description=commission.description,
        client_id=commission.client_id,
        open=True,
    )
    db.add(new_commission)
    db.commit()
    db.refresh(new_commission)
    return new_commission
def create_client(db: SessionLocal, client: schemas.ClientCreate):
    """Create a client; 400 when the name is already registered."""
    if db.query(models.Client).filter(models.Client.name == client.name).first():
        raise HTTPException(status_code=400, detail="Cliente già registrato")
    new_client = models.Client(
        name=client.name,
        address=client.address,
        city=client.city,
        email=client.email,
        phone_number=client.phone_number,
        contact=client.contact,
        province=client.province,
        cap=client.cap,
        date_created=datetime.datetime.now(ZoneInfo("Europe/Rome")),
    )
    db.add(new_client)
    db.commit()
    db.refresh(new_client)
    return new_client
def get_commissions(db: SessionLocal, client_id: Optional[int] = None):
    """All commissions joined with their client, ordered by client name."""
    query = db.query(models.Commission, models.Client)
    query = query.join(models.Client, models.Commission.client_id == models.Client.id)
    if client_id:
        query = query.filter(models.Commission.client_id == client_id)
    return query.order_by(models.Client.name).all()
def get_open_commissions(db: SessionLocal, client_id: Optional[int] = None):
    """Open commissions joined with their client, ordered by client name."""
    query = db.query(models.Commission, models.Client)
    query = query.join(models.Client, models.Commission.client_id == models.Client.id)
    query = query.filter(models.Commission.open)
    if client_id:
        query = query.filter(models.Commission.client_id == client_id)
    return query.order_by(models.Client.name).all()
def create_plant(db: SessionLocal, plant: schemas.PlantCreate):
    """Create a plant; 400 when one already exists at the same address."""
    if db.query(models.Plant).filter(models.Plant.address == plant.address).first():
        raise HTTPException(status_code=400, detail="Esiste già uno stabilimento con questo indirizzo")
    new_plant = models.Plant(
        date_created=datetime.datetime.now(ZoneInfo("Europe/Rome")),
        name=plant.name,
        address=plant.address,
        province=plant.province,
        cap=plant.cap,
        city=plant.city,
        email=plant.email,
        phone_number=plant.phone_number,
        contact=plant.contact,
        client_id=plant.client_id,
    )
    db.add(new_plant)
    db.commit()
    db.refresh(new_plant)
    return new_plant
def change_password(db: SessionLocal, old_password: str, new_password: str, user_id: int):
    """Validate and apply a password change for *user_id*.

    Checks, in order: user exists (404); length >= 8, no spaces, differs from
    the old one, and the old password verifies (each a 400).
    """
    account = db.query(models.User).get(user_id)
    if account is None:
        raise HTTPException(status_code=404, detail="Utente non trovato")
    if len(new_password) < 8:
        raise HTTPException(status_code=400, detail="La password deve essere lunga almeno 8 caratteri")
    if ' ' in new_password:
        raise HTTPException(status_code=400, detail="La password non può contenere spazi")
    if old_password == new_password:
        raise HTTPException(status_code=400, detail="La password nuova deve essere diversa da quella attuale")
    if not auth.verify_password(old_password, account.password):
        raise HTTPException(status_code=400, detail="Password errata")
    account.password = auth.get_password_hash(new_password)
    # Marker that the temporary password has been replaced by the user.
    account.temp_password = 'changed'
    db.commit()
    return {"detail": "Password modificata"}
def edit_user(db: SessionLocal, user_id: int, user: schemas.UserUpdate):
    """Update a user's contact data and client; 404 when the user is missing.

    The previous error path returned a ``({"detail": "Errore"}, 400)`` tuple,
    which FastAPI serialises as a 200 body instead of an error status.
    """
    db_user = db.query(models.User).filter(models.User.id == user_id).first()
    if not db_user:
        raise HTTPException(status_code=404, detail="Utente non trovato")
    db_user.email = user.email
    db_user.phone_number = user.phone_number
    # Unconditional assignment; the old inequality guard changed nothing.
    db_user.client_id = user.client_id
    db.commit()
    return db_user
def get_supervisors_by_client(db: SessionLocal, client_id: int):
    """Return every user whose client_id matches *client_id*."""
    query = db.query(models.User).filter(models.User.client_id == client_id)
    return query.all()
def reset_password(db: SessionLocal, user_id: int):
    """Generate a fresh temporary password for the user and return it."""
    account = db.query(models.User).get(user_id)
    if account is None:
        raise HTTPException(status_code=404, detail="Utente non trovato")
    new_password = pwd.genword()
    # The clear-text temporary password is stored so it can be shown once.
    account.temp_password = new_password
    account.password = auth.get_password_hash(new_password)
    db.commit()
    db.refresh(account)
    return {"detail": "Password resettata", "password": new_password}
def edit_report_email_date(db: SessionLocal, report_id: int, email_date: datetime.datetime):
    """Stamp the e-mail date on a report; 404 when the report is missing."""
    target = db.query(models.Report).get(report_id)
    if target is None:
        raise HTTPException(status_code=404, detail="Intervento non trovato")
    target.email_date = email_date
    db.commit()
    db.refresh(target)
    return target
def close_commission(db: SessionLocal, commission_id: int):
    """Toggle a commission's open state (despite the name).

    Closing stamps date_closed; reopening clears it. Raises 404 when the
    commission does not exist.
    """
    db_commission = db.query(models.Commission).get(commission_id)
    if not db_commission:
        raise HTTPException(status_code=404, detail="Commessa non trovata")
    if db_commission.open:
        db_commission.open = False
        db_commission.date_closed = datetime.datetime.now(ZoneInfo("Europe/Rome"))
    else:
        db_commission.open = True
        db_commission.date_closed = None
    db.commit()
    db.refresh(db_commission)
    return db_commission
def create_ticket(db: SessionLocal, ticket: schemas.TicketCreate, user_id: int):
    """Open a new ticket on a machine on behalf of *user_id*."""
    new_ticket = models.Ticket(
        title=ticket.title,
        status='open',
        priority=ticket.priority,
        date_created=datetime.datetime.now(ZoneInfo("Europe/Rome")),
        requested_by=user_id,
        machine_id=ticket.machine_id,
        description=ticket.description,
    )
    db.add(new_ticket)
    db.commit()
    db.refresh(new_ticket)
    return new_ticket
def get_tickets(db: SessionLocal):
    """All tickets joined with their machine."""
    query = db.query(models.Ticket, models.Machine)
    return query.join(models.Machine, models.Ticket.machine_id == models.Machine.id).all()
def get_my_client(db: SessionLocal, user_id: int):
    """Return the caller's client wrapped in a one-element list.

    404 when the user does not exist or has no associated client.
    """
    account = db.query(models.User).get(user_id)
    if account is None:
        raise HTTPException(status_code=404, detail="Utente non trovato")
    client = db.query(models.Client).get(account.client_id)
    if client is None:
        raise HTTPException(status_code=404, detail="L'utente non ha un cliente associato")
    return [client]
def search_reports(db: SessionLocal, search: str):
    """Case-insensitive substring search across reports and their joined
    commission/machine, plant, client and operator columns.

    NOTE(review): '%' and '_' inside *search* act as SQL LIKE wildcards —
    confirm whether they should be escaped.
    """
    search = '%' + search + '%'
    return db.query(
        models.Report,
        models.Commission.id.label("commission_id"),
        models.Commission.code.label("commission_code"),
        models.Machine.id.label("machine_id"),
        models.Machine.name.label("machine_name"),
        models.Machine.brand.label("machine_brand"),
        models.Machine.code.label("machine_code"),
        models.Machine.cost_center.label("cost_center"),
        models.User.id.label("operator_id"),
        models.User.first_name,
        models.User.last_name,
        models.Client.id.label("client_id"),
        models.Client.name.label("client_name"),
        models.Plant.id.label("plant_id"),
        models.Plant.city.label("plant_city"),
        models.Plant.address.label("plant_address")
    ).select_from(models.Report).outerjoin(
        models.Commission,
        and_(models.Report.type == "commission", models.Report.work_id == models.Commission.id)
    ).outerjoin(
        models.Machine,
        and_(models.Report.type == "machine", models.Report.work_id == models.Machine.id)
    ).outerjoin(models.Plant, models.Machine.plant_id == models.Plant.id).join(
        models.Client,
        or_(models.Plant.client_id == models.Client.id, models.Commission.client_id == models.Client.id)
    ).join(models.User, models.Report.operator_id == models.User.id).filter(
        or_(
            models.Report.description.ilike(search),
            models.Report.notes.ilike(search),
            models.Report.intervention_type.ilike(search),
            models.Report.intervention_location.ilike(search),
            models.Report.intervention_duration.ilike(search),
            models.Commission.code.ilike(search),
            models.Commission.description.ilike(search),
            models.Machine.code.ilike(search),
            models.Machine.name.ilike(search),
            models.Machine.brand.ilike(search),
            models.Machine.model.ilike(search),
            models.Plant.name.ilike(search),
            models.Plant.city.ilike(search),
            models.Plant.address.ilike(search),
            models.Client.name.ilike(search),
            models.Client.city.ilike(search),
            models.User.first_name.ilike(search),
            models.User.last_name.ilike(search)
        )
    ).order_by(models.Report.date).all()
| GabrielAndreata/gestione-backend | app/crud.py | crud.py | py | 37,649 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "app.database.SessionLocal",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "app.models.Plant",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "app.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "app.databa... |
259844212 | from c3d3.infrastructure.c3.interfaces.cex_order_history_screener.interface import iCexOrderHistoryScreenerHandler
from c3d3.domain.c3.wrappers.binance.usdtm.wrapper import BinanceUsdtmExchange
from c3d3.core.decorators.to_dataframe.decorator import to_dataframe
import datetime
import time
import requests as r
class BinanceUsdtmCexOrderHistoryScreenerHandler(BinanceUsdtmExchange, iCexOrderHistoryScreenerHandler):
    """Fetch the full USDT-M order history of one ticker between start_time
    and end_time, normalising each order into a flat row dict."""
    def __str__(self):
        # The class name doubles as the handler's display name.
        return __class__.__name__
    def __init__(
            self,
            ticker: str, label: str,
            start_time: datetime.datetime, end_time: datetime.datetime,
            is_child: bool = False,
            *args, **kwargs
    ) -> None:
        """Store the query window; is_child skips re-initialising the
        exchange wrapper when a subclass has already done so."""
        if not is_child:
            BinanceUsdtmExchange.__init__(self, *args, **kwargs)
        iCexOrderHistoryScreenerHandler.__init__(self, ticker=ticker, label=label, *args, **kwargs)
        self.start_time = start_time
        self.end_time = end_time
    @property
    def start(self):
        # Start of the screening window (datetime).
        return self.start_time
    @property
    def end(self):
        # End of the screening window (datetime).
        return self.end_time
    def _formatting(self, json_: dict) -> dict:
        """Flatten one raw order dict into the interface's column layout.

        'time'/'updateTime' are converted from milliseconds to datetimes;
        avgPrice is kept only for MARKET orders, price only for LIMIT orders.
        """
        ts = datetime.datetime.fromtimestamp(json_['time'] / 10 ** 3)
        update_ts = datetime.datetime.fromtimestamp(json_['updateTime'] / 10 ** 3)
        return {
            self._EXCHANGE_COLUMN: self.key,
            self._LABEL_COLUMN: self.label,
            self._TICKER_COLUMN: self.ticker,
            self._MARKET_PRICE_COLUMN: float(json_['avgPrice']) if json_['type'] == 'MARKET' else None,
            self._LIMIT_PRICE_COLUMN: float(json_['price']) if json_['type'] == 'LIMIT' else None,
            self._QTY_COLUMN: float(json_['executedQty']),
            self._ORDER_ID_COLUMN: json_['orderId'],
            self._SIDE_COLUMN: json_['side'],
            self._STATUS_COLUMN: json_['status'],
            self._TYPE_COLUMN: json_['type'],
            self._TS_UPDATE_COLUMN: update_ts,
            self._TS_COLUMN: ts
        }
    def _handle(self, start: int, end: int):
        """Fetch up to 1000 orders in [start, end] (epoch ms) and return the
        decoded JSON list; raises requests.HTTPError on a bad response."""
        all_orders = self.allOrders(
            symbol=self.ticker,
            startTime=start,
            endTime=end,
            limit=1000,
            timestamp=int(time.time() * 1000)
        )
        if not self._validate_response(all_orders):
            raise r.HTTPError(f'Invalid status code for allOrders in {self.__class__.__name__}')
        all_orders = all_orders.json()
        return all_orders
    @to_dataframe
    def do(self):
        """Page through the order history and return the formatted rows
        (presumably wrapped into a DataFrame by @to_dataframe — see decorator)."""
        overviews: list = list()
        end = int(self.end.timestamp()) * 1000
        all_orders = self._handle(start=int(self.start.timestamp()) * 1000, end=end)
        # NOTE(review): an empty first page makes all_orders[-1] below raise
        # IndexError — confirm the window is always non-empty.
        overviews.extend([self._formatting(json_=order) for order in all_orders])
        while True:
            # Resume 1 ms after the last seen order; stop on an empty page.
            start = all_orders[-1]['time'] + 1
            all_orders = self._handle(start=start, end=end)
            if not all_orders:
                break
            overviews.extend([self._formatting(json_=order) for order in all_orders])
        return overviews
| e183b796621afbf902067460/c3d3-framework | c3d3/infrastructure/c3/handlers/cex_order_history_screener/binance/usdtm/handler.py | handler.py | py | 3,034 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "c3d3.domain.c3.wrappers.binance.usdtm.wrapper.BinanceUsdtmExchange",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "c3d3.infrastructure.c3.interfaces.cex_order_history_screener.interface.iCexOrderHistoryScreenerHandler",
"line_number": 10,
"usage_type": "name"
... |
1946291291 | from collections import deque
def bfs(l, startx, starty, goalx, goaly):
    """Minimum number of knight moves from (startx, starty) to
    (goalx, goaly) on an l x l board; None if the goal is unreachable."""
    # Knight move offsets, hoisted out of the search loop (the original
    # rebuilt both lists on every dequeued node).
    dx = [1, 2, 2, 1, -1, -2, -2, -1]
    dy = [2, 1, -1, -2, -2, -1, 1, 2]
    # visited stores distance + 1, so 0 doubles as "not seen yet".
    visited = [[0] * l for _ in range(l)]
    queue = deque()
    queue.append((startx, starty))
    visited[startx][starty] = 1
    while queue:
        nowx, nowy = queue.popleft()
        if nowx == goalx and nowy == goaly:
            return visited[nowx][nowy] - 1
        for i in range(8):
            nextx = nowx + dx[i]
            nexty = nowy + dy[i]
            if 0 <= nextx < l and 0 <= nexty < l and visited[nextx][nexty] == 0:
                queue.append((nextx, nexty))
                visited[nextx][nexty] = visited[nowx][nowy] + 1
    return None  # queue exhausted without reaching the goal
def solution(l, startx, starty, goalx, goaly):
    """Print the knight-move distance computed by bfs()."""
    moves = bfs(l, startx, starty, goalx, goaly)
    print(moves)
# Read the number of test cases; each case supplies the board size, the
# knight's start square and the goal square, printing the move count.
t = int(input())
for _ in range(t):
    l = int(input())
    startx, starty = map(int, input().split())
    goalx, goaly = map(int, input().split())
    solution(l,startx,starty,goalx,goaly)
| hellokena/2022 | DFS & BFS/re/7562 나이트의 이동.py | 7562 나이트의 이동.py | py | 972 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
}
] |
40567581461 | # Initialize an empty dictionary to store the data
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use('TkAgg')
from mpl_toolkits.mplot3d import Axes3D
def projectFull(H, X, Y):
    """Project the laser-plane point (X, Y) into pixel coordinates using the
    nine parameters in H and the module-level intrinsics fu, fv, u0, v0."""
    w11, w12, w21, w22, w31, w32, t1, t2, t3 = H
    # Per-axis linear coefficients; the additive terms keep the original
    # left-to-right grouping so float results match exactly.
    u_coeff_x = fu * w11 + u0 * w31
    u_coeff_y = fu * w12 + u0 * w32
    v_coeff_x = fv * w21 + v0 * w31
    v_coeff_y = fv * w22 + v0 * w32
    u = u_coeff_x * X + u_coeff_y * Y + fu * t1 + u0 * t3
    v = v_coeff_x * X + v_coeff_y * Y + fv * t2 + v0 * t3
    return u, v
# Parse per-image pixel correspondences from back.txt: header lines end in
# "back.jpg"; every other line is an "(x,y)" integer pair for that image.
data_dict = {}
# Read the data from the file
with open('back.txt', 'r') as file:
    lines = file.readlines()
current_image = None
current_data = []
for line in lines:
    line = line.strip()
    if line.endswith('back.jpg'):
        # Store the previous image's data if it exists
        if current_image is not None:
            data_dict[current_image] = current_data
        # Set the current image and initialize data list
        current_image = line
        current_data = []
    else:
        # Parse and store the data
        x, y = map(int, line[1:-1].split(','))
        current_data.append((x, y))
# Store the data for the last image
if current_image is not None:
    data_dict[current_image] = current_data
# Initialize an empty dictionary to store the data
# ABback.txt mixes two record shapes per image: "(x,y)" float pairs and
# "a;b;...0" semicolon rows (first two fields kept).
data_dict2 = {}
# Read the data from the file
with open('ABback.txt', 'r') as file:
    lines = file.readlines()
current_image = None
current_data = []
for line in lines:
    line = line.strip()
    if line.endswith('back.jpg'):
        # Store the previous image's data if it exists
        if current_image is not None:
            data_dict2[current_image] = current_data
        # Set the current image and initialize data list
        current_image = line
        current_data = []
    elif line.endswith(')'):
        # Store the previous im:
        # Parse and store the data
        x, y = map(float, line[1:-1].split(','))
        current_data.append((x, y))
    elif line.endswith('0'):
        a, b = map(float, line.split(';')[:2])
        current_data.append((a, b))
# Store the data for the last image
if current_image is not None:
    data_dict2[current_image] = current_data
# Pair the first entry of each file per image: pixel point, (A, B) line
# coefficients and the laser point. Assumes every image key in back.txt is
# also present in ABback.txt — a missing key raises KeyError here.
image_points = data_dict
laserAB = data_dict2
AB = []
laser = []
image_p = []
for image in image_points.keys():
    image_p.append(image_points[image][0])
    AB.append(laserAB[image][0])
    laser.append(laserAB[image][1])
image_p = np.array(image_p)
AB = np.array(AB)
laser = np.array(laser)
# Camera intrinsics: focal lengths (fu, fv) and principal point (u0, v0).
fu = 250.001420127782
fv = 253.955300723887
u0 = 239.731339559399
v0 = 246.917074981568
# Create a 3D figure
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Create the 3D scatter plot
ax.scatter(laser[:,0], laser[:,1], AB[:,0], c=AB[:,0], cmap='viridis', marker='o')
# Set labels for the axes
ax.set_xlabel('X Laser')
ax.set_ylabel('Y Laser')
ax.set_zlabel('A')
plt.show()
# Build the linear system A_matrix @ x = 1 whose unknowns are the nine
# parameters consumed by projectFull (see the commented unpack below).
A_matrix = []
for (A, B), (xt, yt) in zip(AB, laser):
    # w11, w12, w21, w22, w31, w32, t1, t2, t3 = H
    row = [A * fu * xt, A * fu * yt, B * fv * xt, B * fv * yt, (A * u0 + B * v0) * xt, (A * u0 + B * v0) * yt, A * fu,
           B * fv, (A * u0 + B * v0)]
    A_matrix.append(row)
A_matrix = np.array(A_matrix)
# A_matrix = A_matrix[::5,:]
B_matrix = np.ones(A_matrix.shape[0])
observed_points = image_p
laser_points = laser
# print(A_matrix)
# print(B_matrix)
# Least-squares solve via SVD pseudoinverse: x = V S^-1 U^T b.
U, S, VT = np.linalg.svd(A_matrix, full_matrices=False)
# Calculate the pseudoinverse of A
Ainv = np.dot(VT.transpose(), np.dot(np.diag(S ** -1), U.transpose()))
x = np.matmul(Ainv, B_matrix)
# Use the least-squares method to solve for x
# x, residuals, rank, s = np.linalg.lstsq(A_matrix, B_matrix, rcond=None)
print("Results: ")
print(x)
# Initialize an array to store the squared errors
squared_errors = []
# NOTE(review): the loop variable image_p shadows the array defined above;
# harmless here, but confirm nothing later expects the array.
for laser_p, image_p in zip(laser_points, observed_points):
    proj_x, proj_y = projectFull(x, laser_p[0], laser_p[1])
    # print(proj_x, proj_y)
    squared_error = (proj_x - image_p[0]) ** 2 + (proj_y - image_p[1]) ** 2
    squared_errors.append(squared_error)
# Mean point-to-line distance of each pixel from its fitted line a*u+b*v=1.
avg_err = 0
for (a,b), (u,v) in zip(AB, observed_points):
    error = np.sum(np.abs(a * u + b * v - 1)) / math.sqrt(
        np.sum(a ** 2 + b ** 2))  # Use np.abs() for element-wise absolute value
    avg_err += error
# Calculate the RMSE
rmse = np.sqrt(np.mean(squared_errors))
print("Root Mean Square Error (RMSE):", rmse)
print("avg err", avg_err / len(observed_points)) | bach05/PanNote | src/auto_calibration_tools/scripts/camera_laser_calibration/readTest.py | readTest.py | py | 4,496 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number"... |
23988113634 | import re
import ast
import json
import tiktoken
from langchain.prompts import PromptTemplate
from ..embeddings.chroma import chroma_openai_cwe_collection, chroma_openai_attack_collection
tokenizer = tiktoken.get_encoding("cl100k_base")
general_cyber_security_prompt = PromptTemplate(
input_variables=["query"],
template="""
You are a cyber-security expert and will answer the following question.
Question: '''{query}'''
"""
)
cve_to_attack_prompt = PromptTemplate(
input_variables=["prompt"],
template="""
{prompt}
"""
)
def get_required_cve_data(similar_cves):
    """Unpack the first Chroma result set into parallel lists of CVE ids,
    descriptions, technique-id lists and description token lengths."""
    metadatas = similar_cves['metadatas'][0]
    documents = similar_cves['documents'][0]
    # zip truncates to the shorter sequence, matching the original pairing.
    pairs = list(zip(metadatas, documents))
    cve_ids = [meta['cve'] for meta, _ in pairs]
    descriptions = [doc for _, doc in pairs]
    techniques = [
        [t.replace('\'', '').replace('\"', '').strip() for t in meta['technique'].split(',')]
        for meta, _ in pairs
    ]
    token_lengths = [meta['cve_description_token_len'] for meta, _ in pairs]
    return cve_ids, descriptions, techniques, token_lengths
def get_required_technique_data(similar_techniques):
    """Flatten the first Chroma result set into technique ids, their
    descriptions and description token lengths."""
    metadatas = similar_techniques['metadatas'][0]
    documents = similar_techniques['documents'][0]
    technique_ids = []
    descriptions = []
    token_lengths = []
    for meta, doc in zip(metadatas, documents):
        technique_ids.extend(
            t.replace('\'', '').replace('\"', '').strip()
            for t in meta['technique'].split(',')
        )
        descriptions.append(doc)
        token_lengths.append(meta['technique_description_token_len'])
    return technique_ids, descriptions, token_lengths
def get_cve_examples(top_cves_match, descriptions, techniques):
    """Build the few-shot example text for the matched CVEs and collect the
    de-duplicated, order-preserving list of all technique ids they map to."""
    # dict.fromkeys keeps first-seen order while dropping duplicates.
    cve_similarity_prediction = list(dict.fromkeys(t for group in techniques for t in group))
    parts = []
    for cve, description, technique_group in zip(top_cves_match, descriptions, techniques):
        flat_description = description.replace('\n', ' ')
        json_line = "{\"related_attacks\": " + str(technique_group) + "}"
        parts.append(f"{cve}: {flat_description}\n{json_line}\n")
    return "".join(parts), cve_similarity_prediction
def get_similar_cves(query, num_results=5):
    """Query the CWE Chroma collection for the *num_results* nearest entries."""
    return chroma_openai_cwe_collection.query(
        query_texts=query,
        n_results=num_results,
    )
def get_similar_techniques(query):
    """Vector-search the ATT&CK technique collection (fixed top-10 hits);
    returns the raw Chroma result dict."""
    similar_techniques = chroma_openai_attack_collection.query(
        query_texts=query,
        n_results=10
    )
    return similar_techniques
def remove_links_citations(text):
    """Strip parenthesised links and citation markers, then collapse whitespace.

    In the source data both URLs "(http...)" and "(Citation: ...)" markers
    sit inside round brackets; the non-greedy patterns limit each removal
    to a single bracket pair.
    """
    for pattern in (r"\(https?.*?\)", r"\(Citation:.*?\)"):
        text = re.sub(pattern, '', text, flags=re.I)
    # Normalise leftover runs of spaces/newlines to single spaces.
    return " ".join(text.split())
def get_len_token(text):
    """Return the token count of *text* under the module-level tokenizer.

    The argument is stringified first, so any object can be measured.
    """
    tokens = tokenizer.encode(str(text))
    num_ip_tokens = len(tokens)
    return num_ip_tokens
def make_few_shot_prompt(cve_description, attack_descriptions, example_cves, json_format):
    """Assemble the few-shot LLM prompt mapping a search string to attacks.

    The template's leading whitespace and wording (including the "wite"
    typo) are part of the runtime prompt and are deliberately untouched.
    """
    prompt = f"""
    You have been given an user search string below and the possible attack descriptions that it can be related to.
    Your task is to find out the exact attack descriptions that the user search string can map to.
    You can make use of the examples given below.
    User search string: '''{cve_description}'''
    Attack descriptions that above user search string can map to:
    '''
    {attack_descriptions}
    '''
    Examples of similar CVEs and the attack descriptions that they exactly map to:
    '''
    {example_cves}
    '''
    You should first write down the CVEs from the examples that are most similar to the user search string.
    Then you should write down the reasons why the most similar CVEs from examples are mapped to the attack descriptions provided in the examples.
    Now based on this information, attack descriptions and using critical reasoning map the given user search string with the exact attack descriptions, and write down the reasoning for it.
    Finally fill the below json and wite it down in correct json format:
    {json_format}
    """
    return prompt
def create_prompt_in_token_limit(
    cve_description, attacks_1, attacks_2, example_cves, json_format
):
    """Build the largest few-shot prompt that stays within ~4000 tokens.

    Techniques from ``attacks_1`` (CVE-derived) and ``attacks_2``
    (similarity-derived) are merged in order without duplicates, then
    included one at a time until the token budget is reached or every
    technique is present.  The budget is checked *after* each prompt is
    built, so the result may slightly exceed 4000 tokens (unchanged
    behaviour).

    Bug fixes: previously the function raised UnboundLocalError when the
    loop never ran (<=1 combined technique, or the bare prompt already
    over budget), and the loop bound ``i < len(...)`` meant the final
    technique could never be included.
    """
    # Order-preserving union of both technique lists.
    attacks_combined = []
    for att in list(attacks_1) + list(attacks_2):
        if att not in attacks_combined:
            attacks_combined.append(att)
    # Prompt with no attack descriptions doubles as the fallback result.
    prompt = make_few_shot_prompt(cve_description, '', example_cves, json_format)
    prompt_len = get_len_token(prompt)
    # Cache fetched/cleaned descriptions so each technique hits Chroma
    # once instead of once per outer iteration.
    description_cache = {}
    i = 1
    while prompt_len < 4000 and i <= len(attacks_combined):
        attack_descriptions = ""
        for technique in attacks_combined[:i]:
            if technique not in description_cache:
                fetched = chroma_openai_attack_collection.get(
                    where={"technique": technique},
                )
                description_cache[technique] = remove_links_citations(
                    fetched['documents'][0])
            attack_descriptions += f"{technique}: {description_cache[technique]}\n"
        prompt = make_few_shot_prompt(
            cve_description, attack_descriptions, example_cves, json_format
        )
        prompt_len = get_len_token(prompt)
        i += 1
    return prompt
def get_json_from_text(text):
    """Extract and parse the last ``{...}`` span in *text*.

    The LLM answer is expected to end with a JSON object such as
    ``{"related_attacks": [...]}``.  The last ``{`` is paired with the
    nearest following ``}``; the span is parsed first as JSON, then as a
    Python literal (the model sometimes emits single quotes).  Any
    failure yields the empty fallback result.

    Bug fix: the original used bare ``except:`` clauses and a manual
    reversed scan that could NameError when no ``}`` existed; exceptions
    are now narrow and explicit.
    """
    fallback = {"related_attacks": []}
    try:
        start = text.rindex('{')
        end = text.index('}', start) + 1
    except (ValueError, AttributeError):
        # No brace pair found (or *text* is not a string).
        return fallback
    json_text = text[start:end]
    try:
        return json.loads(json_text)
    except ValueError:
        # json.JSONDecodeError subclasses ValueError.
        pass
    try:
        return ast.literal_eval(json_text)
    except (ValueError, SyntaxError, TypeError):
        return fallback
def make_cve_to_attack_prompt(query):
    """Build the (template, prompt) pair used to map *query* to ATT&CK techniques.

    Similar CVEs provide few-shot examples; similar techniques provide
    candidate answers; both feed the token-limited prompt builder.
    """
    # Few-shot examples from the most similar CVEs.
    cve_hits = get_similar_cves(query)
    cve_ids, cve_texts, cve_techniques, _ = get_required_cve_data(cve_hits)
    cve_texts = [remove_links_citations(text) for text in cve_texts]
    example_cves, techniques_from_cves = get_cve_examples(
        cve_ids, cve_texts, cve_techniques)
    # Candidate techniques from direct similarity search.
    technique_hits = get_similar_techniques(query)
    candidate_techniques, _, _ = get_required_technique_data(technique_hits)
    prompt = create_prompt_in_token_limit(
        query,
        techniques_from_cves,
        candidate_techniques,
        example_cves,
        "{\"related_attacks\":[]}",
    )
    return cve_to_attack_prompt, prompt
def search_similar_cves(query, num_results):
    """Return the *num_results* most similar CVEs as plain dicts.

    Each dict carries the CVE id, its description and the list of
    associated ATT&CK technique names.
    """
    hits = get_similar_cves(query, num_results)
    cve_ids, cve_texts, technique_lists, _ = get_required_cve_data(hits)
    return [
        {
            "cve_name": cve_id,
            "cve_description": description,
            "attack_techniques": related,
        }
        for cve_id, description, related in zip(cve_ids, cve_texts, technique_lists)
    ]
def search_similar_cves_with_technique_descp(query, num_results):
    """Like search_similar_cves, but each technique also carries its full
    description fetched from the ATT&CK Chroma collection."""
    similar_cves = get_similar_cves(query, num_results)
    top_cves_match, cve_descriptions, \
        techniques, token_lens_cve = get_required_cve_data(similar_cves)
    # One Chroma .get() per technique per CVE -- potentially slow for
    # large result sets (no caching here).
    response = [
        {
            "cve_name":top_cves_match[i],
            "cve_description":cve_descriptions[i],
            "attack_techniques":[
                {
                    "attack_technique_name":technique,
                    "attack_technique_description":chroma_openai_attack_collection.get(
                        where={"technique": technique}
                    )["documents"][0]
                } for technique in techniques[i]
            ]
        } for i in range(len(top_cves_match))
    ]
    # NOTE(review): `response` is built but no `return response` appears here;
    # the source looks truncated -- confirm the trailing return.
{
"api_name": "tiktoken.get_encoding",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "langchain.prompts.PromptTemplate",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "langchain.prompts.PromptTemplate",
"line_number": 18,
"usage_type": "call"
},
{... |
36008270872 | #!/bin/python
# -*- coding: utf-8 -*-
# Created by 顾洋溢
from mqtt_test.mqtt_bussiness.iot_base64 import Base64
import base64
from Crypto.Cipher import AES
import json
import demjson
from mqtt_test.mqtt_bussiness import iot_sha256
class Iot_encry_decry(object):
    """AES-ECB decryptor for IoT MQTT payloads.

    The MQTT message is JSON whose ``msgContent`` field holds a base64
    AES-ECB ciphertext; the AES key is derived from a partial base64 key
    plus the tail of the MD5 of the device secret id.
    """

    def __init__(self, msg, AESkey, secretuId):
        self.msg = msg              # raw MQTT payload (bytes, JSON-encoded)
        self.AESkey = AESkey        # leading part of the base64 AES key
        self.secretuId = secretuId  # device secret, hashed into the key
        self.mode = AES.MODE_ECB
        # Padding removal: the last char's ordinal is the pad length
        # (PKCS#7-style, applied to the decoded str).
        self.unpad = lambda s: s[0:-ord(s[-1])]

    def decry(self):
        """Decrypt ``self.msg`` and return the inner payload as a dict."""
        # Pass the received message straight in; the envelope is JSON.
        message = self.msg.decode()
        # Bug fix: json.loads() no longer accepts an ``encoding`` kwarg
        # (removed in Python 3.9); the input is already a str here.
        getanswer = json.loads(message)
        msgContent = getanswer['msgContent']
        msgContent_base64 = Base64(msgContent)
        msgContent_base64 = msgContent_base64.b64decode()
        secretuId = iot_sha256.md5(self.secretuId)
        # Key = partial key + last 16 chars of md5(secret) + "==" padding,
        # then base64-decoded to raw key bytes.
        AESkey = self.AESkey + secretuId[-16:] + "=="
        AESkey = base64.b64decode(AESkey.encode("utf-8"))
        cryptor = AES.new(AESkey, self.mode)
        cipherdata = cryptor.decrypt(msgContent_base64)
        cipherdata = self.unpad(cipherdata.decode())
        return json.loads(cipherdata)
| xuxiuke/Android_test | SmartHomeV6Code_TestTeam-InterfaceTest/mqtt_test/mqtt_bussiness/iot_encry_decry.py | iot_encry_decry.py | py | 1,326 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "Crypto.Cipher.AES.MODE_ECB",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "Crypto.Cipher.AES",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "mqtt_tes... |
73970333864 | from .rl import train_rl_agent
import numpy as np
import os
from os import path
from datetime import datetime
import time
import json
import subprocess
from threading import Lock
from typing import Union, Callable, Optional
from multiprocessing import Process, Pipe
import optuna
def train_rl_agent_worker(pipe, hyperparameters):
    """Subprocess entry point: train one trial, reporting over *pipe*.

    Messages sent to the parent are ``[tag, payload]`` pairs:
      - ("shouldprune", {"rew", "step"}): the parent must reply with a
        bool pruning decision on the same pipe.
      - ("end", reward) / ("pruned", None) / ("error", exc): terminal.
    """
    def ask_parent_to_prune(train_reward, env_step):
        # Forward the intermediate reward, then block until the parent
        # answers with its pruning decision.
        pipe.send(["shouldprune", {
            "rew": train_reward,
            "step": env_step
        }])
        if pipe.recv():
            raise optuna.TrialPruned()

    try:
        try:
            final_reward = train_rl_agent(
                ask_parent_to_prune,
                **hyperparameters
            )
            pipe.send(["end", final_reward])
        except optuna.TrialPruned:
            pipe.send(["pruned", None])
        except Exception as exc:
            pipe.send(["error", exc])
    finally:
        pipe.close()
def find_available_gpus(util_threshold=20, mem_threshold=10, test_time=10):
    """Sample nvidia-smi *test_time* times and return consistently idle GPUs.

    A GPU counts as available only if, in every sample, its utilisation
    stays below *util_threshold* percent and its memory fraction stays
    below *mem_threshold*.  Returns device strings like "cuda:0".
    """
    busy = {}
    for _ in range(test_time):
        smi_output = subprocess.check_output(
            [
                'nvidia-smi', '--query-gpu=utilization.gpu,memory.used,memory.total',
                '--format=csv,nounits,noheader'
            ], encoding='utf-8')
        for gpu_id, line in enumerate(smi_output.strip().split('\n')):
            util, mem_used, mem_total = (float(field.strip())
                                         for field in line.split(','))
            mem_frac = mem_used / mem_total
            print("GPU {} Utilization {:.2f}% Mem {:.2f}%".format(gpu_id, util, mem_frac))
            busy.setdefault(gpu_id, False)
            # One bad sample marks the GPU busy for good.
            if util >= util_threshold or mem_frac >= mem_threshold:
                busy[gpu_id] = True
        time.sleep(0.1)
    available_gpus = ["cuda:" + str(gpu_id)
                      for gpu_id, is_busy in busy.items() if not is_busy]
    print("Available GPUs: " + str(available_gpus))
    return available_gpus
def train_rl_agent_auto(
    #Env config
    env_name: str = "LunarLanderContinuous-v2",
    env_reward_threshold: Optional[int] = 200,
    #Hyperband config
    max_env_steps: int = 1e6,
    min_trial_env_steps: int = 1e5,
    reduction_factor: float = 2,
    #Number of trials
    num_trials: Optional[int] = None,
    #Thread per GPU
    num_threads_per_gpu: int = 4,
    #Env per thread
    parallel_envs: int = 8,
    #Logdir. NOTE: the default is evaluated once at import time, so every
    #call without an explicit logdir shares one timestamped directory.
    logdir: str = "logs/autotune_with_ans_{}".format(datetime.now().strftime("%m-%d_%H-%M-%S"))
):
    """Hyperband hyperparameter search over train_rl_agent.

    Each trial trains in its own subprocess (one per reserved GPU slot,
    ``num_threads_per_gpu`` slots per idle GPU) and talks to this process
    over a Pipe for pruning decisions.
    """
    # GPU usage statistics: device name -> number of running trials.
    global_lock = Lock()
    # Get free GPUs.
    gpu_available = find_available_gpus()
    num_gpus = len(gpu_available)
    global_num_gpu_threads = {device_name: 0 for device_name in gpu_available}

    def train_rl_agent_trial(trial: optuna.Trial):
        # Reserve a GPU slot.  Bug fix: previously the counter was
        # incremented with device == "" (KeyError) *before* the
        # availability check, and the manual acquire/release meant any
        # raise while holding the lock left it locked forever.
        device = ""
        with global_lock:
            for k, v in global_num_gpu_threads.items():
                if v < num_threads_per_gpu:
                    device = k
                    break
            if not device:
                raise RuntimeError("No available GPU devices.")
            global_num_gpu_threads[device] += 1
        try:
            current_logdir = path.join(logdir, "{}".format(trial.number))
            # Make per-trial log dir.
            os.makedirs(current_logdir, exist_ok=True)
            # Suggest hyperparameters.
            hyperparameters = {
                "lr_actor": trial.suggest_loguniform("lr_actor", 1e-4, 1e-3),
                "lr_critic": trial.suggest_loguniform("lr_critic", 1e-4, 1e-3),
                "repeat": 5 * trial.suggest_int("repeat", 1, 8),
                "target_entropy": trial.suggest_uniform("target_entropy", -5, 0),
            }
            # Env config.
            hyperparameters.update({
                "env_name": env_name,
                "env_reward_threshold": env_reward_threshold
            })
            # Run config.
            hyperparameters.update({
                "max_env_steps": max_env_steps,
                "parallel_envs": parallel_envs,
                "log_dir": current_logdir,
                "device": device
            })
            # Persist the hyperparameter set next to the trial's logs.
            with open(path.join(current_logdir, "hyperparameters.json"), "w") as f:
                json.dump(hyperparameters, f, indent=4)
            # Train in a subprocess; progress messages arrive over the pipe.
            pipe_parent, pipe_child = Pipe()
            train_process = Process(target=train_rl_agent_worker, args=(pipe_child, hyperparameters))
            train_process.start()
            try:
                while True:
                    msg, payload = pipe_parent.recv()
                    if msg == "shouldprune":
                        # Report the intermediate reward and answer the
                        # worker's pruning query.
                        trial.report(payload["rew"], payload["step"])
                        pipe_parent.send(trial.should_prune())
                    elif msg == "pruned":
                        raise optuna.TrialPruned()
                    elif msg == "error":
                        raise payload
                    elif msg == "end":
                        return payload
            except KeyboardInterrupt:
                train_process.kill()
            finally:
                train_process.join()
        finally:
            # Release the occupied GPU slot.
            with global_lock:
                global_num_gpu_threads[device] -= 1
        return None

    # Make top-level log dir.
    os.makedirs(logdir, exist_ok=True)
    study = optuna.create_study(
        direction="maximize",
        pruner=optuna.pruners.HyperbandPruner(
            min_resource=int(min_trial_env_steps),
            max_resource=int(max_env_steps),
            reduction_factor=reduction_factor
        ),
        # SQLite does not support multithreading, so no shared storage here.
        #storage=os.path.join("sqlite:///", logdir, "optuna.db"),
        #load_if_exists=True
    )
    # Number of Hyperband brackets implied by the resource bounds.
    brackets = np.floor(np.log(max_env_steps / min_trial_env_steps) / np.log(reduction_factor)) + 1
    if brackets < 4 or brackets > 6:
        print("[WARN] Bracket number should be in [4,6].")
    print("Parallel envs: {}\nSearch jobs: {}\nHyperband brackets: {}".format(parallel_envs, num_gpus * num_threads_per_gpu, brackets))
    study.optimize(train_rl_agent_trial,
        n_trials=num_trials,
        n_jobs=num_gpus * num_threads_per_gpu
    )
    print(study.best_params)
if __name__ == '__main__':
    # Script entry point: run the hyperparameter search with all defaults.
    train_rl_agent_auto()
{
"api_name": "optuna.TrialPruned",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "rl.train_rl_agent",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "optuna.TrialPruned",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "subproce... |
38293660056 | """
ResBlocks for WGAN-GP.
"""
import torch.nn as nn
import torch.functional as F
from torch_mimicry.modules import resblocks
class GBlock(resblocks.GBlock):
    r"""
    Residual block for generator.

    Modifies original resblock definitions with small changes.
    Uses bilinear (rather than nearest) interpolation, and align_corners
    set to False. This is as per how torchvision does upsampling, as seen in:
    https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/_utils.py

    Attributes:
        in_channels (int): The channel size of input feature map.
        out_channels (int): The channel size of output feature map.
        hidden_channels (int): The channel size of intermediate feature maps.
        upsample (bool): If True, upsamples the input feature map.
        num_classes (int): If more than 0, uses conditional batch norm instead.
        spectral_norm (bool): If True, uses spectral norm for convolutional layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=None,
                 upsample=False,
                 num_classes=0,
                 spectral_norm=False,
                 **kwargs):
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         hidden_channels=hidden_channels,
                         upsample=upsample,
                         num_classes=num_classes,
                         spectral_norm=spectral_norm,
                         **kwargs)

        # Redefine shortcut layer without act: override the parent's 1x1
        # shortcut conv with a plain Conv2d.
        # NOTE(review): this replacement ignores spectral_norm even when the
        # flag is True -- confirm this is intended for the WGAN-GP variant.
        if self.learnable_sc:
            self.c_sc = nn.Conv2d(self.in_channels,
                                  self.out_channels,
                                  1,
                                  1,
                                  padding=0)
class DBlock(resblocks.DBlock):
    r"""
    Residual block for discriminator.

    Modifies original resblock definition by including layer norm and removing
    act for shortcut. Convs are LN-ReLU-Conv. See official TF code:
    https://github.com/igul222/improved_wgan_training/blob/master/gan_cifar_resnet.py#L105

    Attributes:
        in_channels (int): The channel size of input feature map.
        out_channels (int): The channel size of output feature map.
        hidden_channels (int): The channel size of intermediate feature maps.
        downsample (bool): If True, downsamples the input feature map.
        spectral_norm (bool): If True, uses spectral norm for convolutional layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=None,
                 downsample=False,
                 spectral_norm=False,
                 **kwargs):
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         hidden_channels=hidden_channels,
                         downsample=downsample,
                         spectral_norm=spectral_norm,
                         **kwargs)

        # Redefine shortcut layer without act.
        # TODO: Maybe can encapsulate defining of learnable sc in a fn
        # then override it later? Might be cleaner.
        # NOTE(review): the replacement Conv2d ignores spectral_norm -- confirm.
        if self.learnable_sc:
            self.c_sc = nn.Conv2d(self.in_channels, self.out_channels, 1, 1, 0)

        # Norm layers are disabled here; the lazily-built LayerNorm variant
        # below was abandoned (per the comment, LN showed no FID effect).
        self.norm1 = None
        self.norm2 = None

    # TODO: Verify again. Interestingly, LN has no effect on FID. Not using LN
    # has almost no difference in FID score.
    # def residual(self, x):
    #     r"""
    #     Helper function for feedforwarding through main layers.
    #     """
    #     if self.norm1 is None:
    #         self.norm1 = nn.LayerNorm(
    #             [self.in_channels, x.shape[2], x.shape[3]])

    #     h = x
    #     h = self.norm1(h)
    #     h = self.activation(h)
    #     h = self.c1(h)

    #     if self.norm2 is None:
    #         self.norm2 = nn.LayerNorm(
    #             [self.hidden_channels, h.shape[2], h.shape[3]])

    #     h = self.norm2(h)
    #     h = self.activation(h)
    #     h = self.c2(h)

    #     if self.downsample:
    #         h = F.avg_pool2d(h, 2)

    #     return h
class DBlockOptimized(resblocks.DBlockOptimized):
    r"""
    Optimized residual block for discriminator.

    Does not have any normalisation. See official TF Code:
    https://github.com/igul222/improved_wgan_training/blob/master/gan_cifar_resnet.py#L139

    Attributes:
        in_channels (int): The channel size of input feature map.
        out_channels (int): The channel size of output feature map.
        spectral_norm (bool): If True, uses spectral norm for convolutional layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 spectral_norm=False,
                 **kwargs):
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         spectral_norm=spectral_norm,
                         **kwargs)

        # Redefine shortcut layer as a plain 1x1 Conv2d.
        # NOTE(review): unlike the parent, this drops spectral norm on the
        # shortcut even when spectral_norm=True -- confirm intended.
        self.c_sc = nn.Conv2d(self.in_channels, self.out_channels, 1, 1, 0)
| kwotsin/mimicry | torch_mimicry/nets/wgan_gp/wgan_gp_resblocks.py | wgan_gp_resblocks.py | py | 5,220 | python | en | code | 593 | github-code | 36 | [
{
"api_name": "torch_mimicry.modules.resblocks.GBlock",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch_mimicry.modules.resblocks",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 45,
"usage_type": "call"
}... |
36165455796 | # coding=utf-8
from django.utils import timezone
from django.db import models
from tinymce.models import HTMLField
class News(models.Model):
    """Site news item with rich-text announce/body and scheduling fields.

    Python 2-era Django model (uses ``__unicode__``); field verbose names
    are user-facing Russian strings and must stay as-is.
    """
    title = models.CharField(u'заголовок', max_length=255)
    announce = HTMLField(u'анонс')  # short rich-text teaser
    content = HTMLField(u'описание', blank=True, null=True)  # optional full body
    on_main_page = models.BooleanField(u'показывать на главной', default=True)
    is_published = models.BooleanField(u'опубликовано', default=True)
    image = models.ImageField('изображение', upload_to=u'images/news/', blank=True)
    created_at = models.DateTimeField(u'дата начала', default=timezone.now)
    # close_at marks when the item should be hidden automatically.
    close_at = models.DateTimeField(u'дата закрытия', blank=True, null=True, help_text=u'дата автоматического снятия с показа')

    class Meta:
        ordering = ('-created_at',)  # newest first
        verbose_name = u'новость'
        verbose_name_plural = u'новости'
        # NOTE(review): no 'date_add' field exists on this model; this is
        # likely meant to be 'created_at' -- confirm before changing.
        get_latest_by = 'date_add'

    def __unicode__(self):
        return self.title
| lambospeed/ryabina | apps/newsboard/models.py | models.py | py | 1,084 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "... |
36253389052 | import threading
import time
from datetime import timedelta
from typing import Callable
from src.models import Team
from src.score_scraper import ScoreScraper
# Monitors a match and notifies the observer when the score changes
# Monitors a match and notifies the observer when the score changes
class ScoreChangeMonitor:
    """Polls a ScoreScraper and fires *callback* when a team's points change."""

    def __init__(
        self,
        score_scraper: ScoreScraper,
        polling_interval: timedelta,
        callback: Callable[[Team], None],
    ) -> None:
        """Store the scraper, the delay between polls, and the observer callback."""
        super().__init__()
        self._scraper = score_scraper
        self._wait_time = polling_interval
        self._callback = callback

    def start(self):
        """Blocking poll loop; runs forever (there is no stop condition)."""
        print("Checking initial score...")
        prev_score = self._scraper.get_current_score()
        print(prev_score)

        while True:
            print("Checking current score...")
            current_score = self._scraper.get_current_score()
            print(current_score)
            # Home and away teams are compared independently.
            self.notify_if_score_changed(prev_score.home_team, current_score.home_team)
            self.notify_if_score_changed(prev_score.away_team, current_score.away_team)
            prev_score = current_score

            # Delay before checking again
            print(
                f"Waiting {self._wait_time.total_seconds()} seconds before checking again..."
            )
            time.sleep(self._wait_time.total_seconds())

    def notify_if_score_changed(self, prev_team: Team, current_team: Team) -> None:
        """Invoke the callback (on a fresh thread) if the team's points changed."""
        if prev_team.points != current_team.points:
            print(
                f"{current_team.name} score changed from {prev_team.points} to {current_team.points}"
            )
            # Notify in a new thread so we don't have to wait for the callback to finish
            threading.Thread(target=lambda: self._callback(current_team)).start()
| holstt/celebrating-hue-lights | src/score_monitor.py | score_monitor.py | py | 1,755 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "src.score_scraper.ScoreScraper",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "src... |
12638712949 | import numpy as np
from collections import OrderedDict
from dataset.mnist import load_mnist
def numerical_gradient(f, x):
    """Central-difference gradient of the scalar function *f* at array *x*.

    *x* is perturbed in place one element at a time and restored before
    returning, so the caller's array is unchanged afterwards.
    """
    eps = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    walker = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not walker.finished:
        pos = walker.multi_index
        saved = x[pos]
        # f(x + eps * e_i)
        x[pos] = float(saved) + eps
        f_plus = f(x)
        # f(x - eps * e_i)
        x[pos] = saved - eps
        f_minus = f(x)
        grad[pos] = (f_plus - f_minus) / (2 * eps)
        x[pos] = saved  # restore the original value
        walker.iternext()

    return grad
def softmax(a):
    """Numerically stable softmax.

    Generalised to mini-batches: for a 2-D array of shape
    (batch, classes) the softmax is taken row-wise, which is what
    SoftmaxWithLoss feeds it; 1-D input behaves exactly as before.
    Subtracting the (row) max guards against exp overflow.
    """
    if a.ndim == 2:
        shifted = a - np.max(a, axis=1, keepdims=True)
        exp_a = np.exp(shifted)
        return exp_a / np.sum(exp_a, axis=1, keepdims=True)
    c = np.max(a)  # Overflow Measures
    exp_a = np.exp(a - c)
    return exp_a / np.sum(exp_a)
def cross_entropy_error(y, t):
    """Cross-entropy of predictions *y* against one-hot targets *t*.

    Generalised to mini-batches: for 2-D input the loss is averaged over
    the batch, matching SoftmaxWithLoss.backward which divides the
    gradient by the batch size.  1-D behaviour is unchanged (batch of 1).
    """
    delta = 1e-7  # avoids log(0)
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + delta)) / batch_size
class Relu:
    """Rectified linear unit layer.

    forward() clamps negative (and zero-masked) inputs to zero;
    backward() blocks the gradient at exactly those positions, which are
    remembered in ``self.mask``.
    """

    def __init__(self):
        self.mask = None  # True where the last forward input was <= 0

    def forward(self, x):
        self.mask = x <= 0
        return np.where(self.mask, 0, x)

    def backward(self, dout):
        # Zero the gradient in place, as the original implementation did.
        dout[self.mask] = 0
        return dout
class Affine:
    """Fully-connected layer computing ``x @ W + b``.

    Caches the forward input and stores the parameter gradients
    (``dW``, ``db``) during backward.
    """

    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None   # input cached by forward for use in backward
        self.dW = None  # dL/dW after backward
        self.db = None  # dL/db after backward

    def forward(self, x):
        self.x = x
        return x @ self.W + self.b

    def backward(self, dout):
        self.dW = self.x.T @ dout
        self.db = dout.sum(axis=0)
        return dout @ self.W.T
class SoftmaxWithLoss:
    """Softmax activation fused with cross-entropy loss.

    forward() stores the probabilities and targets so that backward()
    can emit the simple ``(y - t) / batch_size`` gradient.
    """

    def __init__(self):
        self.loss = None  # last computed loss value
        self.y = None     # softmax probabilities from the last forward
        self.t = None     # one-hot targets from the last forward

    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)
        return self.loss

    def backward(self, dout=1):
        batch_size = self.t.shape[0]
        return (self.y - self.t) / batch_size
class TwoLayerNet:
    """Two-layer fully-connected network (Affine-ReLU-Affine-Softmax).

    Parameters live in ``self.params``; layers are kept in an
    OrderedDict so forward/backward can iterate them in order.
    """

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Gaussian-initialised weights, zero biases.
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        self.layers = OrderedDict()  # preserves layer order
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        self.lastLayer = SoftmaxWithLoss()

    def predict(self, x):
        """Forward pass without the loss layer; returns raw class scores."""
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        """Cross-entropy loss of the network on batch (x, t)."""
        y = self.predict(x)
        return self.lastLayer.forward(y, t)

    def accuracy(self, x, t):
        """Fraction of samples whose arg-max prediction matches *t*."""
        y = self.predict(x)
        # Bug fix: np.array(y, axis=1) raised a TypeError (np.array has no
        # ``axis`` keyword); the intended call is np.argmax.
        y = np.argmax(y, axis=1)
        if t.ndim != 1:
            t = np.argmax(t, axis=1)
        return np.sum(y == t) / float(x.shape[0])

    def numerical_gradient(self, x, t):
        """Gradients via finite differences (slow; for gradient checking).

        Delegates to the module-level numerical_gradient function.
        """
        loss_W = lambda W: self.loss(x, t)
        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        # Bug fix: 'b2' previously differentiated W2 a second time.
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads

    def gradient(self, x, t):
        """Gradients via backpropagation."""
        self.loss(x, t)  # forward pass populates each layer's cache
        dout = self.lastLayer.backward(1)
        for layer in reversed(list(self.layers.values())):
            dout = layer.backward(dout)
        grads = {}
        grads['W1'] = self.layers['Affine1'].dW
        grads['b1'] = self.layers['Affine1'].db
        grads['W2'] = self.layers['Affine2'].dW
        grads['b2'] = self.layers['Affine2'].db
        return grads
# Gradient check: compare backprop gradients against numerical gradients
# on a small batch of MNIST data.
# Bug fix: the test split previously unpacked as (t_test, t_test), which
# discarded the test images and immediately overwrote the labels.
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

x_batch = x_train[:3]
t_batch = t_train[:3]

grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)

# Mean absolute difference per parameter; should be ~0 if backprop is right.
for key in grad_numerical.keys():
    diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + ":" + str(diff))
| wk1219/Data-Science | AI/Back-Propagation/Back-propagation.py | Back-propagation.py | py | 4,461 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.zeros_like",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.nditer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": ... |
11952058238 | from pymongo.errors import DuplicateKeyError
import requests,os,sys,inspect,uuid
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir+"/../dao/")
sys.path.insert(0,parentdir)
from hashVTDAO import createHashVTDAO, getHashVTDAO
parentdir = os.path.dirname(currentdir+"/../tools/")
sys.path.insert(0,parentdir)
from response import bodyMessageError, bodyMessageValid
def createHashVTService(data_vt):
    """Persist a VirusTotal hash record and return an API response body.

    Returns bodyMessageValid(data_vt) on success, otherwise a
    bodyMessageError describing the failure (duplicate id, missing
    field, or unexpected error).
    """
    try:
        createHashVTDAO(data_vt)
        return bodyMessageValid(data_vt)
    except DuplicateKeyError:
        print("[createHashVTService] L'id {} est déjà présent dans la table.".format(data_vt["_id"]))
        return bodyMessageError("L'id {} est déjà présent dans la table.".format(data_vt["_id"]))
    except KeyError as k:
        print("[createHashVTService] La donnée {} n'a pas été spécifié.".format(k))
        return bodyMessageError("La donnée {} n'a pas été spécifié.".format(k))
    except Exception as e:
        # Bug fix: this handler referenced the undefined name ``k`` and so
        # raised a NameError that masked the original exception.
        print("[createHashVTService] Une erreur est survenue: {}".format(e))
        return bodyMessageError("Une erreur est survenue.")
def getHashVTService(hash_id):
    """Look up a VirusTotal hash record and return an API response body.

    Returns bodyMessageValid with the first match, or a bodyMessageError
    when nothing is found or an error occurs.
    """
    try:
        get_hash_vt = getHashVTDAO(hash_id)
        if len(get_hash_vt) <= 0:
            return bodyMessageError("Aucune donnée trouvée.")
        return bodyMessageValid(get_hash_vt[0])
    except KeyError as k:
        print("[getHashVTService] La donnée {} n'a pas été spécifié.".format(k))
        return bodyMessageError("La donnée {} n'a pas été spécifié.".format(k))
    except Exception as e:
        # Bug fix: this handler referenced the undefined name ``k`` and so
        # raised a NameError that masked the original exception.
        print("[getHashVTService] Une erreur est survenue: {}".format(e))
        return bodyMessageError("Une erreur est survenue.")
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "inspect.getfile",
"line... |
34885776973 | # %%
from enum import Enum
from typing import List, AnyStr, Callable, Sequence, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from transformers.pipelines.base import Pipeline
import tweepy as tw
import plotly.express as px
import os
import json
from transformers import (AutoModelForSequenceClassification, AutoTokenizer, pipeline)
import torch
from icecream import ic
import numpy as np
import pandas as pd
import textwrap
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
import IPython.display
from IPython.display import display, clear_output
import plotly.graph_objects as go
import datetime
import dash
from dash import dcc
from dash import html
import plotly
from dash.dependencies import Input, Output, State, ALL, ALLSMALLER, MATCH, DashDependency
# %%
# help(go.Stream)
# %%
# %%
# Twitter API credentials come from the environment (never hard-coded);
# a missing variable raises KeyError at import time.
api_key = os.environ['TWITTER_API_KEY']
api_secret = os.environ['TWITTER_API_KEY_SECRET']
access_token = os.environ['TWITTER_ACCESS_TOKEN']
access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
# Multilingual 1-to-5-star sentiment model used by the classifier pipeline.
model_name = 'nlptown/bert-base-multilingual-uncased-sentiment'
# %%
# API Setup
# auth = tw.OAuthHandler(api_key, api_secret)
# auth.set_access_token(access_token, access_token_secret)
# api = tw.API(auth, wait_on_rate_limit=True)
# hashtag = "#VenomReleasesInMumbai"
# query = tw.Cursor(api.search_tweets, q=hashtag).items()
# tweets = [{'Tweet': tweet.text, 'Timestamp': tweet.created_at} for tweet in query]
# df = pd.DataFrame.from_dict(tweets)
# df.head()
# %%
print('Loading pipeline...')
# Heavy one-time model download/load; runs at import time.
classifier = pipeline('sentiment-analysis', model=model_name)
print('Pipeline load completed.')
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForSequenceClassification.from_pretrained(model_name)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# %%
def classify_sentiment(tweets: Union[AnyStr, Sequence[AnyStr]], classifier: Pipeline) -> Sequence[int]:
    """Map tweet text(s) to 1-5 star sentiment scores.

    The pipeline returns dicts whose 'label' looks like "4 stars"; the
    leading integer of each label is extracted and returned as a list.
    """
    return list(
        pd.DataFrame.from_dict(classifier(tweets))['label']
        .apply(lambda x: x.split()[0])
        .astype(int)
        .values
    )

# Smoke test at import time (runs the model once).
print(classify_sentiment(["This is worse", "This is fun"], classifier))
# %%
'''
def get_sentiment_score(tweet_text: AnyStr, model, tokenizer):
classification_result = model(tokenizer.encode(tweet_text, return_tensors='pt'))
return int(torch.argmax(classification_result.logits)) + 1
class SentimentAnalyzer():
def get_scores(tweet: AnyStr, func: Callable[[AnyStr, Union[Pipeline, (AutoModelForSequenceClassification, AutoTokenizer)]], int]) -> List[int]:
return func(tweet)
class PlotType(Enum):
Current = 1
Cumulative = 2
class PeriodType(Enum):
Current = 1
Day = 2
Week = 3
ByWeek = 4
Month = 5
Quarter = 6
HalfYear = 7
Year = 8
BiYear = 9
ThreeYear = 10
FiveYear = 11
class Plotter:
def plot_sentiments(plot_type: PlotType, period_type: PeriodType):
pass
class LayoutGenerator:
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
def get_layout(self):
LayoutGenerator.app.layout = html.Div(
html.Div([
html.H4('Live Twitter Sentiment analysis'),
html.Div(id='live-update-tweet'),
dcc.Graph(id='live-update-graph')
])
)
@app.callback(Output('live-update-tweet', 'children'))
def update_tweet(tweets):
style = {'padding': '5px', 'fontSize': '16px'}
return html.Span(tweets, style=style)
'''
# Dash app; suppress_callback_exceptions / prevent_initial_callbacks keep
# callbacks quiet until components exist.
app = dash.Dash(__name__, external_stylesheets=external_stylesheets,
    suppress_callback_exceptions=True,prevent_initial_callbacks=True,
    meta_tags=[{'name': 'viewport',
                'content': 'width=device-width, initial-scale=1.0'}])
app.layout = html.Div(
    html.Div([
        html.H4('Live Twitter Sentiment analysis'),
        html.Div(id='live-update-tweet'),
        # dcc.Graph(id='live-update-graph'),
        # Hidden placeholder div acting as the callback input trigger.
        html.Div(id='1', style={'display': 'none'}, children=None)
    ])
)
@app.callback(Output('live-update-tweet', 'children'), Input('1', 'children'), prevent_initial_call=True)
def update_tweet(tweets):
    """Dash callback: render the given tweet text into the page."""
    print("update_tweet called")
    style = {'padding': '5px', 'fontSize': '16px'}
    return html.Span(tweets, style=style)
class Listener(tw.Stream):
    """Tweepy stream listener that sentiment-scores incoming tweets.

    Each raw tweet is reduced to (text, created_at), scored 1-5 with the
    module-level ``classifier`` and accumulated in ``self.tweets_df``.
    """

    def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret, *, chunk_size=512, daemon=False, max_retries=10, proxy=None, verify=True):
        super().__init__(consumer_key, consumer_secret, access_token, access_token_secret, chunk_size=chunk_size, daemon=daemon, max_retries=max_retries, proxy=proxy, verify=verify)
        self.df_cols = ["text", "created_at"]
        # Accumulated stream: one row per tweet plus its sentiment score.
        self.tweets_df = pd.DataFrame(columns=[*self.df_cols, "score"])

    def on_data(self, raw_data):
        data = json.loads(raw_data)
        row = pd.DataFrame(data=[[data.get(field) for field in self.df_cols]],
                           columns=self.df_cols)
        # Model input is capped at 512 characters per tweet.
        tweets = list(row['text'].apply(lambda x: str(x)[:512]).values)
        row["score"] = classify_sentiment(tweets, classifier)
        # Bug fix: DataFrame.append was deprecated and removed in pandas 2.0;
        # pd.concat is the supported replacement.
        self.tweets_df = pd.concat([self.tweets_df, row], ignore_index=True)
        return super().on_data(raw_data)

    def on_status(self, status):
        return super().on_status(status)

    def on_request_error(self, status_code):
        # 420: rate-limited by Twitter -> stop the stream.
        if status_code == 420:
            return False
        return super().on_request_error(status_code)

    def on_connection_error(self):
        self.disconnect()

    def start(self, keywords):
        """Begin filtering the live stream on *keywords*."""
        return super().filter(track=keywords)
# %%
listener = Listener(api_key, api_secret, access_token, access_token_secret)
print("Startting server")
# NOTE(review): run_server blocks the main thread, so listener.start()
# below only executes after the Dash server exits -- confirm whether the
# stream should run in a background thread instead.
_ = app.run_server(debug=True)
print("Streaming tweets")
_ = listener.start(['Python'])
# %%
| talk2sunil83/BERTLearnings | twitter_sentimant_analysis.py | twitter_sentimant_analysis.py | py | 6,031 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"lin... |
4192973907 | import sqlite3
from flask import Flask, render_template,request
from flask_paginate import Pagination, get_page_parameter
app = Flask(__name__)
@app.route('/')
def index():
    """Render the paginated app listing.

    Reads the page number from the query string, fetches one page of rows
    from ``app_store``, and renders them with a Bootstrap 4 pagination widget.
    """
    page = request.args.get(get_page_parameter(), type=int, default=1)
    per_page = 8
    offset = (page - 1) * per_page
    conn = sqlite3.connect('app_store.db')
    # Close the connection even if a query raises (the original leaked it
    # on any error between connect() and close()).
    try:
        cur = conn.cursor()
        cur.execute("SELECT COUNT(*) FROM app_store")
        total = cur.fetchone()[0]
        # Parameterized LIMIT/OFFSET: safe and lets sqlite reuse the plan.
        cur.execute("SELECT * FROM app_store ORDER BY id LIMIT ? OFFSET ?", (per_page, offset))
        data = cur.fetchall()
    finally:
        conn.close()
    pagination = Pagination(page=page, per_page=per_page, total=total, css_framework='bootstrap4')
    return render_template('index.html', data=data, pagination=pagination)
@app.route('/detail/<int:app_id>')
def detail(app_id):
    """Render the detail page for a single app.

    Joins ``app_store`` with ``app_store_desc`` on ``app_id`` and passes the
    combined row to the template as a column-name -> value dict.

    Raises:
        ValueError: if no row matches ``app_id`` (the original crashed with
            an opaque TypeError from ``zip(columns, None)``).
    """
    conn = sqlite3.connect('app_store.db')
    # Guarantee the connection is released even if the query raises.
    try:
        c = conn.cursor()
        c.execute(
            "SELECT * FROM app_store left JOIN app_store_desc "
            "ON app_store.app_id=app_store_desc.app_id "
            "WHERE app_store.app_id = ?",
            (app_id,),
        )
        columns = [col[0] for col in c.description]
        row = c.fetchone()
    finally:
        conn.close()
    if row is None:
        raise ValueError(f"no app with id {app_id}")
    product = dict(zip(columns, row))
    return render_template('detail.html', product=product)
| grace-lliu/CS551P-assignment | index.py | index.py | py | 1,361 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "flask.request... |
24059635257 | from flask import session
# Dictionary uniter
def MagerDicts(dict1, dict2):
    """Merge two containers of the same kind.

    Two lists are concatenated into a new list; two dicts are merged into a
    new dict (values from ``dict2`` win on duplicate keys). Any other
    combination of types returns ``False``.
    """
    both_lists = isinstance(dict1, list) and isinstance(dict2, list)
    both_dicts = isinstance(dict1, dict) and isinstance(dict2, dict)
    if both_lists:
        return [*dict1, *dict2]
    if both_dicts:
        return {**dict1, **dict2}
    return False
# Cart items simulated
dictitems1 = {'1': {'name': "Travel Scope", 'price': 86.00, 'quantity': 2, 'total_price': 172.00,
'stock': 15, 'brand': "Celestron", 'Mount': 'Altazimutal', 'Lens': 'Refractor', 'desc': " ",
'image': 'static/images/product-images/TravelScope.png', 'cost': 86.00}}
dictitems2 = {'3': {'name': "Advanced VX", 'price': 3699.00, 'quantity': 1, 'total_price': 3699.00,
'stock': 10, 'brand': "Celestron", 'Mount': 'Ecuatorial', 'Lens': 'Catadioptrico', 'desc': " ",
'image': "static/images/product-images/AdvancedVX.jpg", 'cost': 3699.00}}
def getCartModel():
    """Seed the session cart with the two simulated items.

    For each item dict: merge it into ``session['cart']`` when a cart
    already exists, otherwise the item dict becomes the cart.
    """
    for items in (dictitems1, dictitems2):
        if 'cart' in session:
            session['cart'] = MagerDicts(session['cart'], items)
        else:
            session['cart'] = items
    return
def addCartModel():
    """Placeholder for adding/updating items in the session cart."""
    # make changes to cart here
    # not in use at the moment
    return
def deleteCartItemModel():
    """Placeholder for removing an item from the session cart."""
    # delete item from cart
    # not in use at the moment
    return
| Jean029/DataBaseProyect | frontend_model/cartModel.py | cartModel.py | py | 1,528 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.session",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_n... |
15380008623 | import sqlalchemy as sql
from sqlalchemy.sql.expression import func
from datetime import datetime
import _pickle as cPickle
import logging
logging.basicConfig(filename='overlaps.log', level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger=logging.getLogger(__name__)
class OverlapsManager:
    """Computes pairwise chatter overlaps between the current top Twitch channels.

    Chatter lists for the [start_time, end_time] window are read from the
    ``chatters`` table, cached on disk as pickle files under ``tmp/``, pairwise
    intersected, and the overlap counts are written to ``channel_overlaps``.
    """

    start_time = None
    end_time = None
    engine = None
    metadata_obj = None
    overlaps = None

    def __init__(self, *, start_time, end_time, db_url):
        self.start_time = start_time
        self.end_time = end_time
        self.engine = sql.create_engine(db_url)
        self.metadata_obj = sql.MetaData()

    def run(self, gen_chatter_sets = True, calc_chatter_overlaps = True):
        """Build per-channel chatter sets and/or compute the pairwise overlaps."""
        logging.info("Starting Overlaps Run")
        # Channels flagged as current top streams are the ones to compare.
        channels_table = sql.Table('channels', self.metadata_obj, autoload_with=self.engine)
        stmt = sql.select(channels_table.c.url_name).where(channels_table.c.is_current_top_stream == True)
        with self.engine.connect() as conn:
            res = conn.execute(stmt).fetchall()
        channels = [r for r, in res]  # Flatten single-column rows into a plain list
        if gen_chatter_sets:
            # Dump each channel's chatter set to a pkl file for the overlap pass.
            self.generate_chatter_sets(channels)
        if calc_chatter_overlaps:
            # Unique unordered pairs so each overlap is computed exactly once.
            combinations = self.get_top_channel_combinations(channels)
            # Rows destined for channel_overlaps (see dump_overlaps_to_db).
            self.overlaps = self.calc_overlaps(combinations)

    def dump_overlaps_to_db(self):
        """Insert the computed overlaps under a fresh batch_id, then clean tmp/."""
        logging.info("Dumping overlaps to database")
        overlaps = self.overlaps
        overlaps_table = sql.Table("channel_overlaps", self.metadata_obj, autoload_with=self.engine)
        # Next batch_id = max existing + 1 (0 for an empty table).
        stmt = sql.select(func.max(overlaps_table.c.batch_id))
        with self.engine.connect() as conn:
            res = conn.execute(stmt).fetchall()
        prev_batch_id = res[0][0]
        if prev_batch_id is not None:  # must also trigger when the max is 0
            new_batch_id = int(prev_batch_id) + 1
        else:
            new_batch_id = 0
        logging.info(f"New batch id is: {new_batch_id}")
        for overlap in overlaps:
            overlap["batch_id"] = new_batch_id
        # Insert in chunks of 100 to avoid absurdly long statements that can
        # fail against a slow database.
        chunks = [overlaps[x:x + 100] for x in range(0, len(overlaps), 100)]
        with self.engine.connect() as conn:
            for index, chunk in enumerate(chunks, start=1):
                logging.info(f"Insertion chunk progress: {index}/{len(chunks)}")
                conn.execute(sql.insert(overlaps_table).values(chunk))
        # Remove the cached chatter-set files.
        self.delete_dir('./tmp')

    def delete_dir(self, path):
        """Recursively delete *path* (used to clear the tmp/ cache)."""
        import shutil
        shutil.rmtree(path)

    def get_top_channel_combinations(self, channels):
        """Map each channel to the channels after it, yielding unique pairs."""
        combinations = {}
        for i, channel in enumerate(channels):
            combinations[channel] = channels[i + 1:]
        return combinations

    def condense_chatters(self, res):
        """Union the 'chatters' lists of all chatters_json entries into one set.

        NOTE(review): assumes each entry is a mapping with a 'chatters'
        sequence — confirm against the chatters table's JSON schema.
        """
        channel_chatters = set()
        for entry in res:
            channel_chatters |= set(entry['chatters'])
        return channel_chatters

    def get_chatters(self, chatters_table, channel):
        """Return the set of all chatters seen for *channel* in the time window."""
        stmt = sql.select(chatters_table.c.chatters_json).where(
            chatters_table.c.url_name == channel,
            chatters_table.c.log_time >= self.start_time,
            chatters_table.c.log_time <= self.end_time
        )
        with self.engine.connect() as conn:
            res = conn.execute(stmt).fetchall()
        res = [r for r, in res]
        return self.condense_chatters(res)

    def calc_overlaps(self, channel_combinations):
        """Count overlapping chatters for every channel pair.

        Reads the pkl files written by generate_chatter_sets() and returns a
        list of row dicts for the channel_overlaps table.
        """
        data = []
        for c1, combinations in channel_combinations.items():
            logging.info(f"Calculating {len(combinations)} Overlaps for Channel {c1}")
            # Load the base chatter set once per source channel.
            with open(f'tmp/channel_{c1}_set.pkl', 'rb') as handle:
                c1_set = cPickle.load(handle)
            for c2 in combinations:
                # BUGFIX: the comparison file was opened without the '.pkl'
                # extension that generate_chatter_sets() writes, so every
                # pair raised FileNotFoundError.
                with open(f'tmp/channel_{c2}_set.pkl', 'rb') as handle:
                    c2_set = cPickle.load(handle)
                overlap_count = len(c1_set & c2_set)
                data.append({"source": c1, "target": c2, "weight": overlap_count, "log_time": datetime.utcnow()})
        return data

    def generate_chatter_sets(self, channels):
        """Dump one pickle file per channel containing its chatter set.

        Caching to disk keeps memory bounded while avoiding thousands of
        redundant queries against a slow database; file I/O is the cheaper
        trade-off here.
        """
        import os
        logging.info("Generating chatter sets as pkl objects")
        # dump_overlaps_to_db() deletes tmp/, so recreate it before writing.
        os.makedirs('tmp', exist_ok=True)
        chatters_table = sql.Table('chatters', self.metadata_obj, autoload_with=self.engine)
        # get_chatters() opens its own connection; no outer connection needed
        # (the original held one open without using it).
        for channel in channels:
            chatter_set = self.get_chatters(chatters_table, channel)
            with open(f'tmp/channel_{channel}_set.pkl', 'wb') as handle:
                cPickle.dump(chatter_set, handle)
            logging.info(f"Dumped chatter set for {channel}")
        return

    def calc_stats(self, batch_id):
        """Compute centrality statistics over the overlap graph of *batch_id*.

        Only edges with weight >= 1000 are included. Returns a dict of
        (vertex, score) lists sorted by score descending.
        """
        import networkx as nx
        overlaps_table = sql.Table("channel_overlaps", self.metadata_obj, autoload_with=self.engine)
        stmt = sql.select(overlaps_table.c.source, overlaps_table.c.target, overlaps_table.c.weight).where((overlaps_table.c.batch_id == batch_id) & (overlaps_table.c.weight >= 1000))
        with self.engine.connect() as conn:
            res = conn.execute(stmt).fetchall()
        # Build an adjacency dict-of-dicts for networkx.
        network_data = {}
        for source, target, weight in res:
            if source in network_data:
                network_data[source][target] = {"weight": weight}
            else:
                network_data[source] = {target: {"weight": weight}}
        G = nx.from_dict_of_dicts(network_data)
        ec = nx.eigenvector_centrality(G, weight='weight', max_iter=1000)
        ec = sorted([(v, c) for v, c in ec.items()], key=lambda x: x[1], reverse=True)
        bc = nx.betweenness_centrality(G, weight='weight')
        bc = sorted([(v, c) for v, c in bc.items()], key=lambda x: x[1], reverse=True)
        cc = nx.closeness_centrality(G)
        cc = sorted([(v, c) for v, c in cc.items()], key=lambda x: x[1], reverse=True)
        return {
            "eigenvector_centrality": ec,
            "betweeness_centrality": bc,
            "closeness_centrality": cc
        }
if __name__ == "__main__":
import os
from dotenv import load_dotenv
load_dotenv()
start_time = "2022-10-10 00:00:00.000"
end_time = "2022-11-08 00:00:00.000"
om = OverlapsManager(start_time=start_time, end_time=end_time, db_url=os.environ.get("DB_URL"))
om.run(gen_chatter_sets = True, calc_chatter_overlaps = True)
om.dump_overlaps_to_db()
| KiranGershenfeld/VisualizingTwitchCommunities | AtlasGeneration/Python/CalculateOverlaps.py | CalculateOverlaps.py | py | 7,918 | python | en | code | 338 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.crea... |
36913924087 | import struct
from dh import create_dh_key, calculate_dh_secret
from .xor import XOR
from enum import Enum
import hmac
import hashlib
# part 2 - start
from CA import Certificate_Authority
# part 2 - end
# Add messages
class Message(bytes, Enum):
    """Protocol message tags exchanged between client and server."""
    LIST = b"LIST"
    AUTH = b"AUTH"
    ECHO = b"ECHO"
    ERROR = b"ERROR"
    CHAT = b"CHAT"
    ACK = b"OK"
    CHAT_SESSION = b"PORT"
    FILE = b"FILE"
    FILE_TRANSFER = b"TRANSFER"
class StealthConn(object):
    """Encrypted socket wrapper with a DH handshake, HMAC framing and certificates.

    On construction both peers run a Diffie-Hellman exchange to derive a shared
    secret; the first 4 characters seed the XOR cipher. Messages are sent as a
    two-byte little length prefix followed by (optionally certificate-prefixed)
    ciphertext of HMAC-SHA256 digest + payload.
    """
    def __init__(self, conn,
                 client=False,
                 server=False,
                 user=None,
                 verbose=False):
        """Wrap *conn* and immediately perform the key-agreement handshake."""
        self._secret = None
        self.conn = conn
        self.cipher = None
        self.client = client
        self.server = server
        self.verbose = verbose
        self.user = user
        # part 2 - start
        # Certificate state machine: 0 = plain, 1 = ID assigned (certificate
        # pending), 2 = certificate generated and attached to every packet.
        self.state = 0
        self.localID = None
        self.remoteID = "server"
        self.pub_key = None
        self.remote_pub_key = None
        self.certificate_authority = None
        self.certificate = None
        # part 2 - end
        self.cipher, self._secret = self.generate_secret()
    def generate_secret(self):
        """Run the DH handshake and build the XOR cipher from the shared secret."""
        # Perform the initial connection handshake for agreeing on a shared secret.
        # This can be broken into code run just on the server or just on the client.
        if self.server or self.client:
            my_public_key, my_private_key = create_dh_key()
            # Send them our public key
            self.send(bytes(str(my_public_key), "ascii"))
            # Receive their public key
            their_public_key = int(self.recv())
            # Obtain our shared secret
            shared_hash = calculate_dh_secret(their_public_key,
                                              my_private_key)
            # part 2 - start
            self.pub_key = my_public_key
            self.remote_pub_key = their_public_key
            # part 2 - end
            self._secret = shared_hash
        # NOTE(review): if neither server nor client is set, shared_hash is
        # unbound here and the next line raises NameError — confirm intended.
        # NOTE(review): the original comment claimed a 32-char key, but only
        # the first 4 characters of the shared hash are used.
        self.cipher = XOR.new(shared_hash[:4].encode("utf-8"))
        return self.cipher, self._secret
    def send(self, data_to_send):
        """Frame and transmit *data_to_send*: HMAC digest + payload, encrypted,
        optionally certificate-prefixed, with a 2-byte length header.

        Returns the packed length and the bytes that were sent.
        """
        # NOTE(review): hard-coded HMAC key shared by both peers — the DH
        # secret is not used for authentication.
        hmac_key = "Sahil's Secret Key"
        hmac_object = hmac.new(key=hmac_key.encode(), msg=data_to_send, digestmod=hashlib.sha256)
        message_digest = hmac_object.digest()
        # 32-byte digest is prepended to the plaintext before encryption.
        data = bytes(message_digest) + bytes(data_to_send)
        if self.cipher:
            encrypted_data = self.cipher.encrypt(data)
            if self.verbose:
                print("Sending packet of length {}".format(len(encrypted_data)))
        else:
            encrypted_data = data
        if self.verbose:
            print("Send state is ", self.state)
        # part 2 - start
        # Lazily create our certificate once an ID has been assigned.
        if self.state == 1 and self.localID is not None:
            self.certificate_authority = Certificate_Authority()
            self.certificate = self.certificate_authority.generateCertificate(self.localID, self.pub_key)
            self.state = 2
        if self.state == 2:
            # Certificate travels in the clear ahead of the ciphertext.
            encrypted_data = self.certificate + encrypted_data
        # part 2 - end
        # Encode the data's length into an unsigned two byte int ('H')
        pkt_len = struct.pack('H', len(encrypted_data))
        self.conn.sendall(pkt_len)
        self.conn.sendall(encrypted_data)
        return struct.pack('H', len(encrypted_data)), encrypted_data
    def recv(self):
        """Receive one framed message and return its payload (without the HMAC).

        NOTE(review): the received digest (data[:32]) is extracted and a local
        digest is computed, but the two are never compared — message integrity
        is not actually enforced. A certificate mismatch is also silently
        ignored (no else branch on the verification).
        """
        # Decode the data's length from an unsigned two byte int ('H')
        pkt_len_packed = self.conn.recv(struct.calcsize('H'))
        unpacked_contents = struct.unpack('H', pkt_len_packed)
        pkt_len = unpacked_contents[0]
        encrypted_data = self.conn.recv(pkt_len)
        if self.verbose:
            print("Receive state is ", self.state)
        # part 2 - start
        if self.state == 1 and self.localID is not None:
            self.certificate_authority = Certificate_Authority()
            self.certificate = self.certificate_authority.generateCertificate(self.localID, self.pub_key)
            self.state = 2
        # A 128-byte certificate precedes the ciphertext once certificates
        # are in play.
        if self.state != 0 and pkt_len > 128:
            signature = encrypted_data[:128]
            msg = encrypted_data[128:]
            pkt_len = pkt_len - 128
            if self.state == 2:
                # verify signature
                user_certificate = self.certificate_authority.generateCertificate(self.remoteID, self.remote_pub_key)
                if user_certificate == signature:
                    print("User Signature Verified")
        else:
            msg = encrypted_data
        # part 2 - end
        if self.cipher:
            data = self.cipher.decrypt(msg)
            if self.verbose:
                print("Receiving packet of length {}".format(pkt_len))
        else:
            data = msg
        my_hmac_key = "Sahil's Secret Key"
        # First 32 bytes are the sender's HMAC-SHA256 digest; the rest is payload.
        hmac_key = bytes(data[:32])
        hmac_msg = data[32:]
        hmac_object = hmac.new(key=my_hmac_key.encode(), msg=hmac_msg, digestmod=hashlib.sha256)
        message_digest = hmac_object.digest()
        if self.verbose:
            print("Actual Message Received: ", hmac_msg)
        return hmac_msg
    def close(self):
        """Close the underlying socket."""
        self.conn.close()
| Sahil123445/SecureChat | SecureChat Project/Code/lib/comms.py | comms.py | py | 6,218 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "dh.create_dh_key",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "dh.calculate_dh_secret",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "xor.XOR.new",
"... |
7946314672 |
import csv;
from datetime import datetime;
from urllib.request import urlopen;
from bs4 import BeautifulSoup;
import os
os.chmod("D:\program\python", 0o777);
def convertString(string):
    """Convert a comma-decimal string such as "86,00" to a float.

    Raises ValueError when the input does not contain exactly one comma.
    """
    whole, fraction = string.split(",")
    return float(whole + "." + fraction)
quote_page = "https://www.avanza.se/fonder/om-fonden.html/788395/avanza-auto-1"
page = urlopen(quote_page);
soup = BeautifulSoup(page, "html.parser");
name = soup.find("h1", attrs={"class" : "large marginBottom10px"});
name = name.text.strip();
price = soup.find(attrs={"class" : "SText bold"});
price = price.text.strip();
print(name + ": NAV "+ price);
price = str(convertString(price)) + "kr";
with open("D:\program\python\index.csv", "a") as csv_file:
writer = csv.writer(csv_file, delimiter=";");
writer.writerow([name, price, datetime.now().strftime("20%y/%m/%d - %H:%M")]);
| brjj/Avanza | avanza.py | avanza.py | py | 889 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chmod",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"li... |
4029209236 | import pandas as pd
from bs4 import BeautifulSoup
import requests
# Ask which Latvijas Loto game to analyse (prompt text is Latvian on purpose).
x = input('ievadi loterijas nosaukumu(eurojackpot;viking-lotto;superbingo;latloto;keno;loto5;joker;joker7) \n')
y = []  # one list of scraped draw numbers per result block
i = 1   # archive page counter for the non-loto5 branch
if x =='loto5':
    # loto5 results live on a single page rather than in the archive.
    web = 'https://www.latloto.lv/lv/rezultati/loto5'
    source = requests.get(web).text
    soup = BeautifulSoup(source, 'lxml')
    skaitli = soup.find_all('div', class_='numbered-items')
    for mammite in skaitli:
        milfene = mammite.text
        milf = milfene.split('\n')
        # Splitting on newlines leaves two empty entries; drop them.
        milf.remove('')
        milf.remove('')
        y.append(milf)
else:
    # Scrape the first five archive pages for the chosen game.
    web = 'https://www.latloto.lv/lv/arhivs/'
    while(i<=5):
        m = str(i)
        saite = web+x+'/'+m
        source = requests.get(saite).text
        soup = BeautifulSoup(source, 'lxml')
        skaitli = soup.find_all('div', class_='numbered-items')
        i = i+1
        for mammite in skaitli:
            milfene = mammite.text
            milf = milfene.split('\n')
            milf.remove('')
            milf.remove('')
            y.append(milf)
# mode() gives the most frequently drawn number per position.
df=pd.DataFrame(y)
df = df.mode()
print(df)
| kakaoenjoyer/-odien_paveiksies | sodien_paveiksies.py | sodien_paveiksies.py | py | 1,175 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
18036337899 | from utils.cs_parser import CsharpParser
from utils.html import HtmlMaker
import argparse
import os
def main(filename, destination):
    """Convert a C# source file into an HTML documentation page.

    Args:
        filename: Path to the ``.cs`` file to document.
        destination: Optional output directory; falsy means write next to
            where the script is run ("code folder").
    """
    with open(filename, 'rt') as file:
        lines = file.readlines()
    csparser = CsharpParser()
    oop_result = csparser.parse_file(lines)
    # basename/splitext handle either path separator and any extension length
    # (the original split on os.sep only and hard-coded a 3-char ".cs" suffix).
    stem = os.path.splitext(os.path.basename(filename))[0]
    maker = HtmlMaker()
    result = maker.format_documentation(oop_result, stem)
    path = destination if destination else ""
    out_file = os.path.join(path, f"{stem}.html") if path else f"{stem}.html"
    with open(out_file, 'w') as f:
        f.write(result)
    print(f"Html document is at {path if path else 'code folder'}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', dest="filename", required=True)
parser.add_argument('-d', dest="destination")
args = parser.parse_args()
main(args.filename, args.destination)
| eyeless12/2html | converter.py | converter.py | py | 934 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.cs_parser.CsharpParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "utils.html.HtmlMaker",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argpars... |
32472626332 | import colorama
from colorama import Fore, Back, Style
import re
import analyze
colorama.init(autoreset=True)
#Input is list of word objects. Prints all words, unknown words being marked red.
def mark_unknown_words_red(words):
    """Print all words on one line; unknown words get a red background."""
    for entry in words:
        highlight = Back.RED if entry.known is False else ''
        print(highlight + entry.word, end=' ')
#Return dictionary of unknown nouns, verbs, adjectives, adverbs
def get_parts_of_speech_of_new_words(words):
    """Collect lemmas of unknown words, bucketed by part of speech.

    Returns a dict with 'Nouns', 'Adjectives', 'Verbs' and 'Adverbs' keys;
    each value is a list of lemmas. Verbs match any POS tag starting with
    'VER'; words with other tags are ignored.
    """
    buckets = {'Nouns': [], 'Adjectives': [], 'Verbs': [], 'Adverbs': []}
    exact_tags = {'NOM': 'Nouns', 'ADJ': 'Adjectives', 'ADV': 'Adverbs'}
    for word in words:
        if word.known is not False:
            continue
        if word.pos in exact_tags:
            buckets[exact_tags[word.pos]].append(word.lemma)
        elif word.pos.startswith('VER'):
            buckets['Verbs'].append(word.lemma)
    return buckets
#Print new words and their synonyms.
def print_new_words_with_synonyms(words):
    """Print unknown words grouped by part of speech with their synonyms.

    Uses ``get_parts_of_speech_of_new_words`` for bucketing and
    ``analyze.get_synonyms`` for lookups; duplicate lemmas are collapsed with
    ``set()``. The four copy-pasted sections of the original are replaced by
    one data-driven loop with identical output.
    """
    parts_of_speech = get_parts_of_speech_of_new_words(words)
    # (bucket key, singular POS name passed to analyze, header label)
    sections = [
        ('Nouns', 'noun', 'New nouns'),
        ('Verbs', 'verb', 'New verbs'),
        ('Adjectives', 'adjective', 'New adjectives'),
        ('Adverbs', 'adverb', 'New adverbs'),
    ]
    for index, (key, pos_name, label) in enumerate(sections):
        lemmas = parts_of_speech.get(key)
        # Only the first header has no leading blank line.
        prefix = '' if index == 0 else '\n'
        print(prefix + label + ' (' + str(len(lemmas)) + ')\n')
        if len(lemmas) == 0:
            print('-')
        else:
            for lemma in set(lemmas):
                print(lemma + ' - ' + analyze.get_synonyms(lemma, pos_name))
#Print new words and their translations.
def print_new_words_with_translations(words):
    """Print unknown words grouped by part of speech with their translations.

    Uses ``get_parts_of_speech_of_new_words`` for bucketing and
    ``analyze.translate_lemma`` for lookups; duplicate lemmas are collapsed
    with ``set()``. The four copy-pasted sections of the original are
    replaced by one data-driven loop with identical output.
    """
    parts_of_speech = get_parts_of_speech_of_new_words(words)
    sections = [
        ('Nouns', 'New nouns'),
        ('Verbs', 'New verbs'),
        ('Adjectives', 'New adjectives'),
        ('Adverbs', 'New adverbs'),
    ]
    for index, (key, label) in enumerate(sections):
        lemmas = parts_of_speech.get(key)
        # Only the first header has no leading blank line.
        prefix = '' if index == 0 else '\n'
        print(prefix + label + ' (' + str(len(lemmas)) + ')\n')
        if len(lemmas) == 0:
            print('-')
        else:
            for lemma in set(lemmas):
                print(lemma + ' - ' + analyze.translate_lemma(lemma))
#Print attributes of Word objects as sentences.
def print_objects_as_string(words):
    """Print the Word objects as running text.

    No separator is inserted before a punctuation token (a lemma starting
    with a non-word character); other tokens are separated by single spaces,
    and the last word terminates the line.

    Fixes over the original:
    - ``re.match`` returns a Match object or None, never True/False, so the
      original ``is False`` / ``is True`` comparisons made two branches
      unreachable and routed word+word pairs to a newline print.
    - The final word was never printed at all (the i == len-1 case had no
      branch).
    - No longer shadows the builtin ``next``.
    """
    for i in range(len(words)):
        if i == len(words) - 1:
            # Last token: end the line.
            print(words[i].word)
            continue
        next_is_punct = re.match(r"[^\w]", words[i + 1].lemma)
        if next_is_punct:
            print(words[i].word, end='')
        else:
            print(words[i].word, end=' ')
#Print attributes of Word objects as sentences, all unknown words are printed red.
def print_unknown_words_red(words):
for i in range(len(words)):
if i < len(words)-1:
z = re.match("[^\w]", words[i+1].word)
if z:
print(words[i].word, end='')
else:
if words[i].known == False:
print(Back.RED + words[i].word, end=' ')
else:
print(words[i].word, end=' ')
else:
print(words[i].word) | estakaad/Pronto | pronto/display.py | display.py | py | 4,765 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "colorama.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "colorama.Back.RED",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "colorama.Back",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "analyze.get_synonym... |
27784230868 | import yaml
# save_load将yaml数据流准成python对象
# save_dump将python对象转换成yanl格式
def get_data():
    """Load and return the parsed contents of ``yaml01.yaml`` (UTF-8)."""
    with open('yaml01.yaml', encoding='utf-8') as stream:
        parsed = yaml.safe_load(stream)
    return parsed
def get_yaml():
    """Serialize a sample mapping to a YAML string with yaml.safe_dump."""
    sample = {
        'language': ['ruby', 'python', 'java'],
        'websites': {'yaml': 'YAML', 'python': 'PYTHON'},
    }
    return yaml.safe_dump(sample)
print(get_data())
# print(get_yaml())
| BrandonLau-liuyifei/TestByPython | python_base/test_framework_foundation/yamldemo/getdata.py | getdata.py | py | 440 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yaml.safe_load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "yaml.safe_dump",
"line_number": 14,
"usage_type": "call"
}
] |
12601628610 | from typing import List
class Solution:
    """LeetCode 740 "Delete and Earn" solved with house-robber style DP."""

    def deleteAndEarn(self, nums: List[int]) -> int:
        """Return the maximum points earnable from *nums*.

        Picking value v earns v * count(v) but forfeits every v-1 and v+1,
        so this reduces to the house-robber recurrence over per-value totals.

        Args:
            nums: Non-negative integers to delete/earn from.

        Returns:
            Maximum total points; 0 for empty input (the original crashed
            on ``max([])``).
        """
        if not nums:
            return 0
        # points[v] = total points from taking every copy of v.
        # (The original built range(max+1) and then zeroed it in a loop.)
        points = [0] * (max(nums) + 1)
        for n in nums:
            points[n] += n
        # Rob/skip DP in place: points[idx] = best using values <= idx.
        for idx in range(2, len(points)):
            points[idx] = max(points[idx] + points[idx - 2], points[idx - 1])
        return points[-1]
| goldy1992/algorithms | delete_and_earn/delete.py | delete.py | py | 523 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
}
] |
35851739766 | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import configparser
import sys
from paste.script import command
import http.cookiejar, urllib.request, urllib.error, urllib.parse
class PackCommand(command.Command):
    """Paste Script command that triggers a ZODB pack on a running pycms site.

    Logs in with the admin credentials from the given ini file and requests
    the pack endpoint via the /login came_from redirect.
    """
    max_args = 2
    min_args = 1
    usage = "pack <ini file> [login_url]"
    summary = "Pack ZODB"
    group_name = "w20e.pycms"
    parser = command.Command.standard_parser(verbose=True)
    def command(self):
        """Parse the ini file, log in, and trigger the pack request."""
        try:
            ini_file = self.args[0]
            config = configparser.ConfigParser()
            # NOTE(review): bare except hides the real failure (missing file,
            # bad ini syntax) behind one generic message; readfp() is
            # deprecated in favour of read_file(); the file handle from
            # open() is never closed.
            config.readfp(open(ini_file))
        except:
            print("Please provide an ini file as argument")
            sys.exit(-1)
        url = None
        if len(self.args) > 1:
            url = self.args[1]
        if not url:
            # Default login URL built from [server:main] host/port.
            host = config.get('server:main', 'host')
            port = config.get('server:main', 'port')
            url = "http://%s:%s/login" % (host, port)
        usr, pwd = config.get('server:main', "pycms.admin_user").split(":")
        # Cookie-aware opener so the session cookie from /login is retained.
        cj = http.cookiejar.CookieJar()
        opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
        urllib.request.install_opener(opener)
        # NOTE(review): credentials are form-encoded and sent over plain http.
        data = "login=%s&password=%s&form.submitted=1&came_from=/script_pack" % (usr, pwd)
        req = urllib.request.Request(url, data.encode('utf-8'))
        handle = urllib.request.urlopen(req)
        print(handle.info())
        result = handle.read()
        print(result)
| wyldebeast-wunderliebe/w20e.pycms | w20e/pycms/pack.py | pack.py | py | 1,538 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "future.standard_library.install_aliases",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "paste.script.command.Command",
"line_number": 10,
"usage_type": "attribute"... |
73433072104 | #!/usr/bin/env python3
"""
sklearn_pack.py: Do Model training with large sparse matrix with sklearn
"""
__author__ = "Yanshi Luo"
__license__ = "GPL"
__email__ = "yluo82@wisc.edu"
import pandas as pd
def random_forest(finalX_train, finalY_train, finalX_test, n_parallel=1, write_csv=False, write_filename='rf_pref.csv'):
    """Fit a 10-tree random forest and return test predictions as a DataFrame.

    When ``write_csv`` is true the predictions are also written to
    ``write_filename`` without an index column.
    """
    from sklearn.ensemble import RandomForestClassifier
    model = RandomForestClassifier(n_estimators=10, n_jobs=n_parallel).fit(finalX_train, finalY_train)
    predictions = pd.DataFrame(model.predict(finalX_test))
    if write_csv:
        predictions.to_csv(write_filename, index=False)
    return predictions
def decision_tree(finalX_train, finalY_train, finalX_test, dot=False):
    """Fit a decision tree and return test predictions as a DataFrame.

    When ``dot`` is true the fitted tree is also exported to
    'sklearn_tree.dot' for graphviz rendering.
    """
    from sklearn import tree
    model = tree.DecisionTreeClassifier().fit(finalX_train, finalY_train)
    if dot:
        tree.export_graphviz(model, out_file='sklearn_tree.dot')
    return pd.DataFrame(model.predict(finalX_test))
def neural_network(finalX_train, finalY_train, finalX_test):
    """Fit an MLP classifier (adam solver, alpha=1e-5, fixed seed) and
    return test predictions as a DataFrame."""
    from sklearn.neural_network import MLPClassifier
    model = MLPClassifier(solver='adam', alpha=1e-5, random_state=1).fit(finalX_train, finalY_train)
    return pd.DataFrame(model.predict(finalX_test))
| yanshil/STAT628_GM2_Yelp | code/Yanshi/sklearn_pack.py | sklearn_pack.py | py | 1,351 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 31,
"usage_type": "call"
... |
940338792 | import json
input_path_labor = "/input/labor/input.json"
input_path_divorce = "/input/divorce/input.json"
input_path_loan = "/input/loan/input.json"
output_path_labor = "/output/labor/output.json"
output_path_divorce = "/output/divorce/output.json"
output_path_loan = "/output/loan/output.json"
def predict(input_path, output_path):
    """Write a baseline prediction file for one legal domain.

    Reads one JSON document (a list of sentence dicts) per line from
    ``input_path``, sets every sentence's ``labels`` to an empty list, and
    writes the documents back to ``output_path`` as one JSON line each
    (ensure_ascii=False keeps Chinese text readable).

    The ``with`` statement closes both files even if a line fails to parse
    (the original leaked the handles on error); the intermediate copy of the
    document list was redundant since the sentence dicts are mutated in place.
    """
    with open(input_path, "r", encoding='utf-8') as inf, \
            open(output_path, "w", encoding='utf-8') as ouf:
        for line in inf:
            pre_doc = json.loads(line)
            for sent in pre_doc:
                # Replace this empty list with the element labels predicted
                # by your model.
                sent['labels'] = []
            json.dump(pre_doc, ouf, ensure_ascii=False)
            ouf.write('\n')
# labor领域预测
predict(input_path_labor, output_path_labor)
# loan领域预测
predict(input_path_loan, output_path_loan)
# divorce领域预测
predict(input_path_divorce, output_path_divorce)
| china-ai-law-challenge/CAIL2019 | 要素识别/python_sample/main.py | main.py | py | 999 | python | en | code | 331 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 21,
"usage_type": "call"
}
] |
33221925828 | import logging
from sqlalchemy import case, create_engine, select
from . import schema
from .. import client
from ..common import Results, date_range
from ..constants import MARKS, PERIODS
logger = logging.getLogger(__name__)
class Store:
    """SQLAlchemy-backed store for Play Whe draw results.

    Wraps an engine (defaults to in-memory SQLite), can create/seed the
    schema, bulk-load results from CSV, and incrementally pull new results
    from the remote server.
    """
    def __init__(self, bind=None):
        # Fall back to a throwaway in-memory database when no engine is given.
        if bind is None:
            self.bind = create_engine('sqlite:///:memory:')
        else:
            self.bind = bind
    def initialize(self):
        """Creates all the tables and then seeds the ones that need to be prepopulated.
        Currently only the marks and periods tables need to be prepopulated.
        """
        logger.info('Initialization started...')
        # begin() wraps creation + seeding in a single transaction.
        with self.bind.begin() as conn:
            logger.info('Creating the tables...')
            schema.metadata.create_all(conn)
            logger.info('Seeding the marks table...')
            # OR IGNORE makes re-initialization idempotent.
            conn.execute(
                schema.marks.insert().prefix_with('OR IGNORE'),
                [{ 'number': m.number, 'name': m.name } for m in MARKS.values()]
            )
            logger.info('Seeding the periods table...')
            conn.execute(
                schema.periods.insert().prefix_with('OR IGNORE'),
                [{ 'abbr': p.abbr, 'label': p.label, 'time_of_day': p.time_of_day } for p in PERIODS.values()]
            )
        logger.info('Initialization done!')
    def load(self, csvfile):
        """Inserts results from the given CSV file."""
        logger.info('Loading started...')
        logger.info('Reading the results from the CSV file...')
        results = Results.from_csvfile(csvfile)
        logger.info('Inserting the results...')
        insert(self.bind, results)
        logger.info('Loading done!')
    def update(self, fetch=client.fetch, today=None):
        """Updates results with the latest from the server.

        Resumes from the most recent stored result when one exists;
        Ctrl-C stops the update cleanly mid-way so it can be resumed later.
        (``fetch=client.fetch`` is a function default, bound once at class
        definition — injectable for testing.)
        """
        kwargs = {}
        if today is not None:
            kwargs['today'] = today
        with self.bind.connect() as conn:
            last_result = conn.execute(select_last_result()).fetchone()
            if last_result is not None:
                # Resume from where the last stored draw left off.
                kwargs['start_date'] = last_result.date
                kwargs['period'] = last_result.period_abbr
            try:
                if last_result is None:
                    logger.info('Update started...')
                else:
                    logger.info('Update resumed...')
                for year, month in date_range(**kwargs):
                    logger.info('Updating year={}, month={}...'.format(year, month))
                    insert(conn, fetch(year, month))
                    logger.info('Update for year={}, month={} done!'.format(year, month))
            except KeyboardInterrupt:
                logger.info('Update stopped!')
            else:
                logger.info('Update done!')
def insert(bind, results):
    """Bulk-insert *results* via *bind* (engine or connection).

    Duplicate rows are silently skipped (OR IGNORE); invalid results are
    still inserted but their validation errors are logged.
    """
    if results:
        bind.execute(
            schema.results.insert().prefix_with('OR IGNORE'),
            [{ 'draw': r.draw, 'date': r.date, 'period_abbr': r.period, 'mark_number': r.number } for r in results]
        )
        if not results.all_valid():
            logger.error(results.full_error_messages())
# Sort keys for draw periods within a single date, used by
# select_last_result(): PM (0) sorts first in ascending order, i.e. it is
# treated as the latest draw of the day — confirm against the PERIODS table.
PERIODS_DESC = {
    'EM': 3,
    'AM': 2,
    'AN': 1,
    'PM': 0
}
def select_last_result():
return select([schema.results]). \
order_by(schema.results.c.date.desc()). \
order_by(case(PERIODS_DESC, value=schema.results.c.period_abbr)). \
order_by(schema.results.c.draw.desc()). \
limit(1)
| dwayne/playwhe | playwhe/cli/store.py | store.py | py | 3,488 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "constants.MARKS.values",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "con... |
24793623369 | import open3d as o3d
import random
import csv
import numpy as np
import torch
import torch.utils.data as torchdata
from torchvision import transforms
import torchaudio
import librosa
from . import point_transforms as ptransforms
class BaseDataset(torchdata.Dataset):
def __init__(self, list_sample, opt, max_sample=-1, split='train'):
# params
self.num_frames = opt.num_frames
self.stride_frames = opt.stride_frames
self.frameRate = opt.frameRate
self.voxel_size = opt.voxel_size
self.audRate = opt.audRate
self.audLen = opt.audLen
self.audSec = 1. * self.audLen / self.audRate
self.binary_mask = opt.binary_mask
self.rgbs_feature = opt.rgbs_feature
# STFT params
self.log_freq = opt.log_freq
self.stft_frame = opt.stft_frame
self.stft_hop = opt.stft_hop
self.HS = opt.stft_frame // 2 + 1
self.WS = (self.audLen + 1) // self.stft_hop
self.split = split
self.seed = opt.seed
random.seed(self.seed)
# initialize point transform
self._init_ptransform()
self.num_channels = opt.num_channels
# list_sample can be a python list or a csv file of list
if isinstance(list_sample, str):
self.list_sample = []
for row in csv.reader(open(list_sample, 'r'), delimiter=','):
if len(row) < 2:
continue
self.list_sample.append(row)
elif isinstance(list_sample, list):
self.list_sample = list_sample
else:
raise('Error list_sample!')
if self.split == 'train':
self.list_sample *= opt.dup_trainset
random.shuffle(self.list_sample)
if max_sample > 0:
self.list_sample = self.list_sample[0:max_sample]
num_sample = len(self.list_sample)
assert num_sample > 0
print('# samples: {}'.format(num_sample))
def __len__(self):
return len(self.list_sample)
def _init_ptransform(self):
point_transform_list = []
color_transform_list = []
if self.split == 'train':
point_transform_list.append(ptransforms.RandomRotation(axis=np.array([0, 1, 0])))
point_transform_list.append(ptransforms.RandomTranslation(0.4))
point_transform_list.append(ptransforms.RandomScale(0.5, 2))
point_transform_list.append(ptransforms.RandomShear())
color_transform_list.append(ptransforms.RandomGaussianNoise())
color_transform_list.append(ptransforms.RandomValue())
color_transform_list.append(ptransforms.RandomSaturation())
else:
pass #apply no transformation in evaluation mode
self.point_transform = transforms.Compose(point_transform_list)
self.color_transform = transforms.Compose(color_transform_list)
def _create_coords(self, points):
coords = []
for xyz in points:
coords.append(np.floor(xyz/self.voxel_size))
return coords
def _load_frames(self, paths):
points = []
rgbs = []
for path in paths:
xyz, color = self._load_frame(path)
points.append(xyz)
rgbs.append(color)
points = self.point_transform(points)
if self.rgbs_feature:
rgbs = self.color_transform(rgbs)
coords = self._create_coords(points)
return points, coords, rgbs
def _load_frame(self, path):
pcd = o3d.io.read_point_cloud(path)
points = np.array(pcd.points)
colors = np.array(pcd.colors)
return points, colors
def _stft(self, audio):
spec = librosa.stft(
audio, n_fft=self.stft_frame, hop_length=self.stft_hop)
amp = np.abs(spec)
phase = np.angle(spec)
return torch.from_numpy(amp), torch.from_numpy(phase)
def _load_audio_file(self, path):
if path.endswith('.mp3'):
audio_raw, rate = torchaudio.load(path, channels_first=False)
audio_raw = audio_raw.numpy().astype(np.float32)
# convert to mono
if audio_raw.shape[1] == 2:
audio_raw = (audio_raw[:, 0] + audio_raw[:, 1]) / 2
else:
audio_raw = audio_raw[:, 0]
else:
audio_raw, rate = librosa.load(path, sr=None, mono=True)
return audio_raw, rate
def _load_audio(self, path, nearest_resample=False):
audio = np.zeros(self.audLen, dtype=np.float32)
# load audio
audio_raw, rate = self._load_audio_file(path)
# repeat if audio is too short
if audio_raw.shape[0] < rate * self.audSec:
n = int(rate * self.audSec / audio_raw.shape[0]) + 1
audio_raw = np.tile(audio_raw, n)
# resample
if rate > self.audRate:
print('resmaple {}->{}'.format(rate, self.audRate))
if nearest_resample:
audio_raw = audio_raw[::rate//self.audRate]
else:
audio_raw = librosa.resample(audio_raw, rate, self.audRate)
len_raw = audio_raw.shape[0]
center = np.random.randint(self.audLen//2 + 1, len_raw - self.audLen//2)
start = max(0, center - self.audLen // 2)
end = min(len_raw, center + self.audLen // 2)
audio[self.audLen//2-(center-start): self.audLen//2+(end-center)] = \
audio_raw[start:end]
# randomize volume
if self.split == 'train':
scale = random.random() + 0.5 # 0.5-1.5
audio *= scale
audio[audio > 1.] = 1.
audio[audio < -1.] = -1.
return audio
def _mix_n_and_stft(self, audios):
N = len(audios)
mags = [None for n in range(N)]
# mix
for n in range(N):
audios[n] /= N
audio_mix = np.asarray(audios).sum(axis=0)
# STFT
amp_mix, phase_mix = self._stft(audio_mix)
for n in range(N):
ampN, _ = self._stft(audios[n])
mags[n] = ampN.unsqueeze(0)
# to tensor
for n in range(N):
audios[n] = torch.from_numpy(audios[n])
return amp_mix.unsqueeze(0), mags, phase_mix.unsqueeze(0)
| francesclluis/point-cloud-source-separation | dataset/base.py | base.py | py | 6,285 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "random.seed",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "csv.reader... |
17253375732 | import os
import traceback
from datetime import datetime, timedelta
import pytz
import config
from manage_transactions import get_first_transaction_timestamp, get_transaction_data
from util import logging
# structure /terra-data/raw/stats_daily_transaction/<type>/<token>.csv
STORE_DAILY_TRANSACTIONS_DIRECTORY = '/terra-data/v2/raw/stats_daily_transactions'
# structure /terra-data/raw/stats_daily_address_payments/<token>/<date>.csv
STORE_DAILY_ADDRESS_PAYMENTS_DIRECTORY = '/terra-data/v2/raw/stats_daily_address_payments'
log = logging.get_custom_logger(__name__, config.LOG_LEVEL)
def calculate_daily_transaction_data():
# symbol = token['symbol']
# symbol_file = STORE_DIRECTORY + symbol
os.makedirs(STORE_DAILY_TRANSACTIONS_DIRECTORY, exist_ok=True)
max_time = datetime.utcnow()
max_time = max_time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=pytz.UTC)
stop_processing = False
date_to_process = get_first_transaction_timestamp()
date_last_processed = _get_last_processed_date()
date_to_process = max(date_to_process, date_last_processed + timedelta(days=1))
log.debug('calculate: total amount of transactions per coin per type')
# TODO remove all lines from STORE_DAILY_PAYMENTS_DIRECTORY which are in the future from date_to_process
# TODO remove all files from STORE_DAILY_ADDRESS_PAYMENTS_DIRECTORY which are in the future from date_to_process
if date_to_process >= max_time:
return
# with open(symbol_file, 'a') as file:
while not stop_processing:
log.debug('analysing transaction data for ' + date_to_process.strftime('%Y-%m-%d'))
transactions = get_transaction_data(date_to_process)
types = dict()
for transaction in transactions:
type = transaction[0]
block = transaction[1]
timestamp = transaction[2]
tx_hash = transaction[3]
if type not in types.keys():
types[type] = {
'count': 0,
'currencies': dict(),
}
currency = None
if type == 'bank_MsgMultiSend':
currency = transaction[5]
elif type == 'bank_MsgSend':
currency = transaction[5]
elif type == 'distribution_MsgWithdrawDelegationReward':
currency = None
elif type == 'distribution_MsgWithdrawValidatorCommission':
currency = None
elif type == 'gov_MsgDeposit':
currency = transaction[7]
elif type == 'gov_MsgSubmitProposal':
currency = None
elif type == 'market_MsgSwap':
currency = None
elif type == 'oracle_MsgDelegateFeedConsent':
currency = None
elif type == 'oracle_MsgExchangeRatePrevote':
currency = transaction[5]
elif type == 'oracle_MsgExchangeRateVote':
currency = transaction[5]
elif type == 'staking_MsgCreateValidator':
currency = transaction[6]
elif type == 'staking_MsgDelegate':
currency = transaction[7]
elif type == 'staking_MsgEditValidator':
currency = None
if currency and currency not in types[type]['currencies']:
types[type]['currencies'][currency] = {
'count': 0,
}
if currency:
types[type]['currencies'][currency]['count'] += 1
else:
types[type]['count'] += 1
# print(types)
for type in types.keys():
os.makedirs(os.path.join(STORE_DAILY_TRANSACTIONS_DIRECTORY, type), exist_ok=True)
if len(types[type]['currencies']) > 0:
for currency in types[type]['currencies']:
with open(os.path.join(STORE_DAILY_TRANSACTIONS_DIRECTORY, type, currency + '.csv'), 'a') as file:
file.write(','.join([date_to_process.strftime('%Y-%m-%d'),
str(types[type]['currencies'][currency]['count']),
]) + '\n')
else:
with open(os.path.join(STORE_DAILY_TRANSACTIONS_DIRECTORY, type, 'default.csv'), 'a') as file:
file.write(','.join(
[date_to_process.strftime('%Y-%m-%d'),
str(types[type]['count']),
]) + '\n')
date_to_process += timedelta(days=1)
if date_to_process >= max_time:
stop_processing = True
#
# if datum[0] in token['token_contracts'] or datum[0] in token['lending_contracts']:
# token_contracts_balance += int(datum[1])
#
# elif datum[0] in token['team_accounts']:
# team_balance += int(datum[1])
#
# elif datum[0] in known_addresses.exchange_addresses:
# exchange_balance += int(datum[1])
#
# else:
# remaining_accounts.append({'account': datum[0], 'balance': int(datum[1]), })
#
# remaining_accounts.sort(key=lambda element: element['balance'], reverse=True)
#
# top20 = list()
# top50 = list()
# top100 = list()
# top200 = list()
# retail = list()
#
# i = 0
# for account in remaining_accounts:
#
# if i < 20:
# top20.append(account)
# elif i < 50:
# top50.append(account)
# elif i < 100:
# top100.append(account)
# elif i < 200:
# top200.append(account)
# else:
# retail.append(account)
#
# i += 1
#
# date_string = date_to_process.strftime('%Y-%m-%d')
# result = {
# 'date': date_string,
# 'token_contracts_balance': token_contracts_balance,
# 'team_balance': team_balance,
# 'exchanges_balance': exchange_balance,
# 'top20': functools.reduce(lambda a, b: a + b['balance'], top20, 0),
# 'top50': functools.reduce(lambda a, b: a + b['balance'], top50, 0),
# 'top100': functools.reduce(lambda a, b: a + b['balance'], top100, 0),
# 'top200': functools.reduce(lambda a, b: a + b['balance'], top200, 0),
# 'retail': functools.reduce(lambda a, b: a + b['balance'], retail, 0), }
#
# file.write(result['date'] + ',' + str((result['token_contracts_balance'] / pow(10, 18))) + ',' + str(
# (result['team_balance'] / pow(10, 18))) + ',' + str(
# (result['exchanges_balance'] / pow(10, 18))) + ',' + str((result['top20'] / pow(10, 18))) + ',' + str(
# (result['top50'] / pow(10, 18))) + ',' + str((result['top100'] / pow(10, 18))) + ',' + str(
# (result['top200'] / pow(10, 18))) + ',' + str((result['retail'] / pow(10, 18))) + '\n')
# file.flush()
#
# log.debug('calculate_token_holder_stats for ' + date_string)
#
# date_to_process += timedelta(days=1)
#
# if date_to_process >= max_time:
# stop_processing = True
def _get_last_processed_date():
directories = [f for f in os.listdir(STORE_DAILY_TRANSACTIONS_DIRECTORY) if
os.path.isdir(os.path.join(STORE_DAILY_TRANSACTIONS_DIRECTORY, f))]
last_file_timestamp = datetime.strptime('1970-01-01', '%Y-%m-%d')
last_file_timestamp = last_file_timestamp.replace(tzinfo=pytz.UTC)
for directory in directories:
target_dir = os.path.join(STORE_DAILY_TRANSACTIONS_DIRECTORY, directory)
files = [f for f in os.listdir(target_dir) if os.path.isfile(os.path.join(target_dir, f))]
# get the file with the highest timestamp
for file in files:
if file.startswith('.'):
continue
symbol_file = os.path.join(target_dir, file)
with open(symbol_file, 'r') as file:
for line in file:
line_parts = line.split(',')
this_timestamp = datetime.strptime(line_parts[0], '%Y-%m-%d')
this_timestamp = this_timestamp.replace(tzinfo=pytz.UTC)
last_file_timestamp = max(last_file_timestamp, this_timestamp)
return last_file_timestamp
def get_data(date, transaction_name):
target_dir = os.path.join(STORE_DAILY_TRANSACTIONS_DIRECTORY, transaction_name)
files = [f for f in os.listdir(target_dir) if os.path.isfile(os.path.join(target_dir, f))]
return_data = {}
for filename in files:
try:
token_name = filename.split('.')[0]
with open(os.path.join(target_dir, filename), 'r') as file:
for line in file:
if len(line) <= 0:
continue
line_parts = line.split(',')
line_date = datetime.strptime(line_parts[0], '%Y-%m-%d')
if line_date == date:
return_data[token_name] = {
'count': int(line_parts[1]),
}
break
except:
log.debug('error fetching data')
traceback.print_exc()
return return_data
def get_user(date):
last_file_timestamp = None
directories = [f for f in os.listdir(STORE_DAILY_ADDRESS_PAYMENTS_DIRECTORY) if os.path.isdir(os.path.join(STORE_DAILY_ADDRESS_PAYMENTS_DIRECTORY, f))]
return_data = {}
for dir in directories:
token_dir = os.path.join(STORE_DAILY_ADDRESS_PAYMENTS_DIRECTORY, dir)
filename = date.strftime('%Y-%m-%d')
file_path = os.path.join(token_dir, filename + '.csv')
if not os.path.isfile(file_path):
continue
return_data[dir] = []
files = [f for f in os.listdir(token_dir) if os.path.isfile(os.path.join(token_dir, f))]
# get the file with the highest timestamp
with open(file_path, mode='r') as file:
for line in file:
line_split = line.split(',')
return_data[dir].append({
'address': line_split[0],
'amount': line_split[1],
'count': line_split[2],
})
return return_data
| joergkiesewetter/terra-analytics | calculate_daily_transaction_data.py | calculate_daily_transaction_data.py | py | 10,541 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "util.logging.get_custom_logger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "util.logging",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "config.LOG_LEVEL",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.... |
2672903172 | from django.forms import ModelForm
from .models import Ticket, Comment
from django import forms
from datetime import date
from dateutil.relativedelta import relativedelta
from bootstrap_datepicker_plus import DatePickerInput
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
def valid_deadline(value):
"""
validate deadline: checks if given date is in the past
or in the remote future(10 years from now)
param: value(datetime.date) - given date which we want to validate
return: value if no error else ValidationError
"""
today = date.today()
upper_limit = today + relativedelta(years=10)
if value < today:
raise forms.ValidationError(_('Das Datum darf nicht in der Vergangenheit liegen.'))
if value > upper_limit:
raise forms.ValidationError(_('Das Datum liegt zu weit in der Zukunft.'))
return value
class AddTicketForm(ModelForm):
"""
modelform for creating a ticket, used in new_ticket.html/ NewTicketView
"""
deadline = forms.DateField(input_formats=['%d.%m.%Y'],widget=DatePickerInput(format='%d.%m.%Y',attrs={'placeholder': 'TT.MM.JJJJ'}, options={
"showTodayButton": False,
"locale": "de-DE",
"minDate": date.today().isoformat(),
"allowInputToggle": True,
"keepInvalid": False,
"useCurrent": False,
}), required=False, help_text=_("Der Tag bis das Ticket erledigt sein soll."), validators=[valid_deadline])
assigned_user = forms.ModelChoiceField(queryset=get_user_model().objects.all().order_by('username'),required=False)
class Meta:
model = Ticket
fields = ["name", "text", "assigned_user", "assigned_group", "priority", "deadline"]
class AddCommentForm(ModelForm):
"""
modelform for adding a comment to a ticket
used in ShowTicketView/show_ticket.html
"""
class Meta:
model = Comment
fields = ["comment"]
labels = {
"comment": _("Schreibe einen Kommentar"),
}
widgets = {
'comment': forms.Textarea(
attrs={'placeholder': _('Schreibe einen Kommentar')}),
}
class EditTicketForm(ModelForm):
"""
modelform for editing a ticket
used in EditTicketView/edit_ticket.html
"""
deadline = forms.DateField(input_formats=['%d.%m.%Y'],widget=DatePickerInput(format='%d.%m.%Y',attrs={'placeholder': 'TT.MM.JJJJ'}, options={
"showTodayButton": False,
"locale": "de-DE",
"minDate": date.today().isoformat(),
"allowInputToggle": True,
"keepInvalid": True,
"useCurrent": False,
}), required=False, help_text=_("Der Tag bis das Ticket erledigt sein soll."))
class Meta:
model = Ticket
fields = ["name", "state", "text", "assigned_user", "assigned_group", "priority", "deadline"]
def clean(self):
"""
overrides modelform clean method
checks if given deadline is valid else raises ValidationError
"""
cleaned_data = super().clean()
if self.has_changed() and self.is_valid():
if 'deadline' in self.changed_data:
try:
valid_deadline(self.cleaned_data["deadline"])
except forms.ValidationError as err:
self.add_error('deadline', err)
| d120/pyticket | ticket/forms.py | forms.py | py | 3,129 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "datetime.date.today",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "dateutil.relativedelta.relativedelta",
"line_number": 18,
"usage_type": "call"
},
{
"api_name":... |
30524149041 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: HuHao <huhao1@cmcm.com>
Date: '2018/7/21'
Info:
"""
import os,traceback,time,socket,multiprocessing
from threading import Thread
def countdown(n):
while n>0:
print('T-minus',n)
n -=1
time.sleep(2)
def start():
# 创建一个线程,target 为目标函数,args 为入参,默认是 集合类型
t = Thread(target=countdown,args=(3,),name='child-01')
# 启动线程
t.start()
# 监控线程生命状态
while True:
if t.is_alive():
print('Still running')
time.sleep(1)
else:
print('Completed')
break
class CountdownTask:
def __init__(self):
self._running = True
def terminate(self):
self._running = False
def awake(self):
self._running = True
def run(self,n):
while n >0:
if self._running:
print('T-minus',n)
n -=1
time.sleep(2)
def test_join():
# 创建线程实现类
c = CountdownTask()
# 创建线程实例
t = Thread(target=c.run,args=(3,))
# 启动线程
t.start()
# 暂停线程,并休眠 3s
c.terminate()
print('terminate for 3 seconds ...')
time.sleep(3)
# 唤醒线程
c.awake()
# 阻塞主进程
t.join() # 暂停当前主线程,等待 t 结束,在继续向下执行,相当于 阻塞功能
print('over')
class IOTask:
def terminate(self):
self._running = False
def run(self,sock):
sock.settimeout(5)
while self._running:
try:
# IO 现场监护人那个阻塞过程,如果 sock 断开,线程将永久被阻塞,此时需要使用 超时循环操作线程
data = sock.recv(8192)
break
except socket.timeout:
continue
# 继承方式创建线程实例
class CountdownThread(Thread):
def __init__(self,n):
super().__init__()
self.n = n
def run(self):
while self.n>0:
print('T-minus',self.n)
self.n -=1
time.sleep(2)
def start_thread():
c = CountdownThread(3)
c.start()
c.join() # 阻塞
# 通过multiprocessing 模块创建多线程
def start_thread2():
c = CountdownTask()
p = multiprocessing.Process(target=c.run,args=(3,))
p.start()
p.join()
if __name__=="__main__":
try:
# start()
# test_join()
# start_thread()
start_thread2()
pass
except:
traceback.print_exc()
finally:
os._exit(0)
| happy-place/data-base | api-test/py-test/Part3_Python_CookBook/test_thread.py | test_thread.py | py | 2,251 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number"... |
34337388292 | import cv2
import time
def cam(i):
bs = cv2.createBackgroundSubtractorKNN(detectShadows = True)
camera = cv2.VideoCapture(i)
while True:
ret, frame = camera.read()
fgmask = bs.apply(frame)
# img = frame
th = cv2.threshold(fgmask.copy(), 244, 255, cv2.THRESH_BINARY)[1]
th = cv2.erode(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3)), iterations = 2)
dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8,3)), iterations = 2)
image, contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
if cv2.contourArea(c) > 1000:
(x,y,w,h) = cv2.boundingRect(c)
# cv2.rectangle(frame, (x,y), (x+w, y+h), (255, 255, 0), 2)
cv2.imwrite('./img/shutter/{}.jpg'.format(time.strftime('%Y%m%d%H%M%S')), frame)
cv2.imshow("detection", frame)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
camera.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
cam(0) | 0x024/MS | Ubuntu/cam.py | cam.py | py | 968 | python | en | code | 23 | github-code | 36 | [
{
"api_name": "cv2.createBackgroundSubtractorKNN",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.TH... |
30330479959 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/3/26 1:57 下午
# @Author : wangHua
# @File : ProductAddConsumer.py
# @Software: PyCharm
from app.consumers import BaseConsumer
from utils import Logger, Http
from app.proxies import get_proxy_engine
from app.exceptions import NotFoundException, CrawlErrorException
from app.enums import RedisListKeyEnum
from app.entities import ProductJobEntity, ProductAddJobEntity
from app.crawlers import ProductAddCrawler
import common
class ProductAddConsumer(BaseConsumer):
"""
添加新的asin时,插入对应的任务,该消费者会判断某个站点中的产品是否存在,如果存在,则添加对应站点的asin数据,并产生新的任务
"""
# ignore = True # 忽略该消费者
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8'
',application/signed-exchange;v=b3;q=0.9',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/89.0.4389.82 Safari/537.36',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'cache-control': 'max-age=0',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
}
def __init__(self):
self.http = None
self.proxy_engine = None
BaseConsumer.__init__(self)
def set_job_key(self) -> str:
return RedisListKeyEnum.product_add_crawl_job
def run_job(self):
Logger().info('product_consumer start')
self.http = Http()
self.proxy_engine = get_proxy_engine()
self.http.set_headers(self.headers)
while True:
job_dict = self.get_job_obj()
if job_dict:
job_entity = ProductAddJobEntity.instance(job_dict)
try:
if self.proxy_engine:
# product 反扒比较苛刻,这边用了随机IP的代理
self.http.set_proxy(self.proxy_engine.get_proxy())
crawler = ProductAddCrawler(job_entity, self.http)
if crawler.productItem:
job_dict['product_item_id'] = crawler.productItem.id
new_job = ProductJobEntity.instance(job_dict)
self.set_job_by_key(RedisListKeyEnum.product_crawl_job, new_job)
except CrawlErrorException:
# 爬虫失败异常,http 连续失败次数+1
self.set_error_job(job_entity)
except NotFoundException:
# 页面不存在,不做处理
pass
common.sleep_random()
| whale-fall-wh/producer-consumer | app/consumers/ProductAddConsumer.py | ProductAddConsumer.py | py | 2,913 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.consumers.BaseConsumer",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "app.consumers.BaseConsumer.__init__",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "app.consumers.BaseConsumer",
"line_number": 41,
"usage_type": "name"
},
... |
37384049849 | import logging
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import BbcNewsItem
from readability import Document
import html2text
from datetime import datetime
from goose3 import Goose
class BbcSpider(CrawlSpider):
name = 'bbc'
start_urls = ['https://www.bbc.com/']
rules = [
Rule(LinkExtractor(allow='https://www.bbc.com/news'),
callback='parse_news', follow=True)
]
def parse_news(self, response):
'''
DESCRIPTION:
-----------
* This function is the callback for parsing URL content response,
PARAMETERS:
----------
1. the response to be parsed
'''
if response.status == 200:
url = response.url
logging.info(' BBC News URL : ' + url)
item = BbcNewsItem()
item['url'] = url
item['headline'] = response.xpath('//title/text()').extract_first()
item['authors'] = response.xpath("//meta[@property='article:author']/@content").extract()
article_text = self.get_article_text(response=response)
item['text'] = article_text
publish_datetime = self.get_publish_datetime(response=response)
item['publish_datetime'] = publish_datetime
item['crawling_datetime'] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z")
logging.info(' BBC News Item ' + str(item))
return item
def get_article_text(self, response):
'''
DESCRIPTION:
-----------
* This function cleanse the page of superfluous content such as advertising and HTML
PARAMETERS:
----------
1. response
'''
doc = Document(response.text)
article_html = Document(doc.content()).summary()
h = html2text.HTML2Text()
h.ignore_links = True
article_text = h.handle(article_html)
article_text = article_text.replace('\r', ' ').replace('\n', ' ').strip()
return article_text
def get_publish_datetime(self, response):
'''
DESCRIPTION:
-----------
* This function is used for extract publish date time
PARAMETERS:
----------
1. response
'''
publish_datetime = response.css('span > time::attr(datetime)').get()
return publish_datetime
| ahmad-haggag/bbc-news-scraper | scraper/scraper/spiders/bbc.py | bbc.py | py | 2,514 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scrapy.linkextractors.LinkExtractor",
"line_number": 16,
"usage_type": "call"
},
{
... |
70562634984 | import sys
from collections import deque
input = sys.stdin.readline
M, N, H = map(int, input().rstrip().split())
box = [[list(map(int, input().rstrip().split())) for _ in range(N)] for _ in range(H)]
visited = [[[False for _ in range(M)] for _ in range(N)] for _ in range(H)]
tomato = []
check = 0
for h in range(H):
for n in range(N):
for m in range(M):
if box[h][n][m] == 1:
tomato.append((h, n, m, 0))
visited[h][n][m] = True
elif box[h][n][m] == 0:
check += 1
queue = deque(tomato)
dz = [-1, 1, 0, 0, 0, 0]
dy = [0, 0, -1, 1, 0, 0]
dx = [0, 0, 0, 0, -1, 1]
due = 0
while queue:
z, y, x, cnt = queue.popleft()
due = max(due, cnt)
for d in range(6):
nz, ny, nx = z+dz[d], y+dy[d], x+dx[d]
if 0 <= nz <= H-1 and 0 <= ny <= N-1 and 0 <= nx <= M-1:
if box[nz][ny][nx] == 0 and not visited[nz][ny][nx]:
queue.append((nz, ny, nx, cnt+1))
visited[nz][ny][nx] = True
check-=1
print(due) if check == 0 else print(-1) | zsmalla/algorithm-jistudy-season1 | src/chapter3/3_DFS와BFS(2)/7569_python_임지수.py | 7569_python_임지수.py | py | 1,089 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 22,
"usage_type": "call"
}
] |
2882981479 | import requests
import json
urlApiProductos = "http://localhost:8085/colaboradores"
# Petición get
response = requests.get(urlApiProductos)
print(response)
print(response.status_code)
print(response.encoding)
print(response.headers)
print(response.text)
producto = json.loads(response.text)
print(producto)
print(json.dumps(producto, indent=4, sort_keys=True))
#______________________________________________________
url = "http://localhost:8085/colaboradores/insertar"
# Payload
datos = {
"apellido": "galaxia",
"cargo": "aprendiz",
"estado": "activo",
"fecharegistro": "2018-12-31",
"id_colaboradores": 13,
"nombre": "andromeda",
"usuariored": "na"
}
# Envía la solicitud a la API
response = requests.post(url, json=datos)
# Imprime la respuesta
if response.ok:
print(response.json()["resultado"]) | ariasDev/semillero | Python/Curso Python/python/api.py | api.py | py | 842 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": ... |
74872050985 | import io
import os
# Imports the Google Cloud client library
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
credential_path = "C:/Users/nickj/OneDrive/Documents/Capstone/voice.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
# Instantiates a client
client = speech.SpeechClient()
# The name of the audio file to transcribe
file_name = "C:/Users/nickj/OneDrive/Documents/Soundrecordings/milk.m4a"
# Loads the audio into memory
with io.open(file_name, 'rb') as audio_file:
content = audio_file.read()
audio = types.RecognitionAudio(content=content)
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=16000,
language_code='en-US')
# Detects speech in the audio file
response = client.recognize(config, audio)
print(len(response.results))
for result in response.results:
print('Transcript: {}'.format(result.alternatives[0].transcript))
| refridgerators/raspicode | voice.py | voice.py | py | 1,005 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.speech.SpeechClient",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "google.cloud.speech",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "... |
15979499427 | import logging
class LoggingConfig:
"""
Configure and use logs with logging for repository use.
"""
@staticmethod
def configureLog(level=logging.INFO, filename="run.log"):
"""
Configure the logging settings.
Args:
level (int, optional): The logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL). Defaults to INFO.
filename (str, optional): The name of the log file. Defaults to 'your_log_file.log'.
"""
logging.basicConfig(
level=level,
format="%(asctime)s - %(levelname)s - %(message)s",
filename=filename,
)
@staticmethod
def getLog(name):
"""
Get a logger instance with the specified name.
Args:
name (str): The name of the logger.
Returns:
logging.Logger: A logger instance.
"""
return logging.getLogger(name)
| nemacdonald/otis | src/utils/logger.py | logger.py | py | 929 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 35,
"usage_type": "call"
}
] |
11562594754 | from django.contrib import admin
from django.urls import path
from interview.views import createInterview, showInterviews, uploadResume
urlpatterns = [
path('admin/', admin.site.urls),
path('', createInterview),
path('create', createInterview),
path('interviews', showInterviews),
path('interviews/<slug:edit_id>', showInterviews),
path('upload', uploadResume),
path('upload/<slug:upload_id>', uploadResume),
]
| strikeraryu/scaler_interview | scaler_interview/urls.py | urls.py | py | 441 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "dja... |
74029485864 | import re
from datetime import datetime
def format_date(date):
"""Format the date to datetime string"""
month, day, year = date.split("/")
return datetime(int(year), int(month), int(day)).strftime(
"%Y-%m-%dT%H:%M:%S%Z"
)
def get_ajax_identifier(page_text):
"""Extract from raw HTML the AJAX identifier used in the POST request payload"""
return (
re.findall(r"\"ajaxIdentifier\":\".*?\"", page_text)[1]
.split(":")[-1]
.replace('"', "")
)
| guicrespo/farascraper | farascraper/farascraper/spiders/utils.py | utils.py | py | 504 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 16,
"usage_type": "call"
}
] |
73921571944 | import torch
import numpy as np
def inference(network, image, checkpoint_path):
"""
Network inference function
Args:
network: Network object to make the inference
image: Numpy array of the image to infer on
checkpoint_path: Path to the weights to use for the inference
Returns:
Numpy array of the resulting image
"""
# Weight loading
network.load_state_dict(torch.load(checkpoint_path))
# Image preparation
mean = [0.43110137, 0.43996549, 0.36798606]
variance = [0.2103285, 0.1981421, 0.18789765]
image = ((image / 255) - mean) / variance
image = np.transpose(image, (2, 0, 1))
image = torch.FloatTensor(image)
# Inference
with torch.no_grad():
output = network(image.unsqueeze(0))
# Output transformation
output_image = output.squeeze(0).numpy()
output_image = np.transpose(output_image, (1, 2, 0))
output_image = np.clip(255 * (output_image * variance + mean), 0, 255).astype(np.uint8)
return output_image
| ValentinFigue/Images-Restoration | Network/inference.py | inference.py | py | 1,037 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"lin... |
29158312367 |
import sys
from layers import WEIGHT_DECAY_KEY
sys.path.append('/gan_segmentation/models/discriminators')
import largeFOV, smallFOV, stanford_background_dataset_discriminator
sys.path.append('/gan_segmentation/models/generators')
import fcn32, unet, deeplab_v3
import tensorflow as tf
import numpy as np
from utils import compute_accuracy
import pdb
class GAN_trainer:
def __init__(self, x, y, num_classes, generator = 'fcn32', discriminator = 'smallFOV', optimizer = 'adagrad', is_train = 1):
self.x = x
self.y = y
self.num_classes = num_classes
self.generator = generator
self.discriminator = discriminator
self.optimizer = optimizer
self.loss_bce_weight = tf.placeholder(tf.float32)
self.pkeep = tf.placeholder(tf.float32)
self.lr_g = tf.placeholder(tf.float32)
self.lr_d = tf.placeholder(tf.float32)
self.weight_decay = tf.placeholder(tf.float32)
if is_train == 1:
self.is_train = tf.constant(True)
else:
self.is_train = tf.constant(False)
def make_dis_input(self, G_logits, image, label):
variant = "scaling" # basic, product or scaling
tau = tf.constant(0.9, dtype=tf.float32)
# 0. down-sample labels and image
logits_shape = tf.shape(G_logits)
downsampling_shape = [logits_shape[1], logits_shape[2]]
label = tf.image.resize_images(label, size = downsampling_shape)
label = tf.cast(label, dtype=tf.int32)
image = tf.image.resize_images(image, size = downsampling_shape)
# 1. one hot representation of labels
G_probs = tf.nn.softmax(G_logits, name='softmax_tensor')
batch = tf.cast(tf.shape(label)[0], dtype=tf.int32)
height = tf.cast(tf.shape(label)[1], dtype=tf.int32)
width = tf.cast(tf.shape(label)[2], dtype=tf.int32)
one_hot_flat_y = tf.one_hot(tf.reshape(label, [-1, ]), self.num_classes, axis=1)
one_hot_y = tf.reshape(one_hot_flat_y,[batch, height, width, self.num_classes])
if variant == "basic":
# define operations between generator and discriminator - version "basic"
self.c_prime = self.num_classes
fake_disciminator_input = G_probs
real_disciminator_input = one_hot_y
return real_disciminator_input, fake_disciminator_input
elif variant == "product":
# define operations between generator and discriminator - version "product"
self.c_prime = self.num_classes*3
# 2. Slice r,g,b components
blue = tf.slice(image, [0,0,0,0], [1, height, width, 1])
green = tf.slice(image, [0, 0, 0, 1], [1, height, width, 1])
red = tf.slice(image, [0, 0, 0, 2], [1, height, width, 1])
# 3. Generate fake discriminator input
product_b = G_probs * blue
product_g = G_probs * green
product_r = G_probs * red
fake_disciminator_input = tf.concat([product_b, product_g, product_r], axis=3)
# 4. Generate also real discriminator input
product_b = one_hot_y * blue
product_g = one_hot_y * green
product_r = one_hot_y * red
real_disciminator_input = tf.concat([product_b, product_g, product_r], axis=3)
return real_disciminator_input, fake_disciminator_input
elif variant == "scaling":
# define operations between generator and discriminator - version "scaling"
self.c_prime = self.num_classes
fake_disciminator_input = G_probs
#2. replace labels
yil = tf.reduce_sum(tf.where(tf.greater(one_hot_y, 0.),
tf.maximum(G_probs, tau),
tf.zeros_like(one_hot_y)), axis = 3)
sil = tf.reduce_sum(tf.where(tf.equal(one_hot_y, tf.constant(1, dtype=tf.float32)),
G_probs,
tf.zeros_like(one_hot_y)), axis=3)
yil = tf.expand_dims(yil, axis=3)
sil = tf.expand_dims(sil, axis=3)
yil = tf.concat([yil]*self.num_classes, axis=3)
sil = tf.concat([sil]*self.num_classes, axis=3)
real_disciminator_input = tf.where(tf.equal(one_hot_y, 1.),
yil,
G_probs*((1-yil)/(1-sil)))
return real_disciminator_input, fake_disciminator_input
def get_loss_discriminator(self, real_D_logits, fake_D_logits):
# Define logits batch_size, height, width
in_shape = tf.shape(fake_D_logits)
batch_size = tf.cast(in_shape[0], dtype=tf.int32)
height = tf.cast(in_shape[1], dtype=tf.int32)
width = tf.cast(in_shape[2], dtype=tf.int32)
# Reshape logits by num of classes
fake_D_logits_by_num_classes = tf.reshape(fake_D_logits, [-1, 2])
real_D_logits_by_num_classes = tf.reshape(real_D_logits, [-1, 2])
# Define real/fake labels
label_real = tf.cast(tf.fill([batch_size*height*width], 1.0), dtype=tf.int32)
label_fake = tf.cast(tf.fill([batch_size*height*width], 0.0), dtype=tf.int32)
# Compute loss
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=real_D_logits_by_num_classes,
labels=label_real,
name="bce_1"))
loss += tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=fake_D_logits_by_num_classes,
labels=label_fake,
name="bce_2"))
# Add l2 cost
costs = [tf.nn.l2_loss(var) for var in tf.get_collection(WEIGHT_DECAY_KEY, scope="model/discriminator")]
l2_loss = tf.multiply(self.weight_decay, tf.add_n(costs))
total_loss = loss + l2_loss
return total_loss
def get_loss_generator(self, labels, G_logits, fake_D_logits):
# Find valid indices
logits_by_num_classes = tf.reshape(G_logits, [-1, self.num_classes])
preds = tf.argmax(G_logits, axis=3, output_type=tf.int32)
preds_flat = tf.reshape(preds, [-1, ])
labels_flat = tf.reshape(labels, [-1, ])
valid_indices = tf.multiply(tf.to_int32(labels_flat <= self.num_classes - 1), tf.to_int32(labels_flat > -1))
# Prepare segmentation model logits and labels
valid_logits = tf.dynamic_partition(logits_by_num_classes, valid_indices, num_partitions=2)[1]
valid_labels = tf.dynamic_partition(labels_flat, valid_indices, num_partitions=2)[1]
valid_preds = tf.dynamic_partition(preds_flat, valid_indices, num_partitions=2)[1]
in_shape = tf.shape(fake_D_logits)
batch_size = tf.cast(in_shape[0], dtype=tf.int32)
height = tf.cast(in_shape[1], dtype=tf.int32)
width = tf.cast(in_shape[2], dtype=tf.int32)
fake_D_logits_by_num_classes = tf.reshape(fake_D_logits, [-1, 2])
label_real = tf.cast(tf.fill([batch_size*height*width], 1.0), dtype=tf.int32)
l_mce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=valid_logits,
labels=valid_labels,
name="g_mce"))
l_bce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=fake_D_logits_by_num_classes,
labels=label_real,
name="l_bce"))
loss = l_mce + self.loss_bce_weight * l_bce
# Add l2 loss
costs = [tf.nn.l2_loss(var) for var in tf.get_collection(WEIGHT_DECAY_KEY, scope="model/generator")]
l2_loss = tf.multiply(self.weight_decay, tf.add_n(costs))
total_loss = loss + l2_loss
return total_loss, valid_logits, valid_labels, valid_preds
def train_op(self):
image = self.x
labels = self.y
# Define generator
if self.generator == 'fcn32':
#G_logits = fcn32.fcn32(image, self.pkeep, self.num_classes)
G_unscaled, G_logits = fcn32.fcn32(image, self.pkeep, self.num_classes)
elif self.generator == 'fcn32_DCGAN':
G_logits = fcn32_DCGAN.fcn32(image, self.pkeep, self.num_classes)
elif self.generator == 'unet':
G_logits = unet.unet(image, self.pkeep, self.num_classes, channels=3, num_layers=5)
elif self.generator == 'deeplab_v3':
G_logits = deeplab_v3.deeplab_net(image, self.num_classes, is_train=self.is_train, pkeep=self.pkeep)
else:
print('error! specified generator is not valid')
sys.exit(1)
# Define discriminators input
#labels, real_disciminator_input, fake_disciminator_input = self.make_dis_input(G_logits, image, labels)
real_disciminator_input, fake_disciminator_input = self.make_dis_input(G_unscaled, image, labels)
# Define 2 discriminators: fake and real
if self.discriminator == 'smallFOV':
print('Building discriminator smallFOV')
with tf.variable_scope('discriminator'):
real_D_logits = smallFOV.smallFOV(real_disciminator_input, c_prime=self.c_prime)
with tf.variable_scope('discriminator', reuse=True):
fake_D_logits = smallFOV.smallFOV(fake_disciminator_input, c_prime=self.c_prime)
elif self.discriminator == 'smallFOV_DCGAN':
print('Building discriminator smallFOV_DCGAN')
with tf.variable_scope('discriminator'):
real_D_logits = smallFOV_DCGAN.smallFOV(real_disciminator_input, c_prime=self.c_prime)
with tf.variable_scope('discriminator', reuse=True):
fake_D_logits = smallFOV_DCGAN.smallFOV(fake_disciminator_input, c_prime=self.c_prime)
elif self.discriminator == 'largeFOV':
print('Building discriminator largeFOV')
with tf.variable_scope('discriminator'):
real_D_logits = largeFOV.largeFOV(real_disciminator_input, c_prime=self.c_prime)
with tf.variable_scope('discriminator', reuse=True):
fake_D_logits = largeFOV.largeFOV(fake_disciminator_input, c_prime=self.c_prime)
elif self.discriminator == 'largeFOV_DCGAN':
print('Building discriminator largeFOV_DCGAN')
with tf.variable_scope('discriminator'):
real_D_logits = largeFOV_DCGAN.largeFOV(real_disciminator_input, c_prime=self.c_prime)
with tf.variable_scope('discriminator', reuse=True):
fake_D_logits = largeFOV_DCGAN.largeFOV(fake_disciminator_input, c_prime=self.c_prime)
elif self.discriminator == 'sbd':
print('Building discriminator SBD')
with tf.variable_scope('discriminator'):
real_D_logits = stanford_background_dataset_discriminator.stanford_bd_model(image,
real_disciminator_input,
c_prime=self.c_prime)
with tf.variable_scope('discriminator', reuse=True):
fake_D_logits = stanford_background_dataset_discriminator.stanford_bd_model(image,
fake_disciminator_input,
c_prime=self.c_prime)
else:
print('error! specified discriminator is not valid')
sys.exit(1)
# Define losses
D_loss = self.get_loss_discriminator(real_D_logits, fake_D_logits)
G_loss, valid_logits, valid_labels, valid_preds = self.get_loss_generator(labels, G_logits, fake_D_logits)
# Improved wasserstein gan penalty
#epsilon = tf.random_uniform([], 0.0, 1.0)
#x_hat = real_disciminator_input * epsilon + (1 - epsilon) * fake_disciminator_input
#with tf.variable_scope('discriminator', reuse=True):
# d_hat = largeFOV.largeFOV(x_hat, c_prime=self.c_prime)
#gradients = tf.gradients(d_hat, x_hat)[0]
#slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
#gradient_penalty = 10 * tf.reduce_mean((slopes - 1.0) ** 2)
#D_loss += gradient_penalty
# Define segmentation accuracy
pixel_acc, mean_iou_acc, mean_per_class_acc = compute_accuracy(valid_preds, valid_labels, self.num_classes, 'accuracy')
# Define optimizers
if self.optimizer == 'adagrad':
D_optimizer = tf.train.AdagradOptimizer(learning_rate=self.lr_d)
G_optimizer = tf.train.AdagradOptimizer(learning_rate=self.lr_g)
elif self.optimizer == 'sgd':
D_optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_d)
G_optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_g)
elif self.optimizer == 'adam':
D_optimizer = tf.train.AdamOptimizer(learning_rate=self.lr_d, epsilon=1e-05)
G_optimizer = tf.train.AdamOptimizer(learning_rate=self.lr_g, epsilon=1e-05)
D_grads = D_optimizer.compute_gradients(D_loss, var_list= tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model/discriminator'))
G_grads = G_optimizer.compute_gradients(G_loss, var_list= tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model/generator'))
D_train_step = D_optimizer.apply_gradients(D_grads)
G_train_step = G_optimizer.apply_gradients(G_grads)
return G_train_step, D_train_step, G_loss, D_loss, pixel_acc, mean_iou_acc, mean_per_class_acc
| ChangqingHui/Semantic-Segmentation-with-Adversarial-Networks | updater.py | updater.py | py | 14,171 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numbe... |
24945678151 | # -*- coding: utf-8 -*-
# ____ __ __ ___ _ _ _
# |_ /___ / _|/ _|/ __| (_)___ _ _| |_
# / // -_) _| _| (__| | / -_) ' \ _|
# /___\___|_| |_| \___|_|_\___|_||_\__|
#
"""Zeff unstructured temporal data."""
__author__ = """Lance Finn Helsten <lanhel@zeff.ai>"""
__copyright__ = """Copyright © 2019, Ziff, Inc. — All Rights Reserved"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=duplicate-code
# pylint: disable=too-few-public-methods
__all__ = ["UnstructuredTemporalData"]
import dataclasses
import typing
import datetime
from .file import FileContext
from .unstructureddata import UnstructuredData
@dataclasses.dataclass
class UnstructuredTemporalData(UnstructuredData):
"""Single item of unstructured temporal data in a record.
:property temporal_window: Size of window to analyze. This
is a required property and if missing the data will
fail in validation.
:property start_crop_time: Time location from begining of file
that will mark the start of the interval. Content before start
point will not be used to train the Ai.
:property end_crop_time: Time location from begining of file
that will mark the end of the interval. Content after end
point will not be used to train the Ai.
:property file_context:
"""
temporal_window: typing.Optional[datetime.time] = None
start_crop_time: typing.Optional[datetime.time] = None
end_crop_time: typing.Optional[datetime.time] = None
file_contexts: typing.List[FileContext] = dataclasses.field(default_factory=list)
| zeff-ai/ZeffClient | src/zeff/record/unstructuredtemporaldata.py | unstructuredtemporaldata.py | py | 2,133 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "unstructureddata.UnstructuredData",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "datetime.time",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_nam... |
3756817154 | import jpype as jp
from monostrategy import Monostrategy
class Kmeans(Monostrategy):
def __init__(self,arff_string,weights,nb_seeds,nb_steps):
if not jp.isThreadAttachedToJVM():
jp.attachThreadToJVM()
Monostrategy.__init__(self,arff_string,weights)
self.nb_seeds = int(nb_seeds)
self.nb_steps = int(nb_steps)
def get_method(self):
Monostrategy.get_method(self)
ClassifierKmeans = jp.JClass('jcl.learning.methods.monostrategy.kmeans.ClassifierKmeans')
return ClassifierKmeans(self.param, None)
def get_parameters(self):
Monostrategy.get_parameters(self)
ParametersKmeans = jp.JClass('jcl.learning.methods.monostrategy.kmeans.ParametersKmeans')
return ParametersKmeans(self.nb_seeds, self.nb_steps, self.global_weights);
| Alshak/jcl | jcl/methods/kmeans.py | kmeans.py | py | 847 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "monostrategy.Monostrategy",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "jpype.isThreadAttachedToJVM",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "jpype.attachThreadToJVM",
"line_number": 7,
"usage_type": "call"
},
{
"api_na... |
477494635 | #coding=utf-8
#__author__ = 'zgs'
import socket
import logging
import struct
import com_config
class Connection():
def __init__(self, host='127.0.0.1', port=1000):
if host == '127.0.0.1' and port == 1000:
self.host = com_config.access_host
self.port = com_config.access_port
else:
self.host = host
self.port = port
self.tcp_client = None
def create_conn(self):
print("Connection create_conn")
if self.tcp_client is not None:
return
try:
self.tcp_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.tcp_client.setblocking(True)
# self.tcp_client.settimeout(2)
self.tcp_client.connect((self.host, self.port))
return True
except Exception as ex:
return False
def close(self):
if self.tcp_client is not None:
self.tcp_client.close()
self.tcp_client = None
def send_message(self, content):
try:
if self.tcp_client is None:
if not self.create_conn():
return False
# print(len(content))
ret = self.tcp_client.sendall(struct.pack('I', socket.htonl(len(content))) + content.encode())
if ret != None:
raise Exception("send message error")
return True
except Exception as ex:
logging.error("exception ex, ", ex)
return False
def read_n_message(self, read_len):
data = b""
while read_len:
try:
recv_buf = self.tcp_client.recv(read_len)
read_len -= len(recv_buf)
data += recv_buf
except Exception as ex:
return False, b""
return True, data
def decode_len(self, content):
return socket.ntohl(struct.unpack('I', content)[0])
def read_message(self):
# read content len: integer type
ret, data = self.read_n_message(4)
if not ret:
return False, ""
content_len = self.decode_len(data)
# read content
ret, content = self.read_n_message(content_len)
return ret, content.decode()[:-1]
if __name__ == '__main__':
pass | Oreobird/face_id | src/test/common/connection.py | connection.py | py | 2,483 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "com_config.access_host",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "com_config.access_port",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "socket.socket",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": ... |
73571223143 | import config
import regex
from models.candidate import Candidate
def fuzzy_search(pattern, string, error_max):
"""Find approximately matching pattern in string
Args:
pattern (string): regex pattern
string (string): where search is performed
error_max (int)
Returns:
errors (int | None): None if no match
regex_match (Match)
"""
regex_pattern = f"({pattern}){{e<={error_max}}}"
match = regex.search(regex_pattern, string, regex.BESTMATCH)
if match is None:
return None, None
# match.fuzzy_counts: (n_substitutions, n_insertions, n_deletes)
errors = sum(match.fuzzy_counts)
return errors, match
def search_string(detected_text_list, pattern, region=None, n_candidates=1):
"""Searches for a string in all provided detected texts
Args
detected_text_list ([DetectedText])
pattern (string): regex pattern
region (BoundingBox | None): check if candidate bounding box's center
is inside region
n_candidates (int): number of candidates to return
Returns:
candidates ([Candidates]): n_candidates first candidates matching the
pattern
"""
candidates = []
for detected_text in detected_text_list:
if region is not None:
center = detected_text.bbox.get_barycenter()
if not region.contains_point(center):
continue
error, regex_match = fuzzy_search(
pattern, detected_text.text, config.ERROR_MAX
)
if error is None:
continue
candidates.append(Candidate(detected_text, error, regex_match, region))
candidates.sort(key=lambda c: c.errors)
return candidates[:n_candidates]
def search_string_relative(
reference,
detected_text_list,
pattern,
region,
region_is_relative=True,
n_candidates=1,
include_reference=False,
):
"""Searches for a string relative to a reference
Args:
reference (Candidate)
detected_text_list ([detected_text])
pattern (string): regex pattern
region (BoundingBox)
region_is_relative (bool): if True, region is relative to reference's
center and in units of reference.line_height
n_candidates (int): number of candidates to return
include_reference (bool): if True, include the cropped reference in the
search
Returns:
candidates ([Candidates]): n_candidates first candidates. May return
an element which is not originally in detected_text_list if
include_reference is True.
"""
# Compute absolute region
if region_is_relative:
region = region.copy()
ref_center = reference.detected_text.bbox.get_barycenter()
line_height = reference.detected_text.line_height
for p in region.points:
p.x *= line_height
p.y *= line_height
p.x += ref_center.x
p.y += ref_center.y
# If reference included:
# Build modified detected_text_list not containing the full "reference"
# text, which may contain a match before reference's end index.
# Example: reference text is "unwanted-value key: value", we should
# prevent "unwanted-value" from matching.
if include_reference:
detected_text_list = (
detected_text_list.copy()
) # don't mutate original list
if reference.detected_text in detected_text_list:
detected_text_list.remove(reference.detected_text)
cropped = reference.detected_text.copy()
cropped.text = cropped.text[reference.regex_match.span()[1] :]
detected_text_list.append(cropped)
return search_string(detected_text_list, pattern, region, n_candidates)
def search_string_on_right(
reference, detected_text_list, pattern, region_width=None, n_candidates=1
):
"""Searches for a string on the right of a reference
The checked region includes the reference bounding box, in case the
searched string is inside the reference.
Args:
reference (Candidate)
detected_text_list ([detected_text])
pattern (string): regex pattern
region_width (float): width added to the reference bounding box where
text is searched.
added width = reference.line_height * region_width.
If None, search the whole srceen width.
n_candidates (int): number of candidates to return
Returns:
candidates ([Candidates]): n_candidates first candidates. May return
an element which is not originally in detected_text_list.
"""
# Create searched region by expanding the reference's bounding box
region = reference.detected_text.bbox.copy()
if region_width is not None:
added_width = region_width * reference.detected_text.line_height
else:
added_width = 1 # full screen width
region.bottom_right.x += added_width
region.top_right.x += added_width
return search_string_relative(
reference,
detected_text_list,
pattern,
region,
region_is_relative=False,
n_candidates=n_candidates,
include_reference=True,
)
def search_string_below(
reference, detected_text_list, pattern, region_height=1, n_candidates=1
):
"""Searches for a string below a reference
The checked region is the reference bounding box shifted down by one
line_height.
Args:
reference (Candidate)
detected_text_list ([detected_text])
pattern (string): regex pattern
region_height (float): height of the region where text is searched,
in units of reference.line_height. A greater value extends the
region downwards.
n_candidates (int): number of candidates to return
Returns:
candidates ([Candidates]): n_candidates first candidates
"""
# Create searched region by shifting the reference's bounding box down
region = reference.detected_text.bbox.copy()
line_height = reference.detected_text.line_height
for p in region.points:
p.y -= reference.detected_text.line_height
# Change region height by expanding the bottom
region.bottom_left.y -= (region_height - 1) * line_height
region.bottom_right.y -= (region_height - 1) * line_height
return search_string_relative(
reference,
detected_text_list,
pattern,
region,
region_is_relative=False,
n_candidates=n_candidates,
)
| TemryL/EyeDocScanner_API | reader_scripts/search.py | search.py | py | 6,576 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "regex.search",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "regex.BESTMATCH",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "config.ERROR_MAX",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "models.candid... |
2446313714 | import unittest
from scripts.experiment import ExperimentalMicData
class ExperimentalMicDataTestCase(unittest.TestCase):
def setUp(self):
head = '/home/akhil/Sound-Source-Localization/data/'
self.sample_filename = "".join([head, 'CMU_ARCTIC/cmu_us_bdl_arctic/wav/',
'arctic_a0001.wav'])
self.sample_room_dimensions = [50/100, 50/100, 50/100]
self.sample_microphone_location = [15 / 100, 0 / 100, 3 / 100]
self.sample_source_location = [2.5 / 100, 4.5 / 100, 7.8 / 100]
class InitTestCase(ExperimentalMicDataTestCase):
"""
Test that the initial attributes are set up correctly.
"""
def test_no_keyword_arguments(self):
with self.assertRaises(ValueError):
self.src1_no_args = ExperimentalMicData(self.sample_filename)
def test_no_source_dimensions(self):
with self.assertRaises(ValueError):
self.src1_no_args = ExperimentalMicData(self.sample_filename,
room_dim=self.sample_room_dimensions)
def test_no_microphone_location(self):
with self.assertRaises(ValueError):
self.src1_no_args = ExperimentalMicData(self.sample_filename,
room_dim=self.sample_room_dimensions,
source_dim=self.sample_source_location)
def test_file_type(self):
test_case = 56
with self.assertRaises(TypeError):
self.src1 = ExperimentalMicData(test_case,
room_dim=self.sample_room_dimensions,
source_dim=self.sample_source_location,
mic_location=self.sample_microphone_location)
class ReadWavFileTestCase(ExperimentalMicDataTestCase):
"""
Test that the .wav file is read in correctly.
"""
def test_incorrect_file_name(self):
self.sample_wrong_filename = "".join(['/home/akhil/Sound-Source-Localization/data/',
'CMU_ARCTIC/cmu_us_bdl_arctic/wav/',
'arctic.wav'])
with self.assertRaises(FileNotFoundError):
self.src1 = ExperimentalMicData(self.sample_wrong_filename,
room_dim=self.sample_room_dimensions,
source_dim=self.sample_source_location,
mic_location=self.sample_microphone_location)._read_wav_file()
if __name__ == '__main__':
unittest.main()
| akhilvasvani/Sound-Source-Localization | test/unit/test_experiment.py | test_experiment.py | py | 2,693 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "scripts.experiment.ExperimentalMicData",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scripts.experiment.ExperimentalMicData",
"line_number": 28,
"usage_type": "c... |
25460961095 | '''import pandas as pd
garden_data = pd.read_csv("gardenAll.csv", encoding='euc-kr')
plant = "청옥"
data2 = garden_data[garden_data['name'] == "청옥"]
print(data2['temp(°C)'])
print(data2['hd(%)'])
print(data2['light(Lux)'])
print(data2['water'])
temp1 = data2['temp(°C)']
if float(temp1) >= 5:
for i in range(5):
print(i)'''
from flask import Flask,render_template,redirect,request
import requests
from multiprocessing import Process
import time
import sen_all
from concurrent.futures import ThreadPoolExecutor
import RPi.GPIO as gp
import pandas as pd
# gardenAll 데이터 불러오기
plant_data = pd.read_csv("gardenAll.csv", encoding="euc-kr")
app = Flask(__name__)
# 1. 히터(18),팬(22),물펌프(23) GPIO 값 설정
gp.setwarnings(False)
gp.setmode(gp.BCM)
gp.setup(18,gp.OUT)
gp.setup(22,gp.OUT)
gp.setup(23,gp.OUT)
# 2. temp(온도), hd(습도), lux(조도), waterA(급수), waterD(배수), moi(토양습도) 센서값 띄어쓰기로 분리
# 아두이노에서 받은 센서값 계속 오라클db로 전달
# 반복 작업
def get_url(url):
return requests.get(url)
data = sen_all.data_send()
temp, hd, lux, waterA, waterD, moi = data.split(' ')
def tes(data):
list_of_urls = [f"http://192.168.70.104:8088?data={data}"]
with ThreadPoolExecutor(max_workers=10) as pool:
response_list = list(pool.map(get_url,list_of_urls))
for i in range(10):
for response in response_list:
print(response)
pro1 = Process(args=(data,), target=tes)
pro1.start()
if __name__ == "__main__":
app.run(host="192.168.70.113", port=5022) | Yoonseungwook/dailylog | aa.py | aa.py | py | 1,601 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.setwarnings",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line... |
4078678340 | """Figure 1: teaser figure"""
import argparse
import os
import sys
from os.path import join
SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__))
sys.path.append(join(SCRIPT_DIR, '..'))
from common import *
import configs
import tqdm
from constants import OUTPUT_DIR
fig_name = 'teaser'
fig_dir = join(FIGURE_DIR, fig_name)
def main(force=0):
assert force <= 2
output_dir = OUTPUT_DIR
config = configs.Warp()
resx = 1024
resy = 1024
spp = 256
# Render all stored iterations
show_iters = np.arange(0, 512, 64).tolist() + [511, 'final']
result = {'scene': 'bench',
'opt_config': 'diffuse-32-hq',
'y_rotation': -120,
'y_offset': -0.09}
# Re-generate the optimization results if needed
print('[+] Running Optimizations')
run_optimization(result['scene'], config, result['opt_config'],
output_dir, force=force >= 2)
# For each optimization, render our target view for the visualization in high quality
print('[+] Rendering views')
pbar = tqdm.tqdm(len(show_iters) + 2)
scene_name, opt_config = result['scene'], result['opt_config']
scene_outputs = join(FIGURE_DIR, fig_name, scene_name)
kwargs = {'image_output_dir': scene_outputs, 'resx': resx, 'resy': resy, 'spp': spp,
'rotate_y': result['y_rotation'], 'translate_y': result['y_offset'],
'output_dir': output_dir, 'force': force > 0}
render_optimization_result(scene_name, config, opt_config, show_iters, **kwargs, pbar=pbar)
render_reconstructed_geometry(scene_name, config, opt_config, **kwargs)
pbar.update()
render_reference_object(scene_name, opt_config, **kwargs)
pbar.update()
mi.Thread.wait_for_tasks()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--force', action='count', default=0)
args = parser.parse_args(sys.argv[1:])
main(force=args.force)
| rgl-epfl/differentiable-sdf-rendering | figures/teaser/teaser.py | teaser.py | py | 1,970 | python | en | code | 810 | github-code | 36 | [
{
"api_name": "os.path.realpath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"lin... |
14366833514 | from flask import Flask, Response
import json
import pickle
import pmdarima
from datetime import datetime
import pandas as pd
def predecir(period):
pred_temp = pickle.load( open('./modelo_temperatura.p', 'rb') )
pred_hum = pickle.load( open('./modelo_humedad.p', 'rb') )
prediccion_temp = pred_temp.predict(n_periods=period)
prediccion_hum = pred_hum.predict(n_periods=period)
predicciones = []
hora = datetime.now().hour
for tiempo, humedad in zip(prediccion_temp, prediccion_hum):
predicciones.append(
{
'hour': str(hora%24) + ":00",
'temp': tiempo,
'hum': humedad
}
)
hora += 1
return predicciones
app = Flask(__name__)
@app.route('/api/v1/24horas', methods=['GET'])
def api_predecir_24():
res = Response(json.dumps(predecir(24)), status = 200)
res.headers['Content-Type']='application/json'
return res
@app.route('/api/v1/48horas', methods=['GET'])
def api_predecir_48():
res = Response(json.dumps(predecir(48)), status = 200)
res.headers['Content-Type']='application/json'
return res
@app.route('/api/v1/72horas', methods=['GET'])
def api_predecir_72():
res = Response(json.dumps(predecir(72)), status = 200)
res.headers['Content-Type']='application/json'
return res | fer227/API_WeatherPredictor | apiV1/apiV1.py | apiV1.py | py | 1,349 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
36829592590 | # mne.python.loading.data
import os
import numpy as np
import mne
# 1. Loading data
# mne-python库支持直接读取各种格式的数据文件,支持众多EEG数据采集设备
# 主要包括设置数据文件夹,原始数据读取到文件句柄,通过句柄传递给变量
sample_data_folder = mne.datasets.sample.data_path() # 数据文件夹
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample', # 数据读到句柄
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file) # 句柄传给变量
# 未知如下的嵌套方式的文件读取是否可行:
# raw = mne.io.read_raw_fif(os.path.join(mne.datasets.sample.data_path(),'MEG','sample','sample_audvis_filt-0-40_raw.fif'))
# 查看原始数据
print(raw)
# 查看原始数据的信息(采集过程中设备和采样率等信息)
print(raw.info)
# raw对象本身带有一些内建的方法
raw.plot_psd(fmax=50) # 绘制50Hz以下的信号
raw.plot(duration=5, n_channels=30) # 交互式的绘图方式,可以滚动、缩放、不良通道标记、注释
# 2. Preprocessing
# 包含很多信号预处理方法(麦克斯韦滤波,信号空间投影,独立分量分析,滤波,下采样)
# 此案例将展示独立成分分析(ICA)清理数据
# set up and fit the ICA
ica = mne.preprocessing.ICA(n_components=20, random_state=97, max_iter=800)
ica.fit(raw)
ica.exclude = [1, 2] # details on how we picked these are omitted here
ica.plot_properties(raw, picks=ica.exclude)
orig_raw = raw.copy()
raw.load_data()
ica.apply(raw)
# show some frontal channels to clearly illustrate the artifact removal
chs = ['MEG 0111', 'MEG 0121', 'MEG 0131', 'MEG 0211', 'MEG 0221', 'MEG 0231',
'MEG 0311', 'MEG 0321', 'MEG 0331', 'MEG 1511', 'MEG 1521', 'MEG 1531',
'EEG 001', 'EEG 002', 'EEG 003', 'EEG 004', 'EEG 005', 'EEG 006',
'EEG 007', 'EEG 008']
chan_idxs = [raw.ch_names.index(ch) for ch in chs]
orig_raw.plot(order=chan_idxs, start=12, duration=4)
raw.plot(order=chan_idxs, start=12, duration=4)
| oca-john/EEG.DeepLearning-xi | Python(EEG)/mne.python.loading.data--.py | mne.python.loading.data--.py | py | 2,168 | python | zh | code | 1 | github-code | 36 | [
{
"api_name": "mne.datasets.sample.data_path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mne.datasets",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path"... |
39207374957 | # --- carolin schieferstein & jose c. garcia alanis
# --- utf-8
# --- Python 3.7 / mne 0.20
#
# --- EEG prepossessing - dpx-r40 [WIP]
# --- version march 2020 [WIP]
import mne
import re
import glob
import os
from mne import pick_types, Epochs, combine_evoked
import pandas as pd
import numpy as np
output_dir = '/Volumes/Recovery/ern_soc_eeg/derivatives/segmentation/epochs'
output_dir_ave = '/Volumes/Recovery/ern_soc_eeg/derivatives/segmentation/epochs/average'
data_path = '/Volumes/Recovery/ern_soc_eeg/derivatives/pruned'
for file in sorted(glob.glob(os.path.join(data_path, '*.fif'))):
filepath, filename = os.path.split(file)
filename, ext = os.path.splitext(filename)
# Each time the loop goes through a new iteration,
# add a subject integer to the data path
data_path = '/Volumes/Recovery/ern_soc_eeg'
subj = re.findall(r'\d+', filename)[0]
# Read the raw EEG data that has been pre-processed, create an event file and down-sample the data for easier handling.
raw = mne.io.read_raw_fif(file, preload=True)
evs = mne.find_events(raw, stim_channel='Status', output='onset',
min_duration=0.002)
new_evs = evs.copy()
# Global variables
broken = []
trial = 0
# Recode reactions
for i in range(len(new_evs[:, 2])):
if new_evs[:, 2][i] == 71:
next_t = new_evs[range(i, i + 3)]
if [k for k in list(next_t[:, 2]) if k in {101, 102, 201, 202}]:
valid = True
trial += 1
continue
else:
broken.append(trial)
valid = False
trial += 1
continue
elif new_evs[:, 2][i] in {11, 12, 21, 22}:
if new_evs[:, 2][i] in {11, 12}:
suffix = 1 # Congr.
elif new_evs[:, 2][i] in {21, 22}:
suffix = 2 # Incongr.
continue
# Check if event preceded by other reaction
elif new_evs[:, 2][i] in {101, 102, 201, 202} and valid:
if trial <= 48:
if new_evs[:, 2][i] in [101, 102] and suffix == 1:
new_evs[:, 2][i] = 1091 # Correct Congr.
elif new_evs[:, 2][i] in [101, 102] and suffix == 2:
new_evs[:, 2][i] = 1092 # Correct Incongr.
elif new_evs[:, 2][i] in [201, 202] and suffix == 1:
new_evs[:, 2][i] = 1093 # Incorrect Congr.
elif new_evs[:, 2][i] in [201, 202] and suffix == 2:
new_evs[:, 2][i] = 1094 # Incorrect Incongr.
valid = False
continue
elif trial <= 448:
if new_evs[:, 2][i] in [101, 102] and suffix == 1:
new_evs[:, 2][i] = 2091 # Correct Congr.
elif new_evs[:, 2][i] in [101, 102] and suffix == 2:
new_evs[:, 2][i] = 2092 # Correct Incongr.
elif new_evs[:, 2][i] in [201, 202] and suffix == 1:
new_evs[:, 2][i] = 2093 # Incorrect Congr.
elif new_evs[:, 2][i] in [201, 202] and suffix == 2:
new_evs[:, 2][i] = 2094 # Incorrect Incongr.
valid = False
continue
elif trial <= 848:
if new_evs[:, 2][i] in [101, 102] and suffix == 1:
new_evs[:, 2][i] = 3091 # Correct Congr.
elif new_evs[:, 2][i] in [101, 102] and suffix == 2:
new_evs[:, 2][i] = 3092 # Correct Incongr.
elif new_evs[:, 2][i] in [201, 202] and suffix == 1:
new_evs[:, 2][i] = 3093 # Incorrect Congr.
elif new_evs[:, 2][i] in [201, 202] and suffix == 2:
new_evs[:, 2][i] = 3094 # Incorrect Incongr.
valid = False
continue
elif trial <= 1248:
if new_evs[:, 2][i] in [101, 102] and suffix == 1:
new_evs[:, 2][i] = 4091 # Correct Congr.
elif new_evs[:, 2][i] in [101, 102] and suffix == 2:
new_evs[:, 2][i] = 4092 # Correct Incongr.
elif new_evs[:, 2][i] in [201, 202] and suffix == 1:
new_evs[:, 2][i] = 4093 # Incorrect Congr.
elif new_evs[:, 2][i] in [201, 202] and suffix == 2:
new_evs[:, 2][i] = 4094 # Incorrect Incongr.
valid = False
continue
elif new_evs[:, 2][i] in {101, 102, 201, 202} and not valid:
continue
# --- 4) PICK CHANNELS TO SAVE -----------------------------
picks = pick_types(raw.info,
meg=False,
eeg=True,
eog=False,
stim=False)
if int(subj) in {1002, 1004, 1006, 1008,
1010, 1011, 1013, 1015,
1017, 1019, 1021, 1023,
1028}:
# set event ids
choice_event_id = {'P_Correct congr.': 1091,
'P_Correct incongr.': 1092,
'P_Incorrect congr.': 1093, # back to 1093
'P_Incorrect incongr.': 1094,
'S_Correct congr.': 2091,
'S_Correct incongr.': 2092,
'S_Incorrect congr.': 2093,
'S_Incorrect incongr.': 2094,
'+_Correct congr.': 3091,
'+_Correct incongr.': 3092,
'+_Incorrect congr.': 3093,
'+_Incorrect incongr.': 3094,
'-_Correct congr.': 4091,
'-_Correct incongr.': 4092,
'-_Incorrect congr.': 4093,
'-_Incorrect incongr.': 4094
}
else:
choice_event_id = {'P_Correct congr.': 1091,
'P_Correct incongr.': 1092,
'P_Incorrect congr.': 1093,
'P_Incorrect incongr.': 1094,
'S_Correct congr.': 2091,
'S_Correct incongr.': 2092,
'S_Incorrect congr.': 2093,
'S_Incorrect incongr.': 2094,
'-_Correct congr.': 3091,
'-_Correct incongr.': 3092,
'-_Incorrect congr.': 3093,
'-_Incorrect incongr.': 3094,
'+_Correct congr.': 4091,
'+_Correct incongr.': 4092,
'+_Incorrect congr.': 4093,
'+_Incorrect incongr.': 4094
}
# reject_criteria = dict(mag=4000e-15, # 4000 fT
# grad=4000e-13, # 4000 fT/cm
# eeg=150e-6, # 150 μV
# eog=250e-6)
choice_epochs = Epochs(raw, new_evs, choice_event_id,
on_missing='ignore',
tmin=-1,
tmax=1,
baseline=(-.550, -.300),
reject_by_annotation=True,
preload=True,
picks=['FCz', 'Cz'])
choice_epochs = choice_epochs.resample(sfreq=100, npad='auto')
choice_epochs = choice_epochs.crop(tmin=.0, tmax=0.1)
index, scaling_time, scalings = ['epoch', 'time'], 1e3, dict(grad=1e13)
df = choice_epochs.to_data_frame(picks=None, scalings=scalings,
scaling_time=scaling_time, index=index)
df = df.reset_index()
factors = ['condition', 'epoch']
df = df.assign(subject=subj)
df = pd.DataFrame(df)
df.to_csv(os.path.join(output_dir, 'sub-%s.tsv' % subj),
sep='\t',
index=True)
# fig = mne.viz.plot_events(new_evs, sfreq=raw.info['sfreq'])
# ig.subplots_adjust(right=0.7)
| CarolinSchieferstein/Master-DPX-EEG | python_scripts/07_export_epochs.py | 07_export_epochs.py | py | 8,133 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number"... |
13962392009 | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import gettext as _
from django.views.decorators.cache import never_cache
from account.templatetags.globalpermissions import has_addevent_group
from helfertool.utils import nopermission, serve_file
from ..decorators import archived_not_available
from ..forms import (
EventForm,
EventAdminRolesForm,
EventAdminRolesAddForm,
EventDeleteForm,
EventArchiveForm,
EventDuplicateForm,
EventMoveForm,
PastEventForm,
)
from ..models import Event, EventAdminRoles
from ..permissions import has_access, ACCESS_EVENT_EDIT
from datetime import datetime
from dateutil.relativedelta import relativedelta
import logging
logger = logging.getLogger("helfertool.registration")
@login_required
@never_cache
def edit_event(request, event_url_name=None):
event = None
# check permission
if event_url_name:
event = get_object_or_404(Event, url_name=event_url_name)
if not has_access(request.user, event, ACCESS_EVENT_EDIT):
return nopermission(request)
else:
# event will be created -> superuser or addevent group
if not (request.user.is_superuser or has_addevent_group(request.user)):
return nopermission(request)
# handle form
form = EventForm(request.POST or None, request.FILES or None, instance=event)
if form.is_valid():
event = form.save()
if not event_url_name:
# event was created at the moment -> add user as admin
if not request.user.is_superuser:
event.admins.add(request.user)
event.save()
logger.info(
"event created",
extra={
"user": request.user,
"event": event,
"source_url": None,
"source_pk": None,
},
)
messages.success(request, _("Event was created: %(event)s") % {"event": event.name})
else:
logger.info(
"event changed",
extra={
"user": request.user,
"event": event,
},
)
# redirect to this page, so reload does not send the form data again
# if the event was created, this redirects to the event settings
return redirect("edit_event", event_url_name=form["url_name"].value())
# get event without possible invalid modifications from form
saved_event = None
if event_url_name:
saved_event = get_object_or_404(Event, url_name=event_url_name)
# render page
context = {"event": saved_event, "form": form}
return render(request, "registration/admin/edit_event.html", context)
@login_required
@never_cache
def edit_event_admins(request, event_url_name=None):
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_EVENT_EDIT):
return nopermission(request)
# one form per existing admin (differentiated by prefix)
all_forms = []
event_admin_roles = EventAdminRoles.objects.filter(event=event)
for event_admin in event_admin_roles:
form = EventAdminRolesForm(
request.POST or None, instance=event_admin, prefix="user_{}".format(event_admin.user.pk)
)
all_forms.append(form)
# another form to add one new admin
add_form = EventAdminRolesAddForm(request.POST or None, prefix="add", event=event)
# we got a post request -> save
if request.POST and (all_forms or add_form.is_valid()):
# remove users without any role from admins (no roles = invalid forms)
for form in all_forms:
if form.is_valid():
if form.has_changed():
logger.info(
"event adminchanged",
extra={
"user": request.user,
"event": event,
"changed_user": form.instance.user.username,
"roles": ",".join(form.instance.roles),
},
)
form.save()
else:
logger.info(
"event adminremoved",
extra={
"user": request.user,
"event": event,
"changed_user": form.instance.user.username,
},
)
form.instance.delete()
# and save the form for a new admin
if add_form.is_valid():
new_admin = add_form.save()
if new_admin:
logger.info(
"event adminadded",
extra={
"user": request.user,
"event": event,
"changed_user": new_admin.user.username,
"roles": ",".join(new_admin.roles),
},
)
return redirect("edit_event_admins", event_url_name=event_url_name)
context = {"event": event, "forms": all_forms, "add_form": add_form}
return render(request, "registration/admin/edit_event_admins.html", context)
@login_required
@never_cache
def delete_event(request, event_url_name):
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_EVENT_EDIT):
return nopermission(request)
# form
form = EventDeleteForm(request.POST or None, instance=event)
if form.is_valid():
form.delete()
logger.info(
"event deleted",
extra={
"user": request.user,
"event": event,
},
)
messages.success(request, _("Event deleted: %(name)s") % {"name": event.name})
# redirect to shift
return redirect("index")
# render page
context = {"event": event, "form": form}
return render(request, "registration/admin/delete_event.html", context)
@login_required
@never_cache
@archived_not_available
def archive_event(request, event_url_name):
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_EVENT_EDIT):
return nopermission(request)
# form
form = EventArchiveForm(request.POST or None, instance=event)
if form.is_valid():
form.archive()
logger.info(
"event archived",
extra={
"user": request.user,
"event": event,
},
)
return redirect("edit_event", event_url_name=event_url_name)
# render page
context = {"event": event, "form": form}
return render(request, "registration/admin/archive_event.html", context)
@login_required
@never_cache
def duplicate_event(request, event_url_name):
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_EVENT_EDIT):
return nopermission(request)
# form
form = EventDuplicateForm(request.POST or None, other_event=event, user=request.user)
if form.is_valid():
form.save()
logger.info(
"event created",
extra={
"user": request.user,
"event": form.instance,
"source_url": event.url_name,
"source_pk": event.pk,
},
)
messages.success(request, _("Event was duplicated: %(event)s") % {"event": form["name"].value()})
return redirect("edit_event", event_url_name=form["url_name"].value())
# render page
context = {"event": event, "form": form}
return render(request, "registration/admin/duplicate_event.html", context)
@login_required
@never_cache
@archived_not_available
def move_event(request, event_url_name):
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_EVENT_EDIT):
return nopermission(request)
# form
form = EventMoveForm(request.POST or None, instance=event)
if form.is_valid():
event = form.save()
logger.info(
"event moved",
extra={
"user": request.user,
"event": event,
"new_date": event.date,
},
)
messages.success(request, _("Event was moved: %(event)s") % {"event": event.name})
return redirect("edit_event", event_url_name=event_url_name)
# render page
context = {"event": event, "form": form}
return render(request, "registration/admin/move_event.html", context)
@login_required
@never_cache
def past_events(request):
if not request.user.is_superuser:
return nopermission(request)
# form for months
months = 4 # the default value
form = PastEventForm(request.GET or None, initial={"months": months})
if form.is_valid():
months = form.cleaned_data.get("months")
# get events
deadline = datetime.today() - relativedelta(months=months)
events = Event.objects.filter(archived=False, date__lte=deadline).order_by("date")
context = {
"form": form,
"events": events,
}
return render(request, "registration/admin/past_events.html", context)
@login_required
def get_event_logo(request, event_url_name, logotype):
if logotype not in ["default", "social"]:
raise Http404()
event = get_object_or_404(Event, url_name=event_url_name)
# check permission
if not has_access(request.user, event, ACCESS_EVENT_EDIT):
return nopermission(request)
# output
if logotype == "default":
return serve_file(event.logo)
else:
return serve_file(event.logo_social)
| helfertool/helfertool | src/registration/views/event.py | event.py | py | 10,145 | python | en | code | 52 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "models.Event",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": ... |
71249204904 | __author__ = 'zfh'
import os
import utils
import matplotlib.pyplot as plt
artistList=set()
dir=os.path.join(utils.allResultPath,'bestanalysis')
files = os.listdir(dir)
for file in files:
with open(os.path.join(dir,file),'r') as csvfile:
while True:
line=csvfile.readline().strip('\n')
if not line:
break
line=line.split(',')
artistId=line[0]
artistList.add(artistId)
break
fileDict={}
dir=os.path.join(utils.allResultPath,'bestanalysis')
files = os.listdir(dir)
for file in files:
fileDict[str(file)]={}
with open(os.path.join(dir,file),'r') as csvfile:
while True:
line=csvfile.readline().strip('\n')
if not line:
break
line=line.split(',')
artistId=line[0]
play=int(line[1])
if artistId not in fileDict[str(file)].keys():
fileDict[str(file)][artistId]=[]
fileDict[str(file)][artistId].append(play)
resultPath=os.path.join(dir,'result')
if not os.path.exists(resultPath):
os.makedirs(resultPath)
plt.figure(figsize=(10,10))
for artistId in artistList:
for file,artistDict in fileDict.items():
plt.plot(artistDict[artistId],label=file)
plt.legend()
plt.savefig(os.path.join(resultPath,artistId))
plt.clf() | ifenghao/tianchi_contest | test/plotresults.py | plotresults.py | py | 1,360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "utils.allResultPath",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"lin... |
72242724903 | import requests
import re
from airports.serializers import AirportSerializer, RunwaySerializer, AirportCommSerializer
from airports.models import Airport, Runway, AirportComm
from datetime import datetime, timedelta, timezone
class FlightPlanAPIClient(object):
"""
API Client class used to construct requests to FlightPlaneDatabase's Airport Information API
"""
HOST = 'https://api.flightplandatabase.com/'
AIRPORT_MAPPER = {
'ICAO': 'icao',
'name': 'name',
'regionName': 'region',
'elevation': 'elevation',
}
WEATHER_MAPPER = {
'METAR': 'metar',
'TAF': 'taf',
}
RUNWAY_MAPPER = {
'ident': 'name',
'surface': 'surface_type',
'length': 'length',
'width': 'width',
'bearing': 'bearing'
}
COMM_MAPPER = {
'type': 'type',
'frequency': 'frequency'
}
passthrough_converter = {
'COM': AirportComm.CTAF,
'REC': AirportComm.ATIS,
'GND': AirportComm.GROUND,
'TWR': AirportComm.TOWER,
'APP': AirportComm.APPROACH,
'DEP': AirportComm.DEPARTURE,
'CLD': AirportComm.CLEARANCE
}
def __init__(self, *args, **kwargs):
self.http_session = requests.Session()
def get(self, icao):
airport = None
icao_db = icao.upper().strip()
query = Airport.objects.filter(icao=icao_db)
# If an Airport already exists, get it. Otherwise create it and all its Foreign Key relations
if query.exists():
airport = query.get()
# Check the last time the weather was updated. This site has limited access to the external API, so to
# avoid overloading with requests, we do not need to hit the external API if the airport already exists
# and the weather was updated in the last 30 minutes
if airport.last_weather:
difference = (datetime.now(timezone.utc) - airport.last_weather).seconds
if difference < 1800:
return {'pk': airport.pk}
url = u'{}nav/airport/{}'.format(self.HOST, icao_db)
headers = {
'Authorization': 'KM2iNdFY4lVKtHTiwsqEP40Wgarbm4z9MAbEVWqY',
'Accept': 'application/json',
'X-Units': 'AVIATION',
}
response = self.http_session.get(url, headers=headers)
# If the external API dpes not return data
response_code = response.status_code
if not response_code == 200:
# If this is an entirely new airport return an error, else return the current airport data
if not airport:
return {'error': response_code}
else:
return {'pk': airport.pk}
json = response.json()
if not airport:
airport = self.create_airport(json)
weather_data = self.set_weather_data(json)
weather_data.update({'last_weather': datetime.now()})
if weather_data and airport:
Airport.objects.filter(pk=airport.pk).update(**weather_data)
# Return the updated Airport pk to the view to handle the details page
return {'pk': airport.pk} if airport else {'pk': None}
def create_airport(self, json):
data = self.field_mapper_logic(self.AIRPORT_MAPPER, json)
data = self.airport_field_validator(data)
airport_serializer = AirportSerializer(data=data)
if airport_serializer.is_valid(raise_exception=False):
airport = Airport.objects.create(**airport_serializer.validated_data)
# With the newly created airport, also create Runway and AirportComm objects
self.create_runways(airport, json)
self.create_airport_comms(airport, json)
return airport
return None
def create_runways(self, airport, json):
length = int(len(json['runways'])/2)
for runway_data in json['runways'][:length]:
data = self.field_mapper_logic(self.RUNWAY_MAPPER, runway_data)
data.update({'airport': airport.pk})
data = self.runway_field_validator(data)
data = self.combine_runway_data(data)
runway_serializer = RunwaySerializer(data=data)
if runway_serializer.is_valid(raise_exception=False):
Runway.objects.get_or_create(**runway_serializer.validated_data)
return
def create_airport_comms(self, airport, json):
for airport_comm_data in json['frequencies']:
data = self.field_mapper_logic(self.COMM_MAPPER, airport_comm_data)
data.update({'airport': airport.pk})
data = self.airport_comm_field_validator(data)
airport_comm_serializer = AirportCommSerializer(data=data)
if airport_comm_serializer.is_valid(raise_exception=False):
AirportComm.objects.get_or_create(**airport_comm_serializer.validated_data)
return
def airport_field_validator(self, data):
value = data['name']
converted_value = value.lower().title()
data['name'] = converted_value
elev = data['elevation']
converted_elev = round(elev)
data['elevation'] = converted_elev
return data
def runway_field_validator(self, data):
# These fields come in from the API in raw format
strings_to_decimals = {'length': 0, 'width': 0, 'bearing': 2}
for field in strings_to_decimals:
value = data[field]
converted_value = round(float(value), strings_to_decimals[field])
data[field] = converted_value
# Convert API values to Django Field Choices
runway_dict = dict(Runway.SURFACE_CHOICES)
for key in runway_dict:
if runway_dict[key].lower() == data['surface_type'].lower():
data['surface_type'] = key
return data
data['surface_type'] = Runway.OTHER
return data
@staticmethod
def combine_runway_data(data):
name = data['name']
numerical_name = int(re.findall("\d+", name)[0]) + 18
letter_name = ''
if 'L' in name.upper():
letter_name = 'R'
if 'R' in name.upper():
letter_name = 'L'
if 'C' in name.upper():
letter_name = 'C'
new_name = u'{}/{}{}'.format(data['name'], numerical_name, letter_name)
data.update({'name': new_name})
bearing = data['bearing']
new_bearing = u'{}/{}'.format(bearing, round(float(bearing + 180), 2))
data.update({'bearing': new_bearing})
return data
def airport_comm_field_validator(self, data):
strings_to_decimals = ['frequency']
for field in strings_to_decimals:
value = data[field]
converted_value = round(float(value/1000000), 3)
data[field] = converted_value
# Convert API values to Django Field Choices
# Requires extra conversion to convert from API raw value to clean Django Choice
for key in self.passthrough_converter:
if key == data['type'].upper():
data['type'] = self.passthrough_converter[key]
return data
# If not matches, set as UNICOM
data['type'] = AirportComm.UNICOM
return data
def set_weather_data(self, json):
# Once an airport is set, update the weather on this airport based on the current data
weather_data = json['weather']
data = self.field_mapper_logic(self.WEATHER_MAPPER, weather_data)
data = self.weather_field_validator(data)
# Get offset from raw data
offset = json['timezone'].get('offset', 0)
# Get sunrise/sunset values from API raw data and convert to DateTime
# We must remove the trailing Zulu and mid-line T from the API raw_value
raw_sunset = json['times'].get('sunset', None)
if raw_sunset:
sunset = datetime.strptime(raw_sunset[:-1].replace('T', ' '), '%Y-%m-%d %H:%M:%S.%f')
sunset = sunset + timedelta(seconds=offset)
data['sunset'] = sunset.replace(second=0, microsecond=0)
raw_sunrise = json['times'].get('sunrise', None)
if raw_sunrise:
sunrise = datetime.strptime(raw_sunrise[:-1].replace('T', ' '), '%Y-%m-%d %H:%M:%S.%f')
sunrise = sunrise + timedelta(seconds=offset)
data['sunrise'] = sunrise.replace(second=0, microsecond=0)
return data
@staticmethod
def weather_field_validator(data):
metar = data['metar']
taf = data['taf']
metar_length = 512
if not metar:
data.pop('metar', None)
elif len(metar) > metar_length:
data['metar'] = metar[:(metar_length-3)] + '...'
taf_length = 1024
if not taf:
data.pop('taf', None)
elif len(taf) > taf_length:
data['taf'] = taf[:(taf_length - 3)] + '...'
return data
@staticmethod
def field_mapper_logic(field_mapper, json):
data = {}
for field in field_mapper:
if field in json:
data[field_mapper[field]] = json[field]
return data
| bfolks2/django-aviation | api/flightplan_client.py | flightplan_client.py | py | 9,259 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "airports.models.AirportComm.CTAF",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "airports.models.AirportComm",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "airports.models.AirportComm.ATIS",
"line_number": 42,
"usage_type": "at... |
23411926390 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('compras', '0181_auto_20160908_2139'),
]
operations = [
migrations.RemoveField(
model_name='compradetalle',
name='unidad_medida_compra',
),
migrations.AlterField(
model_name='compra',
name='estado_compra',
field=models.ForeignKey(default=5, verbose_name=b'Estado Compra', to='bar.OrdenCompraEstado', help_text=b'Se asignan los Estados de la Orden de Compra.'),
),
migrations.AlterField(
model_name='compra',
name='usuario_registro_compra',
field=models.ForeignKey(related_name='usuario_registro_compra', default=17, verbose_name=b'Confirmado por?', to='personal.Empleado', help_text=b'Usuario que registro la Compra.'),
),
migrations.AlterField(
model_name='ordencompra',
name='fecha_entrega_orden_compra',
field=models.DateTimeField(default=datetime.datetime(2016, 9, 10, 15, 49, 29, 901000, tzinfo=utc), help_text=b'Indique la fecha y hora en la que el proveedor debe entregar la Orden de Compra.', verbose_name=b'Fecha/hora de Entrega'),
),
]
| pmmrpy/SIGB | compras/migrations/0182_auto_20160909_1149.py | 0182_auto_20160909_1149.py | py | 1,387 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.RemoveField",
"line_number": 16,
"usage_type": "call"
},
... |
20136022872 | from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import ByteString, Callable, Generic, Tuple, TypeVar
from network import UDPLink
import torch
import numpy as np
# import aestream
T = TypeVar("T")
class Observation():
def __init__(self, tof, bat, imu, uwb):
self.tof = tof
self.bat = bat
self.imu = imu
self.uwb = uwb
class Sensor(ABC, Generic[T]):
@abstractmethod
def read(self) -> T:
pass
# class DVSSensor(Sensor[torch.Tensor]):
# def __init__(self, shape: Tuple[int, int], port: int):
# self.source = aestream.UDPInput(shape, port)
# def read(self) -> torch.Tensor:
# return self.source.read()
class DurinSensor(Sensor[Observation]):
def __init__(self, link: UDPLink):
self.link = link
self.tof = np.zeros((8,8*8))
self.uwb = np.zeros((0,))
self.charge = 0
self.voltage = 0
self.imu = np.zeros((3,3))
def read(self) -> Observation:
(sensor_id, data) = self.link.get()
if sensor_id >= 128 and sensor_id <= 131:
idx = sensor_id - 128
self.tof[:,idx*16:idx*16+16] = data
if sensor_id == 132:
self.charge = data[0]
self.voltage = data[1]
self.imu = data[2]
if sensor_id == 133:
self.uwb = data
return Observation(self.tof, (self.charge, self.voltage), self.imu, self.uwb)
| jpromerob/MockUpBot | durin/sensor.py | sensor.py | py | 1,470 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "abc.ABC",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.Generic",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_... |
10742295231 | import xml.etree.ElementTree as et
def import_monster(name):
tree = et.parse("stats/monster_manual.xml")
root = tree.getroot()
if (root.tag != "mm"):
print ("Monster manual not found!\n")
return None
# Iterate through entries in monster manual
for monster in root:
for child in monster:
if (child.tag == "name" and child.text == name):
print ("Found!")
return monster
else:
continue
print ("Not found!")
return None
| MontyKhan/DnD_Scripts | monster_manual.py | monster_manual.py | py | 451 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 4,
"usage_type": "name"
}
] |
28040851490 | # import dependencies
from os.path import join
import pandas as pd
# import functions
from eir_functions import scrape_all, to_binary, to_csv, binary_to_csvs, clean_metadata, clean_measurements, clean_csvs, to_individuals
# bring in config values
from sys import path
path.insert(0, "..")
from config import eir_raw_source, eir_cleaned_destination_csv, eir_cleaned_destination_bin, eir_anchor_search_term, eir_file_extension
# toggle routine steps
save_load_binary = False
save_csv = False
clean_existing_raw = True
# cleans existing raw data
if clean_existing_raw:
# clean the raw csvs
clean_csvs(eir_cleaned_destination_csv)
else:
# specify targeted data
scraped_workbook_qty = 0 # sets a limit on how many workbooks will be scraped, if 0 then no limit
randomize_workbooks = False # will select workbooks at random, if qty = 0 then set this to False
overwrite_targeted_workbooks = [] # if list is empty then scrape_all will use the quantity/randomized parameters
# scrape all the relevant data from the electronic inspection workbooks
raw_results_all = scrape_all(
eir_raw_source,
eir_anchor_search_term,
eir_file_extension,
qty_limit = scraped_workbook_qty,
is_random = randomize_workbooks,
workbooks = overwrite_targeted_workbooks)
raw_metadata_df, raw_measurements_df = raw_results_all
if (raw_metadata_df is not None) and (raw_measurements_df is not None):
# saves results to then loads from a binary file
if save_load_binary:
# binary file name
raw_bin_file_name = "raw_results_all.pkl"
cln_bin_file_name = "cln_results_all.pkl"
# saves raw results to binary
to_binary(eir_cleaned_destination_bin, raw_bin_file_name, raw_results_all)
# saves cleaned results to binary
raw_metadata_df, raw_measurements_df = raw_results_all
cln_metadata_df = clean_metadata(raw_metadata_df)
cln_measurements_df = clean_measurements(raw_measurements_df)
to_binary(eir_cleaned_destination_bin, cln_bin_file_name, (cln_metadata_df, cln_measurements_df))
# loads from binary and resaves to csv if the results aren't already being converted directly to csv
if not save_csv:
binary_to_csvs(eir_cleaned_destination_bin, eir_cleaned_destination_csv, raw_bin_file_name, cln_bin_file_name)
# saves results directly to csv files
if save_csv:
# extract then clean the raw results
raw_metadata_df, raw_measurements_df = raw_results_all
raw_metadata_df.to_csv(join(eir_cleaned_destination_csv, "raw_metadata_df.csv"), index = False)
raw_measurements_df.to_csv(join(eir_cleaned_destination_csv, "raw_measurements_df.csv"), index = False)
cln_metadata_df = clean_metadata(raw_metadata_df)
cln_measurements_df = clean_measurements(raw_measurements_df)
# saves to csv files
to_csv(eir_cleaned_destination_csv, raw_results_all, (cln_metadata_df, cln_measurements_df))
# saves as individual files
to_individuals(cln_metadata_df, cln_measurements_df, join(eir_cleaned_destination_csv, "_individual_files"))
else:
print("Extracted data is null") | seneubauer/qc-modernization | eir_conversion/extract_eir_info.py | extract_eir_info.py | py | 3,365 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "eir_functions.clean_csvs",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "config.eir_cleaned_... |
6704842108 | import os, sys
import asyncio
import aiohttp # pip install aiohttp
import aiofiles # pip install aiofiles
def download_files_from_report(file_name):
if sys.version_info[0] == 3 and sys.version_info[1] >= 8 and sys.platform.startswith('win'):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
REPORTS_FOLDER = "zips"
FILES_PATH = os.path.join(REPORTS_FOLDER)
urls = []
with open(f'{file_name}.txt', 'r', encoding='utf-8') as links:
for line in links:
line = line.strip()
urls.append(line)
os.makedirs(FILES_PATH, exist_ok=True)
sema = asyncio.BoundedSemaphore(5)
async def fetch_file(url):
fname = url.split("/")[-1]
async with sema, aiohttp.ClientSession() as session:
async with session.get(url) as resp:
assert resp.status == 200
data = await resp.read()
async with aiofiles.open(
os.path.join(FILES_PATH, fname), "wb"
) as outfile:
await outfile.write(data)
loop = asyncio.get_event_loop()
tasks = [loop.create_task(fetch_file(url)) for url in urls]
loop.run_until_complete(asyncio.wait(tasks))
loop.close() | NurlanTanatar/download_mangas | get_files.py | get_files.py | py | 1,228 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.platform.startswith",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "asyncio.... |
14521290687 | #imports contab package
from crontab import CronTab
#creates the cron job of Detect-IP.py script to run every hour
cron = CronTab(user="root")
detectIPjob = cron.new(command="python3 Detect-IP.py")
detectIPjob.hour.every(1)
cron.write()
#creates the cron job of Backup.py to run every Friday
cron = CronTab(user="root")
Backupjob = cron.new(command="python3 Backup.py")
Backupjob.day.on(5)
cron.write()
| Splixxy/Cron-Job | Main.py | Main.py | py | 403 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "crontab.CronTab",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "crontab.CronTab",
"line_number": 9,
"usage_type": "call"
}
] |
3845969200 | # С5.6. Итоговое практическое задание
# Телеграм-бот: Конвертор валют
# Студент: Кулагин Станислав
# Поток: FWP_123
import requests
import json
from config import keys, HEADERS
class APIException(Exception):
pass
class CryptoConvertor:
@staticmethod
def get_price(quote: str, base: str, amount: str):
if quote == base:
raise APIException(f'Вы указали одинаковые валюты {base}.')
try:
quote_ticker = keys[quote]
except KeyError:
raise APIException(f"Не удалось обработать валюту {quote}")
try:
base_ticker = keys[base]
except KeyError:
raise APIException(f"Не удалось обработать валюту {base}")
try:
amount = float(amount)
except ValueError:
raise APIException(f"Ну удалось обработать количество {amount}")
url = f'https://api.apilayer.com/currency_data/convert?to={base_ticker}&from={quote_ticker}&amount={amount}'
r = requests.get(url, headers=HEADERS) # добавляем apikey в заголовок запроса
total_base = json.loads(r.content)['result']
return total_base | kulstas/Skillfactory | С5.6._Telegram-bot/extensions.py | extensions.py | py | 1,359 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "config.keys",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "config.keys",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "config.HEADERS",
"line_numbe... |
36784973558 | from collections import defaultdict
from nltk import ngrams
import pickle
class SentsFromCorpus():
def __init__(self, path):
self.path = path
def __iter__(self):
with open(self.path) as f:
for ln in f:
if ln == '\n':
continue
yield [x.split('_')[0].lower() for x in ln.split('\t')]
class SentsFromCorpusPP():
def __init__(self, path):
self.path = path
def __iter__(self):
with open(self.path) as f:
for ln in f:
if ln == '\n':
continue
if 'PP' in ln:
yield [x for x in ln.split('\t')]
def news_ngrams(cpath):
corpus_sentences = SentsFromCorpusPP(cpath)
ngram_dict = defaultdict(int)
folder_path = '/home/adam/Documents/lexical_blends_project/lexicon_wordlists/'
with open(folder_path + 'saldo_news_wordlist_f.pickle', 'rb') as f:
lexicon = pickle.load(f)
for i, sentence in enumerate(corpus_sentences):
for ng in ngrams(sentence, 3):
if 'PP' in ng[1]:
#print(ng)
w1 = ng[0].split('_')[0]
w2 = ng[2].split('_')[0]
if w1 in lexicon and w2 in lexicon:
ngram_dict[(w1,w2)] += 1
for k, v in ngram_dict.items():
ngram_dict[k] = ngram_dict[k]/lexicon[k[1]]
# (w_-1, w_0)
with open('3-gramsPP_saldo.pickle', '+wb') as o:
pickle.dump(ngram_dict, o)
if __name__ == '__main__':
news_ngrams('/home/adam/data/news/sentence_segmented/newscorpus.txt') | adamlek/swedish-lexical-blends | ngrams.py | ngrams.py | py | 1,633 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "nltk.ngrams",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"li... |
10166527789 | import os
from datetime import datetime
import tensorflow as tf
import numpy as np
import json
from sklearn.model_selection import train_test_split
DATA_IN_PATH ='./data_in/'
DATA_OUT_PATH = './data_out/'
FILE_DIR_PATH = DATA_IN_PATH
INPUT_TRAIN_DATA_FILE_NAME = 'nsmc_train_input.npy'
LABEL_TRAIN_DATA_FILE_NAME = 'nsmc_train_label.npy'
DATA_CONFIGS_FILE_NAME = 'data_cnofigs.json'
input_data = np.load(open(FILE_DIR_PATH + INPUT_TRAIN_DATA_FILE_NAME, 'rb'))
label_data = np.load(open(FILE_DIR_PATH + LABEL_TRAIN_DATA_FILE_NAME, 'rb'))
prepro_configs = json.load(open(FILE_DIR_PATH + DATA_CONFIGS_FILE_NAME, 'r'))
TEST_SPLIT= 0.1
RNG_SEED = 13371447
input_train, input_eval, label_train, label_eval = train_test_split(input_data, label_data, test_size=TEST_SPLIT, random_state=RNG_SEED)
def mapping_fn (X, Y):
input, label = {'x':X}, Y
return input,label
#hiper parameter
BATCH_SIZE = 16
NUM_EPOCHS = 10
vocab_size = prepro_configs['vocab_size']
embedding_size = 128
def train_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(input_train,label_train)
dataset = dataset.shuffle(buffer_size=len(input_train))
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.map(mapping_fn)
dataset = dataset.repeat(count=NUM_EPOCHS)
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
def eval_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(input_eval, label_eval)
dataset = dataset.shuffle(buffer_size=len(input_eval))
dataset = dataset.batch(16)
dataset = dataset.map(mapping_fn)
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
def model_fn(features, labels, mode, params):
TRAIN = mode == tf.estimator.ModeKeys.TRAIN
EVAL = mode == tf.estimator.ModeKeys.EVAL
PREDICT = mode == tf.estimator.ModeKeys.PREDICT
# make network
embedding_layer = tf.keras.layers.Embedding(VOCAB_SIZE, EMB_SIZE)(features['x'])
dropout_emb = tf.keras.layers.Dropout(rate=0.2)(embedding_layer)
conv = tf.keras.layers.Conv1d(filters=32,kernel_size=3,padding='same', activation=tf.nn.relu)(dropout_emb)
pool = tf.keras.layers.GlobalMaxPool1D()(conv)
hidden = tf.keras.layers.Dense(units=250, activation=tf.nn.relu)(pool)
dropout_hidden = tf.keras.layers.Dropout(rate=0.2, training=TRAIN)(hidden)
logits = tf.keras.layers.Dense(units=1)(dropout_hidden)
if labels is not None:
labels= tf.reshape(labels, [-1,1])
if TRAIN:
global_step = tf.train.get_global_step()
loss = tf.losses.sigmoid_cross_entropy(labels,logits)
train_op = tf.train.AdamOptimizer(0.001).minimize(loss, global_step)
return tf.estimator.EstimatorSpec(mode=mode, trian_op=train_op, loss=loss)
elif EVAL:
loss=tf.losses.sigmoid_cross_entropy(labels,logits)
pred = tf.nn.sigmoid(logits)
accuracy = tf.metrics.accuracy(labels,tf.round(pred))
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops = {'acc':accuracy})
elif PREDICT:
return tf.estimator.EstimatorSpec(mode=mode,loss=loss, predictions={'proc':tf.nn.sigmoid(logits),})
est = tf.estimator.EstimatorSpec(model_fn, model_dir='data_out/checkpoint/cnn_model')
time_start = datetime.utcnow()
est.train(train_input_fn)
time_end = datetime.utcnow()
time_elapsed = time_end - time_start
print("Experiment elapsed time: {} seconds", format(time_elapsed.total_seconds()))
valid = est.evaluate(eval_input_fn)
#test
INPUT_TEST_DATA_FILE_NAME = 'nsmc_test_input.npy'
LABEL_TEST_DATA_FILE_NAME = 'nsmc_test_label.npy'
test_input_data = np.load(open(FILE_DIR_PATH + INPUT_TEST_DATA_FILE_NAME, 'rb'))
test_label_data = np.load(open(FILE_DIR_PATH + LABEL_TEST_DATA_FILE_NAME, 'rb'))
def test_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(test_input_data,test_label_data)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.map(mapping_fn)
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
predict = est.evaluate(test_input_fn)
| minkyujoo/TCL_NLP | TCL_NLP/textclassifier_modeling.py | textclassifier_modeling.py | py | 3,951 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_sp... |
5057340753 | #!/usr/bin/python3
import logging
import logging.handlers
import speedtest
import thingspeak
import traceback
import json
import os
rootLogger = logging.getLogger('')
rootLogger.setLevel(logging.INFO)
def joinPathToScriptDirectory(path):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)
def configure_log():
root = logging.getLogger()
log_file_name = joinPathToScriptDirectory('speedtest.log')
h = logging.handlers.RotatingFileHandler(log_file_name, maxBytes=1024*1024, backupCount=5)
f = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s %(message)s')
h.setFormatter(f)
root.addHandler(h)
def main():
configure_log()
global channel
try:
config_file_path = joinPathToScriptDirectory('thingspeak.json')
with open(config_file_path) as config_file:
config = json.load(config_file)
channel_id = config["channel"]
write_key = config["writekey"]
#for server in speedtest.list_servers():
# print('%(id)4s) %(sponsor)s (%(name)s, %(country)s) ''[%(d)0.2f km]' % server)
ping, download, upload, server = speedtest.test_speed(timeout=30, secure=True)
download = download /(1000.0*1000.0)*8
upload = upload /(1000.0*1000.0)*8
logging.info('Ping %dms; Download: %2f; Upload %2f', ping, download, upload)
channel = thingspeak.Channel(id=channel_id, write_key=write_key)
response = channel.update({1: ping, 2: download, 3: upload})
print(response)
except KeyboardInterrupt:
print('\nCancelling...')
speedtest.cancel_test()
except Exception:
logging.exception("Exception has occurred")
traceback.print_exc()
if __name__ == '__main__':
main()
| barakwei/speedtestReporter | speedtestReporter.py | speedtestReporter.py | py | 1,783 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
28064994078 | from math import prod
from torch import zeros
from torch.nn import Module, Sequential, Conv1d, ReLU, Linear
class SimpleFFDQN(Module):
def __init__(self, obs_len, n_actions):
super().__init__()
self.fc_val = Sequential(
Linear(obs_len, 512),
ReLU(),
Linear(512, 512),
ReLU(),
Linear(512, 1)
)
self.fc_adv = Sequential(
Linear(obs_len, 512),
ReLU(),
Linear(512, 512),
ReLU(),
Linear(512, n_actions)
)
def forward(self, x):
adv, val = self.fc_adv(x), self.fc_val(x)
return val + (adv - adv.mean(dim=1, keepdim=True))
class DQNConv1D(Module):
def __init__(self, input_shape, n_actions):
super().__init__()
self.conv = Sequential(
Conv1d(input_shape[0], 128, 5),
ReLU(),
Conv1d(128, 128, 5),
ReLU(),
)
conv_out_size = self._get_conv_out(input_shape)
self.fc_val = Sequential(
Linear(conv_out_size, 512),
ReLU(),
Linear(521, 1)
)
self.fc_adv = Sequential(
Linear(conv_out_size, 512),
ReLU(),
Linear(512, n_actions)
)
def _get_conv_out(self, shape):
o = self.conv(zeros(1, *shape)) # 1 is the batch size
return prod(o.size())
def forward(self, x):
conv_out = self.conv(x).flatten(start_dim=1)
adv, val = self.fc_adv(conv_out), self.fc_val(conv_out)
return val + (adv - adv.mean(dim=1, keepdim=True))
| Daggerfall-is-the-best-TES-game/reinforcement-learning | Chapter10/lib/models.py | models.py | py | 1,624 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
... |
9829642790 | from collections import deque
PAID_COMMAND = "Paid"
END_COMMAND = "End"
q = deque()
while True:
command = input()
if command == PAID_COMMAND:
while q:
print(q.popleft())
elif command == END_COMMAND:
print(f"{len(q)} people remaining.")
break
else:
q.append(command)
| skafev/Python_advanced | 01First_week/03Supermarket.py | 03Supermarket.py | py | 329 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
}
] |
40574898325 | import pytest
from game.radio.tacan import (
OutOfTacanChannelsError,
TacanBand,
TacanChannel,
TacanRegistry,
TacanUsage,
)
ALL_VALID_X_TR = [1, *range(31, 46 + 1), *range(64, 126 + 1)]
ALL_VALID_X_A2A = [*range(37, 63 + 1), *range(100, 126 + 1)]
def test_allocate_first_few_channels() -> None:
registry = TacanRegistry()
chan1 = registry.alloc_for_band(TacanBand.X, TacanUsage.TransmitReceive)
chan2 = registry.alloc_for_band(TacanBand.X, TacanUsage.TransmitReceive)
chan3 = registry.alloc_for_band(TacanBand.X, TacanUsage.TransmitReceive)
assert chan1 == TacanChannel(1, TacanBand.X)
assert chan2 == TacanChannel(31, TacanBand.X)
assert chan3 == TacanChannel(32, TacanBand.X)
def test_allocate_different_usages() -> None:
"""Make sure unallocated channels for one use don't make channels unavailable for other usage"""
registry = TacanRegistry()
chanA2AX = registry.alloc_for_band(TacanBand.X, TacanUsage.AirToAir)
chanA2AY = registry.alloc_for_band(TacanBand.Y, TacanUsage.AirToAir)
assert chanA2AX == TacanChannel(37, TacanBand.X)
assert chanA2AY == TacanChannel(37, TacanBand.Y)
chanTRX = registry.alloc_for_band(TacanBand.X, TacanUsage.TransmitReceive)
chanTRY = registry.alloc_for_band(TacanBand.Y, TacanUsage.TransmitReceive)
assert chanTRX == TacanChannel(1, TacanBand.X)
assert chanTRY == TacanChannel(1, TacanBand.Y)
def test_reserve_all_valid_transmit_receive() -> None:
registry = TacanRegistry()
print("All valid x", ALL_VALID_X_TR)
for num in ALL_VALID_X_TR:
registry.mark_unavailable(TacanChannel(num, TacanBand.X))
with pytest.raises(OutOfTacanChannelsError):
registry.alloc_for_band(TacanBand.X, TacanUsage.TransmitReceive)
# Check that we still can allocate an a2a channel even
# though the T/R channels are used up
chanA2A = registry.alloc_for_band(TacanBand.X, TacanUsage.AirToAir)
assert chanA2A == TacanChannel(47, TacanBand.X)
def test_reserve_all_valid_a2a() -> None:
registry = TacanRegistry()
print("All valid x", ALL_VALID_X_A2A)
for num in ALL_VALID_X_A2A:
registry.mark_unavailable(TacanChannel(num, TacanBand.X))
with pytest.raises(OutOfTacanChannelsError):
registry.alloc_for_band(TacanBand.X, TacanUsage.AirToAir)
# Check that we still can allocate an a2a channel even
# though the T/R channels are used up
chanTR = registry.alloc_for_band(TacanBand.X, TacanUsage.TransmitReceive)
assert chanTR == TacanChannel(1, TacanBand.X)
def test_reserve_again() -> None:
registry = TacanRegistry()
registry.mark_unavailable(TacanChannel(1, TacanBand.X))
registry.mark_unavailable(TacanChannel(1, TacanBand.X))
def test_tacan_parsing() -> None:
assert TacanChannel.parse("1X") == TacanChannel(1, TacanBand.X)
assert TacanChannel.parse("1Y") == TacanChannel(1, TacanBand.Y)
assert TacanChannel.parse("10X") == TacanChannel(10, TacanBand.X)
assert TacanChannel.parse("100X") == TacanChannel(100, TacanBand.X)
with pytest.raises(ValueError):
TacanChannel.parse("1000X")
with pytest.raises(ValueError):
TacanChannel.parse("0X")
with pytest.raises(ValueError):
TacanChannel.parse("1Z")
with pytest.raises(ValueError):
TacanChannel.parse("X")
with pytest.raises(ValueError):
TacanChannel.parse("1")
with pytest.raises(ValueError):
TacanChannel.parse("1 X")
with pytest.raises(ValueError):
TacanChannel.parse(" 1X")
with pytest.raises(ValueError):
TacanChannel.parse("1X ")
with pytest.raises(ValueError):
TacanChannel.parse("1x")
| dcs-liberation/dcs_liberation | tests/test_tacan.py | test_tacan.py | py | 3,689 | python | en | code | 647 | github-code | 36 | [
{
"api_name": "game.radio.tacan.TacanRegistry",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "game.radio.tacan.TacanBand.X",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "game.radio.tacan.TacanBand",
"line_number": 17,
"usage_type": "name"
}... |
17094646997 | """ Cheddargetter models used in framework."""
from collections import namedtuple
from hashlib import md5
import requests
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from . import six
from .settings import settings
from .client import Client
from .utils import namedtuple_as_dict, classproperty
class FactoryMeta(type):
""" Proxy objects metaclass. """
__store__ = dict()
def __new__(class_, name, bases, params):
cls = super(FactoryMeta, class_).__new__(class_, name, bases, params)
class_.__store__[name] = cls
return cls
class Factory(object):
@staticmethod
def instantiate(class_name, **kwargs):
class_name = str(class_name)
cls_mixin_name = class_name + "Mixin"
cls_mixin = FactoryMeta.__store__.get(cls_mixin_name, object)
cls_base = namedtuple(class_name, kwargs.keys())
methods = {"as_dict": property(namedtuple_as_dict)}
cls = type(class_name, (cls_base, cls_mixin), methods)
return cls(**kwargs)
class ClientMixin(object):
@classproperty
@classmethod
def client(cls):
return Client(
settings.USERNAME,
settings.PASSWORD,
settings.PRODUCT_CODE
)
@six.add_metaclass(FactoryMeta)
class CustomerMixin(ClientMixin):
@property
def key(self):
""" Customer key used in url links.
The Customer Key is a hash of the Customer Code and your Product
Secret Key. Customers will also be able to update their data through
links. Some of the links should have customer key as a parameter.
:return str: customer key
"""
KEY_LENGTH = 10
key = md5("{}|{}".format(
self.code, settings.PASSWORD)).\
hexdigest()[:KEY_LENGTH]
return key
@property
def create_url(self):
params = urlencode(dict(code=self.code)) if hasattr(self, "code") \
else ""
url = "{}/create?{}".format(settings.BASE_URL, params)
return url
@property
def update_url(self):
params = urlencode(dict(key=self.key, code=self.code))
url = "{}/update?{}".format(settings.BASE_URL, params)
return url
@property
def cancel_url(self):
params = urlencode(dict(key=self.key, code=self.code))
url = "{}/cancel?{}".format(settings.BASE_URL, params)
return url
@property
def status_url(self):
params = urlencode(dict(key=self.key, code=self.code))
url = "{}/status?{}".format(settings.BASE_URL, params)
return url
@property
def status(self):
""" Return Customer status.
http://support.cheddargetter.com/kb/hosted-payment-pages/
hosted-payment-pages-setup-guide#status
:return str status: ('active'|'canceled'|'pending')
"""
return requests.get(self.status_url, verify=False).content
@classmethod
def get_all(cls):
return cls.client.get_all_customers()
def delete(self):
return self.client.delete_customer(self.code)
| pavlov99/mouse | mouse/models.py | models.py | py | 3,132 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "utils.namedtuple_as_dict",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "client.Client",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "set... |
43471654353 | from fastapi import APIRouter, Query, Depends, status
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
from pydantic.error_wrappers import ValidationError
from online_inference.model_load import get_model, make_prediction
from online_inference import schema_utils
from online_inference.schemas import Patient, ResponseTarget
testing_router = APIRouter()
@testing_router.post("/predict/one", response_model=ResponseTarget)
async def one(
age: int,
trestbps: int,
chol: int,
thalach: int,
oldpeak: float,
sex: str = Query(
enum=schema_utils.Sex.keys(),
description="Sex.",
),
cp: str = Query(
enum=schema_utils.ChestPain.keys(),
description="Chest pain type.",
),
fbs: str = Query(
enum=schema_utils.FastingBlood.keys(),
description="Fasting blood sugar > 120 mg/dl",
),
restecg: str = Query(
enum=schema_utils.EegResult.keys(),
description="Resting electrocardiographic results.",
),
exang: str = Query(
enum=schema_utils.ExInducedAngina.keys(),
description="Exercise induced angina.",
),
slope: str = Query(
enum=schema_utils.SlopeSegment.keys(),
description="The slope of the peak exercise ST segment.",
),
ca: int = Query(
enum=[0, 1, 2, 3],
description="Number of major vessels .",
),
thal: str = Query(
enum=schema_utils.Thal.keys(),
description="Have no idea what the fuck is this.",
),
model=Depends(get_model),
):
kwargs = locals().copy()
kwargs["sex"] = getattr(schema_utils.Sex, sex).value
kwargs["exang"] = getattr(schema_utils.ExInducedAngina, exang).value
kwargs["cp"] = getattr(schema_utils.ChestPain, cp).value
kwargs["fbs"] = getattr(schema_utils.FastingBlood, fbs).value
kwargs["restecg"] = getattr(schema_utils.EegResult, restecg).value
kwargs["slope"] = getattr(schema_utils.SlopeSegment, slope).value
kwargs["thal"] = getattr(schema_utils.Thal, thal).value
try:
patient = Patient(**kwargs)
except ValidationError as ve:
return JSONResponse(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
content=jsonable_encoder({"detail": ve.errors()}),
)
return make_prediction(model, [patient])[0]
| made-mlops-2022/mlops_LisinFedor | src/online_inference/testing_router.py | testing_router.py | py | 2,361 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "fastapi.Query",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "online_inference.schema_utils.Sex.keys",
"line_number": 22,
"usage_type": "call"
},
{
"api_name":... |
19862950052 | from cvxopt import matrix, solvers
import numpy as np
import cvxopt
'''
问题描述:
minimize xQx+px
subject Gx <= h
Ax = b
注:Q=[[2, .5], [.5, 1]],即xQx=2x1^2+x^2+x1*x2
'''
Q = 2*matrix(np.array([[2, .5], [.5, 1]])) # 一定要乘以2
p = matrix([1.0, 1.0])
G = matrix([[-1.0, 0.0], [0.0, -1.0]])
h = matrix([0.0, 0.0])
A = matrix([1.0, 1.0], (1, 2))
b = matrix(1.0)
sol = solvers.qp(Q, p, G, h, A, b)
print(sol['x'], sol['y']) | 08zhangyi/multi-factor-gm-wind-joinquant | 掘金多因子开发测试/算法编写模板/CVXOPT/cvx_opt示例.py | cvx_opt示例.py | py | 449 | python | en | code | 180 | github-code | 36 | [
{
"api_name": "cvxopt.matrix",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_num... |
16155570690 | import requests
from lxml import etree as et
import multiprocessing
import google.cloud.bigquery as bq
import os
import traceback
def position(arg):
l = arg
if l == 0:
pos = 1
return (pos)
else:
return (l + 1)
def foc_cum_call(url):
try:
return_list = [1]
response = requests.get(url)
tree = et.fromstring(response.content)
cum_srh = tree.xpath('//srh/text()')
cum_power_id = tree.xpath('//cumulativePower/@id')
if len(cum_power_id) > 0:
cum_href = tree.xpath('//href/text()')
cum_srh = tree.xpath('//srh/text()')
cum_start_date = tree.xpath('//interval/startDate/text()')
cum_end_date = tree.xpath('//interval/endDate/text()')
cum_total_power = tree.xpath('//totalPower/text()')
meter_list = tree.xpath('//meter/@id')
meter_list_len = len(meter_list)
rang = range(meter_list_len)
if meter_list_len > 0:
for l, i in zip(rang, meter_list):
interim_list = []
pos = position(l)
meter_id = tree.xpath("//meter[position()='%s'][@id='%s']/@id" % (pos, i))
meter_active = tree.xpath("//meter[position()='%s'][@id='%s']/@active" % (pos, i))
meter_serialnum = tree.xpath("//meter[position()='%s'][@id='%s']/serialNumber/text()" % (pos, i))
meter_provider = tree.xpath("//meter[position()='%s'][@id='%s']/provider/text()" % (pos, i))
meter_power = tree.xpath("//meter[position()='%s'][@id='%s']/meterPower/text()" % (pos, i))
meter_start_dt = tree.xpath(
"//meter[position()='%s'][@id='%s']/interval/startDate/text()" % (pos, i))
meter_end_dt = tree.xpath("//meter[position()='%s'][@id='%s']/interval/endDate/text()" % (pos, i))
if meter_power == ['0.0']:
meter_start_read = ['']
meter_end_read = ['']
meter_first_read_dt = ['']
meter_last_read_dt = ['']
else:
meter_start_read = tree.xpath(
"//meter[position()='%s'][@id='%s']/readings/reading[position()='1']/text()" % (pos, i))
meter_end_read = tree.xpath(
"//meter[position()='%s'][@id='%s']/readings/reading[position()='2']/text()" % (pos, i))
meter_first_read_dt = tree.xpath(
"//meter[position()='%s'][@id='%s']/readings/reading[position()='1']/@time" % (pos, i))
meter_last_read_dt = tree.xpath(
"//meter[position()='%s'][@id='%s']/readings/reading[position()='2']/@time" % (pos, i))
interim_list = [cum_srh, cum_power_id, cum_start_date[0], cum_end_date[0], cum_total_power,
meter_id, meter_active, meter_serialnum, meter_provider, meter_start_dt,
meter_end_dt, meter_start_read, meter_end_read, meter_first_read_dt,
meter_last_read_dt]
l = []
for item in interim_list:
l.append(''.join(item))
return_list.append(l)
except Exception as errmsg:
print(errmsg)
return_list = [1]
return return_list
if __name__ == '__main__':
p = 20
pool = multiprocessing.Pool(p)
try:
failed_list = []
success_list = []
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "noc_service_account_sr-dev-datahub.json"
project_id = 'sr-dev-datahub'
bigquery_client = bq.Client(project=project_id)
select_sql = """SELECT Agreement_Number__c,PTO__c FROM `sr-dev-datahub.Datahub.ORN_SERVICE_CONTRACT_V` order by 1 LIMIT 1"""
select_job = bigquery_client.query(select_sql)
select_results = select_job.result()
print("...............................", select_results)
for row in select_results:
srh = row[0]
endDate = row[1].strftime('%Y-%m-%d')
# url = 'https://foc.sunrun.com/foc/rest/v1/power?srh={}&endDate={}'.format(srh, endDate)
url = 'https://foc.sunrun.com/foc/rest/v1/power?srh={}&endDate={}'.format('03', '2018-11-01')
if len(srh) > 0 and len(endDate) > 0:
return_list = foc_cum_call(url)
print(return_list)
if return_list[0] == 1:
failed_list.append(url)
else:
success_list.append(url)
else:
failed_list.append(url)
print("failed list is : {}".format(failed_list))
print("success list is: {}".format(success_list))
except Exception as e:
print(traceback.format_exc())
| siva60/DataStructures | cumulative.py | cumulative.py | py | 5,114 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "lxml.etree.fromstring",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "multiprocessing.Pool",
... |
32824298169 | speed = 3
# --- Global game state -------------------------------------------------------
game_state = "alive"  # "alive" | "dead" | "win" -- selects which screen the main loop draws
sythe = "standard scythe"  # HUD label for the scythe tier (becomes "money scythe" at 10+ coins)
import pygame
import time  # NOTE(review): appears unused in this script
import sys  # NOTE(review): appears unused in this script
import random
money = 0  # coins collected; gates the scythe upgrade and enemy kills
kills = 0  # kill counter (never incremented -- the increment in the main loop is commented out)
randX = random.randint(0,1626)  # NOTE(review): unused; the death screen uses randXYs instead
randY = random.randint(0,846)  # NOTE(review): unused
pygame.init()
#This is where we set up the window that displays our game. The resolution is set here
screen = pygame.display.set_mode((1626,846))
#This sets the windows caption
pygame.display.set_caption("Maze Game")
clock = pygame.time.Clock()  # frame-rate limiter (clock.tick(30) in the main loop)
# --- Sprite / tile images ----------------------------------------------------
platformerwall = pygame.image.load('platformer_wall.png')
wall = pygame.image.load('litterally_nothing.png')  # invisible filler blitted for every wall rect
acctualwall = pygame.image.load("Wall_revamped_resoluted.png")
#player = pygame.image.load('man-1.png')
normal_player = pygame.image.load('man-1.png')
doorimage = pygame.image.load('door_revamped_resoluted.png')
munniimage = pygame.image.load('gamemunni.png')
rich_player = pygame.image.load('money!.png')
crateimage = pygame.image.load('crate_resoluted.png')
evil_wall = pygame.image.load('evilwall_resoluted.png')
error = pygame.image.load('you_got_mail.png')  # overwritten by the enlarged variant below
finish_image = pygame.image.load('finishsimpl.png')
player_left = pygame.image.load("character_left.png")
player_right = pygame.image.load("character_right.png")
player = pygame.image.load("character_left.png")  # current player sprite; swapped on movement/E keys
player_sythe_right = pygame.image.load("character_sythe_right.png")
player_sythe_left = pygame.image.load("character_sythe_left.png")
m_sythe_left = pygame.image.load("money_sythe_left.png")
m_sythe_right = pygame.image.load("money_sythe_right.png")
chest_rightimage = pygame.image.load("closed_chest.png")  # NOTE(review): unused in this script
painting_image = pygame.image.load("painting.png")  # NOTE(review): unused in this script
blood_sythe_left = pygame.image.load("b_sythe_left.png")  # NOTE(review): unused in this script
blood_sythe_right = pygame.image.load("b_sythe_right.png")  # NOTE(review): unused in this script
error = pygame.image.load("you_got_mail_enlargened.png")  # death-screen popup image
ded = pygame.image.load("youdieddedscreen.png")  # NOTE(review): unused; death screen blits `error` instead
win = pygame.image.load("the_you_win_screen.png")
Jbarrierimage = pygame.image.load("wall_revamped_resoluted.png")
Sbarrierimage = pygame.image.load("wol.png")
grassimage = pygame.image.load("grazz.png")
jumpimage = pygame.image.load("sprheng.png")
health = 100  # NOTE(review): unused in this script
spawns = 1  # remaining respawns; game over once this drops below 0
location = (100,0)  # placeholder; replaced by a pygame.Rect when the "C" tile is parsed
x = y = 0  # tile cursor for the level parser below
# Level map: one string per 72px-tall row, one character per 72px-wide tile.
# Tile legend (see the parser loop below):
#   "#" wall  "G" grass floor  "C" player spawn  "D" door  "F" finish
#   "K" enemy  "J" jump barrier  "M" coin  "S" thin barrier  "E" kill wall
#   "P" platform  "U" jump pad  "T" invisible wall
level = ["J                     #",
         "#                     #",
         "#                    DF",
         "#                     #",
         "#                     #",
         "#                     #",
         "#                     #",
         "#                 K#",
         "#          K#",
         "#C         K#",
         "#                     #",
         "GGGGGGGGGGGGGGGGGGGGGGG"]
walls = []  # solid collision rects (all wall-like tiles)
doors = []  # decorative door rects
munniz = []  # collectable coins
crates = []  # crates that pay out 1-4 coins
ewalls = []  # kill walls: touching one costs a spawn
finishes = []  # reaching one sets game_state to "win"
enemies = []  # dicts: {"health", "rect", "speed", "image"}
turnings = []  # rects that reverse an enemy's patrol direction
chest_rights = []  # NOTE(review): unused in this script
paintings = []  # NOTE(review): unused in this script
randXYs = []  # random screen positions for the death-screen popups
Jbarriers = []  # jump-barrier rects
Sbarriers = []  # 72x5 strips that nudge the player downward
platforms = []  # platform rects drawn with the platform art
Awalls = []  # "#" wall rects drawn with the acctualwall image
grassess = []  # grass-floor rects
Jumps = []  # jump pads; consumed on touch
# Pre-roll 20 random positions for the "you got mail" death-screen popups.
for i in range (20):
    xy = (random.randint(300,1326),random.randint(100,746))
    randXYs.append(xy)
# Parse the ASCII level map into collision/draw rects.  The tile cursor (x, y)
# advances 72px per tile.  Tile characters are mutually exclusive, so an
# if/elif chain is used.
# Bug fixes vs. the original:
#   * a second, duplicate `if row == "P"` branch appended every platform twice;
#   * `x = 0` sat outside the outer loop, so the column cursor was never reset
#     per row and every row after the first was laid out off-screen.
for col in level:          # one string per map row
    for row in col:        # one character per tile
        if row == "T":     # invisible wall (collision only)
            rect1 = pygame.Rect(x, y, 72, 72)
            walls.append(rect1)
        elif row == "C":   # player spawn -- this Rect becomes the player's hitbox
            location = pygame.Rect(87, 0, 32, 32)
        elif row == "D":   # decorative door
            door = pygame.Rect(x, y, 72, 72)
            doors.append(door)
        elif row == "M":   # coin, offset toward the middle of the tile
            munni = pygame.Rect(x + 10, y + 15, 32, 32)
            munniz.append(munni)
        elif row == "S":   # thin barrier strip (72x5)
            Sbarrier = pygame.Rect(x, y, 72, 5)
            Sbarriers.append(Sbarrier)
        elif row == "E":   # kill wall: touching it costs a spawn
            ewall = pygame.Rect(x, y, 72, 72)
            ewalls.append(ewall)
        elif row == "F":   # level finish
            finish = pygame.Rect(x, y, 72, 72)
            finishes.append(finish)
        elif row == "K":   # patrolling enemy
            # NOTE(review): the sprite is re-loaded for every enemy; it could
            # be loaded once and shared.
            enemy = {
                "health": 100,
                "rect": pygame.Rect(x, y, 72, 72),
                "speed": 3,
                "image": pygame.image.load('ye_eye_kite.png')
            }
            enemies.append(enemy)
        elif row == "P":   # platform: solid, turns enemies, drawn with platform art
            rect1 = pygame.Rect(x, y, 72, 72)
            walls.append(rect1)
            turning = pygame.Rect(x, y, 72, 72)
            turnings.append(turning)
            platform = pygame.Rect(x, y, 72, 72)
            platforms.append(platform)  # appended exactly once (was duplicated)
        elif row == "#":   # standard wall
            rect1 = pygame.Rect(x, y, 72, 72)
            walls.append(rect1)
            turning = pygame.Rect(x, y, 72, 72)
            turnings.append(turning)
            Awall = pygame.Rect(x, y, 72, 72)
            Awalls.append(Awall)
        elif row == "G":   # grass floor: solid, turns enemies, drawn with grass art
            rect1 = pygame.Rect(x, y, 72, 72)
            walls.append(rect1)
            turning = pygame.Rect(x, y, 72, 72)
            turnings.append(turning)
            grass = pygame.Rect(x, y, 72, 72)
            grassess.append(grass)
        elif row == "J":   # jump barrier (drawn with Jbarrierimage; collision only prints)
            Jbarrier = pygame.Rect(x, y, 72, 72)
            Jbarriers.append(Jbarrier)
        elif row == "U":   # jump pad (consumed on touch)
            Jump = pygame.Rect(x, y, 72, 72)
            Jumps.append(Jump)
        x = x + 72   # next column
    y = y + 72       # next row
    x = 0            # bug fix: reset the column cursor for each row
vx = 0.2  # horizontal speed in px per ms (scaled by dt each frame)
vy = 0  # vertical velocity; gravity adds 0.001*dt per frame, clamped to 0.4
clock = pygame.time.Clock()  # NOTE(review): redundant -- shadows the clock created above
location.x = 82  # place the player at the bottom-left start position
location.y = 760
game_over = False  # set True when the window is closed
canjump = False  # set True while the player is standing on a wall
#12 x 23 (72)  -- level grid: 12 rows x 23 columns of 72px tiles
# ---------------------------------------------------------------------------
# Main loop: one iteration per frame (capped at 30 FPS while alive).
# game_state selects between gameplay ("alive"), the death screen ("dead")
# and the win screen ("win").
# ---------------------------------------------------------------------------
while not game_over:
    # This checks if the cross in the top right has been pressed (do not remove)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game_over = True
    if game_state == "alive":
        dt = clock.tick(30)  # ms since last frame; also limits the frame rate
    if game_state == "alive":
        # HUD text.  NOTE(review): mytext/mytextbottom are rendered but never
        # blitted to the screen -- confirm whether the HUD was meant to be drawn.
        myfont = pygame.font.SysFont("ariel",32)
        mytext = myfont.render("money: " + str(money) + " spawns: " + str(spawns) + " scythe: " + str(sythe),1,(0,0,0))
        myfont2 = pygame.font.SysFont("ariel",25)
        mytextbottom = myfont2.render("collecting money may help in upgrading the scythe",1,(0,0,0))
        keys = pygame.key.get_pressed()
        # Thin barriers push the player down one pixel per frame.
        for s in Sbarriers:
            if location.colliderect(s):
                location.y = location.y +1
        # Horizontal movement, clamped against walls on the colliding side.
        if keys[pygame.K_a]:
            location.x = location.x - vx*dt
            for w in walls:
                if location.colliderect(w):
                    location.midleft=(w.midright[0],location.midleft[1])
        if keys[pygame.K_d]:
            location.x = location.x + vx*dt
            for w in walls:
                if location.colliderect(w):
                    location.midright=(w.midleft[0],location.midright[1])
            # (bug fix: removed a self-cancelling "Jbarrier.x += speed;
            # Jbarrier.x -= speed" pair that had no effect)
        # Pick the player sprite for the current facing / scythe state.
        if keys[pygame.K_d]:
            player = player_right
            if keys[pygame.K_e]:
                if money < 10:
                    player = player_sythe_right
                if money >= 10:
                    player = m_sythe_right
        if keys[pygame.K_a]:
            player = player_left
            if keys[pygame.K_e]:
                if money < 10:
                    player = player_sythe_left
                if money >= 10:
                    player = m_sythe_left
        # Jumping and gravity.
        if keys[pygame.K_SPACE] and canjump:
            vy = -0.4
        vy += 0.001 * dt       # gravity
        if vy > 0.4:
            vy = 0.4           # terminal fall speed
        location.y += vy * dt
        location.y+=speed      # constant downward bias keeps the player glued to floors
        for w in walls:
            if location.colliderect(w):
                location.midbottom=(location.midbottom[0],w.midtop[1])
                canjump = True
        if money >= 10:
            sythe = "money scythe"
        # Debug / cheat keys.
        if keys[pygame.K_m]:   # instant riches
            money = 100
        if keys[pygame.K_t]:   # nudge right half a tile
            location.x = location.x +36
        if keys[pygame.K_u]:   # nudge down half a tile
            location.y = location.y +36
        if keys[pygame.K_r]:   # reset to the start position and forfeit money
            location.x = 72
            location.y = 754
            money = 0
        # Coin pickup (iterate over a copy so remove() is safe -- bug fix:
        # the original mutated each list while iterating it).
        for m in munniz[:]:
            if location.colliderect(m):
                munniz.remove(m)
                money = money +1
                print(money)
        # Kill walls cost a spawn and respawn the player at the top.
        for e in ewalls:
            if location.colliderect(e):
                spawns = spawns -1
                location.x = 72
                location.y = 0
        for f in finishes:
            if location.colliderect(f):
                game_state = "win"
        for J in Jbarriers:
            if location.colliderect(J):
                print("A")  # NOTE(review): debug trace; jump barriers do nothing else
        # Jump pads launch the player upward and are consumed.
        for u in Jumps[:]:
            if location.colliderect(u):
                vy = -0.8
                vy += 0.001 * dt
                if vy > 0.4:
                    vy = 0.4
                Jumps.remove(u)
        # Crates pay out 1-4 coins.
        for s in crates[:]:
            if location.colliderect(s):
                money = money +random.randint(1,4)
                print(money)
                crates.remove(s)
        # Enemy contact: with the scythe drawn (E or A held) and the money
        # scythe owned, the enemy dies; otherwise the player loses a spawn.
        # NOTE(review): at exactly money == 10 both branches fire (enemy dies
        # AND a spawn is lost) -- confirm whether "<= 10" should be "< 10".
        for enemy in enemies[:]:
            if location.colliderect(enemy["rect"]):
                if keys[pygame.K_e] or keys[pygame.K_a]:
                    if money >= 10:
                        enemies.remove(enemy)
                        # kills = kills + 1  # TODO: kill counter never wired up
                    if money <= 10:
                        spawns = spawns -1
                        location.x = 72
                        location.y = 754
                else:
                    spawns = spawns -1
                    location.x = 72
                    location.y = 754
        # Enemy patrol: march horizontally, reverse on any "turning" rect.
        for enemy in enemies:
            enemy['rect'].x = enemy['rect'].x + enemy['speed']
            for turning in turnings:
                if turning.colliderect(enemy["rect"]):
                    enemy["speed"] *= -1
                    enemy['rect'].x = enemy['rect'].x + enemy['speed']
        if spawns <= -1:
            game_state = "dead"
        # ---- Drawing (alive) ----
        screen.fill((255,255,255))
        for w in walls:
            screen.blit(wall,w)  # invisible filler image
        screen.blit(player,location)
        for m in munniz:
            screen.blit(munniimage,m)
        for d in doors:
            screen.blit(doorimage,d)
        for s in crates:
            screen.blit(crateimage,s)
        for e in ewalls:
            screen.blit(evil_wall,e)
        for f in finishes:
            screen.blit(finish_image,f)
        for J in Jbarriers:
            screen.blit(Jbarrierimage,J)
        for S in Sbarriers:
            screen.blit(Sbarrierimage,S)  # bug fix: was blitting at the stale lowercase "s"
        for p in platforms:
            screen.blit(platformerwall,p)
        for a in Awalls:
            screen.blit(acctualwall,a)
        for g in grassess:
            screen.blit(grassimage,g)
        for u in Jumps:
            screen.blit(jumpimage,u)
        for enemy in enemies:
            screen.blit(enemy["image"],enemy["rect"])
    # Death screen: scatter the pre-rolled popup images over the last frame.
    if game_state == "dead":
        for i in range(20):
            screen.blit(error,randXYs[i])
    if game_state == "win":
        screen.blit(win,(1,1))
    #LEAVE THAT LAST (THE UPDATE)
    pygame.display.update()
#This will quit the program when game_over is true
pygame.quit()
| GamesCreatorsClub/GCC-games-online | games/henrys-game/Main-platformer coppy.py | Main-platformer coppy.py | py | 11,324 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
... |
72594874024 | from django.urls import path
from .views import *  # NOTE(review): star import -- login, dashboard, login_validate and logout presumably come from here; verify
app_name = 'usermanagement'  # URL namespace: reverse as "usermanagement:<name>"
# Route table for the usermanagement app.
urlpatterns = [
    path('', login, name='login'),
    path('dashboard', dashboard, name='dashboard'),
    path('validate', login_validate, name='login_validate'),
    path('logout/', logout, name='logout'),
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.