blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5d268ade1ba4de5098f9b5fbbeb8dbc6846176be | 8fd9d353cca0fd220a901ed3ae78908cbac2d9f6 | /account-service-v2/scenario-ui/account-sort/account-sort.py | 09e471e6ddf53c3752745e5d60de9a8f693c118f | [] | no_license | pradipgit/api_automation_test | 04ba9e0c676fda0a8e112439f9cba847b4e70ebe | eab7ae9e10dc8e5422fadbb8054b15bd54dd8873 | refs/heads/master | 2022-06-21T04:42:12.452467 | 2020-05-14T05:40:15 | 2020-05-14T05:40:15 | 263,824,948 | 0 | 0 | null | 2020-05-14T05:40:53 | 2020-05-14T05:37:33 | Python | UTF-8 | Python | false | false | 8,594 | py | import time
import subprocess
import utils
from utils import assignOrder
from utils import assertEqual
from utils import assertContains
from utils import randomString
import threading
import queue
import random
from collections import OrderedDict
import logging
import pprint
import configparser
import json
import random
import requests
import datetime
import random
# ---- module-level test state --------------------------------------------
# NOTE(review): 'global' at module scope is a no-op; the statements below
# only document that these names are shared with the test methods.
global status
# Per-suite pass/fail results, keyed by test name (e.g. 'CAM-APITest').
status = {}
logger = logging.getLogger("Test Run")
config = configparser.ConfigParser()
config.read('settings.conf')
ResponseTime = config.get('params', 'response_time')
# Overlay test data onto the same parser; later files shadow earlier keys.
config.read('testdata.conf')
now = datetime.datetime.now()
# db=config.get('params','db')
# invalidUrl =config.get('params', 'invalidUrl')
# Random suffix available for tests needing unique values - TODO confirm use.
x = random.randint(0, 50000)
global id_cred,metadata,audit_log_download_id
id_cred={}
metadata={}
audit_log_download_id={}
class AccountSort(object):
    """CORE-1909 audit-log sorting test cases.

    Each ``CORE1909_*`` method exercises the audit-log sort endpoint for one
    (sort order, sort field) combination and records the verdict in the
    module-level ``status`` dict under the 'CAM-APITest' key.  The fourteen
    original methods shared an identical body; that body now lives in the
    private ``_run_sort`` helper.
    """

    def __init__(self, client):
        # Router API client used to issue the sort requests.
        self.api_client = client
        # Random invoice id, kept for parity with the sibling test suites.
        self.invoice_id = random.randint(100000, 999999)

    def _run_sort(self, direction, field, expected_status=200):
        """Issue one sortByAction request and record/return the verdict.

        :param str direction: sort order, 'asc' or 'desc' (negative test
            cases deliberately pass an invalid value).
        :param str field: field name to sort on.
        :param int expected_status: HTTP status code the response must equal.
        :return bool: True when the response code matched, else False.
        """
        try:
            resp, body = self.api_client.sortByAction(direction, field)
            print(resp)
            logger.info("API response:" + str(resp))
            passed = bool(assertEqual(resp, expected_status))
            status['CAM-APITest'] = passed
            return passed
        except Exception:
            # Any client/transport failure counts as a test failure.
            # (Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate.)
            status['CAM-APITest'] = False
            return False

    ################### CORE-1909 ############################
    @assignOrder(336)
    def CORE1909_sortByActionDateDescending(self):
        return self._run_sort('desc', 'initiatedDate')

    @assignOrder(337)
    def CORE1909_sortByActionDateAscending(self):
        return self._run_sort('asc', 'initiatedDate')

    @assignOrder(338)
    def CORE1909_sortByActionTypeDescending(self):
        return self._run_sort('desc', 'messageType')

    @assignOrder(339)
    def CORE1909_sortByActionTypeAscending(self):
        return self._run_sort('asc', 'messageType')

    @assignOrder(340)
    def CORE1909_sortByComponentDescending(self):
        return self._run_sort('desc', 'component')

    @assignOrder(341)
    def CORE1909_sortByComponentAscending(self):
        return self._run_sort('asc', 'component')

    @assignOrder(342)
    def CORE1909_sortBySubComponentDescending(self):
        return self._run_sort('desc', 'subcomponent')

    @assignOrder(343)
    def CORE1909_sortBySubComponentAscending(self):
        return self._run_sort('asc', 'subcomponent')

    @assignOrder(344)
    def CORE1909_sortByUserDescending(self):
        return self._run_sort('desc', 'userId')

    @assignOrder(345)
    def CORE1909_sortByUserAscending(self):
        return self._run_sort('asc', 'userId')

    @assignOrder(346)
    def CORE1909_sortByUserTeamDescending(self):
        return self._run_sort('desc', 'teamId')

    @assignOrder(347)
    def CORE1909_sortByUserTeamAscending(self):
        return self._run_sort('asc', 'teamId')

    @assignOrder(348)
    def CORE1909_sortByWrongUserTeamAscending(self):
        # Negative case: an unknown sort field must be rejected with HTTP 400.
        return self._run_sort('asc', 'UserTeamXYZ', expected_status=400)

    @assignOrder(349)
    def CORE1909_sortByWrongSortTypeDescending(self):
        # Negative case: an invalid sort order must be rejected with HTTP 400.
        return self._run_sort('xyz', 'UserXYZ', expected_status=400)
| [
"prkarmak@in.ibm.com"
] | prkarmak@in.ibm.com |
9094b2a4f588f9d494e3c0207349a8b1663215b8 | 80de9393fd0c3d1efddc1f91609e67d9950a18b2 | /code/model.py | d8bbf94c0b7bcd946575b4283c8b2e983500019a | [] | no_license | zhang-cugb/A-data-driven-proxy-to-Stokes-flow-in-porous-media | 921c8d9db315538b0be0b34927b0792b3efd744f | e4b2d255e05054c82023513ca791932dd80f23e0 | refs/heads/master | 2021-10-25T05:06:44.173233 | 2019-04-01T15:01:19 | 2019-04-01T15:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,808 | py | import torch
import torch.nn as nn
import functools
import torch.utils.data as data
import os
import numpy as np
import torchvision.transforms as transforms
###############################################################################
# Functions
###############################################################################
def weights_init(m):
    """Initialize a module in place, DCGAN-style.

    Conv-family layers (anything whose class name contains 'Conv', including
    ConvTranspose2d) get N(0, 0.02) weights; BatchNorm2d layers get
    N(1, 0.02) weights and zeroed biases.  Other modules are left untouched.
    Intended for use with ``net.apply(weights_init)``.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm2d' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def get_norm_layer(norm_type='instance'):
    """Return a normalization-layer constructor for the given type.

    :param str norm_type: 'batch' for affine BatchNorm2d or 'instance' for
        non-affine InstanceNorm2d.
    :return: a ``functools.partial`` wrapping the norm class.
    :raises NotImplementedError: for an unknown ``norm_type`` (previously an
        unknown value crashed later with UnboundLocalError).
    """
    if norm_type == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
    elif norm_type == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer
def define_net(input_nc, output_nc, ngf, norm='batch', use_dropout=False, gpu_ids=[], model_name='UNET'):
    """Build, place on GPU (if requested) and weight-initialize a generator.

    :param input_nc: number of input channels.
    :param output_nc: number of output channels.
    :param ngf: number of filters in the first conv layer.
    :param norm: normalization type, 'batch' or 'instance'.
    :param use_dropout: enable dropout inside the generator.
    :param gpu_ids: GPU device ids; the first id hosts the model.  (Mutable
        default kept for interface compatibility; it is only read here.)
    :param model_name: one of 'UNET', 'RESNET' or 'URESNET'.
    :return: the initialized generator module.
    :raises NotImplementedError: for an unknown ``model_name`` (previously
        this fell through with ``net`` still None and crashed on
        ``net.apply``).
    """
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    if model_name == 'UNET':
        net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    elif model_name == 'RESNET':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
    elif model_name == 'URESNET':
        net = UResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=4, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('model [%s] is not implemented' % model_name)
    if len(gpu_ids) > 0:
        net.cuda(device=gpu_ids[0])
    net.apply(weights_init)
    return net
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level: downsample -> submodule -> upsample, plus a skip.

    Non-outermost levels concatenate their input onto their output along the
    channel axis in ``forward``, forming the U-Net skip connection.

    :param outer_nc: channels entering/leaving this level.
    :param inner_nc: channels used inside this level.
    :param submodule: the nested (deeper) UnetSkipConnectionBlock, if any.
    :param outermost: True for the top level (no skip concat; optional Tanh).
    :param innermost: True for the deepest level (no submodule).
    :param norm_layer: normalization constructor (BatchNorm2d/InstanceNorm2d).
    :param use_dropout: append Dropout(0.5) on intermediate levels.
    :param my_innermost: treat this intermediate level as if its submodule
        does not double the channel count (used by UResnetGenerator, whose
        bottleneck is a plain ResNet stack with no skip concat).
    :param transposed: True -> ConvTranspose2d upsampling; False ->
        Upsample(size) followed by a 3x3 conv.
    :param size: target spatial size for Upsample when transposed is False.
    :param add_tanh: append Tanh on the outermost level (transposed=False
        path only).
    """
    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False, my_innermost=False, transposed=True,
                 size=None, add_tanh=True):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # Shared building blocks; the stride-2 conv halves the spatial size.
        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # Top level: submodule output arrives concatenated with its own
            # skip, hence inner_nc * 2 channels into the up-conv.
            if transposed:
                upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
                down = [downconv]
                up = [uprelu, upconv, nn.Tanh()]
            else:
                upsamp = nn.Upsample(size=size)
                upconv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1)
                down = [downconv]
                up = [uprelu, upsamp, upconv]
                if add_tanh:
                    up.append(nn.Tanh())
            model = down + [submodule] + up
        elif innermost:
            # Deepest level: no submodule, so the up-conv sees inner_nc only.
            if transposed:
                upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1)
                down = [downrelu, downconv]
                up = [uprelu, upconv, upnorm]
            else:
                upsamp = nn.Upsample(size=size)
                upconv = nn.Conv2d(inner_nc, outer_nc, kernel_size=3, stride=1, padding=1)
                down = [downrelu, downconv]
                up = [uprelu, upsamp, upconv, upnorm]
            model = down + up
        else:
            # Intermediate level: the submodule normally doubles the channel
            # count via its skip concat, except in the my_innermost case.
            mul = 2
            if my_innermost:
                mul = 1
            if transposed:
                upconv = nn.ConvTranspose2d(inner_nc * mul, outer_nc, kernel_size=4, stride=2, padding=1)
                down = [downrelu, downconv, downnorm]
                up = [uprelu, upconv, upnorm]
            else:
                upsamp = nn.Upsample(size=size)
                upconv = nn.Conv2d(inner_nc * mul, outer_nc, kernel_size=3, stride=1, padding=1)
                down = [downrelu, downconv, downnorm]
                up = [uprelu, upsamp, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
        self.model = nn.Sequential(*model)
        self.outer_nc = outer_nc
        self.inner_nc = inner_nc

    def forward(self, x):
        # Outermost level returns the plain output; every other level
        # concatenates its input to realize the skip connection.
        if self.outermost:
            out = self.model(x)
        else:
            out = torch.cat([self.model(x), x], 1)
        return out
# Define the corresponding class for your dataset
class my_dataset(data.Dataset):
    """Paired density -> x-velocity dataset stored as two .npy stacks.

    Loads ``den.npy`` (inputs) and ``vx.npy`` (targets) from *root_path*,
    min-max normalizes each stack to [0, 1], and exposes the split selected
    by *train_val_test* (0=train, 1=validation, 2=test; any other value
    keeps everything).  Every sample access randomly flips both arrays
    along axis 0 as augmentation.
    """

    def __init__(self, root_path, train_sample_num, val_sample_num, train_val_test=0):
        assert os.path.isdir(root_path), '%s is not a valid directory' % root_path
        # Inputs, normalized to [0, 1]; the raw extrema are kept so callers
        # can undo the scaling.
        self.den = np.load(os.path.join(root_path, 'den.npy'))
        self.den_max = self.den.max()
        self.den_min = self.den.min()
        self.den = (self.den - self.den_min) / (self.den_max - self.den_min)
        # Targets: x-velocity fields, normalized the same way.
        self.res = np.load(os.path.join(root_path, 'vx.npy'))
        self.res_max = self.res.max()
        self.res_min = self.res.min()
        self.res = (self.res - self.res_min) / (self.res_max - self.res_min)
        # Pick the requested split.
        if train_val_test == 0:
            split = slice(0, train_sample_num)
        elif train_val_test == 1:
            split = slice(train_sample_num, train_sample_num + val_sample_num)
        elif train_val_test == 2:
            split = slice(train_sample_num + val_sample_num, None)
        else:
            split = slice(None)
        self.den = self.den[split]
        self.res = self.res[split]
        self.size = self.den.shape[0]
        self.transform = transforms.Compose([transforms.ToTensor()])

    def __getitem__(self, index):
        # A single RNG draw per access decides whether this sample is
        # flipped; both input and target must flip together.
        do_flip = np.random.uniform(0, 1) > 0.5
        pos = index % self.size
        img = self.den[pos]
        out = self.res[pos]
        if do_flip:
            img = np.flip(img, axis=0).copy()
            out = np.flip(out, axis=0).copy()
        img = self.transform(img).type(torch.FloatTensor)
        out = self.transform(out).type(torch.FloatTensor)
        return img, out

    def __len__(self):
        # Number of samples in the selected split.
        return self.size
class ResnetGenerator(nn.Module):
    """Johnson-style ResNet generator.

    Architecture: 7x7 reflection-padded stem, two stride-2 downsampling
    convs, ``n_blocks`` residual blocks at the bottleneck, two transposed
    convs back to full resolution, and a final 3x3 conv to ``output_nc``
    channels.  Note: the usual trailing Tanh is deliberately commented out,
    so outputs are unbounded.
    """
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect'):
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # 7x7 stem; reflection padding keeps the spatial size unchanged.
        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
                 norm_layer(ngf),
                 nn.ReLU(True)]
        # Two stride-2 convs: channels double, spatial size halves each time.
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                stride=2, padding=1),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU(True)]
        # Residual bottleneck at ngf * 4 channels.
        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout)]
        # Mirror-image upsampling back to the input resolution.
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2)),
                      nn.ReLU(True)]
        model += [nn.ReflectionPad2d(1)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=3, padding=0)]
        # model += [nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        # Use multi-GPU data parallelism only when ids are configured and
        # the input already lives on the GPU.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
    """Residual block: ``forward(x) = x + conv_block(x)``.

    The conv block is pad -> 3x3 conv -> norm -> ReLU [-> Dropout(0.5)]
    -> pad -> 3x3 conv -> norm, where the padding style is selected by
    ``padding_type`` ('reflect', 'replicate' or 'zero').  The previously
    duplicated padding-selection logic now lives in ``_padding``.
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout)

    @staticmethod
    def _padding(padding_type):
        """Return ([pad layers], conv padding) for the given padding mode."""
        if padding_type == 'reflect':
            return [nn.ReflectionPad2d(1)], 0
        if padding_type == 'replicate':
            return [nn.ReplicationPad2d(1)], 0
        if padding_type == 'zero':
            return [], 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):
        """Assemble the two-conv residual branch as an nn.Sequential."""
        pad_layers, p = self._padding(padding_type)
        conv_block = list(pad_layers)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                       norm_layer(dim),
                       nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        pad_layers, p = self._padding(padding_type)
        conv_block += list(pad_layers)
        # Second conv has no activation; the residual add follows it.
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                       norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        out = x + self.conv_block(x)
        return out
class UResnetGenerator(nn.Module):
    """U-Net generator with a ResNet bottleneck.

    ``n_blocks`` residual blocks sit at the bottom of a six-level U-Net
    built from UnetSkipConnectionBlock levels that upsample with
    Upsample+Conv (transposed=False) back to ``input_size``.  The levels
    are constructed from the innermost outwards.

    :param input_nc: number of input channels.
    :param output_nc: number of output channels.
    :param ngf: base filter count; deepest level runs at ngf * 32
        (the inline "= 512" comment assumes ngf == 16 - note the default
        ngf is 64).
    :param norm_layer: normalization constructor.
    :param use_dropout: enable Dropout in the residual blocks.
    :param n_blocks: number of ResNet blocks in the bottleneck.
    :param gpu_ids: GPU ids for data-parallel forward.
    :param padding_type: padding style passed to ResnetBlock.
    :param input_size: spatial size of the (square) input images; each level
        upsamples to input_size / 32, /16, /8, /4, /2 and finally full size.
    """
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=True, n_blocks=6, gpu_ids=[], padding_type='reflect', input_size=256):
        assert(n_blocks >= 0)
        super(UResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # Bottleneck: a plain stack of residual blocks at ngf * 32 channels.
        model = []
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * 32, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout)]  # ngf*32 = 512
        unet_block = nn.Sequential(*model)
        # Wrap the bottleneck in U-Net levels from innermost to outermost;
        # each level halves the channels and doubles the spatial size.
        # my_innermost=True: the ResNet stack does not concat a skip, so its
        # output is ngf*32 channels, not doubled.
        unet_block = UnetSkipConnectionBlock(ngf * 16, ngf * 32, unet_block, my_innermost=True, norm_layer=norm_layer, transposed=False, size=(int(input_size/32),int(input_size/32)))
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 16, unet_block, norm_layer=norm_layer, transposed=False, size=(int(input_size/16),int(input_size/16)))
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, unet_block, norm_layer=norm_layer, transposed=False, size=(int(input_size/8 ),int(input_size/8)))
        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer, transposed=False, size=(int(input_size/4 ),int(input_size/4)))
        unet_block = UnetSkipConnectionBlock(ngf , ngf * 2, unet_block, norm_layer=norm_layer, transposed=False, size=(int(input_size/2 ),int(input_size/2)))
        # add_tanh=False: raw (unbounded) outputs, matching ResnetGenerator.
        unet_block = UnetSkipConnectionBlock(output_nc, ngf, unet_block, outermost=True, norm_layer=norm_layer, transposed=False, size=(input_size ,input_size), add_tanh=False)  # model += [nn.Tanh()]
        self.model = unet_block

    def forward(self, input):
        # Use multi-GPU data parallelism only when ids are configured and
        # the input already lives on the GPU.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
| [
"noreply@github.com"
] | noreply@github.com |
8534041473d28f92fb8db6079f28b29f4e1c7743 | cae9ca1dda110cd6f65d5021c5891fdee76ec6fe | /day2/set/2.py | e7bb3bc3680e0158226f7a9475d6dce754b58602 | [] | no_license | shobhit-nigam/yagami | fb33d6de76a698a160f9e8df9d7d9f5b836797d8 | 283e2a464f74ac07c21ae7095b9a45fa632aa38a | refs/heads/main | 2023-07-04T09:46:51.057558 | 2021-08-10T05:13:27 | 2021-08-10T05:13:27 | 391,846,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | basket_a = {'apple', 'banana', 'pear', 'apple', 'kiwi', 'banana', 'avocado'}
basket_b = {'orange', 'plum', 'grapes', 'apple', 'pear', 'raspberry'}
print(type(basket_a))
print("basket_a =", basket_a)
print("basket_b =", basket_b)
print("basket_a[2] =", basket_a[2])
| [
"noreply@github.com"
] | noreply@github.com |
654c3bc950e7ddde3eaff1bddd8c9718702a2352 | bfc2ba097b164af668efa29f883101673668456e | /nets/centernet_training.py | 109a2e753890bf22328bb9efcd06e247931de674 | [] | no_license | Sharpiless/Paddlepaddle-CenterNet | b4892e1ab85a65f655b44fc6699e61315f5a0274 | b02bca6bff55054bdb29ba370ac52b9e8951045a | refs/heads/main | 2023-06-17T17:22:35.265697 | 2021-07-17T02:46:33 | 2021-07-17T02:46:33 | 386,817,805 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,313 | py | import paddle
import paddle.nn.functional as F
def focal_loss(pred, target):
    """Penalty-reduced pixel-wise focal loss for CenterNet heatmaps.

    :param pred: predicted heatmap, NCHW; transposed to NHWC here to match
        the target layout.
    :param target: ground-truth Gaussian heatmap in NHWC with values in
        [0, 1]; cells equal to exactly 1 mark object centers (positives).
    :return: scalar loss tensor.
    """
    pred = paddle.transpose(pred, [0,2,3,1])
    # pred = pred.permute(0,2,3,1)
    #-------------------------------------------------------------------------#
    #   Find the positive and negative samples of each image:
    #   each ground-truth box contributes exactly one positive sample;
    #   every other feature-map cell is a negative sample.
    #-------------------------------------------------------------------------#
    # pos_inds = target.equal(1).float()
    pos_inds = target == 1
    pos_inds = pos_inds.astype('float32')
    # neg_inds = target.lt(1).float()
    neg_inds = target < 1
    neg_inds = neg_inds.astype('float32')
    #-------------------------------------------------------------------------#
    #   Negatives near a positive center get smaller weights via the
    #   (1 - target)^4 term.
    #-------------------------------------------------------------------------#
    neg_weights = paddle.pow(1 - target, 4)
    # Clamp predictions so log() stays finite.
    pred = paddle.clip(pred, 1e-6, 1 - 1e-6)
    #-------------------------------------------------------------------------#
    #   Focal loss: hard examples get larger weights, easy ones smaller.
    #-------------------------------------------------------------------------#
    pos_loss = paddle.log(pred) * paddle.pow(1 - pred, 2) * pos_inds
    neg_loss = paddle.log(1 - pred) * paddle.pow(pred, 2) * neg_weights * neg_inds
    #-------------------------------------------------------------------------#
    #   Normalize by the number of positives; with no positives, only the
    #   negative term is used.
    #-------------------------------------------------------------------------#
    num_pos = pos_inds.astype('float32').sum()
    pos_loss = pos_loss.sum()
    neg_loss = neg_loss.sum()
    if num_pos == 0:
        loss = -neg_loss
    else:
        loss = -(pos_loss + neg_loss) / num_pos
    return loss
def reg_l1_loss(pred, target, mask):
    """Masked L1 loss for the two-channel regression head.

    :param pred: predicted regression map, NCHW with 2 channels.
    :param target: ground-truth values in NHWC layout.
    :param mask: (N, H, W) map that is non-zero only where regression
        targets are defined.
    :return: scalar loss, summed over masked cells and normalized by the
        number of active mask entries (epsilon added for stability).
    """
    # Bring predictions to NHWC so they line up with target and mask.
    pred_nhwc = paddle.transpose(pred, [0, 2, 3, 1])
    # Duplicate the mask across both regression channels.
    channel_mask = paddle.tile(paddle.unsqueeze(mask, -1), [1, 1, 1, 2])
    summed = F.l1_loss(pred_nhwc * channel_mask, target * channel_mask, reduction='sum')
    return summed / (mask.sum() + 1e-4)
| [
"1691608003@qq.com"
] | 1691608003@qq.com |
9f2eaee40308723324858966dcd6932750b0241b | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/command_lib/storage/tasks/task_buffer.py | dee39b0c1165d965f0fa3a433725b4686336f215 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 2,762 | py | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a buffer for tasks used in task_graph_executor.
See go/parallel-processing-in-gcloud-storage for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from six.moves import queue
class _PriorityWrapper:
"""Wraps a buffered task and tracks priority information.
Attributes:
task (Union[task.Task, str]): A buffered item. Expected to be a task or a
string (to handle shutdowns) when used by task_graph_executor.
priority (int): The priority of this task. A task with a lower value will be
executed before a task with a higher value, since queue.PriorityQueue uses
a min-heap.
"""
def __init__(self, task, priority):
self.task = task
self.priority = priority
def __lt__(self, other):
return self.priority < other.priority
class TaskBuffer:
    """Priority-ordered holding area for tasks awaiting execution.

    Backed by queue.PriorityQueue; in experiments its internal heap added
    little overhead, but the same interface could be re-implemented with a
    collections.deque if it ever becomes a bottleneck.
    """

    def __init__(self):
        self._queue = queue.PriorityQueue()

    def get(self):
        """Pop and return the next buffered item.

        Blocks while the buffer is empty.  Prioritized items are always
        returned before non-prioritized ones.

        Returns:
          A buffered item; a task or a shutdown string when used by
          task_graph_executor.
        """
        wrapper = self._queue.get()
        return wrapper.task

    def put(self, task, prioritize=False):
        """Insert an item into the buffer.

        Args:
          task (Union[task.Task, str]): item to buffer; a task or a
            shutdown string when used by task_graph_executor.
          prioritize (bool): True makes the item come out of `get` ahead
            of every item added with prioritize=False.
        """
        rank = 0 if prioritize else 1
        self._queue.put(_PriorityWrapper(task, rank))
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
420fde7ba568db208989a2a01889d71af5dccec9 | 6721bb9d78d772f50e62a675347fccface9d9d74 | /scripts/coord.py | 023afeb9a6afe68f4e1dd4d4d234a99beedb8521 | [] | no_license | claycoleman/twitterfeed | 20b21736b0d02173e9323b63cbc9c8c9ddfdd624 | f56c45d3393f7b24ed393aa1625b81f8d73eef67 | refs/heads/master | 2021-05-30T00:41:14.684180 | 2015-11-24T13:18:04 | 2015-11-24T13:18:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,788 | py | #! /usr/bin/env python
from __future__ import absolute_import
import requests, base64, json, tweepy, pprint, os, sys, urllib
from unidecode import unidecode
# Make the parent project importable, then point Django at its settings so
# the ORM models below can be used from this standalone script.
sys.path.append('..')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
# These imports require DJANGO_SETTINGS_MODULE to be set first.
from app.models import Location, Trend, Tweet
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.conf import settings
import django
# Required before any ORM access outside a managed Django process.
django.setup()
countries_coords = {23424768: "307,350,299,354,281,357,267,385,273,409,286,412,323,433,333,463,346,483,345,509,358,509,385,478,393,466,436,442,431,382,372,347", 23424977:"113,152,98,168,89,184,91,210,107,215,140,222,149,230,161,230,173,246,231,230,235,252,239,233,247,224,296,178,315,156,254,168,225,147", 23424900: "99,223,107,248,123,264,145,286,181,302,208,268,194,276,185,278,177,279,167,262,174,250,165,243,162,233,137,225,125,223,106,217", 23424775: "113,96,113,122,117,141,146,145,216,142,262,155,256,167,268,165,282,159,294,158,303,155,307,155,318,150,338,141,364,134,361,97,351,57,268,43,196,43,164,43", 23424856: "1070,123,1084,152,1088,181,1081,209,1077,233,1122,231,1147,198,1123,159,1085,120", 23424975: "525,109,558,98,573,107,573,125,578,135,558,138,542,140,524,139", 23424781: "884,153,852,186,904,236,958,237,966,255,973,265,1021,276,1054,250,1049,216,1053,179,1060,149,1022,135,1008,131,1000,132,997,141,1003,150,1002,161,992,169,961,169,914,162,909,160", 23424936: "671,77,771,45,943,18,1033,47,1139,61,1173,89,1163,136,1083,158,1023,131,1002,126,998,131,998,133,994,135,973,143,915,137,886,145,841,126,804,119,781,130,781,133,767,137,741,135,742,148,746,161,740,163,711,156,664,101", 23424846: "822,306,965,412,1071,426,1202,418,1207,378,1141,321,1074,271,1016,298,1003,279,1003,270,987,270,955,240,922,237,887,231,869,217,865,205,830,188,848,183,858,167,876,155,895,148,913,157,952,160,975,160,996,162,995,141,974,141,949,141,919,140,897,140,881,142,855,139,844,135,833,131,793,125,751,141,744,141,767,217,772,236", 23424908: "508,215,612,200,641,226,711,258,722,283,736,299,747,311,752,317,764,317,774,315,772,331,766,346,746,360,730,392,731,410,731,423,733,443,702,460,702,489,681,508,662,520,639,518,616,415,615,393,603,346,589,345,572,345,542,349,515,340,500,312,493,300,501,263", 23424819: "550,146,576,137,585,142,591,142,591,152,594,157,594,173,585,171,576,171,564,171", 23424950: "537,165,537,176,545,177,543,187,541,198,556,201,562,196,569,185,573,174", 23424748: 
"1082,429,1032,440,1013,526,1077,547,1178,560,1200,486,1183,433,1130,420,1106,417", 23424834: "221,273,178,304,204,346,225,339,283,298,316,257,225,256,215,258", 23424738: "653,197,656,217,685,225,698,237,723,244,745,285,774,307,789,290,812,266,765,233,760,198,748,168,719,162,704,157", 23424916: "1220,499,1157,590,1247,598,1299,552,1350,402,1289,394,1226,437", 23424925:"529,174,542,178,538,199,527,195", 23424829: "588,125,650,103,682,129,702,139,702,147,676,176,663,182,648,193,610,193", 23424919: "271,379,285,353,314,345,323,359,339,355,347,345,327,339,304,332,303,322,272,322,258,322,246,342,241,348,229,362,230,387,231,407,237,420,239,427,245,440,264,449,269,465,274,521,292,573,308,599,327,605,327,581,343,540,352,520,339,506,334,496,327,485,315,453,312,437,294,424,282,424,268,414,260,402,259,394,265,383,271,376", 1:"57,486,50,500,50,511,50,529,53,542,59,550,69,559,79,567,92,571,108,572,122,568,135,560,146,545,151,529,150,512,147,498,139,482,123,471,108,466,84,465,69,475", 23424868:"1068,176,1064,174,1057,174,1054,178,1049,180,1047,182,1047,186,1047,191,1054,199,1057,203,1062,208,1066,212,1073,208,1077,206,1074,194,1068,192,1065,192,1062,186,1069,182"}
# NOTE(review): this script uses Python 2 print statements; it will not run
# under Python 3 without changes.
# Attach the hard-coded map-polygon coordinate strings to every Location
# row; woeids missing from countries_coords leave coords set to None.
for country in Location.objects.all():
    print country.name
    country.coords = countries_coords.get(country.woeid)
    country.saveSlug()  # presumably regenerates the URL slug - confirm in app.models
    country.save()
print "success" | [
"coleclayman@gmail.com"
] | coleclayman@gmail.com |
9b61fb40b2d2ef6c60d489179aed2922c230f6e8 | b23eb00e069b55ca4157a64ab37339c70456a0cf | /modbus_simple_bridge/cp_lib/app_base.py | 28d3bbcee845dd19ebd82e0d2db30ed897b9b423 | [] | no_license | ampledata/sdk-samples | 511f3ddd1bfa3bce995d937771d86ae6407e9266 | 3e0002f38b8c70423b28f91d5276812da4a37118 | refs/heads/master | 2021-05-09T00:43:15.517673 | 2017-12-27T20:14:05 | 2017-12-27T20:14:05 | 119,755,556 | 1 | 0 | null | 2018-01-31T23:12:23 | 2018-01-31T23:12:23 | null | UTF-8 | Python | false | false | 11,980 | py | import logging
import sys
from threading import Lock
from cp_lib.data.data_tree import get_item_value, data_tree_clean, \
get_item_time_duration_to_seconds, put_item
class CradlepointRouterOffline(ConnectionError):
    """Signals that AppBase could not find/contact the Cradlepoint router."""
    pass
class CradlepointSdkDisabled(ConnectionError):
    """Signals that the SDK data is missing from the router's status tree."""
    pass
class CradlepointAppBase(object):
    """
    This object holds the data and status during the hand-off from the
    Cradlepoint Router launching your SDK code, and your code running.
    You can either sub-class your code as an instance of CradlepointAppBase,
    or use it as a secondary object to accept the data & status, copy to
    your own code, then launch your code.
    """

    def __init__(self, full_name=None, log_level=None, call_router=True,
                 log_name=None):
        """
        :param str full_name: mod name, such as "network.tcp_echo"
        :param str log_level: allow an over-ride of logging level,
                              if not None, ignore settings
        :param bool call_router: T to fetch router info, else F means
                                 do not contact router (may be offline)
        :param str log_name: optional NAME for the logger, to over-ride
                             any settings data
        :return:
        """
        # Import here to avoid circular import issues in sub-class and users
        from cp_lib.cs_client import init_cs_client_on_my_platform
        from cp_lib.load_product_info import load_product_info
        from cp_lib.load_firmware_info import load_firmware_info
        from cp_lib.load_settings_json import load_settings_json
        from cp_lib.cp_logging import get_recommended_logger

        # since we have to unpack these, might as well save results
        self.run_name = None  # like "network/tcp_echo/__init__.py"
        self.app_path = None  # like "network/tcp_echo/", to find files
        self.app_name = None  # like "tcp_echo"
        self.mod_name = None  # like "network.tcp_echo", for importlib

        if full_name is not None:
            # allow no name - for tools like MAKE or TARGET; no app directory
            assert isinstance(full_name, str)
            self.import_full_file_name(full_name)

        # create a data LOCK to allow multi-thread access to settings & data
        self.data_lock = Lock()

        # follow SDK design to load settings, first from ./config, then
        # over-lay from self.app_path.
        self.settings = load_settings_json(self.app_path)

        # convert 'None', 'true', and such strings into true Python values
        data_tree_clean(self.settings)

        # like settings, but into to null & are assumed 100% dynamic (TBD)
        self.data = dict()

        # use the settings to create the more complex logger, including
        # Syslog if appropriate
        try:
            self.logger = get_recommended_logger(
                self.settings, level=log_level, name=log_name)
        except:
            # bare except is deliberate here: the error is logged, then
            # re-raised, so nothing is silently swallowed
            logging.exception('get_recommended_logger() failed')
            raise

        if sys.platform == "linux":
            self.logger.info("Running under full Linux")
        elif sys.platform == "win32":
            self.logger.info("Running under Windows")
        elif sys.platform == "linux2":
            # NOTE(review): 'linux2' is what Python 2.x reports on Linux --
            # presumably the interpreter shipped on the router; confirm.
            self.logger.info("Running on Cradlepoint router")
        else:
            self.logger.info("Running on platform {}".format(sys.platform))

        # handle Router API client, which is different between PC and router
        try:
            self.cs_client = init_cs_client_on_my_platform(self.logger,
                                                           self.settings)
        except:
            # logged and re-raised, same pattern as the logger setup above
            self.logger.exception("CSClient init failed")
            raise

        # show NAME, Description, Version, UUID
        self.show_router_app_info()

        if call_router:
            # load the PRODUCT INFO into settings
            load_product_info(self.settings, self.cs_client)
            try:
                self.logger.info("Cradlepoint router is model:{}".format(
                    self.settings["product_info"]["product_name"]))
            except KeyError:
                # info may be absent when the router did not answer; optional
                pass

            # load the FW INFO into settings
            load_firmware_info(self.settings, self.cs_client)
            try:
                self.logger.info("Cradlepoint router FW is:{}".format(
                    self.settings["fw_info"]["version"]))
            except KeyError:
                pass

        return

    def get_product_name(self, full=False):
        """
        Get the product model as string

        :param bool full: T means return everything; F means 'cut' to
                          smaller subset, ignoring options
        :exception: KeyError if router information is not in self.settings
        :return:
        """
        from cp_lib.load_product_info import split_product_name

        value = self.settings["product_info"]["product_name"]
        """ :type value: str """
        if not full:
            # then reduce/clean up
            # returns IBR1100LPE would return as ("IBR1100", "LPE", True)
            value, options, wifi = split_product_name(value)
            # cache the split-off details back into the settings tree
            self.settings["product_info"]["product_options"] = options
            self.settings["product_info"]["product_has_wifi"] = wifi
        return value

    def get_setting(self, path, throw_exception=False, force_type=None):
        """
        Given path, like "route_api.local_ip", return raw (likely str)
        value

        If throw_exception=False (the default), then None is returned if the
        setting in 'path' is NOT FOUND, else throw a DataTreeItemNotFound
        exception. This allows distinguishing between an existing setting
        with value None, and one not found.

        Normally, the object returned is 'native' per the settings.json,
        which derived its values from the text settings.INI files.
        This means values are LIKELY string values. You can use force_type to
        do a smart-guess. For example, force_type=bool will cause the values
        (True, 1, "1", "true", "TRUE", "on", "enabled") to all return a
        simple bool() value True.

        :param str path:
        :param bool throw_exception: return None, else DataTreeItemNotFound
        :param type force_type: if not None, try forcing a type
        :return:
        """
        # serialize access so other threads cannot mutate the tree mid-read
        # NOTE(review): if get_item_value() raises (throw_exception=True),
        # the lock is never released; a with-statement would be safer.
        self.data_lock.acquire()
        result = get_item_value(self.settings, path, throw_exception,
                                force_type)
        self.data_lock.release()
        return result

    def get_setting_time_secs(self, path, throw_exception=False):
        """
        Given path, like "route_api.local_ip"

        :param str path:
        :param bool throw_exception: return None, else DataTreeItemNotFound
        :rtype float:
        """
        # same locking pattern (and caveat) as get_setting()
        self.data_lock.acquire()
        result = get_item_time_duration_to_seconds(
            self.settings, path, throw_exception)
        self.data_lock.release()
        return result

    def get_user_data(self, path, throw_exception=False, force_type=None):
        """
        Given path, like "route_api.local_ip", return raw (likely str)
        value

        If throw_exception=False (the default), then None is returned if the
        setting in 'path' is NOT FOUND, else throw a DataTreeItemNotFound
        exception. This allows distinguishing between an existing setting
        with value None, and one not found.

        :param str path:
        :param bool throw_exception: return None, else DataTreeItemNotFound
        :param type force_type: if not None, try forcing a type
        :return:
        """
        # reads from self.data (dynamic values), not self.settings
        self.data_lock.acquire()
        result = get_item_value(self.data, path, throw_exception, force_type)
        self.data_lock.release()
        return result

    def put_user_data(self, path, value, throw_exception=False):
        """
        Given path, like "route_api.local_ip", store a value in the dynamic
        user-data tree.

        If throw_exception=False (the default), then None is returned if the
        setting in 'path' is NOT FOUND, else throw a DataTreeItemNotFound
        exception. This allows distinguishing between an existing setting
        with value None, and one not found.

        :param str path: a path, like 'tank01.level'
        :param value: the value to set
        :param bool throw_exception: return None, else DataTreeItemNotFound
        :return:
        """
        self.data_lock.acquire()
        result = put_item(self.data, path, value, throw_exception)
        self.data_lock.release()
        return result

    def show_router_app_info(self):
        """
        Dump out some of the [application] information
        :return:
        """
        # Example of the settings section being reported:
        # [application]
        # name=hello_world
        # description=Hello World sample, using 3 subtasks
        # version=1.9
        # uuid=c69cfe79-5f11-4cae-986a-9d568bf96629
        if 'application' in self.settings:
            # it should be, but ignore if not!
            if 'name' in self.settings['application']:
                self.logger.info(
                    "Starting Router App named \"{}\"".format(
                        self.settings['application']['name']))
            if 'description' in self.settings['application']:
                self.logger.info(
                    "App Desc:{}".format(
                        self.settings['application']['description']))
            if 'version' in self.settings['application']:
                self.logger.info(
                    "App Vers:{}".format(
                        self.settings['application']['version']))
            if 'uuid' in self.settings['application']:
                self.logger.info(
                    "App UUID:{}".format(
                        self.settings['application']['uuid']))
        return

    def import_full_file_name(self, dot_name):
        """
        take a name - such as network.tcp_echo, then see if we should run
        either of these:
        - first try ./network/tcp_echo/__init__.py, which must exist plus
          be at least 5 bytes in size
        - second try ./network/tcp_echo/tcp_echo.py, which merely has to exist
        - else throw FileNotFoundError exception

        :param str dot_name: like "network.tcp_echo" or "RouterSDKDemo"
        :return: None
        """
        from cp_lib.app_name_parse import normalize_app_name, \
            get_module_name, get_app_name, get_app_path, get_run_name

        # this handles any combination of:
        # - "network\\tcp_echo\\file.py" or "network\\tcp_echo"
        # - "network/tcp_echo/file.py" or "network/tcp_echo"
        # - "network.tcp_echo.file.py" or "network.tcp_echo"
        names = normalize_app_name(dot_name)

        # normalize will have returned like one of these
        # ["network", "tcp_echo", "__init__.py"]
        # ["network", "tcp_echo", ""]
        # ["tcp_echo", "__init__.py"]
        # ["tcp_echo", ""]
        # ["", "__init__.py"]
        if names[0] == "":
            raise ValueError(
                "SDK App {} must be subdirectory, not ROOT".format(dot_name))

        # will be like "network.tcp_echo"
        self.mod_name = get_module_name(names)

        # will be like "tcp_echo"
        self.app_name = get_app_name(names)

        # will be like "network/tcp_echo/"
        self.app_path = get_app_path(names)

        # see if the main app is like "network/tcp_echo/__init__.py"
        # or like "network/tcp_echo/tcp_echo.py"
        self.run_name = get_run_name(names, app_path=self.app_path,
                                     app_name=self.app_name)
        return

    def run(self):
        # Subclasses must implement the application's main loop.
        raise NotImplementedError("AppBase.run() not defined")
| [
"lynn0git@linse.org"
] | lynn0git@linse.org |
4a0bc12cda64cd80e1d4f2485bab5fd5149a609d | 8ac75ee9dcb850c966a05a72bdfaf34fcf35965d | /Hacker-rank/Dictionary and maps.py | 035894e8de7ab5b193c3103e99fdafd09f0599e2 | [] | no_license | mathankrish/Python-Programs | 3f7257ffa3a7084239dcc16eaf8bb7e534323102 | 2f76e850c92d3126b26007805df03bffd603fcdf | refs/heads/master | 2020-05-01T04:27:00.943545 | 2019-10-07T18:57:41 | 2019-10-07T18:57:41 | 177,274,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | n = int(input().strip())
# HackerRank "Dictionary and Maps": read n "name number" pairs, then answer
# lookup queries until EOF.  `n` was read from stdin just above this block.
phonebook = {}
for _ in range(n):
    entry = input().split()
    phonebook[entry[0]] = entry[1]

while True:
    try:
        query = input()
    except EOFError:
        # EOF signals the end of the query stream
        break
    match = phonebook.get(query)
    print("Not found" if match is None else "{}={}".format(query, match))
"noreply@github.com"
] | noreply@github.com |
48def4f085746f5749ea0da7bbd79fb4b4b02eff | 8519205cff66267f73b4518802672a70cab233fc | /VGN/settings.py | fe2a83b76f5f72576f2c0894e17c54ddcc52bb73 | [] | no_license | ELGABO12/fase3AlvaradoMeza001 | 15f448eae827f3c6ea8a0aab8bfe919ac645e18a | e3cae7c3a52c31cba6b341fb4f738605b041b7d1 | refs/heads/main | 2023-01-15T10:14:04.487742 | 2020-11-26T20:01:11 | 2020-11-26T20:01:11 | 316,302,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,444 | py | """
Django settings for VGN project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control here; in any real
# deployment it should be read from the environment instead.
SECRET_KEY = 'b-t2mjyge+i^&&4s#qopj1=2g*l^-50$+&sus=#b)p7j2tq@r7'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine while DEBUG=True; must list real hosts in production.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'catalogo.apps.CatalogoConfig',
    'rest_framework'
]

# Django REST Framework: page-number pagination, 10 items per page.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10
}

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'VGN.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['./templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'VGN.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'es'

TIME_ZONE = 'America/Santiago'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_DIRS = (
    # NOTE(review): this is an absolute path from the filesystem root --
    # presumably os.path.join(BASE_DIR, 'catalogo/static') was intended;
    # confirm before relying on collectstatic.
    '/catalogo/static',
)

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Post-login / post-logout redirect targets (URL pattern name 'index').
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'index'
| [
"meza_123_@hotmail.com"
] | meza_123_@hotmail.com |
fcc74dbf63e2a18c90a50c8e90c031a789323eba | 645d1e9b73f382da28e2d0f6494b05fa5e278bf2 | /answers/pathInZigZagTree.py | b6356992d3482cbc1d9b3ccb7cfb59c7aa20df16 | [] | no_license | xxbeam/leetcode-python | 63efcba4f0fc1c09ceebca725778dacd9dfd27fd | 5f1282abb64651c4b67ce0b262456920827fe7dc | refs/heads/master | 2023-07-17T23:33:08.783011 | 2021-09-10T01:47:46 | 2021-09-10T01:47:46 | 345,580,038 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | # 1104. 二叉树寻路
class Solution:
    """LeetCode 1104 - Path in Zigzag Labelled Binary Tree.

    Work out which row the label sits on, then walk parent links upward.
    In the zigzag labelling, every other row is reversed, so the parent of
    a node is the ordinary parent of its mirror image within the row; that
    collapses to a single closed-form expression per step.
    """

    def pathInZigZagTree(self, label: int) -> list[int]:
        """Return the root-to-label path in the zigzag-labelled tree.

        :param label: 1-based node label; row r holds labels 2**(r-1)..2**r - 1.
        :return: labels from the root (1) down to ``label``, inclusive.
        """
        # A label in row r satisfies 2**(r-1) <= label < 2**r, so the row
        # is exactly the bit length of the label (replaces the original
        # manual halving loop).
        row = label.bit_length()
        path = []
        while label:
            path.append(label)
            # Mirror the label within its row, then take the ordinary
            # parent: mirrored = (2**(row-1) + 2**row - 1) - label,
            # parent = mirrored // 2.
            label = (2 ** (row - 1) + 2 ** row - 1 - label) // 2
            row -= 1
        return path[::-1]
if __name__ == '__main__':
    # Quick manual check: expected [1, 3, 4, 14] and [1, 2, 6, 10, 26].
    for sample in (14, 26):
        print(Solution().pathInZigZagTree(sample))
| [
"xiongxin@songxiaocai.com"
] | xiongxin@songxiaocai.com |
728ab8318005666802972f0b462a0c2812d6bda6 | 663b77ecca217bb84e88603bcada20bf7db597c3 | /train_test.py | 20f784cca93dd27fbd2e33fa39f2e940a0b171fa | [] | no_license | lllllllllllll-llll/SPAQ | 817ed567f774a666ec496dda2f03e30c4f767474 | b2578f914fa836811c137fb83bc16b6a42d7a9ab | refs/heads/master | 2023-01-30T04:03:06.925918 | 2020-12-09T11:20:04 | 2020-12-09T11:20:04 | 318,080,270 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | import os
import torch
import argparse
import random
import numpy as np
from BL_Trainer import BL_Trainer
def main(config):
    """Run the train/test protocol ``config.train_test_num`` times on random
    80/20 splits of the chosen dataset and report the median SRCC/PLCC.

    :param config: argparse.Namespace with at least ``dataset``, ``model``
                   and ``train_test_num`` attributes (see the CLI below).
    """
    # Root folder of each supported dataset.
    folder_path = {
        'SPAQ': 'F:/0datasets/SPAQ/',
    }
    # Image index range for each dataset (SPAQ ships 11125 images).
    img_num = {
        'SPAQ': list(range(0, 11125)),
    }
    sel_num = img_num[config.dataset]

    # np.float was a deprecated alias for the builtin float and was removed
    # in NumPy 1.24; plain float yields the identical float64 dtype.
    srcc_all = np.zeros(config.train_test_num, dtype=float)
    plcc_all = np.zeros(config.train_test_num, dtype=float)

    print('Training and testing on %s dataset for %d rounds...' % (config.dataset, config.train_test_num))
    for i in range(config.train_test_num):
        print('Round %d' % (i + 1))
        # Randomly select 80% images for training and the rest for testing
        random.shuffle(sel_num)
        split = int(round(0.8 * len(sel_num)))  # hoisted: same boundary for both slices
        train_index = sel_num[0:split]
        test_index = sel_num[split:len(sel_num)]
        print('train_index', train_index)
        print('test_index', test_index)
        if config.model == 'BL':
            solver = BL_Trainer(config, folder_path[config.dataset], train_index, test_index)
        elif config.model == 'MT-E':
            # NOTE(review): only BL_Trainer is imported at the top of this
            # file; the MT-* trainers below raise NameError unless their
            # imports are added -- confirm before selecting these models.
            solver = MTE_Trainer(config, folder_path[config.dataset], train_index, test_index)
        elif config.model == 'MT-A':
            solver = MTA_Trainer(config, folder_path[config.dataset], train_index, test_index)
        elif config.model == 'MT-S':
            solver = MTS_Trainer(config, folder_path[config.dataset], train_index, test_index)
        srcc_all[i], plcc_all[i] = solver.train()

    # Median across rounds is robust to the occasional bad split.
    srcc_med = np.median(srcc_all)
    plcc_med = np.median(plcc_all)
    print('Testing median SRCC %4.4f,\tmedian PLCC %4.4f' % (srcc_med, plcc_med))
if __name__ == '__main__':
    # Table-driven CLI definition: one (flag, options) row per argument,
    # registered in the original order so --help output is unchanged.
    cli_arguments = [
        ('--dataset', dict(dest='dataset', type=str, default='SPAQ',
                           help='Support datasets: SPAQ')),
        ('--model', dict(dest='model', type=str, default='BL',
                         help='Support model: BL, MT-E, MT-A, MT-S')),
        ('--train_patch_num', dict(dest='train_patch_num', type=int, default=10,
                                   help='Number of sample patches from training image')),
        ('--lr', dict(dest='lr', type=float, default=1e-3, help='Learning rate')),
        ('--weight_decay', dict(dest='weight_decay', type=float, default=0, help='Weight decay')),
        ('--lr_ratio', dict(dest='lr_ratio', type=int, default=10,
                            help='Learning rate ratio for hyper network')),
        ('--batch_size', dict(dest='batch_size', type=int, default=64, help='Batch size')),
        ('--epochs', dict(dest='epochs', type=int, default=30, help='Epochs for training')),
        ('--patch_size', dict(dest='patch_size', type=int, default=224,
                              help='Crop size for training & testing image patches')),
        ('--train_test_num', dict(dest='train_test_num', type=int, default=5, help='Train-test times')),
    ]
    parser = argparse.ArgumentParser()
    for flag, options in cli_arguments:
        parser.add_argument(flag, **options)
    config = parser.parse_args()
    main(config)
| [
"3014735464@qq.com"
] | 3014735464@qq.com |
deb0e404ff9fd02ef1f6c223e7fcd973c65650ef | 6704a21e1d8fd806c84c731da0f6ff08adbfef83 | /foodcartapp/admin.py | 6f1163c632922ade0a966dd49f4fde3c9053f9d0 | [] | no_license | AxmetES/Django-REST-Framework_Star-burger-web-shop | c6584e3c4738208edddcf0bca8be529c0a4e9791 | 18359807f629806378e9dc335a65331ecb92bbac | refs/heads/main | 2023-01-30T06:15:19.047629 | 2020-12-14T12:58:51 | 2020-12-14T12:58:51 | 306,328,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,449 | py | from django.contrib import admin
from django.forms import ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import reverse, redirect
from django.utils.html import format_html
from .models import Order
from .models import OrderDetails
from .models import Product
from .models import ProductCategory
from .models import Restaurant
from .models import RestaurantMenuItem
class RestaurantMenuItemInline(admin.TabularInline):
    """Tabular inline editor for a restaurant's menu items (no blank extra rows)."""
    model = RestaurantMenuItem
    extra = 0
class OrderOrderDetails(admin.TabularInline):
    """Tabular inline editor for an order's line items (OrderDetails)."""
    model = OrderDetails
    extra = 0
@admin.register(Restaurant)
class RestaurantAdmin(admin.ModelAdmin):
    """Admin page for restaurants: searchable list plus inline menu items."""
    search_fields = [
        'name',
        'address',
        'contact_phone',
    ]
    list_display = [
        'name',
        'address',
        'contact_phone',
    ]
    inlines = [
        RestaurantMenuItemInline
    ]
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Admin page for products: thumbnail list, category filter and preview."""
    list_display = [
        'get_image_list_preview',
        'name',
        'category',
        'price',
    ]
    list_display_links = [
        'name',
    ]
    list_filter = [
        'category',
    ]
    search_fields = [
        # FIXME SQLite can not convert letter case for cyrillic words properly, so search will be buggy.
        # Migration to PostgreSQL is necessary
        # NOTE(review): 'category' is a relation; Django's search_fields
        # usually needs a lookup such as 'category__name' -- confirm that
        # searching does not raise here.
        'name',
        'category',
    ]
    inlines = [
        RestaurantMenuItemInline
    ]
    fieldsets = (
        ('Общее', {
            'fields': [
                'name',
                'category',
                'image',
                'get_image_preview',
                'price',
            ]
        }),
        ('Подробно', {
            'fields': [
                'special_status',
                'description',
            ],
            'classes': [
                'wide'
            ],
        }),
    )
    # Computed preview is shown read-only on the change form.
    readonly_fields = [
        'get_image_preview',
    ]

    class Media:
        # Extra stylesheet loaded on the admin pages of this model.
        css = {
            "all": (
                "admin/foodcartapp.css",
            )
        }

    def get_image_preview(self, obj):
        """Large (200px) image preview for the change form; placeholder text when no image."""
        if not obj.image:
            return 'выберите картинку'
        return format_html('<img src="{url}" height="200"/>', url=obj.image.url)
    get_image_preview.short_description = 'превью'

    def get_image_list_preview(self, obj):
        """Small (50px) thumbnail for the list page, linked to the edit view."""
        if not obj.image or not obj.id:
            return 'нет картинки'
        edit_url = reverse('admin:foodcartapp_product_change', args=(obj.id,))
        return format_html('<a href="{edit_url}"><img src="{src}" height="50"/></a>',
                           edit_url=edit_url,
                           src=obj.image.url)
    get_image_list_preview.short_description = 'превью'
@admin.register(ProductCategory)
class ProductCategoryAdmin(admin.ModelAdmin):
    """Default admin for product categories.

    Renamed from ``ProductAdmin``: the original definition reused the name
    of the Product admin class above, shadowing it at module level.
    """
    pass
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    # Explicitly the stock ModelForm (same as Django's default behaviour).
    form = ModelForm

    def response_change(self, request, obj):
        """After saving a change, honour an optional ?next=<url> parameter.

        Falls back to the default post-save response when no "next" target
        was supplied in the query string.
        """
        # NOTE(review): this delegates to response_post_save_change() rather
        # than super().response_change() -- presumably to bypass the
        # save-and-continue handling; confirm this is intentional.
        res = super(OrderAdmin, self).response_post_save_change(request, obj)
        if "next" in request.GET:
            return redirect(request.GET['next'])
        else:
            return res

    list_display = [
        'firstname',
        'lastname',
        'address'
    ]
    inlines = [
        OrderOrderDetails
    ]
@admin.register(OrderDetails)
class OrderDetailsAmin(admin.ModelAdmin):
    # NOTE(review): class name is missing the "d" in "Admin"; renaming would
    # change the module's public surface, so it is only flagged here.
    list_display = [
        'product',
        'quantity',
    ]
| [
"fraktsia@gmail.com"
] | fraktsia@gmail.com |
03eaa514cfab5cc81bc4db949ac3024c60b20782 | 0a1a3aa6d923d03f1bbfe51b6391c8f6c1f05948 | /Leetcode_env/2019/6_14/Majority_Element.py | f7b776ebb83cfb9775d9c673ef1700e6d6871023 | [] | no_license | sam1208318697/Leetcode | 65256783c315da0b5cb70034f7f8a83be159d6b4 | 68cb93a86c11be31dc272a4553dd36283b6a5ff7 | refs/heads/master | 2020-06-06T05:24:39.163778 | 2020-03-01T09:18:36 | 2020-03-01T09:18:36 | 192,649,254 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | # 169. 求众数
# LeetCode 169 - Majority Element.
# Given a list of size n, find the element that appears more than n // 2
# times.  The list is assumed non-empty and a majority element is assumed
# to always exist.
# Example 1:  input [3,2,3]        -> 3
# Example 2:  input [2,2,1,1,1,2,2] -> 2
class Solution:
    """Three takes on the majority-element problem, from naive to simplified."""

    # Approach 1: naive scan.  nums.count() inside the loop makes this
    # O(n^2) overall; it exceeds the LeetCode time limit (kept for study).
    def majorityElement(self, nums) -> int:
        majority_element = 0
        times = 0
        for num in nums:
            count = nums.count(num)  # hoisted: count each element once, not twice
            if times < count:
                times = count
                majority_element = num
        return majority_element

    # Approach 2: deduplicate first, then count each distinct value once.
    def majorityElement2(self, nums) -> int:
        majority_element = 0
        times = 0
        # dict.fromkeys() is an order-preserving O(n) dedupe; the original
        # built the distinct list with list.count() membership tests (O(n^2)).
        for new_num in dict.fromkeys(nums):
            count = nums.count(new_num)
            if times < count:
                times = count
                majority_element = new_num
        return majority_element

    # Approach 3: simplified version of approach 2 -- return the first
    # distinct value whose count exceeds half the list length.
    def majorityElement3(self, nums) -> int:
        for num in list(set(nums)):
            if nums.count(num) > len(nums) / 2:
                return num
        return 0  # unreachable when a majority element exists
# Ad-hoc smoke check: the majority of the sample list is 2.
print(Solution().majorityElement3([2, 2, 1, 1, 1, 2, 2]))
"1208318697@qq.com"
] | 1208318697@qq.com |
8da2dd71d31554f46ea00a14fba16221fb226278 | 16e6fe2441ee1e081a03621290d7fe5216c79ab1 | /src/kegg_pull/rest.py | 9cabf83014af838b006eab994ef297f82bb47e11 | [
"BSD-3-Clause-Clear"
] | permissive | MoseleyBioinformaticsLab/kegg_pull | aec5221f4cf0dfda48e4cbe0d228d90ac5085631 | 0b8b0df33334c429bd629567e4ccc9a426753d5c | refs/heads/main | 2023-06-22T20:25:46.274451 | 2023-06-12T17:33:24 | 2023-06-12T17:33:24 | 506,780,256 | 11 | 1 | NOASSERTION | 2023-08-30T01:03:14 | 2022-06-23T20:20:50 | Python | UTF-8 | Python | false | false | 14,442 | py | """
KEGG REST API Operations
~~~~~~~~~~~~~~~~~~~~~~~~
|Interface for| the KEGG REST API including all its operations.
"""
import typing as t
import enum as e
import requests as rq
import time
import inspect as ins
import logging as log
from . import kegg_url as ku
from . import _utils as u
class KEGGresponse(u.NonInstantiable):
    """
    Holds the outcome of a single request made against the KEGG REST API.

    :ivar Status status: The status of the KEGG response.
    :ivar AbstractKEGGurl kegg_url: The URL used in the request to the KEGG REST API that resulted in the KEGG response.
    :ivar str text_body: The text version of the response body.
    :ivar bytes binary_body: The binary version of the response body.
    """
    class Status(e.Enum):
        """The status of a KEGG response."""
        SUCCESS = 1
        FAILED = 2
        TIMEOUT = 3

    def __init__(self, status: Status, kegg_url: ku.AbstractKEGGurl, text_body: str = None, binary_body: bytes = None) -> None:
        """
        :param status: The status of the KEGG response.
        :param kegg_url: The URL used in the request to the KEGG REST API that resulted in the KEGG response.
        :param text_body: The text version of the response body.
        :param binary_body: The binary version of the response body.
        :raises ValueError: Raised if the status is SUCCESS but a response body is not provided.
        """
        super().__init__()
        # A successful response must carry both body representations; the
        # falsy test rejects None, '' and b'' alike.
        body_missing = not text_body or not binary_body
        if status == KEGGresponse.Status.SUCCESS and body_missing:
            raise ValueError('A KEGG response cannot be marked as successful if its response body is empty')
        self.status = status
        self.kegg_url = kegg_url
        self.text_body = text_body
        self.binary_body = binary_body
class KEGGrest:
"""Class containing methods for making requests to the KEGG REST API, including all the KEGG REST API operations."""
def __init__(self, n_tries: int | None = 3, time_out: int | None = 60, sleep_time: float | None = 5.0):
"""
:param n_tries: The number of times to try to make a request (can succeed the first time, or any of n_tries, or none of the tries).
:param time_out: The number of seconds to wait for a request until marking it as timed out.
:param sleep_time: The number of seconds to wait in between timed out requests or blacklisted requests.
"""
self._n_tries = n_tries if n_tries is not None else 3
self._time_out = time_out if time_out is not None else 60
self._sleep_time = sleep_time if sleep_time is not None else 5.0
if self._n_tries < 1:
raise ValueError(f'{self._n_tries} is not a valid number of tries to make a KEGG request.')
def request(self, KEGGurl: type[ku.AbstractKEGGurl] = None, kegg_url: ku.AbstractKEGGurl = None, **kwargs) -> KEGGresponse:
""" General KEGG request function based on a given KEGG URL (either a class that is instantiated or an already instantiated KEGG URL object).
:param KEGGurl: Optional KEGG URL class (extended from AbstractKEGGurl) that's instantiated with provided keyword arguments.
:param kegg_url: Optional KEGGurl object that's already instantiated (used if KEGGurl class is not provided).
:param kwargs: The keyword arguments used to instantiate the KEGGurl class, if provided.
:return: The KEGG response.
"""
kegg_url = KEGGrest._get_kegg_url(KEGGurl=KEGGurl, kegg_url=kegg_url, **kwargs)
status: KEGGresponse.Status | None = None
for _ in range(self._n_tries):
try:
response = rq.get(url=kegg_url.url, timeout=self._time_out)
if response.status_code == 200:
return KEGGresponse(
status=KEGGresponse.Status.SUCCESS, kegg_url=kegg_url, text_body=response.text, binary_body=response.content)
else:
status = KEGGresponse.Status.FAILED
if response.status_code == 403:
# 403 forbidden. KEGG may have blocked the request due to too many requests in too little time.
# In case blacklisting, sleep to allow time for KEGG to unblock further requests.
time.sleep(self._sleep_time)
except rq.exceptions.Timeout:
status = KEGGresponse.Status.TIMEOUT
time.sleep(self._sleep_time)
return KEGGresponse(status=status, kegg_url=kegg_url)
@staticmethod
def _get_kegg_url(
KEGGurl: type[ku.AbstractKEGGurl] | None = None, kegg_url: ku.AbstractKEGGurl | None = None, **kwargs) -> ku.AbstractKEGGurl:
""" Gets the KEGGurl object to be used to make the request to KEGG.
:param KEGGurl: Optional KEGGurl class to instantiate a KEGGurl object using keyword arguments.
:param kegg_url: Instantiated KEGGurl object that's simply returned if provided (used if the KEGGurl class is not provided).
:param kwargs: The keyword arguments used to instantiate the KEGGurl object if a KEGGurl class is provided.
:return: The KEGGurl object.
:raises ValueError: Raised if both a class and object are provided or the class does not inherit from AbstractKEGGurl.
"""
if KEGGurl is None and kegg_url is None:
raise ValueError(
f'Either an instantiated kegg_url object must be provided or an extended class of '
f'{ku.AbstractKEGGurl.__name__} along with the corresponding kwargs for its constructor.')
if kegg_url is not None and KEGGurl is not None:
log.warning(
'Both an instantiated kegg_url object and KEGGurl class are provided. Using the instantiated object...')
if kegg_url is not None:
return kegg_url
if ku.AbstractKEGGurl not in ins.getmro(KEGGurl):
raise ValueError(
f'The value for KEGGurl must be an inherited class of {ku.AbstractKEGGurl.__name__}. '
f'The class "{KEGGurl.__name__}" is not.')
kegg_url = KEGGurl(**kwargs)
return kegg_url
def test(
self, KEGGurl: type[ku.AbstractKEGGurl] | None = None, kegg_url: ku.AbstractKEGGurl | None = None,
**kwargs) -> bool:
""" Tests if a KEGGurl will succeed upon being used in a request to the KEGG REST API.
:param KEGGurl: Optional KEGGurl class used to instantiate a KEGGurl object given keyword arguments.
:param kegg_url: KEGGurl object that's already instantiated (used if a KEGGurl class is not provided).
:param kwargs: The keyword arguments used to instantiated the KEGGurl object from the KEGGurl class, if provided.
:return: True if the URL would succeed, false if it would fail or time out.
"""
kegg_url = KEGGrest._get_kegg_url(KEGGurl=KEGGurl, kegg_url=kegg_url, **kwargs)
for _ in range(self._n_tries):
try:
response = rq.head(url=kegg_url.url, timeout=self._time_out)
if response.status_code == 200:
return True
except rq.exceptions.Timeout:
time.sleep(self._sleep_time)
return False
def list(self, database: str) -> KEGGresponse:
""" Executes the "list" KEGG API operation, pulling the entry IDs of the provided database.
:param database: The database from which to pull entry IDs.
:return: The KEGG response.
"""
return self.request(KEGGurl=ku.ListKEGGurl, database=database)
def get(self, entry_ids: t.List[str], entry_field: str | None = None) -> KEGGresponse:
""" Executes the "get" KEGG API operation, pulling the entries of the provided entry IDs.
:param entry_ids: The IDs of entries to pull.
:param entry_field: Optional field to extract from the entries.
:return: The KEGG response.
"""
return self.request(KEGGurl=ku.GetKEGGurl, entry_ids=entry_ids, entry_field=entry_field)
def info(self, database: str) -> KEGGresponse:
""" Executes the "info" KEGG API operation, pulling information about a KEGG database.
:param database: The database to pull information about.
:return: The KEGG response
"""
return self.request(KEGGurl=ku.InfoKEGGurl, database=database)
def keywords_find(self, database: str, keywords: t.List[str]) -> KEGGresponse:
""" Executes the "find" KEGG API operation, finding entry IDs based on keywords to search in entries.
:param database: The name of the database containing entries to search for.
:param keywords: The keywords to search in entries.
:return: The KEGG response
"""
return self.request(KEGGurl=ku.KeywordsFindKEGGurl, database=database, keywords=keywords)
def molecular_find(
self, database: str, formula: str | None = None, exact_mass: float | tuple[float, float] | None = None,
molecular_weight: int | tuple[int, int] | None = None) -> KEGGresponse:
""" Executes the "find" KEGG API operation, finding entry IDs in chemical databases based on one (and only one) choice of three molecular attributes of the entries.
:param database: The name of the chemical database to search for entries in.
:param formula: The chemical formula (one of three choices) of chemical entries to search for.
:param exact_mass: The exact mass (one of three choices) of chemical entries to search for (single value or range).
:param molecular_weight: The molecular weight (one of three choices) of chemical entries to search for (single value or range).
:return: The KEGG response
"""
return self.request(
KEGGurl=ku.MolecularFindKEGGurl, database=database, formula=formula, exact_mass=exact_mass, molecular_weight=molecular_weight)
def database_conv(self, kegg_database: str, outside_database: str) -> KEGGresponse:
""" Executes the "conv" KEGG API operation, converting the entry IDs of a KEGG database to those of an outside database.
:param kegg_database: The name of the KEGG database to pull converted entry IDs from.
:param outside_database: The name of the outside database to pull converted entry IDs from.
:return: The KEGG response.
"""
return self.request(KEGGurl=ku.DatabaseConvKEGGurl, kegg_database=kegg_database, outside_database=outside_database)
def entries_conv(self, target_database: str, entry_ids: t.List[str]) -> KEGGresponse:
""" Executes the "conv" KEGG API operation, converting provided entry IDs from one database to the form of a target database.
:param target_database: The name of the database to get converted entry IDs from.
:param entry_ids: The entry IDs to convert to the form of the target database.
:return: The KEGG response.
"""
return self.request(KEGGurl=ku.EntriesConvKEGGurl, target_database=target_database, entry_ids=entry_ids)
def database_link(self, target_database: str, source_database: str) -> KEGGresponse:
""" Executes the "link" KEGG API operation, showing the IDs of entries in one KEGG database that are connected/related to entries of another KEGG database.
:param target_database: One of the two KEGG databases to pull linked entries from.
:param source_database: The other KEGG database to link entries from the target database.
:return: The KEGG response
"""
return self.request(KEGGurl=ku.DatabaseLinkKEGGurl, target_database=target_database, source_database=source_database)
def entries_link(self, target_database: str, entry_ids: t.List[str]) -> KEGGresponse:
    """Run the KEGG "link" operation for specific entries, reporting the IDs of
    entries in the target database that are connected/related to them.

    :param target_database: The KEGG database to find links to the provided entries.
    :param entry_ids: The IDs of the entries to link to entries in the target database.
    :return: The KEGG response.
    """
    return self.request(
        KEGGurl=ku.EntriesLinkKEGGurl,
        target_database=target_database,
        entry_ids=entry_ids)
def ddi(self, drug_entry_ids: t.List[str]) -> KEGGresponse:
    """Run the KEGG "ddi" operation (drug-drug interaction search).

    A single entry ID reports all of that drug's known interactions; multiple
    IDs (which must all belong to the same database) check whether any drug
    pair in the set is CI or P.

    :param drug_entry_ids: The IDs of the drug entries within which to search for interactions.
    :return: The KEGG response.
    """
    return self.request(
        KEGGurl=ku.DdiKEGGurl, drug_entry_ids=drug_entry_ids)
def request_and_check_error(
        kegg_rest: KEGGrest | None = None, KEGGurl: type[ku.AbstractKEGGurl] | None = None,
        kegg_url: ku.AbstractKEGGurl | None = None, **kwargs) -> KEGGresponse:
    """ Makes a general request to the KEGG REST API using a KEGGrest object. Creates the KEGGrest object if one is not provided.
    Additionally, raises an exception if the request is not successful, specifying the URL that was unsuccessful.

    Fixes over the previous version: ``kegg_url`` is now annotated as optional
    (its default is None), and the two near-identical failure branches share a
    single raise site.

    :param kegg_rest: The KEGGrest object to perform the request. If None, one is created with the default parameters.
    :param KEGGurl: Optional KEGG URL class (extended from AbstractKEGGurl) that's instantiated with provided keyword arguments.
    :param kegg_url: Optional KEGGurl object that's already instantiated (used if KEGGurl class is not provided).
    :param kwargs: The keyword arguments used to instantiate the KEGGurl class, if provided.
    :return: The KEGG response
    :raises RuntimeError: Raised if the request fails or times out.
    """
    if kegg_rest is None:
        kegg_rest = KEGGrest()
    kegg_response = kegg_rest.request(KEGGurl=KEGGurl, kegg_url=kegg_url, **kwargs)
    # Both failure modes raise; only the verb in the message differs.
    if kegg_response.status == KEGGresponse.Status.FAILED:
        reason = 'failed'
    elif kegg_response.status == KEGGresponse.Status.TIMEOUT:
        reason = 'timed out'
    else:
        return kegg_response
    raise RuntimeError(f'The KEGG request {reason} with the following URL: {kegg_response.kegg_url.url}')
| [
"erik.huckvale2017@gmail.com"
] | erik.huckvale2017@gmail.com |
71a41e5537ad8dcdfb0d8fee0eafadb1bdb11080 | 7ef1ea34f982e6fa55b40b02230785ed35641d21 | /01 Unscramble Computer Science Problems/submit/Task0.py | e98b35bd02dd5425e2c17837ccfa7b729b2d968d | [] | no_license | V1997/Data-Structures-Algorithms | 1145829ade80bebe9e0b1ef3d4aac6d5bef05bfc | 68ff7d8fd3b8e5b3bb505f2739c46c5e9dcbb05b | refs/heads/master | 2022-07-19T17:45:10.845847 | 2020-05-21T04:00:42 | 2020-05-21T04:00:42 | 265,749,830 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | """
Read file into texts and calls.
It's ok if you don't understand how to read files.
"""
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
"""
TASK 0:
What is the first record of texts and what is the last record of calls?
Print messages:
"First record of texts, <incoming number> texts <answering number> at time <time>"
"Last record of calls, <incoming number> calls <answering number> at time <time>, lasting <during> seconds"
"""
first = texts[0]
i1 = first[0]
a1 = first[1]
sT1 = first[2]
last = calls[-1]
i2 = last[0]
a2 = last[1]
sT2 = last[2]
l = last[3]
print(
f"First record of texts, {i1} texts {a1} at time {sT1}")
print(
f"Last record of calls, {i2} calls {a2} at time {sT2}, lasting {l} seconds") | [
"patelvasu1997@gmail.com"
] | patelvasu1997@gmail.com |
c1a6c56a2da20619663fc0c1627e483be5ef87f9 | a15ddfa2886fadacd6909ef85143f2b9decc5e70 | /turing_find_max.py | d05a69d0eefcd4c0e1a21fc720f498c2a4b71b3d | [] | no_license | es162008087/pycodeguide | 8de58b141298b5104e558d60cadba0d91a47ff2a | 2758dd59074a348123f13ab3c2bfb27a876ce1cd | refs/heads/master | 2022-11-23T17:06:05.260051 | 2020-07-19T21:56:11 | 2020-07-19T21:56:11 | 280,944,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | # python3
# turing_find_max.py
# Code gathered from developers.turing.com to find max number in an array.
def find_max(nums):
    """Return the largest value in *nums*.

    Returns ``float("-inf")`` for an empty sequence, matching the original
    manual-loop sentinel behaviour. The hand-written loop (which still
    carried a "(Fill in the missing line here)" scaffold comment) is replaced
    by the built-in ``max`` with a ``default``.
    """
    return max(nums, default=float("-inf"))


print(find_max([3, 99, 11, 6, 50000000, -1, -444]))
| [
"atorizp@gmail.com"
] | atorizp@gmail.com |
428b845f68e1d7c602aa7f74a604609708605c11 | b35aea9f4411f5dc7942392d78dc31bb76c7ec73 | /ARTIN/index/forms.py | 145690526dcce52d9b06a9000dcf43e2949b4874 | [] | no_license | ashkanusefi/rondshow | 1079b81704fff55a1d54fa8dee2712ab61e92f4a | 7e5a80fcc6e326b8b1737a54fb53becc4195e475 | refs/heads/master | 2023-09-01T18:45:33.170465 | 2021-09-18T11:24:52 | 2021-09-18T11:24:52 | 407,820,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | from django import forms
from index.models import Contactform
class Contact_Form(forms.ModelForm):
    """ModelForm for ``Contactform`` submissions from the site's contact page."""

    class Meta:
        model = Contactform
        # Placeholder texts shown inside the inputs (Persian UI strings,
        # kept verbatim: name, e-mail, subject, phone, message).
        widgets = {
            'name': forms.TextInput(attrs={'placeholder': 'نام و نام خوانوادگی'}),
            'email': forms.TextInput(attrs={'placeholder': 'ایمیل'}),
            'subject': forms.TextInput(attrs={'placeholder': 'موضوع'}),
            'phone': forms.TextInput(attrs={'placeholder': 'شماره تماس'}),
            'description': forms.Textarea(attrs={'placeholder': 'پیام خود را وارد کنید'}),
        }
        # Only these model fields are rendered/accepted by the form.
        fields = [
            'name',
            'email',
            'subject',
            'phone',
            'description',
        ]
| [
"yousefi.ashkan96@gmail.com"
] | yousefi.ashkan96@gmail.com |
2b2612286a543b930c9c37895411d4ce1db5211b | 2d965748c6cbae2c896acbb4631fc039d3c0ca41 | /myproject/testapp/admin.py | 884a335adc48fd7453394c68f83855f3d448ab16 | [] | no_license | Turonk/back_forms_16 | abc3117c84e0d6e6d50d2cc0c5b1e185084db914 | 3e8d6b660bdb9a0561d3b95294fd85bf61b16160 | refs/heads/master | 2023-04-02T02:29:28.505273 | 2021-04-16T08:21:09 | 2021-04-16T08:21:09 | 358,526,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from django.contrib import admin
from .models import Task, Client
class TaskAdmin(admin.ModelAdmin):
    """Admin options for Task: searchable by text, filterable by date/author."""
    list_display = ('text', 'pub_date', 'author')
    list_filter = ("pub_date", 'author')
    search_fields = ("text",)
    # Placeholder shown for empty values in the admin list ("empty" in Russian).
    empty_value_display = "-пусто-"


class ClientAdmin(admin.ModelAdmin):
    """Admin options for Client: searchable by name."""
    list_display = ('name', 'company')
    search_fields = ("name",)
    empty_value_display = "-пусто-"


# Expose both models in the Django admin with the options above.
admin.site.register(Task, TaskAdmin)
admin.site.register(Client, ClientAdmin)
"gorlov.a@mail.ru"
] | gorlov.a@mail.ru |
aa095e04d9cf32acf6c60e5410f4c8cfae3d1767 | 3e6c35f8db6fa8e9d977d2f510ea73de56214904 | /NLP-Argumentation-Mining/model/__init__.py | bcadf345447a9e9f5281fe9e1e04732072ff188c | [
"LicenseRef-scancode-public-domain"
] | permissive | XuShiqiang9894/NLP-Argumentation-Mining | 170692dc65faa77ff4960985b8e050dc6120afcf | 67545719d27664f25a637dea2713ccbc5a71aa1e | refs/heads/main | 2023-02-01T20:29:47.341061 | 2020-12-18T12:32:39 | 2020-12-18T12:32:39 | 322,578,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | class ArgMiningModel():
def __init__(self, params):
    """Store the hyper-parameter/configuration mapping for the model.

    The dangling ``pass`` that followed the assignment was dead code and
    has been removed.
    """
    self.params = params
def train_and_predict(self):
    """Train on the training data, then predict on the test data.

    Subclasses must override this; the base implementation always raises.

    :return: Prediction results in the form
        ``[(input_1, pred_1, truth_1), (input_2, pred_2, truth_2), ...]``
    :raises NotImplementedError: always, on the base class.
    """
    raise NotImplementedError
"noreply@github.com"
] | noreply@github.com |
d01f91d9636db43a6009f9ab2184f64149e3514d | 85d1322fcb2fb01cbe848aaa13c44a12ff6883b8 | /main.py | 7ceb7a322e7753518c12fd8bf81557f867827bf4 | [] | no_license | Wajih24/Online-Speech_Recognition | c09406c8aca845f0e5aabd0be10b040b4b65b2f8 | fd1f1140598754de43259863af5543e7bb4255c9 | refs/heads/main | 2023-07-14T13:15:26.216232 | 2021-08-23T10:36:04 | 2021-08-23T10:36:04 | 399,061,871 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | import speech_recognition as sr
import pyttsx3
from datetime import datetime
from datetime import date
today = date.today()
# e.g. "Aug-23-2021" — used as the per-day log file suffix below and in
# speech_to_text().
day = today.strftime("%b-%d-%Y")
# Opening in append mode creates today's log file if it does not exist yet;
# nothing is written here, only the creation side effect matters.
with open("log"+day+".txt", 'a') as f:
    print('File created for this day : ', day)
    # NOTE(review): redundant — the `with` block already closes f.
    f.close()
def speak(text):
    """Read *text* aloud through the default pyttsx3 text-to-speech voice."""
    tts = pyttsx3.init()
    tts.say(text)
    tts.runAndWait()
def speech_to_text():
    """Capture one utterance from the microphone, transcribe it via Google
    Speech Recognition, append it (timestamped) to today's log file and
    return it.

    Returns the transcript as ``str``, or ``None`` when recognition fails
    (unintelligible audio, or an API/network error).

    Changes from the previous version: the local that shadowed the builtin
    ``input`` was renamed, and the redundant ``f.close()`` inside the
    ``with`` block was dropped. Behaviour is otherwise unchanged.
    """
    # Look up the index of the "pulse" audio device.
    # NOTE(review): the result is never used — sr.Microphone() below opens
    # the default device; confirm whether device_index=required was intended.
    required = -1
    for index, name in enumerate(sr.Microphone.list_microphone_names()):
        if "pulse" in name:
            required = index
    r = sr.Recognizer()
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        print("Say something!")
        # Stop listening after at most 4 seconds of speech.
        audio = r.listen(source, phrase_time_limit=4)
    try:
        transcript = r.recognize_google(audio)
        now = datetime.now()
        dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
        print("date and time =", dt_string)
        print("You said: " + transcript)
        text = dt_string + ": " + transcript + "\n"
        # `day` is the module-level date suffix computed at import time.
        with open("log" + day + ".txt", 'a') as f:
            f.write(text)
        #speak("You said: " + transcript + ".")
        return str(transcript)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
# Keep listening forever; each recognized utterance is appended to the
# day's log file by speech_to_text().
while(True):
    speech_to_text()
"noreply@github.com"
] | noreply@github.com |
74630a900649910f52610167dda5e5175c1009c7 | e3d33f5d82a541d7491e079c394dcebf1568f078 | /server/settings/migrations/0003_delete_tournamenttype.py | 746daa2db4b9fb8720e479ddcbeaca42296afee0 | [
"MIT"
] | permissive | MahjongRepository/mahjong-portal | 51bd1300c3e6b8a341fbddb67a750b268950627e | 20f01433858bed4610d60b27a98bafce5a810097 | refs/heads/master | 2023-07-09T09:05:23.155419 | 2023-07-08T10:47:14 | 2023-07-08T10:47:14 | 114,328,632 | 12 | 10 | MIT | 2022-07-29T01:29:59 | 2017-12-15T04:53:02 | Python | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-18 13:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tournament', '0005_remove_tournament_tournament_type'),
('settings', '0002_auto_20180117_0643'),
]
operations = [
migrations.DeleteModel(
name='TournamentType',
),
]
| [
"lisikhin@gmail.com"
] | lisikhin@gmail.com |
f1f8b0f701a6fdc844cbfdca2f2e1e0fc63f2d2a | 6b7230155432f8122931e1ef657079125de77ef2 | /bgunfolding/mle.py | f13ae904bf3f120b4478187469b0e34840fbc256 | [
"MIT"
] | permissive | lrsppp/bgunfolding | 1ce241682445bb2b5a997437e81e5dd5b2d6811c | 0e60ebd88906d5cd50df6e926e855d5a7ee3051f | refs/heads/main | 2023-08-28T20:31:00.824607 | 2021-11-06T23:39:37 | 2021-11-06T23:39:37 | 425,059,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,271 | py | import numpy as np
from bgunfolding.base import UnfoldingBase
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from bgunfolding.likelihood import llh_poisson, llh_tikhonov, hess_poisson, hess_tikhonov
class MLE(UnfoldingBase):
    # Penalized maximum-likelihood unfolding: a Poisson data term plus a
    # Tikhonov smoothness penalty, with the regularization strength tau
    # chosen at the minimum of the mean global correlation coefficient.
    def __init__(self, C, x0, bounds = None):
        """
        C : array-like
            Tikhonov Matrix used for regularization. Default should be discrete second order
            central derivative.
        x0 : array-like
            Prior for minimization
        bounds : list
            Bounds for each entry of estimated density f.
        """
        # NOTE(review): super(UnfoldingBase, self) starts MRO lookup *after*
        # UnfoldingBase, so UnfoldingBase.__init__ never runs here — confirm
        # whether super(MLE, self).__init__() was intended.
        super(UnfoldingBase, self).__init__()
        self.C = C
        self.x0 = x0
        self.bounds = bounds
        # Flags forwarded to the likelihood terms: whether over/underflow
        # bins are excluded from each term (see llh_poisson / llh_tikhonov).
        self.cut_overflow_poisson = False
        self.cut_overflow_tikhonov = True
    def __repr__(self):
        return 'plu'
    def predict(self, tau):
        """
        Minimize the negative penalized log-likelihood for a fixed tau.
        Parameters
        ----------
        tau : float
            Regularization parameter
        Returns
        -------
        f_est : array-like
            Estimated density. Implicitly returns None when tau is None or
            when the object has not been fitted yet.
        """
        if tau is None:
            print(type(tau))
            print(f'No valid regularization parameter.')
        else:
            # Objective: -(Poisson log-likelihood) - (Tikhonov penalty).
            function = lambda f_est, tau: - llh_poisson(f_est, self.g, self.b, self.A, cut_overflow = self.cut_overflow_poisson)\
                - llh_tikhonov(f_est, self.C, tau, self.acceptance, cut_overflow = self.cut_overflow_tikhonov)
            if self.is_fitted == True:
                # NOTE(review): args = (tau) is not a tuple — (tau,) looks
                # intended; verify scipy accepts the bare scalar here.
                res = minimize(function,
                               x0 = self.x0,
                               bounds = self.bounds,
                               args = (tau))
                return res.x
            else:
                print('Not fitted yet.')
    def predict_hess(self, f_est, tau):
        """
        Hessian of the penalized negative log-likelihood at f_est.
        Parameters
        ----------
        f_est : array-like
            Estimated density f_est
        tau : float
            Regularization parameter
        """
        res = hess_poisson(f_est, self.g, self.b, self.A) +\
            hess_tikhonov(f_est, self.C, tau, self.acceptance)
        return res
    def estimate_tau(self, tau_min, tau_max, n_tau = 250, log = True):
        """
        Does a scan of tau parameters within a logspace starting from 10^tau_min to 10^tau_max.
        For each tau the predict method is called and the corresponding hessian matrix
        is calculated. These are used to calculate the global correlation coefficients (glob_cc).
        Parameters
        ----------
        tau_min : int
            Minimum exponent that defines the logspace (10^tau_min).
        tau_max: int
            Maximum exponent that defines the logspace (10^tau_max).
        n_tau : int
            Number of evenly spaced floats within the logspace.
        log : bool
            If False, tau_min/tau_max are used as plain linspace bounds instead.
        """
        if log == True:
            tau_space = np.logspace(tau_min, tau_max, n_tau)
        elif log == False:
            tau_space = np.linspace(tau_min, tau_max, n_tau)
        glob_cc = np.zeros(n_tau)
        # NOTE(review): assumes the unfolded spectrum has n_bins_true - 2
        # entries — presumably excluding two overflow bins; confirm.
        hess = np.zeros((n_tau, self.n_bins_true - 2, self.n_bins_true - 2))
        for i, tau in enumerate(tau_space):
            res = self.predict(tau)
            hess[i] = self.predict_hess(res, tau)
            glob_cc[i] = self.calc_glob_cc(hess[i])
            # Warm start: reuse the current solution as the next prior.
            self.x0 = res
        self.tau_est = self.estimate_minimum(tau_space, glob_cc)
        self.hess = hess
        self.glob_cc = glob_cc
        self.tau_space = tau_space
        return self.tau_est
    def calc_glob_cc(self, cov):
        """
        Calculate mean of global correlation coefficients
        rho_j = sqrt(1 - [(V_x)_jj * (V_x^-1)_jj]^-1)
        Parameters
        ----------
        cov : ndarray
            Covariance Matrix
        Returns : float
            Global Mean Correlation Coefficients
        """
        glob_cc = np.mean(np.sqrt(1 - 1 / ( np.diag(cov) * np.diag(np.linalg.inv(cov)))))
        return glob_cc
    def estimate_minimum(self, tau_space, glob_cc):
        """
        Estimate regularization parameter tau which corresponds to minimum of
        global correlation coefficients.
        Parameters
        ----------
        tau_space : array of length N
        glob_cc : array of length N
        """
        try:
            tau_est = tau_space[np.where(glob_cc == np.min(glob_cc))[0][0]]
            return tau_est
        # NOTE(review): bare except silently maps any failure (e.g. all-NaN
        # glob_cc) to a None return — narrow this when revisiting.
        except:
            print('Could not estimate regularization parameter.')
    def plot_glob_cc(self):
        """
        Helper function to quickly plot mean of global correlation coefficients versus regularization
        parameter tau.
        """
        plt.plot(self.tau_space, self.glob_cc, label = 'Mean of Global Correlation Coefficients')
        plt.xscale('log')
        plt.xlabel(r'$\mathrm{Regularization\,Parameter}\,\tau$')
        plt.ylabel(r'$\hat{\rho}_{\tau}$')
        plt.legend(loc = 'best')
        plt.tight_layout()
| [
"lars.poppe@tu-dortmund.de"
] | lars.poppe@tu-dortmund.de |
131a6514521063970b45ff2764eff0225bf4020d | d2e400ae4add2041a4c74ef5e86463b700464ed3 | /test.py | c6374083e28cca45d8fe07b6ad43c6491f8f61a8 | [] | no_license | abiraja2004/awesome_nlp | 5fe3818d74bed16881082f0ede3b8478567b0c31 | c409f873e16fc3768737a09dec1e9285b4931567 | refs/heads/master | 2020-03-08T09:34:02.287354 | 2017-12-18T14:11:34 | 2017-12-18T14:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | import argparse
import os
import baselines.run_sumy as extr
import baselines.return_first as first
import baselines.return_random as rd
import gensim
SUMM_DIR = "generated_summaries"
def generate_summaries(input_file, n):
    """Write a SumBasic extractive summary for every line of *input_file*.

    Fix: the output file used to be reopened with mode ``'w'`` inside the
    loop, truncating it on every iteration so only the last line's summary
    survived. It is now opened once (still truncating any previous run) for
    the whole loop.
    """
    out_path = os.path.join(SUMM_DIR, "SumBasic_generated_summaries.txt")
    with open(input_file, 'r') as document, open(out_path, 'w') as f:
        for line in document:
            # Generate extractive sentences
            summ = extr.gen_sum(line, n, "SumBasic")
            f.write(str(summ) + "\n")
def naive_baseline(input_file, n):
    """Append a first-n-words summary for each line of *input_file*."""
    print("Naive: {}".format(n))
    out_path = os.path.join(SUMM_DIR, "naive_generated_summaries.txt")
    with open(input_file, 'r') as document:
        for line in document:
            with open(out_path, "a") as f:
                f.write(first.gen_sum(line, n) + "\n")
def random_baseline(input_file, n):
    """Append a random-words summary for each line of *input_file*."""
    print("Random: {}".format(n))
    out_path = os.path.join(SUMM_DIR, "random_generated_summaries.txt")
    with open(input_file, 'r') as document:
        for line in document:
            with open(out_path, "a") as f:
                f.write(rd.gen_sum(line, n) + "\n")
def pke_baseline(input_file, n):
    """Append a pke-based summary for each line of *input_file*.

    Fix: the previous version took no parameters but referenced
    ``input_file`` and ``n``, so every call raised NameError. The signature
    now matches the other baselines.
    """
    print("PKE: {}".format(n))
    with open(input_file, 'r') as document:
        for line in document:
            with open(os.path.join(SUMM_DIR, "pke_generated_summaries.txt"), "a") as f:
                # NOTE(review): `pk` is not imported anywhere in this file —
                # this call still needs the proper pke wrapper import.
                f.write(pk.gen_sum(line, n) + "\n")
def main(input_file, n):
    """Run the baseline summarizers and report where the output landed."""
    # Make sure the output directory exists before any baseline writes to it.
    if not os.path.exists(SUMM_DIR):
        os.makedirs(SUMM_DIR)
    naive_baseline(input_file, n)
    random_baseline(input_file, n)
    print("Generated summaries in: {}".format(os.path.realpath(SUMM_DIR)))
if __name__ == '__main__':
    # CLI entry point: --input is the corpus (one document per line),
    # --words caps the summary length.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", default="inputs.txt")
    parser.add_argument("--words", default="8", help="how many words the \
summary should maximally contain", type=int)
    args = parser.parse_args()
    print(args)
    main(args.input, args.words)
| [
"michiel@dutchnaoteam.nl"
] | michiel@dutchnaoteam.nl |
94966647b89a1e25bfe12b32b7748ca09e684956 | 92eb5b9d3761081f007ea5cebab4c2e46353bdf4 | /dictionary/migrations/0007_auto_20170210_2055.py | 4760d6e0a73d057d69d2cca84d241a63cd9ae0b0 | [] | no_license | boolow5/Qaamuus | 30f82eb6ba694f41cee398d59f954fd7982f6283 | cf468d9a63532acd556bfca7ce90fd75b529e33f | refs/heads/master | 2021-01-22T04:40:31.984675 | 2017-03-08T15:12:47 | 2017-03-08T15:12:47 | 81,566,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-10 20:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration: adds a `date_created` timestamp (defaulting
    # to the current time) to both the Comment and Word models.

    dependencies = [
        ('dictionary', '0006_word_category'),
    ]

    operations = [
        migrations.AddField(
            model_name='comment',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='word',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| [
"boolow5@gmail.com"
] | boolow5@gmail.com |
c45af0baa8bcafa1054d61852e13323c348e6b7e | 8f4bd927633c897170452235c343fc3f4d88aec4 | /Big_subtraction.py | ef573f08229624999e2681c316038a32f1c8b4b3 | [] | no_license | Krugger1982/24_1_squirrel | 378e13007556b03c057b056de694538025281670 | 9a8c6e3779bc2ea4c65de83b53267e36f8a8268a | refs/heads/master | 2021-05-20T23:42:43.733708 | 2020-04-29T15:47:34 | 2020-04-29T15:47:34 | 252,457,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | def BigMinus(s1,s2):
def BigMinus(s1, s2):
    """Return |s1 - s2| as a decimal string, for non-negative integers given
    as digit strings (no sign; canonical inputs without leading zeros).

    Fix: the previous version only decremented the single next-higher digit
    when a position had to borrow, so borrows never cascaded through zeros —
    e.g. it returned "199" for BigMinus("100", "1") instead of "99". The
    subtraction now carries an explicit borrow across the full width of the
    larger operand.
    """
    big, small = list(s1), list(s2)
    # Order the operands by magnitude; for equal-length digit strings,
    # lexicographic list comparison equals numeric comparison.
    if len(big) < len(small) or (len(big) == len(small) and big < small):
        big, small = small, big
    if big == small:
        # Equal operands: difference is zero (matches the old behaviour).
        return '0'
    result = []
    borrow = 0
    # Walk from the least significant digit, padding `small` with zeros.
    for i in range(1, len(big) + 1):
        top = int(big[-i]) - borrow
        low = int(small[-i]) if i <= len(small) else 0
        diff = top - low
        borrow = 1 if diff < 0 else 0
        result.append(str(diff + 10 * borrow))
    # Drop leading zeros produced by the highest positions; the result is
    # guaranteed non-empty because big > small numerically here.
    return ''.join(reversed(result)).lstrip('0')
| [
"noreply@github.com"
] | noreply@github.com |
823f52289936f88c00b2f7fb9d779e3d3223ce81 | f73eb5a699988c822554866cc6af9bb3dc3361a8 | /tools/events_list/admin.py | a46898b4db6ea445b52cf78ad3e7318d866f0c7f | [] | no_license | MaraJade/seniorproject | 5a41288ffee598c04e123ba16afa51f8fc711bc4 | 430591a02cad8e4b6d55989173c0e4818e862e97 | refs/heads/develop | 2021-01-18T21:54:04.745659 | 2016-01-29T02:16:19 | 2016-01-29T02:16:19 | 49,244,241 | 1 | 0 | null | 2016-01-29T01:38:46 | 2016-01-08T02:30:56 | Python | UTF-8 | Python | false | false | 262 | py | from django.contrib import admin
from .models import Event, Group, Hashtag, Log, Person, Topic

# Register each events_list model with the default ModelAdmin so it shows
# up in the Django admin site.
admin.site.register(Event)
admin.site.register(Group)
admin.site.register(Hashtag)
admin.site.register(Person)
admin.site.register(Topic)
admin.site.register(Log)
| [
"catbookgirl@gmail.com"
] | catbookgirl@gmail.com |
995cb49daa5331a9f96ba7e80b145e942ab1fcd4 | 219c426b1fd60c2cb7c6da9aaaff2d5973d680b4 | /z3_expe.py | 8a2128b9272519347f4afb1f5d82521715e35380 | [] | no_license | mshachnai/Loopless-Synthesis | 476f4dee97fe858cbfd308e6427cbf408424b194 | 9a1cece3a4b72692ca8e87f58840bdafe9db3580 | refs/heads/master | 2020-04-01T22:52:52.134013 | 2018-11-28T07:11:53 | 2018-11-28T07:11:53 | 153,731,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,871 | py | # P16 - Compute max of two integers
# Component-based program synthesis: encode a 4-component straight-line
# program as SMT constraints. Location variables (ly*/lx*) decide how the
# component outputs (Y*) and operands (X*) are wired to the inputs and to
# each other; Z3 searches for a wiring whose program satisfies the spec.
import z3
#from z3 import * considered bad practice but defines namespace z3
# List of Variables
# I, J are the program inputs, O its output; all values are 8-bit vectors.
I = z3.BitVec('I', 8)
J = z3.BitVec('J', 8)
O = z3.BitVec('O',8)
# Y_k is the output of component k; X_k1/X_k2 are its two operands.
Y1 = z3.BitVec('Y1',8)
Y2 = z3.BitVec('Y2',8)
Y3 = z3.BitVec('Y3',8)
Y4 = z3.BitVec('Y4',8)
X11 = z3.BitVec('X11',8)
X12 = z3.BitVec('X12',8)
X21 = z3.BitVec('X21',8)
X22 = z3.BitVec('X22',8)
X31 = z3.BitVec('X31',8)
X32 = z3.BitVec('X32',8)
X41 = z3.BitVec('X41',8)
X42 = z3.BitVec('X42',8)
# List of numbering for each variables
# Location variables: -1 wires to input J, 0 to input I, 1..4 to the
# output of the component placed on that line.
ly1 = z3.Int('ly1')
ly2 = z3.Int('ly2')
ly3 = z3.Int('ly3')
ly4 = z3.Int('ly4')
lx11 = z3.Int('lx11')
lx12 = z3.Int('lx12')
lx21 = z3.Int('lx21')
lx22 = z3.Int('lx22')
lx31 = z3.Int('lx31')
lx32 = z3.Int('lx32')
lx41 = z3.Int('lx41')
lx42 = z3.Int('lx42')
# List of components. phi-lib
phi1 = (Y1 == X11 ^ X12)
# Component 2: negated unsigned-greater-than test — 0x00 when X21 > X22,
# else 0xFF (arithmetic negation of the 0/1 bit-vector).
phi2 = (Y2 == -z3.If(z3.UGT(X21, X22), z3.BitVecVal(0,8), z3.BitVecVal(1,8)))
phi3 = (Y3 == X31 & X32)
phi4 = (Y4 == X41 ^ X42)
# Write the spec
# Specification: O must equal max(I, J) under unsigned comparison.
spec = z3.And(z3.Implies(z3.UGE(J, I), O == J),
              z3.Implies(z3.UGT(I,J), O == I))
# phi cons = line number of two different instructions cannot be the same
phicons = z3.And(ly1!=ly2, ly2!=ly3, ly1!=ly3, ly1!=ly4, ly4!=ly2, ly4!=ly3)
# There are four instructions (lines 1..4).
# Bound the line number of each instruction and operand.
phibound = z3.And(ly1 >=1 , ly1 <=4,
                  ly2 >=1, ly2 <=4,
                  ly3 >=1, ly3 <=4,
                  ly4 >=1, ly4 <=4,
                  lx11 >=-1, lx11 <=4,
                  lx12 >=-1, lx12 <=4,
                  lx21 >=-1, lx21 <=4,
                  lx22 >=-1, lx22 <=4,
                  lx31 >=-1, lx31 <=4,
                  lx32 >=-1, lx32 <=4,
                  lx41 >=-1, lx41 <=4,
                  lx42 >=-1, lx42 <=4)
# The operands of an instruction should use variables from previous lines. acyclicity
phidep = z3.And(lx11 < ly1 , lx12 < ly1 , lx21 < ly2, lx22 < ly2, lx31 < ly3, lx32 < ly3,
                lx41 < ly4, lx42 < ly4)
# Connection information:
# First, the simple ones: if lx == 0, then x gets info from I
# (lx == -1 wires to J); if ly == 4, that component's output is O.
phiconn = z3.And(z3.Implies(lx11 == 0, X11 == I),
                 z3.Implies(lx12 == 0, X12 == I),
                 z3.Implies(lx21 == 0, X21 == I),
                 z3.Implies(lx22 == 0, X22 == I),
                 z3.Implies(lx31 == 0, X31 == I),
                 z3.Implies(lx32 == 0, X32 == I),
                 z3.Implies(lx41 == 0, X41 == I),
                 z3.Implies(lx42 == 0, X42 == I),
                 z3.Implies(lx11 == -1, X11 == J),
                 z3.Implies(lx12 == -1, X12 == J),
                 z3.Implies(lx21 == -1, X21 == J),
                 z3.Implies(lx22 == -1, X22 == J),
                 z3.Implies(lx31 == -1, X31 == J),
                 z3.Implies(lx32 == -1, X32 == J),
                 z3.Implies(lx41 == -1, X41 == J),
                 z3.Implies(lx42 == -1, X42 == J),
                 z3.Implies(ly1 == 4,Y1 == O),
                 z3.Implies(ly2 == 4,Y2 == O),
                 z3.Implies(ly3 == 4,Y3 == O),
                 z3.Implies(ly4 == 4,Y4 == O))
lys = [ly1, ly2, ly3, ly4]
lxs = [lx11, lx12, lx21, lx22, lx31, lx32, lx41, lx42]
lToVDict = {
    ly1: Y1,
    ly2: Y2,
    ly3: Y3,
    ly4: Y4,
    lx11: X11,
    lx12: X12,
    lx21: X21,
    lx22: X22,
    lx31: X31,
    lx32: X32,
    lx41: X41,
    lx42: X42
}
# If an operand's location equals an output's location, the two value
# variables must be equal (the dataflow connection).
for i in lys :
    for j in lxs:
        phiconn = z3.And(phiconn, z3.Implies(i==j, lToVDict[i] == lToVDict[j]))
phiwfp = z3.And(phicons, phidep, phibound)
# For every input/intermediate valuation, a valid wiring must satisfy the spec.
insideForAll = z3.ForAll([I, J, O, X11, X12, X21, X22, X31, X32, X41, X42, Y1, Y2, Y3, Y4], z3.Implies(z3.And(phi1, phi2, phi3, phi4, phiconn), spec))
final_formula = z3.And(phiwfp, insideForAll)
s = z3.Solver()
s.add(final_formula)
print (s.check())
print (s.model())
| [
"noreply@github.com"
] | noreply@github.com |
165b320a0f937ccc6fd4ef9e6bae85487e84034d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/100/usersdata/199/49629/submittedfiles/prova1.py | 6b7bd8a5436b67d150ae6be7d451d557c92d2016 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO ABAIXO DESTA LINHA
# Fixed: every input() line carried an extra unmatched '(' (a SyntaxError),
# and the "crescente" test compared Carta4 > Carta5 instead of <.
Carta1 = int(input('Digite C1:'))
Carta2 = int(input('Digite C2:'))
Carta3 = int(input('Digite C3:'))
Carta4 = int(input('Digite C4:'))
Carta5 = int(input('Digite C5:'))
if Carta1 > Carta2 > Carta3 > Carta4 > Carta5:
    # Strictly decreasing hand.
    print('D')
elif Carta1 < Carta2 < Carta3 < Carta4 < Carta5:
    # Strictly increasing hand.
    print('C')
else:
    # Neither ordering.
    print('N')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f00c12fbb9ee81b6e24b4cd7f33e21abdf13e7fe | 4cb1fd4f6f4a8e57ca3d222007c1a5cde3deebfe | /Chapter02/checknetmiko1.py | b138aa0d3123e8dc3f071c0acc8ab1ceabed2675 | [
"MIT"
] | permissive | PacktPublishing/Python-Network-Programming | 838097e82762fed54fbc2abffe4db71d5acd350b | 416cea10fe56b2312cf25150ed7ba27837cddf18 | refs/heads/master | 2023-02-11T00:34:43.611524 | 2023-01-30T08:58:32 | 2023-01-30T08:58:32 | 166,771,428 | 52 | 51 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from netmiko import ConnectHandler
print ("Before config push")
# Open an SSH session to a Cisco IOS device.
# NOTE(review): credentials and IP are hard-coded — acceptable only for a
# lab exercise.
device = ConnectHandler(device_type='cisco_ios', ip='192.168.255.249', username='cisco', password='cisco')
# Show the interface configuration before the change, for comparison.
output = device.send_command("show running-config interface fastEthernet 0/0")
print (output)
# Push a description onto the interface via config mode.
configcmds=["interface fastEthernet 0/0", "description my test"]
device.send_config_set(configcmds)
print ("After config push")
# Re-read the interface configuration to verify the change was applied.
output = device.send_command("show running-config interface fastEthernet 0/0")
print (output)
device.disconnect()
| [
"rutujay@packt.com"
] | rutujay@packt.com |
56a1de01ef11c05014cc0839f7d425d84bad66d9 | faa879600d274f8084ef44c44531fc40d428b139 | /jina/peapods/runtimes/base.py | aed4b12ada64160a31bdd07696b6c87fa6c26598 | [
"Apache-2.0"
] | permissive | zhutony/jina | cb61680413b13bc67d14bfd42bd21c88c6b66cc6 | 4bb5ad86452826c4485823ed1a73fe3f059c459e | refs/heads/master | 2023-05-07T05:19:21.050102 | 2021-05-30T02:42:41 | 2021-05-30T02:42:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,068 | py | import argparse
from ...logging import JinaLogger
class BaseRuntime:
"""A Jina Runtime is a procedure that blocks the main process once running (i.e. :meth:`run_forever`),
therefore must be put into a separated thread/process. Any program/library/package/module that blocks the main
process, can be formulated into a :class:`BaseRuntime` class and then be used in :class:`BasePea`.
In the sequel, we call the main process/thread as ``M``, the process/thread blocked :class:`Runtime` as ``S``.
In Jina, a :class:`BasePea` object is used to manage a :class:`Runtime` object's lifecycle. A :class:`BasePea`
is a subclass of :class:`multiprocessing.Process` or :class:`threading.Thread`, it starts from ``M`` and once the
``S`` is spawned, it calls :class:`Runtime` methods in the following order:
0. :meth:`__init__` in ``M``
1. :meth:`setup` in ``S``
2. :meth:`run_forever` in ``S``. Note that this will block ``S``, step 3 won't be
reached until it is unblocked by :meth:`cancel`
3. :meth:`teardown` in ``S``. Note that ``S`` is blocked by
:meth:`run_forever`, this step won't be reached until step 2 is unblocked by :meth:`cancel`
The :meth:`setup` and :meth:`teardown` pair together, which defines instructions that will be executed before
and after. In subclasses, they are optional.
The :meth:`run_forever` and :meth:`cancel` pair together, which introduces blocking to ``S`` and then
unblocking from it. They are mandatory for all subclasses.
Note that, there is no "exclusive" relation between :meth:`run_forever` and :meth:`teardown`, :meth:`teardown`
is not about "cancelling", it is about "cleaning".
Unlike other three methods that get invoked inside ``S``, the :meth:`cancel` is invoked in ``M`` to unblock ``S``.
Therefore, :meth:`cancel` usually requires some special communication between ``M`` and ``S``, e.g.
- Use :class:`threading.Event` or `multiprocessing.Event`, while :meth:`run_forever` polls for this event
- Use ZMQ to send a message, while :meth:`run_forever` polls for this message
- Use HTTP/REST to send a request, while :meth:`run_forever` listens to this request
Note, another way to jump out from :meth:`run_forever` is raise exceptions from it. This will immediately move to
:meth:`teardown`.
.. note::
Rule of thumb on exception handling: if you are not sure if you should handle exception inside
:meth:`run_forever`, :meth:`cancel`, :meth:`setup`, :meth:`teardown`, then DO NOT catch exception in them.
Exception is MUCH better handled by :class:`BasePea`.
.. seealso::
:class:`BasePea` for managing a :class:`Runtime` object's lifecycle.
"""
def run_forever(self):
"""Running the blocking procedure inside ``S``. Note, once this method is called,
``S`` is blocked.
.. note::
If this method raises any exception, :meth:`teardown` will be called.
.. seealso::
:meth:`cancel` for cancelling the forever loop.
"""
raise NotImplementedError
def cancel(self):
"""Cancelling :meth:`run_forever` from ``M``. :meth:`cancel` usually requires some special communication
between ``M`` and ``S``, e.g.
- Use :class:`threading.Event` or `multiprocessing.Event`, while :meth:`run_forever` polls for this event
- Use ZMQ to send a message, while :meth:`run_forever` polls for this message
- Use HTTP/REST to send a request, while :meth:`run_forever` listens to this request
.. seealso::
:meth:`run_forever` for blocking the process/thread.
"""
raise NotImplementedError
def activate(self):
"""Send activate control message."""
raise NotImplementedError
def deactivate(self):
"""Send deactivate control message."""
raise NotImplementedError
def setup(self):
"""Method called to prepare the runtime inside ``S``. Optional in subclasses.
The default implementation does nothing.
.. note::
If this method raises any exception, then :meth:`run_forever` and :meth:`teardown` won't be called.
.. note::
Unlike :meth:`__init__` called in ``M``, :meth:`setup` is called inside ``S``.
"""
pass
def teardown(self):
"""Method called immediately after :meth:`run_forever` is unblocked.
You can tidy up things here. Optional in subclasses. The default implementation does nothing.
.. note::
This method will only be called if the :meth:`setup` succeeds.
"""
self.logger.close()
def __init__(self, args: 'argparse.Namespace'):
super().__init__()
self.args = args
if args.name:
self.name = f'{args.name}/{self.__class__.__name__}'
else:
self.name = self.__class__.__name__
self.logger = JinaLogger(self.name, **vars(self.args))
| [
"noreply@github.com"
] | noreply@github.com |
9dc02dfb18c108bc0ddb641a4f05e8b275d9cb9e | e0029f9da7e2fbe6ceb58e8127fb18e7daad99c8 | /medicament/Form/form3.py | 90231e18ab47ae273ff77e57a02941a05889cc11 | [] | no_license | akayerov/Medic | 0a9cd4a0651bbe5ac337c6cc9e53553684509de3 | 97ff4ae76ceffb161921631acb8ca594d24059e2 | refs/heads/master | 2020-05-17T22:05:28.847974 | 2015-08-20T13:50:10 | 2015-08-20T13:50:10 | 33,241,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,179 | py | # -*- coding: utf-8 -*-
'''
@author: a_kayerov
'''
from django.db.models import Sum
from random import random
import openpyxl
from openpyxl.styles import Font
from medicament.oper_with_base import create_new_report, save_doc, get_name, get_period_namef, get_region_name
from medicament.models import Doc3
from _datetime import datetime
def create_report_form3(periodInt, datef):
    """Create the form-3 documents for a new reporting period.

    Delegates to the generic ``create_new_report`` with this form's number
    (3), its model and its initial-fill callback.

    Returns True when the records were added successfully, False otherwise.
    """
    return create_new_report(3, Doc3, periodInt, datef, copy_fields_form3)
def save_doc_form3(request, type, id_doc, mode_comment):
    """Save a form-3 document plus a comment describing the user's action.

    Wires this form's field-setter and validator into the generic
    ``save_doc`` helper.
    """
    return save_doc(
        Doc3, set_fields_form3, is_valid_form3,
        request, type, id_doc, mode_comment)
def copy_fields_form3(ds, dd):
''' Копирование полей - указать все поля для копирования
Для каждой формы,
ВЫЗЫВАЕТСЯ ТОЛЬКО ДЛЯ ДОКУМЕНТОВ В СОСТОЯНИИ ЗАВЕШЕНО- незаполненные и несогласаванные документы такой обработке не подлежат!
'''
dd.c1_1_1 = ds.c1_1_1
dd.c1_1_2 = ds.c1_1_2
dd.c1_2 = ds.c1_2
dd.c2_1 = ds.c2_1
dd.c2_2 = ds.c2_2
dd.c3_1 = ds.c3_1
dd.c3_2_1 = ds.c3_2_1
dd.c3_2_2 = ds.c3_2_2
dd.c4_1 = ds.c4_1
'''
for f in dd._meta.get_all_field_names():
if f[0] == 'c':
obj, model, direct, m2m = dd._meta.get_field_by_name(f)
objs, models, directs, m2ms = ds._meta.get_field_by_name(f)
ares = objs.value_from_object(ds)
assert False
'''
# if isinstance(obj, GenericRelation):
# continue
# if not direct:
# continue
# if m2m:
# l = {}
# val = obj.value_from_object(dd)
# for ix,m in enumerate(obj.value_from_object(dd)):
# l.update({ix:m.__unicode__()})
# field_list.update({f:l})
# else:
# field_list.update({f:obj.value_to_string(q)})
def set_fields_form3(request, doc):
''' Заполнение полей модели данными формы .
Для каждой формы
'''
doc.c1_1_1 = request.POST['c1_1_1']
doc.c1_1_2 = request.POST['c1_1_2']
doc.c1_2 = request.POST['c1_2']
doc.c2_1 = request.POST['c2_1']
doc.c2_2 = request.POST['c2_2']
doc.c3_1 = request.POST['c3_1']
doc.c3_2_1 = request.POST['c3_2_1']
doc.c3_2_2 = request.POST['c3_2_2']
doc.c4_1 = request.POST['c4_1']
doc.c4_2 = request.POST['c4_2']
def is_valid_form3(doc, doc_prev):
''' Проверка заполнения формы на корректность
'''
ret = [True,'OK']
return ret
def calc_sum_form3(doc):
''' Возвращает Суммы данных отчетов
'''
# assert False
aq0= doc.aggregate(Sum('c1_1_1'),Sum('c1_1_2'),Sum('c1_2'),Sum('c2_1'),Sum('c2_2'), Sum('c3_1'), \
Sum('c3_2_1'),Sum('c3_2_2'),Sum('c4_1'), \
)
s = [["1. Показатель 1_1_1", aq0['c1_1_1__sum']],
["2. Позазатель 1_1_2", aq0['c1_1_2__sum']],
]
return s
def exp_to_excel_form3(doc, iperiod, iregion, mode, stat = None): # mode = 0 по региону или группе больниц mode = 1 - по конкретной больнице
res = calc_sum_form3(doc)
speriod = get_period_namef(iperiod)
sregion = get_region_name(mode,doc,iregion)
if mode == 1:
name_file = get_name("/static/Form/Form3.xlsx")
else:
name_file = get_name("/static/Form/Form3_All.xlsx")
wb = openpyxl.load_workbook(name_file)
sheet = wb.active
sheet['B2'] = speriod
sheet['B1'] = sregion
if mode==0:
sheet['B310'] = "Статистика по отчету"
sheet['B311'] = "Организаций предоставляющих, Всего"
sheet['C311'] = stat.rec_all
sheet['B312'] = "Отобрано в отчет, Всего"
sheet['C312'] = stat.rec_fltr
sheet['B313'] = "Завершено"
sheet['C313'] = stat.rec_complete
sheet['B314'] = "Согласование"
sheet['C314'] = stat.rec_soglas
sheet['B315'] = "Корректировка"
sheet['C315'] = stat.rec_correct
sheet['B316'] = "Редактирование"
sheet['C316'] = stat.rec_edit
startrow = 7
for i in range(0,296):
srA = "B" + str(startrow + i)
srB = "C" + str(startrow + i)
sheet[srA] = res[i][0]
sheet[srB] = res[i][1]
# вывод только для конкретной МО для все не выводится
if mode == 1:
# res = calc_valf3_form2(doc)
startrow = 307
for i in range(0,38):
srA = "B" + str(startrow + i)
srB = "C" + str(startrow + i)
sheet[srA] = res[i][0]
sheet[srB] = res[i][1]
sheet['A346'] = "Выведено в системе Мед+ " + str(datetime.now())
sheet['A346'].font = Font(size=5)
else:
sheet['A318'] = "Выведено в системе Мед+ " + str(datetime.now())
sheet['A318'].font = Font(size=5)
# name_file = get_name("\\medicament\\Form\\rep" + str(int(random()*100000000)) + ".xlsx")
name_file = get_name("/medicament/Form/rep" + str(int(random()*100000000)) + ".xlsx")
wb.save(name_file)
return name_file
| [
"akayerov@yandex.ru"
] | akayerov@yandex.ru |
1358e647fe643aad07d6b0dd1a0012c3ea949f28 | edbde68daaf9c72d8f13ecc1ed509137a15906db | /python/leetcode/word_search.py | 9d55855a27a6784381cae98f2e5f375078fa2dbc | [] | no_license | midasama3124/cracking-coding-interview | 1733a6be3796ca57ff464023d779ee1b2d938360 | ebee7ad487eecabbc29306dad1c3a96ee5d8308b | refs/heads/master | 2022-12-13T00:14:18.266973 | 2020-09-02T18:02:17 | 2020-09-02T18:02:17 | 274,759,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | class Solution(object):
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
m = len(board)
n = len(board[0])
# Recurse until word is found
for i in range(m):
for j in range(n):
if board[i][j] == word[0] and self.dfs(i, j, 0, board, word): return True
return False
def dfs(self, i, j, idx, board, word):
if idx == len(word) - 1: return True
board[i][j] = chr(ord(board[i][j]) - 65)
if i < len(board) - 1 and board[i + 1][j] == word[idx+1] and self.dfs(i + 1, j, idx+1, board, word): return True
if i > 0 and board[i - 1][j] == word[idx+1] and self.dfs(i - 1, j, idx+1, board, word): return True
if j > 0 and board[i][j - 1] == word[idx+1] and self.dfs(i, j - 1, idx+1, board, word): return True
if j < len(board[0]) - 1 and board[i][j + 1] == word[idx+1] and self.dfs(i, j + 1, idx+1, board, word): return True
board[i][j] = chr(ord(board[i][j]) + 65) # Recovering changed character
return False | [
"midasama3124@gmail.com"
] | midasama3124@gmail.com |
eee96575295629c28da3002abdd5d860f2976caf | 7e6298d3c8079231166bc31b115d04a9cb8833ed | /app/webapp/endpoints/routes.py | bd3ac98fc0f92d3abb035bc5f38aac0717b1dbbc | [
"MIT"
] | permissive | harveyr/thunderbox | 4a1f339ce101e5e43d5b9e4c3df8c21b0f0311c7 | 6bd4dbae1b40a3a4f61ea6941d6d54744aef2b3d | refs/heads/master | 2016-09-06T12:56:32.023465 | 2013-12-04T14:38:59 | 2013-12-04T14:38:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,645 | py | import os
import logging
import re
import datetime
from flask import (
request,
Blueprint,
abort,
)
from app.util import jsonify
from app.lintblame import git
from app.lintblame import py
logger = logging.getLogger(__name__)
blueprint = Blueprint('endpoints', __name__)
EXCLUDE_REX = re.compile('jquery|underscore', re.IGNORECASE)
def js_timestamp(dt):
epoch = datetime.datetime(1970, 1, 1)
return int((dt - epoch).total_seconds()) * 1000
def valid_target(path):
ext = os.path.splitext(path)[1]
return (
ext in ['.py', '.go', '.js', '.json'] and
not EXCLUDE_REX.search(path)
)
def get_path_or_400():
path = request.args.get('path')
if not path:
abort(400)
if path.startswith('~'):
path = os.path.expanduser(path)
return path
def paths_or_400():
joined_paths = request.args.get('paths')
if not joined_paths:
abort(400)
split_paths = joined_paths.split(',')
for i, p in enumerate(split_paths):
if p[0] == '~':
split_paths[i] = os.path.expanduser(p)
return split_paths
@blueprint.route('/dumb')
def dumb_route():
return jsonify({'success': True})
def get_path_targets(path):
logger.info('get_path_targets')
if os.path.isdir(path):
contents = [os.path.join(path, i) for i in os.listdir(path)
if not i.startswith('.')]
logger.info('contents: {0}'.format(contents))
return [i for i in contents if valid_target(i)]
else:
if valid_target[path]:
return [path]
else:
return []
@blueprint.route('/testpath')
def test_path():
path = get_path_or_400()
response = {
'path': path,
'exists': os.path.exists(path)
}
if response['exists']:
response['dir'] = os.path.isdir(path)
if request.args.get('branch'):
response['targets'] = git.git_branch_files(path)
else:
response['targets'] = get_path_targets(path)
git_branch = git.git_branch(path)
if git_branch:
response['branch'] = git_branch
response['vcs'] = 'git'
response['name'] = git.git_name()
return jsonify(response)
def _results_dict(path):
result = {}
with open(path, 'r') as f:
result['lines'] = f.read().splitlines()
result['blame'] = git.blame(path)
result['issues'] = []
if path.endswith('.py'):
result['issues'] += py.pylint_issues(path)
result['issues'] += py.pep8_issues(path)
result['issues'] += py.pyflakes_issues(path)
elif path.endswith('.js') or path.endswith('.json'):
result['issues'] += py.jshint_issues(path)
return result
@blueprint.route('/poll')
def poll_paths():
request_paths = paths_or_400()
branch_mode = request.args.get('branch')
if branch_mode and branch_mode.lower() != 'false':
poll_paths = [p for p in git.git_branch_files(request_paths[0])]
else:
poll_paths = get_path_targets(request_paths[0])
poll_paths = filter(valid_target, poll_paths)
full_scan = request.args.get('fullScan', False)
full_scan = full_scan and full_scan != 'false'
since = float(int(request.args.get('since')) / 1000)
response = {
'changed': {}
}
for p in poll_paths:
mod = os.path.getmtime(p)
if full_scan or mod > since:
response['changed'][p] = _results_dict(p)
response['changed'][p]['modtime'] = mod
if branch_mode:
response['delete'] = [i for i in request_paths if i not in poll_paths]
return jsonify(response)
| [
"harveyr@gmail.com"
] | harveyr@gmail.com |
b410d142b81c1ff46a841b791aac9e8f0c825de6 | 7c1b5af77fbfde1f4f2c698a489e07024c147edc | /docs/sphinxext/example.py | a3a898c3d74b35d6e48e079745f272267a2beaef | [] | no_license | migueldvb/pyasdf | e7812da935ee3e4fec6d3c61fb16425ac2e1bdc7 | 4a72952b0196ede261e07569fc4da2616fa5e4b3 | refs/heads/master | 2020-12-26T18:44:07.562442 | 2015-03-30T16:18:19 | 2015-03-30T16:18:19 | 29,930,850 | 0 | 0 | null | 2015-02-04T20:23:04 | 2015-01-27T19:29:17 | Python | UTF-8 | Python | false | false | 3,764 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import atexit
import io
import os
import shutil
import tempfile
import textwrap
from docutils.parsers.rst import Directive
from docutils import nodes
from sphinx.util.nodes import set_source_info
from pyasdf import AsdfFile
from pyasdf.constants import ASDF_MAGIC, BLOCK_FLAG_STREAMED
from pyasdf import versioning
from pyasdf import yamlutil
version_string = versioning.version_to_string(versioning.default_version)
TMPDIR = tempfile.mkdtemp()
def delete_tmpdir():
shutil.rmtree(TMPDIR)
GLOBALS = {}
LOCALS = {}
FLAGS = {
BLOCK_FLAG_STREAMED: "BLOCK_FLAG_STREAMED"
}
class RunCodeDirective(Directive):
has_content = True
def run(self):
code = textwrap.dedent('\n'.join(self.content))
cwd = os.getcwd()
os.chdir(TMPDIR)
try:
try:
exec(code, GLOBALS, LOCALS)
except:
print(code)
raise
literal = nodes.literal_block(code, code)
literal['language'] = 'python'
set_source_info(self, literal)
finally:
os.chdir(cwd)
return [literal]
class AsdfDirective(Directive):
required_arguments = 1
def run(self):
filename = self.arguments[0]
cwd = os.getcwd()
os.chdir(TMPDIR)
parts = []
try:
code = AsdfFile.read(filename, _get_yaml_content=True)
code = '{0}{1}\n'.format(ASDF_MAGIC, version_string) + code.strip()
literal = nodes.literal_block(code, code)
literal['language'] = 'yaml'
set_source_info(self, literal)
parts.append(literal)
ff = AsdfFile.read(filename)
for i, block in enumerate(ff.blocks.internal_blocks):
data = block.data.tostring().encode('hex')
if len(data) > 40:
data = data[:40] + '...'
allocated = block._allocated
size = block._size
data_size = block._data_size
flags = block._flags
if flags & BLOCK_FLAG_STREAMED:
allocated = size = data_size = 0
lines = []
lines.append('BLOCK {0}:'.format(i))
human_flags = []
for key, val in FLAGS.items():
if flags & key:
human_flags.append(val)
if len(human_flags):
lines.append(' flags: {0}'.format(' | '.join(human_flags)))
if block.compression:
lines.append(' compression: {0}'.format(block.compression))
lines.append(' allocated_size: {0}'.format(allocated))
lines.append(' used_size: {0}'.format(size))
lines.append(' data_size: {0}'.format(data_size))
lines.append(' data: {0}'.format(data))
code = '\n'.join(lines)
literal = nodes.literal_block(code, code)
literal['language'] = 'yaml'
set_source_info(self, literal)
parts.append(literal)
finally:
os.chdir(cwd)
result = nodes.admonition()
textnodes, messages = self.state.inline_text(filename, self.lineno)
title = nodes.title(filename, '', *textnodes)
result += title
result.children.extend(parts)
return [result]
def setup(app):
app.add_directive('runcode', RunCodeDirective)
app.add_directive('asdf', AsdfDirective)
atexit.register(delete_tmpdir)
| [
"mdboom@gmail.com"
] | mdboom@gmail.com |
e2e3817a1e7ab097aac8071c3eb9fa89df0186b9 | 863509e794b069c9688f6263454c06d2c48868b2 | /backend/backend/ml_model/client_server/ml_client.py | e892990daa897d7cb55cee5604a142de6051e99b | [
"MIT"
] | permissive | TheDuckWhisperer/tournesol | c6df38185963bbecc6109dac275075a3ceca857a | 0fde7587e91a42e5a2218f2ffb70d4fc8cff7f73 | refs/heads/master | 2023-04-18T15:31:20.627952 | 2021-05-01T19:59:07 | 2021-05-01T19:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | py | from jsonrpcclient.clients.http_client import HTTPClient
from multiprocessing import Process
from backend.rating_fields import VIDEO_FIELDS
import numpy as np
import pickle
from time import time
def time_cache_wrapper(f, expire_sec=3600):
"""Decorator which caches results for some seconds."""
# format pickle(x) -> (compute_time, value)
cache = {}
def wrapper(*args):
x_str = pickle.dumps(args)
if x_str in cache:
if time() - cache[x_str][0] <= expire_sec:
return cache[x_str][1]
result = f(*args)
cache[x_str] = (time(), result)
return result
return wrapper
class DatabaseLearnerCommunicator(object):
"""Communicate with training/inference workers."""
def __init__(
self,
port_inference=5000,
port_training=5001,
host='localhost'):
"""Initialize (remember ports)."""
self.port_inference = port_inference
self.port_training = port_training
self.host = host
def build_client(self, port):
"""Return an http client pointing to the worker."""
return HTTPClient("http://%s:%d" % (self.host, port))
@time_cache_wrapper
def __call__(self, x):
"""Transform embedding into preferences."""
try:
client = self.build_client(port=self.port_inference)
return client.call([float(t) for t in x]).data.result
except Exception as e:
print(e)
return np.zeros(len(VIDEO_FIELDS))
def fit(self):
"""Fit on data from the dataset."""
def fit_helper():
client = self.build_client(port=self.port_training)
client.fit()
client_inference = self.build_client(port=self.port_inference)
client_inference.reload()
Process(target=fit_helper).start()
| [
"sergei.volodin.ch@gmail.com"
] | sergei.volodin.ch@gmail.com |
9a534cd85b192474c3e54b2008097cfeec45916c | 48201d9a9d820c49ae08765b646d16c33e5b9d9d | /mordecai/geoparse.py | a09395343325aa60b1f90c8cbd0541413d391bfb | [
"MIT"
] | permissive | astorfi/mordecai | ce43bf21adb27b5cc5208966e3a88ed83eed3a23 | fcc16a8bbce92a01bf243c16ef4033d3fcc2a759 | refs/heads/master | 2020-03-22T01:21:09.335271 | 2018-06-07T11:38:39 | 2018-06-07T11:38:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,273 | py | import keras
import pandas as pd
from elasticsearch_dsl.query import MultiMatch
from elasticsearch_dsl import Search, Q
import numpy as np
from collections import Counter
import editdistance
import pkg_resources
import spacy
from . import utilities
from multiprocessing.pool import ThreadPool
from elasticsearch.exceptions import ConnectionTimeout
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
print("Mordecai requires Python 3 and seems to be running in Python 2.")
# Load the spaCy English model once at module import time. `parser` and
# `tagger` are disabled because only NER (and the word vectors) are used here.
# The NameError guard means an already-defined `nlp` (e.g. pre-loaded by the
# caller or a previous import) is reused instead of being loaded again.
try:
    nlp
except NameError:
    try:
        # large model required: the country-picking features rely on its vectors
        nlp = spacy.load('en_core_web_lg', disable=['parser', 'tagger'])
    except OSError:
        print("ERROR: No spaCy NLP model installed. Install with this command: `python -m spacy download en_core_web_lg`.")
class Geoparser:
    def __init__(self, es_ip="localhost", es_port="9200", verbose = False,
                 country_threshold = 0.6, n_threads = 4, mod_date = "2018-06-05"):
        """
        Set up the geoparser: gazetteers and country vectors, the
        Elasticsearch/Geonames connection, and the two trained Keras models
        (country picker and rank model).

        Parameters
        ----------
        es_ip: str
            host of the Geonames Elasticsearch index
        es_port: str
            port of the Geonames Elasticsearch index
        verbose: bool
            if True, return the full result dictionaries rather than trimmed ones
        country_threshold: float
            if the best country guess scores below this, return nothing for it
        n_threads: int
            stored for parallel processing (not used in this section of the
            code -- presumably consumed by a batch method elsewhere)
        mod_date: str
            expected build date of the Geonames index; only used to warn on mismatch

        Raises
        ------
        ConnectionError
            if the Elasticsearch/Geonames index cannot be reached
        """
        DATA_PATH = pkg_resources.resource_filename('mordecai', 'data/')
        MODELS_PATH = pkg_resources.resource_filename('mordecai', 'models/')
        # _cts: country name -> ISO3 mapping; _just_cts keeps a pristine copy
        # before state/city aliases are merged into _cts below
        self._cts = utilities.country_list_maker()
        self._just_cts = utilities.country_list_maker()
        self._inv_cts = utilities.make_inv_cts(self._cts)  # ISO3 -> country name
        country_state_city = utilities.other_vectors()
        self._cts.update(country_state_city)
        # spaCy tokens for each country name, plus their precomputed vectors,
        # used for similarity lookups in _feature_word_embedding
        self._ct_nlp = utilities.country_list_nlp(self._cts)
        self._prebuilt_vec = [w.vector for w in self._ct_nlp]
        # country names *and* nationality adjectives ("Syrian") -> ISO3
        self._both_codes = utilities.make_country_nationality_list(self._cts, DATA_PATH + "nat_df.csv")
        self._admin1_dict = utilities.read_in_admin1(DATA_PATH + "admin1CodesASCII.json")
        self.conn = utilities.setup_es(es_ip, es_port)
        self.country_model = keras.models.load_model(MODELS_PATH + "country_model.h5")
        self.rank_model = keras.models.load_model(MODELS_PATH + "rank_model.h5")
        self._skip_list = utilities.make_skip_list(self._cts)
        self.training_setting = False # make this true if you want training formatted
        # if the best country guess is below the country threshold, don't return anything at all
        self.country_threshold = country_threshold
        feature_codes = pd.read_csv(DATA_PATH + "feature_codes.txt", sep="\t", header = None)
        self._code_to_text = dict(zip(feature_codes[1], feature_codes[3])) # human readable geonames IDs
        self.verbose = verbose # return the full dictionary or just the good parts?
        self.n_threads = n_threads
        # fail fast with a clear message if the gazetteer is unreachable
        try:
            # https://www.reddit.com/r/Python/comments/3a2erd/exception_catch_not_catching_everything/
            #with nostderr():
            self.conn.count()
        except:
            raise ConnectionError("Could not establish contact with Elasticsearch at {0} on port {1}. Are you sure it's running? \n".format(es_ip, es_port),
                 "Mordecai needs access to the Geonames/Elasticsearch gazetteer to function.",
                 "See https://github.com/openeventdata/mordecai#installation-and-requirements",
                 "for instructions on setting up Geonames/Elasticsearch")
        es_date = utilities.check_geonames_date(self.conn)
        if es_date != mod_date:
            print("You may be using an outdated Geonames index. Your index is from {0}, while the most recent is {1}. Please see https://github.com/openeventdata/mordecai/ for instructions on updating.".format(es_date, mod_date))
def _feature_country_mentions(self, doc):
"""
Given a document, count how many times different country names and adjectives are mentioned.
These are features used in the country picking phase.
Parameters
---------
doc: a spaCy nlp'ed piece of text
Returns
-------
countries: dict
the top two countries (ISO code) and their frequency of mentions.
"""
c_list = []
for i in doc.ents:
try:
country = self._both_codes[i.text]
c_list.append(country)
except KeyError:
pass
count = Counter(c_list).most_common()
try:
top, top_count = count[0]
except:
top = ""
top_count = 0
try:
two, two_count = count[1]
except:
two = ""
two_count = 0
countries = (top, top_count, two, two_count)
return countries
def clean_entity(self, ent):
"""
Strip out extra words that often get picked up by spaCy's NER.
To do: preserve info about what got stripped out to help with ES/Geonames
resolution later.
Parameters
---------
ent: a spaCy named entity Span
Returns
-------
new_ent: a spaCy Span, with extra words stripped out.
"""
dump_list = ['province', 'the', 'area', 'airport', 'district', 'square',
'town', 'village', 'prison', "river", "valley", "provincial", "prison",
"region", "municipality", "state", "territory", "of", "in",
"county", "central"]
# maybe have 'city'? Works differently in different countries
# also, "District of Columbia". Might need to use cap/no cap
keep_positions = []
for word in ent:
if word.text.lower() not in dump_list:
keep_positions.append(word.i)
keep_positions = np.asarray(keep_positions)
try:
new_ent = ent.doc[keep_positions.min():keep_positions.max()+1]
# can't set directly
#new_ent.label_.__set__(ent.label_)
except ValueError:
new_ent = ent
return new_ent
def _feature_most_common(self, results):
"""
Find the most common country name in ES/Geonames results
Paramaters
----------
results: dict
output of `query_geonames`
Returns
-------
most_common: str
ISO code of most common country, or empty string if none
"""
try:
country_count = Counter([i['country_code3'] for i in results['hits']['hits']])
most_common = country_count.most_common()[0][0]
return most_common
except IndexError:
return ""
except TypeError:
return ""
def _feature_most_alternative(self, results, full_results = False):
"""
Find the placename with the most alternative names and return its country.
More alternative names are a rough measure of importance.
Paramaters
----------
results: dict
output of `query_geonames`
Returns
-------
most_alt: str
ISO code of country of place with most alternative names,
or empty string if none
"""
try:
alt_names = [len(i['alternativenames']) for i in results['hits']['hits']]
most_alt = results['hits']['hits'][np.array(alt_names).argmax()]
if full_results == True:
return most_alt
else:
return most_alt['country_code3']
except (IndexError, ValueError, TypeError):
return ""
def _feature_most_population(self, results):
"""
Find the placename with the largest population and return its country.
More population is a rough measure of importance.
Paramaters
----------
results: dict
output of `query_geonames`
Returns
-------
most_pop: str
ISO code of country of place with largest population,
or empty string if none
"""
try:
populations = [i['population'] for i in results['hits']['hits']]
most_pop = results['hits']['hits'][np.array(populations).astype("int").argmax()]
return most_pop['country_code3']
except Exception as e:
return ""
def _feature_word_embedding(self, text):
"""
Given a word, guess the appropriate country by word vector.
Parameters
---------
text: str
the text to extract locations from.
Returns
-------
country_picking: dict
The top two countries (ISO codes) and two measures
confidence for the first choice.
"""
try:
simils = np.dot(self._prebuilt_vec, text.vector)
except Exception as e:
#print("Vector problem, ", Exception, e)
return {"country_1" : "",
"confid_a" : 0,
"confid_b" : 0,
"country_2" : ""}
ranks = simils.argsort()[::-1]
best_index = ranks[0]
confid = simils.max()
confid2 = simils[ranks[0]] - simils[ranks[1]]
if confid == 0 or confid2 == 0:
return ""
country_code = self._cts[str(self._ct_nlp[ranks[0]])]
country_picking = {"country_1" : country_code,
"confid_a" : confid,
"confid_b" : confid2,
"country_2" : self._cts[str(self._ct_nlp[ranks[1]])]}
return country_picking
def _feature_first_back(self, results):
"""
Get the country of the first two results back from geonames.
Parameters
-----------
results: dict
elasticsearch results
Returns
-------
top: tuple
first and second results' country name (ISO)
"""
try:
first_back = results['hits']['hits'][0]['country_code3']
except (TypeError, IndexError):
# usually occurs if no Geonames result
first_back = ""
try:
second_back = results['hits']['hits'][1]['country_code3']
except (TypeError, IndexError):
second_back = ""
top = (first_back, second_back)
return top
def is_country(self, text):
"""Check if a piece of text is in the list of countries"""
ct_list = self._just_cts.keys()
if text in ct_list:
return True
else:
return False
    # NOTE(review): @lru_cache on an instance method keys on `self` and keeps
    # the Geoparser instance alive for the cache's lifetime. Tolerable because
    # a Geoparser is long-lived, but a per-instance cache would be cleaner.
    @lru_cache(maxsize=250)
    def query_geonames(self, placename):
        """
        Wrap search parameters into an elasticsearch query to the geonames index
        and return results.

        Tries, in order: an exact country match (restricted to feature_code
        PCLI), then an exact phrase match over name/asciiname/alternativenames,
        then a fuzzy (edit distance 1) match that still requires all terms.

        Parameters
        ---------
        placename: str
            the placename text extracted by NER system

        Returns
        -------
        out: dict
            the raw elasticsearch hits, restructured by
            `utilities.structure_results`
        """
        # first first, try for country name
        if self.is_country(placename):
            q = {"multi_match": {"query": placename,
                                 "fields": ['name', 'asciiname', 'alternativenames'],
                                "type" : "phrase"}}
            r = Q("match", feature_code='PCLI')
            res = self.conn.query(q).query(r)[0:5].execute() # always 5
            #self.country_exact = True
        else:
            # second, try for an exact phrase match
            # (^5 boosts exact name/asciiname matches over alternative names)
            q = {"multi_match": {"query": placename,
                                     "fields": ['name^5', 'asciiname^5', 'alternativenames'],
                                    "type" : "phrase"}}
            res = self.conn.query(q)[0:50].execute()
            # if no results, use some fuzziness, but still require all terms to be present.
            # Fuzzy is not allowed in "phrase" searches.
            if res.hits.total == 0:
                # tried wrapping this in a {"constant_score" : {"query": ... but made it worse
                q = {"multi_match": {"query": placename,
                                     "fields": ['name', 'asciiname', 'alternativenames'],
                                         "fuzziness" : 1,
                                        "operator":   "and"},
                        }
                #self.fuzzy = True # idea was to preserve this info as a feature, but not using state like this
                res = self.conn.query(q)[0:50].execute()
        es_result = utilities.structure_results(res)
        return es_result
    # NOTE(review): same @lru_cache-on-method caveat as query_geonames above.
    @lru_cache(maxsize=250)
    def query_geonames_country(self, placename, country):
        """
        Like query_geonames, but this time limited to a specified country.

        Parameters
        ---------
        placename: str
            the placename text extracted by NER system
        country: str
            ISO3 country code; hits are filtered on `country_code3`

        Returns
        -------
        out: dict
            the raw elasticsearch hits, restructured by
            `utilities.structure_results`
        """
        # first, try for an exact phrase match
        q = {"multi_match": {"query": placename,
                             "fields": ['name^5', 'asciiname^5', 'alternativenames'],
                             "type" : "phrase"}}
        r = Q("match", country_code3=country)
        res = self.conn.query(q).query(r)[0:50].execute()
        # if no results, use some fuzziness, but still require all terms to be present.
        # Fuzzy is not allowed in "phrase" searches.
        if res.hits.total == 0:
            # tried wrapping this in a {"constant_score" : {"query": ... but made it worse
            q = {"multi_match": {"query": placename,
                                 "fields": ['name', 'asciiname', 'alternativenames'],
                                     "fuzziness" : 1,
                                    "operator":   "and"},
                }
            r = Q("match", country_code3=country)
            res = self.conn.query(q).query(r)[0:50].execute()
        out = utilities.structure_results(res)
        return out
def _feature_location_type_mention(self, ent):
"""
Count forward 1 word from each entity, looking for defined terms that indicate
geographic feature types (e.g. "village" = "P").
Parameters
-----------
ent : spacy entity span
It has to be an entity to handle indexing in the document
Returns
--------
tuple (length 2)
(feature_code, feature_class) derived from explicit word usage
"""
P_list = ["city", "cities", "town", "towns", "villages", "village", "settlement",
"capital", "town", "towns", "neighborhood", "neighborhoods",
"municipality"]
ADM1_list = ["province", "governorate", "state", "department", "oblast",
"changwat"]
ADM2_list = ["district", "rayon", "amphoe", "county"]
A_other = ["region"]
AIRPORT_list = ["airport"]
TERRAIN_list = ["mountain", "mountains", "stream", "river"]
FOREST_list = ["forest"]
feature_positions = []
feature_class = feature_code = ""
interest_words = ent.doc[ent.end-1 : ent.end + 1] # last word or next word following
for word in interest_words: #ent.sent:
if ent.text in self._just_cts.keys():
feature_class = "A"
feature_code = "PCLI"
elif word.text.lower() in P_list:
feature_class = "P"
feature_code = ""
elif word.text.lower() in ADM1_list:
feature_class = "A"
feature_code = "ADM1"
elif word.text.lower() in ADM2_list:
feature_class = "A"
feature_code = "ADM2"
elif word.text.lower() in TERRAIN_list:
feature_class = "T"
feature_code = ""
elif word.text.lower() in AIRPORT_list:
feature_class = "S"
feature_code = "AIRP"
elif word.text.lower() in A_other:
feature_class = "A"
feature_code = ""
return (feature_class, feature_code)
    def make_country_features(self, doc, require_maj = False):
        """
        Create features for the country picking model. Function where all the individual
        feature maker functions are called and aggregated. (Formerly "process_text")

        Parameters
        -----------
        doc : str or spaCy doc
        require_maj : bool
            accepted for API compatibility but not read anywhere in this
            body -- TODO confirm whether callers still need it

        Returns
        -------
        task_list : list of dicts
            Each entry has the word, surrounding text, span, and the country picking features.
            This output can be put into Prodigy for labeling almost as-is (the "features" key needs
            to be renamed "meta" or be deleted.)
        """
        if not hasattr(doc, "ents"):
            doc = nlp(doc)
        # initialize the place to store finalized tasks
        task_list = []
        # get document vector
        #doc_vec = self._feature_word_embedding(text)['country_1']
        # get explicit counts of country names
        ct_mention, ctm_count1, ct_mention2, ctm_count2 = self._feature_country_mentions(doc)
        #  now iterate through the entities, skipping irrelevant ones and countries
        for ent in doc.ents:
            if not ent.text.strip():
                continue
            if ent.label_ not in ["GPE","LOC","FAC"]:
                continue
            # don't include country names (make a parameter)
            if ent.text.strip() in self._skip_list:
                continue
            ## just for training purposes
            #if ent.text.strip() in self._just_cts.keys():
            #    continue
                #skip_list.add(ent.text.strip())
            ent_label = ent.label_ # destroyed by trimming
            ent = self.clean_entity(ent)
            # vector for just the solo word
            vp = self._feature_word_embedding(ent)
            try:
                word_vec = vp['country_1']
                wv_confid = float(vp['confid_a'])
            except TypeError:
                # _feature_word_embedding returns "" (a str) on zero-confidence
                # results, so str-key indexing raises TypeError here
                word_vec = ""
                wv_confid = "0"  # NOTE(review): str here vs float above -- confirm downstream handles both
            # look for explicit mentions of feature names
            class_mention, code_mention = self._feature_location_type_mention(ent)
            ##### ES-based features
            try:
                result = self.query_geonames(ent.text)
            except ConnectionTimeout:
                result = ""
            # build results-based features
            # NOTE(review): most_common and second_back are computed but never
            # put into the feature dict below -- confirm if intentional
            most_alt = self._feature_most_alternative(result)
            most_common = self._feature_most_common(result)
            most_pop = self._feature_most_population(result)
            first_back, second_back = self._feature_first_back(result)
            # simple majority vote over the individual feature guesses; used
            # as the default label for the labeling/training workflow
            try:
                maj_vote = Counter([word_vec, most_alt,
                                    first_back, most_pop,
                                    ct_mention
                                    #doc_vec_sent, doc_vec
                                    ]).most_common()[0][0]
            except Exception as e:
                print("Problem taking majority vote: ", ent, e)
                maj_vote = ""
            if not maj_vote:
                maj_vote = ""
            # We only want all this junk for the labeling task. We just want to straight to features
            # and the model when in production.
            try:
                # span offsets are relative to the containing sentence
                start = ent.start_char - ent.sent.start_char
                end = ent.end_char - ent.sent.start_char
                iso_label = maj_vote
                try:
                    text_label = self._inv_cts[iso_label]
                except KeyError:
                    text_label = ""
                task = {"text" : ent.sent.text,
                        "label" : text_label, # human-readable country name
                        "word" : ent.text,
                        "spans" : [{
                            "start" : start,
                            "end" : end,
                            } # make sure to rename for Prodigy
                                  ],
                        "features" : {
                                "maj_vote" : iso_label,
                                "word_vec" : word_vec,
                                "first_back" : first_back,
                                #"doc_vec" : doc_vec,
                                "most_alt" : most_alt,
                                "most_pop" : most_pop,
                                "ct_mention" : ct_mention,
                                "ctm_count1" : ctm_count1,
                                "ct_mention2" : ct_mention2,
                                "ctm_count2" : ctm_count2,
                                "wv_confid" : wv_confid,
                                "class_mention" : class_mention, # inferred geonames class from mentions
                                "code_mention" : code_mention,
                                #"places_vec" : places_vec,
                                #"doc_vec_sent" : doc_vec_sent
                                } }
                task_list.append(task)
            except Exception as e:
                print(ent.text,)
                print(e)
        return task_list # rename this var
# Two modules that call `make_country_features`:
# 1. write out with majority vote for training
# 2. turn into features, run model, return countries
# A third, standalone function will convert the labeled JSON from Prodigy into
# features for updating the model.
def make_country_matrix(self, loc):
"""
Create features for all possible country labels, return as matrix for keras.
Parameters
----------
loc: dict
one entry from the list of locations and features that come out of make_country_features
Returns
--------
keras_inputs: dict with two keys, "label" and "matrix"
"""
top = loc['features']['ct_mention']
top_count = loc['features']['ctm_count1']
two = loc['features']['ct_mention2']
two_count = loc['features']['ctm_count2']
word_vec = loc['features']['word_vec']
first_back = loc['features']['first_back']
most_alt = loc['features']['most_alt']
most_pop = loc['features']['most_pop']
possible_labels = set([top, two, word_vec, first_back, most_alt, most_pop])
possible_labels = [i for i in possible_labels if i]
X_mat = []
for label in possible_labels:
inputs = np.array([word_vec, first_back, most_alt, most_pop])
x = inputs == label
x = np.asarray((x * 2) - 1) # convert to -1, 1
# get missing values
exists = inputs != ""
exists = np.asarray((exists * 2) - 1)
counts = np.asarray([top_count, two_count]) # cludgy, should be up with "inputs"
right = np.asarray([top, two]) == label
right = right*2 - 1
right[counts == 0] = 0
# get correct values
features = np.concatenate([x, exists, counts, right])
X_mat.append(np.asarray(features))
keras_inputs = {"labels": possible_labels,
"matrix" : np.asmatrix(X_mat)}
return keras_inputs
def infer_country(self, doc):
    """NLP a doc, find its entities, get their features, and return the model's country guess for each.
    Maybe use a better name.

    Parameters
    -----------
    doc: str or spaCy
        the document to country-resolve the entities in

    Returns
    -------
    proced: list of dict
        the feature output of "make_country_features" updated with the model's
        estimated country for each entity.
        E.g.:
            {'all_confidence': array([ 0.95783567,  0.03769876,  0.00454875], dtype=float32),
             'all_countries': array(['SYR', 'USA', 'JAM'], dtype='<U3'),
             'country_conf': 0.95783567,
             'country_predicted': 'SYR',
             'features': {'ct_mention': '',
                          'ct_mention2': '',
                          'ctm_count1': 0,
                          'ctm_count2': 0,
                          'first_back': 'JAM',
                          'maj_vote': 'SYR',
                          'most_alt': 'USA',
                          'most_pop': 'SYR',
                          'word_vec': 'SYR',
                          'wv_confid': '29.3188'},
             'label': 'Syria',
             'spans': [{'end': 26, 'start': 20}],
             'text': "There's fighting in Aleppo and Homs.",
             'word': 'Aleppo'}
    """
    if not hasattr(doc, "ents"):
        # raw string: run spaCy first
        doc = nlp(doc)
    proced = self.make_country_features(doc, require_maj=False)
    if not proced:
        pass
        # logging!
        #print("Nothing came back from make_country_features")
    feat_list = []
    #proced = self.ent_list_to_matrix(proced)
    for loc in proced:
        feat = self.make_country_matrix(loc)
        #labels = loc['labels']
        feat_list.append(feat)
        #try:
    # for each potential country...
    for n, i in enumerate(feat_list):
        labels = i['labels']
        try:
            # one probability per candidate country, best first after ranking
            prediction = self.country_model.predict(i['matrix']).transpose()[0]
            ranks = prediction.argsort()[::-1]
            labels = np.asarray(labels)[ranks]
            prediction = prediction[ranks]
        except ValueError:
            # empty/ill-shaped feature matrix: fall back to "no country"
            prediction = np.array([0])
            labels = np.array([""])
        # NOTE(review): `loc` is the leftover variable from the loop above,
        # so if these assignments are meant to be per-entity they should
        # target proced[n] — confirm against upstream mordecai.
        loc['country_predicted'] = labels[0]
        loc['country_conf'] = prediction[0]
        loc['all_countries'] = labels
        loc['all_confidence'] = prediction
    return proced
def get_admin1(self, country_code2, admin1_code):
    """Resolve a geonames admin1 code to its human-readable name.

    Parameters
    ---------
    country_code2: string
        The two character country code
    admin1_code: string
        The admin1 code to be converted. (Admin1 is the highest
        subnational political unit: state/region/province/etc.)

    Returns
    ------
    string: the admin1 name, or "NA" when the combination is unknown.
    """
    lookup_key = "{}.{}".format(country_code2, admin1_code)
    # dict.get replaces the original try/except KeyError dance
    return self._admin1_dict.get(lookup_key, "NA")
def features_for_rank(self, proc, results):
    """Compute features for ranking results from ES/geonames

    Parameters
    ----------
    proc : dict
        One dictionary from the list that comes back from geoparse or from make_country_features (doesn't matter)
    results : dict
        the response from a geonames query

    Returns
    --------
    X : numpy matrix
        holding the computed features, one row per geonames hit
    meta: list of dicts
        including feature information
    """
    feature_list = []
    meta = []
    results = results['hits']['hits']
    search_name = proc['word']
    code_mention = proc['features']['code_mention']
    class_mention = proc['features']['class_mention']
    for rank, entry in enumerate(results):
        # go through the results and calculate some features
        # get population number and exists
        try:
            pop = int(entry['population'])
            has_pop = 1
        except Exception as e:
            pop = 0
            has_pop = 0
        if pop > 0:
            logp = np.log(pop)
        else:
            logp = 0
        ### order the results came back
        adj_rank = 1 / np.log(rank + 2)
        # alternative names
        len_alt = len(entry['alternativenames'])
        # NOTE(review): np.log(0) yields -inf (with a runtime warning) when
        # there are no alternative names — confirm the model tolerates it.
        adj_alt = np.log(len_alt)
        ### feature class (just boost the good ones)
        if entry['feature_class'] == "A" or entry['feature_class'] == "P":
            good_type = 1
        else:
            good_type = 0
            #fc_score = 3
        ### feature class/code matching
        if entry['feature_class'] == class_mention:
            good_class_mention = 1
        else:
            good_class_mention = 0
        if entry['feature_code'] == code_mention:
            good_code_mention = 1
        else:
            good_code_mention = 0
        ### edit distance between the query and the canonical place name
        ed = editdistance.eval(search_name, entry['name'])
        ed = ed # shrug
        # maybe also get min edit distance to alternative names...
        # feature order matters: the rank model was trained on this layout
        features = [has_pop, pop, logp, adj_rank, len_alt, adj_alt,
                    good_type, good_class_mention, good_code_mention, ed]
        m = self.format_geonames(entry)
        feature_list.append(features)
        meta.append(m)
    #meta = geo.format_geonames(results)
    X = np.asmatrix(feature_list)
    return (X, meta)
def ranker(self, X, meta):
    """
    Sort the candidate-place rows (and their metadata) by relevance.

    The relevance score of a candidate is simply the sum of its feature
    row; rows come back in descending score order.
    """
    # matrix -> flat array of per-row scores
    scores = np.squeeze(np.asarray(X.sum(axis=1).transpose()))
    # argsort ascending, then reverse for best-first (keeps the original
    # tie-break behaviour exactly)
    order = scores.argsort()[::-1]
    reordered_meta = [meta[idx] for idx in order]
    return (X[order], reordered_meta)
def format_for_prodigy(self, X, meta, placename, return_feature_subset=False):
    """
    Build Prodigy coding tasks for the best candidate places.

    Candidates are ranked with ``self.ranker`` and the top four are turned
    into readable descriptions (e.g. '"Berlin", a capital in DEU, id: 123')
    so the annotator can pick the right one.

    Parameters
    ----------
    X: matrix
        feature rows for ranking. Output of features_for_rank()
    meta: list of dictionaries
        other place information. Output of features_for_rank().
    placename: str
        the extracted place name from text (not used in the description)
    return_feature_subset: bool
        when True, also return the kept metadata and feature rows

    Returns
    --------
    list of dicts (optionally plus trimmed meta and matrix)
    """
    ranked_X, ranked_meta = self.ranker(X, meta)
    # keep only the four most plausible candidates
    ranked_meta = ranked_meta[:4]
    ranked_X = ranked_X[:4]
    tasks = []
    for position, candidate in enumerate(ranked_meta):
        try:
            code_text = self._code_to_text[candidate['feature_code']]
        except KeyError:
            code_text = ''
        description = '"{}", a {} in {}, id: {}'.format(
            candidate['place_name'], code_text,
            candidate['country_code3'], candidate['geonameid'])
        tasks.append({"id": position + 1, "text": description})
    if return_feature_subset:
        return (tasks, ranked_meta, ranked_X)
    return tasks
def format_geonames(self, entry, searchterm=None):
    """Reduce one ES/geonames hit to the fields Mordecai reports.

    To do:
    - switch to model picking

    Parameters
    -----------
    entry : dict
        a single ES/geonames result
    searchterm : str
        (not implemented). Needed for better results picking

    Returns
    --------
    dict with admin1, lat/lon, country code, geonames id, place name and
    feature class/code; all values are "" when the entry is missing or
    malformed.
    """
    try:
        lat, lon = entry['coordinates'].split(",")
        return {"admin1": self.get_admin1(entry['country_code2'], entry['admin1_code']),
                "lat": lat,
                "lon": lon,
                "country_code3": entry["country_code3"],
                "geonameid": entry["geonameid"],
                "place_name": entry["name"],
                "feature_class": entry["feature_class"],
                "feature_code": entry["feature_code"]}
    except (IndexError, TypeError):
        # two conditions for these errors:
        # 1. there are no results for some reason (Index)
        # 2. entry is "" because the country model was below the threshold
        return {field: "" for field in ("admin1", "lat", "lon",
                                        "country_code3", "geonameid",
                                        "place_name", "feature_class",
                                        "feature_code")}
def clean_proced(self, proced):
    """Strip debugging-only keys from each geoparsed location dict.

    These keys (raw features, model confidences, original text, ...) are
    mostly interesting for debugging but won't be relevant for most users.

    Parameters
    ----------
    proced : list of dict
        output of geoparse()/infer_country(); modified in place

    Returns
    -------
    the same list, with the debugging keys removed
    """
    debug_keys = ('all_countries', 'matrix', 'all_confidence',
                  'place_confidence', 'text', 'label', 'features')
    for loc in proced:
        for key in debug_keys:
            # pop() with a default replaces the original run of seven
            # separate try/del/except KeyError blocks
            loc.pop(key, None)
    return proced
def geoparse(self, doc, verbose=False):
    """Main geoparsing function. Text to extracted, resolved entities.

    Parameters
    ----------
    doc : str or spaCy
        The document to be geoparsed. Can be either raw text or already spacy processed.
        In some cases, it makes sense to bulk parse using spacy's .pipe() before sending
        through to Mordecai
    verbose : bool
        when True, keep the debugging keys (features, confidences, ...)
        instead of stripping them with clean_proced()

    Returns
    -------
    proced : list of dicts
        Each entity gets an entry in the list, with the dictionary including geo info, spans,
        and optionally, the input features.
    """
    if not hasattr(doc, "ents"):
        # raw string: run spaCy first
        doc = nlp(doc)
    proced = self.infer_country(doc)
    if not proced:
        pass
        # logging!
        #print("Nothing came back from infer_country...")
    for loc in proced:
        if loc['country_conf'] >= self.country_threshold: # shrug
            res = self.query_geonames_country(loc['word'], loc['country_predicted'])
        elif loc['country_conf'] < self.country_threshold:
            res = ""
            # if the confidence is too low, don't use the country info
        try:
            _ = res['hits']['hits']
            # If there's no geonames result, what to do?
            # For now, just continue.
            # In the future, delete? Or add an empty "loc" field?
        except (TypeError, KeyError):
            continue
        # Pick the best place
        X, meta = self.features_for_rank(loc, res)
        if X.shape[1] == 0:
            # This happens if there are no results...
            continue
        all_tasks, sorted_meta, sorted_X = self.format_for_prodigy(X, meta, loc['word'], return_feature_subset=True)
        # pad to exactly 4 candidate rows: the rank model expects a
        # fixed-size, flattened input
        fl_pad = np.pad(sorted_X, ((0, 4 - sorted_X.shape[0]), (0, 0)), 'constant')
        fl_unwrap = fl_pad.flatten()
        prediction = self.rank_model.predict(np.asmatrix(fl_unwrap))
        place_confidence = prediction.max()
        loc['geo'] = sorted_meta[prediction.argmax()]
        loc['place_confidence'] = place_confidence
    if not verbose:
        proced = self.clean_proced(proced)
    return proced
def batch_geoparse(self, text_list):
    """
    Geoparse many documents at once.

    The speed improvements come from using spaCy's ``nlp.pipe`` and from
    multithreading calls to ``geoparse``; both use ``self.n_threads``
    workers.

    Parameters
    ----------
    text_list : list of strs
        List of documents. The documents should not have been pre-processed by spaCy.

    Returns
    -------
    list of list of dicts
        one list of geolocated entities per input document, same order
        and length as the input
    """
    parsed_docs = nlp.pipe(text_list, n_threads=self.n_threads)
    workers = ThreadPool(self.n_threads)
    results = workers.map(self.geoparse, parsed_docs)
    workers.close()
    workers.join()
    return results
| [
"ahalterman0@gmail.com"
] | ahalterman0@gmail.com |
febfff3c83b53d29faf584b7e8420bb1c1c3d7ac | 36f3d006fb278e08729a048f71e100f748586e1d | /src/predictor/__init__.py | 8d2e444e1021f779b6e088fb68f7466f3bd05f27 | [
"MIT"
] | permissive | tinomolhash/projector_course_work | 2939a924f605be72988b9c420c8aee171a707337 | b6b03de5933bb44a72ac99ee6a6ec2ea9c24d8a9 | refs/heads/master | 2021-02-06T18:29:07.838939 | 2020-02-29T09:40:10 | 2020-02-29T09:40:10 | 243,939,584 | 0 | 0 | MIT | 2020-02-29T09:24:19 | 2020-02-29T09:24:18 | null | UTF-8 | Python | false | false | 42 | py | from .news_predictor import NewsPredictor
| [
"radchenko-v.i.p@mail.ru"
] | radchenko-v.i.p@mail.ru |
e8e47bd8bd9911bbf774dc986a6f1b9219a03379 | 9f915c2e68a7a46be8ee9909486a7a49eae21c3b | /Labs/lab1/venv/bin/pip3 | d2d8f59f6d5984f6aa25a8436fba187d113377a9 | [] | no_license | nsolovyoff/DB | 69423f0bbc9aafcb4285e75616e6bf83730b449f | ff4a190fcb1e6c758c9a178e23c9b529b3b65cbe | refs/heads/master | 2022-08-20T09:35:10.792763 | 2020-05-28T15:40:57 | 2020-05-28T15:40:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | #!/Users/nikita/Desktop/bd2_lab1/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"nikita@Nikitas-MacBook-Pro.local"
] | nikita@Nikitas-MacBook-Pro.local | |
6b1d4d0ca0677bf2be973bbaede1ff3383e094c9 | 6a84f5d43788f283a623137ee8f0ff9a32231243 | /startup.py | 442920d9eac2dcc3e62242cd45060da844aa5cc0 | [] | no_license | harshj888/neuralnetwork | f97cfb4e6ff99038ec04864e105ce3fa126a050e | 909aa0452f32472a5e8cd00efaf0ea6718e732ac | refs/heads/master | 2022-11-29T04:34:59.838530 | 2020-08-09T16:35:54 | 2020-08-09T16:35:54 | 286,275,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 9 13:13:14 2020
@author: Harsh
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Activation,Layer,Lambda
startup = pd.read_csv("D:\STUDY\Excelr Assignment\Assignment 16 - Neural Networks\Startups.csv")
##Creating dummy variables for the state column
startup_dummy = pd.get_dummies(startup["State"])
Startup = pd.concat([startup,startup_dummy],axis=1)
Startup.drop(["State"],axis=1, inplace =True)
Data = Startup.describe()
##The scales of the data are differnt so we normalise
def norm_func(i):
    """Min-max normalise a Series/DataFrame to the [0, 1] range."""
    lo = i.min()
    span = i.max() - lo
    return (i - lo) / span
# Min-max-scale the whole table. Note `Start_up` (with underscore) is a
# new, normalised frame — distinct from the raw `Startup` above.
Start_up = norm_func(Startup)
# Using this data set, we build the model below.
def prep_model(hidden_dim):
    """Build a dense feed-forward regression network.

    hidden_dim: list of layer sizes, e.g. [6, 50, 1] means 6 inputs,
    one hidden layer of 50 relu units, and a single linear output.
    """
    model = Sequential()
    for i in range(1,len(hidden_dim)-1):
        if (i==1):
            # first hidden layer also declares the input dimension
            model.add(Dense(hidden_dim[i],input_dim=hidden_dim[0],kernel_initializer="normal",activation="relu"))
        else:
            model.add(Dense(hidden_dim[i],activation="relu"))
    # linear output layer for regression
    model.add(Dense(hidden_dim[-1]))
    # NOTE(review): "accuracy" is not meaningful for MSE regression;
    # consider a regression metric such as MAE — confirm before changing.
    model.compile(loss="mean_squared_error",optimizer="adam",metrics = ["accuracy"])
    return (model)
# Column 3 is presumably the Profit target; the rest are predictors —
# confirm against the CSV's column order.
predictors = Start_up.iloc[:,[0,1,2,4,5,6]]
target = Start_up.iloc[:,3]
# Partitioning the data (75% train / 25% test)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(predictors,target,test_size = 0.25)
first_model = prep_model([6,50,1])
first_model.fit(np.array(x_train),np.array(y_train),epochs=900)
pred_train = first_model.predict(np.array(x_train))
pred_train = pd.Series([i[0] for i in pred_train])
# NOTE(review): pred_train has a fresh 0..n index while y_train keeps the
# shuffled index from train_test_split; pandas aligns on index here, which
# can yield NaNs — consider y_train.reset_index(drop=True). Same below.
rmse_value = np.sqrt(np.mean((pred_train-y_train)**2))
np.corrcoef(pred_train,y_train)
# Visualising predicted vs actual on the training split
plt.plot(pred_train,y_train,"bo")
## Predicting on test data
pred_test = first_model.predict(np.array(x_test))
pred_test = pd.Series([i[0] for i in pred_test])
rmse_test = np.sqrt(np.mean((pred_test-y_test)**2))
np.corrcoef(pred_test,y_test)
# Visualising predicted vs actual on the test split
plt.plot(pred_test,y_test,"bo")
| [
"noreply@github.com"
] | noreply@github.com |
4ed7c9073da93dc0f0202f19f7c23738b3dd80b6 | 600eac7f1edf13005ece6e39672bdc311a580bc0 | /ssh_login.py | 94019a665767681d06d414fae2a7a650a5938bec | [] | no_license | SIGILIPELLI/ssh_login_program | 61cd4096e115e51a6d2c2a0d3c3eead09f17d60e | 5eb935f4d0f6a343da91e01f2f580f05b0c7abe8 | refs/heads/master | 2020-05-19T22:14:17.688479 | 2019-05-07T15:13:35 | 2019-05-07T15:13:35 | 185,242,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | import pxssh
import getpass

# Interactive SSH session demo: log in via pxssh, run a few commands and
# print their output. Fixed: the original mixed Python 2 and Python 3
# syntax — `except X, e:` and `print "..."` are SyntaxErrors under the
# Python 3 this script otherwise targets (it uses input() and print()).
try:
    s = pxssh.pxssh()
    hostname = input('hostname: ')
    username = input('username: ')
    password = getpass.getpass('password: ')
    s.login(hostname, username, password)
    s.sendline('uptime')  # run a command
    s.prompt()            # match the prompt
    print(s.before)       # print everything before the prompt.
    s.sendline('ls -l')
    s.prompt()
    print(s.before)
    s.sendline('df')
    s.prompt()
    print(s.before)
    s.logout()
except pxssh.ExceptionPxssh as e:
    print("pxssh failed on login.")
    print(str(e))
| [
"noreply@github.com"
] | noreply@github.com |
4afb6395738c94f6f3c5f69cd5aba31fac3f7ab9 | 23a56e0555d6b27aa444d8396ec32f9d2b678a39 | /07_ur_online/shifted_frames_setup/compas/packages/compas_fabrication/fabrication/grasshopper/utilities/sets.py | 0aa962839f412ffad8a96c1e5c3841c1df6bb963 | [
"MIT"
] | permissive | dtbinh/T1_python-exercises | 2ce1b01bc71f8032bbe8fb4ef8f71b648dcde1c5 | f4710c3dc2ba8ddb3e3e9069ab8d65df674463ab | refs/heads/master | 2020-04-04T20:00:52.191601 | 2018-01-09T08:14:36 | 2018-01-09T08:14:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | from Grasshopper import DataTree as Tree
from Grasshopper.Kernel.Data import GH_Path as Path
from System import Array
def list_to_tree(alist, none_and_holes=False, base_path=[0]):
    """
    Transforms nestings of lists or tuples to a Grasshopper DataTree

    Usage:
    mytree = [ [1,2], 3, [],[ 4,[5]] ]
    a = list_to_tree(mytree)
    b = list_to_tree(mytree, none_and_holes=True, base_path=[7,1])

    Note: the mutable default `base_path` is safe here because it is only
    ever copied (`base_path[:]`), never mutated.
    """
    def process_one_item(alist, tree, track):
        # `track` is the list of branch indices leading to this level
        path = Path(Array[int](track))
        if len(alist) == 0 and none_and_holes:
            # keep empty branches visible instead of dropping them
            tree.EnsurePath(path)
            return
        for i,item in enumerate(alist):
            # NOTE(review): relies on '__iter__' to detect nesting; this
            # treats strings as leaves under IronPython 2 — confirm if the
            # host interpreter changes.
            if hasattr(item, '__iter__'): #if list or tuple
                track.append(i)
                process_one_item(item, tree, track)
                track.pop()
            else:
                if none_and_holes:
                    # Insert preserves the item's index, leaving holes
                    tree.Insert(item, path, i)
                elif item is not None:
                    tree.Add(item, path)
    tree = Tree[object]()
    if alist is not None:
        process_one_item(alist, tree, base_path[:])
    return tree
"rusenova"
] | rusenova |
18a6e21aeda01c6a19f315813bff0d01b04146e0 | b66450f669095b0ad013ea82cb1ae575b83d74c3 | /Interview Preparation 2/maze.py | eae0d33ab121c0679d4a9e3e440d6211bec9b2ad | [] | no_license | aulb/ToAsk | 2649a3fad357820e3c8809816967dfb274704735 | 1e54c76ab9f7772316186db74496735ca1da65ce | refs/heads/master | 2021-05-01T20:35:14.062678 | 2020-02-23T07:44:29 | 2020-02-23T07:44:29 | 33,289,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | Implement a function to generate a maze.
from random import randint
import enum
class Condition(enum.Enum):
    """Wall state of the current maze cell relative to its neighbours.

    Fixed: the original subclassed the ``enum`` *module* itself
    (``class Condition(enum):``), which raises TypeError at class-creation
    time; enumerations must derive from ``enum.Enum``.
    """
    CLEAR = 0
    RIGHT = 1
    BOTTOM = 2
class Direction(enum.Enum):
    """Movement direction for the maze random walk.

    Fixed: the original subclassed the ``enum`` *module* itself
    (``class Direction(enum):``), a TypeError; enumerations must derive
    from ``enum.Enum``.
    """
    RIGHT = 0
    LEFT = 1
    UP = 2
    DOWN = 3
# Create maze from top left to bottom right
def generate_maze(row, col):
    """Scaffold for generating a (row x col) maze from top-left to bottom-right.

    Returns [] for negative dimensions. NOTE(review): the body below is
    unfinished exercise code — the helpers are defined but never invoked,
    so for valid dimensions the function currently returns None.

    Fixed here: non-ASCII 'smart' quote characters (SyntaxError), the
    direction checks testing enum-member truthiness instead of comparing
    `condition`, and `path` being initialised as a dict but used as a list.
    """
    if row < 0 or col < 0:
        return []
    # fresh grid of zeroes
    maze = [[0 for _ in range(col)] for _ in range(row)]

    def get_direction(condition=Condition.CLEAR):
        # Weighted random direction: RIGHT/DOWN dominate so the walk
        # trends toward the bottom-right exit.
        direction = randint(1, 10)
        if 1 <= direction <= 4:
            if condition == Condition.CLEAR:
                return Direction.RIGHT
            elif condition == Condition.RIGHT:
                return Direction.DOWN
            else:
                return Direction.RIGHT
        elif 5 <= direction <= 8:
            if condition == Condition.CLEAR:
                return Direction.DOWN
            elif condition == Condition.RIGHT:
                return Direction.DOWN
            else:
                return Direction.RIGHT
        elif direction == 9:
            if condition == Condition.CLEAR:
                return Direction.LEFT
            elif condition == Condition.RIGHT:
                return Direction.DOWN
            else:
                return Direction.UP
        else:
            if condition == Condition.CLEAR:
                return Direction.UP
            elif condition == Condition.RIGHT:
                return Direction.DOWN
            else:
                return Direction.RIGHT

    def create_key(cell):
        # 'i,j' string key for a cell (currently unused — kept for the
        # planned visited-set bookkeeping)
        return '{},{}'.format(cell[0], cell[1])

    def create_path(maze):
        # Random walk from [0, 0] to the bottom-right corner; returns the
        # list of visited cells.
        path = [[0, 0]]
        visited_rows = [0 for _ in range(len(maze))]
        current_cell = [0, 0]
        condition = Condition.CLEAR
        while current_cell != [len(maze) - 1, len(maze[0]) - 1]:
            new_direction = get_direction(condition)
            if new_direction == Direction.RIGHT:
                if current_cell[1] + 1 <= len(maze[0]) - 1:
                    current_cell[1] += 1
                    path.append(current_cell[:])
            if new_direction == Direction.LEFT:
                if current_cell[1] - 1 >= 0:
                    current_cell[1] -= 1
                    path.append(current_cell[:])
            if new_direction == Direction.UP:
                if current_cell[0] - 1 >= 0:
                    current_cell[0] -= 1
                    path.append(current_cell[:])
            if new_direction == Direction.DOWN:
                if current_cell[0] + 1 <= len(maze) - 1:
                    current_cell[0] += 1
                    path.append(current_cell[:])
        return path

    def draw_wall(maze, path):
        # TODO: carve walls along the generated path
        pass
"aalbertuntung@gmail.com"
] | aalbertuntung@gmail.com |
c055e54e0e196a95c7ae0b56dfe9a937db5f68ec | d92568c175ab73360198d9e0368a1f537ec74355 | /telnyx/api_resources/sim_card_order.py | 5e9cad181ed4520ed34e70852fe221067f1a957f | [
"MIT"
] | permissive | team-telnyx/telnyx-python | 7c62fc83cca30e41d215cc6f1aef03e8af577fad | f662b18f09f0ceadbf5d202ddde161f392e58094 | refs/heads/master | 2023-08-07T18:53:42.205519 | 2023-02-15T00:45:14 | 2023-02-15T00:45:14 | 194,716,223 | 41 | 10 | MIT | 2022-06-22T07:06:18 | 2019-07-01T17:32:08 | Python | UTF-8 | Python | false | false | 433 | py | from __future__ import absolute_import, division, print_function
from telnyx.api_resources.abstract import CreateableAPIResource, ListableAPIResource
class SIMCardOrder(CreateableAPIResource, ListableAPIResource):
    """Telnyx SIM card order API resource (supports create and list)."""
    OBJECT_NAME = "sim_card_order"
class SIMCardOrderPreview(CreateableAPIResource):
    """Telnyx SIM card order preview API resource (create-only)."""
    OBJECT_NAME = "sim_card_order_preview"

    @classmethod
    def class_url(cls):
        # the preview endpoint uses a fixed, non-pluralised path, so the
        # default URL derived from OBJECT_NAME is overridden here
        return "/v2/sim_card_order_preview"
| [
"noreply@github.com"
] | noreply@github.com |
a8de8d2604e7b489da5edbaa37e4b59d778d2ff3 | 937c0d7c0ed0224fed676fe630b78d8c6cdc1cfe | /usr/share/dh-python/dhpython/interpreter.py | 03779cfc5f861cf5ae8c76b74f12575a73491bba | [] | no_license | Sayardiss/filesystem-rpi-projet2su | 5ec5aad1704dbe37d18b50ba83ab67a87199af16 | b7b7a1d93dec4f96673ecf11cd290e1db0657d59 | refs/heads/master | 2022-11-25T14:20:35.867296 | 2018-02-07T13:24:37 | 2018-02-07T13:24:37 | 118,009,115 | 2 | 1 | null | 2022-11-21T04:32:49 | 2018-01-18T16:36:17 | Python | UTF-8 | Python | false | false | 20,112 | py | # Copyright © 2012-2013 Piotr Ożarowski <piotr@debian.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import re
from os.path import join, split
from dhpython import INTERPRETER_DIR_TPLS, PUBLIC_DIR_RE, OLD_SITE_DIRS
SHEBANG_RE = re.compile(r'''
(?:\#!\s*){0,1} # shebang prefix
(?P<path>
.*?/bin/.*?)?
(?P<name>
python|pypy)
(?P<version>
\d[\.\d]*)?
(?P<debug>
-dbg)?
(?P<options>.*)
''', re.VERBOSE)
EXTFILE_RE = re.compile(r'''
(?P<name>.*?)
(?:\.
(?P<stableabi>abi\d+)
|(?:\.
(?P<soabi>
(?P<impl>cpython|pypy)
-
(?P<ver>\d{2})
(?P<flags>[a-z]*)
)?
(?:
(?:(?<!\.)-)? # minus sign only if soabi is defined
(?P<multiarch>[^/]*?)
)?
))?
(?P<debug>_d)?
\.so$''', re.VERBOSE)
log = logging.getLogger('dhpython')
class Interpreter:
    """
    :attr path: /usr/bin/ in most cases
    :attr name: pypy or python (even for python3 and python-dbg) or empty string
    :attr version: interpreter's version
    :attr debug: -dbg version of the interpreter
    :attr impl: implementation (cpython2, cpython3 or pypy)
    :attr options: options parsed from shebang
    :type path: str
    :type name: str
    :type version: Version or None
    :type debug: bool
    :type impl: str
    :type options: tuple
    """
    # class-level defaults; instances override them via __setattr__
    path = '/usr/bin/'
    name = 'python'
    version = None
    debug = False
    impl = ''
    options = ()
    # class-level memo shared by all instances (e.g. compiled regexes)
    _cache = {}
def __init__(self, value=None, path=None, name=None, version=None,
             debug=None, impl=None, options=None):
    """Initialise from another Interpreter, a version string or a shebang.

    Explicit keyword arguments always win over values derived from
    *value* (whether *value* is a copied Interpreter or a parsed shebang).
    """
    params = locals()
    del params['self']
    del params['value']
    if isinstance(value, Interpreter):
        # copy constructor: fill every field not explicitly given
        for key in params.keys():
            if params[key] is None:
                params[key] = getattr(value, key)
    elif value:
        if value.replace('.', '').isdigit() and not version:
            # version string
            params['version'] = Version(value)
        else:
            # shebang or other string
            for key, val in self.parse(value).items():
                # prefer values passed to constructor over shebang ones:
                if params[key] is None:
                    params[key] = val
    for key, val in params.items():
        if val is not None:
            setattr(self, key, val)
        elif key == 'version':
            # version is assigned even when None so __setattr__ can keep
            # `impl` consistent
            setattr(self, key, val)
def __setattr__(self, name, value):
    """Validate attribute writes and keep `impl` in sync with
    `name`/`version`; never overwrites identity fields with None."""
    if name == 'name':
        if value not in ('python', 'pypy', ''):
            raise ValueError("interpreter not supported: %s" % value)
        if value == 'python':
            if self.version:
                if self.version.major == 3:
                    self.__dict__['impl'] = 'cpython3'
                else:
                    self.__dict__['impl'] = 'cpython2'
        elif value == 'pypy':
            self.__dict__['impl'] = 'pypy'
    elif name == 'version' and value is not None:
        # normalise to a Version instance
        value = Version(value)
        if not self.impl and self.name == 'python':
            if value.major == 3:
                self.impl = 'cpython3'
            else:
                self.impl = 'cpython2'
    if name in ('path', 'name', 'impl', 'options') and value is None:
        # keep class-level defaults rather than storing None
        pass
    elif name == 'debug':
        self.__dict__[name] = bool(value)
    else:
        self.__dict__[name] = value
def __repr__(self):
    """Full invocation string, e.g. '/usr/bin/python3.4 -O'."""
    prefix = self.path if self.path.endswith('/') else self.path + '/'
    parts = [prefix + self._vstr(self.version)]
    parts.extend(self.options)
    return ' '.join(parts)
def __str__(self):
    """Interpreter name with version, e.g. 'python3.4' or 'pypy'."""
    return self._vstr(self.version)
def _vstr(self, version=None, consider_default_ver=False):
    """Render the name+version string; with consider_default_ver=True the
    implementation's default version collapses to the plain binary name
    ('python3' / 'python')."""
    if self.impl == 'pypy':
        # TODO: will Debian support more than one PyPy version?
        return self.name
    version = version or self.version or ''
    if consider_default_ver and (not version or version == self.default_version):
        version = '3' if self.impl == 'cpython3' else ''
    elif isinstance(version, Version) and version == Version(major=2):
        version = ''  # do not promote /usr/bin/python2
    if self.debug:
        return 'python{}-dbg'.format(version)
    return self.name + str(version)
def binary(self, version=None):
    """Return the absolute path of the interpreter binary."""
    return '{}{}'.format(self.path, self._vstr(version))
@property
def binary_dv(self):
    """Like binary(), but returns path to default interpreter symlink
    if version matches default one for given implementation.
    """
    return '{}{}'.format(self.path, self._vstr(consider_default_ver=True))
@property
def default_version(self):
    """Debian's default version for this implementation; None if the
    implementation is not yet known."""
    return default(self.impl) if self.impl else None
@staticmethod
def parse(shebang):
    """Return dict with parsed shebang

    >>> sorted(Interpreter.parse('/usr/bin/python3.2-dbg').items())
    [('debug', '-dbg'), ('name', 'python'), ('options', ()), ('path', '/usr/bin/'), ('version', '3.2')]
    >>> sorted(Interpreter.parse('#! /usr/bin/python3.2').items())
    [('debug', None), ('name', 'python'), ('options', ()), ('path', '/usr/bin/'), ('version', '3.2')]
    >>> sorted(Interpreter.parse('/usr/bin/python3.2-dbg --foo --bar').items())
    [('debug', '-dbg'), ('name', 'python'), ('options', ('--foo', '--bar')),\
 ('path', '/usr/bin/'), ('version', '3.2')]
    """
    result = SHEBANG_RE.search(shebang)
    if not result:
        return {}
    result = result.groupdict()
    if 'options' in result:
        # TODO: do we need "--key value" here?
        result['options'] = tuple(result['options'].split())
    if result['name'] == 'python' and result['version'] is None:
        # a bare "python" means Python 2 here
        result['version'] = '2'
    return result
@classmethod
def from_file(cls, fpath):
    """Create an Interpreter by reading and parsing the file's shebang."""
    with open(fpath, 'rb') as fp:
        raw = fp.read(96)
        # NUL bytes mean a binary, not a script with a shebang
        if b"\0" in raw:
            raise ValueError('cannot parse binary file')
    # only the first line can carry a shebang
    first_line = str(raw, 'utf-8').split('\n')[0]
    if not first_line.startswith('#!'):
        raise ValueError("doesn't look like a shebang: %s" % first_line)
    parsed = cls.parse(first_line)
    if not parsed:
        raise ValueError("doesn't look like a shebang: %s" % first_line)
    interpreter = Interpreter()
    for attr, val in parsed.items():
        setattr(interpreter, attr, val)
    return interpreter
def sitedir(self, package=None, version=None, gdb=False):
    """Return path to site-packages directory.

    Note that returned path is not the final location of .py files

    >>> i = Interpreter('python')
    >>> i.sitedir(version='3.1')
    '/usr/lib/python3/dist-packages/'
    >>> i.sitedir(version='2.5')
    '/usr/lib/python2.5/site-packages/'
    >>> i.sitedir(version=Version('2.7'))
    '/usr/lib/python2.7/dist-packages/'
    >>> i.sitedir(version='3.1', gdb=True, package='python3-foo')
    'debian/python3-foo/usr/lib/debug/usr/lib/python3/dist-packages/'
    >>> i.sitedir(version=Version('3.2'))
    '/usr/lib/python3/dist-packages/'
    """
    try:
        version = Version(version or self.version)
    except Exception as err:
        raise ValueError("cannot find valid version: %s" % err)
    # NOTE: Version's << / >> operators appear to mean strict version
    # less-than / greater-than (see dhpython.version)
    if self.impl == 'pypy':
        path = '/usr/lib/pypy/dist-packages/'
    elif version << Version('2.6'):
        path = "/usr/lib/python%s/site-packages/" % version
    elif version << Version('3.0'):
        path = "/usr/lib/python%s/dist-packages/" % version
    else:
        # all Python 3 versions share one unversioned dist-packages dir
        path = '/usr/lib/python3/dist-packages/'
    if gdb:
        path = "/usr/lib/debug%s" % path
    if package:
        path = "debian/%s%s" % (package, path)
    return path
def old_sitedirs(self, package=None, version=None, gdb=False):
    """Return deprecated paths to site-packages directories."""
    try:
        version = Version(version or self.version)
    except Exception as err:
        raise ValueError("cannot find valid version: %s" % err)
    dirs = []
    # OLD_SITE_DIRS maps implementation -> list of templates; entries are
    # either format strings or callables taking the version
    for template in OLD_SITE_DIRS.get(self.impl, []):
        if isinstance(template, str):
            dirs.append(template.format(version))
        else:
            formatted = template(version)
            if formatted is not None:
                dirs.append(formatted)
    if gdb:
        dirs = ['/usr/lib/debug{}'.format(d) for d in dirs]
        if self.impl.startswith('cpython'):
            dirs.append('/usr/lib/debug/usr/lib/pyshared/python{}'.format(version))
    if package:
        dirs = ['debian/{}{}'.format(package, d) for d in dirs]
    return dirs
def parse_public_dir(self, path):
    """Return the version assigned to a site-packages path, True for an
    unversioned public dir, or None when the path doesn't match."""
    match = PUBLIC_DIR_RE[self.impl].match(path)
    if match is None:
        return None
    groups = match.groups(0)
    if groups and groups[0]:
        return Version(groups)
    return True
def should_ignore(self, path):
    """Return True if path is used by another interpreter implementation."""
    cache_key = 'should_ignore_{}'.format(self.impl)
    regexp = self.__class__._cache.get(cache_key)
    if regexp is None:
        # build one alternation of all OTHER implementations' dir patterns
        patterns = ['({})'.format(tpl)
                    for impl, tpl in INTERPRETER_DIR_TPLS.items()
                    if impl != self.impl]
        regexp = re.compile('|'.join(patterns))
        self.__class__._cache[cache_key] = regexp
    return regexp.search(path)
def cache_file(self, fpath, version=None):
    """Given path to a .py file, return path to its .pyc/.pyo file.

    This function is inspired by Python 3.2's imp.cache_from_source.

    :param fpath: path to file name
    :param version: Python version

    >>> i = Interpreter('python')
    >>> i.cache_file('foo.py', Version('3.1'))
    'foo.pyc'
    >>> i.cache_file('bar/foo.py', '3.4')
    'bar/__pycache__/foo.cpython-34.pyc'
    """
    version = Version(version or self.version)
    # .pyo when the interpreter runs optimised, .pyc otherwise
    last_char = 'o' if '-O' in self.options else 'c'
    if version <= Version('3.1'):
        # pre-PEP 3147 layout: cache file sits next to the source
        return fpath + last_char
    fdir, fname = split(fpath)
    if not fname.endswith('.py'):
        fname += '.py'
    # PEP 3147 layout: __pycache__/<name>.<magic tag>.pyc
    return join(fdir, '__pycache__', "%s.%s.py%s" %
                (fname[:-3], self.magic_tag(version), last_char))
def magic_number(self, version=None):
    """Return magic number of the requested interpreter (b'' for cpython2)."""
    version = Version(version or self.version)
    if self.impl == 'cpython2':
        return ''
    result = self._execute('import imp; print(imp.get_magic())', version)
    # _execute returns the printed repr of a bytes object; eval turns it
    # back into bytes (output comes from a system interpreter we invoke,
    # so it is trusted)
    return eval(result)
def magic_tag(self, version=None):
    """Return Python magic tag (used in __pycache__ dir to tag files).

    >>> i = Interpreter('python')
    >>> i.magic_tag(version='3.4')
    'cpython-34'
    """
    version = Version(version or self.version)
    if self.impl.startswith('cpython') and version << Version('3.2'):
        # no __pycache__ / tagged cache files before CPython 3.2
        return ''
    return self._execute('import imp; print(imp.get_tag())', version)
def multiarch(self, version=None):
    """Return the multiarch tag, or '' when the interpreter has none."""
    version = Version(version or self.version)
    try:
        soabi, multiarch = self._get_config(version)[:2]
    except Exception:
        log.debug('cannot get multiarch', exc_info=True)
        # interpreter without multiarch support
        return ''
    return multiarch
def stableabi(self, version=None):
    """Return the stable-ABI tag (e.g. 'abi3'), or None if not applicable."""
    version = Version(version or self.version)
    # stable ABI was introduced in Python 3.3
    if self.impl == 'cpython3' and version >> Version('3.2'):
        return 'abi{}'.format(version.major)
def soabi(self, version=None):
    """Return SOABI flag (used in .so file names)."""
    version = Version(version or self.version)
    # NOTE: it's not the same as magic_tag
    try:
        soabi, multiarch = self._get_config(version)[:2]
    except Exception:
        log.debug('cannot get soabi', exc_info=True)
        # interpreter without soabi support
        return ''
    return soabi
@property
def include_dir(self):
    """Return INCLUDE_DIR path.

    >>> Interpreter('python2.7').include_dir
    '/usr/include/python2.7'
    >>> Interpreter('python3.4-dbg').include_dir
    '/usr/include/python3.4dm'
    """
    if self.impl == 'pypy':
        return '/usr/lib/pypy/include'
    try:
        result = self._get_config()[2]
        if result:
            return result
    except Exception:
        result = ''
        log.debug('cannot get include path', exc_info=True)
    # fall back to the conventional path plus CPython ABI flag suffixes
    # (d = debug, m = pymalloc, u = wide unicode)
    result = '/usr/include/{}'.format(self.name)
    version = self.version
    if self.debug:
        if version << '3.3':
            result += '_d'
        else:
            result += 'dm'
    else:
        if version >> '3.2':
            result += 'm'
        elif version == '3.2':
            result += 'mu'
    return result
@property
def library_file(self):
"""Return libfoo.so file path."""
if self.impl == 'pypy':
return ''
libpl, ldlibrary = self._get_config()[3:5]
if ldlibrary.endswith('.a'):
# python3.1-dbg, python3.2, python3.2-dbg returned static lib
ldlibrary = ldlibrary.replace('.a', '.so')
if libpl and ldlibrary:
return join(libpl, ldlibrary)
raise Exception('cannot find library file for {}'.format(self))
    def check_extname(self, fname, version=None):
        """Return the corrected extension file name, or None if no rename
        is needed (or possible).

        Parses ``fname`` with EXTFILE_RE and, when the file is missing its
        SOABI/multiarch tags, rebuilds the name with them added.
        """
        if not version and not self.version:
            return
        version = Version(version or self.version)
        if '/' in fname:
            fdir, fname = fname.rsplit('/', 1)  # in case full path was passed
        else:
            fdir = ''
        info = EXTFILE_RE.search(fname)
        if not info:
            return
        info = info.groupdict()
        if info['ver'] and (not version or version.minor is None):
            # get version from soabi if version is not set or only major
            # version number is set
            version = Version("%s.%s" % (info['ver'][0], info['ver'][1]))
        if info['stableabi']:
            # files with stable ABI in name don't need changes
            return
        if info['debug'] and self.debug is False:
            # do not change Python 2.X extensions already marked as debug
            # (the other way around is acceptable)
            return
        if info['soabi'] and info['multiarch']:
            # already tagged, nothing we can do here
            return
        try:
            soabi, multiarch = self._get_config(version)[:2]
        except Exception:
            log.debug('cannot get soabi/multiarch', exc_info=True)
            return
        if info['soabi'] and soabi and info['soabi'] != soabi:
            # tagged for a different interpreter build - leave it alone
            return
        # prefer tags already present in the file name over config values
        tmp_soabi = info['soabi'] or soabi
        tmp_multiarch = info['multiarch'] or multiarch

        result = info['name']
        # drop the legacy "module" suffix where the interpreter expects
        # bare names (CPython 3 > 3.2 and CPython 2.7)
        if result.endswith('module') and result != 'module' and (
           self.impl == 'cpython3' and version >> '3.2' or
           self.impl == 'cpython2' and version == '2.7'):
            result = result[:-6]

        if tmp_soabi:
            result = "{}.{}".format(result, tmp_soabi)
            # append multiarch unless it is already embedded in the SOABI
            if tmp_multiarch and not (self.impl == 'cpython3' and version << '3.3') and tmp_multiarch not in soabi:
                result = "{}-{}".format(result, tmp_multiarch)
        elif self.impl == 'cpython2' and version == '2.7' and tmp_multiarch:
            result = "{}.{}".format(result, tmp_multiarch)

        if self.debug and self.impl == 'cpython2':
            result += '_d'
        result += '.so'
        if fname == result:
            # name is already correct - nothing to rename
            return
        return join(fdir, result)
def suggest_pkg_name(self, name):
"""Suggest binary package name with for given library name
>>> Interpreter('python3.1').suggest_pkg_name('foo')
'python3-foo'
>>> Interpreter('python3.4').suggest_pkg_name('foo')
'python3-foo'
>>> Interpreter('python2.7-dbg').suggest_pkg_name('bar')
'python-bar-dbg'
"""
if self.impl == 'pypy':
return 'pypy-{}'.format(name)
version = '3' if self.impl == 'cpython3' else ''
result = 'python{}-{}'.format(version, name)
if self.debug:
result += '-dbg'
return result
    def _get_config(self, version=None):
        """Return [SOABI, MULTIARCH, INCLUDEPY, LIBPL, LDLIBRARY] for the
        given interpreter version, queried from the interpreter itself.
        """
        version = Version(version or self.version)
        # sysconfig module is available since Python 3.2
        # (also backported to Python 2.7)
        if self.impl == 'pypy' or self.impl.startswith('cpython') and (
                version >> '2.6' and version << '3'
                or version >> '3.1' or version == '3'):
            cmd = 'import sysconfig as s;'
        else:
            cmd = 'from distutils import sysconfig as s;'
        # Join the config vars with a sentinel so they can be split safely.
        cmd += 'print("__SEP__".join(i or "" ' \
               'for i in s.get_config_vars('\
               '"SOABI", "MULTIARCH", "INCLUDEPY", "LIBPL", "LDLIBRARY")))'
        conf_vars = self._execute(cmd, version).split('__SEP__')
        if conf_vars[1] in conf_vars[0]:
            # Python >= 3.5 includes MULTIARCH in SOABI - strip it so SOABI
            # and MULTIARCH stay independent pieces.
            conf_vars[0] = conf_vars[0].replace("-%s" % conf_vars[1], '')
        try:
            # cross builds: the build environment's multiarch wins
            conf_vars[1] = os.environ['DEB_HOST_MULTIARCH']
        except KeyError:
            pass
        return conf_vars
    def _execute(self, command, version=None, cache=True):
        """Run ``command`` inside the target interpreter and return stdout.

        Returns a single string when output is one line, otherwise a list
        of lines. Results are memoized per command line in a class-level
        cache unless ``cache`` is False. Raises Exception on a non-zero
        exit status.
        """
        version = Version(version or self.version)
        # NOTE(review): "\'" is the same string as "'" in Python, so this
        # replace is a no-op - single quotes inside `command` are NOT
        # actually escaped for the shell. Harmless for the commands used
        # here (none contain quotes), but TODO confirm intent.
        command = "{} -c '{}'".format(self._vstr(version), command.replace("'", "\'"))
        if cache and command in self.__class__._cache:
            return self.__class__._cache[command]
        output = execute(command)
        if output['returncode'] != 0:
            log.debug(output['stderr'])
            raise Exception('{} failed with status code {}'.format(command, output['returncode']))
        result = output['stdout'].splitlines()
        if len(result) == 1:
            result = result[0]
        if cache:
            self.__class__._cache[command] = result
        return result
# due to circular imports issue
from dhpython.tools import execute
from dhpython.version import Version, default
| [
"sayardiss@gmail.com"
] | sayardiss@gmail.com |
855617ea99f031e0e80b2b054a95363b3b16af6b | 43c268536a396b7f105f15e717c9f1f6b9044082 | /cltk/phonology/latin/transcription.py | 10b54850b62e788a602cee6b2e5adfc903a3f61d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | codeSG/cltk | 9b5357d123d22fa423ecea3ffea9d2b9688445c3 | 6fbc96e3afa19f0c43b9eb64b739029c04f352b9 | refs/heads/master | 2020-12-31T00:29:35.046678 | 2017-06-04T05:26:23 | 2017-06-04T05:26:23 | 85,152,321 | 0 | 0 | null | 2017-03-24T11:01:04 | 2017-03-16T04:22:57 | Python | UTF-8 | Python | false | false | 23,177 | py | """Convert a word from Latin orthography into its hypothesized
pronunciation in the International Phonetic Alphabet (IPA).
https://raw.githubusercontent.com/j-duff/cltk/ipa/
cltk/phonology/latin/transcription.py
"""
from cltk.utils.cltk_logger import logger
from cltk.prosody.latin import macronizer as m
macronizer = m.Macronizer("tag_ngram_123_backoff")
from nltk.tokenize import wordpunct_tokenize
import re
import unicodedata
try:
# James Tauber's greek_accentuation package
from greek_accentuation import characters as chars
except ImportError as import_error:
print('Missing "greek_accentuation" package. Install with '
+ '`pip install greek-accentuation`.')
logger.error(import_error)
raise
__author__ = ['Jack Duff <jmunroeduff@gmail.com>']
__license__ = 'MIT License. See LICENSE.'
# Dictionaries of phonological reconstructions for use in transcribing.
# Allen, W. Sidney. 1965. Vox Latina.
# Reconstruction table: dialect -> scholar -> {correspondence, diphthongs,
# punctuation, alternations}. ``correspondence`` maps orthography to IPA.
LATIN = {'Classical': {
    'Allen': {
        'correspondence': {
            'p': 'p',
            't': 't̪',
            'c': 'k',
            'k': 'k',
            'qu': 'kʷ',
            'b': 'b',
            'd': 'd̪',
            'g': 'g',
            'gu': 'gʷ',
            'ph': 'pʰ',
            'th': 't̪ʰ',
            'ch': 'kʰ',
            'n': 'n̪',
            'm': 'm',
            'r': 'r',
            'rh': 'r',  # Voiceless r was spelled but not pronounced.
            'l': 'l',
            'f': 'f',
            's': 's',
            'h': 'h',
            'j': 'j',
            'v': 'w',
            'x': 'ks',
            'z': 'z',
            'ī': 'iː',
            'ū': 'uː',
            'i': 'ɪ',
            'u': 'ʊ',
            'e': 'ɛ',
            'o': 'ɔ',
            'ē': 'eː',
            'ō': 'oː',
            'a': 'a',
            'ā': 'aː',
            'y': 'y',
            # BUG FIX: was 'y:' with an ASCII colon; every other long vowel
            # (and the IPA feature lists) uses the length mark U+02D0 'ː'.
            'ȳ': 'yː',
            'ae': 'aj',
            'au': 'aw',
            'oe': 'oj',
            'eu': 'ew',
            'ei': 'ej'},
        'diphthongs':  # and digraphs
            ['qu', 'gu', 'ph', 'th', 'ch', 'rh',
                'ae', 'au', 'oe', 'eu', 'ei'],
        'punctuation':
            ['.', ',', ';', ':', '-', '–', '?', '!',
                '(', ')', "'", "\"", "[", "]"],
        'alternations': [
            'j_maker',  # word initial and intervocalic i is assumed j
            'w_maker',  # word initial and intervocalic u is assumed w
            'wj_block',  # prevents accidental sequence wj
            'uj_diph_maker',  # after w and j have been created, recognizes
            # <ui> = [uj]
            'b_devoice',  # b devoices before /t/, /s/
            'g_n_nasality_assimilation',  # only before n
            'n_place_assimilation',  # should also do labial, and
            # labio-dental before f.
            'final_m_drop',  # m drops and lengthens + nasalizes preceding
            # vowel word-finally
            'ns_nf_lengthening',  # vowels lengthen before ns or nf
            'l_darken',  # l darkens to ɫ in coda
            'j_z_doubling',  # intervocalic j and z > jj and zz
            'long_vowel_catcher',  # corrects accidental instances of ɪː
            # and similar.
            'e_i_closer_before_vowel',  # ɛ to ɛ̣, ɪ to ɪ̣ before another vowel
            'intervocalic_j',  # j glide between vowels
            ]
        }
    }
}
# Unhandled exceptions: preposition "ad" becomes [at̪] not [ad̪] before s and t
# subf > suff, subm > summ, subg > sugg, subc > succ, subr > rr
# j exceptions like ad*j*ectivum and con*j*unx
# All IPA characters used sorted by natural classes.
# WILL NEED ADDITIONS AS MORE RECONSTRUCTIONS USED
# All IPA characters used, grouped into natural classes; Phone feature
# booleans are computed by membership in these lists.
# WILL NEED ADDITIONS AS MORE RECONSTRUCTIONS USED
IPA = {'voiced':  # [+voice]
            # BUG FIX: a missing comma after 'ɱ' made Python concatenate
            # 'ɱ' 'l' into the single string 'ɱl', so neither ɱ nor l was
            # classified as voiced.
            ['b', 'd̪', 'g', 'gʷ',
                'm', 'n̪', 'ŋ', 'ɱ',
                'l', 'ɫ', 'r', 'z'],
        'labial':  # [+labial, -labiodental]
            ['b', 'p', 'pʰ', 'm'],
        'labiodental':  # [+labial, +labiodental]
            ['f', 'ɱ'],
        'coronal':  # [+coronal]
            ['d̪', 't̪', 't̪ʰ', 'n̪', 's', 'z', 'r', 'l', 'ɫ'],
        'velar':  # [+velar]
            ['g', 'k', 'kʰ', 'kʷ', 'gʷ', 'ŋ'],
        'nasal':  # [+consonantal, +nasal]
            # NOTE(review): lists plain 'n' while the correspondence table
            # produces the dental 'n̪' - confirm whether 'n̪' should be here.
            ['m', 'ɱ', 'n', 'ŋ'],
        'approximant':  # [+approximant]
            ['l', 'ɫ', 'r', 'j', 'w'],
        'continuant':  # [+continuant, +consonantal]
            ['h', 'f', 's', 'z', 'l', 'ɫ', 'r'],
        'vowel':  # [-consonantal -approximant]
            ['a', 'aː', 'ɛ', 'ɛ̣', 'eː', 'ɪ', 'ɪ̣', 'iː',
                'ɔ', 'oː', 'ʊ', 'u', 'uː', 'y', 'yː',
                'ãː', 'ẽː', 'ĩː', 'õː', 'ũː'],
        'high':  # [-consonantal, +high]
            ['ɪ', 'ɪ̣', 'iː', 'ʊ', 'u', 'uː', 'y', 'yː',
                'ɪ̃', 'ɪ̣̃', 'ĩː', 'ʊ̃', 'ũ', 'ũː', 'ỹ', 'ỹː'],
        'mid':  # [-consonantal, -high, -low]
            ['ɛ', 'ɛ̣', 'eː', 'ɔ', 'oː',
                'ɛ̃', 'ɛ̣̃', 'ẽː', 'ɔ̃', 'õː'],
        'low':  # [-consonantal, +low]
            ['a', 'aː',
                'ã', 'ãː'],
        'front':  # [-consonantal, +front]
            ['ɪ', 'ɪ̣', 'iː', 'y', 'yː', 'ɛ', 'ɛ̣', 'eː',
                'ɪ̃', 'ɪ̣̃', 'ĩː', 'ỹ', 'ỹː', 'ɛ̃', 'ɛ̣̃', 'ẽː'],
        'central':  # [-consonantal, -front, -back]
            ['a', 'aː',
                'ã', 'ãː'],
        'back':  # [-consonantal, +back]
            ['ʊ', 'u', 'uː', 'ɔ', 'oː',
                'ʊ̃', 'ũ', 'ũː', 'ɔ̃', 'õː'],
        'boundary':
            ['#']}
class Phone:
    """A phonological unit, represented as an IPA string plus a bundle of
    boolean feature values used to trigger contextual alternations."""
    # Has a bundle of feature values that help classify it so that it can
    # trigger contextual pronunciation changes.
    def __init__(self, ipa_ch):
        # eventually exported to output string (NFC-normalized so combining
        # diacritics compare consistently against the IPA feature lists)
        self.ipa = unicodedata.normalize('NFC', ipa_ch)
        # will be assigned once in Word, as the pre-context of this phone
        self.left = ""
        # .... as the post-context of this phone
        self.right = ""
        # bundle of features, stored as booleans (membership in the
        # module-level IPA natural-class lists):
        self.vce = self.ipa in IPA['voiced']        # voiced
        self.lab = self.ipa in IPA['labial']        # labial
        self.lbd = self.ipa in IPA['labiodental']   # labiodental
        self.cor = self.ipa in IPA['coronal']       # coronal
        self.vel = self.ipa in IPA['velar']         # velar
        self.nas = self.ipa in IPA['nasal']         # nasal
        self.app = self.ipa in IPA['approximant']   # approximant
        self.cont = self.ipa in IPA['continuant']   # continuant
        self.vow = self.ipa in IPA['vowel']         # vowel
        self.hi = self.ipa in IPA['high']           # high vowel
        self.mid = self.ipa in IPA['mid']           # mid vowel
        self.lo = self.ipa in IPA['low']            # low vowel
        self.fr = self.ipa in IPA['front']          # front vowel
        self.ctr = self.ipa in IPA['central']       # central vowel
        self.bk = self.ipa in IPA['back']           # back vowel
        self.bound = self.ipa in IPA['boundary']    # word boundary marker '#'
class Word:
    """Maximal phonological unit: an ordered list of Phones, plus the
    contextual alternation rules that rewrite them in place."""
    # An ordered collection of Phones, which are bundles of
    # features/IPA strings.
    def __init__(self, ipa_str, root):
        self.string = unicodedata.normalize('NFC', ipa_str)
        # Appropriate directory in the reconstruction dictionary
        self.root = root
        # list of contextual pronunciation alternations
        self.alts = self.root['alternations']
        # Turns string of IPA characters into list of Phones; the character
        # class keeps combining diacritics and a trailing length mark with
        # their base character.
        self.phones = [Phone(c) for c
                       in re.findall(r'.[̪̣̃ʷʰ]*ː?', self.string)]
    # Assigns left and right contexts for every phone
    def _refresh(self):
        for n in range(len(self.phones)):
            p = self.phones[n]
            if n != 0:
                p.left = self.phones[n - 1]
            else:
                p.left = Phone("#")
            if n != len(self.phones) - 1:
                p.right = self.phones[n + 1]
            else:
                p.right = Phone("#")
    def _j_maker(self):
        # Assume word-initial or intervocalic i to be j
        out_phones = self.phones
        target = Phone("j")
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.ipa == 'ɪ' and ((p.left.bound and p.right.vow)
                                 or (p.left.vow and p.right.vow)):
                out_phones[n] = target
        self.phones = out_phones
        self._refresh()
    def _w_maker(self):
        # Assume word-initial or intervocalic u to be w
        out_phones = self.phones
        target = Phone("w")
        for n in range(len(self.phones)):
            p = self.phones[n]
            if (((p.ipa == 'ʊ') or (p.ipa =='u'))
                    and ((p.left.bound
                          and (p.right.vow or p.right.ipa == 'j'))
                         or (p.left.vow and p.right.vow))):
                out_phones[n] = target
        self.phones = out_phones
        self._refresh()
    def _wj_block(self):
        # Addendum to correct possible 'wj' sequences
        out_phones = self.phones
        target = Phone("ɪ")
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.left.ipa == 'w' and p.ipa == 'j':
                out_phones[n] = target
        self.phones = out_phones
        self._refresh()
    def _uj_diph_maker(self):
        # Find accidental "ʊɪ" instances and treat as diphthong [uj].
        out_phones = self.phones
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.left.ipa == 'ʊ' and p.ipa == 'ɪ':
                out_phones[n-1] = Phone('u')
                out_phones[n] = Phone('j')
        self.phones = out_phones
        self._refresh()
    def _b_devoice(self):
        # Pronounce b as p when followed by s or t.
        out_phones = self.phones
        target = Phone("p")
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.ipa == 'b' and (p.right.ipa == 's' or p.right.ipa == 't̪'):
                out_phones[n] = target
        self.phones = out_phones
        self._refresh()
    def _final_m_drop(self):
        # Final m nasalizes and lengthens nucleus and drops.
        # NOTE: out_phones aliases self.phones (no copy), so the `del`
        # mutates the list being iterated; safe here because the matched m
        # is word-final (last index), so the loop ends right after.
        out_phones = self.phones
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.left.vow and p.ipa == 'm' and p.right.bound:
                out_phones[n-1] = Phone(p.left.ipa + "̃ː")
                del out_phones[n]
        self.phones = out_phones
        self._refresh()
    def _n_place_assimilation(self):
        # Pronounce n as ŋ when followed by velar.
        out_phones = self.phones
        target = Phone("ŋ")
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.ipa == 'n̪' and p.right.vel:
                out_phones[n] = target
        self.phones = out_phones
        self._refresh()
    def _g_n_nasality_assimilation(self):
        # Pronounce g as ŋ when followed by n.
        out_phones = self.phones
        target = Phone("ŋ")
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.ipa == "g" and p.right.ipa == "n̪":
                out_phones[n] = target
        self.phones = out_phones
        self._refresh()
    def _ns_nf_lengthening(self):
        # Lengthen vowel before ns or nf.
        out_phones = self.phones
        for n in range(len(self.phones)):
            p = self.phones[n]
            if (p.left.vow and "ː" not in p.left.ipa and p.ipa == "n̪"
                    and (p.right.ipa == "s" or p.right.ipa == "f")):
                out_phones[n-1] = Phone(p.left.ipa + "ː")
        self.phones = out_phones
        self._refresh()
    def _l_darken(self):
        # Pronounce l as ɫ in coda.
        out_phones = self.phones
        target = Phone("ɫ")
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.ipa == "l" and ((not p.right.vow) or p.right.bound):
                out_phones[n] = target
        self.phones = out_phones
        self._refresh()
    def _j_z_doubling(self):
        # Double j and z between vowels.
        # Insertion positions are recorded as negative indices so earlier
        # insertions do not shift later ones.
        out_phones = self.phones
        dupl = []
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.right.vow and (p.ipa == "j" or p.ipa == "z") and p.left.vow:
                dupl.append((True, n - len(self.phones), p.ipa))
            else: dupl.append((False, n - len(self.phones), None))
        for t in sorted(dupl, key=lambda tup: tup[1]):
            if t[0]:
                out_phones.insert(t[1], Phone(t[2]))
        self.phones = out_phones
        self._refresh()
    def _long_vowel_catcher(self):
        # Replace ɪː with iː, ʊː with uː, and ɛː with eː.
        out_phones = self.phones
        target_dict = {'ɪː': 'iː', 'ʊː': 'uː', 'ɛː': 'eː',
                       'ɪ̃ː': 'ĩː', 'ʊ̃ː': 'ũː', 'ɛ̃ː': 'ẽː'}
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.ipa in target_dict.keys():
                out_phones[n] = Phone(target_dict[p.ipa])
        self.phones = out_phones
        self._refresh()
    def _e_i_closer_before_vowel(self):
        # e and i become closer (̣) when followed by a vowel.
        out_phones = self.phones
        for n in range(len(self.phones)):
            p = self.phones[n]
            if (p.ipa == "ɛ" or p.ipa == "ɪ") and p.right.vow:
                out_phones[n] = Phone(p.ipa + "̣")
        self.phones = out_phones
        self._refresh()
    def _intervocalic_j(self):
        # epenthesize j between vowels (same negative-index insertion
        # technique as _j_z_doubling)
        out_phones = self.phones
        target = Phone("j")
        j = []
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.left.vow and p.vow:
                j.append((True, n - len(self.phones)))
            else: j.append((False, n - len(self.phones)))
        for t in sorted(j, key=lambda tup: tup[1]):
            if t[0]:
                out_phones.insert(t[1], target)
        self.phones = out_phones
        self._refresh()
    # list of all possible alternations, in application order
    ALTERNATIONS = [("j_maker", _j_maker),
                    ("w_maker", _w_maker),
                    ("wj_block", _wj_block),
                    ("uj_diph_maker", _uj_diph_maker),
                    ("b_devoice", _b_devoice),
                    ("final_m_drop", _final_m_drop),
                    ("n_place_assimilation", _n_place_assimilation),
                    ("g_n_nasality_assimilation", _g_n_nasality_assimilation),
                    ("ns_nf_lengthening", _ns_nf_lengthening),
                    ("l_darken", _l_darken),
                    ("j_z_doubling", _j_z_doubling),
                    ("long_vowel_catcher", _long_vowel_catcher),
                    ("e_i_closer_before_vowel", _e_i_closer_before_vowel),
                    ("intervocalic_j", _intervocalic_j)]
    def _alternate(self):
        # after setting left and right contexts for every phone...
        self._refresh()
        # runs all alternations enabled by the reconstruction's
        # 'alternations' list, in ALTERNATIONS order
        for a in Word.ALTERNATIONS:
            if a[0] in self.alts:
                a[1](self)
    def _syllabify(self):
        # takes Word input and returns a list of syllables as
        # (onset, nucleus, coda) tuples
        # where onset, nucleus, and coda are all lists of Phones.
        nuclei = []
        for n in range(len(self.phones)):
            p = self.phones[n]
            if p.vow:
                nuclei.append(n)
        # initialize syllables with a tuple for the first syllable
        # where onset is everything before the first nucleus
        # and coda remains unknown.
        syllables = [[self.phones[0:nuclei[0]],
                      [self.phones[nuclei[0]]], []]]
        # continue for every nucleus, assuming that everything between
        # the previous nucleus and it is the onset.
        for x in range(len(nuclei)-1):
            i = nuclei[x+1]
            onset = self.phones[nuclei[x]+1:i]
            nucleus = [self.phones[i]]
            syllables.append([onset, nucleus, []])
        # assume that everything after the final nucleus is final coda.
        syllables[-1][2] = self.phones[nuclei[-1]+1:]
        # now go through and check onset viability
        for x in range(len(syllables)-1):
            onset = syllables[x+1][0]
            nucleus = syllables[x+1][1]
            coda = syllables[x+1][2]
            # trim all onsets greater than the maximum 2 phones
            # removing extra phones from the left
            # and appending them to the previous coda
            if len(onset) > 2:
                trim = onset[:-2]
                del onset[:-2]
                syllables[x][2] = trim
            # once onset is 2 phones...
            if len(onset) == 2:
                # stop + liquid is the only viable sequence and passes
                if ((not onset[0].cont) and (not onset[0].app)
                        and (onset[1].nas or onset[1].app)):
                    # NOTE(review): this `break` exits the whole
                    # onset-checking loop after the first valid two-phone
                    # onset, leaving later syllables unchecked - looks like
                    # it should skip only this syllable; confirm.
                    break
                # otherwise, onset must be right Phone only
                # the left phone is appended to the previous coda
                else:
                    trim = onset[0]
                    del onset[0]
                    syllables[x][2] += [trim]
        self.syllables = syllables
        return(syllables)
    def _print_ipa(self, syllabify, accentuate):
        # depending on the syllabify and accentuate parameters
        # prints an appropriately marked up version of the transcription
        out = ""
        if syllabify:
            syllables = self._syllabify()
            # the ultima is the final syllable
            ultima = syllables[-1]
            # identify which syllable has stress and store index as accent
            if accentuate:
                # one syllable words have ultimate stress
                if len(syllables) == 1:
                    accent = -1
                # two syllable words have penultimate stress
                elif len(syllables) == 2:
                    accent = -2
                else:
                    # penult is second to last syllable
                    penult = syllables[-2]
                    # if penult is diphthong (long), penultimate stress
                    if len(penult[1]) > 1:
                        accent = -2
                    # if penult is long vowel, penultimate stress
                    elif "ː" in penult[1][0].ipa:
                        accent = -2
                    # if penult has coda (closed/long by position),
                    # penultimate stress
                    elif len(penult[2]) > 0:
                        accent = -2
                    # otherwise (penult is short) antepenultimate stress
                    else:
                        accent = -3
                # loop over syllables by index
                for x in range(len(syllables)):
                    s = syllables[x]
                    # if index matches accent index set above
                    if x-len(syllables) == accent:
                        # precede that syllable with
                        # IPA stress punctuation: '
                        out += "'"
                    # then, print IPA by syllable segment as usual
                    for n in s:
                        for p in n:
                            out += p.ipa
                    # separate all syllables with IPA syllable punctuation: .
                    if s != ultima:
                        out += "."
            # if no accentuation flag, proceed with syllabified printing
            else:
                for s in syllables:
                    for n in s:
                        for p in n:
                            out += p.ipa
                    # separate all syllables with IPA syllable punctuation: .
                    if s != ultima:
                        out += "."
        # if no syllabification flag, proceed with
        # unsyllabified IPA printing
        else:
            for p in self.phones:
                out += p.ipa
        return out
class Transcriber:
    """Uses a phonological reconstruction (from LATIN) to transcribe an
    orthographic string into bracketed IPA."""
    def __init__(self, dialect, reconstruction):
        # e.g. Transcriber("Classical", "Allen")
        self.lect = dialect
        self.recon = reconstruction
        self.root = LATIN[self.lect][self.recon]
        self.table = self.root["correspondence"]
        self.diphs = self.root["diphthongs"]
        self.punc = self.root["punctuation"]
    def _parse_diacritics(self, ch):
        # Returns a string with separated and organized diacritics
        # for easier access later.
        # EG: input with base a -> a/LENGTH/DIAERESIS/
        out = chars.base(ch).lower() # Initialize out as base of character.
        length = chars.length(ch)
        dia = chars.diaeresis(ch)
        out += "/" # Create 1st boundary
        # If any length, place between 1st and 2nd boundary
        if length != None:
            out += length
        out += "/" # Create 2nd boundary
        if dia != None: # If any diaeresis,
            out += dia # place between second and final boundary
        out += "/" # Create final boundary
        return out
    def _prep_text(self, text):
        # Performs preparatory tasks grouping and reordering characters
        # in order to make transcription formulaic.
        string_in = "".join([self._parse_diacritics(ch) for ch in text])
        # searches for diphthongs (two bases separated by empty diacritic
        # slots) and fuses them so they are treated as one phone
        for d in self.diphs:
            d1 = d[0]
            d2 = d[1]
            pattern = r"(" + d1 + r")\/\/\/(" + d2 + r")(\/\/\/)"
            string_in = re.sub(pattern, r"\1\2\3", string_in)
        # each tuple is (base(s), macron/breve marks, diaeresis)
        tup_out = re.findall(r"(..?)\/([̄̆]*)\/(¨?)\/", string_in)
        return tup_out
    def transcribe(
        self, text, macronize=True, syllabify=True, accentuate=True
    ):
        """Transcribe ``text`` to IPA; flags control macronization,
        syllable dots, and stress marks in the output."""
        # if macronize, will first use the tagger to macronize input
        # otherwise, input will be the raw input string
        if macronize:
            text = macronizer.macronize_text(text)
        # input is word-tokenized, stripped of non-diacritic punctuation,
        # and diphthongs and diacritics are handled
        inp = [self._prep_text(w) for w in wordpunct_tokenize(text)
               if w not in self.punc]
        words = []
        for w in inp:
            out = ""
            for c in w:
                if "̄" in c[1]:
                    # re-attach the macron so the long-vowel entry of the
                    # correspondence table is used
                    macron_added = c[0]+'̄'
                    ipa = self.table.get(macron_added, macron_added)
                else:
                    ipa = self.table.get(c[0], c[0])
                out += ipa
            transcription = Word(out, self.root)
            transcription._alternate()
            words.append(transcription)
        # Encloses output in brackets, proper notation for surface form.
        return "[" + " ".join([w._print_ipa(syllabify, accentuate)
                               for w in words]) + "]"
if __name__ == '__main__':
    # Demo: transcribe the opening of Cicero's first Catilinarian oration.
    allen_transcriber = Transcriber("Classical", "Allen")
    example = allen_transcriber.transcribe("Quo usque tandem, O Catilina, "
                                           + "abutere nostra patientia?")
    print(example)
| [
"kyle@kyle-p-johnson.com"
] | kyle@kyle-p-johnson.com |
86d78fe7e43ffec6d812c38cf1ebbb7c5de82828 | 9e0796e19634e191cfa08f6244305cf85454d6b0 | /core/function.py | cc3f6e34f5a3db6f59b9acc6358265efcd626d05 | [
"BSD-2-Clause"
] | permissive | tanshinepan/interface_auto_test | 53a3094fca4f54878813cef8f4519dcf301ee2a0 | 9c2b177bd2ad60b5e1e8b1f3c7e06d6a534e9c00 | refs/heads/master | 2022-07-13T03:52:15.739207 | 2020-05-13T10:38:46 | 2020-05-13T10:38:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,767 | py | #coding=utf-8
"""
用户自定义函数模块
@author ceshixiaoxiao
2020-03-28
"""
import random
# Whitelist of helper names that excute_function() may dispatch to.
supports_function_name = ["random_choice","random_int","random_str"]
def excute_function( function_name, parameters):
    """Dispatch to the named built-in helper and return its result.

    :param function_name: one of ``supports_function_name``
    :param parameters: argument list forwarded unchanged to the helper
    :return: whatever the selected helper returns
    :raises Exception: if ``function_name`` is not supported
    """
    if function_name not in supports_function_name:
        raise Exception( function_name,"函数名错误")
    # Table-driven dispatch; keys mirror supports_function_name exactly.
    dispatch = {
        "random_choice": random_choice,
        "random_int": random_int,
        "random_str": random_str,
    }
    return dispatch[function_name](parameters)
def random_choice( parameters):
    """Return one element drawn uniformly at random from ``parameters``.

    :param parameters: non-empty sequence to pick from
    :return: a single element of ``parameters``
    """
    return random.choice(parameters)
def random_int( parameters):
    """Return a random integer in the inclusive range [parameters[0], parameters[1]].

    e.g. random_int([100, 200]) picks an integer between 100 and 200.

    :param parameters: two-item sequence of int-convertible bounds (low, high)
    :return: a random int in the inclusive range
    :raises Exception: if the bounds are missing, non-numeric, or low > high
    """
    try:
        return random.randint( int(parameters[0]), int(parameters[1]))
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; Exception keeps the original
        # wrapping behavior for all ordinary errors.
        raise Exception( parameters ," random_int 函数参数错误")
def random_str(parameters):
    """Return a random string of letters and digits of the requested length.

    :param parameters: one-item sequence whose first element is the length
                       (int-convertible)
    :return: random alphanumeric string of that length
    :raises Exception: if the length is missing or not int-convertible
    """
    try:
        randomlength = int(parameters[0])
        # BUG FIX: the original alphabet mistyped 'J'/'j' as a second
        # 'G'/'g' ("...FGHIGKL..."), so J/j could never appear and G/g was
        # twice as likely. Corrected to the full alphabet.
        base_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
        # BUG FIX (robustness): narrowed from a bare ``except:`` so
        # KeyboardInterrupt/SystemExit still propagate.
        return ''.join(random.choice(base_str) for _ in range(randomlength))
    except Exception:
        raise Exception(parameters, " random_str 函数参数错误")
| [
"huaizheng.xie@shuyun.com"
] | huaizheng.xie@shuyun.com |
ce0b5dfd44dc7340125be5d37e88fdfdc79ac9f8 | 35d39321fef3feff861eb24472d397774d0b1f71 | /flask/fundamentals/hello_flask/server.py | 4dab9792f6c0d58af22b4ff65b89841d31737054 | [] | no_license | youk0160/python | c46948e373e3085590f033bc506bde01c5d6844b | 5e11282867047c2ee16dad4d51f60868c6f59b6b | refs/heads/master | 2023-08-05T23:03:43.200423 | 2021-09-17T15:31:14 | 2021-09-17T15:31:14 | 402,506,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | from flask import Flask # Import Flask to allow us to create our app
app = Flask(__name__) # Create a new instance of the Flask class called "app"
@app.route('/') # The "@" decorator associates this route with the function immediately following
def hello_world():
return 'Hello World!' # Return the string 'Hello World!' as a response
# import statements, maybe some other routes
@app.route('/success')
def success():
return "success"
# app.run(debug=True) should be the very last statement!
@app.route('/hello/<name>') # for a route '/hello/____' anything after '/hello/' gets passed as a variable 'name'
def hello(name):
print(name)
return "Hello, " + name
if __name__=="__main__": # Ensure this file is being run directly and not from a different module
app.run(debug=True) # Run the app in debug mode.
| [
"youk0160@gmail.com"
] | youk0160@gmail.com |
c810639691a23d9854660929620ec85be65a785a | 67019bd69a664b103ad29cc7b89f747f02ac80e5 | /mentain.py | 089e0bb7718b254a67b23e165d2831bd1ea04b59 | [] | no_license | teoionescu/artificial-intelligence-lab | 2d3edeba28d1e881755c4379366d72a5e296f3e7 | 7cbf8925cd5c3f17b4f53d95a0f51b43fe47fc5f | refs/heads/master | 2020-03-19T05:26:45.673431 | 2018-06-03T19:04:20 | 2018-06-03T19:04:20 | 135,931,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py |
import random
import numpy as np
from sklearn.model_selection import train_test_split
from inputReader import readTrainInput, readTestInput
from sklearn.metrics import classification_report, confusion_matrix
from difer import score
from difer import main
def inputSizes():
    """Return (held-out validation sample count, submission test count).

    Uses the same deterministic 95/5 split (random_state=1) as the rest
    of the pipeline.
    """
    features, labels = readTrainInput()
    feature_arr = np.array(features)
    label_arr = np.array(labels)
    submission_arr = np.array(readTestInput())
    # train_test_split returns (X_train, X_test, y_train, y_test);
    # index 1 is the held-out feature matrix.
    holdout = train_test_split(feature_arr, label_arr,
                               random_state=1, train_size=0.95)[1]
    return holdout.shape[0], submission_arr.shape[0]
def relative_y():
    """Return the ground-truth labels of the held-out 5% split.

    Must use the same random_state/train_size as inputSizes() so the
    labels line up with the saved prediction accumulator rows.
    """
    features, labels = readTrainInput()
    feature_arr = np.array(features)
    label_arr = np.array(labels)
    # Loaded as in the original pipeline (side effect of reading the
    # test file); the value itself is unused here.
    _submission_arr = np.array(readTestInput())
    split = train_test_split(feature_arr, label_arr,
                             random_state=1, train_size=0.95)
    return split[3]  # y_test
def save_predictions(test_a, a):
    """Persist both vote accumulators next to the script (.npy files)."""
    for path, arr in (('./test_a.npy', test_a), ('./a.npy', a)):
        np.save(path, arr)
def reset_predictions():
    """Zero both accumulators (one vote column per digit class) and save."""
    holdout_count, submission_count = inputSizes()
    holdout_votes = np.zeros((holdout_count, 10)).astype(int)
    submission_votes = np.zeros((submission_count, 10)).astype(int)
    save_predictions(holdout_votes, submission_votes)
def load_predictions():
    """Load the persisted accumulators; counterpart of save_predictions()."""
    return np.load('./test_a.npy'), np.load('./a.npy')
def evaluate_sample():
    """Score the ensemble's held-out votes against the true labels."""
    holdout_votes, _submission_votes = load_predictions()
    truth = relative_y()
    # Majority vote: the class column with the most accumulated votes.
    score(truth, holdout_votes.argmax(1))
def to_onehot(a):
    """Convert a vector of class ids (0-9) into an (n, 10) one-hot int matrix."""
    count = a.shape[0]
    onehot = np.zeros((count, 10))
    onehot[np.arange(count), a] = 1
    return onehot.astype(int)
def results():
    """Print the per-class vote distribution and write submission.csv
    (Id, Prediction) from the accumulated test-set votes."""
    test_a, a = load_predictions()
    # Majority vote over the accumulated ensemble votes.
    predictions = a.argmax(1)
    for i in range(10):
        print(i, ' ', sum(predictions == i))
    import csv
    with open('submission.csv', 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['Id', 'Prediction'])
        # Submission ids are 1-based.
        for i in range(a.shape[0]):
            writer.writerow([str(i+1), str(predictions[i])])
def train(rnd_seed = 0):
    """Run one ensemble member and add its one-hot votes to the
    persisted accumulators.

    :param rnd_seed: seed forwarded to ``main`` so each member differs
    """
    test_p, p = main(rnd_seed)
    test_votes = to_onehot(test_p)
    votes = to_onehot(p)
    test_a, a = load_predictions()
    # The original converted everything to the deprecated np.matrix before
    # adding and then back with np.array; element-wise '+' on plain
    # ndarrays produces the identical result, so add directly.
    save_predictions(test_a + test_votes, a + votes)
def training():
    """Train the ensemble members for the chosen seeds, then evaluate."""
    # NOTE: badg (seeds that hurt the ensemble) and clist (candidate
    # seeds) are kept for bookkeeping but are not used below.
    badg = [474131]
    cdone = [526727, 207463, 170946] # chosen seeds
    clist = [63484, 945057, 420993]
    for i in cdone:
        train(i)
    print('Evaluation')
    evaluate_sample()
def raport():
    """Print the confusion matrix of the held-out votes vs. truth."""
    test_a, a = load_predictions()
    print(confusion_matrix(relative_y(), test_a.argmax(1)))
    # Side-by-side report (votes | true labels); only printed via the
    # commented-out line below, so `rep` is currently unused.
    # NOTE(review): concatenating along axis=1 assumes relative_y() is
    # 2-D (column-shaped) - confirm against the label file format.
    rep = np.concatenate((test_a, relative_y()), axis = 1)
    # Caution: mutates numpy's GLOBAL print options for the whole process.
    np.set_printoptions(threshold=np.inf)
    #print(rep)
if __name__ == '__main__':
    # Pipeline steps are toggled by (un)commenting; currently only the
    # held-out evaluation runs.
    #reset_predictions()
    #results()
    #training()
    #raport()
    evaluate_sample()
    pass
| [
"teoionescu32@gmail.com"
] | teoionescu32@gmail.com |
550f2d75ff2180b55e57946842e9405bc405477b | a9943c862683fd5e67250c14723914c42eb98d1a | /checkout/migrations/0001_initial.py | b4af6fd5f65cf502c591d548f6fbe01337b79be3 | [] | no_license | PeterLenting/e-commerce | 8e0509c2b5f121cb356ea26c274522ee7b326b13 | dd2ee9a6decbb1c659fbafd3159d0c184377f60b | refs/heads/master | 2022-12-06T01:43:53.327596 | 2019-12-07T13:20:28 | 2019-12-07T13:20:28 | 225,182,704 | 0 | 0 | null | 2022-11-22T04:53:00 | 2019-12-01T15:22:00 | Python | UTF-8 | Python | false | false | 1,653 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-12-06 15:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=50)),
('phone_number', models.CharField(max_length=20)),
('country', models.CharField(max_length=40)),
('postcode', models.CharField(blank=True, max_length=20)),
('town_or_city', models.CharField(max_length=40)),
('street_address1', models.CharField(max_length=40)),
('street_address2', models.CharField(max_length=40)),
('county', models.CharField(max_length=40)),
('date', models.DateField()),
],
),
migrations.CreateModel(
name='OrderLineItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='checkout.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Product')),
],
),
]
| [
"pplenting@gmail.com"
] | pplenting@gmail.com |
c51462a251e42609bb187bab6ee21dd99f7cdd5e | be6b564c752038c108482a79febcabbdb42c92e0 | /autogorod_by.py | 7b9c559bc6347b8d39ffc3e9a5ffac9d4d1ae912 | [] | no_license | maximdushniak/parse_autogorod | ecc1daeb3fb334b1946bb274c20942aacd97e565 | 0c24439979c64e247c83671b996575801cbf1aa1 | refs/heads/master | 2021-03-12T23:59:13.990094 | 2015-09-22T09:10:06 | 2015-09-22T09:10:06 | 42,367,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,847 | py | #!/usr/bin/env python
import re
import csv
import sys
import time
import requests
import lxml.html
def get_proxies():
    """Return the requests proxy mapping used for all avtogorod.by calls."""
    proxy_map = {'http': '92.222.45.88:8888'}
    # Toggle: return {} instead to disable the proxy.
    return proxy_map
def get_params(article):
    """Build the query-string parameters for an article lookup."""
    return {
        'article': article,
        'time': 'false',
        'ajax': 'true',
        'sort': 'article',
    }
def get_headers():
    """Return browser-like HTTP headers for the avtogorod.by AJAX endpoint."""
    return {
        'Host': 'avtogorod.by',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0',
        'Accept': '*/*',
        'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3',
        'Accept-Encoding': 'gzip, deflate',
        'DNT': '1',
        'X-Requested-With': 'XMLHttpRequest',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Connection': 'keep-alive',
    }
def parse_result_table(doc, searchart='', searchmark=''):
    """Extract offer rows from a parsed avtogorod.by results page.

    :param doc: lxml document of the results page
    :param searchart: the article number that was searched (echoed in rows)
    :param searchmark: the brand that was searched (echoed in rows)
    :return: list of rows [mark, article, brand, part article, description,
             availability, price], each field normalized to the allowed
             character set; in-stock rows only ('ПОД ЗАКАЗ' is skipped)
    """
    d = []
    for tr in doc.xpath(
            './/div/div/div[@id="ajax_analogs"]/table[@class="details-list filterResultTable xsmalls"]//tr[@class]'):
        # Result-table rows; malformed rows are skipped wholesale.
        try:
            place = tr.xpath(
                'td[normalize-space(@class)="th-td-result-place cell td-color2"]|td[normalize-space(@class)="th-td-result-place td-color"]')[
                0].text_content().strip()
            # Skip made-to-order ("ПОД ЗАКАЗ") offers.
            if place.upper() == 'ПОД ЗАКАЗ': continue
            brend = tr.xpath(
                'td[normalize-space(@class)="th-td-result-brand cell td-color2"]/span|td[normalize-space(@class)="th-td-result-brand td-color"]/span')[
                0].text_content().strip()
            article = tr.xpath(
                'td[normalize-space(@class)="th-td-result-article td-color"]/span/span/b|td[normalize-space(@class)="th-td-result-article cell td-color2"]/span/span/b')[
                0].text_content().strip()
            descr = tr.xpath(
                'td[normalize-space(@class)="th-td-result-descr cell td-color2"]/span[@class="artlook-descr"]/span[@class="descr-hide-overflow"]|td[normalize-space(@class)="th-td-result-descr td-color"]/span[@class="artlook-descr"]/span[@class="descr-hide-overflow"]')[
                0].text_content().strip()
            price = tr.xpath(
                'td[normalize-space(@class)="th-td-result-price box-price-view cell td-color2"]|td[normalize-space(@class)="th-td-result-price box-price-view td-color"]')[
                0]
            price_value = price.xpath('span[@itemprop="offers"]')[0].text_content().strip()
            # price_curency = price.xpath('meta')
            res_l = [searchmark, searchart, brend, article, descr, place, price_value]
            d.append([normalize_string(i, '[a-zA-Zа-яА-я0-9 ]') for i in res_l])
        except Exception:
            # BUG FIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt inside the row loop; Exception preserves the
            # skip-bad-row behavior for ordinary parse errors.
            pass
    return d
def search_article(article, brand=''):
    """Query avtogorod.by for *article* and return parsed result rows.

    Returns the list of rows produced by ``parse_result_table`` (empty on
    any request failure).  When the site answers with a manufacturer
    disambiguation page, every manufacturer link whose label contains
    *brand* is followed and its results are accumulated.
    """
    url = 'http://avtogorod.by'
    search_url = url + '/search/artlookup/'
    headers = get_headers()
    params = get_params(article)
    proxies = get_proxies()
    d = []
    try:
        r = requests.get(search_url, headers=headers, params=params, proxies=proxies)
    except:
        # NOTE(review): bare except keeps batch runs alive on any network
        # error, but also swallows KeyboardInterrupt.
        print("Request error. Article: ", article)
        return d
    # Parse the response if there are analogs.
    doc = lxml.html.document_fromstring(r.text)
    aaa = doc.xpath('.//div/div/table/tr/td/h1[@class="uppercase"]')
    if len(aaa) == 1:
        # A single "Производители" (manufacturers) heading means the site
        # returned a disambiguation page instead of direct results.
        if aaa[0].text_content().strip().upper() == 'Производители'.upper():
            for table in doc.find_class('details-list filterResultTable set-search-grid xsmalls'):
                for tr in table.find_class('cursor'):
                    if brand.upper() in tr[1].text_content().strip().upper():
                        # Follow the per-manufacturer result link.
                        search_url = url + tr[3][0].get('href')
                        try:
                            r = requests.get(search_url, headers=headers, params=params, proxies=proxies)
                        except:
                            return d
                        doc = lxml.html.document_fromstring(r.text)
                        d += parse_result_table(doc, article, brand)
        else:
            d += parse_result_table(doc, article, brand)
    return d
def normalize_string(str, pattern='[a-zA-Z0-9]'):
    """Reduce *str* to the concatenation of all characters matching *pattern*.

    NOTE(review): the first parameter shadows the builtin ``str``; the name
    is kept for interface compatibility with existing callers.
    """
    return ''.join(re.findall(pattern, str))
if __name__ == '__main__':
    # Batch driver: read tab-separated "article[<TAB>brand]" queries from a
    # file (search.txt by default, or argv[1]), scrape avtogorod.by for
    # each query, then save all collected rows to result_file.csv.
    start_datetime = time.time()
    print('Start:', time.ctime(start_datetime))
    print('-------------------------------------')
    res_list = []
    filename = 'search.txt'
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    with open(filename, newline='') as csvfile:
        print('Read file: ' + filename)
        print('')
        reader = csv.reader(csvfile, dialect='excel', delimiter='\t')
        rows = [row for row in reader]
        len_row = len(rows)
        n = 0
        for row in rows:
            n += 1
            # Progress indicator in percent of processed query rows.
            percent = round(100 * n / len_row, 2)
            # print(['Parse:', row, round(100 * n / len_row, 2), '%'])
            art = normalize_string(row[0])
            mark = ''
            if len(row) == 2:
                # Optional second column is the brand to match against.
                mark = normalize_string(row[1])
            art_list = []
            try:
                art_list = search_article(art, mark)
            except:
                # NOTE(review): bare except keeps the batch running on any
                # failure, but also hides KeyboardInterrupt.
                print('Error parse', art)
            print('Parse:', [art, mark], percent, '%', end=' ')
            print('row:', len(art_list))
            res_list += art_list
    if len(res_list) > 0:
        # Prepend the (Russian) header row before saving.
        res_list = [['Искомый бренд', 'Искомый артикул', 'Бренд', 'Артикул', 'Наименование', 'Направление',
                     'Цена']] + res_list
        result_file = open('result_file.csv', 'w', newline='')
        wr = csv.writer(result_file, quoting=csv.QUOTE_ALL, delimiter=';')
        print('')
        len_list = len(res_list)
        print('Result [', len_list ,'] row. Saving.')
        for element in res_list:
            try:
                wr.writerow(element)
            except:
                print('Error write', element)
        result_file.close()
        print('')
        print('-------------------------------------')
        print('File safe: ', result_file.name)
    else:
        print('')
        print('-------------------------------------')
        print('Error: No data!!!')
    end_datetime = time.time()
    print('-------------------------------------')
    print('Finish:', time.ctime(end_datetime))
    # Break the elapsed seconds down into h/min/sec for display.
    duration = end_datetime - start_datetime
    duration_h = int(duration//(60*60))
    duration_m = int((duration-duration_h*60*60)//60)
    duration_s = int((duration - duration_h*60*60 - duration_m*60))
    print('Duration:', duration_h, 'h', duration_m, 'min', duration_s , 'sec')
    print('')
    input('Press any key..')
"maxim.dushniak@gmail.com"
] | maxim.dushniak@gmail.com |
d10192ab95a1b46d604aa924f07a235b10ff2971 | 4fbd844113ec9d8c526d5f186274b40ad5502aa3 | /algorithms/python3/number_of_digit_one.py | 538d9b720619759dbae129533367a918a55ffec3 | [] | no_license | capric8416/leetcode | 51f9bdc3fa26b010e8a1e8203a7e1bcd70ace9e1 | 503b2e303b10a455be9596c31975ee7973819a3c | refs/heads/master | 2022-07-16T21:41:07.492706 | 2020-04-22T06:18:16 | 2020-04-22T06:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given an integer n, count the total number of digit 1 appearing in all non-negative integers less than or equal to n.
Example:
Input: 13
Output: 6
Explanation: Digit 1 occurred in the following numbers: 1, 10, 11, 12, 13.
"""
""" ==================== body ==================== """
class Solution:
    def countDigitOne(self, n):
        """
        :type n: int
        :rtype: int

        Count how many times the digit 1 appears in all non-negative
        integers <= n.  The original method was an empty stub returning
        None; this implements the classic place-value count: for each
        decimal place ``factor`` (1, 10, 100, ...), split n into the
        digits above, at, and below that place and add the number of 1s
        that place contributes.  O(log10 n) time, O(1) space; returns 0
        for n <= 0.
        """
        total = 0
        factor = 1
        while factor <= n:
            higher = n // (factor * 10)   # digits above the current place
            cur = (n // factor) % 10      # the digit at the current place
            lower = n % factor            # digits below the current place
            if cur == 0:
                total += higher * factor
            elif cur == 1:
                # The 1s from full higher cycles plus the partial cycle
                # 0..lower at this place.
                total += higher * factor + lower + 1
            else:
                total += (higher + 1) * factor
            factor *= 10
        return total
| [
"capric8416@gmail.com"
] | capric8416@gmail.com |
dca668af3ee705e63df35778cd21b88ad8453cc3 | 16d97d701c9ef52fab663f2bda410bfe6d64b859 | /read.py | 8fe8e6e528c11033d08a24de9d441a0157683e05 | [] | no_license | amdastro/CO-Dust | 4f6a0c739c7b8bf2d23dde03a9c85e8b57dc6217 | acba8895dda178833a3b8814a698d886ee6b2939 | refs/heads/master | 2020-04-06T13:00:59.096663 | 2016-09-14T16:58:42 | 2016-09-14T16:58:42 | 47,339,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | import numpy as np
import parameters as par
# Load the output tables of one simulation run, selected by par.directory.
# NOTE(review): the ``print`` statement below means this script targets
# Python 2.  Column meanings are inferred from the variable names
# (Y_* fractions, X_* mass fractions, K_* rates) -- confirm against the
# code that writes these files.
print 'reading ', par.directory
T_cs, n, delta, R_cs, c_s = np.genfromtxt("runs/%s/thermo.txt"%par.directory, unpack=True,skip_footer=1)
t, Y_CO, Y_C_free, Y_O_free, Y_D, int_flag, adap_flag, sat = np.genfromtxt("runs/%s/fractions.txt"%par.directory, unpack=True,skip_footer=1)
t, X_CO, X_C_free, X_O_free, X_D, dNdt = np.genfromtxt("runs/%s/massfrac.txt"%par.directory, unpack=True, skip_footer=1)
K_ra, K_th, K_nth = np.genfromtxt("runs/%s/rates.txt"%par.directory, unpack=True, skip_footer=1)
dust, alldust, size, allcarbon = np.loadtxt("runs/%s/dust.txt"%par.directory, unpack=True)
# Truncate the dust series to the length of the time axis ``t``.
dust = np.array(dust[:len(t)])
alldust = np.array(alldust[:len(t)])
size = np.array(size[:len(t)])
allcarbon = np.array(allcarbon[:len(t)])
"aderdzinski@gmail.com"
] | aderdzinski@gmail.com |
3488bc4730540516dea9ddb3ebf2b884d2202ae4 | f132c4573ba5f972daf1ff3ab492bb43176be07d | /StadyApp/ckreditor_example/views.py | 72216d35c8d1d80cded7040860f046635583d131 | [] | no_license | sdfvb/django_start | 88abdc21d1bdbb22d0aa773721d7cd1815308950 | 78afe7fc0e0f5cbca0f8f4256f8351dc1dd79e87 | refs/heads/master | 2023-02-17T08:09:41.309909 | 2020-09-22T01:14:55 | 2020-09-22T01:14:55 | 285,529,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | from django.shortcuts import render
from django.views.generic import TemplateView, DetailView, ListView
from .models import PostCKEditor
class PostView(ListView):
    """Paginated list of PostCKEditor posts rendered with CkedTemp/blog.html."""
    template_name = "CkedTemp/blog.html"
    context_object_name = 'posts'  # template variable holding the page's posts
    model = PostCKEditor
    paginate_by = 2  # posts per page
class PostDetail(DetailView):
    """Detail page for a single PostCKEditor post."""
    model = PostCKEditor
    template_name = "CkedTemp/blog_detail.html"
    context_object_name = 'post'  # template variable holding the object
    def get_context_data(self, **kwargs):
        """Return the default DetailView context unchanged.

        NOTE(review): this override adds nothing over the base class;
        presumably kept as a hook for future extra context.
        """
        context = super().get_context_data(**kwargs)
        return context
| [
"45214736+sdfvb@users.noreply.github.com"
] | 45214736+sdfvb@users.noreply.github.com |
e1070fb45b959faa9c631d562609a496f06c90f0 | 0a47b549836fb070f96be6889e40e991eba1edc8 | /Mac_changer.py | a8b040bcdb0b891511e84876f9fc7deaf2ac5eb8 | [
"MIT"
] | permissive | mrzero-cool/mac_changer | 8eacbcfae225a6636ffc9159d7b1a88e51980076 | 9e3b25bba48aebfda5a473625df8f6d872a7d200 | refs/heads/master | 2022-04-22T13:31:04.780127 | 2020-04-20T16:00:47 | 2020-04-20T16:00:47 | 256,533,445 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,100 | py | #!/usr/bin/env python
import subprocess
import optparse
import re
print('''
XX MMMMMMMMMMMMMMMMss''' '''ssMMMMMMMMMMMMMMMM XX
XX MMMMMMMMMMMMyy'' ''yyMMMMMMMMMMMM XX
XX MMMMMMMMyy'' ''yyMMMMMMMM XX
XX MMMMMy'' ''yMMMMM XX
XX MMMy' 'yMMM XX
XX Mh' 'hM XX
XX - - XX
XX XX
XX :: :: XX
XX MMhh. ..hhhhhh.. ..hhhhhh.. .hhMM XX
XX MMMMMh ..hhMMMMMMMMMMhh. .hhMMMMMMMMMMhh.. hMMMMM XX
XX ---MMM .hMMMMdd:::dMMMMMMMhh.. ..hhMMMMMMMd:::ddMMMMh. MMM--- XX
XX MMMMMM MMmm'' 'mmMMMMMMMMyy. .yyMMMMMMMMmm' ''mmMM MMMMMM XX
XX ---mMM '' 'mmMMMMMMMM MMMMMMMMmm' '' MMm--- XX
XX yyyym' . 'mMMMMm' 'mMMMMm' . 'myyyy XX
XX mm'' .y' ..yyyyy.. '''' '''' ..yyyyy.. 'y. ''mm XX
XX MN .sMMMMMMMMMss. . . .ssMMMMMMMMMs. NM XX
XX N` MMMMMMMMMMMMMN M M NMMMMMMMMMMMMM `N XX
XX + .sMNNNNNMMMMMN+ `N N` +NMMMMMNNNNNMs. + XX
XX o+++ ++++Mo M M oM++++ +++o XX
XX oo oo XX
XX oM oo oo Mo XX
XX oMMo M M oMMo XX
XX +MMMM s s MMMM+ XX
XX +MMMMM+ +++NNNN+ +NNNN+++ +MMMMM+ XX
XX +MMMMMMM+ ++NNMMMMMMMMN+ +NMMMMMMMMNN++ +MMMMMMM+ XX
XX MMMMMMMMMNN+++NNMMMMMMMMMMMMMMNNNNMMMMMMMMMMMMMMNN+++NNMMMMMMMMM XX
XX yMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMy XX
XX m yMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMy m XX
XX MMm yMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMy mMM XX
XX MMMm .yyMMMMMMMMMMMMMMMM MMMMMMMMMM MMMMMMMMMMMMMMMMyy. mMMM XX
XX MMMMd ''''hhhhh odddo obbbo hhhh'''' dMMMM XX
XX MMMMMd 'hMMMMMMMMMMddddddMMMMMMMMMMh' dMMMMM XX
XX MMMMMMd 'hMMMMMMMMMMMMMMMMMMMMMMh' dMMMMMM XX
XX MMMMMMM- ''ddMMMMMMMMMMMMMMdd'' -MMMMMMM XX
XX MMMMMMMM '::dddddddd::' MMMMMMMM XX
XX MMMMMMMM- -MMMMMMMM XX
XX MMMMMMMMM MMMMMMMMM XX
XX MMMMMMMMMy yMMMMMMMMM XX
XX MMMMMMMMMMy. .yMMMMMMMMMM XX
XX MMMMMMMMMMMMy. .yMMMMMMMMMMMM XX
XX MMMMMMMMMMMMMMy. .yMMMMMMMMMMMMMM XX
XX MMMMMMMMMMMMMMMMs. .sMMMMMMMMMMMMMMMM XX
XX MMMMMMMMMMMMMMMMMMss. .... .ssMMMMMMMMMMMMMMMMMM XX
XX MMMMMMMMMMMMMMMMMMMMNo oNNNNo oNMMMMMMMMMMMMMMMMMMMM XX
''')
def _arguments():
    """Parse -i/--interface and -m/--mac from sys.argv via optparse.

    Both options are mandatory; ``parser.error`` prints the message to
    stderr and exits the process with status 2 when one is missing.
    Returns the optparse options object.
    """
    parser = optparse.OptionParser()
    parser.add_option("-i","--interface", dest="interface" , help= "specify interface to change the mac")
    parser.add_option("-m","--mac", dest="mac", help="Change macaddress")
    (options,arguments)=parser.parse_args()
    if not options.interface:
        parser.error("\033[96m[-] please specify the interface usage:--help for more info ")
    elif not options.mac:
        parser.error("\033[96m[-] please specify the mac usage:--help for more info ")
    return options
def Change_mac(interface,mac):
    """Bring *interface* down, set its MAC address to *mac*, bring it back up.

    Shells out to ``ifconfig``; requires root privileges and a system that
    ships ifconfig (Linux net-tools).  Return codes are not checked.
    """
    print("\033[91m[+] Changing Mac address for " + interface + " to " + mac)
    subprocess.call(["ifconfig", interface,"down"])
    # "hw ether" rewrites the link-layer (MAC) address.
    subprocess.call(["ifconfig", interface , "hw","ether", mac])
    subprocess.call(["ifconfig", interface,"up"])
def get_c_mac(interface):
    """Return the current MAC address of *interface*, or None if unreadable.

    Fixes two defects in the original:
    * it ignored its *interface* parameter and read the module-level
      ``options`` global instead, so the parameter was meaningless;
    * on Python 3 ``subprocess.check_output`` returns bytes, and passing
      bytes to ``re.search`` with a str pattern raises TypeError; the
      output is decoded first (harmless on Python 2 as well).
    """
    ifconfig_result = subprocess.check_output(["ifconfig", interface]).decode(errors="replace")
    mac_s_result = re.search(r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w", ifconfig_result)
    if mac_s_result:
        return mac_s_result.group(0)
    else:
        print("\033[96m[-] Could not read MAc_address")
        return None
# Script entry: parse the arguments, show the current MAC, change it,
# then verify the change by re-reading the interface.
options = _arguments()
current_mac = get_c_mac(options.interface)
print("\033[96mCurrent_Mac = " + str(current_mac))
Change_mac(options.interface, options.mac)
# Re-read the MAC after the change.  The original compared the
# *pre-change* value against the target, so it reported success only
# when the MAC was already set to the requested value.
current_mac = get_c_mac(options.interface)
if current_mac == options.mac:
    print(" \033[96m[+] MAc has Successfully Changed ")
else:
    print("\033[96m[-] Could Not Change The Mac Address")
| [
"noreply@github.com"
] | noreply@github.com |
c15c86c655c7097fab8ef81a760eb7efbc1a107c | 86c3e14f7b941b73ec0506517da2430952acb45b | /scripts/save_noise_sound.py | c60d9b405009380a386e588a26c66c8449d7ace8 | [] | no_license | ykawamura96/sound_classification | 33dec9d68d8fbe816595ecf99e1cb1d8a05b1cf2 | cc98837d26359153ceed552b54d7d02b815c0c24 | refs/heads/master | 2020-09-05T17:48:19.250446 | 2019-09-13T11:51:58 | 2019-09-13T11:51:58 | 220,173,008 | 0 | 0 | null | 2019-11-07T07:05:17 | 2019-11-07T07:05:16 | null | UTF-8 | Python | false | false | 1,273 | py | #!/usr/bin/env python
# This node saves noise sound (environment sound)
import numpy as np
import os.path as osp
import rospkg
import rospy
from sound_classification.msg import Spectrum
import time
if __name__ == '__main__':
rospy.init_node('save_noise_sound.py', anonymous=True)
rospy.sleep(0.1) # do not save typing sound
record_time = 3.0
time_start = time.time()
mean_noise_sound = None
sound_count = 0
while(time.time() - time_start < record_time):
msg = rospy.wait_for_message('/microphone/sound_spec_raw', Spectrum)
if mean_noise_sound is None:
mean_noise_sound = np.array(msg.spectrum)
else:
mean_noise_sound = mean_noise_sound + np.array(msg.spectrum)
sound_count += 1
# mean noise sound
mean_noise_sound = mean_noise_sound / sound_count
# save noise sound
rospack = rospkg.RosPack()
file_name = osp.join(rospack.get_path(
'sound_classification'), 'scripts', 'mean_noise_sound')
np.save(file_name, mean_noise_sound)
rospy.loginfo('Record {} seconds'.format(record_time))
rospy.loginfo('Successfully saved {}.npy'.format(file_name))
rospy.logwarn('Please kill this program by pressing Ctrl-C')
while True:
time.sleep(1)
| [
"708yamaguchi@gmail.com"
] | 708yamaguchi@gmail.com |
86c22ca4ca7fe3b67919b54097bc9189805b71f3 | e4066b34668bbf7fccd2ff20deb0d53392350982 | /project_scrapy/spiders/grammarly.py | 19c27a8c3349910b1bd4ad1227155e4b6ced0815 | [] | no_license | sushma535/WebSites | 24a688b86e1c6571110f20421533f0e7fdf6e1a8 | 16a3bfa44e6c7e22ae230f5b336a059817871a97 | refs/heads/master | 2023-08-18T09:09:16.052555 | 2021-10-11T00:41:50 | 2021-10-11T00:41:50 | 415,621,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | import scrapy
from scrapy.crawler import CrawlerProcess
import os
import csv
from csv import reader
import re
total_data = {}
class SimilarWeb(scrapy.Spider):
    """Two-request spider: scrape grammarly.com's <title> plus the matching
    similarsites.com page, then append one merged row to a CSV file.

    NOTE(review): results are accumulated in the module-level ``total_data``
    dict and a ``count`` class attribute; this only works because exactly
    two start URLs are crawled once each.
    """
    name = 'SW'
    user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
    start_urls = ['https://www.grammarly.com/', 'https://www.similarsites.com/site/grammarly.com/']
    csv_columns = ['Category', 'Description', 'Name', 'Url']  # CSV header row
    csv_file = 'websites1_data.csv'
    count = 0  # how many of the two start URLs have been processed
    def parse(self, response):
        """Dispatch on which start URL *response* came from; once both have
        been seen (count == 2) write the combined row to the CSV."""
        data, desc, cat = '', '', ''
        print('response url:', response.url)
        if response.url == self.start_urls[0]:
            # grammarly.com: the page title becomes the site name.
            data = response.css('title::text').get()
            if data:
                data = re.sub("\n\t\t", '', data)
                total_data['Name'] = data
                self.count += 1
        elif response.url == self.start_urls[1]:
            # similarsites.com: category list and description.
            # NOTE(review): these CSS class names look build-generated and
            # brittle -- verify they still match the live site.
            cat = response.css(
                'div[class="StatisticsCategoriesDistribution__CategoryTitle-fnuckk-6 jsMDeK"]::text').getall()
            desc = response.css('div[class="SiteHeader__Description-sc-1ybnx66-8 hhZNQm"]::text').get()
            if cat:
                cat = ": ".join(cat[:])
            total_data['Category'] = cat
            total_data['Description'] = desc
            total_data['Url'] = self.start_urls[0]
            self.count += 1
        if self.count == 2:
            print("total data", total_data)
            new_data = [total_data['Category'], total_data['Description'], total_data['Name'],
                        total_data['Url']]
            print("new data", new_data)
            self.row_appending_to_csv_file(new_data)
    def row_appending_to_csv_file(self, data):
        """Append *data* as one CSV row, writing the header first when the
        file is new or still empty."""
        if os.path.exists(self.csv_file):
            need_to_add_headers = False
            with open(self.csv_file, 'a+', newline='') as file:
                file.seek(0)
                # Reading from the start tells us whether the file is empty.
                csv_reader = reader(file)
                if len(list(csv_reader)) == 0:
                    need_to_add_headers = True
                csv_writer = csv.writer(file)
                if need_to_add_headers:
                    csv_writer.writerow(self.csv_columns)
                csv_writer.writerow(data)
        else:
            with open(self.csv_file, 'w', newline='') as file:
                csv_writer = csv.writer(file)
                csv_writer.writerow(self.csv_columns)  # header
                csv_writer.writerow(data)
process = CrawlerProcess()  # default (empty) Scrapy settings
process.crawl(SimilarWeb)
process.start()  # blocks until the crawl finishes
| [
"sushmakusumareddy@gmail.com"
] | sushmakusumareddy@gmail.com |
43d3cc55120e6be67a3dcd98b31fafb9f27073d4 | 7c5ee2344572f13721f0771023c9b4771447c79c | /ArbolPatologia-python/ArbolBinario.py | bee1d15fad35f9514e013113c50846d9eaaf8025 | [] | no_license | reyes98/python_vs_java | c1c508b3dea147b4b746ceb8500fa8235c65b362 | 594d3d4e16cc0ae0f01841cfb51d3a2ba8fe03ac | refs/heads/master | 2023-08-16T10:45:40.687843 | 2021-09-13T18:02:03 | 2021-09-13T18:02:03 | 406,077,704 | 0 | 0 | null | null | null | null | ISO-8859-3 | Python | false | false | 9,779 | py | # Clase Nodo
class Nodo:
    """One node of a binary search tree: a key, a payload, and links to the
    left child, right child and parent.

    The removal helpers ``empalmar``/``encontrarSucesor``/``encontrarMin``
    live here because they operate on a single node.  In the original code
    they were placed on ``ArbolBinario`` while still being invoked on
    ``Nodo`` instances, so deleting an interior node raised AttributeError.
    """
    def __init__(self, clave, valor, izquierdo=None, derecho=None, padre=None):
        self.clave = clave              # search key
        self.cargaUtil = valor          # payload stored under the key
        self.hijoIzquierdo = izquierdo
        self.hijoDerecho = derecho
        self.padre = padre
    def tieneHijoIzquierdo(self):
        # Returns the left child itself (truthy) or None.
        return self.hijoIzquierdo
    def tieneHijoDerecho(self):
        return self.hijoDerecho
    def esHijoIzquierdo(self):
        return self.padre and self.padre.hijoIzquierdo == self
    def esHijoDerecho(self):
        return self.padre and self.padre.hijoDerecho == self
    def esRaiz(self):
        return not self.padre
    def esHoja(self):
        return not (self.hijoDerecho or self.hijoIzquierdo)
    def tieneAlgunHijo(self):
        return self.hijoDerecho or self.hijoIzquierdo
    def tieneAmbosHijos(self):
        return self.hijoDerecho and self.hijoIzquierdo
    def reemplazarDatoDeNodo(self, clave, valor, hizq, hder):
        """Overwrite this node's key/payload/children, re-linking the
        children's parent pointers to this node."""
        self.clave = clave
        self.cargaUtil = valor
        self.hijoIzquierdo = hizq
        self.hijoDerecho = hder
        if self.tieneHijoIzquierdo():
            self.hijoIzquierdo.padre = self
        if self.tieneHijoDerecho():
            self.hijoDerecho.padre = self
    def empalmar(self):
        """Splice this node out of the tree.  Used on an in-order
        successor, which never has a left child."""
        if self.esHoja():
            if self.esHijoIzquierdo():
                self.padre.hijoIzquierdo = None
            else:
                self.padre.hijoDerecho = None
        elif self.tieneAlgunHijo():
            if self.tieneHijoIzquierdo():
                if self.esHijoIzquierdo():
                    self.padre.hijoIzquierdo = self.hijoIzquierdo
                else:
                    self.padre.hijoDerecho = self.hijoIzquierdo
                self.hijoIzquierdo.padre = self.padre
            else:
                if self.esHijoIzquierdo():
                    self.padre.hijoIzquierdo = self.hijoDerecho
                else:
                    self.padre.hijoDerecho = self.hijoDerecho
                self.hijoDerecho.padre = self.padre
    def encontrarSucesor(self):
        """Return this node's in-order successor, or None."""
        suc = None
        if self.tieneHijoDerecho():
            suc = self.hijoDerecho.encontrarMin()
        else:
            if self.padre:
                if self.esHijoIzquierdo():
                    suc = self.padre
                else:
                    # Detach temporarily so the parent's search skips this
                    # right-child subtree, then restore the link.
                    self.padre.hijoDerecho = None
                    suc = self.padre.encontrarSucesor()
                    self.padre.hijoDerecho = self
        return suc
    def encontrarMin(self):
        """Return the minimum-key node of the subtree rooted here."""
        actual = self
        while actual.tieneHijoIzquierdo():
            actual = actual.hijoIzquierdo
        return actual
class ArbolBinario:
    """Binary search tree with a dict-like API: ``t[k] = v``, ``t[k]``,
    ``del t[k]``, ``k in t`` and ``len(t)``.

    Lookups of missing keys return None; equal keys are inserted into the
    right subtree (and each insertion grows the size counter).
    """
    def __init__(self):
        self.raiz = None
        self.tamano = 0
    def __len__(self):
        return self.tamano
    def agregar(self, clave, valor):
        """Insert *clave*/*valor* and grow the size counter."""
        if self.raiz:
            self._agregar(clave, valor, self.raiz)
        else:
            self.raiz = Nodo(clave, valor)
        self.tamano = self.tamano + 1
    def _agregar(self, clave, valor, nodoActual):
        # Recursive descent: smaller keys go left, everything else right.
        if clave < nodoActual.clave:
            if nodoActual.tieneHijoIzquierdo():
                self._agregar(clave, valor, nodoActual.hijoIzquierdo)
            else:
                nodoActual.hijoIzquierdo = Nodo(clave, valor, padre=nodoActual)
        else:
            if nodoActual.tieneHijoDerecho():
                self._agregar(clave, valor, nodoActual.hijoDerecho)
            else:
                nodoActual.hijoDerecho = Nodo(clave, valor, padre=nodoActual)
    def __setitem__(self, c, v):
        self.agregar(c, v)
    def obtener(self, clave):
        """Return the payload stored under *clave*, or None when absent."""
        if self.raiz:
            res = self._obtener(clave, self.raiz)
            if res:
                return res.cargaUtil
        return None
    def _obtener(self, clave, nodoActual):
        if not nodoActual:
            return None
        elif nodoActual.clave == clave:
            return nodoActual
        elif clave < nodoActual.clave:
            return self._obtener(clave, nodoActual.hijoIzquierdo)
        else:
            return self._obtener(clave, nodoActual.hijoDerecho)
    def __getitem__(self, clave):
        return self.obtener(clave)
    def __contains__(self, clave):
        return bool(self._obtener(clave, self.raiz))
    def eliminar(self, clave):
        """Remove *clave* from the tree; raise KeyError when absent."""
        if self.tamano > 1:
            nodoAEliminar = self._obtener(clave, self.raiz)
            if nodoAEliminar:
                self.remover(nodoAEliminar)
                self.tamano = self.tamano - 1
            else:
                raise KeyError('Error, la clave no esta en el arbol')
        elif self.tamano == 1 and self.raiz.clave == clave:
            # Deleting the only node empties the tree.
            self.raiz = None
            self.tamano = self.tamano - 1
        else:
            raise KeyError('Error, la clave no esta en el arbol')
    def __delitem__(self, clave):
        self.eliminar(clave)
    def remover(self, nodoActual):
        """Unlink *nodoActual*: leaf, one-child and two-children cases.

        Fixed: the successor helpers are now invoked on the node itself
        (they used to live on the tree class, which made interior-node
        deletions raise AttributeError).
        """
        if nodoActual.esHoja():  # leaf: just detach from the parent
            if nodoActual == nodoActual.padre.hijoIzquierdo:
                nodoActual.padre.hijoIzquierdo = None
            else:
                nodoActual.padre.hijoDerecho = None
        elif nodoActual.tieneAmbosHijos():  # interior: swap in the successor
            suc = nodoActual.encontrarSucesor()
            suc.empalmar()
            nodoActual.clave = suc.clave
            nodoActual.cargaUtil = suc.cargaUtil
        else:  # exactly one child: promote it
            if nodoActual.tieneHijoIzquierdo():
                if nodoActual.esHijoIzquierdo():
                    nodoActual.hijoIzquierdo.padre = nodoActual.padre
                    nodoActual.padre.hijoIzquierdo = nodoActual.hijoIzquierdo
                elif nodoActual.esHijoDerecho():
                    nodoActual.hijoIzquierdo.padre = nodoActual.padre
                    nodoActual.padre.hijoDerecho = nodoActual.hijoIzquierdo
                else:
                    # Root with only a left child: absorb the child's data.
                    nodoActual.reemplazarDatoDeNodo(nodoActual.hijoIzquierdo.clave,
                                                    nodoActual.hijoIzquierdo.cargaUtil,
                                                    nodoActual.hijoIzquierdo.hijoIzquierdo,
                                                    nodoActual.hijoIzquierdo.hijoDerecho)
            else:
                if nodoActual.esHijoIzquierdo():
                    nodoActual.hijoDerecho.padre = nodoActual.padre
                    nodoActual.padre.hijoIzquierdo = nodoActual.hijoDerecho
                elif nodoActual.esHijoDerecho():
                    nodoActual.hijoDerecho.padre = nodoActual.padre
                    nodoActual.padre.hijoDerecho = nodoActual.hijoDerecho
                else:
                    # Root with only a right child: absorb the child's data.
                    nodoActual.reemplazarDatoDeNodo(nodoActual.hijoDerecho.clave,
                                                    nodoActual.hijoDerecho.cargaUtil,
                                                    nodoActual.hijoDerecho.hijoIzquierdo,
                                                    nodoActual.hijoDerecho.hijoDerecho)
# --------------------------Fin clase ArbolBinario----------------------------
# Clase LeerArchivo
class LeerArchivo:
    """Reads the treatments file: one ``patologia;tratamiento`` pair per line."""
    def getTratamiento(self, nombreArchivo, patologia):
        """Return the treatment text for *patologia*, or None when absent.

        The file handle is managed with ``with`` so it is closed even when
        reading raises (the original leaked the handle on errors).  Any
        trailing newline stays attached to the returned text, matching the
        original behaviour.
        """
        with open(nombreArchivo, "r") as archivo:
            for linea in archivo:
                arreglo = linea.split(";")
                if arreglo[0] == patologia:
                    return arreglo[1]
        return None
# ------------Fin LeerArchivo---------------------
# Pruebas
# Manual test: build the diagnosis decision tree.  Keys are bit-strings
# encoding the yes/no answers so far ("1" = yes, "0" = no); values are
# either a follow-up question (contains "?") or a final diagnosis.
# NOTE(review): ``raw_input`` means this script targets Python 2.
arbol = ArbolBinario() #6
arbol["1"] = "presenta sintomas internos?"#2
arbol["11"] = "Dolor de cabeza?"#2
arbol["10"] = "Dolor articular?"#2
arbol["111"] = "Tiene tos?"#2
arbol["110"] = "Dolor en el pecho?"#2
arbol["101"] = "Inmovilidad total?"#2
arbol["100"] = "Se presentan erosiones en la piel?"#2
arbol["1111"] = "gripe"#2
arbol["1110"] = "hipertension"#2
arbol["1101"] = "dificultades al respirar?"#2
arbol["1100"] = "dolor abdominal?"#2
arbol["1011"] = "fractura"#2
arbol["1010"] = "esguince"#2
arbol["1001"] = "alergia"#2
arbol["1000"] = "gripe"#2
arbol["11011"] = "asma"#2
arbol["11010"] = "gripe"#2
arbol["11001"] = "apendicitis"#2
arbol["11000"] = "vision borrosa?"#2
arbol["110001"] = "diabetes"#2
arbol["110000"] = "examenes"#2
# complexity of creation and insertions = (21(2)+ 21(19 + 2(8n+12))) + 6
nombre = raw_input('Ingrese su nombre:') #3
print('Bienvenido ' + nombre) #3
print ('Para contestar (1)si (0)no')#1
tratamiento = False #2
clave = "1" #2
# running total = 11 + (21(2)+ 21(19 + 2(8n+12))) + 6
# Walk the tree: append the user's answer bit to the key until the value
# no longer contains "?" (i.e. it is a diagnosis, not a question).
while(tratamiento==False): #1
    decision = raw_input(arbol[clave]) #14+20n
    if decision == "1": #1
        clave = clave+"1" #2
    if decision == "0": #1
        clave = clave + "0" #2
    if not("?" in arbol[clave]): #14+20n
        tratamiento = True #1
    #7n(2(14+20n))
# running total = 11 + (21(2)+ 21(19 + 2(8n+12))) + 7n(2(14+20n))
# Look up the treatment for the reached diagnosis in the treatments file.
lector = LeerArchivo() #3
print(nombre +" usted tiene "+ arbol[clave]) #4
print(lector.getTratamiento("tratamientos.txt", arbol[clave]))#18+3n + 12+20n
#total = 11 + (21(2)+ 21(19 + 2(8n+12))) + 7n(2(14+20n)) + 18+3n + 12+20n
#t(n)= 280n^2+555n+986
# ------------------------
| [
"34418186+reyes98@users.noreply.github.com"
] | 34418186+reyes98@users.noreply.github.com |
700a9fbcb89b1b66f52a940e26430e4a1f4c5494 | c96d9a76fe28630fe1b4cd7efa22e12fdce0399f | /kaggle/Song_popularity/optimize.py | a81690bd13b858784678e25cf5a75a1761a95589 | [] | no_license | tarunbhavnani/ml_diaries | 858839e8ab8817caae3d56d3dad6d4ee9176ddbe | 8d0700211a2881279df60ab2bea7095ef95ea8dc | refs/heads/master | 2023-08-18T08:28:50.881356 | 2023-08-16T09:39:34 | 2023-08-16T09:39:34 | 157,958,911 | 0 | 1 | null | 2023-03-13T05:17:52 | 2018-11-17T06:52:34 | Python | UTF-8 | Python | false | false | 1,620 | py | #!/usr/bin/env python3
"""
optimize.py
"""
import optuna
import optuna.integration.lightgbm as lgb
import pandas as pd
from lightgbm import early_stopping, log_evaluation
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
def objective(trial: optuna.Trial):
    """Optuna objective: train a LightGBM binary classifier on the song
    popularity data and return its validation ROC-AUC (higher is better).

    NOTE(review): the CSV is re-read on every trial; hoisting the read out
    of the objective would speed up the study considerably.
    """
    df = pd.read_csv("train-knn3.csv")
    # Stratified 80/20 split with a fixed seed so every trial sees the
    # same train/validation partition.
    train_x, test_x, train_y, test_y = train_test_split(
        df.drop(columns="song_popularity"), df["song_popularity"], test_size=0.2, stratify= df["song_popularity"], random_state=1
    )
    # Search space: regularization, ensemble/tree size and sampling knobs.
    params = {
        "metric": "auc",
        "objective": "binary",
        "reg_alpha": trial.suggest_float("reg_alpha", 1e-8, 10.0, log=True),
        "reg_lambda": trial.suggest_float("reg_lambda", 1e-8, 10.0, log=True),
        "n_estimators": trial.suggest_int("n_estimators", 1, 100),
        "num_leaves": trial.suggest_int("num_leaves", 2, 256),
        "feature_fraction": trial.suggest_float("feature_fraction", 0.4, 1.0),
        "bagging_fraction": trial.suggest_float("bagging_fraction", 0.4, 1.0),
        "min_child_samples": trial.suggest_int("min_child_samples", 5, 100),
    }
    dtrain = lgb.Dataset(train_x, label=train_y)
    dval = lgb.Dataset(test_x, label=test_y)
    # Early stopping watches the last valid set (dval); progress is logged
    # every 100 iterations.
    model = lgb.train(
        params,
        dtrain,
        valid_sets=[dtrain, dval],
        callbacks=[early_stopping(100), log_evaluation(100)],
    )
    prediction = model.predict(test_x, num_iteration=model.best_iteration)
    return roc_auc_score(test_y, prediction)
# The objective returns ROC-AUC, a score to maximize.  The default study
# direction is "minimize", which tuned the hyper-parameters toward the
# *worst* AUC; make the direction explicit.
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_jobs=-1, n_trials=100)
print(study.best_params)
| [
"tarun.bhavnani@gmail.com"
] | tarun.bhavnani@gmail.com |
742f8a6dd2aee367cca6f94262b5612485524064 | 868b90e85541f1f76e1805346f18c2cb7675ffc8 | /cnn/02_introductory_cnn.py | 1eac1abda98b5a9c829bc11f7f50f4ba5b7d7589 | [] | no_license | WOW5678/tensorflow_study | a9a447c39c63a751046d4776eedc17589324634e | 39e202b102cd2ebc1ba16f793acc8ebe9ea0e752 | refs/heads/master | 2020-03-19T15:49:24.729778 | 2018-10-19T06:45:26 | 2018-10-19T06:45:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,639 | py | # Introductory CNN Model: MNIST Digits
#---------------------------------------
#
# In this example, we will download the MNIST handwritten
# digits and create a simple CNN network to predict the
# digit category (0-9)
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph session
sess = tf.Session()
# Load data
data_dir = 'temp'
mnist = read_data_sets(data_dir)
# Convert images into 28x28 (they are downloaded as 1x784)
train_xdata = np.array([np.reshape(x, (28,28)) for x in mnist.train.images])
test_xdata = np.array([np.reshape(x, (28,28)) for x in mnist.test.images])
# Convert labels into one-hot encoded vectors
train_labels = mnist.train.labels
test_labels = mnist.test.labels
# Set model parameters
batch_size = 100
learning_rate = 0.005
evaluation_size = 500
image_width = train_xdata[0].shape[0]
image_height = train_xdata[0].shape[1]
target_size = max(train_labels) + 1
num_channels = 1 # greyscale = 1 channel
generations = 500
eval_every = 5
conv1_features = 25
conv2_features = 50
max_pool_size1 = 2 # NxN window for 1st max pool layer
max_pool_size2 = 2 # NxN window for 2nd max pool layer
fully_connected_size1 = 100
# Declare model placeholders
x_input_shape = (batch_size, image_width, image_height, num_channels)
x_input = tf.placeholder(tf.float32, shape=x_input_shape)
y_target = tf.placeholder(tf.int32, shape=(batch_size))
eval_input_shape = (evaluation_size, image_width, image_height, num_channels)
eval_input = tf.placeholder(tf.float32, shape=eval_input_shape)
eval_target = tf.placeholder(tf.int32, shape=(evaluation_size))
# Declare model parameters
conv1_weight = tf.Variable(tf.truncated_normal([4, 4, num_channels, conv1_features],
stddev=0.1, dtype=tf.float32))
conv1_bias = tf.Variable(tf.zeros([conv1_features], dtype=tf.float32))
conv2_weight = tf.Variable(tf.truncated_normal([4, 4, conv1_features, conv2_features],
stddev=0.1, dtype=tf.float32))
conv2_bias = tf.Variable(tf.zeros([conv2_features], dtype=tf.float32))
# fully connected variables
resulting_width = image_width // (max_pool_size1 * max_pool_size2)
resulting_height = image_height // (max_pool_size1 * max_pool_size2)
full1_input_size = resulting_width * resulting_height * conv2_features
full1_weight = tf.Variable(tf.truncated_normal([full1_input_size, fully_connected_size1],
stddev=0.1, dtype=tf.float32))
full1_bias = tf.Variable(tf.truncated_normal([fully_connected_size1], stddev=0.1, dtype=tf.float32))
full2_weight = tf.Variable(tf.truncated_normal([fully_connected_size1, target_size],
stddev=0.1, dtype=tf.float32))
full2_bias = tf.Variable(tf.truncated_normal([target_size], stddev=0.1, dtype=tf.float32))
# Initialize Model Operations
def my_conv_net(input_data):
    """Build the CNN graph: two Conv-ReLU-MaxPool stages followed by two
    fully connected layers; returns the final logits tensor.

    The weights/biases (conv*_weight/bias, full*_weight/bias) and the pool
    sizes are module-level variables defined above, so this same function
    builds both the training and the evaluation graphs over shared
    parameters (TensorFlow 1.x API).
    """
    # First Conv-ReLU-MaxPool Layer
    conv1 = tf.nn.conv2d(input_data, conv1_weight, strides=[1, 1, 1, 1], padding='SAME')
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_bias))
    max_pool1 = tf.nn.max_pool(relu1, ksize=[1, max_pool_size1, max_pool_size1, 1],
                               strides=[1, max_pool_size1, max_pool_size1, 1], padding='SAME')
    # Second Conv-ReLU-MaxPool Layer
    conv2 = tf.nn.conv2d(max_pool1, conv2_weight, strides=[1, 1, 1, 1], padding='SAME')
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_bias))
    max_pool2 = tf.nn.max_pool(relu2, ksize=[1, max_pool_size2, max_pool_size2, 1],
                               strides=[1, max_pool_size2, max_pool_size2, 1], padding='SAME')
    # Transform Output into a 1xN layer for next fully connected layer.
    # NOTE: this uses the static batch dimension, so the input placeholder
    # must have a fixed batch size.
    final_conv_shape = max_pool2.get_shape().as_list()
    final_shape = final_conv_shape[1] * final_conv_shape[2] * final_conv_shape[3]
    flat_output = tf.reshape(max_pool2, [final_conv_shape[0], final_shape])
    # First Fully Connected Layer
    fully_connected1 = tf.nn.relu(tf.add(tf.matmul(flat_output, full1_weight), full1_bias))
    # Second Fully Connected Layer (logits; softmax is applied by the loss)
    final_model_output = tf.add(tf.matmul(fully_connected1, full2_weight), full2_bias)
    return(final_model_output)
model_output = my_conv_net(x_input)
test_model_output = my_conv_net(eval_input)
# Declare Loss Function (softmax cross entropy)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Create a prediction function
prediction = tf.nn.softmax(model_output)
test_prediction = tf.nn.softmax(test_model_output)
# Create accuracy function
def get_accuracy(logits, targets):
    """Return the percentage of rows of *logits* whose argmax matches *targets*."""
    predicted_classes = np.argmax(logits, axis=1)
    hits = np.equal(predicted_classes, targets)
    return 100. * np.mean(hits)
# Create an optimizer
my_optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)
train_step = my_optimizer.minimize(loss)
# Initialize Variables
init = tf.global_variables_initializer()
sess.run(init)
# Start training loop
train_loss = []
train_acc = []
test_acc = []
for i in range(generations):
rand_index = np.random.choice(len(train_xdata), size=batch_size)
rand_x = train_xdata[rand_index]
rand_x = np.expand_dims(rand_x, 3)
rand_y = train_labels[rand_index]
train_dict = {x_input: rand_x, y_target: rand_y}
sess.run(train_step, feed_dict=train_dict)
temp_train_loss, temp_train_preds = sess.run([loss, prediction], feed_dict=train_dict)
temp_train_acc = get_accuracy(temp_train_preds, rand_y)
if (i+1) % eval_every == 0:
eval_index = np.random.choice(len(test_xdata), size=evaluation_size)
eval_x = test_xdata[eval_index]
eval_x = np.expand_dims(eval_x, 3)
eval_y = test_labels[eval_index]
test_dict = {eval_input: eval_x, eval_target: eval_y}
test_preds = sess.run(test_prediction, feed_dict=test_dict)
temp_test_acc = get_accuracy(test_preds, eval_y)
# Record and print results
train_loss.append(temp_train_loss)
train_acc.append(temp_train_acc)
test_acc.append(temp_test_acc)
acc_and_loss = [(i+1), temp_train_loss, temp_train_acc, temp_test_acc]
acc_and_loss = [np.round(x,2) for x in acc_and_loss]
print('Generation # {}. Train Loss: {:.2f}. Train Acc (Test Acc): {:.2f} ({:.2f})'.format(*acc_and_loss))
# Matlotlib code to plot the loss and accuracies
eval_indices = range(0, generations, eval_every)
# Plot loss over time
plt.plot(eval_indices, train_loss, 'k-')
plt.title('Softmax Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Softmax Loss')
plt.show()
# Plot train and test accuracy
plt.plot(eval_indices, train_acc, 'k-', label='Train Set Accuracy')
plt.plot(eval_indices, test_acc, 'r--', label='Test Set Accuracy')
plt.title('Train and Test Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
# Plot some samples
# Plot the 6 of the last batch results:
actuals = rand_y[0:6]
predictions = np.argmax(temp_train_preds,axis=1)[0:6]
images = np.squeeze(rand_x[0:6])
Nrows = 2
Ncols = 3
for i in range(6):
plt.subplot(Nrows, Ncols, i+1)
plt.imshow(np.reshape(images[i], [28,28]), cmap='Greys_r')
plt.title('Actual: ' + str(actuals[i]) + ' Pred: ' + str(predictions[i]),
fontsize=10)
frame = plt.gca()
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
| [
"noreply@github.com"
] | noreply@github.com |
87e624c046d7ee1c73d71e7f9fb69741e1ad4cda | deab3d208515c90dae53e8f675ecbcbf5f9c0de2 | /LearningDjango/settings.py | 0adaa8387e0f9b26515b7a05b2b9d319438643db | [] | no_license | jimersylee/LearningDjango | 9a6e05f3d29ef3db60d811a655ceda154d42c470 | 32ac6e9cba9529e662ba697e478b985841e94b0a | refs/heads/master | 2020-04-07T04:18:11.693915 | 2018-12-20T01:17:59 | 2018-12-20T01:17:59 | 158,049,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,142 | py | """
Django settings for LearningDjango project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-r^l47*&7c6da5j_=0ffmg2m7zvd*trbe1!%^=)$kfcc2vemq0'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Hosts/domains this site may serve; must be populated before DEBUG is turned off.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Local tutorial app (polls models/views), registered via its AppConfig.
    'polls.apps.PollsConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'LearningDjango.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'LearningDjango.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Development default: a local SQLite file next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'
| [
"jimersylee@gmail.com"
] | jimersylee@gmail.com |
cd70acfcb63726d43d38f161933d4473e020bcb4 | 416ea1127f3e3a1a8e64dd980e59c7bf585379a0 | /read_favorite_number.py | 13e6938ce6b8bf90c1ab89faddd39721b21296a8 | [] | no_license | jocogum10/learning_python_crash_course | 6cf826e4324f91a49da579fb1fcd3ca623c20306 | c159d0b0de0be8e95eb8777a416e5010fbb9e2ca | refs/heads/master | 2020-12-10T02:55:40.757363 | 2020-01-13T01:22:44 | 2020-01-13T01:22:44 | 233,486,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | import json
filename = 'favorite_number.json'
with open(filename) as file_object:
message = json.load(file_object)
print("I know your favorite number! It's " + message + "!")
| [
"jocogum10@gmail.com"
] | jocogum10@gmail.com |
18ff290030cc8d441adde214b3e61d991f58a2cd | 6df5a0ba285b74310b0b8349807c302b407623c7 | /wide_cnn_rnn/RNN_TF.py | be4a052ca9246306b55b1b9813d112f8042fb0ee | [] | no_license | denghuolanshan12/AppIdentificationHTTPS | cddf241d21e8ecb873b5c0195e9b0627413391b6 | 1beb06d2669a3932dd02f86eb628a3dec2b7a038 | refs/heads/master | 2022-01-22T09:17:02.326799 | 2019-08-01T11:13:50 | 2019-08-01T11:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,243 | py | #!/usr/bin/env python
#coding=utf-8
import tensorflow as tf
from result import figures
# Print INFO-level TensorFlow logs.
tf.logging.set_verbosity(tf.logging.INFO)
# Paths to the train/test TFRecord data files.
path_tfrecords_train="../../data/train.tfrecord"
path_tfrecords_test="../../data/test.tfrecord"
# TFRecord example parsing function.
def parse(serialized):
    """Deserialize one serialized TFRecord example into its feature tensors.

    Returns:
        A (recordTypes, packetLength, packetPayload, label) tuple of int64
        tensors with shapes [64], [64], [1024] and scalar respectively.
    """
    feature_spec = {
        'recordTypes': tf.FixedLenFeature([64], tf.int64),
        'packetLength': tf.FixedLenFeature([64], tf.int64),
        'packetPayload': tf.FixedLenFeature([1024], tf.int64),
        'label': tf.FixedLenFeature([], tf.int64),
    }
    example = tf.parse_single_example(serialized=serialized, features=feature_spec)
    return (example['recordTypes'],
            example['packetLength'],
            example['packetPayload'],
            example['label'])
# Input pipeline definition.
def input_fn(filenames, train, batch_size=32, buffer_size=2048):
    """Build the tf.data pipeline feeding the Estimator.

    Training pipelines are shuffled and repeat indefinitely; evaluation
    pipelines run through the data exactly once.
    """
    dataset = tf.data.TFRecordDataset(filenames=filenames).map(parse)
    if train:
        # Shuffle and loop forever while training.
        dataset = dataset.shuffle(buffer_size=buffer_size).repeat(None)
    else:
        dataset = dataset.repeat(1)
    dataset = dataset.batch(batch_size)
    record_types, packet_length, packet_payload, labels = (
        dataset.make_one_shot_iterator().get_next())
    # The payload bytes are consumed as floats by the model.
    features = {
        "recordTypes": record_types,
        "packetLength": packet_length,
        "packetPayload": tf.cast(packet_payload, tf.float32),
    }
    return features, labels
# Training batches.
def train_input_fn():
    """Input function for training: shuffled, endlessly repeating batches."""
    return input_fn(path_tfrecords_train, train=True)
# Evaluation batches.
def test_input_fn():
    """Input function for evaluation: one pass in a single large batch."""
    return input_fn(path_tfrecords_test, batch_size=5000, train=False)
# Model definition.
def model_fn(features, labels, mode, params):
    """Estimator model_fn: embedding -> RNN -> attention -> two dense layers.

    Classifies a flow into one of 14 application classes from the sequence of
    64 TLS record-type ids.  ``params["learning_rate"]`` configures Adam.
    Layer/variable names are part of the checkpoint format -- do not rename.
    """
    # (Disabled) CNN branch over the raw packet payload bytes; kept for reference.
    # x1 = features["packetPayload"]
    # net1 = tf.reshape(x1, [-1, 32, 32, 1])
    #
    # # First convolutional layer.
    # net1 = tf.layers.conv2d(inputs=net1, name='layer_conv1',
    #                         filters=32, kernel_size=3,
    #                         padding='same', activation=tf.nn.relu)
    # net1 = tf.layers.max_pooling2d(inputs=net1, pool_size=2, strides=2)
    #
    # net1 = tf.layers.conv2d(inputs=net1, name='layer_conv2',
    #                         filters=32, kernel_size=3,
    #                         padding='same', activation=tf.nn.relu)
    # net1 = tf.layers.max_pooling2d(inputs=net1, pool_size=2, strides=2)
    # net1 = tf.contrib.layers.flatten(net1)
    # net1 = tf.layers.dense(inputs=net1, name='layer_fc1',
    #                        units=128, activation=tf.nn.relu)

    x2 = features["recordTypes"]
    net2 = tf.reshape(x2,[-1,64])
    # Embedding: map each of the 257 possible record-type ids to a 32-d vector.
    word_embeddings = tf.get_variable("word_embeddings",[257, 32])
    net2 = tf.nn.embedding_lookup(word_embeddings, net2)
    # RNN over the 64-step sequence; only the last time step's output is used.
    rnn_cell=tf.nn.rnn_cell.BasicRNNCell(64)
    output, states = tf.nn.dynamic_rnn(rnn_cell, net2, dtype=tf.float32)
    net = tf.layers.dense(inputs=output[:,-1,:], name='layer_rnn_fc_1',
                          units=128, activation=tf.nn.relu)
    # Attention: softmax weights over the 128 features, applied elementwise.
    attention_probs = tf.layers.dense(inputs=net, name="attention_probs",units=128,activation='softmax')
    net=tf.multiply(net,attention_probs)
    print("net.shape:",net.shape)
    # Fully connected layer 1.
    net = tf.layers.dense(inputs=net, name='layer_combine_fc_x',units=128,activation=tf.nn.relu)
    # Fully connected layer 2: one logit per application class.
    net = tf.layers.dense(inputs=net, name='layer_combine_fc_y',units=14)

    # Logits output of the neural network.
    logits = net
    # Softmax output of the neural network.
    y_pred = tf.nn.softmax(logits=logits)
    # Classification output of the neural network.
    y_pred_cls = tf.argmax(y_pred, axis=1)

    if mode == tf.estimator.ModeKeys.PREDICT:
        spec = tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=y_pred_cls)
    else:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                       logits=logits)
        loss = tf.reduce_mean(cross_entropy)
        # Define the optimizer for improving the neural network.
        optimizer = tf.train.AdamOptimizer(learning_rate=params["learning_rate"])
        # Get the TensorFlow op for doing a single optimization step.
        train_op = optimizer.minimize(
            loss=loss, global_step=tf.train.get_global_step())
        accuracy = tf.metrics.accuracy(labels, y_pred_cls)
        tf.summary.scalar('accuracy',accuracy[1])
        metrics = {
            "accuracy": accuracy
        }
        # NOTE(review): logging_hook is built but not attached below
        # (training_hooks is commented out), so it currently has no effect.
        logging_hook = tf.train.LoggingTensorHook({"loss": loss,
                                                   "accuracy": accuracy}, every_n_iter=10)
        # Wrap all of this in an EstimatorSpec.
        spec = tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=metrics,
            # training_hooks=[logging_hook]
        )
    return spec
# Hyper-parameters handed to model_fn through the Estimator.
params = {"learning_rate": 1e-4}
model = tf.estimator.Estimator(model_fn=model_fn,
                               params=params,
                               model_dir="./checkpoints_novpn_rnn/")
# Train the model.
model.train(input_fn=train_input_fn, steps=20000)
# Evaluate the model.
result = model.evaluate (input_fn=test_input_fn)
print(result)
# Run prediction on the test set.
predicts=model.predict(input_fn=test_input_fn)
print(predicts)
# predict() returns a generator; materialize it into a list of class ids.
predicts=[p for p in predicts]
print(predicts)
# Pull the ground-truth labels out of the evaluation pipeline.
_,y=test_input_fn()
sess = tf.Session()
# NOTE(review): tf.initialize_all_variables is deprecated in favour of
# tf.global_variables_initializer -- confirm against the installed TF version.
init = tf.initialize_all_variables()
sess.run(init)
y_true=sess.run(y)
# Class names for the confusion matrix (alternative app-label set kept below).
# alphabet=["baiduditu","baidutieba","cloudmusic","iqiyi","jingdong","jinritoutiao","meituan","qq","qqmusic","qqyuedu","taobao","weibo","xiecheng","zhihu"]
alphabet=["AIM","email","facebookchat","gmailchat","hangoutsaudio","hangoutschat","icqchat","netflix","skypechat","skypefile","spotify","vimeo","youtube","youtubeHTML5"]
figures.plot_confusion_matrix(y_true, predicts,alphabet, "./")
"tianmao1994@yahoo.com"
] | tianmao1994@yahoo.com |
789e83f567e4c9174af23960e667b0add9a2e621 | 0856b79000b77e177e9115b620fec948c2c9532d | /gardener/database_operations.py | 85c48b61516459d2a964bda1130bc96505b9f376 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | femmerling/backyard | 6174db0b9f1f75b66f748e9976a6d7013a956753 | d4d3eb5922d60f0e471d5e6a59bab5d68707d05d | refs/heads/master | 2021-01-02T08:21:51.001914 | 2015-02-22T10:15:08 | 2015-02-22T10:15:08 | 12,624,737 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,860 | py | import imp
import os.path
from app import db
from migrate.versioning import api
from config import UserConfig
config = UserConfig()
SQLALCHEMY_DATABASE_URI = config.SQLALCHEMY_DATABASE_URI
SQLALCHEMY_MIGRATE_REPO = config.SQLALCHEMY_MIGRATE_REPO
def db_create():
    """Create all tables and put the database under migrate version control."""
    db.create_all()
    repo_exists = os.path.exists(SQLALCHEMY_MIGRATE_REPO)
    if repo_exists:
        # A repository already exists: register the database at its current version.
        current = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, current)
    else:
        # First run: create the migration repository and start version control.
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
        print('\nDatabase creation completed\n')
def db_migrate():
    """Generate and apply the next numbered migration script.

    Diffs the live database schema against the SQLAlchemy models declared in
    ``db.metadata``, writes the diff as a new migration script and upgrades
    the database to it.  Newly created databases should go through this too.
    """
    migration = SQLALCHEMY_MIGRATE_REPO + '/versions/%03d_migration.py' % (api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) + 1)
    # Load the live database's current schema into a scratch module so it can
    # be compared against the declared models.
    tmp_module = imp.new_module('old_model')
    old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    # Tuple form of exec works on both Python 2 and 3 (the original
    # `exec ... in ...` statement is Python-2-only syntax).
    exec(old_model, tmp_module.__dict__)
    script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
    # Context manager guarantees the script file is flushed and closed even on
    # error; the original `open(...).write(...)` leaked the file handle.
    with open(migration, "wt") as migration_file:
        migration_file.write(script)
    api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    print('New migration saved as ' + migration)
    print('Current database version: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)) + '\n')
def db_upgrade():
    """Upgrade the database schema to the latest migration."""
    api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    new_version = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    print('Database upgrade completed!')
    print('Current database version is: ' + str(new_version))
def db_downgrade(version=None):
    """Downgrade the database schema.

    Args:
        version: Target schema version.  When ``None``, downgrades by exactly
            one version from the current one.

    Uses an explicit ``is None`` test: the original ``if not version`` wrongly
    treated an explicit ``version=0`` as "not given" and downgraded to
    ``current - 1`` instead of to version 0.
    """
    if version is None:
        current_version = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
        downgrade_version = current_version - 1
    else:
        downgrade_version = version
    api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, downgrade_version)
    print('Database downgrade completed!')
    print('Current database version: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)))
def db_version():
    """Print the database's current migration version."""
    print('The current database version is ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)))
# end of file | [
"erich@emfeld.com"
] | erich@emfeld.com |
13423cd0e461c0cae46874f40a88916e7e259d73 | 13c404b0f6e45049ed1f2dc788f5c55129c8bd57 | /TriblerGUI/widgets/settingspage.py | c2e60ceb1f7ad236ba96b0d0d482a0d5c9f5610e | [] | no_license | brussee/tribler | 5a849e7b260a5f95e360c2b079e4d7f5d065d7af | d1a1d0ba08ba4a3fca9ef99aad3e74859a7454e3 | refs/heads/android-app | 2021-01-24T01:10:49.293465 | 2016-12-02T16:32:38 | 2016-12-02T16:32:38 | 46,921,190 | 0 | 2 | null | 2016-05-19T13:48:56 | 2015-11-26T10:53:45 | Python | UTF-8 | Python | false | false | 11,404 | py | import json
from PyQt5.QtWidgets import QWidget
from TriblerGUI.defs import PAGE_SETTINGS_GENERAL, PAGE_SETTINGS_CONNECTION, PAGE_SETTINGS_BANDWIDTH, \
PAGE_SETTINGS_SEEDING, PAGE_SETTINGS_ANONYMITY, BUTTON_TYPE_NORMAL
from TriblerGUI.dialogs.confirmationdialog import ConfirmationDialog
from TriblerGUI.tribler_request_manager import TriblerRequestManager
from TriblerGUI.utilities import seconds_to_string, string_to_minutes, get_gui_setting
class SettingsPage(QWidget):
    """
    This class is responsible for displaying and adjusting the settings present in Tribler.

    Widget values are populated from the core's REST ``settings`` endpoint and
    written back (plus a few GUI-only QSettings keys) when the user saves.
    """

    def __init__(self):
        QWidget.__init__(self)
        self.settings = None  # last settings dict received from the core
        self.settings_request_mgr = None  # keeps the pending REST request alive
        self.saved_dialog = None  # "settings saved" confirmation dialog

    def initialize_settings_page(self):
        """Wire up tab navigation, the save button and the checkbox handlers."""
        self.window().settings_tab.initialize()
        self.window().settings_tab.clicked_tab_button.connect(self.clicked_tab_button)
        self.window().settings_save_button.clicked.connect(self.save_settings)

        self.window().developer_mode_enabled_checkbox.stateChanged.connect(self.on_developer_mode_checkbox_changed)
        self.window().download_settings_anon_checkbox.stateChanged.connect(self.on_anon_download_state_changed)

    def on_developer_mode_checkbox_changed(self, _):
        """Persist the developer-mode flag and toggle the debug menu entry."""
        self.window().gui_settings.setValue("debug", self.window().developer_mode_enabled_checkbox.isChecked())
        self.window().left_menu_button_debug.setHidden(not self.window().developer_mode_enabled_checkbox.isChecked())

    def on_anon_download_state_changed(self, _):
        """Anonymous downloads imply safe seeding: force-enable and lock the
        safe-seeding checkbox while anonymity is checked."""
        if self.window().download_settings_anon_checkbox.isChecked():
            self.window().download_settings_anon_seeding_checkbox.setChecked(True)
        self.window().download_settings_anon_seeding_checkbox.setEnabled(
            not self.window().download_settings_anon_checkbox.isChecked())

    def initialize_with_settings(self, settings):
        """Populate every widget from the settings dict returned by the core."""
        self.settings = settings
        settings = settings["settings"]
        gui_settings = self.window().gui_settings

        # General settings
        self.window().developer_mode_enabled_checkbox.setChecked(get_gui_setting(gui_settings, "debug",
                                                                                 False, is_bool=True))
        self.window().family_filter_checkbox.setChecked(settings['general']['family_filter'])
        self.window().download_location_input.setText(settings['downloadconfig']['saveas'])
        self.window().always_ask_location_checkbox.setChecked(
            get_gui_setting(gui_settings, "ask_download_settings", True, is_bool=True))
        self.window().download_settings_anon_checkbox.setChecked(get_gui_setting(
            gui_settings, "default_anonymity_enabled", True, is_bool=True))
        self.window().download_settings_anon_seeding_checkbox.setChecked(
            get_gui_setting(gui_settings, "default_safeseeding_enabled", True, is_bool=True))
        self.window().watchfolder_enabled_checkbox.setChecked(settings['watch_folder']['enabled'])
        self.window().watchfolder_location_input.setText(settings['watch_folder']['watch_folder_dir'])

        # Connection settings
        self.window().firewall_current_port_input.setText(str(settings['general']['minport']))
        self.window().lt_proxy_type_combobox.setCurrentIndex(settings['libtorrent']['lt_proxytype'])
        if settings['libtorrent']['lt_proxyserver']:
            self.window().lt_proxy_server_input.setText(settings['libtorrent']['lt_proxyserver'][0])
            self.window().lt_proxy_port_input.setText(settings['libtorrent']['lt_proxyserver'][1])
        if settings['libtorrent']['lt_proxyauth']:
            self.window().lt_proxy_username_input.setText(settings['libtorrent']['lt_proxyauth'][0])
            self.window().lt_proxy_password_input.setText(settings['libtorrent']['lt_proxyauth'][1])
        self.window().lt_utp_checkbox.setChecked(settings['libtorrent']['utp'])

        max_conn_download = settings['libtorrent']['max_connections_download']
        if max_conn_download == -1:
            # -1 in the core means "unlimited", shown as 0 in the UI.
            max_conn_download = 0
        self.window().max_connections_download_input.setText(str(max_conn_download))

        # Bandwidth settings
        self.window().upload_rate_limit_input.setText(str(settings['Tribler']['maxuploadrate']))
        self.window().download_rate_limit_input.setText(str(settings['Tribler']['maxdownloadrate']))

        # Seeding settings
        getattr(self.window(), "seeding_" + settings['downloadconfig']['seeding_mode'] + "_radio").setChecked(True)
        self.window().seeding_time_input.setText(seconds_to_string(settings['downloadconfig']['seeding_time']))
        ind = self.window().seeding_ratio_combobox.findText(str(settings['downloadconfig']['seeding_ratio']))
        if ind != -1:
            self.window().seeding_ratio_combobox.setCurrentIndex(ind)

        # Anonymity settings
        self.window().allow_exit_node_checkbox.setChecked(settings['tunnel_community']['exitnode_enabled'])
        # Slider is zero-based while hop counts start at 1.
        self.window().number_hops_slider.setValue(int(settings['Tribler']['default_number_hops']) - 1)
        self.window().multichain_enabled_checkbox.setChecked(settings['multichain']['enabled'])

    def load_settings(self):
        """Asynchronously fetch the current settings from the core REST API."""
        self.settings_request_mgr = TriblerRequestManager()
        self.settings_request_mgr.perform_request("settings", self.initialize_with_settings)

    def clicked_tab_button(self, tab_button_name):
        """Switch the stacked widget to the page matching the clicked tab."""
        if tab_button_name == "settings_general_button":
            self.window().settings_stacked_widget.setCurrentIndex(PAGE_SETTINGS_GENERAL)
        elif tab_button_name == "settings_connection_button":
            self.window().settings_stacked_widget.setCurrentIndex(PAGE_SETTINGS_CONNECTION)
        elif tab_button_name == "settings_bandwidth_button":
            self.window().settings_stacked_widget.setCurrentIndex(PAGE_SETTINGS_BANDWIDTH)
        elif tab_button_name == "settings_seeding_button":
            self.window().settings_stacked_widget.setCurrentIndex(PAGE_SETTINGS_SEEDING)
        elif tab_button_name == "settings_anonymity_button":
            self.window().settings_stacked_widget.setCurrentIndex(PAGE_SETTINGS_ANONYMITY)

    def save_settings(self):
        """Read every widget back into a settings dict and POST it to the core.

        Shows an error dialog and aborts (without saving) when a numeric or
        time field fails to parse.
        """
        # Create a dictionary with all available settings
        settings_data = {'general': {}, 'Tribler': {}, 'downloadconfig': {}, 'libtorrent': {}, 'watch_folder': {},
                         'tunnel_community': {}, 'multichain': {}}
        settings_data['general']['family_filter'] = self.window().family_filter_checkbox.isChecked()
        settings_data['downloadconfig']['saveas'] = self.window().download_location_input.text()
        settings_data['watch_folder']['enabled'] = self.window().watchfolder_enabled_checkbox.isChecked()
        if settings_data['watch_folder']['enabled']:
            settings_data['watch_folder']['watch_folder_dir'] = self.window().watchfolder_location_input.text()
        settings_data['general']['minport'] = self.window().firewall_current_port_input.text()
        settings_data['libtorrent']['lt_proxytype'] = self.window().lt_proxy_type_combobox.currentIndex()

        # Proxy server/credentials are only sent when both fields are filled in.
        if len(self.window().lt_proxy_server_input.text()) > 0 and len(self.window().lt_proxy_port_input.text()) > 0:
            settings_data['libtorrent']['lt_proxyserver'] = [None, None]
            settings_data['libtorrent']['lt_proxyserver'][0] = self.window().lt_proxy_server_input.text()
            settings_data['libtorrent']['lt_proxyserver'][1] = self.window().lt_proxy_port_input.text()
        if len(self.window().lt_proxy_username_input.text()) > 0 and \
                len(self.window().lt_proxy_password_input.text()) > 0:
            settings_data['libtorrent']['lt_proxyauth'] = [None, None]
            settings_data['libtorrent']['lt_proxyauth'][0] = self.window().lt_proxy_username_input.text()
            settings_data['libtorrent']['lt_proxyauth'][1] = self.window().lt_proxy_password_input.text()
        settings_data['libtorrent']['utp'] = self.window().lt_utp_checkbox.isChecked()

        try:
            max_conn_download = int(self.window().max_connections_download_input.text())
        except ValueError:
            ConfirmationDialog.show_error(self.window(), "Invalid number of connections",
                                          "You've entered an invalid format for the maximum number of connections.")
            return
        if max_conn_download == 0:
            # 0 in the UI means "unlimited" (-1 in the core).
            max_conn_download = -1
        settings_data['libtorrent']['max_connections_download'] = max_conn_download

        if self.window().upload_rate_limit_input.text():
            settings_data['Tribler']['maxuploadrate'] = self.window().upload_rate_limit_input.text()
        if self.window().download_rate_limit_input.text():
            settings_data['Tribler']['maxdownloadrate'] = self.window().download_rate_limit_input.text()

        # First matching radio button wins; 'forever' is the fallback.
        seeding_modes = ['forever', 'time', 'never', 'ratio']
        selected_mode = 'forever'
        for seeding_mode in seeding_modes:
            if getattr(self.window(), "seeding_" + seeding_mode + "_radio").isChecked():
                selected_mode = seeding_mode
                break
        settings_data['downloadconfig']['seeding_mode'] = selected_mode
        settings_data['downloadconfig']['seeding_ratio'] = self.window().seeding_ratio_combobox.currentText()

        try:
            settings_data['downloadconfig']['seeding_time'] = string_to_minutes(self.window().seeding_time_input.text())
        except ValueError:
            ConfirmationDialog.show_error(self.window(), "Invalid seeding time",
                                          "You've entered an invalid format for the seeding time (expected HH:MM)")
            return

        settings_data['tunnel_community']['exitnode_enabled'] = self.window().allow_exit_node_checkbox.isChecked()
        settings_data['Tribler']['default_number_hops'] = self.window().number_hops_slider.value() + 1
        settings_data['multichain']['enabled'] = self.window().multichain_enabled_checkbox.isChecked()

        self.settings_request_mgr = TriblerRequestManager()
        self.settings_request_mgr.perform_request("settings", self.on_settings_saved,
                                                  method='POST', data=json.dumps(settings_data))

    def on_settings_saved(self, _):
        """Persist the GUI-only settings, confirm to the user and refresh."""
        # Now save the GUI settings
        self.window().gui_settings.setValue("ask_download_settings",
                                            self.window().always_ask_location_checkbox.isChecked())
        self.window().gui_settings.setValue("default_anonymity_enabled",
                                            self.window().download_settings_anon_checkbox.isChecked())
        self.window().gui_settings.setValue("default_safeseeding_enabled",
                                            self.window().download_settings_anon_seeding_checkbox.isChecked())

        # NOTE(review): the dialog's parent is TriblerRequestManager.window (a
        # class attribute) -- presumably set to the main window elsewhere; verify.
        self.saved_dialog = ConfirmationDialog(TriblerRequestManager.window, "Settings saved",
                                               "Your settings have been saved.", [('close', BUTTON_TYPE_NORMAL)])
        self.saved_dialog.button_clicked.connect(self.on_dialog_cancel_clicked)
        self.saved_dialog.show()
        self.window().fetch_settings()

    def on_dialog_cancel_clicked(self, _):
        """Detach and drop the confirmation dialog so it can be collected."""
        self.saved_dialog.setParent(None)
        self.saved_dialog = None
| [
"mdmartijndevos@gmail.com"
] | mdmartijndevos@gmail.com |
bc0805cf7d2b4574e211b320b50f28b56d729b88 | 61ba840220ad32959fa7897e9b0dc487185cb54b | /Utils/TextProcessingUtils.py | c7cba2970467f68bace25d5589d3b8717a8beea6 | [
"MIT"
] | permissive | kostrzmar/SimpTextAlignPython | 32280b28b65793a3b3a9530d2cf5c1c6820f0797 | 3e5cbefa2f4608a101e490b4ca7cc7a9850c773d | refs/heads/main | 2023-03-25T18:40:57.261563 | 2021-03-22T20:21:08 | 2021-03-22T20:21:08 | 335,767,737 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,008 | py | from Utils import DefinedConstants, VectorUtils
from textblob import TextBlob
from nltk.tokenize import RegexpTokenizer
import re
from Representations import Text2abstractRepresentation
def getCleanEmbeddingModelTokens( text):
    """Lower-case and tokenize *text*, keeping only tokens accepted by
    ``isValidTokenForEmbeddingModel`` (longer than one character, alphabetic)."""
    separators = r' |_|=|;|\.|\,|\"|\'|\:|;|\*|%|\=|\!|\?|`|\-|&|\\\\|/'
    lowered = (token.lower() for token in re.split(separators, text))
    return [token for token in lowered if isValidTokenForEmbeddingModel(token)]
def getCleanText( text, alignmentStrategy, similarityStrategy, model, lineLevel):
    """Split *text* according to the alignment strategy and build the clean
    abstract representation of every sub-text."""
    representations = cleanSubtexts(
        getSubtexts(text, alignmentStrategy, lineLevel), similarityStrategy, model)
    if similarityStrategy == DefinedConstants.WAVGstrategy:
        # Pre-compute the weighted-average vectors once per sub-text.
        VectorUtils.calculateWAVGs(representations, model.em)
    if alignmentStrategy == DefinedConstants.ParagraphSepEmptyLineAndSentenceLevel:
        # Paragraph-level items additionally carry sentence-level representations.
        getCleanSublevelText(representations, DefinedConstants.SentenceLevel,
                             similarityStrategy, model, lineLevel)
    return representations
def getCleanSublevelText(cleanSubtexts, alignmentStrategy,
                         similarityStrategy, model, lineLevel):
    """Attach sub-level (e.g. sentence) representations to every element."""
    for representation in cleanSubtexts:
        sublevel = getCleanText(representation.getText(), alignmentStrategy,
                                similarityStrategy, model, lineLevel)
        representation.setSubLevelRepresentations(sublevel)
def getSubtexts( text, alignemntStrategy, lineLevel):
    """Split *text* into paragraphs, lines or sentences, dropping blank pieces.

    Branch order matters: the empty-line paragraph strategies win first, then
    an explicit line level, then sentence level; anything else is fatal.
    """
    paragraph_strategies = (DefinedConstants.ParagraphSepEmptyLineLevel,
                            DefinedConstants.ParagraphSepEmptyLineAndSentenceLevel)
    if alignemntStrategy in paragraph_strategies:
        pieces = re.split(r'\n\n', text)
    elif lineLevel == DefinedConstants.LineLevel:
        pieces = re.split(r'\n', text)
    elif alignemntStrategy == DefinedConstants.SentenceLevel:
        pieces = TextBlob(text).sentences
    else:
        print("Error: alignment level not recognized.")
        exit(1)
    return [piece for piece in pieces if len(piece.strip()) > 0]
def cleanSubtexts(subtexts, similarityStrategy, model):
    """Map every sub-text to its abstract representation for the given
    similarity strategy; sub-texts of unrecognized strategies are dropped."""
    embedding_strategies = (DefinedConstants.WAVGstrategy,
                            DefinedConstants.CWASAstrategy)
    representations = []
    for subtext in subtexts:
        if similarityStrategy in embedding_strategies:
            representations.append(cleanSubtextForEmbeddingModel(subtext, model.em))
        elif similarityStrategy == DefinedConstants.CNGstrategy:
            representations.append(cleanSubtextForCNGmodel(subtext, model.nm))
    return representations
def cleanSubtextForCNGmodel( subtext, nm):
    """Represent *subtext* by its character n-gram TF-IDF map."""
    return Text2abstractRepresentation(subtext, None, nm.getCharNgramTFIDFmap(subtext))
def cleanSubtextForEmbeddingModel( subtext, em):
    """Represent *subtext* by the embedding indices of its valid tokens."""
    separators = r' |_|=|;|\.|\,|\"|\'|\:|;|\*|%|\=|\!|\?|`|\-|&|\\\\|/'
    indices = []
    for raw_token in re.split(separators, str(subtext)):
        token = raw_token.lower()
        index = em.getIndex(token)
        # Keep only in-vocabulary tokens that pass the validity filter.
        if index is not None and isValidTokenForEmbeddingModel(token):
            indices.append(index)
    return Text2abstractRepresentation(subtext, indices, None)
def isValidTokenForEmbeddingModel( token):
    """Return True for tokens usable by the embedding model: longer than one
    character and purely alphabetic (no digits, punctuation or symbols).

    ``str.isalpha()`` is equivalent to the original per-character loop for any
    non-empty string, and the length guard already excludes the empty string.
    """
    return len(token) > 1 and token.isalpha()
def hasNoNumbers( token):
    """True when every character of *token* is alphabetic (vacuously True for
    the empty string).  Despite the name, digits AND punctuation both fail."""
    return all(character.isalpha() for character in token)
| [
"marek.kostrzewa@uzh.ch"
] | marek.kostrzewa@uzh.ch |
5ccb0de7903a5ad587a6809250f6a95518d6d850 | 2151544cd386b550b137c4c738a6e57af542f50a | /src/pipelinex/extras/datasets/pillow/images_dataset.py | e787712d4c62212e94e720b17a91161cc9b33575 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MarchRaBBiT/pipelinex | b82725b7307a58f28d7d663f5072b969a2c9591d | ea8def32a71752b667f9f3522acba3fd79102fe1 | refs/heads/master | 2023-01-05T22:41:58.241802 | 2020-11-08T13:18:19 | 2020-11-08T13:18:19 | 311,065,793 | 0 | 0 | NOASSERTION | 2020-11-08T13:18:20 | 2020-11-08T13:12:11 | null | UTF-8 | Python | false | false | 9,864 | py | import copy
from pathlib import Path
from typing import Any, Dict, Union
from PIL import Image
import logging
import numpy as np
from ..core import AbstractVersionedDataSet, DataSetError, Version
from ...ops.numpy_ops import to_channel_first_arr, to_channel_last_arr, ReverseChannel
log = logging.getLogger(__name__)
class ImagesLocalDataSet(AbstractVersionedDataSet):
    """ Loads/saves a dict of numpy 3-D or 2-D arrays from/to a folder containing images.

    Works like ``kedro.extras.datasets.pillow.ImageDataSet`` and
    ``kedro.io.PartitionedDataSet`` with conversion between numpy arrays and Pillow images.
    """

    # NOTE(review): the mutable default for `save_args` is shared between all
    # instances that do not pass their own; safe only as long as nothing
    # mutates it (self._save_args is deep-copied before use in _save).
    def __init__(
        self,
        path: str,
        load_args: Dict[str, Any] = None,
        save_args: Dict[str, Any] = {"suffix": ".jpg"},
        channel_first=False,
        reverse_color=False,
        version: Version = None,
    ) -> None:
        """
        Args:
            path: The folder path containing images
            load_args: Args fed to:
                https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open
            save_args:
                Args including:
                    `suffix`: File suffix such as ".jpg"
                    upper: If provided, used as the upper pixel value corresponding to 0xFF (255)
                        for linear scaling to ensure the pixel value is between 0 and 255.
                    lower: If provided, used as the lower pixel value corresponding to 0x00 (0)
                        for linear scaling to ensure the pixel value is between 0 and 255.
                    `mode` fed to:
                        https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.fromarray
                    Other args fed to:
                        https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save
            channel_first: If true, the first dimension of 3-D array is
                treated as channel (color) as in PyTorch.
                If false, the last dimenstion of the 3-D array is
                treated as channel (color) as in TensorFlow, Pillow, and OpenCV.
            reverse_color: If true, the order of channel (color) is reversed
                (RGB to BGR when loading, BGR to RGB when saving).
                Set true to use packages wuch as OpenCV which uses BGR order natively.
            version: If specified, should be an instance of
                ``kedro.io.core.Version``. If its ``load`` attribute is
                None, the latest version will be loaded. If its ``save``
                attribute is None, save version will be autogenerated.
        """
        super().__init__(
            filepath=Path(path), version=version, exists_function=self._exists,
        )
        self._load_args = load_args
        self._save_args = save_args
        self._channel_first = channel_first
        self._reverse_color = reverse_color

    def _load(self) -> Any:
        """Load one image (file path) or a collection of images (directory path).

        `load_args` may additionally carry two pipeline-level options that are
        popped before being forwarded to Pillow:
          - dict_structure: True -> {stem: image}; "sep_names" ->
            {"images": [...], "names": [...]}; None -> plain list of images.
          - as_numpy: convert each image to a numpy array (default True).
        """
        load_path = Path(self._get_load_path())
        load_args = copy.deepcopy(self._load_args)
        load_args = load_args or dict()
        dict_structure = load_args.pop("dict_structure", True)
        as_numpy = load_args.pop("as_numpy", True)
        channel_first = self._channel_first
        reverse_color = self._reverse_color

        if load_path.is_dir():
            # Directory: load every file inside, keyed by file stem.
            images_dict = {}
            for p in load_path.glob("*"):
                img = load_image(
                    p,
                    load_args,
                    as_numpy=as_numpy,
                    channel_first=channel_first,
                    reverse_color=reverse_color,
                )
                images_dict[p.stem] = img

            if dict_structure is None:
                return list(images_dict.values())
            if dict_structure == "sep_names":
                return dict(
                    images=list(images_dict.values()), names=list(images_dict.keys())
                )
            return images_dict

        else:
            # NOTE(review): `self.as_numpy` is not defined anywhere in this
            # class -- single-file loads likely raise AttributeError; this
            # should presumably be the local `as_numpy` popped above.
            return load_image(
                load_path,
                load_args,
                as_numpy=self.as_numpy,
                channel_first=channel_first,
                reverse_color=reverse_color,
            )

    def _save(self, data: Union[dict, list, np.ndarray, type(Image.Image)]) -> None:
        """Save a Pillow image, a numpy array (2-D/3-D single image or 4-D
        batch), or a dict/list of images.

        Single images are written to `save_path` itself; collections are
        written into `save_path` as a directory, one file per image named by
        the dict key (or a zero-padded index) plus `suffix`.
        """
        save_path = Path(self._get_save_path())
        save_path.parent.mkdir(parents=True, exist_ok=True)
        p = save_path

        save_args = copy.deepcopy(self._save_args)
        save_args = save_args or dict()
        # Pipeline-level options are popped; the rest goes to Image.save().
        suffix = save_args.pop("suffix", ".jpg")
        mode = save_args.pop("mode", None)
        upper = save_args.pop("upper", None)
        lower = save_args.pop("lower", None)
        # Any explicit bound triggers linear rescaling to uint8 via scale().
        to_scale = (upper is not None) or (lower is not None)

        if isinstance(data, dict):
            images = list(data.values())
            names = list(data.keys())
            # Special "sep_names" layout: {"images": [...], "names": [...]}.
            if "names" in names and "images" in names:
                images = data.get("images")
                names = data.get("names")
        else:
            images = data
            names = None

        if hasattr(images, "save"):
            # A single Pillow image: save directly unless it must be rescaled.
            if not to_scale:
                img = images
                img.save(p, **save_args)
                return None
            else:
                images = np.asarray(images)

        if isinstance(images, np.ndarray):
            # Normalize to channel-last / RGB before handing to Pillow.
            if self._channel_first:
                images = to_channel_last_arr(images)
            if self._reverse_color:
                images = ReverseChannel(channel_first=self._channel_first)(images)
            if images.ndim in {2, 3}:
                # Single image.
                img = images
                img = scale(lower=lower, upper=upper)(img)
                img = np.squeeze(img)
                img = Image.fromarray(img, mode=mode)
                img.save(p, **save_args)
                return None
            elif images.ndim in {4}:
                # Batch of images: fall through to the per-item loop below.
                images = scale(lower=lower, upper=upper)(images)
                dataset = Np3DArrDataset(images)
            else:
                raise ValueError(
                    "Unsupported number of dimensions: {}".format(images.ndim)
                )

        elif hasattr(images, "__getitem__") and hasattr(images, "__len__"):
            # A generic sequence of images (arrays or Pillow images).
            if not to_scale:
                p.mkdir(parents=True, exist_ok=True)
                for i, img in enumerate(images):
                    if isinstance(img, np.ndarray):
                        if self._channel_first:
                            img = to_channel_last_arr(img)
                        if self._reverse_color:
                            img = ReverseChannel(channel_first=self._channel_first)(img)
                        img = np.squeeze(img)
                        img = Image.fromarray(img)
                    name = names[i] if names else "{:05d}".format(i)
                    s = p / "{}{}".format(name, suffix)
                    img.save(s, **save_args)
                return None
            else:
                dataset = Np3DArrDatasetFromList(
                    images, transform=scale(lower=lower, upper=upper)
                )
        else:
            raise ValueError("Unsupported data type: {}".format(type(images)))

        # Common path for scaled batches: write one file per dataset item.
        p.mkdir(parents=True, exist_ok=True)
        for i in range(len(dataset)):
            img = dataset[i]
            if isinstance(img, (tuple, list)):
                # (image, label, ...)-style items: keep only the image.
                img = img[0]
            if self._channel_first:
                img = to_channel_last_arr(img)
            if self._reverse_color:
                img = ReverseChannel(channel_first=self._channel_first)(img)
            img = np.squeeze(img)
            img = Image.fromarray(img, mode=mode)
            name = names[i] if names else "{:05d}".format(i)
            s = p / "{}{}".format(name, suffix)
            img.save(s, **save_args)
        return None

    def _describe(self) -> Dict[str, Any]:
        """Return the constructor arguments for logging/催repr purposes."""
        return dict(
            filepath=self._filepath,
            load_args=self._save_args,
            save_args=self._save_args,
            channel_first=self._channel_first,
            reverse_color=self._reverse_color,
            version=self._version,
        )

    def _exists(self) -> bool:
        """True when a loadable version of the dataset exists on disk."""
        try:
            path = self._get_load_path()
        except DataSetError:
            return False
        return Path(path).exists()
def load_image(
load_path, load_args, as_numpy=False, channel_first=False, reverse_color=False
):
with load_path.open("rb") as local_file:
img = Image.open(local_file, **load_args)
if as_numpy:
img = np.asarray(img)
if channel_first:
img = to_channel_first_arr(img)
if reverse_color:
img = ReverseChannel(channel_first=channel_first)(img)
return img
def scale(**kwargs):
def _scale(a):
lower = kwargs.get("lower")
upper = kwargs.get("upper")
if (lower is not None) or (upper is not None):
max_val = a.max()
min_val = a.min()
stat_dict = dict(max_val=max_val, min_val=min_val)
log.info(stat_dict)
upper = upper or max_val
lower = lower or min_val
a = (
((a - min_val) / (max_val - min_val)) * (upper - lower) + lower
).astype(np.uint8)
return a
return _scale
class Np3DArrDataset:
def __init__(self, a):
self.a = a
def __getitem__(self, index):
return self.a[index, ...]
def __len__(self):
return len(self.a)
class Np3DArrDatasetFromList:
def __init__(self, a, transform=None):
self.a = a
self.transform = transform
def __getitem__(self, index):
item = np.asarray(self.a[index])
if self.transform:
item = self.transform(item)
return item
def __len__(self):
return len(self.a)
| [
"yusuke.minami86@gmail.com"
] | yusuke.minami86@gmail.com |
fa6cdeba91942d807fde795c7edcc0bdb379290c | 6b9c66787fc27ad7dc9165ebad7928e861786e32 | /migrations/versions/3657b8de770_.py | 8b5d7100b0946eb618af36d4d0fc308c736cc675 | [] | no_license | sany-soft/microblog | 768a4370d77725eec20a0d2bf85c4c2d8018c792 | 584f6191ab2b16ee93409616566a5ac9e250ccdb | refs/heads/master | 2021-12-16T02:10:31.128045 | 2017-09-12T11:11:50 | 2017-09-12T11:11:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | """empty message
Revision ID: 3657b8de770
Revises: 2d9cf26fbfb0
Create Date: 2016-10-23 18:12:08.882000
"""
# revision identifiers, used by Alembic.
revision = '3657b8de770'
down_revision = '2d9cf26fbfb0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
| [
"2755289083@qq.comm"
] | 2755289083@qq.comm |
dc86b8c0440e4f976d81efdad9b406a392cb509d | 43f426e52180e543aa7b32996f0bc26295e11f9e | /06_src/fr/tagc/uorf/core/util/log/__init__.py | a947854e31a9c20a4f018e417b950b21df083fb2 | [] | no_license | TAGC-NetworkBiology/MetamORF | c96104845382544ea48c573211dbf3a35982f894 | d623ede53ececd804a6a105676b6d6bcc2941ee1 | refs/heads/master | 2023-02-03T01:09:48.191736 | 2020-12-20T16:01:43 | 2020-12-20T16:01:43 | 281,438,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | # -*- coding: utf-8 -*-
from GeneRefLogger import GeneRefLogger
from Logger import Logger
| [
"sebastien.choteau@univ-amu.fr"
] | sebastien.choteau@univ-amu.fr |
3fb62c0b81aac929d34566a62f6ff4a0e5b40388 | 3aa4e976973ffca2501c7608e77fbc98ab5815b8 | /chat/migrations/0003_auto_20171123_2230.py | 01ad2c8c60fe4dfb3b56d2617efc74e54f395f89 | [] | no_license | xdnian/Tutoria | 96c72b326e8056941e5df3f15226acbcf67d3154 | d6460486f0a61c6668ba5494bbe43d9ead41995e | refs/heads/master | 2021-05-05T05:41:51.844808 | 2017-11-26T14:25:58 | 2017-11-26T14:25:58 | 105,421,612 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-23 14:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('chat', '0002_auto_20171123_1736'),
]
operations = [
migrations.RenameField(
model_name='chat',
old_name='created',
new_name='time',
),
]
| [
"junjiew2@illinois.edu"
] | junjiew2@illinois.edu |
2f6a912f37d34db640ba6cc5d6aa1f4875c6da07 | 6f6d846d140bc510d03c381dc11e4860c24246dc | /polls/migrations/0001_initial.py | 262edf092d97ea84be8161e61005c7c8cf1621d5 | [] | no_license | blooddark/djangoStudy | d3be67c00cdd28f09b52b5be8043df4ce2493088 | 762996d081b2f9591bcce4671fd3965ae2ee4d26 | refs/heads/master | 2020-04-16T18:23:09.340196 | 2019-01-15T08:58:11 | 2019-01-15T08:58:11 | 165,817,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # Generated by Django 2.1.5 on 2019-01-15 08:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
| [
"929609968@qq.com"
] | 929609968@qq.com |
110a37c782a24f234cb283d0b0e2b6a6e697b472 | 77d7958bf4a39414884df27b545b213d451ca6ec | /wordlist_generator.py | bab1584218e125334ccc8254760e3f3f0aa761b5 | [] | no_license | sashreek1/password_gen | 6056a6081e12008f8a85afbfc3f11ad6eba5ba49 | 986cfd35e51e465034ddc745b62996a987857831 | refs/heads/master | 2020-11-25T01:06:39.461845 | 2019-12-16T16:40:56 | 2019-12-16T16:40:56 | 228,422,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,478 | py | import re
basewords = []
name = basewords.append(input("Enter victim's first name : "))
last = basewords.append(input("Enter victim's last name : "))
dob= input("Enter victims birthday(dd/mm/yy) : ").split("/")
dob = ''.join(dob)
basewords.append(dob)
nick = basewords.append(input("enter victim's nick name : "))
pet = basewords.append(input("enter victim's pet's name : "))
partner = basewords.append(input("enter victim's partner's name : "))
leetmode = input('would you like leet mode ?(y/n) : ')
def numberadd(basewords):
wordlist = []
for baseword in basewords:
wordlist.extend([baseword, baseword.title(), baseword.lower(), baseword.upper()])
n = ["123", "111", "222", "333", "444", "555", "666", "777", "888", "999"]
for count in range(10):
wordlist.append(baseword.upper() + str(count))
wordlist.append(baseword.lower() + str(count))
wordlist.append(baseword.title() + str(count))
wordlist.append(str(count) + baseword.upper())
wordlist.append(str(count) + baseword.lower())
wordlist.append(str(count) + baseword.title())
if count < 10:
wordlist.append(baseword.upper() + str(n[count]))
wordlist.append(baseword.lower() + str(n[count]))
wordlist.append(baseword.title() + str(n[count]))
wordlist.append(str(n[count]) + baseword.upper())
wordlist.append(str(n[count]) + baseword.lower())
wordlist.append(str(n[count]) + baseword.title())
return wordlist
def leet(baselist):
wordlist = []
for item in baselist:
zeros = re.sub("o", "0", item)
ones = re.sub("i", "1", item)
threes = re.sub("e", "3", item)
fours = re.sub("a", "4", item)
fives = re.sub("s", "5", item)
wordlist.extend((zeros, ones, threes, fours, fives))
mix1 = re.sub("i", "1", zeros)
mix2 = re.sub("e", "3", mix1)
mix3 = re.sub("a", "4", mix2)
finalmix = re.sub("s", "5", mix3)
wordlist.append(finalmix)
return wordlist
if leetmode=='y':
wordlist = numberadd(basewords)
wordlist1 = leet(wordlist)
finallist = wordlist
finallist = finallist+wordlist1
lst = (list(set(finallist)))
for i in lst:
print(i)
if leetmode == 'n':
wordlist = numberadd(basewords)
finallist = wordlist
lst = (list(set(finallist)))
for i in lst:
print(i) | [
"noreply@github.com"
] | noreply@github.com |
a92044f9571336192d5f4ac4d928d8f9a911e1d8 | eae7322d17ce7555e7e1a4ebeae121572f73645a | /setup.py | ab8f81520c35d8ebf209609646b576c49549e4e2 | [] | no_license | arthurherbout/sudoku | 63a31a3594843eef01add1c5674df8c10c2359a2 | 42f7d839782cfea6426c514ee8e5f19db2b3f8d6 | refs/heads/master | 2020-04-15T16:34:02.067432 | 2019-01-23T04:39:57 | 2019-01-23T04:39:57 | 164,841,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from setuptools import setup
setup(name='gym_sudoku',
version='0.0.1',
install_requires=['gym'])
| [
"arthurherbout@gmail.com"
] | arthurherbout@gmail.com |
079d84ead2086b20c5c3e43113d9274666995188 | 4fae3fc52fe205c3e5061053e65de3108d516b22 | /prepare_with_spark.py | 2f63416b8665471589b30ebba8d8799a582b21f8 | [] | no_license | gliaffirm/dnn | 79d2ff748bc11f08f849e22cd8988b6a1083d2be | 116577ce6feb10aa66c771350236a34479817cfb | refs/heads/master | 2022-08-01T20:06:47.359914 | 2020-05-27T22:04:53 | 2020-05-27T22:04:53 | 267,435,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,644 | py | import boto3
import pickle
import numpy as np
import torch.nn as nn
from collections import OrderedDict
from pyspark.ml.feature import VectorAssembler
from pyspark.sql.types import IntegerType, DoubleType, ArrayType
from pyspark.sql import functions as fn
class Configs(object):
TRAINING_BASE_PATH = 's3://affirm-risk-sherlock/ml/adhoc/xz/dnn_v0/training/'
TRAINING_DATA_PATH = TRAINING_BASE_PATH + 'epoch/{}/'
VALIDATION_DATA_PATH = 's3://affirm-risk-sherlock/ml/adhoc/xz/dnn_v0/validation/'
TEST_DATA_PATH = 's3://affirm-risk-sherlock/ml/adhoc/xz/dnn_v0/test/'
JOINED_PATH = 's3://affirm-risk-sherlock/ml/adhoc/xz/dnn_v0/top10000/joined/'
MERCHANT_IDX_PATH = 's3://affirm-risk-sherlock/ml/adhoc/xz/dnn_v0/merchant_idx/'
INDEX_COL = [
'idx',
]
NUMERIC_FEATURES = [
'start_count_vector_minmax',
'capture_count_vector_minmax',
'capture_rate_vector_minmax',
'mean_amount_requested_vector_minmax',
'max_amount_requested_vector_minmax',
'mean_downpayment_amount_vector_minmax',
'mean_loan_amount_vector_minmax',
'mean_term_vector_minmax',
'mean_annual_income_vector_minmax',
'mean_fico_vector_minmax',
'user_age_days_vector_minmax',
'account_age_days_vector_minmax',
'time_since_last_app_days_vector_minmax',
]
CATEGORICAL_FEATURES = [
'merchant_industry_last_idx',
'product_type_last_idx',
'email_domain_last_idx',
'email_domain_suffix_last_idx',
'billing_address_city_last_idx',
'billing_address_region_last_idx',
]
MERCHANT_SET_FEATURES = [
'merchant_set',
]
LABEL_COL = [
'label',
]
NUMERIC_VECTOR = ['numeric_features'] # contains all numeric features concatenated together
JOINED_COLUMNS = (
INDEX_COL
+ NUMERIC_FEATURES
+ CATEGORICAL_FEATURES
+ MERCHANT_SET_FEATURES
+ LABEL_COL
)
TRAIN_VALIDATE_TEST_SPLIT = [0.7, 0.15, 0.15]
MERCHANT_SET_TRUNC = 5 # take only up to first 7 elements of each merchant set for padding
MERCHANT_SET_SIZE = 10 # pads up to MERCHANT_SET_SIZE elements for the merchant set
BUCKET_NAME = 'affirm-risk-sherlock'
CONFIG_KEY = 'ml/adhoc/xz/dnn_v0/configs.pkl'
TRAINING_PREFIX = 'ml/adhoc/xz/dnn_v0/training/epoch/{}/'
VALIDATION_PREFIX = 'ml/adhoc/xz/dnn_v0/validation/'
TEST_PREFIX = 'ml/adhoc/xz/dnn_v0/test/'
TRAINING_PARTITIONS = 6
VALIDATION_PARTITIONS = 2
TEST_PARTITIONS = 2
EMBEDDING_SIZES = {
'merchant_set': 256,
'merchant_industry_last_idx': 50,
'product_type_last_idx': 10,
'email_domain_last_idx': 50,
'email_domain_suffix_last_idx': 10,
'billing_address_city_last_idx': 128,
'billing_address_region_last_idx': 128,
}
EMBEDDINGBAG_MODE = 'sum'
NET_PARAMS = {
"learning_rate": 1e-3,
"batch_size": 4,
"num_workers": 4,
"num_epochs": 3,
"save_summary_steps": 100,
}
def _init__(self):
pass
class FeatureConfig(object):
def __init__(self,
merchant_count,
training_df,
numeric_features,
categorical_features,
merchant_set_features,
embedding_sizes,
):
self.merchant_count = merchant_count
self.training_df = training_df
self.numeric_features = numeric_features
self.categorical_features = categorical_features
self.merchant_set_features = merchant_set_features
self.embedding_sizes = embedding_sizes
def get_config(self): # returns a dictionary of tuples specifying size and embedding size if applicable
numeric_features_size = len(self.numeric_features)
categorical_token_counts = (
self.training_df
.agg(*[fn.countDistinct(c).alias(c) for c in self.categorical_features])
.collect()[0]
)
categorical_feature_vocabulary_sizes = OrderedDict(
(c, (categorical_token_counts[c] + 1, self.embedding_sizes[c])) # bucket at the end is for unknown
for c in self.categorical_features
)
merchant_vocabulary_size = (
self.merchant_count + 1, self.embedding_sizes['merchant_set']) # bucket at the end is for unknown
config = {
'numeric_features_size': (numeric_features_size, None),
'categorical_feature_vocabulary_sizes': categorical_feature_vocabulary_sizes,
'merchant_vocabulary_sizes': OrderedDict([('merchant_set', merchant_vocabulary_size)]),
}
return config
class LayerConfig(object):
def __init__(self, input_size, n_output_classes):
self.input_size = input_size
self.n_output_classes = n_output_classes
def get_config(self):
_out_sizes = [128, 64, ]
config = OrderedDict([
('fc0', {'layer_type': nn.Linear,
'args': {'in_features': self.input_size, 'out_features': _out_sizes[0], 'bias': True}}),
('activation0', {'layer_type': nn.ReLU, 'args': {}}),
('dropout0', {'layer_type': nn.Dropout, 'args': {'p': 0.5}}),
('fc1', {'layer_type': nn.Linear,
'args': {'in_features': _out_sizes[0], 'out_features': self.n_output_classes, 'bias': True}}),
('logsoftmax', {'layer_type': nn.LogSoftmax
, 'args': {'dim': 1}}),
])
return config, self.input_size, self.n_output_classes
configs = Configs()
def load_dfs(spark,
joined_path,
merchant_idx_path,
joined_columns,
index_column,
label_column,
numeric_features,
categorical_features,
merchant_set_features,
train_validate_test_split):
joined_df = spark.read.parquet(joined_path).select(joined_columns)
feature_assembler = VectorAssembler(
inputCols=numeric_features,
outputCol='numeric_features_vector'
)
joined_df = feature_assembler.transform(joined_df)
to_array_double = fn.udf(lambda x: x.toArray().tolist(), ArrayType(DoubleType()))
joined_df = joined_df.withColumn('numeric_features', to_array_double('numeric_features_vector'))
joined_df = joined_df.drop('numeric_features_vector')
training_df, validation_df, test_df = joined_df.select(
*(index_column
+ categorical_features
+ merchant_set_features
+ label_column
+ ['numeric_features'])
).randomSplit(train_validate_test_split, 23)
merchant_count = spark.read.parquet(merchant_idx_path).count()
return {
'training_df': training_df,
'validation_df': validation_df,
'test_df': test_df,
'merchant_count': merchant_count
}
# helper function for creating minibatches
def _repartition_and_shuffle(df, rand_multiplier, num_partitions, rand_seed):
df = df.withColumn('temp_col', fn.round(
rand_multiplier * fn.rand(seed=rand_seed)
).cast(IntegerType()))
cols = df.columns
df = (
df
.rdd
.map(lambda r: (r.temp_col, r))
.repartitionAndSortWithinPartitions(
numPartitions=num_partitions,
partitionFunc=lambda x: round(x) % num_partitions,
ascending=True,
keyfunc=lambda x: x,
)
.map(lambda x: x[1])
).toDF(cols)
df = df.drop('temp_col')
return df
def create_batch_files(df, filepath, num_partitions, epoch, shuffle=True):
# deterministically shuffle using epoch number * 100
if shuffle:
df = _repartition_and_shuffle(df, 500000, num_partitions, epoch)
df.write.parquet(filepath, mode='overwrite')
return True
# calculate merchant weights for loss function. Using the same logic as sklearn.utils.class_weight for
# 'balance' type.
def merchant_weight(df, col_name, _type='balance'):
df = df.select(fn.explode(df[col_name]).alias(col_name))
merchant_counts = df.groupby(col_name).count()
total_num_merchants = df.count()
num_merchants = merchant_counts.count()
if _type == 'balance':
weights = {i[col_name]: float(total_num_merchants) / (num_merchants * i['count']) for i in
merchant_counts.collect()}
# normalize weights to prevent large weight values on long tail merchants. The large weights
# may cause the gradient exploding.
min_count = np.min(weights.values())
max_min_range = np.ptp(weights.values())
weights = {i: (j - min_count) / max_min_range for i, j in weights.items()}
return weights
# Load data
data = load_dfs(spark,
configs.JOINED_PATH,
configs.MERCHANT_IDX_PATH,
configs.JOINED_COLUMNS,
configs.INDEX_COL,
configs.LABEL_COL,
configs.NUMERIC_FEATURES,
configs.CATEGORICAL_FEATURES,
configs.MERCHANT_SET_FEATURES,
configs.TRAIN_VALIDATE_TEST_SPLIT)
# Add to configs
training_df = data['training_df']
validation_df = data['validation_df']
test_df = data['test_df']
merchant_count = data['merchant_count']
params = configs.NET_PARAMS
# calculate weights for merchants
weights = merchant_weight(training_df, configs.MERCHANT_SET_FEATURES[0])
params['weights'] = weights
# specify the train and val dataset sizes
params['training_size'] = training_df.count()
params['validation_size'] = validation_df.count()
params['test_size'] = test_df.count()
params['merchant_count'] = merchant_count
# Define configs
feature_conf = FeatureConfig(merchant_count=merchant_count,
training_df=training_df,
numeric_features=configs.NUMERIC_FEATURES,
categorical_features=configs.CATEGORICAL_FEATURES,
merchant_set_features=configs.MERCHANT_SET_FEATURES,
embedding_sizes=configs.EMBEDDING_SIZES).get_config()
input_size = (
feature_conf['numeric_features_size'][0]
+ sum([v[1] for _, v in feature_conf['categorical_feature_vocabulary_sizes'].items()])
+ sum([v[1] for _, v in feature_conf['merchant_vocabulary_sizes'].items()])
)
params['input_size'] = input_size
layer_conf, _, _ = LayerConfig(input_size=input_size, n_output_classes=merchant_count).get_config()
# Shuffle and create files
bucket = boto3.resource('s3').Bucket(configs.BUCKET_NAME)
training_file_list = []
for epoch in range(configs.NET_PARAMS['num_epochs']):
create_batch_files(training_df,
configs.TRAINING_DATA_PATH.format(epoch),
configs.TRAINING_PARTITIONS,
epoch,
shuffle=True)
keys = [obj.key for obj in bucket.objects.filter(Prefix=configs.TRAINING_PREFIX.format(epoch)) if
'parquet' in obj.key]
training_file_list.append(['{bucket}/{key}'.format(bucket=configs.BUCKET_NAME, key=k) for k in keys])
create_batch_files(validation_df,
configs.VALIDATION_DATA_PATH,
configs.VALIDATION_PARTITIONS,
0,
shuffle=False)
keys = [obj.key for obj in bucket.objects.filter(Prefix=configs.VALIDATION_PREFIX) if 'parquet' in obj.key]
validation_file_list = ['{bucket}/{key}'.format(bucket=configs.BUCKET_NAME, key=k) for k in keys]
create_batch_files(test_df,
configs.TEST_DATA_PATH,
configs.TEST_PARTITIONS,
0,
shuffle=False)
keys = [obj.key for obj in bucket.objects.filter(Prefix=configs.TEST_PREFIX) if 'parquet' in obj.key]
test_file_list = ['{bucket}/{key}'.format(bucket=configs.BUCKET_NAME, key=k) for k in keys]
params['training_file_list'] = training_file_list
params['validation_file_list'] = validation_file_list
params['test_file_list'] = test_file_list
pickle_byte_obj = pickle.dumps([feature_conf, layer_conf, params])
s3_resource = boto3.resource('s3')
s3_resource.Object(configs.BUCKET_NAME, configs.CONFIG_KEY).put(Body=pickle_byte_obj)
# Load it up this way for now: make this something that is better configurable later
s3 = boto3.resource('s3')
feature_conf, layer_conf, params = pickle.loads(s3.Bucket(configs.BUCKET_NAME)
.Object(configs.CONFIG_KEY)
.get()['Body'].read())
| [
"liguoxinl@163.com"
] | liguoxinl@163.com |
0b24bac438e1c5c418018f3da148286e94d3b963 | 9cb0ab429b498f3941914dd5773f1f89f2394177 | /kzt_exchangerates/__init__.py | 75e9aaf0fa6184af250389ad2068a5b9c658c281 | [
"MIT"
] | permissive | kaamapulyaaa/python-kzt-exchangerates | 9b8e96442ce85d0176797c76df8d00c903fde78e | 60eca52b776f889848d631be43c843bd9bd50d06 | refs/heads/master | 2023-03-15T12:21:55.511418 | 2020-04-02T12:51:23 | 2020-04-02T12:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | from kzt_exchangerates.rates import Rates | [
"dastand.climber@gmail.com"
] | dastand.climber@gmail.com |
fe3ded0d29d47adaf42b42d3141b7dffded06bb4 | f82553b6ad97b819e06e95bd77db5d9c5a856bf0 | /online_tutor/urls.py | 55749822c6a73f7ee7d8385635530df2142c7e4c | [] | no_license | ramesh1553/online_tutor | e49a40003e5b531b9d85b14a15bf8b487e7d94a2 | 2852480d306f1eb238374bdbe9406b91cd224f8c | refs/heads/master | 2023-08-02T14:02:47.522371 | 2021-09-21T10:11:13 | 2021-09-21T10:11:13 | 408,718,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | """online_tutor URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path,include
from online_tutor import settings
from tutor_app import user_urls, admin_urls, student_urls, tutour_urls, parent_urls
from tutor_app.views import IndexView, RegisterView,RegisterView1,RegisterView2,LoginViews
urlpatterns = [
path('', IndexView.as_view()),
path('register', RegisterView.as_view()),
path('register1',RegisterView1.as_view()),
path('register2',RegisterView2.as_view()),
path('login', LoginViews.as_view()),
path('user/', user_urls.urls()),
path('admin/',admin_urls.urls()),
path('student/', student_urls.urls()),
path('tutor/', tutour_urls.urls()),
path('parent/',parent_urls.urls()),
]
if settings.DEBUG:
urlpatterns+=static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"ramesh.v1553@gmail.com"
] | ramesh.v1553@gmail.com |
ea9572dc849baf1317f801f9c39db6170004c6b0 | efc8a145869cf4e97b72d5e8c1d74b07e672be53 | /Python/SWEA/D3/5948. 새샘이의 7-3-5 게임/5948_새샘이의7-3-5게임.py | a0b06311d3e7c40a4838e338d5309398f9f77eee | [] | no_license | jngmk/Training | f179f8c3d5e2f9b93ed28e0c9b42933bc142a7e7 | ec9e135d59300db2a42f7693137252a0a2cb7b24 | refs/heads/master | 2020-07-20T05:31:16.976630 | 2020-05-09T14:33:47 | 2020-05-09T14:33:47 | 196,009,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import sys
sys.stdin = open('input.txt', 'r')
T = int(input())
for i in range(T):
num_list = list(map(int, input().split()))
result_set = set()
for a in range(5):
for b in range(a+1, 6):
for c in range(b+1, 7):
num = num_list[a] + num_list[b] + num_list[c]
result_set.add(num)
result = sorted(result_set)[-5]
print('#{0} {1}'.format(i+1, result))
| [
"rlawjd7186@naver.com"
] | rlawjd7186@naver.com |
fe3d56f534757e85bb71da0c2dbc4bd05d60ac04 | eae1bcedfacc74ea9dc01f0ba4fff292e50fafe6 | /Module 13.2.py | a2067db00a151f9b96be6b322c09b75a8e0d88ce | [] | no_license | SztMar/Module13--SQLite | bead47425ebaf83f8eacf4e089df3db8d86a0cb4 | a048f18d39451ba77f821f03eb780fbe6965d349 | refs/heads/master | 2023-01-18T16:15:45.137187 | 2020-11-21T18:16:51 | 2020-11-21T18:16:51 | 314,874,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,466 | py | # `ex_01_conection_to_db.py`
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return conn
def execute_sql(conn, sql):
""" Execute sql
:param conn: Connection object
:param sql: a SQL script
:return:
"""
try:
c = conn.cursor()
c.execute(sql)
except Error as e:
print(e)
def add_projekt(conn, projekt):
"""
Create a new projekt into the projects table
:param conn:
:param projekt:
:return: projekt id
"""
sql = '''INSERT INTO projects(nazwa, start_date, end_date)
VALUES(?,?,?)'''
cur = conn.cursor()
cur.execute(sql, projekt)
conn.commit()
return cur.lastrowid
if __name__ == "__main__":
create_projects_sql = """
-- projects table
CREATE TABLE IF NOT EXISTS projects (
id integer PRIMARY KEY,
nazwa text NOT NULL,
start_date text,
end_date text
);
"""
create_tasks_sql = """
-- zadanie table
CREATE TABLE IF NOT EXISTS tasks (
id integer PRIMARY KEY,
projekt_id integer NOT NULL,
nazwa VARCHAR(250) NOT NULL,
opis TEXT,
status VARCHAR(15) NOT NULL,
start_date text NOT NULL,
end_date text NOT NULL,
FOREIGN KEY (projekt_id) REFERENCES projects (id)
);
"""
add_project_1 = """
-- insert into table projects
INSERT INTO projects(id, nazwa, start_date, end_date)
VALUES (1,
"Zrób zadania",
"2020-05-08 00:00:00",
"2020-05-10 00:00:00"
);
"""
add_project_2 = """
-- insert into table projects
INSERT INTO projects(nazwa, start_date, end_date)
VALUES ( "Zrób zadania 2",
"2020-05-08 00:00:00",
"2020-05-10 00:00:00"
);
"""
db_file = "database.db"
conn = create_connection(db_file)
projekt = ("Powtórka z angielskiego", "2020-05-11 00:00:00", "2020-05-13 00:00:00")
pr_id = add_projekt(conn, projekt)
if conn is not None:
#execute_sql(conn, create_projects_sql)
#execute_sql(conn, add_project_1)
#execute_sql(conn, add_project_2)
#execute_sql(conn, create_tasks_sql)
pr_id
conn.close()
| [
"mareksztuczka1992@gmail.com"
] | mareksztuczka1992@gmail.com |
8b98c2424e83ef7085af2687a5062744cae8479e | 53e0a9a522b496ab1d1a909ef7ad88fe5a0b88c3 | /pyskel/plugins/python_serverless_lambda/hooks.py | f12a995122d820e7bb1f7ccd476bb3bf2006ce1b | [] | no_license | tyevans/pyskel | ed183bfd4ffd4c22529108d11147852a43c150a2 | 44be44296b7b81e0aafb3a44f3b61d16e2930fc4 | refs/heads/master | 2020-12-30T04:57:36.778616 | 2020-02-07T07:41:19 | 2020-02-07T07:41:19 | 238,868,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | import jinja2
import pyskel
options = {
"name": "python_serverless_lambda",
"template_module": "pyskel.plugins.python_serverless_lambda",
"dependencies": [
# Other plugins that are required for this one to work.
"python_base",
],
"manifest": [
# Files to write into new projects
"package.json",
"serverless.yml",
],
}
@pyskel.hookimpl
def register_plugin_templates(app_templates):
template_module = options.get('template_module')
if template_module:
app_templates[options['name']] = jinja2.PackageLoader(template_module)
return app_templates
@pyskel.hookimpl
def register_project(projects_avail):
projects_avail.append(options)
| [
"tyevans@gmail.com"
] | tyevans@gmail.com |
f10cf0c87250140ac5312dc31eb6fb3097b38031 | 5983bf3f4cbd49e222a4448f6e738946c1012553 | /aicall/apps/info/migrations/0002_auto_20210322_1329.py | eb91fe911bbbd8d6a9721e302847df2eb2ef5cea | [] | no_license | RympeR/aciall | 7fa88eaf6799444aef84dba1ce9974de858ddfd4 | ca238234160d93a1058f725121e1f3fbe71be33c | refs/heads/master | 2023-04-04T14:45:39.390609 | 2021-04-05T10:02:00 | 2021-04-05T10:02:00 | 354,790,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # Generated by Django 3.1.7 on 2021-03-22 11:29
from django.db import migrations
import imagekit.models.fields
class Migration(migrations.Migration):
dependencies = [
('info', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='talkthemes',
name='image_svg',
field=imagekit.models.fields.ProcessedImageField(blank=True, null=True, upload_to='', verbose_name='ImageSVG'),
),
]
| [
"georg.rashkov@gmail.com"
] | georg.rashkov@gmail.com |
8c95d0e31b93f5a75ff25c6de415f7a290dbb8a2 | 354d480e67b995869125553485e0b43221431b8d | /chess/ai.py | d5739c4b925a4b79421e532cc16b5563579d3b9d | [] | no_license | oliverclark15/chessmasta | 7fa551a801cb5fc9ba286e6a24da54431fc140f3 | e3343fe40e90b51b3d2a0ef8de9e782fda9435a9 | refs/heads/master | 2020-12-27T08:28:21.559822 | 2020-02-18T21:35:40 | 2020-02-18T21:35:40 | 237,833,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,393 | py | from chess import *
class Agent:
def __init__(self,max_depth):
self.max_depth = max_depth
self.best_move = None
def minimax(self, current_depth, board, alpha=-1000, beta=1000):
# Recursive base check
if (current_depth == self.max_depth) or (board.gameover()):
return board.evaluate()
# White = MAX
# Black = MIN
max_turn = board.turn == "White"
best_score = float('-inf') if max_turn else float('inf')
best_move = None
possible_moves = board.get_all_moves(board.turn)
#board.print_board()
#print(f"Iterating over {len(possible_moves)} possible moves @ depth {current_depth}")
#board.print_board()
#print(possible_moves)
for possible_move in possible_moves:
next_board = board.get_next_board(possible_move) # board.perform needs implementation (return value needed)
child_score = self.minimax(current_depth + 1, next_board, alpha, beta)
if max_turn and best_score < child_score:
best_score = child_score
best_move = possible_move
alpha = max(alpha, best_score)
if beta <= alpha:
break
if (not max_turn) and best_score > child_score:
best_score = child_score
best_move = possible_move
beta = min(beta, best_score)
if beta <= alpha:
break
#print(f"Concluded iterating over {len(possible_moves)} possible moves")
#print(best_score)
print(best_move)
self.best_move = best_move
return best_score
a = Agent(4)
b = Board()
b.perform_move(Move(6,3,4,3))
b.perform_move(Move(1,5,3,5))
b.perform_move(Move(7,2,3,6))
b.perform_move(Move(1,7,2,7))
b.perform_move(Move(3,6,4,5))
b.perform_move(Move(1,6,3,6))
b.perform_move(Move(4,5,5,6))
b.perform_move(Move(3,5,4,5))
b.perform_move(Move(6,4,5,4))
b.perform_move(Move(2,7,3,7))
b.perform_move(Move(7,3,3,7))
b.perform_move(Move(7,5,5,3))
b.perform_move(Move(5,3,2,6))
b.perform_move(Move(0,7,2,7))
b.perform_move(Move(7,3,3,7))
b.perform_move(Move(2,7,3,7))
#b.perform_move(Move(0,7,2,7))
print(b.is_in_checkmate("Black"))
b.print_board()
print(a.minimax(0,b))
print(a.best_move)
| [
"oliver.clark2@mail.mcgill.ca"
] | oliver.clark2@mail.mcgill.ca |
9fe2c0d104d0b3349e465be78200a2e358149718 | 175a4a051a66a09ba7be98ba7ffcae32ff644ea3 | /venv/bin/gunicorn | ca56742f10112a37b2f682b3737cc4774f31dbee | [] | no_license | DavidMantilla/flask | 8f49e33dde9cef6b7e311d85bd398b539469a293 | 80d4d278a6c1a81bba9d27817a256696803b950d | refs/heads/master | 2023-03-28T11:23:34.062526 | 2021-04-02T17:54:47 | 2021-04-02T17:54:47 | 353,855,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | #!/home/david/python/flask/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"mantillardavid_21@hotmail.com"
] | mantillardavid_21@hotmail.com | |
dfe2d2b6601b7ca76c5e0c710158960065091f9b | 15e6f3401d640e0666c47b56b2082ee6baa310e5 | /model_ui.py | f0e62f035cf4e0cf56c096169508a9f86bf2fac0 | [] | no_license | Lving/RiskModeler | 4f0d4a1dbc5e0965db4a00924bc5e5f87f941ed8 | 6c4131f297c21fd45ff9d6511e5d58372e2433e2 | refs/heads/master | 2022-11-29T20:48:14.250633 | 2020-08-10T15:26:03 | 2020-08-10T15:26:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109,966 | py | import tkinter as tk
from tkinter import ttk
from tkinter import *
import pandas as pd
from pandastable import Table
import pickle as pickle
from .func import binning
import math
from .model import lrmodel
lrmodel = lrmodel()
import datetime
binning = binning()
import statsmodels.api as sm
from sklearn.linear_model.logistic import LogisticRegression
from joblib import Parallel, delayed
import joblib
from .model_result_ui import scorecard_result_ui
import threading
from sklearn.metrics import roc_curve, auc
import os
import numpy as np
import random
from .var_clus import VarClus
class model():
    def __init__(self, mianframe, project_info):
        """Scorecard (logistic-regression) modelling node of the UI.

        Args:
            mianframe: parent Tk window the node UI is rendered into
                (parameter-name typo kept for caller compatibility).
            project_info: DataFrame describing every node of the project;
                columns used here include 模块类型, 模块名字, 保存地址, 状态.
        """
        self.master = mianframe
        # --- project bookkeeping ---
        self.save = 'N'
        self.node_type = 'SCR'
        self.project_info = project_info
        # directory holding the project file; node artefacts are saved beside it
        self.project_path = os.path.split(project_info[project_info['模块类型'] == 'project']['保存地址'][0])[0]
        self.node_name = 'model'
        self.exist_data = list(project_info['模块名字'])
        self.load = 'N'
        self.finsh = 'N'
        # leave two cores free for the UI by default
        self.n_job = joblib.cpu_count() - 2
        # --- state copied from the upstream IGN (interactive grouping) node ---
        self.IGN_node_name = None
        self.IGN_par_train_dataname = None
        self.IGN_par_reject_dataname = None
        self.IGN_par_oot_dataname = None
        self.IGN_par_use_freezing_flag = None
        self.IGN_par_import_groupdataname = None
        self.IGN_par_num_s_bin = None
        self.IGN_par_use_specialcode_flag = None
        self.IGN_par_specialcode_data = None
        self.IGN_par_sepcialcode_dataname = None
        self.IGN_par_char_restric_flag = None
        self.IGN_par_char_restric_num = None
        self.IGN_par_char_restric_pct = None
        self.IGN_par_tree_criterion = None
        self.IGN_par_num_f_group = None
        self.IGN_par_min_num_group = None
        self.IGN_par_min_pct_group = None
        self.IGN_par_variable_reject_flag = None
        self.IGN_par_variable_reject_iv = None
        self.IGN_IGNvariable_setting = None
        self.IGN_groupingdata = pd.DataFrame()
        self.IGN_f_group_report = pd.DataFrame()
        self.IGN_s_group_report = pd.DataFrame()
        self.IGN_not_use = []
        self.IGN_grouped_train_data = pd.DataFrame()
        self.IGN_grouped_valid_data = pd.DataFrame()
        self.IGN_grouped_reject_data = pd.DataFrame()
        self.IGN_grouped_oot_data = pd.DataFrame()
        self.IGN_check_list = None
        self.IGN_par_traindatavariable_setting = None
        self.IGN_node_time = None
        self.timeid_reject = None
        self.timeid_oot = None
        self.timeid_train = None
        self.target_reject = None
        self.target_oot = None
        self.target_train = None
        # --- model hyper-parameters (defaults shown in the settings UI) ---
        self.par_use_freezing_flag = '否'
        self.par_inditor_help = False
        self.par_import_modelname = None
        self.par_intercept_flag = True
        self.par_p_value = 0.05
        self.par_stay_p_value = 0.05
        self.par_criterion = 'aic'
        self.par_direction = 'stepwise'
        self.par_variable_type = 'WOE'
        self.par_odds_ratio = 20
        self.par_odds_score_ratio = 600
        self.par_odds_double_score = 20
        self.par_intercept_scorecard = '是'
        self.par_inditor_pct = 0.01
        self.par_inditor_sample = 0.01
        self.model_start_flag = 'N'
        self.lasso_flag = '否'
        # --- grouping-process / scorecard outputs ---
        self.predict_train_data = pd.DataFrame()
        self.predict_vaild_data = pd.DataFrame()
        self.predict_reject_data = pd.DataFrame()
        self.predict_oot_data = pd.DataFrame()
        self.model_ppp = []
        self.scorecard_df = pd.DataFrame()
        self.lasso_df = pd.DataFrame()
        # populate self.IGN_list with available grouping nodes
        self.pre_data()
# 模块参数
def thread_it(self, func, *args):
'''将函数放入线程中执行'''
# 创建线程
t = threading.Thread(target=func, args=args)
# 守护线程
t.setDaemon(True)
# 启动线程
t.start()
    def import_node(self, node_data, ac):
        """Restore this node's state from a previously pickled node file.

        Args:
            node_data: unpickled node payload; node_data[0] is the settings
                dict written by save_project / the model run.
            ac: action string; 'setting' additionally re-opens the settings
                UI after re-loading the upstream IGN node from disk.
        """
        self.node_setting = node_data[0]
        self.node_type = node_data[0]['node_type']
        self.node_name = node_data[0]['node_name']
        self.node_time = node_data[0]['time']
        self.node_save_path = node_data[0]['node_save_path']
        # self.par_use_freezing_flag=node_data[0]['ign_node']
        self.par_use_freezing_flag = node_data[0]['par_use_freezing_flag']
        self.par_inditor_help = node_data[0]['par_inditor_help']
        self.par_import_modelname = node_data[0]['par_import_modelname']
        self.par_intercept_flag = node_data[0]['par_intercept_flag']
        self.par_p_value = node_data[0]['par_p_value']
        self.par_stay_p_value = node_data[0]['par_stay_p_value']
        self.par_criterion = node_data[0]['par_criterion']
        self.par_direction = node_data[0]['par_direction']
        self.par_variable_type = node_data[0]['par_variable_type']
        self.par_odds_ratio = node_data[0]['par_odds_ratio']
        self.par_odds_score_ratio = node_data[0]['par_odds_score_ratio']
        self.par_odds_double_score = node_data[0]['par_odds_double_score']
        self.par_intercept_scorecard = node_data[0]['par_intercept_scorecard']
        # --- scored datasets and fitted model produced by the previous run ---
        self.predict_train_data = node_data[0]['predict_train_data']
        self.predict_vaild_data = node_data[0]['predict_vaild_data']
        self.predict_reject_data = node_data[0]['predict_reject_data']
        self.predict_oot_data = node_data[0]['predict_oot_data']
        self.model_ppp = node_data[0]['model']
        self.f_scorecard = node_data[0]['scorecard_df']
        self.lasso_df = node_data[0]['lasso_df']
        self.lasso_flag = node_data[0]['lasso_flag']
        self.var_clus = node_data[0]['var_clus']
        self.target_train = node_data[0]['report_para']['train_target']
        self.target_oot = node_data[0]['report_para']['oot_target']
        self.target_reject = node_data[0]['report_para']['reject_target']
        self.timeid_train = node_data[0]['report_para']['timeid_train']
        self.timeid_oot = node_data[0]['report_para']['timeid_oot']
        self.timeid_reject = node_data[0]['report_para']['timeid_reject']
        self.IGN_f_group_report = node_data[0]['report_para']['f_group_report']
        self.vari_list = node_data[0]['report_para']['vari_list']
        self.IGN_node_name = node_data[0]['previous_node_name'][0]
        self.IGN_node_time = node_data[0]['previous_node_time'][0]
        self.IGN_groupingdata = node_data[0]['IGN_grouping_data']
        self.load = 'Y'
        if ac == 'setting':
            # locate the upstream IGN node file by its creation timestamp
            path_list = self.project_info[self.project_info['创建时间'] == self.IGN_node_time]['保存地址']
            error_list = []
            if len(path_list) == 0:
                error_list = error_list + [{'name': self.IGN_node_name, 'time': self.IGN_node_time}]
            def continu(event):
                # user chose to re-pick the grouping data: reset and reopen settings
                for child in self.master.winfo_children():
                    child.destroy()
                # upstream data changed, so results must be recomputed
                self.load = 'N'
                self.Start_UI()
                self.adjustsetting()
            def back(event):
                # user chose to inspect the old model: reuse the saved scored data
                self.IGN_grouped_train_data = self.predict_train_data
                self.IGN_grouped_valid_data = self.predict_vaild_data
                self.IGN_grouped_reject_data = self.predict_reject_data
                self.IGN_grouped_oot_data = self.predict_oot_data
                for child in self.master.winfo_children():
                    child.destroy()
                self.Start_UI()
                self.adjustsetting()
            if len(error_list) > 0:
                # upstream IGN node missing: offer "re-select" vs "view old model"
                self.master.title('提示')
                L00 = Label(self.master, width=80, text="该模块引用的%s 模块 没有在项目中找到,\n可能该模块已经更新,删除,"
                                                        "或未导入\n您可以重新选择分组数据或查看旧模型信息" % (error_list))
                L00.grid(column=0, row=0, columnspan=3, sticky=(W))
                button_contin = ttk.Button(self.master, text='重新选择分组')
                button_contin.grid(column=0, row=1, sticky=(W), padx=10, pady=10)
                button_contin.bind("<Button-1>", continu)
                button_back = ttk.Button(self.master, text='查看旧模型')
                button_back.grid(column=2, row=1, sticky=(W), padx=10, pady=10)
                button_back.bind("<Button-1>", back)
            else:
                try:
                    # reload the IGN node pickle: [settings, reports, train, valid, reject, oot]
                    path = path_list[0]
                    fr = open(path, 'rb')
                    node_data = pickle.load(fr)
                    fr.close()
                    self.IGN_node_name = node_data[0]['node_name']
                    self.IGN_node_time = node_data[0]['time']
                    self.IGN_previous_usedlist = node_data[0]['use_node']
                    self.IGN_par_train_dataname = node_data[0]['previous_node_name'][0]
                    self.IGN_par_reject_dataname = node_data[0]['previous_node_name'][1]
                    self.IGN_par_oot_dataname = node_data[0]['previous_node_name'][2]
                    self.IGN_par_use_freezing_flag = node_data[0]['par_use_freezing_flag']
                    self.IGN_par_import_groupdataname = node_data[0]['par_import_groupdataname']
                    self.IGN_par_num_s_bin = node_data[0]['par_num_s_bin']
                    self.IGN_par_use_specialcode_flag = node_data[0]['par_use_specialcode_flag']
                    self.IGN_par_specialcode_data = node_data[0]['par_specialcode_data']
                    self.IGN_par_sepcialcode_dataname = node_data[0]['par_sepcialcode_dataname']
                    self.IGN_par_char_restric_flag = node_data[0]['par_char_restric_flag']
                    self.IGN_par_char_restric_num = node_data[0]['par_char_restric_num']
                    self.IGN_par_char_restric_pct = node_data[0]['par_char_restric_pct']
                    self.IGN_par_tree_criterion = node_data[0]['par_tree_criterion']
                    self.IGN_par_num_f_group = node_data[0]['par_num_f_group']
                    self.IGN_par_min_num_group = node_data[0]['par_min_num_group']
                    self.IGN_par_min_pct_group = node_data[0]['par_min_pct_group']
                    self.IGN_par_traindatavariable_setting = node_data[0]['data_variable_setting']
                    self.IGN_par_variable_reject_flag = node_data[0]['par_variable_reject_flag']
                    self.IGN_par_variable_reject_iv = node_data[0]['par_variable_reject_iv']
                    self.IGN_IGNvariable_setting = node_data[0]['IGNvariable_setting']
                    self.IGN_groupingdata = node_data[1][0]
                    self.IGN_f_group_report = node_data[1][1]
                    self.IGN_s_group_report = node_data[1][2]
                    self.IGN_not_use = node_data[1][3]
                    self.IGN_grouped_train_data = node_data[2]
                    self.IGN_grouped_valid_data = node_data[3]
                    self.IGN_grouped_reject_data = node_data[4]
                    self.IGN_grouped_oot_data = node_data[5]
                    # self.IGN_check_list = node_data[0]['check_change']
                    # mark variables the IGN node flagged as unused
                    self.IGN_IGNvariable_setting['是否使用'] = self.IGN_IGNvariable_setting.apply(
                        lambda x: '不使用' if x['变量名称'] in self.IGN_not_use else x['是否使用'], axis=1)
                except Exception as e:
                    # fall back to the scored datasets already saved with this node
                    self.IGN_grouped_train_data = self.predict_train_data
                    self.IGN_grouped_valid_data = self.predict_vaild_data
                    self.IGN_grouped_reject_data = self.predict_reject_data
                    self.IGN_grouped_oot_data = self.predict_oot_data
                    tk.messagebox.showwarning('错误', "%s数据集导入错误:%s" % (self.IGN_node_name, e))
        # short-circuit keeps this safe when ac != 'setting' (error_list unset)
        if ac == 'setting' and len(error_list) == 0:
            self.Start_UI()
            self.adjustsetting()
# 'data_variable_setting': self.par_traindatavariable_setting,
# 'reject_data_variable_setting': self.par_rejectdatavariable_setting,
# 'oot_data_variable_setting': self.par_ootdatavariable_setting,
# node_data[0]['use_node': [self.node_name] + self.IGN_previous_usedlist
    def load_data(self, event, datatype):
        """Combobox callback: load the selected IGN node's pickle into this node.

        Args:
            event: Tk event (unused beyond the binding signature).
            datatype: which dataset slot changed; only 'train' is handled.
        """
        try:
            if datatype == 'train':
                if len(str(self.comboxlist_train_data.get())) < 1:
                    tk.messagebox.showwarning('错误', '请先创建交互式分组模块')
                else:
                    # pickle layout: [settings, reports, train, valid, reject, oot]
                    path = self.project_info[self.project_info['模块名字'] == self.comboxlist_train_data.get()]['保存地址'][0]
                    fr = open(path, 'rb')
                    node_data = pickle.load(fr)
                    fr.close()
                    self.IGN_node_name = node_data[0]['node_name']
                    self.IGN_node_time = node_data[0]['time']
                    self.IGN_previous_usedlist = node_data[0]['use_node']
                    self.IGN_par_train_dataname = node_data[0]['previous_node_name'][0]
                    self.IGN_par_reject_dataname = node_data[0]['previous_node_name'][1]
                    self.IGN_par_oot_dataname = node_data[0]['previous_node_name'][2]
                    self.IGN_par_use_freezing_flag = node_data[0]['par_use_freezing_flag']
                    self.IGN_par_import_groupdataname = node_data[0]['par_import_groupdataname']
                    self.IGN_par_num_s_bin = node_data[0]['par_num_s_bin']
                    self.IGN_par_use_specialcode_flag = node_data[0]['par_use_specialcode_flag']
                    self.IGN_par_specialcode_data = node_data[0]['par_specialcode_data']
                    self.IGN_par_sepcialcode_dataname = node_data[0]['par_sepcialcode_dataname']
                    self.IGN_par_char_restric_flag = node_data[0]['par_char_restric_flag']
                    self.IGN_par_char_restric_num = node_data[0]['par_char_restric_num']
                    self.IGN_par_char_restric_pct = node_data[0]['par_char_restric_pct']
                    self.IGN_par_tree_criterion = node_data[0]['par_tree_criterion']
                    self.IGN_par_num_f_group = node_data[0]['par_num_f_group']
                    self.IGN_par_min_num_group = node_data[0]['par_min_num_group']
                    self.IGN_par_min_pct_group = node_data[0]['par_min_pct_group']
                    self.IGN_par_traindatavariable_setting = node_data[0]['data_variable_setting']
                    self.IGN_par_variable_reject_flag = node_data[0]['par_variable_reject_flag']
                    self.IGN_par_variable_reject_iv = node_data[0]['par_variable_reject_iv']
                    self.IGN_IGNvariable_setting = node_data[0]['IGNvariable_setting']
                    self.IGN_groupingdata = node_data[1][0]
                    self.IGN_f_group_report = node_data[1][1]
                    self.IGN_s_group_report = node_data[1][2]
                    self.IGN_not_use = node_data[1][3]
                    self.IGN_grouped_train_data = node_data[2]
                    self.IGN_grouped_valid_data = node_data[3]
                    self.IGN_grouped_reject_data = node_data[4]
                    self.IGN_grouped_oot_data = node_data[5]
                    # self.IGN_check_list = node_data[0]['check_change']
                    # mark variables the IGN node flagged as unused
                    self.IGN_IGNvariable_setting['是否使用'] = self.IGN_IGNvariable_setting.apply(
                        lambda x: '不使用' if x['变量名称'] in self.IGN_not_use else x['是否使用'], axis=1)
        except Exception as e:
            tk.messagebox.showwarning('错误', "%s数据集导入错误:%s" % (datatype, e))
def pre_data(self):
try:
dd = list(self.project_info[(self.project_info['模块类型'] == 'IGN') &
(self.project_info['状态'] == 'Good')]['模块名字'])
self.IGN_list = dd
except Exception as e:
tk.messagebox.showwarning('错误', e)
def Start_UI(self):
self.start_window_base = self.master
width = self.master.winfo_screenwidth() * 0.2
height = self.master.winfo_screenheight() * 0.7
screenwidth = self.master.winfo_screenwidth()
screenheight = self.master.winfo_screenheight()
self.start_window_base.geometry(
'%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2))
self.start_window_base.title('评分卡模型')
    def adjustsetting(self):
        """Build the settings form: node name, grouping-data import, model
        hyper-parameters, scorecard scaling, and the action buttons."""
        # --- node-name frame ---
        self.node_intro = LabelFrame(self.start_window_base, text='模块名称:')
        L8 = Label(self.node_intro, width=25, text="模块名称:", justify="left")
        L8.grid(column=0, row=0, sticky=(W))
        # name is editable only for a brand-new node; otherwise shown read-only
        if (self.load == 'N') & (self.finsh == 'N'):
            node_name = tk.StringVar(value=self.node_name)
            self.entry_node_name = Entry(self.node_intro, textvariable=node_name, bd=1, width=18)
            self.entry_node_name.grid(column=1, row=0, sticky=(W))
        else:
            L88 = Label(self.node_intro, width=25, text="%s" % self.node_name, justify="left")
            L88.grid(column=1, row=0, sticky=(W))
        self.node_intro.grid(columnspan=3, sticky=(W), padx=10, pady=10)
        # --- grouping-data import frame ---
        self.start_window_data = LabelFrame(self.start_window_base, text='导入分组数据:')
        L1 = Label(self.start_window_data, width=25, text="分组数据:", justify="left")
        L1.grid(column=0, row=0, sticky=(W))
        self.comboxlist_train_data = ttk.Combobox(self.start_window_data, width=15)
        self.comboxlist_train_data["value"] = self.IGN_list
        # pre-select the IGN node this model was built from, if any
        if self.IGN_grouped_train_data.empty != True:
            for i in range(len(self.IGN_list)):
                if self.IGN_list[i] == self.IGN_node_name:
                    self.comboxlist_train_data.current(i)
        self.comboxlist_train_data.bind("<<ComboboxSelected>>", lambda event: self.load_data(event, datatype='train'))
        self.comboxlist_train_data.grid(column=1, row=0, sticky=(W))
        L3 = Label(self.start_window_data, width=25, text="自变量:", justify="left")
        L3.grid(column=0, row=2, sticky=(W))
        self.comboxlist_variable_type = ttk.Combobox(self.start_window_data, width=15)
        self.comboxlist_variable_type["value"] = ['WOE', 'GRP', 'GRP_ind']
        if self.par_variable_type == 'WOE':
            self.comboxlist_variable_type.current(0)
        else:
            self.comboxlist_variable_type.current(1)
        self.comboxlist_variable_type.grid(column=1, row=2, sticky=(W))
        L4 = Label(self.start_window_data, width=25, text="变量设置:", justify="left")
        L4.grid(column=0, row=3, sticky=(W))
        self.button_data_variablesetting = ttk.Button(self.start_window_data, text='设置:')
        self.button_data_variablesetting.grid(column=1, row=3, sticky=(W))
        self.button_data_variablesetting.bind("<Button-1>", self.show_variabledetail)
        L8 = Label(self.start_window_data, width=25, text="冻结入模变量:", justify="left")
        L8.grid(column=0, row=5, sticky=(W))
        self.comboxlist_freezing_code = ttk.Combobox(self.start_window_data, width=15)
        self.comboxlist_freezing_code["value"] = ['是', '否']
        if self.par_use_freezing_flag == '否':
            self.comboxlist_freezing_code.current(1)
        else:
            self.comboxlist_freezing_code.current(0)
        self.comboxlist_freezing_code.grid(column=1, row=5, sticky=(W))
        self.start_window_data.grid(columnspan=3, sticky=(W), padx=10, pady=10)
        # --- disabled model-import frame (kept for reference) ---
        # self.start_window_model_setting = LabelFrame(self.start_window_base, text='模型参数:')
        # L5 = Label(self.start_window_model_setting, width=20, text="导入模型数据:")
        # L5.grid(column=0, row=6, sticky=(W))
        #
        # L55 = Label(self.start_window_model_setting, width=20, text=self.par_import_modelname)
        # L55.grid(column=1, row=6, sticky=(W))
        # self.button_data_grouping_data_import = ttk.Button(self.start_window_model_setting, text='导入:')
        # self.button_data_grouping_data_import.grid(column=1, row=7, sticky=(W))
        # # self.button_data_grouping_data_import.bind("<Button-1>", self.loading_grouping_data)
        # self.start_window_model_setting.grid(columnspan=3, sticky=(W), padx=10, pady=10)
        # --- model hyper-parameter frame ---
        self.start_window_LR_setting = LabelFrame(self.start_window_base, text='模型参数设置:')
        L6 = Label(self.start_window_LR_setting, width=25, text="模型方法:", justify="left")
        L6.grid(column=0, row=5, sticky=(W))
        L7 = Label(self.start_window_LR_setting, width=25, text="逻辑回归", bd=1, justify="left")
        L7.grid(column=1, row=5, sticky=(W))
        L8 = Label(self.start_window_LR_setting, width=25, text="模型评价标准:", justify="left")
        L8.grid(column=0, row=6, sticky=(W))
        self.comboxlist_model_creterion = ttk.Combobox(self.start_window_LR_setting, width=15)
        self.comboxlist_model_creterion["value"] = ['aic', 'bic', 'llr']
        if self.par_criterion == 'aic':
            self.comboxlist_model_creterion.current(0)
        elif self.par_criterion == 'bic':
            self.comboxlist_model_creterion.current(1)
        else:
            self.comboxlist_model_creterion.current(2)
        self.comboxlist_model_creterion.grid(column=1, row=6, sticky=(W))
        # self.entry_s_bin_num.bind('<Return>', lambda event: self.int_num_check(event, 'entry_s_bin_num', 'int'))
        L9 = Label(self.start_window_LR_setting, width=25, text="模型方向:", justify="left")
        L9.grid(column=0, row=7, sticky=(W))
        self.comboxlist_model_direction = ttk.Combobox(self.start_window_LR_setting, width=15)
        self.comboxlist_model_direction["value"] = ['NO', 'forward', 'stepwise']
        if self.par_direction == 'NO':
            self.comboxlist_model_direction.current(0)
        elif self.par_direction == 'forward':
            self.comboxlist_model_direction.current(1)
        else:
            self.comboxlist_model_direction.current(2)
        self.comboxlist_model_direction.grid(column=1, row=7, sticky=(W))
        L10 = Label(self.start_window_LR_setting, width=25, text="变量进入模型P值:", justify="left")
        L10.grid(column=0, row=8, sticky=(W))
        pvalue = tk.StringVar(value=self.par_p_value)
        self.entry_pvalue = Entry(self.start_window_LR_setting, textvariable=pvalue, bd=1, width=18)
        self.entry_pvalue.grid(column=1, row=8, sticky=(W))
        L10_1 = Label(self.start_window_LR_setting, width=25, text="变量保留模型P值:", justify="left")
        L10_1.grid(column=0, row=9, sticky=(W))
        pvalue = tk.StringVar(value=self.par_stay_p_value)
        self.entry_s_pvalue = Entry(self.start_window_LR_setting, textvariable=pvalue, bd=1, width=18)
        self.entry_s_pvalue.grid(column=1, row=9, sticky=(W))
        L11 = Label(self.start_window_LR_setting, width=25, text="模型训练使用截距:", justify="left")
        L11.grid(column=0, row=10, sticky=(W))
        self.comboxlist_model_intercept = ttk.Combobox(self.start_window_LR_setting, width=15)
        self.comboxlist_model_intercept["value"] = ['是', '否']
        if self.par_intercept_flag == True:
            self.comboxlist_model_intercept.current(0)
        else:
            self.comboxlist_model_intercept.current(1)
        self.comboxlist_model_intercept.grid(column=1, row=10, sticky=(W))
        L12 = Label(self.start_window_LR_setting, width=25, text="模型训练是否使用辅助变量:", justify="left")
        L12.grid(column=0, row=11, sticky=(W))
        self.comboxlist_inditor_help = ttk.Combobox(self.start_window_LR_setting, width=15)
        self.comboxlist_inditor_help["value"] = ['是', '否']
        if self.par_inditor_help == True:
            self.comboxlist_inditor_help.current(0)
        else:
            self.comboxlist_inditor_help.current(1)
        self.comboxlist_inditor_help.grid(column=1, row=11, sticky=(W))
        # NOTE(review): these two defaults are re-assigned every time the form is
        # built, clobbering any previously saved value — looks unintentional, confirm.
        self.par_inditor_pct = 0.01
        self.par_inditor_sample = 0.01
        L10_12 = Label(self.start_window_LR_setting, width=20, text="辅助变量最小坏账率偏移:", justify="left")
        L10_12.grid(column=0, row=12, sticky=(W))
        par_inditor_pct = tk.StringVar(value=self.par_inditor_pct)
        self.entry_par_inditor_pct = Entry(self.start_window_LR_setting, textvariable=par_inditor_pct, bd=1, width=18)
        self.entry_par_inditor_pct.grid(column=1, row=12, sticky=(W))
        L10_13 = Label(self.start_window_LR_setting, width=20, text="辅助变量最小样本占比:", justify="left")
        L10_13.grid(column=0, row=13, sticky=(W))
        par_inditor_sample = tk.StringVar(value=self.par_inditor_sample)
        self.entry_par_inditor_sample = Entry(self.start_window_LR_setting, textvariable=par_inditor_sample, bd=1,
                                              width=18)
        self.entry_par_inditor_sample.grid(column=1, row=13, sticky=(W))
        L13 = Label(self.start_window_LR_setting, width=20, text="模型训练并行数:", justify="left")
        L13.grid(column=0, row=14, sticky=(W))
        self.comboxlist_n_job = ttk.Combobox(self.start_window_LR_setting, width=15)
        self.comboxlist_n_job["value"] = [x for x in range(1, max(self.n_job + 1, joblib.cpu_count() - 1))]
        self.comboxlist_n_job.current(self.n_job - 1)
        self.comboxlist_n_job.grid(column=1, row=14, sticky=(W))
        L14 = Label(self.start_window_LR_setting, width=20, text="LASSO变量选择:", justify="left")
        L14.grid(column=0, row=15, sticky=(W))
        self.comboxlist_lasso_flag = ttk.Combobox(self.start_window_LR_setting, width=15)
        self.comboxlist_lasso_flag["value"] = ['是', '否']
        if self.lasso_flag == '否':
            self.comboxlist_lasso_flag.current(1)
        else:
            self.comboxlist_lasso_flag.current(0)
        self.comboxlist_lasso_flag.grid(column=1, row=15, sticky=(W))
        self.start_window_LR_setting.grid(columnspan=3, sticky=(W), padx=10, pady=10)
        # --- scorecard scaling frame ---
        self.start_window_scorecard_setting = LabelFrame(self.start_window_base, text='评分卡设置:')
        L15 = Label(self.start_window_scorecard_setting, width=20, text="是否显示截距", justify="left")
        L15.grid(column=0, row=4, sticky=(W))
        self.comboxlist_intercept_scorecard = ttk.Combobox(self.start_window_scorecard_setting, width=15)
        self.comboxlist_intercept_scorecard["value"] = ['是', '否']
        if self.par_intercept_scorecard == '是':
            self.comboxlist_intercept_scorecard.current(0)
        else:
            self.comboxlist_intercept_scorecard.current(1)
        self.comboxlist_intercept_scorecard.grid(column=1, row=4, sticky=(W))
        odds_ratio = tk.StringVar(value=self.par_odds_ratio)
        L13 = Label(self.start_window_scorecard_setting, width=20, text="优比:", justify="left")
        L13.grid(column=0, row=5, sticky=(W))
        self.entry_odds_ratio = Entry(self.start_window_scorecard_setting, textvariable=odds_ratio, bd=1, width=18)
        self.entry_odds_ratio.grid(column=1, row=5, sticky=(W))
        # self.entry_odds_ratio.bind('<Return>', lambda event: self.int_num_check(event, 'entry_min_num_char', 'gg'))
        odds_score_ratio = tk.StringVar(value=self.par_odds_score_ratio)
        L14 = Label(self.start_window_scorecard_setting, width=20, text="评分卡点数:", justify="left")
        L14.grid(column=0, row=6, sticky=(W))
        self.entry_odds_score = Entry(self.start_window_scorecard_setting, bd=1, textvariable=odds_score_ratio,
                                      width=18)
        self.entry_odds_score.grid(column=1, row=6, sticky=(W))
        # self.entry_odds_score.bind('<Return>', lambda event: self.int_num_check(event, 'entry_min_pct_char', 'gg'))
        self.start_window_scorecard_setting.grid(columnspan=3, sticky=(W), padx=10, pady=10)
        L10 = Label(self.start_window_scorecard_setting, width=20, text="翻倍点数:", justify="left")
        L10.grid(column=0, row=10, sticky=(W))
        f_num_bin = tk.StringVar(value=self.par_odds_double_score)
        self.entry_f_bin_num = Entry(self.start_window_scorecard_setting, textvariable=f_num_bin, width=18, bd=1)
        self.entry_f_bin_num.grid(column=1, row=10, sticky=(W))
        # self.entry_f_bin_num.bind('<Return>', lambda event: self.int_num_check(event, 'entry_f_bin_num', 'gg'))
        # --- action buttons (which set is shown depends on load/finsh state) ---
        self.button_setting_save = ttk.Button(self.start_window_base, text='(保存)退出')
        self.button_setting_save.grid(column=0, row=7, sticky=(W), padx=10, pady=10)
        self.button_setting_save.bind("<Button-1>", self.save_project)
        if (self.load == 'Y') | (self.finsh == 'Y'):
            self.check_result = ttk.Button(self.start_window_base, text='查看结果')
            self.check_result.grid(column=1, row=7, sticky=(W), padx=10, pady=10)
            self.check_result.bind("<Button-1>", self.scorecard_result_show_ui)
        if (self.load == 'N') & (self.finsh == 'N'):
            self.button_setting_run = ttk.Button(self.start_window_base, text='应用'
                                                 # ,command=lambda event :self.thread_it(self.LR() , event)
                                                 )
            self.button_setting_run.grid(column=2, row=7, sticky=(W))
            # self.button_setting_run.bind("<Button-1>", lambda event :self.thread_it(self.LR(event) ))
            self.button_setting_run.bind("<Button-1>", self.LR)
        else:
            self.button_refresh_run = ttk.Button(self.start_window_base, text='刷新结果')
            self.button_refresh_run.grid(column=2, row=7, sticky=(W))
            self.button_refresh_run.bind("<Button-1>", self.LR)
        self.button_modify_manually = ttk.Button(self.start_window_base, text='手动调整变量')
        self.button_modify_manually.grid(column=0, row=8, sticky=(W), padx=10, pady=10)
        self.button_modify_manually.bind("<Button-1>", self.modify_model)
        self.button_output = ttk.Button(self.start_window_base, text='导出数据集')
        self.button_output.grid(column=1, row=8, sticky=(W), padx=10, pady=10)
        self.button_output.bind("<Button-1>", self.out_dataset)
def out_dataset(self, event):
try:
word = '导出数据集:\n 训练集:%s/%s_train.csv \n' % (self.project_path, self.node_name)
self.predict_train_data.to_csv(self.project_path + '/' + '%s_train.csv' % self.node_name, index=False,
encoding='utf-8')
if self.predict_vaild_data.empty == False:
word = word + '验证集:%s/%s_valid.csv \n' % (self.project_path, self.node_name)
self.predict_vaild_data.to_csv(self.project_path + '/' + '%s_valid.csv' % self.node_name, index=False,
encoding='utf-8')
if self.predict_reject_data.empty == False:
word = word + '验证集:%s/%s_reject.csv \n' % (self.project_path, self.node_name)
self.predict_reject_data.to_csv(self.project_path + '/' + '%s_reject.csv' % self.node_name, index=False,
encoding='utf-8')
if self.predict_oot_data.empty == False:
word = word + '验证集:%s/%s_oot.csv \n' % (self.project_path, self.node_name)
self.predict_oot_data.to_csv(self.project_path + '/' + '%s_oot.csv' % self.node_name, index=False,
encoding='utf-8')
tk.messagebox.showwarning('成功', word)
except Exception as e:
tk.messagebox.showwarning('错误', e)
    def modify_model(self, event):
        """Open the manual variable-adjustment window.

        EAFP guard: if the window already exists and is open, warn and bail;
        the bare except fires when self.modify_model_ui does not exist yet
        (AttributeError) or was destroyed (TclError), in which case a fresh
        window is built.
        """
        try:
            if self.modify_model_ui.state() == 'normal':
                tk.messagebox.showwarning('错误', "请先处理当前打开窗口")
        except:
            # 'first' call primes add_delet_var (project helper defined elsewhere
            # in this class) with the fitted model stored in self.model_ppp
            self.df_modify_model, self.record_list_modify, self.model_modify, self.model_variable_df_modify = self.add_delet_var(
                record_list=self.model_ppp[0]
                , input_model=self.model_ppp[1], model_variable_df=self.model_ppp[2], modify_var='f', flag='first',
                var_list=self.vari_list, n_job=self.n_job,
                predict_train_data=self.predict_train_data,
                target_train=self.target_train, predict_vaild_data=self.predict_vaild_data,
                par_intercept_flag=self.par_intercept_flag, par_variable_type=self.par_variable_type
            )
            self.modify_model_ui = Toplevel(self.start_window_base)
            f = LabelFrame(self.modify_model_ui, text='调整模型')
            screen_width = f.winfo_screenwidth() * 0.5
            screen_height = f.winfo_screenheight() * 0.8
            self.table_sum = self.pt = Table(f, dataframe=self.df_modify_model, height=screen_height,
                                             width=screen_width)
            self.pt.show()
            # right/middle and triple clicks on a row toggle that variable
            self.table_sum.bind("<Button-3>", self.handle_left_click)
            # self.table.bind("<Button-1>",self.handle_left_click)
            self.table_sum.bind("<Button-2>", self.handle_left_click)
            # self.table_sum.bind("<Double-Button-3>", self.handle_left_click)
            # self.table_sum.bind("<Double-Button-1>", self.handle_left_click)
            # self.table_sum.bind("<Double-Button-2>", self.handle_left_click)
            self.table_sum.bind("<Triple-Button-3>", self.handle_left_click)
            self.table_sum.bind("<Triple-Button-1>", self.handle_left_click)
            self.table_sum.bind("<Triple-Button-2>", self.handle_left_click)
            f.pack()
def handle_left_click(self, event):
rowclicked = self.pt.get_row_clicked(event)
self.modify_var = self.df_modify_model.iloc[rowclicked]['变量名称']
if self.modify_var in list(self.df_modify_model[self.df_modify_model['是否在模型中'] == 'Y']['变量名称']):
flag = tk.messagebox.askyesno('提示', '是否要把%s从模型中删除:' % self.modify_var)
self.modify_var_flag = 'del'
else:
flag = tk.messagebox.askyesno('提示', '是否要把%s添加到现有模型中:' % self.modify_var)
self.modify_var_flag = 'add'
if flag == True:
self.modify_model_calcu()
    def modify_model_calcu(self):
        """Apply the pending add/del of self.modify_var, then rebuild the window.

        Refits via add_delet_var, appends an audit entry plus the new model
        summary to the modification log, and recreates the Toplevel with
        save/close buttons and the refreshed variable table.
        """
        self.df_modify_model, self.record_list_modify, self.model_modify, self.model_variable_df_modify = self.add_delet_var(
            record_list=self.record_list_modify, input_model=self.model_modify,
            model_variable_df=self.model_variable_df_modify, modify_var=self.modify_var, flag=self.modify_var_flag,
            var_list=self.vari_list, n_job=self.n_job, predict_train_data=self.predict_train_data,
            target_train=self.target_train, predict_vaild_data=self.predict_vaild_data,
            par_intercept_flag=self.par_intercept_flag, par_variable_type=self.par_variable_type)
        # audit trail: manual action + resulting statsmodels summary
        self.record_list_modify.append('手动%s %s' % (self.modify_var_flag, self.modify_var))
        self.record_list_modify.append(self.model_modify.summary2())
        self.modify_model_ui.destroy()
        self.modify_model_ui = Toplevel(self.master)
        def close(event):
            # discard the manual changes and close the window
            self.modify_model_ui.destroy()
        def model_save(event):
            # persist the manually adjusted model back into the node settings
            self.modify_model_ui.destroy()
            error2_f = Toplevel(self.master)
            screenwidth = self.master.winfo_screenwidth()
            screenheight = self.master.winfo_screenheight()
            error2_f.geometry('%dx%d+%d+%d' % (150, 100, (screenwidth - 150) / 2, (screenheight - 100) / 2))
            L2 = Label(error2_f, text="保存中。。。")
            L2.grid()
            self.master.update()
            self.model_ppp = [self.record_list_modify, self.model_modify, self.model_variable_df_modify, ]
            # scorecard_data_pre is a project helper defined elsewhere in this class
            self.f_scorecard = self.scorecard_data_pre(self.model_ppp)
            self.node_setting['model'] = self.model_ppp
            try:
                error2_f.destroy()
            except:
                pass
        d = LabelFrame(self.modify_model_ui)
        # NOTE(review): both buttons are assigned to self.check_result, so the
        # attribute ends up referring only to the last one — confirm intended.
        self.check_result = ttk.Button(d, text='保存(关闭)')
        self.check_result.pack(side=LEFT)
        self.check_result.bind("<Button-1>", model_save)
        self.check_result = ttk.Button(d, text='关闭')
        self.check_result.pack(side=LEFT)
        self.check_result.bind("<Button-1>", close)
        d.pack()
        f = LabelFrame(self.modify_model_ui, text='调整模型')
        screen_width = f.winfo_screenwidth() * 0.5
        screen_height = f.winfo_screenheight() * 0.8
        self.table_sum = self.pt = Table(f, dataframe=self.df_modify_model, height=screen_height, width=screen_width)
        self.pt.show()
        self.table_sum.bind("<Button-3>", self.handle_left_click)
        # self.table.bind("<Button-1>",self.handle_left_click)
        self.table_sum.bind("<Button-2>", self.handle_left_click)
        self.table_sum.bind("<Double-Button-3>", self.handle_left_click)
        self.table_sum.bind("<Double-Button-1>", self.handle_left_click)
        self.table_sum.bind("<Double-Button-2>", self.handle_left_click)
        self.table_sum.bind("<Triple-Button-3>", self.handle_left_click)
        self.table_sum.bind("<Triple-Button-1>", self.handle_left_click)
        self.table_sum.bind("<Triple-Button-2>", self.handle_left_click)
        f.pack()
def save_project(self, event):
try:
node_save_path = self.project_path + '/' + '%s.model' % self.node_name
error2 = Toplevel(self.master)
screenwidth = self.master.winfo_screenwidth()
screenheight = self.master.winfo_screenheight()
error2.geometry('%dx%d+%d+%d' % (150, 100, (screenwidth - 150) / 2, (screenheight - 100) / 2))
L2 = Label(error2, text="保存中")
L2.grid()
self.master.update()
filename = node_save_path
fw = open(filename, 'wb')
pickle.dump([self.node_setting, 'A'], fw, protocol=4)
fw.close()
self.save = 'Y'
try:
error2.destroy()
except:
pass
try:
self.master.destroy()
except:
pass
except Exception as e:
tk.messagebox.showwarning('错误', e)
def get_par(self):
self.par_use_freezing_flag = self.comboxlist_freezing_code.get()
self.par_import_modelname = None
self.par_intercept_flag = self.comboxlist_model_intercept.get() == '是'
self.par_p_value = float(self.entry_pvalue.get())
self.par_stay_p_value = float(self.entry_s_pvalue.get())
self.par_criterion = self.comboxlist_model_creterion.get()
self.par_direction = self.comboxlist_model_direction.get()
self.par_variable_type = self.comboxlist_variable_type.get()
self.par_odds_ratio = float(self.entry_odds_ratio.get())
self.par_odds_score_ratio = float(self.entry_odds_score.get())
self.par_odds_double_score = float(self.entry_f_bin_num.get())
self.par_inditor_help = self.comboxlist_inditor_help.get() == '是'
self.par_intercept_scorecard = self.comboxlist_intercept_scorecard.get()
self.par_inditor_pct = float(self.entry_par_inditor_pct.get())
self.par_inditor_sample = float(self.entry_par_inditor_sample.get())
self.lasso_flag = self.comboxlist_lasso_flag.get()
self.n_job = int(self.comboxlist_n_job.get())
if (self.finsh == 'N') & (self.load == 'N'):
self.node_name = self.entry_node_name.get()
    def check_all_setting(self):
        """Validate the settings form; return the number of errors found.

        Reads widgets via get_par() first. Name/data problems are mutually
        exclusive (elif chain, one dialog per call); only when those pass
        are the numeric fields checked, one warning dialog per bad field.
        """
        self.get_par()
        mm = 0
        if (self.node_name in self.exist_data) & (self.load == 'N'):
            mm = mm + 1
            tk.messagebox.showwarning('错误', "该名称已经被占用,请更改")
        elif len(self.comboxlist_train_data.get()) < 1:
            mm = mm + 1
            tk.messagebox.showwarning('错误', "请选择分组数据")
        elif len(self.node_name) < 1:
            mm = mm + 1
            tk.messagebox.showwarning('错误', "请输入模块名称")
        else:
            # every numeric parameter must parse as a float
            total = ['par_odds_ratio', 'par_odds_score_ratio', 'par_odds_double_score', 'par_p_value',
                     'par_stay_p_value', 'par_inditor_pct', 'par_inditor_sample']
            for p in total:
                entry_p = p
                pp = self.int_num_check(entry_p)
                mm = mm + pp
        return mm
def int_num_check(self, entry_p):
a = 0
if entry_p == 'par_odds_ratio':
inputnum = self.entry_odds_ratio.get()
tip = '优比:'
elif entry_p == 'par_odds_score_ratio':
inputnum = self.entry_odds_score.get()
tip = '评分卡点数:'
elif entry_p == 'par_odds_double_score':
inputnum = self.entry_f_bin_num.get()
tip = '翻倍点数:'
elif entry_p == 'par_p_value':
inputnum = self.entry_pvalue.get()
tip = '变量进入模型P值:'
elif entry_p == 'par_stay_p_value':
inputnum = self.entry_s_pvalue.get()
tip = '变量保留模型P值:'
elif entry_p == 'par_inditor_pct':
inputnum = self.entry_par_inditor_pct.get()
tip = '辅助变量坏账率偏移值:'
elif entry_p == 'par_inditor_sample':
inputnum = self.entry_par_inditor_sample.get()
tip = '辅助变量样本占比:'
else:
pass
try:
num = float(inputnum)
except Exception as e:
a = a + 1
tk.messagebox.showwarning('错误', '%s:%s' % (tip, e))
return a
    def modify_variable_role(self, event):
        """Handle a click on the variable table: pop up a use/不使用 editor.

        Records which cell was clicked and, when the clicked column is the
        "是否使用" (use this variable?) column, places a combobox on top of the
        cell so the user can change the value.  Selecting a value triggers
        ``variable_role_update``.
        """
        # Best effort: remove any combobox left over from a previous click.
        try:
            self.comboxlist_modify_f_group.destroy()
        except:
            pass
        # Remember the clicked cell so variable_role_update can write back.
        self.rowclicked = self.ptm.get_row_clicked(event)
        self.colclicked = self.ptm.get_col_clicked(event)
        if list(self.IGN_IGNvariable_setting.columns)[self.colclicked] == '是否使用':
            try:
                self.comboxlist_modify_f_group = ttk.Combobox(self.data_variable_set_ui)
                self.comboxlist_modify_f_group["value"] = ['使用', '不使用']
                # Convert the screen coordinates of the click into coordinates
                # relative to the variable-setting window.
                self.comboxlist_modify_f_group.place(x=event.x_root - self.data_variable_set_ui.winfo_rootx(),
                                                     y=event.y_root - self.data_variable_set_ui.winfo_rooty())
                self.comboxlist_modify_f_group.bind("<<ComboboxSelected>>", self.variable_role_update)
            except:
                # Deliberate best-effort: ignore Tk errors (e.g. window closed).
                pass
        else:
            pass
    def show_variabledetail(self, event):
        """Open the variable-setting window and fill it with the table."""
        self.data_variable_set_ui = Toplevel(self.master)
        self.data_variable_set_ui.title('变量设置')
        # Draw (or redraw) the variable table inside the new window.
        self.refresh_datavariable_df()
def refresh_datavariable_df(self):
f = Frame(self.data_variable_set_ui)
f.grid(column=0, row=1,
columnspan=6, sticky=(E, W))
screen_width = f.winfo_screenwidth() * 0.7
screen_height = f.winfo_screenheight() * 0.9
self.table = self.ptm = Table(f, dataframe=self.IGN_IGNvariable_setting, colspan=7,
height=screen_height, width=screen_width)
self.ptm.show()
self.table.bind("<Button-3>", self.modify_variable_role)
self.table.bind("<Button-2>", self.modify_variable_role)
self.table.bind("<Double-Button-3>", self.modify_variable_role)
self.table.bind("<Double-Button-1>", self.modify_variable_role)
self.table.bind("<Double-Button-2>", self.modify_variable_role)
self.table.bind("<Triple-Button-3>", self.modify_variable_role)
self.table.bind("<Triple-Button-1>", self.modify_variable_role)
self.table.bind("<Triple-Button-2>", self.modify_variable_role)
def variable_role_update(self, event):
variable = self.IGN_IGNvariable_setting.iloc[self.rowclicked]['变量名称']
if variable in self.IGN_not_use:
self.comboxlist_modify_f_group.destroy()
tk.messagebox.showwarning('错误', "%s 已经再前一个模块被禁用" % variable)
else:
value = self.comboxlist_modify_f_group.get()
self.IGN_IGNvariable_setting.iloc[self.rowclicked, self.colclicked] = value
self.comboxlist_modify_f_group.destroy()
self.refresh_datavariable_df()
def LR(self, event):
# 检查各个数据集变量情况
# try:
error_num = self.check_all_setting()
if (error_num == 0) & (self.model_start_flag == 'N'):
self.model_start_flag = 'Y'
self.target_train = \
list(self.IGN_par_traindatavariable_setting[self.IGN_par_traindatavariable_setting['变量角色'] == '目标'][
'变量名称'])[0]
if len(self.IGN_par_traindatavariable_setting[
self.IGN_par_traindatavariable_setting['变量角色'] == 'TimeID']) == 1:
self.flag_timeid_train = True
self.timeid_train = self.IGN_par_traindatavariable_setting.loc[
self.IGN_par_traindatavariable_setting['变量角色'] == 'TimeID'][
'变量名称'].values[0]
else:
self.flag_timeid_train = False
self.timeid_train = None
if self.IGN_grouped_reject_data.empty != True:
# 拒绝集变量
try:
self.target_reject = list(
self.IGN_IGNvariable_setting[self.IGN_IGNvariable_setting['变量角色_拒绝样本'] == '目标'][
'变量名称_拒绝样本'])[0]
except:
self.target_reject = None
self.varchar_reject = list(
self.IGN_IGNvariable_setting[(self.IGN_IGNvariable_setting['是否使用'] == '使用') &
(self.IGN_IGNvariable_setting[
'变量角色'] == '自变量') &
(self.IGN_IGNvariable_setting[
'变量类型'] == '字符型')][
'变量名称_拒绝样本'])
self.varnum_reject = list(
self.IGN_IGNvariable_setting[(self.IGN_IGNvariable_setting['是否使用'] == '使用') &
(self.IGN_IGNvariable_setting['变量角色'] == '自变量') &
(self.IGN_IGNvariable_setting['变量类型'] == '数值型')][
'变量名称_拒绝样本'])
if len(self.IGN_IGNvariable_setting[
self.IGN_IGNvariable_setting['变量角色_拒绝样本'] == 'TimeID']) == 1:
self.flag_timeid_reject = True
self.timeid_reject = self.IGN_IGNvariable_setting.loc[
self.IGN_IGNvariable_setting['变量角色_拒绝样本'] == 'TimeID']['变量名称_拒绝样本'].values[0]
else:
self.target_reject = None
self.flag_timeid_reject = False
self.timeid_reject = None
if self.IGN_grouped_oot_data.empty != True:
# oot变量
try:
self.target_oot = \
list(self.IGN_IGNvariable_setting[self.IGN_IGNvariable_setting['变量角色_时间外样本'] == '目标'][
'变量名称_时间外样本'])[0]
except:
self.target_oot = None
self.varchar_oot = list(
self.IGN_IGNvariable_setting[(self.IGN_IGNvariable_setting['是否使用'] == '使用') &
(self.IGN_IGNvariable_setting['变量角色'] == '自变量') &
(self.IGN_IGNvariable_setting['变量类型'] == '字符型')][
'变量名称_时间外样本'])
self.varnum_oot = list(self.IGN_IGNvariable_setting[(self.IGN_IGNvariable_setting['是否使用'] == '使用') &
(self.IGN_IGNvariable_setting[
'变量角色'] == '自变量') &
(self.IGN_IGNvariable_setting[
'变量类型'] == '数值型')][
'变量名称_时间外样本'])
if len(self.IGN_IGNvariable_setting[self.IGN_IGNvariable_setting['变量角色_时间外样本'] == 'TimeID']) == 1:
self.flag_timeid_oot = True
self.timeid_oot = \
self.IGN_IGNvariable_setting.loc[self.IGN_IGNvariable_setting['变量角色_时间外样本'] == 'TimeID'][
'变量名称_时间外样本'].values[0]
else:
self.target_oot = None
self.flag_timeid_oot = False
self.timeid_oot = None
# 训练集变量
self.varchar = list(self.IGN_IGNvariable_setting[(self.IGN_IGNvariable_setting['是否使用'] == '使用') &
(self.IGN_IGNvariable_setting['变量角色'] == '自变量') &
(self.IGN_IGNvariable_setting['变量类型'] == '字符型')][
'变量名称'])
self.varnum = list(self.IGN_IGNvariable_setting[(self.IGN_IGNvariable_setting['是否使用'] == '使用') &
(self.IGN_IGNvariable_setting['变量角色'] == '自变量') &
(self.IGN_IGNvariable_setting['变量类型'] == '数值型')][
'变量名称'])
self.vari_list = self.varchar + self.varnum
try:
if self.temp.state() == 'normal':
tk.messagebox.showwarning('错误', "请先处理当前打开窗口")
except:
error_num = self.check_all_setting()
if error_num == 0:
if (self.par_variable_type == 'WOE') and (
(self.par_use_freezing_flag == '否') | (self.model_ppp == [])):
self.model_ppp = lrmodel.woe_logistic_regression(mianframe=self.start_window_base,
inditor_pct=self.par_inditor_pct,
inditor_sample=self.par_inditor_sample,
var=self.varnum + self.varchar,
p_value_entry=self.par_p_value,
p_value_stay=self.par_stay_p_value,
add_inditor=self.par_inditor_help,
intercept=self.par_intercept_flag,
criterion=self.par_criterion,
df=self.IGN_grouped_train_data,
response=self.target_train,
direction=self.par_direction,
show_step=True, apply_restrict=True,
n_job=self.n_job)
elif (self.par_variable_type == 'GRP') and (
(self.par_use_freezing_flag == '否') | (self.model_ppp == [])):
self.model_ppp = lrmodel.grp_logistic_regression(mianframe=self.start_window_base,
var=self.vari_list,
p_value_entry=self.par_p_value,
p_value_stay=self.par_stay_p_value,
intercept=self.par_intercept_flag,
criterion=self.par_criterion,
df=self.IGN_grouped_train_data,
response=self.target_train,
direction=self.par_direction,
show_step=True, apply_restrict=True,
n_job=self.n_job)
elif (self.par_variable_type == 'GRP_ind') and (
(self.par_use_freezing_flag == '否') | (self.model_ppp == [])):
self.model_ppp = lrmodel.grp_ind_logistic_regression(mianframe=self.start_window_base,
var=self.vari_list,
p_value_entry=self.par_p_value,
p_value_stay=self.par_stay_p_value,
intercept=self.par_intercept_flag,
criterion=self.par_criterion,
df=self.IGN_grouped_train_data,
response=self.target_train,
direction=self.par_direction,
show_step=True, apply_restrict=True,
n_job=self.n_job)
else:
if self.par_variable_type == 'WOE':
woe_model_re = self.model_ppp[1]
cof = pd.DataFrame(woe_model_re.params).reset_index().rename(
{'index': 'woe_variable_name', 0: 'coff'}, axis=1)
variable_df = self.model_ppp[2]
woe_score = pd.merge(variable_df, cof, how='outer', left_on='model_var',
right_on='woe_variable_name')
woe_score['ori_var'][woe_score['woe_variable_name'] == 'const'] = 'const'
ori_var = list(
woe_score[(woe_score['var_type'] == 'ori') & (woe_score['coff'].isnull() == False)][
'ori_var'])
try:
ori_var.remove('const')
except:
pass
ori_var = ['woe_' + x for x in ori_var]
woe_var=ori_var
group_variable =[]
if len(woe_score[(woe_score['var_type'] == 'add') & (
woe_score['coff'].isnull() == False)]) > 0:
add_variabile_df = woe_score[
(woe_score['var_type'] == 'add') & (woe_score['coff'].isnull() == False)]
add_variabile_df['group_a'] = add_variabile_df['group_a'].astype('int')
add_variabile_df['group_b'] = add_variabile_df['group_b'].astype('int')
add_variabile_df['f_group_variable']=add_variabile_df.apply(lambda x: 'ind_f_group_%s_%s_f_group_%s_%s' % (x['vara'], int(x['group_a']), x['varb'], int(x['group_b'])),axis=1)
ori_var = ori_var + ['f_group_' + x for x in add_variabile_df['vara']] + [
'f_group_' + x for x in add_variabile_df['varb']]
group_variable=list(add_variabile_df['f_group_variable'])
if list(set(ori_var) - set(self.IGN_grouped_train_data.columns)) == []:
df = self.IGN_grouped_train_data.copy()
if len(woe_score[(woe_score['var_type'] == 'add') & (
woe_score['coff'].isnull() == False)]) > 0:
def add_indictor(vara, varb, groupa, df, groupb):
df['ind_f_group_%s_%s_f_group_%s_%s' % (
vara, int(groupa), varb, int(groupb))] = df.apply(
lambda x: 1 if (x['f_group_%s' % vara] == groupa) & (
x['f_group_%s' % varb] == groupb) else 0, axis=1)
df['ind_f_group_%s_%s_f_group_%s_%s' % (
vara, int(groupa), varb, int(groupb))] = df[
'ind_f_group_%s_%s_f_group_%s_%s' % (
vara, int(groupa), varb, int(groupb))].astype('int8')
add_variabile_df.apply(
lambda x: add_indictor(df=df, vara=x['vara'], varb=x['varb'],
groupa=x['group_a'], groupb=x['group_b']), axis=1)
model_ppp_select = lrmodel.woe_logistic_regression(mianframe=self.start_window_base,
inditor_pct=self.par_inditor_pct,
inditor_sample=self.par_inditor_sample,
var=list(set(woe_var+group_variable)),
p_value_entry=self.par_p_value,
p_value_stay=self.par_stay_p_value,
add_inditor=self.par_inditor_help,
intercept=self.par_intercept_flag,
criterion=self.par_criterion,
df=df,
response=self.target_train,
direction='NO',
show_step=True,
apply_restrict=False,
flag_IGN=False,
n_job=self.n_job)
modelvar_match_df = pd.DataFrame()
woe_varlist = ['woe_' + x for x in self.varnum + self.varchar]
modelvar_match_df['ori_var'] = self.varnum + self.varchar
modelvar_match_df['model_var'] = woe_varlist
modelvar_match_df['var_type'] = 'ori'
try:
add_df=self.model_ppp[2].copy()
modelvar_match_df = modelvar_match_df.append(add_df[add_df['var_type'] == 'add'])
except Exception as e:
pass
self.model_ppp = [model_ppp_select[0], model_ppp_select[1], modelvar_match_df]
else:
tk.messagebox.showwarning('错误', "训练集中没有如下变量%" % (
list(set(ori_var) - set(self.IGN_grouped_train_data.columns))))
else:
grp_ppp = self.model_ppp
grp_model = grp_ppp[1]
cof = pd.DataFrame(grp_model.params).reset_index().rename(
{'index': 'grp_variable_name', 0: 'coff'}, axis=1)
variable_df = grp_ppp[2]
grp_score = pd.merge(variable_df, cof, how='outer', left_on='model_var',
right_on='grp_variable_name')
grp_score['ori_var'][grp_score['grp_variable_name'] == 'const'] = 'const'
ori_var = list(
grp_score[(grp_score['var_type'] == 'ori') & (grp_score['coff'].isnull() == False)][
'ori_var'])
if list(set(ori_var) - set(self.IGN_grouped_train_data.columns)) == []:
if self.par_variable_type == 'GRP':
self.model_ppp = lrmodel.grp_logistic_regression(
mianframe=self.start_window_base,
var=list(set(ori_var)),
p_value_entry=self.par_p_value,
p_value_stay=self.par_stay_p_value,
intercept=self.par_intercept_flag,
criterion=self.par_criterion,
df=self.IGN_grouped_train_data,
response=self.target_train,
direction=self.par_direction,
show_step=True, apply_restrict=False,
n_job=1)
else:
self.model_ppp = lrmodel.grp_ind_logistic_regression(
mianframe=self.start_window_base,
var=list(set(ori_var)),
p_value_entry=self.par_p_value,
p_value_stay=self.par_stay_p_value,
intercept=self.par_intercept_flag,
criterion=self.par_criterion,
df=self.IGN_grouped_train_data,
response=self.target_train,
direction=self.par_direction,
show_step=True,
apply_restrict=False,
n_job=1)
else:
tk.messagebox.showwarning('错误', "训练集中没有如下变量%" % (
list(set(ori_var) - set(self.IGN_grouped_train_data.columns))))
self.f_scorecard = self.scorecard_data_pre(self.model_ppp)
if self.lasso_flag == '是':
self.lasso_df = self.func_lasso_df(variable_list=self.vari_list, train_target=self.target_train,
predict_train_data=self.predict_train_data,
predict_vaild_data=self.predict_vaild_data,
n_job=self.n_job)
else:
self.lasso_df = pd.DataFrame()
self.var_clus = self.func_var_clus(variable_list=self.vari_list,
predict_train_data=self.predict_train_data,
scorecarddf=self.f_scorecard)
node_save_path = self.project_path + '/' + '%s.model' % self.node_name
nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
self.node_setting = {'node_type': 'SCR',
'node_name': self.node_name,
'node_save_path': node_save_path,
# 'ign_node': self.par_use_freezing_flag,
'par_use_freezing_flag': self.par_use_freezing_flag,
'par_inditor_help': self.par_inditor_help,
'par_import_modelname': self.par_import_modelname,
'par_intercept_flag': self.par_intercept_flag,
'par_p_value': self.par_p_value,
'par_stay_p_value': self.par_stay_p_value,
'par_criterion': self.par_criterion,
'par_direction': self.par_direction,
'par_variable_type': self.par_variable_type,
'par_odds_ratio': self.par_odds_ratio,
'par_odds_score_ratio': self.par_odds_score_ratio,
'par_odds_double_score': self.par_odds_double_score,
'par_intercept_scorecard': self.par_intercept_scorecard,
# 分组过程参数
# 评分卡变量
'predict_train_data': self.predict_train_data,
'predict_vaild_data': self.predict_vaild_data,
'predict_reject_data': self.predict_reject_data,
'predict_oot_data': self.predict_oot_data,
'model': self.model_ppp,
'scorecard_df': self.f_scorecard,
'lasso_df': self.lasso_df,
'lasso_flag': self.lasso_flag,
'time': nowTime,
'previous_node_name': [self.IGN_node_name],
'var_clus': self.var_clus,
'previous_node_time': [self.IGN_node_time],
'IGN_grouping_data': self.IGN_groupingdata,
'report_para': {'train_target': self.target_train,
'oot_target': self.target_oot,
'reject_target': self.target_reject,
'timeid_train': self.timeid_train,
'timeid_oot': self.timeid_oot,
'timeid_reject': self.timeid_reject,
'f_group_report': self.IGN_f_group_report,
'vari_list': self.vari_list},
# 'data_variable_setting': self.par_traindatavariable_setting,
# 'reject_data_variable_setting': self.par_rejectdatavariable_setting,
# 'oot_data_variable_setting': self.par_ootdatavariable_setting,
'use_node': [self.node_name] + self.IGN_previous_usedlist
}
self.finsh = 'Y'
for child in self.master.winfo_children():
child.destroy()
self.adjustsetting()
self.model_start_flag = 'N'
# except Exception as e:
# tk.messagebox.showwarning('错误', e)
# self.model_start_flag = 'N'
    def scorecard_result_show_ui(self, event):
        """Open the scorecard result window (one at a time).

        If a result window is already open (``.state()`` returns 'normal'),
        the user is warned instead of opening a second one.  The attribute
        lookup / state check raising is the signal that no window exists yet.
        """
        try:
            if self.result_page.state() == 'normal':
                tk.messagebox.showwarning('错误', "请先处理当前打开窗口")
        except:
            # No (living) result window yet -> create one and render results.
            self.result_page = Toplevel(self.master)
            scorecard_result_ui(mainframe=self.result_page, project_path=self.project_path, node_name=self.node_name,
                                predict_train_data=self.predict_train_data, predict_vaild_data=self.predict_vaild_data,
                                predict_oot_data=self.predict_oot_data, predict_reject_data=self.predict_reject_data,
                                train_target=self.target_train, oot_target=self.target_oot,
                                reject_target=self.target_reject,
                                train_time_id=self.timeid_train, oot_time_id=self.timeid_oot,
                                reject_time_id=self.timeid_reject,
                                record_list=self.model_ppp[0], model=self.model_ppp[1], scorecarddf=self.f_scorecard,
                                f_group_report=self.IGN_f_group_report
                                , variable_list=self.vari_list, lasso_df=self.lasso_df,
                                model_var_type=self.par_variable_type, var_clus=self.var_clus)
    def scorecard_data_pre(self, model_re):
        """Build the scorecard table from a fitted model and score all data sets.

        model_re is the [records, fitted_model, variable_match_df] triple
        produced by the lrmodel fitting routines.  Depending on
        ``self.par_variable_type`` either the WOE branch or the group-dummy
        branch is taken.  Points are derived from the odds parameters
        (PDO-style scaling: B = pdo/ln2).  Side effects: writes
        ``self.predict_train_data`` / ``predict_vaild_data`` /
        ``predict_reject_data`` / ``predict_oot_data``.  Returns the final
        scorecard DataFrame.
        """
        def score_predict(scorecard, df):
            # Add per-variable SCR_* columns and their total SCORE to df.
            if len(scorecard[scorecard['variable_name']=='const'])==1:
                df['SCORE']=list(scorecard[scorecard['variable_name'] =='const']['scorecard'])[0]
            else:
                df['SCORE'] = 0
            for var in list(scorecard['variable_name'].unique()):
                if var != 'const':
                    df['SCR_%s' % var] = 0
                    for group in scorecard[scorecard['variable_name'] == var]['f_group']:
                        df['SCR_%s' % var][df['f_group_%s' % var] == group] = \
                            list(scorecard[(scorecard['variable_name'] == var) & (scorecard['f_group'] == group)]['scorecard'])[0]
                    df['SCORE'] = df['SCORE'] + df['SCR_%s' % var]
            return df
        if self.par_variable_type == 'WOE':
            # WOE scorecard branch.
            def woe_predict(model, intercept, df, woe_score):
                # Predict P(bad) with the WOE model; builds any interaction
                # indicator columns the model needs first.
                if len(woe_score[(woe_score['var_type'] == 'add') & (woe_score['coff'].isnull() == False)]) > 0:
                    add_variabile_df = woe_score[
                        (woe_score['var_type'] == 'add') & (woe_score['coff'].isnull() == False)]
                    add_variabile_df['group_a'] = add_variabile_df['group_a'].astype('int')
                    add_variabile_df['group_b'] = add_variabile_df['group_b'].astype('int')
                    def add_indictor(vara, varb, groupa, df, groupb):
                        # 0/1 indicator for "row falls in group_a of vara AND group_b of varb".
                        df['ind_f_group_%s_%s_f_group_%s_%s' % (vara, int(groupa), varb, int(groupb))] = df.apply(
                            lambda x: 1 if (x['f_group_%s' % vara] == groupa) & (
                                    x['f_group_%s' % varb] == groupb) else 0, axis=1)
                        df['f_group_ind_f_group_%s_%s_f_group_%s_%s' % (vara, int(groupa), varb, int(groupb))] = df[
                            'ind_f_group_%s_%s_f_group_%s_%s' % (vara, int(groupa), varb, int(groupb))].astype('int8')
                    add_variabile_df.apply(
                        lambda x: add_indictor(df=df, vara=x['vara'], varb=x['varb'], groupa=x['group_a'],
                                               groupb=x['group_b']), axis=1)
                input_list = list(
                    pd.DataFrame(model.params).reset_index().rename({'index': 'woe_variable_name', 0: 'coff'}, axis=1)[
                        'woe_variable_name'])
                try:
                    input_list.remove('const')
                except:
                    pass
                if intercept == True:
                    df['SCORECARD_LR_p_1'] = model.predict(sm.add_constant(df[input_list]))
                else:
                    df['SCORECARD_LR_p_1'] = model.predict(df[input_list])
                return df
            woe_model_re = model_re[1]
            cof = pd.DataFrame(woe_model_re.params).reset_index().rename({'index': 'woe_variable_name', 0: 'coff'},
                                                                         axis=1)
            group_report = self.IGN_f_group_report
            total = group_report.groupby(['variable_name'])['f_N_obs'].sum().reset_index().rename(
                {'f_N_obs': 'total_count'}, axis=1)
            total_bad = group_report.groupby(['variable_name'])['f_N_bad'].sum().reset_index().rename(
                {'f_N_bad': 'total_count_bad'}, axis=1)
            total_num = total['total_count'].max()
            total_bad = total_bad['total_count_bad'].max()
            group_report = pd.merge(group_report, total, how='left', on='variable_name')
            group_report['pct_f_N_obs'] = group_report['f_N_obs'] / group_report['total_count']
            # cof=dd
            variable_df = model_re[2]
            woe_score = pd.merge(variable_df, cof, how='outer', left_on='model_var', right_on='woe_variable_name')
            woe_score['ori_var'][woe_score['woe_variable_name'] == 'const'] = 'const'
            use = woe_score.groupby('ori_var')['coff'].max().reset_index()
            use = list(use[use['coff'].isnull() == False]['ori_var'])
            woe_model_df = woe_score[woe_score['ori_var'].isin(use)].fillna(0)
            woe_model_df = woe_model_df.rename({'group': 'f_group'}, axis=1)
            woe_model_df['variable_name'] = woe_model_df['ori_var']
            # Indicator (helper) interaction variables were added to the model.
            scorecard = pd.merge(woe_model_df, group_report, how='left', on=['variable_name'])[
                ['variable_name', 'f_group', 'var_type', 'label', 'f_N_obs', 'f_Bad_rate', 'woe', 'pct_f_N_obs',
                 'coff']]
            if len(woe_score[(woe_score['var_type'] == 'add') & (woe_score['coff'].isnull() == False)]) > 0:
                # Build the two scorecard rows (indicator on / off) for each interaction.
                add_variabile_df = woe_score[(woe_score['var_type'] == 'add') & (woe_score['coff'].isnull() == False)]
                add_variabile_df_0 = add_variabile_df.copy()
                add_variabile_df_0['label'] = add_variabile_df_0.apply(
                    lambda x: 'f_group_%s != %s or f_group_%s != %s ' % (
                        x['vara'], int(x['group_a']), x['varb'], int(x['group_b'])), axis=1)
                add_variabile_df_0['f_group'] = 0
                add_variabile_df_0['woe'] = 0
                add_variabile_df_0['count'] = total_num - add_variabile_df_0['count']
                # NOTE(review): 'count' was replaced by its complement on the
                # previous line, so this bad-rate formula uses the complement
                # count on both sides — verify the intended arithmetic.
                add_variabile_df_0['badrate'] = (total_bad - add_variabile_df_0['count'] * add_variabile_df_0[
                    'badrate']) / add_variabile_df_0['count']
                add_variabile_df_0['pct_f_N_obs'] = add_variabile_df_0['count'] / total_num
                add_variabile_df['pct_f_N_obs'] = add_variabile_df['count'] / total_num
                add_variabile_df['label'] = add_variabile_df.apply(lambda x: 'f_group_%s = %s and f_group_%s = %s ' % (
                    x['vara'], int(x['group_a']), x['varb'], int(x['group_b'])), axis=1)
                add_variabile_df['f_group'] = 1
                add_variabile_df['woe'] = 1
                add_variabile_df = add_variabile_df.rename(
                    {'model_var': 'variable_name', 'badrate': 'f_Bad_rate', 'count': 'f_N_obs'}, axis=1)
                add_variabile_df_0 = add_variabile_df_0.rename(
                    {'model_var': 'variable_name', 'badrate': 'f_Bad_rate', 'count': 'f_N_obs'}, axis=1)
                add_scorecard = add_variabile_df.append(add_variabile_df_0)[
                    ['variable_name', 'f_group', 'label', 'f_N_obs', 'var_type', 'f_Bad_rate', 'woe', 'pct_f_N_obs',
                     'coff']]
                add_scorecard['f_Bad_rate'] = add_scorecard.apply(lambda x: "%.2f%%" % (x['f_Bad_rate'] * 100), axis=1)
                scorecard = scorecard.append(add_scorecard)
            # PDO-style point scaling: B points per doubling of odds,
            # anchored at par_odds_score_ratio points for par_odds_ratio odds.
            B = self.par_odds_double_score / math.log(2)
            A = self.par_odds_score_ratio - B * math.log(self.par_odds_ratio)
            if self.par_intercept_flag == False:
                add_df=pd.DataFrame([{'variable_name':'const','coff':0}])
                scorecard=scorecard.append(add_df)
            scorecard['SCORE'] = scorecard.apply(
                lambda x: A - B * x['coff'] if x['variable_name'] == 'const' else -B * x['coff'] * x['woe'], axis=1)
            # Shift per-variable minimums to 0 and absorb the shift into const.
            score_adjust = scorecard.groupby('variable_name')['SCORE'].min().reset_index().rename(
                {'SCORE': 'score_min'}, axis=1)
            adjust_num = score_adjust[score_adjust['score_min'] < 0]['score_min'].sum()
            score_adjust['score_min'][score_adjust['variable_name'] == 'const'] = -adjust_num
            f_scorecard = pd.merge(scorecard, score_adjust, how='left', on='variable_name')
            f_scorecard['scorecard'] = f_scorecard['SCORE'] - f_scorecard['score_min']
            f_scorecard['scorecard'] = f_scorecard['scorecard'].apply(lambda x: int(x))
            f_scorecard['coff'] = round(f_scorecard['coff'], 4)
            f_scorecard = f_scorecard.fillna(0)
            f_scorecard['f_N_obs'] = f_scorecard['f_N_obs'].astype('int')
            f_scorecard['pct_f_N_obs'] = f_scorecard.apply(lambda x: "%.2f%%" % (x['pct_f_N_obs'] * 100), axis=1)
            f_scorecard = f_scorecard.sort_values(by=['variable_name', 'f_group'])
            f_scorecard = f_scorecard[f_scorecard['variable_name'] == 'const'].append(f_scorecard[f_scorecard['variable_name'] != 'const'])
            # Score the data sets.
            self.predict_train_data = woe_predict(model=woe_model_re, intercept=self.par_intercept_flag,
                                                  df=self.IGN_grouped_train_data, woe_score=woe_score)
            self.predict_train_data =score_predict(f_scorecard, self.predict_train_data )
            if self.IGN_grouped_valid_data.empty == False:
                self.predict_vaild_data = woe_predict(model=woe_model_re, intercept=self.par_intercept_flag,
                                                      df=self.IGN_grouped_valid_data, woe_score=woe_score)
                self.predict_vaild_data = score_predict(f_scorecard, self.predict_vaild_data)
            else:
                self.predict_vaild_data = pd.DataFrame()
            if self.IGN_grouped_reject_data.empty == False:
                # Only score the reject set when it has every model column.
                woe_vari_list = list(
                    woe_score[(woe_score['coff'].isnull() == False) & (woe_score['var_type'] == 'ori')]['model_var'])
                try:
                    add_variable_a = list(
                        woe_score[(woe_score['coff'].isnull() == False) & (woe_score['var_type'] == 'add')]['vara'])
                    add_variable_b = list(
                        woe_score[(woe_score['coff'].isnull() == False) & (woe_score['var_type'] == 'add')]['varb'])
                    add_variable_list = list(set(['f_group_' + x for x in add_variable_b + add_variable_a]))
                except:
                    add_variable_list = []
                datacol = list(self.IGN_grouped_reject_data.columns)
                not_exist = []
                for va in add_variable_list + woe_vari_list:
                    if (va in datacol) == False:
                        not_exist.append(va)
                if len(not_exist) > 0:
                    tip = Toplevel(self.master)
                    tip.title('警告:')
                    lb = Label(tip, text='下面变量没有在拒绝样本中\n找到%s' % not_exist)
                    lb.pack()
                else:
                    self.predict_reject_data = woe_predict(model=woe_model_re, intercept=self.par_intercept_flag,
                                                           df=self.IGN_grouped_reject_data, woe_score=woe_score)
                    self.predict_reject_data = score_predict(f_scorecard, self.predict_reject_data)
            else:
                self.predict_reject_data = pd.DataFrame()
            if self.IGN_grouped_oot_data.empty == False:
                # Same existence check for the out-of-time set.
                woe_vari_list = list(
                    woe_score[(woe_score['coff'].isnull() == False) & (woe_score['var_type'] == 'ori')]['model_var'])
                try:
                    add_variable_a = list(
                        woe_score[(woe_score['coff'].isnull() == False) & (woe_score['var_type'] == 'add')]['vara'])
                    add_variable_b = list(
                        woe_score[(woe_score['coff'].isnull() == False) & (woe_score['var_type'] == 'add')]['varb'])
                    add_variable_list = list(set(['f_group_' + x for x in add_variable_b + add_variable_a]))
                except:
                    add_variable_list = []
                datacol = list(self.IGN_grouped_oot_data.columns)
                not_exist = []
                for va in add_variable_list + woe_vari_list:
                    if (va in datacol) == False:
                        not_exist.append(va)
                if len(not_exist) > 0:
                    tip1 = Toplevel(self.master)
                    tip1.title('警告:')
                    lb = Label(tip1, text='下面变量没有在OOT样本中\n找到%s' % not_exist)
                    lb.pack()
                else:
                    self.predict_oot_data = woe_predict(model=woe_model_re, intercept=self.par_intercept_flag,
                                                        df=self.IGN_grouped_oot_data, woe_score=woe_score)
                    self.predict_oot_data = score_predict(f_scorecard, self.predict_oot_data)
            else:
                self.predict_oot_data = pd.DataFrame()
        else:
            # Group (dummy) scorecard branch.
            grp_ppp = model_re
            grp_model = grp_ppp[1]
            cof = pd.DataFrame(grp_model.params).reset_index().rename({'index': 'grp_variable_name', 0: 'coff'}, axis=1)
            group_report = self.IGN_f_group_report
            variable_df = grp_ppp[2]
            total = group_report.groupby(['variable_name'])['f_N_obs'].sum().reset_index().rename(
                {'f_N_obs': 'total_count'}, axis=1)
            group_report = pd.merge(group_report, total, how='left', on='variable_name')
            group_report['pct_f_N_obs'] = group_report['f_N_obs'] / group_report['total_count']
            grp_score = pd.merge(variable_df, cof, how='outer', left_on='model_var', right_on='grp_variable_name')
            grp_score['variable'][grp_score['grp_variable_name'] == 'const'] = 'const'
            use = grp_score.groupby('variable')['coff'].max().reset_index()
            use = list(use[use['coff'].isnull() == False]['variable'])
            grp_model_df = grp_score[grp_score['variable'].isin(use)].fillna(0)
            grp_model_df = grp_model_df.rename({'group': 'f_group'}, axis=1)
            # Strip the 'f_group_' prefix to recover the original variable name.
            grp_model_df['variable_name'] = grp_model_df['variable'].apply(lambda x: 'const' if x == 'const' else x[8:])
            scorecard = pd.merge(grp_model_df, group_report, how='left', on=['variable_name', 'f_group'])[
                ['variable_name', 'f_group', 'var_type', 'f_N_obs', 'label', 'f_Bad_rate', 'pct_f_N_obs', 'coff',
                 'woe']]
            if self.par_intercept_flag == False:
                add_df=pd.DataFrame([{'variable_name':'const','coff':0}])
                scorecard=scorecard.append(add_df)
            # NOTE(review): the WOE branch computes A with '- B*log(odds)' but
            # this branch uses '+ B*log(odds)' — confirm which sign is intended.
            B = self.par_odds_double_score / math.log(2)
            A = self.par_odds_score_ratio + B * math.log(self.par_odds_ratio)
            scorecard['SCORE'] = scorecard.apply(
                lambda x: A - B * x['coff'] if x['variable_name'] == 'const' else -B * x['coff'], axis=1)
            score_adjust = scorecard.groupby('variable_name')['SCORE'].min().reset_index().rename(
                {'SCORE': 'score_min'}, axis=1)
            adjust_num = score_adjust[score_adjust['score_min'] < 0]['score_min'].sum()
            score_adjust['score_min'][score_adjust['variable_name'] == 'const'] = -adjust_num
            f_scorecard = pd.merge(scorecard, score_adjust, how='left', on='variable_name')
            f_scorecard['scorecard'] = f_scorecard['SCORE'] - f_scorecard['score_min']
            f_scorecard['scorecard'] = f_scorecard['scorecard'].apply(lambda x: int(x))
            f_scorecard['coff'] = round(f_scorecard['coff'], 4)
            f_scorecard = f_scorecard.fillna(0)
            f_scorecard['f_N_obs'] = f_scorecard['f_N_obs'].astype('int')
            f_scorecard['pct_f_N_obs'] = f_scorecard.apply(lambda x: "%.2f%%" % (x['pct_f_N_obs'] * 100), axis=1)
            f_scorecard = f_scorecard.sort_values(by=['variable_name', 'f_group'])
            f_scorecard = f_scorecard[f_scorecard['variable_name'] == 'const'].append(
                f_scorecard[f_scorecard['variable_name'] != 'const'])
            # Score the data sets.
            def grp_predict(model, intercept, df):
                # Predict P(bad) from the group-dummy model.
                input_list = list(
                    pd.DataFrame(model.params).reset_index().rename({'index': 'grp_variable_name', 0: 'coff'}, axis=1)[
                        'grp_variable_name'])
                try:
                    input_list.remove('const')
                except:
                    pass
                if intercept == True:
                    df['SCORECARD_LR_p_1'] = model.predict(sm.add_constant(df[input_list]))
                else:
                    df['SCORECARD_LR_p_1'] = model.predict(df[input_list])
                return df
            def group_data_pre(df, f_scorecard):
                # Expand each grouped variable into 0/1 dummy columns
                # f_group_<var>_<group> expected by the model.
                for varable in list(set(f_scorecard[f_scorecard['variable_name'] != 'const']['variable_name'])):
                    grouplist = list(set(f_scorecard[f_scorecard['variable_name'] == varable]['f_group']))
                    for value in grouplist:
                        df['f_group_%s_%s' % (varable, int(value))] = df['f_group_%s' % varable].apply(
                            lambda x: 1 if x == int(value) else 0)
                        df['f_group_%s_%s' % (varable, int(value))] = df[
                            'f_group_%s_%s' % (varable, int(value))].astype('int8')
                return df
            self.predict_train_data = grp_predict(model=grp_model, intercept=self.par_intercept_flag,
                                                  df=group_data_pre(self.IGN_grouped_train_data, f_scorecard))
            self.predict_train_data = score_predict(f_scorecard, self.predict_train_data)
            if self.IGN_grouped_valid_data.empty == False:
                # NOTE(review): unlike the WOE branch, the validation set is not
                # score_predict-ed here and is never reset to an empty frame
                # when absent — confirm this asymmetry is intended.
                self.predict_vaild_data = grp_predict(model=grp_model, intercept=self.par_intercept_flag,
                                                      df=group_data_pre(self.IGN_grouped_valid_data, f_scorecard))
            if self.IGN_grouped_reject_data.empty == False:
                grp_vari_list = list(set(
                    grp_score[(grp_score['coff'].isnull() == False) & (grp_score['var_type'] == 'ori')]['variable']))
                datacol = list(self.IGN_grouped_reject_data.columns)
                not_exist = []
                for va in grp_vari_list:
                    if (va in datacol) == False:
                        not_exist.append(va)
                if len(not_exist) > 0:
                    tip2 = Toplevel(self.master)
                    tip2.title('警告:')
                    self.text = StringVar()
                    self.label_list = '下面变量没有在拒绝样本中找到%s' % not_exist
                    lb = Label(tip2, textvariable=self.text)
                    lb.pack()
                else:
                    self.predict_reject_data = grp_predict(model=grp_model, intercept=self.par_intercept_flag,
                                                           df=group_data_pre(self.IGN_grouped_reject_data, f_scorecard))
                    self.predict_reject_data = score_predict(f_scorecard, self.predict_reject_data)
            if self.IGN_grouped_oot_data.empty == False:
                grp_vari_list = list(
                    set(grp_score[(grp_score['coff'].isnull() == False) & (grp_score['var_type'] == 'ori')][
                            'variable']))
                datacol = list(self.IGN_grouped_oot_data.columns)
                not_exist = []
                for va in grp_vari_list:
                    if (va in datacol) == False:
                        not_exist.append(va)
                if len(not_exist) > 0:
                    tip3 = Toplevel(self.master)
                    tip3.title('警告:')
                    self.text = StringVar()
                    self.label_list = '下面变量没有在OOT样本中找到%s' % not_exist
                    lb = Label(tip3, textvariable=self.text)
                    lb.pack()
                else:
                    self.predict_oot_data = grp_predict(model=grp_model, intercept=self.par_intercept_flag,
                                                        df=group_data_pre(self.IGN_grouped_oot_data, f_scorecard))
                    self.predict_oot_data = score_predict(f_scorecard, self.predict_oot_data)
        return f_scorecard
    def func_lasso_df(self, variable_list, train_target, predict_train_data, predict_vaild_data, n_job):
        """Trace an L1 (lasso) regularisation path over the WOE variables.

        Fits an L1-penalised logistic regression for a shuffled grid of C
        values, recording per-fit coefficients, the number of non-zero
        coefficients k, the train log-likelihood (llr) and, when a validation
        frame is given, the validation log-likelihood.  Work is split across
        ``n_job`` parallel workers.  Returns one DataFrame row per C with
        AIC/BIC columns derived from k and llr.
        """
        woe_list = ['woe_' + x for x in variable_list]
        x_train = predict_train_data[woe_list]
        y_train = predict_train_data[train_target]
        # Grid of penalty strengths, shuffled so each worker's chunk takes
        # roughly the same time.
        tlist = [num / 100 for num in range(1, 100, 1)] + [num / 5000 for num in range(1, 50, 1)] + [num for num in
                                                                                                     range(1, 20, 1)]
        random.shuffle(tlist)
        lent = math.ceil(len(tlist) / n_job)
        def func(num):
            # Worker: fit the num-th contiguous chunk of tlist.
            # summ = pd.DataFrame()
            summ = []
            for l in range((num - 1) * lent, min(len(tlist), num * lent)):
                p_c = tlist[l]
                model = LogisticRegression(penalty='l1', C=p_c, solver='liblinear')
                h = model.fit(x_train, y_train)
                # NOTE(review): 'temp' aliases predict_train_data, so the
                # lasso_p_pro / llr helper columns are written into the shared
                # frame — confirm this mutation is acceptable.
                temp = predict_train_data
                temp['lasso_p_pro'] = pd.DataFrame(h.predict_proba(x_train))[1]
                # Per-row Bernoulli log-likelihood contribution.
                temp['llr'] = np.log(temp['lasso_p_pro']) * temp[train_target] + np.log(1 - temp['lasso_p_pro']) * (
                        1 - temp[train_target])
                llr = temp['llr'].sum()
                var_num = []
                for cof in h.coef_[0]:
                    if cof != 0:
                        var_num.append(cof)
                k = len(var_num)  # number of surviving (non-zero) coefficients
                num = list(h.coef_[0])  # NOTE: shadows the chunk index (range already computed)
                var = woe_list
                # add_ = pd.DataFrame(dict(zip(var, num)), index=[1])
                add_ = dict(zip(var, num))
                add_['C'] = p_c
                add_['k'] = k
                add_['llr'] = llr
                try:
                    if predict_vaild_data.empty == False:
                        temp_v = predict_vaild_data
                        x_train_v = predict_vaild_data[woe_list]
                        temp_v['lasso_p_pro'] = pd.DataFrame(h.predict_proba(x_train_v))[1]
                        temp_v['llr'] = np.log(temp_v['lasso_p_pro']) * temp_v[train_target] + np.log(
                            1 - temp_v['lasso_p_pro']) * (1 - temp_v[train_target])
                        llr_v = temp_v['llr'].sum()
                        add_['llr_v'] = llr_v
                except:
                    pass
                # summ = summ.append(add_)
                summ.append(add_)
            summ_d = pd.DataFrame(summ)
            return summ_d
        scores_with_candidates = Parallel(n_jobs=n_job, max_nbytes=None, verbose=5)(
            delayed(func)(num) for num in range(1, 1 + n_job))
        # Concatenate the per-worker frames.
        score_df = pd.DataFrame()
        for tt in scores_with_candidates:
            sc = pd.DataFrame(tt)
            score_df = score_df.append(sc)
        # Information criteria from k and the log-likelihoods.
        score_df['aic'] = score_df.apply(lambda x: 2 * x['k'] - 2 * x['llr'], axis=1)
        score_df['bic'] = score_df.apply(lambda x: math.log(len(predict_train_data)) * x['k'] - 2 * x['llr'],
                                         axis=1)
        try:
            score_df['aic_v'] = score_df.apply(lambda x: 2 * x['k'] - 2 * x['llr_v'], axis=1)
            score_df['bic_v'] = score_df.apply(lambda x: math.log(len(predict_train_data)) * x['k'] - 2 * x['llr_v'],
                                               axis=1)
        except Exception as e:
            pass
        return score_df
def func_var_clus(self, variable_list, predict_train_data, scorecarddf):
variable_list = ['woe_' + x for x in variable_list]
df = predict_train_data[variable_list]
clus = VarClus()
clus.decompose(dataframe=df)
model_list = ['woe_' + x for x in list(set(scorecarddf['variable_name']))]
h = clus.print_cluster_structure(model_variable=model_list, h_space=5)
h = '算法来自https://github.com/jingmin1987/variable-clustering \n ****为入模变量 ---为未如模变量 \n\n' + h
return h
def reult_show_only(self, result_page):
scorecard_result_ui(mainframe=result_page, project_path=self.project_path, node_name=self.node_name,
predict_train_data=self.predict_train_data, predict_vaild_data=self.predict_vaild_data,
predict_oot_data=self.predict_oot_data, predict_reject_data=self.predict_reject_data,
train_target=self.target_train, oot_target=self.target_oot,
reject_target=self.target_reject,
train_time_id=self.timeid_train, oot_time_id=self.timeid_oot,
reject_time_id=self.timeid_reject,
record_list=self.model_ppp[0], model=self.model_ppp[1], scorecarddf=self.f_scorecard,
f_group_report=self.IGN_f_group_report
, variable_list=self.vari_list, lasso_df=self.lasso_df,
model_var_type=self.par_variable_type, var_clus=self.var_clus)
    def add_delet_var(self, record_list, input_model, model_variable_df, modify_var, flag, par_variable_type, var_list,
                      n_job, predict_train_data, target_train, predict_vaild_data, par_intercept_flag):
        """Evaluate the effect of toggling each candidate variable in the current model.

        For every candidate, refit the logistic model with that variable toggled
        (added if absent, removed if present) and record the max p-value plus the
        train/validation AUC, so each one-step change can be compared against the
        current model ('current_model' row).

        Args:
            record_list: history of modelling steps (returned unchanged).
            input_model: fitted statsmodels Logit result of the current model.
            model_variable_df: mapping of raw variables to model (dummy) columns;
                only used in GRP mode.
            modify_var: variable the user asked to add/delete.
            flag: 'add', 'del', or anything else for "no change".
            par_variable_type: 'WOE' for WOE-encoded inputs, otherwise GRP
                (one-hot encoded group variables).
            var_list: all candidate raw variable names.
            n_job: number of joblib workers; candidates are split into n_job chunks.
            predict_train_data: training frame.
            target_train: name of the binary target column.
            predict_vaild_data: validation frame (may be empty).
            par_intercept_flag: whether to fit an intercept.

        Returns:
            Tuple (score_df, record_list, result, model_variable_df) where score_df
            has one row per candidate plus a 'current_model' baseline row, and
            result is the refit of the current variable set.
        """
        # Small modal "computing..." window so the Tk UI shows progress.
        error2_f = Toplevel(self.master)
        screenwidth = self.master.winfo_screenwidth()
        screenheight = self.master.winfo_screenheight()
        error2_f.geometry('%dx%d+%d+%d' % (150, 100, (screenwidth - 150) / 2, (screenheight - 100) / 2))
        L2 = Label(error2_f, text="计算中。。。")
        L2.grid()
        self.master.update()
        if par_variable_type == 'WOE':
            cof = pd.DataFrame(input_model.params).reset_index().rename({'index': 'woe_variable_name', 0: 'coff'},
                                                                        axis=1)
            # Candidate pool = all WOE columns plus whatever is already in the model.
            all_list = list(set(['woe_' + x for x in var_list]) | set(cof['woe_variable_name']))
            if flag == 'add':
                selected_list = list(cof['woe_variable_name']) + [modify_var]
            elif flag == 'del':
                selected_list = list(set(cof['woe_variable_name']) - set([modify_var]))
            else:
                selected_list = list(set(cof['woe_variable_name']))
            try:
                selected_list.remove('const')
            except:
                pass
            try:
                all_list.remove('const')
            except:
                pass
            # Chunk size per parallel worker.
            lent = math.ceil(len(all_list) / n_job)
            def func(num):
                # Worker: toggle each candidate in chunk `num` and refit once per candidate.
                # summ = pd.DataFrame()
                summ = []
                for l in range((num - 1) * lent, min(len(all_list), num * lent)):
                    candidate = all_list[l]
                    # for candidate in all_list:
                    if candidate in selected_list:
                        model_list = list(set(selected_list) - set([candidate]))
                    else:
                        model_list = list(set(selected_list + [candidate]))
                    if par_intercept_flag:  # whether to fit an intercept
                        logit_mod = sm.Logit(predict_train_data[target_train],
                                             sm.add_constant(predict_train_data[model_list]))
                    else:
                        logit_mod = sm.Logit(predict_train_data[target_train], predict_train_data[model_list])
                    result = logit_mod.fit(method='lbfgs', maxiter=100)
                    pvalue = max(result.pvalues)
                    fpr_t, tpr_t, threshold_t = roc_curve(predict_train_data[target_train],
                                                          result.predict())  # TPR/FPR on the training set
                    roc_auc_t = auc(fpr_t, tpr_t)  # training AUC
                    if predict_vaild_data.empty == False:
                        if par_intercept_flag:
                            pre_v = result.predict(sm.add_constant(predict_vaild_data[model_list]))
                        else:
                            pre_v = result.predict(predict_vaild_data[model_list])
                        fpr_v, tpr_v, threshold_v = roc_curve(predict_vaild_data[target_train], pre_v)  # TPR/FPR on the validation set
                        roc_auc_v = auc(fpr_v, tpr_v)  # validation AUC
                    else:
                        roc_auc_v = None
                    summ.append({'var': candidate, 'pvalue': pvalue, 'auc_t': roc_auc_t, 'auc_v': roc_auc_v})
                summ_df = pd.DataFrame(summ)
                return summ_df
            scores_with_candidates = Parallel(n_jobs=n_job, max_nbytes=None, verbose=5)(
                delayed(func)(num) for num in range(1, 1 + n_job))
            score_df = pd.DataFrame()
            for tt in scores_with_candidates:
                sc = pd.DataFrame(tt)
                score_df = score_df.append(sc)
            # Baseline: refit the current (unmodified) variable set.
            if par_intercept_flag:  # whether to fit an intercept
                logit_mod = sm.Logit(predict_train_data[target_train],
                                     sm.add_constant(predict_train_data[selected_list]))
            else:
                logit_mod = sm.Logit(predict_train_data[target_train], predict_train_data[selected_list])
            result = logit_mod.fit(method='lbfgs', maxiter=100)
            var = 'current_model'
            pvalue = max(result.pvalues)
            fpr_t, tpr_t, threshold_t = roc_curve(predict_train_data[target_train], result.predict())  # TPR/FPR on the training set
            roc_auc_t = auc(fpr_t, tpr_t)  # training AUC
            if predict_vaild_data.empty == False:
                if par_intercept_flag:
                    pre_v = result.predict(sm.add_constant(predict_vaild_data[selected_list]))
                else:
                    pre_v = result.predict(predict_vaild_data[selected_list])
                fpr_v, tpr_v, threshold_v = roc_curve(predict_vaild_data[target_train], pre_v)  # TPR/FPR on the validation set
                roc_auc_v = auc(fpr_v, tpr_v)  # validation AUC
            else:
                roc_auc_v = None
            current = pd.DataFrame([{'var': var, 'pvalue': pvalue, 'auc_t': roc_auc_t, 'auc_v': roc_auc_v}])
            score_df = score_df.append(current)
            score_df['use_or'] = score_df['var'].apply(lambda x: 'Y' if x in selected_list else 'N')
        else:
            # GRP mode: expand each grouped variable into one-hot dummy columns,
            # dropping the most frequent level as the reference category.
            group_varlist = ['f_group_' + x for x in var_list]
            df = predict_train_data.copy()
            df_v = predict_vaild_data
            for varable in group_varlist:
                mm = len(df[varable].unique())
                grouplist = list(
                    df[varable].groupby(df[varable]).agg({'count'}).reset_index().sort_values(by='count')[varable][
                    0:mm - 1])
                for value in grouplist:
                    df['%s_%s' % (varable, int(value))] = df[varable].apply(lambda x: 1 if x == value else 0)
                    df['%s_%s' % (varable, int(value))] = df['%s_%s' % (varable, int(value))].astype('int8')
                    if predict_vaild_data.empty == False:
                        df_v['%s_%s' % (varable, int(value))] = df_v[varable].apply(lambda x: 1 if x == value else 0)
                        df_v['%s_%s' % (varable, int(value))] = df_v['%s_%s' % (varable, int(value))].astype('int8')
            grp_model = input_model
            cof = pd.DataFrame(grp_model.params).reset_index().rename({'index': 'grp_variable_name', 0: 'coff'}, axis=1)
            variable_df = model_variable_df
            grp_score = pd.merge(variable_df, cof, how='outer', left_on='model_var', right_on='grp_variable_name')
            # NOTE(review): chained indexing triggers pandas SettingWithCopyWarning;
            # .loc would be the safe spelling here.
            grp_score['variable'][grp_score['grp_variable_name'] == 'const'] = 'const'
            use = grp_score.groupby('variable')['coff'].max().reset_index()
            use_t = list(use[use['coff'].isnull() == False]['variable'])
            if flag == 'add':
                use = use_t + [modify_var]
            elif flag == 'del':
                use = list(set(use_t) - set([modify_var]))
            else:
                use = use_t
            try:
                use.remove(('const'))
            except:
                pass
            selected = ['f_group_' + x for x in use]
            result_list = []
            if selected != []:
                # .sum() over lists of dummy-column names concatenates the lists.
                group_variable_select = variable_df[variable_df['variable'].isin(selected)]['model_var'].sum()
            else:
                group_variable_select = []
            for candidate in group_varlist:
                group_variable_candidate = variable_df[variable_df['variable'].isin([candidate])]['model_var'].sum()
                if candidate in selected:
                    model_list = list(set(group_variable_select) - set(group_variable_candidate))
                else:
                    model_list = group_variable_select + group_variable_candidate
                if par_intercept_flag:  # whether to fit an intercept
                    logit_mod = sm.Logit(df[target_train], sm.add_constant(df[model_list]))
                else:
                    logit_mod = sm.Logit(df[target_train], df[model_list])
                result = logit_mod.fit(method='lbfgs', maxiter=100)
                var = candidate
                pvalue = max(result.pvalues)
                fpr_t, tpr_t, threshold_t = roc_curve(predict_train_data[target_train], result.predict())  # TPR/FPR on the training set
                roc_auc_t = auc(fpr_t, tpr_t)  # training AUC
                if predict_vaild_data.empty == False:
                    if par_intercept_flag:
                        pre_v = result.predict(sm.add_constant(df_v[model_list]))
                    else:
                        pre_v = result.predict(df_v[model_list])
                    fpr_v, tpr_v, threshold_v = roc_curve(predict_vaild_data[target_train], pre_v)  # TPR/FPR on the validation set
                    roc_auc_v = auc(fpr_v, tpr_v)  # validation AUC
                else:
                    roc_auc_v = None
                result_list.append({'var': var, 'pvalue': pvalue, 'auc_t': roc_auc_t, 'auc_v': roc_auc_v})
            # Baseline: refit the current (unmodified) variable set.
            if par_intercept_flag:  # whether to fit an intercept
                logit_mod = sm.Logit(df[target_train], sm.add_constant(df[group_variable_select]))
            else:
                logit_mod = sm.Logit(df[target_train], df[group_variable_select])
            result = logit_mod.fit(method='lbfgs', maxiter=100)
            var = 'current_model'
            pvalue = max(result.pvalues)
            fpr_t, tpr_t, threshold_t = roc_curve(predict_train_data[target_train], result.predict())  # TPR/FPR on the training set
            roc_auc_t = auc(fpr_t, tpr_t)  # training AUC
            if predict_vaild_data.empty == False:
                if par_intercept_flag:
                    pre_v = result.predict(sm.add_constant(df_v[group_variable_select]))
                else:
                    pre_v = result.predict(df_v[group_variable_select])
                fpr_v, tpr_v, threshold_v = roc_curve(predict_vaild_data[target_train], pre_v)  # TPR/FPR on the validation set
                roc_auc_v = auc(fpr_v, tpr_v)  # validation AUC
            else:
                roc_auc_v = None
            result_list.append({'var': var, 'pvalue': pvalue, 'auc_t': roc_auc_t, 'auc_v': roc_auc_v})
            score_df = pd.DataFrame(result_list)
            score_df['use_or'] = score_df['var'].apply(lambda x: 'Y' if x in ['f_group_' + x for x in use] else 'N')
        # Rename to the Chinese column headers shown in the UI and put the
        # 'current_model' baseline row first.
        score_df = score_df.rename(
            {'var': '变量名称', 'pvalue': '调整后模型最大p值', 'auc_t': '调整后训练集auc', 'auc_v': '调整后验证集auc', 'use_or': '是否在模型中'},
            axis=1)
        score_df = score_df[['变量名称', '调整后训练集auc', '调整后验证集auc', '调整后模型最大p值', '是否在模型中']]
        score_df = score_df.sort_values(by=['是否在模型中', '调整后训练集auc'], ascending=[False, False])
        score_df = score_df[score_df['变量名称'] == 'current_model'].append(score_df[score_df['变量名称'] != 'current_model'])
        try:
            error2_f.destroy()
        except:
            pass
        return score_df, record_list, result, model_variable_df
| [
"noreply@github.com"
] | noreply@github.com |
50b38f5aa112634f69a1964367b345d28107fa78 | 62153e297ca84bf9d76eef56b28408f5337113f9 | /tasks/migrations/0005_announcements_picture.py | 470373d027006370d1516280f097642bee71c5a1 | [] | no_license | zarif007/HRTaskManager | 22b72c80d2cac99fa9d3f7f0cfd480cb832ff910 | 4c7e7f04b82f138a7177f659bb347c7e189c6220 | refs/heads/main | 2023-06-23T22:05:33.812024 | 2021-07-31T19:55:11 | 2021-07-31T19:55:11 | 373,304,992 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 3.2.4 on 2021-07-26 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional ``picture`` image field
    to the ``announcements`` model."""
    dependencies = [
        ('tasks', '0004_announcements'),
    ]
    operations = [
        migrations.AddField(
            model_name='announcements',
            name='picture',
            # blank/null make the image optional; upload_to='' stores files
            # directly under MEDIA_ROOT.
            field=models.ImageField(blank=True, null=True, upload_to=''),
        ),
    ]
| [
"zarifhuq786@gmail.com"
] | zarifhuq786@gmail.com |
933571926c8d8cf31a0d5b6659cbb7ef47e6d448 | ca1c5071ab4b265ea2e2462bea39f214b5cf2104 | /SisVentasRivera/SisVentasRivera/urls.py | 5866cb5c0ba9e3ab28bc81fe3de62824bcbef5c5 | [] | no_license | GhianCoForFree/SistemaWebVentas | b8f51d391bf985b443db71c985c41e7d8153ea52 | a1764a79054cdf13f8846bb79639a45f190b1fd6 | refs/heads/master | 2016-09-08T01:55:31.413697 | 2014-07-30T02:31:06 | 2014-07-30T02:31:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
# Patterns are tried in order (Django 1.x `patterns()` API with string views).
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('SisVentasRivera.Apps.ventas.urls')),
    url(r'^$','django.contrib.auth.views.login',{'template_name':'Principal/index.html'},name="login"),
    # NOTE(review): this regex duplicates r'^$' above, so the "salir" route can
    # never match by URL; it appears usable only via reverse()/name lookup —
    # confirm and give logout its own path (e.g. r'^logout/$').
    url(r'^$','django.contrib.auth.views.logout_then_login',name="salir"),
    url(r'^accounts/', include('registration.urls')),
)
)
| [
"ghiancoremix2011@hotmail.com"
] | ghiancoremix2011@hotmail.com |
3b42cab023c4dd9c67e936c89f7d0e9e1a725a1b | 3556a5a253866cdc2d0e53731656adbbc840736c | /test/test_random.py | ad4ca69e9db42d86af188152386885dc86a429b1 | [
"MIT"
] | permissive | linyc74/ngslite | 96066666d485edd25304f92edf8f2b4796b1a444 | 3705298be2fa1c87a7e456a73a872cd0fb6ed9e6 | refs/heads/master | 2021-08-06T19:46:57.597093 | 2021-07-03T22:49:32 | 2021-07-03T22:49:32 | 158,335,635 | 1 | 1 | MIT | 2021-07-03T22:49:32 | 2018-11-20T05:30:21 | Python | UTF-8 | Python | false | false | 776 | py | import shutil
from ngslite.random import random_sample
from .setup import setup_dirs, TestCase
class TestRandom(TestCase):
    """Tests for ngslite.random.random_sample on a FASTA input."""

    def setUp(self):
        """Create fresh input/work/output directories for this test module."""
        self.indir, self.workdir, self.outdir = setup_dirs(__file__)

    def tearDown(self):
        """Remove the scratch directories created in setUp."""
        for scratch in (self.workdir, self.outdir):
            shutil.rmtree(scratch)

    def test_fa(self):
        """Sampling 10% of NC_000866.fa should keep exactly 27 records."""
        fraction = 0.1
        source = f'{self.indir}/NC_000866.fa'
        sampled = f'{self.outdir}/NC_000866_{fraction}.fa'
        random_sample(
            file=source,
            fraction=fraction,
            output=sampled)
        with open(sampled) as handle:
            n_records = sum(1 for line in handle if line.startswith('>'))
        self.assertEqual(27, n_records)
| [
"yclin.python@gmail.com"
] | yclin.python@gmail.com |
e6de32f4ceb357cd73fac610304e0c9d175ffc43 | e3afdbdf6bfb9951b8a8d59fe0c65e848be5e311 | /users/forms.py | df14bd12d491beef7bee8581f25e4b3c8f2729e5 | [] | no_license | Goulish75/matProject | b028b2c0046759164a22e3a30539df97ff34b13c | fd34dc88b71288e74db2158e1de00c8567d72892 | refs/heads/master | 2023-04-08T16:51:48.504820 | 2021-04-13T13:41:09 | 2021-04-13T13:41:09 | 356,962,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | from django import forms
from django.contrib.auth.models import User
class UserCreateForm(forms.ModelForm):
    """Registration form for Django's built-in User, with Bootstrap styling
    applied to every widget and a masked password input."""

    def __init__(self, *args, **kwargs):
        super(UserCreateForm, self).__init__(*args, **kwargs)
        # Give each field its own attrs dict so widgets don't share state.
        for field_name in self.fields:
            self.fields[field_name].widget.attrs = {"class": "form-control"}
        # Replace the default text widget so the password is masked.
        self.fields['password'].widget = forms.PasswordInput(attrs={"class": "form-control"})

    class Meta:
        model = User
        fields = ["username", "first_name", "last_name", "password"]
class LoginForm(forms.Form):
    """Simple username/password login form styled with Bootstrap."""
    username = forms.CharField(
        max_length=50,
        required=True,
        widget=forms.TextInput(attrs={"class": "form-control"}),
    )
    password = forms.CharField(
        max_length=50,
        widget=forms.PasswordInput(attrs={"class": "form-control"}),
    )
| [
"furkancatal6060@gmail.com"
] | furkancatal6060@gmail.com |
3734e13259f4b245375820776dc260e0f60a01d5 | 2455062787d67535da8be051ac5e361a097cf66f | /Producers/BSUB/TrigProd_amumu_a5_dR5/trigger_amumu_producer_cfg_TrigProd_amumu_a5_dR5_129.py | 7569fce71cd2fd69603dc29210d1978acc440b9c | [] | no_license | kmtos/BBA-RecoLevel | 6e153c08d5ef579a42800f6c11995ee55eb54846 | 367adaa745fbdb43e875e5ce837c613d288738ab | refs/heads/master | 2021-01-10T08:33:45.509687 | 2015-12-04T09:20:14 | 2015-12-04T09:20:14 | 43,355,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,360 | py | import FWCore.ParameterSet.Config as cms
# CMSSW PAT-trigger configuration: match slimmed muons to the
# HLT_Mu16_TkMu0_dEta18_Onia* trigger objects and write a MINIAODSIM file.
process = cms.Process("PAT")
#process.load("BBA/Analyzer/bbaanalyzer_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load('Configuration.EventContent.EventContent_cff')
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.load("Configuration.StandardSequences.MagneticField_cff")
####################
#  Message Logger
####################
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
## switch to uncheduled mode
process.options.allowUnscheduled = cms.untracked.bool(True)
# NOTE: this second assignment overrides the -1 (all events) above with 500.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(500)
)
####################
#  Input File List
####################
# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('root://eoscms//eos/cms/store/user/ktos/RECO_Step3_amumu_a5/RECO_Step3_amumu_a5_129.root'),
    secondaryFileNames = cms.untracked.vstring()
)
############################################################
# Defining matching in DeltaR, sorting by best DeltaR
############################################################
process.mOniaTrigMatch = cms.EDProducer("PATTriggerMatcherDRLessByR",
    src     = cms.InputTag( 'slimmedMuons' ),
    matched = cms.InputTag( 'patTrigger' ),          # selections of trigger objects
    matchedCuts = cms.string( 'type( "TriggerMuon" ) && path( "HLT_Mu16_TkMu0_dEta18_Onia*")' ), # input does not yet have the 'saveTags' parameter in HLT
    maxDPtRel   = cms.double( 0.5 ),                 # no effect here
    maxDeltaR   = cms.double( 0.3 ),                 #### selection of matches
    maxDeltaEta = cms.double( 0.2 ),                 # no effect here
    resolveAmbiguities    = cms.bool( True ),        # definition of matcher output
    resolveByMatchQuality = cms.bool( True )         # definition of matcher output
)
# talk to output module
process.out = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string("file:RECO_Step3_amumu_a5_TrigProd_129.root"),
    outputCommands = process.MINIAODSIMEventContent.outputCommands
)
# Keep only the collections needed downstream (slimmed objects, taus,
# trigger summary/results and the matcher output defined above).
process.out.outputCommands += [ 'drop *_*_*_*',
                                'keep *_*slimmed*_*_*',
                                'keep *_pfTausEI_*_*',
                                'keep *_hpsPFTauProducer_*_*',
                                'keep *_hltTriggerSummaryAOD_*_*',
                                'keep *_TriggerResults_*_HLT',
                                'keep *_patTrigger*_*_*',
                                'keep *_prunedGenParticles_*_*',
                                'keep *_mOniaTrigMatch_*_*'
                                ]
################################################################################
# Running the matching and setting the the trigger on
################################################################################
from PhysicsTools.PatAlgos.tools.trigTools import *
switchOnTrigger( process ) # This is optional and can be omitted.
switchOnTriggerMatching( process, triggerMatchers = [ 'mOniaTrigMatch'
                                                      ])
process.outpath = cms.EndPath(process.out)
| [
"kmtos@ucdavis.edu"
] | kmtos@ucdavis.edu |
fd0816dae9157631a8d5823e89d9650f7806a979 | 75ce5b7fee397fe4e67ed15a58f4cd42e0f8de9f | /PythonMasterclass/HelloWorld/Strings.py | 3b45aebdb6a60836687406b37ebae58981f463c5 | [] | no_license | lukbast/stuff | 7fd03b7e035394802c307682a25621dfd667960b | 160e1d77d1b592fac099b9c7139fb4e2f7f8dbbe | refs/heads/main | 2023-08-06T21:39:55.334812 | 2021-09-23T17:37:47 | 2021-09-23T17:37:47 | 409,684,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | greeting = 'Hello'
# Tutorial script demonstrating Python string literals, escapes, slicing
# (mostly in commented-out examples) and str.format vs f-strings.
name = 'Bruce'
# print(greeting+' ' + name)
# sample comment
# name2 = input('Please enter your name')
# print(greeting + ' ' + name2)
# splitString = 'This string has been\nsplit\nover\nseveral\nlines'
# print(splitString)
# Tab escape sequences.
tabbedStrings = '1\t2\t3\t4\t5'
print(tabbedStrings)
# Three ways of quoting a string that contains both ' and ".
print('The pet shop owner said "No, no, \'e \'s uh,...he\'s resting".')
# or
print("The pet shop owner said \"No, no, 'e 's uh,...he's resting\".")
# or
print('''The pet shop owner said "Oh, no, 'e 's uh,...he's resting".''')
anotherSplitString = '''This string has been
split over
several
lines'''
# print(anotherSplitString)
# parrot = 'Norwegian blue'
# print(parrot[3])
# print(parrot[len(parrot)-1])
# print()
# print(parrot[3])
# print(parrot[6])
# print(parrot[8])
#
# print()
#
# print(parrot[-11])
# print(parrot[-1])
# print()
# print(parrot[-11])
# print(parrot[-8])
# print(parrot[-6])
#
# print(parrot[10:len(parrot)])
num = 666
# f-string vs str.format interpolation.
wordd = f'fbfbff {num} bbfgbfbfgbg ngngngng'
word = "ssddsdsvs {0} fnfgfngfn {1:} fnfggff {2:.3f}".format(1, 2+2, 11/7)
print(wordd)
| [
"jaimperator99@gmail.com"
] | jaimperator99@gmail.com |
cb810715b2d6581046532bb0947961ad21cdf2a1 | 65898c45b59b5c5d5ee523f69ab09e139c30cd0a | /application.py | 052e8a638d333010e212ca26f8a99ac1431648f9 | [] | no_license | calumbell/schwach | ffe4ada2de07185bc6d77ecaa00b157917e85214 | f248d7940a19be68876c86b4a06bd48a5df907a4 | refs/heads/master | 2022-01-19T09:27:33.945532 | 2019-05-24T08:53:06 | 2019-05-24T08:53:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,802 | py | import os
import sys
from flask import Flask, render_template, request, url_for
from flask_socketio import SocketIO, emit
# In-memory chat state: channel name -> {"name": str, "msgs": [newest-first]}
channels = {}
# Maximum number of messages retained per channel.
maxMsgPerCh = 100
# Monotonically increasing id assigned to each message.
messageID = 0
# Initialise Flask and SocketIO
app = Flask(__name__)
app.config["SECRET_KEY"] = os.getenv("SECRET_KEY")
socketio = SocketIO(app)
@app.route("/")
def index():
    """Serve the single-page chat client."""
    return render_template("index.html")
@socketio.on('get id count')
def getIdCount():
    """Report the current global message-id counter to the requesting client."""
    global messageID
    payload = {"id": messageID}
    emit('msg id from server', payload)
@socketio.on('load channel list')
def loadChannelList():
    """On a 'load channel list' event, emit a 'create channel' for every
    channel held in server memory. Fired as soon as SocketIO connects so a
    new window picks up channels created elsewhere.
    """
    for channel_name in channels:
        emit('create channel', {"channel": channel_name})
@socketio.on('submit new channel')
def newChannel(data):
    """On 'submit new channel', register the channel (name plus an empty
    message list) and broadcast its creation to all connected clients.
    """
    name = data["channel"]
    channels[name] = {"name": name, "msgs": []}
    emit('create channel', data, broadcast=True)
@socketio.on('submit message')
def msg(data):
    """Store an incoming chat message, assign it a unique id, cap the
    channel history at maxMsgPerCh entries, and broadcast it to all clients.
    """
    channel = data["channel"]
    # Newest messages live at the front of the list.
    channels[channel]["msgs"].insert(0, data)

    # Increment message ID and add it to message data.
    global messageID
    messageID += 1
    data["id"] = messageID

    # Cap the history by dropping the oldest message (the list tail).
    # BUGFIX: the previous code did `pop(maxMsgPerCh)` when
    # len >= maxMsgPerCh, which raises IndexError once the list holds
    # exactly maxMsgPerCh items (valid indices end at maxMsgPerCh - 1).
    if len(channels[channel]["msgs"]) > maxMsgPerCh:
        channels[channel]["msgs"].pop()

    # Tell clients that this message wasn't loaded from server-side memory.
    data["fromMem"] = False
    # Broadcast message to all users.
    emit('new message', data, broadcast=True)
@socketio.on('delete message')
def deleteMessage(data):
    """Remove the message whose id matches data["id"] from its channel's
    history, then tell every client to drop it from the DOM.
    """
    history = channels[data["channel"]]["msgs"]
    for index, message in enumerate(history):
        if message["id"] == data["id"]:
            del history[index]
            break
    emit('remove message from DOM', data, broadcast=True)
@socketio.on('open channel')
def openChannel(data):
    """Replay a channel's stored history to the requesting client,
    oldest message first (messages are stored newest-first).
    """
    channelName = data["channel"]
    # Make sure that channel exists in memory.
    if channelName in channels:
        history = channels[channelName]["msgs"]
        # Walk the newest-first list backwards so the oldest is emitted first.
        # (Replaces the previous index loop, which also left a dead local
        # `params` behind via a chained assignment.)
        for message in reversed(history):
            # Mark the message as replayed from server-side memory.
            message["fromMem"] = True
            emit('new message', message)
if __name__ == '__main__':
    # Dev entry point: run under the SocketIO server (not plain app.run).
    socketio.run(app, debug=True)
| [
"calumabell@googlemail.com"
] | calumabell@googlemail.com |
be4fa32415684c642d29d2fe12a21a6f8942bcf4 | 734ed763b6275da70ba5519ebe498f7cb3cafd14 | /oscar_webpay/gateway.py | 4502405a344efce2ed18f41ee25d51c633bee3d4 | [] | no_license | RaydelMiranda/django-oscar-webpay | edda3f505fbdb8dbd36dc1aae67d8ad9bf9d83eb | 9f6be28ba22af95c77b8196898df2e7651a38fdb | refs/heads/master | 2022-07-20T06:22:41.674356 | 2017-04-29T00:09:47 | 2017-04-29T00:09:47 | 88,999,127 | 7 | 2 | null | 2022-07-08T19:12:59 | 2017-04-21T15:39:50 | Python | UTF-8 | Python | false | false | 1,541 | py | from oscar_webpay.libwebpay.webpay import Webpay
from oscar_webpay.certificates import cert_normal
from oscar_webpay.libwebpay.configuration import Configuration
from oscar_webpay.oscar_webpay_settings import oscar_webpay_settings
from django.core.urlresolvers import reverse
def get_webpay_conf():
    """Build a libwebpay Configuration from the bundled 'normal' certificate."""
    cert = cert_normal.certDictionary.dictionaryCert()
    conf = Configuration()
    conf.setEnvironment(cert['environment'])
    conf.setCommerceCode(cert['commerce_code'])
    conf.setPrivateKey(cert['private_key'])
    conf.setPublicCert(cert['public_cert'])
    conf.setWebpayCert(cert['webpay_cert'])
    return conf
def get_webpay_client(order_number, total):
    """Initialise a normal Webpay transaction and return the init response.

    Args:
        order_number: used as the Webpay buy order identifier.
        total: transaction amount.
    """
    webpay = Webpay(get_webpay_conf())
    base = 'http://{}:{}'.format(
        oscar_webpay_settings.WEBPAY_RETURN_IP_ADDRESS,
        oscar_webpay_settings.WEBPAY_RETURN_PORT,
    )
    final_url = base + reverse('webpay-cancel')
    return_url = base + reverse('webpay-details-returns')
    # Positional order: amount, buy_order, session_id, return_url, final_url.
    return webpay.getNormalTransaction().initTransaction(
        total, order_number, None, return_url, final_url)
def confirm_transaction(token):
    """Fetch the transaction result for *token* from Webpay."""
    return Webpay(get_webpay_conf()).getNormalTransaction().getTransaction(token)
| [
"raydel.miranda.gomez@gmail.com"
] | raydel.miranda.gomez@gmail.com |
8380622bbde43b92e00ac4f96152d1afa7c46f30 | bb4241ec40d0f3bc7484957a3aad2c7921f3ab5f | /src/tracewhack/log.py | c21fc7cf7481ebc5c882c0be2775dba2f2ae8ccc | [
"BSD-3-Clause"
] | permissive | wingu/tracewhack | a17b7e54cbe7cc74cc99511cdf490cd2f12e4184 | a324705c23ddd8921ed829152f07fa9ff758de0f | refs/heads/master | 2020-06-04T17:25:08.534182 | 2013-01-28T15:29:20 | 2013-01-28T15:29:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | """
Logging utilities.
"""
# LATER Right now these are just dumb placeholders.
def warn(msg):
    """
    Log a warning.

    Placeholder implementation: prints *msg* to stdout (Python 2 print).
    """
    print msg
def verbose(msg, options):
    """
    Log *msg* only when verbose mode is enabled.

    `options` is a dict-like (or None); the message prints only when
    options['verbose'] is truthy.
    """
    if options and options.get('verbose', False):
        print msg
def error(msg):
    """
    Log an error.

    Placeholder implementation: prints *msg* to stdout (Python 2 print).
    """
    print msg
| [
"tomheon@gmail.com"
] | tomheon@gmail.com |
d1646861c8c6d64a271baf7fbda6efb400ffa444 | 3b4d2fe8539c5f1e49355c8acbb6cddaba1ec077 | /main_login_registration/urls.py | c170a8dcd18d0cb68bf6075c3e49d94d326d7762 | [] | no_license | SamLobodiak/Login-and-Registration-with-Amir | 20fb66c9436199efed12bb7960c4c752ec76e6a2 | e1bf534627ba6b9e7df7689e5b41a6539cece04f | refs/heads/master | 2021-05-15T19:52:54.906015 | 2017-10-23T17:55:03 | 2017-10-23T17:55:03 | 107,730,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | """main_login_registration URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # NOTE(review): r'^' matches every path; URL resolution only falls
    # through to the admin pattern when first_app's URLconf has no match.
    # Listing 'admin/' first is the usual convention — confirm intent.
    url(r'^', include('apps.first_app.urls')),
    url(r'^admin/', admin.site.urls),
]
| [
"samuel.lobodiak@gmail.com"
] | samuel.lobodiak@gmail.com |
3444f7edd6f1163f10d9be2255fc8f130c62da24 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v11/errors/types/asset_group_listing_group_filter_error.py | 0aa4caab0394edc17f6c1870dd1f51adb4f736d5 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 1,792 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# proto-plus module descriptor: registers this file's messages under the
# v11 errors package (auto-generated; do not edit by hand).
__protobuf__ = proto.module(
    package="google.ads.googleads.v11.errors",
    marshal="google.ads.googleads.v11",
    manifest={"AssetGroupListingGroupFilterErrorEnum",},
)
class AssetGroupListingGroupFilterErrorEnum(proto.Message):
    r"""Container for enum describing possible asset group listing
    group filter errors.
    """

    class AssetGroupListingGroupFilterError(proto.Enum):
        r"""Enum describing possible asset group listing group filter
        errors.
        """
        # Values mirror the generated proto enum; do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        TREE_TOO_DEEP = 2
        UNIT_CANNOT_HAVE_CHILDREN = 3
        SUBDIVISION_MUST_HAVE_EVERYTHING_ELSE_CHILD = 4
        DIFFERENT_DIMENSION_TYPE_BETWEEN_SIBLINGS = 5
        SAME_DIMENSION_VALUE_BETWEEN_SIBLINGS = 6
        SAME_DIMENSION_TYPE_BETWEEN_ANCESTORS = 7
        MULTIPLE_ROOTS = 8
        INVALID_DIMENSION_VALUE = 9
        MUST_REFINE_HIERARCHICAL_PARENT_TYPE = 10
        INVALID_PRODUCT_BIDDING_CATEGORY = 11
        CHANGING_CASE_VALUE_WITH_CHILDREN = 12
        SUBDIVISION_HAS_CHILDREN = 13
        CANNOT_REFINE_HIERARCHICAL_EVERYTHING_ELSE = 14
CANNOT_REFINE_HIERARCHICAL_EVERYTHING_ELSE = 14
# Export only the names declared in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | noreply@github.com |
6c51695f222b85f96026e756baa780aa3285965a | 35f4b55e28fd2c4f733f5063c6e4d96dd21f5581 | /Assignment2/2.2/plot.py | 04bf1f495f39ca68b2b4d44f3eedfeafc119ac9f | [] | no_license | swap612/CS633-ParallelComputing-Assignments | 7448db20d61ed2cb5b0cb1f98cc284ba0eaace41 | be52391f13f9edcc99c5cc9c5dda4b514de07a03 | refs/heads/master | 2020-05-23T21:49:49.264856 | 2019-05-16T06:17:55 | 2019-05-16T06:17:55 | 186,961,805 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
x= ['0', '2', '4', '6', '8', '9', '10']
n4 = [[ 1501.092991, 1595.196755, 1617.133016, 1616.405935, 1610.097505, 1622.031840, 1619.005661, 1619.943611, 1620.360827, 1615.472076],
[ 1509.715434, 1691.081141, 1723.952253, 1721.446337, 1720.240204, 1720.769517, 1722.654163, 1721.799672, 1723.037486, 1721.946937],
[ 1732.571940, 1737.423826, 1748.651267, 1751.496074, 1750.384434, 1752.334545, 1750.513799, 1748.719616, 1751.313242, 1750.026877],
[ 1840.571956, 1845.079962, 1845.498566, 1845.101099, 1846.473930, 1844.325687, 1846.543789, 1848.805433, 1845.743897, 1839.943264],
[ 1860.029698, 1868.408772, 1856.419903, 1870.584188, 1868.806587, 1864.844680, 1867.072942, 1868.504687, 1855.265161, 1849.882558],
[ 1864.518967, 1871.509324, 1870.917458, 1867.061849, 1872.644645, 1868.087771, 1864.236252, 1873.532863, 1861.934300, 1874.029619],
[ 1871.101119, 1866.669374, 1867.190096, 1867.043588, 1864.455699, 1868.776636, 1871.303432, 1875.550544, 1873.597974, 1874.980539]]
n4=np.array(n4);
n4 = n4.T;
#dataframe = pd.DataFrame(n4, columns=x) #plotting graph for N=4
n16 = [[ 430.247908, 481.309563, 480.619237, 482.325667, 481.619981, 481.264001, 481.147604, 481.656853, 481.793327, 481.631042],
[ 442.183796, 505.985299, 506.623337, 506.336906, 506.473456, 504.917574, 506.117588, 505.410395, 505.761076, 505.995473],
[ 473.827109, 517.738773, 518.519471, 513.225550, 519.340404, 519.147427, 517.514789, 517.263720, 517.826430, 518.270361],
[ 479.574114, 515.528562, 518.466225, 517.141390, 518.526307, 518.522092, 518.314360, 517.429200, 517.564173, 514.724142],
[ 478.729197, 517.640650, 516.271124, 516.713824, 515.889768, 516.701717, 516.471063, 516.124521, 515.839212, 516.502121],
[ 478.928078, 515.818052, 516.640500, 516.464786, 516.519354, 516.351698, 516.650822, 517.074601, 516.346285, 515.033356],
[ 469.957668, 515.945774, 516.205211, 516.284572, 472.932698, 494.635387, 516.095269, 515.692911, 515.827912, 493.009119]]
n16 = np.array(n16);
n16 = n16.T;
dataframe = pd.DataFrame(n16, columns=x)
dataframe.plot.box(grid='True')
| [
"swapnilr@cse.iitk.ac.in"
] | swapnilr@cse.iitk.ac.in |
c6f1591386821919b4b8b6930ea1cb7a2c37e9ac | b070398628647221d8d5fa82430c1414ca7582f3 | /env/bin/easy_install-3.8 | 1eed5e05292ab3df321b8f17f47173b9c4bab19a | [] | no_license | tonioshikanlu/Hospital_Management_System | ddd8c8a804e5ae4e57e1ce3bef141277d65536e2 | 31ddf96070db546b5b6d076558bb5ecac0f03252 | refs/heads/master | 2021-05-20T15:16:59.739185 | 2020-04-18T03:31:14 | 2020-04-18T03:31:14 | 252,346,303 | 2 | 0 | null | 2021-03-20T03:20:58 | 2020-04-02T03:30:06 | Python | UTF-8 | Python | false | false | 292 | 8 | #!/Users/lozoya/Documents/DBMS_Project/Hospital_Management_System/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip a trailing "-script.pyw" or ".exe" so usage/help shows the
    # bare command name on Windows-style entry points.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"latifat.ozoya@bison.howard.edu"
] | latifat.ozoya@bison.howard.edu |
4d3e87116d6556c0d297a5c799aedc741f817923 | 1854841ff9de3391f1c858fcb9f5ccd7dc5488eb | /backend/aidin_27554/wsgi.py | 989a9b46785cae5d98a29e68726486205737a9bf | [] | no_license | crowdbotics-apps/aidin-27554 | 84f8ddd4e63ebfa233fc8d4a2b617af399371b50 | fd1e1768ee18919395a1740c70bbcd337360174e | refs/heads/master | 2023-05-05T15:50:29.989821 | 2021-05-29T14:26:12 | 2021-05-29T14:26:12 | 371,992,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for aidin_27554 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module (only if not already set)
# before the WSGI application object is constructed.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aidin_27554.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, mod_wsgi) look up.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
5c611d24871d72ff959099b07e7ec08de63e467e | c3da4b4ed0670b820330f59ac85f938571a45dcb | /Coding_Answers/__init__.py | 0fa4835061504bf51500d82d28bdaf0645ed8d97 | [] | no_license | OmarJabri7/Aliz_Technologies_Answers | e4f9cfe31a48209f26469ba453b4916ed78367f5 | a2f861b18bf989a13b43316101ea98991cb9a2c3 | refs/heads/master | 2020-07-14T20:42:50.107317 | 2019-08-30T14:32:46 | 2019-08-30T14:32:46 | 205,396,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 15:28:35 2019
@author: omarj
"""
import pandas as pd
from Threshold import ThresholdBinarizer
from Estimator import custom_editor
def main():
    """Train and evaluate a threshold-binarized logistic-regression pipeline.

    Loads the Social_Network_Ads dataset, scales the features, fits a
    ThresholdBinarizer wrapped around sklearn's LogisticRegression, then
    scores a custom_editor classifier and prints its confusion matrix and
    accuracy.  Relies on the local Threshold and Estimator modules imported
    at file level.
    """
    # Importing the dataset: columns 2-3 are the features, column 4 the label.
    dataset = pd.read_csv('Social_Network_Ads.csv')
    X = dataset.iloc[:, [2, 3]].values
    y = dataset.iloc[:, 4].values
    # Splitting the dataset into the Training set and Test set (75/25 split,
    # fixed seed for reproducibility).
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
    # Feature Scaling to avoid high variance between the attributes; the
    # scaler is fit on the training set only and reused on the test set.
    from sklearn.preprocessing import StandardScaler
    sc = StandardScaler()
    X_train = sc.fit_transform(X_train)
    X_test = sc.transform(X_test)
    # Logistic regression model used inside the transformer/binarizer.
    from sklearn.linear_model import LogisticRegression
    lr = LogisticRegression(random_state = 0)
    # Fit the binarizer on the training data and derive its threshold.
    tb = ThresholdBinarizer(lr)
    tb.fit(X_train,y_train)
    T = tb.findThreshold(X_train)
    # NOTE(review): from here on, X is the *transformed test set*, yet the
    # classifier below is fit on it paired with y_train and evaluated against
    # y_test on the same X.  This looks like a train/test mix-up — confirm
    # against the intended contract of Estimator.custom_editor.
    X = tb.transform(X_test)
    classifier = custom_editor()
    classifier.fit(X,y_train)
    y_pred = classifier.predict(X,T)
    # Compute the confusion matrix of predictions vs. held-out labels.
    from sklearn.metrics import confusion_matrix
    cm = confusion_matrix(y_test, y_pred)
    print(cm)
    # Accuracy score derived from the same prediction/label pairing.
    from sklearn.metrics import accuracy_score
    score = accuracy_score(y_pred,y_test)
    print(score)
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | noreply@github.com |
68e9f2a40aaf0f77dbcaa591c3352c44de7c0d00 | 1fa19b841f331aa21a33d48fdea837b90dae9597 | /ConnectFour/ConnectFour.py | 674a79eedcbb1ee225b6ec53d031fa4316b5abac | [
"Apache-2.0"
] | permissive | resolutedreamer/ConnectFour | 3cb760b04c1e1bfdbaf86e50e5b91320263b0891 | 18f35483083ae19e6363acf06390c3aa2079fa75 | refs/heads/master | 2021-01-12T13:10:43.828305 | 2016-12-03T05:58:52 | 2016-12-03T05:58:52 | 72,137,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,323 | py | #!/usr/bin/env python
class ConnectFour():
    """Two-player Connect Four on a board of `width` columns by `height` rows.

    The board is stored column-major: board[column][row], with row 0 at the
    bottom of each column.  Player 1's discs are stored as 1, player 2's as
    -1, and empty cells as 0.

    Fixes over the previous revision:
    - play() crashed announcing the winner (bool concatenated into a str).
    - getPlayerMove() converted the wrong variable (`column` instead of
      `column_str`) and returned a move even when validation failed.
    - checkMatch() discarded its recursive results and used tuple-valued
      conditions that were always truthy, so wins were never detected; it is
      now an iterative four-direction scan.
    - Print statements converted to print() calls so the game runs on
      Python 3 as well.
    """

    def __init__(self, height, width):
        self.height = height
        self.width = width
        # Column-major grid of 0 (empty), 1 (player 1) or -1 (player 2).
        self.board = []
        # Number of discs already dropped into each column.
        self.columnCounts = [0] * width
        for i in range(width):
            self.board.append([0] * height)
        # False while the game is being played; True once a player wins.
        self.gameOver = False
        # True for player 1, False for player 2.
        self.currentPlayer = True

    def getPlayerNum(self):
        """Return '1' or '2' for the player whose turn it is."""
        return '1' if self.currentPlayer else '2'

    def print_board(self):
        """Print the grid (each board column is shown as one printed row)."""
        for column in self.board:
            for cell in column:
                print(cell, end=' ')
            print()

    def play(self):
        """Run the interactive game loop until someone wins."""
        while not self.gameOver:
            self.print_board()
            print("Player" + self.getPlayerNum() + "'s Turn")
            performedMove = self.getPlayerMove()
            self.checkVictory(performedMove)
            if self.gameOver:
                # Announce before toggling, so the winner is the mover.
                print("Player " + self.getPlayerNum() + " wins!")
            else:
                self.currentPlayer = not self.currentPlayer

    def getPlayerMove(self):
        """Prompt until a valid column is entered; drop a disc and return
        the (column, row) position of the placed token."""
        # raw_input exists on Python 2 only; fall back to input on Python 3.
        try:
            read_line = raw_input
        except NameError:
            read_line = input
        while True:
            column_str = read_line("Please select a column: ")
            if column_str.isdigit():
                column = int(column_str)
                if self.checkValidMove(column):
                    return self.performPlayerMove(column)

    def checkValidMove(self, column):
        """Return True iff `column` is on the board and not full."""
        if 0 <= column < self.width:
            if self.columnCounts[column] < self.height:
                return True
            print("Column full")
        return False

    def performPlayerMove(self, column):
        """Drop the current player's token into `column` (assumed valid).

        Returns the (column, row) coordinates of the placed token.
        """
        nextEmptySpace = self.columnCounts[column]
        playerToken = self.getToken()
        print("Performing Move")
        self.board[column][nextEmptySpace] = playerToken
        self.columnCounts[column] += 1
        return (column, nextEmptySpace)

    def checkMatch(self, x, y, playerToken, inARow = 0):
        """Return True iff the token at (x, y) completes a line of four.

        Scans the four line orientations (horizontal, vertical, both
        diagonals), counting consecutive `playerToken` cells on either side
        of (x, y).  `inARow` is retained for backward compatibility with the
        old recursive signature and is ignored.
        """
        for dx, dy in ((1, 0), (0, 1), (1, 1), (1, -1)):
            count = 1  # the token at (x, y) itself
            for sign in (1, -1):
                cx, cy = x + sign * dx, y + sign * dy
                while (0 <= cx < self.width and 0 <= cy < self.height
                       and self.board[cx][cy] == playerToken):
                    count += 1
                    cx += sign * dx
                    cy += sign * dy
            if count >= 4:
                return True
        return False

    def getToken(self):
        """Return the board token for the current player: 1 or -1."""
        return 1 if self.currentPlayer else -1

    def checkVictory(self, currentMove):
        """Set self.gameOver according to whether `currentMove`, a
        (column, row) pair, completed four in a row for the current player."""
        column, row = currentMove
        self.gameOver = self.checkMatch(column, row, self.getToken())
def main():
    """Entry point: set up an 8x9 Connect Four game and run its loop."""
    ConnectFour(8, 9).play()
if __name__ == '__main__':
main() | [
"anthony.c.nguyen@ucla.edu"
] | anthony.c.nguyen@ucla.edu |
e6f1f95c32197c7c576178271138facd8a391b2c | acfdc6c6f0e70aaaa8bd424c996146f18dda1fe1 | /scripts/generate_pub_names.py | 101aac39331942b296840be06d2b608b307036a2 | [] | no_license | Samson-Dai/ctructure | bdfd938f5943c9750487a5408243de07f2fcebcc | 0e969693ca9987cd4ce96a3ef26cb074057f3a69 | refs/heads/master | 2020-04-04T14:45:28.952642 | 2017-12-15T12:38:22 | 2017-12-15T12:38:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # generate the names of the publications from the laws
from os import listdir
from os.path import isfile, join
onlyfiles = [f for f in listdir("demo_laws") if isfile(join("demo_laws", f))]
# For every law file discovered above, extract its publication name (the
# 4th line of the file) and write them all to publication_names.txt, one per
# line.  Context managers guarantee the files are closed even if a file is
# malformed and raises while being read.
with open("publication_names.txt", 'w') as out_file:
    for name in onlyfiles:
        with open(join("demo_laws", name), 'r') as law_file:
            # The publication name sits on the 4th line of each law file.
            pub_name = law_file.read().split('\n')[3]
        out_file.write(pub_name + '\n')
| [
"katricia.herring@gmail.com"
] | katricia.herring@gmail.com |
030e30da9d4ce1d9b85729f6d5707b383c54a550 | 15062f7fb642694c60b6acf3fd9125b68d706221 | /google_codejam/2020/esab_atad.py | fdaecbe461fea449589fdd9985b91f3469894ab0 | [] | no_license | Rhysoshea/daily_coding_challenges | 4f4ba14b0adc922069309ee8b3e5feb6330327c8 | 42794391218dfe1ec8596cf0e8acaf2a806d7d78 | refs/heads/master | 2021-07-20T06:37:27.166233 | 2020-10-01T05:50:58 | 2020-10-01T05:50:58 | 217,301,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,843 | py | """
Problem
Last year, a research consortium had some trouble with a distributed database system that sometimes lost pieces of the data. You do not need to read or understand that problem in order to solve this one!
The consortium has decided that distributed systems are too complicated, so they are storing B bits of important information in a single array on one awesome machine. As an additional layer of security, they have made it difficult to obtain the information quickly; the user must query for a bit position between 1 and B, and then they receive that bit of the stored array as a response.
Unfortunately, this ultra-modern machine is subject to random quantum fluctuations! Specifically, after every 1st, 11th, 21st, 31st... etc. query is sent, but before the response is given, quantum fluctuation causes exactly one of the following four effects, with equal probability:
25% of the time, the array is complemented: every 0 becomes a 1, and vice versa.
25% of the time, the array is reversed: the first bit swaps with the last bit, the second bit swaps with the second-to-last bit, and so on.
25% of the time, both of the things above (complementation and reversal) happen to the array. (Notice that the order in which they happen does not matter.)
25% of the time, nothing happens to the array.
Moreover, there is no indication of what effect the quantum fluctuation has had each time. The consortium is now concerned, and it has hired you to get its precious data back, in whatever form it is in! Can you find the entire array, such that your answer is accurate as of the time that you give it? Answering does not count as a query, so if you answer after your 30th query, for example, the array will be the same as it was after your 21st through 30th queries.
Input and output
This is an interactive problem. You should make sure you have read the information in the Interactive Problems section of our FAQ.
Initially, your program should read a single line containing two integers T and B: the number of test cases and the number of bits in the array, respectively. Note that B is the same for every test case.
Then, you need to process T test cases. In each case, the judge begins with a predetermined B-bit array; note that this array can vary from test case to test case, and is not necessarily chosen at random. Then, you may make up to 150 queries of the following form:
Your program outputs one line containing a single integer P between 1 and B, inclusive, indicating which position in the array you wish to look at.
If the number of queries you have made so far ends with a 1, the judge chooses one of the four possibilities described above (complementation, reversal, complementation + reversal, or nothing), uniformly at random and independently of all other choices, and alters the stored array accordingly. (Notice that this will happen on the very first query you make.)
The judge responds with one line containing a single character 0 or 1, the value it currently has stored at bit position P, or N if you provided a malformed line (e.g., an invalid position).
Then, after you have made as many of the 150 queries above as you want, you must make one more exchange of the following form:
Your program outputs one line containing a string of B characters, each of which is 0 or 1, representing the bits currently stored in the array (which will not necessarily match the bits that were initially present!)
The judge responds with one line containing a single letter: uppercase Y if your answer was correct, and uppercase N if it was not (or you provided a malformed line). If you receive Y, you should begin the next test case, or stop sending input if there are no more test cases.
After the judge sends N to your input stream, it will not send any other output. If your program continues to wait for the judge after receiving N, your program will time out, resulting in a Time Limit Exceeded error. Notice that it is your responsibility to have your program exit in time to receive a Wrong Answer judgment instead of a Time Limit Exceeded error. As usual, if the memory limit is exceeded, or your program gets a runtime error, you will receive the appropriate judgment.
Limits
Time limit: 40 seconds per test set.
Memory limit: 1GB.
1 ≤ T ≤ 100.
Test set 1 (Visible Verdict)
B = 10.
Test set 2 (Visible Verdict)
B = 20.
Test set 3 (Hidden Verdict)
B = 100.
Testing Tool
You can use this testing tool to test locally or on our servers. To test locally, you will need to run the tool in parallel with your code; you can use our interactive runner for that. The interactive runner was changed after the 2019 contest. Be sure to download the latest version. For more information, read the Interactive Problems section of the FAQ.
Local Testing Tool
To better facilitate local testing, we provide you the following script. Instructions are included inside. You are encouraged to add more test cases for better testing. Please be advised that although the testing tool is intended to simulate the judging system, it is NOT the real judging system and might behave differently.
If your code passes the testing tool but fails the real judge, please check the Coding section of our FAQ to make sure that you are using the same compiler as us.
Download local testing tool
Sample Interaction
The following interaction corresponds to Test Set 1.
t, b = readline_int_list() // reads 100 into t and 10 into b.
// The judge starts with the predetermined array for this test case:
// 0001101111. (Note: the actual Test Set 1 will not necessarily
// use this array.)
printline 1 to stdout // we ask about position 1.
flush stdout
// Since this is our 1st query, and 1 is 1 mod 10, the judge secretly and
// randomly chooses one of the four possible quantum fluctuation effects, as
// described above. It happens to choose complementation + reversal, so now
// the stored value is 0000100111.
r = readline_chr() // reads 0.
printline 6 to stdout // we ask about position 6.
flush stdout
// Since this is our 2nd query, and 2 is 2 mod 10, the judge does not choose
// a quantum fluctuation effect.
r = readline_chr() // reads 0.
...
// We have omitted the third through tenth queries in this example.
...
printline 1 to stdout // we decide to ask about position 1 again.
flush stdout
// Since this is our 11th query, and 11 is 1 mod 10, the judge secretly and
// randomly chooses a quantum fluctuation effect, and happens to get
// reversal, so now the stored value is 1110010000.
r = readline_chr() // reads 1.
printline 1110110000 to stdout // we try to answer. why?!?!
flush stdout
ok = readline_chr() // reads N -- we have made a mistake!
exit // exits to avoid an ambiguous TLE error
"""
| [
"rhys.shea.10@ucl.ac.uk"
] | rhys.shea.10@ucl.ac.uk |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.