blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3e4b0fec0223f8d4349d05e49bcdb966e9c9a1c8 | 05d11b9cda35371669195e7c07e476dfb95ccaef | /triple_net_tensorboard/triple_net_tensorboard.py | fc850e5b407123b0a6d87506fed40fdf8fc101a3 | [] | no_license | Continue7777/DSSM- | d32a105c033f4a8074d67c3fee56543d65622669 | af018562123cb3c81fde9b27becf0bc042bafd79 | refs/heads/master | 2021-04-15T09:33:14.255692 | 2018-04-14T16:05:19 | 2018-04-14T16:05:19 | 126,166,329 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,410 | py | #-*- coding:utf-8 -*-
import pandas as pd
from scipy.sparse import coo_matrix
import collections
import random
import time
import numpy as np
import tensorflow as tf
from data_input_fast import Data_set
from utils import *
#**************************************feed_dict***********************************************
def pull_all(index_list):
    """Build the three sparse one-hot inputs for one batch of row indices.

    For each index the query, its positive (main_question) document and a
    negative (other_question) document are one-hot encoded by the module-level
    ``train_data_set`` and converted to ``tf.SparseTensorValue`` so they can be
    fed into the sparse placeholders ``query_in`` / ``doc_positive_in`` /
    ``doc_negative_in``.

    input: index_list -- row indices of the training dataframe for this batch
    return: (query, doc_positive, doc_negative) as tf.SparseTensorValue
    """
    def _to_sparse(dense):
        # COO gives us explicit (row, col, value) triples, which is exactly
        # the layout tf.SparseTensorValue expects.
        m = coo_matrix(dense)
        indices = np.transpose([np.array(m.row, dtype=np.int64), np.array(m.col, dtype=np.int64)])
        return tf.SparseTensorValue(indices, np.array(m.data, dtype=np.float), np.array(m.shape, dtype=np.int64))
    query_in = _to_sparse(train_data_set.get_one_hot_from_batch(index_list, 'query'))
    doc_positive_in = _to_sparse(train_data_set.get_one_hot_from_batch(index_list, 'main_question'))
    doc_negative_in = _to_sparse(train_data_set.get_one_hot_from_batch(index_list, 'other_question'))
    return query_in, doc_positive_in, doc_negative_in
def pull_batch(index_list,batch_id):
    """Slice batch number ``batch_id`` out of ``index_list`` and convert it
    to sparse feed values via pull_all.

    NOTE(review): returns None when the batch would overrun the list; the
    caller unpacks the result into a 3-tuple, so a None return would raise
    a TypeError there -- confirm callers never request the last partial batch.
    """
    # query_BS is the module-level batch size.
    if (batch_id + 1) * query_BS >= len(index_list):
        print "batch outof index"
        return None
    batch_index_list = index_list[batch_id * query_BS:(batch_id + 1) * query_BS]
    query_in, doc_positive_in, doc_negative_in = pull_all(batch_index_list)
    return query_in, doc_positive_in, doc_negative_in
def feed_dict_train(train_index_list,test_index_list,on_training, Train, batch_id):
    """Build the feed_dict for one training or evaluation step.

    Train -- when True, pull the batch from train_index_list, otherwise from
        test_index_list.
    on_training -- forwarded to the ``on_train`` placeholder (used by batch
        normalisation layers, presumably -- see utils.input_layer).
    batch_id -- which slice of the chosen index list to feed.
    """
    if Train:
        query, doc_positive, doc_negative = pull_batch(train_index_list,batch_id)
    else:
        query, doc_positive, doc_negative = pull_batch(test_index_list,batch_id)
    # query_in / doc_positive_in / doc_negative_in / on_train are the
    # module-level placeholders created by input_layer().
    return {query_in: query, doc_positive_in: doc_positive, doc_negative_in: doc_negative,
            on_train: on_training}
def feed_dict_predict(sentence,doc_positive_spt,on_training=True):
    """Build the feed_dict for predicting the main question of one sentence.

    sentence -- raw query text; one-hot encoded then converted to a
        tf.SparseTensorValue.
    doc_positive_spt -- pre-built sparse tensor holding ALL main questions,
        so the prediction layer can score the query against every candidate.
    """
    query = train_data_set.get_one_hot_from_sentence(sentence)
    query = coo_matrix(query)
    # Repack the COO triples into the layout tf.SparseTensorValue expects.
    query = tf.SparseTensorValue(
        np.transpose([np.array(query.row, dtype=np.int64), np.array(query.col, dtype=np.int64)]),
        np.array(query.data, dtype=np.float),
        np.array(query.shape, dtype=np.int64))
    return {query_in: query, doc_positive_in: doc_positive_spt,on_train: on_training}
def feed_dict_triple(query,doc_pos,doc_neg,on_training=True):
    """Build a feed_dict from three raw sentences (query, positive doc,
    negative doc), one-hot encoding each and converting it to a
    tf.SparseTensorValue for the sparse placeholders.
    """
    def _to_sparse(dense):
        # Shared dense -> SparseTensorValue conversion (previously repeated
        # three times inline).
        m = coo_matrix(dense)
        indices = np.transpose([np.array(m.row, dtype=np.int64), np.array(m.col, dtype=np.int64)])
        return tf.SparseTensorValue(indices, np.array(m.data, dtype=np.float), np.array(m.shape, dtype=np.int64))
    query = _to_sparse(train_data_set.get_one_hot_from_sentence(query))
    doc_positive = _to_sparse(train_data_set.get_one_hot_from_sentence(doc_pos))
    doc_negative = _to_sparse(train_data_set.get_one_hot_from_sentence(doc_neg))
    return {query_in: query, doc_positive_in: doc_positive, doc_negative_in: doc_negative,on_train: on_training}
def predict_label_n_with_sess(sess,sentence_list):
    """Predict the main question for each sentence and return display strings.

    Returns a list of "sentence:predicted_main_question" strings, one per
    input sentence (used for the TensorBoard text summary).
    """
    result_list = []
    for i,sentence in enumerate(sentence_list):
        pred_prob_v,pred_label_v = sess.run([pred_prob,pred_label],feed_dict=feed_dict_predict(sentence,doc_main_question_spt))
        pred_main_question = train_data_set.get_main_question_from_label_index(pred_label_v)
        result_list.append(sentence + ":" +pred_main_question)
    return result_list
def evaluate_test_with_sess(sess,test_question_query_list,test_question_label_list):
    """Compute top-1 accuracy of the model on a held-out question list.

    sess -- live tf.Session with the graph's variables initialised.
    test_question_query_list -- raw query sentences.
    test_question_label_list -- expected main question per query (parallel list).
    return: accuracy in [0.0, 1.0]; 0.0 for an empty evaluation set.
    """
    count = 0
    acc = 0
    for i, sentence in enumerate(test_question_query_list):
        pred_prob_v, pred_label_v = sess.run([pred_prob, pred_label], feed_dict=feed_dict_predict(sentence, doc_main_question_spt))
        pred_main_question = train_data_set.get_main_question_from_label_index(pred_label_v)
        if pred_main_question == test_question_label_list[i]:
            acc += 1
        count += 1
    # Guard: an empty evaluation list previously raised ZeroDivisionError.
    if count == 0:
        return 0.0
    return acc / float(count)
# the constant
# Command-line flags for paths, hyper-parameters and logging cadence.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('summaries_dir', 'Summaries/', 'Summaries directory')
flags.DEFINE_string('train_write_name', 'train_fc*2', 'Summaries directory')
flags.DEFINE_string('test_write_name', 'test_fc*2', 'Summaries directory')
# NOTE(review): the default below embeds literal quotes and a trailing dot
# ('"model_full.ckpt".') -- looks unintentional; confirm the intended filename.
flags.DEFINE_string('checkpoint_name', '"model_full.ckpt".', 'Summaries directory')
flags.DEFINE_string('model_dir', 'model/', 'model directory')
flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.')
flags.DEFINE_integer('epoch_num', 5, 'Number of epoch.')
flags.DEFINE_bool('gpu', 0, "Enable GPU or not")
flags.DEFINE_integer('print_cycle', 15, "how many batches to print")
# the data_set and dataframe
train_data_set = Data_set(data_path='data/train_data.csv',data_percent=0.4,train_percent=1) #the train dataset
test_data_df = pd.read_csv('data/test_data.csv',encoding='utf-8')
train_size, test_size = train_data_set.get_train_test_size()
train_index_list = train_data_set.train_index_list
test_index_list = train_data_set.test_index_list
test_question_query_list = list(test_data_df['query'])
test_question_label_list = list(test_data_df['main_question'])
# coo fisrt
# Pre-build the sparse tensor of ALL main questions once; it is reused by
# every prediction/evaluation call.
doc_main_question = train_data_set.get_one_hot_from_main_question()
doc_main_question = coo_matrix(doc_main_question)
doc_main_question_spt = tf.SparseTensorValue(
    np.transpose([np.array(doc_main_question.row, dtype=np.int64), np.array(doc_main_question.col, dtype=np.int64)]),
    np.array(doc_main_question.data, dtype=np.float),
    np.array(doc_main_question.shape, dtype=np.int64))
# the arg of triple-net
input_layer_num = train_data_set.get_word_num()
main_question_num = train_data_set.get_main_question_num()
query_BS = 100
# the architecture of the triple-net
is_norm = False
layer1_len = 400
layer2_len = 120
#input
query_in,doc_positive_in,doc_negative_in,on_train = input_layer(input_layer_num)
#fc1
query_layer1_out,doc_pos_layer1_out,doc_neg_layer1_out = fc_layer(query_in,doc_positive_in,doc_negative_in,input_layer_num,layer1_len,'FC1',True,is_norm)
#fc2
query_y,doc_positive_y,doc_negative_y = fc_layer(query_layer1_out,doc_pos_layer1_out,doc_neg_layer1_out,layer1_len,layer2_len,'FC2',False,is_norm)
#loss
cos_sim,prob,loss = train_loss_layer(query_y,doc_positive_y,doc_negative_y,query_BS)
#acc
accuracy = accuracy_layer(prob)
#pred_label
pred_prob,pred_label = predict_layer(query_y,doc_positive_y,main_question_num)
# Optimizer
train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)
merged = tf.summary.merge_all()
#evaluate
evaluate_on_test_acc,evaluae_summary = get_evaluate_test_summary()
#record predict text
predict_strings,text_summary = get_text_summaries()
#train
config = tf.ConfigProto()
# NOTE(review): this branch runs when the gpu flag is FALSE yet prints
# "here we use gpu" and enables GPU-style session options -- the condition
# looks inverted; confirm intent before changing.
if not FLAGS.gpu:
    print "here we use gpu"
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
# Create a Saver object to (selectively) persist variables / the model.
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + FLAGS.train_write_name, sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.summaries_dir +FLAGS.test_write_name, sess.graph)
    print "start training"
    for epoch_id in range(FLAGS.epoch_num):
        for batch_id in range(int(train_size/query_BS)):
            summary_v,_,loss_v,acc_v = sess.run([merged,train_step,loss,accuracy], feed_dict=feed_dict_train(train_index_list,test_index_list,True, True, batch_id))
            train_writer.add_summary(summary_v, batch_id + 1)
            # Every print_cycle batches, log sample predictions and test accuracy.
            if batch_id % FLAGS.print_cycle == 0:
                #add text_summary
                query_list = random.sample(list(train_data_set.df['query']),10)
                predict_strings_v = predict_label_n_with_sess(sess,query_list)
                text_summary_t = sess.run(text_summary,feed_dict={predict_strings:predict_strings_v})
                train_writer.add_summary(text_summary_t,int(train_size/query_BS) * epoch_id + batch_id+1)
                #add evaluate_test()
                evaluae_summary_t = sess.run(evaluae_summary,feed_dict={evaluate_on_test_acc:evaluate_test_with_sess(sess,test_question_query_list,test_question_label_list)})
                train_writer.add_summary(evaluae_summary_t,batch_id+1)
        # Save the model once per epoch.
        save_path = saver.save(sess, FLAGS.model_dir+FLAGS.checkpoint_name)
        print("Model saved in file: ", save_path)
"362418096@qq.com"
] | 362418096@qq.com |
a9b33f6c6c2f40ad46017f0a75775c17579f1e0a | 0b98732dcd3dd94a97555a8f3e8dd3524bb8ec86 | /configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py | a6b720332cf33263295dcfeeae0d85b793e5166d | [
"Apache-2.0"
] | permissive | hasanirtiza/Pedestron | e89fea2ec676f150a7266f6b65963dd6c4ec35c9 | 8ab23ec38982cfaf0ae82c77c30f10b2fff62d12 | refs/heads/master | 2023-08-06T02:53:06.368937 | 2023-04-06T13:46:27 | 2023-04-06T13:46:27 | 247,410,025 | 723 | 161 | Apache-2.0 | 2022-10-02T10:17:44 | 2020-03-15T05:52:52 | Python | UTF-8 | Python | false | false | 5,628 | py | # model settings
model = dict(
type='MaskScoringRCNN',
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=81,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
mask_iou_head=dict(
type='MaskIoUHead',
num_convs=4,
num_fcs=2,
roi_feat_size=14,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
num_classes=81))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
mask_thr_binary=0.5,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
mask_thr_binary=0.5))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=True,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=True,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ms_rcnn_x101_64x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"chenkaidev@gmail.com"
] | chenkaidev@gmail.com |
52dc6364b0481881b567046e1443faf02235b238 | 75491989e021c515461ae94dd9e069c0e3cebd48 | /Etc/CodeUp_basic/1079.py | 795c6ba6a8bfcf45ba6538395c8b796605650995 | [] | no_license | Girin7716/PythonCoding | c60db97d269aa4a90159ae83f40c332244af6b41 | 7ac4f942aed727b5290f18ce252c5f99ad657c72 | refs/heads/master | 2023-04-20T23:33:36.077633 | 2021-05-07T11:51:16 | 2021-05-07T11:51:16 | 291,244,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | chars = input().split()
# Echo each token in order, stopping after the first 'q' (which is still printed).
for token in chars:
    print(token)
    if token == 'q':
        break
"k3832580@naver.com"
] | k3832580@naver.com |
1961f2659d10551725b45a9d5e51b8cd2e68d7f8 | 441e375326d6d465f470d81f75298744cfe7546a | /.idea/自变量(variable)/learn_var.py | 7eef729fa2b26a4eeed00adbed5dcf0919c73852 | [] | no_license | domarshmello/corePython | 6e9abbb130c7225d40caff3d04031ae0a7e0326d | dca7b4f9dbd8e807b1b1268c6d8deb874f104f0b | refs/heads/master | 2020-03-24T00:01:55.399634 | 2018-08-13T12:34:23 | 2018-08-13T12:34:23 | 142,269,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py |
#将自变量1赋值给apple这自变量
# Assign the integer 1 to the variable named apple.
apple=1
print(apple)
# Naming conventions: snake_case / camelCase demo.
print("---命名规范:下划线 驼峰 ---")
apple_edd=2
print(apple_edd)
# Defining several variables: one statement each...
print("---命名规范:一次定义多个变量 或者分步---")
a=1
b=2
print(a,b)
# ...or several at once via tuple assignment.
print("---命名规范:一次定义多个变量---")
c,d=3,3
print(c,d)
print("----------")
# A variable may be computed from another variable.
r=5
g=r*6
print(r,g)
| [
"domarshmello@outlook.com"
] | domarshmello@outlook.com |
f9a52c403883a86953fde086d2dea1e2ea5e68ef | 78fa96c35a51179e86aa49874b3f3c4f3d585a86 | /model_arch/vgg_pytorch.py | 57bc6792524051bf1a7bc48a3a8a44e6c2a56ae2 | [] | no_license | suigetsu312/colorize | 6997d7d0dc3252dc07143d4aebe2c664db307bb7 | 6588cb5307bb22750ce1c4968d730db0a10c99ac | refs/heads/main | 2023-07-18T05:00:51.815785 | 2021-09-08T01:36:04 | 2021-09-08T01:36:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,483 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.util import vgg_preprocess
class VGG19_pytorch(nn.Module):
    """VGG-19 feature extractor that returns named intermediate activations.

    NOTE: no need to pre-process the input; input tensor should range in [0,1]
    (``vgg_preprocess`` is applied inside ``forward`` unless ``preprocess=False``).
    Attribute names (conv1_1 ... conv5_4, pool1 ... pool5) match the standard
    VGG-19 layout so pretrained state dicts load unchanged.
    """

    def __init__(self, pool="max"):
        """Build the 16 conv layers and 5 pooling layers.

        pool -- pooling flavour, "max" or "avg".
        Raises ValueError for any other value (previously an invalid value
        silently skipped pool creation and failed later in ``forward`` with
        an AttributeError).
        """
        super(VGG19_pytorch, self).__init__()
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        if pool == "max":
            self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
            self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
            self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
            self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
            self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)
        elif pool == "avg":
            self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)
            self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)
            self.pool3 = nn.AvgPool2d(kernel_size=2, stride=2)
            self.pool4 = nn.AvgPool2d(kernel_size=2, stride=2)
            self.pool5 = nn.AvgPool2d(kernel_size=2, stride=2)
        else:
            raise ValueError("pool must be 'max' or 'avg', got %r" % (pool,))

    def forward(self, x, out_keys, preprocess=True):
        """Run the network; return the activations named in ``out_keys``.

        x -- input image tensor; should range in [0,1] when preprocess=True.
        out_keys -- iterable of activation names ("r11".."r54", "p1".."p5").
        return: list of tensors, in out_keys order.
        """
        out = {}
        if preprocess:
            x = vgg_preprocess(x)
        out["r11"] = F.relu(self.conv1_1(x))
        out["r12"] = F.relu(self.conv1_2(out["r11"]))
        out["p1"] = self.pool1(out["r12"])
        out["r21"] = F.relu(self.conv2_1(out["p1"]))
        out["r22"] = F.relu(self.conv2_2(out["r21"]))
        out["p2"] = self.pool2(out["r22"])
        out["r31"] = F.relu(self.conv3_1(out["p2"]))
        out["r32"] = F.relu(self.conv3_2(out["r31"]))
        out["r33"] = F.relu(self.conv3_3(out["r32"]))
        out["r34"] = F.relu(self.conv3_4(out["r33"]))
        out["p3"] = self.pool3(out["r34"])
        out["r41"] = F.relu(self.conv4_1(out["p3"]))
        out["r42"] = F.relu(self.conv4_2(out["r41"]))
        out["r43"] = F.relu(self.conv4_3(out["r42"]))
        out["r44"] = F.relu(self.conv4_4(out["r43"]))
        out["p4"] = self.pool4(out["r44"])
        out["r51"] = F.relu(self.conv5_1(out["p4"]))
        out["r52"] = F.relu(self.conv5_2(out["r51"]))
        out["r53"] = F.relu(self.conv5_3(out["r52"]))
        out["r54"] = F.relu(self.conv5_4(out["r53"]))
        out["p5"] = self.pool5(out["r54"])
        return [out[key] for key in out_keys]
| [
"natsuejji1@gmail.com"
] | natsuejji1@gmail.com |
8b91709a714b1c95f8b36bf51991675a862c994d | 1d61087c63048f3409690334a509d54f98e4b5c7 | /core/notifications.py | 8e0c351f56a98f02700bd5d4fa7fa31c1801d422 | [] | no_license | dev-chip/heads_up | 2d24b0b4561a4554bd799a4032132c3534857342 | 92c83de66c2009640f00ac8d20c2135d2b883303 | refs/heads/master | 2023-07-08T18:16:59.188412 | 2021-07-20T09:59:30 | 2021-07-20T09:59:30 | 374,797,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | """
Shows notifications via Windows 10 OS.
"""
__author__ = "James Cook"
__copyright__ = "Copyright (C) 2021 James Cook"
__license__ = "GNU General Public License v3"
__version__ = "1.0.0"
__maintainer__ = "James Cook"
__email__ = "contact@cookjames.uk"
from win10toast_click import ToastNotifier
def notify_win10(title, msg, app_icon_path=None):
    """
    Sends a windows 10 notification via win10toast_click.

    title -- notification headline.
    msg -- notification body text.
    app_icon_path -- optional path to an .ico file shown with the toast.

    Will freeze the program (threaded=False). Should be ran on a thread.
    """
    toaster = ToastNotifier()
    toaster.show_toast(title=title,
                       msg=msg,
                       duration=None,
                       icon_path=app_icon_path,
                       threaded=False,
                       # NOTE(review): click handler prints 2 -- looks like
                       # leftover debug code; confirm intended behaviour.
                       callback_on_click=lambda: print(1+1))


# Manual smoke test: fires one real toast when run as a script.
if __name__ == "__main__":
    print("Running module manual test.")
    notify_win10(title="Hey! Take a break now!", msg="You should follow the 20-20-20 rule to keep your eyes healthy.")
"chip.ck.main@gmail.com"
] | chip.ck.main@gmail.com |
2b09af06835e7474ad61e8d98f0c2a72f6f3ed6b | dc37f36199b107933e33486761125cef2f492ae2 | /export_contacts.py | 9eb70ffd28bd589f83971c6a335fa94871265327 | [] | no_license | spookylukey/christchurch_django | ca3acd67df1695a1cd7cb462b729ad72a37e43b7 | d489e400b201b8ac56ee4065b3d6bc0f861f92f2 | refs/heads/master | 2022-12-20T03:27:26.081809 | 2015-10-15T18:36:20 | 2015-10-15T18:36:20 | 300,521,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | #!/usr/bin/env python
from __future__ import unicode_literals
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'christchurch.settings'
import csv
writer = csv.writer(open("contact-list.csv", "w"))
writer.writerow(["First Name", "Last Name", "Gender (M/F)", "Student (Y/N)", "Address", "Email Address", "Phone Number", "Mobile", "Photo File Name", "Home Group", "Username", "Password", "Admin User (Y/N)", "Church member", "Include on email lists"])
from django.contrib.auth.models import User
from contacts.models import Contact
admins = {u.email: u for u in User.objects.all().filter(is_staff=True)}
for contact in Contact.objects.all():
try:
first_name, last_name = contact.name.split(' ', 2)
except ValueError:
first_name, last_name = contact.name, ""
writer.writerow([
first_name,
last_name,
"",
"N",
contact.address.strip() + "\n" + contact.post_code,
contact.email,
contact.phone_number,
contact.mobile_number,
"",
contact.home_group.name if contact.home_group else "",
admins[contact.email].username if contact.email in admins else "",
"",
"Y" if contact.email in admins else "N",
"Y" if contact.church_member else "N",
"Y" if contact.include_on_email_lists else "N",
])
| [
"L.Plant.98@cantab.net"
] | L.Plant.98@cantab.net |
77af41358982c08950c144fac88c03820ae1a378 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/battle_control/controllers/feedback_events.py | 745d8091451fb08bc693fbe8f33885b44f3694f5 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 16,113 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/battle_control/controllers/feedback_events.py
import logging
from BattleFeedbackCommon import BATTLE_EVENT_TYPE as _BET, NONE_SHELL_TYPE
from gui.battle_control.battle_constants import FEEDBACK_EVENT_ID as _FET
from constants import ATTACK_REASON, ATTACK_REASONS, BATTLE_LOG_SHELL_TYPES, ROLE_TYPE, ROLE_TYPE_TO_LABEL
_logger = logging.getLogger(__name__)
def _unpackInteger(packedData):
    # Identity "unpacker": this event's payload is already a plain integer.
    return packedData
def _unpackDamage(packedData):
    # Decode a packed damage event into a _DamageExtra value object.
    return _DamageExtra(*_BET.unpackDamage(packedData))
def _unpackCrits(packedData):
    # Decode a packed critical-hit event into a _CritsExtra value object.
    return _CritsExtra(*_BET.unpackCrits(packedData))
def _unpackVisibility(packedData):
    # Decode a packed visibility event into a _VisibilityExtra value object.
    return _VisibilityExtra(*_BET.unpackVisibility(packedData))
def _unpackMultiStun(packedData):
    # packedData is the stunned-targets count; multi-stun is always a role action here.
    return _MultiStunExtra(packedData, True)
_BATTLE_EVENT_TO_PLAYER_FEEDBACK_EVENT = {_BET.KILL: _FET.PLAYER_KILLED_ENEMY,
_BET.DAMAGE: _FET.PLAYER_DAMAGED_HP_ENEMY,
_BET.CRIT: _FET.PLAYER_DAMAGED_DEVICE_ENEMY,
_BET.SPOTTED: _FET.PLAYER_SPOTTED_ENEMY,
_BET.RADIO_ASSIST: _FET.PLAYER_ASSIST_TO_KILL_ENEMY,
_BET.TRACK_ASSIST: _FET.PLAYER_ASSIST_TO_KILL_ENEMY,
_BET.STUN_ASSIST: _FET.PLAYER_ASSIST_TO_STUN_ENEMY,
_BET.BASE_CAPTURE_POINTS: _FET.PLAYER_CAPTURED_BASE,
_BET.BASE_CAPTURE_DROPPED: _FET.PLAYER_DROPPED_CAPTURE,
_BET.BASE_CAPTURE_BLOCKED: _FET.PLAYER_BLOCKED_CAPTURE,
_BET.TANKING: _FET.PLAYER_USED_ARMOR,
_BET.RECEIVED_DAMAGE: _FET.ENEMY_DAMAGED_HP_PLAYER,
_BET.RECEIVED_CRIT: _FET.ENEMY_DAMAGED_DEVICE_PLAYER,
_BET.TARGET_VISIBILITY: _FET.VEHICLE_VISIBILITY_CHANGED,
_BET.DETECTED: _FET.VEHICLE_DETECTED,
_BET.ENEMY_SECTOR_CAPTURED: _FET.ENEMY_SECTOR_CAPTURED,
_BET.DESTRUCTIBLE_DAMAGED: _FET.DESTRUCTIBLE_DAMAGED,
_BET.DESTRUCTIBLE_DESTROYED: _FET.DESTRUCTIBLE_DESTROYED,
_BET.DESTRUCTIBLES_DEFENDED: _FET.DESTRUCTIBLES_DEFENDED,
_BET.DEFENDER_BONUS: _FET.DEFENDER_BONUS,
_BET.SMOKE_ASSIST: _FET.SMOKE_ASSIST,
_BET.INSPIRE_ASSIST: _FET.INSPIRE_ASSIST,
_BET.MULTI_STUN: _FET.PLAYER_STUN_ENEMIES,
_BET.EQUIPMENT_TIMER_EXPIRED: _FET.EQUIPMENT_TIMER_EXPIRED}
_PLAYER_FEEDBACK_EXTRA_DATA_CONVERTERS = {_FET.PLAYER_DAMAGED_HP_ENEMY: _unpackDamage,
_FET.PLAYER_ASSIST_TO_KILL_ENEMY: _unpackDamage,
_FET.PLAYER_CAPTURED_BASE: _unpackInteger,
_FET.PLAYER_DROPPED_CAPTURE: _unpackInteger,
_FET.PLAYER_BLOCKED_CAPTURE: _unpackInteger,
_FET.PLAYER_USED_ARMOR: _unpackDamage,
_FET.PLAYER_DAMAGED_DEVICE_ENEMY: _unpackCrits,
_FET.ENEMY_DAMAGED_HP_PLAYER: _unpackDamage,
_FET.ENEMY_DAMAGED_DEVICE_PLAYER: _unpackCrits,
_FET.PLAYER_ASSIST_TO_STUN_ENEMY: _unpackDamage,
_FET.VEHICLE_VISIBILITY_CHANGED: _unpackVisibility,
_FET.VEHICLE_DETECTED: _unpackVisibility,
_FET.DESTRUCTIBLE_DAMAGED: _unpackInteger,
_FET.DESTRUCTIBLES_DEFENDED: _unpackInteger,
_FET.SMOKE_ASSIST: _unpackDamage,
_FET.INSPIRE_ASSIST: _unpackDamage,
_FET.PLAYER_SPOTTED_ENEMY: _unpackVisibility,
_FET.PLAYER_STUN_ENEMIES: _unpackMultiStun}
def _getShellType(shellTypeID):
    """Map a packed shell type ID to its BATTLE_LOG_SHELL_TYPES member, or None."""
    if shellTypeID == NONE_SHELL_TYPE:
        return None
    return BATTLE_LOG_SHELL_TYPES(shellTypeID)
class _DamageExtra(object):
    """Immutable payload of a damage-related feedback event.

    Wraps the values produced by _BET.unpackDamage: the damage amount, the
    primary/secondary attack reason IDs (indices into ATTACK_REASONS), the
    shell type/quality and role-action flag. The is* predicates classify the
    event by attack reason for the battle log / damage panel.
    """
    __slots__ = ('__damage', '__attackReasonID', '__isBurst', '__shellType', '__isShellGold', '__secondaryAttackReasonID', '__isRoleAction')

    def __init__(self, damage=0, attackReasonID=0, isBurst=False, shellTypeID=NONE_SHELL_TYPE, shellIsGold=False, secondaryAttackReasonID=0, isRoleAction=False):
        super(_DamageExtra, self).__init__()
        self.__damage = damage
        self.__attackReasonID = attackReasonID
        self.__isBurst = bool(isBurst)
        self.__shellType = _getShellType(shellTypeID)
        self.__isShellGold = bool(shellIsGold)
        self.__secondaryAttackReasonID = secondaryAttackReasonID
        self.__isRoleAction = bool(isRoleAction)
        _logger.debug('_DamageExtra isRoleAction = %s', isRoleAction)

    # --- plain accessors -------------------------------------------------
    def getDamage(self):
        return self.__damage

    def getAttackReasonID(self):
        return self.__attackReasonID

    def getSecondaryAttackReasonID(self):
        return self.__secondaryAttackReasonID

    def getShellType(self):
        return self.__shellType

    # --- attack-reason predicates ---------------------------------------
    def isNone(self):
        return self.isAttackReason(ATTACK_REASON.NONE)

    def isBurst(self):
        return self.__isBurst

    def isShellGold(self):
        return self.__isShellGold

    def isFire(self):
        return self.isAttackReason(ATTACK_REASON.FIRE)

    def isBerserker(self):
        return self.isAttackReason(ATTACK_REASON.BERSERKER)

    def isMinefield(self):
        return self.isAttackReason(ATTACK_REASON.MINEFIELD_EQ)

    def isRam(self):
        return self.isAttackReason(ATTACK_REASON.RAM)

    def isShot(self):
        return self.isAttackReason(ATTACK_REASON.SHOT)

    def isWorldCollision(self):
        return self.isAttackReason(ATTACK_REASON.WORLD_COLLISION)

    def isDeathZone(self):
        return self.isAttackReason(ATTACK_REASON.DEATH_ZONE)

    # The primary flag below selects whether the primary or the secondary
    # attack reason is tested; note Python's conditional binds looser than
    # 'or', so each branch is the or-expression as a whole.
    def isProtectionZone(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.ARTILLERY_PROTECTION) or self.isAttackReason(ATTACK_REASON.ARTILLERY_SECTOR) if primary else self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_PROTECTION) or self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_SECTOR)

    def isArtilleryEq(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.ARTILLERY_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_EQ)

    def isFortArtilleryEq(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.FORT_ARTILLERY_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.FORT_ARTILLERY_EQ)

    def isBomberEq(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.BOMBER_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BOMBER_EQ)

    def isBombers(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.BOMBERS) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BOMBERS)

    def isMineField(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.MINEFIELD_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.MINEFIELD_EQ)

    def isDamagingSmoke(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.SMOKE) if primary else self.isSecondaryAttackReason(ATTACK_REASON.SMOKE)

    def isCorrodingShot(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.CORRODING_SHOT) if primary else self.isSecondaryAttackReason(ATTACK_REASON.CORRODING_SHOT)

    def isFireCircle(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.FIRE_CIRCLE) if primary else self.isSecondaryAttackReason(ATTACK_REASON.FIRE_CIRCLE)

    def isThunderStrike(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.THUNDER_STRIKE) if primary else self.isSecondaryAttackReason(ATTACK_REASON.THUNDER_STRIKE)

    def isAttackReason(self, attackReason):
        # Primary reason ID resolved through the ATTACK_REASONS lookup table.
        return ATTACK_REASONS[self.__attackReasonID] == attackReason

    def isSecondaryAttackReason(self, attackReason):
        return ATTACK_REASONS[self.__secondaryAttackReasonID] == attackReason

    def isRoleAction(self):
        return self.__isRoleAction

    def isSpawnedBotExplosion(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.SPAWNED_BOT_EXPLOSION) if primary else self.isSecondaryAttackReason(ATTACK_REASON.SPAWNED_BOT_EXPLOSION)

    def isSpawnedBotRam(self, primary=True):
        return self.isAttackReason(ATTACK_REASON.BRANDER_RAM) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BRANDER_RAM)

    def isClingBrander(self):
        # A shot whose secondary reason marks a clinging brander.
        isShot = self.isAttackReason(ATTACK_REASON.SHOT)
        isClingBrander = self.isSecondaryAttackReason(ATTACK_REASON.CLING_BRANDER)
        return isShot and isClingBrander

    def isClingBranderRam(self):
        return self.isAttackReason(ATTACK_REASON.CLING_BRANDER_RAM)
class _VisibilityExtra(object):
    """Immutable payload of a spotting/visibility feedback event.

    isVisible -- whether the target became visible (True) or was lost (False).
    isDirect -- whether the spotting was direct (vs. relayed) -- presumably;
        confirm against _BET.unpackVisibility.
    isRoleAction -- whether the event counts towards the crew role action.
    """
    __slots__ = ('__isVisible', '__isDirect', '__isRoleAction')

    def __init__(self, isVisible, isDirect, isRoleAction):
        super(_VisibilityExtra, self).__init__()
        self.__isVisible = isVisible
        self.__isDirect = isDirect
        self.__isRoleAction = bool(isRoleAction)
        _logger.debug('_VisibilityExtra isRoleAction = %s', isRoleAction)

    def isVisible(self):
        return self.__isVisible

    def isDirect(self):
        return self.__isDirect

    def isRoleAction(self):
        return self.__isRoleAction
class _MultiStunExtra(object):
__slots__ = ('__targetsAmount', '__isRoleAction')
def __init__(self, targetsAmount, isRoleAction):
super(_MultiStunExtra, self).__init__()
self.__targetsAmount = targetsAmount
self.__isRoleAction = bool(isRoleAction)
_logger.debug('_StunExtra isRoleAction = %s', isRoleAction)
def getTargetsAmount(self):
return self.__targetsAmount
def isRoleAction(self):
return self.__isRoleAction
class _CritsExtra(object):
__slots__ = ('__critsCount', '__shellType', '__isShellGold', '__attackReasonID', '__secondaryAttackReasonID')
def __init__(self, critsCount=0, attackReasonID=0, shellTypeID=NONE_SHELL_TYPE, shellIsGold=False, secondaryAttackReasonID=0):
super(_CritsExtra, self).__init__()
self.__critsCount = critsCount
self.__attackReasonID = attackReasonID
self.__shellType = _getShellType(shellTypeID)
self.__isShellGold = bool(shellIsGold)
self.__secondaryAttackReasonID = secondaryAttackReasonID
def getCritsCount(self):
return self.__critsCount
def getShellType(self):
return self.__shellType
def isShellGold(self):
return self.__isShellGold
def isFire(self):
return self.isAttackReason(ATTACK_REASON.FIRE)
def isBerserker(self):
return self.isAttackReason(ATTACK_REASON.BERSERKER)
def isMinefield(self):
return self.isAttackReason(ATTACK_REASON.MINEFIELD_EQ)
def isDamagingSmoke(self):
return self.isAttackReason(ATTACK_REASON.SMOKE)
def isCorrodingShot(self):
return self.isAttackReason(ATTACK_REASON.CORRODING_SHOT)
def isFireCircle(self):
return self.isAttackReason(ATTACK_REASON.FIRE_CIRCLE)
def isThunderStrike(self):
return self.isAttackReason(ATTACK_REASON.THUNDER_STRIKE)
def isRam(self):
return self.isAttackReason(ATTACK_REASON.RAM)
def isShot(self):
return self.isAttackReason(ATTACK_REASON.SHOT)
def isWorldCollision(self):
return self.isAttackReason(ATTACK_REASON.WORLD_COLLISION)
def isDeathZone(self):
return self.isAttackReason(ATTACK_REASON.DEATH_ZONE)
def isProtectionZone(self, primary=True):
return self.isAttackReason(ATTACK_REASON.ARTILLERY_PROTECTION) or self.isAttackReason(ATTACK_REASON.ARTILLERY_SECTOR) if primary else self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_PROTECTION) or self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_SECTOR)
def isArtilleryEq(self, primary=True):
return self.isAttackReason(ATTACK_REASON.ARTILLERY_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_EQ)
def isFortArtilleryEq(self, primary=True):
return self.isAttackReason(ATTACK_REASON.FORT_ARTILLERY_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.FORT_ARTILLERY_EQ)
def isBomberEq(self, primary=True):
return self.isAttackReason(ATTACK_REASON.BOMBER_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BOMBER_EQ)
def isBombers(self, primary=True):
return self.isAttackReason(ATTACK_REASON.BOMBERS) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BOMBERS)
def isSecondaryAttackReason(self, attackReason):
return ATTACK_REASONS[self.__secondaryAttackReasonID] == attackReason
def isAttackReason(self, attackReason):
return ATTACK_REASONS[self.__attackReasonID] == attackReason
def isClingBrander(self):
isShot = self.isAttackReason(ATTACK_REASON.SHOT)
isClingBrander = self.isSecondaryAttackReason(ATTACK_REASON.CLING_BRANDER)
return isShot and isClingBrander
def isClingBranderRam(self):
return self.isAttackReason(ATTACK_REASON.CLING_BRANDER_RAM)
class _FeedbackEvent(object):
__slots__ = ('__eventType',)
def __init__(self, feedbackEventType):
super(_FeedbackEvent, self).__init__()
self.__eventType = feedbackEventType
def getType(self):
return self.__eventType
@staticmethod
def fromDict(summaryData, additionalData=None):
raise NotImplementedError
class PlayerFeedbackEvent(_FeedbackEvent):
__slots__ = ('__battleEventType', '__targetID', '__count', '__extra', '__attackReasonID', '__isBurst', '__role')
def __init__(self, feedbackEventType, eventType, targetID, count, role, extra):
super(PlayerFeedbackEvent, self).__init__(feedbackEventType)
self.__battleEventType = eventType
self.__targetID = targetID
self.__count = count
self.__role = role
self.__extra = extra
@staticmethod
def fromDict(battleEventData, additionalData=None):
battleEventType = battleEventData['eventType']
if battleEventType in _BATTLE_EVENT_TO_PLAYER_FEEDBACK_EVENT:
feedbackEventType = _BATTLE_EVENT_TO_PLAYER_FEEDBACK_EVENT[battleEventType]
if feedbackEventType in _PLAYER_FEEDBACK_EXTRA_DATA_CONVERTERS:
converter = _PLAYER_FEEDBACK_EXTRA_DATA_CONVERTERS[feedbackEventType]
extra = converter(battleEventData['details'])
else:
extra = None
role = ROLE_TYPE_TO_LABEL[ROLE_TYPE.NOT_DEFINED]
if additionalData is not None:
role = ROLE_TYPE_TO_LABEL[additionalData.get('role') or ROLE_TYPE.NOT_DEFINED]
return PlayerFeedbackEvent(feedbackEventType, battleEventData['eventType'], battleEventData['targetID'], battleEventData['count'], role, extra)
else:
return
def getBattleEventType(self):
return self.__battleEventType
def getTargetID(self):
return self.__targetID
def getExtra(self):
return self.__extra
def getCount(self):
return self.__count
def getRole(self):
return self.__role
class BattleSummaryFeedbackEvent(_FeedbackEvent):
__slots__ = ('__damage', '__trackAssistDamage', '__radioAssistDamage', '__blockedDamage', '__stunAssist')
def __init__(self, damage, trackAssist, radioAssist, tankings, stunAssist):
super(BattleSummaryFeedbackEvent, self).__init__(_FET.DAMAGE_LOG_SUMMARY)
self.__damage = damage
self.__trackAssistDamage = trackAssist
self.__radioAssistDamage = radioAssist
self.__blockedDamage = tankings
self.__stunAssist = stunAssist
@staticmethod
def fromDict(summaryData, additionalData=None):
return BattleSummaryFeedbackEvent(damage=summaryData['damage'], trackAssist=summaryData['trackAssist'], radioAssist=summaryData['radioAssist'], tankings=summaryData['tankings'], stunAssist=summaryData['stunAssist'])
def getTotalDamage(self):
return self.__damage
def getTotalAssistDamage(self):
return self.__trackAssistDamage + self.__radioAssistDamage
def getTotalBlockedDamage(self):
return self.__blockedDamage
def getTotalStunDamage(self):
return self.__stunAssist
class PostmortemSummaryEvent(_FeedbackEvent):
__slots__ = ('__killerID', '__deathReasonID')
def __init__(self, lastKillerID, lastDeathReasonID):
super(PostmortemSummaryEvent, self).__init__(_FET.POSTMORTEM_SUMMARY)
self.__killerID = lastKillerID
self.__deathReasonID = lastDeathReasonID
@staticmethod
def fromDict(summaryData, additionalData=None):
return PostmortemSummaryEvent(lastKillerID=summaryData['lastKillerID'], lastDeathReasonID=summaryData['lastDeathReasonID'])
def getKillerID(self):
return self.__killerID
def getDeathReasonID(self):
return self.__deathReasonID
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
11b6fe0fdea944a4ae7548df8b55aed676c1cadf | 6b69998b3b166dd79767183bcddca28523f076a0 | /dove.py | 5d9f0616e1d20c439d63139639169550a89f60e1 | [] | no_license | papino1409/pino | 2eee850405fad529f18ea0385211ae53ab2dc341 | efca474bf573c72e072a5cae7cb043fab3a40857 | refs/heads/master | 2020-05-07T15:16:49.773371 | 2019-04-12T09:17:08 | 2019-04-12T09:17:08 | 180,141,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | n = input("n")
n = int(n)
r = n % 2
if (r == 0):
print("le nombre n est pair")
else:
print("le nombre n est impair")
| [
"chairmangueye@gmail.com"
] | chairmangueye@gmail.com |
a530938144a63c5a9f3305cb42b937ac7024ab99 | 4413435a82e1153f6a28eb22df1748172cf2e1cd | /engine.py | 1b3a1c4b5083b118bcf3e0c8cc44d20668344602 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | Gurpreet-bit/NLP-Project-on-LDA-Topic-Modelling-Python-using-RACE-Dataset | cfd8edd6f86a97abd102b6b893343c44af562138 | 67ac8d278922a4e34cccec978055c9409b8a2098 | refs/heads/main | 2023-08-02T15:27:19.703466 | 2021-10-07T19:05:45 | 2021-10-07T19:05:45 | 414,613,729 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,085 | py | # Importing libraries
import numpy as np
import pandas as pd
import nltk
# nltk.download('punkt')
import re
# nltk.download('stopwords')
from nltk.corpus import stopwords
# stop_words = stopwords.words('english')
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
le=WordNetLemmatizer()
import logging
logger = logging.getLogger(__name__)
import warnings
warnings.filterwarnings("ignore")
from tqdm import tqdm
tqdm.pandas(desc="progress bar!")
import scipy.stats as stats
from collections import Counter
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.decomposition import NMF, LatentDirichletAllocation, TruncatedSVD
from sklearn.metrics.pairwise import euclidean_distances
from collections import Counter
from operator import itemgetter
from ML_pipeline import dataset
from ML_pipeline import pre_processing
from ML_pipeline import vectorizing_dataset
from ML_pipeline import topic_modeling
from ML_pipeline import predict_topic
from ML_pipeline import lsa_model
from ML_pipeline import predict_lsa
from ML_pipeline import utils
from ML_pipeline import tuning_lda
print('script started')
# Reading the dataset
train_documents, test_documents = dataset.read_data("E:/PROJECTpro/PROJECTS/project_2_topic_modelling/Topic_modeling/input/documents.csv")
# Text Preprocessing
## New column having the cleaned sentences
train_documents['clean_document'] = train_documents['document'].progress_apply(lambda x: pre_processing.clean_documents(x)[0])
test_documents['clean_document'] = test_documents['document'].progress_apply(lambda x: pre_processing.clean_documents(x)[0])
## New column having the cleaned tokens
train_documents['clean_token'] = train_documents['document'].progress_apply(lambda x: pre_processing.clean_documents(x)[1])
test_documents['clean_token'] = test_documents['document'].progress_apply(lambda x: pre_processing.clean_documents(x)[1])
# train_documents.to_csv('../output/train_documents.csv', index = False)
# test_documents.to_csv('../output/test_documents.csv', index = False)
# Transforming dataset into
## Count Vectorizer
count_vect, count_vect_text = vectorizing_dataset.transform_dataset(train_documents, 'clean_document', 'count')
count_vectorized_test = count_vect.transform(test_documents['clean_document'])
## TFIDF Vectorizer
tfidf_vect, tfidf_vect_text = vectorizing_dataset.transform_dataset(train_documents, 'clean_token', 'tfidf')
tfidf_vectorized_test = tfidf_vect.transform(test_documents['clean_token'])
# Topic Modeling
## LSA
print("--------------LSA starts-------------------")
lsa_model, lsa_top = lsa_model.lsa_model( tfidf_vect_text , '../output/lsa_model_trained.pkl')
documet_topic_lsa = predict_lsa.topics_document(model_output= lsa_top, n_topics=10, data=train_documents)
lsa_keys = utils.get_keys(lsa_top)
lsa_categories, lsa_counts = utils.keys_to_counts(lsa_keys)
print("----------------LSA ends--------------------")
## LDA
print("--------------LDA starts-------------------")
lda_model, lda_model_output = topic_modeling.modeling(count_vect_text, 'count', model_path='../output/lda_trained.pkl')
'''
# Takes too much time. Run this if you have efficient computer CPU.
search_params = {'n_components': [10, 15, 20], 'learning_decay': [.5, .7, .9]}
best_lda_model = tuning_lda.tune_lda(search_params, count_vect_text, "../output/best_lda_model.pkl" )
'''
print("--------------LDA ends---------------------")
# ## NMF
print("--------------NMF starts---------------------")
nmf_model, nmf_model_output = topic_modeling.modeling(tfidf_vect_text, 'tfidf', model_path='../output/nmf_trained.pkl')
print("--------------NMF ends---------------------")
# # # Predict topic
## LDA
topic_seris_lda = predict_topic.topic_document(lda_model, count_vectorized_test, 10)
## NMF
topic_seris_nmf = predict_topic.topic_document(nmf_model, tfidf_vectorized_test, 13)
# ## Exporting the dataset with the topic attached
test_documents['index'] = [i for i in range(len(test_documents))]
## LDA
test_documents_lda = pd.merge(test_documents[['index','document']], topic_seris_lda, on = ['index'], how = 'left')
## NMF
test_documents_nmf = pd.merge(test_documents[['index','document']], topic_seris_nmf, on = ['index'], how = 'left')
path = '../output'
# LDA
test_documents_lda[['document','dominant_topic']].to_csv(path+'/'+'test_lda_1.csv', index=False)
# NMF
test_documents_nmf[['document','dominant_topic']].to_csv(path+'/'+'test_nmf_1.csv', index=False)
print('script completed successfully') | [
"noreply@github.com"
] | Gurpreet-bit.noreply@github.com |
cccef2848b460e2d4cb059a546f5c6cdd384ce29 | 6e23941b992fb00236ce7541ac4fc24f855ed265 | /intro_for_lang_resarch/ch14/ch14-1.py | 5f2595c8e809f3d503b9cd0fec976e267cca1933 | [] | no_license | meshidenn/shakyo | 4e7fac2a0ff8f10ebb8ed0ea051dd99a66ec716b | 2a2f21b79f60f014a94c863410e960846f99b48e | refs/heads/master | 2021-01-16T11:30:45.218546 | 2019-05-19T08:39:52 | 2019-05-19T08:39:52 | 99,998,914 | 0 | 0 | null | 2018-01-14T23:56:41 | 2017-08-11T06:28:41 | Jupyter Notebook | UTF-8 | Python | false | false | 1,128 | py |
# -*- conding: utf-8 -*-
# KWIC検索
target = '言う'
context_width = 10
words = []
header = True
# ファイルを読み込んで単語リストを作成
datafile = open('b2.txt', encoding='utf-8')
for line in datafile:
line = line.rstrip()
if header:
header = False
keys = line.split('\t')
continue
values = line.split('\t')
word = dict(zip(keys, values))
words.append(word)
# 検索
for i in range(len(words)):
# 検索語が見つかったら
if words[i]['語彙素'] == target:
# 左側文脈を作成
left_context = ''
for j in range(i - context_width, i):
if j < 0:
continue
left_context += words[j]['書字形']
# 右側文脈を作成
right_context = ''
for j in range(i + 1, i + 1 + context_width):
if j >= len(words):
continue
right_context += words[j]['書字形']
# 出力
output = '\t'.join([
left_context,
words[i]['書字形'],
right_context])
print(output)
| [
"hiroki@Hiroki-no-MacBook-Air.local"
] | hiroki@Hiroki-no-MacBook-Air.local |
4903e691d9ad56b2315770be44f208ba61b73758 | 8d2ecbf20f7b73c2727a1c7f086b0ceb3e04b74b | /ruhungry/wsgi.py | 3667a354625143159befef1f8a2f80b9c6b37c86 | [] | no_license | tomhogans/ruhungry | 246fd1d5d12d07b36842ec20ee7528a065438342 | 1094788c2734c2db37ad1419473668488c97c21d | refs/heads/master | 2021-01-22T11:29:52.020259 | 2014-04-14T22:49:01 | 2014-04-14T22:49:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for ruhungry project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ruhungry.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"tomhsx@gmail.com"
] | tomhsx@gmail.com |
2bbdbc052ecae23c18c9a3d5c0099519ed97be58 | 848089cbee5ba494affe16322dacfab213f9eba4 | /text_classification/tfidf_svm/svm_model.py | c0ad5a8e0d7da1efc37ce231bc552ef8fafa7da7 | [] | no_license | SFKevin/nlp_semantics | 6b337013bcce6a55a57588b1cb6b9157c4b47f27 | 222c03a74e22281b1129f22f7584e7750f2c47b8 | refs/heads/master | 2020-03-24T16:37:13.013024 | 2018-08-10T02:44:43 | 2018-08-10T02:44:43 | 142,830,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | from sklearn.svm import SVC
import sklearn.metrics as meth
from tfidf_svm import data_utils
def train(x_train,y_train):
model=SVC()
model.fit(x_train,y_train)
return model
def predict(model,x_dev,y_dev):
y_pre=model.predict(x_dev)
acc=meth.accuracy_score(y_dev,y_pre)
f1score=meth.f1_score(y_dev,y_pre)
recall=meth.recall_score(y_dev,y_pre)
return acc,f1score,recall
filepath_pos="E:\\data\\rt-polaritydata\\rt-polarity.pos"
filepath_neg="E:\\data\\rt-polaritydata\\rt-polarity.neg"
if __name__ == '__main__':
x_train,y_train,x_dev,y_dev=data_utils.load_data_and_labels(filepath_pos,filepath_neg)
model=train(x_train,y_train)
acc,f1score,recall=predict(model,x_dev,y_dev)
print("accuracy: {0:.3f}, f1score: {0:.3f}, recall: {0:.3f}".format(acc,f1score,recall))
| [
"365450753@qq.com"
] | 365450753@qq.com |
6a5d15682bbaa458fe83a7acb7339950b92acdcb | 795caca6c497891e2fcd2b0253a209500744c56d | /src/models/continuous_encoder_decoder_models/encoder_decoder_variants/enc_dec_out.py | fe36585660ddbd55eae5ad88b1e6f06abb913378 | [] | no_license | RitaRamo/remote-sensing-images-caption | 29c0e0a6b5352b9b3d62c7315cd4d7ac6b0b7076 | 426d97b5d3688f6c52c51ef6e33872554d55751a | refs/heads/master | 2021-11-24T03:02:00.238003 | 2021-11-04T09:23:20 | 2021-11-04T09:23:20 | 244,619,672 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,534 | py | import torchvision
from torch import nn
import torch
from torch.nn.utils.rnn import pack_padded_sequence
from models.basic_encoder_decoder_models.encoder_decoder import Encoder, Decoder
from models.abtract_model import AbstractEncoderDecoderModel
import torch.nn.functional as F
from embeddings.embeddings import get_embedding_layer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from data_preprocessing.preprocess_tokens import OOV_TOKEN
from embeddings.embeddings import EmbeddingsType
from models.continuous_encoder_decoder_models.encoder_decoder import ContinuousEncoderDecoderModel
from embeddings.embeddings import EmbeddingsType
class VocabAttention(nn.Module):
"""
Attention Network.
"""
def __init__(self, vocab_dim, decoder_dim, embedding_vocab):
"""
:param encoder_dim: feature size of encoded images
:param decoder_dim: size of decoder's RNN
:param attention_dim: size of the attention network
"""
super(VocabAttention, self).__init__()
# linear layer to transform decoder's output
self.decoder_att = nn.Linear(decoder_dim, vocab_dim)
self.full_att = nn.Linear(vocab_dim, 1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1) # softmax layer to calculate weights
self.embedding_vocab = embedding_vocab
def forward(self, decoder_hidden):
"""
Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)
:return: attention weighted encoding, weights
"""
# (batch_size, l_regions (512), regions_dim (300))
vocab = self.embedding_vocab.repeat(decoder_hidden.size()[0], 1, 1)
query = self.decoder_att(decoder_hidden) # (batch_size, 1, encoder_dim)
att2 = self.decoder_att(decoder_hidden) # (batch_size, attention_dim)
# (batch_size, num_pixels,1) -> com squeeze(2) fica (batch_size, l_regions)
att = self.full_att(self.relu(vocab + query.unsqueeze(1))).squeeze(2)
alpha = self.softmax(att) # (batch_size, l_regions)
attention_weighted_encoding = (
vocab * alpha.unsqueeze(2)).sum(dim=1) # (batch_size, encoder_dim)
return attention_weighted_encoding, alpha
class ContinuousDecoderWithOut(Decoder):
def __init__(self, decoder_dim, embed_dim, embedding_type, vocab_size, token_to_id, post_processing, device,
encoder_dim=2048, dropout=0.5):
super(ContinuousDecoderWithOut, self).__init__(decoder_dim, embed_dim,
embedding_type, vocab_size, token_to_id, post_processing, encoder_dim, dropout)
# replace softmax with a embedding layer
self.fc = nn.Linear(decoder_dim, embed_dim)
list_wordid = list(range(vocab_size)) # ignore first 4 special tokens : "start,end, unknow, padding"
vocab = torch.transpose(torch.tensor(list_wordid).unsqueeze(-1), 0, 1)
embedding_vocab = self.embedding(vocab).to(device)
self.attention_out = VocabAttention(embed_dim, decoder_dim, embedding_vocab) # attention network
def forward(self, word, encoder_out, decoder_hidden_state, decoder_cell_state):
embeddings = self.embedding(word)
decoder_hidden_state, decoder_cell_state = self.decode_step(
embeddings, (decoder_hidden_state, decoder_cell_state)
)
scores, alpha_out = self.attention_out(self.dropout(decoder_hidden_state))
return scores, decoder_hidden_state, decoder_cell_state, alpha_out
class ContinuousEncoderDecoderOutModel(ContinuousEncoderDecoderModel):
def __init__(self,
args,
vocab_size,
token_to_id,
id_to_token,
max_len,
device
):
super().__init__(args, vocab_size, token_to_id, id_to_token, max_len, device)
def _initialize_encoder_and_decoder(self):
if (self.args.embedding_type not in [embedding.value for embedding in EmbeddingsType]):
raise ValueError(
"Continuous model should use pretrained embeddings...")
self.encoder = Encoder(self.args.image_model_type,
enable_fine_tuning=self.args.fine_tune_encoder)
self.decoder = ContinuousDecoderWithOut(
encoder_dim=self.encoder.encoder_dim,
decoder_dim=self.args.decoder_dim,
embedding_type=self.args.embedding_type,
embed_dim=self.args.embed_dim,
vocab_size=self.vocab_size,
token_to_id=self.token_to_id,
post_processing=self.args.post_processing,
device=self.device,
dropout=self.args.dropout
)
self.decoder.normalize_embeddings(self.args.no_normalization)
self.encoder = self.encoder.to(self.device)
self.decoder = self.decoder.to(self.device)
def _predict(self, encoder_out, caps, caption_lengths):
batch_size = encoder_out.size(0)
num_pixels = encoder_out.size(1)
# Create tensors to hold word predicion scores and alphas
all_predictions = torch.zeros(batch_size, max(
caption_lengths), self.decoder.embed_dim).to(self.device)
all_alphas_out = torch.zeros(batch_size, max(
caption_lengths), self.vocab_size).to(self.device)
h, c = self.decoder.init_hidden_state(encoder_out)
# Predict
for t in range(max(
caption_lengths)):
# batchsizes of current time_step are the ones with lenght bigger than time-step (i.e have not fineshed yet)
batch_size_t = sum([l > t for l in caption_lengths])
predictions, h, c, alpha_out = self.decoder(
caps[:batch_size_t, t], encoder_out[:batch_size_t], h[:batch_size_t], c[:batch_size_t])
all_predictions[:batch_size_t, t, :] = predictions
all_alphas_out[:batch_size_t, t, :] = alpha_out
return {"predictions": all_predictions, "alpha_out": all_alphas_out}
def generate_output_index(self, input_word, encoder_out, h, c):
predictions, h, c, _ = self.decoder(
input_word, encoder_out, h, c)
current_output_index = self._convert_prediction_to_output(predictions)
return current_output_index, h, c
| [
"rita.mparada.ramos@gmail.com"
] | rita.mparada.ramos@gmail.com |
5e9884959b2d7380687f93a72347f7fb57d95520 | 622afef190cb1e874608c574411a2789d2141391 | /maddpg-pytorch/main.py | 574f4810aeb0b249c230b109efe39333abc8df3c | [
"MIT"
] | permissive | kshitijkg/Multiagent-RL | 030701f9c6fccbad81ac6748ead0b5ab52306ac1 | 88ae7565a6c6575e5479cc9ee3c4427f8c7412e9 | refs/heads/master | 2023-02-04T04:39:41.455205 | 2020-12-21T11:50:00 | 2020-12-21T11:50:00 | 321,777,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,290 | py | import argparse
import torch
import time
import os
import numpy as np
from gym.spaces import Box, Discrete
from pathlib import Path
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from utils.make_env import make_env
from utils.buffer import ReplayBuffer, PriorityReplayBuffer
from utils.env_wrappers import SubprocVecEnv, DummyVecEnv
from algorithms.maddpg import MADDPG
import pickle
USE_CUDA = torch.cuda.is_available()
def make_parallel_env(env_id, n_rollout_threads, seed, discrete_action, benchmark):
def get_env_fn(rank):
def init_env():
env = make_env(env_id, benchmark, discrete_action=discrete_action)
env.seed(seed + rank * 1000)
np.random.seed(seed + rank * 1000)
return env
return init_env
if n_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(n_rollout_threads)])
def run(config):
device = torch.device('cuda' if USE_CUDA else 'cpu')
print('Using device:', device)
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB')
model_dir = Path('./models') / config.env_id / config.model_name
if not model_dir.exists():
curr_run = 'run1'
else:
exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
model_dir.iterdir() if
str(folder.name).startswith('run')]
if len(exst_run_nums) == 0:
curr_run = 'run1'
else:
curr_run = 'run%i' % (max(exst_run_nums) + 1)
run_dir = model_dir / curr_run
log_dir = run_dir / 'logs'
os.makedirs(log_dir)
print(str(log_dir))
logger = SummaryWriter(str(log_dir))
#logger = None
f = open(run_dir / "hyperparametrs.txt","w+")
f.write(str(config))
torch.manual_seed(config.seed)
np.random.seed(config.seed)
if not USE_CUDA:
torch.set_num_threads(config.n_training_threads)
env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
config.discrete_action, config.benchmark)
maddpg = MADDPG.init_from_env(env, agent_alg=config.agent_alg,
adversary_alg=config.adversary_alg,
tau=config.tau,
lr=config.lr,
hidden_dim=config.hidden_dim,
stochastic = config.stochastic,
commonCritic = config.commonCritic, gasil = config.gasil, dlr = config.dlr, lambda_disc = config.lambda_disc,
batch_size_disc = config.batch_size_disc, dynamic=config.dynamic)
replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
[obsp.shape[0] for obsp in env.observation_space],
[acsp.shape[0] if isinstance(acsp, Box) else acsp.n
for acsp in env.action_space])
expert_replay_buffer = PriorityReplayBuffer(config.expert_buffer_length, config.episode_length, maddpg.nagents,
[obsp.shape[0] for obsp in env.observation_space],
[acsp.shape[0] if isinstance(acsp, Box) else acsp.n
for acsp in env.action_space])
t = 0
agent_info = [[[] for i in range(config.n_rollout_threads)]]
reward_info = []
total_returns = []
eval_trajectories = []
expert_average_returns = []
trajectories = []
durations = []
start_time = time.time()
expert_trajectories = []
evaluation_rewards = []
for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
print("Episodes %i-%i of %i" % (ep_i + 1,
ep_i + 1 + config.n_rollout_threads,
config.n_episodes))
if ep_i%100 == 0:
mins = (time.time() - start_time)/60
durations.append(mins)
print(mins, "minutes")
start_time = time.time()
obs = env.reset()
# obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
maddpg.prep_rollouts(device='cpu')
explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
maddpg.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
maddpg.reset_noise()
current_episode = [[] for i in range(config.n_rollout_threads)]
current_trajectory = [[] for i in range(config.n_rollout_threads)]
current_entities = []
total_dense = None
if config.store_traj:
cur_state_ent = env.getStateEntities()
for i in range(config.n_rollout_threads):
current_entities.append(cur_state_ent[i])
cur_state = env.getState()
for i in range(config.n_rollout_threads):
current_trajectory[i].append(cur_state[i])
for et_i in range(config.episode_length):
# rearrange observations to be per agent, and convert to torch Variable
torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
requires_grad=False)
for i in range(maddpg.nagents)]
# get actions as torch Variables
torch_agent_actions = maddpg.step(torch_obs, explore=True)
# convert actions to numpy arrays
agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
# rearrange actions to be per environment
actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
next_obs, rewards, dones, infos = env.step(actions)
if config.store_traj:
cur_state = env.getState()
for i in range(config.n_rollout_threads):
current_trajectory[i].append(cur_state[i])
for i in range(config.n_rollout_threads):
current_episode[i].append([obs[i], actions[i]])
if config.benchmark:
#Fix this
for i, info in enumerate(infos):
agent_info[-1][i].append(info['n'])
if et_i == 0:
total_dense = rewards
else:
total_dense = total_dense + rewards
replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
obs = next_obs
t += config.n_rollout_threads
if (len(replay_buffer) >= config.batch_size and
(t % config.steps_per_update) < config.n_rollout_threads and
((expert_replay_buffer.num_traj*config.episode_length >= config.batch_size_disc) == (maddpg.gasil))):
if USE_CUDA:
maddpg.prep_training(device='gpu')
else:
maddpg.prep_training(device='cpu')
if maddpg.gasil:
for update_i in range(config.num_disc_updates):
sample_normal = replay_buffer.sample(config.batch_size,to_gpu=USE_CUDA, norm_rews = False)
sample_expert = expert_replay_buffer.sample(config.batch_size_disc,
to_gpu=USE_CUDA)
maddpg.gasil_disc_update(sample_normal, sample_expert, 0, logger=logger, num_disc_permutations = config.num_disc_permutations)
for update_i in range(config.num_AC_updates):
sample_normal = replay_buffer.sample(config.batch_size,to_gpu=USE_CUDA, norm_rews = False)
maddpg.gasil_AC_update(sample_normal, 0, episode_num = ep_i, logger=logger, num_AC_permutations = config.num_AC_permutations)
else:
for update_i in range(config.num_AC_updates):
sample_normal = replay_buffer.sample(config.batch_size,to_gpu=USE_CUDA, norm_rews = False)
maddpg.update(sample_normal, 0, logger=logger, num_AC_permutations = config.num_AC_permutations)
maddpg.update_all_targets()
maddpg.prep_rollouts(device='cpu')
total_returns.append(total_dense)
if maddpg.gasil:
expert_replay_buffer.push(current_episode, total_dense, config.n_rollout_threads, current_entities, current_trajectory, config.store_traj)
expert_average_returns.append(expert_replay_buffer.get_average_return())
if config.store_traj:
for i in range(config.n_rollout_threads):
trajectories.append([current_entities[i], current_trajectory[i]])
ep_rews = replay_buffer.get_average_rewards(
config.episode_length * config.n_rollout_threads)
for a_i, a_ep_rew in enumerate(ep_rews):
logger.add_scalars('agent%i/rew' % a_i,
{'mean_episode_rewards': a_ep_rew},
ep_i)
logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)
#save mean episode rewards
#save benchmarking data
agent_info.append([[] for i in range(config.n_rollout_threads)])
reward_info.append(ep_rews)
if ep_i % config.save_interval < config.n_rollout_threads:
os.makedirs(run_dir / 'incremental', exist_ok=True)
maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
maddpg.save(run_dir / 'model.pt')
#save the trajectories in the expert replay buffer
trajec = expert_replay_buffer.get_trajectories()
if config.store_traj:
expert_trajectories.append(trajec)
if ep_i % config.eval_interval < config.n_rollout_threads:
current_eval = []
current_trajectories = []
for ep_i_eval in range(0, config.n_eval_episodes, config.n_rollout_threads):
obs = env.reset()
total_eval = None
maddpg.prep_rollouts(device='cpu')
if config.store_traj:
current_trajectory = [[] for i in range(config.n_rollout_threads)]
current_entities = []
cur_state_ent = env.getStateEntities()
for i in range(config.n_rollout_threads):
current_entities.append(cur_state_ent[i])
cur_state = env.getState()
for i in range(config.n_rollout_threads):
current_trajectory[i].append(cur_state[i])
for et_i in range(config.episode_length):
torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
requires_grad=False)
for i in range(maddpg.nagents)]
torch_agent_actions = maddpg.step(torch_obs, explore=False)
agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
next_obs, rewards, dones, infos = env.step(actions)
if config.store_traj:
cur_state = env.getState()
for i in range(config.n_rollout_threads):
current_trajectory[i].append(cur_state[i])
if et_i == 0:
total_eval = rewards
else:
total_eval = total_eval + rewards
obs = next_obs
current_eval.append(total_eval)
if config.store_traj:
for i in range(config.n_rollout_threads):
current_trajectories.append([current_entities[i], current_trajectory[i]])
if config.store_traj:
eval_trajectories.append(current_trajectories)
evaluation_rewards.append(current_eval)
if config.store_traj:
with open(run_dir / 'static_trajectories.pkl', 'wb') as fp:
pickle.dump(trajectories, fp)
with open(run_dir / 'eval_static_trajectories.pkl', 'wb') as fp:
pickle.dump(eval_trajectories, fp)
if config.benchmark:
with open(run_dir / 'info.pkl', 'wb') as fp:
pickle.dump(agent_info, fp)
with open(run_dir / 'rew.pkl', 'wb') as fp:
pickle.dump(reward_info, fp)
with open(run_dir / 'eval_rew.pkl', 'wb') as fp:
pickle.dump(evaluation_rewards, fp)
with open(run_dir / 'time.pkl', 'wb') as fp:
pickle.dump(durations, fp)
with open(run_dir / 'returns.pkl', 'wb') as fp:
pickle.dump(total_returns, fp)
if maddpg.gasil:
with open(run_dir / 'expert_average.pkl', 'wb') as fp:
pickle.dump(expert_average_returns, fp)
if config.store_traj:
with open(run_dir / 'expert_trajectories.pkl', 'wb') as fp:
pickle.dump(expert_trajectories, fp)
maddpg.save(run_dir / 'model.pt')
env.close()
logger.export_scalars_to_json(str(log_dir / 'summary.json'))
logger.close()
if __name__ == '__main__':
    # CLI for the MADDPG/GASIL training entry point defined above.
    parser = argparse.ArgumentParser()
    parser.add_argument("env_id", help="Name of environment")
    parser.add_argument("model_name",
                        help="Name of directory to store " +
                        "model/training contents")
    parser.add_argument("--seed",
                        default=1, type=int,
                        help="Random seed")
    # Rollout / replay-buffer configuration.
    parser.add_argument("--n_rollout_threads", default=1, type=int)
    parser.add_argument("--n_training_threads", default=6, type=int)
    parser.add_argument("--buffer_length", default=int(1e6), type=int)
    parser.add_argument("--expert_buffer_length", default=int(25), type=int)
    parser.add_argument("--n_episodes", default=25000, type=int)
    parser.add_argument("--n_eval_episodes", default=10, type=int)
    parser.add_argument("--episode_length", default=25, type=int)
    parser.add_argument("--steps_per_update", default=100, type=int)
    parser.add_argument("--batch_size",
                        default=512, type=int,
                        help="Batch size for model training")
    parser.add_argument("--batch_size_disc",
                        default=256, type=int,
                        help="Batch size for model training")
    # GASIL discriminator / actor-critic update schedule.
    parser.add_argument("--num_disc_updates", default=4, type=int,
                        help="number of Discriminator mini batches")
    parser.add_argument("--num_AC_updates", default=16, type=int,
                        help="number of Critic, Policy mini batches")
    parser.add_argument("--num_disc_permutations", default=4, type=int,
                        help="number of discriminator permutations")
    parser.add_argument("--num_AC_permutations", default=4, type=int,
                        help="number of AC permutations")
    # Exploration noise annealing and checkpoint/eval cadence.
    parser.add_argument("--n_exploration_eps", default=25000, type=int)
    parser.add_argument("--init_noise_scale", default=0.3, type=float)
    parser.add_argument("--final_noise_scale", default=0.0, type=float)
    parser.add_argument("--save_interval", default=1000, type=int)
    parser.add_argument("--eval_interval", default=10, type=int)
    # Network / optimizer hyper-parameters.
    parser.add_argument("--hidden_dim", default=64, type=int)
    parser.add_argument("--lr", default=0.01, type=float)
    parser.add_argument("--dlr", default=0.0003, type=float)
    parser.add_argument("--lambda_disc", default=0.5, type=float)
    parser.add_argument("--tau", default=0.01, type=float)
    parser.add_argument("--agent_alg",
                        default="MADDPG", type=str,
                        choices=['MADDPG', 'DDPG'])
    parser.add_argument("--rew_shape",
                        default=0, type=int,
                        choices=[0, 1, 2])
    parser.add_argument("--adversary_alg",
                        default="MADDPG", type=str,
                        choices=['MADDPG', 'DDPG'])
    # Boolean feature switches (all default to off).
    parser.add_argument("--discrete_action",
                        action='store_true')
    parser.add_argument("--store_traj",
                        action='store_true')
    parser.add_argument("--sparse_reward",
                        action='store_true')
    parser.add_argument("--benchmark",
                        action='store_true')
    parser.add_argument("--stochastic",
                        action='store_true')
    parser.add_argument("--commonCritic",
                        action='store_true')
    parser.add_argument("--gasil",
                        action='store_true')
    parser.add_argument("--dynamic",
                        action='store_true')
    config = parser.parse_args()

    run(config)
| [
"kshitijkg01@gmail.com"
] | kshitijkg01@gmail.com |
3c53f3b56c8fe27246965a9a15cffc40b7a70f37 | bbdd0cca4912d7a9ae78ea3144ac6ae67a4e2448 | /introduction/28_feature_matching_orb.py | 6dc73a7c2dd54c8244dab24ac6aea6bb3611b733 | [
"MIT"
] | permissive | Tenjin0/python-opencv-base | 4f94d69b2601c90cbad2a36acfec7df2040c72a9 | b9732f24de688547b6d45b9d796d0ff458902874 | refs/heads/master | 2020-03-21T15:12:56.346844 | 2019-04-18T08:04:00 | 2019-04-18T08:04:00 | 138,700,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py |
import numpy as np
import cv2
import sys
from matplotlib import pyplot as plt

# Query (logo) and scene images, loaded as grayscale for ORB.
img1 = cv2.imread('images/manowar_logo.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('images/manowar_single.jpg', cv2.IMREAD_GRAYSCALE)
orb = cv2.ORB_create()

if __name__ == "__main__":
    if img1 is None:
        print("img1 not found")
        sys.exit()
    if img2 is None:
        print("img2 not found")
        sys.exit()
    # Detect keypoints and compute binary descriptors in both images.
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # Brute-force matcher with Hamming distance (suited to ORB's binary
    # descriptors); crossCheck keeps only mutual best matches.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    # Draw the 40 best (lowest-distance) matches side by side.
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:40], img2, flags=2)
    plt.imshow(img3), plt.show()
| [
"ppetit@wynd.eu"
] | ppetit@wynd.eu |
4a2fb9f16742d3718a5490b53140ab00b8c65f5a | f6ed7bc808f5536bc77166fe5c3571e5c028f308 | /neptune/internal/common/utils/files.py | c694ca7a2144941196bdd9a68e8df828c7b73206 | [
"Apache-2.0"
] | permissive | jiji-online/neptune-cli | d086bb59725b7545f3e0f80bd89e8f99ff3851a0 | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | refs/heads/main | 2023-07-18T17:56:10.671562 | 2021-09-14T07:54:13 | 2021-09-14T07:54:13 | 406,275,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | #
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import os
import io
def create_empty_file(path):
    """Create (or truncate to zero length) the file at *path*."""
    with io.open(path, 'w'):
        pass
def create_dir_if_nonexistent(dir_path):
    """Create *dir_path* (with parents); an already-existing path is not an error."""
    try:
        os.makedirs(dir_path)
    except OSError as err:
        already_exists = (err.errno == errno.EEXIST)
        if not already_exists:
            raise
| [
"serhii.freidin@jiji.ng"
] | serhii.freidin@jiji.ng |
3034b0e2dc2f6fae511f9a14f1f4e669ce99bf43 | b8e6b9ac7d92941e3b8ee2f97952ff8048d9fed6 | /django_app/config/celery.py | 5f2ee85922eaca996254f9a6b2d7da3b932d1cf8 | [] | no_license | LeeHanYeong/Elasticbeanstalk-Celery-Redis-Elasticache | 00e571d90141ecf987ed5d86a90797f3de7ccae1 | 63c0b8f519a2c90f82d796affa884d3b1a440732 | refs/heads/master | 2020-06-26T10:00:30.498364 | 2017-11-16T19:59:16 | 2017-11-16T19:59:16 | 97,014,281 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import os
from celery import Celery

# Default to the debug settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.debug')

app = Celery('config')
# Read every CELERY_* key from Django settings and register tasks from all apps.
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()


@app.task(bind=True)
def debug_task(self):
    """Diagnostic task: print the wrapping task request (bind=True provides self)."""
    print('Request: {0!r}'.format(self.request))
| [
"dev@azelf.com"
] | dev@azelf.com |
109e4804333d6ebd25d2cfb85fa122d3dfefa62a | d3023a1b2684014a68e2cec7a29081b1b01f32fb | /python2016/day21.py | 7c3d23ecc5df61653bf681af3921c957e2b23d32 | [] | no_license | kdeberk/advent-of-code | 59e51668ee3c7c761e73e8435ad1e3fd53eea15d | 13c7a1d05e2cc75dc4139de34da1642b3369cccd | refs/heads/master | 2023-04-27T10:44:44.114615 | 2023-04-23T19:52:04 | 2023-04-23T19:52:04 | 160,578,986 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,616 | py | # 2016, Day 21.
# Hash a given string using only positional operations. The characters in the
# string are not modified themselves, but only change positions in the string.
# The trick is to implement the operations and their reversals correctly.
#
# Part 1: Hash the string 'abcdefgh' according to the operations in the input file.
# Part 2: Determine original input that produced 'fbgdceah' after applying the
# instructions in the input file.
NAME = "Day 21: Scrambled Letters and Hash"
import re
from functools import reduce
# Grouping the parsing and execution logic in a single class per operation
# allows us to place related logic nearby each other.
class SwapPosition:
    """'swap position X with position Y' — exchange the characters at two indices."""

    REGEX = r'swap position (\d+) with position (\d+)'

    def __init__(self, m):
        self.a = int(m[0])
        self.b = int(m[1])

    def exec(self, state):
        """Swap the elements at indices a and b in place and return the list."""
        left = state[self.a]
        state[self.a] = state[self.b]
        state[self.b] = left
        return state

    # Swapping two positions is its own inverse.
    rev_exec = exec
class SwapLetter:
    """'swap letter X with letter Y' — exchange two letters wherever they occur."""

    REGEX = r'swap letter ([a-z]) with letter ([a-z])'

    def __init__(self, m):
        self.a, self.b = m[0], m[1]

    def exec(self, state):
        """Locate both letters and swap them in place; return the list."""
        i = state.index(self.a)
        j = state.index(self.b)
        state[i], state[j] = state[j], state[i]
        return state

    # Letter swapping is self-inverse.
    rev_exec = exec
class RotateOnPosition:
    """'rotate based on position of letter X'.

    Rotate right by 1 + index(X), plus one extra step when the index is at
    least 4 (the puzzle's rule).  The inverse has no closed form, so
    rev_exec brute-forces the pre-rotation index.
    """

    REGEX = r'rotate based on position of letter ([a-z])'

    def __init__(self, m):
        self.l = m[0]

    def exec(self, state):
        idx = state.index(self.l)
        # Rotation amount from the puzzle rule, wrapped to the string length.
        rot = (idx + 1 + (1 if 4 <= idx else 0)) % len(state)
        return state[-rot:] + state[:-rot]

    def rev_exec(self, state):
        idx = state.index(self.l)
        # Try every candidate original index jdx: the one whose forward
        # rotation would land the letter on its current index idx wins,
        # and state is rotated left by (idx - jdx) to undo it.
        for jdx in range(0, len(state)):
            rot = (jdx + 1 + (1 if 4 <= jdx else 0)) % len(state)
            if idx == (jdx+rot)%len(state):
                return state[idx-jdx:] + state[:idx-jdx]
        assert False
class Rotate:
    """'rotate left/right N steps' — cyclic rotation of the whole string."""

    REGEX = r'rotate (left|right) (\d+) steps?'

    def __init__(self, m):
        self.dir, self.nsteps = m[0], int(m[1])

    def exec(self, state):
        """Rotate *state* by nsteps in the parsed direction; returns a new list.

        Bug fix: the rotation is normalized into [0, len).  The original
        slice pair returned ``state + state`` (a doubled list) whenever the
        effective rotation was 0, e.g. 'rotate left 0 steps'.
        """
        rot = (-self.nsteps if 'left' == self.dir else self.nsteps) % len(state)
        return state[-rot:] + state[:-rot] if rot else state[:]

    def rev_exec(self, state):
        """Inverse rotation: same amount, opposite direction (same normalization)."""
        rot = (self.nsteps if 'left' == self.dir else -self.nsteps) % len(state)
        return state[-rot:] + state[:-rot] if rot else state[:]
class ReversePositions:
    """'reverse positions X through Y' — reverse the inclusive slice [start, end]."""

    REGEX = r'reverse positions (\d) through (\d)'

    def __init__(self, m):
        self.start, self.end = int(m[0]), int(m[1])

    def exec(self, state):
        """Return a new list with the [start, end] span reversed."""
        head = state[:self.start]
        middle = state[self.start:self.end + 1]
        middle.reverse()
        return head + middle + state[self.end + 1:]

    # Reversing a span is self-inverse.
    rev_exec = exec
class Move:
    """'move position X to position Y' — remove the element at src, reinsert at dst."""

    REGEX = r'move position (\d) to position (\d)'

    def __init__(self, m):
        self.src, self.dst = int(m[0]), int(m[1])

    def exec(self, state):
        return self.move(state, self.src, self.dst)

    def rev_exec(self, state):
        # Undo by moving the element back from dst to src.
        return self.move(state, self.dst, self.src)

    def move(self, state, src, dst):
        """Return a new list with state[src] removed and inserted at dst."""
        out = list(state)
        out.insert(dst, out.pop(src))
        return out
def parseInput(stream):
    """Parse each input line into the first operation class whose REGEX matches.

    Returns the list of operation objects in file order.  Lines matching no
    pattern are silently skipped (unchanged from the original behavior).
    """
    ins = []
    for line in stream.readlines():
        for k in [SwapPosition, SwapLetter, RotateOnPosition, Rotate, ReversePositions, Move]:
            if m := re.search(k.REGEX, line):
                ins.append(k(m.groups()))
                # Bug fix: the original used `continue`, which only skipped to
                # the next CLASS and kept testing the remaining patterns
                # against an already-matched line.  `break` stops at the
                # first match, as intended.
                break
    return ins
def part1(ins):
    """Scramble 'abcdefgh' by applying every parsed operation in order."""
    state = list('abcdefgh')
    for op in ins:
        state = op.exec(state)
    return ''.join(state)
def part2(ins):
    """Unscramble 'fbgdceah' by applying each operation's inverse in reverse order."""
    state = list('fbgdceah')
    for op in reversed(ins):
        state = op.rev_exec(state)
    return ''.join(state)
| [
"kevin@dberk.nl"
] | kevin@dberk.nl |
21bc6ecaca89a962b6c47a14a1809fc53cb6ae5e | ed90fcbfd1112545fa742e07131159bb3a68246a | /smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/firewall_rules/list.py | 8d62e0bafc6eecf56466830a10565be1b2193749 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | wemanuel/smry | 2588f2a2a7b7639ebb6f60b9dc2833f1b4dee563 | b7f676ab7bd494d71dbb5bda1d6a9094dfaedc0a | refs/heads/master | 2021-01-10T21:56:55.226753 | 2015-08-01T13:37:06 | 2015-08-01T13:37:06 | 40,047,329 | 0 | 1 | Apache-2.0 | 2020-07-24T18:32:40 | 2015-08-01T13:26:17 | Python | UTF-8 | Python | false | false | 441 | py | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for listing firewall rules."""
from googlecloudsdk.compute.lib import base_classes
class List(base_classes.GlobalLister):
"""List Google Compute Engine firewall rules."""
@property
def service(self):
return self.compute.firewalls
@property
def resource_type(self):
return 'firewalls'
List.detailed_help = base_classes.GetGlobalListerHelp('firewall rules')
| [
"wre@thenandchange.org"
] | wre@thenandchange.org |
3bec00ebaf8a064289cbd53ff6761dd1f55da482 | cb68cb780b38b20475f470939a0dc4a4b50d1e84 | /venv/Scripts/pip-script.py | 09814c05a25f131f3154d1dc7364b61af0fa269c | [] | no_license | umairanis03/p2p-FileShare | fb9cec63bfed93047e8f39b25bcf5159fa0aa13f | 3bd71ad737e04a499c4e38bdb6a449727aa9c967 | refs/heads/master | 2021-01-03T19:14:49.261592 | 2020-02-18T16:15:29 | 2020-02-18T16:15:29 | 240,204,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | #!"C:\Users\Umair Anis\PycharmProjects\Networking\P2P\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools wrapper: resolves pip's console_scripts entry
# point and runs it.  Do not edit by hand.
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py' / '.exe' suffix so pip reports its canonical name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"37964910+umairanis03@users.noreply.github.com"
] | 37964910+umairanis03@users.noreply.github.com |
946e3c464678c0a7390eb7be4d83697fa8d64716 | 7a809ced18ec596a11485c8693741baf162df31a | /movie-recommendations/person-similarity-based-recommendations/recommendation.py | 27adbab302a487dd8370aeabb1adbfbd3de4474d | [] | no_license | tszylkiewicz/Machine-learning | df29ea1c8dd893b7ac41519ebb0b3d616278da49 | 4db5e38f84e1a2919ad5aaff052fc1ac4aed1b6d | refs/heads/main | 2023-02-22T06:18:01.733116 | 2021-02-01T20:57:40 | 2021-02-01T20:57:40 | 318,790,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | import argparse
from math import sqrt
import pandas as pd
from scipy import spatial
import operator
from ast import literal_eval
K = 4
def calculate_rating(row, similar_users):
    """Predict a rating for row['movie_id'] from the most-similar users.

    Takes the mean of the ratings given to this movie by (at most) the
    first K users in *similar_users* (assumed sorted by similarity,
    descending) who actually rated it; returns 3 as a neutral fallback
    when none of them did.
    """
    ratings = []
    # All (user_id, rating) pairs in the training set for this movie.
    users_movie_ratings = train.loc[train['movie_id'] == row['movie_id'], ['user_id', 'rating']]
    for user_id, similarity in similar_users.items():
        # NOTE(review): this indexes users_movie_ratings with a boolean mask
        # built from the full `train` frame — pandas aligns on the shared
        # index here, but confirm no misalignment on newer pandas versions.
        value = users_movie_ratings.loc[(train['user_id'] == user_id), 'rating']
        if len(value) > 0:
            ratings.append(value.iloc[0])
        if(len(ratings) > K):
            break
    result = ratings[:K]
    if(len(result) == 0):
        return 3  # neutral default when no similar user rated the movie
    return sum(result) / len(result)
def main(args):
    """Fill the task file's missing ratings via user-based collaborative filtering.

    Reads train/task CSVs ('id;user_id;movie_id;rating'), builds a
    movie x user rating matrix, computes pairwise Pearson similarity
    between users, predicts each task row's rating and writes the
    completed table to the submission file.
    """
    train_file = args['train']
    task_file = args['task']
    submission_file = args['submission']
    # `train` is a module-level global so calculate_rating() can see it.
    global train
    train = pd.read_csv(train_file, sep=';', names=["id", "user_id", "movie_id", "rating"])
    task = pd.read_csv(task_file, sep=';', names=["id", "user_id", "movie_id", "rating"])
    new_df = train.pivot(index='movie_id',columns='user_id',values='rating')
    # Column-wise (user-vs-user) Pearson correlation over co-rated movies.
    correlated_users = new_df.corr(method ='pearson')
    for index, row in task.iterrows():
        print(str(index) + " / " + str(len(task.index)), end='\r')
        # Other users ranked by similarity to this row's user (self excluded).
        similar_users = correlated_users[row['user_id']].copy()
        similar_users = similar_users.drop(labels=row['user_id']).dropna()
        similar_users.sort_values(ascending=False, inplace=True)
        score = calculate_rating(row, similar_users)
        task.loc[index, 'evaluation'] = str(int(round(score)))
    task.to_csv(submission_file, sep=';', index=False, header=False)

if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument("-t", "--train", type=str,
                    default="train.csv", help="Train data file")
    ap.add_argument("-e", "--task", type=str,
                    default="task.csv", help="Task data file")
    ap.add_argument("-s", "--submission", type=str,
                    default="submission.csv", help="Submission data file")
    args = vars(ap.parse_args())
    main(args)
"43911837+tszylkiewicz@users.noreply.github.com"
] | 43911837+tszylkiewicz@users.noreply.github.com |
ef8050413a53ba46fbf7838ae42ee7b94417348b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03304/s635103583.py | df29bcaa3b5769da632eaa3ea1863d89e01068ee | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | import sys
import math
import copy
from heapq import heappush, heappop, heapify
from functools import cmp_to_key
from bisect import bisect_left, bisect_right
from collections import defaultdict, deque, Counter
# sys.setrecursionlimit(1000000)

# input aliases: fast line-based stdin readers used by solve()/main()
input = sys.stdin.readline
getS = lambda: input().strip()                                 # one stripped line
getN = lambda: int(input())                                    # one integer
getList = lambda: list(map(int, input().split()))              # space-separated ints
getZList = lambda: [int(x) - 1 for x in input().split()]       # same, shifted to 0-based

INF = float("inf")
MOD = 10**9 + 7
divide = lambda x: pow(x, MOD-2, MOD)  # modular inverse via Fermat's little theorem
def solve():
    """Solve one test case.

    Reads n, m, d and prints the expected number of adjacent positions i
    (1 <= i < m) with |x_i - x_{i+1}| == d when each of the m values is
    drawn uniformly from 1..n: (favorable pairs / n^2) * (m - 1).
    """
    n, m, d = getList()
    # Ordered pairs (a, b) in [1, n]^2 with |a - b| == d.
    if d == 0:
        favorable = n
    else:
        favorable = (n - d) * 2
    # Renamed from `all`, which shadowed the builtin; the dead intermediate
    # assignment (`ans = all`) and commented-out scratch line were dropped.
    ans = favorable * (m - 1) / (n * n)
    print(ans)
def main():
    """Alternative driver for multi-case inputs: N test cases, one solve() each.

    Currently unused (the guard below calls solve() directly); kept for reference.
    """
    n = getN()
    for _ in range(n):
        solve()
    return

if __name__ == "__main__":
    # main()
    solve()
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d06c47f04c0ab40c37d0c587c686b52a6560a861 | 4e27725c9e3f5a5853d404727a7b550752ebf818 | /0x08-python-more_classes/6-rectangle.py | e04e44c6b6988bc57af4e56b1d37c9fcff1da671 | [] | no_license | vanemcb/holbertonschool-higher_level_programming | 5f382b52760dfdd3e06c6c21099680869477f12a | f496d732677ea936f4d9d831dc5615a15cad87c2 | refs/heads/main | 2023-08-01T00:00:20.812432 | 2021-09-23T01:04:39 | 2021-09-23T01:04:39 | 361,851,246 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | #!/usr/bin/python3
""" Class that defines a rectangle. """
class Rectangle:
    """Rectangle defined by integer width and height, printable as a '#' grid."""

    # Count of live Rectangle instances (incremented in __init__,
    # decremented in __del__).
    number_of_instances = 0

    def __init__(self, width=0, height=0):
        """Initialize a rectangle.

        Args:
            width (int): horizontal size, must be an int >= 0.
            height (int): vertical size, must be an int >= 0.

        Raises:
            TypeError: if width or height is not an integer.
            ValueError: if width or height is negative.
        """
        # Bug fix: assign through the property setters so the constructor
        # validates its arguments; the original wrote to the private
        # attributes directly and silently accepted invalid values.
        self.width = width
        self.height = height
        Rectangle.number_of_instances += 1

    @property
    def width(self):
        """int: horizontal size of the rectangle."""
        return self.__width

    @width.setter
    def width(self, value):
        if not isinstance(value, int):
            raise TypeError("width must be an integer")
        if value < 0:
            raise ValueError("width must be >= 0")
        self.__width = value

    @property
    def height(self):
        """int: vertical size of the rectangle."""
        return self.__height

    @height.setter
    def height(self, value):
        if not isinstance(value, int):
            raise TypeError("height must be an integer")
        if value < 0:
            raise ValueError("height must be >= 0")
        self.__height = value

    def area(self):
        """Return the area (width * height)."""
        return self.__width * self.__height

    def perimeter(self):
        """Return the perimeter; 0 when either dimension is 0."""
        if self.__width == 0 or self.__height == 0:
            return 0
        return 2 * (self.__width + self.__height)

    def __str__(self):
        """Return the rectangle drawn with '#' characters ('' when empty)."""
        if self.__width == 0 or self.__height == 0:
            return ""
        row = "#" * self.__width
        return "\n".join(row for _ in range(self.__height))

    def __repr__(self):
        """Return a string representation able to recreate the rectangle."""
        return "Rectangle({}, {})".format(self.__width, self.__height)

    def __del__(self):
        """Print a farewell message and update the live-instance counter."""
        print("Bye rectangle...")
        Rectangle.number_of_instances -= 1
| [
"vanem2201@gmail.com"
] | vanem2201@gmail.com |
cfd89dbd741b5fef385dbb4f5278621dc28ca23e | 8a2154c8006ecdb1b202c5153e3f0d37a6235553 | /Smart Doorlock.py | e1f75f18c844f5a0a9bf02216f5bea9e881e054f | [] | no_license | HyeonJun97/Capstone_Design1 | cea2761098c301d0b89b25f51b80818e1ceff23f | 08fc3fabc6646113503a81317684436731cae2cd | refs/heads/main | 2023-01-31T08:33:36.918790 | 2020-12-08T05:26:10 | 2020-12-08T05:26:10 | 315,522,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,941 | py | import cv2
import os
import numpy as np
from PIL import Image
import RPi.GPIO as GPIO
import time
import socket
import pyotp
import datetime

# Shared state between the keypad, camera and OTP routines.
global user_name
user_name = ['','','','','']   # registered user names, indexed by face id 0-4
global face_chk
face_chk = 0                   # 0: locked, 1: face OK (awaiting OTP), 2: unlocked
global data
global confidata
confidata = ''                 # digits typed on the keypad so far
global count
count = 0                      # number of keypad presses collected
global securitylevel
securitylevel = 2              # 1: face only, 2: face + OTP

# Motor states
STOP = 0
FORWARD = 1
BACKWARD = 2
CH1 = 0
HIGH = 1
LOW = 0

# Motor driver pins (BCM numbering)
IN1 = 2 #Pin 3
IN2 = 3 #Pin 5
ENA = 4 #Pin 8

# Keypad row (L1-L4, outputs) and column (C1-C4, inputs) pins
L1 = 5
L2 = 6
L3 = 13
L4 = 19
C1 = 12
C2 = 16
C3 = 20
C4 = 21

# Status LED pins
LED1 = 23 #RED LED
LED2 = 24 #GREEN LED

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(L1, GPIO.OUT)
GPIO.setup(L2, GPIO.OUT)
GPIO.setup(L3, GPIO.OUT)
GPIO.setup(L4, GPIO.OUT)
GPIO.setup(C1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(C2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(C3, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(C4, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(LED1, GPIO.OUT)
GPIO.setup(LED2, GPIO.OUT)
def camera(): # User Register
    """Capture 50 grayscale face crops from the webcam for a new user.

    Prompts for a face id (0-4) and a name, stores the name in the global
    user_name table, and writes crops as ./data/<face_id>.<n>.jpg for
    train() to consume later.  ESC aborts early.
    """
    global user_name
    vivi = cv2.VideoCapture(-1)
    vivi.set(3, 640)   # frame width
    vivi.set(4, 480)   # frame height
    face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    face_id = input('\n User ID(0~4): ')
    user_name[int(face_id)] = input('\n User Name: ')
    print('\n Save Face Start')
    # Local image counter (distinct from the global keypad `count`).
    count = 0
    while True:
        ret, img = vivi.read()
        if not ret:
            print('error')
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_detector.detectMultiScale(gray, 1.3, 5)
        for (x,y,w,h) in faces:
            cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
            count += 1
            cv2.imwrite("./data/" + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])
            cv2.imshow('image', img)
        if count >= 50:
            break
        k = cv2.waitKey(100) & 0xff
        if k == 27:  # ESC aborts capture early
            break
    print('\n Save Face Finish')
    vivi.release()
    cv2.destroyAllWindows()
def getImagesAndLabels(path,detector):
    """Load every image under *path* and return (face_crops, ids).

    Filenames follow '<face_id>.<n>.jpg' (written by camera()), so the
    numeric label is recovered from the first dot-separated token of the
    filename.  Each detected face region becomes one training sample.
    """
    imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
    faceSamples=[]
    ids = []
    for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale
        img_numpy = np.array(PIL_img,'uint8')
        id = int(os.path.split(imagePath)[-1].split(".")[0])
        faces = detector.detectMultiScale(img_numpy)
        for (x,y,w,h) in faces:
            faceSamples.append(img_numpy[y:y+h,x:x+w])
            ids.append(id)
    return faceSamples,ids
def train(): #Face Training
    """Train the LBPH face recognizer on every image in ./data; save trainer.yml."""
    path = 'data'
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml");
    print ("\n Training faces")
    faces,ids = getImagesAndLabels(path,detector)
    recognizer.train(faces, np.array(ids))
    recognizer.write('trainer.yml')
    # NOTE(review): the format string has no placeholder, so the .format(...)
    # call is a no-op — the unique-id count is never actually printed.
    print("\n Faces trained".format(len(np.unique(ids))))
def recog(): #User Detect
    """Run live face recognition against trainer.yml and update the lock state.

    On a recognized, named user the green LED is lit and face_chk is set to
    2 (security level 1: unlock) or 1 (level 2: OTP still required).  An
    unknown face or ESC exits without changing face_chk.
    """
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('trainer.yml')
    faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml');
    font = cv2.FONT_HERSHEY_SIMPLEX
    id = 0
    global user_name
    global face_chk
    global securitylevel
    vivi = cv2.VideoCapture(-1)
    vivi.set(3, 640)
    vivi.set(4, 480)
    # Minimum detectable face size: 10% of the frame in each dimension.
    minW = 0.1*vivi.get(3)
    minH = 0.1*vivi.get(4)
    chkconfi = 0
    while True:
        ret, img = vivi.read()
        if not ret:
            print('error')
            break
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.2, minNeighbors = 5, minSize = (int(minW), int(minH)), )
        for(x,y,w,h) in faces:
            cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
            # LBPH `confidence` is a distance: lower means a better match.
            id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
            chkconfi = confidence
            if (confidence < 100):
                id = user_name[id]
                confidence = " {0}%".format(round(100 - confidence))
            else:
                id = "unknown"
                #confidence = " {0}%".format(round(100 - confidence))
            cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)
            cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)
        cv2.imshow('camera', img)
        if (id == 'unknown'):
            print('Unknown User Detected!\n')
            break
        # NOTE(review): unlock requires raw distance in [40, 100) and a
        # registered (non-empty) name — confirm this threshold is intended,
        # since smaller LBPH distances normally indicate stronger matches.
        elif (chkconfi >= 40 and id != ''):
            print(id + ' Face OK')
            if securitylevel == 1:
                face_chk = 2
            else:
                face_chk = 1
            GPIO.output(23, False) #RED LED OFF
            GPIO.output(24, True) #GREEN LED ON
            break
        k = cv2.waitKey(100) & 0xff
        if k == 27:  # ESC exits
            break
    print("\n Exiting Program")
    vivi.release()
    cv2.destroyAllWindows()
def chkLine(line, characters):
    """Scan one keypad row: drive *line* high and record any pressed columns.

    *characters* holds the four key labels of this row, in column order
    C1..C4.  Each pressed key appends its label to the global ``confidata``
    buffer and bumps the global press counter — several columns may fire in
    one scan, matching the original per-column ``if`` behavior.
    """
    global confidata
    global count
    GPIO.output(line, GPIO.HIGH)
    # One (pin, label) pair per column instead of four copy-pasted if-blocks.
    for pin, ch in zip((C1, C2, C3, C4), characters):
        if GPIO.input(pin) == 1:
            confidata += ch
            count += 1
            print(confidata)
    GPIO.output(line, GPIO.LOW)
def user_otp(): #OTP Publish
    """Publish a fresh time-based OTP to the first client that connects.

    Listens on the fixed LAN address, accepts one connection, sends the
    current TOTP value as text, and stores it in the global ``data`` so
    chk_otp() can compare it with the keypad input.

    Fix: both sockets are context-managed, so they are closed even when
    accept()/send() raises, instead of leaking the listening port.
    """
    global data
    host = '192.168.0.11'
    port = 8888
    with socket.socket(socket.AF_INET) as server_sock:
        server_sock.bind((host, port))
        server_sock.listen(1)
        print("OTP Check")
        client_sock, addr = server_sock.accept()
        with client_sock:
            print('Connected by', addr)
            # Shared secret must match the authenticator app's enrollment.
            totp = pyotp.TOTP('GAYDAMBQGAYDAMBQGAYDAMBQGA======')
            data = str(totp.now())
            client_sock.send(data.encode())
            print('OTP: ' + data)
def chk_otp(): #Check OTP
    """Collect 6 keypad presses and unlock (face_chk = 2) if they match the OTP."""
    global data
    global face_chk
    global confidata
    global count
    while True:
        # Scan all four keypad rows; presses accumulate in confidata.
        chkLine(L1, ["1","2","3","A"])
        chkLine(L2, ["4","5","6","B"])
        chkLine(L3, ["7","8","9","C"])
        chkLine(L4, ["*","0","#","D"])
        time.sleep(0.5)
        if count == 6:
            count = 0
            break
    if data == confidata:
        face_chk = 2
        confidata = ''
        count = 0
    # Always clear the keypad buffer, matched or not.
    confidata=''
def keyA(): # User Register, Face Train
    """Key 'A' (row 1 / column 4): register a new user, then retrain the model."""
    GPIO.output(L1, GPIO.HIGH)
    if(GPIO.input(C4) == 1):
        camera()
        train()
    GPIO.output(L1, GPIO.LOW)

def keyB(): # Face Detecting
    """Key 'B' (row 2 / column 4): start live face recognition."""
    GPIO.output(L2, GPIO.HIGH)
    if(GPIO.input(C4) == 1):
        recog()
    GPIO.output(L2, GPIO.LOW)

def keyC(): # Security Level
    """Key 'C' (row 3 / column 4): interactively choose the security level."""
    GPIO.output(L3, GPIO.HIGH)
    global securitylevel
    change = 0
    if(GPIO.input(C4) == 1):
        print('Security Level Setting!')
        print('1:Face ID, 2:Face ID + OTP\n')
        change = 1
    if change == 1:
        # Block until key '1' or '2' (row 1, columns 1/2) is pressed.
        while True:
            GPIO.output(L1, GPIO.HIGH)
            if(GPIO.input(C1) == 1):
                print('Security Level: 1(Face ID)')
                securitylevel = 1
                change = 0
                break
            elif(GPIO.input(C2) == 1):
                print('Security Level: 2(Face ID + OTP)')
                securitylevel = 2
                change = 0
                break
    GPIO.output(L3, GPIO.LOW)

def keyD(): # State
    """Key 'D' (row 4 / column 4): print current settings and registered users."""
    GPIO.output(L4, GPIO.HIGH)
    global securitylevel
    global user_name
    if(GPIO.input(C4) == 1):
        print('Security Level: ' + str(securitylevel))
        print('face_id user_name')
        for i in range (0,5):
            print(' ' + str(i) + ' ' + str(user_name[i]))
    GPIO.output(L4, GPIO.LOW)
def setPinConfig(EN, INA, INB):
    """Configure one motor-driver channel (enable + two direction pins).

    Returns the PWM object bound to the enable pin (100 Hz, started at 0%).
    """
    GPIO.setup(EN, GPIO.OUT)
    GPIO.setup(INA, GPIO.OUT)
    GPIO.setup(INB, GPIO.OUT)
    pwm = GPIO.PWM(EN, 100)
    pwm.start(0)
    return pwm

def setMotorControl(pwm, INA, INB, speed, stat):
    """Apply speed (PWM duty 0-100) and direction (FORWARD/BACKWARD/STOP)."""
    pwm.ChangeDutyCycle(speed)
    if(stat == FORWARD):
        GPIO.output(INA, HIGH)
        GPIO.output(INB, LOW)
    elif(stat == BACKWARD):
        GPIO.output(INA, LOW)
        GPIO.output(INB, HIGH)
    elif(stat == STOP):
        GPIO.output(INA, LOW)
        GPIO.output(INB, LOW)

def setMotor(ch, speed, stat):
    """Drive the door motor on channel *ch* (only CH1 exists on this build)."""
    if(ch == CH1):
        setMotorControl(pwmA, IN1, IN2, speed, stat)
GPIO.setmode(GPIO.BCM)
pwmA = setPinConfig(ENA, IN1, IN2)   # door motor PWM channel

print('Digital Doorlock \n')
print('A:User Register B:Face Detecting C:Security Level D:State \n')

# Main event loop: poll the keypad, then run the unlock/relock sequence
# once authentication (face, optionally + OTP) has succeeded.
while True:
    GPIO.output(23, True) #RED LED ON
    GPIO.output(24, False) #GREEN LED OFF
    keyA() # User, Face Train
    keyB() # Detect
    keyC() # Security Level
    keyD() # State
    if face_chk == 1:
        # Face matched but level 2 is active: publish an OTP and verify it.
        user_otp()
        chk_otp()
    if face_chk == 2:
        # Fully authenticated: open the door, hold it, then close again.
        setMotor(CH1, 100, BACKWARD)
        print('Door Open!')
        time.sleep(3)
        setMotor(CH1, 100, STOP)
        time.sleep(3)
        setMotor(CH1, 100, FORWARD)
        print('Door Close!')
        time.sleep(3)
        setMotor(CH1, 100, STOP)
        face_chk = 0
    time.sleep(0.5)
| [
"noreply@github.com"
] | HyeonJun97.noreply@github.com |
792ff644b761ddf8523143490260e32bd1abd2a2 | 2d8f3f52456a33984e7c55b6d17a8254a3cfd1ab | /rabbittop/utils.py | a8f0e56db965aa5d4f57eb58791ca17606402b46 | [
"MIT"
] | permissive | jve/rabbittop | daf2e24010178e54b3f6a09202f180191745157f | 1cac40f66135cff5433e3d6fac99cd0898a927de | refs/heads/master | 2021-01-01T05:32:58.628584 | 2014-09-14T16:00:46 | 2014-09-14T16:00:46 | 23,016,502 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | def human_size(n):
# G
if n >= (1024*1024*1024):
return "%.1fG" % (n/(1024*1024*1024))
# M
if n >= (1024*1024):
return "%.1fM" % (n/(1024*1024))
# K
if n >= 1024:
return "%.1fK" % (n/1024)
return "%d" % n | [
"jvaneenbergen@gmail.com"
] | jvaneenbergen@gmail.com |
d7a2f1f9b05a161bfc7df53c0386eee19562a8b3 | 49ec41b893df6a148db3aad035e42f5aef56bc76 | /auto_nag/scripts/lot_of_cc.py | 5cbc1120f2da10046f0f968e0007bcb219abccea | [
"BSD-3-Clause"
] | permissive | davehunt/relman-auto-nag | 0348b829f899bf796ba528de319686b4279c82b3 | 9e604cfeae3b0a4501bfba53305c0599b5c1be7d | refs/heads/master | 2023-01-24T04:51:05.057412 | 2023-01-10T14:35:51 | 2023-01-10T14:35:51 | 243,492,804 | 0 | 0 | BSD-3-Clause | 2020-02-27T10:30:37 | 2020-02-27T10:30:37 | null | UTF-8 | Python | false | false | 1,481 | py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag import utils
from auto_nag.bzcleaner import BzCleaner
class SeveralCc(BzCleaner):
    """Report recently-touched open bugs that accumulated many cc'ed users."""

    def __init__(self):
        super(SeveralCc, self).__init__()
        # Lookup window (weeks) and cc threshold come from the tool config.
        self.nweeks = utils.get_config(self.name(), "weeks_lookup")
        self.cc = utils.get_config(self.name(), "number_cc")

    def description(self):
        return "Bugs with several cc for the last {} weeks".format(self.nweeks)

    def columns(self):
        return ["id", "summary", "creation", "last_change"]

    def handle_bug(self, bug, data):
        # Record human-readable ages for the report row keyed by bug id.
        creation = utils.get_human_lag(bug["creation_time"])
        last_change = utils.get_human_lag(bug["last_change_time"])
        data[str(bug["id"])] = {
            "creation": creation,
            "last_change": last_change,
        }
        return bug

    def get_bz_params(self, date):
        # Bugzilla query: unresolved bugs changed within the window, with at
        # least `cc` cc'ed users, excluding meta/intermittent bugs.
        return {
            "include_fields": ["creation_time", "last_change_time"],
            "resolution": "---",
            "f1": "days_elapsed",
            "o1": "lessthan",
            "v1": self.nweeks * 7,
            "f2": "cc_count",
            "o2": "greaterthaneq",
            "v2": self.cc,
            "f3": "keywords",
            "o3": "nowords",
            "v3": ["meta", "intermittent"],
        }
if __name__ == "__main__":
SeveralCc().run()
| [
"noreply@github.com"
] | davehunt.noreply@github.com |
d36556706a39573782c45a5a36e4ccc055c80257 | 2023a98cb8337559c3674f810cb09577b1e50cc3 | /Walchand Meta Website/ABCD 22/abcd/mainsite/migrations/0012_auto_20181018_0019.py | aa71477d819015c49661d85bf858334af3129de6 | [] | no_license | Ankita1812/WMeta | 2e57c3b1d4b6a1c9a295553768c29216261ddd4d | 916ed1c5cdb03c4e163de89a5af77bee5eccd91f | refs/heads/master | 2020-05-25T16:44:08.941109 | 2019-05-21T13:05:11 | 2019-05-21T13:05:11 | 187,892,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | # Generated by Django 2.1.1 on 2018-10-17 18:49
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the `Index` model to `Home` in the mainsite app.

    Auto-generated by Django; RenameModel keeps the underlying table data
    and only changes the model's name (and default table name).
    """
    dependencies = [
        ('mainsite', '0011_auto_20181018_0010'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Index',
            new_name='Home',
        ),
    ]
| [
"ankichan1812@gmail.com"
] | ankichan1812@gmail.com |
6161effa51d3a1b64ea540108e05cf77778e804d | 3f1d39a0a73fe2f660bfcfffe9e7aa0860ed07ff | /functions.py | 5ae808d773799b3388869d9c2e74a00d756df301 | [] | no_license | AGoretti/Lista2_AndreGoretti_IanRocha | 0ab6c983fe657c4d2e5d32e3a935d203c2062f1f | 3bb199ee86c778cda565578925bae88bfe71d0d0 | refs/heads/master | 2020-07-21T10:59:00.643209 | 2019-09-06T20:30:22 | 2019-09-06T20:30:22 | 206,841,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,984 | py | import csv
import random
def nomes_reader():
    """Pick a random id in 1..100 and return the matching name from nomes.csv.

    Returns None when no row carries the drawn id.
    """
    target = str(random.randint(1, 100))
    with open('nomes.csv') as handle:
        for row in csv.reader(handle, delimiter=','):
            if row[0] == target:
                return row[1]
def random_city():
    """Pick a random id in 1..20 and return the matching city from cidades.csv.

    Returns None when no row carries the drawn id.
    """
    target = str(random.randint(1, 20))
    with open('cidades.csv') as handle:
        for row in csv.reader(handle, delimiter=','):
            if row[0] == target:
                return row[1]
def random_cpf():
    """Return a random CPF-formatted string 'NNN.NNN.NNN-NN'.

    Bug fix: the two check digits are now zero-padded — the original code
    could emit e.g. '...-5' when the random draw was below 10. The three
    main groups are already guaranteed 3 digits by their ranges.
    """
    cpf1 = random.randint(100, 999)
    cpf2 = random.randint(100, 999)
    cpf3 = random.randint(100, 999)
    cpf4 = random.randint(0, 99)
    return "{}.{}.{}-{:02d}".format(cpf1, cpf2, cpf3, cpf4)
def random_date():
    """Return a random, valid calendar date as 'D/M/YYYY' (no zero padding).

    Bug fix: the day used to be drawn uniformly from 1..31 regardless of the
    month, producing impossible dates such as 31/2; the day is now capped by
    the month's real length. The components are still stashed on the function
    object (random_date.year / .month / .date) as the original code did, in
    case other code reads them.
    """
    import calendar
    year = random.randint(1960, 2001)
    month = random.randint(1, 12)
    day = random.randint(1, calendar.monthrange(year, month)[1])
    random_date.year = year
    random_date.month = month
    random_date.date = day
    return "{}/{}/{}".format(day, month, year)
def random_tel():
    """Return a random landline-style number 'NNNN-NNNN' (prefix 3100-3999)."""
    prefix = 3000 + random.randint(100, 999)
    suffix = random.randint(1000, 9999)
    return "{}-{}".format(prefix, suffix)
# Parallel lists: index k across all seven lists describes one generated person.
person = []
date = []
cpf = []
age = []
tel = []
city = []
year = []
for k in range(51):
    person.append(nomes_reader())
    date.append(random_date())
    cpf.append(random_cpf())
    # The date string ends with a 4-digit year, so the last 4 chars are the year.
    year.append(date[k][-4:])
    age.append(2019 - int(year[k]))
    tel.append(random_tel())
    city.append(random_city())
# def selection_sort():
# for i in range(51):
# min = i
# for index in (i+1, 51):
# if person[min] > person[index]:
# min = j
def bubble_sort():
    """Sort the module-level `person` list in place (ascending), bubble style.

    Generalized from a hard-coded range(50) so it works for any list length.
    NOTE(review): only `person` is reordered — the parallel lists (date, cpf,
    age, tel, city, year) keep their old order, so the index correspondence
    between lists is lost after sorting; confirm whether that is intended.
    """
    unsorted = True
    while unsorted:
        unsorted = False
        for i in range(len(person) - 1):
            if person[i] > person[i + 1]:
                person[i], person[i + 1] = person[i + 1], person[i]
                unsorted = True
"ianrocha.df@gmail.com"
] | ianrocha.df@gmail.com |
543f76e2905745463656ad443de27c544dedb1df | 48b789f51f9d7f6a0251301bc4d4c2380f5e03b0 | /spdl/deep_sprl/util/parameter_parser.py | 50c30345d9d0485a787e6dcf35c4fb25733cf5b9 | [
"MIT"
def parse_parameters(remainder):
    """Turn ["--k1", "v1", "--k2", "v2", ...] into {"k1": "v1", ...}.

    Raises RuntimeError when the list has odd length or when a key slot does
    not start with '--'. Duplicate keys keep the last value.
    """
    message = ("Invalid parameters specification! Must be of the form: "
               "--KEY1 VALUE --KEY2 VALUE2 ...")
    if len(remainder) % 2 != 0:
        raise RuntimeError(message)
    parameters = {}
    for key, value in zip(remainder[::2], remainder[1::2]):
        if not key.startswith("--"):
            raise RuntimeError(message)
        parameters[key[2:]] = value
    return parameters
def create_override_appendix(keys, parameters):
    """Build a filename suffix like '_k1=v1_k2=v2' from overridden keys.

    Keys are emitted in sorted order; keys absent from `parameters` are
    skipped. Returns the empty string when nothing matches.
    """
    pieces = ["_{}={}".format(key, parameters[key])
              for key in sorted(keys) if key in parameters]
    return "".join(pieces)
| [
"eimer@tnt.uni-hannover.de"
] | eimer@tnt.uni-hannover.de |
6470e5104a790f16c984bcde668a934317ac2e95 | 1e8142725aa06844713d18fa38c6779aff8f8171 | /tndata_backend/notifications/migrations/0018_gcmmessage_queue_id.py | 64dd06bd40b6ed39edc8bd2ae0a208bec73ed197 | [
"MIT"
] | permissive | tndatacommons/tndata_backend | 8f4db3e5cf5272901c9087a85e21d7560240bb3b | 3d22179c581ab3da18900483930d5ecc0a5fca73 | refs/heads/master | 2020-12-03T07:53:17.339769 | 2017-03-27T06:18:58 | 2017-03-27T06:18:58 | 68,407,220 | 1 | 2 | null | 2017-03-27T06:18:59 | 2016-09-16T18:59:16 | Python | UTF-8 | Python | false | false | 437 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `queue_id` CharField to the notifications GCMMessage model.

    Auto-generated by Django; the field defaults to '' and is optional,
    so existing rows need no data migration.
    """
    dependencies = [
        ('notifications', '0017_auto_20151217_2000'),
    ]
    operations = [
        migrations.AddField(
            model_name='gcmmessage',
            name='queue_id',
            field=models.CharField(max_length=128, default='', blank=True),
        ),
    ]
| [
"brad@bradmontgomery.net"
] | brad@bradmontgomery.net |
7b122931a2d1665b2d483991ac0a54efe644b77e | 612325535126eaddebc230d8c27af095c8e5cc2f | /src/net/log/stitch_net_log_files.py | aea6d7b0f58ca282bcb4daf53c9837ae3b963544 | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,998 | py | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
This script stitches the NetLog files in a specified directory.
The complete NetLog will be written to net-internals-log.json in the directory
passed as argument to --path.
'''
import argparse, os
def main():
  """Stitch constants + event files + end_netlog into net-internals-log.json.

  Reads the files from the --path directory. On any missing input file the
  partially written output is deleted, a message is printed, and the
  function returns. After a successful stitch the source files are removed.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--path', action='store',
      help="Specifies the complete filepath of the directory where the log "
      "files are located.")
  # TODO(dconnol): Automatically pull all event files matching the format
  # event_file_<num>.json and remove the num_files argument.
  parser.add_argument('--num_files', action='store',
      help="Specifies the number of event files (not including the constants "
      "file or the end_netlog file) that need to be stitched together. "
      "The number of event files passed to the script must not be greater "
      "than the number of event files in the directory.")
  args = parser.parse_args()
  num_files = int(args.num_files)
  filepath = args.path
  if filepath[-1:] != "/":
    filepath += "/"
  os.chdir(filepath)
  with open("net-internals-log.json", "w") as stitched_file:
    try:
      # `with open(...) as name` avoids shadowing the builtin `file`.
      with open("constants.json") as constants_file:
        for line in constants_file:
          stitched_file.write(line)
    except IOError:
      os.remove("net-internals-log.json")
      print("File \"constants.json\" not found.")
      return
    # `line` buffers the most recent event so the trailing comma of the very
    # last event can be stripped before it is written out. Bug fix: it is
    # initialized here so num_files == 0 no longer raises NameError below.
    line = ""
    events_written = False
    for i in range(num_files):
      try:
        with open("event_file_%d.json" % i) as event_file:
          if not events_written:
            line = event_file.readline()
            events_written = True
          for next_line in event_file:
            if next_line.strip() == "":
              line += next_line
            else:
              stitched_file.write(line)
              line = next_line
      except IOError:
        os.remove("net-internals-log.json")
        print("File \"event_file_%d.json\" not found." % i)
        return
    # Remove hanging comma from last event
    # TODO(dconnol): Check if the last line is a valid JSON object. If not,
    # do not write the line to file. This handles incomplete logs.
    line = line.strip()
    if line[-1:] == ",":
      stitched_file.write(line[:-1])
    elif line:
      raise ValueError('Last event is not properly formed')
    try:
      with open("end_netlog.json") as end_file:
        for line in end_file:
          stitched_file.write(line)
    except IOError:
      os.remove("net-internals-log.json")
      print("File \"end_netlog\" not found.")
      return
  # Delete old NetLog files
  for i in range(num_files):
    os.remove("event_file_%d.json" % i)
  os.remove("constants.json")
  os.remove("end_netlog.json")
if __name__ == "__main__":
  # Stitch the NetLog pieces found in the --path directory.
  main()
| [
"2100639007@qq.com"
] | 2100639007@qq.com |
c279470529493caf6dca7d09df9d96753ca09dc2 | d4280eca1a9badb0a4ad2aa22598616eedece373 | /Automate The Boring Stuff With Python/03/04-sameName.py | c723f7c075712db216aaaf5d638a7face06363b8 | [] | no_license | Little-Captain/py | 77ec12bb2aaafe9f709a70831266335b03f63663 | 74ba3c3449e7b234a77500a17433e141e68169f7 | refs/heads/master | 2021-06-09T11:33:23.205388 | 2019-11-22T01:17:44 | 2019-11-22T01:17:44 | 131,844,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | #!/usr/bin/env python
def spam():
    # `eggs` here is local to spam(); it shadows any global of the same name.
    eggs = 'spam local'
    print(eggs)
def bacon():
    # A separate local `eggs`, independent of spam()'s local and of the global.
    eggs = 'bacon local'
    print(eggs)
# NOTE(review): as written, the top-level `print(eggs)` below executes before
# `eggs = 'global'` and would raise NameError. In the book's original example
# `spam()` and the first `print(eggs)` live *inside* bacon(); the indentation
# appears to have been lost — confirm against the upstream source.
spam()
print(eggs)
eggs = 'global'
bacon()
print(eggs)
"littlecaptain@foxmail.com"
] | littlecaptain@foxmail.com |
1e608fef6a87dac150de2f8b9b278e8df21419c8 | e20aa24747da2ce16634d366d03b2339ff09d588 | /plugs/get_order_code.py | fefbcc35a9bb6ed3e5a467fd8e2a1fdddbbf78ec | [] | no_license | zhangbowen2121/Auto_api_Test | 41e56b498520ecb19beb6a52851993d539eb35b3 | 509924246e667eea147cad608477aa4b2030cd46 | refs/heads/master | 2023-02-17T16:17:31.463477 | 2021-01-20T08:36:08 | 2021-01-20T08:36:08 | 325,428,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # 生成订单号
import time
def get_order_code():
    """Generate an order number: the current day of month (2 digits) followed
    by the last seven digits of the current Unix timestamp (dot removed)."""
    day_part = time.strftime('%d', time.localtime(time.time()))
    stamp_part = str(time.time()).replace('.', '')[-7:]
    return day_part + stamp_part
"902762022@qq.com"
] | 902762022@qq.com |
4a70d6c994db38d02f91ae1be1423d361c93a488 | a96e22eb8e070d5c94a3dea49bfc145f37511c11 | /lazyopt/__init__.py | 4c792de92a1d724015f4daccb254a18f207e6900 | [] | no_license | kbomb/lazyopt | 99d18cffca7bcb69f71d5022ef50d368332e37e8 | 3915583d1045f3e1bdd4886e67fd5e6e160228d9 | refs/heads/master | 2020-12-11T02:09:59.830620 | 2014-02-07T04:11:38 | 2014-02-07T04:11:38 | 16,712,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,438 | py | """
allows you to modify any constant from the command line.
use lazyopt.apply_all() to apply values from command line.
https://github.com/neyer/lazyopt
"""
__version__ = '1.0.0'
import inspect
import os
import sys
class ConfigurationError(Exception):
"""Invalid option specified."""
def get_as_number(value):
    """Convert str `value` to a number.

    A falsy/empty value yields None. Exactly one '.' means float; anything
    else is parsed as int, so non-numeric input raises ValueError (the
    callers rely on that exception).
    """
    if not value:
        return None
    return float(value) if value.count('.') == 1 else int(value)
def cast_value_str(value):
    """Interpret str `value` as the most specific type that fits.

    The literals 'None'/'True'/'False' map to their constants; otherwise a
    numeric parse is attempted, and anything non-numeric stays a string.
    """
    literals = {'None': None, 'True': True, 'False': False}
    if value in literals:
        return literals[value]
    try:
        return get_as_number(value)
    except ValueError:
        return value
def get_argv_bindings(argv):
    """Parse `argv` and return dict of new bindings.

    Walks the arguments as (--name, value) pairs: a '--name' immediately
    followed by another '--name' (or by the end of argv) becomes a boolean
    True flag; values are coerced with cast_value_str. Positional arguments
    are ignored, and a duplicated name raises ConfigurationError.
    """
    results = {}
    this_arg_name = None
    for arg in argv:
        #if we have the name of an argument,
        #we are waiting for a variable
        if this_arg_name:
            #if the next arg is another name, this_arg_name is a flag
            if arg.find('--') == 0:
                value = 'True'
                name = this_arg_name
                this_arg_name = arg[2:]
            else:
                value = arg
                name = this_arg_name
                this_arg_name = None
            #make sure they haven't given the same arg twice
            if name in results:
                raise ConfigurationError("duplicate arg %s" % name)
            else:
                # store the binding
                results[name] = cast_value_str(value)
        else:
            #check to see if this option is an arg name
            if arg.find('--') == 0:
                this_arg_name = arg[2:]
            #check the next arg for the value
            else: pass
            # this is a position argument. just ignore it.
    # we looped through all the args and have one left
    # so that must be a boolean flag
    if this_arg_name:
        if this_arg_name in results:
            raise ConfigurationError("duplicate arg %s" % this_arg_name)
        else:
            results[this_arg_name] = True
    return results
def get_module_and_var_name(var_name):
    """Split a dotted option name into (module_name, variable_name).

    The last dotted component is the variable and everything before it is
    the module path ('' when there are no dots). Dashes become underscores
    in both parts so option names can use '-' freely.
    """
    head, _, leaf = var_name.rpartition('.')
    return head.replace('-', '_'), leaf.replace('-', '_')
def apply_binding(module_name, var_name, value):
    """Import `module_name` and set its attribute `var_name` to `value`.

    Raises ConfigurationError when the module has no such attribute.
    """
    __import__(module_name, globals(), locals(), [], 0)
    target = sys.modules.get(module_name)
    if not hasattr(target, var_name):
        # NOTE: 'confgure' typo kept verbatim from the original message.
        raise ConfigurationError(
            'module %s has no value %s to confgure with value %s.'
            % (module_name, var_name, value))
    setattr(target, var_name, value)
def get_caller_module():
    """Return the module object two frames up: the caller of our caller."""
    # Frame chain: this frame -> the lazyopt function that called us ->
    # the external caller we want. Fragile: inserting an intermediate call
    # changes which module is found.
    stack = inspect.currentframe()
    return inspect.getmodule(stack.f_back.f_back.f_code)
def apply_all(argv=sys.argv):
    """Apply every '--module.var value' binding found in `argv`.

    Names without a module prefix are bound in the module that called
    apply_all. NOTE: the default `argv=sys.argv` is captured once at
    definition time, so later reassignment of sys.argv is not seen.
    """
    bindings = get_argv_bindings(argv)
    # figure out who called into this frame so args without module names work
    caller_module = get_caller_module().__name__
    for name, value in bindings.items():
        module_name, var_name = get_module_and_var_name(name)
        if not module_name:
            module_name = caller_module
        apply_binding(module_name, var_name, value)
| [
"mneyer@electric-cloud.com"
] | mneyer@electric-cloud.com |
20c5b21c1d7ae5e815d3c48135d2ea50c39c0b2d | 7401160cca031bc9e821d0e483258b61baa69313 | /ejercicioRCM/ejercicioRCM/settings.py | a7ce7db3fae7bfa4f258f48ddcc42574f1653ca4 | [] | no_license | richard-mustaine99/ejercicioERP | 7fdb2c34859687fabcac00588c812f0f3e5eb3d4 | 6bd7492bbdc43f581b9662e4e64e45a8647e2530 | refs/heads/main | 2023-02-26T09:05:13.847807 | 2021-01-26T20:19:53 | 2021-01-26T20:19:53 | 333,206,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | """
Django settings for ejercicioRCM project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '^!lwa_yt3wh&5gdj++6%3p1t^2pqjxs5i-8(ki0!%(*1y4_pq='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'appEmpresa.apps.AppempresaConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ejercicioRCM.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ejercicioRCM.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Map Django message levels to Bootstrap alert CSS classes so that
# {{ message.tags }} can be used directly as a class name in templates.
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
    messages.DEBUG: 'alert-info',
    messages.INFO: 'alert-info',
    messages.SUCCESS: 'alert-success',
    messages.WARNING: 'alert-warning',
    messages.ERROR: 'alert-danger',
}
| [
"richard-mustaine99@hotmail.com"
] | richard-mustaine99@hotmail.com |
1bbc11411983c07e73a6f0ab5f9eff30995621b0 | a6f8aae8f552a06b82fe018246e8dcd65c27e632 | /pr089/__init__.py | 159be3c3aebc785921f28b14145490cf183d1d97 | [] | no_license | P4SSER8Y/ProjectEuler | 2339ee7676f15866ceb38cad35e21ead0dad57e9 | 15d1b681e22133fc562a08b4e8e41e582ca8e625 | refs/heads/master | 2021-06-01T09:22:11.165235 | 2016-05-06T14:02:40 | 2016-05-06T14:02:40 | 46,722,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | from .pr089 import run as pyRun
# Re-export the Python implementation as `run`; a C implementation (cRun)
# can be swapped in by uncommenting the line below.
run = pyRun
#run = cRun
| [
"beobachter70@163.com"
] | beobachter70@163.com |
41914df4f5e75934d7324de29ba8a9b6195bdb26 | d0158d03c2116603787da08425600324351c7553 | /polls/urls.py | aab16feaa243d110a343959aa1f4a8dba5a7707b | [] | no_license | joeyede/djtest | 014bfaf7424f2efc3666f4f9b0d7ba6383ff705d | 55f499ab3a83a3102f8a3bc4a39ac25b33287bca | refs/heads/master | 2020-03-07T21:43:21.483130 | 2018-04-02T19:37:06 | 2018-04-02T19:37:06 | 127,734,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from django.urls import path
from . import views
# URL names below are reversed through the 'polls' namespace, e.g. 'polls:detail'.
app_name = 'polls'
urlpatterns = [
    # ex: /polls/
    path('', views.index, name='index'),
    # ex: /polls/5/
    path('<int:question_id>/', views.detail, name='detail'),
    # ex: /polls/5/results/
    path('<int:question_id>/results/', views.results, name='results'),
    # ex: /polls/5/vote/
    path('<int:question_id>/vote/', views.vote, name='vote'),
]
| [
"joey@deskalarm.com"
] | joey@deskalarm.com |
bbdad9a19a011f002deaf6ac657756ebfc169f62 | 18177e3fe5fa53823e442b5666ca7f46c8224054 | /PitchMe/wsgi.py | 3304bd142b50050f9519debc026aed66b44319d1 | [] | no_license | ufaruqui/PitchMe | ce4594bc27c079b12547ee7e00d5eed61c485604 | 597234de3dfeae7ad224ec0dd6e0ded0a245305c | refs/heads/master | 2022-12-10T04:33:07.260387 | 2018-05-18T20:23:57 | 2018-05-18T20:23:57 | 134,000,246 | 0 | 0 | null | 2022-05-25T01:29:39 | 2018-05-18T20:19:00 | Python | UTF-8 | Python | false | false | 483 | py | """
WSGI config for PitchMe project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# Make the settings module importable before the application object is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PitchMe.settings")
application = get_wsgi_application()
# Wrap the WSGI app so WhiteNoise serves static files directly.
# NOTE(review): whitenoise.django.DjangoWhiteNoise was removed in
# WhiteNoise 4.x — confirm the pinned whitenoise version is < 4.
application = DjangoWhiteNoise(application)
| [
"umair.faruqui@gmail.com"
] | umair.faruqui@gmail.com |
05eeff45b07e1893a74328b6d8c347e32645c118 | 492c1f210e758e4ea0287688fe73a66f60e8a26b | /paginationprj/paginationprj/urls.py | 2d2067837e7894fc47fa09151c4464af670e49ae | [] | no_license | prasadbabu247/filteringproject | 7e986737f79035b7a8c35cd1e634a62e7c4557c4 | 3bd143476fa99822d7aad0ef5bec25e86e7a4124 | refs/heads/master | 2020-04-22T03:59:43.620136 | 2019-02-11T10:03:41 | 2019-02-11T10:03:41 | 170,107,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | """paginationprj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from pageapp import views
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # REST endpoint backed by the class-based EmployeeAPIView.
    url(r'^api/',views.EmployeeAPIView.as_view())
]
| [
"prasadbabu247@gmail.com"
] | prasadbabu247@gmail.com |
e583684cf83becd0b38cb5c2a88d109a5a510a8c | 6141abf6b2c71b9f21dd4b6dc80897f8f794a1a7 | /python/Core/Runner/TestRunnerHtml.py | 503ad9b92793e3ef2fc641c87189bad635e0ec58 | [
"Apache-2.0"
] | permissive | toilatester/sample-automation-frameworks-across-languages | 0fadff17c87304acb25adbe8eb3894898e1c4f7e | 4c1ceb3f8fff14ed838f94c92be7d92013c95d4a | refs/heads/main | 2023-01-31T04:58:30.173695 | 2020-12-11T05:37:23 | 2020-12-11T05:37:23 | 320,446,668 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,932 | py | import sys
from unittest import TextTestRunner, TestSuite
from unittest.signals import registerResult
from Core.Report.HTMLTestResult import HTMLTestResult
from Core.Report.ReportManager import ReportManager
from Core.Exceptions.TestContextException import TestContextException
from TestResult import REPORT_PATH
class HTMLTestRunner(TextTestRunner):
    """A test runner that executes a suite and renders the results as HTML.

    A single HTMLTestResult is driven through the whole suite; a text
    summary is printed to `stream`, and the result object is finally handed
    to ReportManager to generate the HTML report file in `report_dir`.
    (Fixes the garbled original docstring "runnUTFer".)
    """

    def __init__(self, report_file_name="TestReport",
                 report_dir=REPORT_PATH, verbosity=2,
                 descriptions=True, fail_fast=False, buffer=False):
        TextTestRunner.__init__(self, stream=sys.stderr,
                                descriptions=descriptions, verbosity=verbosity,
                                failfast=fail_fast, buffer=buffer)
        self.elapsed_times = True
        self.result_class = HTMLTestResult
        self.report_dir = report_dir
        self.report_file_name = report_file_name
        self.result = self.__make_result()

    def __make_result(self) -> HTMLTestResult:
        """Create the result object that records everything about the run."""
        return self.result_class(self.stream, self.descriptions, self.verbosity)

    def run(self, test: TestSuite):
        """Run the given test case or suite and return the populated result.

        Raises TestContextException if anything in the invocation pipeline
        fails.
        """
        try:
            self.__init_test_result_config(test)
            self.__test_execution_invoke(test)
            self.__test_execution_post_process()
            return self.result
        except Exception as e:
            raise TestContextException("Has error in invoke test", e)

    def __init_test_result_config(self, test: TestSuite):
        """Copy runner flags onto the result and register it for Ctrl-C handling."""
        registerResult(self.result)
        self.result.failfast = self.failfast
        self.result.buffer = self.buffer
        self.result.tb_locals = self.tb_locals
        self.result.fail_fast = self.failfast
        if hasattr(test, 'properties'):
            # junit test suite properties
            self.result.properties = test.properties

    def __test_execution_invoke(self, test):
        """Execute the suite between startTestRun/stopTestRun markers."""
        self.stream.writeln("=================== Execution Invoke ===========================")
        self.result.startTestRun()
        test(self.result)
        self.result.stopTestRun()
        self.stream.writeln("=================== Stop Execution Invoke ======================")

    def __test_execution_post_process(self):
        """Print the run summary and trigger HTML report generation."""
        self.stream.writeln()
        run = self.result.testsRun
        # Bug fix: the plural 's' used to be interpolated after the elapsed
        # time ("... in 1.2s") instead of after the word "test".
        self.stream.writeln("Executed {0} test{1} in {2}\n".format(
            run, "s" if run != 1 else "", self.result.suite_execution_time))
        list_result_info = self.__test_suite_failed_process()
        list_result_info.extend(self.__test_suite_unexpected_successes_process())
        list_result_info.extend(self.__test_suite_skip_process())
        list_result_info.extend(self.__test_suite_expected_fails_process())
        list_result_info.extend(self.__test_suite_pass_process())
        # Plain conditional instead of the original immediately-invoked lambda.
        if list_result_info:
            self.stream.writeln(
                "Test Result Summary: ({})".format(", ".join(list_result_info)))
        self.__generate_html_report()

    def __test_suite_failed_process(self):
        """Return summary fragments for failures and errors (may be empty)."""
        list_result_info = []
        if not self.result.wasSuccessful():
            failed, errors = map(len, (self.result.failed_tests, self.result.errors))
            if failed:
                list_result_info.append("Failures={0}".format(failed))
            if errors:
                list_result_info.append("Errors={0}".format(errors))
        return list_result_info

    def __test_suite_unexpected_successes_process(self):
        """Return a summary fragment for unexpectedly passing tests."""
        list_result_info = []
        unexpected_successes = len(self.result.unexpected_successes_tests)
        if unexpected_successes:
            list_result_info.append("Unexpected Successes={}".format(unexpected_successes))
        return list_result_info

    def __test_suite_skip_process(self):
        """Return a summary fragment for skipped tests."""
        list_result_info = []
        skipped = len(self.result.skipped_tests)
        if skipped:
            list_result_info.append("Skipped={}".format(skipped))
        return list_result_info

    def __test_suite_expected_fails_process(self):
        """Return a summary fragment for expected failures."""
        list_result_info = []
        expected_fails = len(self.result.expected_failed_tests)
        if expected_fails:
            list_result_info.append("Expected Failures={}".format(expected_fails))
        return list_result_info

    def __test_suite_pass_process(self):
        """Return a summary fragment for passing tests."""
        list_result_info = []
        passed = len(self.result.passed_tests)
        if passed:
            list_result_info.append("Passed={}".format(passed))
        return list_result_info

    def __generate_html_report(self):
        """Render the collected result into an HTML file via ReportManager."""
        self.stream.writeln("Generate HTML Report ...")
        report = ReportManager(self.result)
        report.generate_html_report(report_dir=self.report_dir,
                                    report_file_name=self.report_file_name)
| [
"minhhoang@kms-technology.com"
] | minhhoang@kms-technology.com |
80f9a509601a4e0f0d7b186b1a49ad915c47b415 | 5902cd8b1841b3d04764e8293664e5d91d351c0c | /3-LinEqsLSP/Example_3_3.py | 32682d1fafd5bf263b82e15ef31beddc78b1702b | [] | no_license | laviste/NMOF-Python | 303289b0d38c4843d54524be07e8ef3f443f2473 | a287722eb2bf53429b05dc3d2e2acffffcde4a6c | refs/heads/master | 2022-02-23T04:20:28.000312 | 2019-09-28T14:56:47 | 2019-09-28T14:56:47 | 201,901,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | # Example 3.3
import timeit
import numpy as np
import scipy.linalg as linalg
from scipy.sparse import spdiags
from functions import lu3diag
from functions import solve3diag
n = 500; m = 100000;
c = np.arange(1,n+1) / 3; d = np.ones(n); x = np.ones((n,1));
p = -c[1:n]; q = c[0:n-1];
A = spdiags(np.hstack((np.append(p, np.nan).reshape(-1,1),
d.reshape(-1,1), np.insert(q,0,np.nan).reshape(-1,1))).T,
np.arange(-1,2),n,n,format=None)
b = (A@x).flatten()
#
start = timeit.default_timer()
A = A.toarray()
L = linalg.lu(A)[1]
U = linalg.lu(A)[2]
#for k in np.arange(m):
s1 = linalg.solve(U,(linalg.solve(L,b)))
stop = timeit.default_timer()
print('\n Sparse Matlab {0:.6f}'.format(np.fix(stop)),'seconds.')
start = timeit.default_timer()
l,u = lu3diag(p,d,q)
#for k in np.arange(m):
s2 = solve3diag(l,u,q,b)
stop = timeit.default_timer()
print('\n Sparse code {0:.6f}'.format(np.fix(stop)),'seconds.')
#Sparse Matlab 5 (sec)
# Sparse code 9 (sec) | [
"noreply@github.com"
] | laviste.noreply@github.com |
4d8fe21b212aecd68a40fdafc74015c55daf7353 | a0d4fd9c8302bd9781a0edbd18c1356d1fdb5fc3 | /web/downloader/wsgi.py | bcf6903f3b4dfc527bda4f36af28bb9f290789a2 | [] | no_license | nafisaISRAIL/downloader | 0a6e91b492354f1cee5518f90710d5cc6c953af7 | e104358a49a1165bf27fb20256cf73c707ea2504 | refs/heads/master | 2020-03-11T07:11:57.803375 | 2018-05-02T06:13:35 | 2018-05-02T06:13:35 | 129,286,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for downloader project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the settings module before building the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "downloader.settings")
application = get_wsgi_application()
| [
"nafisaisrail@gmail.com"
] | nafisaisrail@gmail.com |
d5220478f07a799e01c78fa9059bda5b1711cf30 | 68713480d61a74f89666e10718a936eb51a6ceaf | /k_cluster.py | 1e1154e8ee75dc5cb2e8272ff8de67a370662ab7 | [] | no_license | ellicraw/datascienceA | 3d47a5119178a20adb51defae2785c3a740d4d0c | 600f9827920636397099da2ef9879e4fac92a711 | refs/heads/master | 2021-04-15T05:12:37.189116 | 2018-04-26T19:59:56 | 2018-04-26T19:59:56 | 126,894,300 | 1 | 3 | null | 2018-04-26T19:59:57 | 2018-03-26T21:50:48 | Python | UTF-8 | Python | false | false | 4,106 | py | import copy
import random
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from datetime import datetime
df = pd.DataFrame({
'x': [5.1
,
4.9
,
4.7
,
4.6
,
5
,
5.4
,
4.6
,
5
,
4.4
,
4.9
,
5.4
,
4.8
,
4.8
,
4.3
,
5.8
,
5.7
,
5.4
,
5.1
,
5.7
,
5.1
,
5.4
,
5.1
,
4.6
,
5.1
,
4.8
,
5
,
5
,
5.2
,
5.2
,
4.7
,
4.8
,
5.4
,
5.2
,
5.5
,
4.9
,
5
,
5.5
,
4.9
,
4.4
,
5.1
,
5
,
4.5
,
4.4
,
5
,
5.1
,
4.8
,
5.1
,
4.6
,
5.3
,
5
,
7
,
6.4
,
6.9
,
5.5
,
6.5
,
5.7
,
6.3
,
4.9
,
6.6
,
5.2
,
5
,
5.9
,
6
,
6.1
,
5.6
,
6.7
,
5.6
,
5.8
,
6.2
,
5.6
,
5.9
,
6.1
,
6.3
,
6.1
,
6.4
,
6.6
,
6.8
,
6.7
,
6
,
5.7
,
5.5
,
5.5
,
5.8
,
6
,
5.4
,
6
,
6.7
,
6.3
,
5.6
,
5.5
,
5.5
,
6.1
,
5.8
,
5
,
5.6
,
5.7
,
5.7
,
6.2
,
5.1,],
'y': [3.5
,
3
,
3.2
,
3.1
,
3.6
,
3.9
,
3.4
,
3.4
,
2.9
,
3.1
,
3.7
,
3.4
,
3
,
3
,
4
,
4.4
,
3.9
,
3.5
,
3.8
,
3.8
,
3.4
,
3.7
,
3.6
,
3.3
,
3.4
,
3
,
3.4
,
3.5
,
3.4
,
3.2
,
3.1
,
3.4
,
4.1
,
4.2
,
3.1
,
3.2
,
3.5
,
3.1
,
3
,
3.4
,
3.5
,
2.3
,
3.2
,
3.5
,
3.8
,
3
,
3.8
,
3.2
,
3.7
,
3.3
,
3.2
,
3.2
,
3.1
,
2.3
,
2.8
,
2.8
,
3.3
,
2.4
,
2.9
,
2.7
,
2
,
3
,
2.2
,
2.9
,
2.9
,
3.1
,
3
,
2.7
,
2.2
,
2.5
,
3.2
,
2.8
,
2.5
,
2.8
,
2.9
,
3
,
2.8
,
3
,
2.9
,
2.6
,
2.4
,
2.4
,
2.7
,
2.7
,
3
,
3.4
,
3.1
,
2.3
,
3
,
2.5
,
2.6
,
3
,
2.6
,
2.3
,
2.7
,
3
,
2.9
,
2.9
,
2.5,]
})
# Seed only Python's `random` module; note the centroids below are drawn
# with np.random, which this seed does NOT affect.
random.seed(datetime.now())
k = 3
# k random centroids, keyed 1..k, each an [x, y] pair inside the plot range.
centroids = {
    i+1: [np.random.randint(0, 15), np.random.randint(0, 15)]
    for i in range(k)
}
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color='k')
# Fixed color per centroid id (works because k == 3).
colmap = {1: 'r', 2: 'g', 3: 'b'}
for i in centroids.keys():
    plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 15)
plt.ylim(0, 15)
def assignment(df, centroids):
    """Assign every row of `df` to its nearest centroid (Euclidean distance).

    Adds one 'distance_from_<id>' column per centroid, then derives
    'closest' (the winning centroid id) and 'color' (via the module-level
    colmap). Mutates and returns `df`.
    """
    for i in centroids.keys():
        # sqrt((x1 - x2)^2 + (y1 - y2)^2)
        df['distance_from_{}'.format(i)] = (
            np.sqrt(
                (df['x'] - centroids[i][0]) ** 2
                + (df['y'] - centroids[i][1]) ** 2
            )
        )
    centroid_distance_cols = ['distance_from_{}'.format(i) for i in centroids.keys()]
    df['closest'] = df.loc[:, centroid_distance_cols].idxmin(axis=1)
    # Bug fix: str.lstrip strips a *character set*, not a prefix; the original
    # only worked because the ids are digits absent from 'distance_from_'.
    # Take the suffix after the final underscore explicitly.
    df['closest'] = df['closest'].map(lambda x: int(x.split('_')[-1]))
    df['color'] = df['closest'].map(lambda x: colmap[x])
    return df
# First assignment pass, then plot the points colored by nearest centroid.
df = assignment(df, centroids)
print(df.head())
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')
for i in centroids.keys():
    plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 15)
plt.ylim(0, 15)
# Deep copy so the movement arrows below can start from the old positions.
old_centroids = copy.deepcopy(centroids)
def update(k):
    # Move every centroid to the mean of the points currently assigned to it.
    # NOTE: despite the parameter, this mutates the *module-level* `centroids`
    # (callers pass that very dict in) and reads the module-level `df`; the
    # argument `k` is returned unchanged.
    for i in centroids.keys():
        centroids[i][0] = np.mean(df[df['closest'] == i]['x'])
        centroids[i][1] = np.mean(df[df['closest'] == i]['y'])
    return k
centroids = update(centroids)
fig = plt.figure(figsize=(5, 5))
ax = plt.axes()
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')
for i in centroids.keys():
    plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 15)
plt.ylim(0, 15)
# Draw arrows showing how far each centroid moved (75% of the way, so the
# arrowhead does not sit on top of the new centroid marker).
for i in old_centroids.keys():
    old_x = old_centroids[i][0]
    old_y = old_centroids[i][1]
    dx = (centroids[i][0] - old_centroids[i][0]) * 0.75
    dy = (centroids[i][1] - old_centroids[i][1]) * 0.75
    ax.arrow(old_x, old_y, dx, dy, head_width=0.2, head_length=0.3, fc=colmap[i], ec=colmap[i])
df = assignment(df, centroids)
# Plot results
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')
for i in centroids.keys():
    plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 15)
plt.ylim(0, 15)
# Continue until all assigned categories don't change any more
# (the standard k-means convergence test on the assignment labels).
while True:
    closest_centroids = df['closest'].copy(deep=True)
    centroids = update(centroids)
    df = assignment(df, centroids)
    if closest_centroids.equals(df['closest']):
        break
# Final converged clustering.
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')
for i in centroids.keys():
    plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 15)
plt.ylim(0, 15)
plt.show()
"noreply@github.com"
] | ellicraw.noreply@github.com |
f18e9a61b7f37e591016c3ddae71ff4d2ec8f781 | 829ec4e5626140ce424a33d2e02725a4335e514a | /opsramp/msp.py | c2b844d5f8a55f5d2f6f0cd6a50827d735b82f52 | [
"Apache-2.0"
] | permissive | shrivastava-shweta/python-opsramp | 8b166533a9b01e26f77bd11c4dc942db07cd7961 | 02f3cc5589e3f6622f5b7784fe20e6a16e51e881 | refs/heads/master | 2021-04-04T01:40:38.573799 | 2020-03-19T05:19:08 | 2020-03-20T11:38:55 | 248,413,746 | 0 | 0 | Apache-2.0 | 2020-03-19T04:54:10 | 2020-03-19T04:54:09 | null | UTF-8 | Python | false | false | 3,182 | py | #!/usr/bin/env python
#
# A minimal Python language binding for the OpsRamp REST API.
#
# msp.py
# Classes related to partner-level actions.
#
# (c) Copyright 2019 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import datetime
from opsramp.base import ApiWrapper
class Clients(ApiWrapper):
def __init__(self, parent):
super(Clients, self).__init__(parent.api, 'clients')
def get(self, suffix='/minimal'):
return self.api.get(suffix)
def search(self, pattern=''):
path = '/search'
if pattern:
path += '?queryString=' + pattern
return self.api.get(path)
def create(self, definition):
assert 'name' in definition
assert 'address' in definition
assert 'timeZone' in definition
assert 'country' in definition
return self.api.post('', json=definition)
def update(self, uuid, definition):
return self.api.post('%s' % uuid, json=definition)
def activate(self, uuid):
return self.api.post('%s/activate' % uuid)
def suspend(self, uuid):
return self.api.post('%s/suspend' % uuid)
def terminate(self, uuid):
return self.api.post('%s/terminate' % uuid)
# Helper functions to create the complex structures that OpsRamp
# uses to manipulate client definitions.
@staticmethod
def mkHours(day_start=datetime.time(9, 0),
day_end=datetime.time(17, 0),
week_start=2, week_end=6,
sms_voice_notification=False):
retval = {
'businessStartHour': day_start.hour,
'businessStartMin': day_start.minute,
'businessEndHour': day_end.hour,
'businessEndMin': day_end.minute,
'businessDayStart': int(week_start),
'businessDayEnd': int(week_end),
'smsVoiceNotification': bool(sms_voice_notification)
}
return retval
# A helper function to create the complex structures that OpsRamp
# uses to define a new client. There are lots of optional fields and
# potential gotchas here and we guard against *some* of them.
@staticmethod
def mkClient(name, address, time_zone, country,
hours=None):
retval = {
'name': name,
'address': address,
'timeZone': time_zone,
'country': country
}
if hours:
retval['clientDetails'] = hours
# TODO there are lots and lots more optional fields that we
# will probably need to cater for in the fullness of time.
return retval
| [
"jo.fegan@hpe.com"
] | jo.fegan@hpe.com |
1e151665b7c67e8294d854b4b4c865a95ea8de28 | 128bce1e81db21e1039b8c5b645cb52839057fb0 | /datasets.py | 7b92d1bb80afb2379a91ba9822b05c01a016dc19 | [] | no_license | Denis21800/Pathology-classification | 3e9de105ba66b8d5f0e381cb25f4dac3b7db5503 | 4ddd4179ab8b5d61fd5f8b7bec1b0063848fb62c | refs/heads/main | 2023-06-23T16:56:04.763199 | 2021-05-12T18:31:58 | 2021-05-12T18:31:58 | 366,812,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
class ModelDataset(Dataset):
def __init__(self, data):
self.data = data
def __getitem__(self, index):
item = self.data.get(index)
rec = item.get('data')
pm_arr = rec.get('pm')
intensity_arr = rec.get('intensity')
x_data = np.vstack((pm_arr, intensity_arr))
y_data = rec.get('label')
file_ = rec.get('file')
o_index = rec.get('o_index')
metadata = None
o_index = o_index if o_index is not None else []
return x_data, y_data, file_, o_index
def __len__(self):
return len(self.data)
class ModelData(object):
def __init__(self, data):
assert data
self.data = data
self.train_loader = None
self.test_loader = None
self.all_data_loader = None
def create_model_data(self):
train_data = {}
test_data = {}
test_index = 0
train_index = 0
for key in self.data:
item = self.data.get(key)
rec = item.get('data')
is_test = rec.get('is_test')
if is_test == 1:
test_data.update({test_index: item})
test_index += 1
else:
train_data.update({train_index: item})
train_index += 1
test_dataset = ModelDataset(test_data)
train_dataset = ModelDataset(train_data)
all_data = ModelDataset(self.data)
if train_dataset:
self.train_loader = DataLoader(dataset=train_dataset, shuffle=True)
if test_dataset:
self.test_loader = DataLoader(dataset=test_dataset, shuffle=True)
self.all_data_loader = DataLoader(dataset=all_data, shuffle=True)
| [
"noreply@github.com"
] | Denis21800.noreply@github.com |
092b778c96eb5fba03903b3f321f5053806cbaad | fbd016b39a2736a22840dd95b8480ae33cfaf4f7 | /Assets/Python/CvUtil.py | a939fd186ed67658d538fa901e2ceb14f578c2ad | [] | no_license | max-zanko/civ4-beyond-the-sword-sdk | f2bf05d10c09c87a0dd6cd92093a22ce7fc36133 | 264d371d88f8a4d75b2ce2ad394d8ffeacc9a938 | refs/heads/master | 2021-07-05T18:05:59.673271 | 2017-10-02T14:18:29 | 2017-10-02T14:18:29 | 105,398,712 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 16,007 | py | ## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
#
# for error reporting
import traceback
# for file ops
import os
import sys
# For Civ game code access
from CvPythonExtensions import *
# For exception handling
SHOWEXCEPTIONS = 1
# for C++ compatibility
false=False
true=True
# globals
gc = CyGlobalContext()
FontIconMap = {}
localText = CyTranslator()
#
# Popup context enums, values greater than 999 are reserved for events
#
# DEBUG TOOLS
PopupTypeEntityEventTest = 4
PopupTypeEffectViewer = 5
# HELP SCREENS
PopupTypeMilitaryAdvisor = 103
PopupTypePlayerSelect = 104
# WORLD BUILDER
PopupTypeWBContextStart = 200
PopupTypeWBEditCity = PopupTypeWBContextStart
PopupTypeWBEditUnit = 201
PopupTypeWBContextEnd = 299
# EVENT ID VALUES (also used in popup contexts)
EventGetEspionageTarget = 4999
EventEditCityName = 5000
EventEditCity = 5001
EventPlaceObject = 5002
EventAwardTechsAndGold = 5003
EventEditUnitName = 5006
EventCityWarning = 5007
EventWBAllPlotsPopup = 5008
EventWBLandmarkPopup = 5009
EventWBScriptPopup = 5010
EventWBStartYearPopup = 5011
EventShowWonder = 5012
EventLButtonDown=1
EventLcButtonDblClick=2
EventRButtonDown=3
EventBack=4
EventForward=5
EventKeyDown=6
EventKeyUp=7
# List of unreported Events
SilentEvents = [EventEditCityName, EventEditUnitName]
# Popup defines (TODO: Expose these from C++)
FONT_CENTER_JUSTIFY=1<<2
FONT_RIGHT_JUSTIFY=1<<1
FONT_LEFT_JUSTIFY=1<<0
def convertToUnicode(s):
"if the string is non unicode, convert it to unicode by decoding it using 8859-1, latin_1"
if (isinstance(s, str)):
return s.decode("latin_1")
return s
def convertToStr(s):
"if the string is unicode, convert it to str by encoding it using 8859-1, latin_1"
if (isinstance(s, unicode)):
return s.encode("latin_1")
return s
class RedirectDebug:
"""Send Debug Messages to Civ Engine"""
def __init__(self):
self.m_PythonMgr = CyPythonMgr()
def write(self, stuff):
# if str is non unicode and contains encoded unicode data, supply the right encoder to encode it into a unicode object
if (isinstance(stuff, unicode)):
self.m_PythonMgr.debugMsgWide(stuff)
else:
self.m_PythonMgr.debugMsg(stuff)
class RedirectError:
"""Send Error Messages to Civ Engine"""
def __init__(self):
self.m_PythonMgr = CyPythonMgr()
def write(self, stuff):
# if str is non unicode and contains encoded unicode data, supply the right encoder to encode it into a unicode object
if (isinstance(stuff, unicode)):
self.m_PythonMgr.errorMsgWide(stuff)
else:
self.m_PythonMgr.errorMsg(stuff)
def myExceptHook(type, value, tb):
lines=traceback.format_exception(type, value, tb)
#pre= "---------------------Traceback lines-----------------------\n"
mid="\n".join(lines)
#post="-----------------------------------------------------------"
#total = pre+mid+post
total=mid
if SHOWEXCEPTIONS:
sys.stderr.write(total)
else:
sys.stdout.write(total)
def pyPrint(stuff):
stuff = 'PY:' + stuff + "\n"
sys.stdout.write(stuff)
def pyAssert(cond, msg):
if (cond==False):
sys.stderr.write(msg)
assert(cond, msg)
def getScoreComponent(iRawScore, iInitial, iMax, iFactor, bExponential, bFinal, bVictory):
if gc.getGame().getEstimateEndTurn() == 0:
return 0
if bFinal and bVictory:
fTurnRatio = float(gc.getGame().getGameTurn()) / float(gc.getGame().getEstimateEndTurn())
if bExponential and (iInitial != 0):
fRatio = iMax / iInitial
iMax = iInitial * pow(fRatio, fTurnRatio)
else:
iMax = iInitial + fTurnRatio * (iMax - iInitial)
iFree = (gc.getDefineINT("SCORE_FREE_PERCENT") * iMax) / 100
if (iFree + iMax) != 0:
iScore = (iFactor * (iRawScore + iFree)) / (iFree + iMax)
else:
iScore = iFactor
if bVictory:
iScore = ((100 + gc.getDefineINT("SCORE_VICTORY_PERCENT")) * iScore) / 100
if bFinal:
iScore = ((100 + gc.getDefineINT("SCORE_HANDICAP_PERCENT_OFFSET") + (gc.getGame().getHandicapType() * gc.getDefineINT("SCORE_HANDICAP_PERCENT_PER"))) * iScore) / 100
return int(iScore)
def getOppositeCardinalDirection(dir):
return (dir + 2) % CardinalDirectionTypes.NUM_CARDINALDIRECTION_TYPES
def shuffle(num, rand):
"returns a tuple of size num of shuffled numbers"
piShuffle = [0]*num
shuffleList(num, rand, piShuffle) # implemented in C for speed
return piShuffle
def spawnUnit(iUnit, pPlot, pPlayer):
pPlayer.initUnit(iUnit, pPlot.getX(), pPlot.getY(), UnitAITypes.NO_UNITAI, DirectionTypes.NO_DIRECTION)
return 1
def findInfoTypeNum(infoGetter, numInfos, typeStr):
if (typeStr == 'NONE'):
return -1
idx = gc.getInfoTypeForString(typeStr)
pyAssert(idx != -1, "Can't find type enum for type tag %s" %(typeStr,))
return idx
def getInfo(strInfoType, strInfoName): # returns info for InfoType
#set Type to lowercase
strInfoType = strInfoType.lower()
strInfoName = strInfoName.capitalize()
#get the appropriate dictionary item
infoDict = GlobalInfosMap.get(strInfoType)
#get the number of infos
numInfos = infoDict['NUM']()
#loop through each info
for i in range(numInfos):
loopInfo = infoDict['GET'](i)
if loopInfo.getDescription() == strInfoName:
#and return the one requested
return loopInfo
def AdjustBuilding(add, all, BuildingIdx, pCity): # adds/removes buildings from a city
"Function for toggling buildings in cities"
if (BuildingIdx!= -1):
if (all): #Add/Remove ALL
for i in range(BuildingIdx):
pCity.setNumRealBuildingIdx(i,add)
else:
pCity.setNumRealBuildingIdx(BuildingIdx,add)
return 0
def getIcon(iconEntry): # returns Font Icons
global FontIconMap
iconEntry = iconEntry.lower()
if (FontIconMap.has_key(iconEntry)):
return FontIconMap.get(iconEntry)
else:
return (u"%c" %(191,))
def combatDetailMessageBuilder(cdUnit, ePlayer, iChange):
if (cdUnit.iExtraCombatPercent != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_EXTRA_COMBAT_PERCENT",(cdUnit.iExtraCombatPercent * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iAnimalCombatModifierTA != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_ANIMAL_COMBAT",(cdUnit.iAnimalCombatModifierTA * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iAIAnimalCombatModifierTA != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_AI_ANIMAL_COMBAT",(cdUnit.iAIAnimalCombatModifierTA * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iAnimalCombatModifierAA != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_ANIMAL_COMBAT",(cdUnit.iAnimalCombatModifierAA * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iAIAnimalCombatModifierAA != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_AI_ANIMAL_COMBAT",(cdUnit.iAIAnimalCombatModifierAA * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iBarbarianCombatModifierTB != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_BARBARIAN_COMBAT",(cdUnit.iBarbarianCombatModifierTB * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iAIBarbarianCombatModifierTB != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_BARBARIAN_AI_COMBAT",(cdUnit.iAIBarbarianCombatModifierTB * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iBarbarianCombatModifierAB != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_BARBARIAN_COMBAT",(cdUnit.iBarbarianCombatModifierAB * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iAIBarbarianCombatModifierAB != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_BARBARIAN_AI_COMBAT",(cdUnit.iAIBarbarianCombatModifierAB * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iPlotDefenseModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_PLOT_DEFENSE",(cdUnit.iPlotDefenseModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iFortifyModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_FORTIFY",(cdUnit.iFortifyModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iCityDefenseModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CITY_DEFENSE",(cdUnit.iCityDefenseModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iHillsAttackModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_HILLS_ATTACK",(cdUnit.iHillsAttackModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iHillsDefenseModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_HILLS",(cdUnit.iHillsDefenseModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iFeatureAttackModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_FEATURE_ATTACK",(cdUnit.iFeatureAttackModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iFeatureDefenseModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_FEATURE",(cdUnit.iFeatureDefenseModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iTerrainAttackModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_TERRAIN_ATTACK",(cdUnit.iTerrainAttackModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iTerrainDefenseModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_TERRAIN",(cdUnit.iTerrainDefenseModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iCityAttackModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CITY_ATTACK",(cdUnit.iCityAttackModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iDomainDefenseModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CITY_DOMAIN_DEFENSE",(cdUnit.iDomainDefenseModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iCityBarbarianDefenseModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CITY_BARBARIAN_DEFENSE",(cdUnit.iCityBarbarianDefenseModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iClassDefenseModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_DEFENSE",(cdUnit.iClassDefenseModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iClassAttackModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_ATTACK",(cdUnit.iClassAttackModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iCombatModifierT != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_COMBAT",(cdUnit.iCombatModifierT * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iCombatModifierA != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_COMBAT",(cdUnit.iCombatModifierA * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iDomainModifierA != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_DOMAIN",(cdUnit.iDomainModifierA * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iDomainModifierT != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_DOMAIN",(cdUnit.iDomainModifierT * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iAnimalCombatModifierA != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_ANIMAL_COMBAT",(cdUnit.iAnimalCombatModifierA * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iAnimalCombatModifierT != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_ANIMAL_COMBAT",(cdUnit.iAnimalCombatModifierT * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iRiverAttackModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_RIVER_ATTACK",(cdUnit.iRiverAttackModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
if (cdUnit.iAmphibAttackModifier != 0):
msg=localText.getText("TXT_KEY_COMBAT_MESSAGE_CLASS_AMPHIB_ATTACK",(cdUnit.iAmphibAttackModifier * iChange,))
CyInterface().addCombatMessage(ePlayer,msg)
def combatMessageBuilder(cdAttacker, cdDefender, iCombatOdds):
combatMessage = ""
if (cdAttacker.eOwner == cdAttacker.eVisualOwner):
combatMessage += "%s's" %(gc.getPlayer(cdAttacker.eOwner).getName(),)
combatMessage += " %s (%.2f)" %(cdAttacker.sUnitName,cdAttacker.iCurrCombatStr/100.0,)
combatMessage += " " + localText.getText("TXT_KEY_COMBAT_MESSAGE_VS", ()) + " "
if (cdDefender.eOwner == cdDefender.eVisualOwner):
combatMessage += "%s's" %(gc.getPlayer(cdDefender.eOwner).getName(),)
combatMessage += "%s (%.2f)" %(cdDefender.sUnitName,cdDefender.iCurrCombatStr/100.0,)
CyInterface().addCombatMessage(cdAttacker.eOwner,combatMessage)
CyInterface().addCombatMessage(cdDefender.eOwner,combatMessage)
combatMessage = "%s %.1f%%" %(localText.getText("TXT_KEY_COMBAT_MESSAGE_ODDS", ()),iCombatOdds/10.0,)
CyInterface().addCombatMessage(cdAttacker.eOwner,combatMessage)
CyInterface().addCombatMessage(cdDefender.eOwner,combatMessage)
combatDetailMessageBuilder(cdAttacker,cdAttacker.eOwner,-1)
combatDetailMessageBuilder(cdDefender,cdAttacker.eOwner,1)
combatDetailMessageBuilder(cdAttacker,cdDefender.eOwner,-1)
combatDetailMessageBuilder(cdDefender,cdDefender.eOwner,1)
def initDynamicFontIcons():
global FontIconMap
info = ""
desc = ""
# add Commerce Icons
for i in range(CommerceTypes.NUM_COMMERCE_TYPES):
info = gc.getCommerceInfo(i)
desc = info.getDescription().lower()
addIconToMap(info.getChar, desc)
# add Yield Icons
for i in range(YieldTypes.NUM_YIELD_TYPES):
info = gc.getYieldInfo(i)
desc = info.getDescription().lower()
addIconToMap(info.getChar, desc)
# add Religion & Holy City Icons
for i in range(gc.getNumReligionInfos()):
info = gc.getReligionInfo(i)
desc = info.getDescription().lower()
addIconToMap(info.getChar, desc)
addIconToMap(info.getHolyCityChar, desc)
for key in OtherFontIcons.keys():
#print key
FontIconMap[key] = (u"%c" % CyGame().getSymbolID(OtherFontIcons.get(key)))
#print FontIconMap
def addIconToMap(infoChar, desc):
global FontIconMap
desc = convertToStr(desc)
print "%s - %s" %(infoChar(), desc)
uc = infoChar()
if (uc>=0):
FontIconMap[desc] = u"%c" %(uc,)
OtherFontIcons = { 'happy' : FontSymbols.HAPPY_CHAR,
'unhappy' : FontSymbols.UNHAPPY_CHAR,
'healthy' : FontSymbols.HEALTHY_CHAR,
'unhealthy' : FontSymbols.UNHEALTHY_CHAR,
'bullet' : FontSymbols.BULLET_CHAR,
'strength' : FontSymbols.STRENGTH_CHAR,
'moves' : FontSymbols.MOVES_CHAR,
'religion' : FontSymbols.RELIGION_CHAR,
'star' : FontSymbols.STAR_CHAR,
'silver star' : FontSymbols.SILVER_STAR_CHAR,
'trade' : FontSymbols.TRADE_CHAR,
'defense' : FontSymbols.DEFENSE_CHAR,
'greatpeople' : FontSymbols.GREAT_PEOPLE_CHAR,
'badgold' : FontSymbols.BAD_GOLD_CHAR,
'badfood' : FontSymbols.BAD_FOOD_CHAR,
'eatenfood' : FontSymbols.EATEN_FOOD_CHAR,
'goldenage' : FontSymbols.GOLDEN_AGE_CHAR,
'angrypop' : FontSymbols.ANGRY_POP_CHAR,
'openBorders' : FontSymbols.OPEN_BORDERS_CHAR,
'defensivePact' : FontSymbols.DEFENSIVE_PACT_CHAR,
'map' : FontSymbols.MAP_CHAR,
'occupation' : FontSymbols.OCCUPATION_CHAR,
'power' : FontSymbols.POWER_CHAR,
}
GlobalInfosMap = { 'bonus': {'NUM': gc.getNumBonusInfos, 'GET': gc.getBonusInfo},
'improvement': {'NUM': gc.getNumImprovementInfos, 'GET': gc.getImprovementInfo},
'yield': {'NUM': YieldTypes.NUM_YIELD_TYPES, 'GET': gc.getYieldInfo},
'religion': {'NUM': gc.getNumReligionInfos, 'GET': gc.getReligionInfo},
'tech': {'NUM': gc.getNumTechInfos, 'GET': gc.getTechInfo},
'unit': {'NUM': gc.getNumUnitInfos, 'GET': gc.getUnitInfo},
'civic': {'NUM': gc.getNumCivicInfos, 'GET': gc.getCivicInfo},
'building': {'NUM': gc.getNumBuildingInfos, 'GET': gc.getBuildingInfo},
'terrain': {'NUM': gc.getNumTerrainInfos, 'GET': gc.getTerrainInfo},
'trait': {'NUM': gc.getNumTraitInfos, 'GET': gc.getTraitInfo},
'feature' : {'NUM': gc.getNumFeatureInfos, 'GET': gc.getFeatureInfo},
'route': {'NUM': gc.getNumRouteInfos, 'GET': gc.getRouteInfo},
'promotion': {'NUM':gc.getNumPromotionInfos, 'GET': gc.getPromotionInfo},
} | [
"max-zanko@users.noreply.github.com"
] | max-zanko@users.noreply.github.com |
96616d835850d54569cb072a532337752be7e8d2 | ac10761e842fbde677db3c78a74400845e08904a | /lib/python/django_browserid/tests/urls.py | 3b3d01e68efff56d69df958f4b8476484cd06016 | [] | no_license | mozilla/moztrap-vendor-lib | 6d7704394ef1db72ee0514eefc25d9fcb191c4ca | d0007ae11fad91157b99feb985d19b16170fcb09 | refs/heads/master | 2023-07-03T17:19:42.477593 | 2019-03-29T15:55:04 | 2019-03-29T15:55:04 | 2,573,756 | 1 | 5 | null | 2020-06-08T14:44:16 | 2011-10-14T01:59:31 | Python | UTF-8 | Python | false | false | 343 | py | """
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from django.conf.urls.defaults import include, patterns
urlpatterns = patterns('',
(r'^browserid/', include('django_browserid.urls')),
)
| [
"cdawson@mozilla.com"
] | cdawson@mozilla.com |
a2701bf7690dca4bc5aa3c3a5c51677087393188 | 27e3a0b71e8ca181af8bea9df8dbe225789d3973 | /K_Nearest/Euclidean_Dist(K-Nearest).py | fa46ffdb5a9f1d7876a74b9a5b341a3deca7d6e8 | [] | no_license | Abey12525/PYTHON_GEN | 8fb9f1fcbf9e229e360324e3a2b67115be949ff6 | 52a363653719daf1ea9e0adce6ccc955477fb573 | refs/heads/master | 2021-06-03T11:52:14.341432 | 2020-02-22T18:27:31 | 2020-02-22T18:27:31 | 107,938,645 | 0 | 1 | null | 2019-10-11T11:41:16 | 2017-10-23T06:05:08 | Jupyter Notebook | UTF-8 | Python | false | false | 3,064 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 30 23:15:13 2017
@author: ARH
"""
# root(sum of i=1 to n (Qi-Pi)^2)
import math as m
#import matplotlib.pyplot as plt
#from matplotlib import style
from collections import Counter
import warnings as wr
import numpy as np
import time
import pandas as pd
import random
from sklearn import cross_validation , neighbors
#style.use('fivethirtyeight')
#dataset={'k':[[1,2],[3,4],[2,1]],'r':[[6,5],[7,7],[8,5]]}
#for char in 'abcdefghijklmnopqrstuvwxyz':
def math_dist(a,b):
return m.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)
def numpy_dist(a,b):
return np.linalg.norm(a-b)
def k_nearest_neighbours(data,predict,k=3):
if len(data) >= k :
warning.warn("K is set to value less than total voting group dude !!!!")
distances = []
for group in data:
for features in data[group]:
distances.append([numpy_dist(np.array(features),np.array(predict)),group])
votes=[i[1] for i in sorted(distances) [:k]]
vote_result=Counter(votes).most_common(1)[0][0]
confidence=Counter(votes).most_common(1)[0][1]/k
return vote_result,confidence
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except(TypeError, ValueError):
pass
return False
def replace_de(x):
for i in range(len(x)):
for j in range(15):
if not is_number(x[i,j]):
if(len(x[i,j])<=2):
replace=0
for oj in range(len(x[i,j])):
replace=ord(x[i,j][oj])+replace
x[i,j]=replace
df=pd.read_csv('approval.txt')
df.replace('?',-99999,inplace=True)
df.replace('+',1,inplace=True)
df.replace('-',0,inplace=True)
#df.drop(['A1'],1,inplace=True)
x = np.array(df)
replace_de(x)
for i in range(len(x)):
for j in range(16):
x[i,j]=float(x[i,j])
df=pd.DataFrame(x)
#df.to_csv('tt.csv')
#np.savetxt('test.csv',x,delimiter=' ')
y=x
accuracy=[]
for i in range(25):
x=y
random.shuffle(x)
test_size = 0.2
train_set = { 1: [],0: []}
test_set={1: [],0: []}
train_data=x[:-int(test_size*len(x))]
test_data=x[-int(test_size*len(x)):]
for i in train_data:
train_set[i[-1]].append(i [ : -1])
for i in test_data:
test_set[i[-1]].append(i [ : -1])
correct = 0
total = 0
for group in test_set:
for data in test_set[group]:
vote,c = k_nearest_neighbours(train_set,data,k=4)
if group == vote:
correct +=1
total += 1
accuracy.append(correct/total)
print('Home made Accuracy :',sum(accuracy)/len(accuracy))
#eculidean_distance = m.sqrt((plot1[0]-plot2[0])**2+(plot1[1]-plot2[1])**2)
"""
for i in dataset:
for ii in dataset[i]:
plt.scatter(ii[0],ii[1],s=100,color=i)
"""
#result=k_nearest_neighbours(x,new_features,k=3)
"""
[[plt.scatter(ii[0],ii[1],s=100,color=i) for ii in dataset[i]] for i in dataset]
plt.scatter(new_features[0],new_features[1],color=result)
plt.show()
"""
| [
"noreply@github.com"
] | Abey12525.noreply@github.com |
269e0ffaa05096b410f812324e38587094ee38df | 24a52b2b363417a8bdfeb8f669ee53b7ee19f4d6 | /playa/conf.py | 7579c8aef6242a240ea812a489b5517cb84d0ca7 | [
"Apache-2.0"
] | permissive | isabella232/playa | e203997e2660babe333d4915f294530cde57ccb0 | a93335e592aa596645a60497a7c030a36ae7fec2 | refs/heads/master | 2023-03-18T23:51:35.577746 | 2011-07-15T01:07:53 | 2011-07-15T01:07:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | """
playa.conf
~~~~~~~~~~
Represents the default values for all settings.
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import os
import os.path
class PlayaConfig(object):
ROOT = os.path.normpath(os.path.dirname(__file__))
DEBUG = True
AUDIO_PATHS = []
WEB_HOST = '0.0.0.0'
WEB_PORT = 9000
WEB_LOG_FILE = os.path.join(ROOT, 'playa.log')
WEB_PID_FILE = os.path.join(ROOT, 'playa.pid')
DATA_PATH = os.path.join(ROOT, 'data')
SECRET_KEY = '_#(wkvb#@%%!x-dd!xt&i-1g5rylz4q&t6%m5u@3&7hyuqd437' | [
"dcramer@gmail.com"
] | dcramer@gmail.com |
2f3a3e7b1fc63846b50a732c89a74e2c34911bf0 | be2e79107e2bbc9aca5e784cb7f4d28dd48576b4 | /backend/config/settings/base.py | d16744819f492bfe29b5255ff97b0271282495fb | [] | no_license | dhmit/democracy_africa | 8047228603d736be76023659227e1fcfa773ef87 | 867faa59f006ed22293ca5ffd6ab7250e51added | refs/heads/master | 2023-07-08T12:17:03.241297 | 2023-06-28T17:07:20 | 2023-06-28T17:07:20 | 233,667,679 | 6 | 2 | null | 2023-06-28T17:07:23 | 2020-01-13T18:44:27 | JavaScript | UTF-8 | Python | false | false | 3,930 | py | """
Django base settings for dhmit/democracy_africa project.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
CONFIG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BACKEND_DIR = os.path.dirname(CONFIG_DIR)
PROJECT_ROOT = os.path.dirname(BACKEND_DIR)
BACKEND_DATA_DIR = os.path.join(BACKEND_DIR, 'data')
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
ALLOWED_HOSTS = [
] # For production, add domains
# Application definition
INSTALLED_APPS = [
# django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party
'rest_framework',
'corsheaders',
'webpack_loader',
# our application code
'app',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BACKEND_DIR, 'templates'),
],
'APP_DIRS': True, # our app doesn't, but our third party apps do!
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BACKEND_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
# the url where we'll look for static files
STATIC_URL = '/static/'
# where collectstatic puts static files for production
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# where collectstatic looks for static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'build'),
os.path.join(PROJECT_ROOT, 'assets'),
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
]
}
CORS_ORIGIN_WHITELIST = [
'http://localhost:3000',
'http://localhost:8000',
'http://localhost:8080',
]
# Django webpack loader settings
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(PROJECT_ROOT, 'webpack-stats.json'),
}
}
| [
"rahmed@mit.edu"
] | rahmed@mit.edu |
3097b5f72ecb14cdf3330128a68a45c6bb15861f | 58901d48be7691ac81d6ff59ef25627b479df909 | /skit_project/models/soil_penetration.py | 7235729348c2ea2889764728d4425ca1179481b6 | [] | no_license | Agilis-Enterprise-Solutions/ebtesting | a390522c928d0d6f58084e3e6830c4be6976ec27 | 6411ea9ba988c2adc5713912348645e0dda02899 | refs/heads/master | 2021-04-16T15:40:27.602922 | 2020-06-08T06:08:15 | 2020-06-08T06:08:15 | 249,367,422 | 0 | 0 | null | 2020-05-19T08:45:34 | 2020-03-23T07:51:35 | Python | UTF-8 | Python | false | false | 30,401 | py | # -*- coding: utf-8 -*-
from odoo import api, fields, models,_
from datetime import datetime, date
from odoo.exceptions import UserError
import json
class SoilPenetration(models.Model):
_name = "skit.soil.penetration"
_description = "Soil Penetration Test"
name = fields.Char(string="Lab Result No:")
penetration_test_date = fields.Date("Date")
project_id = fields.Many2one('project.project', "Project Name")
location_id = fields.Many2one('skit.location')
sample_identify = fields.Char(string="Sample Identification")
qty_rep = fields.Char(string="Quantity Represented")
supplied_by = fields.Many2one('res.users', "Supplied By")
sampled_by = fields.Many2one('res.users', "Sampled By")
submitted_by = fields.Many2one('res.users', "Submitted By")
contractor = fields.Many2one('res.partner',string="Contrator",domain="[('is_company','=',True)]")
original_source = fields.Char(string="Original Source")
supplied_at = fields.Char(string="Supplied At")
spec_item_no = fields.Char(string="Spec's Item No.")
proposed_use = fields.Text(string="Proposed Use")
designation_sampled = fields.Many2one('res.partner', "Designation",
readonly=True)
designation_submitted = fields.Many2one('res.partner', "Designation",
readonly=True)
date_performed = fields.Datetime(string="Date", readonly=True)
date_submit = fields.Datetime(string="Date", readonly=True)
state = fields.Selection([('draft', 'Draft'),
('submit', 'Submit'),
('confirm', 'Confirm'),
('verify', 'Verify'),
('approved', 'Approved'),
('cancelled', 'Cancelled')], string='Status',
readonly=True, copy=False, index=True,
default='draft', track_visibility='onchange')
tested_by = fields.Many2one('res.users', "Tested By", readonly=True)
tested_date = fields.Datetime("Tested Date", readonly=True, copy=False)
checked_by = fields.Many2one('res.users', "Checked By", readonly=True)
checked_date = fields.Datetime("Checked Date", readonly=True, copy=False)
witnessed_by = fields.Many2many('res.partner',string="Witnessed By",domain="[('is_company','=',False)]")
witnessed_date = fields.Datetime("Witnessed Date")
attested_by = fields.Many2one('res.users', "Attested By", readonly=True)
attested_date = fields.Datetime("Attested Date", readonly=True, copy=False)
penetration_line_blow10_ids = fields.One2many(
'skit.penetration.line.blow10',
'penetration_id')
penetration_line_blow30_ids = fields.One2many(
'skit.penetration.line.blow30',
'penetration_id')
penetration_line_blow65_ids = fields.One2many(
'skit.penetration.line.blow65',
'penetration_id')
wt_of_cylindersoil_10 = fields.Integer("Wt.of Cyl. + Soil gms")
wt_of_cylinder_10 = fields.Integer("Wt. of Cylinder gms")
wt_of_soil_10 = fields.Integer("Wt. of Soil gms",
compute='compute_wt_of_soil_10')
wet_density_10 = fields.Float("Wet Density g/cc",
compute='compute_wet_density_10',
digits=(12, 3))
can_number_10 = fields.Char("Can No")
wt_of_can_wet_soil_10 = fields.Float("Wt. of Can + Wet Soil gms")
wt_of_can_dry_soil_10 = fields.Float("Wt. of Can + Dry Soil gms")
moisture_loss_10 = fields.Float("Moisture Loss gms",
compute='compute_moisture_loss_10')
wt_of_can_10 = fields.Float("Wt. of Can gms")
wt_of_dry_soil_10 = fields.Float("Wt. of Dry Soil gms",
compute='compute_wt_dry_soil_10')
moisture_content_10 = fields.Float("Moisture Content %",
compute='compute_moisture_content_10')
dry_density_10 = fields.Float("Dry Density gms",
compute='compute_dry_density_10')
vol_of_cylinder_10 = fields.Integer("Vol. of Cylinder cc")
task_id = fields.Integer("Task", compute='_compute_task_id')
wt_of_cylindersoil_30 = fields.Integer("Wt.of Cyl. + Soil gms")
wt_of_cylinder_30 = fields.Integer("Wt. of Cylinder gms")
wt_of_soil_30 = fields.Integer("Wt. of Soil gms",
compute='compute_wt_of_soil_30')
wet_density_30 = fields.Float("Wet Density g/cc",
compute='compute_wet_density_30',
digits=(12, 3))
can_number_30 = fields.Char("Can No")
wt_of_can_wet_soil_30 = fields.Float("Wt. of Can + Wet Soil gms")
wt_of_can_dry_soil_30 = fields.Float("Wt. of Can + Dry Soil gms")
moisture_loss_30 = fields.Float("Moisture Loss gms",
compute='compute_moisture_loss_30')
wt_of_can_30 = fields.Float("Wt. of Can gms")
wt_of_dry_soil_30 = fields.Float("Wt. of Dry Soil gms",
compute='compute_wt_dry_soil_30')
moisture_content_30 = fields.Float("Moisture Content %",
compute='compute_moisture_content_30')
dry_density_30 = fields.Float("Dry Density gms",
compute='compute_dry_density_30')
vol_of_cylinder_30 = fields.Integer("Vol. of Cylinder cc")
wt_of_cylindersoil_65 = fields.Integer("Wt.of Cyl. + Soil gms")
wt_of_cylinder_65 = fields.Integer("Wt. of Cylinder gms")
wt_of_soil_65 = fields.Integer("Wt. of Soil gms",
compute='compute_wt_of_soil_65')
wet_density_65 = fields.Float("Wet Density g/cc",
compute='compute_wet_density_65',
digits=(12, 3))
can_number_65 = fields.Char("Can No")
wt_of_can_wet_soil_65 = fields.Float("Wt. of Can + Wet Soil gms")
wt_of_can_dry_soil_65 = fields.Float("Wt. of Can + Dry Soil gms")
moisture_loss_65 = fields.Float("Moisture Loss gms",
compute='compute_moisture_loss_65')
wt_of_can_65 = fields.Float("Wt. of Can gms")
wt_of_dry_soil_65 = fields.Float("Wt. of Dry Soil gms",
compute='compute_wt_dry_soil_65')
moisture_content_65 = fields.Float("Moisture Content %",
compute='compute_moisture_content_65')
dry_density_65 = fields.Float("Dry Density gms",
compute='compute_dry_density_65')
vol_of_cylinder_65 = fields.Integer("Vol. of Cylinder cc")
mdd = fields.Float(string="MDD", digits=(12, 3))
omc = fields.Float(string="OMC")
cbr_100_per = fields.Float(string="CBR VALUE % @ 100",readonly=True)
cbr_99_per = fields.Float(string="CBR VALUE % @ 99",readonly=True)
swell = fields.Float(string="Swell(%)", digits=(12, 3))
penetration_line_graph = fields.Text(compute='_penetration_line_graph')
grade_check = fields.Boolean("check")
grade = fields.Many2one("config.abrasion",String="Grade")
@api.model
def default_material(self):
value = self.env['config.material'].search([
('name', '=', 'EMBANKMENT')], limit=1).id
if value:
return value
else:
return
kind_of_material = fields.Many2one('config.material',
string='Kind of Material',
default=default_material)
@api.onchange('kind_of_material')
def onchange_kind_of_material(self):
for material in self:
kind_of_material = material.kind_of_material
material.update({ 'spec_item_no' : kind_of_material.spec_item_no.name})
grade = kind_of_material.grading
if grade :
material.update({'grade_check' :True})
else :
material.update({'grade_check' :False})
return {
'domain':{
'grade':[(('id', 'in', grade.ids))],
},}
@api.one
def _penetration_line_graph(self):
self.penetration_line_graph = json.dumps(self.get_line_graph_datas())
@api.multi
def get_line_graph_datas(self):
datas = []
vertical=[]
horizontal=[]
xmin = [0]
xmax = [0]
ymin = 0
ymax = 0
blow10 = self.env['skit.penetration.line.blow10'].search([
('penetration_id', '=', self.id)])
blow30 = self.env['skit.penetration.line.blow30'].search([
('penetration_id', '=', self.id)])
blow65 = self.env['skit.penetration.line.blow65'].search([
('penetration_id', '=', self.id)])
xlabel = "CBR VALUE %@ 100 % ="+str(self.cbr_100_per)+" % @ 95% =" + str(
self.cbr_99_per)+"% : Swell (%) = "+str(self.swell)
# Get Max value of CBR in Blow10
b10_max_value = 0
if self.penetration_line_blow10_ids:
for b10 in blow10:
if b10.std_cbr > b10_max_value:
b10_max_value = b10.std_cbr
datas.append({"value": round(self.dry_density_10, 1),
"labels": [b10_max_value, xlabel],
"yaxis": "Dry Density(g/cc)"})
horizontal.append({"valuess":b10_max_value})
vertical.append({"value": round(self.dry_density_10, 1)})
# Get Max value of CBR in Blow30
b30_max_value = 0
if self.penetration_line_blow30_ids:
for b30 in blow30:
if b30.std_cbr > b30_max_value:
b30_max_value = b30.std_cbr
datas.append({"value": round(self.dry_density_30, 1),
"labels": [b30_max_value, xlabel],
"yaxis": "Dry Density(g/cc)"})
horizontal.append({"valuess":b30_max_value})
vertical.append({"value": round(self.dry_density_30, 1)})
# Get Max value of CBR in Blow65
b65_max_value = 0
if self.penetration_line_blow65_ids:
for b65 in blow65:
if b65.std_cbr > b65_max_value:
b65_max_value = b65.std_cbr
datas.append({"value": round(self.dry_density_65, 1),
"labels": [b65_max_value, xlabel],
"yaxis": "Dry Density(g/cc)"})
horizontal.append({"valuess":b65_max_value})
vertical.append({"value": round(self.dry_density_65, 1)})
if len(datas) >= 1:
ymaxval = max(datas, key=lambda x: x['value'])
yminval = min(datas, key=lambda x: x['value'])
ymin = yminval.get('value') - 0.1
ymax = ymaxval.get('value') + 1
xmaxval = max(datas, key=lambda x: x['labels'])
xminval = min(datas, key=lambda x: x['labels'])
xmin = xminval.get('labels')
xmax = xmaxval.get('labels')
mdd=self.mdd
mdd2 = round((mdd *0.95),1)
density_10 = round(self.dry_density_10,1)
density_30 = round(self.dry_density_30,1)
density_65 = round(self.dry_density_65,1)
x3=0
x4=0
if density_10 <= mdd <= density_30 or density_10 >= mdd >= density_30:
x1=b10_max_value
y1=density_10
x2=b30_max_value
y2 =density_30
if y1==y2 or x1==x2:
x3=0
else:
straight_y =mdd
m=round((y2-y1)/(x2-x1),5)
b=round((y1-m*x1),3)
x3= (straight_y-b)/m
elif density_30 <= mdd <= density_65 or density_30 >= mdd >= density_65:
x1=b30_max_value
y1=density_30
x2=b65_max_value
y2 =density_65
if y1==y2 or x1==x2:
x3=0
else:
straight_y =mdd
m=round((y2-y1)/(x2-x1),5)
b=round((y1-m*x1),3)
x3= (straight_y-b)/m
if density_10 <= mdd2 <= density_30 or density_10 >= mdd2 >= density_30:
x1=b10_max_value
y1=density_10
x2=b30_max_value
y2 =density_30
if y1==y2 or x1==x2:
x4=0
else:
straight_y =mdd2
m=round((y2-y1)/(x2-x1),5)
b=round((y1-m*x1),3)
x4= (straight_y-b)/m
elif density_30 <= mdd2 <= density_65 or density_30 >= mdd2 >= density_65:
x1=b30_max_value
y1=density_30
x2=b65_max_value
y2 =density_65
if y1==y2 or x1==x2:
x4=0
else:
straight_y =mdd2
m=round((y2-y1)/(x2-x1),5)
b=round((y1-m*x1),3)
x4= (straight_y-b)/m
self.write({'cbr_100_per':round(x3,2),
'cbr_99_per':round(x4,2),})
return [{'values':datas,
'horizontal':horizontal,
'v1_value':x3 ,
'v2_value':x4 ,
'vertical':vertical,
'h1_value': mdd,
'h2_value':mdd2,
'y_val': [ymin, ymax],
'x_val': [xmin[0], xmax[0]+1],
'title': "MDD ="+str(self.mdd)+" OMC ="+str(self.omc)+" %",
'id': self.id}]
# Calculate Weight of Soil - auto-computed as
# (Weight of Cylinder + Soil) - (Weight of Cylinder)
# Eg : (11715 - 7060 = 4655)
@api.depends('wt_of_cylindersoil_10', 'wt_of_cylinder_10')
def compute_wt_of_soil_10(self):
for penetration in self:
soil = penetration.wt_of_cylindersoil_10
cylinder = penetration.wt_of_cylinder_10
if soil and cylinder:
total = (soil-cylinder)
penetration.update({'wt_of_soil_10': total})
# Calculate Wet Density - auto-computed as
# (Weight of Soil) /( Volume of Cylinder)
# Eg : (4655 / 2238 = 2.080)
@api.depends('wt_of_soil_10', 'vol_of_cylinder_10')
def compute_wet_density_10(self):
for penetration in self:
soil_wt = penetration.wt_of_soil_10
vol = penetration.vol_of_cylinder_10
if soil_wt and vol:
wet = (soil_wt/vol)
penetration.update({'wet_density_10': wet})
# Calculate Moisture Loss - auto-computed as
# (Weight of Can + Wet Soil) -(Weight of Can + Dry Soil)
# Eg : (251-240 = 11.00)
@api.depends('wt_of_can_wet_soil_10', 'wt_of_can_dry_soil_10')
def compute_moisture_loss_10(self):
for penetration in self:
wet_soil = penetration.wt_of_can_wet_soil_10
dry_soil = penetration.wt_of_can_dry_soil_10
if wet_soil and dry_soil:
moisture = (wet_soil-dry_soil)
penetration.update({'moisture_loss_10': moisture})
# Calculate Weight of Dry Soil - auto-computed as
# (Weight of Can + Dry Soil) -(Weight of Can)
# Eg : (240 - 18.81 = 221.19)
@api.depends('wt_of_can_dry_soil_10', 'wt_of_can_10')
def compute_wt_dry_soil_10(self):
for penetration in self:
dry_soil = penetration.wt_of_can_dry_soil_10
wt_can = penetration.wt_of_can_10
if dry_soil and wt_can:
dry_soil = (dry_soil-wt_can)
penetration.update({'wt_of_dry_soil_10': dry_soil})
# Calculate Moisture Content - auto-computed as
# (Moisture Loss) /Weight of Dry Soil)*100
# Eg :( (11.00 /221.19 )*100= 4.973)
@api.depends('wt_of_dry_soil_10', 'moisture_loss_10')
def compute_moisture_content_10(self):
for penetration in self:
dry_soil = penetration.wt_of_dry_soil_10
loss = penetration.moisture_loss_10
if dry_soil and loss:
content = (loss/dry_soil*100)
penetration.update({'moisture_content_10': content})
# Calculate Dry Density- auto-computed as
# (Wet Density) /(100 + moisture Content)*100
# Eg : (2.080 /(100+4.973)*100 = 1.98)
@api.depends('wet_density_10', 'moisture_content_10')
def compute_dry_density_10(self):
for penetration in self:
wet = penetration.wet_density_10
content = penetration.moisture_content_10
if wet and content:
dry = (wet/(100+content)*100)
penetration.update({'dry_density_10': dry})
# Calculate Weight of Soil - auto-computed as
# (Weight of Cylinder + Soil) - (Weight of Cylinder)
# Eg : (11715 - 7060 = 4655)
@api.depends('wt_of_cylindersoil_30', 'wt_of_cylinder_30')
def compute_wt_of_soil_30(self):
for penetration in self:
soil = penetration.wt_of_cylindersoil_30
cylinder = penetration.wt_of_cylinder_30
if soil and cylinder:
total = (soil-cylinder)
penetration.update({'wt_of_soil_30': total})
# Calculate Wet Density - auto-computed as
# (Weight of Soil) /( Volume of Cylinder)
# Eg : (4655 / 2238 = 2.080)
@api.depends('wt_of_soil_30', 'vol_of_cylinder_30')
def compute_wet_density_30(self):
for penetration in self:
soil_wt = penetration.wt_of_soil_30
vol = penetration.vol_of_cylinder_30
if soil_wt and vol:
wet = (soil_wt/vol)
penetration.update({'wet_density_30': wet})
# Calculate Moisture Loss - auto-computed as
# (Weight of Can + Wet Soil) -(Weight of Can + Dry Soil)
# Eg : (251-240 = 11.00)
@api.depends('wt_of_can_wet_soil_30', 'wt_of_can_dry_soil_30')
def compute_moisture_loss_30(self):
for penetration in self:
wet_soil = penetration.wt_of_can_wet_soil_30
dry_soil = penetration.wt_of_can_dry_soil_30
if wet_soil and dry_soil:
moisture = (wet_soil-dry_soil)
penetration.update({'moisture_loss_30': moisture})
# Calculate Weight of Dry Soil - auto-computed as
# (Weight of Can + Dry Soil) -(Weight of Can)
# Eg : (240 - 18.81 = 221.19)
@api.depends('wt_of_can_dry_soil_30', 'wt_of_can_30')
def compute_wt_dry_soil_30(self):
for penetration in self:
dry_soil = penetration.wt_of_can_dry_soil_30
wt_can = penetration.wt_of_can_30
if dry_soil and wt_can:
dry_soil = (dry_soil-wt_can)
penetration.update({'wt_of_dry_soil_30': dry_soil})
# Calculate Moisture Content - auto-computed as
# (Moisture Loss) /Weight of Dry Soil)*100
# Eg :( (11.00 /221.19 )*100= 4.973)
@api.depends('wt_of_dry_soil_30', 'moisture_loss_30')
def compute_moisture_content_30(self):
for penetration in self:
dry_soil = penetration.wt_of_dry_soil_30
loss = penetration.moisture_loss_30
if dry_soil and loss:
content = (loss/dry_soil*100)
penetration.update({'moisture_content_30': content})
# Calculate Dry Density- auto-computed as
# (Wet Density) /(100 + moisture Content)*100
# Eg : (2.080 /(100+4.973)*100 = 1.98)
@api.depends('wet_density_30', 'moisture_content_30')
def compute_dry_density_30(self):
for penetration in self:
wet = penetration.wet_density_30
content = penetration.moisture_content_30
if wet and content:
dry = (wet/(100+content)*100)
penetration.update({'dry_density_30': dry})
# Calculate Weight of Soil - auto-computed as
# (Weight of Cylinder + Soil) - (Weight of Cylinder)
# Eg : (11715 - 7060 = 4655)
@api.depends('wt_of_cylindersoil_65', 'wt_of_cylinder_65')
def compute_wt_of_soil_65(self):
for penetration in self:
soil = penetration.wt_of_cylindersoil_65
cylinder = penetration.wt_of_cylinder_65
if soil and cylinder:
total = (soil-cylinder)
penetration.update({'wt_of_soil_65': total})
# Calculate Wet Density - auto-computed as
# (Weight of Soil) /( Volume of Cylinder)
# Eg : (4655 / 2238 = 2.080)
@api.depends('wt_of_soil_65', 'vol_of_cylinder_65')
def compute_wet_density_65(self):
for penetration in self:
soil_wt = penetration.wt_of_soil_65
vol = penetration.vol_of_cylinder_65
if soil_wt and vol:
wet = (soil_wt/vol)
penetration.update({'wet_density_65': wet})
# Calculate Moisture Loss - auto-computed as
# (Weight of Can + Wet Soil) -(Weight of Can + Dry Soil)
# Eg : (251-240 = 11.00)
@api.depends('wt_of_can_wet_soil_65', 'wt_of_can_dry_soil_65')
def compute_moisture_loss_65(self):
for penetration in self:
wet_soil = penetration.wt_of_can_wet_soil_65
dry_soil = penetration.wt_of_can_dry_soil_65
if wet_soil and dry_soil:
moisture = (wet_soil-dry_soil)
penetration.update({'moisture_loss_65': moisture})
# Calculate Weight of Dry Soil - auto-computed as
# (Weight of Can + Dry Soil) -(Weight of Can)
# Eg : (240 - 18.81 = 221.19)
@api.depends('wt_of_can_dry_soil_65', 'wt_of_can_65')
def compute_wt_dry_soil_65(self):
for penetration in self:
dry_soil = penetration.wt_of_can_dry_soil_65
wt_can = penetration.wt_of_can_65
if dry_soil and wt_can:
dry_soil = (dry_soil-wt_can)
penetration.update({'wt_of_dry_soil_65': dry_soil})
# Calculate Moisture Content - auto-computed as
# (Moisture Loss) /Weight of Dry Soil)*100
# Eg :( (11.00 /221.19 )*100= 4.973)
@api.depends('wt_of_dry_soil_65', 'moisture_loss_65')
def compute_moisture_content_65(self):
for penetration in self:
dry_soil = penetration.wt_of_dry_soil_65
loss = penetration.moisture_loss_65
if dry_soil and loss:
content = (loss/dry_soil*100)
penetration.update({'moisture_content_65': content})
# Calculate Dry Density- auto-computed as
# (Wet Density) /(100 + moisture Content)*100
# Eg : (2.080 /(100+4.973)*100 = 1.98)
@api.depends('wet_density_65', 'moisture_content_65')
def compute_dry_density_65(self):
for penetration in self:
wet = penetration.wet_density_65
content = penetration.moisture_content_65
if wet and content:
dry = (wet/(100+content)*100)
penetration.update({'dry_density_65': dry})
@api.depends('name')
def _compute_task_id(self):
task = self.env['project.task'].search([('name', '=', self.name)])
self.update({
'task_id': task.id})
@api.model
def create(self, vals):
vals['state'] = "draft"
if (vals.get('sampled_by')):
res_user = self.env['res.users'].search([
('id', '=', vals['sampled_by'])])
vals['designation_sampled'] = res_user.partner_id.id
vals['date_performed'] = datetime.today()
result = super(SoilPenetration, self).create(vals)
return result
@api.multi
def write(self, values):
if (values.get('sampled_by')):
res_user = self.env['res.users'].search([
('id', '=', values['sampled_by'])])
values['designation_sampled'] = res_user.partner_id.id
values['date_performed'] = datetime.today()
result = super(SoilPenetration, self).write(values)
return result
# Submit Button Action
@api.multi
def pt_action_submit(self):
user = self.env['res.users'].browse(self.env.uid)
if (self.submitted_by):
self.write({'state': 'submit',
'tested_by': user.id,
'tested_date': datetime.today(),
'designation_submitted': self.submitted_by.partner_id.id,
'date_submit': datetime.today()
})
# Confirm Button Action
@api.multi
def pt_action_confirm(self):
user = self.env['res.users'].browse(self.env.uid)
self.write({'state': 'confirm',
'checked_by': user.id,
'checked_date': datetime.today()})
# Verify Button Action
@api.multi
def pt_action_verify(self):
self.write({'state': 'verify',
})
# The Approved Button will appear when the test
# results were verified completely
@api.multi
def pt_action_approve(self):
user = self.env['res.users'].browse(self.env.uid)
if not self.penetration_test_date:
self.write({'penetration_test_date': date.today()})
self.write({'state': 'approved',
'attested_by': user.id,
'attested_date': datetime.today(),
})
# Cancelled Button Action
@api.multi
def pt_action_cancel(self):
self.write({'state': 'cancelled'})
# Reset Button that can only be activated when approved
# by the Branch Lead Technician.
# This Button will delete the results as this
# intends to repeat the test performed.
@api.multi
def pt_action_draft(self):
orders = self.filtered(lambda s: s.state in ['cancelled'])
return orders.write({
'state': 'draft'})
# auto-populated field and associated to the Sampled By field
# date/time when the Sample was Submitted
@api.onchange('sampled_by')
def _onchange_sampled_by(self):
sampled_by = self.sampled_by
if sampled_by:
self.designation_sampled = sampled_by.partner_id.id
self.date_performed = datetime.today()
class SkitPenetrationLineBlow10(models.Model):
_name = "skit.penetration.line.blow10"
_description = "Soil Penetration Line Blows10"
penetration_id = fields.Many2one('skit.soil.penetration')
penetration = fields.Float(" (mm) ")
load_tlr = fields.Integer(" TLR ")
load_load = fields.Float("Load", compute='compute_load')
std_std = fields.Float("Standard")
std_cbr = fields.Integer("CBR", compute='compute_std_cbr')
# Calculate Load - auto-computed as
# (TLR*0.1321)
# Eg: (475*0.1321=62.75)
@api.depends('load_tlr')
def compute_load(self):
for penetration in self:
penet = penetration.penetration
# if penet == 2.50 or penet == 5.00:
tlr = penetration.load_tlr
if tlr:
load = (tlr*3.031)
penetration.update({'load_load': load})
# Calculate CBR - auto-computed as
# (Load/Standard)*100
# Eg: (62.75/70.63)*100=86
@api.depends('load_load', 'std_std')
def compute_std_cbr(self):
for penetration in self:
load = penetration.load_load
std = penetration.std_std
if load and std:
cbr = (load/std)*100
cbr = round(cbr)
penetration.update({'std_cbr': cbr})
class SkitPenetrationLineBlow30(models.Model):
_name = "skit.penetration.line.blow30"
_description = "Soil Penetration Line Blows30"
penetration_id = fields.Many2one('skit.soil.penetration')
penetration = fields.Float(" (mm) ")
load_tlr = fields.Integer("TLR")
load_load = fields.Float("Load", compute='compute_load')
std_std = fields.Float("Standard")
std_cbr = fields.Integer("CBR", compute='compute_std_cbr')
# Calculate Load - auto-computed as
# (TLR*0.1321)
# Eg: (475*0.1321=62.75)
@api.depends('load_tlr')
def compute_load(self):
for penetration in self:
penet = penetration.penetration
# if penet == 2.50 or penet == 5.00:
tlr = penetration.load_tlr
if tlr:
load = (tlr*3.031)
penetration.update({'load_load': load})
# Calculate CBR - auto-computed as
# (Load/Standard)*100
# Eg: (62.75/70.63)*100=86
@api.depends('load_load', 'std_std')
def compute_std_cbr(self):
for penetration in self:
load = penetration.load_load
std = penetration.std_std
if load and std:
cbr = (load/std)*100
cbr = round(cbr)
penetration.update({'std_cbr': cbr})
class SkitPenetrationLineBlow65(models.Model):
_name = "skit.penetration.line.blow65"
_description = "Soil Penetration Line Blows65"
penetration_id = fields.Many2one('skit.soil.penetration')
penetration = fields.Float(" (mm) ")
load_tlr = fields.Integer("TLR")
load_load = fields.Float("Load", compute='compute_load')
std_std = fields.Float("Standard")
std_cbr = fields.Integer("CBR", compute='compute_std_cbr')
# Calculate Load - auto-computed as
# (TLR*0.1321)
# Eg: (475*0.1321=62.75)
@api.depends('load_tlr')
def compute_load(self):
for penetration in self:
penet = penetration.penetration
# if penet == 2.50 or penet == 5.00:
tlr = penetration.load_tlr
if tlr:
load = (tlr*3.031)
penetration.update({'load_load': load})
# Calculate CBR - auto-computed as
# (Load/Standard)*100
# Eg: (62.75/70.63)*100=86
@api.depends('load_load', 'std_std')
def compute_std_cbr(self):
for penetration in self:
load = penetration.load_load
std = penetration.std_std
if load and std:
cbr = (load/std)*100
cbr = round(cbr)
penetration.update({'std_cbr': cbr})
| [
"info@srikeshinfotech.com"
] | info@srikeshinfotech.com |
11ed9a8f8a96f31f742c9917250727d1c524d393 | 47cf58449ebc24d006930b4daba2dfbd730bcfb8 | /primos.py | 063450270a1fffd31c6efba24e5f5057b0f5d51e | [] | no_license | VictorDavid21/engcpac3 | 52443e83cde061b263a5365dc7cee93ff7a14ba3 | 60566a9fa1f4fb5df0c7fbce64206dc21d7bef84 | refs/heads/main | 2022-12-28T09:09:08.807137 | 2020-10-15T20:37:10 | 2020-10-15T20:37:10 | 304,414,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | import os
from flask import Flask, jsonify, request
from math import sqrt
app = Flask(__name__)
@app.route('/')
def nao_entre_em_panico():
limite = 100
c = 1
p = 1
numero = 3
primos = "2,"
while p < limite:
ehprimo = 1
for i in range(2, numero):
if numero % i == 0:
ehprimo = 0
break
if (ehprimo):
primos = primos + str(numero) + ","
p += 1
numero+=1
return primos
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host="0.0.0.0", port=port)
| [
"victorddavid4@gmail.com"
] | victorddavid4@gmail.com |
8953b4be39f753c6de58be709acfd2ce7e191b6f | 2cbb6019f067f9a48b3f1ea0c3fc0faeaa7eaa33 | /src/values.py | eb2d56745cd76dcb5f3e60b308d8eba23e15edca | [
"MIT"
] | permissive | ThatXliner/Pyxell | be6a5e6976589273454636fdc254bd2dbfc6ca6a | 72c58fd26fe1eded038ba5bf11c327c9b33cdd31 | refs/heads/master | 2023-03-14T16:46:56.383103 | 2021-01-30T18:40:20 | 2021-01-31T18:36:31 | 336,136,781 | 0 | 0 | MIT | 2021-02-05T02:05:04 | 2021-02-05T02:05:04 | null | UTF-8 | Python | false | false | 7,038 | py |
import copy
from . import codegen as c
from . import types as t
class Value:
def __init__(self, type=None):
self.type = type
def isTemplate(self):
return isinstance(self, FunctionTemplate)
def bind(self, obj):
value = copy.copy(self)
if obj is None:
return value
value.type = t.Func(value.type.args[1:], value.type.ret)
return Bind(value, obj)
class Literal(Value):
def __init__(self, value, formatter=None, **kwargs):
super().__init__(**kwargs)
self.value = value
self.formatter = formatter
def __str__(self):
if isinstance(self.formatter, str):
return self.formatter.format(self.value)
if callable(self.formatter):
return self.formatter(self.value)
return str(self.value)
def Int(x):
return Literal(int(x), '{}LL', type=t.Int)
def Rat(x):
return Literal(x, 'Rat("{}"s)', type=t.Rat)
def Float(x):
return Literal(float(x), type=t.Float)
def Bool(x):
return Literal(bool(x), lambda value: str(value).lower(), type=t.Bool)
false = Bool(False)
true = Bool(True)
def Char(x):
return Literal(str(x), "'{}'", type=t.Char)
def String(x):
return Literal(str(x), 'make_string("{}"s)', type=t.String)
class Variable(Value):
def __init__(self, type, name):
super().__init__(type)
self.name = name
def __str__(self):
return self.name
class Container(Value):
def __init__(self, type, elements, formatter):
super().__init__(type)
self.elements = elements
self.formatter = formatter
def __str__(self):
return self.formatter.format(', '.join(map(str, self.elements)))
class Array(Container):
def __init__(self, elements, subtype=None):
type = t.Array(subtype or (elements[0].type if elements else t.Unknown))
super().__init__(type, elements, f'make_array<{type.subtype}>' + '({{{}}})')
class Set(Container):
def __init__(self, elements, subtype=None):
type = t.Set(subtype or (elements[0].type if elements else t.Unknown))
super().__init__(type, elements, f'make_set<{type.subtype}>' + '({{{}}})')
class Dict(Container):
def __init__(self, keys, values, key_type=None, value_type=None):
type = t.Dict(key_type or (keys[0].type if keys else t.Unknown),
value_type or (values[0].type if values else t.Unknown))
elements = [f'{{{key}, {value}}}' for key, value in zip(keys, values)]
super().__init__(type, elements, f'make_dict<{type.key_type}, {type.value_type}>' + '({{{}}})')
self.keys = keys
self.values = values
class Nullable(Value):
def __init__(self, value, subtype=None):
super().__init__(t.Nullable(subtype or (value.type if value else t.Unknown)))
self.value = value
def __str__(self):
arg = str(self.value or '')
return f'{self.type}({arg})'
null = Nullable(None)
class Tuple(Container):
def __init__(self, elements):
type = t.Tuple([value.type for value in elements])
super().__init__(type, elements, 'std::make_tuple({})')
class Object(Value):
def __init__(self, cls):
super().__init__(cls)
def __str__(self):
return f'std::make_shared<{self.type.initializer.name}>()'
class FunctionTemplate(Value):
def __init__(self, id, typevars, type, body, env, lambda_=False):
super().__init__(type)
self.id = id
self.final = True # identifier cannot be redefined
self.bound = None
self.typevars = typevars
self.body = body
self.env = env
self.lambda_ = lambda_
self.cache = {}
def bind(self, obj):
template = copy.copy(self)
if obj is None:
return template
template.bound = obj
return template
class Attribute(Value):
def __init__(self, value, attr, **kwargs):
super().__init__(**kwargs)
self.value = value
self.attr = attr
def __str__(self):
op = '.' if self.value.type and (self.value.type == t.Rat or self.value.type.isNullable() or self.value.type.isGenerator()) else '->'
return f'{self.value}{op}{self.attr}'
class Index(Value):
def __init__(self, collection, index, **kwargs):
super().__init__(**kwargs)
self.collection = collection
self.index = index
def __str__(self):
return f'{Dereference(self.collection)}[{self.index}]'
class Call(Value):
def __init__(self, func, *args, **kwargs):
super().__init__(**kwargs)
self.func = func
self.args = args
def __str__(self):
args = ', '.join(map(str, self.args))
return f'{self.func}({args})'
def Cast(value, type):
if value.type == type:
return value
return Call(f'static_cast<{type}>', value, type=type)
def Get(tuple, index):
return Call(f'std::get<{index}>', tuple, type=tuple.type.elements[index])
def Dereference(value, type=None):
return UnaryOp('*', value, type=type)
def Extract(value):
return Dereference(value, type=value.type.subtype)
def IsNotNull(value):
return Call(Attribute(value, 'has_value'), type=t.Bool)
def IsNull(value):
return UnaryOp('!', IsNotNull(value), type=t.Bool)
class UnaryOp(Value):
def __init__(self, op, value, **kwargs):
super().__init__(**kwargs)
self.op = op
self.value = value
def __str__(self):
return f'({self.op}{self.value})'
class BinaryOp(Value):
def __init__(self, value1, op, value2, **kwargs):
super().__init__(**kwargs)
self.value1 = value1
self.op = op
self.value2 = value2
def __str__(self):
return f'({self.value1} {self.op} {self.value2})'
class TernaryOp(Value):
def __init__(self, value1, value2, value3, **kwargs):
super().__init__(**kwargs)
self.value1 = value1
self.value2 = value2
self.value3 = value3
def __str__(self):
return f'({self.value1} ? {self.value2} : {self.value3})'
class Lambda(Value):
def __init__(self, type, arg_vars, body, capture_vars=[]):
super().__init__(type)
self.capture_vars = capture_vars
self.arg_vars = arg_vars
if isinstance(body, Value):
body = c.Block(c.Statement('return', body))
self.body = body
def __str__(self):
capture = '=' + ''.join(f', &{var}' for var in self.capture_vars)
args = ', '.join([f'{arg.type} {var}' for arg, var in zip(self.type.args, self.arg_vars)])
return f'[{capture}]({args}) mutable {self.body}'
class Bind(Value):
def __init__(self, func, obj):
super().__init__(func.type)
self.func = func
self.obj = obj
def __str__(self):
# https://stackoverflow.com/a/57114008
block = c.Block(c.Statement('return', Call(self.func, self.obj, 'args...')))
return f'[&](auto&& ...args) {block}'
| [
"adam27.sol@gmail.com"
] | adam27.sol@gmail.com |
a7efb9519c5b81516b2464bfc59149f16f756ac2 | f8cb051166a454580dad7a41b9be63a86b3283b9 | /OpenLinkCheck/printer/ExcelCreater.py | eaf03336d105d4827e684aea723e4acf4080b49e | [] | no_license | od2016/opendata | 6c61a99584d6bab15587454e9f33de86710590ad | 5fe6d02d1c3cb6fa70535a75c83f4eaf5c0ab26e | refs/heads/master | 2021-01-10T05:52:51.530887 | 2016-03-10T02:33:25 | 2016-03-10T02:33:25 | 51,128,284 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | # -*- coding: utf-8 -*-
from time import sleep
import xlsxwriter
__author__ = 'johnnytsai'
class ExcelCreater:
    """Export a list of dataset/resource models to an .xlsx workbook.

    NOTE(review): Python 2 code -- ``str(...).decode('utf-8')`` below does
    not exist on Python 3 ``str`` objects.
    """
    def __init__(self):
        None
    @staticmethod
    def exportExcel(list, filename):
        """Write one header row plus one row per model to *filename*.

        NOTE(review): the parameter name ``list`` shadows the builtin.
        """
        # Export to an Excel file (original comment: 輸出成excel檔).
        print("CREATE EXCEL...")
        # strings_to_urls disabled so raw link text is written as-is.
        workbook = xlsxwriter.Workbook(filename, {'strings_to_urls': False})
        worksheet = workbook.add_worksheet(u'統計資料')
        row = 0
        col = 0
        # print title
        worksheet.write(row, col, u"資料集Id")
        worksheet.write(row, col + 1, u"資料集名稱")
        worksheet.write(row, col + 2, u"資料集描述")
        worksheet.write(row, col + 3, u"資料集連結")
        worksheet.write(row, col + 4, u"資料資源描述")
        worksheet.write(row, col + 5, u"資料資源下載連結")
        worksheet.write(row, col + 6, u"資料資源檔案格式")
        worksheet.write(row, col + 7, u"連線狀態")
        worksheet.write(row, col + 8, u"下載檔案格式")
        worksheet.write(row, col + 9, u"Exception")
        row += 1
        # One row per model; columns mirror the header order above.
        for model in list:
            worksheet.write(row, col, str(model.nid).decode('utf-8'))
            worksheet.write(row, col + 1, model.title)
            worksheet.write(row, col + 2, model.field_data_field_body)
            worksheet.write(row, col + 3, model.link)
            worksheet.write(row, col + 4, model.field_revision_field_resource_description_g)
            worksheet.write(row, col + 5, model.field_data_field_resource_url_g)
            worksheet.write(row, col + 6, model.taxonomy_term_data)
            worksheet.write(row, col + 7, str(model.status).decode('utf-8'))
            # .decode('utf-8')
            worksheet.write(row, col + 8, model.type)
            worksheet.write(row, col + 9, model.message)
            row += 1
        # NOTE(review): purpose of this 3-second pause before closing is
        # unclear -- confirm it is still needed.
        sleep(3)
        workbook.close()
| [
"mmmaaaxxx77@gmail.com"
] | mmmaaaxxx77@gmail.com |
36d2805d467bfda0f4d4a1d7b6534bbeed3e466d | 415f62d63194f255731d6c7d2d1cc8808b2f4e62 | /gesture_recognition/ops/__init__.py | 3a0883c63704c3269092449fb76c73a62185adbb | [
"BSD-2-Clause"
] | permissive | Four4Bit/MFF-pytorch | 73281c58e0b0ee749b5bf6926eb6e5b9eab89e17 | 047f75ae849264f459cb11f61fca85a967f97038 | refs/heads/main | 2023-04-20T15:56:24.044266 | 2021-04-20T17:12:58 | 2021-04-20T17:12:58 | 334,672,574 | 0 | 0 | null | 2021-01-31T14:18:29 | 2021-01-31T14:18:28 | null | UTF-8 | Python | false | false | 48 | py | from gesture_recognition.ops.basic_ops import *
| [
"1464445232@qq.com"
] | 1464445232@qq.com |
d7c11e2bc070bba11dfba277ceacda3677487f74 | a997694a9bad0eefed928962abb5b1fcdfa7ebe7 | /sword_to_offer/57/main.py | 4e33269dbf8f56e7e7399564b9195e3688f5a930 | [
"MIT"
] | permissive | gingkg/happyleetcodeeveryday | 55f447ccca11760573a5ab2f0dfd88ea13827828 | 7101e0404afca10ec3b18caafe4c5ef67eec38b6 | refs/heads/main | 2023-07-07T09:14:27.991977 | 2021-08-15T08:49:59 | 2021-08-15T08:49:59 | 382,788,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | """
剑指 Offer 57. 和为s的两个数字
输入一个递增排序的数组和一个数字s,在数组中查找两个数,使得它们的和正好是s。如果有多对数字的和等于s,则输出任意一对即可。
示例 1:
输入:nums = [2,7,11,15], target = 9
输出:[2,7] 或者 [7,2]
示例 2:
输入:nums = [10,26,30,31,47,60], target = 40
输出:[10,30] 或者 [30,10]
限制:
1 <= nums.length <= 10^5
1 <= nums[i] <= 10^6
"""
from typing import List
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Two-pointer scan over the ascending array.

        Returns one pair [a, b] with a + b == target, or None when no such
        pair exists. The sum is monotone in each pointer, so each step can
        safely discard one end.
        """
        left, right = 0, len(nums) - 1
        while left < right:
            pair_sum = nums[left] + nums[right]
            if pair_sum == target:
                return [nums[left], nums[right]]
            if pair_sum < target:
                left += 1
            else:
                right -= 1
if __name__ == '__main__':
    # Quick manual check mirroring example 1 from the problem statement.
    solver = Solution()
    print(solver.twoSum([2, 7, 11, 15], 9))
| [
"sby2015666@163.com"
] | sby2015666@163.com |
fe8ed128ab7ed80739fe086635747e9ebeb666af | efbb8efe9ae007b5ec35e67759379d304a57c953 | /notes_app/migrations/0005_auto_20201002_2101.py | c05fbd1e00d573e894d84400eb7c6be5cd47d225 | [] | no_license | wafaaxdev/NotesApp | 665bd3a826356bab2270e6f9559ce475d1f98af2 | 9dc2339358be59854bb33d883de40c96cd698c60 | refs/heads/master | 2022-12-28T12:57:12.504159 | 2020-10-19T17:54:33 | 2020-10-19T17:54:33 | 305,447,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Generated by Django 3.1.2 on 2020-10-02 18:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; relaxes Note.slug to accept
    # blank and NULL values.
    dependencies = [
        ('notes_app', '0004_auto_20201002_2039'),
    ]
    operations = [
        migrations.AlterField(
            model_name='note',
            name='slug',
            field=models.SlugField(blank=True, null=True),
        ),
    ]
| [
"wafa.172006@gmail.com"
] | wafa.172006@gmail.com |
3e206a926d4a1b78b6add50030ca66c967260b0d | d1e68cd0066291cfd189e3707e81c263de93d6dd | /vhwhighflow/settings.py | 92944414e1b60034eebb1e9ab2f5d2ee8cdb274a | [] | no_license | OmicronBlade/vhwhighflow | 63ff8a22877c8fa12531dd6600b6cabc04f1156f | 105ac79a506366cbd59289a19c858b458d558584 | refs/heads/master | 2023-06-30T21:55:53.154646 | 2021-08-14T20:04:37 | 2021-08-14T20:04:37 | 392,459,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,514 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control -- rotate it and load
# it from an environment variable before production use.
SECRET_KEY = '_y#p+#emw$=6ff(3*d1akb)stxn00smzy8qo2r&wu66jx)y)n1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost','127.0.0.1','.herokuapp.com','192.168.0.195']
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = os.path.join(PROJECT_PATH, 'staticfiles')
STATIC_URL = '/static/'
# Presumably imported for the commented-out LOGGING config below -- unused otherwise.
import logging
#LOGGING = {
#    'version': 1,
#    'disable_existing_loggers': False,
#    'handlers': {
#        'console': {
#            'class': 'logging.StreamHandler',
#        },
#    },
#    'loggers': {
#        'django': {
#            'handlers': ['console'],
#            'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
#        },
#    },
#}
# Application definition
INSTALLED_APPS = [
    'vhwhighflow',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    "bootstrap4",
    "bootstrap_datepicker_plus",
]
MIDDLEWARE = [
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# WhiteNoise storage backend for static files.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Trust the proxy's X-Forwarded-Proto header when deciding if a request is HTTPS.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
ROOT_URLCONF = 'vhwhighflow.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'vhwhighflow.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
#DATABASES = {
#    'default': {
#        'ENGINE': 'django.db.backends.sqlite3',
#        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#    }
#}
#VHWFlow!
# Database configuration.
# Precedence: explicit RDS_* environment variables win; otherwise use
# dj_database_url (Heroku) when it is importable; otherwise fall back to a
# local SQLite file. The RDS settings used to be duplicated verbatim in both
# the try and except branches -- a copy-paste divergence risk -- so they are
# built once by the helper below.
def _rds_database():
    """Postgres (Amazon RDS) connection settings taken from the environment."""
    return {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.environ['RDS_DB_NAME'],
        'USER': os.environ['RDS_USERNAME'],
        'PASSWORD': os.environ['RDS_PASSWORD'],
        'HOST': os.environ['RDS_HOSTNAMEIN'],
        'PORT': os.environ['RDS_PORT']
    }

try:
    import dj_database_url
    # Preserved behavior: ON_HEROKU is only defined when dj_database_url
    # imports successfully, exactly as before.
    ON_HEROKU = True
    if 'RDS_DB_NAME' in os.environ:
        DATABASES = {'default': _rds_database()}
    else:
        DATABASES = {
            'default': dj_database_url.config(default='sqlite:///' + BASE_DIR + '/db.sqlite3')
        }
except ImportError:
    if 'RDS_DB_NAME' in os.environ:
        DATABASES = {'default': _rds_database()}
    else:
        DATABASES = {
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
                'USER': '',
                'PASSWORD': '',
                'HOST': '',
                'PORT': ''
            }
        }
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# NOTE(review): Django's identifier for British English is 'en-gb'; verify
# that 'en-uk' resolves as intended.
LANGUAGE_CODE = 'en-uk'
TIME_ZONE = 'Africa/Johannesburg'
USE_I18N = True
# Presumably disabled so the explicit DATE/DATETIME formats below take
# effect instead of locale-derived ones -- confirm.
USE_L10N = False
USE_TZ = True
#Date formatting
DATE_INPUT_FORMATS = ['%d/%m/%Y', '%d/%m/%y']
DATETIME_INPUT_FORMATS = ['%d/%m/%Y %H:%M']
DATE_FORMAT = 'd/m/Y'
DATETIME_FORMAT = 'd/m/Y H:i'
#DATETIME_INPUT_FORMAT = [
#    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
#    '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
#    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
#    '%Y-%m-%d', # '2006-10-25'
#    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
#    '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
#    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
#    '%d/%m/%Y' # '25/10/2006'
#]
| [
"michaeljon.rosslee@gmail.com"
] | michaeljon.rosslee@gmail.com |
54b3a792d9337f39329eb7467933d3f80b9ddf6a | b6df34e5cc8290e4238e3b64446130763696b8e2 | /pontos_turisticos/urls.py | fb504782b607a5e3d8edc783bd5d32088f4a51cb | [] | no_license | adsons3c/api_django_rest | f6e8cbbba1b7d0d2da8f091c0eb66243ae0362bf | 1e8fb29ff8b539de6eb55a1a231a799fea344077 | refs/heads/master | 2020-04-06T18:58:31.569938 | 2018-11-15T18:21:47 | 2018-11-15T18:21:47 | 157,010,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | """pontos_turisticos URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import include
from django.urls import path
from rest_framework import routers
from core.api.viewsets import PontoTuristicoViewSet
from atracao.api.viewsets import AtracaoViewSet
from enderecos.api.viewsets import EnderecosViewSet
from comentarios.api.viewsets import ComentarioViewSet
from avaliacao.api.viewsets import AvaliacaoViewSet
# DRF router: one REST endpoint per registered viewset, in the order below.
router = routers.DefaultRouter()
for prefix, viewset in (
    (r'pontoturistico', PontoTuristicoViewSet),
    (r'atracao', AtracaoViewSet),
    (r'enderecos', EnderecosViewSet),
    (r'comentarios', ComentarioViewSet),
    (r'avaliacoes', AvaliacaoViewSet),
):
    router.register(prefix, viewset)

urlpatterns = [
    # All router-generated API routes live at the site root.
    path('', include(router.urls)),
    path('admin/', admin.site.urls),
]
| [
"adsonemaneuls3c@gmail.com"
] | adsonemaneuls3c@gmail.com |
2013df2811af303bf28b622389c22251a0e40bff | 99cd943ad5deed305608a516c0596cf3e1b552e5 | /src/py/VendingMachine/vending_machine1.py | c9767d0aef06e0102daeaf59a770b9d458689ecd | [] | no_license | koukyo1994/algorithms | da8beebafe95768890a88babdba5951b01a3f975 | 6cb3350f89ddbc244071c1bc3e1a10ec9e0760ed | refs/heads/master | 2021-06-23T19:04:22.618607 | 2021-04-24T08:33:01 | 2021-04-24T08:33:01 | 200,551,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | import sys
if __name__ == "__main__":
insert_price = input("insert: ")
if not insert_price.isdecimal():
print("整数を入力してください")
sys.exit()
product_price = input("product: ")
if not product_price.isdecimal():
print("整数を入力してください")
sys.exit()
change = int(insert_price) - int(product_price)
if change < 0:
print("金額が不足しています")
sys.exit()
coins = [5000, 1000, 500, 100, 50, 10, 5, 1]
for coin in coins:
n_coin = change // coin
change = change % coin
print(f"{coin}: {n_coin}")
| [
"arabiannight1994@yahoo.co.jp"
] | arabiannight1994@yahoo.co.jp |
d80794ea0e50e237fa4de23d02b0ed54e2e097f8 | 430d7b12d46397805bfa96786ee8e6bee0df791e | /normalizer/src/process_list.py | 197d246d55c64102c356f7782ed2cea3a9cb2e94 | [
"Apache-2.0"
] | permissive | socologize/OpenCNA | f374cbd31dcd344e187b1b00d579eadd008bf38b | ff2ff08b9f6439cd1176bf020ea428cc03710d17 | refs/heads/master | 2021-08-22T06:33:54.822428 | 2017-11-29T14:46:36 | 2017-11-29T14:46:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,267 | py | # -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2017 McAfee Inc. - All Rights Reserved.
################################################################################
__author__ = "Matias Marenchino"
import sys
import os
import itertools
import codecs
import logging
import re
import csv
import pandas as pd # this is
from r7r_parser import R7rParser
# Captures the host name from input file names shaped like
# '<14-digit timestamp>-<hostname>-<anything>.log'.
_FNAME_REGEX = r".*\d{14}-(.*?)-.*.log"
class ProcessListParser(R7rParser):
    """Parse process-list log file"""
    def __init__(self, input_fnames, output_fname):
        super(ProcessListParser, self).__init__(input_fnames, output_fname)
        self.logger = logging.getLogger(__name__)
    def _is_a_line_separator(self, line):
        # Blank lines (either line-ending style) separate key=value records.
        return line == '\r\n' or line == '\n'
    def normalize(self):
        """Merge the two input logs into one CSV at ``self.output_fname``.

        input_fnames[0]: UTF-16 file of blank-line-separated ``key=value``
        records, one record per process.
        input_fnames[1]: fixed-width process listing (appears to be
        ``tasklist``-style output -- TODO confirm) supplying Image Name /
        PID / Modules columns.
        The two sources are outer-joined on the process id.

        NOTE(review): Python 2 only -- ``except Exception, e`` and
        ``e.message`` below do not exist on Python 3.
        """
        self.logger.info(
            'process_list parser. Start processing: ' + str(self.input_fnames))
        match = re.match(_FNAME_REGEX, os.path.basename(self.input_fnames[0]))
        if not match:
            logging.error(('process_list parser. Failure: the input file '
                           'name ' + self.input_fnames[0] + ' does not match '
                           'the regex ' + _FNAME_REGEX))
            sys.exit(1)
        # First regex group is the host name embedded in the file name.
        host_name = match.groups()[0]
        try:
            process_list = []
            header_process_list = []
            with codecs.open(self.input_fnames[0], encoding='utf-16') as handler:
                # groupby splits the stream at blank lines; non-separator
                # groups are one record each.
                for key, group in itertools.groupby(handler, self._is_a_line_separator):
                    if not key:
                        mylist = list(group)
                        if mylist:
                            # NOTE(review): ``data`` is populated but never
                            # read afterwards -- only it_list is used.
                            data = {}
                            it_list = []
                            for item in mylist:
                                item_par = item.split('=')
                                data[item_par[0]] = item_par[1].rstrip("\r\n")
                                it_list.append(item_par[1].rstrip("\r\n"))
                                if not item_par[0] in header_process_list:
                                    header_process_list.append(item_par[0])
                            process_list.append(it_list)
            header_task_list = []
            tasklist_list = []
            with open(self.input_fnames[1]) as infile:
                for line in infile:
                    # Scan until the column-header line, then collect rows
                    # until a blank line or the 'Modules' marker.
                    if 'CPU Time Window Title' in line:
                        while not re.match(r'\s*\r?\n', line):
                            if 'Modules' in line:
                                break
                            line = next(infile, '')
                            if not line:
                                break
                            if line.startswith(' '):
                                # Continuation line: append to previous row.
                                size = len(tasklist_list)
                                tasklist_list[size - 1] += line.strip().rstrip("\r\n")
                            else:
                                if not line.startswith('='):
                                    if line.strip().rstrip("\r\n"):
                                        tasklist_list.append(line.strip())
            # Column boundaries are derived from the 'PID' label position in
            # the header row (8 characters wide, right-aligned).
            pid_end = tasklist_list[0].find('PID') + len('PID')
            pid_start = tasklist_list[0].find('PID') + len('PID') - 8
            image_name = tasklist_list[0][:pid_start].strip().replace(' ', '_')
            pid = tasklist_list[0][pid_start:pid_end].strip().replace(' ', '_')
            modules = tasklist_list[0][pid_end:].strip().replace(' ', '_')
            header_task_list.append(image_name)
            header_task_list.append(pid)
            header_task_list.append(modules)
            task_list = []
            for _, iitt in enumerate(tasklist_list[1:]):
                tasklist_item = []
                image_name_value = iitt[:pid_start].strip()
                pid_value = iitt[pid_start:pid_end].strip()
                modules_value = iitt[pid_end:].strip()
                tasklist_item.append(image_name_value)
                tasklist_item.append(pid_value)
                tasklist_item.append(modules_value)
                task_list.append(tasklist_item)
            df_process_list = pd.DataFrame(
                process_list, columns=header_process_list)
            df_task_list = pd.DataFrame(task_list, columns=header_task_list)
            # Outer join keeps processes that appear in only one source.
            df_result = df_process_list.merge(df_task_list, left_on='ProcessId',
                                              right_on='PID', how='outer')
            result = [["Host_Name", "PID", "Name", "Description",
                       "CommandLine", "Parent_PID", "Executable_Path", "Modules"]]
            for _, row in df_result.iterrows():
                # Prefer the tasklist PID; fall back to the record's ProcessId.
                pid = row['ProcessId'] if pd.isnull(row['PID']) else row['PID']
                name = row['Name'] if not pd.isnull(row['Name']) else ''
                description = row['Description'] if not pd.isnull(
                    row['Description']) else ''
                command_line = row['CommandLine'] if not pd.isnull(
                    row['CommandLine']) else ''
                parent_pid = row['ParentProcessId'] if not pd.isnull(
                    row['ParentProcessId']) else ''
                executable_path = row['ExecutablePath'] if not pd.isnull(
                    row['ExecutablePath']) else ''
                modules = row['Modules'] if not pd.isnull(
                    row['Modules']) else ''
                this_line = [host_name, pid, name, description, command_line,
                             parent_pid, executable_path, modules]
                result.append(this_line)
            self.logger.info(
                'process_list parser. Writing output: ' + str(self.output_fname))
            folder = os.path.dirname(self.output_fname)
            if not os.path.exists(folder):
                os.makedirs(folder)
            with open(self.output_fname, "w") as handler:
                csv_writer = csv.writer(handler)
                csv_writer.writerows(result)
        except NameError as e:
            logging.exception(
                'running_process.py Failure: ' + str(e.message))
            sys.exit(1)
        except Exception, e:
            logging.exception(
                'running_process.py Failure: ' + str(e.message))
            sys.exit(1)
| [
"ismael_valenzuela@mcafee.com"
] | ismael_valenzuela@mcafee.com |
52420c6646a9f53907234fb8eab6bf8562f728dc | 611cfc9081cade83a1f011d9bb1d80153db73cbb | /HillClimb.py | 8cc5e0fa1c887602f104a92cdbbba8b2498a37b8 | [] | no_license | ian-richardson-void/DM996-Group-G | 7c7a687257b9fd8caf255f8985cfc80c05df67eb | b98d728422d6d0c1157ba802d87a111047904452 | refs/heads/master | 2023-03-24T09:11:21.085925 | 2021-03-22T09:52:38 | 2021-03-22T09:52:38 | 340,159,226 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | import backend.maze as maze
import backend.rat as rat
# Hill-climb will have a fitness function (end - ratPos)
# and will move the rat a step towards the end each turn
def move(rat):
    """Advance *rat* one step toward the exit.

    Returns (move_result, progressed); progressed is False when the rat is
    stuck and no move was attempted.
    """
    # Renamed the local: the original 'move' shadowed this function's name.
    stuck, direction = checkStuck(rat)
    if stuck:
        return False, False
    return rat.move(direction), True
def checkStuck(rat):
    """Look for a legal move that does not worsen either distance axis.

    Returns (False, move) for the first acceptable candidate, or (True, 0)
    when every move would increase the x- or y-distance to the exit.
    """
    current = fitness(rat, rat.getPos())
    for candidate in rat.getMoves():
        projected = fitness(rat, rat.tempMove(candidate))
        if current[0] >= projected[0] and current[1] >= projected[1]:
            return False, candidate
    print("STUCK")
    return True, 0
def fitness(rat, newpos):
    """Return [|dx|, |dy|]: per-axis distance from *newpos* to the maze exit.

    Lower is better; [0, 0] means *newpos* is the exit.
    """
    # Renamed the local: the original 'exit' shadowed the builtin.
    goal = rat.maze.getExit()
    return [abs(goal[0] - newpos[0]), abs(goal[1] - newpos[1])]
def run(maze):
    """Drive one rat through *maze* with hill-climbing until it exits or stalls."""
    print("RUNNING HILL-CLIMB OPTIMISATION")
    runner = rat.Rat(maze)
    runner.maze.printBoard(runner.getPos())
    while True:
        result, not_stuck = move(runner)
        if not not_stuck:
            # No acceptable move left: stuck at a local optimum.
            break
        if result == False:
            # we have reached the exit (or made an illegal move, shouldnt be possible)
            print("REACHED EXIT")
            break
        runner.maze.printBoard(runner.getPos())
if __name__ == "__main__":
run(maze.Maze(15, 15)) | [
"ian.richardson.void@gmail.com"
] | ian.richardson.void@gmail.com |
9492454662d9baa6149dbe4c257a23c9a281b4af | 4fc6fdad6c0f52ff0f15186e411b106b7500fd4d | /osipkd/views/tu_ppkd/ap_advist.py | 18a3f920b7295924590b5854cd16890da12ceafd | [] | no_license | aagusti/osipkd-pdpt | 03e01e327d7df26da4f4dcdd82a35ba8cfa1ce40 | 130abc77292f2f3023da6f8b785fb7ccf337a374 | refs/heads/master | 2021-01-10T14:44:35.409216 | 2015-06-01T08:19:34 | 2015-06-01T08:19:34 | 36,646,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,868 | py | import os
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime,date
from sqlalchemy import not_, func
from pyramid.view import (view_config,)
from pyramid.httpexceptions import ( HTTPFound, )
import colander
from deform import (Form, widget, ValidationFailure, )
from osipkd.models import DBSession
from osipkd.models.apbd_anggaran import Kegiatan, KegiatanSub, KegiatanItem
from osipkd.models.pemda_model import Unit
from osipkd.models.apbd_tu import Sp2d, Advist
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
SESS_ADD_FAILED = 'Tambah ap-advist gagal'
SESS_EDIT_FAILED = 'Edit ap-advist gagal'
class view_ap_advist_ppkd(BaseViews):
    """Pyramid CRUD views (list / grid JSON / add / edit / delete) for
    Advist records, scoped to the session's fiscal year and work unit.

    NOTE(review): Python 2 syntax is used below (``except ..., e``); this
    module will not parse on Python 3.
    """
    @view_config(route_name="ap-advist", renderer="templates/ap-advist/list.pt")
    def view_list(self):
        # Renders the list page; grid data is fetched separately via view_act.
        ses = self.request.session
        req = self.request
        params = req.params
        url_dict = req.matchdict
        return dict(project='EIS',
                )
    ##########
    # Action #
    ##########
    @view_config(route_name='ap-advist-act', renderer='json',
                permission='read')
    def view_act(self):
        # JSON endpoint backing the DataTables grid.
        ses = self.request.session
        req = self.request
        params = req.params
        url_dict = req.matchdict
        if url_dict['act']=='grid':
            # NOTE(review): pk_id is computed but never used, and this
            # condition is duplicated immediately below.
            pk_id = 'id' in params and params['id'] and int(params['id']) or 0
        if url_dict['act']=='grid':
            columns = []
            columns.append(ColumnDT('id'))
            columns.append(ColumnDT('kode'))
            columns.append(ColumnDT('tanggal', filter=self._DTstrftime))
            columns.append(ColumnDT('nama'))
            columns.append(ColumnDT('nominal'))
            # Only rows for the session's fiscal year and unit, ordered by code.
            query = DBSession.query(Advist
                        ).filter(Advist.tahun_id==ses['tahun'],
                                 Advist.unit_id==ses['unit_id'] ,
                        ).order_by(Advist.kode.asc())
            rowTable = DataTables(req, Advist, query, columns)
            return rowTable.output_result()
    #######
    # Add #
    #######
    def form_validator(self, form, value):
        # NOTE(review): err_kegiatan is defined but never invoked, so this
        # validator currently accepts every submission -- confirm intent.
        def err_kegiatan():
            raise colander.Invalid(form,
                  'Kegiatan dengan no urut tersebut sudah ada')
    def get_form(self, class_form):
        # Build a Deform form from the given colander schema class.
        schema = class_form(validator=self.form_validator)
        schema.request = self.request
        return Form(schema, buttons=('simpan','batal'))
    def save(self, values, row=None):
        # Create (row is None) or update an Advist record and flush it.
        if not row:
            row = Advist()
            row.created = datetime.now()
            row.create_uid = self.request.user.id
        row.from_dict(values)
        row.updated = datetime.now()
        row.update_uid = self.request.user.id
        row.posted=0
        row.disabled = 'disabled' in values and 1 or 0
        if not row.kode:
            # Generate a code shaped "<year>-<unit>-BUD-<5-digit sequence>".
            tahun = self.session['tahun']
            unit_kd = self.session['unit_kd']
            unit_id = self.session['unit_id']
            no_urut = Advist.get_norut(tahun, unit_id)+1
            no = "0000%d" % no_urut
            nomor = no[-5:]
            row.kode = "%d" % tahun + "-%s" % unit_kd + "-BUD-%s" % nomor
        DBSession.add(row)
        DBSession.flush()
        return row
    def save_request(self, values, row=None):
        if 'id' in self.request.matchdict:
            values['id'] = self.request.matchdict['id']
        # Strip '.' thousands separators before persisting the amount.
        values["nominal"]=values["nominal"].replace('.','')
        row = self.save(values, row)
        self.request.session.flash('Advist sudah disimpan.')
        return row
    def route_list(self):
        return HTTPFound(location=self.request.route_url('ap-advist'))
    def session_failed(request, session_name):
        # NOTE(review): defined inside the class but takes ``request`` as the
        # first positional argument instead of ``self`` -- confirm call sites.
        r = dict(form=request.session[session_name])
        del request.session[session_name]
        return r
    @view_config(route_name='ap-advist-add', renderer='templates/ap-advist/add.pt',
                permission='add')
    def view_add(self):
        request=self.request
        form = self.get_form(AddSchema)
        if request.POST:
            if 'simpan' in request.POST:
                controls = request.POST.items()
                controls_dicted = dict(controls)
                # Check whether the submitted code already exists
                # (original comment: Cek Kode Sama ato tidak).
                if not controls_dicted['kode']=='':
                    a = form.validate(controls)
                    b = a['kode']
                    c = "%s" % b
                    cek = DBSession.query(Advist).filter(Advist.kode==c).first()
                    if cek :
                        self.request.session.flash('Kode advist sudah ada.', 'error')
                        return HTTPFound(location=self.request.route_url('ap-advist-add'))
                try:
                    c = form.validate(controls)
                except ValidationFailure, e:
                    return dict(form=form)
                row = self.save_request(controls_dicted)
                return HTTPFound(location=request.route_url('ap-advist-edit',id=row.id))
            return self.route_list()
        elif SESS_ADD_FAILED in request.session:
            del request.session[SESS_ADD_FAILED]
        return dict(form=form)
    ########
    # Edit #
    ########
    def query_id(self):
        return DBSession.query(Advist).filter(Advist.id==self.request.matchdict['id'])
    def id_not_found(request):
        # NOTE(review): missing ``self`` in the signature yet the body calls
        # ``self.route_list()`` -- calling this as written would raise.
        msg = 'User ID %s not found.' % request.matchdict['id']
        request.session.flash(msg, 'error')
        return self.route_list()
    @view_config(route_name='ap-advist-edit', renderer='templates/ap-advist/add.pt',
                permission='edit')
    def view_edit(self):
        request = self.request
        row = self.query_id().first()
        # NOTE(review): row.id / row.kode are read before the ``if not row``
        # guard below; a missing id raises AttributeError first.
        uid = row.id
        kode = row.kode
        if not row:
            return id_not_found(request)
        form = self.get_form(EditSchema)
        if request.POST:
            if 'simpan' in request.POST:
                controls = request.POST.items()
                # Check whether the submitted code collides with another row
                # (original comment: Cek Kode Sama ato tidak).
                a = form.validate(controls)
                b = a['kode']
                c = "%s" % b
                cek = DBSession.query(Advist).filter(Advist.kode==c).first()
                if cek:
                    kode1 = DBSession.query(Advist).filter(Advist.id==uid).first()
                    d = kode1.kode
                    if d!=c:
                        self.request.session.flash('Kode advist sudah ada', 'error')
                        return HTTPFound(location=request.route_url('ap-advist-edit',id=row.id))
                try:
                    c = form.validate(controls)
                except ValidationFailure, e:
                    return dict(form=form)
                self.save_request(dict(controls), row)
                return self.route_list()
        elif SESS_EDIT_FAILED in request.session:
            del request.session[SESS_EDIT_FAILED]
            return dict(form=form)
        values = row.to_dict()
        form.set_appstruct(values)
        return dict(form=form)
    ##########
    # Delete #
    ##########
    @view_config(route_name='ap-advist-delete', renderer='templates/ap-advist/delete.pt',
                permission='delete')
    def view_delete(self):
        q = self.query_id()
        row = q.first()
        request=self.request
        if not row:
            return id_not_found(request)
        if row.nominal:
            # Refuse to delete records that still carry an amount.
            request.session.flash('Data tidak dapat dihapus, karena masih memiliki items', 'error')
            return self.route_list()
        form = Form(colander.Schema(), buttons=('hapus','cancel'))
        values= {}
        if request.POST:
            if 'hapus' in request.POST:
                msg = '%s dengan kode %s telah berhasil.' % (request.title, row.kode)
                DBSession.query(Advist).filter(Advist.id==request.matchdict['id']).delete()
                DBSession.flush()
                request.session.flash(msg)
                return self.route_list()
        return dict(row=row, form=form.render())
class AddSchema(colander.Schema):
    """Colander schema for the Advist add form; declaration order defines
    the rendered field order."""
    unit_id = colander.SchemaNode(
        colander.String(),
        oid = "unit_id")
    tahun_id = colander.SchemaNode(
        colander.Integer(),
        title="Tahun",
        oid = "tahun_id")
    # Optional: generated server-side when left empty.
    kode = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        title="No. Advist")
    nama = colander.SchemaNode(
        colander.String(),
        title = "Bank/Tujuan"
    )
    tanggal = colander.SchemaNode(
        colander.Date(),
        title = "Tanggal"
    )
    nominal = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        oid="jml_total",
        title="Nominal"
    )
class EditSchema(AddSchema):
    """Add-form schema plus the record's primary key, used when editing."""
    id = colander.SchemaNode(
        colander.Integer(),
        oid="id")
| [
"aa.gustiana@gmail.com"
] | aa.gustiana@gmail.com |
4bd743d7f5ee9fa1237f955b597ff9dd90dd20d7 | 4fae0d0236a5cb220cdc5841404fdfc445f73a48 | /p12403 Save Setu.py | 45b9923762a5891636cb2f88c1f26fc0f0e21b05 | [] | no_license | ammshahin/UVa-Problems | 73561fe56e4fb943d1ad466615f3c8f54e588656 | 13a2eb87c83f1a8ea08cc987e6c328bf54ed4033 | refs/heads/main | 2023-08-31T06:27:49.589367 | 2021-10-12T18:59:17 | 2021-10-12T18:59:17 | 412,751,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | n = int(input())
count = 0
# Process exactly n commands when n is within the stated bounds (1..100).
if 1 <= n <= 100:
    while n > 0:
        tokens = str(input()).split()
        if tokens[0] == 'donate':
            # Only donations inside [100, 100000] are counted.
            donation = int(tokens[1])
            if 100 <= donation <= 100000:
                count += donation
        elif tokens[0] == 'report':
            print(count)
        n -= 1
| [
"ammshahin@gmail.com"
] | ammshahin@gmail.com |
cc94576c94c792df77ee28ae73dd6f41f0c2d08b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_065/ch59_2020_03_04_19_22_17_952459.py | c7342597c4c20377297e4677c63dc63c883b744b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | def asteriscos(n):
result = '*' * n
return result | [
"you@example.com"
] | you@example.com |
a7f8f0c22634f97eb3d7c0222768da488778bd20 | 39a64ef0c132a02b12a87f462262dade459dc9c8 | /chap-4/4.4.3.py | 2e8703235c7def3afcde75bd0bcf7eb8dee13e3a | [] | no_license | asdlei99/tensorflow_learning | a5dc2d85c087702421c245c16e20f752a0028f59 | 6849dd49c298da5eb99a4f4dd49f68aca1df78d4 | refs/heads/master | 2020-09-20T02:30:06.330216 | 2018-05-18T15:13:10 | 2018-05-18T15:13:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | import tensorflow as tf
# TensorFlow 1.x demo of ExponentialMovingAverage (tf.Session API).
v1 = tf.Variable(0, dtype=tf.float32)
# 'step' stands in for the global training step; excluded from training.
step = tf.Variable(0, trainable=False)
# Moving average with decay 0.99; 'step' is passed as num_updates.
ema = tf.train.ExponentialMovingAverage(0.99, step)
# Op that refreshes the shadow (averaged) copy of v1 each time it runs.
maintain_averages_op = ema.apply([v1])
with tf.Session() as sess:
    # Initialize all variables (original comment: 初始化).
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    print (sess.run([v1, ema.average(v1)]))
    # Update the value of v1 (original comment: 更新变量v1的取值).
    sess.run(tf.assign(v1, 5))
    sess.run(maintain_averages_op)
    print (sess.run([v1, ema.average(v1)]))
    # Update step and v1 (original comment: 更新step和v1的取值).
    sess.run(tf.assign(step, 10000))
    sess.run(tf.assign(v1, 10))
    sess.run(maintain_averages_op)
    print (sess.run([v1, ema.average(v1)]))
    # Apply one more moving-average update to v1
    # (original comment: 更新一次v1的滑动平均值).
    sess.run(maintain_averages_op)
    print (sess.run([v1, ema.average(v1)]))
"14021051@buaa.edu.cn"
] | 14021051@buaa.edu.cn |
7c25cfda901226e9daecc76fc096528b53ab29b1 | c32827d24eaa814d87f5ff4222e424e35aaf0639 | /parse.py | 0ba457ccddec0902ec1d3d0bf2f22df395c99963 | [] | no_license | LeoChen21/cstacks | a3c48083663749d595a7c93c8968547f294cff61 | b14da681e2c144dd02a0d110f3ee97dc5aa09c3c | refs/heads/main | 2023-04-25T18:04:39.501574 | 2021-05-21T18:08:42 | 2021-05-21T18:08:42 | 368,715,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,484 | py | from display import *
from matrix import *
from draw import *
import copy
"""
Goes through the file named filename and performs all of the actions listed in that file.
The file follows the following format:
Every command is a single character that takes up a line
Any command that requires arguments must have those arguments in the second line.
The commands are as follows:
push: push a copy of the current top of the coordinate system stack to the stack
pop: pop off the current top of the coordinate system stack
All the shape commands work as follows:
1) Add the shape to a temporary matrix
2) Multiply that matrix by the current top of the coordinate system stack
3) Draw the shape to the screen
4) Clear the temporary matrix
sphere: add a sphere to the POLYGON matrix -
            takes 4 arguments (cx, cy, cz, r)
torus: add a torus to the POLYGON matrix -
           takes 5 arguments (cx, cy, cz, r1, r2)
box: add a rectangular prism to the POLYGON matrix -
         takes 6 arguments (x, y, z, width, height, depth)
clear: clears the edge and POLYGON matrices
circle: add a circle to the edge matrix -
takes 4 arguments (cx, cy, cz, r)
hermite: add a hermite curve to the edge matrix -
takes 8 arguments (x0, y0, x1, y1, rx0, ry0, rx1, ry1)
bezier: add a bezier curve to the edge matrix -
takes 8 arguments (x0, y0, x1, y1, x2, y2, x3, y3)
line: add a line to the edge matrix -
          takes 6 arguments (x0, y0, z0, x1, y1, z1)
ident: set the transform matrix to the identity matrix -
scale: create a scale matrix,
then multiply the transform matrix by the scale matrix -
takes 3 arguments (sx, sy, sz)
move: create a translation matrix,
then multiply the transform matrix by the translation matrix -
takes 3 arguments (tx, ty, tz)
rotate: create a rotation matrix,
then multiply the transform matrix by the rotation matrix -
takes 2 arguments (axis, theta) axis should be x y or z
apply: apply the current transformation matrix to the edge and POLYGON matrices
display: clear the screen, then
draw the lines of the edge and POLYGON matrices to the screen
display the screen
save: clear the screen, then
draw the lines of the edge and POLYGON matrices to the screen
save the screen to a file -
takes 1 argument (file name)
quit: end parsing
See the file script for an example of the file format
"""
ARG_COMMANDS = ['box', 'torus', 'sphere', 'circle', 'bezier', 'hermite', 'line', 'scale', 'move', 'rotate', 'save' ]
# Commands in ARG_COMMANDS consume one argument line immediately after the
# command line.

def parse_file( fname, edges, polygons, csystems, screen, color ):
    """Execute the drawing script in *fname* line by line.

    Shape commands (torus/sphere/box/circle/hermite/bezier/line) build a
    temporary matrix, multiply it by the top of the coordinate-system stack,
    draw it, then discard it. Transform commands (scale/move/rotate) replace
    the top of the stack. push/pop manage the stack; print dumps it;
    display/save render the screen. Unknown commands are ignored.

    Fixes vs. the previous version: the input file is now closed via a
    ``with`` block (it used to leak), the stack top is addressed with the
    idiomatic ``csystems[-1]``, and the command dispatch is one consistent
    if/elif chain (previously a mix of independent ifs).
    """
    with open(fname) as f:
        lines = f.readlines()
    step = 100      # sampling steps for 2D curves
    step_3d = 20    # sampling steps for 3D surfaces
    c = 0
    while c < len(lines):
        line = lines[c].strip()
        if line in ARG_COMMANDS:
            # The next line holds this command's space-separated arguments.
            c += 1
            args = lines[c].strip().split(' ')
        if line == 'push':
            # Duplicate the current top so child transforms don't leak upward.
            csystems.append(copy.deepcopy(csystems[-1]))
        elif line == 'pop':
            csystems.pop()
        elif line == 'torus':
            add_torus(polygons,
                      float(args[0]), float(args[1]), float(args[2]),
                      float(args[3]), float(args[4]), step_3d)
            matrix_mult(csystems[-1], polygons)
            draw_polygons(polygons, screen, color)
            polygons = []
        elif line == 'sphere':
            add_sphere(polygons,
                       float(args[0]), float(args[1]),
                       float(args[2]), float(args[3]), step_3d)
            matrix_mult(csystems[-1], polygons)
            draw_polygons(polygons, screen, color)
            polygons = []
        elif line == 'box':
            add_box(polygons,
                    float(args[0]), float(args[1]), float(args[2]),
                    float(args[3]), float(args[4]), float(args[5]))
            matrix_mult(csystems[-1], polygons)
            draw_polygons(polygons, screen, color)
            polygons = []
        elif line == 'circle':
            add_circle(edges,
                       float(args[0]), float(args[1]), float(args[2]),
                       float(args[3]), step)
            matrix_mult(csystems[-1], edges)
            draw_lines(edges, screen, color)
            edges = []
        elif line == 'hermite' or line == 'bezier':
            add_curve(edges,
                      float(args[0]), float(args[1]),
                      float(args[2]), float(args[3]),
                      float(args[4]), float(args[5]),
                      float(args[6]), float(args[7]),
                      step, line)
            matrix_mult(csystems[-1], edges)
            draw_lines(edges, screen, color)
            edges = []
        elif line == 'line':
            add_edge(edges,
                     float(args[0]), float(args[1]), float(args[2]),
                     float(args[3]), float(args[4]), float(args[5]))
            matrix_mult(csystems[-1], edges)
            draw_lines(edges, screen, color)
            edges = []
        elif line == 'scale':
            t = make_scale(float(args[0]), float(args[1]), float(args[2]))
            matrix_mult(csystems[-1], t)
            csystems[-1] = t
        elif line == 'move':
            t = make_translate(float(args[0]), float(args[1]), float(args[2]))
            matrix_mult(csystems[-1], t)
            csystems[-1] = t
        elif line == 'rotate':
            # Script angles are degrees; the matrix builders expect radians.
            theta = float(args[1]) * (math.pi / 180)
            if args[0] == 'x':
                t = make_rotX(theta)
            elif args[0] == 'y':
                t = make_rotY(theta)
            else:
                t = make_rotZ(theta)
            matrix_mult(csystems[-1], t)
            csystems[-1] = t
        elif line == 'print':
            for i in csystems:
                print_matrix(i)
        elif line == 'display' or line == 'save':
            if line == 'display':
                display(screen)
            else:
                save_extension(screen, args[0])
        c += 1
| [
"noreply@github.com"
] | LeoChen21.noreply@github.com |
072fbbcca32c3f69b86bccc58eaf68ee2fac2226 | a94355f2571221e0babf86c8ab0039a5481b1002 | /Codechef_py/Fair elections.py | da028a3d707f30accba60490b3e750c5365d7481 | [] | no_license | koustav2001/Codechef | 7cd9677161b28f4bdcfe58ed2b3de9a5ef402fd0 | d85dd1f565faca40916119149f7b5023115ef34a | refs/heads/main | 2023-07-20T20:53:23.005289 | 2021-09-05T08:30:07 | 2021-09-05T08:30:07 | 403,254,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | t=int(input())
# For each test case: repeatedly exchange the smallest element of `a` with
# the largest element of `b` until sum(a) >= sum(b). Print the number of
# exchanges, or -1 when no exchange can still raise sum(a).
for _case in range(t):
    n, m = map(int, input().split())      # sizes (consumed, unused below)
    a = list(map(int, input().split()))
    b = list(map(int, input().split()))
    swaps = 0
    feasible = True
    while sum(a) < sum(b):
        a.sort()
        b.sort()
        if a[0] >= b[-1]:
            # The best available exchange would not help any more.
            feasible = False
            print(-1)
            break
        a[0], b[-1] = b[-1], a[0]
        swaps += 1
    if feasible:
        print(swaps)
| [
"noreply@github.com"
] | koustav2001.noreply@github.com |
66f3fb2cdec76923c37dcadb5840c597b608bf92 | 488521ef3bef6e486f4a58c1208001d3bb8991f8 | /migrations/versions/38b2dc3b3496_.py | 856ba8d127deff58465594f1c9c39c311f301ef5 | [] | no_license | jimmyking/mingpai-py | 360a2967bbfafb5e24d1a09a9d324be117f224e5 | 528c577c948a7b6de052da20524bcba59650e2d2 | refs/heads/master | 2016-09-05T18:48:23.162928 | 2015-02-06T15:00:12 | 2015-02-06T15:00:12 | 25,513,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | """empty message
Revision ID: 38b2dc3b3496
Revises: 276dba996e4b
Create Date: 2014-11-03 19:00:40.920282
"""
# revision identifiers, used by Alembic.
revision = '38b2dc3b3496'
down_revision = '276dba996e4b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: add warning bookkeeping columns and an index."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('order_warning', sa.Column('create_date', sa.DateTime(), nullable=True))
    op.add_column('order_warning', sa.Column('create_man', sa.Integer(), nullable=True))
    op.create_index(op.f('ix_order_warning_create_date'), 'order_warning', ['create_date'], unique=False)
    op.add_column('orders', sa.Column('warning_type', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert this revision (index is dropped before its column)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('orders', 'warning_type')
    op.drop_index(op.f('ix_order_warning_create_date'), table_name='order_warning')
    op.drop_column('order_warning', 'create_man')
    op.drop_column('order_warning', 'create_date')
    ### end Alembic commands ###
| [
"jimmyking329@qq.com"
] | jimmyking329@qq.com |
cb585cd6159253f3d1c9beb29596081629920985 | 289ab4b6eeb1a4f845ba66bd21c4a82670d554f3 | /jwtauth/views.py | 6d55c548b35d86929db0dda34ff020ac29b1ce9d | [] | no_license | 1arshan/project-e_com | 1d0765b28800ccf645dfe67ffa311ce7a6605309 | 632ed6bc4bf716777fab7c98113f754f47468705 | refs/heads/master | 2022-11-08T17:20:22.726675 | 2020-06-25T15:04:37 | 2020-06-25T15:04:37 | 270,087,413 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
class HelloView(APIView):
    """Minimal DRF endpoint that greets authenticated callers."""

    permission_classes = (IsAuthenticated,)

    def get(self, request):
        # Same payload for every caller; auth is enforced by the
        # permission class above.
        return Response({'message': 'Hello, World!'})
| [
"1arshanahmad@gmail.com"
] | 1arshanahmad@gmail.com |
4f7ff93ff049448a1624e7b2bc3823c954b70035 | 4f394a4db50ea539d189a29725c997d4b986bdf4 | /blogproject/settings.py | e3c26760ad7232d0b18299bcbda5aff7e09222de | [] | no_license | saiquit/react_django_blog | 57ff643c94661088401ba44c931c2bc7551fc8d2 | f9a0da27d683688d314ba7dedd5c37d76a08a1cf | refs/heads/master | 2023-01-31T17:28:07.079614 | 2020-12-14T10:14:47 | 2020-12-14T10:14:47 | 321,307,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,140 | py | """
Django settings for blogproject project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from datetime import timedelta
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded here; load from the environment before deploying.
SECRET_KEY = 'p1_)l6%dn+a-%xbt)wdu8a1x5pv9$rsg@p3(!8nam%dxt%$#@k'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['https://djangoreact20.herokuapp.com']
# Application definition

INSTALLED_APPS = [
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps
    'rest_framework',
    'corsheaders',
    'django_cleanup.apps.CleanupConfig',
    # Local apps
    'accounts',
    'blogs',
    'categories',
    'comments',
]
# Request/response middleware. Order matters: CorsMiddleware must run
# before CommonMiddleware (django-cors-headers requirement).
# Fix: 'django.middleware.common.CommonMiddleware' was listed twice, so it
# executed twice per request; the duplicate entry is removed.
MIDDLEWARE = [
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blogproject.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Template lookup includes the compiled React build directory.
        'DIRS': [
            os.path.join(BASE_DIR, 'frontreact/build')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'blogproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

# NOTE(review): credentials are hard-coded; move them to environment
# variables. In production this dict is overridden further below via
# dj_database_url.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'newBlog',
        'HOST': 'localhost',
        'USER': 'postgres',
        'PASSWORD': '1234'
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Also collect the React build's static assets.
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'frontreact/build/static')]
# NOTE(review): 'whitenoise.django.*' is the pre-4.0 import path; newer
# WhiteNoise releases moved this to 'whitenoise.storage.*' — confirm the
# installed version before upgrading.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'

# Authenticate API requests with the project's custom JWT scheme.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'accounts.authentication.SafeJWTAuthentication',
    )
}

# Custom user model from the accounts app.
AUTH_USER_MODEL = 'accounts.AuthorAccount'

SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(hours=2),
}

# Override DATABASES['default'] from DATABASE_URL when it is set
# (e.g. on Heroku).
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db)
| [
"imamhossain130754@gmail.com"
] | imamhossain130754@gmail.com |
96b751bafee5bfec57c1900b3f0737d33f666c7b | 729ee5bcb31708a82b08509775786597dac02263 | /coding-challenges/week09/day05/ccQ1.py | 01507bc127c3a7c3790250ee8b5756ef255aa621 | [] | no_license | pandey-ankur-au17/Python | 67c2478316df30c2ac8ceffa6704cf5701161c27 | 287007646a694a0dd6221d02b47923935a66fcf4 | refs/heads/master | 2023-08-30T05:29:24.440447 | 2021-09-25T16:07:23 | 2021-09-25T16:07:23 | 358,367,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | """
Q-1 ) Squares of a Sorted Array:(5 marks) (easy)
https://leetcode.com/problems/squares-of-a-sorted-array/
Given an integer array nums sorted in non-decreasing order, return an array of the
squares of each number sorted in non-decreasing order.
Example 1:
Input: nums = [-4,-1,0,3,10]
Output: [0,1,9,16,100]
Explanation: After squaring, the array becomes [16,1,0,9,100].
After sorting, it becomes [0,1,9,16,100].
"""
def SortedArray(nums):
    """Return the squares of a sorted array, themselves in sorted order.

    Two-pointer merge from the outside in: the largest remaining square
    always sits at one of the two ends of the sorted input, so the output
    is filled from its back. O(n) time, O(n) extra space.
    """
    n = len(nums)
    result = [0] * n
    lo, hi = 0, n - 1
    for slot in range(n - 1, -1, -1):
        left_sq = nums[lo] * nums[lo]
        right_sq = nums[hi] * nums[hi]
        if left_sq < right_sq:
            result[slot] = right_sq
            hi -= 1
        else:
            result[slot] = left_sq
            lo += 1
    return result
if __name__ == "__main__":
    # Smoke test using the example from the problem statement.
    nums = [-4,-1,0,3,10]
    res = SortedArray(nums)
    print(res)
"ankurpandey131@gmail.com"
] | ankurpandey131@gmail.com |
3f9aadad93ed4aade369261f492839cbdeab65ba | d9abebd85ae0ec3f5a9f1c608d9e9e92d64ae067 | /week5_dynamic_programming1/primitive_calculator.py | 63c9be1897be54863e08637699416690855d7c65 | [] | no_license | ChrisDACE/Coursera-Algorithms | 2e298d48769e3cfe523419673fa21044c2bdbd70 | bae3a047aeb50a3acb037573f64344b1696273c4 | refs/heads/main | 2023-03-22T21:33:14.655215 | 2021-03-19T05:29:44 | 2021-03-19T05:29:44 | 348,518,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # Uses python3
import sys
def optimal_seq_dp(n):
    """Compute a shortest sequence 1 -> n using *3, *2 and +1 operations.

    Bottom-up DP: step_dict[v] = [predecessor of v, min ops to reach v].
    Returns the sequence as a reverse iterator (callers wrap it in list()).
    Fix: use floor division (//) instead of int(curr / 3) — true division
    goes through float and silently loses precision for n beyond 2**53.
    """
    step_dict = {1: [1, 0]}
    for curr in range(2, n + 1):
        options = []
        if curr % 3 == 0:
            options.append(curr // 3)
        if curr % 2 == 0:
            options.append(curr // 2)
        options.append(curr - 1)
        # First option with the fewest ops; min() is stable on ties,
        # matching the original index-of-minimum selection.
        prev = min(options, key=lambda o: step_dict[o][1])
        step_dict[curr] = [prev, step_dict[prev][1] + 1]
    seq = [n]
    while n > 1:
        n = step_dict[n][0]
        seq.append(n)
    return reversed(seq)
def optimal_sequence(n):
    """This greedy method is actually wrong!"""
    # Greedily prefer /3, then /2, then -1; kept only for comparison with
    # the DP version — it does not always yield a shortest sequence.
    sequence = []
    while n >= 1:
        sequence.append(n)
        if n % 3 == 0:
            n //= 3
        elif n % 2 == 0:
            n //= 2
        else:
            n -= 1
    return reversed(sequence)
# Fix: the original bound the program input to the name `input`, shadowing
# the built-in input(); read into `data` instead.
data = sys.stdin.read()
n = int(data)
# sequence = list(optimal_sequence(n))  # greedy version (known-wrong)
sequence = list(optimal_seq_dp(n))
print(len(sequence) - 1)
for x in sequence:
    print(x, end=' ')
| [
"noreply@github.com"
] | ChrisDACE.noreply@github.com |
7d1cb756409cb86fdbeaaa88f81d0376e753473f | d08917e51dfde03a253aaa19e8ad559ea3d3a125 | /TrabalhoDeNumericoEDO/MetodoDeEuller.py | 3ca5cc85c7e71e87a11046b405a618f2a1aa097b | [] | no_license | borin98/TrabalhoDeCalculoNumerico | 43ab2fea6d80bbbb6ba02a39bdd3171ac6a852c5 | 94dce615b1a6a723bab18088ce6fabc0b0d3703f | refs/heads/master | 2020-03-29T20:56:04.425752 | 2018-11-27T21:16:22 | 2018-11-27T21:16:22 | 150,339,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | import numpy as np
import matplotlib.pyplot as plt
from math import exp
def montaGrafico ( xResp, yResp, yPrev ) :
    """Plot the exact solution and the Euler approximation on one figure.

    xResp/yResp: abscissas and exact values; yPrev: Euler estimates.
    """
    plt.figure ( 0 )

    # Exact solution: dashed red line.
    plt.plot ( xResp, yResp, "--r", linewidth = 2 )
    plt.xlabel("tempo (s)")
    plt.ylabel("Número de indivíduos")
    plt.title ( "Números de indivíduos por tempo" )
    plt.grid ( True )

    # Euler approximation: dashed blue line on the same axes.
    plt.plot ( xResp, yPrev, "--b", linewidth = 2 )
    plt.xlabel("tempo (s)")
    #plt.rcParams['figure.figsize'] = (0.0001,1)
    plt.ylabel("Número de indivíduos")
    plt.title ( "Números de indivíduos por tempo" )
    plt.legend ( ["Valor Real", "Aproximação"] )
    plt.grid ( True )
    #plt.rcParams['figure.figsize'] = (1000,0.01)
    plt.show ( )

    return
def montaVetorOriginal ( h = 0, k = 0, r = 0, yo = 0, xo = 0, totalint = 0, ) :
    """Sample the exact logistic solution on the integration grid.

    y(t) = yo*k*e^(r*t) / (k + yo*(e^(r*t) - 1)), evaluated at
    xo + h, xo + 2h, ... with the initial point (xo, yo) stored first.
    Returns (x values, y values) as numpy arrays of length totalint + 1.
    Fix: the first abscissa is now xo itself — it was hard-coded to 0,
    which is wrong whenever the interval does not start at 0. The unused
    local `passo` was removed.
    """
    tam = totalint + 1
    vetorX = np.zeros ( tam )
    vetorY = np.zeros ( tam )
    vetorX[0] = xo
    vetorY[0] = yo
    for i in range ( 0, totalint ) :
        xo += h
        e = np.exp ( r*xo )
        y = ( ( yo*k*e )/ ( k + (yo*(e - 1) ) ) )
        vetorX[i+1] = xo
        vetorY[i+1] = y
    return vetorX, vetorY
def main ( ) :
    """Read logistic-equation parameters, run explicit Euler, and compare
    against the exact solution numerically and graphically."""
    y = float ( input ( "Digite o valor inicial de Yo : " ) ) # initial condition y(a)
    a = float ( input ( "Digite o valor do intervalo a : " ) )
    b = float ( input ( "Digite o valor do intervalo b : " ) )
    h = float ( input ( "Digite o valor do espaçamento dos valores : " ) )
    r = float ( input ( "Digite o valor de r : " ) )
    k = float ( input ( "Digite o valor de k : " ) )

    totalint = int( ( b - a ) /h) # total number of Euler iterations
    tam = totalint + 1
    arrayResultados = np.zeros ( tam ) # Euler estimate at each grid point
    f = 0 # holds h * f( x, y(x) ) during the iteration
    Yo = y
    arrayResultados[0] = y
    # Exact solution on the same grid, for comparison.
    arrayX, arrayY = montaVetorOriginal (
        h = h,
        r = r,
        k = k,
        yo = Yo,
        xo = a,
        totalint = totalint
    )
    y = Yo
    # Explicit Euler iteration: y_{k+1} = y_k + h * f(x_k, y_k).
    # NOTE(review): the slot i+1 is written *before* y is updated, so
    # arrayResultados[1] repeats the initial value and the final update
    # is never stored — looks like an off-by-one; confirm with the author.
    for i in range ( 0, totalint ) :
        arrayResultados[i+1] = y
        f = ( 1 - ( y/k ) )*( h*r*y ) # h * f( xk, y(xk) ) for the logistic ODE
        y = y + f # y(xk+1)

    print ( "Array resultado Previsão : {}\n" .format ( arrayResultados) )
    print ( "Array valores Reais : {}\n" .format ( arrayY ) )
    print ( "Array valores de x : {}".format ( arrayX ) )
    print ( "Tam arrayResultados : {}\n".format ( len ( arrayResultados ) ) )

    montaGrafico (
        xResp = arrayX,
        yResp = arrayY,
        yPrev = arrayResultados
    )
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | borin98.noreply@github.com |
bbde197208dc993a09eaeb38d3befbf3e3c3fcf1 | 7b015afde8ae74b32509083e9761b81d2e906771 | /trigrams.py | bc222b582b3dc30dbbc7cd755af93e49039a2941 | [] | no_license | oksanatkach/NLP-things | 6175e0fe1db8ffcff87a9e0410f5871c4f7b10db | 27338337f5c6c9550443ec3f619b1bff462cc6a2 | refs/heads/master | 2021-09-09T08:24:00.970581 | 2018-03-14T11:30:30 | 2018-03-14T11:30:30 | 125,201,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | #!/bin/python
import sys
import string
import operator
# Find the most frequent word trigram in `s`; ties are broken by first
# appearance. Fix: the original tested `max_count == 1`, comparing a *list*
# to an int (always False), instead of its length. The Python-2-only
# constructs (ur"" was absent here, but translate(None, ...), xrange and the
# print statement) are ported to Python 3 so the fix is runnable/testable.
s = 'I came from the moon. He went to the other room. She went to the drawing room.'
sents = s.split('.')

trigrams = {}   # trigram -> occurrence count
order = []      # trigrams in order of first appearance

for sent in sents:
    # Strip punctuation, lowercase, tokenize (Python 3 form of the
    # Python 2 `sent.translate(None, string.punctuation)`).
    words = sent.translate(str.maketrans('', '', string.punctuation)).lower().split()
    for ind in range(2, len(words)):
        tri = ' '.join([words[ind - 2], words[ind - 1], words[ind]])
        if tri in trigrams:
            trigrams[tri] += 1
        else:
            trigrams[tri] = 1
            order.append(tri)

srtd = sorted(trigrams.items(), key=operator.itemgetter(1))
# All trigrams sharing the maximal count.
max_count = [tri for tri in srtd if tri[1] == srtd[-1][1]]

if len(max_count) == 1:
    answer = max_count[0][0]
else:
    # Pick the earliest-seen trigram among the tied maxima.
    ind = len(order)
    for el in max_count:
        if order.index(el[0]) < ind:
            ind = order.index(el[0])
            answer = el[0]
print(answer)
| [
"oksana.tkach.ua@gmail.com"
] | oksana.tkach.ua@gmail.com |
8df3449ff6cd1cca9d6bef55ec2e1ffa7cdc8ecc | ae122790fe5e9fac63fec0c8b57b848b58019c1a | /meeting/models.py | 03813da1c080a4504d5212f6bf3ad0d1fea8f683 | [] | no_license | arsensokolov/castle-if.ru | 34f758af9efae32b873b6bc1a33f6ecb494d3b1b | 77330aa13cfc18761cc4274997991f46742f73ba | refs/heads/master | 2020-11-29T07:35:57.386571 | 2020-01-01T17:06:17 | 2020-01-01T17:06:17 | 230,059,507 | 0 | 0 | null | 2020-06-05T20:37:28 | 2019-12-25T07:15:08 | HTML | UTF-8 | Python | false | false | 1,361 | py | from django.db import models
from django.utils.safestring import mark_safe
def photo_upload(instance, filename):
    """upload_to callback: meeting/<year>/<month>/<day>/<filename>,
    dated by the photo's album."""
    when = instance.album.date
    return 'meeting/{}/{}/{}/{}'.format(when.year, when.month, when.day, filename)
class Album(models.Model):
    # A photo album for one meeting date.
    title = models.CharField('заголовок', max_length=60)
    date = models.DateField('дата встречи')

    class Meta:
        verbose_name = 'альбом'
        verbose_name_plural = 'альбомы'
        ordering = ['-date']  # newest meetings first

    def __str__(self):
        return self.title
class Photo(models.Model):
    # One photo inside an Album; files land under meeting/<y>/<m>/<d>/
    # (see photo_upload above).
    album = models.ForeignKey(Album, on_delete=models.PROTECT, verbose_name='альбом', related_name='photos')
    title = models.CharField('подпись к фото', max_length=140, null=True, blank=True)
    image = models.ImageField('фото', upload_to=photo_upload)
    my_order = models.PositiveIntegerField('сортировка', default=0)

    class Meta:
        verbose_name = 'фото'
        verbose_name_plural = 'фото'
        ordering = ['my_order']  # manual ordering field

    def __str__(self):
        return '({}) {}'.format(self.id, self.title)

    def preview(self):
        # Inline <img> tag; short_description below labels it when used
        # as a display column (e.g. in the admin).
        return mark_safe('<img src="{}">'.format(self.image.url))
    preview.short_description = 'просмотр'
"me@arsen.pw"
] | me@arsen.pw |
8e668b0f2b064342e7b3056c8c5f21977044696d | c6a9ddd072102934000890da0e046a476b8a0a58 | /exercises/migrations/0003_message_is_hidden.py | abc5f3efc4748ae292407046fb50f23b3bb39073 | [] | no_license | hkerkevin/Django_workbook1 | f67dc739d79fe9ba7a7b8ad18ff7e1ffa69737ed | 98c23ae5640372a74e28e478c4e1d4a3413a0934 | refs/heads/master | 2020-03-23T11:47:29.500859 | 2018-07-20T21:06:40 | 2018-07-20T21:06:40 | 141,521,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # Generated by Django 2.0.1 on 2018-04-10 02:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `is_hidden` flag (default False) to Message."""

    dependencies = [
        ('exercises', '0002_message'),
    ]

    operations = [
        migrations.AddField(
            model_name='message',
            name='is_hidden',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"codetutoraj@gmail.com"
] | codetutoraj@gmail.com |
7d796dc0334c1962a192035b167cb7102cd75094 | d4432f419486ec497f31b1ac69807420a5d2e4ab | /main/migrations/0010_admissionenquiry.py | 0359671c8e775109fad179e45325eb98918a6d38 | [] | no_license | PrabhatP2000/Web-Designing | b144553815b0aa658164f01dbb476c54106c87cc | 2817d2cf67dba492adf06bc6c38cd4d0695fddb2 | refs/heads/main | 2023-05-10T13:40:24.233265 | 2021-06-09T17:21:30 | 2021-06-09T17:21:30 | 369,862,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # Generated by Django 3.2.3 on 2021-05-29 16:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the AdmissionEnquiry model/table."""

    dependencies = [
        ('main', '0009_resultprofile'),
    ]

    operations = [
        migrations.CreateModel(
            name='AdmissionEnquiry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Upper-case field names mirror the model definition.
                ('NAME', models.CharField(max_length=20)),
                ('EMAIL', models.EmailField(max_length=254)),
                ('SUBJECT', models.CharField(max_length=50)),
                ('MESSAGE', models.CharField(max_length=200)),
            ],
        ),
    ]
| [
"pandeyprabhat206@gmail.com"
] | pandeyprabhat206@gmail.com |
feed39e1f437c4d336656b405b1148f3b07bb364 | cfc7eed97d4987dbe80026205b7a127f89974d51 | /ebcli/controllers/codesource.py | 6fc3968ac2ad924babbabd2783fc67143c6b4fbd | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | stefansundin/awsebcli | bf71872328c4d94f073d5d0ae0740a0316d56fcf | 8e17c8ad3d24e3c4cef9a4c5dfc6cae61bd7066d | refs/heads/main | 2022-12-06T06:34:52.601029 | 2022-02-04T05:40:53 | 2022-11-20T01:38:26 | 230,182,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.lib import utils
from ebcli.core import io
from ebcli.core.abstractcontroller import AbstractBaseController
from ebcli.resources.strings import strings, flag_text, prompts
from ebcli.operations import gitops
class CodeSourceController(AbstractBaseController):
    """`eb codesource`: select CodeCommit or local code as the deploy source."""

    class Meta(AbstractBaseController.Meta):
        label = 'codesource'
        description = strings['codesource.info']
        arguments = [
            (
                ['sourcename'],
                dict(
                    action='store',
                    nargs='?',
                    help=flag_text['codesource.sourcename'],
                    choices=['codecommit', 'local'],
                    type=str.lower  # accept any casing on the CLI
                )
            ),
        ]
        usage = 'eb codesource <sourcename> [options ...]'

    def do_command(self):
        # With an explicit argument switch directly; otherwise prompt.
        sourcename = self.app.pargs.sourcename
        if sourcename is not None:
            if sourcename == 'local':
                gitops.print_current_codecommit_settings()
                self.set_local()
            if sourcename == 'codecommit':
                self.set_codecommit()
        else:
            self.prompt_for_codesource()

    def prompt_for_codesource(self):
        """Interactively ask the user to pick CodeCommit or local source."""
        gitops.print_current_codecommit_settings()
        io.echo(prompts['codesource.codesourceprompt'])
        setup_choices = ['CodeCommit', 'Local']
        # Second argument is forwarded to the prompt util (presumably the
        # default selection — confirm against utils.prompt_for_item_in_list).
        choice = utils.prompt_for_item_in_list(setup_choices, 2)
        if choice == setup_choices[0]:
            self.set_codecommit()
        elif choice == setup_choices[1]:
            self.set_local()

    def set_local(self):
        # Turn off CodeCommit integration for this workspace.
        gitops.disable_codecommit()
        io.echo(strings['codesource.localmsg'])

    def set_codecommit(self):
        gitops.initialize_codecommit()
"aws-eb-cli@amazon.com"
] | aws-eb-cli@amazon.com |
c7fbb95fa05343cc561f50c34178cda5f263255f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_363/ch18_2020_09_16_12_12_05_478212.py | d5e7f259a6b779b713536a1cdce9be08e76ba7cf | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | def testa_maioridade(idade):
if idade >= 21:
return 'Liberado EUA e BRASIL'
else:
if idade >= 18:
return 'Liberado BRASIL'
else:
return 'Não está liberado'
print(testa_maioridade(17))
print(testa_maioridade(20))
print(testa_maioridade(21)) | [
"you@example.com"
] | you@example.com |
85dd60d1a0c3316bda5a5dcf3306e7bf740b7417 | b07c4f4b99a46689a650d52bf1bd1d32160f06d3 | /tests/test_cps324.py | f14fcdd50b208eaae6ee51e93dfb35fd723dfb9a | [] | no_license | nuxeo-cps/products--CPSUpgradeTests | 2d67652c26fc212c9ec9864a76b0a7b1f819e2c9 | e3b1f94eaf78278b529561b2384ea3a3479123b3 | refs/heads/main | 2023-01-22T00:46:51.434789 | 2006-09-02T08:22:30 | 2006-09-02T08:22:30 | 317,994,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | # Upgrade from CPS 3.2.4
DB_NAME = 'cps324'
import os
import unittest
# Warning, nifty tapdance ahead:
# When you import testing, it sets testing home to
# $SOFTWARE_HOME/lib/python/Testing
import Testing
# But we want it to be in a directory with our custom_zodb.py, so we set it,
# but only after importing Testing (or it will be reset later).
import App.config
cfg = App.config.getConfiguration()
cfg.testinghome = os.path.join(os.path.dirname(__file__), DB_NAME)
# During the import of the ZopeLite module, the Zope Application will be
# started, and it will now use our testinghome, find our custom_zodb.py and
# use our custom ZODB.
# Actually, we import upgradetestcase, which in turn imports ZopeTestCase,
# which in turn imports ZopeLite, which in turns starts Zope.
from upgradetestcase import PreGenericSetupTestCase
# Tapdance ends.
class TestUpgrade(PreGenericSetupTestCase):
    # Runs the shared upgrade machinery against the CPS 3.2.4 fixture DB.
    db_dir = DB_NAME

    def test_upgrade(self):
        """Upgrade the 3.2.4 database, then verify each subsystem."""
        self._upgrade()
        self._verifyDocument()
        self._verifyPublishing()
        self._verifyCalendaring()
        self._verifyNewsItem()
        self._checkSubGroupSupport()
        self._verifyFolderDestruction()
def test_suite():
    """Zope test-runner hook: bundle this module's test cases."""
    return unittest.TestSuite((
        unittest.makeSuite(TestUpgrade),
        ))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| [
"devnull@localhost"
] | devnull@localhost |
8c09c475eebebeba17d6965c5c16882309111a9f | b8441dc1987be9e64fa3081d456b2a3060ec44d1 | /mars/core/graph/builder/tileable.py | ddfbf93711c35982d8d457f21204d791adbbb977 | [
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mars-project/mars | f99fefbce999d58a9249bc72046787a9731c9c73 | c36c53fa22e10ef9477d9c454401a2f281375f31 | refs/heads/master | 2023-07-23T00:23:55.133015 | 2023-07-03T11:44:54 | 2023-07-03T11:44:54 | 160,543,708 | 2,704 | 362 | Apache-2.0 | 2023-09-11T07:57:35 | 2018-12-05T16:04:03 | Python | UTF-8 | Python | false | false | 1,230 | py | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Generator
from ...mode import enter_mode
from ..entity import TileableGraph, ChunkGraph
from .base import AbstractGraphBuilder
class TileableGraphBuilder(AbstractGraphBuilder):
    """Graph builder that completes a tileable graph in place."""

    _graph: TileableGraph

    def __init__(self, graph: TileableGraph):
        super().__init__(graph=graph)

    @enter_mode(build=True, kernel=True)
    def _build(self) -> Union[TileableGraph, ChunkGraph]:
        # Add the result tileables (and their dependencies) to the graph.
        self._add_nodes(self._graph, list(self._graph.result_tileables), set())
        return self._graph

    def build(self) -> Generator[Union[TileableGraph, ChunkGraph], None, None]:
        # Single-shot generator yielding the one finished graph.
        yield self._build()
| [
"noreply@github.com"
] | mars-project.noreply@github.com |
09f3f0d7ba19d0de1c025621f25427435fbf4269 | 9e059ee06b0bf943f07e2e867686c5872c03be5f | /machinelearning_labelling_postag.py | 695465e818510502498704be72712da49cc316b2 | [] | no_license | Prasanthi04/NLP_DOC-RETRIEVAL | bd0e5e9ccb07c569c7975fbba3dd53392d7e91c4 | 70d304fe711b0d7ae61e18ce3926005bc26721af | refs/heads/master | 2021-08-31T15:13:52.908468 | 2017-12-21T21:19:23 | 2017-12-21T21:19:23 | 115,047,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,318 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 16 15:53:19 2017
@author: prasa
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 14 18:31:48 2017
@author: prasa
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 13 16:30:22 2017
@author: prasa
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 11 15:57:07 2017
@author: prasa
"""
import pandas as pd
# Load the 160-query POS-tag-bigram training scores.
df = pd.read_csv("C:/Drive/FALL2017/NLP/Project/traning_data_160_postag_bigram.csv",header=0, quoting=3)
df_Query1 = df['Query']
lis= list(df_Query1)
lis.sort()  # NOTE(review): this sorted copy is not used afterwards
def sort_max(query):
    """Return the (max, min) pair of the given values."""
    values = sorted(query)
    return max(values), min(values)
def print_stat(string, maxim, minim):
    """Print one max line and one min line for the named statistic."""
    for label, value in (("maximum", maxim), ("minimum", minim)):
        print("The {} of {} {}".format(label, string, value))
# Report the observed score range of the training queries.
ma,mi = sort_max(df['Query'])
print_stat("Query",ma,mi)
# Banding notes: the thresholds actually used are the 0.3/0.45/0.55
# cut-points in label_race below.
def label_race(row, string):
    """Map the similarity score in column `string` to a relevance label."""
    score = row[string]
    if 0.3 <= score < 0.45:
        return 'NOT'
    if 0.45 <= score <= 0.55:
        return 'POSSIBLY'
    if 0.55 < score <= 1:
        return 'DEFINITELY'
    # Scores outside [0.3, 1] fall through unclassified.
    return 'Other'
# Label each training row from its Query score.
df['Q1_Relevant Judgment'] = df.apply (lambda row: label_race (row, "Query"),axis=1)
#df.to_csv("C:/Drive/FALL2017/NLP/Project/TFIDF_ALONE/training_data_tfidf_only_160_1.csv",index=False, quoting = 3)
###################################################################33
# Score the held-out passage test set with Gaussian Naive Bayes.
df_test = pd.read_csv("C:/Drive/FALL2017/NLP/PASSAGE_TFIDF/test_data_passage_160_POSbigram.csv",header=0, quoting=3)
df_Query1_test = df_test['Query']
df_doc = df_test['DocId']
from sklearn.naive_bayes import GaussianNB
# NOTE(review): Series.reshape was removed in modern pandas; newer code
# should use .values.reshape(-1, 1).
X = df['Query'].reshape(-1,1)
y = df['Q1_Relevant Judgment']
clf_nb = GaussianNB()
clf_nb.fit(X, y)
X_prednb = df_Query1_test.reshape(-1,1)
out_nb = clf_nb.predict(X_prednb)
# Persist DocId -> predicted judgment.
d = {'DocId': df_doc, 'Predicted_judgment': out_nb}
df_pred = pd.DataFrame(data=d)
df_pred.to_csv("C:/Drive/FALL2017/NLP/PASSAGE_TFIDF/predicted_data_160Q_POSbigram.csv",index=False, quoting = 3)
##################################################################################################3
# Evaluate the NB predictions against the TREC expected judgments.
df_1000 = pd.read_csv("C:/Drive/FALL2017/NLP/PASSAGE_TFIDF/predicted_data_160Q_POSbigram.csv",header=0, quoting=3)
df_trec = pd.read_csv("C:/Drive/FALL2017/NLP/PASSAGE_TFIDF/TREC_EXPECTED_160.csv",header=0, quoting=3)
#list(df_trec['Query']).sort()
#list(df_1000['Predicted_judgment']).sort()
from sklearn.metrics import confusion_matrix, precision_score, recall_score
cm_nb=confusion_matrix(df_trec['Query'],df_1000['Predicted_judgment'])
precision_nb=precision_score(df_trec['Query'], df_1000['Predicted_judgment'], average='weighted')
print("precision", precision_nb)
recall_nb = recall_score(df_trec['Query'], df_1000['Predicted_judgment'], average='weighted')
print("recall",recall_nb)
#################################################################################################33
# Repeat with a decision-tree classifier on the same features.
from sklearn import tree
X_dt = df['Query'].reshape(-1,1)
y_dt = df['Q1_Relevant Judgment']
clf_dt = tree.DecisionTreeClassifier()
clf_dt = clf_dt.fit(X_dt, y_dt)
out_dt = clf_dt.predict(X_prednb)
d_dt= {'DocId': df_doc, 'Predicted_judgment': out_dt}
df_dt = pd.DataFrame(data=d_dt)
df_dt.to_csv("C:/Drive/FALL2017/NLP/PASSAGE_TFIDF/predicted_data_160Q_decisiontree_POSbigram.csv",index=False, quoting = 3)
####################################################3
# Evaluate the decision-tree predictions.
df_1000_dt = pd.read_csv("C:/Drive/FALL2017/NLP/PASSAGE_TFIDF/predicted_data_160Q_decisiontree_POSbigram.csv",header=0, quoting=3)
#df_trec = pd.read_csv("C:/Drive/FALL2017/NLP/PASSAGE_TFIDF/TREC_EXPECTED_160.csv",header=0, quoting=3)
cm_dt=confusion_matrix(df_trec['Query'],df_1000_dt['Predicted_judgment'])
precision_dt=precision_score(df_trec['Query'], df_1000_dt['Predicted_judgment'], average='weighted')
print("precision for decision tree", precision_dt)
recall_dt = recall_score(df_trec['Query'], df_1000_dt['Predicted_judgment'], average='weighted')
print("recall for decision tree",recall_dt)
############################################ROC CURVE################3
'''from sklearn import metrics
from ggplot import *
fpr, tpr, _ = metrics.roc_curve(df_trec['Query'], df_1000['Predicted_judgment'])
df = pd.DataFrame(dict(fpr=fpr, tpr=tpr))
ggplot(df, aes(x='fpr', y='tpr')) +\
geom_line() +\
geom_abline(linetype='dashed')''' | [
"prasanthi468@gmail.com"
] | prasanthi468@gmail.com |
b1c6f70888f4d4d08ce71f1352766f46efd37c17 | a10c658d81b4b4791288673b8bfce45c6f378d50 | /locustfile.py | 042472b29854cce5dc2e276ede13d1d5450180ab | [] | no_license | karuppiah7890/view-counter | 262445725d02445e50febe9523e74c1823f302d9 | 9c19c7aaa79a40b7ff644c67a93a505902445d95 | refs/heads/master | 2020-04-30T02:32:11.560693 | 2019-03-23T05:30:13 | 2019-03-23T05:30:13 | 176,562,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
    """Locust task set: each simulated user repeatedly POSTs to /view."""
    @task(1)
    def view(self):
        # Weight 1 and the only task, so every iteration hits this endpoint.
        self.client.post("/view")
class WebsiteUser(HttpLocust):
    """Simulated user running UserBehavior, pausing 5-9 s between tasks."""
    task_set = UserBehavior
    min_wait = 5000  # milliseconds
    max_wait = 9000  # milliseconds
"karuppiah7890@gmail.com"
] | karuppiah7890@gmail.com |
e8bc5dbefb0bffb093db18b353595850e853e65c | 0e3c0599bc7417714bde3751f911f3c05f238b15 | /AverageWordsReviewCalculation.py | 8dbb1ce05874feea875c2f14bf2ea69ab79c378d | [
"Apache-2.0"
] | permissive | nishitprasad/Spark-Data-Analysis-on-Amazon-real-products-reviews | ab1b8a0201869111a959df470ce7fb1108d9d1e4 | 59d1c6a68d37cbb93c8a15e3b6ef528725f59a53 | refs/heads/master | 2021-06-08T15:56:44.735622 | 2016-11-16T19:18:44 | 2016-11-16T19:18:44 | 72,976,697 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,508 | py | #import necessary library
import json
import re
from pyspark import SparkConf, SparkContext
from operator import add
# Spark connection setup; the master URL placeholder must be replaced before running.
conf = SparkConf()
conf.setMaster("--Spark-Master-URL--")# set to your spark master url
conf.setAppName("averageCalculation")
sc = SparkContext(conf = conf)
#Parse one JSON review record and return its asin with the review word count
def getCount(line):
    """Return ``(asin, word_count)`` for one JSON-encoded review record.

    Fixes two defects in the original:
    - the ``ur"..."`` string prefix is a SyntaxError on Python 3; a plain raw
      string is used instead.
    - ``re.split`` on letter runs returned the separators *between* words
      (plus empty edge strings), over-counting by one; ``re.findall`` counts
      the actual alphabetic word runs.
    """
    record = json.loads(line)
    words = re.findall(r"[A-Za-z]+", record.get("reviewText"), flags=re.UNICODE)
    return (str(record.get("asin")), len(words))
#Parse one JSON metadata record; keep only the Music-category rows
def getMusicRecords(line):
    """Return ``(asin, "1")`` for records whose "categories" field equals
    "Music" (the "1" is a dummy join value), and ``None`` otherwise."""
    record = json.loads(line)
    if record.get("categories") != "Music":
        return None
    return (str(record.get("asin")), "1")
# Read the review file and convert it into an RDD of (asin, review word count).
reviewRDD = sc.textFile("file:///home/../review.data")
reviewRDD = reviewRDD.map(getCount)
# Read the meta file and keep only the Music-category (asin, dummy) records.
metaRDD = sc.textFile("file:///home/../meta.data")
metaRDD = metaRDD.map(getMusicRecords)
metaRDD = metaRDD.filter(lambda line: line!=None)
# Inner join on asin: each value is a nested tuple (dummy, wordcount); sorted lexicographically.
joinedRDD = sc.parallelize(sorted(metaRDD.join(reviewRDD).collect()))
joinedRDD.saveAsTextFile("file:///home/../joinedRDD_result") # save file to a local path, starting with prefix file://
# Keep only the word counts for the average computation.
joinedRDD = joinedRDD.map(lambda line: int(line[1][1]))
# Compute the total and the count, then persist the average review word count.
totalSum = joinedRDD.reduce(add)
count = joinedRDD.count()
with open('AverageNumberOfReviewWords.txt', 'w') as f:
    # BUG FIX: the original was missing a "+" between the first string literal
    # and str(totalSum), which is a SyntaxError.
    f.write("Total Sum: " + str(totalSum) + " Total Count: " + str(count) + " Required Average: " + str(round(totalSum/float(count), 2)))
# Save intermediate RDDs (work is distributed, so each may produce multiple part files).
reviewRDD.saveAsTextFile("file:///home/../reviewRDD_AvgCalc_MidResult")
metaRDD.saveAsTextFile("file:///home/../metaRDD_AvgCalc_MidResult")
| [
"noreply@github.com"
] | nishitprasad.noreply@github.com |
05645e14172ef65127ed8870000cdbbcdb3d3903 | a51b818e90e734be67ae588d4b9cec4180c92a67 | /blog/views.py | bbb2caf0c3cce7b4402b8a42f3682749f06f76a3 | [] | no_license | alanleegithub/webfaction | b73fb97c656a44a840cd3c0436a9e970e90f310f | dd6c9d5f3820ff625e25c52acb5a82e0b169fca5 | refs/heads/master | 2021-01-20T21:24:01.456461 | 2014-10-03T08:49:51 | 2014-10-03T08:49:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,109 | py | #from django.shortcuts import render
# Create your views here.
from django.shortcuts import render_to_response
from blog.models import Post, Comment
from django.core.context_processors import csrf
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.contrib import auth
from forms import MyRegistrationForm
from forms import PostForm, CommentForm
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from calendar import HTMLCalendar
from datetime import date
def blogs(request):
    """Render the blog index: all posts (newest first) plus a month calendar
    with today's cell highlighted as a link."""
    if not request.user.is_authenticated():
        request.user.username = 'None'
    today = date.today()
    cal = HTMLCalendar(6).formatmonth(today.year, today.month)
    cal = cal.replace('>%s</td>' % today.day,
                      '><u><a href=#>%s</a></u></td>' % today.day)
    context = {
        'blogs': Post.objects.all().order_by('-published_date'),
        'user': request.user,
        'calendar': cal,
    }
    return render_to_response('blogs.html', context,
                              context_instance=RequestContext(request))
def blog(request, post_id = 1):
    """Render a single post page (defaults to post 1) with the month calendar."""
    if not request.user.is_authenticated():
        request.user.username = 'None'
    today = date.today()
    cal = HTMLCalendar(6).formatmonth(today.year, today.month)
    cal = cal.replace('>%s</td>' % today.day,
                      '><u><a href=#>%s</a></u></td>' % today.day)
    return render_to_response('blog.html', {
        'post': Post.objects.get(id = post_id),
        'user': request.user,
        'calendar': cal,
    })
def tagpage(request, tag):
    """List every post carrying the given tag."""
    tagged = Post.objects.filter(tags__name = tag)
    return render_to_response('tagpage.html', {'posts': tagged, 'tag': tag})
from django.contrib.auth.forms import UserCreationForm
def login(request):
    """Authenticate the POSTed credentials; redirect to the blog list on
    success, otherwise to the registration page."""
    credentials = {
        'username': request.POST.get('username', ''),
        'password': request.POST.get('password', ''),
    }
    user = auth.authenticate(**credentials)
    if user is None:
        return HttpResponseRedirect('/register/')
    auth.login(request, user)
    return HttpResponseRedirect('/blogs/')
def logout(request):
    """Log the current user out and return to the site root."""
    auth.logout(request)
    return HttpResponseRedirect('/')
def post(request):
    """Create a new blog post; on a valid submission attach the author (and
    the optional uploaded file) and redirect to the blog list."""
    form = PostForm(request.POST or None)
    if not form.is_valid():
        return render_to_response('post.html',
                                  {'user': request.user, 'form': form},
                                  context_instance=RequestContext(request))
    new_post = form.save(commit = False)
    new_post.author = request.user
    if request.FILES:
        new_post.docfile = request.FILES['docfile']
    new_post.save()
    return HttpResponseRedirect('/blogs/')
def register(request):
    """Handle user registration.

    POST: validate and save the registration form, then redirect to the blog
    list; re-render the form with errors when invalid.
    GET: render an empty form together with a CSRF token.
    """
    # Form was submitted.
    if request.method == 'POST':
        form = MyRegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/blogs/')
        return render(request, 'register.html', {'form': form})
    # First visit: build the template context by hand, including the CSRF token.
    args = {}
    args.update(csrf(request))
    # Unbound form (no input yet).
    args['form'] = MyRegistrationForm()
    return render_to_response('register.html', args)
def register_success(request):
    """Render the static post-registration confirmation page."""
    return render_to_response('register_success.html')
def about(request):
    """Render the about page with the highlighted month calendar."""
    today = date.today()
    cal = HTMLCalendar(6).formatmonth(today.year, today.month)
    cal = cal.replace('>%s</td>' % today.day,
                      '><u><a href=#>%s</a></u></td>' % today.day)
    return render_to_response('about.html', {'calendar': cal})
def comment(request, post_id = 1):
    """Show the comments for a post and handle new-comment submissions.

    On a valid POST the comment is attached to the post and its author and
    the browser is redirected to the blog list; otherwise the comment page is
    rendered with the existing comments and the (possibly unbound) form.
    """
    form = CommentForm(request.POST or None)
    post = get_object_or_404(Post, id = post_id)
    if form.is_valid():
        f = form.save(commit = False)
        f.author = request.user
        f.post = post
        f.save()
        return HttpResponseRedirect('/blogs/')
    # Anonymous visitors get a placeholder username for the template.
    if not(request.user.is_authenticated()):
        request.user.username = 'None'
    # NOTE(review): this re-fetches the post already loaded above (and without
    # the 404 guard) — the first lookup could be reused.
    post = Post.objects.get(id = post_id)
    return render_to_response('comment.html',
        {'comments': post.comment_set.all(),
        'user': request.user,
        'form': form,
        'blog_id': post_id}, context_instance=RequestContext(request))
| [
"alan@example.com"
] | alan@example.com |
5a3991eb4b8c3ed78c819bcba0e4ddd17a365a1e | 5febc9d33915448f1e5418081fbcd1e01dd344d7 | /assets/media/uda-ml/code/numpy/quiz.py | 2015822421b9ed892cd2c214ebe8e24a6a46bbad | [
"MIT"
] | permissive | hetaodie/hetaodie.github.io | 56de637096b09e28f0c9fab29c6dd17b6443dea5 | 66657f1c0c93068cdfcaaf5726d6dc09769b6cd7 | refs/heads/master | 2020-04-07T00:47:23.596455 | 2020-01-30T07:45:41 | 2020-01-30T07:45:41 | 49,119,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | import pandas as pd
import numpy as np
data = pd.read_csv("data.csv")
# TODO: Separate the features and the labels into arrays called X and y
# (exercise stub: X and y are intentionally left unset for the student)
X = None
y = None | [
"weixu6130@163.com"
] | weixu6130@163.com |
e410bc700516b354722dc9465f2c0b653033291c | 331b262adff5ee0eade098f6293157dc27c487ad | /punto9.py | 8f8a2ae5364c2f74aad50ae895b09c7835d8577a | [] | no_license | gsandova03/taller3_int_computacional | 43df301664ce5249465e46f68246ce5b3edf269a | c6d8f5a7ed9805e13dd9084170f35ae96148c5f7 | refs/heads/master | 2023-07-28T20:57:31.466317 | 2021-09-20T20:40:23 | 2021-09-20T20:40:23 | 408,577,780 | 0 | 0 | null | 2021-09-20T20:41:54 | 2021-09-20T19:39:55 | null | UTF-8 | Python | false | false | 814 | py | def cominsion():
for n in range( 1, 100 + 1 ):
venta = float( input(f'Ventas realizadas empleado { n }: ') )
if venta <= 200000000:
comision = venta * 0.10
print( f'Comision por ventas empleado {n}, { comision }' )
if venta >= 200000000 and venta <= 400000000:
comision = venta * 0.15
print( f'Comision por ventas empleado {n}, { comision }' )
if venta >= 400000000 and venta <= 800000000:
comision = venta * 0.20
print( f'Comision por ventas empleado {n}, { comision }' )
if venta >= 800000000 and venta <= 1600000000:
comision = venta * 0.25
print( f'Comision por ventas empleado {n}, { comision }' )
if venta > 1600000000:
comision = venta * 0.30
print( f'Comision por ventas empleado {n}, { comision }' )
cominsion() | [
"gsrivillas0328@gmail.com"
] | gsrivillas0328@gmail.com |
b1cabf9ad1949241e3a41c7c19bedb60c837cfce | 015f49cd040b82773409b01b3d40c0e19e828402 | /modules/discord/pack_actions.py | bc9ddd8396e400eb6028acf936d895c808427cc4 | [
"MIT"
] | permissive | cheesycod/FatesList | a36ff488d17f1e7ed247c6269ee94a626f468968 | 82a175b94f93540bab57262c8aa8500a008e7d7b | refs/heads/main | 2023-03-25T04:46:37.943400 | 2021-03-26T08:26:41 | 2021-03-26T08:26:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | from ..deps import *
# Routes in this module are mounted under /pack and hidden from the OpenAPI schema.
router = APIRouter(
    prefix = "/pack",
    tags = ["Pack Actions"],
    include_in_schema = False
)
@router.get("/admin/add")
async def add_server_main(request: Request):
    """Serve the "add bot pack" page for logged-in users, or redirect
    anonymous visitors to the login flow.

    NOTE(review): TemplateResponse is awaited here; with stock Starlette
    Jinja2Templates that call returns a plain Response (not a coroutine), so
    this relies on a custom ``templates`` object from ``..deps`` — confirm.
    """
    if "userid" in request.session.keys():
        return await templates.TemplateResponse("pack_add_edit.html", {"request": request, "tags_fixed": server_tags_fixed, "data": {"form": (await Form.from_formdata(request))}, "error": None, "mode": "add"})
    else:
        return RedirectResponse("/auth/login?redirect=/pack/admin/add&pretty=to add a bot pack")
| [
"meow@683e51740c.servercheap.net"
] | meow@683e51740c.servercheap.net |
e38060a8c7d9bb18f3deb109b85e49558db91fda | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/147/61122/submittedfiles/testes.py | 06774e42c5ec729f01f08e760f84be3690f8d627 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO  (start here below)
# Reads an integer and prints two values derived from it.
n=int(input('digite n:'))
x1=n//1000  # thousands part of n (e.g. 4 for 4321)
b=n//1000  # NOTE(review): identical to x1 — possibly a different divisor was intended
b2=b%100
x2=b2//100  # NOTE(review): b2 is always < 100 here, so x2 is always 0 — looks like a bug
print(x1)
print(x2)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
81208eb6ed83070e4ec6da3f06718ccf084edad3 | 8e192e18d003b4544be03f547832d41bec2c3c44 | /app/views.py | d20d2e859f0e8c9a541ecd05c1f1c755afbc3ef8 | [] | no_license | lina9691/heroku | bbe2da995ac4a4ddc765eb6acc82f533ee4a6e13 | 0e728f7ab83c17239823cc1ffbfbef9b0d911568 | refs/heads/main | 2023-02-20T13:44:42.060560 | 2021-01-22T22:18:08 | 2021-01-22T22:18:08 | 331,484,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,099 | py | # from app import app
# from flask import render_template,request
import sqlite3
import json
from flask import jsonify
# Load the bundled patient JSON once at import time.
f = open('MrAbreu.json')
data = json.load(f)
f.close()
def createdb():
    """Create the SQLite file ``base.db`` and its ``Patient`` table."""
    schema = "CREATE TABLE Patient(Numero_utilisateur INTEGER, Mot_de_passe TEXT, Nom TEXT, Prenom TEXT, Age INTEGER, Adresse TEXT, Hematies INTEGER, Hemoglobine INTEGER, Hematocrite INTEGER, VGM INTEGER, CCMH INTEGER, TCMH INTEGER,RDW INTEGER,Polynucleaires_neutrophiles INTEGER,Polynucleaires_eosinophiles INTEGER,Polynucleaires_basophiles INTEGER,Lymphocytes INTEGER, Monocytes INTEGER)"
    connection = sqlite3.connect('base.db')
    print("base de donnéées ouverte avec succès")
    connection.execute(schema)
    print("Table créée avec succès")
    connection.close()
def adduser(Numero_utilisateur,Mot_de_passe, Nom , Prenom, Age, Adresse, Hematies, Hemoglobine, Hematocrite, VGM, CCMH , TCMH, RDW , Polynucleaires_neutrophiles,Polynucleaires_eosinophiles,Polynucleaires_basophiles,Lymphocytes,Monocytes):
with sqlite3.connect("base.db") as con:
cur = con.cursor()
cur.execute("INSERT INTO Patient (Numero_utilisateur,Mot_de_passe, Nom , Prenom, Age, Adresse, Hematies, Hemoglobine, Hematocrite, VGM, CCMH , TCMH, RDW , Polynucleaires_neutrophiles,Polynucleaires_eosinophiles,Polynucleaires_basophiles,Lymphocytes,Monocytes) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)" , (Numero_utilisateur,Mot_de_passe, Nom , Prenom, Age, Adresse, Hematies, Hemoglobine, Hematocrite, VGM, CCMH , TCMH, RDW , Polynucleaires_neutrophiles,Polynucleaires_eosinophiles,Polynucleaires_basophiles,Lymphocytes,Monocytes))
con.commit()
con.close()
def showdb():
    """Print every row of the Patient table.

    BUG FIX (consistency): the original connected to 'bdd.db' and queried a
    'Patients' table, but every other function in this module uses 'base.db'
    and 'Patient' (see createdb/adduser); aligned with the rest of the module.
    """
    con = sqlite3.connect('base.db')
    cursor = con.cursor()
    cursor.execute("SELECT * from Patient;")
    print(cursor.fetchall())
def utilisateur():
    """Print the Numero_utilisateur of the last Patient row.

    Each row tuple from fetchall() is flattened into a string; only the value
    from the final loop iteration survives, so a single line is printed.
    NOTE(review): if the intent was to print every user number, the print
    belongs inside the loop (cf. the commented-out list accumulation).
    """
    con = sqlite3.connect('base.db')
    cursor = con.cursor()
    cursor.execute("SELECT Numero_utilisateur from Patient ;")
    a = cursor.fetchall()
    b=''
    #L=[]
    for i in a:
        b = "".join(map(str, i))
        #L.append(b)
    print (b)
def mdp():
    """Print the Mot_de_passe of the last Patient row.

    Mirrors utilisateur(): each fetched row tuple is flattened to a string;
    only the last row's value is printed (the full list is accumulated in L
    but otherwise unused).
    """
    con = sqlite3.connect('base.db')
    cursor = con.cursor()
    cursor.execute("SELECT Mot_de_passe from Patient ;")
    a = cursor.fetchall()
    L=[]
    # BUG FIX: b was unbound when the table is empty, so print(b) raised
    # NameError; initialise it as utilisateur() does.
    b = ''
    for i in a:
        b = "".join(map(str, i))
        L.append(b)
    print (b)
def recup(data):
    """Wrap *data* as a Flask JSON response.

    NOTE(review): jsonify requires an active Flask application context;
    calling this outside a request/app context raises RuntimeError — confirm
    the intended call site.
    """
    return jsonify(data)
def transfer(data):
    # NOTE(review): this function looks unfinished — the SQL statement
    # "INSERT INTO Patients" has no column/values clause and will raise
    # sqlite3.OperationalError, and *data* is never used.
    with sqlite3.connect('Mabase.db') as con:
        cur = con.cursor()
        cur.execute("INSERT INTO Patients")
def remplissagee(data):
    """Copy the fields of the JSON record *data* into the Patient table.

    NOTE(review): the loop variable ``i`` is never used — each iteration reads
    the same top-level keys of *data* and inserts the same record, so the row
    is inserted once per key of *data*. If *data* were a list of records the
    body should read from ``i`` instead — confirm intent before changing.
    """
    for i in data:
        Numero_utilisateur = data['Numero_utilisateur']
        Adresse = data['Adresse']
        Mot_de_passe = data['Mot_de_passe']
        Nom = data['Nom']
        Prenom = data['Prenom']
        Age = data['Age']
        Hematies = data['Hematies']
        Hemoglobine = data['Hemoglobine']
        Hematocrite = data['Hematocrite']
        VGM = data['VGM']
        CCMH = data['CCMH']
        TCMH = data['TCMH']
        RDW = data['RDW']
        Polynucleaires_neutrophiles = data['Polynucleaires_neutrophiles']
        # BUG FIX: the original read data['Polynucleaires_neutrophiles'] here,
        # storing the neutrophil count in the eosinophil column.
        Polynucleaires_eosinophiles = data['Polynucleaires_eosinophiles']
        Polynucleaires_basophiles = data['Polynucleaires_basophiles']
        Lymphocytes = data['Lymphocytes']
        Monocytes = data['Monocytes']
        adduser(Numero_utilisateur,Mot_de_passe, Nom , Prenom, Age, Adresse, Hematies, Hemoglobine, Hematocrite, VGM, CCMH , TCMH, RDW , Polynucleaires_neutrophiles,Polynucleaires_eosinophiles,Polynucleaires_basophiles,Lymphocytes,Monocytes)
# def checkdb():
# conn = sqlite3.connect('bdd.db')
# print ("base de donnéées ouverte avec succès")
# with sqlite3.connect("bdd.db") as con:
# cur = con.cursor()
# @app.route("/")
# def index():
# return render_template ('index.html')
# @app.route("/new",methods=['POST'])
# def new():
# utilisateur = request.form.get('utilisateur')
# mdp = request.form.get ('mdp')
# if rech_utilisateur()==utilisateur and rech_mdp()==mdp :
# return "ok"
# else :
# return "Utilisateur incorrect"
| [
"noreply@github.com"
] | lina9691.noreply@github.com |
217247f07d97e49398a3dd4536cd07d5bccccfae | c2520d4b137656b47d9467d0b9350ab242227a15 | /Zajecia03/zad10.py | 426caf720b5bd10f78c013226d10c4e6bd9e854a | [] | no_license | wmackowiak/Zadania | 3f5011346c74b3f99ada28cc83b3f4c2545d2fcb | 890381edfcc11261f3c317426e0b040bcb15c6ad | refs/heads/master | 2021-02-11T03:59:03.448384 | 2020-04-10T19:33:56 | 2020-04-10T19:33:56 | 244,451,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | #Napisz program, który dla 10 kolejnych liczb naturalnych wyświetli sumę poprzedników. Oczekiwany wynik: 1, 3, 6, 10, 15, 21, 28, 36, 45, 55
a = 0
for i in range(1, 11):
a += i
print(a, end=" ")
| [
"w.mackowiak@wp.pl"
] | w.mackowiak@wp.pl |
36af4b17e8f6295dee32643060cf0fdc64ba9357 | 2cfb0479968bc929be47809938d060f331117139 | /app/main/views.py | 884911a53529c115e9e5231eee5a77f035fe7248 | [
"LicenseRef-scancode-sata"
] | permissive | HASSAN1A/Pitch-Platform | a6fd86b7d54c487ead10cce7c4902c3b17de1b13 | 479e0bf827910ba3fe847659100e27a0d3a1c2b1 | refs/heads/master | 2023-03-21T04:24:30.454531 | 2020-10-29T14:57:06 | 2020-10-29T14:57:06 | 306,626,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,754 | py | from flask import render_template,request,redirect,url_for,abort
from . import main
from flask_login import login_required,current_user
from ..models import User,Pitch,Comment
from .forms import UpdateProfile,PitchForm,CommentForm
from .. import db,photos
import markdown2
# Views
@main.route('/')
def index():
    '''
    Render the landing page listing every pitch.
    '''
    all_pitches = Pitch.get_all_pitches()
    return render_template('index.html', pitches=all_pitches)
@main.route('/profile/<username>')
@login_required
def profile(username):
    '''
    Show a user's profile page together with the pitches they posted;
    404 when the username is unknown.
    '''
    user = User.query.filter_by(username = username).first()
    if user is None:
        abort(404)
    user_pitches = Pitch.get_user_pitches(user.id)
    return render_template("profile/profile.html", user = user, pitches=user_pitches)
@main.route('/profile/<username>/update',methods = ['GET','POST'])
@login_required
def update_profile(username):
    """Let the logged-in user edit the bio shown on their profile page.

    404s when *username* does not exist; on a valid submission the new bio is
    persisted and the browser is redirected back to the profile page.
    """
    user = User.query.filter_by(username = username).first()
    if user is None:
        abort(404)
    form = UpdateProfile()
    if form.validate_on_submit():
        user.bio = form.bio.data
        db.session.add(user)
        db.session.commit()
        return redirect(url_for('.profile',username=user.username))
    return render_template('profile/update.html',user=user,form =form)
@main.route('/profile/<username>/update/pic',methods= ['POST'])
@login_required
def update_pic(username):
    """Handle a profile-picture upload for *username*.

    Saves the uploaded file through the ``photos`` upload set, stores its
    relative path on the user record, then redirects back to the profile
    update page.
    NOTE(review): ``user`` is not checked for None before the attribute
    assignment — an unknown username with a photo would raise; confirm
    whether a 404 guard (as in profile/update_profile) is wanted.
    """
    user = User.query.filter_by(username = username).first()
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        # BUG FIX: the saved filename was computed but never used — the path
        # was a hard-coded literal, so every user pointed at the same
        # nonexistent picture.
        path = f'photos/{filename}'
        user.profile_pic_path = path
        db.session.commit()
    return redirect(url_for('main.update_profile',username=username))
@main.route('/pitch/new', methods = ['GET','POST'])
@login_required
def new_pitch():
    '''
    Display the pitch form; on a valid submission persist the pitch for the
    current user and return to the landing page.
    '''
    form = PitchForm()
    if form.validate_on_submit():
        pitch = Pitch(pitch_title=form.title.data,
                      pitch_body=form.body.data,
                      pitch_category=form.category.data,
                      user=current_user)
        pitch.save_pitch()
        return redirect(url_for('.index'))
    return render_template('new_pitch.html', title = 'New Pitch Form', pitch_form=form)
@main.route('/pitches/category/<category_name>')
@login_required
def pitch_by_category(category_name):
    '''
    List the pitches belonging to one category, newest first.
    '''
    matching = Pitch.query.filter_by(pitch_category=category_name).order_by(Pitch.posted.desc()).all()
    return render_template('pitch_by_category.html', pitches=matching, category=category_name)
@main.route('/pitch_details/<pitch_id>', methods = ['GET','POST'])
@login_required
def pitch_details(pitch_id):
    '''
    Show one pitch with its comments (pre-rendered as Markdown) and handle
    new-comment submissions, keeping the pitch's comment counter in sync.
    '''
    form = CommentForm()
    pitch=Pitch.query.get(pitch_id)
    comments=Comment.query.filter_by(pitch_id=pitch_id).order_by(Comment.posted.desc()).all()
    format_comments=[]
    if comments:
        for comment in comments:
            # Pre-render each comment body as HTML for the template.
            format_comments.append(markdown2.markdown(comment.comment,extras=["code-friendly", "fenced-code-blocks"]))
    if form.validate_on_submit():
        comment = form.comment.data
        # New comment bound to the current user and this pitch.
        new_comment = Comment(comment=comment,user=current_user,pitch=pitch)
        new_comment.save_comment()
        # Keep the denormalised per-pitch comment counter in sync.
        pitch.pitch_comments_count = pitch.pitch_comments_count+1
        db.session.add(pitch)
        db.session.commit()
        # Redirect-after-POST so a refresh does not resubmit the comment.
        return redirect(url_for('main.pitch_details',pitch_id=pitch_id))
    return render_template('pitch_details.html',comment_form=form,pitch=pitch,comments=comments,format_comments=format_comments)
@main.route('/pitch_upvote/<pitch_id>')
@login_required
def pitch_upvote(pitch_id):
    '''
    Increment a pitch's upvote counter, then return to its detail page.
    '''
    pitch = Pitch.query.get(pitch_id)
    pitch.pitch_upvotes += 1
    db.session.add(pitch)
    db.session.commit()
    return redirect(url_for('main.pitch_details', pitch_id=pitch_id))
@main.route('/pitch_downvote/<pitch_id>')
@login_required
def pitch_downvote(pitch_id):
    '''
    Increment a pitch's downvote counter, then return to its detail page.
    '''
    pitch = Pitch.query.get(pitch_id)
    pitch.pitch_downvotes += 1
    db.session.add(pitch)
    db.session.commit()
    return redirect(url_for('main.pitch_details', pitch_id=pitch_id))
| [
"okothhassanjuma@gmail.com"
] | okothhassanjuma@gmail.com |
37cc87607e2e732c9eeea6aa1aa1e641c941fa6d | 9e744bb55ea3665c1559e2e91f93123e1103bcdd | /vid-categories-time.py | 8c606e849d08e90a78ab6fbb517822a86ebfd4be | [] | no_license | JakeOGreenwood/video-category-history-visualisation | 0c6b6e3e9eef156d5192d43030959e54e00fcb8a | 05739c3e1cf069fd69954acbfaaeaeffbb27d7f3 | refs/heads/master | 2020-12-07T01:56:06.406719 | 2020-01-08T16:31:05 | 2020-01-08T16:31:05 | 232,607,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,973 | py | import argparse
import requests
import pandas as pd
import re
import json
from bs4 import BeautifulSoup
class VideoTimeGraph:
    """Builds a DataFrame of watched videos from a YouTube history HTML export
    and can fetch video category IDs from the YouTube Data API v3."""
    def __init__(self, api_key):
        # Base endpoint for the videos resource; the key is sent per request.
        self.youtube_api_url = "https://www.googleapis.com/youtube/v3/videos"
        self.api_key = api_key
        print("initialised")
    def load_html(self, history_html):
        '''
        Parse the watch-history HTML file and store the rows in
        self.video_dataframe with the columns
        [channel_url, channel_name, video_url, video_title, utc].
        '''
        # NOTE(review): the file handle is never closed — a `with` block would be safer.
        html_page = open(history_html)
        soup = BeautifulSoup(html_page, 'html.parser')
        video_section = soup.find_all(class_="mdl-cell--6-col")# Each video is kept in one of these classes
        invalid_links = []
        video_list = []
        for elem in video_section:
            # finds the two youtube links within the class, one channel link, one video
            links = elem.find_all('a', href=True)
            if len(links) != 2:
                # Videos not included- usually deleted, removed, or made private on youtube
                invalid_links.append(links)
            else:
                channel_url = links[1].get('href')
                channel_name = links[1].get_text()
                video_url = links[0].get('href')
                video_title = links[0].get_text()
                utc = elem.find(string=(re.compile("UTC")))
                video_list.append([channel_url,channel_name,video_url,video_title,utc])
                # NOTE(review): this dict is rebuilt every iteration and never
                # read — dead code (the DataFrame below uses video_list).
                video_dataframe = {
                    "channel_url":channel_url,
                    "channel_name":channel_name,
                    "video_url":video_url,
                    "video_title":video_title,
                    "utc":utc
                }
        self.video_dataframe = pd.DataFrame(video_list, columns=["channel_url","channel_name","video_url","video_title","utc"])
        #print(self.video_dataframe)
        #self.video_list = video_list
    # NOTE(review): mutable/shared default argument; the default already
    # contains percent-encoded commas (see comment below on why).
    def youtube_api_category_request(self, video_id_list=["Ks-_Mh1QhMc%2Cc0KYU2j0TM4%2CeIho2S0ZahI"]):
        '''
        Call the YouTube Data API v3 for the given video IDs and return the
        list of their category IDs (as strings).
        '''
        video_id_string = "?id=" +"&".join(video_id_list)
        # Video id cannot be entered into params due to percent encoding within the requests package. This is not configurable.
        # Instead video ID must be entered manually
        params = {"part": "snippet", "videoCategoryId": "string", "key": self.api_key}
        try:
            response = requests.get(self.youtube_api_url+video_id_string, params=params)
            print(response.url)
            response.raise_for_status()
        except requests.exceptions.HTTPError as http_error:
            print("HTTP error occurred accesing youtube api: %s", http_error)
        except Exception as error:
            print("Other error occurred : ", error)
        # Json returned by api is converted to a dict and the category ID extracted.
        # NOTE(review): if the request above failed, `response` is unbound or
        # errored here — the exceptions are only printed, not re-raised.
        response_dict = response.json()
        # Details of other information available are in API documentation
        category_id_list = []
        for i in range(len(video_id_list)):
            category_id_list.append(response_dict["items"][i]["snippet"]["categoryId"])
        return category_id_list
    def run(self, history_html):
        # Parse the history file and show the first rows as a smoke check.
        self.load_html(history_html)
        print(self.video_dataframe[:3])
        #self.youtube_api_category_request()
if __name__ == "__main__":
    # CLI entry point: history HTML path and YouTube Data API key.
    parser = argparse.ArgumentParser()
    parser.add_argument("history",type=str,help="Your Youtube watch history HTML file")
    parser.add_argument("apiKey",type=str,help="Your Youtube Data v3 API Key found on https://console.developers.google.com/apis/credentials")
    args = parser.parse_args()
    history_html = args.history
    api_key = args.apiKey
    video_time_graph = VideoTimeGraph(api_key)
    video_time_graph.run(history_html)
| [
"jake.o.greenwood@gmail.com"
] | jake.o.greenwood@gmail.com |
f28bd491a6b1e977d8bc669e9ac48a373a704c28 | 6c7c657220109be0056e7ecd25eee8e382cc1d66 | /sudoku/model/analyzer.py | 06f34251518dd74452075ca21b32cd5e06d1ed78 | [] | no_license | basuke/sudoku | 3f945153e78b313375649c43e3a83bb74dae2443 | 2ef2c68ae164e5a67e7d19deede4906ad52f2519 | refs/heads/master | 2021-05-14T18:23:30.044626 | 2018-01-12T02:21:05 | 2018-01-12T02:21:05 | 116,070,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | class Analyzer(object):
    def __init__(self, *args):
        # Remember the constructor arguments; they define this analyzer's
        # identity for __eq__.
        self._args = args
def analyze(self, board):
raise RuntimeError("not implemented")
    def will_bind(self, board):
        # Hook invoked before binding to a board; default is a no-op.
        pass
    def did_bind(self, board):
        # Hook invoked after binding to a board; default is a no-op.
        pass
    def __eq__(self, other):
        """
        Analyzers are equal when they are the same class constructed with the
        same arguments.

        :type other: Analyzer
        NOTE(review): defining __eq__ without __hash__ makes instances
        unhashable in Python 3 — confirm that is intended.
        """
        return self.__class__ == other.__class__ and self._args == other._args
| [
"Basuke.Suzuki@sony.com"
] | Basuke.Suzuki@sony.com |
a015e07e61f469bafd701aeb92cd6a6cf53b4b0f | 811e1deab7b7762ba0b3f6d6d391c652b7811080 | /blog/views.py | 90daccd0f033328eb78f1aef655e55b635cab8c3 | [] | no_license | clebsonpy/TestDjangoPyCharm | 009bba8e5645bc6eed5f66f7f6d2337b9c5721f3 | 68fff30a1698d1480da5ad3d8bd3bd7344cf9cbe | refs/heads/master | 2016-09-06T18:43:26.720937 | 2015-07-14T08:00:11 | 2015-07-14T08:00:11 | 38,030,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Post
from .forms import PostForm
from django.shortcuts import redirect
def post_list(request):
    """Render the list of published posts, oldest first."""
    published = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': published})
def post_detail(request, pk):
    """Render a single post, returning 404 when *pk* does not exist."""
    the_post = get_object_or_404(Post, pk=pk)
    return render(request, 'blog/post_detail.html', {'post': the_post})
def post_new(request):
    """Create a new post from a submitted PostForm.

    On a valid POST the post is saved and the browser is redirected to its
    detail page; otherwise (GET, or invalid data) the form is (re)rendered.
    """
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=True)
            # BUG FIX: the original had a bare ``post.save`` (no parentheses),
            # a no-op attribute access; ``form.save(commit=True)`` above has
            # already persisted the post, so no extra save is needed.
            return redirect('blog.views.post_detail', pk=post.pk)
    else:
        form = PostForm()
    return render(request, 'blog/post_new.html', {'form': form})
def post_edit(request, pk):
    """Edit an existing post; re-render the bound form until a valid POST."""
    post = get_object_or_404(Post, pk=pk)
    if request.method != "POST":
        form = PostForm(instance=post)
        return render(request, 'blog/post_edit.html', {'form': form})
    form = PostForm(request.POST, instance=post)
    if form.is_valid():
        post = form.save(commit=True)
        post.save()
        return redirect('blog.views.post_detail', pk=post.pk)
    return render(request, 'blog/post_edit.html', {'form': form})
"clebson2007.farias@gmail.com"
] | clebson2007.farias@gmail.com |
48fd13cd46e26454f058944a362e8996ca192344 | 2edf3a0d21117c65dffe87c3da81365c77d66679 | /dfirtrack_main/tests/system/test_system_importer_file_csv_config_based_forms.py | baa1cddf83741025adb6aacefe2ee628c2689cb3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | fxcebx/dfirtrack | 003748305aa412aa9ec043faa98dac45d3053b5c | 20acf4e508aeef9faf2ed1d2195918b6640c1307 | refs/heads/master | 2022-12-10T02:25:47.676855 | 2020-09-24T23:15:42 | 2020-09-24T23:15:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from dfirtrack_main.importer.file.csv_importer_forms import SystemImporterFileCsvConfigbasedForm
class SystemImporterFileCsvConfigbasedFormTestCase(TestCase):
    """Tests for the config-based system importer CSV upload form."""
    def test_system_importer_file_csv_config_based_systemcsv_form_label(self):
        """ test form label """
        # unbound form
        form = SystemImporterFileCsvConfigbasedForm()
        # the upload field must carry the user-facing label
        self.assertEqual(form.fields['systemcsv'].label, 'CSV with systems (*)')
    def test_system_importer_file_csv_config_based_form_empty(self):
        """ test minimum form requirements / INVALID """
        # bind the form with no data at all
        form = SystemImporterFileCsvConfigbasedForm(data = {})
        # the CSV upload is mandatory, so an empty form is invalid
        self.assertFalse(form.is_valid())
    def test_system_importer_file_csv_config_based_systemcsv_form_filled(self):
        """ test minimum form requirements / VALID """
        # read the example CSV shipped with the repository
        upload_csv = open('example_data/dfirtrack_main_importer_file_csv_system__valid.csv', 'rb')
        # bind the form with the file only; no other data is required
        data_dict = {}
        file_dict = {
            'systemcsv': SimpleUploadedFile(upload_csv.name, upload_csv.read()),
        }
        form = SystemImporterFileCsvConfigbasedForm(
            data = data_dict,
            files = file_dict,
        )
        # close file
        upload_csv.close()
        # a valid CSV upload satisfies the minimum requirements
        self.assertTrue(form.is_valid())
| [
"mathias.stuhlmacher@gmx.de"
] | mathias.stuhlmacher@gmx.de |
84be645266974495f0d31b201be4f7d712de4815 | 0ca5d727f41f841c396ba5937c0ca97461642d2b | /Aguilar/ejercicio26.py | 1d9dd7909ca263c2a88733f3663ded356fda73b0 | [] | no_license | ArroyoBernilla/t06.Arroyo.Aguilar | f58eec903f2f16866d689c09a0b1db250cb4d9c2 | c8f0519e97720e3c1a60f43ee1864f6315a49de3 | refs/heads/master | 2020-09-11T10:46:14.357369 | 2019-11-16T05:57:32 | 2019-11-16T05:57:32 | 222,039,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | import os
# Grades on a 0-20 scale, read from the command line:
# argv[1] = student name, argv[2..5] = four grades.
alumno, nota1, nota2, nota3, nota4 = "", 0.0, 0.0, 0.0, 0.0
# INPUT
alumno = os.sys.argv[1]
nota1 = int(os.sys.argv[2])
nota2 = int(os.sys.argv[3])
nota3 = int(os.sys.argv[4])
nota4 = int(os.sys.argv[5])
# PROCESSING: truncated integer mean of the four grades
nota_final = int((nota1 + nota2 + nota3 + nota4) / 4)
# OUTPUT
print(" NOTAS DEL CURSO DE MATEMATICAS")
print(" El alumno: ", alumno)
print("obtubo las siguientes notas")
print("primera nota: ", nota1)
print("segunda nota: ", nota2)
print("tercera nota: ", nota3)
print("cuarta nota: ", nota4)
print("nota final: ", nota_final)
print("COMENTARIO:")
# Grade bands (same boundaries as the original): above 17 congratulates the
# student, 14-17 means "in progress", below 14 urges more effort.
if nota_final > 17:
    print("FELICITACONES HAS OBTENIDO UN MARAVILLOSO PUNTAJE")
elif nota_final >= 14:
    print("ESTA EN PROCESO")
else:
    print("DEBE ESFORZARSE")
| [
"garroyo@unprg.edu.pe"
] | garroyo@unprg.edu.pe |
7c34356fc7693cae881d92047c8d025ff83373d7 | 41f548fc3052d4cd3a94e3171a0e2120705ed760 | /Gomine_DOC_Unicode/Old_crawl/shiye/shiye/items.py | ecb978c4f13f93ff5406aee5a8d1ec921ae69426 | [] | no_license | SuperShen9/Scrapy | 806f972bcd05d85bf02349c5ee7711af550c8568 | cbe141f697596d5a384bb968d7343194236a541f | refs/heads/master | 2021-01-19T13:04:19.957911 | 2018-06-27T23:47:21 | 2018-06-27T23:47:21 | 88,060,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ShiyeItem(scrapy.Item):
    """Scrapy item holding one crawled record: name, code and source URL."""
    # define the fields for your item here like:
    name = scrapy.Field()
    code=scrapy.Field()
    url=scrapy.Field()
    pass
| [
"675153178@qq.com"
] | 675153178@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.