blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
455d5907785f054b86fc0aaa143a61924d84a6dc | 353ad1b12fead8988e8160deb4e3e6e5e13b25a1 | /malware_classification.py | 376047eacffb36ac2c3afaa442e6d16705a88c24 | [] | no_license | jeremyhandong/malware_classification_system | 2d4522e68a923a7c066915d3906829c0e92a12c0 | d23e801ea4a0aa1bf430bc75635830519f03dced | refs/heads/master | 2022-01-16T17:46:27.789804 | 2019-08-12T03:57:29 | 2019-08-12T03:57:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,877 | py | import hashlib
import os
import re
import time
import pickle
from keras.preprocessing.sequence import pad_sequences
import malware_classification_by_static_feature.get_ngram as ngram
import numpy as np
from sklearn.externals import joblib
from tensorflow.contrib import learn
from keras.models import load_model
from malware_classification_by_dynamic_feature import get_api_sequence
def match(file_path, Bytes=1024):
    """Compute the MD5 hex digest of a file.

    Reads the file in chunks of `Bytes` so arbitrarily large files can be
    hashed without loading them fully into memory.

    :param file_path: path of the file to hash
    :param Bytes: chunk size for incremental reading (default 1024)
    :return: lowercase hex MD5 digest of the file contents
    """
    md5_1 = hashlib.md5()
    with open(file_path, 'rb') as f:
        # iter(callable, sentinel) yields chunks until read() returns b''
        for data in iter(lambda: f.read(Bytes), b''):
            md5_1.update(data)
    return md5_1.hexdigest()
def get_example_filenames_from_dir(data_dir, max_length, is_line_as_word=False):
    """Collect example strings from every "APIs.txt" file under `data_dir`.

    Files with fewer than 5 raw lines are skipped.  Lines starting with "#"
    are treated as comments and removed.

    :param data_dir: directory to scan (non-directories yield an empty list)
    :param max_length: max number of lines kept when is_line_as_word is True
    :param is_line_as_word: when True, each line is replaced by the MD5 of
        the line and the first `max_length` digests are space-joined;
        otherwise the whole (comment-stripped) file content is kept
    :return: list of example strings (possibly empty)
    """
    examples = []
    if not os.path.isdir(data_dir):
        return examples
    for fname in os.listdir(data_dir):
        if fname != "APIs.txt":
            continue
        full_path = os.path.join(data_dir, fname)
        # "with" guarantees the handle is closed even on the early-skip path;
        # the original leaked the handle when it hit "continue" below.
        with open(full_path, "r") as f:
            data = f.read()
        # Length check uses the raw line count, before comment stripping.
        if len(data.split("\n")) < 5:
            continue
        data = "\n".join(
            line for line in data.split("\n") if not line.startswith("#")
        )
        if not is_line_as_word:
            examples.append(data.strip())
        else:
            # Replace each line by its MD5 so it behaves like a single token.
            lines = data.split("\n")
            words = [hashlib.md5(line.encode("utf-8")).hexdigest() for line in lines]
            examples.append(" ".join(words[:max_length]))
    return examples
def get_document_length_limit(dir_name):
    """Extract the sentence-length limit encoded as "sen_len=<N>" in a
    directory name; return it as an int, or None when absent."""
    found = re.search(r'sen_len=(\d+)', dir_name)
    return int(found.group(1)) if found else None
def getOpcodeSequence(filename):
    """Extract the opcode mnemonics from an IDA-generated .asm listing.

    Only lines inside a "; Segment type: Pure code" segment are scanned.
    The regex matches a lowercase mnemonic that sits after 16 spaces,
    either at end-of-line or followed by two spaces (i.e. before operands).
    "align" and "db" pseudo-ops are filtered out.

    :param filename: path of the .asm file (read as UTF-8)
    :return: list of opcode mnemonic strings in file order
    """
    opcode_seq = []
    p = re.compile(r'[\s]{16}([a-z]+)$|[\s]{16}([a-z]+[\s]{2})')
    with open(filename, encoding='utf-8') as f:
        f = list(f)  # materialize so we can index lines by position
        for i in range(0, len(f)):
            if str(f[i]).strip() == "; Segment type: Pure code":
                for j in range(i + 1, len(f)):
                    # Stop at the next segment header or end of file.
                    if re.match(r"; Segment type:*", str(f[j]).strip()) or j == len(f) - 1:
                        # NOTE(review): reassigning i here does NOT advance the
                        # outer range() loop in Python, so later "Pure code"
                        # headers are re-scanned from scratch — TODO confirm
                        # whether that double-scan is intended.
                        i = j
                        break
                    m = re.findall(p, str(f[j]))
                    if m:
                        # Group 2 matched "<opcode><2 spaces>", group 1 matched
                        # "<opcode>$"; prefer whichever is non-empty.
                        opc = str(m[0][1]).strip()
                        opc2 = str(m[0][0]).strip()
                        if opc != '' and opc != "align" and opc != "db":
                            opcode_seq.append(opc)
                        elif opc2 != '' and opc2 != "align" and opc2 != "db":
                            opcode_seq.append(opc2)
    return opcode_seq
def static_predict(filepath):
    '''
    Predict the malware class of one sample from static features.

    Pipeline: disassemble the sample with IDA Pro (-B batch mode), build
    opcode 3-gram counts, combine them with PE optional-header fields read
    via objdump, and feed the resulting feature vector to a pre-trained
    random-forest model ("mix_rf_model.pkl").

    :param filepath: path of the PE sample to classify
    :return: the predicted class label for the sample
    '''
    # Batch-disassemble with IDA; produces "<filepath>.asm" next to the sample.
    command = 'D:\\IDA7.0\\idat.exe -B ' + filepath
    os.system(command)
    opcode_sequence = getOpcodeSequence(filepath + ".asm")
    op3gram = ngram.getOpcodeNgram(opcode_sequence)  # build opcode n-gram counts
    selected_static_features = ngram.getStaticFeatures()
    # Map every selected n-gram feature to its count in this sample (0 if absent).
    standard = {}
    for feature in selected_static_features:  # feature is a stringified tuple
        temp = feature.replace('(', '').replace(')', '').replace('\'', '')
        feature_tuple = tuple([str(i).strip() for i in temp.split(',')])
        if feature_tuple in op3gram:
            standard[feature_tuple] = op3gram[feature_tuple]
        else:
            standard[feature_tuple] = 0
    file_selected_static_feature_list = []
    file_list = []
    for num in standard.values():
        file_selected_static_feature_list.append(num)
    # PE optional-header fields, defaulted to 0 when not found by objdump.
    # NOTE(review): "MajorOSystemVersion" matches objdump's field spelling
    # here — TODO confirm against objdump output.
    key_list = {}
    # key_list["Name"] = []
    # key_list["Malware"] = []
    key_list["Magic"] = 0
    key_list["MajorLinkerVersion"] = 0
    key_list["MinorLinkerVersion"] = 0
    key_list["SizeOfCode"] = 0
    key_list["SizeOfInitializedData"] = 0
    key_list["SizeOfUninitializedData"] = 0
    key_list["AddressOfEntryPoint"] = 0
    key_list["BaseOfCode"] = 0
    key_list["BaseOfData"] = 0
    key_list["ImageBase"] = 0
    key_list["SectionAlignment"] = 0
    key_list["FileAlignment"] = 0
    key_list["MajorOSystemVersion"] = 0
    key_list["MinorOSystemVersion"] = 0
    key_list["MajorImageVersion"] = 0
    key_list["MinorImageVersion"] = 0
    key_list["MajorSubsystemVersion"] = 0
    key_list["MinorSubsystemVersion"] = 0
    key_list["Win32Version"] = 0
    key_list["SizeOfImage"] = 0
    key_list["SizeOfHeaders"] = 0
    key_list["CheckSum"] = 0
    key_list["Subsystem"] = 0
    key_list["DllCharacteristics"] = 0
    key_list["SizeOfStackReserve"] = 0
    key_list["SizeOfStackCommit"] = 0
    key_list["SizeOfHeapReserve"] = 0
    key_list["SizeOfHeapCommit"] = 0
    key_list["LoaderFlags"] = 0
    key_list["NumberOfRvaAndSizes"] = 0
    # key_list["Name"].append(os.path.basename(filepath) + ".txt")
    # key_list["Malware"].append(1)
    # Record which header fields were actually present in the objdump output.
    exit_key_list = []
    # exit_key_list.append("Name")
    # exit_key_list.append("Malware")
    command = 'objdump -x ' + filepath
    header_info = os.popen(command).read().strip()
    p = r'.*\n'
    header_info = re.findall(p, header_info)
    for i in range(0, len(header_info)):
        # The optional-header dump starts at the "Magic" line.
        if re.match(r'Magic*', str(header_info[i]).strip('\n')):
            # Parse up to 30 "key<TAB>value" lines, stopping at a blank line.
            for j in range(i, i + 30):
                if str(header_info[j]).strip('\n') == "":
                    break
                s = str(header_info[j]).strip('\n')
                s = s.split("\t")
                key = [x.strip() for x in s if x.strip() != ''][0]
                value = [x.strip() for x in s if x.strip() != ''][1]
                exit_key_list.append(key)
                # Version fields are printed in decimal; everything else in hex.
                if key == "MajorLinkerVersion" or key == "MinorLinkerVersion" or key == "MajorOSystemVersion" or key == "MinorOSystemVersion" or key == "MajorImageVersion" or key == "MinorImageVersion" or key == "MajorSubsystemVersion" or key == "MinorSubsystemVersion":
                    key_list[key] = int(value)
                else:
                    key_list[key] = int(value, 16)
            break
    for num in key_list.values():
        file_selected_static_feature_list.append(num)
    file_list.append(file_selected_static_feature_list)
    rf_model = joblib.load('mix_rf_model.pkl')  # load the trained model
    static_feature_result = rf_model.predict(file_list)
    return static_feature_result[0]
def dynamic_predict(filepath):
    """Predict the malware class of one sample from dynamic features.

    Submits the sample to a Cuckoo sandbox, waits for its JSON report,
    extracts the API-call sequence, and classifies it with a pre-trained
    CNN ("model_weight_train_cnn_4.h5", 7 output classes).

    :param filepath: path of the sample to submit to Cuckoo
    :return: predicted class index (int) for the sample
    """
    command = 'cuckoo submit ' + filepath
    cuckoo_command = os.popen(command).read()
    # Cuckoo prints the task id after a '#'; use it to locate the report.
    cuckoo_num = str(cuckoo_command).split('#')[1].strip('\n')
    print(cuckoo_num)
    cuckoo_log_path = "C:\\Users\\47892\\.cuckoo\\storage\\analyses\\" + cuckoo_num + "\\reports\\report.json"
    # Busy-wait until the report file appears, then give Cuckoo 30 more
    # seconds to finish writing it before parsing.
    while True:
        if os.path.exists(cuckoo_log_path):
            time.sleep(30)
            API_sequence = get_api_sequence.extract_api_sequence_from_one_file(cuckoo_log_path)
            break
    maxlen = 2000  # API sequences are padded/truncated to this length
    # deep learning
    tokenizer = pickle.load(open('tokenizer.pkl', 'rb'))
    x_test_word_ids = tokenizer.texts_to_sequences(API_sequence)
    x_test_padded_seqs = pad_sequences(x_test_word_ids, maxlen=maxlen)
    model = load_model("model_weight_train_cnn_4.h5")
    y_pred = model.predict(x_test_padded_seqs)
    # NOTE(review): meta_train is filled but never used afterwards.
    meta_train = np.zeros(shape=(len(API_sequence), 7))
    meta_train[:] = y_pred
    # argmax over the 7 class probabilities for each row.
    predict_type_list = []
    for l in y_pred:
        l_tmp = l.tolist()
        predict_type = l_tmp.index(max(l_tmp))
        predict_type_list.append(predict_type)
    return predict_type_list[0]
if __name__ == "__main__":
    # Script entry point: classify one uploaded sample by static features.
    # (This largely duplicates static_predict() above with a hard-coded path.)
    '''
    根据静态特征进行类别预测
    '''
    filepath = ".\\upload\\0a28b556b4b6998265fa9fe649554533"
    filename = match(filepath)
    filename = ".\\tmp\\" + filename + ".txt"
    # Batch-disassemble with IDA; produces "<filepath>.asm".
    command = 'D:\\IDA7.0\\idat.exe -B ' + filepath
    os.system(command)
    opcode_sequence = getOpcodeSequence(filepath + ".asm")
    op3gram = ngram.getOpcodeNgram(opcode_sequence)  # build opcode n-gram counts
    selected_static_features = ngram.getStaticFeatures()
    # Map every selected n-gram feature to its count in this sample.
    standard = {}
    for feature in selected_static_features:  # feature is a stringified tuple
        temp = feature.replace('(', '').replace(')', '').replace('\'', '')
        feature_tuple = tuple([str(i).strip() for i in temp.split(',')])
        if feature_tuple in op3gram:
            standard[feature_tuple] = op3gram[feature_tuple]
        else:
            standard[feature_tuple] = 0
    file_selected_static_feature_list = []
    file_list = []
    for num in standard.values():
        file_selected_static_feature_list.append(num)
    # PE optional-header fields, defaulted to 0 when absent.
    key_list = {}
    # key_list["Name"] = []
    # key_list["Malware"] = []
    key_list["Magic"] = 0
    key_list["MajorLinkerVersion"] = 0
    key_list["MinorLinkerVersion"] = 0
    key_list["SizeOfCode"] = 0
    key_list["SizeOfInitializedData"] = 0
    key_list["SizeOfUninitializedData"] = 0
    key_list["AddressOfEntryPoint"] = 0
    key_list["BaseOfCode"] = 0
    key_list["BaseOfData"] = 0
    key_list["ImageBase"] = 0
    key_list["SectionAlignment"] = 0
    key_list["FileAlignment"] = 0
    key_list["MajorOSystemVersion"] = 0
    key_list["MinorOSystemVersion"] = 0
    key_list["MajorImageVersion"] = 0
    key_list["MinorImageVersion"] = 0
    key_list["MajorSubsystemVersion"] = 0
    key_list["MinorSubsystemVersion"] = 0
    key_list["Win32Version"] = 0
    key_list["SizeOfImage"] = 0
    key_list["SizeOfHeaders"] = 0
    key_list["CheckSum"] = 0
    key_list["Subsystem"] = 0
    key_list["DllCharacteristics"] = 0
    key_list["SizeOfStackReserve"] = 0
    key_list["SizeOfStackCommit"] = 0
    key_list["SizeOfHeapReserve"] = 0
    key_list["SizeOfHeapCommit"] = 0
    key_list["LoaderFlags"] = 0
    key_list["NumberOfRvaAndSizes"] = 0
    # key_list["Name"].append(os.path.basename(filepath) + ".txt")
    # key_list["Malware"].append(1)
    # Record which header fields were actually present in the objdump output.
    exit_key_list = []
    # exit_key_list.append("Name")
    # exit_key_list.append("Malware")
    command = 'objdump -x ' + filepath
    header_info = os.popen(command).read().strip()
    p = r'.*\n'
    header_info = re.findall(p, header_info)
    for i in range(0, len(header_info)):
        # The optional-header dump starts at the "Magic" line.
        if re.match(r'Magic*', str(header_info[i]).strip('\n')):
            for j in range(i, i + 30):
                if str(header_info[j]).strip('\n') == "":
                    break
                s = str(header_info[j]).strip('\n')
                s = s.split("\t")
                key = [x.strip() for x in s if x.strip() != ''][0]
                value = [x.strip() for x in s if x.strip() != ''][1]
                exit_key_list.append(key)
                # Version fields are decimal; everything else is hex.
                if key == "MajorLinkerVersion" or key == "MinorLinkerVersion" or key == "MajorOSystemVersion" or key == "MinorOSystemVersion" or key == "MajorImageVersion" or key == "MinorImageVersion" or key == "MajorSubsystemVersion" or key == "MinorSubsystemVersion":
                    key_list[key] = int(value)
                else:
                    key_list[key] = int(value, 16)
            break
    for num in key_list.values():
        file_selected_static_feature_list.append(num)
    file_list.append(file_selected_static_feature_list)
    rf_model = joblib.load('mix_rf_model.pkl')  # load the trained model
    static_feature_result = rf_model.predict(file_list)
    print(static_feature_result)  # static-feature prediction result
    # Dynamic-feature prediction (disabled; see dynamic_predict() above).
    '''
    根据动态特征进行结果预测
    '''
    # command = 'cuckoo submit ' + filepath
    # cuckoo_command = os.popen(command).read()
    # cuckoo_num=str(cuckoo_command).split('#')[1].strip('\n')
    # print(cuckoo_num)
    # cuckoo_log_path="C:\\Users\\47892\\.cuckoo\\storage\\analyses\\"+cuckoo_num+"\\reports\\report.json"
    # while True:
    #     if os.path.exists(cuckoo_log_path):
    #         time.sleep(30)
    #         API_sequence=get_api_sequence.extract_api_sequence_from_one_file(cuckoo_log_path)
    #         break
    #
    # maxlen=2000
    # # deep learning
    # tokenizer = pickle.load(open('tokenizer.pkl', 'rb'))
    # x_test_word_ids = tokenizer.texts_to_sequences(API_sequence)
    # x_test_padded_seqs = pad_sequences(x_test_word_ids, maxlen=maxlen)
    # model = load_model("model_weight_train_cnn_4.h5")
    # y_pred = model.predict(x_test_padded_seqs)
    # meta_train = np.zeros(shape=(len(API_sequence), 7))
    # meta_train[:] = y_pred
    # predict_type_list = []
    # for l in y_pred:
    #     l_tmp = l.tolist()
    #     predict_type = l_tmp.index(max(l_tmp))
    #     predict_type_list.append(predict_type)
    # print(predict_type_list[0])
# print(predict_type_list[0]) | [
"478922579@qq.com"
] | 478922579@qq.com |
34ea94313a57e664742570da3deff1daa9a356d8 | 652591a1974fb862a71fb43938857d981f4cd9a1 | /Camp1_Day2_assign1.py | 86c7b6b85d6d03fd7b51a63a159ee1acbe94fc50 | [] | no_license | Surote/answer_cisco_black_belt_LV1 | 0911a53ec824ff26365c63c31fec9cd86f4c037e | 30ec5a7f135502992934d97ead7aa9c31cb94680 | refs/heads/master | 2020-03-19T03:12:11.634464 | 2018-06-01T10:43:30 | 2018-06-01T10:43:30 | 135,704,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # Created: Surote Wongpaiboon
# Email : surote.wongpaiboon@dimensiondata.com
# Tel : +66-2625-0999
# Attended session IDs as one comma-separated string.
Sessions_Attended = {'sessions' : '1011,2344,3222,44322,555,6332,721,8789,99,1011,1124,1245,137,1499'}
# Split on commas and report the number of attended sessions (14 entries here).
print('I have attended '+str(len(Sessions_Attended['sessions'].split(',')))+' sessions!!')
"surote.z9@gmail.com"
] | surote.z9@gmail.com |
8eb20a63cf9ae7debe25c9b008d788862e5ee7da | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/78/usersdata/171/41777/submittedfiles/divisores.py | 701949f8b9cb8bf36079078eda939d27b7fe7166 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # -*- coding: utf-8 -*-
import math
# Read the loop count and two integers from the user.
n = int(input('digite n:'))
a = int(input('digite a:'))
b = int(input('digite b:'))
d = a
e = b
f = a*b
# NOTE(review): the loop re-assigns the same values every iteration, so it
# has no effect; the final print is simply a*b regardless of n.
for i in range(1, n+1, 1):
    d = a
    e = b
    f = a*b
print(f)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
81959905ddb52d2dffeabfdfb8721b9077f22d67 | de57f4fdeaaf59b361e19fcd42f068d848a759f0 | /DRL/utils.py | 97ba972c20e01d6ef107bb08d7180f6c27cb7905 | [] | no_license | Yijun-Mao/GraphAM | c9c40e4fe7f18d76474619794132ed56bd52c1db | c61354f24313088eab84fe902db08b45873ed8ba | refs/heads/master | 2022-10-31T02:57:23.421384 | 2020-06-10T07:41:48 | 2020-06-10T07:41:48 | 253,416,405 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | import torch
import torch.nn as nn
import os
from DRL.envs_manager import VecNormalize
# Get a render function
def get_render_func(venv):
    """Walk a stack of env wrappers and return the innermost render callable.

    A wrapper exposing `envs` yields its first sub-env's `render`; wrappers
    exposing `venv`/`env` are unwrapped recursively.  Returns None when no
    render function can be found.
    """
    if hasattr(venv, 'envs'):
        return venv.envs[0].render
    for wrapped_attr in ('venv', 'env'):
        if hasattr(venv, wrapped_attr):
            return get_render_func(getattr(venv, wrapped_attr))
    return None
def get_vec_normalize(venv):
    """Unwrap nested `venv` wrappers until a VecNormalize instance is found;
    return it, or None when the stack contains no VecNormalize."""
    current = venv
    while current is not None:
        if isinstance(current, VecNormalize):
            return current
        current = getattr(current, 'venv', None)
    return None
# Necessary for my KFAC implementation.
class AddBias(nn.Module):
    """Learnable additive bias applied to 2-D or 4-D inputs.

    Kept as a separate module (instead of a layer's built-in bias) because
    the KFAC optimizer implementation needs it factored out.
    """

    def __init__(self, bias):
        super(AddBias, self).__init__()
        # Stored as (n, 1) so it can be transposed/reshaped for broadcasting.
        self._bias = nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        # Broadcast over (batch, features) or (batch, channels, H, W).
        view_shape = (1, -1) if x.dim() == 2 else (1, -1, 1, 1)
        return x + self._bias.t().view(*view_shape)
def init(module, weight_init, bias_init, gain=1):
    """Initialize a module's weight and bias in place and return the module.

    `weight_init` is called as weight_init(tensor, gain=gain); `bias_init`
    is called as bias_init(tensor).
    """
    weight_tensor = module.weight.data
    bias_tensor = module.bias.data
    weight_init(weight_tensor, gain=gain)
    bias_init(bias_tensor)
    return module
# https://github.com/openai/baselines/blob/master/baselines/common/tf_util.py#L87
def init_normc_(weight, gain=1):
    """In-place init: sample N(0, 1), then rescale each row of `weight`
    so its Euclidean norm equals `gain`."""
    weight.normal_(0, 1)
    row_norms = weight.pow(2).sum(1, keepdim=True).sqrt()
    weight *= gain / row_norms
def save_modules(optimizer, model, args, config, save_path):
    """Persist model weights, optimizer state, CLI args and config to disk.

    `config` must be a namedtuple-like object exposing _asdict(); it is
    stored as a plain dict so the checkpoint stays picklable.
    """
    print('Storing model and optimizer to: {}'.format(save_path))
    checkpoint = {
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'args': args,
        'config': dict(config._asdict()),  # namedtuple -> plain dict
    }
    torch.save(checkpoint, save_path)
def load_modules(optimizer, model, checkpoint):
    """Restore model and optimizer state from a checkpoint dict (as written
    by save_modules) and return the stored args object."""
    saved_args = checkpoint['args']
    for target, key in ((model, 'state_dict'), (optimizer, 'optimizer')):
        target.load_state_dict(checkpoint[key])
    return saved_args
| [
"yijunmao1997@gmail.com"
] | yijunmao1997@gmail.com |
361d8fb2dc27b5906759c3545bfa06d130ffc322 | ea477b2a8b9ac0cc456dc128728c2cf4d9742f39 | /sudoku_solver.py | d38136db4418d356630731ca98228346521c609e | [] | no_license | aymanriz/Sudoku-App | 0e93d92e311125f6b8ea49f5519f11662f9af85b | da4cba105aec6f6fd5dc6acb75bc3a044a050057 | refs/heads/master | 2022-12-22T02:54:12.717149 | 2020-09-28T12:32:29 | 2020-09-28T12:32:29 | 299,298,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,879 | py | import random
import pygame
import time
from Sudoku_app.Button import Button
from Sudoku_app.CheckBox import CheckBox
from Sudoku_app.InputBox import InputBox
pygame.init()
WIDTH = 450
HEIGHT=550
WIN = pygame.display.set_mode((WIDTH+100, HEIGHT))
pygame.display.set_caption("Sudoku App")
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
PURPLE = (128, 0, 128)
ORANGE = (255, 165 ,0)
GREY = (128, 128, 128)
TURQUOISE = (64, 224, 208)
pygame_nums={pygame.K_0:0,pygame.K_DELETE:0,pygame.K_1:1,pygame.K_2:2,pygame.K_3:3,pygame.K_4:4,pygame.K_5:5,pygame.K_6:6,pygame.K_7:7,pygame.K_8:8,pygame.K_9:9}
subgrid_dict={0:0,1:0,2:0,3:1,4:1,5:1,6:2,7:2,8:2}
dict={0:[0,1,2],1:[3,4,5],2:[6,7,8]}
####generating a unique random sudoku board####
def fill(grid):
    """Completely fill `grid` (9x9 of Cell) with a valid random solution.

    Backtracking: repeatedly pick the most-constrained empty cell, try its
    candidate values in random order, recurse, and undo on failure.

    :return: True when the grid was filled, False when no solution exists
    """
    cell = find_min_cell(grid)
    if not cell:
        # No empty cell left: the board is complete.
        return True
    x, y = cell
    poss = grid[x][y].get_possibilities(grid)
    random.shuffle(poss)  # random order -> a different board each run
    for p in poss:
        grid[x][y].val = p
        if fill(grid):
            return True
        grid[x][y].val = 0  # undo and try the next candidate
    return False
# Global solution counter used by not_unique() during uniqueness checks.
cnt = 0
def create_sudoku(lim):
    """Generate a random sudoku puzzle with (up to) `lim` empty cells.

    Starts from a full random solution, then removes values one by one in
    random order, keeping a removal only if the resulting puzzle still has
    exactly one solution.

    :param lim: desired number of empty cells
    :return: 9x9 grid of Cell; empty cells are marked editable
    """
    global cnt
    grid = make_empty_grid()
    fill(grid)  # produce a complete random solution first
    pos = [(x, y) for x in range(9) for y in range(9)]
    random.shuffle(pos)
    empty = 0
    for (x, y) in pos:
        if empty == lim:
            break
        tmp = grid[x][y].val
        grid[x][y].val = 0
        temp_grid = deepcopy(grid)  # not_unique mutates its argument
        cnt = 0
        if not_unique(temp_grid):
            # Removing this value allows a second solution: put it back.
            grid[x][y].val = tmp
        else:
            empty += 1
    # Only cells left empty by the generator are player-editable.
    for row in grid:
        for cell in row:
            cell.can_be_changed = True if cell.val == 0 else False
    return grid
def not_unique(grid):
    """Return True as soon as the puzzle is found to have >= 2 solutions.

    Exhaustive backtracking over `grid` (which is mutated); every completed
    board increments the global `cnt`, and the search stops once a second
    solution is reached.  Caller must reset `cnt` to 0 before each check.
    """
    global cnt
    cell = find_min_cell(grid)
    if not cell:
        # Reached a complete solution.
        cnt += 1
        if cnt >= 2:
            return True
        return False
    x, y = cell
    poss = grid[x][y].get_possibilities(grid)
    random.shuffle(poss)
    for p in poss:
        grid[x][y].val = p
        if not_unique(grid):
            return True
        grid[x][y].val = 0
    return False
def deepcopy(grid):
    """Build an independent copy of a square grid of Cells, preserving only
    each cell's value (fresh Cells of width 50)."""
    size = len(grid)
    return [
        [Cell(r, c, grid[r][c].val, 50) for c in range(size)]
        for r in range(size)
    ]
####end########
class Cell:
    """One square of the sudoku board: holds its value, position, sub-grid
    membership, and all drawing/selection state."""

    def __init__(self, x, y, val, width):
        """:param x: row index (0-8)
        :param y: column index (0-8)
        :param val: current digit, 0 meaning empty
        :param width: pixel size of the square"""
        self.font = pygame.font.SysFont("Arial", 18)
        self.x = x
        self.y = y
        self.val = val
        # `dict`/`subgrid_dict` are module-level tables (note: `dict` shadows
        # the builtin) mapping row/col index -> the 3 indices of its 3x3 box.
        self.box_rows = dict[subgrid_dict[x]]
        self.box_cols = dict[subgrid_dict[y]]
        self.width = width
        self.color = WHITE
        # Only cells that start empty may be edited by the player.
        self.can_be_changed = True if val == 0 else False
        self.selected = False
        self.changed = False

    def get_possibilities(self, grid):
        """Return the list of digits 1-9 legal for this cell given `grid`."""
        res = [x for x in range(0, 10)]
        taken = self.get_vals_in_row(grid)
        taken.update(self.get_vals_in_box(grid))
        taken.update(self.get_vals_in_col(grid))
        for x in taken:
            if x in res:
                res.remove(x)
        if 0 in res:
            res.remove(0)  # 0 is the "empty" marker, never a candidate
        return res

    def get_vals_in_row(self, grid):
        """Set of values already placed in this cell's row."""
        res = {x.val for x in grid[self.x]}
        return res

    def get_vals_in_col(self, grid):
        """Set of values already placed in this cell's column."""
        res = set()
        for i in range(len(grid)):
            res.add(grid[i][self.y].val)
        return res

    def get_vals_in_box(self, grid):
        """Set of values already placed in this cell's 3x3 box."""
        res = set()
        for i in self.box_rows:
            for j in self.box_cols:
                res.add(grid[i][j].val)
        return res

    def make_green(self):
        # Highlight used while the solver tries a value.
        self.color = GREEN

    def make_red(self):
        # Highlight used when the solver backtracks / input is invalid.
        self.color = RED

    def make_white(self):
        self.color = WHITE

    def select(self):
        self.selected = True
        self.color = TURQUOISE

    def unselect(self):
        self.selected = False
        self.color = WHITE

    def draw(self, win):
        """Render the cell outline and its digit (blank when val == 0).
        The board is drawn 40px below the top of the window."""
        rect = pygame.Rect(self.y*self.width, self.x*self.width+40, self.width, self.width)
        pygame.draw.rect(win, self.color, rect, 5)
        msg = str(self.val) if self.val != 0 else ""
        win.blit(self.font.render(msg, True, BLACK), (self.y*self.width+self.width//2, self.x*self.width+self.width//2+40))
def update_selected(grid, inputBox):
    """Handle left-clicks: select the clicked board cell (deselecting all
    others and the input box), or — when clicking outside the board —
    deselect every cell and toggle the input box if it was hit."""
    gap = WIDTH//9
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    if click[0] == 1:  # left button held
        # The board occupies x in [0, WIDTH], y in [40, WIDTH+40).
        if 0 <= mouse[0] <= WIDTH and 40 <= mouse[1] < WIDTH+40:
            x, y = ((mouse[1]-40)//gap, mouse[0]//gap)  # (row, col) clicked
            for i in range(len(grid)):
                for j in range(len(grid)):
                    if (i, j) == (x, y):
                        grid[i][j].select()
                    else:
                        grid[i][j].unselect()
            inputBox.unselect()
        else:
            # Click outside the board clears any cell selection.
            for row in grid:
                for cell in row:
                    cell.unselect()
            if inputBox.is_clicked():
                inputBox.select()
            else:
                inputBox.unselect()
def get_selected(grid):
    """Return the currently selected cell, or None when nothing is selected."""
    return next(
        (cell for row in grid for cell in row if cell.selected),
        None,
    )
def find_cell(grid):
    """Return (row, col) of the first empty cell (val == 0) scanning
    row-major, or False when the grid is full."""
    for row_idx, row in enumerate(grid):
        for col_idx, cell in enumerate(row):
            if cell.val == 0:
                return row_idx, col_idx
    return False
def find_min_cell(grid):
    """Return (row, col) of the empty cell with the fewest legal candidates
    (MRV heuristic); on ties the earliest in row-major order wins.
    Returns False when the grid has no empty cell."""
    best = None
    best_count = 10  # any empty cell has at most 9 candidates
    for r, row in enumerate(grid):
        for c, cell in enumerate(row):
            if cell.val != 0:
                continue
            count = len(cell.get_possibilities(grid))
            if count < best_count:
                best_count = count
                best = (r, c)
    return best if best is not None else False
def backtracking_soduko(draw, grid, min, steps):
    """Solve `grid` in place by backtracking.

    :param draw: zero-arg callback that redraws the board (used when
        visualizing steps)
    :param grid: 9x9 grid of Cell, mutated toward the solution
    :param min: when truthy, pick cells with the MRV heuristic
        (find_min_cell); otherwise scan for the first empty cell
        (note: shadows the builtin `min`)
    :param steps: when truthy, animate each try/backtrack with color
        highlights and 0.2 s pauses
    :return: True when solved, False when unsolvable
    """
    if min:
        cell = find_min_cell(grid)
    else:
        cell = find_cell(grid)
    if not cell:
        # No empty cell remains: solved.
        return True
    else:
        x, y = cell
        poss = grid[x][y].get_possibilities(grid)
        for p in poss:
            grid[x][y].val = p
            if steps:
                grid[x][y].make_green()  # show the tentative placement
                draw()
                time.sleep(0.2)
            if backtracking_soduko(draw, grid, min, steps):
                return True
            if steps:
                grid[x][y].make_red()  # show that this value failed
                draw()
                time.sleep(0.2)
            grid[x][y].val = 0  # undo before trying the next candidate
            if steps:
                grid[x][y].make_red()
                draw()
                time.sleep(0.2)
        return False
def make_empty_grid():
    """Create a fresh 9x9 board of empty Cells sized to fit the window."""
    cell_size = WIDTH // 9
    return [
        [Cell(row, col, 0, cell_size) for col in range(9)]
        for row in range(9)
    ]
def draw_grid_lines(win, grid, width):
    """Draw the 10 horizontal and 10 vertical board lines, 40px below the
    window top; every third line (box boundary) is drawn thicker."""
    gap = width // 9
    for i in range(10):
        thickness = 3 if i % 3 == 0 else 1
        y = i * gap + 40
        # horizontal line
        pygame.draw.line(win, BLACK, (0, y), (width, y), thickness)
        # vertical line
        pygame.draw.line(win, BLACK, (i * gap, 40), (i * gap, width + 40), thickness)
def draw(win, grid, width, buttons):
    """Redraw the whole frame: background, labels, buttons, cells, grid
    lines — then flip the display."""
    win.fill(WHITE)
    font = pygame.font.Font("freesansbold.ttf", 16)
    # Static UI labels.
    welcome_msg = font.render("Insert a valid sudoku board,or press generate to generate a new board! ", True, BLUE, WHITE)
    number_of_empty_cells = font.render("Number of empty cells :", True, BLUE, WHITE)
    stepsText = font.render("Show Steps", True, BLUE, WHITE)
    MVR_heuristic = font.render("Use MVR", True, BLUE, WHITE)
    MVR_heuristic2 = font.render("Heuristic", True, BLUE, WHITE)
    win.blit(welcome_msg, (0, 10))
    win.blit(number_of_empty_cells, (0, 515))
    win.blit(stepsText, (454, 176))
    win.blit(MVR_heuristic, (462, 225))
    win.blit(MVR_heuristic2, (462, 240))
    for button in buttons:
        button.draw(win)
    for row in grid:
        for cell in row:
            cell.draw(win)
    draw_grid_lines(win, grid, width)
    pygame.display.update()
def main():
    """Run the interactive sudoku app: event loop handling cell input,
    board generation, option checkboxes and the solve button."""
    grid = make_empty_grid()
    user_defined_grid = True  # True while the board was typed in by the user
    # UI widgets (positions are in window pixels).
    generateBTN = Button(244, 515, "Generate")
    clearBTN = Button(344, 515, "Clear Board")
    inputBox = InputBox(190, 503, 40, 0)  # number of empty cells to generate
    solveBTN = Button(468, 298, "Solve", 24)
    StepsBox = CheckBox(494, 195, 10)  # "Show Steps" option
    MVRbox = CheckBox(494, 260, 10)    # "Use MVR Heuristic" option
    buttons = [generateBTN, clearBTN, inputBox, solveBTN, StepsBox, MVRbox]
    run = True
    while run:
        update_selected(grid, inputBox)
        cell = get_selected(grid)
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                run = False
            if event.type == pygame.KEYDOWN:
                if event.key in pygame_nums:  # digit keys and Delete
                    if cell:
                        if cell.can_be_changed:
                            # Accept only legal digits (or 0 to clear).
                            if pygame_nums[event.key] in cell.get_possibilities(grid) or pygame_nums[event.key] == 0:
                                cell.val = pygame_nums[event.key]
                                cell.changed = True
                            else:
                                # Briefly flash the invalid digit in red,
                                # then clear it and restore the selection.
                                cell.val = pygame_nums[event.key]
                                draw(WIN, grid, WIDTH, buttons)
                                time.sleep(0.1)
                                cell.val = 0
                                cell.make_red()
                                cell.draw(WIN)
                                pygame.display.update()
                                time.sleep(0.1)
                                cell.select()
                    if inputBox.selected:
                        # Append the digit to the empty-cell count; Delete resets it.
                        if event.key != pygame.K_DELETE:
                            inputBox.val = inputBox.val*10 + pygame_nums[event.key]
                        else:
                            inputBox.val = 0
        if clearBTN.is_clicked():
            grid = make_empty_grid()
            user_defined_grid = True
        if generateBTN.is_clicked():
            if inputBox.val > 81:
                inputBox.val = 81  # a 9x9 board has at most 81 empty cells
            grid = create_sudoku(inputBox.val)
            user_defined_grid = False
        if StepsBox.is_clicked():
            StepsBox.click()
            time.sleep(0.1)  # debounce: avoid toggling every frame
        if MVRbox.is_clicked():
            MVRbox.click()
            time.sleep(0.1)  # debounce
        if solveBTN.is_clicked():
            if not user_defined_grid:
                # On a generated board, discard the player's entries first.
                for row in grid:
                    for cell in row:
                        if cell.changed:
                            cell.val = 0
                            cell.changed = False
            backtracking_soduko(lambda: draw(WIN, grid, WIDTH, buttons), grid, MVRbox.clicked, StepsBox.clicked)
        draw(WIN, grid, WIDTH, buttons)
    pygame.quit()
    quit()
# Launch the app only when run as a script.
if __name__=="__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
fd9166714314627d931b92e8df033ea9d4f2ffd2 | 54a5f5ec2c5edf924b7dc7730ee7cb2a38ac4a39 | /DataFrame_manipulation_pandas/E01_Positional_and_labeled_indexing.py | aa09f4a045dbe19bc6a45b84a5dfebd5c0c513b2 | [] | no_license | dajofischer/Datacamp | fac413ec178375cedceababaf84f6b47a61fc821 | a03d16b8f342412f1ee077f2f196ee8404e2e21c | refs/heads/master | 2020-04-05T08:38:25.361746 | 2019-03-27T20:55:57 | 2019-03-27T20:55:57 | 156,722,561 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | # Assign the row position of election.loc['Bedford']: x
# DataCamp exercise snippet: `election` is a pandas DataFrame that must be
# defined before this runs — it is NOT defined in this file.
x = 4  # row position of election.loc['Bedford']
y = 4  # column position of election['winner']
# Demonstrate that .iloc (positional) and .loc (label) reach the same cell.
print(election.iloc[x, y] == election.loc['Bedford', 'winner'])
| [
"dajofischer@gmail.com"
] | dajofischer@gmail.com |
17576e9cb6b7f73dd062b3ad563bfcb3ede7dd61 | 5de83af3e6740f959b2aa68ce8c1db228cf88c82 | /main.py | e7640f91aa587719888b6856d69b7a2ebda0393b | [] | no_license | Santoshge-Reddy/scraping-echoideas | d061dc5edd3420b7cdd4b38c71ead164e6b06bf5 | f7c5f24d66ab4e52aeb41daeadcd36b4a0eaba57 | refs/heads/master | 2022-12-18T19:48:23.003316 | 2020-09-26T17:22:19 | 2020-09-26T17:22:19 | 298,862,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,754 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.select import Select
from logging.handlers import TimedRotatingFileHandler
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException , ElementClickInterceptedException, ElementNotInteractableException, TimeoutException
from bs4 import BeautifulSoup
import os
import time
import pandas as pd
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
from openpyxl import load_workbook
class ecoideaz():
"""docstring for ecoideaz"""
def __init__(self):
super(ecoideaz, self).__init__()
# self.arg = arg
# Art & Crafts
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=132&categories_path=Art+%26+Crafts+&what_search=&location_id=0&location_id_path=&address='
# Alternative Energy
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=131&categories_path=Alternative+Energy&what_search=&location_id=0&location_id_path=&address='
# Organic Food & Drinks
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=165&categories_path=Organic+Food+%26+Drinks+&what_search=&location_id=129&location_id_path=India&address='
# green funding
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=139&categories_path=Green+Funding+&what_search=&location_id=0&location_id_path=&address='
# Beauty & Personal Care
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=188&categories_path=Beauty+%26+Personal+Care+&what_search=&location_id=0&location_id_path=&address='
# E-vehicles
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=243&categories_path=E-vehicles+&what_search=&location_id=0&location_id_path=&address='
# Eco Fashion
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=134&categories_path=Eco+Fashion+&what_search=&location_id=0&location_id_path=&address='
# Eco-friendly Packaging
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=194&categories_path=Eco-friendly+Packaging+&what_search=&location_id=0&location_id_path=&address='
# Eco-tourism
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=141&categories_path=Eco-tourism+&what_search=&location_id=0&location_id_path=&address='
# Green Architecture
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=133&categories_path=Green+Architecture+&what_search=&location_id=0&location_id_path=&address='
# Green Gadgets
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=238&categories_path=Green+Gadgets+&what_search=&location_id=0&location_id_path=&address='
# Green Innovations
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=138&categories_path=Green+Innovations+&what_search=&location_id=0&location_id_path=&address='
# Green Media
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=137&categories_path=Green+Media+&what_search=&location_id=0&location_id_path=&address='
# Green Products
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=148&categories_path=Green+Products+&what_search=&location_id=0&location_id_path=&address='
# Herbal medicine
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=240&categories_path=Herbal+medicine+&what_search=&location_id=0&location_id_path=&address='
# Organic Agriculture
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=143&categories_path=Organic+Agriculture+&what_search=&location_id=0&location_id_path=&address='
# Rural Development
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=135&categories_path=Rural+Development+&what_search=&location_id=0&location_id_path=&address='
# Training & Education
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=155&categories_path=Training+%26+Education+&what_search=&location_id=0&location_id_path=&address='
# Waste Management
# self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=136&categories_path=Waste+Management+&what_search=&location_id=0&location_id_path=&address='
# Water Conservation
self.url = 'https://www.ecoideaz.com/green-directory/business-category/alternative-energy-products-in-india/?w2dc_action=search&hash=e466dbfc00ec2e1ee7c3f50402ef55d7&controller=listings_controller&include_categories_children=1&categories=157&categories_path=Water+Conservation+&what_search=&location_id=0&location_id_path=&address='
self.information = {}
self.scrap()
    def scrap(self):
        """Launch a Chrome WebDriver configured for scraping and start
        crawling at ``self.url``."""
        options = webdriver.ChromeOptions()
        options.add_argument("start-maximized")
        # Hide the "Chrome is being controlled by automated software" banner.
        options.add_experimental_option("excludeSwitches", ["enable-automation"])
        options.add_experimental_option('useAutomationExtension', False)
        options.add_argument("--disable-notifications")
        # config = configparser.ConfigParser()
        # Download PDFs instead of opening them in the built-in viewer.
        prefs = {"plugins.always_open_pdf_externally": True}
        options.add_experimental_option("prefs",prefs)
        options.add_experimental_option("excludeSwitches",["ignore-certificate-errors"])
        # options.add_argument('--disable-gpu')
        # options.add_argument('--headless')
        dirpath = os.getcwd()  # NOTE(review): unused
        foldername = os.path.dirname(os.path.realpath(__file__))
        # chromedriver.exe is expected next to this script (Windows layout).
        driver = webdriver.Chrome(options=options, executable_path= foldername + '/chromedriver.exe')
        driver.get(self.url)
        self.open(driver)
    def open(self, driver):
        """Recursively page through the listing results.

        Waits up to 10s for the "next page" button; when it never appears
        (TimeoutException) the current page is treated as the last one and
        harvested via ``collectData``. When a click is intercepted by an
        overlay, the overlay is dismissed and the same page is retried.

        NOTE(review): each page adds one recursion level, so a very long
        result set could hit Python's recursion limit.
        """
        # driver.get(self.url)
        # print(driver.find_element_by_xpath('//*[@id="w2dc-controller-e466dbfc00ec2e1ee7c3f50402ef55d7"]/div/button'))
        nextButton = []
        try:
            nextButton = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, '//*[@id="w2dc-controller-e466dbfc00ec2e1ee7c3f50402ef55d7"]/div/button')))
            # nextButton[0].click()
        except TimeoutException:
            # No pagination button within 10s -> assume final page, harvest it.
            self.collectData(driver)
            # print(self.information)
        if len(nextButton) > 0:
            try:
                nextButton[0].click()
                self.open(driver)
            except ElementClickInterceptedException:
                # Something (e.g. an overlay/ad) covered the button; try to
                # dismiss it, then retry this page.
                # time.sleep(30)
                # driver.implicitly_wait(30)
                # if driver.find_element_by_xpath('//*[@id="custom_field_submit_102973"]'):
                # driver.switch_to_alert()
                try:
                    driver.find_element_by_xpath('/html/body/div/a').click()
                except Exception as e:
                    print(e)
                # print('waited 30')
                # prevButton = WebDriverWait(driver, 30).until(EC.presence_of_all_elements_located((By.XPATH, "/html/body/div/a")))
                # prevButton[0].click()
                # print('==========================')
                # print('clicked')
                # print('==========================')
                self.open(driver)
            except ElementNotInteractableException:
                self.collectData(driver)
            except TimeoutException:
                pass
    def collectData(self,driver):
        """Harvest all listings on the current results page.

        Pass 1: record title/category/detail-link for every ``<article>``.
        Pass 2: visit each detail page and scrape the optional fields
        (description, email, address, website), then append everything to
        output.xlsx on a sheet named after a listing category.

        NOTE(review): output.xlsx must already exist (``load_workbook``
        raises otherwise), and ``sheet_name`` keeps only the category of
        the LAST listing processed in the loop.
        """
        articles = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, '//article')))
        for article in articles:
            soup = BeautifulSoup(article.get_attribute('innerHTML'), 'lxml')
            title = soup.select_one('header a').text
            category = soup.select_one('.w2dc-label-primary a').text
            href = soup.select_one('header a')['href']
            self.information[title] = {
                'title' : title,
                'category': category,
                'link': href
            }
        for info in self.information:
            # print(self.information[info])
            url = self.information[info]['link']
            driver.get(url)
            articles = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, '//article')))
            soup = BeautifulSoup(articles[0].get_attribute('innerHTML'), 'lxml')
            # Each detail field is optional on the site, hence one
            # try/except per field below.
            # description = soup.select_one('.w2dc-field-content.w2dc-field-description').text
            # website = soup.select_one('.w2dc-field-output-block-website a').text
            # address = soup.select_one('.w2dc-location .w2dc-show-on-map').text
            # email = soup.select_one('.w2dc-field-output-block.w2dc-field-output-block-email .w2dc-field-content a').text
            try:
                self.information[info]['description'] = BeautifulSoup(soup.select_one('.w2dc-field-content.w2dc-field-description').text, "lxml").text
            except Exception as e:
                pass
            try:
                self.information[info]['email'] = soup.select_one('.w2dc-field-output-block.w2dc-field-output-block-email .w2dc-field-content a').text
            except Exception as e:
                pass
            try:
                self.information[info]['address'] = BeautifulSoup(soup.select_one('.w2dc-location .w2dc-show-on-map').text, "lxml").text
            except Exception as e:
                pass
            try:
                self.information[info]['website'] = soup.select_one('.w2dc-field-output-block-website a').text
            except Exception as e:
                pass
            sheet_name = self.information[info]['category']
        # print('to excel')
        # Rows are listings (transpose puts one listing per row).
        df_information = pd.DataFrame(self.information)
        result = df_information.transpose()
        # result.to_excel("output.xlsx")
        path = r"output.xlsx"
        # writer = pd.ExcelWriter(path, engine='xlsxwriter')
        writer = pd.ExcelWriter(path, engine='openpyxl')
        book = load_workbook(path)
        writer.book = book
        # if os.path.exists(path):
        # book = openpyxl.load_workbook(file_name)
        # writer.book = book
        result.to_excel(writer, sheet_name = sheet_name)
        writer.save()
        writer.close()
if __name__ == '__main__':
    # Constructing the scraper runs the whole crawl (__init__ calls scrap()).
    ecoideaz()
# print(result) | [
"g.santosh.sunny@gmail.com"
] | g.santosh.sunny@gmail.com |
9954328c0d050bb4d64a911f7461a367bf36a59f | 8c50265b43add0e91e30245cc7af3c2558c248f5 | /tests/python/gpu/test_tvm_bridge.py | 5c87536bdbaea32571012424a4db77dab00c19ed | [
"BSD-3-Clause",
"BSD-2-Clause-Views",
"Zlib",
"Apache-2.0",
"BSD-2-Clause",
"Intel"
] | permissive | awslabs/dynamic-training-with-apache-mxnet-on-aws | 6a67f35d7e4b12fa8bba628bd03b2b031924e211 | 1063a979417fee8c820af73860eebd2a4f670380 | refs/heads/master | 2023-08-15T11:22:36.922245 | 2022-07-06T22:44:39 | 2022-07-06T22:44:39 | 157,440,687 | 60 | 19 | Apache-2.0 | 2022-11-25T22:23:19 | 2018-11-13T20:17:09 | Python | UTF-8 | Python | false | false | 2,440 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test TVM bridge, only enable this when TVM is available"""
import logging
import mxnet as mx
import numpy as np
import unittest
def test_tvm_bridge():
    """Smoke-test the MXNet<->TVM bridge on CPU ("llvm") and GPU ("cuda").

    The test returns early (skips) when TVM is not installed.
    """
    # only enable test if TVM is available
    try:
        import tvm
        import tvm.contrib.mxnet
        import topi
    except ImportError:
        # `Logger.warn` is a deprecated alias of `warning`; use the
        # canonical spelling.
        logging.warning("TVM bridge test skipped because TVM is missing...")
        return

    def check(target, dtype):
        """Build a TVM elementwise kernel for *target*/*dtype*, invoke it
        through MXNet and compare against a NumPy reference."""
        shape = (20,)
        scale = tvm.var("scale", dtype="float32")
        x = tvm.placeholder(shape, dtype=dtype)
        y = tvm.placeholder(shape, dtype=dtype)
        z = tvm.compute(shape, lambda i: x[i] + y[i])
        zz = tvm.compute(shape, lambda *i: z(*i) * scale.astype(dtype))
        ctx = mx.gpu(0) if target == "cuda" else mx.cpu(0)
        target = tvm.target.create(target)

        # build the function
        with target:
            s = topi.generic.schedule_injective(zz)
            f = tvm.build(s, [x, y, zz, scale])

        # get a mxnet version
        mxf = tvm.contrib.mxnet.to_mxnet_func(f, const_loc=[0, 1])
        xx = mx.nd.uniform(shape=shape, ctx=ctx).astype(dtype)
        yy = mx.nd.uniform(shape=shape, ctx=ctx).astype(dtype)
        zz = mx.nd.empty(shape=shape, ctx=ctx).astype(dtype)
        # invoke myf: this runs in mxnet engine
        mxf(xx, yy, zz, 10.0)
        np.testing.assert_allclose(
            zz.asnumpy(), (xx.asnumpy() + yy.asnumpy()) * 10)

    for tgt in ["llvm", "cuda"]:
        for dtype in ["int8", "uint8", "int64",
                      "float32", "float64"]:
            check(tgt, dtype)
if __name__ == "__main__":
    # NOTE(review): nose is unmaintained; these tests also run under pytest.
    import nose
    nose.runmodule()
| [
"vikumar@88e9fe53272d.ant.amazon.com"
] | vikumar@88e9fe53272d.ant.amazon.com |
bf67baf77b4244a60357537840bd859b20e6bc0d | 44214313c398eb7a4e4f34562b3dc3aa1338c823 | /linked_list/impl_hashmap.py | d779bd9cee9073f1444de2b64e5418487f49a2f8 | [] | no_license | arnabs542/python_leetcode | fa76ade7c8e9368714f297c18e1afae257fbd166 | 5f8a28f7b9ec7f339e881b9b3c5977fcba02d63e | refs/heads/master | 2023-02-12T19:15:31.585165 | 2021-01-06T03:40:26 | 2021-01-06T03:40:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,573 | py | import unittest
from typing import Optional, List
class ListNode:
    """Singly linked key/value node used for separate chaining."""

    def __init__(self, key: int, value: int):
        """Store the pair; a new node starts detached (no successor)."""
        self.key: int = key
        self.value: int = value
        self.next: Optional["ListNode"] = None
# 看过Rust源码就知道,HashSet其实就是value=()的HashMap
class MyHashMap:
    """Hash map implemented with separate chaining over dummy-headed lists.

    Keys and values are non-negative integers (LeetCode 706 contract);
    ``get`` returns -1 for a missing key.

    Bug fixes versus the original:
      * ``[ListNode(...)] * capacity`` put the SAME dummy node in every
        bucket slot, collapsing the whole table into one shared chain.
        Each bucket now gets its own dummy head.
      * ``_rehashing`` allocated ``capacity * new_capacity`` slots instead
        of ``new_capacity`` and also copied the dummy heads (key=-1) as if
        they were data entries.
    """

    class _Node:
        """Internal singly linked chain node."""

        __slots__ = ('key', 'value', 'next')

        def __init__(self, key: int, value: int, next=None):
            self.key = key
            self.value = value
            self.next = next

    def __init__(self):
        """Initialize your data structure here."""
        # Number of key/value pairs currently stored.
        self.elements_count: int = 0
        # Length of the internal bucket array.
        self.capacity: int = 16384
        # One DISTINCT dummy head per bucket.
        self.bucket = [self._Node(-1, 0) for _ in range(self.capacity)]

    def put(self, key: int, value: int) -> None:
        """Insert *key* -> *value*, overwriting any previous value."""
        # Grow when the load factor exceeds 1/5.
        if self.elements_count * 5 > self.capacity:
            self._rehashing(self.capacity * 2)
        head = self.bucket[key % self.capacity]
        curr = head.next
        while curr is not None:
            if curr.key == key:
                # Key already present: update the value in place.
                curr.value = value
                return
            curr = curr.next
        # Prepend a new node right after the dummy head.
        head.next = self._Node(key, value, head.next)
        self.elements_count += 1

    def get(self, key: int) -> int:
        """Return the value mapped to *key*, or -1 when absent."""
        curr = self.bucket[key % self.capacity].next
        while curr is not None:
            if curr.key == key:
                return curr.value
            curr = curr.next
        return -1

    def remove(self, key: int) -> None:
        """Delete *key* from the map if present (no-op otherwise)."""
        prev = self.bucket[key % self.capacity]
        curr = prev.next
        while curr is not None:
            if curr.key == key:
                prev.next = curr.next
                self.elements_count -= 1
                return
            prev = curr
            curr = curr.next

    def contains(self, key: int) -> bool:
        """Return True when *key* is stored in the map."""
        curr = self.bucket[key % self.capacity].next
        while curr is not None:
            if curr.key == key:
                return True
            curr = curr.next
        return False

    def _rehashing(self, new_capacity: int):
        """Re-distribute every entry into *new_capacity* fresh buckets."""
        new_bucket = [self._Node(-1, 0) for _ in range(new_capacity)]
        for head in self.bucket:
            curr = head.next  # skip the dummy head; it carries no data
            while curr is not None:
                dummy = new_bucket[curr.key % new_capacity]
                dummy.next = self._Node(curr.key, curr.value, dummy.next)
                curr = curr.next
        self.bucket = new_bucket
        self.capacity = new_capacity
class Testing(unittest.TestCase):
def test_remove(self):
m = MyHashMap()
m.put(1, 1)
m.put(129, 129)
m.put(257, 257)
# 257->129->1
m.remove(129)
# 257->1
self.assertEqual(257, m.get(257))
self.assertEqual(-1, m.get(129))
self.assertEqual(1, m.get(1))
m.remove(1)
self.assertEqual(257, m.get(257))
self.assertEqual(-1, m.get(129))
self.assertEqual(-1, m.get(1))
m.remove(257)
self.assertEqual(-1, m.get(257))
self.assertEqual(-1, m.get(129))
self.assertEqual(-1, m.get(1))
m.put(129, 129)
m.put(1, 1)
self.assertEqual(-1, m.get(257))
self.assertEqual(129, m.get(129))
self.assertEqual(1, m.get(1))
m.remove(257)
self.assertEqual(-1, m.get(257))
self.assertEqual(129, m.get(129))
self.assertEqual(1, m.get(1))
m.remove(129)
self.assertEqual(-1, m.get(257))
self.assertEqual(-1, m.get(129))
self.assertEqual(1, m.get(1))
def test_my_hash_map(self):
my_map = MyHashMap()
my_map.put(1, 1)
my_map.put(2, 2)
self.assertEqual(1, my_map.get(1))
self.assertEqual(-1, my_map.get(3))
my_map.put(2, 1)
self.assertEqual(1, my_map.get(2))
my_map.remove(2)
self.assertEqual(-1, my_map.get(2))
my_map.put(16, 1)
| [
"pylint@yandex.com"
] | pylint@yandex.com |
46a5fedeb3b54d0277f030f2c981213929b66a7d | 0f05d92b4beae4876e1444aea2ae161df2a38698 | /transformer-function.py | 1924ed29a6668c567b20fbbf0e187f9815a86fc8 | [] | no_license | toty1992/test-remote-python | b51ac9d083bcd091014e72bb1ca27d41c33c5533 | 79d3fbe5bf0fccef07d605a53cf7e6bd1318ef9e | refs/heads/master | 2023-02-11T02:24:07.782717 | 2020-12-23T12:29:46 | 2020-12-23T12:29:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | import json
payload='{"assetId":10}'
body='{"name":"Prueba"}'
def json_load_byteified(file_handle):
    """json.load wrapper returning UTF-8 byte strings instead of unicode.

    Python 2 only (see _byteify).
    """
    return _byteify(
        json.load(file_handle, object_hook=_byteify),
        ignore_dicts=True
    )
def json_loads_byteified(json_text):
    """json.loads wrapper returning UTF-8 byte strings instead of unicode.

    Python 2 only (see _byteify).
    """
    return _byteify(
        json.loads(json_text, object_hook=_byteify),
        ignore_dicts=True
    )
def _byteify(data, ignore_dicts = False):
    """Recursively convert unicode strings inside *data* to UTF-8 bytes.

    Python 2 only: relies on the `unicode` builtin and `dict.iteritems`,
    neither of which exists on Python 3.
    """
    # if this is a unicode string, return its string representation
    if isinstance(data, unicode):
        return data.encode('utf-8')
    # if this is a list of values, return list of byteified values
    if isinstance(data, list):
        return [ _byteify(item, ignore_dicts=True) for item in data ]
    # if this is a dictionary, return dictionary of byteified keys and values
    # but only if we haven't already byteified it
    if isinstance(data, dict) and not ignore_dicts:
        return {
            _byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
            for key, value in data.iteritems()
        }
    # if it's anything else, return it in its original form
    return data
# Parse the module-level JSON fixtures into (Python 2) byte-string dicts.
origin=json_loads_byteified(payload)
extension=json_loads_byteified(body)
def function(orig, ext):
    """Merge *ext* into *orig* in place and return the mutated *orig*."""
    for key, value in ext.items():
        orig[key] = value
    return orig
returnvalue=function(origin, extension) | [
"gustavo@nalabs.io"
] | gustavo@nalabs.io |
348e8867c5eac121bb77e878052e88b79e701348 | 4edba43e7b6bd979e5389b9b7f01abc6f3fba247 | /survival analysis.py | e09b422cee84e96dcab3231198eb52ca55fe3f48 | [] | no_license | inbaa/Big_data | 4eca7d221b8c8f4f2b37aba331134fc6d8c724fe | fdbbf910c77d86b7b08b2be7a913295b749af87f | refs/heads/main | 2023-02-02T00:18:25.652228 | 2020-12-16T14:09:52 | 2020-12-16T14:09:52 | 310,806,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,070 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from lifelines import KaplanMeierFitter
# load dataset
# load dataset (NCCTG lung cancer data; `time` in days, `status` 1/2)
data = pd.read_csv("lung.csv")
# print(data.head())
# print(data.columns)
# data.info()
# print(data.describe())
# sex distribution histogram
# print(data["sex"].hist())
# plt.show()
kmf= KaplanMeierFitter() # Create an object for Kaplan-Meier-Fitter
#Organize the data: recode `status` into a 0/1 event indicator `dead`
data.loc[data["status"]==1, "dead"]=0 # if status is 1 then dead =0
data.loc[data["status"]==2, "dead"]=1 # (status 1 - censored, 2 - dead )
# print(data.head())
# Fitting our data into an object
kmf.fit(data["time"],data["dead"])
kmf.plot() # plot diagram
# plt.title('Kaplan-Meier Estimate')
# plt.ylabel('Probability 1-Alive 0-Dead')
# plt.xlabel('Number of Days')
# plt.show()
print(kmf.event_table) #Generate event table
######## Survival probability at t=0 only: (at_risk - observed) / at_risk
event_at_0= kmf.event_table.iloc[0,:]
survival_for_0= (event_at_0.at_risk - event_at_0.observed)/ event_at_0.at_risk
print("Surival probability at time 0 only is : ", survival_for_0)
######## Survival probability at t=5 only
event_at_5= kmf.event_table.iloc[1,:]
survival_for_5= (event_at_5.at_risk - event_at_5.observed)/ event_at_5.at_risk
print("Surival probability at time 5 only is : ", survival_for_5)
######## Survival probability at t=13 only
event_at_13= kmf.event_table.iloc[4,:]
survival_for_13= (event_at_13.at_risk - event_at_13.observed)/ event_at_13.at_risk
print("Surival probability at time 13 only is : ", survival_for_13)
##### Survival probability probability after 5 days (for t= 5):
##### product of the per-time conditional survival probabilities
survival_after_5= survival_for_0 * survival_for_5
print("\nSurvival Probability after 5 days : ", survival_after_5)
#### Automate the work we've done above with kmf.predict
print("\nSurvival Probability after 5 days : ",kmf.predict(5) )
print("Survival Probability after 3 days : ",kmf.predict(13) )
print("Survival Probability after 1022 days : ",kmf.predict(1022) )
#### Survival probability for whole timeline
print("\n",kmf.survival_function_)
| [
"noreply@github.com"
] | noreply@github.com |
0147b6a179ae2b8656bba2b0077a7f694d3554a3 | dbe293cc0c4f593d54f4943e6136f8222fdf787f | /util/jsonfunction.py | 7056f9f3a2ceb2d19e5ce81be21ed6c021d753a2 | [
"Apache-2.0"
] | permissive | gquesnot/BotAi | eb181170782abdb862c17af172fe31a5dabc8671 | 5ebdca312d57a484da99e970b73bcba5a700c5e2 | refs/heads/master | 2023-06-26T23:09:23.494093 | 2021-08-03T12:30:18 | 2021-08-03T12:30:18 | 392,306,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | import os
import json
def getJson(name, directory=""):
    """Load and return the parsed content of json/<directory>/<name>.json.

    Returns an empty list when no matching file exists (kept for backward
    compatibility with callers that append to the result).
    """
    directory = "json/" + directory
    target = name + ".json"
    # Exact-name match. The original substring test
    # (`name + ".json" in file`) also matched unrelated files such as
    # "x<name>.json" or "<name>.json.bak".
    for file in os.listdir(directory):
        if file == target:
            with open(os.path.join(directory, file)) as jsonFile:
                return json.load(jsonFile)
    return []
def applyJsonConfig(obj, name, directory=""):
    """Set each key/value pair from json/<directory>/<name>.json as an
    attribute on *obj*.

    NOTE(review): assumes the file exists and holds a JSON object —
    getJson returns [] for a missing file, and [].items() would raise
    AttributeError here.
    """
    for k, v in getJson(name, directory=directory).items():
        setattr(obj, k, v)
def toJson(name, data, directory=""):
    """Serialize *data* as pretty-printed JSON to json/<directory>/<name>.json."""
    target = os.path.join("json/" + directory, name + ".json")
    with open(target, 'w') as handle:
        json.dump(data, handle, indent=2)
def appendJson(name, data, directory=""):
    """Append *data* to the JSON list stored in json/<directory>/<name>.json.

    The 'verifiedLol' dataset has its transient registration fields
    stripped and its two-part email joined before being stored.
    """
    # BUG FIX: arguments were passed as getJson(directory, name) — swapped
    # relative to getJson(name, directory="") — so the existing datastore
    # was never found and every call overwrote the file with a one-element
    # list.
    datastore = getJson(name, directory)
    if name == "verifiedLol":
        del data['birthdate']
        del data['confirm_password']
        mail = data['email'][0] + "@" + data['email'][1]
        data['email'] = mail
    datastore.append(data)
    directory = "json/" + directory
    with open(os.path.join(directory, name + ".json"), 'w') as f:
        json.dump(datastore, f, indent=2)
def jsonPrint(dataName, data):
    """Pretty-print *data* to stdout, prefixed with its label."""
    rendered = json.dumps(data, indent=2)
    print(dataName + ":", rendered)
"gaqu1994@gmail.com"
] | gaqu1994@gmail.com |
c02543ad31376c3ca8be4f60fb119a5173e5513e | 5b4eb35a89a68e19eb2b94817e49de59669be375 | /basic/socket/tcp_client.py | ac300f6c12ad274f89ecacfdcefbe0a54eb7a2f2 | [] | no_license | AdidasOriginals/python-practice | 384fae418e53df375e0bd807324b5909b20d1740 | eb2d5d7dee9c471c2c5d66839e7471eaff593ea0 | refs/heads/main | 2023-03-04T18:09:10.516990 | 2021-02-26T07:27:32 | 2021-02-26T07:27:32 | 335,481,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/2/1 17:20
# @Author : Edison
# @Version:V 0.1
# @File : tcp_client.py
# @desc :TCP客户端
# 服务器并没有使用多线程或者异步I/O的处理方式,这也就意味着当服务器与一个客户端处于通信状态时,其他的客户端只能排队等待
from socket import socket
def main():
    """Connect to the local TCP server, print one received message, exit."""
    # 1. Create a socket object (defaults to IPv4 + TCP).
    client = socket()
    # 2. Connect to the server (IP address and port required).
    client.connect(('localhost', 6789))
    # 3. Receive up to 1024 bytes from the server and decode as UTF-8.
    print(client.recv(1024).decode('utf-8'))
    client.close()
if __name__ == '__main__':
    main()
| [
"liutao@lrhealth.com"
] | liutao@lrhealth.com |
cc0651944f155718b8bf666b33af4dc9e98e5e3a | 5f0627c06abe22ae25c50b4ec66fb5510e6a4ef4 | /mysite/polls/views.py | 85c850d66cdd97d2f20d4a856a675ab0d18fd009 | [] | no_license | asmao7/cmput404-lab4 | e883760b35b173e5dbde5172d203b70b0af0324b | 33bbd68e8e6524bfcb96ac119e6df3f5d072af0e | refs/heads/main | 2023-03-01T20:37:29.637115 | 2021-02-04T01:57:04 | 2021-02-04T01:57:04 | 335,809,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from .models import Choice, Question
def index(request):
    """Render the five most recently published questions."""
    latest_question_list = Question.objects.order_by('-pub_date')[:5]
    context = {'latest_question_list': latest_question_list}
    return render(request, 'polls/index.html', context)
def detail(request, question_id):
    """Render the voting form for one question (404 when it is missing)."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/detail.html', {'question': question})
def results(request, question_id):
    """Render the vote tallies for one question (404 when it is missing)."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/results.html', {'question': question})
def vote(request, question_id):
    """Record a vote for one of the question's choices (POST handler)."""
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # NOTE(review): read-modify-write increment is not atomic; consider
        # F('votes') + 1 to avoid lost updates under concurrent votes.
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('results', args=(question.id,))) # 'polls:results'
"aaali@ualberta.ca"
] | aaali@ualberta.ca |
9e8684300a753747f2ea81503addd0bd9141eee2 | 7ef5898dc861f7a5512953269db7b52d44f44bc5 | /linkml/utils/validation.py | 02ee97f62911680b3e797aa9db26dcd3bd75f727 | [
"CC0-1.0"
] | permissive | balhoff/linkml | eb5c26e9d8ace3c2a7a6f2f36872b9c2af7b97df | b27c36b24437f68878806518264f55f0f418cb0b | refs/heads/main | 2023-07-28T16:47:04.974232 | 2021-09-09T01:39:21 | 2021-09-09T01:39:21 | 404,550,589 | 0 | 0 | CC0-1.0 | 2021-09-09T01:45:37 | 2021-09-09T01:45:37 | null | UTF-8 | Python | false | false | 1,483 | py | import json
import sys
from typing import Type, Union, TextIO
import logging
import click
import jsonschema
from linkml_runtime.linkml_model import SchemaDefinition
from linkml_runtime.utils.yamlutils import as_dict, YAMLRoot
from linkml_runtime.dumpers import json_dumper
from linkml.generators.jsonschemagen import JsonSchemaGenerator
import linkml.utils.datautils as datautils
def _as_dict(inst):
    """Round-trip *inst* through the JSON dumper to obtain a plain dict,
    dropping the '@type' discriminator added by the dumper.

    NOTE(review): `del inst_dict['@type']` raises KeyError if the dumper
    ever omits that key — confirm against json_dumper's contract.
    """
    # TODO: replace this with linkml_runtime.dictutils when 1.0.14 is released
    inst_dict = json.loads(json_dumper.dumps(element=inst))
    del inst_dict['@type']
    return inst_dict
def validate_object(data: YAMLRoot, schema: Union[str, TextIO, SchemaDefinition], target_class: Type[YAMLRoot] = None,
                    closed: bool = True):
    """
    Validates instance data against a schema by generating a JSON Schema
    from the LinkML schema and delegating to jsonschema.validate.

    :param data: LinkML instance to be validated
    :param schema: LinkML schema (path, stream, or SchemaDefinition)
    :param target_class: class in schema to validate against; defaults to type(data)
    :param closed: passed (negated) as ``not_closed`` to JsonSchemaGenerator;
        presumably controls whether undeclared slots are rejected — see
        JsonSchemaGenerator docs
    :return: None on success; raises jsonschema.ValidationError on failure
    """
    if target_class is None:
        target_class = type(data)
    inst_dict = _as_dict(data)
    not_closed = not closed
    jsonschemastr = JsonSchemaGenerator(schema, mergeimports=True, top_class=target_class.class_name,
                                        not_closed=not_closed).serialize(not_closed=not_closed)
    jsonschema_obj = json.loads(jsonschemastr)
    return jsonschema.validate(inst_dict, schema=jsonschema_obj)
if __name__ == '__main__':
    # NOTE(review): delegates to datautils.cli rather than a CLI defined in
    # this module — confirm this entry point is intentional.
    datautils.cli(sys.argv[1:])
"cjm@berkeleybop.org"
] | cjm@berkeleybop.org |
273f4888a203ea14ddd21d9761a93c7258b60b68 | 5f0809a437fe6d12f2b46d08dfe2a4d55a89bbcc | /app/core/management/commands/wait_for_db.py | 42f1fba6d1509ca4f17241b70fc1787b26a9ffa4 | [] | no_license | Ian021/recipe-api | a86bbadfe1d366c233ba57e94688dc20eba95b8c | c439bfb71bbfb2b556f7133752f219ba8d8b3530 | refs/heads/master | 2022-12-14T20:59:24.995146 | 2020-09-10T23:41:45 | 2020-09-10T23:41:45 | 278,719,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django command to pause execution until database is available"""

    def handle(self, *args, **options):
        """Poll the default DB connection once per second until it answers."""
        self.stdout.write('Waiting for the database')
        db_conn = None
        while db_conn is None:
            try:
                # NOTE(review): indexing `connections` may not actually open
                # a connection; calling `.cursor()` on it is the usual
                # liveness check — confirm this raises as intended.
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable. Waiting 1 sec...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database is available!'))
| [
"ianx021@gmail.com"
] | ianx021@gmail.com |
587af0a9afab30d6dbe975f04b48b2543833db22 | 51507929d5bf732e6e5b7085015b86d097fc404d | /python/core/keyset_writer.py | 4e417f4417071723ba236115a738874609d779e8 | [
"Apache-2.0"
] | permissive | jojodeco2/tink | a77be3fd6958070c131f4d556b349b69b65e11cb | 46d4d5d6ff09f594c5460216c5b2cb11486076db | refs/heads/master | 2020-08-04T04:46:05.526255 | 2019-10-01T10:21:02 | 2019-10-01T10:21:02 | 212,011,212 | 0 | 0 | Apache-2.0 | 2019-10-01T04:18:08 | 2019-10-01T04:18:08 | null | UTF-8 | Python | false | false | 3,498 | py | # Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes Keysets to file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import abc
import io
from google.protobuf import json_format
from tink.proto import tink_pb2
from tink.python.core import tink_error
class KeysetWriter(object):
  """Knows how to write keysets to some storage system."""

  # NOTE(review): `__metaclass__` is Python 2 syntax and has no effect under
  # Python 3, so this class is not actually abstract there. The Py3 spelling
  # is `class KeysetWriter(metaclass=abc.ABCMeta)`.
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def write(self, keyset: tink_pb2.Keyset) -> None:
    """Tries to write a tink_pb2.Keyset to some storage system."""
    pass

  @abc.abstractmethod
  def write_encrypted(self, encrypted_keyset: tink_pb2.EncryptedKeyset) -> None:
    """Tries to write an tink_pb2.EncryptedKeyset to some storage system."""
    pass
class JsonKeysetWriter(KeysetWriter):
  """Writes keysets in proto JSON wire format to some storage system.

  cf. https://developers.google.com/protocol-buffers/docs/encoding
  """

  def __init__(self, text_io_stream: io.TextIOBase):
    self._io_stream = text_io_stream

  def _write_json(self, msg) -> None:
    """Serialize *msg* as proto-JSON, write it and flush the stream.

    Shared by write/write_encrypted (previously duplicated verbatim).
    """
    json_keyset = json_format.MessageToJson(msg)
    # TODO(b/141106504) Needed for python 2.7 compatibility. StringIO expects
    # unicode, but MessageToJson outputs UTF-8.
    if isinstance(json_keyset, bytes):
      json_keyset = json_keyset.decode('utf-8')
    self._io_stream.write(json_keyset)
    self._io_stream.flush()

  def write(self, keyset: tink_pb2.Keyset) -> None:
    """Write a tink_pb2.Keyset; raises TinkError on a wrong type."""
    if not isinstance(keyset, tink_pb2.Keyset):
      raise tink_error.TinkError('invalid keyset.')
    self._write_json(keyset)

  def write_encrypted(self, encrypted_keyset: tink_pb2.EncryptedKeyset) -> None:
    """Write a tink_pb2.EncryptedKeyset; raises TinkError on a wrong type."""
    if not isinstance(encrypted_keyset, tink_pb2.EncryptedKeyset):
      raise tink_error.TinkError('invalid encrypted keyset.')
    self._write_json(encrypted_keyset)
class BinaryKeysetWriter(KeysetWriter):
  """Writes keysets in proto binary wire format to some storage system.

  cf. https://developers.google.com/protocol-buffers/docs/encoding
  """

  def __init__(self, binary_io_stream: io.BufferedIOBase):
    self._io_stream = binary_io_stream

  def _write_serialized(self, msg) -> None:
    """Write *msg*'s binary serialization and flush the stream.

    Shared by write/write_encrypted (previously duplicated verbatim).
    """
    self._io_stream.write(msg.SerializeToString())
    self._io_stream.flush()

  def write(self, keyset: tink_pb2.Keyset) -> None:
    """Write a tink_pb2.Keyset; raises TinkError on a wrong type."""
    if not isinstance(keyset, tink_pb2.Keyset):
      raise tink_error.TinkError('invalid keyset.')
    self._write_serialized(keyset)

  def write_encrypted(self, encrypted_keyset: tink_pb2.EncryptedKeyset) -> None:
    """Write a tink_pb2.EncryptedKeyset; raises TinkError on a wrong type."""
    if not isinstance(encrypted_keyset, tink_pb2.EncryptedKeyset):
      raise tink_error.TinkError('invalid encrypted keyset.')
    self._write_serialized(encrypted_keyset)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
5651d66b1dd3f7adb98ce5c7bc17e2acfe92784a | 174620e5937ac217cfdc46fa1f58493e9d59dfdd | /lib/default/lib/python2.7/site-packages/celery/concurrency/base.py | e0f2eb514c23941ee91fd0003917de8230cc1dac | [] | no_license | Saifinbox/CKANPROJECT | 6552912317019ce7dca87a1367344dbf5d978062 | 89e1cac49b282106ff4595f54a4eb84bcc8d2ee9 | refs/heads/master | 2021-01-01T06:34:37.568829 | 2017-07-17T08:48:46 | 2017-07-17T08:48:46 | 97,453,740 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,732 | py | # -*- coding: utf-8 -*-
"""
celery.concurrency.base
~~~~~~~~~~~~~~~~~~~~~~~
TaskPool interface.
"""
from __future__ import absolute_import
import logging
import os
import time
from kombu.utils.encoding import safe_repr
from celery.utils import timer2
from celery.utils.log import get_logger
logger = get_logger('celery.concurrency')
def apply_target(target, args=(), kwargs={}, callback=None,
                 accept_callback=None, pid=None, **_):
    """Run *target* synchronously and hand its result to *callback*.

    When *accept_callback* is given it is first notified with the worker
    pid (defaulting to this process) and the current timestamp.
    """
    if accept_callback:
        worker_pid = pid or os.getpid()
        accept_callback(worker_pid, time.time())
    result = target(*args, **kwargs)
    callback(result)
class BasePool(object):
    """Abstract base class for celery execution pools.

    Concrete pools override the ``on_*`` hook methods; the public
    ``start``/``stop``/``terminate``/``close`` methods drive the
    RUN -> CLOSE/TERMINATE state machine stored in ``_state``.
    """

    # Pool lifecycle states.
    RUN = 0x1
    CLOSE = 0x2
    TERMINATE = 0x3

    Timer = timer2.Timer

    #: set to true if the pool can be shutdown from within
    #: a signal handler.
    signal_safe = True

    #: set to true if pool supports rate limits.
    #: (this is here for gevent, which currently does not implement
    #: the necessary timers).
    rlimit_safe = True

    #: set to true if pool requires the use of a mediator
    #: thread (e.g. if applying new items can block the current thread).
    requires_mediator = False

    #: set to true if pool uses greenlets.
    is_green = False

    _state = None
    _pool = None

    #: only used by multiprocessing pool
    uses_semaphore = False

    def __init__(self, limit=None, putlocks=True, forking_enable=True,
                 **options):
        self.limit = limit
        self.putlocks = putlocks
        self.options = options
        self.forking_enable = forking_enable
        # Cache the debug-enabled flag so apply_async can skip formatting.
        self._does_debug = logger.isEnabledFor(logging.DEBUG)

    # -- Subclass hook points (no-ops by default) ------------------------

    def on_start(self):
        pass

    def did_start_ok(self):
        return True

    def on_stop(self):
        pass

    def on_apply(self, *args, **kwargs):
        pass

    def on_terminate(self):
        pass

    def on_soft_timeout(self, job):
        pass

    def on_hard_timeout(self, job):
        pass

    def maybe_handle_result(self, *args):
        pass

    def maintain_pool(self, *args, **kwargs):
        pass

    def terminate_job(self, pid):
        raise NotImplementedError(
            '%s does not implement kill_job' % (self.__class__, ))

    def restart(self):
        raise NotImplementedError(
            '%s does not implement restart' % (self.__class__, ))

    # -- Lifecycle -------------------------------------------------------

    def stop(self):
        """Run the stop hook, then mark the pool terminated."""
        self.on_stop()
        self._state = self.TERMINATE

    def terminate(self):
        """Mark the pool terminated, then run the terminate hook."""
        self._state = self.TERMINATE
        self.on_terminate()

    def start(self):
        """Run the start hook, then mark the pool running."""
        self.on_start()
        self._state = self.RUN

    def close(self):
        """Mark the pool closed, then run the close hook."""
        self._state = self.CLOSE
        self.on_close()

    def on_close(self):
        pass

    def init_callbacks(self, **kwargs):
        pass

    def apply_async(self, target, args=[], kwargs={}, **options):
        """Equivalent of the :func:`apply` built-in function.

        Callbacks should optimally return as soon as possible since
        otherwise the thread which handles the result will get blocked.
        """
        if self._does_debug:
            logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
                         target, safe_repr(args), safe_repr(kwargs))

        return self.on_apply(target, args, kwargs,
                             waitforslot=self.putlocks,
                             **options)

    def _get_info(self):
        return {}

    @property
    def info(self):
        return self._get_info()

    @property
    def active(self):
        # True only while the pool is in the RUN state.
        return self._state == self.RUN

    @property
    def num_processes(self):
        return self.limit

    @property
    def readers(self):
        return {}

    @property
    def writers(self):
        return {}

    @property
    def timers(self):
        return {}
| [
"muhammad.saif@inboxbiz.com"
] | muhammad.saif@inboxbiz.com |
da3518d81bee465bee82e1d01f3ec1177d8e7199 | d1295ec43356350d903108d56f5064aaafafb753 | /Read_OUT.py | 0027f41bd6d6b446161445292ec27fb173329298 | [] | no_license | Danil-phy-cmp-120/Elast | f1b9445af247377b76479d209eae1b27452163d8 | 83d0a673e75f8ec691c6627489704d4571c93df5 | refs/heads/main | 2023-02-02T06:05:23.582251 | 2020-12-22T13:11:29 | 2020-12-22T13:11:29 | 311,010,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,066 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import os
import sys
from scipy import constants
def get_weight():
    """Return (molar mass in kg/mol, total atom count) of the reference cell.

    Reads the POSCAR/CONTCAR-style file at ``bulk/D0/0.0/CONTCAR``:
    line 6 holds the element symbols, line 7 the per-element atom counts.
    """
    # Atomic masses in g/mol, keyed by element symbol.
    MASS = {'H':1.0079, 'He':4.0026, 'Li':6.941, 'Be':9.0122, 'B':10.811, 'C':12.011, 'N':14.007, 'O':15.999, 'F':18.998, 'Ne':20.18, 'Na':22.99, 'Mg':24.305, 'Al':26.982, 'Si':28.086, 'P':30.974, 'S':32.066, 'Cl':35.453, 'Ar':39.948, 'K':39.099, 'Ca':40.078, 'Sc':44.956, 'Ti':47.88, 'V':50.942, 'Cr':51.996, 'Mn':54.938, 'Fe':55.847, 'Co':58.933, 'Ni':58.693, 'Cu':63.546, 'Zn':65.39, 'Ga':69.723, 'Ge':72.61, 'As':74.922, 'Se':78.96, 'Br':79.904, 'Kr':83.8, 'Rb':85.468, 'Sr':87.62, 'Y':88.906, 'Zr':91.224, 'Nb':92.906, 'Mo':95.94, 'Tc':98.906, 'Ru':101.07, 'Rh':102.91, 'Pd':106.42, 'Ag':107.87, 'Cd':112.41, 'In':114.82, 'Sn':118.71, 'Sb':121.76, 'Te':127.60, 'I':126.9, 'Xe':131.29, 'Cs':132.91, 'Ba':137.33, 'La':138.91, 'Ce':140.12, 'Pr':140.91, 'Nd':144.24, 'Pm':144.91, 'Sm':150.36, 'Eu':151.97, 'Gd':157.25, 'Tb':158.93, 'Dy':162.5, 'Ho':164.93, 'Er':167.26, 'Tm':168.93, 'Yb':173.04, 'Lu':174.97, 'Hf':178.49, 'Ta':180.95, 'W':183.84, 'Re':186.21, 'Os':190.23, 'Ir':192.22, 'Pt':195.08, 'Au':196.97, 'Hg':200.59, 'Tl':204.38, 'Pb':207.2, 'Bi':208.98, 'Po':209.98, 'At':209.99, 'Rn':222, 'Fr':223.02, 'Ra':226.03, 'Ac':227.03, 'Th':232.04, 'Pa':231.04, 'U':238.03, 'Np':237.05, 'Pu':244.06, 'Am':243, 'Cm':247.07, 'Bk':247, 'Cf':251, 'Es':252, 'Fm':257.1, 'Md':258, 'No':259, 'Lr':260.11, 'Rf':261.11, 'Db':262.11, 'Sg':263.12, 'Bh':262.12, 'Hs':265, 'Mt':268, 'Ds':281, 'Rg':280}
    f = open('bulk/D0/0.0/CONTCAR', "r")
    contcar = f.readlines()
    f.close()
    element = []
    inp1 = contcar[5].split()  # element symbols
    inp2 = contcar[6].split()  # atom count per element
    # Expand to one entry per atom, e.g. ['Ni', 'Ni', 'Ti'].
    for i in range(len(inp1)):
        element += [inp1[i]]*int(inp2[i])
    mass = 0.0
    for i in range(len(element)):
        mass = mass + MASS[element[i]]
    # g/mol -> kg/mol; second value is the total number of atoms in the cell.
    return mass/1000.0, sum([int(x) for x in contcar[6].split()])
def get_energy(p):
    """Return the final 'energy(sigma->0)' value (eV) from the OUTCAR at *p*.

    The marker appears once per electronic/ionic step; the whole file is
    scanned and the last occurrence (the converged value) wins, matching
    the original behaviour.

    Raises:
        ValueError: if no energy line is present (the original raised an
            opaque UnboundLocalError in this case).
    """
    energy = None
    # 'with' guarantees the handle is closed even if parsing fails
    # (the original left the file open on error).
    with open(p, "r") as outcar:
        for line in outcar:
            inp = line.split()
            # Require index 6 to exist before reading it; the original
            # checked only len(inp) > 4 and could raise IndexError.
            if len(inp) > 6 and inp[4] == 'energy(sigma->0)':
                energy = float(inp[6])
    if energy is None:
        raise ValueError('no energy(sigma->0) entry found in %s' % p)
    return energy
def get_volume(p):
    """Return the final 'volume of cell' value (Angstrom^3) from the OUTCAR at *p*.

    Scans the whole file; the last occurrence wins, matching the original
    behaviour.

    Raises:
        ValueError: if no volume line is present (the original raised an
            opaque UnboundLocalError in this case).
    """
    volume = None
    # 'with' guarantees the handle is closed even if parsing fails.
    with open(p, "r") as outcar:
        for line in outcar:
            inp = line.split()
            # Require index 4 to exist; the original checked len(inp) > 3
            # and could raise IndexError on a 4-token line.
            if len(inp) > 4 and inp[0] == 'volume' and inp[1] == 'of' and inp[2] == 'cell':
                volume = float(inp[4])
    if volume is None:
        raise ValueError('no "volume of cell" entry found in %s' % p)
    return volume
V0 = get_volume('bulk/D0/0.0/OUTCAR') * 10**-30  # reference cell volume, A^3 -> m^3
path = os.listdir('bulk')  # one D<n> directory per applied distortion mode
DELTAS = np.linspace(-0.03, 0.03, 7)  # strain magnitudes; DELTAS[3] == 0.0 is the reference
# Calculation of the individual elastic moduli for cubic symmetry #
if len(path) == 3:
    energy = np.zeros((DELTAS.size, 3))
    alfa = np.zeros(3)
    for n in range(3):
        for i in range(DELTAS.size):
            if n == 0 and DELTAS[i] == 0.0:
                # the undistorted reference energy is shared by all three modes
                energy[i, :] = [get_energy('bulk/D{}/{}/OUTCAR'.format(n, DELTAS[i]))]*3
            elif DELTAS[i] != 0.0:
                energy[i, n] = get_energy('bulk/D{}/{}/OUTCAR'.format(n, DELTAS[i]))
    np.savetxt('energy.dat', energy, fmt='%.6f')
    # energy density relative to the reference point (eV -> GPa via 1.6e-28 / V0)
    energy = 1.6*10**-28 * (energy - energy[3,0])/V0
    for n in range(3):
        # quadratic coefficient of E(delta) for each distortion mode
        alfa[n] = np.polyfit(DELTAS, energy[:,n], 2)[0]
    # NOTE(review): the file is opened in binary mode but str objects are
    # written below -- this is Python 2 code and raises TypeError on Python 3.
    f = open('out.dat', "wb")
    np.savetxt(f, np.column_stack((DELTAS, energy)), fmt='%.5f')
    f.write('\n')
    np.savetxt(f, alfa, fmt='%.5f')
    f.write('\n')
    B = 2*alfa[0]/9
    C = alfa[1]/2
    C11 = (3*B + 4*C)/3
    C12 = (3*B - 2*C)/3
    C44 = alfa[2]/2
    f.write('B = '+ str(B) + ' ')
    f.write('C = ' + str(C) + '\n')
    f.write('\n')
    f.write('Cij:' + '\n')
    f.write('C11 = ' + str(C11) + '\n')
    f.write('C12 = ' + str(C12) + '\n')
    f.write('C44 = ' + str(C44) + '\n')
    f.write('\n')
    # Voigt (V) / Reuss (R) bounds and Hill (H) averages of bulk and shear moduli
    BV=(C11+2*C12)/3
    BR=(C11+2*C12)/3
    GV=(C11-C12+3*C44)/5
    GR=(5*C44*(C11-C12))/(4*C44+3*(C11-C12))
    BH=(BV+BR)/2
    GH=(GV+GR)/2
    f.write('BV = ' + str(BV) + ' ')
    f.write('GV = ' + str(GV) + '\n')
    f.write('BR = ' + str(BR) + ' ')
    f.write('GR = ' + str(GR) + '\n')
    f.write('BH = ' + str(BH) + ' ')
    f.write('GH = ' + str(GH) + '\n')
    f.write('\n')
    # Poisson ratio, Young's modulus and Debye temperature from the Hill averages
    mu = (3*BH-2*GH)/(6*BH+2*GH)
    E = (9*BH*GH)/(3*BH+GH)
    func = (3*(2*(2*(1+mu)/(3-6*mu))**1.5 + ((1+mu)/(3-3*mu))**1.5)**-1)**(1.0/3.0)
    Tetta = constants.hbar * ( 6*np.pi**2 * V0**0.5 * get_weight()[1] )**(1.0/3.0) * func * ((BH * 10**9 * constants.N_A) / ( constants.k**2 * get_weight()[0] ))**0.5
    f.write('mu = ' + str(mu) + '\n')
    f.write('E = ' + str(E) + '\n')
    f.write('Tetta = ' + str(Tetta) + '\n')
    f.write('\n')
    # Born mechanical-stability criteria for a cubic lattice
    if (C44 > 0 and C11 > abs(C12) and C11+2*C12 > 0):
        f.write('The crystal lattice is stable under distortion')
    else:
        f.write('The crystal lattice is unstable under distortion')
    f.close()
# Calculation of the individual elastic moduli for tetragonal symmetry #
if len(path) == 6:
    energy = np.zeros((DELTAS.size, 6))
    alfa = np.zeros(6)
    for n in range(6):
        for i in range(DELTAS.size):
            if n == 0 and DELTAS[i] == 0.0:
                # the undistorted reference energy is shared by all six modes
                energy[i, :] = [get_energy('bulk/D{}/{}/OUTCAR'.format(n, DELTAS[i]))]*6
            elif DELTAS[i] != 0.0:
                energy[i, n] = get_energy('bulk/D{}/{}/OUTCAR'.format(n, DELTAS[i]))
    # energy density relative to the reference point (eV -> GPa via 1.6e-28 / V0)
    energy = 1.6*10**-28 * (energy - energy[3,0])/V0
    #np.savetxt('energy.dat', energy, fmt='%.6f')
    for n in range(6):
        alfa[n] = np.polyfit(DELTAS, energy[:,n], 2)[0]
    # NOTE(review): binary mode + str writes below -- Python 2 only; fails on Python 3.
    f = open('out.dat', "wb")
    np.savetxt(f, np.column_stack((DELTAS, energy)), fmt='%.5f')
    f.write('\n')
    np.savetxt(f, alfa, fmt='%.5f')
    f.write('\n')
    # Six independent tetragonal elastic constants from the fitted coefficients
    C11 = (alfa[2] + 2*alfa[3] - 3*alfa[5] +alfa[4])/3
    C12 = (2*alfa[3] - 2*alfa[2] - 3*alfa[5] + alfa[4])/3
    C13 = (alfa[4] - 4*alfa[3] + 3*alfa[5] + alfa[2])/6
    C33 = 2*alfa[5]
    C44 = alfa[0]/2
    C66 = alfa[1]/2
    B= (C33*(C11+C12)-2*C13**2)/(C11+C12+2*C33-4*C13)
    f.write('B = '+ str(B) + ' ')
    f.write('\n')
    f.write('Cij' + '\n')
    f.write('C11 = ' + str(C11) + '\n')
    f.write('C12 = ' + str(C12) + '\n')
    f.write('C13 = ' + str(C13) + '\n')
    f.write('C33 = ' + str(C33) + '\n')
    f.write('C44 = ' + str(C44) + '\n')
    f.write('C66 = ' + str(C66) + '\n')
    f.write('\n')
    # Voigt / Reuss bounds and Hill averages
    BV=(2*(C11+C12)+4*C13+C33)/9
    BR=(C33*(C11+C12)-2*C13**2)/(C11+C12+2*C33-4*C13)
    GV=(12*C44+12*C66+C11+C12+2*C33-4*C13)/30
    GR=(5*C44*C66*(C33*(C11+C12)-2*C13**2))/(2*(C44+C66)*(C33*(C11+C12)-2*C13**2)+3*BV*C44*C66)
    BH=(BV+BR)/2
    GH=(GV+GR)/2
    f.write('BV = ' + str(BV) + ' ')
    f.write('GV = ' + str(GV) + '\n')
    f.write('BR = ' + str(BR) + ' ')
    f.write('GR = ' + str(GR) + '\n')
    f.write('BH = ' + str(BH) + ' ')
    f.write('GH = ' + str(GH) + '\n')
    f.write('\n')
    # Poisson ratio, Young's modulus and Debye temperature from the Hill averages
    mu = (3*BH-2*GH)/(6*BH+2*GH)
    E = (9*BH*GH)/(3*BH+GH)
    func = (3*(2*(2*(1+mu)/(3-6*mu))**1.5 + ((1+mu)/(3-3*mu))**1.5)**-1)**(1.0/3.0)
    Tetta = constants.hbar * ( 6*np.pi**2 * V0**0.5 * get_weight()[1] )**(1.0/3.0) * func * ((BH * 10**9 * constants.N_A) / ( constants.k**2 * get_weight()[0] ))**0.5
    f.write('mu = ' + str(mu) + '\n')
    f.write('E = ' + str(E) + '\n')
    f.write('Tetta = ' + str(Tetta) + '\n')
    f.write('\n')
    # Born mechanical-stability criteria for a tetragonal lattice
    if (C11 > 0 and C33 > 0 and C44 > 0 and C66 > 0 and C11 - C12 > 0 and C11 - 2*C13 + C33 > 0 and 2*C11 + 2*C12 + 4*C13 + C33 > 0):
        f.write('The crystal lattice is stable under distortion')
    else:
        f.write('The crystal lattice is unstable under distortion')
    f.close()
# Calculation of the individual elastic moduli for hexagonal symmetry #
# I.R. Shein, V.S. Kiiko, Yu.N. Makurin, M.A. Gorbunova, A.L. Ivanovskii. Fizika Tverdogo Tela (Physics of the Solid State), 2007, vol. 49, no. 6 #
if len(path) == 5:
    energy = np.zeros((DELTAS.size, 5))
    alfa = np.zeros(5)
    for n in range(5):
        for i in range(DELTAS.size):
            if n == 0 and DELTAS[i] == 0.0:
                # the undistorted reference energy is shared by all five modes
                energy[i, :] = [get_energy('bulk/D{}/{}/OUTCAR'.format(n, DELTAS[i]))]*5
            elif DELTAS[i] != 0.0:
                energy[i, n] = get_energy('bulk/D{}/{}/OUTCAR'.format(n, DELTAS[i]))
    energy = 1.6*10**-28 * (energy - energy[3,0])/V0
    #np.savetxt('energy.dat', energy, fmt='%.6f')
    for n in range(5):
        alfa[n] = np.polyfit(DELTAS, energy[:,n], 2)[0]
    # NOTE(review): binary mode + str writes below -- Python 2 only; fails on Python 3.
    f = open('out.dat', "wb")
    np.savetxt(f, np.column_stack((DELTAS, energy)), fmt='%.5f')
    f.write('\n')
    np.savetxt(f, alfa, fmt='%.5f')
    f.write('\n')
    C11 = (alfa[0] + alfa[1])/2.0
    C12 = (alfa[0] - alfa[1])/2.0
    C33 = 2.0*alfa[2]
    C55 = alfa[3]/2.0
    C13 = (alfa[4] - C11 - C12 - C33/2.0)/2.0
    C66 = alfa[1]/2.0
    C = C33*(C11+C12)-2.0*C13**2
    B= 2.0*alfa[4]/9.0
    f.write('B = '+ str(B) + ' ')
    f.write('\n')
    f.write('Cij' + '\n')
    f.write('C11 = ' + str(C11) + '\n')
    f.write('C12 = ' + str(C12) + '\n')
    f.write('C13 = ' + str(C13) + '\n')
    f.write('C33 = ' + str(C33) + '\n')
    f.write('C55 = ' + str(C55) + '\n')
    f.write('\n')
    # NOTE(review): BR and GR below use C12**2 where the analogous tetragonal
    # formulas (and the variable C above) use C13**2 -- looks like a typo;
    # confirm against the cited reference before relying on these values.
    BV=(2.0/9.0)*(C11 + C12 + 2.0*C13 + 0.5*C33)
    BR=(C33*(C11+C12)-2.0*C12**2.0)/(C11+C12+2.0*C33-4.0*C13)
    GV=(12.0*C55+12.0*C66+C11+C12+2.0*C33-4.0*C13)/30.0
    GR=(5.0*C55*C66*(C33*(C11+C12)-2.0*C12**2.0)**2.0)/(2.0*(C55+C66)*(C33*(C11+C12)-2.0*C12**2.0)**2.0+3.0*BV*C55*C66)
    BH=(BV+BR)/2
    GH=(GV+GR)/2
    f.write('BV = ' + str(BV) + ' ')
    f.write('GV = ' + str(GV) + '\n')
    f.write('BR = ' + str(BR) + ' ')
    f.write('GR = ' + str(GR) + '\n')
    f.write('BH = ' + str(BH) + ' ')
    f.write('GH = ' + str(GH) + '\n')
    f.write('\n')
    # Compliance constants s_ij from the stiffness constants
    s11 = C33/C + 1.0/(C11-C12)
    s12 = -1.0/(C11-C12)
    s13 = -C13/C
    s33 = (C11+C12)/C
    s55 = 1.0/C55
    # Direction-dependent Poisson ratios and Young's moduli
    mu = np.array([-s12/s11, -s13/s11])
    E = np.array([1.0/s11, 1.0/s33])
    f.write('mu_12 = ' + str(mu[0]) + '\t' + 'mu_13 = ' + str(mu[1]) + '\n')
    f.write('E_1 = ' + str(E[0]) + '\t' + 'E_3 = ' + str(E[1]) + '\n')
    f.write('\n')
    # Averaged elastic properties and the Debye temperature
    mu_aver = (3.0*BH-2.0*GH)/(2.0*(3.0*BH+GH))
    E_aver = (9.0*BH*GH)/(3.0*BH+GH)
    func = (3.0*(2.0*(2.0*(1+mu_aver)/(3.0-6.0*mu_aver))**1.5 + ((1.0+mu_aver)/(3.0-3.0*mu_aver))**1.5)**-1.0)**(1.0/3.0)
    Tetta = constants.hbar * ( 6*np.pi**2 * V0**0.5 * get_weight()[1] )**(1.0/3.0) * func * ((BH * 10**9 * constants.N_A) / ( constants.k**2 * get_weight()[0] ))**0.5
    f.write('mu = ' + str(mu_aver) + '\n')
    f.write('E = ' + str(E_aver) + '\n')
    f.write('Tetta = ' + str(Tetta) + '\n')
    f.write('\n')
    # Born mechanical-stability criteria for a hexagonal lattice
    if (C11 > 0 and C33 > 0 and C55 > 0 and C66 > 0 and C11**2 > C12**2 and (C11+C12)*C33 > 2*C13**2 and C11*C33 > C13**2):
        f.write('The crystal lattice is stable under distortion')
    else:
        f.write('The crystal lattice is unstable under distortion')
    f.close()
| [
"noreply@github.com"
] | noreply@github.com |
fbe32ea133ee95666e0864e78e25cb6ab7054a43 | aa2dda99a76732f67d1f0e403bbd67980f7e6214 | /Indoor_Deterministic_OfflineTest29Mei.py | 2e6f3d1a8e5c565e3bc846e6350512b098774525 | [] | no_license | OttoCh/WPF | a3b4a76a123987b7c65bb1b1ade5846f0291160a | 8408be18086b5639ca4512f522bed4a7a52cfc23 | refs/heads/master | 2021-01-20T12:21:02.386255 | 2017-08-29T03:16:22 | 2017-08-29T03:16:22 | 101,713,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,617 | py | #offline Indoor deterministic system (fingerprinting)
#29 May 2017
#This program processes offline data from the experiment in the physics corridor.
#It takes measurement input from a .txt file and RSSI position reference data from another .txt file.
#The output is a predicted position with its probability.
#The reference file layout must match AverageData_filename.
#(work in progress)
import math
import itertools as itertools
AverageData_filename = "AverageData.txt"
TestData_filename = "TestData.txt"
Result_filename = "result.txt"
# Reference data: 6 positions, 6 access points (APs)
# This should really be 6 total positions, but the resulting array came out
# smaller than expected, so a larger cap is used here.
totalPosition_X = 111
# Used for the heading/orientation dimension.
totalPosition_Y = 4
totalAP = 6
# Number of 3-out-of-6 AP combinations: C(6,3) = 20.
totalPossibleCombination = 20
# Actual maximum coordinate values (exclusive upper bound, i.e. max + 1).
MaximumXValue = 111
MaximumYValue = 4
#RefData = [[[0 for x in range(totalPosition_X)] for y in range(totalPosition_Y)] for z in range(0,10)]
RefData = [[[0 for x in range(totalAP)] for y in range(totalPosition_Y)] for z in range(totalPosition_X)]
# Real-time measurement data, one RSSI value per AP.
TestData = [0 for x in range(0,totalAP)]
# Stores the squared per-AP signal differences per grid cell.
SquaredDeltaSignalResult = [[[0 for x in range(totalAP)] for y in range(totalPosition_Y)] for z in range(totalPosition_X)]
# (Historical note: 0..55 because choosing 3 samples out of 8 gives 56 combinations.)
# Stores the minimum Euclidean distance found per AP combination.
ResultEuclidian = [0 for x in range(0,totalPossibleCombination)]
# Stores the best-matching grid position per AP combination.
ResultPosition_X = [-1 for x in range(0,totalPossibleCombination)]
ResultPosition_Y = [-1 for x in range(0,totalPossibleCombination)]
# Stores the final guessed position.
MostLikelyPosition_X = [-1]
MostLikelyPosition_Y = [-1]
def ReadExistingData():
    """Fill the global RefData[x][y][ap] grid from the tab-separated reference file.

    Each file line holds one row of per-AP RSSI values; i (x position) advances
    every time j (orientation) wraps past 4; lines starting with '#' are skipped.
    """
    readFile = open(AverageData_filename, "r")
    i=0
    j=0
    comment = False
    for line in readFile:
        #print line
        k=0
        #remove \n from string
        a = line.rstrip()
        #split by \t
        res = a.split("\t")
        for x in res:
            #print res
            if(j==4):
                i=i+1
                j=0
            # advance the x coordinate each time the y coordinate has reached its limit
            if(x=='#'):
                comment = True
                break
            else:
                #print(str(i) + " " + str(j) + " " + str(k))
                RefData[i][j][k] = float(x)
                k = k+1
        if(comment==False):
            j = j+1
        else:
            comment=False
def ReadTestData():
    """Fill the global TestData list with per-AP RSSI values from the test file.

    Lines starting with '#' are skipped; later lines overwrite earlier ones,
    so the last data line in the file wins.
    """
    readFile = open(TestData_filename, "r")
    comment = False
    for line in readFile:
        i=0
        #remove \n from string
        a = line.rstrip()
        #split by \t
        res = a.split("\t")
        for x in res:
            #print(str(i))
            # skip the rest of a line once a '#' marker is seen
            if(x=='#'):
                comment = True
                break
            else:
                TestData[i] = float(x)
                i = i+1
    #TestData[i] = TestData[i]/len(res)
def calculateAllPossibleCombination():
    """For every 3-AP combination, find the grid cell with the smallest distance.

    Results go into the global ResultEuclidian / ResultPosition_X / _Y lists,
    one entry per combination (C(6,3) = 20).

    NOTE(review): ``current_diftotal`` is never reset between grid cells and
    ``math.sqrt`` is applied to the running total inside the loop, so each
    cell's score includes the (rooted) totals of all previous cells --
    this looks unintended; confirm against the original experiment.
    Also uses Python 2 ``xrange``.
    """
    AP = [0,1,2,3,4,5]
    combinar = 3
    m=0
    l=0
    for item in itertools.combinations(AP,combinar):
        currentExpectedPosition_X = -1
        currentExpectedPosition_Y = -1
        current_diftotal = 0
        minimum_diftotal = 1000
        for i in xrange(0,MaximumXValue):
            for l in xrange(0,MaximumYValue):
                for j in item:
                    current_diftotal += SquaredDeltaSignalResult[i][l][j]
                current_diftotal = math.sqrt(current_diftotal)
                if(current_diftotal<minimum_diftotal):
                    minimum_diftotal = current_diftotal
                    currentExpectedPosition_X = i
                    currentExpectedPosition_Y = l
        ResultEuclidian[m] = minimum_diftotal
        ResultPosition_X[m] = currentExpectedPosition_X
        ResultPosition_Y[m] = currentExpectedPosition_Y
        m+=1
def determinedProbabilityOfPosition():
    """Vote over the per-combination positions and write the result file.

    The percentage of combinations that picked each cell is appended to
    Result_filename; the cell with the highest share is reported as the
    predicted position.  Uses Python 2 ``xrange``.

    NOTE(review): the MostLikelyPosition_X/_Y assignments below create
    function-local names that shadow the module-level lists of the same name.
    """
    textFile = open(Result_filename, "a")
    textFile.write('\n')
    a = 0
    # Dump the raw per-combination picks, e.g. "(3,1); (3,1); ..."
    for i in ResultPosition_X:
        j = ResultPosition_Y[a]
        textFile.write("(" + str(i) + "," + str(j) + "); ")
        a+=1
    biggestResult = 0
    textFile.close()
    for x in xrange(0,MaximumXValue):
        for y in range(0,MaximumYValue):
            countAppearance = 0
            for k in range(0, totalPossibleCombination):
                if(x == ResultPosition_X[k]):
                    if(y == ResultPosition_Y[k]):
                        countAppearance += 1
            result = float(countAppearance)*100.0/float(totalPossibleCombination)
            if(biggestResult<result):
                biggestResult = result
                MostLikelyPosition_Y = y
                MostLikelyPosition_X = x
            if(result != 0):
                textFile = open(Result_filename, "a")
                textFile.write('\n' + "(" + str(x) + "," + str(y) + ") :" + '\t' + str(result) + "%")
                textFile.close()
    textFile = open(Result_filename, "a")
    textFile.write('\n' + "Predicted Position: (" + str(MostLikelyPosition_X) + "," + str(MostLikelyPosition_Y) + ")")
    textFile.close()
    print('\n' + "(" + str(MostLikelyPosition_X) + "," + str(MostLikelyPosition_Y) + ")")
def calculatePosition():
    """Fill SquaredDeltaSignalResult[i][j][k] with (RefData - TestData)^2 per AP."""
    #for x in range(totalPosition):
    for i in range(0,MaximumXValue):
        for j in range(0,MaximumYValue):
            for k in range(totalAP):
                total_dif=0  # NOTE(review): assigned but never used
                #print(str(i) + " " + str(j) + " " + str(k))
                dif = euclidianDistance(RefData[i][j][k], TestData[k])
                SquaredDeltaSignalResult[i][j][k] = dif
def euclidianDistance(num1, num2):
    """Return the squared difference between two RSSI values (one Euclidean term)."""
    delta = num1 - num2
    return delta ** 2
def calculateAverageData(Data, j):
    """Average a list of RSSI readings, substituting 100.0 for zero readings.

    NOTE(review): writes to ``RealtimeData``, which is not defined anywhere in
    this module -- calling this function raises NameError; it appears unused.
    """
    jumlahData= len(Data)  # number of samples
    total = 0
    for x in Data:
        # a reading of 0 means "no signal"; treat it as the worst value (100)
        if(float(x)==0):
            x=100.0
        total += float(x)
    avg = total/jumlahData
    RealtimeData[j] = avg
    return
def writeTimestamp():
    """Append a human-readable 'timestamp: YYYY-MM-DD HH:MM:SS' line to the result file."""
    import time
    import datetime
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    textFile = open(Result_filename, "a")
    textFile.write("\ntimestamp: " + st + '\n')
    textFile.close()
def Routine():
    """Run the full localization pipeline: load data, score cells, report position."""
    ReadExistingData()
    ReadTestData()
    writeTimestamp()
    calculatePosition()
    calculateAllPossibleCombination()
    determinedProbabilityOfPosition()
Routine() | [
"otto.christianto.oc@gmail.com"
] | otto.christianto.oc@gmail.com |
42170e9a6ac498033863cd27ca0a6556bf1aa6c3 | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/apriori_20190422135150.py | 36ae75953708f032ec4b5b046220fb616cdb0b75 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,768 | py | # Apriori算法
"""
由于Apriori算法假定项集中的项是按字典序排序的,而集合本身是无序的,所以我们在必要时需要进行set和list的转换;
由于要使用字典(support_data)记录项集的支持度,需要用项集作为key,而可变集合无法作为字典的key,因此在合适时机应将项集转为固定集合frozenset。
支持度
置信度
"""
class apriori_algorithm:
    """Apriori association-rule miner over a list-of-transactions data set."""
    # Initialize the algorithm with its parameters.
    def __init__(self, minSupport, dataSet):
        self.minSupport = minSupport # minimum support threshold
        self.dataSet = dataSet # the transaction data set
    # Build the list of candidate 1-itemsets (each a single-element list).
    def generateC1(self, dataSet):
        C1 = [] # holds the generated single-item candidate itemsets
        # Walk every item of every transaction.
        for data in dataSet:
            for item in data:
                if [item] not in C1:
                    C1.append([item])
        C1.sort()
        return C1
    # Scan the data set, count occurrences of each candidate in Ck.
    def generateLk_by_Ck(self, dataSet, Ck, minSupport, support_data):
        """
        Generate Lk by executing a delete policy from Ck.
        Args:
            dataSet: the transaction data set.
            Ck: a set which contains all frequent candidate k-itemsets.
            minSupport: the minimum support.
            support_data: a dictionary; key is a frequent itemset, value is its support.
        Returns:
            Lk: a list of all frequent k-itemsets.
        """
        D = map(set, dataSet)
        C = map(frozenset, Ck)
        C1 = list(C) # map objects are one-shot iterators: once exhausted by the inner loop they yield nothing, so materialize to a list first
        countData = dict()
        for d in D: # iterate the transaction sets
            for c in C1:
                if c.issubset(d): # subset test, not element membership
                    if c not in countData.keys(): # the itemset (a frozenset) is used as the dict key
                        countData[c] = 1
                    else:
                        countData[c] += 1
        numItems = float(len(list(dataSet)))
        returnList = []
        supportData = dict()
        # Keep only candidates whose support reaches the threshold.
        for key in countData:
            support = countData[key] / numItems
            if support >= minSupport:
                returnList.insert(0, key) # insert(0, ...) prepends the itemset to the result list
                support_data[key] = support
        return returnList
    def generate_L(self, dataSet, k, min_support):
        """
        Generate all frequent itemsets.
        Args:
            dataSet: the transaction data set.
            k: maximum number of items in a frequent itemset.
            min_support: the minimum support.
        Returns:
            L: all frequent itemsets found (one list per itemset size).
            support_data: the support of each frequent itemset.
        """
        support_data = {}
        C1 = self.generateC1(dataSet)
        L1 = self.generateLk_by_Ck(dataSet, C1, min_support, support_data)
        Lksub1 = L1.copy()
        L = []
        L.append(Lksub1)
        # Grow itemsets level by level: candidates of size i from frequent (i-1)-itemsets.
        for i in range(2, k + 1):
            Ci = self.generateCK(Lksub1, i)
            Li = self.generateLk_by_Ck(dataSet, Ci, min_support, support_data)
            Lksub1 = Li.copy()
            L.append(Lksub1)
        return L, support_data
    # generateCK: produce candidate frequent k-itemsets from Lk (frequent (k-1)-itemsets).
    def generateCK(self, Lk, k):
        # NOTE(review): the inner loop usually starts at i + 1 rather than 1;
        # the current form revisits pairs, but the set Ck deduplicates them.
        Ck = set()
        len_Lk = len(list(Lk))
        list_Lk = list(Lk)
        for i in range(len_Lk):
            for j in range(1, len_Lk):
                l1 = list(list_Lk[i])
                l2 = list(list_Lk[j])
                l1.sort()
                l2.sort()
                # Join step: merge only itemsets sharing the first k-2 items.
                if l1[0:k - 2] == l2[0:k - 2]:
                    Ck_item = list_Lk[i] | list_Lk[j]
                    if self.isCk(Ck_item, list_Lk):
                        Ck.add(Ck_item)
        # Ck.add(Ck_item)
        return Ck
    # Prune test: every (k-1)-subset of a candidate must itself be frequent.
    def isCk(self, Ck_item, list_Lk):
        for item in Ck_item:
            sub_Ck = Ck_item - frozenset([item])
            if sub_Ck not in list_Lk:
                return False
        return True
    # Derive association rules from the frequent itemsets.
    def generate_big_rules(self, L, support_data, min_conf):
        """
        Generate big rules from frequent itemsets.
        Args:
            L: the list of all frequent itemsets.
            support_data: the support of each frequent itemset.
            min_conf: the minimum confidence.
        """
        big_rule_list = []
        sub_set_list = []
        for i in range(0, len(L)):
            for freq_set in L[i]:
                for sub_set in sub_set_list:
                    if sub_set.issubset(freq_set):
                        # confidence(A => B) = support(A u B) / support(A)
                        conf = support_data[freq_set] / support_data[freq_set - sub_set]
                        big_rule = (freq_set - sub_set, sub_set, conf)
                        if conf >= min_conf and big_rule not in big_rule_list:
                            print(freq_set - sub_set, " => ", sub_set, "conf: ", conf)
                            big_rule_list.append(big_rule)
                sub_set_list.append(freq_set)
        return big_rule_list
if __name__ == '__main__':
    # Demo: mine frequent itemsets from a tiny data set of Chinese tokens.
    minS = 0.5
    dataSet = [['这个','弄','鞍山', '挨打'], ['这个', '啊'], ['鞍山', '弄', '词典', '按错'], ['鞍山', '挨打','按下','爱玩']]
    apriori = apriori_algorithm(minSupport=minS, dataSet=dataSet)
    # k=1: only single-item sets are mined (the growth loop in generate_L is empty).
    L, support_data = apriori.generate_L(dataSet, 1,minS)
    print(L)
    print(support_data)
    big_rule_list = apriori.generate_big_rules(L, support_data, 0.5)
"1044801968@qq.com"
] | 1044801968@qq.com |
9411ef0236ec0ef9baedddd1d0aee768b83a3a44 | a15d0ff1c2d5eae9f223d70fa74391ed9233a338 | /utils/pagers.py | 5d6686cc7b23a3e40e5a02068010920e7432c61b | [] | no_license | HuaDD/crm | d21ee68b14ecda85b389248aa162979789fc1e4f | 1b1b8a40e0952df3a61823ee2f12bf0a4f7817e5 | refs/heads/master | 2020-04-07T22:40:36.860146 | 2018-11-27T07:29:49 | 2018-11-27T07:29:49 | 158,780,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,141 | py | #!/usr/bin/env python
# -*- coding:utf8 -*-
class Page_Info:
    """Pagination helper that renders a Bootstrap-style ``<li>`` page bar.

    Computes the record slice for the current page (start/end) and an HTML
    string with previous/next links and a window of numbered page links.
    """
    def __init__(self, current_page, all_count, per_page, base_url, show_page=11):
        # A non-numeric current_page silently falls back to page 1.
        try:
            self.current_page = int(current_page)
        except Exception:
            self.current_page = 1
        self.per_page = per_page
        # Ceiling division: any remainder adds one more page.
        page_num, sur_page = divmod(all_count, per_page)
        if sur_page:
            page_num += 1
        self.all_page = page_num
        self.show_page = show_page
        self.base_url = base_url
    def start(self):
        """Index of the first record on the current page (0-based, inclusive)."""
        return (self.current_page - 1) * self.per_page
    def end(self):
        """Index one past the last record on the current page (exclusive)."""
        return self.current_page * self.per_page
    def pager(self):
        """Return the page bar as one HTML string of ``<li>`` elements."""
        page_list = []
        half = int((self.show_page - 1) / 2)
        # Choose the [begin, stop) window of page numbers to display,
        # keeping the current page centred where possible.
        if self.all_page < self.show_page:
            begin = 1
            stop = self.all_page + 1
        else:
            if self.current_page <= half:
                begin = 1
                stop = self.show_page + 1
            else:
                if self.current_page + half > self.all_page:
                    begin = self.all_page - self.show_page + 1
                    stop = self.all_page + 1
                else:
                    begin = self.current_page - half
                    stop = self.current_page + half + 1
        # "Previous" link; a dead '#' link on the first page.
        if self.current_page <= 1:
            prev = "<li><a href='#'>上一页</a></li>"
        else:
            prev = "<li><a href='%s?page=%s'>上一页</a></li>" % (self.base_url, self.current_page - 1,)
        page_list.append(prev)
        # Numbered links; the current page gets the 'active' class.
        for i in range(begin, stop):
            if i == self.current_page:
                temp = "<li class='active'><a href='%s?page=%s'>%s</a></li>" % (self.base_url, i, i,)
            else:
                temp = "<li><a href='%s?page=%s'>%s</a></li>" % (self.base_url, i, i,)
            page_list.append(temp)
        # "Next" link; a dead '#' link on the last page.
        if self.current_page >= self.all_page:
            nex = "<li><a href='#'>下一页</a></li>"
        else:
            nex = "<li><a href='%s?page=%s'>下一页</a></li>" % (self.base_url, self.current_page + 1,)
        page_list.append(nex)
        return ''.join(page_list) | [
"158@163.com"
] | 158@163.com |
e7beea6a20b1160d41261c042760fcdd416a4503 | d861a2c638c0cb5c20ef9d3be5955b1c02b042e0 | /Training models/coordinates_extraction.py | 52c1aba6805b3da3c110c55d5a816658e5ee1b08 | [] | no_license | ElvinJun/DeepPBS | 826d3d55493a0c7a204004167201fa649e076e8d | 4a8a1361a4672159689f629dedc467d72b4897eb | refs/heads/master | 2021-07-12T17:50:46.808983 | 2021-04-20T07:48:18 | 2021-04-20T07:48:18 | 243,705,058 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | import os
import pathlib
import numpy as np
def extract_cif(path, filename):
    """Parse a cif-style atom file and return its coordinates as a float32 (N, 3) array.

    Side-chain CB atoms (column 3) are skipped; columns 10-12 hold x, y, z.
    """
    with open(os.path.join(path, filename), 'r') as handle:
        records = handle.readlines()
    coords = []
    for record in records:
        fields = record.split()
        if fields[3] != 'CB':
            coords.append([float(fields[10]), float(fields[11]), float(fields[12])])
    return np.array(coords).astype('float32')
dataset_name = 'nr_40'
LIST_PATH = 'D:\protein_structure_prediction\data\dataset/nr_list/best_rebuild_nr40.txt' # % dataset_name
DATA_PATH = 'D:\protein_structure_prediction\data\dataset/cif_remove_again'
COOR_PATH = 'D:\protein_structure_prediction\data\dataset/processed_data/%s/coordinates' % dataset_name
pathlib.Path(COOR_PATH).mkdir(parents=True, exist_ok=True)
# One structure name per line in the list file.
with open(LIST_PATH, 'r') as file:
    filenames = file.read().split('\n')
# Skip entries that were already converted on a previous run.
# NOTE(review): os.listdir returns names with their '.npy' extension while
# the list entries have none, so this membership check may never match --
# confirm how the finished files are named.
finished_filenames = os.listdir(COOR_PATH)
finished_num = 0
for filename in finished_filenames:
    if filename in filenames:
        filenames.remove(filename)
        finished_num += 1
print('%d finished! %d to go!' % (finished_num, len(filenames)))
failed_filename = []
for filename in filenames:
    print(filename)
    coos = extract_cif(DATA_PATH, filename + '.cif')
    np.save(os.path.join(COOR_PATH, filename), coos)
| [
"noreply@github.com"
] | noreply@github.com |
9d8c1186c7ca74249dc1d1313b45dab9687f4746 | 227582d196ce6708da9af5825212ff373a147274 | /Element_Formulations.py | ff12eff9ee4889d4ffb9eb2cdb9c27703faa241e | [] | no_license | eivindhugaas/FEA_program | 2fa1fa27776a4401c13fb7fb735d9774deeba3f8 | 24eedb5e1d925a8a51f5dbef261f1019177cbcd2 | refs/heads/master | 2021-10-02T07:14:09.604618 | 2018-10-09T07:34:37 | 2018-10-09T07:34:37 | 125,865,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,725 | py | import numpy as np
from FEAFunctions.FEAFunctions import FEA as fea
fea=fea()
'''
This script gives reaction force in one single hexahedral 3D 8 node element using three different element formulations:
- 8 integration points (full integration) using the principle of stationary potential energy.
- 1 integration point (reduced integration) using the principle of stationary potential energy.
- 8 integration points (full integration) using B-Bar method of the Hu Washizu three field formulation
with the modified B matrix formulated using the volume averaged volumetric strain to ease volumetric locking issues for nearly incompressible materials.
nodedisplacement specifies the displacement at the nodes.
nodelocations specify the nodelocation of the element's nodes in numerical order according
to a right handed x,y,z cartesian coordinate system
as demonstrated under. The nodedisplacement are specified in the same manner.
node: | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
coordinate: | x y z | x y z | x y z | x y z | x y z | x y z | x y z | x y z |
nodelocations=[-1.,-1.,-1., 1.,-1.,-1., 1.,1.,-1., -1.,1.,-1., -1.,-1.,1., 1.,-1.,1., 1.,1.,1., -1.,1.,1.]
The integration points for the fully integrated formulations are numbered and located according to the closest node through the Gausspoints array.
Results are given in order of the nodes/integration points.
Strain and stress is defined in following order: 11, 22, 33, 12, 23, 31
'''
'''
Loadcases, comment in/out what to run
'''
# --------------- No volume change ---------------
# v=0.5 #poisson ratio
# dx=0.1
# dy=dz=-dx*v*0.5
# nodedisplacement=[0.0,-dy,-dz, dx,-dy,-dz, dx,dy,-dz, 0.0,dy,-dz, 0.0,-dy,dz, dx,-dy,dz, dx,dy,dz, 0.0,dy,dz]
# nodelocations=[-1.,-1.,-1., 1.,-1.,-1., 1.,1.,-1., -1.,1.,-1., -1.,-1.,1., 1.,-1.,1., 1.,1.,1., -1.,1.,1.]
# --------------- Hourglass ---------------
# nodedisplacement=[-dx,0.0,0.0, -dx,0.0,0.0, dx,0.0,0.0, dx,0.0,0.0, dx,0.0,0.0, dx,0.0,0.0, -dx,0.0,0.0, -dx,0.0,0.0]
# -------------- Pressurized pipe to benchmark volumetric locking ----------------
nodelocations=[3.25,0.,0., 4.,0.,0., 3.923141,0.780361,0., 3.187552,0.634044,0., 3.25,0.,1., 4.,0.,1., 3.923141,0.780361,1., 3.187552,0.634044,1.]
defnodelocations=[ 3.26582, 0, 0, 4.01289, 0, 0, 3.93579, 0.782877, 0, 3.20306, 0.637129, 0, 3.26582, 0, 1, 4.01289, 0, 1, 3.93579, 0.782877, 1, 3.20306, 0.637129, 1 ] #C3D8 no
nodedisp=[]
for i in range(len(nodelocations)):
n=-nodelocations[i]+defnodelocations[i]
nodedisp.append(float(n))
nodedisplacement=nodedisp
'''
Material Parameters
'''
v=0.45
E=100
K=E/(3*(1-(2*v)))
G=(3.*K*E)/((9.*K)-E)
C=fea.stiffnessmatrix(v=v,E=E)
Volume=fea.Volume(nodelocations=nodelocations)
'''
Element formulations, print what you want by modifying the formulations, comment in/out what to run.
'''
def main():
    """Run all three element formulations on the load case configured above."""
    OneFieldFullInt()
    OneFieldRedInt()
    ThreeFieldBBarLocking()
#----------- End input -------------------
def OneFieldFullInt():
    '''
    This section calculates reaction force in a fully integrated 8 node hexahedron with 8 integration points using the standard Gaussian integration method.
    '''
    # 2x2x2 Gauss points at +-1/sqrt(3), ordered to match the closest node.
    Gausspoints=np.array([[-1./(3**0.5),-1./(3**0.5),-1./(3**0.5)] , [1./(3**0.5),-1./(3**0.5),-1./(3**0.5)] , [1./(3**0.5),1./(3**0.5),-1./(3**0.5)], [-1./(3**0.5),1./(3**0.5),-1./(3**0.5)], [-1./(3**0.5),-1./(3**0.5),1./(3**0.5)], [1./(3**0.5),-1./(3**0.5),1./(3**0.5)], [1./(3**0.5),1./(3**0.5),1./(3**0.5)], [-1./(3**0.5),1./(3**0.5),1./(3**0.5)]])
    w=1.  # Gauss weight per point for 2x2x2 integration
    Ke=np.zeros((24,24))
    S=[]
    e=[]
    for Gauss in Gausspoints:
        B,detJ=fea.shapefunc(nodelocations=nodelocations,exi=Gauss[0],eta=Gauss[1],zeta=Gauss[2])
        Bt=B.transpose()
        # Accumulate the element stiffness matrix: Ke += w * B^T C B det(J)
        Ke=(w*Bt*C*B*detJ)+Ke
        eu=B*(np.matrix(nodedisplacement).transpose())  # strain at this point
        e.append(eu)
        s=C*eu  # stress at this point
        S.append(s)
    F=Ke*(np.matrix(nodedisplacement).transpose())  # nodal reaction forces
    print("--------------------------- Standard 8 node element formulation with eight integration points (fully integrated) ----------------------------------")
    print("Reaction forces:")
    print(F)
    print("Stress:")
    print(S)
    print("Strain:")
    print(e)
    # --------------- Calculate rank defiency ---------------
    # Proper rank = order minus the 6 rigid-body modes; any extra deficiency
    # indicates spurious zero-energy (hourglass) modes.
    orderKe=len(Ke)
    nRB=6
    rankKe=np.linalg.matrix_rank(Ke)
    properrank=orderKe-nRB
    rankdef=float(properrank)-float(rankKe)
    print("Rank defiancy: ", rankdef)
def OneFieldRedInt():
    '''
    This section calculates reaction force in a reduced integration 8 node hexahedron with 1 integration point using the standard Gaussian integration method
    '''
    # Single Gauss point at the element centre.
    Gausspoints=np.array([[0.,0.,0.]])
    w=8. #Sum of all weights must be 8. ref colorado net course.
    Ke=np.zeros((24,24))
    S=[]
    e=[]
    for Gauss in Gausspoints:
        B,detJ=fea.shapefunc(nodelocations=nodelocations,exi=Gauss[0],eta=Gauss[1],zeta=Gauss[2])
        eu=B*(np.matrix(nodedisplacement).transpose())  # strain at the centre point
        s=C*eu  # stress at the centre point
        S.append(s)
        e.append(eu)
        Bt=B.transpose()
        Ke=(w*Bt*C*B*detJ)+Ke
    F=Ke*(np.matrix(nodedisplacement).transpose())  # nodal reaction forces
    print("--------------------------- Standard eight node element formulation with one integration point (reduced integration) ----------------------------------")
    print("Reaction forces:")
    print(F)
    print("Stress:")
    print(S)
    print("Strain:")
    print(e)
    # --------------- Calculate rank defiency ---------------
    # A positive deficiency here reveals the hourglass modes of reduced integration.
    orderKe=len(Ke)
    nRB=6
    rankKe=np.linalg.matrix_rank(Ke)
    properrank=orderKe-nRB
    rankdef=float(properrank)-float(rankKe)
    print("Rank defiancy: ", rankdef)
def ThreeFieldBBarLocking():
    '''
    This section calculates reaction force, stress and strain in a fully integrated 8 node hexahedron with 8 integration points using the standard Gaussian integration method
    and the B-Bar method of the Hu Washizu three field formulation with diff between volumetric strain in point and avg vol strain plus the actual strain in point
    as added degrees of freedom to illustrate the locking effect and how the B-Bar can handle this.
    '''
    Bv=np.zeros((6,24))
    Be=np.zeros((6,24))
    Ke=np.zeros((24,24))
    e=[]
    s=[]
    # m picks out the volumetric (11, 22, 33) strain components.
    m=(np.matrix([1.,1.,1.,0.,0.,0.])).transpose()
    Gausspoints=np.array([[-1./(3**0.5),-1./(3**0.5),-1./(3**0.5)],[1./(3**0.5),-1./(3**0.5),-1./(3**0.5)],[1./(3**0.5),1./(3**0.5),-1./(3**0.5)],
                          [-1./(3**0.5),1./(3**0.5),-1./(3**0.5)],[-1./(3**0.5),-1./(3**0.5),1./(3**0.5)],[1./(3**0.5),-1./(3**0.5),1./(3**0.5)],
                          [1./(3**0.5),1./(3**0.5),1./(3**0.5)],[-1./(3**0.5),1./(3**0.5),1./(3**0.5)]])
    w=1.
    # First pass: build the volume-averaged volumetric-strain operator Bv.
    for Gauss in Gausspoints:
        B,detJ=fea.shapefunc(nodelocations=nodelocations,exi=Gauss[0],eta=Gauss[1],zeta=Gauss[2])
        Bv=((m*m.transpose()*B*w*detJ))+Bv #B-matrix giving volume averaged (therefore the integration scheme) volumetric strain.
        # NOTE(review): this divide executes on every loop pass, so earlier
        # contributions get divided by Volume repeatedly; dividing once after
        # the loop looks intended -- confirm against the derivation.
        Bv=Bv*(1./Volume) #Divide by total volume to get average.
        ev=Bv*(np.matrix(nodedisplacement).transpose()) #Volumetric strain
    # Second pass: assemble with the modified (B-Bar) strain-displacement matrix.
    for Gauss in Gausspoints:
        B,detJ=fea.shapefunc(nodelocations=nodelocations,exi=Gauss[0],eta=Gauss[1],zeta=Gauss[2])
        Be=(B+((1./3.)*(Bv-(m*m.transpose()*B)))) #Modified B matrix
        eu=Be*(np.matrix(nodedisplacement).transpose()) #Assumed strain in integration point
        e.append(eu) #Append assumed strains in integration points
        S=C*eu #Assumed stress in integration point
        s.append(S) #Append assumed stress integration points
        Ke=(Be.transpose()*C*Be*w*detJ)+Ke #Assemble element stiffness matrix, Ke.
    Fs=Ke*(np.matrix(nodedisplacement).transpose()) #Get reaction forces
    print("--------------------------- B-bar method on eight node element with eight integration points (fully integrated) ----------------------------------")
    print("Reaction forces:")
    print(Fs)
    print("B-Bar Stress field:")
    print(s)
    print("B-Bar strain field:")
    print(e)
    print("Volumetric strain:")
    print(ev[0,0])
main() | [
"36841103+eivindhugaas@users.noreply.github.com"
] | 36841103+eivindhugaas@users.noreply.github.com |
a07f2a35a5b3efeb0be08d7869337ac2d6e3a50f | 2830ae93146495f5591b80b77130da4b80da7f6f | /Cat_OR_Dog.py | ed741fbbfe3e4f779c818691d8c067846d68004d | [] | no_license | Imran-nazeer/Cat-or-Dog | 1a97dd2619380f76feeb1ff4de6f8c3b94776f56 | 76493c928e93e7a316f01f266e0c924c673386f6 | refs/heads/master | 2020-12-02T10:31:34.065741 | 2019-12-30T21:19:09 | 2019-12-30T21:19:09 | 230,980,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py |
# Part 1 - Building the CNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initialising the CNN: two conv+pool stages, then a small dense classifier
# ending in a single sigmoid unit for binary cat-vs-dog output.
classifier = Sequential()
# Step 1 - Convolution (32 filters of 3x3 over 64x64 RGB input)
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling (2x2 max-pool halves each spatial dimension)
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN (binary cross-entropy matches the single sigmoid output)
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
classifier.fit_generator(training_set,
steps_per_epoch = 8000,
epochs = 25,
validation_data = test_set,
validation_steps = 2000)
# Part 3 - Making new predictions
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0] == 1:
prediction = 'dog'
else:
prediction = 'cat' | [
"noreply@github.com"
] | noreply@github.com |
aa13bd841c98bf69edc143608a0dcaf19c026204 | 4cfbc12903651dedbc799f53a8078433196e7919 | /Pre Processing/Topic 7 - Kernal PCA/KERNEL_PCA_WITHOUT_SPLIT.py | 6974fa581f4052b61770920b3c784ba26c4341c3 | [] | no_license | gautam4941/Machine_Learning_Codes | 78bf86ab3caf6ee329c88ff18d25927125627a2c | 0009d12ca207a9b0431ea56decc293588eb447b1 | refs/heads/main | 2023-02-06T18:05:44.154641 | 2023-01-30T17:04:25 | 2023-01-30T17:04:25 | 353,594,523 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | import pandas as pd
data = pd.read_csv('Social_Network_Ads.csv')
print( f"data :- \n{ data }\n" )
print( f"data.columns :- \n{ data.columns }\n" )
x = data.loc[ :, 'Gender' : 'EstimatedSalary' ]
y = data.loc[ :, 'Purchased' ]
print( f"x.isnull().sum() :- \n{ x.isnull().sum() }\n" )
print( f"y.isnull().sum() :- \n{ y.isnull().sum() }\n" )
print( f"x.dtypes :- \n{ x.dtypes }\n" )
print( f"y.dtypes :- \n{ y.dtypes }\n" )
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
x['Gender'] = le.fit_transform( x['Gender'] )
import matplotlib.pyplot as plt
# plt.plot( x['Age'], x['EstimatedSalary'], linestyle = '', marker = '*' )
# plt.xlabel( 'Age' )
# plt.ylabel( 'EstimatedSalary' )
# plt.title( 'Age V/s Salary' )
# plt.show()
from sklearn.decomposition import KernelPCA
kpca = KernelPCA( n_components = 2, kernel = 'rbf' ) #n_components is the number of columns getting trained
x = kpca.fit_transform( x )
print( f"After Kernal PCA, x :- \n{ x }\n" )
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit( x, y )
y_pred = lr.predict( x )
new_x_test = x.T
# plt.plot( x_test[0], x_test[1], linestyle = '', marker = '*' )
# plt.xlabel( 'Age' )
# plt.ylabel( 'EstimatedSalary' )
# plt.title( 'Age V/s Salary' )
# plt.show()
print( f"lr.score( x_test, y_test ) = { lr.score( x, y ) }" ) | [
"noreply@github.com"
] | noreply@github.com |
e33e3af781a4af593bf78acc8dc4120f93f12313 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/wellf.py | 01d938845a41da448d3678520087a53125ba2d11 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 141 | py | ii = [('RoscTTI3.py', 1), ('MedwTAI.py', 1), ('WadeJEB.py', 2), ('DibdTRL.py', 1), ('FitzRNS2.py', 1), ('HogaGMM2.py', 1), ('BeckWRE.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
1d698ea6a946f54475b90a8b3394eff5b5460161 | 4f740158b55850e7029e4a4e19780a81549f5ad4 | /original/wp-includes/feed-atom.py | b0b3cebbf3c36c5c7b8187165b82b270dd6567fb | [] | no_license | dellelce/woppy | 540b52a7783a701e675df566e78ab7e07334f614 | 341bef57b43b46a3506a8fe5046aa71624f5eb02 | refs/heads/master | 2021-01-22T12:02:23.522892 | 2019-03-12T15:11:42 | 2019-03-12T15:11:42 | 11,080,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,623 | py | #!/usr/bin/python
#-*- coding: utf-8 -*-
header("Content-Type: "+feed_content_type("atom")+"; charset="+get_option("blog_charset"), True)
more = 1
print("<?xml version="1.0" encoding=""+get_option("blog_charset")+""?"+">")
print("<feed\n xmlns="http://www.w3.org/2005/Atom"\n xmlns:thr="http://purl.org/syndication/thread/1.0"\n xml:lang="")
bloginfo_rss("language")
print(""\n xml:base="")
bloginfo_rss("url")
print("/wp-atom.php"\n ")
do_action("atom_ns")
print(" >\n <title type="text">")
bloginfo_rss("name")
wp_title_rss()
print("</title>\n <subtitle type="text">")
bloginfo_rss("description")
print("</subtitle>\n\n <updated>")
print(mysql2date("Y-m-d\TH:i:s\Z", get_lastpostmodified("GMT"), False))
print("</updated>\n\n <link rel="alternate" type="")
bloginfo_rss("html_type")
print("" href="")
bloginfo_rss("url")
print("" />\n <id>")
bloginfo("atom_url")
print("</id>\n <link rel="self" type="application/atom+xml" href="")
self_link()
print("" />\n\n ")
do_action("atom_head")
print(" ")
while :
the_post()
print(" <entry>\n <author>\n <name>")
the_author()
print("</name>\n ")
author_url = get_the_author_meta("url")
if !empty(author_url):
print(" <uri>")
the_author_meta("url")
print("</uri>\n ")
do_action("atom_author")
print(" </author>\n <title type="")
html_type_rss()
print(""><![CDATA[")
the_title_rss()
print("]]></title>\n <link rel="alternate" type="")
bloginfo_rss("html_type")
print("" href="")
the_permalink_rss()
print("" />\n <id>")
the_guid()
print("</id>\n <updated>")
print(get_post_modified_time("Y-m-d\TH:i:s\Z", True))
print("</updated>\n <published>")
print(get_post_time("Y-m-d\TH:i:s\Z", True))
print("</published>\n ")
the_category_rss("atom")
print(" <summary type="")
html_type_rss()
print(""><![CDATA[")
the_excerpt_rss()
print("]]></summary>\n")
if !get_option("rss_use_excerpt"):
print(" <content type="")
html_type_rss()
print("" xml:base="")
the_permalink_rss()
print(""><![CDATA[")
the_content_feed("atom")
print("]]></content>\n")
atom_enclosure()
do_action("atom_entry")
print(" <link rel="replies" type="")
bloginfo_rss("html_type")
print("" href="")
the_permalink_rss()
print("#comments" thr:count="")
print(get_comments_number())
print(""/>\n <link rel="replies" type="application/atom+xml" href="")
print(esc_url(get_post_comments_feed_link(0, "atom")))
print("" thr:count="")
print(get_comments_number())
print(""/>\n <thr:total>")
print(get_comments_number())
print("</thr:total>\n </entry>\n ")
print("</feed>\n")
| [
"adellelce@hotmail.com"
] | adellelce@hotmail.com |
14eab72baf2e987e62f17a2259e17e1057343f12 | 293247b29c1cb4823cb9694860e44adb4ac5846d | /lib.py | eb25e21cea071a6aeaf6ddfdb922f0ba04f86a11 | [] | no_license | cwyl02/word-count | bfb40da5725974247f83078b0f31fe769450c24c | 3c2c6ff89327f9d51de627360be72f6eb3945610 | refs/heads/master | 2023-03-01T04:25:47.990306 | 2021-02-10T05:10:08 | 2021-02-10T05:10:36 | 336,875,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from collections.abc import Iterable
def calculate(input_iter: Iterable) -> int:
word_count = 0
for line in input_iter:
word_count += count_word(line)
return word_count
def count_word(line: str) -> int:
words = line.split()
return len(words)
| [
"york.chen.hz@hotmail.com"
] | york.chen.hz@hotmail.com |
6aa9d0fb579430211f8e0b9a378ec0e2f1779e8b | ab494b52b1ed141b3f06af254c2271f7eab43af8 | /Django/Django_Intro/Time_Display/timedisplay/timedisplay/urls.py | 60ee53d5d8433d9c296bd5846ed35bd8187b5af4 | [] | no_license | richminlee/Python_Stack | 4c22891e63ead8c502d4cc3e551d34d0da00324b | 8fcaf28eb95b1ea6d8a4b397ddba8a6798a844e0 | refs/heads/master | 2022-06-02T15:16:55.911338 | 2020-05-05T01:16:38 | 2020-05-05T01:16:38 | 261,333,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from django.urls import path, include
urlpatterns = [
path('', include('time_display.urls')),] | [
"lee.richardm@gmail.com"
] | lee.richardm@gmail.com |
f0301bbd559405a5974a42522f7786833d95dfb0 | 5dc3639ebd7381a0ef17711598a3d9777fba9102 | /Aula 07/Ex009.py | 93434d9ce69f77222ec6f1a8357fb1c8170bf24d | [] | no_license | diegopnh/AulaPy2 | 53e389536c0c9bc793b298277f8433d6ef92ee27 | d40911b325ea707899fe82c7e91a68e52d145c2c | refs/heads/master | 2022-11-17T15:45:32.048258 | 2020-07-11T04:45:19 | 2020-07-11T04:45:19 | 269,830,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | a = int(input('Digite o número que você quer a tabuada: '))
print('{} x {} = {}'.format(a,1,a*1))
print('{} x {} = {}'.format(a,2,a*2))
print('{} x {} = {}'.format(a,3,a*3))
print('{} x {} = {}'.format(a,4,a*4))
print('{} x {} = {}'.format(a,5,a*5))
print('{} x {} = {}'.format(a,6,a*6))
print('{} x {} = {}'.format(a,7,a*7))
print('{} x {} = {}'.format(a,8,a*8))
print('{} x {} = {}'.format(a,9,a*9))
print('{} x {} = {}'.format(a,10,a*10)) | [
"diegopnh@github.com"
] | diegopnh@github.com |
9414d045e5d47c04d4503169647515fc1145c0f2 | 37a7f5db37d3acf10bf01613b50240c2cae9aff6 | /robot.py | 304bbcbecb98f7a810b82ead80a901728d0ea968 | [] | no_license | SnovvyOwl/KHU_KongBot1 | 9377d3046c60f5818e51f946e1f149819838a41b | 5c3365a9e6785514d87151e5e1e9cd9d1e92264f | refs/heads/main | 2023-06-15T23:39:42.258850 | 2021-07-12T07:17:11 | 2021-07-12T07:17:11 | 385,155,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,399 | py | import motor
import sensor
class Master:
def __init__(self):
self.gyro=sensor.Gyro()
self.PLServo=motor.Servo(14,6.77)
self.PRServo=motor.Servo(12,6.77)
self.CamServo=motor.Servo(19,6.77) #NO PWM CHANNEL
self.from_client=[0,0,0,0,0,0,0,0,0]
self.CprevEr=0
self.CsumEr=0
self.PprevEr=0
self.PsumEr=0
def move(self,ANGLEVEL):
# PID GAIN
kp=20
ki=10
kd=5
target=ANGLEVEL
self.gyro.read()
AngVYER=target-self.gyro.yout
controlgain+=(AngVYER*kp)+(self.PprevEr*kd)+(self.PsumEr*ki)
##We need theta' derivative to PWM
controlgain=max(min(12,controlgain),0)
self.PLServo.turn(controlgain)
self.PRServo.turn(12-controlgain)
self.stableVision()
self.PprevEr=AngVYER
self.PsumEr+=AngVYER
def stableVision(self,client):
# PID GAIN
kp=20
ki=10
kd=5
self.gyro.read()
Er=self.Gyro.roty-client # maybe we need sampling time....
controlgain+=(Er*kp)+(self.CprevEr*kd)+(self.CsumEr*ki)
controlgain=max(min(12,controlgain),0)
self.CamServo.turn(controlgain)
self.CprevEr=Er
self.CsumEr+=Er
def stop(self):
PLServo.stop()
PRServo.stop()
CamServo.stop()
class Slave:
def __init__(self):
self.wheel=motor.DCmotor(12,19)
self.AHRS=sensor.serialConnect("/dev/ttyUSB0",115200)
self.prevEr=0
self.sumEr=0
self.controlgain=0
def turn(self,Dir,INPUT):
self.prevEr=0
self.sumEr=0
if Dir == 'TR' :
motor.DCmotor.CW(INPUT)
elif Dir == 'TL' :
motor.DCmotor.CCW(INPUT)
def horCAMstable(self,initYAW):
# PID GAIN
kp=20
ki=10
kd=5
#init error
self.AHRS.read()
YawER=initYAW-float(self.AHRS.splData[2])
self.controlgain+=(YawER*kp)+((YawER-self.prevEr)*kd)+(self.sumEr*ki)
if self.controlgain>0:
input=max(min(100,self.controlgain),0)
self.wheel.CW(input)
else:
input=-self.controlgain
input=max(min(100,input),0)
self.wheel.CCW(input)
self.prevEr=YawER
self.sumEr+=YawER
def stop(self):
self.wheel.stop() | [
"owl0614@naver.com"
] | owl0614@naver.com |
dbb1dad324fd58d7f4de5dff10e05897c96f914d | 73a5b04df64fd247facc8db96bf8691a5b8a4118 | /.vscode/ProgrammingConcept/checkEvenOdd.py | eba89746114a7a843b5c81835ef0bac192856156 | [] | no_license | skamjadali7/Python-Programming | 2803314288c4eb28bd008f940dbcebfe43c5b2b5 | cce7078cfe7256527749af734c98ab5bc8cfa51f | refs/heads/master | 2022-12-05T18:21:51.313830 | 2020-08-27T18:36:11 | 2020-08-27T18:36:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | def EvenOdd(n):
#Declaring EvenOdd Function with parament as n in it
if n%2==0:
print("Number {} is even".format(n))
else:
print("Number {} is odd".format(n))
#Calling above EvenOdd Function with argument
EvenOdd(5)
EvenOdd(4) | [
"amjad.shaikh@indusface.com"
] | amjad.shaikh@indusface.com |
e38fb54c09c546aaa256ab269a24c5dd75f7081e | 6196bd9ebb26af3ef559a73e73a5af9c918bc6b6 | /backup_lstm.py | 71d90ff86e89cd366e22ba799dfa165462a63ee2 | [] | no_license | leanpha/P_LSTM | 2080e3b4a9c578caeb4096a84609d83049e82131 | 7e71774a6851283ea9dabb8516332e55c8ffa48c | refs/heads/master | 2021-09-24T07:25:48.919478 | 2018-10-05T09:14:56 | 2018-10-05T09:14:56 | 116,152,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,192 | py | # Credit: Perform sentiment analysis with LSTMs, using TensorFlow - Adit Deshpande @ O*Reilly
# customized by HoangNguyen
#
# IMPORT LIB
# =========================================
import numpy as np
import tensorflow as tf
import collections
# import matplotlib.pyplot as plt
import re
# enable logging
tf.logging.set_verbosity(tf.logging.INFO)
#
# HYPER PARAMETERS
# =========================================
params = {
'BATCH_SIZE': 128,
'EPOCHS': 100000,
'MAX_DOCUMENT_LENGTH': 0,
'EMBEDDING_SIZE': 128,
'LSTM_UNITS': 64,
'NUM_CLASSES': 5
}
#
# LOAD DATA
# =========================================
# load wordsList & wordVectors
def load_npy():
wordsList = np.load('wordsList.npy')
wordsList = wordsList.tolist()
print('wordsList loaded!')
wordVectors = np.load('wordVectors.npy')
print('wordVectors loaded!')
return wordsList, wordVectors
wordsList, wordVectors = load_npy()
print("wordsList's lenght: ", len(wordsList))
print("wordVectors' shape: ", wordVectors.shape)
def clean_lines(f):
out = True
for l in f:
l = l.strip().lower()
if l:
if out:
yield l
out = False
else:
out = True
def clean_string(l):
l = l[2:]
l = re.sub("[?\.,\-!%*\(\)\^\$\#\@\"\']", "", l)
l = l.split()
return l
def lookup_word_ids(f):
ids = np.zeros((len(f), params['MAX_DOCUMENT_LENGTH']), dtype='int32')
line_index = 0
for l in f:
word_index = 0
for w in l:
try:
ids[line_index][word_index] = wordsList.index(w)
except ValueError:
ids[line_index][word_index] = wordsList.index('UNK')
word_index += 1
if word_index >= params['MAX_DOCUMENT_LENGTH']:
break
line_index += 1
return ids
#
# Load train data
# =========================================
with open('train.txt', 'r', encoding='utf-8') as f:
file = f.read().splitlines()
file = [line for line in clean_lines(file)]
# Extract labels
labels = np.array([int(l[0]) for l in file])
# Extract sentences
sentences = [clean_string(l) for l in file]
# Edit params
params['MAX_DOCUMENT_LENGTH'] = 50
params['EMBEDDING_SIZE'] = wordVectors.shape[1]
# convert sentences to id sequences
ids = lookup_word_ids(sentences)
del file
print("Training data loaded!")
#
# Load test data
# =========================================
with open('test.txt', 'r', encoding='utf-8') as f:
file = f.read().splitlines()
file = [line for line in clean_lines(file)]
labels_test = np.array([int(l[0]) for l in file])
sentences_test = [clean_string(l) for l in file]
ids_test = lookup_word_ids(sentences_test)
del file
print("Test data loaded!")
#
# CREATE MODEL
# =========================================
def lstm_model_fn(features, labels, mode, params):
onehot_labels = tf.one_hot(labels, params['NUM_CLASSES'], 1, 0)
embed = tf.nn.embedding_lookup(wordVectors, features['input'])
# embed = tf.unstack(embed, axis=1)
loss, train_op, pred = None, None, None
lstmCell = tf.contrib.rnn.BasicLSTMCell(params['LSTM_UNITS'])
lstmCell = tf.contrib.rnn.DropoutWrapper(cell=lstmCell, output_keep_prob=0.75)
# value, _ = tf.nn.static_rnn(lstmCell, embed, dtype=tf.float32)
value, _ = tf.nn.dynamic_rnn(lstmCell, embed, dtype=tf.float32)
value = tf.transpose(value, [1, 0, 2])
last = tf.gather(value, int(value.get_shape()[0]) - 1)
# last = value[-1]
logits = tf.layers.dense(last, params['NUM_CLASSES'])
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels))
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer()
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
if mode == tf.estimator.ModeKeys.PREDICT:
pred = tf.nn.softmax(logits=logits)
eval_metrics_op = {'accuracy': tf.metrics.accuracy(tf.argmax(onehot_labels), tf.argmax(logits))}
return tf.estimator.EstimatorSpec(mode, pred, loss, train_op, eval_metrics_op)
classifier = tf.estimator.Estimator(model_fn=lstm_model_fn, model_dir='backup_senti_lstm', params=params)
print("LSTM model created!")
#
# CREATE INPUT FUNCTION
# =========================================
train_input_fn = tf.estimator.inputs.numpy_input_fn(
{'input': ids},
labels,
batch_size=params['BATCH_SIZE'],
num_epochs=params['EPOCHS'],
shuffle=True)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
{'input': ids_test},
labels_test,
batch_size=params['BATCH_SIZE'],
num_epochs=1,
shuffle=False)
#
# TRAINING
# =========================================
train_spec = tf.estimator.TrainSpec(train_input_fn)
eval_spec = tf.estimator.EvalSpec(test_input_fn, throttle_secs=600)
tf.estimator.train_and_evaluate(classifier, train_spec=train_spec, eval_spec=eval_spec)
print("FINISH TRAINING")
#
# EVALUATE
# =========================================
ev = classifier.evaluate(test_input_fn)
print("FINISH EVALUATING")
print(ev)
| [
"leanpha@phuyen.edu.vn"
] | leanpha@phuyen.edu.vn |
f908787619c08ee875c9a17ad5e73728c6ae5708 | 42de90de47edff1b3d89d85d65e05b1adfe15689 | /SanityTests/ARCHIVE/Harvester/tests/steps/sanity_check.py | c634e8088588565601c016f4a1cb535441d8c725 | [] | no_license | ahalester/IRM_CI_F2F_2018 | f9549b519a75d589a352a87c4c509c2eb03d5a12 | 61921c2ce9f8e08abf1477eaa87c259c2255d1a2 | refs/heads/master | 2021-07-21T09:50:31.159107 | 2019-11-21T18:22:25 | 2019-11-21T18:22:25 | 223,235,940 | 0 | 0 | null | 2020-10-13T17:39:11 | 2019-11-21T18:14:49 | Java | UTF-8 | Python | false | false | 5,861 | py | from behave import *
from hamcrest import *
from ConfigParser import *
from utils import fs, console
import os
import re
import psutil
import requests
import xml.dom.minidom
import cx_Oracle
# --------------------------------
# Given's
# --------------------------------
@given('I have installed "{program}"')
def step_impl(context, program):
assert_that(console.term_exec("which %s" % program), not contains_string('no %s in' % program))
@given('logs are stored in "{logs_path}"')
def step_impl(context, logs_path):
context.logs_path = logs_path
@given('I use "{url}" as bibliography API')
def step_impl(context, url):
context.bib_api = url
@given('"{env_var}" environment variable is set')
def step_impl(context, env_var):
assert_that(os.environ, has_key(env_var))
context.acsdata_path = os.environ.get(env_var)
@given('the configuration file "{config_file}" has the property "{property_key}" set with value "{property_value}"')
def step_impl(context, config_file, property_key, property_value):
assert_that(context.parsed_config[config_file].get('properties', property_key), property_value)
@given('I use tnsnames.ora')
def step_impl(context):
tnsnames_file = file(os.path.join(context.acsdata_path, 'config/tnsnames.ora')).read()
context.tnsnames = {}
tns_re = "^(\w+?)\s?=.*?HOST\s?=\s?(.+?)\).*?PORT\s?=\s?(\d+?)\).*?SERVICE_NAME\s?=\s?(.+?)\)"
for match in re.finditer(tns_re, tnsnames_file, re.M+re.S):
t = match.groups()
context.tnsnames[t[0]] = "%s:%s/%s" % t[1:]
@given('I use properties file "{properties_file}"')
def step_impl(context, properties_file):
if not hasattr(context, 'parsed_config'): context.parsed_config = dict()
print(context.parsed_config)
with open(os.path.join(context.acsdata_path, 'config', properties_file)) as f:
parsed_property = filter(lambda x: len(x) == 2, [line.strip().split('=') for line in f])
defaults = dict(parsed_property)
config = ConfigParser(defaults)
config.add_section('properties')
context.parsed_config[properties_file] = config
@given('I use the ASDM with uid "{asdm_uid}"')
def step_impl(context, asdm_uid):
context.asdm_uid = asdm_uid
@given('I use "{db_type}" database defined in "{config_file}"')
def step_impl(context, db_type, config_file):
context.db_name = context.parsed_config[config_file].get('properties', '%s.connection' % db_type).split('@')[1]
context.db_user = context.parsed_config[config_file].get('properties', '%s.user' % db_type)
context.db_pass = context.parsed_config[config_file].get('properties', '%s.passwd' % db_type)
# --------------------------------
# When's
# --------------------------------
@when('I look for the free space on the mountpoint of "{path}"')
def step_impl(context, path):
context.mountpoint_freespace_bytes = psutil.disk_usage(fs.find_mount_point(path)).free
@when('I look for total RAM size')
def step_impl(context):
context.total_ram_size = psutil.virtual_memory().total
@when('I fetch bibliography for bibcode "{bibcode}"')
def step_impl(context, bibcode):
context.telbib_response = requests.get(context.bib_api + "/api.php?bibcode=" + bibcode)
@when('I ping to defined database server')
def step_impl(context):
context.ping_status = console.term_exec("ping -c 1 %s" % "orasw.apotest.alma.cl")
@when('I run harvester in dry-run mode for given ASDM')
def step_impl(context):
context.stdout = console.term_exec('~/scripts/harvest -c -a %s -x -y ALL -d -uid %s' % (os.path.join(context.acsdata_path, 'config/', 'archiveConfig.properties'), context.asdm_uid))
@when('I connect to database server')
def step_impl(context):
print(context.tnsnames)
context.database_conn = cx_Oracle.connect('%s/%s@%s' % (context.db_user, context.db_pass, context.tnsnames[context.db_name]))
# --------------------------------
# Then's
# --------------------------------
@then('total RAM size should be greater than "{size}"')
def step_impl(context, size):
assert_that(context.total_ram_size, greater_than(fs.human2bytes(size)))
@then('mountpoint of "{path}" should has at least "{size}" free space')
def step_impl(context, path, size):
assert_that(context.mountpoint_freespace_bytes, greater_than_or_equal_to(fs.human2bytes(size)))
@then('version should be "{version}"')
def step_impl(context, version):
assert_that(version, equal_to(version))
@then('I should get a response with status code "{status_code:d}"')
def step_impl(context, status_code):
assert_that(context.telbib_response.status_code, equal_to(status_code))
@then('I should get the bibliography in xml format')
def step_impl(context):
xml.dom.minidom.parseString(context.telbib_response.content)
@then('I should get ping response')
def step_impl(context):
assert_that(context.ping_status, not contains_string('cannot resolve'))
@then('I should not get errors in stdout')
def step_impl(context):
assert_that(context.stdout, is_not(any_of(contains_string('Error'), contains_string('ERROR'), contains_string('SEVERE'), contains_string('CRITICAL'))))
@then('ASA tables should exist')
def step_impl(context):
asa_tables = ['ASA_BIBLIOGRAPHY', 'ASA_COLUMNS', 'ASA_ENERGY', 'ASA_MAIL_TEMPLATES', 'ASA_OUS', 'ASA_OUS_REDSHIFT', 'ASA_OUS_TEMP', 'ASA_PRODUCT_FILES', 'ASA_PROJECT', 'ASA_PROJECT_BIBLIOGRAPHY', 'ASA_SCIENCE', 'ASA_TABLES', 'ASA_TAP_SCHEMAS']
asa_tables_sql_list = ",".join(map(lambda x: "'" + x + "'", asa_tables))
sql_statement = "select count(*) from all_objects where object_type in ('TABLE','VIEW') and object_name in (%s)" % asa_tables_sql_list
print('sql:', sql_statement)
tables_num = int(context.database_conn.cursor().execute(sql_statement).fetchone()[0])
assert_that(tables_num, equal_to(len(asa_tables)))
| [
"ahale@anhydrite.cv.nrao.edu"
] | ahale@anhydrite.cv.nrao.edu |
cfb83bb056e3c43239de5025f6fb5ae8dba0edec | 2bedb17a7eb18c47d61128464ca9d3b0eece1455 | /usuarios/models.py | b7ddc8d8844119edab19c217eac175e9d7bca445 | [] | no_license | ISPM-Benguela/escolari | a520b8425c46ef79f4b96e6b4e4f0dc0ef5bc80d | c1d59df088f3e67e1f63ab1c30127b54aeb042c8 | refs/heads/master | 2020-04-08T20:28:41.918361 | 2019-02-10T09:02:26 | 2019-02-10T09:02:26 | 159,700,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from departamentos.models import Departamentos
from disciplinas.models import Disciplina
from salas.models import Turmas
from propinas.models import Propinas
class Perfil(models.Model):
ADMIN = 'A'
FUNCIONARIO = 'F'
ESTUDANTE= 'E'
PROFESSOR = 'P'
PERFIL = (
(ADMIN, _('Administrador')),
(FUNCIONARIO, _('Funcionario')),
(ESTUDANTE, _('Estudante')),
(PROFESSOR, _('Professor'))
)
user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True, null=True)
departamento = models.ForeignKey(Departamentos, blank=True, null=True)
primeiro_nome = models.CharField(_('Primeiro nome'), max_length=250, blank=True, null=True, default="")
segundo_nome = models.CharField(_('Segundo nome'), max_length=250, blank=True, null=True, default="")
tipo_perfil = models.CharField(_('Tipo de perfil'), max_length=1, choices=PERFIL, default=FUNCIONARIO, blank=True, null=True)
morada = models.CharField(_('nome da turma'), max_length=250, blank=True, null=True, default="")
foto = models.FileField(default='perfil/default.jpg', upload_to='perfil/', blank=True, null=True)
# perfil do estudante
turma = models.ForeignKey(Turmas, blank=True, null=True)
propinas = models.ManyToManyField(Propinas, blank=True)
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def create_user_perfil(sender, instance, created, **kwargs):
if created:
Perfil.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_perfil(sender, instance, **kwargs):
instance.perfil.save()
def get_propina(self, id):
# return Propinas.objects.latest()
return Propinas.objects.all()
def get_full_name(self):
return "%s %s" % (self.primeiro_nome, self.segundo_nome)
| [
"jose.tandavala@gmail.com"
] | jose.tandavala@gmail.com |
856d1182ec17e0160261439495d9ac3909a68d26 | 0f9d90476e950cb6c383c94f1e151b0314523f3e | /bifrost.py | f2432d0d78bbd0d08c239458c0dece3123b95796 | [
"MIT"
] | permissive | pedrofausto/bifrost | e5a55cca99701dc989cd9384abe5ee64a2e102d7 | 6bc4ae9cb97b9f30ae6f6ac5ef16db3657cfaeb4 | refs/heads/master | 2020-12-25T17:17:20.530645 | 2014-05-20T00:12:33 | 2014-05-20T00:12:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,449 | py | #!/usr/bin/env python
#
# 88 88 ad88 88 88
# 88 "" d8" "" "" ,d
# 88 88 88
# 88,dPPYba, 88 MM88MMM 8b,dPPYba, ,adPPYba, ,adPPYba, MM88MMM
# 88P' "8a 88 88 88P' "Y8 a8" "8a I8[ "" 88
# 88 d8 88 88 88 8b d8 `"Y8ba, 88
# 88b, ,a8" 88 88 88 "8a, ,a8" aa ]8I 88,
# 8Y"Ybbd8"' 88 88 88 `"YbbdP"' `"YbbdP"' "Y888
#
# INTELLIGENT WEB APPLICATION FIREWALL
# by: Jan Seidl
#
# THIS IS PRE-ALPHA SOFTWARE
# IT'S BUGGY, FALSE-POSITIVE-Y AND SUCH
# DO *NOT* USE THIS IN PRODUCTION
# - I repeat -
# DO ->*NOT*<- USE THIS IN PRODUCTION
#
# Global WAF instance
waf = None
#######################################
# Constants
#######################################
CONFIG_FILE = 'bifrost.conf'
DATABASE_FILE = 'bifrost.db'
MODE_TRAINING = 1
MODE_OPERATIONAL = 2
MODE_BYPASS = 3
ACTION_DROP = 1
ACTION_PASS = 2
CHUNK_END = '0\r\n\r\n'
#######################################
# Imports
#######################################
import ConfigParser, magic, Cookie
import sys, pprint, sqlite3, signal, textwrap
from twisted.internet import protocol, reactor
from BaseHTTPServer import BaseHTTPRequestHandler
import cgi
from httplib import HTTPResponse as HTTPR
from StringIO import StringIO
from twisted.python import log
#######################################
# Util functions
#######################################
def in_range(minval, maxval, value, tolerance):
	"""Return True if value falls inside [minval, maxval] after widening
	each bound by the relative tolerance (e.g. 0.1 widens by 10%).

	All arguments may be numbers or numeric strings; each is coerced to
	float.  The widening uses abs() so the interval always grows outwards:
	previously a negative bound was shifted *inwards* (maxval += maxval*t),
	silently narrowing the accepted range for negative bounds.
	"""
	value = float(value)
	tolerance = float(tolerance)
	maxval = float(maxval)
	minval = float(minval)
	# Widen symmetrically outwards, independent of the bounds' signs.
	maxval += abs(maxval) * tolerance
	minval -= abs(minval) * tolerance
	return (minval <= value <= maxval)
def in_average(mean, value, tolerance):
	"""Return True if value lies within mean +/- (mean * tolerance)."""
	delta = mean * tolerance
	return (mean - delta) <= float(value) <= (mean + delta)
# http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
	# ANSI terminal escape sequences for colored console output.
	# Usage: print bcolors.WARNING + "message" + bcolors.ENDC
	HEADER = '\033[95m'	# magenta
	OKBLUE = '\033[94m'	# blue
	OKGREEN = '\033[92m'	# green
	WARNING = '\033[93m'	# yellow -- used for anomaly notices
	FAIL = '\033[91m'	# red -- used when dropping a connection
	ENDC = '\033[0m'	# reset back to the terminal default
# http://pythonwise.blogspot.com.br/2010/02/parse-http-response.html
class FakeSocket(StringIO):
	"""A StringIO that masquerades as a socket just enough for httplib.

	httplib.HTTPResponse calls sock.makefile() to obtain a readable file
	object; returning ourselves lets a raw response string be parsed as
	if it had arrived over a real socket.
	"""
	def makefile(self, *args, **kwargs):
		# Mode/bufsize arguments are irrelevant: the buffer is already readable.
		return self
#######################################
# HTTP Request and Response Classes
#######################################
# http://stackoverflow.com/questions/2115410/does-python-have-a-module-for-parsing-http-requests-and-responses
class HTTPRequest(BaseHTTPRequestHandler):
	"""Parse a raw HTTP request string captured off the wire.

	Reuses BaseHTTPRequestHandler's parser without any socket plumbing:
	feed the raw request text in, then read .command, .path, .headers,
	.form (POST only) and, on a malformed request, .error_code /
	.error_message.
	"""
	# Kept for backward compatibility with any code that reads the class
	# attribute directly; instances always get their own dict in __init__.
	form = {}
	def __init__(self, request_text):
		# Give every instance its own mapping.  Previously non-POST
		# requests all aliased the single class-level dict, so a caller
		# mutating one request's .form leaked into every other request.
		self.form = {}
		self.rfile = StringIO(request_text)
		self.raw_requestline = self.rfile.readline()
		self.error_code = self.error_message = None
		self.parse_request()
		if self.command == 'POST':
			# Parse the form data posted
			self.form = cgi.FieldStorage(
				fp=self.rfile,
				headers=self.headers,
				environ={'REQUEST_METHOD':'POST',
					'CONTENT_TYPE':self.headers['Content-Type'],
				})
	def send_error(self, code, message):
		# Override: record the parse failure instead of writing an HTTP
		# error back (there is no client socket behind this parser).
		self.error_code = code
		self.error_message = message
class HTTPResponse():
	"""Parse a raw HTTP response string captured off the wire.

	Exposes the parsed header map plus the two fields the WAF inspects:
	Content-Length (.length) and Transfer-Encoding (.chunked).
	"""
	# Class-level defaults, kept for backward compatibility; instances
	# always shadow them in __init__.
	headers = {}
	fp = None
	length = None
	chunked = None
	def __init__(self, response_text):
		# Fresh dict per instance.  The previous code mutated the
		# class-level dict, so headers silently accumulated across every
		# response the proxy ever parsed.
		self.headers = {}
		self.fp = FakeSocket(response_text)
		res = HTTPR(self.fp)
		res.begin()
		for name, value in res.getheaders():
			self.headers[name] = value
		self.length = res.getheader('Content-Length')
		self.chunked = res.getheader('Transfer-Encoding')
#######################################
# Endpoints
# Adapted from http://www.mostthingsweb.com/2013/08/a-basic-man-in-the-middle-proxy-with-twisted/
# Adapted from http://stackoverflow.com/a/15645169/221061
#######################################
##
# ServerProtocol = Client -> WAF | WAF -> Client
##
class ServerProtocol(protocol.Protocol):
def __init__(self):
self.buffer = None
self.client = None
def connectionMade(self):
factory = protocol.ClientFactory()
factory.protocol = ClientProtocol
factory.server = self
reactor.connectTCP(waf.server_addr, waf.server_port, factory)
def drop_connection(self):
# FIX ME -- NEED TO CLOSE THIS BOTH ENDS
print bcolors.FAIL + "Dropping connection." + bcolors.ENDC
self.transport.loseWriteConnection()
self.transport.loseConnection()
self.transport.abortConnection()
# Client => Proxy
def dataReceived(self, data):
if self.client:
self.client.write(data)
else:
self.buffer = data
# Proxy => Client
def write(self, data):
self.transport.write(data)
##
# ClientProtocol = Server -> WAF | WAF -> Server
##
class ClientProtocol(protocol.Protocol):
client_request = None
response_buffer = None
request_buffer = None
chunked = False
request_size = 0
	def connectionMade(self):
		# Backend connection established: register ourselves with the
		# client-facing protocol, then flush any bytes the client sent
		# while this connection was still being set up.
		self.factory.server.client = self
		self.write(self.factory.server.buffer)
		self.factory.server.buffer = ''
def valid_files(self, post):
mimes = []
# Get uploaded files' mime
for field in post.keys():
if post[field].filename:
file_data = post[field].file.read()
mime = waf.magic.buffer(file_data)
mimes.append(mime)
dataset = self.fetch_set('uploads', 'mimetype', None, path_only = True)
for mime in set(mimes):
if mime not in dataset:
print bcolors.WARNING + "[ANOMALY] File mimetype '%s' not allowed on requests for this URL ('%s')." % (mime, self.client_request.path) + bcolors.ENDC
return False
return True
def valid_post(self, post_keys, r_type):
dataset = self.fetch_set('postdata', 'field', None, path_only = True)
for post in post_keys:
post_name = post.strip().lower()
if post_name not in dataset:
print bcolors.WARNING + "[ANOMALY] POST field '%s' not allowed on %s for this URL ('%s')." % (post_name, 'requests' if r_type == 'req' else 'responses', self.client_request.path) + bcolors.ENDC
return False
return True
def valid_headers(self, header_keys, r_type):
dataset = self.fetch_set('headers', 'header', r_type)
for header in header_keys:
header_name = header.strip().lower()
if header_name not in dataset:
print bcolors.WARNING + "[ANOMALY] Header '%s' not allowed on %s for this URL/method ('%s','%s')." % (header_name, 'requests' if r_type == 'req' else 'responses', self.client_request.command, self.client_request.path) + bcolors.ENDC
return False
return True
def valid_cookies(self, cookie_str, r_type):
ck = Cookie.SimpleCookie()
ck.load(cookie_str)
dataset = self.fetch_set('cookies', 'cookie', r_type)
for cookie in ck.keys():
cookie_name = cookie.strip().lower()
if cookie_name not in dataset:
print bcolors.WARNING + "[ANOMALY] Cookie '%s' not allowed on %s for this URL/method ('%s','%s')." % (cookie_name, 'requests' if r_type == 'req' else 'responses', self.client_request.command, self.client_request.path) + bcolors.ENDC
return False
return True
def valid_range(self, name, ranges, value):
tolerance = waf.config.get('tolerance', name)
ret = True
if waf.config.get('analyzer', name) == 'avg':
ret = in_average(ranges[2], value, tolerance)
else:
ret = in_range(ranges[0], ranges[1], value, tolerance)
return ret
def fetch_set(self, table, field, r_type, path_only=False):
items = []
if path_only:
cursor = waf.cursor.execute("SELECT %s FROM %s WHERE path = ?" % (field, table), (self.client_request.path,))
else:
cursor = waf.cursor.execute("SELECT %s FROM %s WHERE path = ? and method = ? AND type = ?" % (field, table), (self.client_request.path, self.client_request.command, r_type))
for row in cursor:
items.append(row[0])
return set(items)
def fetch_averages(self, path, r_type):
# Get averages
query = "SELECT MIN(headers_qty) as min_hqty, \
MAX(headers_qty) as max_hqty, \
AVG(headers_qty) as hqty, \
MIN(headers_size) as mix_hsize, \
MAX(headers_size) as max_hsize, \
AVG(headers_size) as hsize, \
MIN(content_size) as min_csize, \
MAX(content_size) as max_csize, \
AVG(content_size) as csize \
FROM urls WHERE path = ? AND type = ?"
waf.cursor.execute(query, (path,r_type))
return waf.cursor.fetchone()
def analyzeRequest(self):
score = 0
request = self.client_request
command = request.command
path = request.path
# Check if page can be acessed within given method
waf.cursor.execute("SELECT method FROM urls WHERE path = ? AND method = ? AND type = 'req' GROUP BY METHOD", (path, command) );
methods = waf.cursor.fetchone()
if methods is None:
print bcolors.FAIL + "[ANOMALY] URL/method ('%s','%s') not in database." % (command, path) + bcolors.ENDC
if waf.unknown_urls_action == ACTION_PASS:
return True
else:
self.request_buffer = None
return False
averages = self.fetch_averages(path, 'req')
# Content SIZE
header_size = len(str(request.headers))
content_size = (len(self.request_buffer)-header_size)
if not self.valid_range('request_content_size', averages[6:9], content_size):
print bcolors.WARNING + "[ANOMALY] URL '%s' has an unexpected request content size (%d)." % (path, content_size) + bcolors.ENDC
score += waf.config.getint('scorer', 'request_content_size')
# Check for valid cookies
if waf.config.getint('analyzer', 'request_cookies') == 1 and 'Cookie' in request.headers:
if not self.valid_cookies(request.headers['Cookie'], 'req'):
score += waf.config.getint('scorer', 'request_cookies')
# Header sanity
if waf.config.getint('analyzer', 'request_headers') == 1:
if not self.valid_headers(request.headers.keys(), 'req'):
score += waf.config.getint('scorer', 'request_headers')
# POST sanity
if command == 'POST' and waf.config.getint('analyzer', 'request_postdata') == 1:
if not self.valid_post(request.form.keys(), 'req'):
score += waf.config.getint('scorer', 'request_postdata')
# Uploaded File MIME type sanity
if command == 'POST' and waf.config.getint('analyzer', 'upload_filetype') == 1:
if not self.valid_files(request.form):
score += waf.config.getint('scorer', 'upload_filetype')
threshold = waf.config.getint('enforcer', 'request_threshold')
if score > threshold:
print bcolors.FAIL + "[THREAT] URL '%s' scored as malicious (%d/%d)." % (path, score, threshold) + bcolors.ENDC
if waf.config.get('enforcer', 'action') == 'drop':
return False
return True
def analyzeResponse(self, response):
command = self.client_request.command
path = self.client_request.path
# Check if page can be acessed within given method
waf.cursor.execute("SELECT method FROM urls WHERE path = ? AND method = ? AND type = 'resp' GROUP BY METHOD", (path, command) );
methods = waf.cursor.fetchone()
if methods is None:
print bcolors.WARNING + "[ANOMALY] URL/method ('%s','%s') not in database." % (command, path) + bcolors.ENDC
if waf.unknown_urls_action == ACTION_PASS:
return True
else:
self.response_buffer = None
return False
averages = self.fetch_averages(path, 'resp')
score = 0
# Header QTY
header_qty = len(response.headers)
if not self.valid_range('response_header_qty', averages[0:3], header_qty):
print bcolors.WARNING + "[ANOMALY] URL '%s' has an unexpected response header quantity (%d)." % (path, header_qty) + bcolors.ENDC
score += waf.config.getint('scorer', 'response_header_qty')
# Header SIZE
header_size = len(str(response.headers))
if not self.valid_range('response_header_size', averages[3:6], header_size):
print bcolors.WARNING + "[ANOMALY] URL '%s' has an unexpected response header size (%d)." % (path, header_size) + bcolors.ENDC
score += waf.config.getint('scorer', 'response_header_size')
# Content SIZE
content_size = (len(self.response_buffer)-header_size)
if not self.valid_range('response_content_size', averages[6:9], content_size):
print bcolors.WARNING + "[ANOMALY] URL '%s' has an unexpected response content size (%d)." % (path, content_size) + bcolors.ENDC
score += waf.config.getint('scorer', 'response_content_size')
# Cookies
if waf.config.getint('analyzer', 'response_cookies') == 1 and 'set-cookie' in response.headers:
if not self.valid_cookies(response.headers['set-cookie'], 'resp'):
score += waf.config.getint('scorer', 'response_cookies')
# Header sanity
if waf.config.getint('analyzer', 'response_headers') == 1:
if not self.valid_headers(response.headers.keys(), 'resp'):
score += waf.config.getint('scorer', 'response_headers')
threshold = waf.config.getint('enforcer', 'response_threshold')
if score > threshold:
print bcolors.FAIL + "[THREAT] URL '%s' scored as malicious (%d/%d)." % (path, score, threshold) + bcolors.ENDC
if waf.config.get('enforcer', 'action') == 'drop':
return False
return True
# Server => Proxy
def dataReceived(self, data):
if waf.mode == MODE_BYPASS:
self.factory.server.write(data)
return False
if self.response_buffer is None:
self.response_buffer = data
else:
self.response_buffer += data
# All chunks received
if self.chunked and data.endswith(CHUNK_END):
self.chunked = False
elif self.chunked:
return True
response = HTTPResponse(self.response_buffer)
if not hasattr(response, 'headers'):
print bcolors.FAIL + '[ANOMALY] Malformed response.' + bcolors.ENDC
self.factory.server.drop_connection()
self.response_buffer = None
return False
# Chunked starts
if response.chunked is not None and len(self.response_buffer) == len(data):
self.chunked = True
return True
if waf.mode == MODE_OPERATIONAL:
if not self.analyzeResponse(response):
self.factory.server.drop_connection()
self.response_buffer = None
return False
header_qty = len(response.headers)
header_size = len(str(response.headers))
content_size = (len(self.response_buffer)-header_size)
print bcolors.OKGREEN + "[RESPONSE] %s %s (HEADERS: %d, HEADERSIZE: %s, CONTENTSIZE %s)" % (self.client_request.command, self.client_request.path, header_qty, header_size, content_size) + bcolors.ENDC
if waf.mode == MODE_TRAINING:
self.learnResponse(response)
self.factory.server.write(self.response_buffer)
self.response_buffer = None
def learnResponse(self, response):
header_qty = len(response.headers)
header_size = len(str(response.headers))
content_size = (len(self.response_buffer)-header_size)
waf.cursor.execute('INSERT INTO urls VALUES (?, ?, ?, ?, ?, ?, ?, ?)', (self.client_request.path, self.client_request.command, header_qty, header_size, content_size, None, None, 'resp'))
# Check for cookies
cookies = []
if 'set-cookie' in response.headers:
ck = Cookie.SimpleCookie()
ck.load(response.headers['set-cookie'])
for cookie in ck.keys():
cookie_name = cookie.strip().lower()
cookies.append((self.client_request.path, self.client_request.command, cookie_name, 'resp'))
headers = []
for header in response.headers:
header_name = header.strip().lower()
headers.append((self.client_request.path, self.client_request.command, header_name, 'resp'))
waf.cursor.executemany('INSERT OR IGNORE INTO cookies VALUES (?, ?, ?, ?)', cookies)
waf.cursor.executemany('INSERT OR IGNORE INTO headers VALUES (?, ?, ?, ?)', headers)
waf.conn.commit()
def learnRequest(self, data):
request = self.client_request
header_qty = len(request.headers)
header_size = len(str(request.headers))
content_size = (len(data)-header_size)
waf.cursor.execute('INSERT INTO urls VALUES (?, ?, ?, ?, ?, ?, ?, ?)', (request.path, request.command, header_qty, header_size, content_size, None, None, 'req'))
headers = []
cookies = []
postdata = []
mimes = []
if 'Cookie' in self.client_request.headers:
ck = Cookie.SimpleCookie()
ck.load(self.client_request.headers['Cookie'])
for cookie in ck.keys():
cookie_name = cookie.strip().lower()
cookies.append((self.client_request.path, self.client_request.command, cookie_name, 'req'))
for header in self.client_request.headers:
header_name = header.strip().lower()
headers.append((self.client_request.path, self.client_request.command, header_name, 'req'))
if request.command == "POST":
for field in request.form.keys():
if request.form[field].filename:
file_data = request.form[field].file.read()
mime = waf.magic.buffer(file_data)
mimes.append((self.client_request.path, mime))
field_name = field.strip().lower()
postdata.append((self.client_request.path, field_name))
waf.cursor.executemany('INSERT OR IGNORE INTO cookies VALUES (?, ?, ?, ?)', cookies)
waf.cursor.executemany('INSERT OR IGNORE INTO headers VALUES (?, ?, ?, ?)', headers)
waf.cursor.executemany('INSERT OR IGNORE INTO postdata VALUES (?, ?)', postdata)
waf.cursor.executemany('INSERT OR IGNORE INTO uploads VALUES (?, ?)', set(mimes))
waf.conn.commit()
# Proxy => Server
def write(self, data):
if data:
if waf.mode == MODE_BYPASS:
self.transport.write(data)
return True
if self.request_buffer is None:
self.request_buffer = data
else:
self.request_buffer += data
request = HTTPRequest(self.request_buffer)
if not hasattr(request, 'headers') or not hasattr(request, 'path') or not hasattr(request, 'command'):
print bcolors.FAIL + '[ANOMALY] Malformed request.' + bcolors.ENDC
self.factory.server.drop_connection()
self.request_buffer = None
return False
self.client_request = request
header_qty = len(request.headers)
header_size = len(str(request.headers))
content_size = (len(self.request_buffer)-header_size)
if 'Content-Length' in request.headers:
total_size = int(request.headers['Content-Length'])
if content_size < total_size:
return True
if waf.mode == MODE_OPERATIONAL:
if not self.analyzeRequest():
self.factory.server.drop_connection()
self.request_buffer = None
return False
print bcolors.OKBLUE + "[REQUEST] %s %s (HEADERS: %d, HEADERSIZE: %s, CONTENTSIZE %s)" % (request.command, request.path, header_qty, header_size, content_size) + bcolors.ENDC
if waf.mode == MODE_TRAINING:
self.learnRequest(self.request_buffer)
self.transport.write(self.request_buffer)
self.request_buffer = None
return True
class WAF(object):
conn = None
config = None
mode = None
magic = None
unknown_ulrs_action = ACTION_DROP
listen_port = 0
server_addr = None
server_port = 0
def __init__(self):
self.config = ConfigParser.RawConfigParser()
self.init_config()
self.init_logging()
self.init_magic()
self.init_db()
def print_banner(self):
print textwrap.dedent("""\
88 88 ad88 88 88
88 "" d8" "" "" ,d
88 88 88
88,dPPYba, 88 MM88MMM 8b,dPPYba, ,adPPYba, ,adPPYba, MM88MMM
88P\' "8a 88 88 88P\' "Y8 a8" "8a I8[ "" 88
88 d8 88 88 88 8b d8 `"Y8ba, 88
88b, ,a8" 88 88 88 "8a, ,a8" aa ]8I 88,
8Y"Ybbd8"\' 88 88 88 `"YbbdP"\' `"YbbdP"\' "Y888
Intelligent Web Application Firewall
by: Jan Seidl <jseidl@wroot.org>
""")
def start(self):
self.print_banner()
self.init_reactor()
def init_db(self):
self.conn = sqlite3.connect(DATABASE_FILE)
self.cursor = self.conn.cursor()
def init_magic(self):
self.magic = magic.open(magic.MAGIC_MIME)
self.magic.load()
def init_config(self):
try:
self.config.read(CONFIG_FILE)
# Mode
_mode = self.config.get('general', 'mode')
if _mode == 'training':
self.mode = MODE_TRAINING
elif _mode == 'operational':
self.mode = MODE_OPERATIONAL
else:
self.mode = MODE_BYPASS
# Unknown URLs
if self.config.get('general', 'unknown_urls') == 'drop':
self.unknown_urls_action = ACTION_DROP
else:
self.unknown_urls_action = ACTION_PASS
except Exception, e:
sys.stderr.write("No config file present %s" % str(e))
sys.exit(1)
def init_reactor(self):
factory = protocol.ServerFactory()
factory.protocol = ServerProtocol
self.listen_port = self.config.getint('general', 'listen_port')
self.server_addr = self.config.get('general', 'backend_ip')
self.server_port = self.config.getint('general', 'backend_port')
reactor.listenTCP(self.listen_port, factory)
print bcolors.HEADER + "BWAF listening at port %d (backend: %s:%d) [%s]" % (self.listen_port, self.server_addr, self.server_port, 'operational' if self.mode == MODE_OPERATIONAL else 'training') + bcolors.ENDC
reactor.run()
def init_logging(self):
log.startLogging(sys.stdout)
def __del__(self):
if self.conn is not None:
self.conn.close()
# Module-level singleton: constructed at import time (reads config, opens DB).
waf = WAF()
def main():
    """Start the WAF proxy (blocks in the Twisted reactor)."""
    waf.start()
def reload_waf(signum, frame):
    """SIGHUP handler: re-read the configuration without restarting."""
    print bcolors.WARNING + "Received Signal: %s at frame: %s" % (signum, frame) + bcolors.ENDC
    print bcolors.HEADER + "Reloading WAF configuration." + bcolors.ENDC
    waf.init_config()
# SIGHUP Reload Config trap
signal.signal(signal.SIGHUP, reload_waf)
if __name__ == '__main__':
    main()
| [
"jseidl@wroot.org"
] | jseidl@wroot.org |
febef6fc2172681866a7e8db9712cb4f44ba813f | cfd7c85c7512e4ffa27e921fad7b12313f6aa92a | /whatsappbot/manage.py | 9a2964f87023101e58e172ec30373a41029163d4 | [] | no_license | KIngSimeone/whatsapp-bot | d76df21da4cde1b4362c3132fe2a7bbc59eec1aa | db9f3b714009af3a3b75fd09cd4807a2ed1c05de | refs/heads/main | 2023-07-05T19:23:14.262880 | 2021-08-29T16:28:53 | 2021-08-29T16:28:53 | 400,136,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: select the settings module and hand off to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'whatsappbot.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"simeoneumoh@outlook.com"
] | simeoneumoh@outlook.com |
52e9d35294294a85212a99f7b76edae30d26f647 | 684277301b83dd8a73a8c9e73fc80dabd117782c | /read_HIK_Cam/read_save_cam.py | 60cb75d1293aac4381f6f6e038ba1890a6773908 | [] | no_license | Tr33-He11/HIK_FaceDetec_DL_System | c8b4f9ffaf3003d65ba7248a8f22a96c9ef6edac | 8c0417e93f896526cd35714f3e439b0ee3c42017 | refs/heads/master | 2020-08-20T08:25:47.263797 | 2018-01-09T09:07:19 | 2018-01-09T09:07:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # coding=utf-8
import os
import shlex
import subprocess
# RTSP URL of the HIKVision camera stream (credentials as configured on the device).
source = "rtsp://admin:520douxiao@192.168.199.64/Streaming/Channels/1"
cam_save_dir = "/data/视频/HIK_cam"
if not os.path.exists(cam_save_dir):  # create the save directory if it does not exist
    os.makedirs(cam_save_dir)
# Probe the camera with a single ICMP ping before opening the stream.
cmd = "ping -c 1 192.168.199.64"
args = shlex.split(cmd)
try:
    subprocess.check_call(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Fixed misleading message: the host being pinged is the HIK camera, not Baidu.
    print("HIK camera 192.168.199.64 is up!")
except subprocess.CalledProcessError:
    print("Failed to get ping.")
# cam = cv2.VideoCapture(source)
| [
"sunshine.dxiao@gmail.com"
] | sunshine.dxiao@gmail.com |
390521d51946954e2ee7afde08afbdd8c090143e | 2c6e7f3ebb57322f2b7e4389fecc6b0943253092 | /tests/test_cli.py | b62e6690e4d708a9a206cd1cd0a74fd682f2939e | [
"MIT"
] | permissive | Adaephon-GH/i3configger | d70d66a87071eec721444d787f9ae087f46f658f | 2411b579557a4404afa26efa4afe0a37d16f1f0d | refs/heads/master | 2021-01-24T07:20:11.015812 | 2017-06-04T12:39:10 | 2017-06-04T12:39:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | import pytest
from i3configger import cli, exc
# Table-driven test: each row is (argv-style list, expected-to-be-valid flag).
@pytest.mark.parametrize(
    "args,exp",
    (
        ([], True),
        ([''], False),
        (['dunno'], False),
        (['select-next'], False),
        (['select-previous'], False),
        (['set'], False),
        (['select'], False),
        # TODO do I want special stuff to be overridable?
        (['select', 'hostname', 'ob1'], True),
        (['set', 'someVar', 'someValue'], True),
        (['set', 'someVar'], False),
        (['select-next', 'scheme'], True),
        (['select-next', 'scheme', 'extra'], False),
        (['select-previous', 'scheme'], True),
        (['select-previous', 'scheme', 'extra'], False),
    )
)
def test_check_sanity(args, exp):
    """check_sanity accepts well-formed CLI argument lists and raises otherwise."""
    if exp:
        cli.check_sanity(args)
    else:
        with pytest.raises(exc.I3configgerException):
            cli.check_sanity(args)
| [
"oliver.bestwalter@avira.com"
] | oliver.bestwalter@avira.com |
75683d574fd6fafc97d6262c264e53f43ff0a56b | 19ee7dd974ba8b1731e9450c174df7630f63eaad | /Api/recognition/serializers.py | bc1cd767bbebc3dcfc9d20d425f5e7079f0f1748 | [] | no_license | minjjjae/No-Mask-Trace-System | 12d3a5a146f5526b9dbba5a8b75d6adc6c8a2e2b | 61c76197d7ae921823b795effd9f267c92016a97 | refs/heads/main | 2023-01-19T08:35:19.643717 | 2020-11-28T05:27:52 | 2020-11-28T05:27:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | from recognition.models import Recognition
from rest_framework import serializers
class RecognitionSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer for Recognition records, exposing the stored image as a URL."""
    # max_length=None: no filename length limit; use_url=True: serialize as URL, not raw path.
    image = serializers.ImageField(max_length=None, use_url=True)
    class Meta:
        model = Recognition
        fields = ("pk", "encodeLst", "description", "created_at", "image")
| [
"bhj1684@naver.com"
] | bhj1684@naver.com |
19b16e038e42e69f3f52b17764d02d98614b0c87 | 364b36d699d0a6b5ddeb43ecc6f1123fde4eb051 | /_downloads_1ed/fig_XD_example.py | 6e7d5247c12d7f1e576f8087307c0067916096aa | [] | no_license | astroML/astroml.github.com | eae3bfd93ee2f8bc8b5129e98dadf815310ee0ca | 70f96d04dfabcd5528978b69c217d3a9a8bc370b | refs/heads/master | 2022-02-27T15:31:29.560052 | 2022-02-08T21:00:35 | 2022-02-08T21:00:35 | 5,871,703 | 2 | 5 | null | 2022-02-08T21:00:36 | 2012-09-19T12:55:23 | HTML | UTF-8 | Python | false | false | 3,918 | py | """
Extreme Deconvolution example
-----------------------------
Figure 6.11
An example of extreme deconvolution showing a simulated two-dimensional
distribution of points, where the positions are subject to errors. The top two
panels show the distributions with small (left) and large (right) errors. The
bottom panels show the densities derived from the noisy sample (top-right
panel) using extreme deconvolution; the resulting distribution closely matches
that shown in the top-left panel.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.decorators import pickle_results
from astroML.density_estimation import XDGMM
from astroML.plotting.tools import draw_ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Sample the dataset
N = 2000
np.random.seed(0)  # fixed seed so the published figure is reproducible
# generate the true data
x_true = (1.4 + 2 * np.random.random(N)) ** 2
y_true = 0.1 * x_true ** 2
# add scatter to "true" distribution
dx = 0.1 + 4. / x_true ** 2
dy = 0.1 + 10. / x_true ** 2
x_true += np.random.normal(0, dx, N)
y_true += np.random.normal(0, dy, N)
# add noise to get the "observed" distribution
dx = 0.2 + 0.5 * np.random.random(N)
dy = 0.2 + 0.5 * np.random.random(N)
x = x_true + np.random.normal(0, dx)
y = y_true + np.random.normal(0, dy)
# stack the results for computation
# X is (N, 2); Xerr is (N, 2, 2) with the per-point variances on the diagonal
X = np.vstack([x, y]).T
Xerr = np.zeros(X.shape + X.shape[-1:])
diag = np.arange(X.shape[-1])
Xerr[:, diag, diag] = np.vstack([dx ** 2, dy ** 2]).T
#------------------------------------------------------------
# compute and save results
# (pickle_results caches the fitted model in XD_toy.pkl across runs)
@pickle_results("XD_toy.pkl")
def compute_XD_results(n_components=10, n_iter=500):
    """Fit an extreme-deconvolution GMM to the noisy data X with covariances Xerr."""
    clf = XDGMM(n_components, n_iter=n_iter)
    clf.fit(X, Xerr)
    return clf
clf = compute_XD_results(10, 500)
sample = clf.sample(N)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(left=0.1, right=0.95,
                    bottom=0.1, top=0.95,
                    wspace=0.02, hspace=0.02)
ax1 = fig.add_subplot(221)
ax1.scatter(x_true, y_true, s=4, lw=0, c='k')
ax2 = fig.add_subplot(222)
ax2.scatter(x, y, s=4, lw=0, c='k')
ax3 = fig.add_subplot(223)
ax3.scatter(sample[:, 0], sample[:, 1], s=4, lw=0, c='k')
ax4 = fig.add_subplot(224)
# Draw each fitted Gaussian component as a 2-sigma ellipse.
for i in range(clf.n_components):
    draw_ellipse(clf.mu[i], clf.V[i], scales=[2], ax=ax4,
                 ec='k', fc='gray', alpha=0.2)
titles = ["True Distribution", "Noisy Distribution",
          "Extreme Deconvolution\n resampling",
          "Extreme Deconvolution\n cluster locations"]
ax = [ax1, ax2, ax3, ax4]
# Shared axis limits/ticks; hide inner tick labels on the 2x2 grid.
for i in range(4):
    ax[i].set_xlim(-1, 13)
    ax[i].set_ylim(-6, 16)
    ax[i].xaxis.set_major_locator(plt.MultipleLocator(4))
    ax[i].yaxis.set_major_locator(plt.MultipleLocator(5))
    ax[i].text(0.05, 0.95, titles[i],
               ha='left', va='top', transform=ax[i].transAxes)
    if i in (0, 1):
        ax[i].xaxis.set_major_formatter(plt.NullFormatter())
    else:
        ax[i].set_xlabel('$x$')
    if i in (1, 3):
        ax[i].yaxis.set_major_formatter(plt.NullFormatter())
    else:
        ax[i].set_ylabel('$y$')
plt.show()
| [
"vanderplas@astro.washington.edu"
] | vanderplas@astro.washington.edu |
125bfeb5798b8a0418f73d6cee3d6dd29ea1cdc5 | fc9e52fef1b2aae99228057f17bf3c9905339349 | /Ch15/mrMean.py | 5e207e06ad7de0bc1df35cab75864e3ec8404d1c | [] | no_license | apollos/ML_practice | 0e5f349c31c0653049836eab4186e4a03d109c11 | 411e7acdfc619e3081c5319d1e851bedd771934d | refs/heads/master | 2021-01-10T13:52:44.390855 | 2017-05-04T14:59:32 | 2017-05-04T14:59:32 | 52,074,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | '''
Created on Feb 28, 2011
@author: Song Yu
'''
from mrjob.job import MRJob
class MRmean(MRJob):
    """mrjob job computing the pooled mean and variance of a stream of numbers.

    Each mapper accumulates count/mean/mean-of-squares locally and emits a
    single summary in map_final; the reducer pools the per-mapper summaries.
    """
    def __init__(self, *args, **kwargs):
        super(MRmean, self).__init__(*args, **kwargs)
        # Per-mapper accumulators (one mapper instance per input split).
        self.inCount = 0
        self.inSum = 0
        self.inSqSum = 0
    def map(self, key, val): #needs exactly 2 arguments
        # `if False: yield` makes this a generator (mrjob expects one) while
        # emitting nothing per record; the summary is yielded in map_final.
        if False: yield
        inVal = float(val)
        self.inCount += 1
        self.inSum += inVal
        self.inSqSum += inVal*inVal
    def map_final(self):
        # Emit (count, mean, mean-of-squares) under a constant key so all
        # mapper summaries land in a single reducer.
        mn = self.inSum/self.inCount
        mnSq = self.inSqSum/self.inCount
        yield (1, [self.inCount, mn, mnSq])
    def reduce(self, key, packedValues):
        # Pool the per-mapper summaries into a global mean and variance.
        cumVal=0.0; cumSumSq=0.0; cumN=0.0
        for valArr in packedValues: #get values from streamed inputs
            nj = float(valArr[0])
            cumN += nj
            cumVal += nj*float(valArr[1])
            cumSumSq += nj*float(valArr[2])
        mean = cumVal/cumN
        var = (cumSumSq - 2*mean*cumVal + cumN*mean*mean)/cumN
        yield (mean, var) #emit mean and var
    def steps(self):
        return ([self.mr(mapper=self.map, mapper_final=self.map_final,\
                reducer=self.reduce,)])
if __name__ == '__main__':
MRmean.run()
| [
"apollos521@gmail.com"
] | apollos521@gmail.com |
734d0e6668678bfef1bf6ac219ac31757539a094 | fc315f721cff8c2f75374d5ce1788a5b8a7e3c05 | /match_yao/build_people_db.py | ff3afb103e24ca3a28bfb50ab40d20c9d5c1be91 | [] | no_license | yymao/kipac-tea-maker | a16053dda6b5c42d2945a12e7c95ae65ac991e4c | ea0c5237d597d9865ab0c2d934c3db0efce58a72 | refs/heads/master | 2020-04-15T18:19:49.303992 | 2015-10-03T16:35:56 | 2015-10-03T16:35:56 | 19,957,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | from match_lib import *
import cPickle as pickle
url_base = 'http://export.arxiv.org/api/query?search_query=cat:astro-ph*+AND+au:%s&max_results=50&sortBy=submittedDate&sortOrder=descending'
with open('../kipac-people/people.pkl', 'r') as f:
people = pickle.load(f)
new_people = []
for person in people:
root = get_arxiv_xml_root(url_base%person['name_arxiv'])
wfd_all = {}
bfd_all = {}
count = 0
for entry in root.findall(xml_prefix+'entry'):
title = entry.find(xml_prefix+'title').text
if title == 'Error':
continue
count += 1
wfd, bfd = get_grams(title, entry.find(xml_prefix+'summary').text)
wd_count = float(sum(wfd.itervalues()))
bg_count = wd_count - 2.0
for k in wfd:
wfd_all[k] = wfd_all.get(k, 0) + wfd[k]/wd_count
for k in bfd:
bfd_all[k] = bfd_all.get(k, 0) + bfd[k]/bg_count
count_f = float(count)
for k in wfd_all:
wfd_all[k] /= count_f
for k in bfd_all:
bfd_all[k] /= count_f
person['count'] = count
person['wfd'] = wfd_all
person['bfd'] = bfd_all
if count:
new_people.append(person)
print person['name'], person['name_arxiv'], person['count']
with open('people.pkl', 'w') as fo:
pickle.dump(new_people, fo)
| [
"maomao.321@gmail.com"
] | maomao.321@gmail.com |
efd3596b14e13f91b5ed38a9a173426b62d558e2 | a78ed689af684134c604d72441c36e1203398bed | /Data/InterestRate.py | 2274ea38ff4d6ef97810daa6847367b887f28c5b | [
"Apache-2.0"
] | permissive | kancheng/FinMind | 60ce727d45716cd657ce6f188e2415801c7f41b1 | 751e9add737d1cabaebe4445e7e0096b22b3f5bf | refs/heads/master | 2020-06-18T20:52:20.336358 | 2019-07-09T12:31:31 | 2019-07-09T12:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py |
# Name of the backing data table this module loads from.
TABLE = 'InterestRate'
import os, sys
import platform
# Add this file's directory to sys.path so BasedClass imports regardless of
# the current working directory (separators differ on Windows vs POSIX).
if 'Windows' in platform.platform():
    PATH = "\\".join( os.path.abspath(__file__).split('\\')[:-1])
else:
    PATH = "/".join( os.path.abspath(__file__).split('/')[:-1])
sys.path.append(PATH)
class ClassInterestRate(Load):
    """Loader bound to the 'InterestRate' table, keyed by the 'country' column."""
    def __init__(self):
        super(ClassInterestRate, self).__init__(TABLE,'country')
def InterestRate(select = [],date = ''):
    """Load interest-rate data for one or more countries.

    `select` may be an int (converted to str), a str, or a list of
    country identifiers; `date` optionally restricts the date range.
    Raises AttributeError for any other `select` type.
    The mutable default `[]` is never mutated here, so it is safe.
    """
    self = ClassInterestRate()
    if isinstance(select,int): select = str(select)
    if isinstance(select,str) or isinstance(select,list):
        return self.load(select,date)
    else:
        # Was `raise(AttributeError, "Hidden attribute")`, which raises a
        # tuple (a TypeError on Python 3 and message-losing on Python 2);
        # instantiate the exception properly instead.
        raise AttributeError("Hidden attribute")
def Load_Data_List():
    """Return the list of available country identifiers in the InterestRate table."""
    self = ClassInterestRate()
    return list( self.get_data_list() )
| [
"sam.lin@tripsaas.com"
] | sam.lin@tripsaas.com |
6508b6eae18f254c28dd6343bef32cd4b4afd295 | 61fa932822d22ba480f7aa075573e688897ad844 | /simulation/decai/simulation/data/imdb_data_loader.py | fbc6d62dc1165cc5a608c3003156977db751c917 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | microsoft/0xDeCA10B | a8f118fa1f89f387a0b83f297250fc1846521f41 | 4066eeb2b5298c259a7c19c4d42ca35ef22e0569 | refs/heads/main | 2023-07-26T08:09:34.718104 | 2023-01-25T12:47:17 | 2023-01-25T12:47:17 | 181,561,897 | 538 | 133 | MIT | 2023-07-19T03:10:38 | 2019-04-15T20:37:11 | Python | UTF-8 | Python | false | false | 1,778 | py | from dataclasses import dataclass, field
from logging import Logger
from typing import List
import numpy as np
from injector import ClassAssistedBuilder, Module, inject, provider, singleton
from keras.datasets import imdb
from .data_loader import DataLoader
@inject
@dataclass
class ImdbDataLoader(DataLoader):
    """
    Load data for sentiment analysis of IMDB reviews.
    https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification
    """
    _logger: Logger
    num_words: int = field(default=1000)  # vocabulary size kept from the Keras dataset
    def classifications(self) -> List[str]:
        """Class labels; index matches the 0/1 targets from the dataset."""
        return ["NEGATIVE", "POSITIVE"]
    def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
        """Return ((x_train, y_train), (x_test, y_test)), optionally truncated,
        with reviews encoded as multi-hot bag-of-words vectors of num_words."""
        self._logger.info("Loading IMDB review data using %d words.", self.num_words)
        (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=self.num_words)
        if train_size is not None:
            x_train, y_train = x_train[:train_size], y_train[:train_size]
        if test_size is not None:
            x_test, y_test = x_test[:test_size], y_test[:test_size]
        def get_features(data):
            # Convert each review (list of word indices) into a fixed-size
            # multi-hot vector: 1 wherever the word occurs at least once.
            result = np.zeros((len(data), self.num_words), dtype='int')
            for i, x in enumerate(data):
                for v in x:
                    result[i, v] = 1
            return result
        x_train = get_features(x_train)
        x_test = get_features(x_test)
        self._logger.info("Done loading IMDB review data.")
        return (x_train, y_train), (x_test, y_test)
@dataclass
class ImdbDataModule(Module):
    """Injector module binding DataLoader to an ImdbDataLoader singleton."""
    num_words: int = field(default=1000)
    @provider
    @singleton
    def provide_data_loader(self, builder: ClassAssistedBuilder[ImdbDataLoader]) -> DataLoader:
        return builder.build(num_words=self.num_words)
| [
"noreply@github.com"
] | noreply@github.com |
e55d846ca1118bb4b0ffbdfce069db84d56d53ed | 775c898e4a61c6429f75efa36565452dadc6b824 | /questions.py | 550e5cbc710c89579f43660d8cdc5250e117e6c3 | [] | no_license | Lambtsa/math-game-python | e2013dbbf429f469cb1e17f4de43c3ddc330e6c3 | 66974f156042374cce993919b5f8c322998fb723 | refs/heads/master | 2023-05-30T14:37:19.087297 | 2021-06-23T16:37:35 | 2021-06-23T16:37:35 | 379,655,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | class Question:
answer = None
text = None
def __str__(self) -> str:
return f'{self.text}'
class Add(Question):
    """Question whose answer is the sum of its two operands."""

    def __init__(self, num1, num2):
        super().__init__()
        # Compute the solution first, then render the prompt.
        self.answer = num1 + num2
        self.text = '{} + {}'.format(num1, num2)
class Multiply(Question):
    """Question whose answer is the product of its two operands."""

    def __init__(self, num1, num2):
        super().__init__()
        # Compute the solution first, then render the prompt.
        self.answer = num1 * num2
        self.text = '{} x {}'.format(num1, num2)
if __name__ == '__main__':
pass
| [
"42414095+Lambtsa@users.noreply.github.com"
] | 42414095+Lambtsa@users.noreply.github.com |
d566d16b56c6a5020fd6c5e9ae501627d36b5c6d | 90bcb21ff3dc56e1ce742f8e3a679d7db7ad5e9a | /models/models.py | 5b856c461c5bcbe27d7a41d1f1e5f0a8428940c5 | [
"MIT"
] | permissive | raghavchalapathy/oc-nn_old | 0ee465e03a2510e72b222e6f05d06d5774629490 | e2c6c49b414e2d67a04a6f3e57e1802ea4d82996 | refs/heads/master | 2021-10-10T15:19:00.696183 | 2019-01-12T20:07:27 | 2019-01-12T20:07:27 | 108,765,892 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import numpy as np
import pandas as pd
from sklearn import utils
import matplotlib
# Base directory containing the per-dataset input files.
dataPath = './data/'
# NOTE(review): the functions below are placeholders -- they only log and
# return None; the scoring logic appears to live elsewhere or is unfinished.
def func_getDecision_Scores_USPS():
    """Stub: compute decision scores for the USPS dataset (not implemented)."""
    print "func_getDecision_Scores_USPS file ....."
    return
def func_getDecision_Scores_FAKE_NEWS():
    """Stub: compute decision scores for the fake-news dataset (not implemented)."""
    print "func_getDecision_Scores_FAKE_NEWS file ....."
    return
def func_getDecision_Scores_SPAM_VS_HAM():
    """Stub: compute decision scores for the spam-vs-ham dataset (not implemented)."""
    print "func_getDecision_Scores_SPAM_VS_HAM file ....."
    return
def func_getDecision_Scores_CIFAR_10():
    """Stub: compute decision scores for the CIFAR-10 dataset (not implemented)."""
    print "func_getDecision_Scores_CIFAR_10 file ....."
    return
"raghav.chalapathy@gmail.com"
] | raghav.chalapathy@gmail.com |
b94c7794752b19ca740b398a5537aadc53b05234 | ce05a73f7162eb98007f6187618c88fdc8f661ff | /subtitles/migrations/0001_initial.py | 0ddd2fbfff8e04f8f3435e3ee02617626b2d884c | [] | no_license | jadolg/SubsCafe | 41511b821bbc6f957e744b21436b8fc1789bee4d | 41ddd394f656acca288b116f9823208965453d4e | refs/heads/master | 2020-06-14T22:31:14.017448 | 2017-02-12T02:45:43 | 2017-02-12T02:45:43 | 75,403,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-11 15:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration creating the Subtitulo table.
    # NOTE: do not edit an already-applied migration; add a new one instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Subtitulo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=500)),
                ('ruta', models.CharField(max_length=5000)),
                ('descargas', models.IntegerField(default=0)),
                # 64-character unique hash identifying the subtitle file
                # (presumably a SHA-256 hex digest — TODO confirm against the model).
                ('ahash', models.CharField(max_length=64, unique=True)),
            ],
        ),
    ]
| [
"diazorozcoj@gmail.com"
] | diazorozcoj@gmail.com |
b5f2692aeda1272798fbe9e2b8bec018f8a62020 | 778ac16b20768ff801cb1fc94b352914e0ff0544 | /setup.py | 78f93fbedc25397d1a66afd229b90fde2923b8f1 | [
"MIT"
] | permissive | fifteenhex/PyLX-16A | 5ab74e35c1a9d4e23cf9752934cfdeae7652d19d | ab1c61270185d57618aa4a3b2006b8135830fd5c | refs/heads/master | 2020-04-23T16:36:20.236632 | 2020-03-02T11:02:55 | 2020-03-02T11:02:55 | 171,303,821 | 0 | 0 | null | 2019-02-18T14:59:06 | 2019-02-18T14:59:05 | null | UTF-8 | Python | false | false | 216 | py | from setuptools import setup
# Packaging metadata for the lx16a package.
setup(name='lx16a',
      version='0.1',
      author='Ethan Lipson',
      author_email='ethan.lipson@gmail.com',
      license='dunno',  # placeholder — should be replaced with a real license identifier
      packages=['lx16a'],
      zip_safe=False)
| [
"daniel@0x0f.com"
] | daniel@0x0f.com |
daf6299762e39365d4e0099a36ae78a1a59bcd0a | 6ec91b363b077bffd33f15300a0935124e9fb915 | /Cracking_the_Code_Interview/Leetcode/3.String/290.Word_Pattern.py | 766301dce21b1c686fdd7e0e347044af480ca094 | [] | no_license | lzxyzq/Cracking_the_Coding_Interview | 03232515ae8eb50394d46322d36b230d1a626fcf | 79dee7dab41830c4ff9e38858dad229815c719a0 | refs/heads/master | 2023-06-05T19:52:15.595289 | 2021-06-23T22:46:02 | 2021-06-23T22:46:02 | 238,068,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,531 | py | '''
@Author: your name
@Date: 2020-06-09 17:21:16
@LastEditTime: 2020-06-10 12:19:27
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /Cracking_the_Code_Interview/Leetcode/String/290.Word_Pattern.py
'''
# Given a pattern and a string str, find if str follows the same pattern.
# Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
'''
Example 1:
Input: pattern = "abba", str = "dog cat cat dog"
Output: true
Example 2:
Input:pattern = "abba", str = "dog cat cat fish"
Output: false
Example 3:
Input: pattern = "aaaa", str = "dog cat cat dog"
Output: false
Example 4:
Input: pattern = "abba", str = "dog dog dog dog"
Output: false
'''
# Notes:
# You may assume pattern contains only lowercase letters, and str contains lowercase letters that may be separated by a single space.
# 1.split()
# 2. equal length: len(pattern) must equal len(words)
# 3.hashmap key:pattern value:str
class Solution:
    def wordPattern(self, pattern: str, str: str) -> bool:
        """Return True iff the words of *str* map one-to-one onto the letters of *pattern*.

        A full bijection is required: each pattern letter corresponds to exactly
        one word and vice versa (LeetCode 290).
        """
        words = str.split()
        # Lengths must agree for any letter<->word pairing to exist.
        if len(words) != len(pattern):
            return False
        word_to_char = {}
        for ch, word in zip(pattern, words):
            if word in word_to_char:
                # Same word must always pair with the same letter.
                if word_to_char[word] != ch:
                    return False
            else:
                # A letter already bound to another word breaks injectivity.
                if ch in word_to_char.values():
                    return False
                word_to_char[word] = ch
        return True
# Quick manual check of the letter/word pairing. Guarded so importing this
# module has no side effects, and renamed so the builtin `str` is no longer
# shadowed at module level (the original assigned `str = "dog cat cat dog"`).
if __name__ == '__main__':
    demo_pattern = "abba"
    demo_text = "dog cat cat dog"
    demo_words = demo_text.split(' ')
    print(tuple(zip(demo_words, demo_pattern)))
"lzxyzq@gmail.com"
] | lzxyzq@gmail.com |
4f2bfcabffed6ee871b076422fe3880a1532abdd | afdf221c5b4534a2263efea0a2ada279e6bb10f3 | /tests/test_models.py | e1c2aa30ec91eea529508cb6fe6d8cb34f1c2186 | [] | no_license | timbueno/fiddlersgreen | 84185a8635acf574e7dc6af832767691c686ac1a | 56615d46c1e03c79e0ee11243edbc6e04e928375 | HEAD | 2016-09-09T23:25:38.409625 | 2015-04-03T01:53:09 | 2015-04-03T01:53:09 | 32,966,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | # -*- coding: utf-8 -*-
"""
fiddlersgreen.tests.test_models
~~~~~~~~~~~~~~~~~
model tests
"""
import time
import pytest
from fiddlersgreen.models import User, AnonymousUser, Role
@pytest.mark.usefixtures('db')
class TestUser:
    """Tests for the User model; every test runs against the `db` fixture."""
    def test_get_by_id(self, user):
        retrieved = User.query.get(user.id)
        assert retrieved == user
    def test_is_not_anonymous(self, user):
        assert user.is_anonymous() is False
    def test_is_anonymous(self):
        user = AnonymousUser()
        assert user.is_anonymous()
    def test_repr(self, user):
        # NOTE(review): assumes the shared `user` fixture uses exactly this email.
        name = user.__repr__()
        assert name == '<User(timbueno@gmail.com)>'
    def test_default_role(self, user):
        assert user.role.default
    def test_password_setter(self, user):
        # Assigning a password should have stored a hash.
        assert user.password_hash is not None
    def test_no_password_getter(self, user):
        # The plain-text password must not be readable back from the model.
        with pytest.raises(AttributeError):
            user.password
    def test_password_verification(self, user):
        # NOTE(review): assumes the `user` fixture password is 'cat'.
        assert user.verify_password('cat')
    def test_password_salts_are_random(self):
        # The same password must hash differently for different users.
        u1 = User.create(email='timbueno@gmail.com', password='cat')
        u2 = User.create(email='longboxed@gmail.com', password='cat')
        assert u1.password_hash != u2.password_hash
    def test_get_auth_token(self, user):
        token = user.get_auth_token()
        assert token
    def test_verify_auth_token(self, user):
        token = user.get_auth_token()
        retrieved = User.verify_auth_token(token)
        assert retrieved == user
    def test_verify_auth_token_password_change(self, user):
        token = user.get_auth_token()
        # Token should not change
        assert token == user.get_auth_token()
        # Change password
        user.password = 'dog'
        user.save()
        # Token should be different
        new_token = user.get_auth_token()
        assert token != new_token
    def test_ping(self, user):
        last_seen_before = user.last_seen
        # Sleep so the refreshed timestamp is strictly greater than the old one.
        time.sleep(1)
        user.ping()
        assert user.last_seen > last_seen_before
@pytest.mark.usefixtures('db')
class TestRole:
    """Tests for Role seeding via Role.insert_roles()."""
    def test_insert_roles(self):
        from fiddlersgreen.models import ROLES
        Role.insert_roles()
        # Every configured role must exist with the expected default flag.
        for name, spec in ROLES.items():
            stored = Role.query.filter_by(name=name).first()
            assert stored
            assert stored.name == name
            assert stored.default is spec[0]
assert role.default is ROLES[r][0]
| [
"timbueno@gmail.com"
] | timbueno@gmail.com |
90d4521c05fa8694e637108f719918c603ba6b67 | 0c73d5bfb9aeece34d097e5d0e991dff1408860a | /releasible/github.py | 3c4528e7a84b449a33a1015a945d2e2f5b1414d9 | [
"MIT"
] | permissive | relrod/releasible | ce495cdf0a3a4d577caaf52fa97794c9627f8bf9 | 4fd16bc5786c4557dc7b17ba44710fddb5d515f5 | refs/heads/master | 2023-07-29T10:11:29.351428 | 2021-04-06T18:40:14 | 2021-04-06T18:41:39 | 340,859,856 | 0 | 1 | MIT | 2021-09-10T16:52:24 | 2021-02-21T09:10:12 | Python | UTF-8 | Python | false | false | 1,887 | py | import re
class GitHubAPICall:
    """Minimal asynchronous GitHub REST client with pagination tracking.

    Attributes:
        token: OAuth token placed in the Authorization header.
        aio_session: aiohttp-style client session used for every request.
        link: Link header of the most recent successful response (pagination).
        calls: count of successful API calls made through this instance.
    """

    def __init__(self, token, aio_session):
        self.token = token
        self.aio_session = aio_session
        self.link = None
        self.calls = 0

    async def get(self, endpoint, json=True):
        """GET *endpoint*; return parsed JSON (or raw text when ``json=False``).

        Raises:
            Exception: for any non-200 response, including the response body.
        """
        print(endpoint)  # debug trace of every API hit
        async with self.aio_session.get(
            endpoint,
            headers={
                'Authorization': 'token {0}'.format(self.token),
                'Accept': (
                    'application/vnd.github.cloak-preview, '
                    'application/vnd.github.groot-preview+json, '
                    'application/vnd.github.v3+json'
                ),
            }
        ) as resp:
            if resp.status != 200:
                text = await resp.text()
                raise Exception(
                    '{0} got status {1}: {2}'.format(
                        endpoint,
                        resp.status,
                        text))
            self.calls += 1
            # Remember the Link header so get_all_pages() can follow rel="next".
            self.link = resp.headers.get('link')
            if json:
                return await resp.json()
            return await resp.text()

    async def get_all_pages(self, endpoint, key=None):
        """Fetch *endpoint* and every rel="next" page, concatenating results.

        When *key* is given each page is expected to be a dict and the lists
        under ``page[key]`` are concatenated; otherwise the pages themselves
        are concatenated.
        """
        # BUG FIX: the original assigned the bare coroutine (`out = req`) when
        # key was None instead of awaiting it.
        first = await self.get(endpoint)
        out = first[key] if key is not None else first
        next_page = self.next_page_url()
        while next_page:
            # BUG FIX: the original wrote `await self.get(next_page)[key]`,
            # which indexes the coroutine object before awaiting it.
            page = await self.get(next_page)
            out += page[key] if key is not None else page
            next_page = self.next_page_url()
        self.link = None
        return out

    def next_page_url(self):
        """Return the rel="next" URL from the last Link header, or None."""
        if not self.link:
            return None
        sp = self.link.split(', ')
        for link in sp:
            match = re.match('<(.+)>; rel="next"', link)
            if not match:
                continue
            return match[1]
| [
"rick@elrod.me"
] | rick@elrod.me |
de9f85a6afffab438d84d1d83c1d3faaad9acee0 | 1b0e5d24269e6ce019b9cd5f0f25fdfc90294e87 | /mysite/polls/tests.py | 484fc1ec18468ad2871dc08edeae078c46e44e1d | [] | no_license | amanishimwe/mysite | a9e3539c1c19c48caf8e4931fd35962eb78cd57c | 3e5fae8034965021a03958c950f194ef14851305 | refs/heads/master | 2023-02-08T06:33:18.847614 | 2020-12-28T19:20:05 | 2020-12-28T19:20:05 | 325,092,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,563 | py | from django.test import TestCase
import datetime
from django.utils import timezone
from .models import Question
from django.urls import reverse
# Create your tests here.
def create_question(question_text, days):
    """Create a Question published *days* from now.

    Negative *days* places the publication date in the past; positive *days*
    yields a question that has yet to be published.
    """
    pub_date = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text, pub_date=pub_date)
class QuestionIndexViewTests(TestCase):
    """Tests for the polls index view (polls:index)."""

    def test_no_questions(self):
        """If no questions exist, an appropriate message is displayed."""
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        # Fixed: the expected message had a stray leading space, which would
        # fail unless the template happened to emit one.
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_past_question(self):
        """Questions with a pub_date in the past are displayed on the index page."""
        # Fixed: days must be negative to create a *past* question; the
        # original used days=30, contradicting the docstring and assertion.
        create_question(question_text="Past question.", days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_question_list'], ['<Question: Past question.>'])

    def test_future_question(self):
        """Questions with a pub_date in the future aren't displayed on the index page."""
        create_question(question_text="future question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertContains(response, "No polls are available.")
        # Fixed: `asserQuerysetEqual` typo raised AttributeError at runtime.
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_future_question_and_past_question(self):
        """Even if both past and future questions exist, only past questions are displayed."""
        create_question(question_text="Past question.", days=-30)
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse('polls:index'))
        # Fixed: expected repr was missing the space after the colon.
        self.assertQuerysetEqual(response.context['latest_question_list'], ['<Question: Past question.>'])

    def test_two_past_questions(self):
        """The questions index page may display multiple questions."""
        create_question(question_text="Past question 1.", days=-30)
        create_question(question_text="Past question 2.", days=-5)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_question_list'], ['<Question: Past question 2.>', '<Question: Past question 1.>'])
class QuestionDetailViewTests(TestCase):
    """Tests for the polls detail view (polls:detail)."""

    def test_future_question(self):
        """The detail view of a not-yet-published question returns 404."""
        question = create_question(question_text='Future question.', days=5)
        detail_url = reverse('polls:detail', args=(question.id,))
        response = self.client.get(detail_url)
        self.assertEqual(response.status_code, 404)

    def test_past_question(self):
        """The detail view of a past question displays the question's text."""
        question = create_question(question_text='Past Question.', days=-5)
        detail_url = reverse('polls:detail', args=(question.id,))
        response = self.client.get(detail_url)
        self.assertContains(response, question.question_text)
class QuestionModelTests(TestCase):
    """Unit tests for Question.was_published_recently()."""

    def test_was_published_with_future_question(self):
        """was_published_recently() returns False when pub_date is in the future."""
        pub = timezone.now() + datetime.timedelta(days=30)
        self.assertIs(Question(pub_date=pub).was_published_recently(), False)

    def test_was_published_recently_with_old_question(self):
        """was_published_recently() returns False when pub_date is older than one day."""
        pub = timezone.now() - datetime.timedelta(days=1, seconds=1)
        self.assertIs(Question(pub_date=pub).was_published_recently(), False)

    def test_was_published_recently_with_recent_question(self):
        """was_published_recently() returns True when pub_date is within the last day."""
        pub = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        self.assertIs(Question(pub_date=pub).was_published_recently(), True)
"alban.manishimwe@gmail.com"
] | alban.manishimwe@gmail.com |
6f8c4f4ccb1e05de4880c876fe4835b489fa5a94 | 00d8e3f95516dba6bf4a688f889ebf251f10ea6d | /face_recognition/prepare_data/gen_PNet_tfrecords.py | 8ae1788b5cf9eeaa59c687a5753038398117868b | [
"Apache-2.0"
] | permissive | 1160300614/faceRecognition | 7015efdd4a4d31e8f15759e8171b2dd721ff87c0 | c1470c13c82db071b04c62244e5b7d28e026c030 | refs/heads/master | 2022-10-09T12:11:50.328357 | 2020-06-12T01:34:55 | 2020-06-12T01:34:55 | 271,447,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:8a4275bea79f8915dc85d0535a3a7f13ef389384b3fb4fed867e0d36f406dd7f
size 4564
| [
"guanqiao000@163.com"
] | guanqiao000@163.com |
cbb9422f0b2cd4ef151418716fb78d78a14bcad9 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/3d_detection/BEVFormer/pytorch/mmdetection3d/mmdet3d/datasets/nuscenes_dataset.py | 47d6e15ed9aec63b17e5b80955dd4cb32ba04939 | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 26,116 | py | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from os import path as osp
import mmcv
import numpy as np
import pyquaternion
from nuscenes.utils.data_classes import Box as NuScenesBox
from ..core import show_result
from ..core.bbox import Box3DMode, Coord3DMode, LiDARInstance3DBoxes
from .builder import DATASETS
from .custom_3d import Custom3DDataset
from .pipelines import Compose
@DATASETS.register_module()
class NuScenesDataset(Custom3DDataset):
    r"""NuScenes Dataset.

    This class serves as the API for experiments on the NuScenes Dataset.

    Please refer to `NuScenes Dataset <https://www.nuscenes.org/download>`_
    for data downloading.

    Args:
        ann_file (str): Path of annotation file.
        pipeline (list[dict], optional): Pipeline used for data processing.
            Defaults to None.
        data_root (str): Path of dataset root.
        classes (tuple[str], optional): Classes used in the dataset.
            Defaults to None.
        load_interval (int, optional): Interval of loading the dataset. It is
            used to uniformly sample the dataset. Defaults to 1.
        with_velocity (bool, optional): Whether include velocity prediction
            into the experiments. Defaults to True.
        modality (dict, optional): Modality to specify the sensor data used
            as input. Defaults to None.
        box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            to its original format then converted them to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options includes.

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.
        filter_empty_gt (bool, optional): Whether to filter empty GT.
            Defaults to True.
        test_mode (bool, optional): Whether the dataset is in test mode.
            Defaults to False.
        eval_version (bool, optional): Configuration version of evaluation.
            Defaults to 'detection_cvpr_2019'.
        use_valid_flag (bool, optional): Whether to use `use_valid_flag` key
            in the info file as mask to filter gt_boxes and gt_names.
            Defaults to False.
    """
    # Raw nuScenes category name -> one of the 10 detection class names below.
    NameMapping = {
        'movable_object.barrier': 'barrier',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'movable_object.trafficcone': 'traffic_cone',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    # Per-class fallback attribute used by _format_bbox when the speed
    # heuristic there does not determine one.
    DefaultAttribute = {
        'car': 'vehicle.parked',
        'pedestrian': 'pedestrian.moving',
        'trailer': 'vehicle.parked',
        'truck': 'vehicle.parked',
        'bus': 'vehicle.moving',
        'motorcycle': 'cycle.without_rider',
        'construction_vehicle': 'vehicle.parked',
        'bicycle': 'cycle.without_rider',
        'barrier': '',
        'traffic_cone': '',
    }
    # Attribute name -> integer id; AttrMapping_rev is the positional inverse.
    AttrMapping = {
        'cycle.with_rider': 0,
        'cycle.without_rider': 1,
        'pedestrian.moving': 2,
        'pedestrian.standing': 3,
        'pedestrian.sitting_lying_down': 4,
        'vehicle.moving': 5,
        'vehicle.parked': 6,
        'vehicle.stopped': 7,
    }
    AttrMapping_rev = [
        'cycle.with_rider',
        'cycle.without_rider',
        'pedestrian.moving',
        'pedestrian.standing',
        'pedestrian.sitting_lying_down',
        'vehicle.moving',
        'vehicle.parked',
        'vehicle.stopped',
    ]
    # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
    ErrNameMapping = {
        'trans_err': 'mATE',
        'scale_err': 'mASE',
        'orient_err': 'mAOE',
        'vel_err': 'mAVE',
        'attr_err': 'mAAE'
    }
    CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
               'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
               'barrier')

    def __init__(self,
                 ann_file,
                 pipeline=None,
                 data_root=None,
                 classes=None,
                 load_interval=1,
                 with_velocity=True,
                 modality=None,
                 box_type_3d='LiDAR',
                 filter_empty_gt=True,
                 test_mode=False,
                 eval_version='detection_cvpr_2019',
                 use_valid_flag=False):
        self.load_interval = load_interval
        self.use_valid_flag = use_valid_flag
        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            classes=classes,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode)

        self.with_velocity = with_velocity
        self.eval_version = eval_version
        # Load the official nuScenes evaluation configuration for eval_version.
        from nuscenes.eval.detection.config import config_factory
        self.eval_detection_configs = config_factory(self.eval_version)
        if self.modality is None:
            self.modality = dict(
                use_camera=False,
                use_lidar=True,
                use_radar=False,
                use_map=False,
                use_external=False,
            )

    def get_cat_ids(self, idx):
        """Get category distribution of single scene.

        Args:
            idx (int): Index of the data_info.

        Returns:
            dict[list]: for each category, if the current scene
                contains such boxes, store a list containing idx,
                otherwise, store empty list.
        """
        info = self.data_infos[idx]
        if self.use_valid_flag:
            mask = info['valid_flag']
            gt_names = set(info['gt_names'][mask])
        else:
            gt_names = set(info['gt_names'])

        cat_ids = []
        for name in gt_names:
            if name in self.CLASSES:
                cat_ids.append(self.cat2id[name])
        return cat_ids

    def load_annotations(self, ann_file):
        """Load annotations from ann_file.

        Args:
            ann_file (str): Path of the annotation file.

        Returns:
            list[dict]: List of annotations sorted by timestamps.
        """
        data = mmcv.load(ann_file, file_format='pkl')
        # Keep samples in temporal order, then uniformly subsample by
        # load_interval.
        data_infos = list(sorted(data['infos'], key=lambda e: e['timestamp']))
        data_infos = data_infos[::self.load_interval]
        self.metadata = data['metadata']
        self.version = self.metadata['version']
        return data_infos

    def get_data_info(self, index):
        """Get data info according to the given index.

        Args:
            index (int): Index of the sample data to get.

        Returns:
            dict: Data information that will be passed to the data
                preprocessing pipelines. It includes the following keys:

                - sample_idx (str): Sample index.
                - pts_filename (str): Filename of point clouds.
                - sweeps (list[dict]): Infos of sweeps.
                - timestamp (float): Sample timestamp.
                - img_filename (str, optional): Image filename.
                - lidar2img (list[np.ndarray], optional): Transformations
                    from lidar to different cameras.
                - ann_info (dict): Annotation info.
        """
        info = self.data_infos[index]
        # standard protocol modified from SECOND.Pytorch
        input_dict = dict(
            sample_idx=info['token'],
            pts_filename=info['lidar_path'],
            sweeps=info['sweeps'],
            timestamp=info['timestamp'] / 1e6,
        )

        if self.modality['use_camera']:
            image_paths = []
            lidar2img_rts = []
            for cam_type, cam_info in info['cams'].items():
                image_paths.append(cam_info['data_path'])
                # obtain lidar to image transformation matrix
                lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
                lidar2cam_t = cam_info[
                    'sensor2lidar_translation'] @ lidar2cam_r.T
                lidar2cam_rt = np.eye(4)
                lidar2cam_rt[:3, :3] = lidar2cam_r.T
                lidar2cam_rt[3, :3] = -lidar2cam_t
                intrinsic = cam_info['cam_intrinsic']
                viewpad = np.eye(4)
                viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
                lidar2img_rt = (viewpad @ lidar2cam_rt.T)
                lidar2img_rts.append(lidar2img_rt)

            input_dict.update(
                dict(
                    img_filename=image_paths,
                    lidar2img=lidar2img_rts,
                ))

        if not self.test_mode:
            annos = self.get_ann_info(index)
            input_dict['ann_info'] = annos

        return input_dict

    def get_ann_info(self, index):
        """Get annotation info according to the given index.

        Args:
            index (int): Index of the annotation data to get.

        Returns:
            dict: Annotation information consists of the following keys:

                - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
                    3D ground truth bboxes
                - gt_labels_3d (np.ndarray): Labels of ground truths.
                - gt_names (list[str]): Class names of ground truths.
        """
        info = self.data_infos[index]
        # filter out bbox containing no points
        if self.use_valid_flag:
            mask = info['valid_flag']
        else:
            mask = info['num_lidar_pts'] > 0
        gt_bboxes_3d = info['gt_boxes'][mask]
        gt_names_3d = info['gt_names'][mask]
        # Map category names to class indices; names outside self.CLASSES
        # receive the label -1.
        gt_labels_3d = []
        for cat in gt_names_3d:
            if cat in self.CLASSES:
                gt_labels_3d.append(self.CLASSES.index(cat))
            else:
                gt_labels_3d.append(-1)
        gt_labels_3d = np.array(gt_labels_3d)

        if self.with_velocity:
            gt_velocity = info['gt_velocity'][mask]
            # Boxes with unknown velocity (NaN) are treated as stationary.
            nan_mask = np.isnan(gt_velocity[:, 0])
            gt_velocity[nan_mask] = [0.0, 0.0]
            gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1)

        # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be
        # the same as KITTI (0.5, 0.5, 0)
        gt_bboxes_3d = LiDARInstance3DBoxes(
            gt_bboxes_3d,
            box_dim=gt_bboxes_3d.shape[-1],
            origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=gt_labels_3d,
            gt_names=gt_names_3d)
        return anns_results

    def _format_bbox(self, results, jsonfile_prefix=None):
        """Convert the results to the standard format.

        Args:
            results (list[dict]): Testing results of the dataset.
            jsonfile_prefix (str): The prefix of the output jsonfile.
                You can specify the output directory/filename by
                modifying the jsonfile_prefix. Default: None.

        Returns:
            str: Path of the output json file.
        """
        nusc_annos = {}
        mapped_class_names = self.CLASSES

        print('Start to convert detection format...')
        for sample_id, det in enumerate(mmcv.track_iter_progress(results)):
            annos = []
            boxes = output_to_nusc_box(det, self.with_velocity)
            sample_token = self.data_infos[sample_id]['token']
            boxes = lidar_nusc_box_to_global(self.data_infos[sample_id], boxes,
                                             mapped_class_names,
                                             self.eval_detection_configs,
                                             self.eval_version)
            for i, box in enumerate(boxes):
                name = mapped_class_names[box.label]
                # Attribute heuristic: planar speed above 0.2 m/s implies a
                # moving / with-rider attribute; otherwise fall back to
                # class-specific defaults.
                if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2:
                    if name in [
                            'car',
                            'construction_vehicle',
                            'bus',
                            'truck',
                            'trailer',
                    ]:
                        attr = 'vehicle.moving'
                    elif name in ['bicycle', 'motorcycle']:
                        attr = 'cycle.with_rider'
                    else:
                        attr = NuScenesDataset.DefaultAttribute[name]
                else:
                    if name in ['pedestrian']:
                        attr = 'pedestrian.standing'
                    elif name in ['bus']:
                        attr = 'vehicle.stopped'
                    else:
                        attr = NuScenesDataset.DefaultAttribute[name]

                nusc_anno = dict(
                    sample_token=sample_token,
                    translation=box.center.tolist(),
                    size=box.wlh.tolist(),
                    rotation=box.orientation.elements.tolist(),
                    velocity=box.velocity[:2].tolist(),
                    detection_name=name,
                    detection_score=box.score,
                    attribute_name=attr)
                annos.append(nusc_anno)
            nusc_annos[sample_token] = annos
        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }

        mmcv.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print('Results writes to', res_path)
        mmcv.dump(nusc_submissions, res_path)
        return res_path

    def _evaluate_single(self,
                         result_path,
                         logger=None,
                         metric='bbox',
                         result_name='pts_bbox'):
        """Evaluation for a single model in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            metric (str, optional): Metric name used for evaluation.
                Default: 'bbox'.
            result_name (str, optional): Result name in the metric prefix.
                Default: 'pts_bbox'.

        Returns:
            dict: Dictionary of evaluation details.
        """
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(
            version=self.version, dataroot=self.data_root, verbose=False)
        # nuScenes split name keyed by dataset version.
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }
        nusc_eval = NuScenesEval(
            nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=False)
        nusc_eval.main(render_curves=False)

        # record metrics
        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = f'{result_name}_NuScenes'
        for name in self.CLASSES:
            for k, v in metrics['label_aps'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
            for k, v in metrics['label_tp_errors'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
            for k, v in metrics['tp_errors'].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}'.format(metric_prefix,
                                      self.ErrNameMapping[k])] = val

        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
        return detail

    def format_results(self, results, jsonfile_prefix=None):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list[dict]): Testing results of the dataset.
            jsonfile_prefix (str): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: Returns (result_files, tmp_dir), where `result_files` is a
                dict containing the json filepaths, `tmp_dir` is the temporal
                directory created for saving json files when
                `jsonfile_prefix` is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None

        # currently the output prediction results could be in two formats
        # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)
        # 2. list of dict('pts_bbox' or 'img_bbox':
        #     dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))
        # this is a workaround to enable evaluation of both formats on nuScenes
        # refer to https://github.com/open-mmlab/mmdetection3d/issues/449
        if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]):
            result_files = self._format_bbox(results, jsonfile_prefix)
        else:
            # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict
            result_files = dict()
            for name in results[0]:
                print(f'\nFormating bboxes of {name}')
                results_ = [out[name] for out in results]
                tmp_file_ = osp.join(jsonfile_prefix, name)
                result_files.update(
                    {name: self._format_bbox(results_, tmp_file_)})
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 result_names=['pts_bbox'],
                 show=False,
                 out_dir=None,
                 pipeline=None):
        """Evaluation in nuScenes protocol.

        Args:
            results (list[dict]): Testing results of the dataset.
            metric (str | list[str], optional): Metrics to be evaluated.
                Default: 'bbox'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str, optional): The prefix of json files including
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            show (bool, optional): Whether to visualize.
                Default: False.
            out_dir (str, optional): Path to save the visualization results.
                Default: None.
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None.

        Returns:
            dict[str, float]: Results of each evaluation metric.
        """
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

        if isinstance(result_files, dict):
            results_dict = dict()
            for name in result_names:
                print('Evaluating bboxes of {}'.format(name))
                ret_dict = self._evaluate_single(result_files[name])
            results_dict.update(ret_dict)
        elif isinstance(result_files, str):
            results_dict = self._evaluate_single(result_files)

        if tmp_dir is not None:
            tmp_dir.cleanup()

        if show or out_dir:
            self.show(results, out_dir, show=show, pipeline=pipeline)
        return results_dict

    def _build_default_pipeline(self):
        """Build the default pipeline for this dataset."""
        pipeline = [
            dict(
                type='LoadPointsFromFile',
                coord_type='LIDAR',
                load_dim=5,
                use_dim=5,
                file_client_args=dict(backend='disk')),
            dict(
                type='LoadPointsFromMultiSweeps',
                sweeps_num=10,
                file_client_args=dict(backend='disk')),
            dict(
                type='DefaultFormatBundle3D',
                class_names=self.CLASSES,
                with_label=False),
            dict(type='Collect3D', keys=['points'])
        ]
        return Compose(pipeline)

    def show(self, results, out_dir, show=False, pipeline=None):
        """Results visualization.

        Args:
            results (list[dict]): List of bounding boxes results.
            out_dir (str): Output directory of visualization result.
            show (bool): Whether to visualize the results online.
                Default: False.
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None.
        """
        assert out_dir is not None, 'Expect out_dir, got none.'
        pipeline = self._get_pipeline(pipeline)
        for i, result in enumerate(results):
            if 'pts_bbox' in result.keys():
                result = result['pts_bbox']
            data_info = self.data_infos[i]
            pts_path = data_info['lidar_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            points = self._extract_data(i, pipeline, 'points').numpy()
            # for now we convert points into depth mode
            points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR,
                                               Coord3DMode.DEPTH)
            inds = result['scores_3d'] > 0.1
            gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()
            show_gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,
                                               Box3DMode.DEPTH)
            pred_bboxes = result['boxes_3d'][inds].tensor.numpy()
            show_pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,
                                                 Box3DMode.DEPTH)
            show_result(points, show_gt_bboxes, show_pred_bboxes, out_dir,
                        file_name, show)
def output_to_nusc_box(detection, with_velocity=True):
    """Convert a detection result dict into a list of NuScenesBox objects.

    Args:
        detection (dict): Detection results with keys:

            - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
            - scores_3d (torch.Tensor): Detection scores.
            - labels_3d (torch.Tensor): Predicted box labels.
        with_velocity (bool): Whether to copy the per-box velocity
            (columns 7:9 of the box tensor) into the NuScenes boxes.

    Returns:
        list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
    """
    bboxes = detection['boxes_3d']
    det_scores = detection['scores_3d'].numpy()
    det_labels = detection['labels_3d'].numpy()

    centers = bboxes.gravity_center.numpy()
    dims = bboxes.dims.numpy()
    yaws = bboxes.yaw.numpy()

    # our LiDAR coordinate system -> nuScenes box coordinate system
    # (swap the first two dims to the w/l/h order NuScenesBox expects).
    nus_dims = dims[:, [1, 0, 2]]

    nusc_boxes = []
    for idx in range(len(bboxes)):
        orientation = pyquaternion.Quaternion(axis=[0, 0, 1], radians=yaws[idx])
        if with_velocity:
            velocity = (*bboxes.tensor[idx, 7:9], 0.0)
        else:
            velocity = (0, 0, 0)
        nusc_boxes.append(
            NuScenesBox(
                centers[idx],
                nus_dims[idx],
                orientation,
                label=det_labels[idx],
                score=det_scores[idx],
                velocity=velocity))
    return nusc_boxes
def lidar_nusc_box_to_global(info,
                             boxes,
                             classes,
                             eval_configs,
                             eval_version='detection_cvpr_2019'):
    """Convert the box from ego to global coordinate.

    Args:
        info (dict): Info for a specific sample data, including the
            calibration information.
        boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (list[str]): Mapped classes in the evaluation.
        eval_configs (object): Evaluation configuration object.
        eval_version (str, optional): Evaluation version; kept for
            interface compatibility (range limits come from
            ``eval_configs``). Default: 'detection_cvpr_2019'

    Returns:
        list: List of standard NuScenesBoxes in the global
            coordinate.
    """
    # Hoisted: the class->range mapping is loop-invariant (the original
    # re-read the attribute on every iteration).
    cls_range_map = eval_configs.class_range
    box_list = []
    for box in boxes:
        # Move box to ego vehicle coord system
        box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation']))
        box.translate(np.array(info['lidar2ego_translation']))
        # Filter detections beyond the per-class range (measured in ego frame).
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Move box to global coord system
        box.rotate(pyquaternion.Quaternion(info['ego2global_rotation']))
        box.translate(np.array(info['ego2global_translation']))
        box_list.append(box)
    return box_list
| [
"mingjiang.li@iluvatar.ai"
] | mingjiang.li@iluvatar.ai |
890029df9d4976d1bfd3c218d797be6666e56f9a | 74e503994ccd867d5acf1c76e5637f1f0c5117ef | /html2obj/lib/hobj.py | c8f2cd2f7ed6d9e8765eedf282da8b34bf329566 | [
"MIT"
] | permissive | genwch/html2obj | 8bfe644a910ed9ea254612fbdf3626cb5a1ba7c1 | d445c097efe9c78f48fdf0df77f5c6a2785504fd | refs/heads/master | 2023-05-02T03:02:30.191597 | 2021-05-09T16:43:01 | 2021-05-09T16:43:01 | 360,457,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,773 | py | from abc import ABC
class hobj(ABC):
    """Lightweight HTML-element object built from a parsed-tag record.

    The record's ``_tag`` becomes the node name (``_name``); only the
    whitelisted keys in :attr:`cols` are copied onto the instance.
    """

    # Record keys (besides "_tag") that __init__ copies onto the instance.
    cols: tuple = ("_attr", "_text", "_i")

    def __init__(self, rec: dict):
        """Populate the node from a parser record dict."""
        for key, value in rec.items():
            if key == "_tag":
                setattr(self, "_name", value)
            elif key in self.cols:
                setattr(self, key, value)
            # Any other key is deliberately ignored.

    def get(self, name: str, default=None):
        """Alias for :meth:`getattr`."""
        return self.getattr(name=name, default=default)

    def getattr(self, name: str, default=None):
        """Return attribute *name*, or *default* if it is not set."""
        try:
            return self.__getattribute__(name)
        except AttributeError:
            # Fix: the original bare `except:` also swallowed unrelated
            # errors (KeyboardInterrupt, bugs in properties, ...).
            return default

    def append(self, name, val, forcelist: bool = True):
        """Append *val* to list attribute *name* (created on demand).

        With ``forcelist`` the list is deduplicated via ``set`` (note:
        this does not preserve insertion order — existing behavior).
        Without it, a single-element list collapses to the bare value.
        """
        if val is None:
            return
        vals = self.getattr(name, [])
        if not isinstance(vals, list):
            vals = [vals]
        vals.append(val)
        if forcelist:
            vals = list(set(vals))
        if vals != []:
            if not forcelist and len(vals) == 1:
                vals = vals[0]
            setattr(self, name, vals)

    def concat(self, name, val):
        """Concatenate *val* (str or list of str) onto string attribute *name*."""
        vals = self.getattr(name, "")
        if isinstance(val, list):
            vals += " ".join(val)
        else:
            vals += " " + val
        if vals != "":
            setattr(self, name, vals.strip())

    def addchild(self, child):
        """Attach *child* under this node and merge its text (except scripts)."""
        ctag = child._name
        self.append(name="_elem", val=ctag)
        self.append(name=ctag, val=child, forcelist=False)
        if ctag == "script":
            # Script bodies are not merged into the visible text.
            return
        try:
            self.concat(name="_text", val=child._text)
        except (AttributeError, TypeError):
            # Child may have no _text, or a non-str one; keep best-effort
            # semantics but no longer swallow arbitrary exceptions.
            pass

    def __repr__(self):
        return str(self.__dict__)
| [
"genwch@hotmail.com"
] | genwch@hotmail.com |
908076fe749518b81a5d792a64a0ac250dc8aa67 | 083ca3df7dba08779976d02d848315f85c45bf75 | /LongestSubstringwithAtLeastKRepeatingCharacters5.py | 424c18652d21975839cddc18e43a9e0b3e29a8f8 | [] | no_license | jiangshen95/UbuntuLeetCode | 6427ce4dc8d9f0f6e74475faced1bcaaa9fc9f94 | fa02b469344cf7c82510249fba9aa59ae0cb4cc0 | refs/heads/master | 2021-05-07T02:04:47.215580 | 2020-06-11T02:33:35 | 2020-06-11T02:33:35 | 110,397,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | class Solution:
def longestSubstring(self, s, k):
    """
    Longest substring in which every present character occurs >= k times.

    :type s: str
    :type k: int
    :rtype: int
    """
    best = 0
    start = 0
    n = len(s)
    while start + k <= n:
        # Starting on a repeat of the previous char cannot beat the
        # window that already began earlier.
        if start > 0 and s[start] == s[start - 1]:
            start += 1
            continue
        counts = {}
        deficient = 0      # bitmask of letters with 0 < count < k
        last_valid = start
        for end in range(start, n):
            ch = s[end]
            counts[ch] = counts.get(ch, 0) + 1
            bit = 1 << (ord(ch) - ord('a'))
            if counts[ch] < k:
                deficient |= bit
            else:
                deficient &= ~bit
            if deficient == 0:
                last_valid = end
                best = max(best, end - start + 1)
        # Restart just past the last position that closed a valid window.
        start = last_valid + 1
    return best
if __name__ == '__main__':
    # Read the string and the repetition threshold k from stdin,
    # then print the answer.
    s = input()
    k = int(input())
    solution = Solution()
    print(solution.longestSubstring(s, k))
| [
"jiangshen95@163.com"
] | jiangshen95@163.com |
519c2bc1718d2cd11a51ca45082f36a6e6625f4c | 0c633381e123d26a3b3fc86003a24970b21a5709 | /main/views.py | a13833c9e44ba82b3747b14c836c8fe50f3cbc99 | [] | no_license | brebiv/HeyTam | 466703f56f9078075237173e573e0c3762f7d88a | db9e6c1ef91182dfcbd40b966bc1c07d88103f4a | refs/heads/master | 2022-07-31T19:53:34.329887 | 2020-05-16T15:33:41 | 2020-05-16T15:33:41 | 261,739,465 | 1 | 0 | null | 2020-05-16T15:33:42 | 2020-05-06T11:34:23 | Python | UTF-8 | Python | false | false | 1,331 | py | from django.http import HttpResponse, Http404
from django.shortcuts import render
from .models import Product, Quote, Article
import random
import json
# Views built to work as single page aplication
# index() returns header of the page, FYI it's in main/base.html
# All other functions built to receive AJAX request from JS code
# They return html code for JS that put it into #main div inside base.html
def index(request):
    # SPA shell: the base layout/header; all other views fill #main via AJAX
    # (see the module comment above).
    return render(request, "main/base.html")
def main(request):
    # Landing fragment showing one randomly selected quote.
    quotes = Quote.objects.all()
    random_quote = random.choice(quotes)
    return render(request, 'main/main.html', {"quote": random_quote})
def news(request):
    # News fragment, newest article first.
    all_news = Article.objects.all()
    # NOTE(review): relies on the default queryset order being oldest-first;
    # an explicit order_by() would be safer -- confirm the model's Meta ordering.
    newest_news = reversed(list(all_news)) # To make news in right order, newest on the top.
    return render(request, 'main/news.html', {"news": newest_news})
def products(request):
    """Render the product-list fragment for the SPA's #main container."""
    # list(...) materializes the queryset directly; a copying comprehension
    # over an iterable is redundant (idiom/perf).
    products = list(Product.objects.all())
    return render(request, 'main/products.html', {"products": products})
def about(request):
    # Static "about" fragment.
    return render(request, 'main/about.html')
def product(request, product_id):
    """Render one product's detail fragment; raise 404 for an unknown id."""
    try:
        # Renamed from ``product`` -- the local previously shadowed this
        # view function's own name.
        item = Product.objects.get(pk=product_id)
    except Product.DoesNotExist:
        raise Http404("Product does not exist")
    return render(request, 'main/product.html', {"product": item})
"brebiv@gmail.com"
] | brebiv@gmail.com |
4a170f8da10696ccdc8a9c787cbca1805d51c1cb | 08bb09d990f4bc90a82594a2161eed025d709f7b | /manage.py | afbb0cdd5fea4ba817815b3c48f73b85005b9630 | [
"X11"
] | permissive | galacticpuzzlehunt/puzzlord | 6e4413cec53bde3f016520a7848df9b482cc9141 | aec59c1e4d671e7f4a06b728409e4e08923a0386 | refs/heads/master | 2023-05-28T13:46:20.013261 | 2023-03-24T15:13:01 | 2023-03-24T15:13:01 | 237,137,193 | 29 | 26 | null | 2023-05-09T22:21:33 | 2020-01-30T04:15:46 | Python | UTF-8 | Python | false | false | 623 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: select the dev settings module and hand control to
    Django's command-line dispatcher."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| [
"brian.chxn@gmail.com"
] | brian.chxn@gmail.com |
a36fdb13104066756747a6b7823da3c3c4d1b9c0 | 2e35ec2a38e8cbcf8c12e32abf66037ddc7a2eaf | /signalprocessing/settings.py | b79abea0c4056b9117581f9b3913f135503a95fb | [] | no_license | sreejasn7/signalprocessing | fe93416c330758a86561bae1aa4063de8505806a | 2c69dde9934a6d313495a4e2e3d4050ae3ad2c41 | refs/heads/master | 2020-05-02T11:25:15.589526 | 2019-03-27T06:24:27 | 2019-03-27T06:24:27 | 177,928,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,158 | py | """
Django settings for signalprocessing project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is hard-coded in source control -- load it
# from an environment variable before any non-development deployment.
SECRET_KEY = 'i&w7998)27@wcgpi9p6)%h153lwjx8em82cob)3#73&_ut*r%$'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine for local development only.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'simulations'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'signalprocessing.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'signalprocessing.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# SQLite file in the project root -- development default.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/

STATIC_URL = '/static/'
| [
"sreeja_n@redbytes.in"
] | sreeja_n@redbytes.in |
2514e06398d1649d7e768e2219aa835bfc94e0c7 | dffd7156da8b71f4a743ec77d05c8ba031988508 | /joi/prelim/2019/yo1c/c.py | 8fa22ad3e7acb87ef87d1e4727e8bf36c56ef603 | [] | no_license | e1810/kyopro | a3a9a2ee63bc178dfa110788745a208dead37da6 | 15cf27d9ecc70cf6d82212ca0c788e327371b2dd | refs/heads/master | 2021-11-10T16:53:23.246374 | 2021-02-06T16:29:09 | 2021-10-31T06:20:50 | 252,388,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | n, *a = map(int, open(0).read().split())
cnt = ans = 0
prev = 0
for i in a:
if prev>i:
ans = max(ans, cnt)
cnt = 0
cnt += 1
prev = i
print(max(ans, cnt))
| [
"v.iceele1810@gmail.com"
] | v.iceele1810@gmail.com |
0f65a9276df606adea2939ddfc94c49913cc6ae4 | 77da2843ef8658fba8ec1459cec3f7c5224db28e | /tinyBlog/__init__.py | c8645196f23d9e0b4233d5ff3c0f35a4647279d8 | [] | no_license | shlliu/tinyBlog | 10180df30c6fa6c0e5a779c9d7b48aca1f919a34 | fa2aa9f9243252395ad6d8204ab6a5817199932f | refs/heads/master | 2021-04-03T04:57:27.956483 | 2018-03-17T14:18:42 | 2018-03-17T14:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | from flask import Flask
from flask_mongoengine import MongoEngine

app = Flask(__name__)
# MongoDB database name for the blog.
app.config["MONGODB_SETTINGS"] = {'DB' : 'my-tiny-blog'}
# NOTE(review): hard-coded secret key -- move to an environment variable
# before deploying.
app.config["SECRET_KEY"] = "ThisIsS3cr3t"
db = MongoEngine(app)


def register_blueprints(app):
    # Imports deferred into the function -- presumably to avoid a circular
    # import (views/admin import this module's `app`); verify.
    from tinyBlog.views import posts
    from tinyBlog.admin import admin
    app.register_blueprint(posts)
    app.register_blueprint(admin)


register_blueprints(app)

if __name__ == '__main__':
    app.run()
| [
"johnny.liu@veritas.com"
] | johnny.liu@veritas.com |
2dad3b930588113d2d3c9ee1c85c48d0ef6b339c | c727c07e8560ca938ac0708c8aed755f47e86a1d | /pictureit/search.py | 88296fa606ab3b6bee33161eef587103c147d67d | [] | no_license | starlock/PictureIt | 0272957f8c11e545b439b447700987c9b3bb5423 | a41d93a34d7892386ea409eb816f3a861ce5d5c0 | refs/heads/master | 2021-01-21T06:59:29.606231 | 2013-03-22T14:42:40 | 2013-03-22T14:42:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | import json
from math import floor
from flask import Blueprint, render_template, Response, request
from .image_index import ImageColorIndex
# Blueprint exposing the search UI and its JSON API.
mod = Blueprint("Search", __name__)
# Shared color index, built once at import time.
image_index = ImageColorIndex()


@mod.route("/")
def index():
    # Landing page with the color-picker UI.
    return render_template("index.html")
@mod.route("/search", methods=["post"])
def search():
print "data:", request.data
data = json.loads(request.data)
images = []
for color in data["colors"]:
for image_name in image_index.search(color[0], color[1], color[2], data.get("count", 10)):
images.append("/static/data/i/product/100/0/%s" % image_name)
response = Response(json.dumps({"images":images}))
response.content_type = "application/json"
return response
| [
"william@defunct.cc"
] | william@defunct.cc |
6f7b09b3bc0afa1b87897d8811dee37992af9e92 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil1566.py | adf7d2e0df4b68ca67d20975984b7e68e9320ea4 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,272 | py | # qubit number=5
# total number=52
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the auto-generated 5-qubit test circuit.

    The gate sequence and the ``# number=`` tags come from the circuit
    generator and are kept verbatim; do not reorder gates.
    """
    prog = Program() # circuit begin
    prog += H(0) # number=3
    prog += H(1) # number=4
    prog += X(1) # number=48
    prog += H(1) # number=26
    prog += CZ(4,1) # number=27
    prog += H(1) # number=28
    prog += H(2) # number=5
    prog += H(3) # number=6
    prog += H(4) # number=21
    prog += H(1) # number=34
    prog += CZ(4,1) # number=35
    prog += Z(4) # number=46
    prog += RX(0.8011061266653969,2) # number=37
    prog += H(1) # number=36
    prog += H(0) # number=1
    prog += H(1) # number=2
    prog += H(2) # number=7
    prog += H(3) # number=8
    prog += CNOT(1,0) # number=38
    prog += X(0) # number=39
    prog += CNOT(1,0) # number=40
    prog += CNOT(0,1) # number=42
    prog += X(1) # number=43
    prog += CNOT(0,1) # number=44
    prog += X(2) # number=11
    prog += Y(1) # number=45
    prog += X(3) # number=12
    prog += H(2) # number=41
    prog += CNOT(1,0) # number=22
    prog += X(4) # number=47
    prog += X(0) # number=23
    prog += H(0) # number=49
    prog += CZ(1,0) # number=50
    prog += H(0) # number=51
    prog += CNOT(0,1) # number=30
    prog += X(1) # number=31
    prog += CNOT(0,1) # number=32
    prog += X(2) # number=15
    prog += H(4) # number=29
    prog += X(3) # number=16
    prog += H(0) # number=17
    prog += H(1) # number=18
    prog += H(2) # number=19
    prog += H(3) # number=20
    # circuit end
    return prog
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1566.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
9c8c827b0f3203093a67a11c9a59a65143bbb9c5 | 4d7c8a2da7a6d5157d0009859f93b9b63b570a4f | /iomp/hostmanage/migrations/0003_auto_20180412_1419.py | 7f1676e7f9874fbaa22090d822c29799d10ac08e | [] | no_license | dang-1/ioms | e2e11deea527d2878488f7fa9239ff7b9becef43 | c7281efdc4d5ca9026c58519b9b364b1af430835 | refs/heads/master | 2021-04-26T22:55:37.396722 | 2018-08-29T08:46:51 | 2018-08-29T08:46:51 | 123,896,988 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-12 06:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename the FK fields ``platfrom_id`` /
    ``project_id`` to drop the explicit ``_id`` suffix (the ``platfrom``
    typo is preserved from the model definition)."""

    dependencies = [
        ('hostmanage', '0002_auto_20180412_1415'),
    ]

    operations = [
        migrations.RenameField(
            model_name='host',
            old_name='platfrom_id',
            new_name='platfrom',
        ),
        migrations.RenameField(
            model_name='host',
            old_name='project_id',
            new_name='project',
        ),
    ]
| [
"93651849@qq.com"
] | 93651849@qq.com |
f44472ad1ded764018d0b7bd4319ee41fb0ec7a1 | 476905986cb6609759322ea54e18ede2d9c33237 | /src/gamemap/map_io.py | 4c2152eeb0d6db1159612d99872d1a931090140d | [
"MIT"
] | permissive | davout1806/Hexcrawler | 2fa66edfe60faca12a8156afe7ec9f2ce145939b | 79ca4ab9327abf08de1743612c23eb89aa53a2b9 | refs/heads/master | 2022-11-28T03:56:54.106087 | 2020-08-05T04:40:45 | 2020-08-05T04:40:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | '''
Created on Jun 6, 2012
@author: Chris
'''
#class MapReader(object):
#'''
#classdocs
# '''
# def __init__(self, filename):
# '''
# Constructor
# '''
# def generate(self):
# return None
#class MapWriter(object):
# '''
# classdocs
# '''
#def __init__(self, filename):
| [
"cfmcdonald78@gmail.com"
] | cfmcdonald78@gmail.com |
118e45dcf19e9f6acc3caea89a3d1c4339815d84 | 764a2ef0a179ab002dad7135f64dbca2eb98a1f8 | /Approach1/simulation.py | c33fe15e2676159e83f6f9e022dcc440e1f8bd66 | [] | no_license | PRONGS-CHIRAG/BIgData-IPL-Match-Simulation | 7c5cef6b8cf8c8f4ffb3b98d7ade1431d7f9a00c | 7f21503774ae99c4bc4a28f7efd58721a7725387 | refs/heads/master | 2020-05-17T17:49:39.937481 | 2019-01-03T07:24:01 | 2019-01-03T07:24:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,192 | py | import csv
import string
import random
import pandas as pd
import numpy as np

# Pre-computed probability tables.
# NOTE(review): absolute, machine-specific paths -- make them relative or
# configurable.
pp_prob=pd.read_csv('/home/anup/bdProject/bd/pp_prob.csv')
batsman_cluster=pd.read_csv('/home/anup/bdProject/bd/batting_cluster.csv')
bowler_cluster=pd.read_csv('/home/anup/bdProject/bd/bowling_cluster.csv')
cluster_prob=pd.read_csv('/home/anup/bdProject/bd/clusters_prob.csv')
# Sanity-check the loaded tables.
print(pp_prob.head())
print(batsman_cluster.head())
print(bowler_cluster.head())
print(cluster_prob.head())
def pro_fun(batsman, bowler):
    """Return the outcome-probability row for a batsman/bowler pair.

    The returned list holds the six run-outcome probabilities followed by
    the ball-survival probability. Looks up the exact pair first; falls
    back to the batsman-cluster x bowler-cluster table when the pair is
    missing or its survival entry is NaN. Unknown players get a random
    cluster, so results are stochastic for them.
    """
    a = pp_prob[(pp_prob.batsman == batsman) & (pp_prob.bowler == bowler)]
    prob = []
    flag = 0
    # (Removed the unused `x = a.empty is True` local from the original.)
    if not a.empty:
        prob = list(a.values[0][2:])
        if np.isnan(prob[-1]):
            # Pair row exists but has no survival probability.
            flag = 1
    # `or` instead of the original bitwise `|` on booleans.
    if flag == 1 or a.empty:
        b = batsman_cluster[batsman_cluster.player_name == batsman]
        if not b.empty:
            batsman_cluster_no = b.values[0][2]
        else:
            batsman_cluster_no = random.randint(0, 9)
        c = bowler_cluster[bowler_cluster.player_name == bowler]
        if not c.empty:
            bowler_cluster_no = c.values[0][2]
        else:
            bowler_cluster_no = random.randint(0, 6)
        d = cluster_prob[(cluster_prob.bowler_cluster == bowler_cluster_no)
                         & (cluster_prob.batsman_cluster == batsman_cluster_no)]
        if flag == 1:
            # Only the missing survival entry is patched from the clusters.
            prob[-1] = d.values[0][-1]
        else:
            prob = list(d.values[0][2:])
    return prob
# match 55 -- hard-coded XIs and over-by-over bowling orders.
# NOTE: indentation below is reconstructed from a whitespace-mangled dump;
# per-ball vs per-over placement of the strike swaps should be verified.
t1bat=["RV Uthappa","G Gambhir","C Munro","MK Pandey","YK Pathan","JO Holder","Shakib Al Hasan","SA Yadav","AS Rajpoot", "SP Narine", "Kuldeep Yadav"]
t2bat=['S Dhawan','DA Warner ','NV Ojha ','Yuvraj Singh','KS Williamson','DJ Hooda','MC Henriques','KV Sharma', 'B Kumar','BB Sran',
'Mustafizur Rahman']
t1bowl=["YK Pathan","AS Rajpoot","Shakib Al Hasan","SP Narine","AS Rajpoot","SP Narine","JO Holder","C Munro","JO Holder","Kuldeep Yadav","Shakib Al Hasan","Kuldeep Yadav","SP Narine","Kuldeep Yadav","Shakib Al Hasan","Kuldeep Yadav","Shakib Al Hasan","SP Narine","JO Holder","AS Rajpoot"]
t2bowl=["B Kumar","BB Sran","B Kumar","BB Sran","KS Williamson","DJ Hooda","KV Sharma","DJ Hooda","KV Sharma","MC Henriques","Mustafizur Rahman","MC Henriques","Mustafizur Rahman","KV Sharma","BB Sran","B Kumar","Mustafizur Rahman","BB Sran","Mustafizur Rahman","B Kumar"]
#innings1
bat=1                  # index of the next batsman to come in
batsmen=[0,1]          # indices into t1bat of the two batsmen at the crease
innings1_run=[]        # runs per legal ball
innings1_wicket=[]     # ball numbers at which wickets fell
score=[0,0]            # [innings1, innings2] totals
wickets=[0,0]
prob={}                # per-batsman cumulative survival probability
flag=0                 # set when the innings ends (all out)
for i in range(20):
    bowler= t2bowl[i]
    if(flag==1):
        break
    for j in range(6):
        if(flag==1):
            break
        bat_bowl = (t1bat[batsmen[0]],bowler)
        val = pro_fun(bat_bowl[0],bat_bowl[1])
        runs_prob = val[:6]
        cum_prob=0
        run=0
        # Multiply the striker's survival probability ball by ball.
        if(bat_bowl[0] not in prob.keys()):
            prob[bat_bowl[0]]=val[-1]
        else:
            prob[bat_bowl[0]]*=val[-1]
        # The batsman is out once cumulative survival drops below 0.05.
        if(prob[bat_bowl[0]] < 0.05):
            wickets[0]+=1
            innings1_run.append(0)
            innings1_wicket.append((i*6)+j+1)
            if(wickets[0]==10):
                flag=1
            bat+=1
            batsmen[0]=bat
        else:
            rand=random.random()
            # Sample the run outcome from the cumulative distribution.
            for y in range(6):
                cum_prob+=runs_prob[y]
                if(cum_prob > rand):
                    if(y==5):
                        run=6
                    else:
                        run=y
                    break
            innings1_run.append(run)
            score[0]+=run
            # Odd runs (1 or 3) swap the strike.
            if(run==1 or run == 3):
                batsmen[0],batsmen[1]=batsmen[1],batsmen[0]
    # End of over: batsmen change ends.
    batsmen[0],batsmen[1]=batsmen[1],batsmen[0]
#innings2
bat=1
batsmen=[0,1]
innings2_run=[]
innings2_wicket=[]
wickets[1]=0
score[1]=0
flag=0
prob1={}
for i in range(20):
    bowler= t1bowl[i]
    if(flag==1):
        break
    for j in range(6):
        if(flag==1):
            break
        bat_bowl = (t2bat[batsmen[0]],bowler)
        val = pro_fun(bat_bowl[0],bat_bowl[1])
        runs_prob = val[:6]
        cum_prob=0
        run=0
        if(bat_bowl[0] not in prob1.keys()):
            prob1[bat_bowl[0]]=val[-1]
        else:
            prob1[bat_bowl[0]]*=val[-1]
        if(prob1[bat_bowl[0]] < 0.05):
            wickets[1]+=1
            innings2_run.append(0)
            innings2_wicket.append((i*6)+j+1)
            if(wickets[1]==10):
                flag=1
            bat+=1
            batsmen[0]=bat
        else:
            rand=random.random()
            for y in range(6):
                cum_prob+=runs_prob[y]
                if(cum_prob > rand):
                    if(y==5):
                        run=6
                    else:
                        run=y
                    break
            innings2_run.append(run)
            score[1]+=run
            # Chase complete: target passed, innings over.
            if(score[1]>score[0]):
                flag=1
            if(run==1 or run == 3):
                batsmen[0],batsmen[1]=batsmen[1],batsmen[0]
    batsmen[0],batsmen[1]=batsmen[1],batsmen[0]
print('Innings 1 ')
print('Score / wicket')
print(score[0],'/',wickets[0])
print('Innings 2 ')
print('Score / wicket')
print(score[1],'/',wickets[1])
| [
"noreply@github.com"
] | noreply@github.com |
04bcc8816e64b1bbd93b217225ea2e7991c53c7f | c4378845cb7c6d66050975e8375f13c25a117b68 | /ATR/MultiTest.py | f83c528427122f0518ca8bf0c459c5db53d3ceab | [] | no_license | ManuelHuang/allstragety | 0640bbbfd7a044a54a0617b923799d1111f7876e | 5a49e6ae405bb096a4d66650e637d43031bb5b82 | refs/heads/master | 2020-06-11T05:49:21.902324 | 2019-06-27T01:08:13 | 2019-06-27T01:08:13 | 193,866,978 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,314 | py | from ATR import TestStrategy
from ATR_sell import TestStrategy_sell
# from run_sell import TestStrategy_sell
import time
import backtrader as bt
import backtrader.analyzers as btanalyzers
import pandas as pd
import numpy as np
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
import logging
def _run_backtest(arg, data_csv, strategy_cls, log_file):
    """Backtest one parameter set and log the result as a CSV line.

    Args:
        arg: Parameter list [period_atr, period_sma, b, backup_pct,
            trail_pct, back_stop_order_pct] forwarded to the strategy.
        data_csv: OHLC csv file feeding the backtest.
        strategy_cls: Strategy class added to cerebro.
        log_file: Destination log file for the result line.
    """
    dataframe = pd.read_csv(data_csv, index_col=0, parse_dates=[0])
    dataframe['openinterest'] = 0
    data = bt.feeds.PandasData(dataname=dataframe)
    cerebro = bt.Cerebro()
    cerebro.adddata(data)
    cerebro.broker.setcash(1000000.0)
    cerebro.broker.setcommission(commission=0.0006)
    cerebro.addanalyzer(btanalyzers.SharpeRatio, _name='mysharpe',
                        timeframe=bt.TimeFrame.Weeks)
    cerebro.addanalyzer(btanalyzers.DrawDown, _name='mydrawdown')
    cerebro.addstrategy(strategy_cls, *arg)
    # Run exactly once and keep the strategy; the original called
    # cerebro.run() three times per parameter set (once per metric),
    # tripling the work.
    strat = cerebro.run()[0]
    logging.basicConfig(filename=log_file, level=logging.DEBUG)
    final_value = cerebro.broker.getvalue()
    sharp = strat.analyzers.mysharpe.get_analysis()['sharperatio']
    drawdown = strat.analyzers.mydrawdown.get_analysis().max.drawdown
    # csv columns: period_atr, period_sma, b, backup_pct, trail_pct,
    # back_stop_order_pct, final value, sharpe, max drawdown
    out_message = '%s,%s,%s,%s,%s,%s,%.2f,%s,%s' % (
        arg[0], arg[1], arg[2], arg[3], arg[4], arg[5],
        final_value, sharp, drawdown)
    print(out_message)
    logging.info(out_message)


def my_run(arg):
    """2017 data, buy-side strategy."""
    _run_backtest(arg, 'xbtusd_data_201701-201712.csv', TestStrategy,
                  'ATR_buy_2017_results.log')


def my_run2(arg):
    """2018 data, buy-side strategy."""
    _run_backtest(arg, 'xbtusd_data_201801-201812.csv', TestStrategy,
                  'ATR_buy_2018_results.log')


def my_run3(arg):
    """2018 data, sell-side strategy."""
    _run_backtest(arg, 'xbtusd_data_201801-201812.csv', TestStrategy_sell,
                  'ATR_sell_2018_results.log')


def my_run4(arg):
    """2017 data, sell-side strategy."""
    _run_backtest(arg, 'xbtusd_data_201701-201712.csv', TestStrategy_sell,
                  'ATR_sell_2017_results.log')


if __name__ == '__main__':
    logging.info('begin')
    start = time.time()
    # Parameter grid. Integer step counts replace the original
    # `x += 0.01` accumulation, whose float drift made the final
    # 0.05 / 1.05 boundary values fail the `<=` test and be dropped.
    params = []
    for period_atr in range(60, 2221, 120):
        for b in (1, 2):
            for trail_step in range(1, 6):
                for back_step in range(1, 6):
                    params.append([period_atr, period_atr, b, 0.01,
                                   round(trail_step * 0.01, 2),
                                   round(1 + back_step * 0.01, 2)])
    print(params)
    # Context manager == shutdown(wait=True) on exit.
    with ProcessPoolExecutor(max_workers=38) as pool:
        pool.map(my_run, params)
        logging.info('1.log')
        pool.map(my_run2, params)
        logging.info('2.log')
        pool.map(my_run3, params)
        logging.info('3.log')
        pool.map(my_run4, params)
        logging.info('4.log')
    print(time.time() - start)
| [
"417100727@qq.com"
] | 417100727@qq.com |
2549eab19004369a15d27519ad67845d7a8c6f38 | 39d4cec575eef9bef96bc910b450f6c7a7655b54 | /Python/DetecterMiseEnVeilleParAbsenceActiviteProgramme/DetecterMiseEnVeilleParAbsenceActiviteProgramme.py | 98e0c1ed18c3509ac99f0559675b01aa22eddb50 | [] | no_license | yanndanielou/yanndanielou-programmation | a3cc1f27fc1db55f6223c9f6eedff43a91bfebd0 | f7655f631605d2b9da6cd4fa8aadcf3c9a3518b4 | refs/heads/master | 2023-09-04T10:44:33.405902 | 2023-08-31T12:51:05 | 2023-08-31T12:51:05 | 38,930,671 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | #from pywinauto.application import Application
import pyautogui
#For logs
import LoggerConfig
import logging
import sys
import random
import time
from datetime import timedelta
def infinite_loop_detect_mise_en_veille_par_absence_activite():
    """Poll forever: detect a PC sleep/suspend by noticing that a 1-second
    sleep() actually took much longer than requested (wall clock jumped)."""
    duree_attente_theorique_en_secondes = 1
    time_before_wait = time.time()
    while True:
        logging.debug("Begin to wait")
        time.sleep(duree_attente_theorique_en_secondes)
        time_after_wait = time.time()
        duree_wait_realisee = time_after_wait - time_before_wait
        time_before_wait = time.time() # re-taken right after the previous measurement so a suspend anywhere in the loop is caught
        # More than 2x the requested wait -> the machine most likely slept.
        if duree_wait_realisee > 2*duree_attente_theorique_en_secondes:
            # NOTE(review): time_before_wait was just reset above, so the
            # "start of sleep" logged below is actually post-resume -- the
            # message says "aux environs de" (approximately), but verify
            # this is intended.
            #LoggerConfig.printAndLogInfo("Mise en veille du PC detectee, aux environs de :" + time.localtime(time_before_wait).strftime("%m/%d/%Y, %H:%M:%S") + " avec reprise aux environs de : " + time.localtime(time_before_wait).strftime("%m/%d/%Y, %H:%M:%S") + " . Duree de la veille estimee: " + str(timedelta(seconds=duree_wait_realisee)))
            LoggerConfig.printAndLogInfo("Mise en veille du PC detectee, aux environs de :" + time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(time_before_wait)) + " avec reprise aux environs de : " + time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(time_after_wait)) + " . Duree de la veille estimee: " + str(timedelta(seconds=duree_wait_realisee)))
        #else:
        #    logging.debug("Pas de mise en veille du PC detectee, time_before_wait: " + time.localtime(time_before_wait).strftime("%m/%d/%Y, %H:%M:%S") + " time_before_wait: " + time.localtime(time_before_wait).strftime("%m/%d/%Y, %H:%M:%S") + " . Duree de la non veille estimee: " + str(timedelta(seconds=duree_wait_realisee)))
def main(argv):
    """Configure a randomly-suffixed log file, then run the detection loop.

    `argv` is accepted for symmetry with the CLI entry point but unused.
    """
    log_name = "DetecterMiseEnVeilleParAbsenceActiviteProgramme.%d.log" % random.randrange(100)
    LoggerConfig.configureLogger(log_name)
    LoggerConfig.printAndLogInfo("Application start")
    infinite_loop_detect_mise_en_veille_par_absence_activite()
    LoggerConfig.printAndLogInfo("Application ended")
# Script entry point: forward CLI arguments (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| [
"yanndanielou@users.noreply.github.com"
] | yanndanielou@users.noreply.github.com |
1bca12cfd8bbd19fd19799c976e21e78624bce1d | f7035ec8f66a6a8c02550737775666bb26d2bd1a | /swipeanddine/admin.py | 81f984f48b0f902fea59576013d1d2bc99f4437a | [] | no_license | emukupa/swipe | 8a5757d3163eda0ba727e72e0c34f5112d0be800 | 7759ef88a6f6feab1fdda664129f27d450294734 | refs/heads/master | 2020-03-22T18:53:24.333863 | 2018-07-13T08:01:47 | 2018-07-13T08:01:47 | 140,489,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from django.contrib import admin
from .models import Category, Food
# Register Category and Food so they are manageable from the Django admin site.
admin.site.register((Category, Food,))
| [
"lilkups@yahoo.com"
] | lilkups@yahoo.com |
c3016792ba2b0329ef1c3a26763b126d4c7298ca | c680279492d759e35c8ff329e16794d9a90c142c | /age.py | d56da1b5107122c7b7d27801fefd26a2773bce84 | [] | no_license | bishnuprasad-sahoo/python | 93613a122d4a1d8d9f63fa430f3ce24d957c6698 | 88ea54083b667378184843cc24bf6a09f1b3258d | refs/heads/master | 2020-07-16T00:49:11.008637 | 2019-09-01T15:39:28 | 2019-09-01T15:39:28 | 205,685,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | #------------------Calculate_your_age-------------------#
# Ask the user for the current year and their birth year (read as strings).
current_year = input("Enter Current Year : ")
birth_year = input("Enter your Birth Year : ")
# Age is the plain difference of the two years; whether the birthday has
# already passed this year is not taken into account.
age = int(current_year) - int(birth_year)
print("Your age is : ", age)
| [
"noreply@github.com"
] | noreply@github.com |
4497e161d8e06316103a36d717fe15e66be3c951 | 3b504a983f1807ae7c5af51078bfab8c187fc82d | /client/input/InputSubsystem/JoyInput.py | 03a21fda604012c74ea881e2b4fdb3fcfdc8f167 | [] | no_license | SEA-group/wowp_scripts | 7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58 | 2fe54a44df34f2dcaa6860a23b835dcd8dd21402 | refs/heads/master | 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,816 | py | # Embedded file name: scripts/client/input/InputSubsystem/JoyInput.py
import Keys
import BWPersonality
import InputMapping
import math
from MathExt import *
from consts import ROLL_AXIS, VERTICAL_AXIS, HORIZONTAL_AXIS, FORCE_AXIS, FLAPS_AXIS, INPUT_SYSTEM_STATE
from input.InputSubsystem.InputSubsystemBase import InputSubsystemBase
import GameEnvironment
BASE_SMOOTH_WINDOW = 10
class ProxyCamStrategy(object):
    """No-op stand-in used when no camera object is available yet."""

    @staticmethod
    def rotateCursor(*args):
        """Accept and ignore any cursor-rotation request."""
        return None
class JoystickExpertInput(InputSubsystemBase):
def __init__(self, profile):
self._profile = profile
self.__isRawForceAxis = True
self.__smoothStack = {}
self.__lastSmoothWin = {}
class JoyEvent:
def __init__(self):
self.deviceId = None
self.axis = None
self.value = None
return
self.__joyEvent = JoyEvent()
def pushLastEvent(self):
for deviceId in BWPersonality.axis:
for axis in BWPersonality.axis[deviceId]:
self.__joyEvent.deviceId = deviceId
self.__joyEvent.axis = axis
self.__joyEvent.value = BWPersonality.axis[deviceId][axis]
self.processJoystickEvent(self.__joyEvent)
def restart(self):
self.__smoothStack = {}
self.__lastSmoothWin = {}
def dispose(self):
self._profile = None
return
@property
def __cameraStrategy(self):
cam = GameEnvironment.getCamera()
if cam is not None:
return cam.getDefualtStrategies['CameraStrategyNormal']
else:
return ProxyCamStrategy
    def processJoystickEvent(self, event):
        """Translate one raw joystick event into a normalized control-axis value.

        For the matching axis, applies (in order) inversion, dead zone,
        smoothing, discretization and response-curve mapping from the current
        joystick settings, then forwards the result to the owning profile via
        sendData(axis, processed_value, raw_value).
        """
        jSet = InputMapping.g_instance.joystickSettings
        rValue = 0.0
        vValue = 0.0
        hValue = 0.0
        fValue = 0.0
        # A configured device id of 0 means "accept this axis from any device".
        if event.axis == jSet.ROLL_AXIS and (event.deviceId == jSet.ROLL_DEVICE or 0 == jSet.ROLL_DEVICE):
            rValue = -event.value if jSet.INVERT_ROLL else event.value
            rawValue = rValue
            if abs(rValue) <= jSet.ROLL_DEAD_ZONE:
                # Inside the dead zone: report zero (raw value still forwarded).
                self._profile.sendData(ROLL_AXIS, 0.0, -rawValue)
            else:
                rValue = self.__signalSmoothing(jSet.ROLL_AXIS, rValue, jSet.ROLL_SMOOTH_WINDOW)
                rValue = self.__signalDiscrete(jSet.ROLL_SENSITIVITY, rValue, event.deviceId, event.axis)
                # Rescale so the usable range just outside the dead zone maps to [0, 1].
                rValue = math.copysign((abs(rValue) - jSet.ROLL_DEAD_ZONE) / (1.0 - jSet.ROLL_DEAD_ZONE), rValue)
                rValue = InputMapping.translateAxisValue(jSet.AXIS_X_CURVE, rValue)
                rValue = clamp(-1.0, -rValue, 1.0)
                self._profile.sendData(ROLL_AXIS, rValue, -rawValue)
        elif event.axis == jSet.VERTICAL_AXIS and (event.deviceId == jSet.VERTICAL_DEVICE or 0 == jSet.VERTICAL_DEVICE):
            vValue = -event.value if jSet.INVERT_VERTICAL else event.value
            rawValue = vValue
            if abs(vValue) <= jSet.VERTICAL_DEAD_ZONE:
                self._profile.sendData(VERTICAL_AXIS, 0.0, rawValue)
            else:
                vValue = self.__signalSmoothing(jSet.VERTICAL_AXIS, vValue, jSet.VERTICAL_SMOOTH_WINDOW)
                vValue = self.__signalDiscrete(jSet.VERTICAL_SENSITIVITY, vValue, event.deviceId, event.axis)
                vValue = math.copysign((abs(vValue) - jSet.VERTICAL_DEAD_ZONE) / (1 - jSet.VERTICAL_DEAD_ZONE), vValue)
                vValue = InputMapping.translateAxisValue(jSet.AXIS_Y_CURVE, vValue)
                vValue = clamp(-1.0, -vValue, 1.0)
                self._profile.sendData(VERTICAL_AXIS, vValue, -rawValue)
        elif event.axis == jSet.HORIZONTAL_AXIS and (event.deviceId == jSet.HORIZONTAL_DEVICE or 0 == jSet.HORIZONTAL_DEVICE):
            hValue = event.value if jSet.INVERT_HORIZONTAL else -event.value
            rawValue = hValue
            if abs(hValue) <= jSet.HORIZONTAL_DEAD_ZONE:
                self._profile.sendData(HORIZONTAL_AXIS, 0.0, rawValue)
            else:
                hValue = self.__signalSmoothing(jSet.HORIZONTAL_AXIS, hValue, jSet.HORIZONTAL_SMOOTH_WINDOW)
                hValue = self.__signalDiscrete(jSet.HORIZONTAL_SENSITIVITY, hValue, event.deviceId, event.axis)
                # NOTE: unlike roll/vertical, the curve is applied before the dead-zone
                # rescale here -- presumably intentional, but worth confirming.
                hValue = InputMapping.translateAxisValue(jSet.AXIS_Z_CURVE, hValue)
                hValue = math.copysign((abs(hValue) - jSet.HORIZONTAL_DEAD_ZONE) / (1 - jSet.HORIZONTAL_DEAD_ZONE), hValue)
                if InputMapping.g_instance.currentProfileType == INPUT_SYSTEM_STATE.GAMEPAD_DIRECT_CONTROL:
                    hValue *= -1
                hValue = clamp(-1.0, hValue, 1.0)
                self._profile.sendData(HORIZONTAL_AXIS, hValue, rawValue)
        elif event.axis == jSet.FORCE_AXIS and (event.deviceId == jSet.FORCE_DEVICE or 0 == jSet.FORCE_DEVICE):
            fValue = -event.value if jSet.INVERT_FORCE else event.value
            rawValue = fValue
            if self.__isRawForceAxis:
                fValue = self.__renormalization(fValue)
            self._profile.sendData(FORCE_AXIS, fValue, rawValue)
        # Nudge the cursor camera by a fraction of the vertical/horizontal input.
        self.__cameraStrategy.rotateCursor(vValue * 0.01, hValue * 0.01)
def setCursorCamera(self, isCursorCamera):
pass
def setRawForceAxis(self, value):
self.__isRawForceAxis = value
def __renormalization(self, x):
maxForce = InputMapping.g_instance.joystickSettings.POINT_OF_NORMAL_THRUST
deadZone = InputMapping.g_instance.joystickSettings.FORCE_DEAD_ZONE
if deadZone > 1:
deadZone = 1
if x > deadZone:
return 1
if maxForce < x <= deadZone:
return 0
return clamp(-1.0, (x + 1.0) / (max(-0.99, maxForce) + 1.0) - 1.0, 0.0)
    def __signalDiscrete(self, discrete, value, deviceId, axis):
        """Quantize an axis value onto a sensitivity-dependent grid.

        NOTE(review): `BigWorld` is not imported in this module -- it appears
        to be a BigWorld-engine builtin injected into the script environment;
        confirm before running outside the engine.
        """
        SENSITIVITY = 14 * discrete
        # Effective resolution: hardware resolution reduced by the sensitivity setting.
        joyDPI = BigWorld.getJoystickResolution(deviceId, axis) / pow(2.0, math.floor(SENSITIVITY))
        halfSingleSignal = 0.5 / joyDPI
        # Near 0 or near full deflection the value passes through unquantized.
        if abs(value) < 0.25 * halfSingleSignal or abs(value) > 1.0 - 0.25 * halfSingleSignal:
            return value
        # Snap to the center of the nearest quantization step, preserving sign.
        absValue = math.floor(abs(value) * joyDPI) / joyDPI + halfSingleSignal
        return math.copysign(absValue, value)
    def __signalSmoothing(self, axis, value, win, e = 0.99):
        """Moving-average smoothing of an axis value over a per-axis window.

        `win` scales BASE_SMOOTH_WINDOW; values at or beyond `e` (near full
        deflection) bypass averaging and saturate to +/-1.
        """
        # Changing the window size invalidates the accumulated samples.
        if self.__lastSmoothWin.get(axis, None) != win:
            self.__lastSmoothWin[axis] = win
            if self.__smoothStack.get(axis, None):
                self.__smoothStack[axis] = []
        window = max(int(BASE_SMOOTH_WINDOW * win), 1)
        self.__smoothStack.setdefault(axis, []).append(value)
        # Keep at most `window` samples (drop the oldest).
        if len(self.__smoothStack[axis]) > window:
            self.__smoothStack[axis].pop(0)
        val = math.copysign(1.0, value) if abs(value) >= e else sum(self.__smoothStack[axis]) / len(self.__smoothStack[axis])
return val | [
"55k@outlook.com"
] | 55k@outlook.com |
fffebb703d18db82f8c3d83c57c74c26cbb2a5d4 | 1d5c6b57d034f30ce24ba500a4d20b191f16ac43 | /21.py | 4cb91ddc8577e3f7f346641f4aa03994a117a42f | [] | no_license | Meghashrestha/assignment-python | 8d30ebcafc2e11040020ee1150c7f86afdd1504a | 150240ddaa0e166544e1b1539a652a5a1c0f229d | refs/heads/master | 2022-11-09T06:40:30.169973 | 2020-06-28T11:39:19 | 2020-06-28T11:39:19 | 275,571,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | #Write a Python program to get a list, sorted in increasing order by the last
#element in each tuple from a given list of non-empty tuples.
#Sample List : [(2, 5), (1, 2), (4, 4), (2, 3), (2, 1)]
#Expected Result : [(2, 1), (1, 2), (2, 3), (4, 4), (2, 5)]
#tuple inside list
def last(n): return n[-1]
def sort_list_last(tuples):
return sorted(tuples, key=last)
print(sort_list_last([(2, 5), (1, 2), (4, 4), (2, 3), (2, 1)]))
'''
def element(ele1, ele2):
list =[]
list = []
n= input("enter the number of set you want to enter : ")
for i in range(0, n):
element1=int(input())
element2=int(input())
list.append(element1,element2)
print(list)
#print(element(ele1,ele2))
'''
| [
"meghashrestha30@gmail.com"
] | meghashrestha30@gmail.com |
9581354839b7fe4db8be1358a1636dfd77cd29b7 | ec8ee805a1cc8fefebdcc7d73d7a6e69c9c6a883 | /issue_tracker/urls.py | 845fd0d12da08ece0fc22273d66eaf40d93928b2 | [] | no_license | Code-Institute-Submissions/p5-issue-tracker | 5b3f8c177fe3ffb833fd754b84a4a63b9764b553 | 4b3a993ae45fbd2e716d727cd11f28169b4246b1 | refs/heads/master | 2020-04-01T05:36:17.361668 | 2018-10-13T20:21:31 | 2018-10-13T20:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | """issue_tracker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path ,include
from accounts import urls as urls_accounts
from tickets import urls as urls_tickets
urlpatterns = [
path('admin/', admin.site.urls),
path('', include(urls_tickets)),
path('accounts/', include(urls_accounts)),
]
| [
"sarah.loh0708@gmail.com"
] | sarah.loh0708@gmail.com |
7a7b0cb2ba35a1718311a5ace7ffe70e9f8f71bf | 7b221a4981edad73991cf1e357274b46c4054eff | /stacks/XIAOMATECH/1.0/services/NIFI/package/scripts/nifi_cli.py | e82f05e871857ac17cf7d7bf280d1558ca7ca3dc | [
"Apache-2.0"
] | permissive | aries-demos/dataops | a4e1516ef6205ad1ac5f692822e577e22ee85c70 | 436c6e89a1fdd0593a17815d3ec79c89a26d48f1 | refs/heads/master | 2020-05-29T17:20:12.854005 | 2019-05-22T06:06:00 | 2019-05-22T06:06:00 | 189,270,801 | 2 | 3 | Apache-2.0 | 2019-05-29T17:35:25 | 2019-05-29T17:35:24 | null | UTF-8 | Python | false | false | 6,474 | py | import json
import time
from resource_management.core import shell
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.libraries.functions import format
import nifi_toolkit_util_common
def nifi_cli(command=None,
subcommand=None,
errors_retries=12,
retries_pause=10,
acceptable_errors=None,
**command_args):
"""
Executes nifi cli command and returns its output.
We need execute command several times because nifi becomes responsive after some among of time.
On non super-fast vm it takes 1.5 minutes to get nifi responding for cli calls.
Also some commands can produce different errors but after some time that errors disappear.
In other works - this cli is hard to use in automated environments :).
:param command: main cli command(nifi, registry, session, etc)
:param subcommand: sub-command of main command(nifi list-reg-clients, etc)
:param errors_retries: retries count on acceptable errors
:param retries_pause: pause between call retries
:param acceptable_errors: errors that is acceptable for retry("Connection refused" error always in this list)
:param command_args: long version of command parameters
:return: command output
"""
import params
cli_env = {"JAVA_HOME": params.java_home}
cli_script = nifi_toolkit_util_common.get_toolkit_script(
"cli.sh", params.toolkit_tmp_dir, params.stack_version_buildnum)
if errors_retries < 1:
errors_retries = 1
if acceptable_errors is None:
acceptable_errors = []
acceptable_errors.append("Connection refused")
def do_retry(output):
for acceptable_error in acceptable_errors:
if acceptable_error in output:
return True
return False
cmd = [cli_script, command]
if subcommand is not None:
cmd.append(subcommand)
client_opts = nifi_toolkit_util_common.get_client_opts()
if params.nifi_ssl_enabled:
command_args.update(nifi_toolkit_util_common.get_client_opts())
command_args["proxiedEntity"] = params.nifi_initial_admin_id
else:
command_args["baseUrl"] = client_opts["baseUrl"]
for arg_name, arg_value in command_args.iteritems():
cmd.append("--" + arg_name)
cmd.append(arg_value)
for _ in range(0, errors_retries):
errors_retries -= 1
code, out = shell.call(
cmd, sudo=True, env=cli_env, logoutput=False, quiet=True)
if code != 0 and do_retry(out) and errors_retries != 0:
time.sleep(retries_pause)
continue
elif code == 0:
return out
else:
raise Fail("Failed to execute nifi cli.sh command")
def _update_impl(client_name=None,
client_id=None,
client_url=None,
existing_clients=None):
old_name = None
old_url = None
if not client_id:
if not client_name:
raise Fail(
"For client update 'client_name' or 'client_id' must be specified"
)
for description, name, uuid, url in existing_clients:
if name == client_name:
client_id = uuid
old_name = name
old_url = url
break
else:
for description, name, uuid, url in existing_clients:
if uuid == client_id:
old_name = name
old_url = url
arguments = {"registryClientId": client_id}
do_update = False
if client_name:
if client_name != old_name:
arguments["registryClientName"] = client_name
do_update = True
Logger.info(
format(
"Trying to update NIFI Client name '{old_name}' to '{client_name}'"
))
if client_url:
if client_url != old_url:
arguments["registryClientUrl"] = client_url
do_update = True
Logger.info(
format(
"Trying update url from '{old_url}' to '{client_url}' for NIFI Client with name '{old_name}'"
))
if do_update:
nifi_cli(command="nifi", subcommand="update-reg-client", **arguments)
Logger.info(format("NIFI Client '{old_name}' updated"))
else:
Logger.info(format("NIFI Client '{old_name}' is already up-to-date"))
return client_id
def create_reg_client(client_name, client_url):
client_uuid = nifi_cli(
command="nifi",
subcommand="create-reg-client",
registryClientName=client_name,
registryClientUrl=client_url).strip()
Logger.info(
format("Created NIFI client '{client_name}' with url '{client_url}'"))
return client_uuid
def list_reg_clients():
acceptable_errors = ["Error retrieving registry clients"]
Logger.info(format("Trying to retrieve NIFI clients..."))
command_result = nifi_cli(
command="nifi",
subcommand="list-reg-clients",
acceptable_errors=acceptable_errors,
outputType="json")
result_json = json.loads(command_result)
result = []
if "registries" in result_json:
for registry in result_json["registries"]:
if "component" in registry:
component = registry["component"]
if "description" in component:
description = component["description"]
else:
description = ''
result.append((description, component["name"], component["id"],
component["uri"]))
Logger.info("Retrieved:" + str(len(result)) + " clients")
return result
def update_reg_client(client_name=None, client_id=None, client_url=None):
existing_clients = list_reg_clients()
return _update_impl(
client_name=client_name,
client_id=client_id,
client_url=client_url,
existing_clients=existing_clients)
def create_or_update_reg_client(client_name, client_url):
existing_clients = list_reg_clients()
for _, name, uuid, _ in existing_clients:
if name == client_name:
return _update_impl(
client_id=uuid,
client_url=client_url,
existing_clients=existing_clients)
return create_reg_client(client_name, client_url)
| [
"xianhuawei@MacBook-Air.local"
] | xianhuawei@MacBook-Air.local |
786de26045fa9b5a4a8b336c7e583e43c3a72cbf | 8a7b80b41f03096e41601f5628277798b0de1f83 | /Vertice.py | 4eb0158275727d84a040d6505f7fce311e6618a8 | [] | no_license | MathausC/Garfos | 14180f73367e48915fa5bdfdb1732b2a9ca28392 | 8bb86d42c3b5a651ccb2bdc7003a47569005eed9 | refs/heads/master | 2023-08-10T19:19:30.465243 | 2021-09-23T14:19:03 | 2021-09-23T14:19:03 | 409,621,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | class Vertice:
    def __init__(self, id="", nome=""):
        """Create a vertex.

        Args:
            id: vertex identifier (the name shadows the builtin `id`; kept
                unchanged because callers may pass it by keyword).
            nome: human-readable vertex name (Portuguese "nome" = name).
        """
        self.id = id
        self.nome = nome
def __str__(self):
return f'{self.nome}' | [
"mathaus.guimaraes@liferay.com"
] | mathaus.guimaraes@liferay.com |
7576a12e0655d60d48e077629a181a2b755c16c6 | 697ac85074fc7ab70bb114cbe8eba2617054654f | /test.py | 034e2a56f47f2c03bb2154c23f9f261f92063bc6 | [] | no_license | cpu-3/asm | 6bf3b8f8b570ff6750e1d7ce95de3e20697d8ce7 | fa0ea62e124c7a267d31e435524a3de58aa3f11d | refs/heads/master | 2021-07-11T00:20:04.967644 | 2019-03-10T17:27:18 | 2019-03-10T17:27:18 | 150,544,549 | 1 | 0 | null | 2018-10-22T09:49:29 | 2018-09-27T07:10:06 | Python | UTF-8 | Python | false | false | 58 | py | from main import *
# Run the self-test (test() is star-imported from main) when executed directly.
if __name__ == '__main__':
    test()
| [
"moratorium08@gmail.com"
] | moratorium08@gmail.com |
848ad7910f44dcfab14619df8c7cf7ea1cd9e27c | 6f5879d381765e8642aa8f28003bdcc343da3853 | /src/models/usages.py | 182687aa53f87f61822db3b15fab2b201496b411 | [] | no_license | meandevstar/flask-celery | d6bc17024ce635cfd56af6071aec48e580ec2f67 | cfe581c954896e29dbccab98c06c962fb3ed34f7 | refs/heads/master | 2022-01-24T05:00:13.892271 | 2020-02-03T22:16:47 | 2020-02-03T22:16:47 | 238,063,967 | 0 | 0 | null | 2022-01-06T22:42:47 | 2020-02-03T21:17:31 | Python | UTF-8 | Python | false | false | 2,736 | py | """Usage related models and database functionality"""
from decimal import Decimal
from sqlalchemy.sql import func
from src.models.base import db
from src.models.cycles import BillingCycle
from src.constants import SubscriptionStatus
class DataUsage(db.Model):
    """Model class to represent data usage record

    Note:
        A daily usage record is created for a subscription each day
        it is active, beginning at midnight UTC timezone.
    """

    __tablename__ = "data_usages"

    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Megabytes consumed during the [from_date, to_date] window.
    mb_used = db.Column(db.Float, default=0.0)
    # Start/end of the period this record covers (timezone-aware timestamps).
    from_date = db.Column(db.TIMESTAMP(timezone=True))
    to_date = db.Column(db.TIMESTAMP(timezone=True))
    # Owning subscription (required); back-reference lives on Subscription.data_usages.
    subscription_id = db.Column(
        db.Integer, db.ForeignKey("subscriptions.id"), nullable=False
    )
    subscription = db.relationship("Subscription", back_populates="data_usages")

    def __repr__(self):  # pragma: no cover
        return (
            f"<{self.__class__.__name__}: {self.id} ({self.subscription_id}) "
            f"{self.mb_used} MB {self.from_date} - {self.to_date}>"
        )
    @classmethod
    def get_statistics_for_a_subscription(cls, sid, date=None):
        """Helper method to get data usage on billing cycle of given date

        Args:
            sid (int): subscription id to look up
            date (date): date to get billing cycle for

        Returns:
            dict: {
                over_limit,   true if data usage is over plan limit
                amount_used,  the amount of data used in megabytes
                amount_left   amount left below the plan limit, in megabytes
            }
        """
        cycle = BillingCycle.get_current_cycle(date)

        # Any usage row for this subscription; only needed to reach the
        # related subscription/plan objects.
        default_usage = cls.query \
            .filter(cls.subscription_id == sid) \
            .first()
        # NOTE: these evaluate to False (not None) when no usage row exists.
        subscription = default_usage is not None and default_usage.subscription
        plan = default_usage is not None and default_usage.subscription.plan

        # get total amount used in current cycle (unbounded when no cycle found)
        query = []
        query.append(cls.subscription_id == sid)
        if cycle is not None:
            query.append(cls.from_date >= cycle.start_date)
            query.append(cls.to_date <= cycle.end_date)
        amount = cls.query \
            .with_entities(func.sum(cls.mb_used)) \
            .filter(*query) \
            .scalar()

        # 'new' subscriptions (and missing plan/subscription) report no allowance.
        if plan and subscription and subscription.status != SubscriptionStatus.new:
            mb_available = plan.mb_available - amount
        else:
            mb_available = 0

        return {
            "over_limit": mb_available <= 0,
            "amount_used": amount,
            "amount_left": mb_available if mb_available > 0 else 0
} | [
"aimoko1067@yahoo.com"
] | aimoko1067@yahoo.com |
ef18e320c181d7603f6cc50f8b4c007b64c977e5 | b8d2f095a4b7ea567ccc61ee318ba879318eec3d | /二分查找/287. 寻找重复数.py | 69bdb06bf5dbca40a1db1643ecf3e21552f93868 | [] | no_license | f1amingo/leetcode-python | a3ef78727ae696fe2e94896258cfba1b7d58b1e3 | b365ba85036e51f7a9e018767914ef22314a6780 | refs/heads/master | 2021-11-10T16:19:27.603342 | 2021-09-17T03:12:59 | 2021-09-17T03:12:59 | 205,813,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from typing import List
class Solution:
# 从[1, n]中猜一个数,再遍历一遍原数组
def findDuplicate(self, nums: List[int]) -> int:
n = len(nums) - 1
lt, rt = 1, n
while lt < rt:
mid = (lt + rt) // 2
count = 0
for num in nums:
if num <= mid:
count += 1
if count > mid:
rt = mid
else:
lt = mid + 1
return lt
assert Solution().findDuplicate([1, 3, 4, 2, 2]) == 2
assert Solution().findDuplicate([3, 1, 3, 4, 2]) == 3
| [
"zsjperiod@foxmail.com"
] | zsjperiod@foxmail.com |
2387db5938512526541f01cbe5de56f2281d0bf3 | b80e407835b55a3a552e9a44a6cb5dd160dca33a | /getTemp.py | 5c6c8f270f60c9519b23b41d6f5efec845331e89 | [] | no_license | Norberg/tempLog | 948c84c58bd8616ee4f5ae49e68afd2ca0b5914b | 27d26504db1d9f325abb603089a2a02faed6a499 | refs/heads/master | 2020-12-30T09:57:39.416642 | 2009-07-14T20:15:05 | 2009-07-14T20:15:05 | 265,543 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | import serial, re
def getTemp():
    """Read one DS18S20 temperature sample from the serial 1-wire adapter.

    Returns:
        (temperature_c, rom): temperature in Celsius rounded to one decimal
        and the sensor ROM string, or (0.0, "null") when the adapter reports
        "No more addresses." (no sensor found). On any other parse failure
        the read is retried recursively.
    """
    ser = serial.Serial(port="/dev/ttyUSB0", baudrate=9600, timeout=10)
    # NOTE(review): modern pyserial opens the port on construction when `port`
    # is given, so this explicit open() would raise -- confirm the pyserial
    # version this (Python 2) script targets.
    ser.open()
    value = ser.readline()
    ser.close()
    # conversion done according to maxim-ic documentation for DS18S20
    try:
        # Line presumably contains " P=..." / "R=..." / "CRC=..." fields;
        # fields [1] and [2] of the split are taken as ROM and scratchpad
        # data respectively -- TODO confirm exact field order on the wire.
        rom = re.split(" P=|R=|CRC=", value)[1]
        prog = re.split(" P=|R=|CRC=", value)[2]
        TEMP_LSB = int(prog.split(' ')[1], 16)
        TEMP_MSB = int(prog.split(' ')[2], 16)
        COUNT_REMAIN = int(prog.split(' ')[7], 16)
    # NOTE(review): bare except also hides unrelated errors, and the retry
    # below recurses with no depth limit.
    except:
        if re.match("No more addresses.", value) is not None:
            return 0.0, "null"
        return getTemp()
    if (TEMP_MSB == 0):
        # positive temperature
        TEMP_READ = TEMP_LSB*0.5
    else:
        # negative temperature (two's-complement low byte)
        TEMP_READ = (TEMP_LSB-256)*0.5
    # get extended (higher-resolution) temperature per the datasheet formula
    TEMPERATURE = TEMP_READ - 0.25 * ((16.0 - COUNT_REMAIN)/ 16.0)
    return round(TEMPERATURE, 1), rom
def main():
    # Python 2 script: read a single sample and print it.
    temp, rom = getTemp()
    print "ROM:", rom, "Temp: ", temp, "C"

if __name__ == "__main__":
    main()
| [
"simon@dackbrann.net"
] | simon@dackbrann.net |
9fb262d762a00d209c51b7e69df9ee412cbd5e5b | bb8decee32f0f003c786da760b7e7dc6c7330cb2 | /Page.py | 1357e95a98f31f0e1240e747296891f67afe6bf2 | [] | no_license | IPTNT123/pythonshiyan | 2b5c8b2edaed1f8747a94cd77af605e72e942a4b | 6eed463ee1614753d6b73b65ed4e84be4611346b | refs/heads/master | 2022-11-12T21:42:02.787054 | 2020-07-04T08:41:04 | 2020-07-04T08:41:04 | 275,771,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | import tkinter as tk
from PIL import Image, ImageTk
import function
# NOTE(review): `global` at module level is a no-op; these lines only document
# which names the check() callback rebinds via its own `global` statements.
global cut
global imgs
global txt
global image_file
global canvas
global c_canvas
global position
window = tk.Tk()
position = 110  # y coordinate for the next recognized-plate text line on c_canvas
txt = '0'
# Give the window a title ("license plate detection")
window.title('车牌检测')
# Set the window size (width x height)
window.geometry('600x800')
# Lay out the widgets
canvas = tk.Canvas(window, height=500, width=250)
c_canvas = tk.Canvas(window, height=500, width=270)
# Label prompting for the full image file name
e_k = tk.Label(window, text='输入照片全称:', font=('Arial', 12), width=27, height=2)
e_k.pack()
e = tk.Entry(window, show=None, font=('Arial', 14))
e.pack()
# "original image" caption
l = tk.Label(window, text='原图', bg='red' ,font=('Arial', 12), width=27, height=2)
l.pack(anchor="center")
l.place(x=150,y=100)
# "extracted plate" caption
k = tk.Label(window, text='车牌提取',bg='red',font=('Arial', 12), width=27, height=2)
k.pack(anchor="center")
k.place(x=150,y=500)
# Callback triggered by the "run" button.
def check():
    """Run plate detection on the image named in the entry box and show results."""
    global cut
    global image_file
    global canvas
    global imgs
    global c_canvas
    global position
    txt = e.get()
    # NOTE(review): `list` shadows the builtin; left unchanged here.
    list = function.color_identify(txt)
    img = Image.open(txt)
    img = img.resize((300, 300))
    image_file = ImageTk.PhotoImage(img)  # keep a module-level reference so tkinter does not GC the image
    image = canvas.create_image(150, 110, anchor='n', image=image_file)  # image anchor point
    canvas.pack(side='top')
    # Check whether a license plate was actually read; if the result is too
    # short, fall back to picture-based identification.
    if(len(list) < 3):
        list = function.picture_identify(txt)
    # Show the cropped plate image (presumably written to pai.jpg by the
    # identify step -- confirm in function module)
    imgs = Image.open('pai.jpg')
    imgs = imgs.resize((250, 100))
    cut = ImageTk.PhotoImage(imgs)
    c_image = c_canvas.create_image(140, 0, anchor='n', image=cut)  # image anchor point
    c_text = c_canvas.create_text(140, position, text=list)
    position = position + 15
    c_canvas.pack(side='top')
# Define a button (original comment said "to move a given shape on the canvas";
# in fact its command runs the detection callback `check`). Note: pack()
# returns None, so `b` is None.
b = tk.Button(window, text='运行', bg='red',command=check).pack(side="bottom")
# Main window event loop is started on the following line.
window.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
240e049b2a84a79545736f10cacf378c8c895e5c | a4ae64a449a7df7e817b4e47c67bbb4e14ded5d6 | /TicTacToe/game.py | 628030aaa39ffa8c0a12ef86d426fc50d6b1d0df | [] | no_license | smsr7/Machine_Learning | 2d6679bc68ea89cbfb249039df96b13311d578a5 | c8e8f35b14a3d4b966b1fc296e21ececd340a56c | refs/heads/master | 2020-04-18T04:45:00.585008 | 2019-02-25T21:33:57 | 2019-02-25T21:33:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | import tic
import network
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
env = tic.board()  # tic-tac-toe game environment
observ = 9  # observation size: nine board cells
action = 9  # action size: nine possible moves
win = 0  # running count of agent wins
draw = 0  # running count of draws
repeats = 0.0000000  # cumulative sum of per-step `repeat` values from env.step
re = 0  # number of steps taken so far (used to average `repeats`)
def run(state):
    """Play one interactive game: human move (typed cell index) then agent move.

    Uses the module-level `env` (board) and `nn` (DQN agent); `state` is the
    current board state reshaped to (1, 9). Python 2 (raw_input / print).
    """
    done = False
    while not done:
        repeatO = 0
        action = int(raw_input())  # human player's move
        next_state, reward, done, wl, repeat = env.step(action)
        action = nn.act(state)
        next_state, reward, done, wl, repeat = env.step(action)
        # NOTE(review): neither `repeat` nor `repeatO` changes inside this
        # loop, so once entered it never terminates -- confirm intent.
        while repeat-repeatO > 0:
            action = nn.act(state)
            if repeat > 20:
                nn.replay(130)
                print repeat
        next_state = np.reshape(next_state, [1, 9])
        nn.recall(state, action, reward, next_state, done)
        state = next_state
        env.display()
# Train the DQN agent by self-play for 2000 episodes, then play 10
# interactive games against a human via run().
nn = network.DQNagent(observ, action)
for i in range(2000):
    state = env.reset()
    state = np.reshape(state, [1, 9])
    for moves in range(30):
        action = nn.act(state)
        next_state, reward, done, wl, repeat = env.step(action)
        next_state = np.reshape(next_state, [1, 9])
        # Store the transition in replay memory.
        nn.recall(state, action, reward, next_state, done)
        state = next_state
        re = re+1
        repeats = repeats + repeat
        if done:
            if wl == "win":
                win = win+1
            else:
                if wl == "draw":
                    draw = draw + 1
            print("Wins:{} Draws:{} Total:{} Repeat:{}".format(win, draw, win+draw, repeat))
            print("Repeat:{} repeat:{}".format(repeats/(re),repeat))
            print " "
            break
        # Start experience replay only after a few warm-up episodes.
        if i>5:
            nn.replay(52)
    nn.saveModel(i)
# Interactive evaluation against a human player.
for x in range(10):
    state = env.reset()
    state = np.reshape(state, [1, 9])
    done = False
    run(state)
| [
"noreply@github.com"
] | noreply@github.com |
acbf114c8c93ad8b5076dd4ea504d0e393f8cb43 | 1851d92ec009599f979ff68440cea9a220d263d4 | /contrib/spendfrom/spendfrom.py | a7f3cf028970b60cf38dfe70439591dc85ea845c | [
"MIT"
] | permissive | stintcoin/StintCoin | f7e8d3411f2de38bda93f1bbfdb551616fb3ca78 | 75b9fc740ed1929bde1f142502f9adbbb630d812 | refs/heads/master | 2020-04-09T01:21:18.148125 | 2018-12-13T22:08:30 | 2018-12-13T22:08:30 | 159,900,937 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,122 | py | #!/usr/bin/env python
#
# Use the raw transactions API to spend StintCoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a stintcoind or stintcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Verify that round-tripping a BTC amount through the json module keeps
    full satoshi precision; raise RuntimeError otherwise."""
    probe = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(probe)))
    if int(round_tripped*1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the stintcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/StintCoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "StintCoin")
    return os.path.expanduser("~/.stintcoin")
def read_bitcoin_config(dbdir):
    """Read the stintcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser  # Python 2 module name

    # stintcoin.conf has no [section] headers, which SafeConfigParser requires;
    # this wrapper injects a fake "[all]" section and strips '#' comments.
    class FakeSecHead(object):
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            # First call returns the fake header, afterwards the real file.
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # NOTE(review): this strips from the first '#' even inside a
                # value (e.g. a password containing '#') -- confirm acceptable.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "stintcoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a stintcoin JSON-RPC server described by *config*.

    Exits the process with a message on stderr when the connection fails or
    the daemon's testnet setting does not match the configuration.
    """
    use_testnet = int(config.get('testnet', '0')) > 0  # 0/1 in config file
    if 'rpcport' not in config:
        # NOTE(review): both branches yield the same port in this fork.
        config['rpcport'] = 27502 if use_testnet else 27502
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect; issue one RPC to surface connection
        # errors and verify the daemon's testnet flag matches ours.
        if result.getmininginfo()['testnet'] != use_testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(stintcoind):
    """Ensure the wallet is unlocked, prompting for the passphrase if needed.

    Args:
        stintcoind: JSON-RPC proxy to the running daemon.

    Returns:
        True if the wallet is not encrypted, or is unlocked after this call.
    """
    info = stintcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            stintcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C / SystemExit during the
            # prompt still propagate; a wrong passphrase raises a JSON-RPC error.
            sys.stderr.write("Wrong passphrase\n")
    # Re-check: unlocked only if the new expiry is in the future.
    info = stintcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(stintcoind):
    """Collect spendable funds per address.

    Returns a dict mapping address -> {"total": Decimal, "outputs": [unspent
    outputs], "account": account name}, considering only standard
    pay-to-pubkey-hash / pay-to-script-hash outputs.
    """
    address_summary = dict()
    address_to_account = dict()
    for info in stintcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    unspent = stintcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = stintcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-stintcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue

        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
            }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent inputs until `needed` is covered.

    Returns (outputs, change) where outputs is a list of {"txid", "vout"}
    dicts and change is the (possibly negative) surplus over `needed`.
    Feel free to improve this; it is good enough for simple needs.
    """
    outputs = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        outputs.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (outputs, gathered-needed)
def create_tx(stintcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending `amount`+`fee` from
    `fromaddresses` to `toaddress`; change goes back to the last from-address.

    Exits the process on insufficient funds or signing failure; returns the
    signed transaction as a hex string.
    """
    all_coins = list_available(stintcoind)

    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to stintcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = stintcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = stintcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]

    return txdata
def compute_amount_in(stintcoind, txinfo):
    """Total Decimal value of the inputs of a decoded transaction.

    Each input is resolved by fetching its previous transaction over RPC
    and reading the value of the referenced output.
    """
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        prev_tx = stintcoind.getrawtransaction(vin['txid'], 1)
        total += prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Total Decimal value of all outputs of a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(stintcoind, txdata_hex, max_fee):
    """Abort the program unless the transaction's implied fee looks sane.

    Decodes `txdata_hex` over RPC, computes the fee (inputs minus outputs)
    and exits with status 1 when the fee exceeds `max_fee`, or when a
    large (>1000 bytes) or tiny-amount transaction pays less than BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = stintcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(stintcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # The implied fee is whatever input value is not paid back out.
        # BUG FIX: the original compared an undefined global `fee` below,
        # which raised NameError instead of performing the no-fee checks.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """CLI entry point: list spendable coins, or build and send a payment.

    With no --amount, prints a per-address summary of available coins.
    Otherwise builds a transaction from --from addresses to --to, sanity
    checks the fee, and either prints it (--dry_run) or broadcasts it.
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get StintCoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send StintCoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of stintcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    stintcoind = connect_JSON(config)
    # No amount given: just report what each address could spend.
    if options.amount is None:
        address_summary = list_available(stintcoind)
        # NOTE: iteritems() is Python 2 only, like the rest of this script.
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(stintcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(stintcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Reject anything whose fee exceeds 1% of the amount being sent.
        sanity_test_fee(stintcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = stintcoind.sendrawtransaction(txdata)
            print(txid)
# Run the CLI only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| [
"codalata@gmail.com"
] | codalata@gmail.com |
bbd8a47cd083cdc32a34c9e24596c538d66b787f | 5d5ccb7ea3809d3ae2e5c10cd96e1375d0371247 | /astardownload/dbHelper.py | 77fed10fb276cefb94adb24558a28255039fe2dc | [] | no_license | ASTARCHEN/astardownload | 86335a9f6fec495a540d9d20986baec4076c7d35 | 90860f479a26ad44e5000e6a64be9e99b5643be0 | refs/heads/master | 2021-07-15T23:05:36.446253 | 2021-02-09T13:21:10 | 2021-02-09T13:21:10 | 96,193,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import configparser
# Load database connection settings from the local config file.
cp = configparser.ConfigParser()
cp.read('myconfig.conf')
# cp.read('../../myconfig.conf')
# Connection parameters from the [db] section, exposed as module-level
# constants for importers.  cp.get() raises if the section/option is missing.
db_name = cp.get('db', 'name')
db_user = cp.get('db', 'user')
db_password = cp.get('db', 'password')
db_host = cp.get('db', 'host')
db_port = cp.get('db', 'port')
| [
"chenxiaolong12315@163.com"
] | chenxiaolong12315@163.com |
b64bf32072556e2d851717eb52362ca58448eff2 | 7214a9b535de26256b56a358eb857cfb62283bbe | /rotors_simulator/rotors_gazebo/scripts/buchi_parse.py | 2bb97ea365f7b99676d8293a69f5a374f1edf302 | [
"Apache-2.0"
] | permissive | samarth-kalluraya/SafePlan_simulation | 83bb74eb0028ae4291cce7f3aa04e43ddff0d3bb | 4f9c7a2f0bac44233b81cd999b6c7c1843fe346e | refs/heads/main | 2023-03-15T00:49:22.387781 | 2021-03-08T19:12:13 | 2021-03-08T19:12:13 | 318,288,678 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,425 | py | # -*- coding: utf-8 -*-
import subprocess
import os.path
import re
import networkx as nx
import numpy as np
from networkx.classes.digraph import DiGraph
from sympy import satisfiable
from sympy.parsing.sympy_parser import parse_expr #samarth change
from itertools import combinations
class Buchi(object):
    """
    construct buchi automaton graph

    Wraps the external `ltl2ba` tool: the LTL task formula is translated to
    a Buchi automaton whose states/transitions are stored in a networkx
    DiGraph, annotated with the atomic propositions and the regions each
    robot must avoid on every transition.
    """
    def __init__(self, task):
        """
        initialization
        :param task: task specified in LTL
        """
        # task specified in LTL
        self.formula = task.formula
        self.subformula = task.subformula
        self.number_of_robots = task.number_of_robots
        # graph of buchi automaton
        self.buchi_graph = DiGraph(type='buchi', init=[], accept=[])
        # minimal length (in terms of number of transitions) between a pair of nodes
        self.min_length = dict()
    def construct_buchi_graph(self):
        """
        parse the output of the program ltl2ba and build the buchi automaton
        """
        # directory of the program ltl2ba
        dirname = os.path.dirname(__file__)
        # output of the program ltl2ba
        output = subprocess.check_output(dirname + "/./ltl2ba -f \"" + self.formula + "\"", shell=True).decode(
            "utf-8")
        # find all states/nodes in the buchi automaton
        state_re = re.compile(r'\n(\w+):\n\t')
        state_group = re.findall(state_re, output)
        # find initial and accepting states
        init = [s for s in state_group if 'init' in s]
        accept = [s for s in state_group if 'accept' in s]
        # finish the initialization of the graph of the buchi automaton
        self.buchi_graph.graph['init'] = init
        self.buchi_graph.graph['accept'] = accept
        # Substitute longer keys first so e.g. 'e10' is not clobbered by 'e1'.
        order_key = list(self.subformula.keys())
        order_key.sort(reverse=True)
        # for each state/node, find it transition relations
        for state in state_group:
            # add node
            self.buchi_graph.add_node(state)
            # loop over all transitions starting from current state
            state_if_fi = re.findall(state + r':\n\tif(.*?)fi', output, re.DOTALL)
            if state_if_fi:
                relation_group = re.findall(r':: (\(.*?\)) -> goto (\w+)\n\t', state_if_fi[0])
                for symbol, next_state in relation_group: # symbol that enables state to next_state
                    # delete edges with multiple subformulas
                    # if ' && ' in symbol: continue
                    # whether the edge is feasible in terms of atomic propositions
                    symbol_copy = symbol
                    for k in order_key:
                        symbol = symbol.replace('e{0}'.format(k), self.subformula[k][0])
                    # get the truth assignment
                    truth_table = self.get_truth_assignment(symbol)
                    # infeasible transition
                    if not truth_table: continue
                    symbol_keys = re.findall(r'[0-9]+', symbol_copy)
                    # Regions each robot must stay away from while taking this edge.
                    avoid_regions={}
                    for i in range(self.number_of_robots):
                        avoid_regions[i]=[]
                    for key in truth_table:
                        if key!='1':
                            if truth_table[key]==False:
                                pair = key.split('_') # region-robot pair
                                robot_index = int(pair[1]) - 1
                                distance=0
                                # subformula[...][3] looks like an avoidance radius
                                # for the region -- TODO confirm against Task.
                                for sub_f in symbol_keys:
                                    if key in self.subformula[int(sub_f)][0]:
                                        distance = self.subformula[int(sub_f)][3]
                                avoid_regions[robot_index].append((pair[0],distance))
                    # add edge
                    avoid_current_state={}
                    if state!=next_state:
                        # Relies on the self-loop edge having been added first;
                        # ltl2ba lists the self-loop before outgoing edges here.
                        avoid_current_state = self.buchi_graph.edges[(state,state)]['avoid']
                    self.buchi_graph.add_edge(state, next_state, AP = symbol_copy, AP_keys=symbol_keys,
                                              truth=truth_table, avoid=avoid_regions, avoid_self_loop=avoid_current_state)
            else:
                # A state with no if/fi body may be a plain 'skip' self-loop.
                state_skip = re.findall(state + r':\n\tskip\n', output, re.DOTALL)
                if state_skip:
                    avoid_regions={}
                    avoid_current_state={}
                    for i in range(self.number_of_robots):
                        avoid_regions[i]=[]
                    self.buchi_graph.add_edge(state, state, AP='1', AP_keys=[], truth='1',
                                              avoid=avoid_regions, avoid_self_loop=avoid_current_state)
    def get_truth_assignment(self, symbol):
        """
        get one set of truth assignment that makes the symbol true
        :param symbol: logical expression which controls the transition
        :return: a set of truth assignment enables the symbol, or False when
                 the expression is unsatisfiable
        """
        # empty symbol
        if symbol == '(1)':
            return '1'
        # non-empty symbol
        else:
            # Translate ltl2ba operators to sympy's boolean syntax.
            exp = symbol.replace('||', '|').replace('&&', '&').replace('!', '~')
            # add extra constraints: a single robot can reside in at most one region
            robot_region = self.robot2region(exp)
            for robot, region in robot_region.items():
                mutual_execlusion = list(combinations(region, 2))
                # single label in the symbol
                if not mutual_execlusion: continue
                for i in range(len(mutual_execlusion)):
                    mutual_execlusion[i] = '(~(' + ' & '.join(list(mutual_execlusion[i])) + '))'
                exp = '(' + exp + ') & ' + ' & '.join(mutual_execlusion)
            exp=parse_expr(exp) #samarth change
            # find one truth assignment that makes symbol true using function satisfiable
            # truth = satisfiable(exp, algorithm="dpll")
            truth = satisfiable(exp)
            try:
                truth_table = dict()
                for key, value in truth.items():
                    truth_table[key.name] = value
            # satisfiable() returned False (no model): .items() raises here.
            except AttributeError:
                return False
            else:
                return truth_table
    def get_minimal_length(self):
        """
        search the shortest path from a node to another, i.e., # of transitions in the path
        :return:
        """
        # loop over pairs of buchi states
        for head_node in self.buchi_graph.nodes():
            for tail_node in self.buchi_graph.nodes():
                # head_node = tail_node, and tail_node is an accepting state
                if head_node != tail_node and 'accept' in tail_node:
                    try:
                        length, _ = nx.algorithms.single_source_dijkstra(self.buchi_graph,
                                                                         source=head_node, target=tail_node)
                    # couldn't find a path from head_node to tail_node
                    except nx.exception.NetworkXNoPath:
                        length = np.inf
                    self.min_length[(head_node, tail_node)] = length
                # head_node != tail_node and tail_node is an accepting state
                # move 1 step forward to all reachable states of head_node then calculate the minimal length
                elif head_node == tail_node and 'accept' in tail_node:
                    length = np.inf
                    for suc in self.buchi_graph.succ[head_node]:
                        try:
                            len1, _ = nx.algorithms.single_source_dijkstra(self.buchi_graph,
                                                                           source=suc, target=tail_node)
                        except nx.exception.NetworkXNoPath:
                            len1 = np.inf
                        if len1 < length:
                            length = len1 + 1
                    self.min_length[(head_node, tail_node)] = length
    def get_feasible_accepting_state(self):
        """
        get feasible accepting/final state, or check whether an accepting state is feasible

        An accepting state is kept only if it is reachable from some initial
        state AND can reach itself again (so it can be visited infinitely often).
        :return:
        """
        accept = self.buchi_graph.graph['accept']
        self.buchi_graph.graph['accept'] = []
        for ac in accept:
            for init in self.buchi_graph.graph['init']:
                if self.min_length[(init, ac)] < np.inf and self.min_length[(ac, ac)] < np.inf:
                    self.buchi_graph.graph['accept'].append(ac)
                    break
    def robot2region(self, symbol):
        """
        pair of robot and corresponding regions in the expression
        :param symbol: logical expression
        :return: robot index : regions
        eg: input: exp = 'l1_1 & l3_1 & l4_1 & l4_6 | l3_4 & l5_6'
            output: {1: ['l1_1', 'l3_1', 'l4_1'], 4: ['l3_4'], 6: ['l4_6', 'l5_6']}
        """
        robot_region = dict()
        for r in range(self.number_of_robots):
            # Labels look like l<region>_<robot> or c<region>_<robot>; the
            # trailing [^0-9] stops robot 1 from matching robot 10 etc.
            findall = re.findall(r'(l\d+?_{0})[^0-9]'.format(r + 1), symbol) + re.findall(r'(c\d+?_{0})[^0-9]'.format(r + 1), symbol)
            if findall:
                robot_region[str(r + 1)] = findall
        return robot_region
"samarthkalluraya@gmail.com"
] | samarthkalluraya@gmail.com |
650f604a174cb38ea73b2bbe860ec2213e010a28 | ab5b96008e067865db222aee4504db8cad639365 | /td.py | 0280080339f853e9442b9fbd3b12671037d6a624 | [] | no_license | timbmg/easy21-rl | 9b3c14b904f21823fdef9eb47a8cd0ee49042b0e | dba283486cfddcc5e5999ae5b8941353c63ab1ea | refs/heads/master | 2020-03-14T06:28:52.220845 | 2018-04-29T10:38:35 | 2018-04-29T10:38:35 | 131,485,096 | 11 | 7 | null | null | null | null | UTF-8 | Python | false | false | 2,462 | py | from environment import Easy21
import utils
import numpy as np
import dill as pickle
# The Easy21 card-game environment (David Silver's RL assignment).
env = Easy21()
# N0 controls how fast the exploration probability decays with visits.
N0 = 100
# Action encoding: two discrete actions, presumably hit/stick -- confirm
# against the Easy21 environment.
actions = [0, 1]
def reset():
    """Return a zeroed state-action value table, visit counter and win count.

    Table axes are (player sum 0-21, dealer card 0-10, action).
    """
    shape = (22, 11, len(actions))
    action_values = np.zeros(shape)  # Q: state-action values
    visit_counts = np.zeros(shape)   # N(s, a): state-action visit counter
    return action_values, visit_counts, 0
Q, NSA, wins = reset()
# Ground-truth value function from a prior Monte-Carlo run, used for MSE.
# NOTE(review): the file handle from open() is never closed here.
trueQ = pickle.load(open('Q.dill', 'rb'))
# number of times state s has been visited
NS = lambda p, d: np.sum(NSA[p, d])
# step size (1 / visit count, per state-action pair)
alpha = lambda p, d, a: 1/NSA[p, d, a]
# exploration probability, decaying as the state is visited more often
epsilon = lambda p, d: N0 / (N0 + NS(p, d))
def epsilonGreedy(p, d):
    """Choose an action for state (p, d) with an epsilon-greedy policy.

    With probability epsilon(p, d) a random action is explored; otherwise
    the currently best-valued action is exploited.
    """
    if np.random.random() < epsilon(p, d):
        # explore
        return np.random.choice(actions)
    # exploit
    return np.argmax([Q[p, d, a] for a in actions])
episodes = int(1e4)
# Sweep the eligibility-trace decay lambda over 0.0 .. 1.0.
lmds = list(np.arange(0,11)/10)
mselambdas = np.zeros((len(lmds), episodes))
finalMSE = np.zeros(len(lmds))
# SARSA(lambda) with accumulating eligibility traces, one run per lambda.
for li, lmd in enumerate(lmds):
    Q, NSA, wins = reset()
    for episode in range(episodes):
        terminated = False
        E = np.zeros((22, 11, len(actions))) # Eligibility Trace
        p, d = env.initGame()
        # inital state and first action
        a = epsilonGreedy(p, d)
        SA = list()
        # Sample Environment
        while not terminated:
            pPrime, dPrime, r, terminated = env.step(p, d, a)
            if not terminated:
                aPrime = epsilonGreedy(pPrime, dPrime)
                tdError = r + Q[pPrime, dPrime, aPrime] - Q[p, d, a]
            else:
                # No bootstrap from a terminal state.
                tdError = r - Q[p, d, a]
            E[p, d, a] += 1
            NSA[p, d, a] += 1
            SA.append([p, d, a])
            # Propagate the TD error backwards along the visited pairs,
            # decaying each trace by lambda.
            for (_p, _d, _a) in SA:
                Q[_p, _d, _a] += alpha(_p, _d, _a) * tdError * E[_p, _d, _a]
                E[_p, _d, _a] *= lmd
            if not terminated:
                p, d, a = pPrime, dPrime, aPrime
        # bookkeeping
        if r == 1:
            wins += 1
        # Mean squared error against the Monte-Carlo ground truth over the
        # 21 * 10 * 2 valid state-action cells.
        mse = np.sum(np.square(Q-trueQ)) / (21*10*2)
        mselambdas[li, episode] = mse
        if episode % 1000 == 0 or episode+1==episodes:
            print("Lambda=%.1f Episode %06d, MSE %5.3f, Wins %.3f"%(lmd, episode, mse, wins/(episode+1)))
    finalMSE[li] = mse
    print("Lambda=%.1f Episode %06d, MSE %5.3f, Wins %.3f"%(lmd, episode, mse, wins/(episode+1)))
    print("--------")
utils.plotMseLambdas(finalMSE, lmds)
utils.plotMseEpisodesLambdas(mselambdas)
| [
"baumgaertner.t@gmail.com"
] | baumgaertner.t@gmail.com |
c4aeb89ef81169edcf4eb120ff153bd90e08081d | 307e6e92417b676f863007d8feee0e83abcc2db3 | /export_stockinfo_ept/__init__.py | 2309b02e25622929c3fb937d58a1b1078ec2a19d | [] | no_license | mr-prw/prowine-archive | ba6ca3b2f422e163ab299a8f36cc1497e6948f9b | 1845ea0b594d8fd712ff19aa61341f82e185202b | refs/heads/master | 2020-06-28T00:59:21.686634 | 2019-08-02T13:55:55 | 2019-08-02T13:55:55 | 200,099,714 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | from . import wizard
from . import report | [
"noreply@github.com"
] | noreply@github.com |
ce4f4de3c6cd53f78a77f8f7d171a222a593ea7e | 4a28e3e3afb28c0455ea21cfb983c3a8284dc5dd | /Reverse.py | bc387ecd0eea4a1c2e6fe9318772782e900f4b58 | [] | no_license | omdeshmukh20/Python-3-Programming | 60f6bc4e627de9d643a429e64878a636f3875cae | 9fb4c7fa54bc26d18b69141493c7a72e0f68f7d0 | refs/heads/main | 2023-08-28T04:37:27.001888 | 2021-10-29T17:03:34 | 2021-10-29T17:03:34 | 370,008,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #Discription: Accept Number From User And Return Reverse Of That Number Using For-Loop
#Date: 14/08/21
#Author : Om Deshmukh
# Reverse Operation
def Reverse(iValue1):
iDigit = 0
iRev = 0
if iValue1 < 0:
exit("Invalid Input! | Note : Give Input Greater Than 0")
for _ in range(iValue1):
if iValue1 == 0:
break
iDigit = iValue1 % 10
iRev = (iRev * 10) + iDigit
iValue1 = iValue1 // 10
return iRev
# Entry Point
def main():
iNo1 = int(input("Enter The Number : "))
iRet = Reverse(iNo1)
print("Reverse Number is : ", iRet)
# Code Starter
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
0684d234e85f6b170a94bbdd8fe260adcc0f1b90 | 0296bc69a0d9608ed826ad7a719395f019df098f | /old_modules/render_model_1.py | 9fc3f68c9e69607c41d3e1a6f72240c17d64ea5e | [] | no_license | jcn16/Blender_HDRmap_render | c0486a77e04c5b41a6f75f123dbdb3d10c682367 | 50e6cdb79fef83081de9830e7105dd425a235a9e | refs/heads/main | 2023-07-19T22:22:53.622052 | 2021-08-20T06:29:10 | 2021-08-20T06:29:10 | 377,757,283 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,746 | py | from math import radians, sin, cos, pi
import mathutils, bpy, argparse, random, time, os,logging
def generate_rand(a=0, b=1, only_positive=False):
    """Draw x uniformly from (-b, b), rejecting samples with |x| < a
    (and negative samples when only_positive is set).

    BUG FIX: the original resampled by recursing, which can blow the
    recursion limit when the acceptance region is small (a close to b).
    A rejection-sampling loop keeps the distribution identical.
    """
    while True:
        x = (random.random()-0.5) * 2*b
        if abs(x) < a or (only_positive and x<0):
            continue  # rejected: resample
        return x
def point_at(obj, target, roll=0):
    """
    Rotate obj to look at target
    :arg obj: the object to be rotated. Usually the camera
    :arg target: the location (3-tuple or Vector) to be looked at
    :arg roll: The angle of rotation about the axis from obj to target in radians.
    Based on: https://blender.stackexchange.com/a/5220/12947 (ideasman42)
    """
    if not isinstance(target, mathutils.Vector):
        target = mathutils.Vector(target)
    loc = obj.location
    # direction points from the object to the target
    direction = target - loc
    # Quaternion that aims the object's -Z axis along `direction` with +Y up
    # (Blender's camera looks down its local -Z).
    quat = direction.to_track_quat('-Z', 'Y')
    # /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py
    quat = quat.to_matrix().to_4x4()
    rollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')
    # remember the current location, since assigning to obj.matrix_world changes it
    loc = loc.to_tuple()
    # NOTE(review): `*` is matrix multiplication only in Blender < 2.80;
    # 2.80+ would need `@` here -- this script targets the old API.
    obj.matrix_world = quat * rollMatrix
    obj.location = loc
# init & params
logger = logging.getLogger()
logger.setLevel(logging.ERROR)  # silence everything below ERROR
random.seed(time.time())
light_num_low, light_num_high = 6, 12  # random point-light count range
light_loc_low, light_loc_high = 3, 6   # per-axis light placement range
#context = bpy.context
model_path = '/media/jcn/新加卷/JCN/CLOTHES/Human_model/衬衫裙子/model_3'
model = "model_3.obj"
render_path = "/media/jcn/新加卷/JCN/CLOTHES/Results/2/%08d.png"
quat_file = "/media/jcn/新加卷/JCN/CLOTHES/Results/2/result.txt"
# Delete default cube
bpy.data.objects['Cube'].select = True
bpy.ops.object.delete()
# Then select and delete every remaining default object as well.
for obj in bpy.data.objects:
    bpy.data.objects[obj.name].select = True
bpy.ops.object.delete()
# rendering process
# create a scene
#scene = bpy.data.scenes.new("Scene")
scene = bpy.context.scene
context=bpy.context
# create a camera
camera_data = bpy.data.cameras.new("Camera")
camera = bpy.data.objects.new("Camera", camera_data)
# Spherical placement: radius plus azimuth/elevation/roll (degrees).
distance, alpha, beta, gamma = 4.5, 1.0, 89.0, 0.0
alpha, beta, gamma = radians(alpha), radians(beta), radians(gamma)
camera.location = mathutils.Vector((distance*cos(beta)*cos(alpha), distance*cos(beta)*sin(alpha), distance*sin(beta)))
point_at(camera, mathutils.Vector((0, -0.4, 0)), roll=gamma)
print('camera by looked_at', camera.location, camera.rotation_euler, camera.rotation_euler.to_quaternion())
scene.objects.link(camera)
# Create lights (lights with random num in random directions)
# light number:6~12, point light
light_num = random.randint(a=light_num_low, b=light_num_high)
print('create %d light(s) at:', light_num)
for idx in range(light_num):
    light_data = bpy.data.lamps.new('light'+str(idx), type='POINT')
    light = bpy.data.objects.new('light'+str(idx), light_data)
    # z uses only_positive=True so lights stay above the ground plane.
    light_loc = (generate_rand(light_loc_low, light_loc_high), generate_rand(light_loc_low, light_loc_high), generate_rand(light_loc_low, light_loc_high, True))
    light.location = mathutils.Vector(light_loc)
    scene.objects.link(light)
# One extra fixed light straight above the model.
light_data = bpy.data.lamps.new('light', type='POINT')
light = bpy.data.objects.new('light', light_data)
light.location = mathutils.Vector((0, 0, 8))
scene.objects.link(light)
scene.update()
# Render settings: 2048x2048 with transparent background.
scene.render.resolution_x = 2048
scene.render.resolution_y = 2048
scene.render.resolution_percentage = 100
scene.render.alpha_mode = 'TRANSPARENT'
scene.camera = camera
path = os.path.join(model_path, model)
# make a new scene with cam and lights linked
context.screen.scene = scene
bpy.ops.scene.new(type='LINK_OBJECTS')
context.scene.name = model_path
cams = [c for c in context.scene.objects if c.type == 'CAMERA']
print(cams)
# Build a compositor graph that writes normal/depth/albedo passes to files.
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
bpy.context.scene.render.image_settings.color_depth = '8'
bpy.context.scene.render.image_settings.color_mode = 'RGB'
# Clear default nodes
for n in tree.nodes:
    tree.nodes.remove(n)
# Must be enabled, otherwise the normal pass cannot be output.
bpy.context.scene.render.layers['RenderLayer'].use_pass_normal = True
bpy.context.scene.render.layers["RenderLayer"].use_pass_color = True
bpy.context.scene.render.image_settings.file_format = 'PNG'
# Create input render layer node.
render_layers = tree.nodes.new('CompositorNodeRLayers')
# Map normals from [-1, 1] into displayable [0, 1]: n * 0.5 + 0.5.
scale_normal = tree.nodes.new(type="CompositorNodeMixRGB")
scale_normal.blend_type = 'MULTIPLY'
scale_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 1)
links.new(render_layers.outputs['Normal'], scale_normal.inputs[1])
bias_normal = tree.nodes.new(type="CompositorNodeMixRGB")
bias_normal.blend_type = 'ADD'
bias_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 0)
links.new(scale_normal.outputs[0], bias_normal.inputs[1])
normal_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
normal_file_output.label = 'Normal Output'
links.new(bias_normal.outputs[0], normal_file_output.inputs[0])
# Remap as other types can not represent the full range of depth.
depth_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
depth_file_output.label = 'Depth Output'
# NOTE(review): `map` shadows the builtin map() for the rest of the script.
map = tree.nodes.new(type="CompositorNodeMapValue")
# Size is chosen kind of arbitrarily, try out until you're satisfied with resulting depth map.
map.offset = [-0.7]
map.size = [0.1]
map.use_min = True
map.min = [0]
links.new(render_layers.outputs['Depth'], map.inputs[0])
links.new(map.outputs[0], depth_file_output.inputs[0])
# image_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
# image_file_output.label = 'Image'
# links.new(render_layers.outputs['Image'], image_file_output.inputs[0])
#print('image_idx: %08d, camera: (%.3f,%.3f,%.3f)' % (image_idx, a * 180. /pi, b * 180. / pi, g * 180. / pi))
albedo_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
albedo_file_output.label = 'Albedo Output'
links.new(render_layers.outputs['Color'], albedo_file_output.inputs[0])
# import model
bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl") #-Z, Y
# print('scene objects:')
for o in context.scene.objects:
    print(o)
# Recenter and shrink every imported mesh; camera and lights are skipped.
for obj in context.scene.objects:
    if obj.name in ['Camera.001'] + ['light'+str(idx) for idx in range(light_num)]:
        continue
    else:
        obj.location = mathutils.Vector((0, 0, -2.0))
        obj.scale = mathutils.Vector((0.002, 0.002, 0.002))
c = cams[0]
scene = bpy.context.scene
#scene = bpy.context.scene
f_quat = open(quat_file, 'w')
image_idx = 0
# Orbit the camera: roll g, elevation b (degrees), azimuth a in 60-degree
# steps; render all passes and log the camera pose for each view.
for g in [0]:
    g = radians(float(g))
    for b in [20, -20]:
        b = radians(float(b))
        for a in range(1, 360, 60):
            a = radians(float(a))
            c.location = mathutils.Vector((distance*cos(b)*cos(a), distance*cos(b)*sin(a), distance*sin(b)))
            point_at(c, mathutils.Vector((0, -0.4, 0)), roll = g)
            quat = c.rotation_euler.to_quaternion()
            for output_node in [normal_file_output, depth_file_output,albedo_file_output]:
                output_node.base_path = ''
            scene.render.filepath = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/image_%03d' % image_idx
            # image_file_output.file_slots[0].path = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/image%d' % image_idx
            normal_file_output.file_slots[0].path = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/normal_%03d' % image_idx
            depth_file_output.file_slots[0].path = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/depth_%03d' % image_idx
            albedo_file_output.file_slots[0].path = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/albedo_%03d' % image_idx
            bpy.ops.render.render(use_viewport=True,write_still=True)
            #context.scene.render.filepath = render_path % image_idx
            # Log: index, quaternion (w, x, y, z), then Euler angles in degrees.
            f_quat.write('%08d,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f\n' % (image_idx, quat[0], quat[1], quat[2], quat[3], a * 180 /pi, b * 180 / pi, g * 180 / pi))
            image_idx = image_idx + 1
f_quat.close()
| [
"591599635@qq.com"
] | 591599635@qq.com |
eb893d285ceee9ef39eb80f1f31bd2245758fab6 | f72cbc110971114def6e090a6cb6e3171839ab80 | /blog_api/migrations/0003_auto_20200906_2326.py | 6cd0e213281ba8bc001ed6aaa41ebfbdb087f875 | [] | no_license | fountaein/blog_api_with_DRF | 4116ac0b34109fc0a04935bdf8df63b116d36c45 | 9eb75b949a22501f92a79b5c975b7f04a23bd268 | refs/heads/master | 2022-12-14T18:40:34.858342 | 2020-09-16T16:35:05 | 2020-09-16T16:35:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 3.1.1 on 2020-09-06 23:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: widens Post.slug to 250 chars and makes it unique.
    dependencies = [
        ('blog_api', '0002_auto_20200906_2317'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='slug',
            field=models.SlugField(max_length=250, unique=True),
        ),
    ]
| [
"oluchi-orji"
] | oluchi-orji |
9bbee4cd840022f209e33c5f6dc59cb8818a2fef | 5d4eb8d1ea124f918ca69edb9034dbadfb600df6 | /transfer.py | 0324a714261762831a34c2f5c81b8bb5764066fa | [
"MIT"
] | permissive | woomurf/color_transfer | 4cd320ad8f4e14a3aaebd39a43dab3b790e9ba49 | 7628797b98d3491c51ac77affdbd4d7a1332d9c2 | refs/heads/master | 2022-11-07T23:58:19.132534 | 2020-07-01T08:00:53 | 2020-07-01T08:00:53 | 275,990,205 | 0 | 0 | MIT | 2020-06-30T03:44:28 | 2020-06-30T03:44:28 | null | UTF-8 | Python | false | false | 708 | py | # USAGE
# python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg
# import the necessary packages
from color_transfer import color_transfer
import numpy as np
import cv2
def runTransfer(source, target, output=None):
    """Transfer the color distribution of `source` onto `target`.

    :param source: path of the image whose colors are borrowed
    :param target: path of the image to be recolored
    :param output: unused; kept for backward compatibility with callers
    :return: the recolored image as an RGB array, resized to 300px width
             with the aspect ratio preserved
    :raises IOError: when either image path cannot be read
    """
    # load the images; cv2.imread silently returns None on failure, so
    # fail loudly instead of crashing later inside color_transfer.
    source_img = cv2.imread(source)
    target_img = cv2.imread(target)
    if source_img is None:
        raise IOError("could not read source image: %s" % source)
    if target_img is None:
        raise IOError("could not read target image: %s" % target)
    # transfer the color distribution from the source image
    # to the target image
    transfer = color_transfer(source_img, target_img)
    # Resize to a fixed 300px width, preserving aspect ratio.
    width = 300
    r = width / float(transfer.shape[1])
    dim = (width, int(transfer.shape[0] * r))
    resized = cv2.resize(transfer, dim, interpolation=cv2.INTER_AREA)
    # OpenCV uses BGR ordering; convert for RGB consumers.
    return cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
"comcom@ComComui-iMac.local"
] | comcom@ComComui-iMac.local |
cbbc3b61f70dd176ea0fa764e83afc1175e49f9e | bf07c77c6618a146631b95064b1aa0862df9e8dc | /tests/biomodel_linearise_test.py | 1aba69d4e6b0abac25f3dd8d3da704976e213584 | [] | no_license | AlgorithmicAmoeba/gpu_se | 934bd9e36fd4831cd9b5edb160d6a3287ab4883c | 18ff5dd1a245388eb0b3ee28f0b562259aed22ba | refs/heads/master | 2022-12-31T16:36:54.076664 | 2020-10-27T11:46:03 | 2020-10-27T11:46:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | import numpy
import model
import pytest
def test_linearise():
bioreactor = model.Bioreactor(
X0=model.Bioreactor.find_SS(
numpy.array([0.06, 0.2]),
# Ng, Nx, Nfa, Ne, Nh
numpy.array([260/180, 640/24.6, 1000/116, 0, 0])
),
high_N=False
)
lin_model = model.LinearModel.create_LinearModel(
bioreactor,
x_bar=model.Bioreactor.find_SS(
numpy.array([0.04, 0.1]),
# Ng, Nx, Nfa, Ne, Nh
numpy.array([260/180, 640/24.6, 1000/116, 0, 0])
),
# Fg_in (L/h), Cg (mol/L), Fm_in (L/h)
u_bar=numpy.array([0.04, 0.1]),
T=1
)
assert lin_model.A[0, 0] == pytest.approx(0.72648)
| [
"29543948+darren-roos@users.noreply.github.com"
] | 29543948+darren-roos@users.noreply.github.com |
3f90b47ab70aaa8d7c37f4f705693a1c72b6b260 | 5650fa4c2fc3d1758f942695e2fb16d41fb29729 | /build/env/lib/python2.7/site-packages/coverage-3.2-py2.7-linux-x86_64.egg/coverage/html.py | 4d51eb3439b2555945ea132005929ca614e29cbe | [] | no_license | 2CloudMan/cloudbim | 9c0453bf06488c19d76559d0c49e7379cca41408 | 9cb19ace35fa4eefd0be022aa81d2f9d8741b801 | refs/heads/master | 2021-01-18T20:21:45.964969 | 2015-06-05T10:42:55 | 2015-06-05T10:42:55 | 32,972,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,162 | py | """HTML reporting for Coverage."""
import os, re, shutil
from coverage import __url__, __version__ # pylint: disable-msg=W0611
from coverage.phystokens import source_token_lines
from coverage.report import Reporter
from coverage.templite import Templite
# Disable pylint msg W0612, because a bunch of variables look unused, but
# they're accessed in a templite context via locals().
# pylint: disable-msg=W0612
def data_filename(fname):
    """Return the path to a data file of ours, next to this module."""
    here = os.path.split(__file__)[0]
    return os.path.join(here, fname)
def data(fname):
    """Return the contents of a data file of ours.

    BUG FIX: the original leaked the file handle (open().read() with no
    close); try/finally keeps this compatible with old Pythons.
    """
    f = open(data_filename(fname))
    try:
        return f.read()
    finally:
        f.close()
class HtmlReporter(Reporter):
    """HTML reporting: one annotated page per source file plus an index."""
    def __init__(self, coverage, ignore_errors=False):
        super(HtmlReporter, self).__init__(coverage, ignore_errors)
        # Output directory; set by the base class during report_files().
        self.directory = None
        self.source_tmpl = Templite(data("htmlfiles/pyfile.html"), globals())
        # Per-file summaries accumulated by html_file() for the index page.
        self.files = []
        # Whether branch (arc) data was collected.
        self.arcs = coverage.data.has_arcs()
    def report(self, morfs, directory, omit_prefixes=None):
        """Generate an HTML report for `morfs`.
        `morfs` is a list of modules or filenames. `directory` is where to put
        the HTML files. `omit_prefixes` is a list of strings, prefixes of
        modules to omit from the report.
        """
        assert directory, "must provide a directory for html reporting"
        # Process all the files.
        self.report_files(self.html_file, morfs, directory, omit_prefixes)
        # Write the index file.
        self.index_file()
        # Create the once-per-directory files.
        for static in [
            "style.css", "coverage_html.js",
            "jquery-1.3.2.min.js", "jquery.tablesorter.min.js"
            ]:
            shutil.copyfile(
                data_filename("htmlfiles/" + static),
                os.path.join(directory, static)
                )
    def html_file(self, cu, analysis):
        """Generate an HTML file for one source file."""
        source = cu.source_file().read()
        nums = analysis.numbers
        missing_branch_arcs = analysis.missing_branch_arcs()
        n_par = 0 # accumulated below.
        arcs = self.arcs
        # These classes determine which lines are highlighted by default.
        c_run = " run hide_run"
        c_exc = " exc"
        c_mis = " mis"
        c_par = " par" + c_run
        lines = []
        for lineno, line in enumerate(source_token_lines(source)):
            lineno += 1 # 1-based line numbers.
            # Figure out how to mark this line.
            line_class = ""
            annotate_html = ""
            annotate_title = ""
            if lineno in analysis.statements:
                line_class += " stm"
            # Classify: excluded beats missing beats partial-branch beats run.
            if lineno in analysis.excluded:
                line_class += c_exc
            elif lineno in analysis.missing:
                line_class += c_mis
            elif self.arcs and lineno in missing_branch_arcs:
                line_class += c_par
                n_par += 1
                # Annotate with the destination lines of untaken branches;
                # -1 marks a jump out of the function.
                annlines = []
                for b in missing_branch_arcs[lineno]:
                    if b == -1:
                        annlines.append("exit")
                    else:
                        annlines.append(str(b))
                annotate_html = " ".join(annlines)
                if len(annlines) > 1:
                    annotate_title = "no jumps to these line numbers"
                elif len(annlines) == 1:
                    annotate_title = "no jump to this line number"
            elif lineno in analysis.statements:
                line_class += c_run
            # Build the HTML for the line
            html = ""
            for tok_type, tok_text in line:
                if tok_type == "ws":
                    html += escape(tok_text)
                else:
                    tok_html = escape(tok_text) or ' '
                    html += "<span class='%s'>%s</span>" % (tok_type, tok_html)
            lines.append({
                'html': html,
                'number': lineno,
                'class': line_class.strip() or "pln",
                'annotate': annotate_html,
                'annotate_title': annotate_title,
            })
        # Write the HTML page for this file.
        html_filename = cu.flat_rootname() + ".html"
        html_path = os.path.join(self.directory, html_filename)
        # locals() hands the template every local defined above (lines, nums, ...).
        html = spaceless(self.source_tmpl.render(locals()))
        fhtml = open(html_path, 'w')
        fhtml.write(html)
        fhtml.close()
        # Save this file's information for the index file.
        self.files.append({
            'nums': nums,
            'par': n_par,
            'html_filename': html_filename,
            'cu': cu,
        })
    def index_file(self):
        """Write the index.html file for this report."""
        index_tmpl = Templite(data("htmlfiles/index.html"), globals())
        files = self.files
        arcs = self.arcs
        totals = sum([f['nums'] for f in files])
        fhtml = open(os.path.join(self.directory, "index.html"), "w")
        fhtml.write(index_tmpl.render(locals()))
        fhtml.close()
# Helpers for templates and generating HTML
def escape(t):
    """HTML-escape the text in t.

    BUG FIX: the replacement strings had been entity-decoded at some point
    ("&amp;" became "&", "&quot;" became a bare quote, breaking the syntax),
    turning this into a no-op; the proper HTML entities are restored here.
    """
    return (t
        # Convert HTML special chars into HTML entities. '&' must go first
        # so already-produced entities are not double-escaped.
        .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
        .replace("'", "&#39;").replace('"', "&quot;")
        # Convert runs of spaces: each pair becomes "&nbsp; ".
        .replace("  ", "&nbsp; ")
        # A second pass deals with odd-length runs left by the first.
        .replace("  ", "&nbsp; ")
        )
def format_pct(p):
    """Format a percentage value for the HTML reports (no decimals)."""
    return "{0:.0f}".format(p)
def spaceless(html):
    """Squeeze out some annoying extra space from an HTML string.
    Nicely-formatted templates mean lots of extra space in the result. Get
    rid of some.
    """
    # Raw string: ">\s+<p " in a plain literal is an invalid escape sequence
    # (DeprecationWarning, then an error in newer Pythons).
    html = re.sub(r">\s+<p ", ">\n<p ", html)
    return html
| [
"linmiancheng@gmail.com"
] | linmiancheng@gmail.com |
6259b00f99bd0193a97019a733fdc7d17fd4e74b | 916773e4af7367022067abf2e92bc8ab7302b1e5 | /trunk/prodRoot/desktopApp/test/imap/imapServer.py | 270d663e89626782dde57cc0a1ac2653dc86f92d | [] | no_license | weijia/ufs | 814ac76a9a44a931803971cb4edcefd79c87d807 | c43cdae2dfe89b747b6970138ccdf9ddf7f766b3 | refs/heads/master | 2016-09-01T18:35:33.754862 | 2012-08-14T09:02:40 | 2012-08-14T09:02:40 | 3,439,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,361 | py | from twisted.mail import imap4, maildir
from twisted.internet import reactor, defer, protocol
from twisted.cred import portal, checkers, credentials
from twisted.cred import error as credError
from twisted.python import filepath
from zope.interface import implements
import time, os, random, pickle
MAILBOXDELIMITER = "."
class IMAPUserAccount(object):
implements(imap4.IAccount)
def __init__(self, userDir):
self.dir = userDir
self.mailboxCache = {}
# make sure Inbox exists
inbox = self._getMailbox("Inbox", create=True)
def listMailboxes(self, ref, wildcard):
for box in os.listdir(self.dir):
yield box, self._getMailbox(box)
def select(self, path, rw=True):
"return an object implementing IMailbox for the given path"
return self._getMailbox(path)
def _getMailbox(self, path, create=False):
"""
Helper function to get a mailbox object at the given
path, optionally creating it if it doesn't already exist.
"""
# According to the IMAP spec, Inbox is case-insensitive
pathParts = path.split(MAILBOXDELIMITER)
if pathParts[0].lower() == 'inbox': pathParts[0] = 'Inbox'
path = MAILBOXDELIMITER.join(pathParts)
if not self.mailboxCache.has_key(path):
fullPath = os.path.join(self.dir, path)
if not os.path.exists(fullPath):
if create:
maildir.initializeMaildir(fullPath)
else:
raise KeyError, "No such mailbox"
self.mailboxCache[path] = IMAPMailbox(fullPath)
return self.mailboxCache[path]
def create(self, path):
"create a mailbox at path and return it"
self._getMailbox(path, create=True)
def delete(self, path):
"delete the mailbox at path"
raise imap4.MailboxException("Permission denied.")
def rename(self, oldname, newname):
"rename a mailbox"
oldPath = os.path.join(self.dir, oldname)
newPath = os.path.join(self.dir, newname)
os.rename(oldPath, newPath)
def isSubscribed(self, path):
"return a true value if user is subscribed to the mailbox"
return self._getMailbox(path).metadata.get('subscribed', False)
def subscribe(self, path):
"mark a mailbox as subscribed"
box = self._getMailbox(path)
box.metadata['subscribed'] = True
box.saveMetadata()
return True
def unsubscribe(self, path):
"mark a mailbox as unsubscribed"
box = self._getMailbox(path)
box.metadata['subscribed'] = False
box.saveMetadata()
return True
class ExtendedMaildir(maildir.MaildirMailbox):
    """
    Extends maildir.MaildirMailbox to expose more
    of the underlying filename data.
    """

    def __iter__(self):
        "iterates through the full paths of all messages in the maildir"
        return iter(self.list)

    def __len__(self):
        return len(self.list)

    def __getitem__(self, i):
        return self.list[i]

    def deleteMessage(self, filename):
        """Remove a message file from disk and from the in-memory list."""
        index = self.list.index(filename)
        os.remove(filename)
        del self.list[index]
class IMAPMailbox(object):
    """Maildir-backed IMAP mailbox.

    Flags and UID bookkeeping live in a pickle file next to the maildir
    (self.metadataFile), keyed by message filename / UID.
    """
    implements(imap4.IMailbox)

    def __init__(self, path):
        self.maildir = ExtendedMaildir(path)
        self.metadataFile = os.path.join(path, '.imap-metadata.pickle')
        if os.path.exists(self.metadataFile):
            self.metadata = pickle.load(open(self.metadataFile, 'r+b'))
        else:
            self.metadata = {}
        self.initMetadata()
        self.listeners = []
        self._assignUIDs()

    def initMetadata(self):
        """Fill in any missing metadata keys with sane defaults."""
        if 'flags' not in self.metadata:
            self.metadata['flags'] = {}  # dict of message IDs to flags
        if 'uidvalidity' not in self.metadata:
            # create a unique integer ID to identify this version of
            # the mailbox, so the client could tell if it was deleted
            # and replaced by a different mailbox with the same name
            self.metadata['uidvalidity'] = random.randint(1000000, 9999999)
        if 'uids' not in self.metadata:
            self.metadata['uids'] = {}
        if 'uidnext' not in self.metadata:
            self.metadata['uidnext'] = 1  # next UID to be assigned

    def saveMetadata(self):
        """Persist the metadata dict to disk."""
        pickle.dump(self.metadata, open(self.metadataFile, 'w+b'))

    def _assignUIDs(self):
        # make sure every message has a uid
        for messagePath in self.maildir:
            messageFile = os.path.basename(messagePath)
            if messageFile not in self.metadata['uids']:
                self.metadata['uids'][messageFile] = self.metadata['uidnext']
                self.metadata['uidnext'] += 1
        self.saveMetadata()

    def getHierarchicalDelimiter(self):
        return MAILBOXDELIMITER

    def getFlags(self):
        "return list of flags supported by this mailbox"
        return [r'\Seen', r'\Unseen', r'\Deleted',
                r'\Flagged', r'\Answered', r'\Recent']

    def getMessageCount(self):
        return len(self.maildir)

    def getRecentCount(self):
        return 0

    def getUnseenCount(self):
        def messageIsUnseen(filename):
            filename = os.path.basename(filename)
            uid = self.metadata['uids'].get(filename)
            flags = self.metadata['flags'].get(uid, [])
            if not r'\Seen' in flags:
                return True
        return len(filter(messageIsUnseen, self.maildir))

    def isWriteable(self):
        return True

    def getUIDValidity(self):
        return self.metadata['uidvalidity']

    def getUID(self, messageNum):
        filename = os.path.basename(self.maildir[messageNum - 1])
        return self.metadata['uids'][filename]

    def getUIDNext(self):
        # Fix: was `self.folder.metadata` but no `folder` attribute exists on
        # this class -- the metadata dict lives directly on self.
        return self.metadata['uidnext']

    def _uidMessageSetToSeqDict(self, messageSet):
        """
        take a MessageSet object containing UIDs, and return
        a dictionary mapping sequence numbers to filenames
        """
        # if messageSet.last is None, it means 'the end', and needs to
        # be set to a sane high number before attempting to iterate
        # through the MessageSet
        if not messageSet.last:
            messageSet.last = self.metadata['uidnext']
        allUIDs = []
        for filename in self.maildir:
            shortFilename = os.path.basename(filename)
            allUIDs.append(self.metadata['uids'][shortFilename])
        allUIDs.sort()
        seqMap = {}
        for uid in messageSet:
            # the message set covers a span of UIDs. not all of them
            # will necessarily exist, so check each one for validity
            if uid in allUIDs:
                sequence = allUIDs.index(uid) + 1
                seqMap[sequence] = self.maildir[sequence - 1]
        return seqMap

    def _seqMessageSetToSeqDict(self, messageSet):
        """
        take a MessageSet object containing message sequence numbers,
        and return a dictionary mapping sequence number to filenames
        """
        # if messageSet.last is None, it means 'the end', and needs to
        # be set to a sane high number before attempting to iterate
        # through the MessageSet
        if not messageSet.last:
            messageSet.last = len(self.maildir) - 1
        seqMap = {}
        for messageNo in messageSet:
            seqMap[messageNo] = self.maildir[messageNo - 1]
        return seqMap

    def fetch(self, messages, uid):
        """Yield (sequence, MaildirMessage) for the requested message set."""
        if uid:
            messagesToFetch = self._uidMessageSetToSeqDict(messages)
        else:
            messagesToFetch = self._seqMessageSetToSeqDict(messages)
        for seq, filename in messagesToFetch.items():
            uid = self.getUID(seq)
            flags = self.metadata['flags'].get(uid, [])
            yield seq, MaildirMessage(open(filename).read(), uid, flags)

    def addListener(self, listener):
        self.listeners.append(listener)
        return True

    def removeListener(self, listener):
        self.listeners.remove(listener)
        return True

    def requestStatus(self, path):
        return imap4.statusRequestHelper(self, path)

    def addMessage(self, msg, flags=None, date=None):
        if flags is None:
            flags = []
        return self.maildir.appendMessage(msg).addCallback(
            self._addedMessage, flags)

    def _addedMessage(self, _, flags):
        # the first argument is the value returned from
        # MaildirMailbox.appendMessage. It doesn't contain any meaningful
        # information and can be discarded. Using the name "_" is a Twisted
        # idiom for unimportant return values.
        self._assignUIDs()
        messageFile = os.path.basename(self.maildir[-1])
        messageID = self.metadata['uids'][messageFile]
        self.metadata['flags'][messageID] = flags
        self.saveMetadata()

    def store(self, messageSet, flags, mode, uid):
        """Set/add/remove flags on messages; returns {sequence: flags}."""
        if uid:
            messages = self._uidMessageSetToSeqDict(messageSet)
        else:
            messages = self._seqMessageSetToSeqDict(messageSet)
        setFlags = {}
        for seq, filename in messages.items():
            uid = self.getUID(seq)
            if mode == 0:  # replace flags
                messageFlags = self.metadata['flags'][uid] = flags
            else:
                messageFlags = self.metadata['flags'].setdefault(uid, [])
                for flag in flags:
                    # mode 1 is append, mode -1 is delete
                    if mode == 1 and not messageFlags.count(flag):
                        messageFlags.append(flag)
                    elif mode == -1 and messageFlags.count(flag):
                        messageFlags.remove(flag)
            setFlags[seq] = messageFlags
        self.saveMetadata()
        return setFlags

    def expunge(self):
        "remove all messages marked for deletion"
        removed = []
        for filename in self.maildir:
            uid = self.metadata['uids'].get(os.path.basename(filename))
            if r"\Deleted" in self.metadata['flags'].get(uid, []):
                self.maildir.deleteMessage(filename)
                # you could also throw away the metadata here
                removed.append(uid)
        return removed

    def destroy(self):
        "complete remove the mailbox and all its contents"
        raise imap4.MailboxException("Permission denied.")
from cStringIO import StringIO
import email
class MaildirMessagePart(object):
    """Adapts an email.Message (or one of its sub-parts) to IMessagePart."""
    implements(imap4.IMessagePart)

    def __init__(self, mimeMessage):
        self.message = mimeMessage
        self.data = str(self.message)

    def getHeaders(self, negate, *names):
        """
        Return a dict mapping header name to header value. If *names
        is empty, match all headers; if negate is true, return only
        headers _not_ listed in *names.
        """
        if not names:
            names = self.message.keys()
        headers = {}
        if negate:
            # NOTE(review): the comparison assumes *names arrive upper-cased
            # (as imap4 passes them) -- confirm before reusing elsewhere.
            for header in self.message.keys():
                if header.upper() not in names:
                    headers[header.lower()] = self.message.get(header, '')
        else:
            for name in names:
                headers[name.lower()] = self.message.get(name, '')
        return headers

    def getBodyFile(self):
        "return a file-like object containing this message's body"
        bodyData = str(self.message.get_payload())
        return StringIO(bodyData)

    def getSize(self):
        return len(self.data)

    def getInternalDate(self):
        return self.message.get('Date', '')

    def isMultipart(self):
        return self.message.is_multipart()

    def getSubPart(self, partNo):
        return MaildirMessagePart(self.message.get_payload(partNo))
class MaildirMessage(MaildirMessagePart):
    """A top-level message: a message part plus its UID and flag list."""
    implements(imap4.IMessage)

    def __init__(self, messageData, uid, flags):
        self.data = messageData
        self.message = email.message_from_string(self.data)
        self.uid = uid
        self.flags = flags

    def getUID(self):
        return self.uid

    def getFlags(self):
        return self.flags
class MailUserRealm(object):
    """twisted.cred realm: maps an authenticated username to an IMAP account."""
    implements(portal.IRealm)

    avatarInterfaces = {
        imap4.IAccount: IMAPUserAccount,
    }

    def __init__(self, baseDir):
        self.baseDir = baseDir

    def requestAvatar(self, avatarId, mind, *interfaces):
        for requestedInterface in interfaces:
            if requestedInterface in self.avatarInterfaces:
                # make sure the user dir exists (avatarId is username)
                userDir = os.path.join(self.baseDir, avatarId)
                if not os.path.exists(userDir):
                    os.mkdir(userDir)
                # return an instance of the correct class
                avatarClass = self.avatarInterfaces[requestedInterface]
                avatar = avatarClass(userDir)
                # null logout function: take no arguments and do nothing
                logout = lambda: None
                return defer.succeed((requestedInterface, avatar, logout))
        # none of the requested interfaces was supported
        raise KeyError("None of the requested interfaces is supported")
def passwordFileToDict(filename):
    """Parse a "username:password" file into a dict.

    Blank lines and lines without a colon are skipped. Only the FIRST colon
    splits the line, so passwords may themselves contain ':' (the old
    split(':') raised ValueError on such lines). The file handle is closed
    deterministically via `with` (the old bare file() leaked it).
    """
    passwords = {}
    with open(filename) as fp:
        for line in fp:
            if line and line.count(':'):
                username, password = line.strip().split(':', 1)
                passwords[username] = password
    return passwords
class CredentialsChecker(object):
    """twisted.cred checker backed by an in-memory username->password map."""
    implements(checkers.ICredentialsChecker)

    credentialInterfaces = (credentials.IUsernamePassword,
                            credentials.IUsernameHashedPassword)

    def __init__(self, passwords):
        "passwords: a dict-like object mapping usernames to passwords"
        self.passwords = passwords

    def requestAvatarId(self, credentials):
        """
        check to see if the supplied credentials authenticate.
        if so, return an 'avatar id', in this case the name of
        the IMAP user.

        The supplied credentials will implement one of the classes
        in self.credentialInterfaces. In this case both
        IUsernamePassword and IUsernameHashedPassword have a
        checkPassword method that takes the real password and checks
        it against the supplied password.
        """
        username = credentials.username
        if username in self.passwords:
            realPassword = self.passwords[username]
            # checkPassword may return a Deferred, so wrap it.
            checking = defer.maybeDeferred(
                credentials.checkPassword, realPassword)
            # pass result of checkPassword, and the username that was
            # being authenticated, to self._checkedPassword
            checking.addCallback(self._checkedPassword, username)
            return checking
        else:
            raise credError.UnauthorizedLogin("No such user")

    def _checkedPassword(self, matched, username):
        if matched:
            # password was correct
            return username
        else:
            raise credError.UnauthorizedLogin("Bad password")
class IMAPServerProtocol(imap4.IMAP4Server):
"Subclass of imap4.IMAP4Server that adds debugging."
debug = True
def lineReceived(self, line):
if self.debug:
print "CLIENT:", line
imap4.IMAP4Server.lineReceived(self, line)
def sendLine(self, line):
imap4.IMAP4Server.sendLine(self, line)
if self.debug:
print "SERVER:", line
class IMAPFactory(protocol.Factory):
    """Builds IMAPServerProtocol instances wired to the shared cred portal."""
    protocol = IMAPServerProtocol
    portal = None  # placeholder; assigned by startup code before listening

    def buildProtocol(self, address):
        p = self.protocol()
        p.portal = self.portal
        p.factory = self
        return p
if __name__ == "__main__":
    import sys

    dataDir = sys.argv[1]
    # Use a distinct name: the original rebound `portal`, shadowing the
    # twisted.cred.portal module it was built from.
    imapPortal = portal.Portal(MailUserRealm(dataDir))
    passwordFile = os.path.join(dataDir, 'passwords.txt')
    passwords = passwordFileToDict(passwordFile)
    passwordChecker = CredentialsChecker(passwords)
    imapPortal.registerChecker(passwordChecker)
    factory = IMAPFactory()
    factory.portal = imapPortal
    # IMAP's standard port; requires privileges on most systems.
    reactor.listenTCP(143, factory)
    reactor.run()
"richardwangwang@gmail.com"
] | richardwangwang@gmail.com |
efad5ca4427330cf20a8820095d194e301d77f8e | e85ecab9e39316ba116e83d660feca54b4543d95 | /chapter03/disk_cache.py | 019a0c2db08118c30fb3763d37f26ab5b3ad0d9b | [] | no_license | xyq946692052/crawler | 08c7218ea25115099f83650db74a23238b9d3bbc | b1130e52c3eae48c03282cf1fd0f51424f5d48ae | refs/heads/master | 2020-07-04T06:49:22.711514 | 2016-10-11T17:14:09 | 2016-10-11T17:14:09 | 67,964,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,556 | py | import os
import re
import urlparse
import shutil
import zlib
from datetime import datetime,timedelta
try:
import cPickle as pickle
except ImportError:
import pickle
from link_crawler import link_crawler
class DiskCache:
    """URL -> downloaded-result cache on the local filesystem.

    Values are pickled (result, utc-timestamp) tuples, optionally
    zlib-compressed, stored under a path derived from the URL.
    Entries older than `expires` raise KeyError on read.
    """

    def __init__(self, cache_dir='cache', expires=timedelta(days=30), compress=True):
        self.cache_dir = cache_dir
        self.expires = expires
        self.compress = compress

    def __getitem__(self, url):
        """Load the cached result for `url`; KeyError if absent or expired."""
        path = self.url_to_path(url)
        if os.path.exists(path):
            with open(path, 'rb') as fp:
                data = fp.read()
            if self.compress:
                data = zlib.decompress(data)
            result, timestamp = pickle.loads(data)
            if self.has_expired(timestamp):
                raise KeyError(url + ' has expire')
            return result
        else:
            raise KeyError(url + ' does not exist')

    def __setitem__(self, url, result):
        """save data to disk for this url"""
        path = self.url_to_path(url)
        folder = os.path.dirname(path)
        if not os.path.exists(folder):
            os.makedirs(folder)
        data = pickle.dumps((result, datetime.utcnow()))
        if self.compress:
            data = zlib.compress(data)
        with open(path, 'wb') as fp:
            fp.write(data)

    def __delitem__(self, url):
        """Remove the value for `url` and any empty parent sub-directories."""
        # Fix: was self._key_path (undefined method) and os.path.diename (typo).
        path = self.url_to_path(url)
        try:
            os.remove(path)
            os.removedirs(os.path.dirname(path))
        except OSError:
            pass

    def url_to_path(self, url):
        """create file system path for this URL"""
        # Local import with py2/py3 fallback so the class also works on
        # Python 3, where the py2 urlparse module no longer exists.
        try:
            from urllib.parse import urlsplit
        except ImportError:
            from urlparse import urlsplit
        components = urlsplit(url)
        path = components.path
        if not path:
            path = '/index.html'
        elif path.endswith('/'):
            path += 'index.html'
        filename = components.netloc + path + components.query
        # Replace characters that are unsafe in filenames.
        filename = re.sub(r'[^/0-9a-zA-Z\-.,;_]', '_', filename)
        # Cap each path segment at 255 chars (common filesystem limit).
        filename = '/'.join(segment[:255] for segment in filename.split('/'))
        return os.path.join(self.cache_dir, filename)

    def has_expired(self, timestamp):
        """return whether this timestamp has expired"""
        return datetime.utcnow() > timestamp + self.expires

    def clear(self):
        """Remove all the cache values"""
        if os.path.exists(self.cache_dir):
            shutil.rmtree(self.cache_dir)
if __name__ == '__main__':
    # Crawl the demo site, persisting every download through DiskCache.
    link_crawler('http://example.webscraping.com/', '/(index|view)', cache=DiskCache())
| [
"xyq2312@sina.com"
] | xyq2312@sina.com |
b409ec0ab13a58cedef6f2615324562be22891c5 | 4b1be24295e1fd62538b45f37af3938b5a9efe51 | /cart/views.py | 8abd76a338cfc5fb3cd1daaf418679153ccd791f | [] | no_license | chemt/vsad | a734f212aaed198f4cdd843206f0df170808877c | dcf6ef4b765f7166c625f928b7bc1f9627b45b38 | refs/heads/master | 2021-01-23T03:12:42.960496 | 2010-06-03T11:28:36 | 2010-06-03T11:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py | # -*- coding: utf-8 -*-
from cart import Cart
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
from settings import REPORT_EMAILS
from django.template import loader
from forms import VIPForm
#
#from menu.models import MenuItem
#
#
#def rest_add_to_cart(request, id, quantity):
# product = MenuItem.objects.get(id=id)
# cart = Cart(request)
# cart.add(product, product.unit_price, quantity)
#
##def remove_from_cart(request, product_id):
## product = Product.objects.get(id=product_id)
## cart = Cart(request)
## cart.remove(product)
#
def get_cart(request):
    """Bounce back to the referring page; the cart is rendered inline there."""
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def clear_cart(request):
    """Empty the session cart, then redirect to the referring page."""
    cart = Cart(request)
    cart.clear()
    return get_cart(request)
def inc_cart_item(request, id):
    """Increase the quantity of cart item `id`, then redirect back."""
    cart = Cart(request)
    cart.inc(id)
    return get_cart(request)
def dec_cart_item(request, id):
    """Decrease the quantity of cart item `id`, then redirect back."""
    cart = Cart(request)
    cart.dec(id)
    return get_cart(request)
def del_cart_item(request, id):
    """Remove cart item `id` entirely, then redirect back."""
    cart = Cart(request)
    cart.delete(id)
    return get_cart(request)
def checkout(request):
    """Validate the order form, e-mail the order report and finalize the cart.

    GET shows an empty form; an invalid POST re-renders the bound form with
    errors; a valid POST sends the report to REPORT_EMAILS and shows the cart.
    """
    if request.method == 'POST':
        form = VIPForm(request.POST)
        if form.is_valid():
            cart = Cart(request)
            message_text = render_to_string(
                'cart/email.txt', {'form': form.cleaned_data, 'cart': cart})
            send_mail(u'Нове замовлення. VIP %s' % form.cleaned_data['vip'],
                      message_text, 'auto@vyshnevysad.com.ua',
                      REPORT_EMAILS,
                      fail_silently=False)
            cart.checkout()
            return render_to_response(
                'cart/view_cart.html', RequestContext(request, dict(cart=cart)))
    else:
        form = VIPForm()  # unbound form for the initial GET
    return render_to_response(
        'cart/vip_form.html', RequestContext(request, dict(form=form)))
"chemt@ukr.net"
] | chemt@ukr.net |
class Int:
    """MySQL wire-protocol integer reader.

    See https://dev.mysql.com/doc/internals/en/integer.html
    """

    def __init__(self, package, length=-1, type='fix'):
        self.package = package  # object exposing next(length) -> int
        self.length = length    # byte width for int<n> fixed integers
        self.type = type        # 'fix' or 'lenenc'

    def next(self):
        """Consume and return the next integer from the package."""
        # int<n>: fixed-length integer of self.length bytes.
        if self.type == 'fix' and self.length > 0:
            return self.package.next(self.length)

        # int<lenenc>: the first byte selects the width of the value.
        if self.type == 'lenenc':
            byte = self.package.next(1)
            if byte < 0xfb:
                # Fix: a prefix byte < 0xfb IS the 1-byte value; the old code
                # re-read the stream, consuming one byte too many.
                return byte
            elif byte == 0xfc:
                return self.package.next(2)
            elif byte == 0xfd:
                return self.package.next(3)
            elif byte == 0xfe:
                return self.package.next(8)
class Str:
    """MySQL wire-protocol string reader.

    See https://dev.mysql.com/doc/internals/en/string.html
    """

    def __init__(self, package, length=-1, type="fix"):
        self.package = package  # object exposing next(length, type) -> value
        self.length = length    # byte count for string<fix>
        self.type = type        # 'fix', 'lenenc', 'var', 'eof' or 'null'

    def next(self):
        """Consume and return the next string from the package."""
        # string<fix>: exactly self.length bytes.
        if self.type == 'fix' and self.length > 0:
            return self.package.next(self.length, str)

        # string<lenenc>: a length-encoded prefix byte, then the payload.
        elif self.type == 'lenenc':
            length = self.package.next(1)
            if length == 0x00:
                return ""
            elif length == 0xfb:
                return "NULL"
            elif length == 0xff:
                return "undefined"
            return self.package.next(length, str)

        # string<var>: length given by a preceding int<lenenc>.
        elif self.type == 'var':
            length = Int(self.package, type='lenenc').next()
            return self.package.next(length, str)

        # string<eof>: the rest of the packet.
        elif self.type == 'eof':
            return self.package.next(type=str)

        # string<null>: bytes up to (not including) a 0x00 terminator.
        elif self.type == 'null':
            strbytes = bytearray()
            byte = self.package.next(1)
            while True:
                if byte == 0x00:
                    break
                else:
                    strbytes.append(byte)
                byte = self.package.next(1)
            return strbytes.decode('utf-8')
| [
"elshadaghazade@gmail.com"
] | elshadaghazade@gmail.com |
f817dc9cd7b0ee5cb3fb0d8da067107e84fabd08 | c380976b7c59dadaccabacf6b541124c967d2b5a | /.history/src/data/data_20191021130626.py | 54a8374208393201a7d3ecf5fa63dc428630f047 | [
"MIT"
] | permissive | bkraft4257/kaggle_titanic | b83603563b4a3c995b631e8142fe72e1730a0e2e | f29ea1773773109a867278c001dbd21a9f7b21dd | refs/heads/master | 2020-08-17T12:45:28.653402 | 2019-11-15T16:20:04 | 2019-11-15T16:20:04 | 215,667,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,770 | py | import pandas as pd
import numpy as np
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
    """Load the raw Titanic CSV into a cleaned DataFrame (self.Xy_raw).

    Column names are lower-cased/underscored, 'age' becomes 'age_known',
    'pclass' becomes categorical and 'passengerid' is used as the index.
    """

    def __init__(self, filename: Union[str, Path], age_bins=None, drop_columns=None):
        """Extract Training Data from file or Path.

        Arguments:
            filename: Filename of the CSV data file containing the data.
            age_bins: Bin edges for age discretization (kept for parity with
                TransformData; unused during extraction).
            drop_columns: Columns that should later be dropped.
        """
        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]

        self.filename = filename
        self.age_bins = age_bins  # fix: was accepted but never stored
        self.drop_columns = drop_columns
        self.all_label_columns = ["survived"]
        self.all_feature_columns = [
            "pclass",
            "name",
            "sex",
            "age",
            "sibsp",
            "parch",
            "ticket",
            "fare",
            "cabin",
            "embarked",
        ]
        self.Xy_raw = None
        self.extract_raw()

    def extract_raw(self):
        """Read the CSV and normalize columns into self.Xy_raw."""
        Xy_raw = pd.read_csv(self.filename)
        # Normalize header names: lower case, spaces -> underscores.
        Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
        # 'age' has missing values; rename so estimated ages can fill 'age'.
        Xy_raw = Xy_raw.rename(columns={"age": "age_known"})
        Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
        self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
    """Feature engineering on top of ExtractData.Xy_raw.

    Derives title/last name/cabin fields, fills in missing ages from
    per-(sex, title) averages, and adds is_child / is_travelling_alone flags.
    """

    # Collapse rare honorifics onto the common Mr./Mrs. titles.
    title_translator = {
        "Mlle.": "Mrs.",
        "Mme.": "Mrs.",
        "Sir.": "Mr.",
        "Ms.": "Mrs.",
        "Rev.": "Mr.",
        "": "Mr.",
        "Col.": "Mr.",
        "Capt.": "Mr.",
        "Lady.": "Mrs.",
        "the Countess. of": "Mrs.",
    }

    def __init__(
        self,
        raw_data,
        adult_age_threshold_min=13,
        age_bins=None,
        Xy_age_estimate=None,
        drop_columns=None,
    ):
        """
        Arguments:
            raw_data: an ExtractData instance providing Xy_raw.
            adult_age_threshold_min: ages below this count as children.
            age_bins: bin edges used by calc_age_bins.
            Xy_age_estimate: optional precomputed age-estimate table.
            drop_columns: columns removed by clean().
        """
        if age_bins is None:
            age_bins = [0, 10, 20, 30, 40, 50, 60, np.inf]

        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]

        self.raw = raw_data
        self.adult_age_threshold_min = adult_age_threshold_min
        self.Xy_age_estimate = Xy_age_estimate
        self.age_bins = age_bins
        self.drop_columns = drop_columns  # fix: was accepted but never stored
        self.Xy = self.raw.Xy_raw.copy()

        self.extract_title()
        self.extract_last_name()
        self.extract_cabin_number()
        self.extract_cabin_prefix()
        self.estimate_age()
        self.calc_is_child()
        self.calc_is_travelling_alone()

    def calc_is_travelling_alone(self):
        # No siblings/spouse and no parents/children aboard.
        self.Xy["is_travelling_alone"] = (self.Xy.sibsp == 0) & (self.Xy.parch == 0)

    def calc_is_child(self):
        self.Xy["is_child"] = self.Xy.age < self.adult_age_threshold_min

    def extract_cabin_number(self):
        # Trailing digits of the ticket string.
        self.Xy["cabin_number"] = self.Xy.ticket.str.extract("(\d+)$")

    def extract_cabin_prefix(self):
        # Everything before the last space in the ticket string.
        self.Xy["cabin_prefix"] = self.Xy.ticket.str.extract("^(.+) ")

    def extract_title(self):
        """Parse the honorific from the name and normalize it."""
        self.Xy["title"] = (
            self.Xy.name.apply(lambda x: HumanName(x).title)
            .replace(self.title_translator)
            .replace({"\.": ""}, regex=True)
        )

    def extract_last_name(self):
        self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)

    def calc_age_bins(self):
        """Discretize age into self.age_bins intervals.

        Fix: the original referenced bare `Xy` (NameError) and hard-coded the
        bins, ignoring the age_bins constructor argument.
        """
        self.Xy['age_bin'] = pd.cut(self.Xy.age, bins=self.age_bins)

    def clean(self):
        """Drop "unnecessary" feature columns from the working frame.

        Fix: the original referenced self.Xy_raw, which does not exist on this
        class (only on ExtractData).
        """
        self.Xy = self.Xy.drop(self.drop_columns, axis=1)

    def estimate_age(self, groupby_columns=None):
        """Fill missing ages with the mean age of each (sex, title) group.

        Fix: mutable default argument replaced with None; when a precomputed
        Xy_age_estimate is supplied it is now actually used (the original hit
        an UnboundLocalError in that branch).
        """
        if groupby_columns is None:
            groupby_columns = ["sex", "title"]

        if self.Xy_age_estimate is None:
            Xy_age_estimate = (
                self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
            )
            Xy_age_estimate = Xy_age_estimate.rename(
                columns={"age_known": "age_estimate"}
            )
        else:
            Xy_age_estimate = self.Xy_age_estimate

        out_df = self.Xy.reset_index().merge(Xy_age_estimate, on=groupby_columns)
        out_df["age"] = out_df["age_known"].fillna(out_df["age_estimate"])

        self.Xy = out_df
        self.Xy_age_estimate = Xy_age_estimate
| [
"bob.kraft@infiniteleap.net"
] | bob.kraft@infiniteleap.net |
163e2d3714cc1735092d2594a4fc4132063c42fc | 98ef763bf95851d5fe0fe20c2cf0edc4576a40b2 | /article/urls.py | 23b89fbeab33b5319f40bd5b6456c623b537f883 | [] | no_license | Mohammad-Kh80/class | b02c373e2f0c9bae3f2bd0756a59d8a3ef393169 | 2a65ae5ff5df86594fe9ea5f5ed81eb579f89d1b | refs/heads/main | 2023-07-31T02:42:10.768751 | 2021-09-24T18:48:26 | 2021-09-24T18:48:26 | 408,208,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py |
from django.urls import path
from .views import home
urlpatterns = [
path('home/',home),
]
| [
"Mohammad_Khaefi@yahoo.com"
] | Mohammad_Khaefi@yahoo.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.